From f3eb2eb0ca06a0e227fb99eee68628de6007423c Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 30 Mar 2023 14:50:33 -0400 Subject: [PATCH 001/140] wip: implementing a poc around warp sync --- chain/westend/config.toml | 2 +- dot/network/service.go | 8 +++- dot/network/sync.go | 62 ++++++++++++++++++++++++++++++ dot/network/warp_sync_message.go | 65 ++++++++++++++++++++++++++++++++ internal/sync/service.go | 47 +++++++++++++++++++++++ internal/sync/warp.go | 18 +++++++++ 6 files changed, 199 insertions(+), 3 deletions(-) create mode 100644 dot/network/warp_sync_message.go create mode 100644 internal/sync/service.go create mode 100644 internal/sync/warp.go diff --git a/chain/westend/config.toml b/chain/westend/config.toml index f9403abcf2..15b3e08c8d 100644 --- a/chain/westend/config.toml +++ b/chain/westend/config.toml @@ -11,7 +11,7 @@ state = "" runtime = "" babe = "" grandpa = "" -sync = "" +sync = "trace" digest = "" [init] diff --git a/dot/network/service.go b/dot/network/service.go index 1b868b02ba..a7bcb48f2e 100644 --- a/dot/network/service.go +++ b/dot/network/service.go @@ -31,11 +31,13 @@ const ( // the following are sub-protocols used by the node syncID = "/sync/2" + warpSyncID = "/sync/warp" lightID = "/light/2" blockAnnounceID = "/block-announces/1" transactionsID = "/transactions/1" - maxMessageSize = 1024 * 64 // 64kb for now + warpSyncMaxResponseSize = 16 * 1024 * 1024 + maxMessageSize = 1024 * 64 // 64kb for now ) var ( @@ -246,7 +248,9 @@ func (s *Service) Start() error { s.ctx, s.cancel = context.WithCancel(context.Background()) } - s.host.registerStreamHandler(s.host.protocolID+syncID, s.handleSyncStream) + s.host.registerStreamHandler(s.host.protocolID+syncID, s.handleWarpSyncStream) + // TODO: enable this protocol to receive request from other nodes + //s.host.registerStreamHandler(s.host.protocolID+warpSync, s.handleSyncStream) s.host.registerStreamHandler(s.host.protocolID+lightID, s.handleLightStream) // register block announce protocol diff --git a/dot/network/sync.go b/dot/network/sync.go index 54e32c461f..85a584d7f3 100644 --- a/dot/network/sync.go +++ b/dot/network/sync.go @@ -17,6 +17,34 @@ var ( blockRequestTimeout = time.Second * 20 ) +func (s *Service) RequestWarpProof(to peer.ID, request *WarpSyncProofRequestMessage) (warpSyncResponse interface{}, err error) { + legacyWarpSyncID := s.host.protocolID + warpSyncID + + s.host.p2pHost.ConnManager().Protect(to, "") + defer s.host.p2pHost.ConnManager().Unprotect(to, "") + + ctx, cancel := context.WithTimeout(s.ctx, blockRequestTimeout) + defer cancel() + + stream, err := s.host.p2pHost.NewStream(ctx, to, legacyWarpSyncID) + if err != nil { + return nil, err + } + + defer func() { + err := stream.Close() + if err != nil { + logger.Warnf("failed to close stream: %s", err) + } + }() + + if err = s.host.writeToStream(stream, request); err != nil { + return nil, err + } + + return s.handleWarpSyncProofResponse(stream) +} + // DoBlockRequest sends a request to the given peer. // If a response is received within a certain time period, it is returned, // otherwise an error is returned. @@ -48,6 +76,32 @@ func (s *Service) DoBlockRequest(to peer.ID, req *BlockRequestMessage) (*BlockRe return s.receiveBlockResponse(stream) } +func (s *Service) handleWarpSyncProofResponse(stream libp2pnetwork.Stream) (interface{}, error) { + s.blockResponseBufMu.Lock() + defer s.blockResponseBufMu.Unlock() + + // TODO: should we create another buffer pool for warp proof response buffers? 
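+	// a dedicated pool would keep the (up to 16MiB) warp proof reads from
+	// competing with block responses for the same buffer. A minimal sketch,
+	// assuming a package-level pool (not wired up in this POC):
+	//
+	//	var warpProofBufPool = sync.Pool{
+	//		New: func() any {
+	//			b := make([]byte, warpSyncMaxResponseSize)
+	//			return &b
+	//		},
+	//	}
+	//
+	//	buf := *warpProofBufPool.Get().(*[]byte)
+	//	defer warpProofBufPool.Put(&buf)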
+ buf := s.blockResponseBuf + + n, err := readStream(stream, &buf, warpSyncMaxResponseSize) + if err != nil { + return nil, fmt.Errorf("reading warp sync stream: %w", err) + } + + if n == 0 { + return nil, fmt.Errorf("empty warp sync proof") + } + + fmt.Printf("WARP PROOF BYTES ---> %v\n", buf[:n]) + warpProof := new(WarpSyncProofResponse) + err = warpProof.Decode(buf[:n]) + if err != nil { + panic(fmt.Sprintf("failed to decode warp proof: %s", err)) + } + fmt.Printf("WARP PROOF ---> %v\n", warpProof) + return nil, nil +} + func (s *Service) receiveBlockResponse(stream libp2pnetwork.Stream) (*BlockResponseMessage, error) { // allocating a new (large) buffer every time slows down the syncing by a dramatic amount, // as malloc is one of the most CPU intensive tasks. @@ -91,6 +145,14 @@ func (s *Service) handleSyncStream(stream libp2pnetwork.Stream) { s.readStream(stream, decodeSyncMessage, s.handleSyncMessage, maxBlockResponseSize) } +func (s *Service) handleWarpSyncStream(stream libp2pnetwork.Stream) { + if stream == nil { + return + } + + fmt.Printf("====> %v\n", stream) +} + func decodeSyncMessage(in []byte, _ peer.ID, _ bool) (Message, error) { msg := new(BlockRequestMessage) err := msg.Decode(in) diff --git a/dot/network/warp_sync_message.go b/dot/network/warp_sync_message.go new file mode 100644 index 0000000000..efcc628f57 --- /dev/null +++ b/dot/network/warp_sync_message.go @@ -0,0 +1,65 @@ +package network + +import ( + "fmt" + + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/crypto/ed25519" + "github.com/ChainSafe/gossamer/pkg/scale" +) + +type WarpSyncProofRequestMessage struct { + Begin common.Hash +} + +func (w *WarpSyncProofRequestMessage) String() string { + return fmt.Sprintf("WarpSyncProofRequestMessage Begin=%v", w.Begin) +} + +func (w *WarpSyncProofRequestMessage) Encode() ([]byte, error) { + return scale.Marshal(*w) +} + +func (w *WarpSyncProofRequestMessage) Decode(in []byte) error { + panic("not implemented yet") +} + +type Vote struct { + Hash common.Hash + Number uint32 +} + +type SignedVote struct { + Vote Vote + Signature [64]byte + AuthorityID ed25519.PublicKeyBytes +} + +// Commit contains all the signed precommits for a given block +type Commit struct { + Hash common.Hash + Number uint32 + Precommits []SignedVote +} + +// Justification represents a finality justification for a block +type Justification struct { + Round uint64 + Commit Commit +} + +type WarpSyncFragment struct { + Header types.Header + Justification Justification +} + +type WarpSyncProofResponse struct { + Fragments []WarpSyncFragment + IsFinished bool +} + +func (w *WarpSyncProofResponse) Encode() ([]byte, error) { return nil, nil } +func (w *WarpSyncProofResponse) Decode(in []byte) error { + return scale.Unmarshal(in, w) +} diff --git a/internal/sync/service.go b/internal/sync/service.go new file mode 100644 index 0000000000..ed1ba4e9d1 --- /dev/null +++ b/internal/sync/service.go @@ -0,0 +1,47 @@ +package sync + +import ( + "github.com/ChainSafe/gossamer/dot/network" + "github.com/libp2p/go-libp2p/core/peer" +) + +type Service struct { + blockState interface{} + chainSync interface{} + chainProcessor interface{} + network interface{} + + warpSync *WarpSync +} + +// Start begins the chainSync and chainProcessor modules. 
It begins syncing in bootstrap mode +func (s *Service) Start() error { + go s.warpSync.sync() + return nil +} + +// Stop stops the chainSync and chainProcessor modules +func (s *Service) Stop() error { + return nil +} + +// HandleBlockAnnounceHandshake notifies the `chainSync` module that +// we have received a BlockAnnounceHandshake from the given peer. +func (s *Service) HandleBlockAnnounceHandshake(from peer.ID, msg *network.BlockAnnounceHandshake) error { + return nil +} + +// HandleBlockAnnounce notifies the `chainSync` module that we have received a block announcement from the given peer. +func (s *Service) HandleBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) error { + return nil +} + +// IsSynced exposes the synced state +func (s *Service) IsSynced() bool { + return false +} + +// HighestBlock gets the highest known block number +func (s *Service) HighestBlock() uint { + return 0 +} diff --git a/internal/sync/warp.go b/internal/sync/warp.go new file mode 100644 index 0000000000..ffae43347e --- /dev/null +++ b/internal/sync/warp.go @@ -0,0 +1,18 @@ +package sync + +import "time" + +type WarpSync struct { + network interface{} +} + +func (w *WarpSync) sync() { + w.waitForConnections() + +} + +func (w *WarpSync) waitForConnections() { + // TODO: implement actual code to wait + // for the minimal amount of peers + time.Sleep(30 * time.Second) +} From d7c07efda55ed2c4440583a207d16f8517de323d Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 6 Apr 2023 10:20:58 -0400 Subject: [PATCH 002/140] chore: include informative logs to debug sync --- chain/westend/config.toml | 2 +- dot/sync/chain_processor.go | 2 +- dot/sync/chain_sync.go | 25 +++++++++++++++++++++---- 3 files changed, 23 insertions(+), 6 deletions(-) diff --git a/chain/westend/config.toml b/chain/westend/config.toml index 15b3e08c8d..bf3db45119 100644 --- a/chain/westend/config.toml +++ b/chain/westend/config.toml @@ -1,5 +1,5 @@ [global] -basepath = "~/.gossamer/westend" +basepath = "./tmp/.gossamer/westend" log = "info" metrics-address = "localhost:9876" diff --git a/dot/sync/chain_processor.go b/dot/sync/chain_processor.go index 4ba27803f0..d12eccbe23 100644 --- a/dot/sync/chain_processor.go +++ b/dot/sync/chain_processor.go @@ -112,7 +112,7 @@ func (s *chainProcessor) processReadyBlocks() { // returns the index of the last BlockData it handled on success, // or the index of the block data that errored on failure. 
func (c *chainProcessor) processBlockData(blockData types.BlockData) error { //nolint:revive - logger.Debugf("processing block data with hash %s", blockData.Hash) + // logger.Debugf("processing block data with hash %s", blockData.Hash) headerInState, err := c.blockState.HasHeader(blockData.Hash) if err != nil { diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 5f45beb725..b691ddae71 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -372,6 +372,10 @@ func (cs *chainSync) logSyncSpeed() { continue } + cs.Lock() + totalWorkers := len(cs.peerState) + cs.Unlock() + switch cs.state { case bootstrap: cs.benchmarker.end(time.Now(), after.Number) @@ -382,18 +386,20 @@ func (cs *chainSync) logSyncSpeed() { before.Number, after.Number, before.Hash(), after.Hash()) logger.Infof( - "🚣 currently syncing, %d peers connected, "+ + "🚣 currently syncing, %d connected peers, %d peers available to sync, "+ "target block number %d, %.2f average blocks/second, "+ "%.2f overall average, finalised block number %d with hash %s", len(cs.network.Peers()), + totalWorkers, target, cs.benchmarker.mostRecentAverage(), cs.benchmarker.average(), finalised.Number, finalised.Hash()) case tip: logger.Infof( - "💤 node waiting, %d peers connected, "+ + "💤 node waiting, %d connected peers, %d peers available to sync, "+ "head block number %d with hash %s, "+ "finalised block number %d with hash %s", len(cs.network.Peers()), + totalWorkers, after.Number, after.Hash(), finalised.Number, finalised.Hash()) } @@ -669,6 +675,8 @@ func (cs *chainSync) dispatchWorker(w *worker) { // TODO: if we find a good peer, do sync with them, right now it re-selects a peer each time (#1399) if err := cs.doSync(req, w.peersTried); err != nil { // failed to sync, set worker error and put into result queue + logger.Errorf("while executing sync: %q", err) + w.err = err return } @@ -719,7 +727,16 @@ func (cs *chainSync) doSync(req *network.BlockRequestMessage, peersTried map[pee } } - logger.Trace("success! placing block response data in ready queue") + logger.Tracef("success! placing %d blocks response data in ready queue", len(resp.BlockData)) + + if len(resp.BlockData) > 0 { + firstBlockInResponse := resp.BlockData[0] + lastBlockInResponse := resp.BlockData[len(resp.BlockData)-1] + + logger.Tracef("processing %d (%s) to %d (%s)", + firstBlockInResponse.Header.Number, firstBlockInResponse.Hash, + lastBlockInResponse.Header.Number, lastBlockInResponse.Hash) + } // response was validated! 
place into ready block queue for _, bd := range resp.BlockData { @@ -767,7 +784,7 @@ func (cs *chainSync) handleReadyBlock(bd *types.BlockData) { return } - logger.Tracef("new ready block number %d with hash %s", bd.Header.Number, bd.Hash) + //logger.Tracef("new ready block number %d with hash %s", bd.Header.Number, bd.Hash) // see if there are any descendents in the pending queue that are now ready to be processed, // as we have just become aware of their parent block From 428ff138ce5c1f745689908493a47c4bd7d27141 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 11 Apr 2023 15:43:54 -0400 Subject: [PATCH 003/140] chore: enable state trace logs and improve error for unfinalized ancestor --- chain/westend/config.toml | 2 +- dot/state/grandpa_changes.go | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/chain/westend/config.toml b/chain/westend/config.toml index bf3db45119..6fd7df7c6d 100644 --- a/chain/westend/config.toml +++ b/chain/westend/config.toml @@ -7,7 +7,7 @@ metrics-address = "localhost:9876" core = "" network = "" rpc = "" -state = "" +state = "trace" runtime = "" babe = "" grandpa = "" diff --git a/dot/state/grandpa_changes.go b/dot/state/grandpa_changes.go index e06f1fe0fa..ff4fb0c0d4 100644 --- a/dot/state/grandpa_changes.go +++ b/dot/state/grandpa_changes.go @@ -285,7 +285,8 @@ func (ct changeTree) findApplicableChange(hash common.Hash, number uint, } if child.change.announcingHeader.Number <= number && isDescendant { - return false, errUnfinalizedAncestor + return false, fmt.Errorf("%w: %s (%d)", errUnfinalizedAncestor, + child.change.announcingHeader.Hash(), child.change.announcingHeader.Number) } } From 25b30d502f8a014379059f2f6ffb108b7d532283 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 13 Apr 2023 16:00:19 -0400 Subject: [PATCH 004/140] wip: handle workers results --- chain/westend/config.toml | 4 +- docker-compose.yml | 82 +++---- .../provisioning/dashboards/gossamer.json | 12 +- docker/prometheus/prometheus.yml | 2 +- dot/network/service.go | 4 + dot/sync/chain_processor.go | 4 +- dot/sync/chain_sync.go | 225 +++++++++++++++--- dot/sync/interfaces.go | 2 + dot/sync/mocks_test.go | 14 ++ dot/sync/requests.go | 62 +++++ dot/sync/sync_worker.go | 98 ++++++++ dot/sync/worker_pool.go | 151 ++++++++++++ dot/types/block_data.go | 4 + go.mod | 1 + go.sum | 2 + 15 files changed, 578 insertions(+), 89 deletions(-) create mode 100644 dot/sync/requests.go create mode 100644 dot/sync/sync_worker.go create mode 100644 dot/sync/worker_pool.go diff --git a/chain/westend/config.toml b/chain/westend/config.toml index 6fd7df7c6d..0cfdd31547 100644 --- a/chain/westend/config.toml +++ b/chain/westend/config.toml @@ -1,5 +1,5 @@ [global] -basepath = "./tmp/.gossamer/westend" +basepath = "./tmp/gossamer/westend" log = "info" metrics-address = "localhost:9876" @@ -7,7 +7,7 @@ metrics-address = "localhost:9876" core = "" network = "" rpc = "" -state = "trace" +state = "" runtime = "" babe = "" grandpa = "" diff --git a/docker-compose.yml b/docker-compose.yml index 8a300720e2..28ea00d54a 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -11,50 +11,50 @@ # # To rebuild the Gossamer Docker image: `docker-compose build` -version: '3' +version: "3" services: - gossamer: - image: chainsafe/gossamer - build: . 
- volumes: - # Remove with: docker volume rm gossamer - - gossamer:/data/gossamer - command: - - --basepath=/data/gossamer - - --chain=kusama - - --log=info - - --publish-metrics - - --metrics-address=:9876 - - --pprofserver - ports: - - 6060:6060/tcp # Pprof server - - 7001:7001/tcp # Network port - - 8545:8545/tcp # RPC HTTP port - - 8546:8546/tcp # RPC Websocket port - expose: - - 9876/tcp # Prometheus metrics for Prometheus server + gossamer: + image: chainsafe/gossamer + build: . + volumes: + # Remove with: docker volume rm gossamer + - gossamer:/data/gossamer + command: + - --basepath=/data/gossamer + - --chain=kusama + - --log=info + - --publish-metrics + - --metrics-address=:9876 + - --pprofserver + ports: + - 6060:6060/tcp # Pprof server + - 7001:7001/tcp # Network port + - 8545:8545/tcp # RPC HTTP port + - 8546:8546/tcp # RPC Websocket port + expose: + - 9876/tcp # Prometheus metrics for Prometheus server - prometheus: - image: prom/prometheus - volumes: - - ./docker/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro - # The following line can be uncommented to persist metrics data. - # - gossamer-prometheus:/prometheus - expose: - - 9090/tcp # Prometheus metrics for Grafana + prometheus: + image: prom/prometheus + volumes: + - ./docker/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro + # The following line can be uncommented to persist metrics data. + # - gossamer-prometheus:/prometheus + expose: + - 9090/tcp # Prometheus metrics for Grafana - grafana: - image: grafana/grafana-oss - ports: - - 3000:3000/tcp # HTTP Web interface at http://localhost:3000/ - volumes: - - ./docker/grafana/grafana.ini:/etc/grafana/grafana.ini:ro - - ./docker/grafana/provisioning:/etc/grafana/provisioning:ro - # The following line can be uncommented to persist modifications. - # - gossamer-grafana:/var/lib/grafana + grafana: + image: grafana/grafana-oss + ports: + - 3000:3000/tcp # HTTP Web interface at http://localhost:3000/ + volumes: + - ./docker/grafana/grafana.ini:/etc/grafana/grafana.ini:ro + - ./docker/grafana/provisioning:/etc/grafana/provisioning:ro + # The following line can be uncommented to persist modifications. 
+ # - gossamer-grafana:/var/lib/grafana volumes: - gossamer: - gossamer-prometheus: - gossamer-grafana: + gossamer: + gossamer-prometheus: + gossamer-grafana: diff --git a/docker/grafana/provisioning/dashboards/gossamer.json b/docker/grafana/provisioning/dashboards/gossamer.json index 315f253aff..1c4ffeaaf5 100644 --- a/docker/grafana/provisioning/dashboards/gossamer.json +++ b/docker/grafana/provisioning/dashboards/gossamer.json @@ -90,7 +90,7 @@ "type": "prometheus", "uid": "prometheus_id" }, - "expr": "go_goroutines{instance=~\".*gossamer.*\"}", + "expr": "go_goroutines{instance=~\".*internal.*\"}", "refId": "A" } ], @@ -148,7 +148,7 @@ "type": "prometheus", "uid": "prometheus_id" }, - "expr": "go_threads{instance=~\".*gossamer.*\"}", + "expr": "go_threads{instance=~\".*internal.*\"}", "refId": "A" } ], @@ -207,7 +207,7 @@ "type": "prometheus", "uid": "prometheus_id" }, - "expr": "process_resident_memory_bytes{instance=~\".*gossamer.*\"}", + "expr": "process_resident_memory_bytes{instance=~\".*internal.*\"}", "refId": "A" } ], @@ -292,7 +292,7 @@ "uid": "prometheus_id" }, "editorMode": "code", - "expr": "process_resident_memory_bytes{instance=~\".*gossamer.*\"}", + "expr": "process_resident_memory_bytes{instance=~\".*internal.*\"}", "legendFormat": "Resident memory", "range": true, "refId": "A" @@ -303,7 +303,7 @@ "uid": "prometheus_id" }, "editorMode": "code", - "expr": "go_memstats_heap_inuse_bytes{instance=~\".*gossamer.*\"}", + "expr": "go_memstats_heap_inuse_bytes{instance=~\".*internal.*\"}", "hide": false, "legendFormat": "Heap", "range": true, @@ -315,7 +315,7 @@ "uid": "prometheus_id" }, "editorMode": "code", - "expr": "go_memstats_stack_inuse_bytes{instance=~\".*gossamer.*\"}", + "expr": "go_memstats_stack_inuse_bytes{instance=~\".*internal.*\"}", "hide": false, "legendFormat": "Stack", "range": true, diff --git a/docker/prometheus/prometheus.yml b/docker/prometheus/prometheus.yml index 88d165df8a..7c847ca4df 100644 --- a/docker/prometheus/prometheus.yml +++ b/docker/prometheus/prometheus.yml @@ -10,4 +10,4 @@ scrape_configs: - job_name: gossamer metrics_path: /metrics static_configs: - - targets: ["gossamer:9876"] + - targets: ["host.docker.internal:9876"] diff --git a/dot/network/service.go b/dot/network/service.go index a7bcb48f2e..8b40006481 100644 --- a/dot/network/service.go +++ b/dot/network/service.go @@ -590,6 +590,10 @@ func (s *Service) NetworkState() common.NetworkState { } } +func (s *Service) TotalConnectedPeers() []peer.ID { + return s.host.p2pHost.Network().Peers() +} + // Peers returns information about connected peers needed for the rpc server func (s *Service) Peers() []common.PeerInfo { var peers []common.PeerInfo diff --git a/dot/sync/chain_processor.go b/dot/sync/chain_processor.go index d12eccbe23..d74b8bea47 100644 --- a/dot/sync/chain_processor.go +++ b/dot/sync/chain_processor.go @@ -141,7 +141,7 @@ func (c *chainProcessor) processBlockData(blockData types.BlockData) error { //n if err != nil { return fmt.Errorf("processing block data with header and body: %w", err) } - logger.Debugf("block with hash %s processed", blockData.Hash) + //logger.Debugf("block with hash %s processed", blockData.Hash) } if blockData.Justification != nil && len(*blockData.Justification) > 0 { @@ -269,7 +269,7 @@ func (s *chainProcessor) handleBlock(block *types.Block, announceImportedBlock b return err } - logger.Debugf("🔗 imported block number %d with hash %s", block.Header.Number, block.Header.Hash()) + //logger.Debugf("🔗 imported block number %d with hash %s", 
block.Header.Number, block.Header.Hash()) blockHash := block.Header.Hash() s.telemetry.SendMessage(telemetry.NewBlockImport( diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index b691ddae71..ec2c155550 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -114,6 +114,9 @@ type chainSync struct { blockState BlockState network Network + // to replace the worker queue + workerPool *syncWorkerPool + // queue of work created by setting peer heads workQueue chan *peerState @@ -204,16 +207,15 @@ func newChainSync(cfg chainSyncConfig) *chainSync { logSyncTicker: logSyncTicker, logSyncTickerC: logSyncTicker.C, logSyncDone: make(chan struct{}), + workerPool: newSyncWorkerPool(cfg.net), } } func (cs *chainSync) start() { - // wait until we have received at least `minPeers` peer heads + // wait until we have a minimal workers in the sync worker pool for { - cs.RLock() - n := len(cs.peerState) - cs.RUnlock() - if n >= cs.minPeers { + totalAvailable := cs.workerPool.totalWorkers() + if totalAvailable >= uint(cs.minPeers) { break } time.Sleep(time.Millisecond * 100) @@ -223,6 +225,7 @@ func (cs *chainSync) start() { pendingBlockDoneCh := make(chan struct{}) cs.pendingBlockDoneCh = pendingBlockDoneCh + go cs.pendingBlocks.run(pendingBlockDoneCh) go cs.sync() cs.logSyncStarted = true @@ -267,15 +270,11 @@ func (cs *chainSync) setBlockAnnounce(from peer.ID, header *types.Header) error } // setPeerHead sets a peer's best known block and potentially adds the peer's state to the workQueue -func (cs *chainSync) setPeerHead(p peer.ID, hash common.Hash, number uint) error { - ps := &peerState{ - who: p, - hash: hash, - number: number, +func (cs *chainSync) setPeerHead(p peer.ID, bestHash common.Hash, bestNumber uint) error { + err := cs.workerPool.addWorker(p, bestHash, bestNumber) + if err != nil { + logger.Errorf("adding a potential worker: %s", err) } - cs.Lock() - cs.peerState[p] = ps - cs.Unlock() // if the peer reports a lower or equal best block number than us, // check if they are on a fork or not @@ -284,15 +283,15 @@ func (cs *chainSync) setPeerHead(p peer.ID, hash common.Hash, number uint) error return fmt.Errorf("best block header: %w", err) } - if ps.number <= head.Number { + if bestNumber <= head.Number { // check if our block hash for that number is the same, if so, do nothing // as we already have that block - ourHash, err := cs.blockState.GetHashByNumber(ps.number) + ourHash, err := cs.blockState.GetHashByNumber(bestNumber) if err != nil { return fmt.Errorf("get block hash by number: %w", err) } - if ourHash == ps.hash { + if ourHash == bestHash { return nil } @@ -307,7 +306,7 @@ func (cs *chainSync) setPeerHead(p peer.ID, hash common.Hash, number uint) error // their block hash doesn't match ours for that number (ie. they are on a different // chain), and also the highest finalised block is higher than that number. // thus the peer is on an invalid chain - if fin.Number >= ps.number { + if fin.Number >= bestNumber { // TODO: downscore this peer, or temporarily don't sync from them? 
(#1399) // perhaps we need another field in `peerState` to mark whether the state is valid or not cs.network.ReportPeer(peerset.ReputationChange{ @@ -315,12 +314,12 @@ func (cs *chainSync) setPeerHead(p peer.ID, hash common.Hash, number uint) error Reason: peerset.BadBlockAnnouncementReason, }, p) return fmt.Errorf("%w: for peer %s and block number %d", - errPeerOnInvalidFork, p, ps.number) + errPeerOnInvalidFork, p, bestNumber) } // peer is on a fork, check if we have processed the fork already or not // ie. is their block written to our db? - has, err := cs.blockState.HasHeader(ps.hash) + has, err := cs.blockState.HasHeader(bestHash) if err != nil { return fmt.Errorf("has header: %w", err) } @@ -333,12 +332,11 @@ func (cs *chainSync) setPeerHead(p peer.ID, hash common.Hash, number uint) error // the peer has a higher best block than us, or they are on some fork we are not aware of // add it to the disjoint block set - if err = cs.pendingBlocks.addHashAndNumber(ps.hash, ps.number); err != nil { + if err = cs.pendingBlocks.addHashAndNumber(bestHash, bestNumber); err != nil { return fmt.Errorf("add hash and number: %w", err) } - cs.workQueue <- ps - logger.Debugf("set peer %s head with block number %d and hash %s", p, number, hash) + //cs.workQueue <- nil return nil } @@ -372,9 +370,7 @@ func (cs *chainSync) logSyncSpeed() { continue } - cs.Lock() - totalWorkers := len(cs.peerState) - cs.Unlock() + totalWorkers := cs.workerPool.totalWorkers() switch cs.state { case bootstrap: @@ -432,24 +428,51 @@ func (cs *chainSync) sync() { if err := cs.handleResult(res); err != nil { logger.Errorf("failed to handle chain sync result: %s", err) } + + // TODO: re-think the usage of ticker in bootstrap mode but in tip sync mode + // we should use it when we don't receive a block-announcement for a while (let's say for a slot duration time) case <-ticker.C: cs.maybeSwitchMode() - workers, err := cs.handler.handleTick() - if err != nil { - logger.Errorf("failed to handle tick: %s", err) - continue - } - - for _, worker := range workers { - cs.tryDispatchWorker(worker) - } case fin := <-cs.finalisedCh: // on finalised block, call pendingBlocks.removeLowerBlocks() to remove blocks on // invalid forks from the pending blocks set cs.pendingBlocks.removeLowerBlocks(fin.Header.Number) case <-cs.ctx.Done(): return + default: + cs.maybeSwitchMode() + + if cs.state == bootstrap { + cs.workerPool.useConnectedPeers() + + head, err := cs.blockState.BestBlockHeader() + if err != nil { + logger.Errorf("getting best block header while syncing: %s", err) + continue + } + + availablePeers := cs.workerPool.totalWorkers() + startRequestAt := head.Number + 1 + targetBlockNumber := startRequestAt + uint(availablePeers)*128 + requests, err := ascedingBlockRequest( + startRequestAt, targetBlockNumber, bootstrapRequestData) + + if err != nil { + logger.Errorf("failed to setup ascending block requests: %s", err) + } + + expectedAmountOfBlocks := totalRequestedBlocks(requests) + wg := sync.WaitGroup{} + + resultsQueue := make(chan *syncTaskResult) + + wg.Add(1) + go cs.handleWorkersResults(resultsQueue, expectedAmountOfBlocks, &wg) + cs.workerPool.submitRequests(requests, resultsQueue) + + wg.Wait() + } } } } @@ -671,6 +694,7 @@ func (cs *chainSync) dispatchWorker(w *worker) { return } + logger.Debugf("created %d request to be dispatched", len(reqs)) for _, req := range reqs { // TODO: if we find a good peer, do sync with them, right now it re-selects a peer each time (#1399) if err := cs.doSync(req, w.peersTried); err != nil { @@ 
-683,6 +707,133 @@ func (cs *chainSync) dispatchWorker(w *worker) {
 	}
 }
 
+// handleWorkersResults collects the results of the requests we submitted to
+// the workers; at the end of every cycle we should end up with a complete chain.
+// Whenever we identify an error from a worker we evaluate the error and
+// re-submit the request to the queue, waiting for it to complete
+func (cs *chainSync) handleWorkersResults(workersResults chan *syncTaskResult, totalBlocks uint32, wg *sync.WaitGroup) {
+	defer wg.Done()
+
+	logger.Infof("starting handleWorkersResults, waiting for %d blocks", totalBlocks)
+	syncingChain := make([]*types.BlockData, 0, totalBlocks)
+
+loop:
+	for {
+		select {
+		// TODO: implement a case to stop
+		case taskResult := <-workersResults:
+			logger.Infof("task result: peer(%s), error: %v, hasResponse: %v",
+				taskResult.who, taskResult.err != nil, taskResult.response != nil)
+
+			if taskResult.err != nil {
+				switch {
+				// TODO: add this worker to an ignorePeers list with some expiration time
+				// for the peers added to it (a peerJail where peers have a release date,
+				// extending the punishment if they fail again and again; Jimmy's + Diego's idea)
+				case strings.Contains(taskResult.err.Error(), "dial backoff") ||
+					taskResult.err.Error() == "protocol not supported":
+
+					logger.Criticalf("response invalid: %s", taskResult.err)
+					cs.workerPool.shutdownWorker(taskResult.who)
+					cs.workerPool.submitRequest(taskResult.request, workersResults)
+					continue
+				}
+			}
+
+			who := taskResult.who
+			request := taskResult.request
+			response := taskResult.response
+
+			if request.Direction == network.Descending {
+				// reverse blocks before pre-validating and placing in ready queue
+				reverseBlockData(response.BlockData)
+			}
+
+			err := cs.validateResponse(request, response, who)
+			switch {
+			case errors.Is(err, errEmptyBlockData):
+				cs.workerPool.submitRequest(taskResult.request, workersResults)
+				continue
+			case errors.Is(err, errUnknownParent):
+			case err != nil:
+				logger.Criticalf("response invalid: %s", err)
+				cs.workerPool.shutdownWorker(taskResult.who)
+				cs.workerPool.submitRequest(taskResult.request, workersResults)
+				continue
+			}
+
+			if len(response.BlockData) > 0 {
+				firstBlockInResponse := response.BlockData[0]
+				lastBlockInResponse := response.BlockData[len(response.BlockData)-1]
+
+				logger.Tracef("processing %d blocks: %d (%s) to %d (%s)",
+					len(response.BlockData),
+					firstBlockInResponse.Header.Number, firstBlockInResponse.Hash,
+					lastBlockInResponse.Header.Number, lastBlockInResponse.Hash)
+			}
+
+			previousLen := len(syncingChain)
+
+			syncingChain = mergeSortedSlices(syncingChain, response.BlockData)
+			logger.Infof("building a syncing chain, previous length: %d, current length: %d",
+				previousLen, len(syncingChain))
+
+			if len(syncingChain) >= int(totalBlocks) {
+				break loop
+			}
+		}
+	}
+
+	logger.Infof("synced %d blocks, starting to process them", len(syncingChain))
+	// all responses were validated! place the chain into the ready block queue
+	for _, bd := range syncingChain {
+		// block is ready to be processed!
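+		// ordering matters here: blocks are replayed in ascending order so
+		// each one finds its parent already imported. mergeSortedSlices
+		// (defined below) keeps syncingChain sorted even when workers
+		// deliver responses out of order, e.g. merging blocks 129..256
+		// received after 1..128 and 257..384 still yields one contiguous run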
+		cs.handleReadyBlock(bd)
+	}
+}
+
+type LessOrEqual[T any] interface {
+	LessOrEqual(T) bool
+}
+
+func mergeSortedSlices[T LessOrEqual[T]](a, b []T) []T {
+	// if one slice is empty just return the other
+	switch {
+	case len(a) < 1:
+		return b
+	case len(b) < 1:
+		return a
+	}
+
+	aIndex, bIndex := 0, 0
+	resultSlice := make([]T, 0, len(a)+len(b))
+
+	for aIndex < len(a) && bIndex < len(b) {
+		elemA := a[aIndex]
+		elemB := b[bIndex]
+
+		if elemA.LessOrEqual(elemB) {
+			resultSlice = append(resultSlice, elemA)
+			aIndex++
+		} else {
+			resultSlice = append(resultSlice, elemB)
+			bIndex++
+		}
+	}
+
+	// if there are items remaining in either slice after the merge
+	// phase we just append them to the result slice
+	for idx := aIndex; idx < len(a); idx++ {
+		resultSlice = append(resultSlice, a[idx])
+	}
+
+	for idx := bIndex; idx < len(b); idx++ {
+		resultSlice = append(resultSlice, b[idx])
+	}
+
+	return resultSlice
+}
+
 func (cs *chainSync) doSync(req *network.BlockRequestMessage, peersTried map[peer.ID]struct{}) *workerError {
 	// determine which peers have the blocks we want to request
 	peers := cs.determineSyncPeers(req, peersTried)
@@ -693,12 +844,12 @@ func (cs *chainSync) doSync(req *network.BlockRequestMessage, peersTried map[pee
 		}
 	}
 
-	// send out request and potentially receive response, error if timeout
-	logger.Tracef("sending out block request: %s", req)
-
 	// TODO: use scoring to determine what peer to try to sync from first (#1399)
 	idx, _ := rand.Int(rand.Reader, big.NewInt(int64(len(peers))))
 	who := peers[idx.Int64()]
+
+	// send out request and potentially receive response, error if timeout
+	logger.Tracef("sending out block request to %s: %s", who, req)
 	resp, err := cs.network.DoBlockRequest(who, req)
 	if err != nil {
 		return &workerError{
diff --git a/dot/sync/interfaces.go b/dot/sync/interfaces.go
index c43b5a8d3c..558d7e6afb 100644
--- a/dot/sync/interfaces.go
+++ b/dot/sync/interfaces.go
@@ -81,6 +81,8 @@ type Network interface {
 
 	// ReportPeer reports peer based on the peer behaviour.
 	ReportPeer(change peerset.ReputationChange, p peer.ID)
+
+	TotalConnectedPeers() []peer.ID
 }
 
 // Telemetry is the telemetry client to send telemetry messages.
diff --git a/dot/sync/mocks_test.go b/dot/sync/mocks_test.go
index cdf596ff84..f4bbdfa564 100644
--- a/dot/sync/mocks_test.go
+++ b/dot/sync/mocks_test.go
@@ -649,3 +649,17 @@ func (mr *MockNetworkMockRecorder) ReportPeer(arg0, arg1 interface{}) *gomock.Ca
 	mr.mock.ctrl.T.Helper()
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPeer", reflect.TypeOf((*MockNetwork)(nil).ReportPeer), arg0, arg1)
 }
+
+// TotalConnectedPeers mocks base method.
+func (m *MockNetwork) TotalConnectedPeers() []peer.ID {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "TotalConnectedPeers")
+	ret0, _ := ret[0].([]peer.ID)
+	return ret0
+}
+
+// TotalConnectedPeers indicates an expected call of TotalConnectedPeers.
+func (mr *MockNetworkMockRecorder) TotalConnectedPeers() *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TotalConnectedPeers", reflect.TypeOf((*MockNetwork)(nil).TotalConnectedPeers))
+}
diff --git a/dot/sync/requests.go b/dot/sync/requests.go
new file mode 100644
index 0000000000..0632962db8
--- /dev/null
+++ b/dot/sync/requests.go
@@ -0,0 +1,62 @@
+package sync
+
+import (
+	"github.com/ChainSafe/gossamer/dot/network"
+	"github.com/ChainSafe/gossamer/lib/common/variadic"
+)
+
+func ascedingBlockRequest(startNumber uint, targetNumber uint, requestedData byte) ([]*network.BlockRequestMessage, error) {
+	diff := int(targetNumber) - int(startNumber)
+	if diff < 0 {
+		return nil, errInvalidDirection
+	}
+
+	// start and end block are the same, just request 1 block
+	if diff == 0 {
+		one := uint32(1)
+		return []*network.BlockRequestMessage{
+			{
+				RequestedData: requestedData,
+				StartingBlock: *variadic.MustNewUint32OrHash(uint32(startNumber)),
+				Direction:     network.Ascending,
+				Max:           &one,
+			},
+		}, nil
+	}
+
+	numRequests := uint(diff) / maxResponseSize
+	if diff%maxResponseSize != 0 {
+		numRequests++
+	}
+
+	reqs := make([]*network.BlockRequestMessage, numRequests)
+
+	// shadow max inside the loop so each request points at its own copy
+	const max = uint32(maxResponseSize)
+	for i := uint(0); i < numRequests; i++ {
+		max := max
+		start := variadic.MustNewUint32OrHash(startNumber)
+
+		reqs[i] = &network.BlockRequestMessage{
+			RequestedData: requestedData,
+			StartingBlock: *start,
+			Direction:     network.Ascending,
+			Max:           &max,
+		}
+		startNumber += maxResponseSize
+	}
+
+	return reqs, nil
+}
+
+func totalRequestedBlocks(requests []*network.BlockRequestMessage) uint32 {
+	acc := uint32(0)
+
+	for _, request := range requests {
+		if request.Max != nil {
+			acc += *request.Max
+		}
+	}
+
+	return acc
+}
diff --git a/dot/sync/sync_worker.go b/dot/sync/sync_worker.go
new file mode 100644
index 0000000000..271df3b015
--- /dev/null
+++ b/dot/sync/sync_worker.go
@@ -0,0 +1,98 @@
+package sync
+
+import (
+	"context"
+	"errors"
+	"sync"
+
+	"github.com/ChainSafe/gossamer/dot/network"
+	"github.com/ChainSafe/gossamer/lib/common"
+	"github.com/libp2p/go-libp2p/core/peer"
+)
+
+type syncTaskResult struct {
+	who      peer.ID
+	request  *network.BlockRequestMessage
+	response *network.BlockResponseMessage
+	err      error
+}
+
+// syncWorker represents an available peer that could be a source
+// of blocks to request from; once a peer is disconnected or ignored
+// we can just disable its worker.
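+//
+// The stopCh/doneCh pair implements the usual goroutine shutdown handshake
+// (see Stop below): closing stopCh asks the worker loop to exit, and the
+// loop closes doneCh on its way out, so Stop blocks until the worker has
+// actually finished:
+//
+//	close(s.stopCh) // request exit
+//	<-s.doneCh      // wait for the loop to acknowledge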
+type syncWorker struct { + isEphemeral bool + // context shared between all workers + ctx context.Context + l sync.RWMutex + + doneCh chan struct{} + stopCh chan struct{} + + who peer.ID + network Network + bestHash common.Hash + bestNumber uint +} + +func newSyncWorker(ctx context.Context, who peer.ID, + bestHash common.Hash, bestNumber uint, network Network) *syncWorker { + return &syncWorker{ + ctx: ctx, + who: who, + bestHash: bestHash, + bestNumber: bestNumber, + network: network, + doneCh: make(chan struct{}), + stopCh: make(chan struct{}), + } +} + +func (s *syncWorker) update(bestHash common.Hash, bestNumber uint) { + s.l.Lock() + defer s.l.Unlock() + + s.bestHash = bestHash + s.bestNumber = bestNumber +} + +var errBadPeerWorker = errors.New("bad peer worker") +var errBadBlock = errors.New("bad block") + +func (s *syncWorker) Start(tasks <-chan *syncTask, wg *sync.WaitGroup) { + wg.Add(1) + + go func() { + defer func() { + wg.Done() + close(s.doneCh) + logger.Infof("[SHUTDOWN] worker %s", s.who) + }() + + logger.Infof("worker %s started, waiting for tasks...", s.who) + + for { + select { + case <-s.stopCh: + return + case task := <-tasks: + request := task.request + logger.Infof("[EXECUTING] worker %s: block request: %s", s.who, request) + + response, err := s.network.DoBlockRequest(s.who, request) + if err != nil { + logger.Infof("[FINISHED] worker %s: err: %s", s.who, err) + } else if response != nil { + logger.Infof("[FINISHED] worker %s: block data amount: %d", s.who, len(response.BlockData)) + } + + task.resultCh <- &syncTaskResult{s.who, request, response, err} + } + } + }() +} + +func (s *syncWorker) Stop() { + close(s.stopCh) + <-s.doneCh +} diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go new file mode 100644 index 0000000000..2393414d3f --- /dev/null +++ b/dot/sync/worker_pool.go @@ -0,0 +1,151 @@ +package sync + +import ( + "context" + "errors" + "math/big" + "sync" + + "github.com/ChainSafe/gossamer/dot/network" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/libp2p/go-libp2p/core/peer" + "golang.org/x/exp/maps" +) + +type syncTask struct { + request *network.BlockRequestMessage + resultCh chan<- *syncTaskResult +} + +type syncWorkerPool struct { + ctx context.Context + l sync.RWMutex + wg sync.WaitGroup + + network Network + taskQueue chan *syncTask + workers map[peer.ID]*syncWorker +} + +func newSyncWorkerPool(net Network) *syncWorkerPool { + return &syncWorkerPool{ + network: net, + workers: make(map[peer.ID]*syncWorker), + taskQueue: make(chan *syncTask), + } +} + +func (s *syncWorkerPool) useConnectedPeers() { + connectedPeers := s.network.TotalConnectedPeers() + + s.l.Lock() + defer s.l.Unlock() + + for _, connectedPeer := range connectedPeers { + _, has := s.workers[connectedPeer] + if has { + continue + } + + // they are ephemeral because once we reach the tip we + // should remove them and use only peers who send us + // block announcements + ephemeralSyncWorker := newSyncWorker(s.ctx, connectedPeer, common.Hash{}, 0, s.network) + ephemeralSyncWorker.isEphemeral = true + ephemeralSyncWorker.Start(s.taskQueue, &s.wg) + s.workers[connectedPeer] = ephemeralSyncWorker + } +} + +func (s *syncWorkerPool) addWorker(who peer.ID, bestHash common.Hash, bestNumber uint) error { + s.l.Lock() + defer s.l.Unlock() + + worker, has := s.workers[who] + if has { + worker.update(bestHash, bestNumber) + return nil + } + + syncWorker := newSyncWorker(s.ctx, who, bestHash, bestNumber, s.network) + logger.Tracef("potential worker added, total in the pool 
%d", len(s.workers)) + + syncWorker.Start(s.taskQueue, &s.wg) + s.workers[who] = syncWorker + return nil +} + +func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, resultCh chan<- *syncTaskResult) { + s.taskQueue <- &syncTask{ + request: request, + resultCh: resultCh, + } +} + +func (s *syncWorkerPool) submitRequests(requests []*network.BlockRequestMessage, resultCh chan<- *syncTaskResult) { + for _, request := range requests { + s.submitRequest(request, resultCh) + } +} + +func (s *syncWorkerPool) shutdownWorker(who peer.ID) { + s.l.Lock() + defer s.l.Unlock() + + peer, has := s.workers[who] + if !has { + return + } + + peer.Stop() + delete(s.workers, who) +} + +func (s *syncWorkerPool) totalWorkers() (total uint) { + s.l.RLock() + defer s.l.RUnlock() + + total = 0 + for range s.workers { + total++ + } + + return total +} + +// getTargetBlockNumber takes the average of all peer heads +// TODO: should we just return the highest? could be an attack vector potentially, if a peer reports some very large +// head block number, it would leave us in bootstrap mode forever +// it would be better to have some sort of standard deviation calculation and discard any outliers (#1861) +func (s *syncWorkerPool) getTargetBlockNumber() (uint, error) { + s.l.RLock() + activeWorkers := maps.Values(s.workers) + s.l.RUnlock() + + // in practice, this shouldn't happen, as we only start the module once we have some peer states + if len(activeWorkers) == 0 { + // return max uint32 instead of 0, as returning 0 would switch us to tip mode unexpectedly + return 0, errors.New("no active workers yet") + } + + // we are going to sort the data and remove the outliers then we will return the avg of all the valid elements + blockNumbers := make([]uint, 0, len(activeWorkers)) + for _, worker := range activeWorkers { + // we don't count ephemeral workers since they don't have + // a best block hash/number informations, they are connected peers + // who can help us sync blocks faster + if worker.isEphemeral { + continue + } + + blockNumbers = append(blockNumbers, worker.bestNumber) + } + + if len(blockNumbers) < 1 { + return 0, errors.New("no active workers yet") + } + + sum, count := nonOutliersSumCount(blockNumbers) + quotientBigInt := big.NewInt(0).Div(sum, big.NewInt(int64(count))) + return uint(quotientBigInt.Uint64()), nil +} diff --git a/dot/types/block_data.go b/dot/types/block_data.go index 35525c86d0..c32d7f69a7 100644 --- a/dot/types/block_data.go +++ b/dot/types/block_data.go @@ -31,6 +31,10 @@ func (bd *BlockData) Number() uint { return bd.Header.Number } +func (bd *BlockData) LessOrEqual(o *BlockData) bool { + return bd.Header.Number <= o.Header.Number +} + func (bd *BlockData) String() string { str := fmt.Sprintf("Hash=%s ", bd.Hash) diff --git a/go.mod b/go.mod index ecc617943f..fc1f2ac959 100644 --- a/go.mod +++ b/go.mod @@ -16,6 +16,7 @@ require ( github.com/docker/docker v23.0.1+incompatible github.com/ethereum/go-ethereum v1.11.4 github.com/fatih/color v1.15.0 + github.com/gammazero/deque v0.2.1 github.com/go-playground/validator/v10 v10.12.0 github.com/golang/mock v1.6.0 github.com/google/go-cmp v0.5.9 diff --git a/go.sum b/go.sum index 9ad1e2a273..a1908a03d3 100644 --- a/go.sum +++ b/go.sum @@ -147,6 +147,8 @@ github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiD github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/gammazero/deque v0.2.1 h1:qSdsbG6pgp6nL7A0+K/B7s12mcCY/5l5SIUpMOl+dC0= +github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= From d05fdecd49d8eaf2f609b44882405817b03fa6fc Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 13 Apr 2023 18:05:18 -0400 Subject: [PATCH 005/140] wip: handling workers results --- dot/sync/chain_processor.go | 248 ++------------ dot/sync/chain_sync.go | 655 +++++++++++++++++------------------- dot/sync/requests.go | 4 +- dot/sync/syncer.go | 24 +- dot/types/block_data.go | 3 +- 5 files changed, 349 insertions(+), 585 deletions(-) diff --git a/dot/sync/chain_processor.go b/dot/sync/chain_processor.go index d74b8bea47..a008529a57 100644 --- a/dot/sync/chain_processor.go +++ b/dot/sync/chain_processor.go @@ -4,14 +4,7 @@ package sync import ( - "bytes" "context" - "errors" - "fmt" - - "github.com/ChainSafe/gossamer/dot/telemetry" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/blocktree" ) // ChainProcessor processes ready blocks. @@ -81,219 +74,30 @@ func (s *chainProcessor) stop() { } func (s *chainProcessor) processReadyBlocks() { - for { - bd, err := s.readyBlocks.pop(s.ctx) - if err != nil { - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return - } - panic(fmt.Sprintf("unhandled error: %s", err)) - } - - if err := s.processBlockData(*bd); err != nil { - // depending on the error, we might want to save this block for later - if !errors.Is(err, errFailedToGetParent) && !errors.Is(err, blocktree.ErrParentNotFound) { - logger.Errorf("block data processing for block with hash %s failed: %s", bd.Hash, err) - continue - } - - logger.Tracef("block data processing for block with hash %s failed: %s", bd.Hash, err) - if err := s.pendingBlocks.addBlock(&types.Block{ - Header: *bd.Header, - Body: *bd.Body, - }); err != nil { - logger.Debugf("failed to re-add block to pending blocks: %s", err) - } - } - } -} - -// processBlockData processes the BlockData from a BlockResponse and -// returns the index of the last BlockData it handled on success, -// or the index of the block data that errored on failure. 
-func (c *chainProcessor) processBlockData(blockData types.BlockData) error { //nolint:revive - // logger.Debugf("processing block data with hash %s", blockData.Hash) - - headerInState, err := c.blockState.HasHeader(blockData.Hash) - if err != nil { - return fmt.Errorf("checking if block state has header: %w", err) - } - - bodyInState, err := c.blockState.HasBlockBody(blockData.Hash) - if err != nil { - return fmt.Errorf("checking if block state has body: %w", err) - } - - // while in bootstrap mode we don't need to broadcast block announcements - announceImportedBlock := c.chainSync.syncState() == tip - if headerInState && bodyInState { - err = c.processBlockDataWithStateHeaderAndBody(blockData, announceImportedBlock) - if err != nil { - return fmt.Errorf("processing block data with header and "+ - "body in block state: %w", err) - } - return nil - } - - if blockData.Header != nil { - if blockData.Body != nil { - err = c.processBlockDataWithHeaderAndBody(blockData, announceImportedBlock) - if err != nil { - return fmt.Errorf("processing block data with header and body: %w", err) - } - //logger.Debugf("block with hash %s processed", blockData.Hash) - } - - if blockData.Justification != nil && len(*blockData.Justification) > 0 { - err = c.handleJustification(blockData.Header, *blockData.Justification) - if err != nil { - return fmt.Errorf("handling justification: %w", err) - } - } - } - - err = c.blockState.CompareAndSetBlockData(&blockData) - if err != nil { - return fmt.Errorf("comparing and setting block data: %w", err) - } - - return nil -} - -func (c *chainProcessor) processBlockDataWithStateHeaderAndBody(blockData types.BlockData, //nolint:revive - announceImportedBlock bool) (err error) { - // TODO: fix this; sometimes when the node shuts down the "best block" isn't stored properly, - // so when the node restarts it has blocks higher than what it thinks is the best, causing it not to sync - // if we update the node to only store finalised blocks in the database, this should be fixed and the entire - // code block can be removed (#1784) - block, err := c.blockState.GetBlockByHash(blockData.Hash) - if err != nil { - return fmt.Errorf("getting block by hash: %w", err) - } - - err = c.blockState.AddBlockToBlockTree(block) - if errors.Is(err, blocktree.ErrBlockExists) { - logger.Debugf( - "block number %d with hash %s already exists in block tree, skipping it.", - block.Header.Number, blockData.Hash) - return nil - } else if err != nil { - return fmt.Errorf("adding block to blocktree: %w", err) - } - - if blockData.Justification != nil && len(*blockData.Justification) > 0 { - err = c.handleJustification(&block.Header, *blockData.Justification) - if err != nil { - return fmt.Errorf("handling justification: %w", err) - } - } - - // TODO: this is probably unnecessary, since the state is already in the database - // however, this case shouldn't be hit often, since it's only hit if the node state - // is rewinded or if the node shuts down unexpectedly (#1784) - state, err := c.storageState.TrieState(&block.Header.StateRoot) - if err != nil { - return fmt.Errorf("loading trie state: %w", err) - } - - err = c.blockImportHandler.HandleBlockImport(block, state, announceImportedBlock) - if err != nil { - return fmt.Errorf("handling block import: %w", err) - } - - return nil -} - -func (c *chainProcessor) processBlockDataWithHeaderAndBody(blockData types.BlockData, //nolint:revive - announceImportedBlock bool) (err error) { - err = c.babeVerifier.VerifyBlock(blockData.Header) - if err != nil { - 
return fmt.Errorf("babe verifying block: %w", err) - } - - c.handleBody(blockData.Body) - - block := &types.Block{ - Header: *blockData.Header, - Body: *blockData.Body, - } - - err = c.handleBlock(block, announceImportedBlock) - if err != nil { - return fmt.Errorf("handling block: %w", err) - } - - return nil -} - -// handleHeader handles block bodies included in BlockResponses -func (s *chainProcessor) handleBody(body *types.Body) { - for _, ext := range *body { - s.transactionState.RemoveExtrinsic(ext) - } -} - -// handleHeader handles blocks (header+body) included in BlockResponses -func (s *chainProcessor) handleBlock(block *types.Block, announceImportedBlock bool) error { - parent, err := s.blockState.GetHeader(block.Header.ParentHash) - if err != nil { - return fmt.Errorf("%w: %s", errFailedToGetParent, err) - } - - s.storageState.Lock() - defer s.storageState.Unlock() - - ts, err := s.storageState.TrieState(&parent.StateRoot) - if err != nil { - return err - } - - root := ts.MustRoot() - if !bytes.Equal(parent.StateRoot[:], root[:]) { - panic("parent state root does not match snapshot state root") - } - - rt, err := s.blockState.GetRuntime(parent.Hash()) - if err != nil { - return err - } - - rt.SetContextStorage(ts) - - _, err = rt.ExecuteBlock(block) - if err != nil { - return fmt.Errorf("failed to execute block %d: %w", block.Header.Number, err) - } - - if err = s.blockImportHandler.HandleBlockImport(block, ts, announceImportedBlock); err != nil { - return err - } - - //logger.Debugf("🔗 imported block number %d with hash %s", block.Header.Number, block.Header.Hash()) - - blockHash := block.Header.Hash() - s.telemetry.SendMessage(telemetry.NewBlockImport( - &blockHash, - block.Header.Number, - "NetworkInitialSync")) - - return nil -} - -func (s *chainProcessor) handleJustification(header *types.Header, justification []byte) (err error) { - logger.Debugf("handling justification for block %d...", header.Number) - - headerHash := header.Hash() - err = s.finalityGadget.VerifyBlockJustification(headerHash, justification) - if err != nil { - return fmt.Errorf("verifying block number %d justification: %w", header.Number, err) - } - - err = s.blockState.SetJustification(headerHash, justification) - if err != nil { - return fmt.Errorf("setting justification for block number %d: %w", header.Number, err) - } - - logger.Infof("🔨 finalised block number %d with hash %s", header.Number, headerHash) - return nil + // for { + // bd, err := s.readyBlocks.pop(s.ctx) + // if err != nil { + // if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + // return + // } + // panic(fmt.Sprintf("unhandled error: %s", err)) + // } + + // if err := s.processBlockData(*bd); err != nil { + // // depending on the error, we might want to save this block for later + // logger.Errorf("block data processing for block with hash %s failed: %s", bd.Hash, err) + + // if !errors.Is(err, errFailedToGetParent) && !errors.Is(err, blocktree.ErrParentNotFound) { + // continue + // } + + // logger.Tracef("block data processing for block with hash %s failed: %s", bd.Hash, err) + // if err := s.pendingBlocks.addBlock(&types.Block{ + // Header: *bd.Header, + // Body: *bd.Body, + // }); err != nil { + // logger.Debugf("failed to re-add block to pending blocks: %s", err) + // } + // } + // } } diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index ec2c155550..7454896119 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -4,8 +4,8 @@ package sync import ( + "bytes" "context" - 
"crypto/rand" "errors" "fmt" "math/big" @@ -20,6 +20,7 @@ import ( "github.com/ChainSafe/gossamer/dot/network" "github.com/ChainSafe/gossamer/dot/peerset" + "github.com/ChainSafe/gossamer/dot/telemetry" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/blocktree" "github.com/ChainSafe/gossamer/lib/common" @@ -168,6 +169,13 @@ type chainSync struct { logSyncTickerC <-chan time.Time // channel as field for unit testing logSyncStarted bool logSyncDone chan struct{} + + storageState StorageState + transactionState TransactionState + babeVerifier BabeVerifier + finalityGadget FinalityGadget + blockImportHandler BlockImportHandler + telemetry Telemetry } type chainSyncConfig struct { @@ -177,37 +185,49 @@ type chainSyncConfig struct { pendingBlocks DisjointBlockSet minPeers, maxPeers int slotDuration time.Duration + storageState StorageState + transactionState TransactionState + babeVerifier BabeVerifier + finalityGadget FinalityGadget + blockImportHandler BlockImportHandler + telemetry Telemetry } func newChainSync(cfg chainSyncConfig) *chainSync { ctx, cancel := context.WithCancel(context.Background()) const syncSamplesToKeep = 30 - const logSyncPeriod = 5 * time.Second + const logSyncPeriod = 10 * time.Second logSyncTicker := time.NewTicker(logSyncPeriod) return &chainSync{ - ctx: ctx, - cancel: cancel, - blockState: cfg.bs, - network: cfg.net, - workQueue: make(chan *peerState, 1024), - resultQueue: make(chan *worker, 1024), - peerState: make(map[peer.ID]*peerState), - ignorePeers: make(map[peer.ID]struct{}), - workerState: newWorkerState(), - readyBlocks: cfg.readyBlocks, - pendingBlocks: cfg.pendingBlocks, - state: bootstrap, - handler: newBootstrapSyncer(cfg.bs), - benchmarker: newSyncBenchmarker(syncSamplesToKeep), - finalisedCh: cfg.bs.GetFinalisedNotifierChannel(), - minPeers: cfg.minPeers, - maxWorkerRetries: uint16(cfg.maxPeers), - slotDuration: cfg.slotDuration, - logSyncTicker: logSyncTicker, - logSyncTickerC: logSyncTicker.C, - logSyncDone: make(chan struct{}), - workerPool: newSyncWorkerPool(cfg.net), + storageState: cfg.storageState, + transactionState: cfg.transactionState, + babeVerifier: cfg.babeVerifier, + finalityGadget: cfg.finalityGadget, + blockImportHandler: cfg.blockImportHandler, + telemetry: cfg.telemetry, + ctx: ctx, + cancel: cancel, + blockState: cfg.bs, + network: cfg.net, + workQueue: make(chan *peerState, 1024), + resultQueue: make(chan *worker, 1024), + peerState: make(map[peer.ID]*peerState), + ignorePeers: make(map[peer.ID]struct{}), + workerState: newWorkerState(), + readyBlocks: cfg.readyBlocks, + pendingBlocks: cfg.pendingBlocks, + state: bootstrap, + handler: newBootstrapSyncer(cfg.bs), + benchmarker: newSyncBenchmarker(syncSamplesToKeep), + finalisedCh: cfg.bs.GetFinalisedNotifierChannel(), + minPeers: cfg.minPeers, + maxWorkerRetries: uint16(cfg.maxPeers), + slotDuration: cfg.slotDuration, + logSyncTicker: logSyncTicker, + logSyncTickerC: logSyncTicker.C, + logSyncDone: make(chan struct{}), + workerPool: newSyncWorkerPool(cfg.net), } } @@ -226,7 +246,7 @@ func (cs *chainSync) start() { pendingBlockDoneCh := make(chan struct{}) cs.pendingBlockDoneCh = pendingBlockDoneCh - go cs.pendingBlocks.run(pendingBlockDoneCh) + //go cs.pendingBlocks.run(pendingBlockDoneCh) go cs.sync() cs.logSyncStarted = true go cs.logSyncSpeed() @@ -402,78 +422,55 @@ func (cs *chainSync) logSyncSpeed() { } } -func (cs *chainSync) ignorePeer(who peer.ID) { - if err := who.Validate(); err != nil { - return - } - - cs.Lock() - cs.ignorePeers[who] = 
struct{}{} - cs.Unlock() -} - func (cs *chainSync) sync() { - // set to slot time - ticker := time.NewTicker(cs.slotDuration) for { - select { - case ps := <-cs.workQueue: - cs.maybeSwitchMode() - - if err := cs.handleWork(ps); err != nil { - logger.Errorf("failed to handle chain sync work: %s", err) - } - case res := <-cs.resultQueue: - if err := cs.handleResult(res); err != nil { - logger.Errorf("failed to handle chain sync result: %s", err) + cs.maybeSwitchMode() + if cs.state == bootstrap { + err := cs.executeBootstrapSync() + if err != nil { + logger.Errorf("executing bootstrap sync: %s", err) + return } + } else { + // TODO executeTipSync() + } + } +} - // TODO: re-think the usage of ticker in bootstrap mode but in tip sync mode - // we should use it when we don't receive a block-announcement for a while (let's say for a slot duration time) - case <-ticker.C: - cs.maybeSwitchMode() +func (cs *chainSync) executeBootstrapSync() error { + for { + head, err := cs.blockState.BestBlockHeader() + if err != nil { + return fmt.Errorf("getting best block header while syncing: %w", err) + } - case fin := <-cs.finalisedCh: - // on finalised block, call pendingBlocks.removeLowerBlocks() to remove blocks on - // invalid forks from the pending blocks set - cs.pendingBlocks.removeLowerBlocks(fin.Header.Number) - case <-cs.ctx.Done(): - return - default: - cs.maybeSwitchMode() + startRequestAt := head.Number + 1 - if cs.state == bootstrap { - cs.workerPool.useConnectedPeers() + fmt.Printf("=====> REQUEST FROM %d; BEST BLOCK HEADER: %d\n", startRequestAt, head.Number+1) + cs.workerPool.useConnectedPeers() - head, err := cs.blockState.BestBlockHeader() - if err != nil { - logger.Errorf("getting best block header while syncing: %s", err) - continue - } + availablePeers := cs.workerPool.totalWorkers() + targetBlockNumber := startRequestAt + uint(availablePeers)*128 - availablePeers := cs.workerPool.totalWorkers() - startRequestAt := head.Number + 1 - targetBlockNumber := startRequestAt + uint(availablePeers)*128 - requests, err := ascedingBlockRequest( - startRequestAt, targetBlockNumber, bootstrapRequestData) + fmt.Printf("=====> requesting from %d targeting %d\n", startRequestAt, targetBlockNumber) + requests, err := ascedingBlockRequest( + startRequestAt, targetBlockNumber, bootstrapRequestData) - if err != nil { - logger.Errorf("failed to setup ascending block requests: %s", err) - } + if err != nil { + logger.Errorf("failed to setup ascending block requests: %s", err) + } - expectedAmountOfBlocks := totalRequestedBlocks(requests) - wg := sync.WaitGroup{} + expectedAmountOfBlocks := totalRequestedBlocks(requests) + wg := sync.WaitGroup{} - resultsQueue := make(chan *syncTaskResult) + resultsQueue := make(chan *syncTaskResult) - wg.Add(1) - go cs.handleWorkersResults(resultsQueue, expectedAmountOfBlocks, &wg) - cs.workerPool.submitRequests(requests, resultsQueue) + wg.Add(1) + go cs.handleWorkersResults(resultsQueue, expectedAmountOfBlocks, &wg) + cs.workerPool.submitRequests(requests, resultsQueue) - wg.Wait() - } - } + wg.Wait() } } @@ -498,80 +495,6 @@ func (cs *chainSync) maybeSwitchMode() { } } -func (cs *chainSync) handleResult(resultWorker *worker) error { - // delete worker from workers map - cs.workerState.delete(resultWorker.id) - - // handle results from worker - // if there is an error, potentially retry the worker - if resultWorker.err == nil || resultWorker.ctx.Err() != nil { - return nil //nolint:nilerr - } - - logger.Debugf("worker id %d failed: %s", resultWorker.id, 
resultWorker.err.err) - - // handle errors. in the case that a peer did not respond to us in time, - // temporarily add them to the ignore list. - switch { - case errors.Is(resultWorker.err.err, context.Canceled): - return nil - case errors.Is(resultWorker.err.err, errNoPeers): - logger.Debugf("worker id %d not able to sync with any peer", resultWorker.id) - return nil - case errors.Is(resultWorker.err.err, context.DeadlineExceeded): - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.TimeOutValue, - Reason: peerset.TimeOutReason, - }, resultWorker.err.who) - cs.ignorePeer(resultWorker.err.who) - case strings.Contains(resultWorker.err.err.Error(), "dial backoff"): - cs.ignorePeer(resultWorker.err.who) - return nil - case resultWorker.err.err.Error() == "protocol not supported": - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.BadProtocolValue, - Reason: peerset.BadProtocolReason, - }, resultWorker.err.who) - cs.ignorePeer(resultWorker.err.who) - return nil - } - - worker, err := cs.handler.handleWorkerResult(resultWorker) - if err != nil { - logger.Errorf("failed to handle worker result: %s", err) - return err - } else if worker == nil { - return nil - } - - worker.retryCount = resultWorker.retryCount + 1 - if worker.retryCount > cs.maxWorkerRetries { - logger.Debugf( - "discarding worker id %d: maximum retry count %d reached", - worker.id, cs.maxWorkerRetries) - - // if this worker was triggered due to a block in the pending blocks set, - // we want to remove it from the set, as we asked all our peers for it - // and none replied with the info we need. - if worker.pendingBlock != nil { - cs.pendingBlocks.removeBlock(worker.pendingBlock.hash) - } - return nil - } - - // if we've already tried a peer and there was an error, - // then we shouldn't try them again. 
- if resultWorker.peersTried != nil { - worker.peersTried = resultWorker.peersTried - } else { - worker.peersTried = make(map[peer.ID]struct{}) - } - - worker.peersTried[resultWorker.err.who] = struct{}{} - cs.tryDispatchWorker(worker) - return nil -} - // setMode stops all existing workers and clears the worker set and switches the `handler` // based on the new mode, if the mode is different than previous func (cs *chainSync) setMode(mode chainSyncState) { @@ -587,7 +510,7 @@ func (cs *chainSync) setMode(mode chainSyncState) { case bootstrap: cs.handler = newBootstrapSyncer(cs.blockState) case tip: - cs.handler = newTipSyncer(cs.blockState, cs.pendingBlocks, cs.readyBlocks, cs.handleReadyBlock) + cs.handler = newTipSyncer(cs.blockState, cs.pendingBlocks, cs.readyBlocks, nil) } cs.state = mode @@ -620,93 +543,6 @@ func (cs *chainSync) getTarget() uint { return uint(quotientBigInt.Uint64()) } -// handleWork handles potential new work that may be triggered on receiving a peer's state -// in bootstrap mode, this begins the bootstrap process -// in tip mode, this adds the peer's state to the pendingBlocks set and potentially starts -// a fork sync -func (cs *chainSync) handleWork(ps *peerState) error { - logger.Tracef("handling potential work for target block number %d and hash %s", ps.number, ps.hash) - worker, err := cs.handler.handleNewPeerState(ps) - if err != nil { - return err - } else if worker != nil { - cs.tryDispatchWorker(worker) - } - - return nil -} - -func (cs *chainSync) tryDispatchWorker(w *worker) { - // if we already have the maximum number of workers, don't dispatch another - if len(cs.workerState.workers) >= maxWorkers { - logger.Trace("reached max workers, ignoring potential work") - return - } - - // check current worker set for workers already working on these blocks - // if there are none, dispatch new worker - if cs.handler.hasCurrentWorker(w, cs.workerState.workers) { - return - } - - cs.workerState.add(w) - go cs.dispatchWorker(w) -} - -// dispatchWorker begins making requests to the network and attempts to receive responses up until the target -// if it fails due to any reason, it sets the worker `err` and returns -// this function always places the worker into the `resultCh` for result handling upon return -func (cs *chainSync) dispatchWorker(w *worker) { - if w.targetNumber == nil || w.startNumber == nil { - return - } - - logger.Debugf("dispatching sync worker id %d, "+ - "start number %d, target number %d, "+ - "start hash %s, target hash %s, "+ - "request data %d, direction %s", - w.id, - *w.startNumber, *w.targetNumber, - w.startHash, w.targetHash, - w.requestData, w.direction) - - start := time.Now() - defer func() { - end := time.Now() - w.duration = end.Sub(start) - outcome := "success" - if w.err != nil { - outcome = "failure" - } - logger.Debugf( - "sync worker completed in %s with %s for worker id %d", - w.duration, outcome, w.id) - cs.resultQueue <- w - }() - - reqs, err := workerToRequests(w) - if err != nil { - // if we are creating valid workers, this should not happen - logger.Criticalf("failed to create requests from worker id %d: %s", w.id, err) - w.err = &workerError{ - err: err, - } - return - } - - logger.Debugf("created %d request to be dispatched", len(reqs)) - for _, req := range reqs { - // TODO: if we find a good peer, do sync with them, right now it re-selects a peer each time (#1399) - if err := cs.doSync(req, w.peersTried); err != nil { - // failed to sync, set worker error and put into result queue - logger.Errorf("while executing 
sync: %q", err)
-
-			w.err = err
-			return
-		}
-	}
-}
-
 // handleWorkersResults: every time we submit requests to workers, their results should be computed here,
 // and every cycle we should end up with a complete chain; whenever we identify
 // any error from a worker we should evaluate the error and re-insert the request
@@ -751,6 +587,11 @@ loop:
 			err := cs.validateResponse(request, response, who)
 			switch {
+			case errors.Is(err, errResponseIsNotChain):
+				logger.Criticalf("response invalid: %s", err)
+				cs.workerPool.shutdownWorker(taskResult.who)
+				cs.workerPool.submitRequest(taskResult.request, workersResults)
+				continue
 			case errors.Is(err, errEmptyBlockData):
 				cs.workerPool.submitRequest(taskResult.request, workersResults)
 				continue
@@ -773,7 +614,6 @@ loop:
 			}
 
 			previousLen := len(syncingChain)
-
 			syncingChain = mergeSortedSlices(syncingChain, response.BlockData)
 			logger.Infof("building a syncing chain, previous length: %d, current length: %d",
 				previousLen, len(syncingChain))
@@ -785,10 +625,27 @@ loop:
 	}
 
 	logger.Infof("synced %d blocks, starting process", len(syncingChain))
+
+	if len(syncingChain) >= 2 {
+		// ensuring the parents are in the right place
+		parentElement := syncingChain[0]
+		for _, element := range syncingChain[1:] {
+			if parentElement.Header.Hash() != element.Header.ParentHash {
+				logger.Criticalf("expected %s to be the parent of %s",
+					parentElement.Header.Hash(), element.Header.ParentHash)
+				panic("blocks in the syncing chain are not sequential")
+			}
+
+			parentElement = element
+		}
+	}
+
 	// response was validated! place into ready block queue
 	for _, bd := range syncingChain {
 		// block is ready to be processed!
-		cs.handleReadyBlock(bd)
+		if err := cs.handleReadyBlock(bd); err != nil {
+			logger.Criticalf("error while handling a ready block: %s", err)
+			return
+		}
 	}
 }
 
@@ -808,7 +665,7 @@ func mergeSortedSlices[T LessOrEqual[T]](a, b []T) []T {
 	aIndex, bIndex := 0, 0
 	resultSlice := make([]T, 0, len(a)+len(b))
 
-	for aIndex < 0 && bIndex < 0 {
+	for aIndex < len(a) && bIndex < len(b) {
 		elemA := a[aIndex]
 		elemB := b[bIndex]
 
@@ -834,118 +691,243 @@ func mergeSortedSlices[T LessOrEqual[T]](a, b []T) []T {
 	return resultSlice
 }
 
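The corrected loop guard above is the substance of this hunk: with the old `aIndex < 0 && bIndex < 0` condition the merge body could never execute, so responses were never interleaved into the syncing chain in sorted order. A minimal, self-contained sketch of the intended merge behaviour (the `item` type and the tail appends here are illustrative assumptions, not code from this series):

    package main

    import "fmt"

    type item uint

    // LessOrEqual mirrors the constraint mergeSortedSlices relies on.
    func (i item) LessOrEqual(o item) bool { return i <= o }

    // merge interleaves two sorted slices into one sorted slice.
    func merge(a, b []item) []item {
        aIndex, bIndex := 0, 0
        result := make([]item, 0, len(a)+len(b))
        // corrected guard: keep going while both slices have elements left
        for aIndex < len(a) && bIndex < len(b) {
            if a[aIndex].LessOrEqual(b[bIndex]) {
                result = append(result, a[aIndex])
                aIndex++
            } else {
                result = append(result, b[bIndex])
                bIndex++
            }
        }
        // append whatever remains on either side
        result = append(result, a[aIndex:]...)
        return append(result, b[bIndex:]...)
    }

    func main() {
        fmt.Println(merge([]item{1, 3, 5}, []item{2, 4, 6})) // [1 2 3 4 5 6]
    }
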
-func (cs *chainSync) doSync(req *network.BlockRequestMessage, peersTried map[peer.ID]struct{}) *workerError {
-	// determine which peers have the blocks we want to request
-	peers := cs.determineSyncPeers(req, peersTried)
+func (cs *chainSync) handleReadyBlock(bd *types.BlockData) error {
+	if cs.readyBlocks.has(bd.Hash) {
+		logger.Tracef("ignoring block %s (%d) in response, already in ready queue", bd.Hash, bd.Header.Number)
+		return nil
+	}
+
+	// if header was not requested, get it from the pending set
+	// if we're expecting headers, validate should ensure we have a header
+	if bd.Header == nil {
+		block := cs.pendingBlocks.getBlock(bd.Hash)
+		if block == nil {
+			// block wasn't in the pending set!
+			// let's check the db as maybe we already processed it
+			has, err := cs.blockState.HasHeader(bd.Hash)
+			if err != nil && !errors.Is(err, chaindb.ErrKeyNotFound) {
+				logger.Debugf("failed to check if header is known for hash %s: %s", bd.Hash, err)
+				return err
+			}
-
-	if len(peers) == 0 {
-		return &workerError{
-			err: errNoPeers,
+			if has {
+				logger.Tracef("ignoring block we've already processed, hash=%s", bd.Hash)
+				return nil
+			}
+
+			// this is bad and shouldn't happen
+			logger.Errorf("block with unknown header is ready: hash=%s", bd.Hash)
+			return fmt.Errorf("block with unknown header is ready: hash=%s", bd.Hash)
 		}
+
+		bd.Header = block.header
+	}
+
+	if bd.Header == nil {
+		logger.Errorf("new ready block number (unknown) with hash %s", bd.Hash)
+		return nil
+	}
+
+	//logger.Tracef("new ready block number %d with hash %s", bd.Header.Number, bd.Hash)
+
+	err := cs.processBlockData(*bd)
+	if err != nil {
+		// depending on the error, we might want to save this block for later
+		logger.Errorf("block data processing for block with hash %s failed: %s", bd.Hash, err)
+		return err
 	}
 
-	// TODO: use scoring to determine what peer to try to sync from first (#1399)
-	idx, _ := rand.Int(rand.Reader, big.NewInt(int64(len(peers))))
-	who := peers[idx.Int64()]
+	return nil
+}
+
+// processBlockData processes the BlockData from a BlockResponse, importing
+// the block and/or handling its justification as appropriate.
+func (cs *chainSync) processBlockData(blockData types.BlockData) error { //nolint:revive
+	// logger.Debugf("processing block data with hash %s", blockData.Hash)
+
+	headerInState, err := cs.blockState.HasHeader(blockData.Hash)
+	if err != nil {
+		return fmt.Errorf("checking if block state has header: %w", err)
+	}
 
-	// send out request and potentially receive response, error if timeout
-	logger.Tracef("sending out block request to %s: %s", who, req)
-	resp, err := cs.network.DoBlockRequest(who, req)
+	bodyInState, err := cs.blockState.HasBlockBody(blockData.Hash)
 	if err != nil {
-		return &workerError{
-			err: err,
-			who: who,
+		return fmt.Errorf("checking if block state has body: %w", err)
+	}
+
+	// while in bootstrap mode we don't need to broadcast block announcements
+	announceImportedBlock := cs.state == tip
+	if headerInState && bodyInState {
+		err = cs.processBlockDataWithStateHeaderAndBody(blockData, announceImportedBlock)
+		if err != nil {
+			return fmt.Errorf("processing block data with header and "+
+				"body in block state: %w", err)
 		}
+		return nil
 	}
 
-	if resp == nil {
-		return &workerError{
-			err: errNilResponse,
-			who: who,
+	if blockData.Header != nil {
+		if blockData.Body != nil {
+			err = cs.processBlockDataWithHeaderAndBody(blockData, announceImportedBlock)
+			if err != nil {
+				return fmt.Errorf("processing block data with header and body: %w", err)
+			}
+			//logger.Debugf("block with hash %s processed", blockData.Hash)
+		}
+
+		if blockData.Justification != nil && len(*blockData.Justification) > 0 {
+			err = cs.handleJustification(blockData.Header, *blockData.Justification)
+			if err != nil {
+				return fmt.Errorf("handling justification: %w", err)
+			}
 		}
 	}
 
-	if req.Direction == network.Descending {
-		// reverse blocks before pre-validating and placing in ready queue
-		reverseBlockData(resp.BlockData)
+	err = cs.blockState.CompareAndSetBlockData(&blockData)
+	if err != nil {
+		return fmt.Errorf("comparing and setting block data: %w", err)
+	}
+
+	return nil
+}
+
+func (cs *chainSync) processBlockDataWithStateHeaderAndBody(blockData types.BlockData, //nolint:revive
+	announceImportedBlock bool) (err error) {
+	// TODO: fix this; sometimes when the node shuts down the "best block" isn't stored properly,
+	// so when the node restarts it has blocks higher than what it thinks is the best, causing it not to sync
+	// if we update the node to only store finalised blocks in the database, this should be fixed and the entire
+	// code block can be removed (#1784)
+	block, err := cs.blockState.GetBlockByHash(blockData.Hash)
+	if err != nil {
+		return fmt.Errorf("getting block by hash: %w", err)
+	}
+
+	err = cs.blockState.AddBlockToBlockTree(block)
+	if errors.Is(err, blocktree.ErrBlockExists) {
+		logger.Debugf(
+			"block number %d with hash %s already exists in block tree, skipping it.",
+			block.Header.Number, blockData.Hash)
+		return nil
+	} else if err != nil {
+		return fmt.Errorf("adding block to blocktree: %w", err)
+	}
+
+	if blockData.Justification != nil && len(*blockData.Justification) > 0 {
+		err = cs.handleJustification(&block.Header, *blockData.Justification)
+		if err != nil {
+			return fmt.Errorf("handling justification: %w", err)
+		}
+	}
+
+	// TODO: this is probably unnecessary, since the state is already in the database
+	// however, this case shouldn't be hit often, since it's only hit if the node state
+	// is rewinded or if the node shuts down unexpectedly (#1784)
+	state, err := cs.storageState.TrieState(&block.Header.StateRoot)
+	if err != nil {
+		return fmt.Errorf("loading trie state: %w", err)
+	}
+
+	err = cs.blockImportHandler.HandleBlockImport(block, state, announceImportedBlock)
+	if err != nil {
+		return fmt.Errorf("handling block import: %w", err)
+	}
 
-	logger.Tracef("success! placing %d blocks response data in ready queue", len(resp.BlockData))
+	return nil
+}
 
-	if len(resp.BlockData) > 0 {
-		firstBlockInResponse := resp.BlockData[0]
-		lastBlockInResponse := resp.BlockData[len(resp.BlockData)-1]
-
-		logger.Tracef("processing %d (%s) to %d (%s)",
-			firstBlockInResponse.Header.Number, firstBlockInResponse.Hash,
-			lastBlockInResponse.Header.Number, lastBlockInResponse.Hash)
-	}
+func (cs *chainSync) processBlockDataWithHeaderAndBody(blockData types.BlockData, //nolint:revive
+	announceImportedBlock bool) (err error) {
+	err = cs.babeVerifier.VerifyBlock(blockData.Header)
+	if err != nil {
+		return fmt.Errorf("babe verifying block: %w", err)
+	}
 
-	// response was validated! place into ready block queue
-	for _, bd := range resp.BlockData {
-		// block is ready to be processed!
-		cs.handleReadyBlock(bd)
+	cs.handleBody(blockData.Body)
+
+	block := &types.Block{
+		Header: *blockData.Header,
+		Body:   *blockData.Body,
+	}
+
+	err = cs.handleBlock(block, announceImportedBlock)
+	if err != nil {
+		return fmt.Errorf("handling block: %w", err)
 	}
 
 	return nil
 }
 
-func (cs *chainSync) handleReadyBlock(bd *types.BlockData) {
-	if cs.readyBlocks.has(bd.Hash) {
-		logger.Tracef("ignoring block %s in response, already in ready queue", bd.Hash)
-		return
-	}
-
+// handleBody handles block bodies included in BlockResponses
+func (cs *chainSync) handleBody(body *types.Body) {
+	for _, ext := range *body {
+		cs.transactionState.RemoveExtrinsic(ext)
+	}
+}
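For context on handleBody above: every extrinsic included in an imported block body has to be dropped from the local transaction queue, otherwise the node would keep re-gossiping and re-proposing transactions that are already on chain. A tiny, self-contained sketch of that bookkeeping (the `pool` type is a hypothetical stand-in, not Gossamer's actual TransactionState):

    package main

    import "fmt"

    // pool is a hypothetical in-memory ready queue standing in for TransactionState.
    type pool struct{ ready map[string]struct{} }

    // RemoveExtrinsic drops an extrinsic from the ready queue, if present.
    func (p *pool) RemoveExtrinsic(ext []byte) { delete(p.ready, string(ext)) }

    func main() {
        p := &pool{ready: map[string]struct{}{"ext-a": {}, "ext-b": {}}}

        importedBody := [][]byte{[]byte("ext-a")} // extrinsics seen in the imported block
        for _, ext := range importedBody {
            p.RemoveExtrinsic(ext)
        }

        fmt.Println(len(p.ready)) // 1: only "ext-b" is still queued
    }
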
-	// if header was not requested, get it from the pending set
-	// if we're expecting headers, validate should ensure we have a header
-	if bd.Header == nil {
-		block := cs.pendingBlocks.getBlock(bd.Hash)
-		if block == nil {
-			// block wasn't in the pending set!
-			// let's check the db as maybe we already processed it
-			has, err := cs.blockState.HasHeader(bd.Hash)
-			if err != nil && !errors.Is(err, chaindb.ErrKeyNotFound) {
-				logger.Debugf("failed to check if header is known for hash %s: %s", bd.Hash, err)
-				return
-			}
+func (cs *chainSync) handleJustification(header *types.Header, justification []byte) (err error) {
+	logger.Debugf("handling justification for block %d...", header.Number)
 
-			if has {
-				logger.Tracef("ignoring block we've already processed, hash=%s", bd.Hash)
-				return
-			}
+	headerHash := header.Hash()
+	err = cs.finalityGadget.VerifyBlockJustification(headerHash, justification)
+	if err != nil {
+		return fmt.Errorf("verifying block number %d justification: %w", header.Number, err)
+	}
 
-			// this is bad and shouldn't happen
-			logger.Errorf("block with unknown header is ready: hash=%s", bd.Hash)
-			return
-		}
+	err = cs.blockState.SetJustification(headerHash, justification)
+	if err != nil {
+		return fmt.Errorf("setting justification for block number %d: %w", header.Number, err)
+	}
 
-		bd.Header = block.header
-	}
+	logger.Infof("🔨 finalised block number %d with hash %s", header.Number, headerHash)
+	return nil
+}
 
-	if bd.Header == nil {
-		logger.Errorf("new ready block number (unknown) with hash %s", bd.Hash)
-		return
+// handleBlock handles blocks (header+body) included in BlockResponses
+func (cs *chainSync) handleBlock(block *types.Block, announceImportedBlock bool) error {
+	parent, err := cs.blockState.GetHeader(block.Header.ParentHash)
+	if err != nil {
+		return fmt.Errorf("%w: %s", errFailedToGetParent, err)
 	}
 
-	//logger.Tracef("new ready block number %d with hash %s", bd.Header.Number, bd.Hash)
+	cs.storageState.Lock()
+	defer cs.storageState.Unlock()
+
+	ts, err := cs.storageState.TrieState(&parent.StateRoot)
+	if err != nil {
+		return err
 	}
 
-	// see if there are any descendents in the pending queue that are now ready to be processed,
-	// as we have just become aware of their parent block
-	ready := []*types.BlockData{bd}
-	ready = cs.pendingBlocks.getReadyDescendants(bd.Hash, ready)
+	root := ts.MustRoot()
+	if !bytes.Equal(parent.StateRoot[:], root[:]) {
+		panic("parent state root does not match snapshot state root")
+	}
 
-	for _, rb := range ready {
-		cs.pendingBlocks.removeBlock(rb.Hash)
-		cs.readyBlocks.push(rb)
+	rt, err := cs.blockState.GetRuntime(parent.Hash())
+	if err != nil {
+		return err
 	}
+
+	rt.SetContextStorage(ts)
+
+	_, err = rt.ExecuteBlock(block)
+	if err != nil {
+		return fmt.Errorf("failed to execute block %d: %w", block.Header.Number, err)
+	}
+
+	if err = cs.blockImportHandler.HandleBlockImport(block, ts, announceImportedBlock); err != nil {
+		return err
+	}
+
+	//logger.Debugf("🔗 imported block number %d with hash %s", block.Header.Number, block.Header.Hash())
+
+	blockHash := block.Header.Hash()
+	cs.telemetry.SendMessage(telemetry.NewBlockImport(
+		&blockHash,
+		block.Header.Number,
+		"NetworkInitialSync"))
+
+	return nil
 }
 
 // determineSyncPeers returns a list of peers that likely have the blocks in the given block request.
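The ordering inside handleBlock is the important part: trie state is loaded at the parent's state root, the snapshot invariant is asserted, the block is executed against that state, and only then is the result handed to the import handler. A compressed, hypothetical sketch of that sequencing (func fields stand in for the real runtime and import-handler interfaces; none of these names come from the series):

    package main

    // importPipeline is a hypothetical reduction of the wiring used above.
    type importPipeline struct {
        trieStateAt  func(stateRoot [32]byte) (trieState any, err error)
        executeBlock func(block, trieState any) error
        handleImport func(block, trieState any, announce bool) error
    }

    // importBlock mirrors the ordering: parent state first, then execution,
    // then persistence and (optionally) announcement.
    func (p *importPipeline) importBlock(block any, parentStateRoot [32]byte, announce bool) error {
        ts, err := p.trieStateAt(parentStateRoot)
        if err != nil {
            return err
        }
        if err := p.executeBlock(block, ts); err != nil {
            return err
        }
        return p.handleImport(block, ts, announce)
    }

    func main() {}
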
@@ -1045,41 +1027,12 @@ func (cs *chainSync) validateResponse(req *network.BlockRequestMessage, continue } - // parent unknown, add to pending blocks - if err := cs.pendingBlocks.addBlock(&types.Block{ - Header: *curr, - Body: *bd.Body, - }); err != nil { - return err - } - - if bd.Justification != nil { - if err := cs.pendingBlocks.addJustification(bd.Hash, *bd.Justification); err != nil { - return err - } - } - return errUnknownParent } // otherwise, check that this response forms a chain // ie. curr's parent hash is hash of previous header, and curr's number is previous number + 1 if prev.Hash() != curr.ParentHash || curr.Number != prev.Number+1 { - // the response is missing some blocks, place blocks from curr onwards into pending blocks set - for _, bd := range resp.BlockData[i:] { - if err := cs.pendingBlocks.addBlock(&types.Block{ - Header: *curr, - Body: *bd.Body, - }); err != nil { - return err - } - - if bd.Justification != nil { - if err := cs.pendingBlocks.addJustification(bd.Hash, *bd.Justification); err != nil { - return err - } - } - } return errResponseIsNotChain } diff --git a/dot/sync/requests.go b/dot/sync/requests.go index 0632962db8..a59bc5d018 100644 --- a/dot/sync/requests.go +++ b/dot/sync/requests.go @@ -15,7 +15,7 @@ func ascedingBlockRequest(startNumber uint, targetNumber uint, requestedData byt if diff == 0 { one := uint32(1) return []*network.BlockRequestMessage{ - &network.BlockRequestMessage{ + { RequestedData: requestedData, StartingBlock: *variadic.MustNewUint32OrHash(uint32(startNumber)), Direction: network.Ascending, @@ -43,7 +43,7 @@ func ascedingBlockRequest(startNumber uint, targetNumber uint, requestedData byt Direction: network.Ascending, Max: &max, } - startNumber += maxResponseSize + startNumber += uint(max) } return reqs, nil diff --git a/dot/sync/syncer.go b/dot/sync/syncer.go index 32c9d74fa5..80cdb4cfa2 100644 --- a/dot/sync/syncer.go +++ b/dot/sync/syncer.go @@ -46,13 +46,19 @@ func NewService(cfg *Config) (*Service, error) { pendingBlocks := newDisjointBlockSet(pendingBlocksLimit) csCfg := chainSyncConfig{ - bs: cfg.BlockState, - net: cfg.Network, - readyBlocks: readyBlocks, - pendingBlocks: pendingBlocks, - minPeers: cfg.MinPeers, - maxPeers: cfg.MaxPeers, - slotDuration: cfg.SlotDuration, + bs: cfg.BlockState, + net: cfg.Network, + readyBlocks: readyBlocks, + pendingBlocks: pendingBlocks, + minPeers: cfg.MinPeers, + maxPeers: cfg.MaxPeers, + slotDuration: cfg.SlotDuration, + storageState: cfg.StorageState, + transactionState: cfg.TransactionState, + babeVerifier: cfg.BabeVerifier, + finalityGadget: cfg.FinalityGadget, + blockImportHandler: cfg.BlockImportHandler, + telemetry: cfg.Telemetry, } chainSync := newChainSync(csCfg) @@ -81,14 +87,14 @@ func NewService(cfg *Config) (*Service, error) { // Start begins the chainSync and chainProcessor modules. 
It begins syncing in bootstrap mode
 func (s *Service) Start() error {
 	go s.chainSync.start()
-	go s.chainProcessor.processReadyBlocks()
+	//go s.chainProcessor.processReadyBlocks()
 	return nil
 }
 
 // Stop stops the chainSync and chainProcessor modules
 func (s *Service) Stop() error {
 	s.chainSync.stop()
-	s.chainProcessor.stop()
+	//s.chainProcessor.stop()
 	return nil
 }
diff --git a/dot/types/block_data.go b/dot/types/block_data.go
index c32d7f69a7..97a0c6b691 100644
--- a/dot/types/block_data.go
+++ b/dot/types/block_data.go
@@ -32,7 +32,8 @@ func (bd *BlockData) Number() uint {
 }
 
 func (bd *BlockData) LessOrEqual(o *BlockData) bool {
-	return bd.Header.Number <= o.Header.Number
+	res := bd.Header.Number <= o.Header.Number
+	return res
 }
 
 func (bd *BlockData) String() string {

From 6e272a65f69a94c473cca56ec62bcffc36c37ad3 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Fri, 14 Apr 2023 10:17:34 -0400
Subject: [PATCH 006/140] chore: start ignoring peers + use `idleTicker` to
 check the pool

---
 dot/sync/chain_sync.go  | 181 ++++++++--------------------------------
 dot/sync/worker_pool.go |  80 ++++++++----------
 2 files changed, 70 insertions(+), 191 deletions(-)

diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go
index 7454896119..de8e197c9c 100644
--- a/dot/sync/chain_sync.go
+++ b/dot/sync/chain_sync.go
@@ -9,7 +9,6 @@ import (
 	"errors"
 	"fmt"
 	"math/big"
-	"strings"
 	"sync"
 	"time"
 
@@ -24,7 +23,6 @@ import (
 	"github.com/ChainSafe/gossamer/dot/types"
 	"github.com/ChainSafe/gossamer/lib/blocktree"
 	"github.com/ChainSafe/gossamer/lib/common"
-	"github.com/ChainSafe/gossamer/lib/common/variadic"
 )
 
 const (
@@ -296,6 +294,15 @@ func (cs *chainSync) setPeerHead(p peer.ID, bestHash common.Hash, bestNumber uin
 		logger.Errorf("adding a potential worker: %s", err)
 	}
 
+	ps := &peerState{
+		who:    p,
+		hash:   bestHash,
+		number: bestNumber,
+	}
+	cs.Lock()
+	cs.peerState[p] = ps
+	cs.Unlock()
+
 	// if the peer reports a lower or equal best block number than us,
 	// check if they are on a fork or not
 	head, err := cs.blockState.BestBlockHeader()
@@ -439,6 +446,7 @@ func (cs *chainSync) sync() {
 }
 
 func (cs *chainSync) executeBootstrapSync() error {
+	const maxRequestAllowed uint = 40
 	for {
 		head, err := cs.blockState.BestBlockHeader()
 		if err != nil {
 			return fmt.Errorf("getting best block header while syncing: %w", err)
 		}
 
 		startRequestAt := head.Number + 1
-
-		fmt.Printf("=====> REQUEST FROM %d; BEST BLOCK HEADER: %d\n", startRequestAt, head.Number+1)
 		cs.workerPool.useConnectedPeers()
 
+		// we build the set of requests based on the number of available peers
+		// in the worker pool; if we have more peers than `maxRequestAllowed`,
+		// we limit ourselves to `maxRequestAllowed` requests to avoid the error:
+		// cannot reserve outbound connection: resource limit exceeded
 		availablePeers := cs.workerPool.totalWorkers()
+		if availablePeers > maxRequestAllowed {
+			availablePeers = maxRequestAllowed
+		}
+
 		targetBlockNumber := startRequestAt + uint(availablePeers)*128
 
 		fmt.Printf("=====> requesting from %d targeting %d\n", startRequestAt, targetBlockNumber)
 		requests, err := ascedingBlockRequest(
 			startRequestAt, targetBlockNumber, bootstrapRequestData)
-
 		if err != nil {
 			logger.Errorf("failed to setup ascending block requests: %s", err)
 		}
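To sanity-check the new cap with concrete numbers: each usable worker receives one 128-block ascending request, so a single round with the maximum of 40 workers spans roughly 5,120 blocks. A small, self-contained sketch of that arithmetic (the constants are assumptions mirroring the code above):

    package main

    import "fmt"

    func main() {
        // assumed to mirror the sync code: 128 blocks per request,
        // at most 40 requests dispatched per round
        const maxResponseSize, maxRequestAllowed uint = 128, 40

        bestBlockNumber := uint(1_000)
        startRequestAt := bestBlockNumber + 1

        for _, peers := range []uint{8, 40, 75} {
            if peers > maxRequestAllowed {
                peers = maxRequestAllowed // avoid exhausting outbound connection limits
            }
            target := startRequestAt + peers*maxResponseSize
            fmt.Printf("peers=%2d start=%d target=%d (~%d blocks this round)\n",
                peers, startRequestAt, target, peers*maxResponseSize)
        }
    }
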
@@ -555,25 +568,33 @@ func (cs *chainSync) handleWorkersResults(workersResults chan *syncTaskResult, t
 
 loop:
 	for {
+		// in case we don't receive any worker results within the idle
+		// duration, we should check the pool for newly connected peers
+		idleDuration := 3 * time.Minute
+		idleTicker := time.NewTimer(idleDuration)
+
 		select {
+		case <-idleTicker.C:
+			logger.Warnf("idle ticker triggered! checking pool")
+			cs.workerPool.useConnectedPeers()
+			continue
+
 		// TODO: implement a case to stop
 		case taskResult := <-workersResults:
+			if !idleTicker.Stop() {
+				<-idleTicker.C
+			}
+
 			logger.Infof("task result: peer(%s), error: %v, hasResponse: %v",
 				taskResult.who, taskResult.err != nil, taskResult.response != nil)
 			if taskResult.err != nil {
-				switch {
+				logger.Criticalf("task result error: %s", taskResult.err)
 				// TODO: add this worker to an ignorePeers list and implement an expiration time for
 				// peers added to it (a peerJail where peers have a release date, and maybe extend
 				// the punishment if they fail again and again; Jimmy's + Diego's idea)
-				case strings.Contains(taskResult.err.Error(), "dial backoff") ||
-					taskResult.err.Error() == "protocol not supported":
-
-					logger.Criticalf("response invalid: %s", taskResult.err)
-					cs.workerPool.shutdownWorker(taskResult.who)
-					cs.workerPool.submitRequest(taskResult.request, workersResults)
-					continue
-				}
+				cs.workerPool.shutdownWorker(taskResult.who, true)
+				cs.workerPool.submitRequest(taskResult.request, workersResults)
+				continue
 			}
 
 			who := taskResult.who
@@ -589,7 +610,7 @@ loop:
 			switch {
 			case errors.Is(err, errResponseIsNotChain):
 				logger.Criticalf("response invalid: %s", err)
-				cs.workerPool.shutdownWorker(taskResult.who)
+				cs.workerPool.shutdownWorker(taskResult.who, true)
 				cs.workerPool.submitRequest(taskResult.request, workersResults)
 				continue
 			case errors.Is(err, errEmptyBlockData):
@@ -598,7 +619,7 @@ loop:
 			case errors.Is(err, errUnknownParent):
 			case err != nil:
 				logger.Criticalf("response invalid: %s", err)
-				cs.workerPool.shutdownWorker(taskResult.who)
+				cs.workerPool.shutdownWorker(taskResult.who, true)
 				cs.workerPool.submitRequest(taskResult.request, workersResults)
 				continue
 			}
@@ -625,7 +646,6 @@ loop:
 	}
 
 	logger.Infof("synced %d blocks, starting process", len(syncingChain))
-
 	if len(syncingChain) >= 2 {
 		// ensuring the parents are in the right place
 		parentElement := syncingChain[0]
@@ -930,50 +950,6 @@ func (cs *chainSync) handleBlock(block *types.Block, announceImportedBlock bool)
 	return nil
 }
 
-// determineSyncPeers returns a list of peers that likely have the blocks in the given block request.
-func (cs *chainSync) determineSyncPeers(req *network.BlockRequestMessage, peersTried map[peer.ID]struct{}) []peer.ID {
-	var start uint32
-	if req.StartingBlock.IsUint32() {
-		start = req.StartingBlock.Uint32()
-	}
-
-	cs.RLock()
-	defer cs.RUnlock()
-
-	// if we're currently ignoring all our peers, clear out the list.
-	if len(cs.peerState) == len(cs.ignorePeers) {
-		cs.RUnlock()
-		cs.Lock()
-		for p := range cs.ignorePeers {
-			delete(cs.ignorePeers, p)
-		}
-		cs.Unlock()
-		cs.RLock()
-	}
-
-	peers := make([]peer.ID, 0, len(cs.peerState))
-
-	for p, state := range cs.peerState {
-		if _, has := cs.ignorePeers[p]; has {
-			continue
-		}
-
-		if _, has := peersTried[p]; has {
-			continue
-		}
-
-		// if peer definitely doesn't have any blocks we want in the request,
-		// don't request from them
-		if start > 0 && uint32(state.number) < start {
-			continue
-		}
-
-		peers = append(peers, p)
-	}
-
-	return peers
-}
-
 // validateResponse performs pre-validation of a block response before placing it into either the
 // pendingBlocks or readyBlocks set.
// It checks the following: @@ -1101,86 +1077,3 @@ func (cs *chainSync) getHighestBlock() (highestBlock uint, err error) { return highestBlock, nil } - -func workerToRequests(w *worker) ([]*network.BlockRequestMessage, error) { - diff := int(*w.targetNumber) - int(*w.startNumber) - if diff < 0 && w.direction != network.Descending { - return nil, errInvalidDirection - } - - if diff > 0 && w.direction != network.Ascending { - return nil, errInvalidDirection - } - - // start and end block are the same, just request 1 block - if diff == 0 { - diff = 1 - } - - // to deal with descending requests (ie. target may be lower than start) which are used in tip mode, - // take absolute value of difference between start and target - numBlocks := diff - if numBlocks < 0 { - numBlocks = -numBlocks - } - numRequests := uint(numBlocks) / maxResponseSize - - if numBlocks%maxResponseSize != 0 { - numRequests++ - } - - startNumber := *w.startNumber - reqs := make([]*network.BlockRequestMessage, numRequests) - - for i := uint(0); i < numRequests; i++ { - // check if we want to specify a size - max := uint32(maxResponseSize) - - if w.direction == network.Descending && i == numRequests-1 { - size := numBlocks % maxResponseSize - if size == 0 { - size = maxResponseSize - } - max = uint32(size) - } - - var start *variadic.Uint32OrHash - if w.startHash.IsEmpty() { - // worker startHash is unspecified if we are in bootstrap mode - start = variadic.MustNewUint32OrHash(uint32(startNumber)) - } else { - // in tip-syncing mode, we know the hash of the block on the fork we wish to sync - start = variadic.MustNewUint32OrHash(w.startHash) - - // if we're doing descending requests and not at the last (highest starting) request, - // then use number as start block - if w.direction == network.Descending && i != numRequests-1 { - start = variadic.MustNewUint32OrHash(startNumber) - } - } - - reqs[i] = &network.BlockRequestMessage{ - RequestedData: w.requestData, - StartingBlock: *start, - Direction: w.direction, - Max: &max, - } - - switch w.direction { - case network.Ascending: - startNumber += maxResponseSize - case network.Descending: - startNumber -= maxResponseSize - } - } - - // if our direction is descending, we want to send out the request with the lowest - // startNumber first - if w.direction == network.Descending { - for i, j := 0, len(reqs)-1; i < j; i, j = i+1, j-1 { - reqs[i], reqs[j] = reqs[j], reqs[i] - } - } - - return reqs, nil -} diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 2393414d3f..c46347fd9e 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -2,14 +2,12 @@ package sync import ( "context" - "errors" - "math/big" "sync" + "time" "github.com/ChainSafe/gossamer/dot/network" "github.com/ChainSafe/gossamer/lib/common" "github.com/libp2p/go-libp2p/core/peer" - "golang.org/x/exp/maps" ) type syncTask struct { @@ -22,19 +20,23 @@ type syncWorkerPool struct { l sync.RWMutex wg sync.WaitGroup - network Network - taskQueue chan *syncTask - workers map[peer.ID]*syncWorker + network Network + taskQueue chan *syncTask + workers map[peer.ID]*syncWorker + ignorePeers map[peer.ID]time.Time } func newSyncWorkerPool(net Network) *syncWorkerPool { return &syncWorkerPool{ - network: net, - workers: make(map[peer.ID]*syncWorker), - taskQueue: make(chan *syncTask), + network: net, + workers: make(map[peer.ID]*syncWorker), + taskQueue: make(chan *syncTask), + ignorePeers: make(map[peer.ID]time.Time), } } +const ignorePeerTimeout = 2 * time.Minute + func (s *syncWorkerPool) 
useConnectedPeers() {
 	connectedPeers := s.network.TotalConnectedPeers()
@@ -47,6 +49,15 @@ func (s *syncWorkerPool) useConnectedPeers() {
 			continue
 		}
 
+		if releaseTime, has := s.ignorePeers[connectedPeer]; has {
+			if time.Now().Before(releaseTime) {
+				continue
+			}
+			delete(s.ignorePeers, connectedPeer)
+		}
+
 		// they are ephemeral because once we reach the tip we
 		// should remove them and use only peers who send us
 		// block announcements
@@ -61,6 +72,13 @@ func (s *syncWorkerPool) addWorker(who peer.ID, bestHash common.Hash, bestNumber
 	s.l.Lock()
 	defer s.l.Unlock()
 
+	// remove the peer from the ignore list, since it sent us a block
+	// announcement and may be a valid peer to request blocks from again
+	_, has := s.ignorePeers[who]
+	if has {
+		delete(s.ignorePeers, who)
+	}
+
 	worker, has := s.workers[who]
 	if has {
 		worker.update(bestHash, bestNumber)
@@ -88,7 +106,7 @@ func (s *syncWorkerPool) submitRequests(requests []*network.BlockRequestMessage,
 	}
 }
 
-func (s *syncWorkerPool) shutdownWorker(who peer.ID) {
+func (s *syncWorkerPool) shutdownWorker(who peer.ID, ignore bool) {
 	s.l.Lock()
 	defer s.l.Unlock()
 
@@ -99,6 +117,11 @@ func (s *syncWorkerPool) shutdownWorker(who peer.ID, ignore bool) {
 
 	peer.Stop()
 	delete(s.workers, who)
+
+	if ignore {
+		releaseTime := time.Now().Add(ignorePeerTimeout)
+		s.ignorePeers[who] = releaseTime
+	}
 }
 
 func (s *syncWorkerPool) totalWorkers() (total uint) {
@@ -112,40 +135,3 @@ func (s *syncWorkerPool) totalWorkers() (total uint) {
 
 	return total
 }
-
-// getTargetBlockNumber takes the average of all peer heads
-// TODO: should we just return the highest? could be an attack vector potentially, if a peer reports some very large
-// head block number, it would leave us in bootstrap mode forever
-// it would be better to have some sort of standard deviation calculation and discard any outliers (#1861)
-func (s *syncWorkerPool) getTargetBlockNumber() (uint, error) {
-	s.l.RLock()
-	activeWorkers := maps.Values(s.workers)
-	s.l.RUnlock()
-
-	// in practice, this shouldn't happen, as we only start the module once we have some peer states
-	if len(activeWorkers) == 0 {
-		// return max uint32 instead of 0, as returning 0 would switch us to tip mode unexpectedly
-		return 0, errors.New("no active workers yet")
-	}
-
-	// we are going to sort the data and remove the outliers then we will return the avg of all the valid elements
-	blockNumbers := make([]uint, 0, len(activeWorkers))
-	for _, worker := range activeWorkers {
-		// we don't count ephemeral workers since they don't have
-		// best block hash/number information, they are connected peers
-		// who can help us sync blocks faster
-		if worker.isEphemeral {
-			continue
-		}
-
-		blockNumbers = append(blockNumbers, worker.bestNumber)
-	}
-
-	if len(blockNumbers) < 1 {
-		return 0, errors.New("no active workers yet")
-	}
-
-	sum, count := nonOutliersSumCount(blockNumbers)
-	quotientBigInt := big.NewInt(0).Div(sum, big.NewInt(int64(count)))
-	return uint(quotientBigInt.Uint64()), nil
-}

From 39dd7c3e35b52c49cd87c03afbbe6fe899056076 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Sat, 15 Apr 2023 10:01:54 -0400
Subject: [PATCH 007/140] fix: use buffered channel for taskQueue + wip:
 implementing tip sync

---
 dot/sync/chain_processor.go                  |  103 --
 dot/sync/chain_processor_integration_test.go |  345 -----
 dot/sync/chain_processor_test.go             | 1181 ------------------
 dot/sync/chain_sync.go                       |  114 +-
 dot/sync/syncer.go                           |   28 +-
 dot/sync/worker_pool.go                      |    4 +-
 dot/types/block_data.go                      |    5 -
 7 files changed, 58 insertions(+), 1722 deletions(-)
 delete mode 
100644 dot/sync/chain_processor.go delete mode 100644 dot/sync/chain_processor_integration_test.go delete mode 100644 dot/sync/chain_processor_test.go diff --git a/dot/sync/chain_processor.go b/dot/sync/chain_processor.go deleted file mode 100644 index a008529a57..0000000000 --- a/dot/sync/chain_processor.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "context" -) - -// ChainProcessor processes ready blocks. -// it is implemented by *chainProcessor -type ChainProcessor interface { - processReadyBlocks() - stop() -} - -type chainProcessor struct { - ctx context.Context - cancel context.CancelFunc - - chainSync ChainSync - - // blocks that are ready for processing. ie. their parent is known, or their parent is ahead - // of them within this channel and thus will be processed first - readyBlocks *blockQueue - - // set of block not yet ready to be processed. - // blocks are placed here if they fail to be processed due to missing parent block - pendingBlocks DisjointBlockSet - - blockState BlockState - storageState StorageState - transactionState TransactionState - babeVerifier BabeVerifier - finalityGadget FinalityGadget - blockImportHandler BlockImportHandler - telemetry Telemetry -} - -type chainProcessorConfig struct { - readyBlocks *blockQueue - pendingBlocks DisjointBlockSet - syncer ChainSync - blockState BlockState - storageState StorageState - transactionState TransactionState - babeVerifier BabeVerifier - finalityGadget FinalityGadget - blockImportHandler BlockImportHandler - telemetry Telemetry -} - -func newChainProcessor(cfg chainProcessorConfig) *chainProcessor { - ctx, cancel := context.WithCancel(context.Background()) - - return &chainProcessor{ - ctx: ctx, - cancel: cancel, - readyBlocks: cfg.readyBlocks, - pendingBlocks: cfg.pendingBlocks, - chainSync: cfg.syncer, - blockState: cfg.blockState, - storageState: cfg.storageState, - transactionState: cfg.transactionState, - babeVerifier: cfg.babeVerifier, - finalityGadget: cfg.finalityGadget, - blockImportHandler: cfg.blockImportHandler, - telemetry: cfg.telemetry, - } -} - -func (s *chainProcessor) stop() { - s.cancel() -} - -func (s *chainProcessor) processReadyBlocks() { - // for { - // bd, err := s.readyBlocks.pop(s.ctx) - // if err != nil { - // if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - // return - // } - // panic(fmt.Sprintf("unhandled error: %s", err)) - // } - - // if err := s.processBlockData(*bd); err != nil { - // // depending on the error, we might want to save this block for later - // logger.Errorf("block data processing for block with hash %s failed: %s", bd.Hash, err) - - // if !errors.Is(err, errFailedToGetParent) && !errors.Is(err, blocktree.ErrParentNotFound) { - // continue - // } - - // logger.Tracef("block data processing for block with hash %s failed: %s", bd.Hash, err) - // if err := s.pendingBlocks.addBlock(&types.Block{ - // Header: *bd.Header, - // Body: *bd.Body, - // }); err != nil { - // logger.Debugf("failed to re-add block to pending blocks: %s", err) - // } - // } - // } -} diff --git a/dot/sync/chain_processor_integration_test.go b/dot/sync/chain_processor_integration_test.go deleted file mode 100644 index fc84b960a9..0000000000 --- a/dot/sync/chain_processor_integration_test.go +++ /dev/null @@ -1,345 +0,0 @@ -//go:build integration - -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - 
"errors" - "testing" - "time" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/dot/state" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/babe/inherents" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/common/variadic" - "github.com/ChainSafe/gossamer/lib/transaction" - "github.com/ChainSafe/gossamer/pkg/scale" - - "github.com/stretchr/testify/require" -) - -func buildBlockWithSlotAndTimestamp(t *testing.T, instance state.Runtime, - parent *types.Header, currentSlot, timestamp uint64) *types.Block { - t.Helper() - - digest := types.NewDigest() - prd, err := types.NewBabeSecondaryPlainPreDigest(0, currentSlot).ToPreRuntimeDigest() - require.NoError(t, err) - err = digest.Add(*prd) - require.NoError(t, err) - header := &types.Header{ - ParentHash: parent.Hash(), - StateRoot: common.Hash{}, - ExtrinsicsRoot: common.Hash{}, - Number: parent.Number + 1, - Digest: digest, - } - - err = instance.InitializeBlock(header) - require.NoError(t, err) - - inherentData := types.NewInherentData() - err = inherentData.SetInherent(types.Timstap0, timestamp) - require.NoError(t, err) - - err = inherentData.SetInherent(types.Babeslot, currentSlot) - require.NoError(t, err) - - parachainInherent := inherents.ParachainInherentData{ - ParentHeader: *parent, - } - - err = inherentData.SetInherent(types.Parachn0, parachainInherent) - require.NoError(t, err) - - err = inherentData.SetInherent(types.Newheads, []byte{0}) - require.NoError(t, err) - - encodedInherentData, err := inherentData.Encode() - require.NoError(t, err) - - // Call BlockBuilder_inherent_extrinsics which returns the inherents as encoded extrinsics - encodedInherentExtrinsics, err := instance.InherentExtrinsics(encodedInherentData) - require.NoError(t, err) - - var inherentExtrinsics [][]byte - err = scale.Unmarshal(encodedInherentExtrinsics, &inherentExtrinsics) - require.NoError(t, err) - - for _, inherent := range inherentExtrinsics { - encodedInherent, err := scale.Marshal(inherent) - require.NoError(t, err) - - applyExtrinsicResult, err := instance.ApplyExtrinsic(encodedInherent) - require.NoError(t, err) - require.Equal(t, applyExtrinsicResult, []byte{0, 0}) - } - - finalisedHeader, err := instance.FinalizeBlock() - require.NoError(t, err) - - body := types.Body(types.BytesArrayToExtrinsics(inherentExtrinsics)) - - finalisedHeader.Number = header.Number - finalisedHeader.Hash() - - return &types.Block{ - Header: *finalisedHeader, - Body: body, - } -} - -func buildAndAddBlocksToState(t *testing.T, runtime state.Runtime, blockState *state.BlockState, amount uint) { - t.Helper() - - parent, err := blockState.BestBlockHeader() - require.NoError(t, err) - - babeConfig, err := runtime.BabeConfiguration() - require.NoError(t, err) - - timestamp := uint64(time.Now().Unix()) - slotDuration := babeConfig.SlotDuration - - for i := uint(0); i < amount; i++ { - // calculate the exact slot for each produced block - currentSlot := timestamp / slotDuration - - block := buildBlockWithSlotAndTimestamp(t, runtime, parent, currentSlot, timestamp) - err = blockState.AddBlock(block) - require.NoError(t, err) - parent = &block.Header - - // increase the timestamp by the slot duration - // so we will get a different slot for the next block - timestamp += slotDuration - } - -} - -func TestChainProcessor_HandleBlockResponse_ValidChain(t *testing.T) { - syncer := newTestSyncer(t) - responder := newTestSyncer(t) - - bestBlockHash := 
responder.blockState.(*state.BlockState).BestBlockHash() - runtimeInstance, err := responder.blockState.GetRuntime(bestBlockHash) - require.NoError(t, err) - - buildAndAddBlocksToState(t, runtimeInstance, - responder.blockState.(*state.BlockState), maxResponseSize*2) - - // syncer makes request for chain - startNum := 1 - start, err := variadic.NewUint32OrHash(startNum) - require.NoError(t, err) - - req := &network.BlockRequestMessage{ - RequestedData: network.RequestedDataHeader + network.RequestedDataBody, - StartingBlock: *start, - } - - // get response - resp, err := responder.CreateBlockResponse(req) - require.NoError(t, err) - - // process response - for _, bd := range resp.BlockData { - err = syncer.chainProcessor.(*chainProcessor).processBlockData(*bd) - require.NoError(t, err) - } - - // syncer makes request for chain again (block 129+) - startNum = 129 - start, err = variadic.NewUint32OrHash(startNum) - require.NoError(t, err) - - req = &network.BlockRequestMessage{ - RequestedData: network.RequestedDataHeader + network.RequestedDataBody, - StartingBlock: *start, - } - - // get response - resp, err = responder.CreateBlockResponse(req) - require.NoError(t, err) - - // process response - for _, bd := range resp.BlockData { - err = syncer.chainProcessor.(*chainProcessor).processBlockData(*bd) - require.NoError(t, err) - } -} - -func TestChainProcessor_HandleBlockResponse_MissingBlocks(t *testing.T) { - syncer := newTestSyncer(t) - - bestBlockHash := syncer.blockState.(*state.BlockState).BestBlockHash() - syncerRuntime, err := syncer.blockState.GetRuntime(bestBlockHash) - require.NoError(t, err) - - const syncerAmountOfBlocks = 4 - buildAndAddBlocksToState(t, syncerRuntime, syncer.blockState.(*state.BlockState), syncerAmountOfBlocks) - - responder := newTestSyncer(t) - responderRuntime, err := responder.blockState.GetRuntime(bestBlockHash) - require.NoError(t, err) - - const responderAmountOfBlocks = 16 - buildAndAddBlocksToState(t, responderRuntime, responder.blockState.(*state.BlockState), responderAmountOfBlocks) - - startNum := 15 - start, err := variadic.NewUint32OrHash(startNum) - require.NoError(t, err) - - req := &network.BlockRequestMessage{ - RequestedData: 3, - StartingBlock: *start, - } - - // resp contains blocks 15 to 15 + maxResponseSize) - resp, err := responder.CreateBlockResponse(req) - require.NoError(t, err) - - for _, bd := range resp.BlockData { - err = syncer.chainProcessor.(*chainProcessor).processBlockData(*bd) - require.True(t, errors.Is(err, errFailedToGetParent)) - } -} - -func TestChainProcessor_handleBody_ShouldRemoveIncludedExtrinsics(t *testing.T) { - syncer := newTestSyncer(t) - - ext := []byte("nootwashere") - tx := &transaction.ValidTransaction{ - Extrinsic: ext, - Validity: &transaction.Validity{Priority: 1}, - } - - _, err := syncer.chainProcessor.(*chainProcessor).transactionState.(*state.TransactionState).Push(tx) - require.NoError(t, err) - - body := types.NewBody([]types.Extrinsic{ext}) - syncer.chainProcessor.(*chainProcessor).handleBody(body) - - inQueue := syncer.chainProcessor.(*chainProcessor).transactionState.(*state.TransactionState).Pop() - require.Nil(t, inQueue, "queue should be empty") -} - -func TestChainProcessor_HandleBlockResponse_BlockData(t *testing.T) { - syncer := newTestSyncer(t) - - parent, err := syncer.blockState.(*state.BlockState).BestBlockHeader() - require.NoError(t, err) - - runtimeInstance, err := syncer.blockState.GetRuntime(parent.Hash()) - require.NoError(t, err) - - babeConfig, err := 
runtimeInstance.BabeConfiguration() - require.NoError(t, err) - - timestamp := uint64(time.Now().Unix()) - slotDuration := babeConfig.SlotDuration - - // calculate the exact slot for each produced block - currentSlot := timestamp / slotDuration - block := buildBlockWithSlotAndTimestamp(t, runtimeInstance, parent, currentSlot, timestamp) - - bd := []*types.BlockData{{ - Hash: block.Header.Hash(), - Header: &block.Header, - Body: &block.Body, - Receipt: nil, - MessageQueue: nil, - Justification: nil, - }} - msg := &network.BlockResponseMessage{ - BlockData: bd, - } - - for _, bd := range msg.BlockData { - err = syncer.chainProcessor.(*chainProcessor).processBlockData(*bd) - require.NoError(t, err) - } -} - -func TestChainProcessor_ExecuteBlock(t *testing.T) { - syncer := newTestSyncer(t) - - parent, err := syncer.blockState.(*state.BlockState).BestBlockHeader() - require.NoError(t, err) - - bestBlockHash := syncer.blockState.(*state.BlockState).BestBlockHash() - runtimeInstance, err := syncer.blockState.GetRuntime(bestBlockHash) - require.NoError(t, err) - - babeConfig, err := runtimeInstance.BabeConfiguration() - require.NoError(t, err) - - timestamp := uint64(time.Now().Unix()) - slotDuration := babeConfig.SlotDuration - - // calculate the exact slot for each produced block - currentSlot := timestamp / slotDuration - block := buildBlockWithSlotAndTimestamp(t, runtimeInstance, parent, currentSlot, timestamp) - - // reset parentState - parentState, err := syncer.chainProcessor.(*chainProcessor).storageState.TrieState(&parent.StateRoot) - require.NoError(t, err) - runtimeInstance.SetContextStorage(parentState) - - _, err = runtimeInstance.ExecuteBlock(block) - require.NoError(t, err) -} - -func TestChainProcessor_HandleJustification(t *testing.T) { - syncer := newTestSyncer(t) - - d, err := types.NewBabeSecondaryPlainPreDigest(0, 1).ToPreRuntimeDigest() - require.NoError(t, err) - digest := types.NewDigest() - err = digest.Add(*d) - require.NoError(t, err) - - header := &types.Header{ - ParentHash: syncer.blockState.(*state.BlockState).GenesisHash(), - Number: 1, - Digest: digest, - } - - just := []byte("testjustification") - - err = syncer.blockState.(*state.BlockState).AddBlock(&types.Block{ - Header: *header, - Body: types.Body{}, - }) - require.NoError(t, err) - - err = syncer.chainProcessor.(*chainProcessor).handleJustification(header, just) - require.NoError(t, err) - - res, err := syncer.blockState.GetJustification(header.Hash()) - require.NoError(t, err) - require.Equal(t, just, res) -} - -func TestChainProcessor_processReadyBlocks_errFailedToGetParent(t *testing.T) { - syncer := newTestSyncer(t) - processor := syncer.chainProcessor.(*chainProcessor) - go processor.processReadyBlocks() - defer processor.cancel() - - header := &types.Header{ - Number: 1, - } - - processor.readyBlocks.push(&types.BlockData{ - Header: header, - Body: &types.Body{}, - }) - - time.Sleep(time.Millisecond * 100) - require.True(t, processor.pendingBlocks.(*disjointBlockSet).hasBlock(header.Hash())) -} diff --git a/dot/sync/chain_processor_test.go b/dot/sync/chain_processor_test.go deleted file mode 100644 index 902d6626ba..0000000000 --- a/dot/sync/chain_processor_test.go +++ /dev/null @@ -1,1181 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "context" - "errors" - "testing" - - "github.com/ChainSafe/gossamer/dot/telemetry" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/blocktree" - 
"github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/runtime/storage" - "github.com/ChainSafe/gossamer/lib/trie" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" -) - -func Test_chainProcessor_handleBlock(t *testing.T) { - t.Parallel() - mockError := errors.New("test mock error") - testHash := common.MustHexToHash("0x03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314") - testParentHash := common.MustHexToHash("0x7db9db5ed9967b80143100189ba69d9e4deab85ac3570e5df25686cabe32964a") - - tests := map[string]struct { - chainProcessorBuilder func(ctrl *gomock.Controller) chainProcessor - block *types.Block - announce bool - wantErr error - }{ - "handle_getHeader_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(nil, mockError) - chainProcessor.blockState = mockBlockState - return - }, - block: &types.Block{ - Body: types.Body{}, - }, - wantErr: errFailedToGetParent, - }, - "handle_trieState_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{}, nil) - chainProcessor.blockState = mockBlockState - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().Lock() - mockStorageState.EXPECT().TrieState(&common.Hash{}).Return(nil, mockError) - mockStorageState.EXPECT().Unlock() - chainProcessor.storageState = mockStorageState - return - }, - block: &types.Block{ - Body: types.Body{}, - }, - wantErr: mockError, - }, - "handle_getRuntime_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ - StateRoot: testHash, - }, nil) - mockBlockState.EXPECT().GetRuntime(testParentHash).Return(nil, mockError) - chainProcessor.blockState = mockBlockState - trieState := storage.NewTrieState(nil) - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().Lock() - mockStorageState.EXPECT().TrieState(&testHash).Return(trieState, nil) - mockStorageState.EXPECT().Unlock() - chainProcessor.storageState = mockStorageState - return - }, - block: &types.Block{ - Body: types.Body{}, - }, - wantErr: mockError, - }, - "handle_runtime_ExecuteBlock_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { - trieState := storage.NewTrieState(nil) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ - StateRoot: testHash, - }, nil) - mockInstance := NewMockInstance(ctrl) - mockInstance.EXPECT().SetContextStorage(trieState) - mockInstance.EXPECT().ExecuteBlock(&types.Block{Body: types.Body{}}).Return(nil, mockError) - mockBlockState.EXPECT().GetRuntime(testParentHash).Return(mockInstance, nil) - chainProcessor.blockState = mockBlockState - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().Lock() - mockStorageState.EXPECT().TrieState(&testHash).Return(trieState, nil) - mockStorageState.EXPECT().Unlock() - chainProcessor.storageState = mockStorageState - return - }, - block: &types.Block{ - Body: types.Body{}, - }, - wantErr: mockError, - }, - "handle_block_import_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor 
chainProcessor) { - trieState := storage.NewTrieState(nil) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ - StateRoot: testHash, - }, nil) - mockBlock := &types.Block{Body: types.Body{}} - mockInstance := NewMockInstance(ctrl) - mockInstance.EXPECT().SetContextStorage(trieState) - mockInstance.EXPECT().ExecuteBlock(mockBlock).Return(nil, nil) - mockBlockState.EXPECT().GetRuntime(testParentHash).Return(mockInstance, nil) - chainProcessor.blockState = mockBlockState - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().Lock() - mockStorageState.EXPECT().TrieState(&testHash).Return(trieState, nil) - mockStorageState.EXPECT().Unlock() - chainProcessor.storageState = mockStorageState - mockBlockImportHandler := NewMockBlockImportHandler(ctrl) - mockBlockImportHandler.EXPECT().HandleBlockImport(mockBlock, - trieState, false).Return(mockError) - chainProcessor.blockImportHandler = mockBlockImportHandler - return - }, - block: &types.Block{ - Body: types.Body{}, - }, - wantErr: mockError, - }, - "base_case": { - chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { - mockBlock := &types.Block{ - Body: types.Body{}, // empty slice of extrinsics - } - trieState := storage.NewTrieState(nil) - mockBlockState := NewMockBlockState(ctrl) - mockHeader := &types.Header{ - Number: 0, - StateRoot: trie.EmptyHash, - } - mockHeaderHash := mockHeader.Hash() - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(mockHeader, nil) - - mockInstance := NewMockInstance(ctrl) - mockInstance.EXPECT().SetContextStorage(trieState) - mockInstance.EXPECT().ExecuteBlock(mockBlock).Return(nil, nil) - mockBlockState.EXPECT().GetRuntime(mockHeaderHash).Return(mockInstance, nil) - chainProcessor.blockState = mockBlockState - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().Lock() - mockStorageState.EXPECT().Unlock() - mockStorageState.EXPECT().TrieState(&trie.EmptyHash).Return(trieState, nil) - chainProcessor.storageState = mockStorageState - mockBlockImportHandler := NewMockBlockImportHandler(ctrl) - mockBlockImportHandler.EXPECT().HandleBlockImport(mockBlock, trieState, false).Return(nil) - chainProcessor.blockImportHandler = mockBlockImportHandler - mockTelemetry := NewMockTelemetry(ctrl) - mockTelemetry.EXPECT().SendMessage(gomock.Any()) - chainProcessor.telemetry = mockTelemetry - return - }, - block: &types.Block{ - Header: types.Header{ - Number: 0, - }, - Body: types.Body{}, - }, - }, - "import_block_and_announce": { - announce: true, - chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { - mockBlock := &types.Block{ - Body: types.Body{}, // empty slice of extrinsics - } - trieState := storage.NewTrieState(nil) - mockBlockState := NewMockBlockState(ctrl) - mockHeader := &types.Header{ - Number: 0, - StateRoot: trie.EmptyHash, - } - mockHeaderHash := mockHeader.Hash() - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(mockHeader, nil) - - mockInstance := NewMockInstance(ctrl) - mockInstance.EXPECT().SetContextStorage(trieState) - mockInstance.EXPECT().ExecuteBlock(mockBlock).Return(nil, nil) - mockBlockState.EXPECT().GetRuntime(mockHeaderHash).Return(mockInstance, nil) - chainProcessor.blockState = mockBlockState - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().Lock() - mockStorageState.EXPECT().Unlock() - mockStorageState.EXPECT().TrieState(&trie.EmptyHash).Return(trieState, nil) - 
chainProcessor.storageState = mockStorageState - mockBlockImportHandler := NewMockBlockImportHandler(ctrl) - mockBlockImportHandler.EXPECT().HandleBlockImport(mockBlock, trieState, true).Return(nil) - chainProcessor.blockImportHandler = mockBlockImportHandler - mockTelemetry := NewMockTelemetry(ctrl) - mockTelemetry.EXPECT().SendMessage(gomock.Any()) - chainProcessor.telemetry = mockTelemetry - return - }, - block: &types.Block{ - Header: types.Header{ - Number: 0, - }, - Body: types.Body{}, - }, - }, - } - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - s := tt.chainProcessorBuilder(ctrl) - - err := s.handleBlock(tt.block, tt.announce) - assert.ErrorIs(t, err, tt.wantErr) - }) - } - t.Run("panics on different parent state root", func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - bock := &types.Block{ - Header: types.Header{ - ParentHash: common.Hash{1}, - }, - } - blockState := NewMockBlockState(ctrl) - blockState.EXPECT().GetHeader(common.Hash{1}). - Return(&types.Header{StateRoot: common.Hash{2}}, nil) - trieState := storage.NewTrieState(nil) - storageState := NewMockStorageState(ctrl) - lockCall := storageState.EXPECT().Lock() - trieStateCall := storageState.EXPECT().TrieState(&common.Hash{2}). - Return(trieState, nil).After(lockCall) - storageState.EXPECT().Unlock().After(trieStateCall) - chainProcessor := &chainProcessor{ - blockState: blockState, - storageState: storageState, - } - const expectedPanicValue = "parent state root does not match snapshot state root" - assert.PanicsWithValue(t, expectedPanicValue, func() { - _ = chainProcessor.handleBlock(bock, false) - }) - }) -} - -func Test_chainProcessor_handleBody(t *testing.T) { - t.Parallel() - - testExtrinsics := []types.Extrinsic{{1, 2, 3}, {7, 8, 9, 0}, {0xa, 0xb}} - testBody := types.NewBody(testExtrinsics) - - t.Run("base case", func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - - mockTransactionState := NewMockTransactionState(ctrl) - mockTransactionState.EXPECT().RemoveExtrinsic(testExtrinsics[0]) - mockTransactionState.EXPECT().RemoveExtrinsic(testExtrinsics[1]) - mockTransactionState.EXPECT().RemoveExtrinsic(testExtrinsics[2]) - processor := chainProcessor{ - transactionState: mockTransactionState, - } - processor.handleBody(testBody) - }) -} - -func Test_chainProcessor_handleJustification(t *testing.T) { - t.Parallel() - - header := &types.Header{ - Number: 2, - } - headerHash := header.Hash() - errTest := errors.New("test error") - - type args struct { - header *types.Header - justification []byte - } - tests := map[string]struct { - chainProcessorBuilder func(ctrl *gomock.Controller) chainProcessor - args args - sentinelError error - errorMessage string - }{ - "invalid_justification": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockFinalityGadget := NewMockFinalityGadget(ctrl) - mockFinalityGadget.EXPECT().VerifyBlockJustification(headerHash, - []byte(`x`)).Return(errTest) - return chainProcessor{ - finalityGadget: mockFinalityGadget, - } - }, - args: args{ - header: header, - justification: []byte(`x`), - }, - sentinelError: errTest, - errorMessage: "verifying block number 2 justification: test error", - }, - "set_justification_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().SetJustification(headerHash, []byte(`xx`)).Return(errTest) - mockFinalityGadget := 
NewMockFinalityGadget(ctrl) - mockFinalityGadget.EXPECT().VerifyBlockJustification(headerHash, []byte(`xx`)).Return(nil) - return chainProcessor{ - blockState: mockBlockState, - finalityGadget: mockFinalityGadget, - } - }, - args: args{ - header: header, - justification: []byte(`xx`), - }, - sentinelError: errTest, - errorMessage: "setting justification for block number 2: test error", - }, - "base_case_set": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().SetJustification(headerHash, []byte(`1234`)).Return(nil) - mockFinalityGadget := NewMockFinalityGadget(ctrl) - mockFinalityGadget.EXPECT().VerifyBlockJustification(headerHash, []byte(`1234`)).Return(nil) - return chainProcessor{ - blockState: mockBlockState, - finalityGadget: mockFinalityGadget, - } - }, - args: args{ - header: header, - justification: []byte(`1234`), - }, - }, - } - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - - processor := tt.chainProcessorBuilder(ctrl) - - err := processor.handleJustification(tt.args.header, tt.args.justification) - - assert.ErrorIs(t, err, tt.sentinelError) - if tt.sentinelError != nil { - assert.EqualError(t, err, tt.errorMessage) - } - }) - } -} - -func Test_chainProcessor_processBlockData(t *testing.T) { - t.Parallel() - - mockError := errors.New("mock test error") - - tests := map[string]struct { - chainProcessorBuilder func(ctrl *gomock.Controller) chainProcessor - blockData types.BlockData - expectedError error - }{ - "handle_has_header_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, mockError) - - return chainProcessor{ - blockState: mockBlockState, - } - }, - blockData: types.BlockData{}, - expectedError: mockError, - }, - "handle_has_block_body_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, mockError) - return chainProcessor{ - blockState: mockBlockState, - } - }, - blockData: types.BlockData{}, - expectedError: mockError, - }, - "handle_getBlockByHash_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(true, nil) - mockBlockState.EXPECT().GetBlockByHash(common.Hash{}).Return(nil, mockError) - - mockChainSync := NewMockChainSync(ctrl) - mockChainSync.EXPECT().syncState().Return(bootstrap) - return chainProcessor{ - blockState: mockBlockState, - chainSync: mockChainSync, - } - }, - blockData: types.BlockData{}, - expectedError: mockError, - }, - "handle_block_data_justification_!=_nil": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockBlock := &types.Block{ - Header: types.Header{ - Number: uint(1), - }, - } - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(true, nil) - mockBlockState.EXPECT().GetBlockByHash(common.Hash{}).Return(mockBlock, nil) - mockBlockState.EXPECT().AddBlockToBlockTree(&types.Block{ - 
Header: types.Header{Number: 1}}).Return(nil) - mockBlockState.EXPECT().SetJustification(common.MustHexToHash( - "0x6443a0b46e0412e626363028115a9f2cf963eeed526b8b33e5316f08b50d0dc3"), []byte{1, 2, 3}) - mockFinalityGadget := NewMockFinalityGadget(ctrl) - mockFinalityGadget.EXPECT().VerifyBlockJustification(common.MustHexToHash( - "0x6443a0b46e0412e626363028115a9f2cf963eeed526b8b33e5316f08b50d0dc3"), []byte{1, 2, - 3}).Return(nil) - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().TrieState(&common.Hash{}).Return(nil, nil) - - // given our current chain sync state is `tip` - // the `HandleBlockImport` method should expect - // true as the announce parameter - mockChainSync := NewMockChainSync(ctrl) - mockChainSync.EXPECT().syncState().Return(tip) - - mockBlockImportHandler := NewMockBlockImportHandler(ctrl) - mockBlockImportHandler.EXPECT().HandleBlockImport(mockBlock, - nil, true).Return(nil) - - return chainProcessor{ - chainSync: mockChainSync, - blockState: mockBlockState, - finalityGadget: mockFinalityGadget, - storageState: mockStorageState, - blockImportHandler: mockBlockImportHandler, - } - }, - blockData: types.BlockData{ - Justification: &[]byte{1, 2, 3}, - }, - }, - "handle_babe_verify_block_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}).Return(mockError) - - mockChainSync := NewMockChainSync(ctrl) - mockChainSync.EXPECT().syncState().Return(bootstrap) - - return chainProcessor{ - chainSync: mockChainSync, - blockState: mockBlockState, - babeVerifier: mockBabeVerifier, - } - }, - blockData: types.BlockData{ - Header: &types.Header{}, - Body: &types.Body{}, - }, - expectedError: mockError, - }, - "no_header_and_body_-_fail_to_handle_justification": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - blockState := NewMockBlockState(ctrl) - blockState.EXPECT().HasHeader(common.Hash{1}).Return(false, nil) - blockState.EXPECT().HasBlockBody(common.Hash{1}).Return(true, nil) - - finalityGadget := NewMockFinalityGadget(ctrl) - expectedBlockDataHeader := &types.Header{Number: 2} - expectedBlockDataHeaderHash := expectedBlockDataHeader.Hash() - finalityGadget.EXPECT(). - VerifyBlockJustification(expectedBlockDataHeaderHash, []byte{1, 2, 3}). 
- Return(mockError) - - mockChainSync := NewMockChainSync(ctrl) - mockChainSync.EXPECT().syncState().Return(bootstrap) - - return chainProcessor{ - chainSync: mockChainSync, - blockState: blockState, - finalityGadget: finalityGadget, - } - }, - blockData: types.BlockData{ - Hash: common.Hash{1}, - Header: &types.Header{Number: 2}, - Justification: &[]byte{1, 2, 3}, - }, - expectedError: mockError, - }, - "handle_compareAndSetBlockData_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().CompareAndSetBlockData(&types.BlockData{}).Return(mockError) - - mockChainSync := NewMockChainSync(ctrl) - mockChainSync.EXPECT().syncState().Return(bootstrap) - return chainProcessor{ - chainSync: mockChainSync, - blockState: mockBlockState, - } - }, - blockData: types.BlockData{}, - expectedError: mockError, - }, - "success_with_justification": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - stateRootHash := common.MustHexToHash("0x03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314") - runtimeHash := common.MustHexToHash("0x7db9db5ed9967b80143100189ba69d9e4deab85ac3570e5df25686cabe32964a") - mockTrieState := storage.NewTrieState(nil) - mockBlock := &types.Block{Header: types.Header{}, Body: types.Body{}} - - mockInstance := NewMockInstance(ctrl) - mockInstance.EXPECT().SetContextStorage(mockTrieState) - mockInstance.EXPECT().ExecuteBlock(mockBlock).Return(nil, nil) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ - Number: 0, - StateRoot: stateRootHash, - }, nil) - mockBlockState.EXPECT().SetJustification( - common.MustHexToHash("0xdcdd89927d8a348e00257e1ecc8617f45edb5118efff3ea2f9961b2ad9b7690a"), []byte{1, 2, 3}) - mockBlockState.EXPECT().CompareAndSetBlockData(gomock.AssignableToTypeOf(&types.BlockData{})) - mockBlockState.EXPECT().GetRuntime(runtimeHash).Return(mockInstance, nil) - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}) - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().Lock() - mockStorageState.EXPECT().TrieState(&stateRootHash).Return(mockTrieState, nil) - mockStorageState.EXPECT().Unlock() - - mockChainSync := NewMockChainSync(ctrl) - mockChainSync.EXPECT().syncState().Return(bootstrap) - - mockBlockImportHandler := NewMockBlockImportHandler(ctrl) - mockBlockImportHandler.EXPECT().HandleBlockImport(mockBlock, mockTrieState, false) - - mockTelemetry := NewMockTelemetry(ctrl) - mockTelemetry.EXPECT().SendMessage(gomock.Any()).AnyTimes() - mockFinalityGadget := NewMockFinalityGadget(ctrl) - mockFinalityGadget.EXPECT().VerifyBlockJustification( - common.MustHexToHash("0xdcdd89927d8a348e00257e1ecc8617f45edb5118efff3ea2f9961b2ad9b7690a"), - []byte{1, 2, 3}).Return(nil) - return chainProcessor{ - chainSync: mockChainSync, - blockState: mockBlockState, - babeVerifier: mockBabeVerifier, - storageState: mockStorageState, - blockImportHandler: mockBlockImportHandler, - telemetry: mockTelemetry, - finalityGadget: mockFinalityGadget, - } - }, - blockData: types.BlockData{ - Header: &types.Header{ - Number: 0, - }, - Body: &types.Body{}, - 
Justification: &[]byte{1, 2, 3}, - }, - }, - } - - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - processor := tt.chainProcessorBuilder(ctrl) - err := processor.processBlockData(tt.blockData) - assert.ErrorIs(t, err, tt.expectedError) - }) - } -} - -func Test_chainProcessor_processBlockDataWithStateHeaderAndBody(t *testing.T) { - t.Parallel() - - errTest := errors.New("test error") - - testCases := map[string]struct { - chainProcessorBuilder func(ctrl *gomock.Controller) chainProcessor - blockData types.BlockData - announceImportedBlock bool - sentinelError error - errorMessage string - }{ - "get_block_by_hash_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - blockState := NewMockBlockState(ctrl) - blockState.EXPECT().GetBlockByHash(common.Hash{1}). - Return(nil, errTest) - return chainProcessor{ - blockState: blockState, - } - }, - blockData: types.BlockData{Hash: common.Hash{1}}, - sentinelError: errTest, - errorMessage: "getting block by hash: test error", - }, - "block_already_exists_in_blocktree": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - blockState := NewMockBlockState(ctrl) - block := &types.Block{Header: types.Header{Number: 2}} - blockState.EXPECT().GetBlockByHash(common.Hash{1}).Return(block, nil) - blockState.EXPECT().AddBlockToBlockTree(block).Return(blocktree.ErrBlockExists) - return chainProcessor{ - blockState: blockState, - } - }, - blockData: types.BlockData{Hash: common.Hash{1}}, - }, - "add_block_to_blocktree_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - blockState := NewMockBlockState(ctrl) - block := &types.Block{Header: types.Header{Number: 2}} - blockState.EXPECT().GetBlockByHash(common.Hash{1}).Return(block, nil) - blockState.EXPECT().AddBlockToBlockTree(block).Return(errTest) - return chainProcessor{ - blockState: blockState, - } - }, - blockData: types.BlockData{Hash: common.Hash{1}}, - sentinelError: errTest, - errorMessage: "adding block to blocktree: test error", - }, - "handle_justification_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - blockState := NewMockBlockState(ctrl) - blockHeader := types.Header{Number: 2} - blockHeaderHash := blockHeader.Hash() - block := &types.Block{Header: blockHeader} - blockState.EXPECT().GetBlockByHash(common.Hash{1}).Return(block, nil) - blockState.EXPECT().AddBlockToBlockTree(block).Return(nil) - - finalityGadget := NewMockFinalityGadget(ctrl) - finalityGadget.EXPECT(). - VerifyBlockJustification(blockHeaderHash, []byte{3}). - Return(errTest) - - return chainProcessor{ - blockState: blockState, - finalityGadget: finalityGadget, - } - }, - blockData: types.BlockData{ - Hash: common.Hash{1}, - Justification: &[]byte{3}, - }, - sentinelError: errTest, - errorMessage: "handling justification: verifying block number 2 justification: test error", - }, - "trie_state_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - blockState := NewMockBlockState(ctrl) - blockHeader := types.Header{StateRoot: common.Hash{2}} - block := &types.Block{Header: blockHeader} - blockState.EXPECT().GetBlockByHash(common.Hash{1}).Return(block, nil) - blockState.EXPECT().AddBlockToBlockTree(block).Return(nil) - - storageState := NewMockStorageState(ctrl) - storageState.EXPECT().TrieState(&common.Hash{2}). 
- Return(nil, errTest) - - return chainProcessor{ - blockState: blockState, - storageState: storageState, - } - }, - blockData: types.BlockData{ - Hash: common.Hash{1}, - }, - sentinelError: errTest, - errorMessage: "loading trie state: test error", - }, - "handle_block_import_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - blockState := NewMockBlockState(ctrl) - blockHeader := types.Header{StateRoot: common.Hash{2}} - block := &types.Block{Header: blockHeader} - blockState.EXPECT().GetBlockByHash(common.Hash{1}).Return(block, nil) - blockState.EXPECT().AddBlockToBlockTree(block).Return(nil) - - storageState := NewMockStorageState(ctrl) - trieState := storage.NewTrieState(nil) - storageState.EXPECT().TrieState(&common.Hash{2}). - Return(trieState, nil) - - blockImportHandler := NewMockBlockImportHandler(ctrl) - const announceImportedBlock = true - blockImportHandler.EXPECT().HandleBlockImport(block, trieState, announceImportedBlock). - Return(errTest) - - return chainProcessor{ - blockState: blockState, - storageState: storageState, - blockImportHandler: blockImportHandler, - } - }, - blockData: types.BlockData{ - Hash: common.Hash{1}, - }, - announceImportedBlock: true, - sentinelError: errTest, - errorMessage: "handling block import: test error", - }, - "success": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - blockState := NewMockBlockState(ctrl) - blockHeader := types.Header{StateRoot: common.Hash{2}} - block := &types.Block{Header: blockHeader} - blockState.EXPECT().GetBlockByHash(common.Hash{1}).Return(block, nil) - blockState.EXPECT().AddBlockToBlockTree(block).Return(nil) - - storageState := NewMockStorageState(ctrl) - trieState := storage.NewTrieState(nil) - storageState.EXPECT().TrieState(&common.Hash{2}). - Return(trieState, nil) - - blockImportHandler := NewMockBlockImportHandler(ctrl) - const announceImportedBlock = true - blockImportHandler.EXPECT().HandleBlockImport(block, trieState, announceImportedBlock). - Return(nil) - - return chainProcessor{ - blockState: blockState, - storageState: storageState, - blockImportHandler: blockImportHandler, - } - }, - blockData: types.BlockData{ - Hash: common.Hash{1}, - }, - announceImportedBlock: true, - }, - } - - for name, testCase := range testCases { - testCase := testCase - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - - processor := testCase.chainProcessorBuilder(ctrl) - - err := processor.processBlockDataWithStateHeaderAndBody( - testCase.blockData, testCase.announceImportedBlock) - - assert.ErrorIs(t, err, testCase.sentinelError) - if testCase.sentinelError != nil { - assert.EqualError(t, err, testCase.errorMessage) - } - }) - } -} - -func Test_chainProcessor_processBlockDataWithHeaderAndBody(t *testing.T) { - t.Parallel() - - errTest := errors.New("test error") - - testCases := map[string]struct { - chainProcessorBuilder func(ctrl *gomock.Controller) chainProcessor - blockData types.BlockData - announceImportedBlock bool - sentinelError error - errorMessage string - }{ - "verify_block_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - babeVerifier := NewMockBabeVerifier(ctrl) - babeVerifier.EXPECT().VerifyBlock(&types.Header{Number: 1}). 
- Return(errTest) - - return chainProcessor{ - babeVerifier: babeVerifier, - } - }, - blockData: types.BlockData{ - Header: &types.Header{Number: 1}, - }, - sentinelError: errTest, - errorMessage: "babe verifying block: test error", - }, - "handle_block_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - babeVerifier := NewMockBabeVerifier(ctrl) - expectedHeader := &types.Header{ParentHash: common.Hash{1}} - babeVerifier.EXPECT().VerifyBlock(expectedHeader). - Return(nil) - - transactionState := NewMockTransactionState(ctrl) - transactionState.EXPECT().RemoveExtrinsic(types.Extrinsic{2}) - - blockState := NewMockBlockState(ctrl) - blockState.EXPECT().GetHeader(common.Hash{1}). - Return(nil, errTest) - - return chainProcessor{ - babeVerifier: babeVerifier, - transactionState: transactionState, - blockState: blockState, - } - }, - blockData: types.BlockData{ - Header: &types.Header{ParentHash: common.Hash{1}}, - Body: &types.Body{{2}}, - }, - sentinelError: errFailedToGetParent, - errorMessage: "handling block: failed to get parent header: test error", - }, - "success": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - babeVerifier := NewMockBabeVerifier(ctrl) - expectedHeader := &types.Header{ - ParentHash: common.Hash{1}, - Number: 5, - } - babeVerifier.EXPECT().VerifyBlock(expectedHeader). - Return(nil) - - transactionState := NewMockTransactionState(ctrl) - transactionState.EXPECT().RemoveExtrinsic(types.Extrinsic{2}) - - blockState := NewMockBlockState(ctrl) - parentHeader := &types.Header{StateRoot: trie.EmptyHash} - blockState.EXPECT().GetHeader(common.Hash{1}). - Return(parentHeader, nil) - - storageState := NewMockStorageState(ctrl) - lockCall := storageState.EXPECT().Lock() - storageState.EXPECT().Unlock().After(lockCall) - trieState := storage.NewTrieState(nil) - storageState.EXPECT().TrieState(&trie.EmptyHash). - Return(trieState, nil) - - parentHeaderHash := parentHeader.Hash() - instance := NewMockInstance(ctrl) - blockState.EXPECT().GetRuntime(parentHeaderHash). - Return(instance, nil) - - instance.EXPECT().SetContextStorage(trieState) - block := &types.Block{ - Header: *expectedHeader, - Body: types.Body{{2}}, - } - instance.EXPECT().ExecuteBlock(block).Return(nil, nil) - - blockImportHandler := NewMockBlockImportHandler(ctrl) - const announceImportedBlock = true - blockImportHandler.EXPECT().HandleBlockImport(block, trieState, announceImportedBlock). 
- Return(nil) - - telemetryClient := NewMockTelemetry(ctrl) - headerHash := common.MustHexToHash("0x18d21d2901e4a4ac6a8c6431da2dfee1b8701f31a9e49283a082e6c744d4117c") - message := telemetry.NewBlockImport(&headerHash, expectedHeader.Number, "NetworkInitialSync") - telemetryClient.EXPECT().SendMessage(message) - - return chainProcessor{ - babeVerifier: babeVerifier, - transactionState: transactionState, - blockState: blockState, - storageState: storageState, - blockImportHandler: blockImportHandler, - telemetry: telemetryClient, - } - }, - blockData: types.BlockData{ - Header: &types.Header{ - ParentHash: common.Hash{1}, - Number: 5, - }, - Body: &types.Body{{2}}, - }, - announceImportedBlock: true, - }, - } - - for name, testCase := range testCases { - testCase := testCase - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - - processor := testCase.chainProcessorBuilder(ctrl) - - err := processor.processBlockDataWithHeaderAndBody( - testCase.blockData, testCase.announceImportedBlock) - - assert.ErrorIs(t, err, testCase.sentinelError) - if testCase.sentinelError != nil { - assert.EqualError(t, err, testCase.errorMessage) - } - }) - } -} - -func Test_chainProcessor_processReadyBlocks(t *testing.T) { - t.Parallel() - mockError := errors.New("test mock error") - tests := map[string]struct { - chainSyncBuilder func(ctrl *gomock.Controller) ChainSync - blockStateBuilder func(ctrl *gomock.Controller, done chan struct{}) BlockState - blockData *types.BlockData - babeVerifierBuilder func(ctrl *gomock.Controller) BabeVerifier - pendingBlockBuilder func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet - storageStateBuilder func(ctrl *gomock.Controller, done chan struct{}) StorageState - }{ - "base_case": { - chainSyncBuilder: func(ctrl *gomock.Controller) ChainSync { - cs := NewMockChainSync(ctrl) - cs.EXPECT().syncState().Return(bootstrap) - return cs - }, - blockStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().CompareAndSetBlockData(&types.BlockData{}).DoAndReturn(func(*types. 
- BlockData) error { - close(done) - return nil - }) - return mockBlockState - }, - blockData: &types.BlockData{ - Hash: common.Hash{}, - }, - babeVerifierBuilder: func(ctrl *gomock.Controller) BabeVerifier { - return nil - }, - pendingBlockBuilder: func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet { - return nil - }, - storageStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) StorageState { - return nil - }, - }, - "add_block": { - chainSyncBuilder: func(ctrl *gomock.Controller) ChainSync { - cs := NewMockChainSync(ctrl) - cs.EXPECT().syncState().Return(bootstrap) - return cs - }, - blockStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(nil, mockError) - return mockBlockState - }, - blockData: &types.BlockData{ - Hash: common.Hash{}, - Header: &types.Header{}, - Body: &types.Body{}, - }, - babeVerifierBuilder: func(ctrl *gomock.Controller) BabeVerifier { - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}).Return(nil) - return mockBabeVerifier - }, - pendingBlockBuilder: func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet { - mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) - mockDisjointBlockSet.EXPECT().addBlock(&types.Block{ - Header: types.Header{}, - Body: types.Body{}, - }).DoAndReturn(func(block *types.Block) error { - close(done) - return nil - }) - return mockDisjointBlockSet - }, - storageStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) StorageState { - return nil - }, - }, - "error_in_process_block": { - chainSyncBuilder: func(ctrl *gomock.Controller) ChainSync { - cs := NewMockChainSync(ctrl) - cs.EXPECT().syncState().Return(bootstrap) - return cs - }, - blockStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{}, nil) - return mockBlockState - }, - blockData: &types.BlockData{ - Hash: common.Hash{}, - Header: &types.Header{}, - Body: &types.Body{}, - }, - babeVerifierBuilder: func(ctrl *gomock.Controller) BabeVerifier { - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}).Return(nil) - return mockBabeVerifier - }, - pendingBlockBuilder: func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet { - return nil - }, - storageStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) StorageState { - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().Lock() - mockStorageState.EXPECT().Unlock() - mockStorageState.EXPECT().TrieState(&common.Hash{}).DoAndReturn(func(hash *common.Hash) (*storage. 
- TrieState, error) { - close(done) - return nil, mockError - }) - return mockStorageState - }, - }, - "add_block_error": { - chainSyncBuilder: func(ctrl *gomock.Controller) ChainSync { - cs := NewMockChainSync(ctrl) - cs.EXPECT().syncState().Return(bootstrap) - return cs - }, - blockStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(nil, mockError) - return mockBlockState - }, - blockData: &types.BlockData{ - Hash: common.Hash{}, - Header: &types.Header{}, - Body: &types.Body{}, - }, - babeVerifierBuilder: func(ctrl *gomock.Controller) BabeVerifier { - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}).Return(nil) - return mockBabeVerifier - }, - pendingBlockBuilder: func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet { - mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) - mockDisjointBlockSet.EXPECT().addBlock(&types.Block{ - Header: types.Header{}, - Body: types.Body{}, - }).DoAndReturn(func(block *types.Block) error { - close(done) - return mockError - }) - return mockDisjointBlockSet - }, - storageStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) StorageState { - return nil - }, - }, - } - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - ctx, cancel := context.WithCancel(context.Background()) - readyBlock := newBlockQueue(5) - done := make(chan struct{}) - - s := &chainProcessor{ - ctx: ctx, - cancel: cancel, - readyBlocks: readyBlock, - chainSync: tt.chainSyncBuilder(ctrl), - blockState: tt.blockStateBuilder(ctrl, done), - babeVerifier: tt.babeVerifierBuilder(ctrl), - pendingBlocks: tt.pendingBlockBuilder(ctrl, done), - storageState: tt.storageStateBuilder(ctrl, done), - } - - go s.processReadyBlocks() - - readyBlock.push(tt.blockData) - <-done - s.cancel() - }) - } -} - -func Test_newChainProcessor(t *testing.T) { - t.Parallel() - - mockReadyBlock := newBlockQueue(5) - mockDisjointBlockSet := NewMockDisjointBlockSet(nil) - mockBlockState := NewMockBlockState(nil) - mockStorageState := NewMockStorageState(nil) - mockTransactionState := NewMockTransactionState(nil) - mockBabeVerifier := NewMockBabeVerifier(nil) - mockFinalityGadget := NewMockFinalityGadget(nil) - mockBlockImportHandler := NewMockBlockImportHandler(nil) - - type args struct { - readyBlocks *blockQueue - pendingBlocks DisjointBlockSet - blockState BlockState - storageState StorageState - transactionState TransactionState - babeVerifier BabeVerifier - finalityGadget FinalityGadget - blockImportHandler BlockImportHandler - } - tests := []struct { - name string - args args - want *chainProcessor - }{ - { - name: "with_args", - args: args{ - readyBlocks: mockReadyBlock, - pendingBlocks: mockDisjointBlockSet, - blockState: mockBlockState, - storageState: mockStorageState, - transactionState: mockTransactionState, - babeVerifier: mockBabeVerifier, - finalityGadget: mockFinalityGadget, - blockImportHandler: mockBlockImportHandler, - }, - want: &chainProcessor{ - readyBlocks: mockReadyBlock, - pendingBlocks: mockDisjointBlockSet, - blockState: mockBlockState, - storageState: mockStorageState, - transactionState: mockTransactionState, - babeVerifier: mockBabeVerifier, - finalityGadget: 
mockFinalityGadget, - blockImportHandler: mockBlockImportHandler, - }, - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - cpCfg := chainProcessorConfig{ - readyBlocks: tt.args.readyBlocks, - pendingBlocks: tt.args.pendingBlocks, - blockState: tt.args.blockState, - storageState: tt.args.storageState, - transactionState: tt.args.transactionState, - babeVerifier: tt.args.babeVerifier, - finalityGadget: tt.args.finalityGadget, - blockImportHandler: tt.args.blockImportHandler, - } - - got := newChainProcessor(cpCfg) - assert.NotNil(t, got.ctx) - got.ctx = nil - assert.NotNil(t, got.cancel) - got.cancel = nil - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index de8e197c9c..8e80b8a54e 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -430,30 +430,48 @@ func (cs *chainSync) logSyncSpeed() { } func (cs *chainSync) sync() { - for { + currentTarget := cs.getTarget() + logger.Infof("CURRENT SYNC TARGET: %d", currentTarget) + cs.maybeSwitchMode() if cs.state == bootstrap { + logger.Infof("using bootstrap sync") err := cs.executeBootstrapSync() if err != nil { logger.Errorf("executing bootstrap sync: %s", err) return } } else { - // TODO executeTipSync() + logger.Infof("using tip sync") + err := cs.executeTipSync() + if err != nil { + logger.Errorf("executing tip sync: %s", err) + return + } } } } +func (cs *chainSync) executeTipSync() error { + return nil + +} + +const maxRequestAllowed uint = 40 + func (cs *chainSync) executeBootstrapSync() error { - const maxRequestAllowed uint = 40 + endBootstrapSync := false for { - head, err := cs.blockState.BestBlockHeader() + if endBootstrapSync { + return nil + } + bestBlockHeader, err := cs.blockState.BestBlockHeader() if err != nil { return fmt.Errorf("getting best block header while syncing: %w", err) } - startRequestAt := head.Number + 1 + startRequestAt := bestBlockHeader.Number + 1 cs.workerPool.useConnectedPeers() // we build the set of requests based on the amount of available peers @@ -467,6 +485,14 @@ func (cs *chainSync) executeBootstrapSync() error { targetBlockNumber := startRequestAt + uint(availablePeers)*128 + realTarget := cs.getTarget() + if targetBlockNumber > realTarget { + diff := targetBlockNumber - realTarget + numOfRequestsToDrop := (diff / 128) + 1 + targetBlockNumber = targetBlockNumber - (numOfRequestsToDrop * 128) + endBootstrapSync = true + } + fmt.Printf("=====> requesting from %d targeting %d\n", startRequestAt, targetBlockNumber) requests, err := ascedingBlockRequest( startRequestAt, targetBlockNumber, bootstrapRequestData) @@ -480,7 +506,7 @@ func (cs *chainSync) executeBootstrapSync() error { resultsQueue := make(chan *syncTaskResult) wg.Add(1) - go cs.handleWorkersResults(resultsQueue, expectedAmountOfBlocks, &wg) + go cs.handleWorkersResults(resultsQueue, startRequestAt, expectedAmountOfBlocks, &wg) cs.workerPool.submitRequests(requests, resultsQueue) wg.Wait() @@ -499,9 +525,7 @@ func (cs *chainSync) maybeSwitchMode() { case head.Number+maxResponseSize < target: // we are at least 128 blocks behind the head, switch to bootstrap cs.setMode(bootstrap) - case head.Number >= target: - // bootstrap complete, switch state to tip if not already - // and begin near-head fork-sync + case head.Number+maxResponseSize > target: cs.setMode(tip) default: // head is between (target-128, target), and we don't want to switch modes. 
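The window arithmetic introduced in the executeBootstrapSync hunk above is easier to verify with concrete numbers. The standalone sketch below mirrors only that arithmetic; clampTarget, its parameters, and main are illustrative names for this note, not code from the patch, with 128 standing in for one full block response (maxResponseSize).

package main

import "fmt"

const maxResponseSize = 128 // one full block response, as in dot/sync

// clampTarget widens the request window to one full response per available
// peer, then drops whole 128-block requests so the window never overshoots
// the announced best block. A true second return value corresponds to the
// endBootstrapSync flag being set in the hunk above.
func clampTarget(startRequestAt, availablePeers, realTarget uint) (target uint, bootstrapDone bool) {
    target = startRequestAt + availablePeers*maxResponseSize
    if target > realTarget {
        diff := target - realTarget
        numOfRequestsToDrop := (diff / maxResponseSize) + 1
        target -= numOfRequestsToDrop * maxResponseSize
        bootstrapDone = true
    }
    return target, bootstrapDone
}

func main() {
    // Requests start at block 1000 with 3 available peers, giving a raw
    // window of 1000 + 3*128 = 1384. An announced best block of 1300 is
    // overshot by 84, so one whole request is dropped and the clamped
    // target becomes 1384 - 128 = 1256.
    target, done := clampTarget(1000, 3, 1300)
    fmt.Println(target, done) // 1256 true
}

Note that the clamp deliberately undershoots: the blocks left between the clamped and real targets (1257 through 1300 here) are at most one response away, which is exactly the range where maybeSwitchMode hands over to tip sync.
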
@@ -560,28 +584,28 @@ func (cs *chainSync) getTarget() uint { // and every cicle we should endup with a complete chain, whenever we identify // any error from a worker we should evaluate the error and re-insert the request // in the queue and wait for it to completes -func (cs *chainSync) handleWorkersResults(workersResults chan *syncTaskResult, totalBlocks uint32, wg *sync.WaitGroup) { +func (cs *chainSync) handleWorkersResults(workersResults chan *syncTaskResult, startAtBlock uint, totalBlocks uint32, wg *sync.WaitGroup) { defer wg.Done() logger.Infof("starting handleWorkersResults, waiting %d blocks", totalBlocks) - syncingChain := make([]*types.BlockData, 0, totalBlocks) + syncingChain := make([]*types.BlockData, totalBlocks) loop: for { // in a case where we don't handle workers results we should check the pool idleDuration := 3 * time.Minute - idleTicker := time.NewTimer(idleDuration) + idleTimer := time.NewTimer(idleDuration) select { - case <-idleTicker.C: + case <-idleTimer.C: logger.Warnf("idle ticker triggered! checking pool") cs.workerPool.useConnectedPeers() continue // TODO: implement a case to stop case taskResult := <-workersResults: - if !idleTicker.Stop() { - <-idleTicker.C + if !idleTimer.Stop() { + <-idleTimer.C } logger.Infof("task result: peer(%s), error: %v, hasResponse: %v", @@ -634,14 +658,20 @@ loop: lastBlockInResponse.Header.Number, lastBlockInResponse.Hash) } - previousLen := len(syncingChain) - syncingChain = mergeSortedSlices(syncingChain, response.BlockData) - logger.Infof("building a syncing chain, previous length: %d, current length: %d", - previousLen, len(syncingChain)) + for _, blockInResponse := range response.BlockData { + blockExactIndex := blockInResponse.Header.Number - startAtBlock + syncingChain[blockExactIndex] = blockInResponse + } - if len(syncingChain) >= int(totalBlocks) { - break loop + // we need to check if we've filled all positions + // otherwise we should wait for more responses + for _, element := range syncingChain { + if element == nil { + continue loop + } } + + break loop } } @@ -669,48 +699,6 @@ loop: } } -type LessOrEqual[T any] interface { - LessOrEqual(T) bool -} - -func mergeSortedSlices[T LessOrEqual[T]](a, b []T) []T { - // if one slice is empty just return the other - switch { - case len(a) < 1: - return b - case len(b) < 1: - return a - } - - aIndex, bIndex := 0, 0 - resultSlice := make([]T, 0, len(a)+len(b)) - - for aIndex < len(a) && bIndex < len(b) { - elemA := a[aIndex] - elemB := b[bIndex] - - if elemA.LessOrEqual(elemB) { - resultSlice = append(resultSlice, elemA) - aIndex++ - } else { - resultSlice = append(resultSlice, elemB) - bIndex++ - } - } - - // if there is remaining items in both arrays after the ordering phase - // we just append them in the result slice - for idx := aIndex; idx < len(a); idx++ { - resultSlice = append(resultSlice, a[idx]) - } - - for idx := bIndex; idx < len(b); idx++ { - resultSlice = append(resultSlice, b[idx]) - } - - return resultSlice -} - func (cs *chainSync) handleReadyBlock(bd *types.BlockData) error { if cs.readyBlocks.has(bd.Hash) { logger.Tracef("ignoring block %s (%d) in response, already in ready queue", bd.Hash, bd.Header.Number) diff --git a/dot/sync/syncer.go b/dot/sync/syncer.go index 80cdb4cfa2..12a339099d 100644 --- a/dot/sync/syncer.go +++ b/dot/sync/syncer.go @@ -17,10 +17,9 @@ var logger = log.NewFromGlobal(log.AddContext("pkg", "sync")) // Service deals with chain syncing by sending block request messages and watching for responses. 
type Service struct { - blockState BlockState - chainSync ChainSync - chainProcessor ChainProcessor - network Network + blockState BlockState + chainSync ChainSync + network Network } // Config is the configuration for the sync Service. @@ -62,25 +61,10 @@ func NewService(cfg *Config) (*Service, error) { } chainSync := newChainSync(csCfg) - cpCfg := chainProcessorConfig{ - readyBlocks: readyBlocks, - pendingBlocks: pendingBlocks, - syncer: chainSync, - blockState: cfg.BlockState, - storageState: cfg.StorageState, - transactionState: cfg.TransactionState, - babeVerifier: cfg.BabeVerifier, - finalityGadget: cfg.FinalityGadget, - blockImportHandler: cfg.BlockImportHandler, - telemetry: cfg.Telemetry, - } - chainProcessor := newChainProcessor(cpCfg) - return &Service{ - blockState: cfg.BlockState, - chainSync: chainSync, - chainProcessor: chainProcessor, - network: cfg.Network, + blockState: cfg.BlockState, + chainSync: chainSync, + network: cfg.Network, }, nil } diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index c46347fd9e..854825f366 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -30,7 +30,7 @@ func newSyncWorkerPool(net Network) *syncWorkerPool { return &syncWorkerPool{ network: net, workers: make(map[peer.ID]*syncWorker), - taskQueue: make(chan *syncTask), + taskQueue: make(chan *syncTask, maxRequestAllowed+1), ignorePeers: make(map[peer.ID]time.Time), } } @@ -72,8 +72,6 @@ func (s *syncWorkerPool) addWorker(who peer.ID, bestHash common.Hash, bestNumber s.l.Lock() defer s.l.Unlock() - // delete it since it sends a block announcement so it might be - // a valid peer to request blocks for now _, has := s.ignorePeers[who] if has { delete(s.ignorePeers, who) diff --git a/dot/types/block_data.go b/dot/types/block_data.go index 97a0c6b691..35525c86d0 100644 --- a/dot/types/block_data.go +++ b/dot/types/block_data.go @@ -31,11 +31,6 @@ func (bd *BlockData) Number() uint { return bd.Header.Number } -func (bd *BlockData) LessOrEqual(o *BlockData) bool { - res := bd.Header.Number <= o.Header.Number - return res -} - func (bd *BlockData) String() string { str := fmt.Sprintf("Hash=%s ", bd.Hash) From fc081e106b15f3ab5008a6c30bed03469b660cc0 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 17 Apr 2023 15:35:21 -0400 Subject: [PATCH 008/140] chore: enable state/grandpa/digest trace to investigate forced changes --- chain/westend/config.toml | 3 ++- dot/sync/chain_sync.go | 4 +--- dot/sync/sync_worker.go | 4 ---- dot/sync/worker_pool.go | 9 ++++++++- 4 files changed, 11 insertions(+), 9 deletions(-) diff --git a/chain/westend/config.toml b/chain/westend/config.toml index 0cfdd31547..6a592a4ba0 100644 --- a/chain/westend/config.toml +++ b/chain/westend/config.toml @@ -1,5 +1,6 @@ [global] basepath = "./tmp/gossamer/westend" +#basepath = "/Volumes/SDD01/gossamer_1m_402k_synced_copy/westend" log = "info" metrics-address = "localhost:9876" @@ -7,7 +8,7 @@ metrics-address = "localhost:9876" core = "" network = "" rpc = "" -state = "" +state = "trace" runtime = "" babe = "" grandpa = "" diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 8e80b8a54e..19301a1682 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -458,8 +458,6 @@ func (cs *chainSync) executeTipSync() error { } -const maxRequestAllowed uint = 40 - func (cs *chainSync) executeBootstrapSync() error { endBootstrapSync := false for { @@ -593,7 +591,7 @@ func (cs *chainSync) handleWorkersResults(workersResults chan *syncTaskResult, s loop: for { // in a case where we 
don't handle workers results we should check the pool - idleDuration := 3 * time.Minute + idleDuration := time.Minute idleTimer := time.NewTimer(idleDuration) select { diff --git a/dot/sync/sync_worker.go b/dot/sync/sync_worker.go index 271df3b015..450ebfff63 100644 --- a/dot/sync/sync_worker.go +++ b/dot/sync/sync_worker.go @@ -2,7 +2,6 @@ package sync import ( "context" - "errors" "sync" "github.com/ChainSafe/gossamer/dot/network" @@ -56,9 +55,6 @@ func (s *syncWorker) update(bestHash common.Hash, bestNumber uint) { s.bestNumber = bestNumber } -var errBadPeerWorker = errors.New("bad peer worker") -var errBadBlock = errors.New("bad block") - func (s *syncWorker) Start(tasks <-chan *syncTask, wg *sync.WaitGroup) { wg.Add(1) diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 854825f366..86ddcd52c7 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -26,6 +26,8 @@ type syncWorkerPool struct { ignorePeers map[peer.ID]time.Time } +const maxRequestAllowed uint = 40 + func newSyncWorkerPool(net Network) *syncWorkerPool { return &syncWorkerPool{ network: net, @@ -113,7 +115,12 @@ func (s *syncWorkerPool) shutdownWorker(who peer.ID, ignore bool) { return } - peer.Stop() + go func() { + logger.Warnf("trying to stop %s (ignore=%v)", who, ignore) + peer.Stop() + logger.Warnf("peer %s stopped", who) + }() + delete(s.workers, who) if ignore { From 7bc80381ec742cf56aafd3476f041438a6b87b3d Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 18 Apr 2023 10:25:14 -0400 Subject: [PATCH 009/140] chore: making changes to handling forced changes --- chain/westend/config.toml | 8 +-- dot/digest/digest.go | 2 +- dot/digest/digest_integration_test.go | 30 ----------- dot/digest/digest_test.go | 71 +++++++++++++++++++++++++++ dot/state/grandpa.go | 10 +++- dot/state/grandpa_changes.go | 8 +++ 6 files changed, 92 insertions(+), 37 deletions(-) create mode 100644 dot/digest/digest_test.go diff --git a/chain/westend/config.toml b/chain/westend/config.toml index 6a592a4ba0..d810a6c182 100644 --- a/chain/westend/config.toml +++ b/chain/westend/config.toml @@ -1,6 +1,6 @@ [global] -basepath = "./tmp/gossamer/westend" -#basepath = "/Volumes/SDD01/gossamer_1m_402k_synced_copy/westend" +#basepath = "./tmp/gossamer/westend" +basepath = "/Volumes/SDD01/gossamer_1m_402k_synced_copy/westend" log = "info" metrics-address = "localhost:9876" @@ -8,12 +8,12 @@ metrics-address = "localhost:9876" core = "" network = "" rpc = "" -state = "trace" +state = "debug" runtime = "" babe = "" grandpa = "" sync = "trace" -digest = "" +digest = "trace" [init] genesis = "./chain/westend/genesis.json" diff --git a/dot/digest/digest.go b/dot/digest/digest.go index a0fac2c4f6..f6f61d353c 100644 --- a/dot/digest/digest.go +++ b/dot/digest/digest.go @@ -96,7 +96,6 @@ func (h *Handler) HandleDigests(header *types.Header) error { // toConsensusDigests converts a slice of scale.VaryingDataType to a slice of types.ConsensusDigest. func (h *Handler) toConsensusDigests(scaleVaryingTypes []scale.VaryingDataType) []types.ConsensusDigest { consensusDigests := make([]types.ConsensusDigest, 0, len(scaleVaryingTypes)) - for _, d := range scaleVaryingTypes { digestValue, err := d.Value() if err != nil { @@ -164,6 +163,7 @@ func (h *Handler) handleConsensusDigest(d *types.ConsensusDigest, header *types. 
return err } + fmt.Printf("going to handle: %s\n", data.String()) return h.grandpaState.HandleGRANDPADigest(header, data) case types.BabeEngineID: data := types.NewBabeConsensusDigest() diff --git a/dot/digest/digest_integration_test.go b/dot/digest/digest_integration_test.go index 402014417f..1ab523d9b7 100644 --- a/dot/digest/digest_integration_test.go +++ b/dot/digest/digest_integration_test.go @@ -13,7 +13,6 @@ import ( "github.com/ChainSafe/gossamer/dot/state" "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/internal/log" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/crypto/ed25519" "github.com/ChainSafe/gossamer/lib/crypto/sr25519" @@ -24,35 +23,6 @@ import ( "github.com/stretchr/testify/require" ) -func newTestHandler(t *testing.T) (*Handler, *state.Service) { - testDatadirPath := t.TempDir() - - ctrl := gomock.NewController(t) - telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() - - config := state.Config{ - Path: testDatadirPath, - Telemetry: telemetryMock, - } - stateSrvc := state.NewService(config) - stateSrvc.UseMemDB() - - gen, genesisTrie, genesisHeader := newWestendDevGenesisWithTrieAndHeader(t) - err := stateSrvc.Initialise(&gen, &genesisHeader, &genesisTrie) - require.NoError(t, err) - - err = stateSrvc.SetupBase() - require.NoError(t, err) - - err = stateSrvc.Start() - require.NoError(t, err) - - dh, err := NewHandler(log.Critical, stateSrvc.Block, stateSrvc.Epoch, stateSrvc.Grandpa) - require.NoError(t, err) - return dh, stateSrvc -} - func TestHandler_GrandpaScheduledChange(t *testing.T) { handler, _ := newTestHandler(t) handler.Start() diff --git a/dot/digest/digest_test.go b/dot/digest/digest_test.go new file mode 100644 index 0000000000..c77ccc8733 --- /dev/null +++ b/dot/digest/digest_test.go @@ -0,0 +1,71 @@ +package digest + +import ( + "testing" + + "github.com/ChainSafe/gossamer/dot/state" + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/internal/log" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" +) + +func newTestHandler(t *testing.T) (*Handler, *state.Service) { + testDatadirPath := t.TempDir() + + ctrl := gomock.NewController(t) + telemetryMock := NewMockTelemetry(ctrl) + telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() + + config := state.Config{ + Path: testDatadirPath, + Telemetry: telemetryMock, + } + stateSrvc := state.NewService(config) + stateSrvc.UseMemDB() + + gen, genesisTrie, genesisHeader := newWestendDevGenesisWithTrieAndHeader(t) + err := stateSrvc.Initialise(&gen, &genesisHeader, &genesisTrie) + require.NoError(t, err) + + err = stateSrvc.SetupBase() + require.NoError(t, err) + + err = stateSrvc.Start() + require.NoError(t, err) + + dh, err := NewHandler(log.Critical, stateSrvc.Block, stateSrvc.Epoch, stateSrvc.Grandpa) + require.NoError(t, err) + return dh, stateSrvc +} + +func TestDigestHashes(t *testing.T) { + + babePreRuntimeDigest := types.NewBABEPreRuntimeDigest(common.MustHexToBytes("0x02020000002fe4d90f00000000")) + grandpaConsensus1 := types.ConsensusDigest{ + ConsensusEngineID: types.GrandpaEngineID, + Data: 
common.MustHexToBytes("0x0118a8ddd0891e14725841cd1b5581d23806a97f41c28a25436db6473c86e15dcd4f01000000000000007ca58770eb41c1a68ef77e92255e4635fc11f665cb89aee469e920511c48343a010000000000000074bfb70627416e6e6c4785e928ced384c6c06e5c8dd173a094bc3118da7b673e0100000000000000d455d6778e7100787f0e51e42b86e6e3aac96b1f68aaab59678ab1dd28e5374f0100000000000000a694eb96e1674003ccff3309937bc3ab62ad1a66436f5b1dfad03fc81e8a4f700100000000000000786fc9c50f5d26a2c9f8028fc31f1a447d3425349eb5733550201c68e495a22d01000000000000005eee23b75c97a69e537632302d88870a0f48c05d6a3b11aeb5d3fdf8b579ba79"), + } + grandpaConsensus2 := types.ConsensusDigest{ + ConsensusEngineID: types.GrandpaEngineID, + Data: common.MustHexToBytes("0x02c59e1500189fc415cce1d0b2eed702c9e05f476217d23b46a8723fd56f08cddad650be7c2d0100000000000000feca0be2c87141f6074b221c919c0161a1c468d9173c5c1be59b68fab9a0ff930100000000000000fc9d33059580a69454179ffa41cbae6de2bc8d2bd2c3f1d018fe5484a5a919560100000000000000059ddb0eb77615669a1fc7962bbff119c20c18b58b4922788f842f3cd5b2813a010000000000000007d952daf2d0e2616e5344a6cff989a3fcc5a79a5799198c15ff1c06c51a1280010000000000000065c30e319f817c4392a7c2b98f1585541d53bf8d096bd64033cce6bacbde2952010000000000000005000000"), + } + + digests := types.NewDigest() + err := digests.Add(babePreRuntimeDigest) + require.NoError(t, err) + err = digests.Add(grandpaConsensus1) + require.NoError(t, err) + err = digests.Add(grandpaConsensus2) + require.NoError(t, err) + + header := types.NewHeader(common.Hash{}, common.Hash{}, common.Hash{}, 1, digests) + + handler, _ := newTestHandler(t) + err = handler.HandleDigests(header) + require.NoError(t, err) + + err = handler.grandpaState.ApplyForcedChanges(header) + require.NoError(t, err) +} diff --git a/dot/state/grandpa.go b/dot/state/grandpa.go index f88050d53d..777bcb204c 100644 --- a/dot/state/grandpa.go +++ b/dot/state/grandpa.go @@ -92,6 +92,7 @@ func (s *GrandpaState) HandleGRANDPADigest(header *types.Header, digest scale.Va case types.GrandpaScheduledChange: return s.addScheduledChange(header, val) case types.GrandpaForcedChange: + fmt.Printf("adding a forced change\n") return s.addForcedChange(header, val) case types.GrandpaOnDisabled: return nil @@ -124,6 +125,7 @@ func (s *GrandpaState) addForcedChange(header *types.Header, fc types.GrandpaFor return fmt.Errorf("cannot import forced change: %w", err) } + fmt.Printf("there are now %d possible forced changes\n", s.forcedChanges.Len()) logger.Debugf("there are now %d possible forced changes", s.forcedChanges.Len()) return nil } @@ -257,8 +259,12 @@ func (s *GrandpaState) ApplyForcedChanges(importedBlockHeader *types.Header) err return fmt.Errorf("cannot set change set id at block") } - logger.Debugf("Applying authority set forced change at block #%d", - forcedChange.announcingHeader.Number) + logger.Debug("reseting scheduled changes and forced changes") + s.scheduledChangeRoots.reset() + s.forcedChanges.reset() + + logger.Debugf("Applying authority set forced change on block #%d made at block #%d", + importedBlockHeader.Number, forcedChange.announcingHeader.Number) return nil } diff --git a/dot/state/grandpa_changes.go b/dot/state/grandpa_changes.go index ff4fb0c0d4..425372f55e 100644 --- a/dot/state/grandpa_changes.go +++ b/dot/state/grandpa_changes.go @@ -132,6 +132,10 @@ func (oc *orderedPendingChanges) pruneChanges(hash common.Hash, isDescendantOf i return nil } +func (oc *orderedPendingChanges) reset() { + *oc = make([]pendingChange, 0) +} + type pendingChangeNode struct { change *pendingChange nodes []*pendingChangeNode @@ 
-315,3 +319,7 @@ func (ct *changeTree) pruneChanges(hash common.Hash, isDescendantOf isDescendant *ct = onBranchChanges return nil } + +func (ct *changeTree) reset() { + *ct = []*pendingChangeNode{} +} From 0a27158a70b1c2b6cfd582e3f1bb82e8f59a9893 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 18 Apr 2023 19:06:21 -0400 Subject: [PATCH 010/140] chore: reset scheduled changes once a forced change applies --- chain/westend/config.toml | 4 ++-- dot/sync/chain_sync.go | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/chain/westend/config.toml b/chain/westend/config.toml index d810a6c182..3e59faddc6 100644 --- a/chain/westend/config.toml +++ b/chain/westend/config.toml @@ -1,6 +1,6 @@ [global] -#basepath = "./tmp/gossamer/westend" -basepath = "/Volumes/SDD01/gossamer_1m_402k_synced_copy/westend" +basepath = "./tmp/gossamer/westend" +#basepath = "/Volumes/SDD01/gossamer_1m_402k_synced_copy/westend" log = "info" metrics-address = "localhost:9876" diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 19301a1682..da179e06c3 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -455,7 +455,6 @@ func (cs *chainSync) sync() { func (cs *chainSync) executeTipSync() error { return nil - } func (cs *chainSync) executeBootstrapSync() error { @@ -464,11 +463,11 @@ func (cs *chainSync) executeBootstrapSync() error { if endBootstrapSync { return nil } + bestBlockHeader, err := cs.blockState.BestBlockHeader() if err != nil { return fmt.Errorf("getting best block header while syncing: %w", err) } - startRequestAt := bestBlockHeader.Number + 1 cs.workerPool.useConnectedPeers() From bf6bb619dc127824130468120a3ce5ea54ac3766 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 19 Apr 2023 07:40:59 -0400 Subject: [PATCH 011/140] chore: rollback basepath --- chain/westend/config.toml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/chain/westend/config.toml b/chain/westend/config.toml index 3e59faddc6..c535c126ff 100644 --- a/chain/westend/config.toml +++ b/chain/westend/config.toml @@ -1,6 +1,5 @@ [global] -basepath = "./tmp/gossamer/westend" -#basepath = "/Volumes/SDD01/gossamer_1m_402k_synced_copy/westend" +basepath = "~/.gossamer/westend" log = "info" metrics-address = "localhost:9876" From 59cbaaea89aebe0a4230bda770d9038946358a56 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 19 Apr 2023 17:39:32 -0400 Subject: [PATCH 012/140] chore: wip tip sync --- chain/westend-local/config-alice.toml | 13 +- dot/sync/block_queue.go | 57 ------ dot/sync/block_queue_test.go | 252 -------------------------- dot/sync/chain_sync.go | 140 +++++++------- dot/sync/syncer.go | 2 - dot/sync/tip_syncer.go | 6 +- dot/sync/worker_pool.go | 22 ++- 7 files changed, 93 insertions(+), 399 deletions(-) delete mode 100644 dot/sync/block_queue.go delete mode 100644 dot/sync/block_queue_test.go diff --git a/chain/westend-local/config-alice.toml b/chain/westend-local/config-alice.toml index 556ef95a64..962ed36d49 100644 --- a/chain/westend-local/config-alice.toml +++ b/chain/westend-local/config-alice.toml @@ -1,5 +1,5 @@ [global] -basepath = "~/.gossamer/westend-local-alice" +basepath = "./tmp/.gossamer/westend-local-alice" log = "info" metrics-address = ":9876" @@ -11,21 +11,20 @@ state = "" runtime = "" babe = "" grandpa = "" -sync = "" -digest = "" +sync = "trace" +digest = "trace" [init] genesis = "./chain/westend-local/westend-local-spec-raw.json" [account] -key = "alice" +key = "" unlock = "" [core] roles = 4 -babe-authority = true 
-grandpa-authority = true -babe-lead = true +babe-authority = false +grandpa-authority = false [network] port = 7001 diff --git a/dot/sync/block_queue.go b/dot/sync/block_queue.go deleted file mode 100644 index 9b5a81d597..0000000000 --- a/dot/sync/block_queue.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "context" - "sync" - - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" -) - -type blockQueue struct { - queue chan *types.BlockData - hashesSet map[common.Hash]struct{} - hashesSetMutex sync.RWMutex -} - -// newBlockQueue initialises a queue of *types.BlockData with the given capacity. -func newBlockQueue(capacity int) *blockQueue { - return &blockQueue{ - queue: make(chan *types.BlockData, capacity), - hashesSet: make(map[common.Hash]struct{}, capacity), - } -} - -// push pushes an item into the queue. It blocks if the queue is at capacity. -func (bq *blockQueue) push(blockData *types.BlockData) { - bq.hashesSetMutex.Lock() - bq.hashesSet[blockData.Hash] = struct{}{} - bq.hashesSetMutex.Unlock() - - bq.queue <- blockData -} - -// pop pops the next item from the queue. It blocks if the queue is empty -// until the context is cancelled. If the context is canceled, it returns -// the error from the context. -func (bq *blockQueue) pop(ctx context.Context) (blockData *types.BlockData, err error) { - select { - case <-ctx.Done(): - return blockData, ctx.Err() - case blockData = <-bq.queue: - } - bq.hashesSetMutex.Lock() - delete(bq.hashesSet, blockData.Hash) - bq.hashesSetMutex.Unlock() - return blockData, nil -} - -func (bq *blockQueue) has(blockHash common.Hash) (has bool) { - bq.hashesSetMutex.RLock() - defer bq.hashesSetMutex.RUnlock() - _, has = bq.hashesSet[blockHash] - return has -} diff --git a/dot/sync/block_queue_test.go b/dot/sync/block_queue_test.go deleted file mode 100644 index 14989aa747..0000000000 --- a/dot/sync/block_queue_test.go +++ /dev/null @@ -1,252 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "context" - "sync" - "testing" - "time" - - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func Test_newBlockQueue(t *testing.T) { - t.Parallel() - - const capacity = 1 - bq := newBlockQueue(capacity) - - require.NotNil(t, bq.queue) - assert.Equal(t, 1, cap(bq.queue)) - assert.Equal(t, 0, len(bq.queue)) - bq.queue = nil - - expectedBlockQueue := &blockQueue{ - hashesSet: make(map[common.Hash]struct{}, capacity), - } - assert.Equal(t, expectedBlockQueue, bq) -} - -func Test_blockQueue_push(t *testing.T) { - t.Parallel() - - const capacity = 1 - bq := newBlockQueue(capacity) - blockData := &types.BlockData{ - Hash: common.Hash{1}, - } - - bq.push(blockData) - - // cannot compare channels - require.NotNil(t, bq.queue) - assert.Len(t, bq.queue, 1) - - receivedBlockData := <-bq.queue - expectedBlockData := &types.BlockData{ - Hash: common.Hash{1}, - } - assert.Equal(t, expectedBlockData, receivedBlockData) - - bq.queue = nil - expectedBlockQueue := &blockQueue{ - hashesSet: map[common.Hash]struct{}{{1}: {}}, - } - assert.Equal(t, expectedBlockQueue, bq) -} - -func Test_blockQueue_pop(t *testing.T) { - t.Parallel() - - t.Run("context canceled", func(t *testing.T) { - t.Parallel() - - ctx, cancel := context.WithCancel(context.Background()) - cancel() 
- - const capacity = 1 - bq := newBlockQueue(capacity) - - blockData, err := bq.pop(ctx) - assert.Nil(t, blockData) - assert.ErrorIs(t, err, context.Canceled) - }) - - t.Run("get block data after waiting", func(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - const capacity = 1 - bq := newBlockQueue(capacity) - - const afterDuration = 5 * time.Millisecond - time.AfterFunc(afterDuration, func() { - blockData := &types.BlockData{ - Hash: common.Hash{1}, - } - bq.push(blockData) - }) - - blockData, err := bq.pop(ctx) - - expectedBlockData := &types.BlockData{ - Hash: common.Hash{1}, - } - assert.Equal(t, expectedBlockData, blockData) - assert.NoError(t, err) - - assert.Len(t, bq.queue, 0) - bq.queue = nil - expectedBlockQueue := &blockQueue{ - hashesSet: map[common.Hash]struct{}{}, - } - assert.Equal(t, expectedBlockQueue, bq) - }) -} - -func Test_blockQueue_has(t *testing.T) { - t.Parallel() - - testCases := map[string]struct { - blockQueue *blockQueue - blockHash common.Hash - has bool - }{ - "absent": { - blockQueue: &blockQueue{ - hashesSet: map[common.Hash]struct{}{}, - }, - blockHash: common.Hash{1}, - }, - "exists": { - blockQueue: &blockQueue{ - hashesSet: map[common.Hash]struct{}{{1}: {}}, - }, - blockHash: common.Hash{1}, - has: true, - }, - } - - for name, tc := range testCases { - testCase := tc - t.Run(name, func(t *testing.T) { - t.Parallel() - - has := testCase.blockQueue.has(testCase.blockHash) - assert.Equal(t, testCase.has, has) - }) - } -} - -func Test_lockQueue_endToEnd(t *testing.T) { - t.Parallel() - - const capacity = 10 - blockQueue := newBlockQueue(capacity) - - newBlockData := func(i byte) *types.BlockData { - return &types.BlockData{ - Hash: common.Hash{i}, - } - } - - blockQueue.push(newBlockData(1)) - blockQueue.push(newBlockData(2)) - blockQueue.push(newBlockData(3)) - - blockData, err := blockQueue.pop(context.Background()) - assert.Equal(t, newBlockData(1), blockData) - assert.NoError(t, err) - - has := blockQueue.has(newBlockData(2).Hash) - assert.True(t, has) - has = blockQueue.has(newBlockData(3).Hash) - assert.True(t, has) - - blockQueue.push(newBlockData(4)) - - has = blockQueue.has(newBlockData(4).Hash) - assert.True(t, has) - - blockData, err = blockQueue.pop(context.Background()) - assert.Equal(t, newBlockData(2), blockData) - assert.NoError(t, err) - - // drain queue - for len(blockQueue.queue) > 0 { - <-blockQueue.queue - } -} - -func Test_lockQueue_threadSafety(t *testing.T) { - // This test consists in checking for concurrent access - // using the -race detector. - t.Parallel() - - var startWg, endWg sync.WaitGroup - ctx, cancel := context.WithCancel(context.Background()) - - const operations = 3 - const parallelism = 3 - const goroutines = parallelism * operations - startWg.Add(goroutines) - endWg.Add(goroutines) - - const testDuration = 50 * time.Millisecond - go func() { - timer := time.NewTimer(time.Hour) - startWg.Wait() - _ = timer.Reset(testDuration) - <-timer.C - cancel() - }() - - runInLoop := func(f func()) { - defer endWg.Done() - startWg.Done() - startWg.Wait() - for ctx.Err() == nil { - f() - } - } - - const capacity = 10 - blockQueue := newBlockQueue(capacity) - blockData := &types.BlockData{ - Hash: common.Hash{1}, - } - blockHash := common.Hash{1} - - endWg.Add(1) - go func() { - defer endWg.Done() - <-ctx.Done() - // Empty queue channel to make sure `push` does not block - // when the context is cancelled. 
- for len(blockQueue.queue) > 0 { - <-blockQueue.queue - } - }() - - for i := 0; i < parallelism; i++ { - go runInLoop(func() { - blockQueue.push(blockData) - }) - - go runInLoop(func() { - _, _ = blockQueue.pop(ctx) - }) - - go runInLoop(func() { - _ = blockQueue.has(blockHash) - }) - } - - endWg.Wait() -} diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index da179e06c3..1329293bbd 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -116,9 +116,6 @@ type chainSync struct { // to replace the worker queue workerPool *syncWorkerPool - // queue of work created by setting peer heads - workQueue chan *peerState - // workers are put here when they are completed so we can handle their result resultQueue chan *worker @@ -131,17 +128,6 @@ type chainSync struct { // current workers that are attempting to obtain blocks workerState *workerState - // blocks which are ready to be processed are put into this queue - // the `chainProcessor` will read from this channel and process the blocks - // note: blocks must not be put into this channel unless their parent is known - // - // there is a case where we request and process "duplicate" blocks, which is where there - // are some blocks in this queue, and at the same time, the bootstrap worker errors and dispatches - // a new worker with start=(current best head), which results in the blocks in the queue - // getting re-requested (as they have not been processed yet) - // to fix this, we track the blocks that are in the queue - readyBlocks *blockQueue - // disjoint set of blocks which are known but not ready to be processed // ie. we only know the hash, number, or the parent block is unknown, or the body is unknown // note: the block may have empty fields, as some data about it may be unknown @@ -179,7 +165,6 @@ type chainSync struct { type chainSyncConfig struct { bs BlockState net Network - readyBlocks *blockQueue pendingBlocks DisjointBlockSet minPeers, maxPeers int slotDuration time.Duration @@ -208,12 +193,10 @@ func newChainSync(cfg chainSyncConfig) *chainSync { cancel: cancel, blockState: cfg.bs, network: cfg.net, - workQueue: make(chan *peerState, 1024), resultQueue: make(chan *worker, 1024), peerState: make(map[peer.ID]*peerState), ignorePeers: make(map[peer.ID]struct{}), workerState: newWorkerState(), - readyBlocks: cfg.readyBlocks, pendingBlocks: cfg.pendingBlocks, state: bootstrap, handler: newBootstrapSyncer(cfg.bs), @@ -231,11 +214,15 @@ func newChainSync(cfg chainSyncConfig) *chainSync { func (cs *chainSync) start() { // wait until we have a minimal workers in the sync worker pool + // and we have a clear target otherwise just wait for { + _, err := cs.getTarget() totalAvailable := cs.workerPool.totalWorkers() - if totalAvailable >= uint(cs.minPeers) { + + if err == nil && totalAvailable >= uint(cs.minPeers) { break } + time.Sleep(time.Millisecond * 100) } @@ -287,9 +274,9 @@ func (cs *chainSync) setBlockAnnounce(from peer.ID, header *types.Header) error return cs.setPeerHead(from, header.Hash(), header.Number) } -// setPeerHead sets a peer's best known block and potentially adds the peer's state to the workQueue +// setPeerHead sets a peer's best known block func (cs *chainSync) setPeerHead(p peer.ID, bestHash common.Hash, bestNumber uint) error { - err := cs.workerPool.addWorker(p, bestHash, bestNumber) + err := cs.workerPool.addWorkerFromBlockAnnounce(p, bestHash, bestNumber) if err != nil { logger.Errorf("adding a potential worker: %s", err) } @@ -363,7 +350,6 @@ func (cs *chainSync) setPeerHead(p peer.ID, 
bestHash common.Hash, bestNumber uin return fmt.Errorf("add hash and number: %w", err) } - //cs.workQueue <- nil return nil } @@ -402,7 +388,13 @@ func (cs *chainSync) logSyncSpeed() { switch cs.state { case bootstrap: cs.benchmarker.end(time.Now(), after.Number) - target := cs.getTarget() + target, err := cs.getTarget() + if errors.Is(err, errUnableToGetTarget) { + continue + } else if err != nil { + logger.Errorf("while getting target: %s", err) + continue + } logger.Infof( "🔗 imported blocks from %d to %d (hashes [%s ... %s])", @@ -431,29 +423,40 @@ func (cs *chainSync) sync() { for { - currentTarget := cs.getTarget() - logger.Infof("CURRENT SYNC TARGET: %d", currentTarget) + err := cs.maybeSwitchMode() + if err != nil { + logger.Errorf("trying to switch mode: %s", err) + return + } - cs.maybeSwitchMode() if cs.state == bootstrap { logger.Infof("using bootstrap sync") - err := cs.executeBootstrapSync() - if err != nil { - logger.Errorf("executing bootstrap sync: %s", err) - return - } + err = cs.executeBootstrapSync() } else { logger.Infof("using tip sync") - err := cs.executeTipSync() - if err != nil { - logger.Errorf("executing tip sync: %s", err) - return - } + err = cs.executeTipSync() + } + + if err != nil { + logger.Errorf("executing sync: %s", err) + continue } } } func (cs *chainSync) executeTipSync() error { + cs.workerPool.stopEphemeralWorkers() + + for { + slotDurationTimer := time.NewTimer(cs.slotDuration) + + select { + case <-slotDurationTimer.C: + case <- + } + + } + return nil } @@ -482,7 +485,11 @@ func (cs *chainSync) executeBootstrapSync() error { targetBlockNumber := startRequestAt + uint(availablePeers)*128 - realTarget := cs.getTarget() + realTarget, err := cs.getTarget() + if err != nil { + return fmt.Errorf("while getting target: %w", err) + } + if targetBlockNumber > realTarget { diff := targetBlockNumber - realTarget numOfRequestsToDrop := (diff / 128) + 1 @@ -510,60 +517,50 @@ func (cs *chainSync) executeBootstrapSync() error { } } -func (cs *chainSync) maybeSwitchMode() { +func (cs *chainSync) maybeSwitchMode() error { head, err := cs.blockState.BestBlockHeader() if err != nil { - logger.Errorf("failed to get best block header: %s", err) - return + return fmt.Errorf("getting best block header: %w", err) + } + + target, err := cs.getTarget() + if err != nil { + return fmt.Errorf("getting target: %w", err) } - target := cs.getTarget() switch { case head.Number+maxResponseSize < target: // we are at least 128 blocks behind the head, switch to bootstrap - cs.setMode(bootstrap) + cs.state = bootstrap + isSyncedGauge.Set(float64(cs.state)) + logger.Debugf("switched sync mode to %d", cs.state) + case head.Number+maxResponseSize > target: - cs.setMode(tip) + cs.state = tip + isSyncedGauge.Set(float64(cs.state)) + logger.Debugf("switched sync mode to %d", cs.state) + default: // head is between (target-128, target), and we don't want to switch modes.
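The maybeSwitchMode rewrite above inlines what setMode used to do: compare our best block against the peer-derived target with a 128-block window. A minimal sketch of just that threshold rule, with stand-ins for the real constants and state type (the real code also updates the Prometheus gauge):

    package main

    import "fmt"

    const maxResponseSize = 128 // mirrors the 128-block window used above

    type chainSyncState int

    const (
        bootstrap chainSyncState = iota
        tip
    )

    // decideMode applies the window rule from maybeSwitchMode: more than
    // 128 blocks behind the target means bootstrap, within the window means
    // tip, and exactly on the boundary keeps the current mode.
    func decideMode(current chainSyncState, bestNumber, target uint) chainSyncState {
        switch {
        case bestNumber+maxResponseSize < target:
            return bootstrap
        case bestNumber+maxResponseSize > target:
            return tip
        default:
            return current
        }
    }

    func main() {
        fmt.Println(decideMode(tip, 100, 1000))       // 0 (bootstrap): 900 blocks behind
        fmt.Println(decideMode(bootstrap, 990, 1000)) // 1 (tip): inside the window
    }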
} -} - -// setMode stops all existing workers and clears the worker set and switches the `handler` -// based on the new mode, if the mode is different than previous -func (cs *chainSync) setMode(mode chainSyncState) { - if cs.state == mode { - return - } - - // stop all current workers and clear set - cs.workerState.reset() - // update handler to respective mode - switch mode { - case bootstrap: - cs.handler = newBootstrapSyncer(cs.blockState) - case tip: - cs.handler = newTipSyncer(cs.blockState, cs.pendingBlocks, cs.readyBlocks, nil) - } - - cs.state = mode - isSyncedGauge.Set(float64(cs.state)) - logger.Debugf("switched sync mode to %d", mode) + return nil } +var errUnableToGetTarget = errors.New("unable to get target") + // getTarget takes the average of all peer heads // TODO: should we just return the highest? could be an attack vector potentially, if a peer reports some very large // head block number, it would leave us in bootstrap mode forever // it would be better to have some sort of standard deviation calculation and discard any outliers (#1861) -func (cs *chainSync) getTarget() uint { +func (cs *chainSync) getTarget() (uint, error) { cs.RLock() defer cs.RUnlock() // in practice, this shouldn't happen, as we only start the module once we have some peer states if len(cs.peerState) == 0 { // return max uint32 instead of 0, as returning 0 would switch us to tip mode unexpectedly - return uint(1<<32 - 1) + return 0, errUnableToGetTarget } // we are going to sort the data and remove the outliers then we will return the avg of all the valid elements @@ -574,7 +571,7 @@ func (cs *chainSync) getTarget() uint { sum, count := nonOutliersSumCount(uintArr) quotientBigInt := big.NewInt(0).Div(sum, big.NewInt(int64(count))) - return uint(quotientBigInt.Uint64()) + return uint(quotientBigInt.Uint64()), nil } // handleWorkersResults, every time we submit requests to workers they results should be computed here @@ -697,11 +694,6 @@ loop: } func (cs *chainSync) handleReadyBlock(bd *types.BlockData) error { - if cs.readyBlocks.has(bd.Hash) { - logger.Tracef("ignoring block %s (%d) in response, already in ready queue", bd.Hash, bd.Header.Number) - return nil - } - // if header was not requested, get it from the pending set // if we're expecting headers, validate should ensure we have a header if bd.Header == nil { @@ -984,10 +976,6 @@ func (cs *chainSync) validateResponse(req *network.BlockRequestMessage, continue } - if cs.readyBlocks.has(curr.ParentHash) { - continue - } - return errUnknownParent } diff --git a/dot/sync/syncer.go b/dot/sync/syncer.go index 12a339099d..3a24891f0c 100644 --- a/dot/sync/syncer.go +++ b/dot/sync/syncer.go @@ -41,13 +41,11 @@ type Config struct { func NewService(cfg *Config) (*Service, error) { logger.Patch(log.SetLevel(cfg.LogLvl)) - readyBlocks := newBlockQueue(maxResponseSize * 30) pendingBlocks := newDisjointBlockSet(pendingBlocksLimit) csCfg := chainSyncConfig{ bs: cfg.BlockState, net: cfg.Network, - readyBlocks: readyBlocks, pendingBlocks: pendingBlocks, minPeers: cfg.MinPeers, maxPeers: cfg.MaxPeers, diff --git a/dot/sync/tip_syncer.go b/dot/sync/tip_syncer.go index 00d2318cff..4379846c95 100644 --- a/dot/sync/tip_syncer.go +++ b/dot/sync/tip_syncer.go @@ -19,16 +19,14 @@ type handleReadyBlockFunc func(*types.BlockData) type tipSyncer struct { blockState BlockState pendingBlocks DisjointBlockSet - readyBlocks *blockQueue handleReadyBlock handleReadyBlockFunc } -func newTipSyncer(blockState BlockState, pendingBlocks DisjointBlockSet, readyBlocks *blockQueue, 
+func newTipSyncer(blockState BlockState, pendingBlocks DisjointBlockSet, handleReadyBlock handleReadyBlockFunc) *tipSyncer { return &tipSyncer{ blockState: blockState, pendingBlocks: pendingBlocks, - readyBlocks: readyBlocks, handleReadyBlock: handleReadyBlock, } } @@ -199,7 +197,7 @@ func (s *tipSyncer) handleTick() ([]*worker, error) { return nil, err } - if has || s.readyBlocks.has(block.header.ParentHash) { + if has { // block is ready, as parent is known! // also, move any pendingBlocks that are descendants of this block to the ready blocks queue s.handleReadyBlock(block.toBlockData()) diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 86ddcd52c7..1b154a608b 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -8,6 +8,7 @@ import ( "github.com/ChainSafe/gossamer/dot/network" "github.com/ChainSafe/gossamer/lib/common" "github.com/libp2p/go-libp2p/core/peer" + "golang.org/x/exp/maps" ) type syncTask struct { @@ -70,7 +71,7 @@ func (s *syncWorkerPool) useConnectedPeers() { } } -func (s *syncWorkerPool) addWorker(who peer.ID, bestHash common.Hash, bestNumber uint) error { +func (s *syncWorkerPool) addWorkerFromBlockAnnounce(who peer.ID, bestHash common.Hash, bestNumber uint) error { s.l.Lock() defer s.l.Unlock() @@ -81,6 +82,7 @@ func (s *syncWorkerPool) addWorker(who peer.ID, bestHash common.Hash, bestNumber worker, has := s.workers[who] if has { + worker.isEphemeral = false worker.update(bestHash, bestNumber) return nil } @@ -129,6 +131,24 @@ func (s *syncWorkerPool) shutdownWorker(who peer.ID, ignore bool) { } } +func (s *syncWorkerPool) stopEphemeralWorkers() { + s.l.Lock() + defer s.l.Unlock() + + workersKeys := maps.Keys(s.workers) + + for _, who := range workersKeys { + worker, has := s.workers[who] + if !has || !worker.isEphemeral { + continue + } + + worker.Stop() + delete(s.ignorePeers, who) + delete(s.workers, who) + } +} + func (s *syncWorkerPool) totalWorkers() (total uint) { s.l.RLock() defer s.l.RUnlock() From f9ca5a4c7ee616d9e22feab8b5538c40134bc28c Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 24 Apr 2023 11:43:12 -0400 Subject: [PATCH 013/140] wip: tip sync impl --- chain/westend-local/config-alice.toml | 2 +- .../westend-local/westend-local-spec-raw.json | 5 +- dot/network/sync.go | 5 +- dot/sync/chain_sync.go | 253 +++++++++++++----- dot/sync/disjoint_block_set.go | 6 +- dot/sync/requests.go | 22 +- dot/sync/sync_worker.go | 5 +- dot/sync/worker_pool.go | 38 ++- 8 files changed, 240 insertions(+), 96 deletions(-) diff --git a/chain/westend-local/config-alice.toml b/chain/westend-local/config-alice.toml index 962ed36d49..f63e8d2be8 100644 --- a/chain/westend-local/config-alice.toml +++ b/chain/westend-local/config-alice.toml @@ -51,7 +51,7 @@ modules = [ ws-port = 8546 [pprof] -enabled = false +enabled = true listening-address = "localhost:6060" block-rate = 0 mutex-rate = 0 diff --git a/chain/westend-local/westend-local-spec-raw.json b/chain/westend-local/westend-local-spec-raw.json index c87badbea1..129bc545c5 100644 --- a/chain/westend-local/westend-local-spec-raw.json +++ b/chain/westend-local/westend-local-spec-raw.json @@ -2,7 +2,10 @@ "name": "Westend Local Testnet", "id": "westend_local_testnet", "chainType": "Local", - "bootNodes": [], + "bootNodes": [ + "/ip4/127.0.0.1/tcp/30337/p2p/12D3KooWR1Kt3XD9Z1TpXMdTrAS6kJzcVnPArG35nt5bnZHmdbAr", + "/ip4/127.0.0.1/tcp/30333/p2p/12D3KooWNqk6b381NG7BLQsFEvdEZjoUi1LJ7fjaci1asycY3Kx1" + ], "telemetryEndpoints": null, "protocolId": "dot", "properties": null, diff --git 
a/dot/network/sync.go b/dot/network/sync.go index 85a584d7f3..64e353ba2e 100644 --- a/dot/network/sync.go +++ b/dot/network/sync.go @@ -5,6 +5,7 @@ package network import ( "context" + "errors" "fmt" "time" @@ -102,6 +103,8 @@ func (s *Service) handleWarpSyncProofResponse(stream libp2pnetwork.Stream) (inte return nil, nil } +var ErrReceivedEmptyMessage = errors.New("received empty message") + func (s *Service) receiveBlockResponse(stream libp2pnetwork.Stream) (*BlockResponseMessage, error) { // allocating a new (large) buffer every time slows down the syncing by a dramatic amount, // as malloc is one of the most CPU intensive tasks. @@ -120,7 +123,7 @@ func (s *Service) receiveBlockResponse(stream libp2pnetwork.Stream) (*BlockRespo } if n == 0 { - return nil, fmt.Errorf("received empty message") + return nil, fmt.Errorf("%w", ErrReceivedEmptyMessage) } msg := new(BlockResponseMessage) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 1329293bbd..c6bd0f17d9 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -106,6 +106,12 @@ type ChainSync interface { getHighestBlock() (highestBlock uint, err error) } +type announcedBlock struct { + who peer.ID + hash common.Hash + number uint +} + type chainSync struct { ctx context.Context cancel context.CancelFunc @@ -114,19 +120,13 @@ type chainSync struct { network Network // to replace the worker queue - workerPool *syncWorkerPool - - // workers are put here when they are completed so we can handle their result - resultQueue chan *worker + workerPool *syncWorkerPool + blockAnnounceCh chan announcedBlock // tracks the latest state we know of from our peers, // ie. their best block hash and number - sync.RWMutex - peerState map[peer.ID]*peerState - ignorePeers map[peer.ID]struct{} - - // current workers that are attempting to obtain blocks - workerState *workerState + peerStateLock sync.RWMutex + peerState map[peer.ID]*peerState // disjoint set of blocks which are known but not ready to be processed // ie. 
we only know the hash, number, or the parent block is unknown, or the body is unknown @@ -135,6 +135,7 @@ type chainSync struct { pendingBlockDoneCh chan<- struct{} // bootstrap or tip (near-head) + state chainSyncState // handler is set to either `bootstrapSyncer` or `tipSyncer`, depending on the current @@ -179,7 +180,7 @@ type chainSyncConfig struct { func newChainSync(cfg chainSyncConfig) *chainSync { ctx, cancel := context.WithCancel(context.Background()) const syncSamplesToKeep = 30 - const logSyncPeriod = 10 * time.Second + const logSyncPeriod = 3 * time.Second logSyncTicker := time.NewTicker(logSyncPeriod) return &chainSync{ @@ -193,10 +194,7 @@ func newChainSync(cfg chainSyncConfig) *chainSync { cancel: cancel, blockState: cfg.bs, network: cfg.net, - resultQueue: make(chan *worker, 1024), peerState: make(map[peer.ID]*peerState), - ignorePeers: make(map[peer.ID]struct{}), - workerState: newWorkerState(), pendingBlocks: cfg.pendingBlocks, state: bootstrap, handler: newBootstrapSyncer(cfg.bs), @@ -231,7 +229,9 @@ func (cs *chainSync) start() { pendingBlockDoneCh := make(chan struct{}) cs.pendingBlockDoneCh = pendingBlockDoneCh - //go cs.pendingBlocks.run(pendingBlockDoneCh) + cs.blockAnnounceCh = make(chan announcedBlock, 50) + + go cs.pendingBlocks.run(cs.finalisedCh, pendingBlockDoneCh) go cs.sync() cs.logSyncStarted = true go cs.logSyncSpeed() @@ -251,10 +251,11 @@ func (cs *chainSync) syncState() chainSyncState { return cs.state } -func (cs *chainSync) setBlockAnnounce(from peer.ID, header *types.Header) error { +func (cs *chainSync) setBlockAnnounce(who peer.ID, blockAnnounceHeader *types.Header) error { + blockAnnounceHeaderHash := blockAnnounceHeader.Hash() // check if we already know of this block, if not, // add to pendingBlocks set - has, err := cs.blockState.HasHeader(header.Hash()) + has, err := cs.blockState.HasHeader(blockAnnounceHeaderHash) if err != nil { return err } @@ -263,49 +264,22 @@ func (cs *chainSync) setBlockAnnounce(from peer.ID, header *types.Header) error return blocktree.ErrBlockExists } - if err = cs.pendingBlocks.addHeader(header); err != nil { - return err - } - - // we assume that if a peer sends us a block announce for a certain block, - // that is also has the chain up until and including that block. - // this may not be a valid assumption, but perhaps we can assume that - // it is likely they will receive this block and its ancestors before us. 
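start() above now hands the finalisation channel to the pending-block housekeeping goroutine; the matching run() change appears in the disjoint_block_set.go hunk further down, where each notification prunes pending blocks the finalised chain has passed. A stripped-down sketch of that pruning, with a plain map standing in for the real disjointBlockSet (the <= cutoff mirrors the pendingBlock.number <= highestFinalized.Number check used in requestPendingBlocks):

    package main

    import "fmt"

    // pendingSet stands in for the disjoint block set, keyed by block number.
    type pendingSet map[uint]string

    // removeLowerBlocks drops every pending block at or below the finalised
    // number, since the finalised chain can never need them again.
    func (s pendingSet) removeLowerBlocks(finalised uint) {
        for number := range s {
            if number <= finalised {
                delete(s, number)
            }
        }
    }

    func main() {
        set := pendingSet{5: "a", 10: "b", 20: "c"}
        set.removeLowerBlocks(10) // finalisation notification for block 10
        fmt.Println(len(set))     // 1: only block 20 is still pending
    }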
- return cs.setPeerHead(from, header.Hash(), header.Number) -} - -// setPeerHead sets a peer's best known block -func (cs *chainSync) setPeerHead(p peer.ID, bestHash common.Hash, bestNumber uint) error { - err := cs.workerPool.addWorkerFromBlockAnnounce(p, bestHash, bestNumber) - if err != nil { - logger.Errorf("adding a potential worker: %s", err) - } - - ps := &peerState{ - who: p, - hash: bestHash, - number: bestNumber, - } - cs.Lock() - cs.peerState[p] = ps - cs.Unlock() - // if the peer reports a lower or equal best block number than us, // check if they are on a fork or not - head, err := cs.blockState.BestBlockHeader() + bestBlockHeader, err := cs.blockState.BestBlockHeader() if err != nil { return fmt.Errorf("best block header: %w", err) } - if bestNumber <= head.Number { + if blockAnnounceHeader.Number <= bestBlockHeader.Number { // check if our block hash for that number is the same, if so, do nothing // as we already have that block - ourHash, err := cs.blockState.GetHashByNumber(bestNumber) + ourHash, err := cs.blockState.GetHashByNumber(blockAnnounceHeader.Number) if err != nil { return fmt.Errorf("get block hash by number: %w", err) } - if ourHash == bestHash { + if ourHash == blockAnnounceHeaderHash { return nil } @@ -320,20 +294,20 @@ func (cs *chainSync) setPeerHead(p peer.ID, bestHash common.Hash, bestNumber uin // their block hash doesn't match ours for that number (ie. they are on a different // chain), and also the highest finalised block is higher than that number. // thus the peer is on an invalid chain - if fin.Number >= bestNumber { + if fin.Number >= blockAnnounceHeader.Number { // TODO: downscore this peer, or temporarily don't sync from them? (#1399) // perhaps we need another field in `peerState` to mark whether the state is valid or not cs.network.ReportPeer(peerset.ReputationChange{ Value: peerset.BadBlockAnnouncementValue, Reason: peerset.BadBlockAnnouncementReason, - }, p) + }, who) return fmt.Errorf("%w: for peer %s and block number %d", - errPeerOnInvalidFork, p, bestNumber) + errPeerOnInvalidFork, who, blockAnnounceHeader.Number) } // peer is on a fork, check if we have processed the fork already or not // ie. is their block written to our db? - has, err := cs.blockState.HasHeader(bestHash) + has, err := cs.blockState.HasHeader(blockAnnounceHeaderHash) if err != nil { return fmt.Errorf("has header: %w", err) } @@ -344,12 +318,43 @@ func (cs *chainSync) setPeerHead(p peer.ID, bestHash common.Hash, bestNumber uin } } - // the peer has a higher best block than us, or they are on some fork we are not aware of - // add it to the disjoint block set - if err = cs.pendingBlocks.addHashAndNumber(bestHash, bestNumber); err != nil { - return fmt.Errorf("add hash and number: %w", err) + pendingBlock := cs.pendingBlocks.getBlock(blockAnnounceHeaderHash) + if pendingBlock != nil { + return fmt.Errorf("block %s (#%d) already in the pending set", + blockAnnounceHeaderHash, blockAnnounceHeader.Number) } + if err = cs.pendingBlocks.addHeader(blockAnnounceHeader); err != nil { + return err + } + + // we assume that if a peer sends us a block announce for a certain block, + // that it also has the chain up until and including that block. + // this may not be a valid assumption, but perhaps we can assume that + // it is likely they will receive this block and its ancestors before us.
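The reworked setBlockAnnounce above reduces to a small decision procedure over three block numbers and two lookups. A self-contained restatement of those checks as a pure function, with stand-in types and invented names (the real code additionally reports the peer and touches the pending set):

    package main

    import "fmt"

    type verdict int

    const (
        alreadyHave verdict = iota // canonical block, or a fork we already imported
        invalidFork                // announced at or below our finalised chain
        trackFork                  // plausible fork or new head: add to pending set
    )

    // classify mirrors the branch order above: only announces at or below our
    // best number need the fork checks; everything higher is tracked.
    func classify(announced, best, finalised uint, sameHashAtNumber, haveHeader bool) verdict {
        if announced <= best {
            if sameHashAtNumber {
                return alreadyHave
            }
            if finalised >= announced {
                return invalidFork
            }
            if haveHeader {
                return alreadyHave
            }
        }
        return trackFork
    }

    func main() {
        fmt.Println(classify(90, 100, 95, false, false))  // 1: below finalised, invalid
        fmt.Println(classify(98, 100, 95, false, false))  // 2: fork above finalised
        fmt.Println(classify(101, 100, 95, false, false)) // 2: new head
    }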
+ cs.blockAnnounceCh <- announcedBlock{ + who: who, + hash: blockAnnounceHeaderHash, + number: blockAnnounceHeader.Number, + } + return nil +} + +// setPeerHead sets a peer's best known block +func (cs *chainSync) setPeerHead(who peer.ID, bestHash common.Hash, bestNumber uint) error { + err := cs.workerPool.addWorkerFromBlockAnnounce(who, bestHash, bestNumber) + if err != nil { + logger.Errorf("adding a potential worker: %s", err) + } + + cs.peerStateLock.Lock() + defer cs.peerStateLock.Unlock() + + cs.peerState[who] = &peerState{ + who: who, + hash: bestHash, + number: bestNumber, + } return nil } @@ -445,18 +450,125 @@ func (cs *chainSync) sync() { } func (cs *chainSync) executeTipSync() error { - cs.workerPool.stopEphemeralWorkers() - for { slotDurationTimer := time.NewTimer(cs.slotDuration) select { case <-slotDurationTimer.C: - case <- + err := cs.requestPendingBlocks() + if err != nil { + return fmt.Errorf("while requesting pending blocks") + } + + case blockAnnouncement := <-cs.blockAnnounceCh: + if !slotDurationTimer.Stop() { + <-slotDurationTimer.C + } + + who := blockAnnouncement.who + announcedHash := blockAnnouncement.hash + announcedNumber := blockAnnouncement.number + + bestBlockHeader, err := cs.blockState.BestBlockHeader() + if err != nil { + return fmt.Errorf("getting best block header: %w", err) + } + + if announcedNumber <= bestBlockHeader.Number { + continue + } + + gapLength := uint32(announcedNumber - bestBlockHeader.Number) + var request *network.BlockRequestMessage + if gapLength > 1 { + request = descendingBlockRequest(announcedHash, gapLength, bootstrapRequestData) + logger.Debugf("received a block announce from %s in tip mode, requesting %d blocks, starting %s (#%d)", + who, gapLength, announcedHash, announcedNumber) + } else { + gapLength = 1 + request = singleBlockRequest(announcedHash, bootstrapRequestData) + logger.Debugf("received a block announce from %s in tip mode, requesting %s (#%d)", + who, announcedHash, announcedNumber) + } + + resultsQueue := make(chan *syncTaskResult) + wg := sync.WaitGroup{} + + wg.Add(1) + go cs.handleWorkersResults(resultsQueue, bestBlockHeader.Number+1, gapLength, &wg) + cs.workerPool.submitRequest(request, resultsQueue) + wg.Wait() } + } + +} +func (cs *chainSync) requestPendingBlocks() error { + logger.Debugf("handling tick, we have %d pending blocks", cs.pendingBlocks.size()) + if cs.pendingBlocks.size() == 0 { + return nil + } + + highestFinalized, err := cs.blockState.GetHighestFinalisedHeader() + if err != nil { + return fmt.Errorf("getting highest finalised header: %w", err) } + requests := map[uint]*network.BlockRequestMessage{} + for _, pendingBlock := range cs.pendingBlocks.getBlocks() { + if pendingBlock.number <= highestFinalized.Number { + cs.pendingBlocks.removeBlock(pendingBlock.hash) + continue + } + + if pendingBlock.header == nil { + logger.Debugf("handling missing header: %s (#%d)", pendingBlock.hash, pendingBlock.number) + requestBlockFullData := singleBlockRequest(pendingBlock.hash, bootstrapRequestData) + requests[pendingBlock.number] = requestBlockFullData + continue + } + + if pendingBlock.body == nil { + logger.Debugf("handling missing body: %s (#%d)", pendingBlock.hash, pendingBlock.number) + requestBlockBody := singleBlockRequest(pendingBlock.hash, + network.RequestedDataBody+network.RequestedDataJustification) + requests[pendingBlock.number] = requestBlockBody + continue + } + + parentExists, err := cs.blockState.HasHeader(pendingBlock.header.ParentHash) + if err != nil { + return fmt.Errorf("getting 
pending block parent header: %w", err) + } + + if parentExists { + err := cs.handleReadyBlock(pendingBlock.toBlockData()) + if err != nil { + return fmt.Errorf("handling ready block: %w", err) + } + continue + } + + logger.Debugf("handling missing parent: %s (#%d)", pendingBlock.hash, pendingBlock.number) + + gapLength := pendingBlock.number - highestFinalized.Number + descendingGapRequest := descendingBlockRequest(pendingBlock.hash, + uint32(gapLength), bootstrapRequestData) + requests[pendingBlock.number] = descendingGapRequest + + } + + wg := sync.WaitGroup{} + // the `requests` in the tip sync are not related necessarily + // the is why we need to treat them separately + for pendindBlockNumber, request := range requests { + wg.Add(1) + + resultsQueue := make(chan *syncTaskResult) + go cs.handleWorkersResults(resultsQueue, pendindBlockNumber, *request.Max, &wg) + cs.workerPool.submitRequest(request, resultsQueue) + } + wg.Wait() return nil } @@ -497,8 +609,7 @@ func (cs *chainSync) executeBootstrapSync() error { endBootstrapSync = true } - fmt.Printf("=====> requesting from %d targeting %d\n", startRequestAt, targetBlockNumber) - requests, err := ascedingBlockRequest( + requests, err := ascedingBlockRequests( startRequestAt, targetBlockNumber, bootstrapRequestData) if err != nil { logger.Errorf("failed to setup ascending block requests: %s", err) @@ -554,8 +665,8 @@ var errUnableToGetTarget = errors.New("unable to get target") // head block number, it would leave us in bootstrap mode forever // it would be better to have some sort of standard deviation calculation and discard any outliers (#1861) func (cs *chainSync) getTarget() (uint, error) { - cs.RLock() - defer cs.RUnlock() + cs.peerStateLock.RLock() + defer cs.peerStateLock.RUnlock() // in practice, this shouldn't happen, as we only start the module once we have some peer states if len(cs.peerState) == 0 { @@ -607,6 +718,12 @@ loop: if taskResult.err != nil { logger.Criticalf("task result error: %s", taskResult.err) + + if errors.Is(taskResult.err, network.ErrReceivedEmptyMessage) { + cs.workerPool.submitRequestIgnoring(taskResult.request, taskResult.who, workersResults) + continue + } + // TODO add this worker in a ignorePeers list, implement some expiration time for // peers added to it (peerJail where peers have a release date and maybe extend the punishment // if fail again ang again Jimmy's + Diego's idea) @@ -664,7 +781,6 @@ loop: continue loop } } - break loop } } @@ -734,6 +850,7 @@ func (cs *chainSync) handleReadyBlock(bd *types.BlockData) error { return err } + cs.pendingBlocks.removeBlock(bd.Hash) return nil } @@ -966,6 +1083,10 @@ func (cs *chainSync) validateResponse(req *network.BlockRequestMessage, continue } + if curr == nil { + logger.Critical(">>>>>>>>>>>>>>>> CURR IS NIL!!") + } + // check that parent of first block in response is known (either in our db or in the ready queue) if i == 0 { prev = curr @@ -1034,8 +1155,8 @@ func (cs *chainSync) validateJustification(bd *types.BlockData) error { } func (cs *chainSync) getHighestBlock() (highestBlock uint, err error) { - cs.RLock() - defer cs.RUnlock() + cs.peerStateLock.RLock() + defer cs.peerStateLock.RUnlock() if len(cs.peerState) == 0 { return 0, errNoPeers diff --git a/dot/sync/disjoint_block_set.go b/dot/sync/disjoint_block_set.go index 69c5462c16..4a28710bf9 100644 --- a/dot/sync/disjoint_block_set.go +++ b/dot/sync/disjoint_block_set.go @@ -26,7 +26,7 @@ var ( // DisjointBlockSet represents a set of incomplete blocks, or blocks // with an unknown parent. 
it is implemented by *disjointBlockSet type DisjointBlockSet interface { - run(done <-chan struct{}) + run(finalisedCh <-chan *types.FinalisationInfo, done <-chan struct{}) addHashAndNumber(hash common.Hash, number uint) error addHeader(*types.Header) error addBlock(*types.Block) error @@ -113,7 +113,7 @@ func newDisjointBlockSet(limit int) *disjointBlockSet { } } -func (s *disjointBlockSet) run(done <-chan struct{}) { +func (s *disjointBlockSet) run(finalisedCh <-chan *types.FinalisationInfo, done <-chan struct{}) { ticker := time.NewTicker(clearBlocksInterval) defer ticker.Stop() @@ -121,6 +121,8 @@ func (s *disjointBlockSet) run(done <-chan struct{}) { select { case <-ticker.C: s.clearBlocks() + case finalisedInfo := <-finalisedCh: + s.removeLowerBlocks(finalisedInfo.Header.Number) case <-done: return } diff --git a/dot/sync/requests.go b/dot/sync/requests.go index a59bc5d018..b57e9263f0 100644 --- a/dot/sync/requests.go +++ b/dot/sync/requests.go @@ -2,10 +2,30 @@ package sync import ( "github.com/ChainSafe/gossamer/dot/network" + "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/common/variadic" ) -func ascedingBlockRequest(startNumber uint, targetNumber uint, requestedData byte) ([]*network.BlockRequestMessage, error) { +func singleBlockRequest(blockHash common.Hash, requestedData byte) *network.BlockRequestMessage { + one := uint32(1) + return &network.BlockRequestMessage{ + RequestedData: requestedData, + StartingBlock: *variadic.MustNewUint32OrHash(blockHash), + Direction: network.Ascending, + Max: &one, + } +} + +func descendingBlockRequest(blockHash common.Hash, amount uint32, requestedData byte) *network.BlockRequestMessage { + return &network.BlockRequestMessage{ + RequestedData: requestedData, + StartingBlock: *variadic.MustNewUint32OrHash(blockHash), + Direction: network.Descending, + Max: &amount, + } +} + +func ascedingBlockRequests(startNumber uint, targetNumber uint, requestedData byte) ([]*network.BlockRequestMessage, error) { diff := int(targetNumber) - int(startNumber) if diff < 0 { return nil, errInvalidDirection diff --git a/dot/sync/sync_worker.go b/dot/sync/sync_worker.go index 450ebfff63..0aaabbc296 100644 --- a/dot/sync/sync_worker.go +++ b/dot/sync/sync_worker.go @@ -20,7 +20,6 @@ type syncTaskResult struct { // for requesting blocks, once a peer is disconnected or is ignored // we can just disable its worker. 
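The new requests.go helpers above give the sync code three request shapes: a single block, a descending span ending at a hash, and ascending pages toward a target. The ascending builder's body is only partly visible here, so the following is a sketch of the paging arithmetic it implies (128 blocks per request, matching maxResponseSize), not the actual implementation:

    package main

    import "fmt"

    const maxResponseSize = 128 // one block response carries at most 128 blocks

    // pageRanges splits [start, target] into ascending windows of at most
    // 128 blocks, one window per BlockRequestMessage.
    func pageRanges(start, target uint) [][2]uint {
        var out [][2]uint
        for start <= target {
            end := start + maxResponseSize - 1
            if end > target {
                end = target
            }
            out = append(out, [2]uint{start, end})
            start = end + 1
        }
        return out
    }

    func main() {
        // 300 blocks of work become three requests: 128 + 128 + 44 blocks.
        fmt.Println(pageRanges(1, 300)) // [[1 128] [129 256] [257 300]]
    }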
type syncWorker struct { - isEphemeral bool // context shared between all workers ctx context.Context l sync.RWMutex @@ -72,6 +71,10 @@ func (s *syncWorker) Start(tasks <-chan *syncTask, wg *sync.WaitGroup) { case <-s.stopCh: return case task := <-tasks: + if _, toIgnore := task.ignorePeer[s.who]; toIgnore { + continue + } + request := task.request logger.Infof("[EXECUTING] worker %s: block request: %s", s.who, request) diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 1b154a608b..a9d4440673 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -8,12 +8,12 @@ import ( "github.com/ChainSafe/gossamer/dot/network" "github.com/ChainSafe/gossamer/lib/common" "github.com/libp2p/go-libp2p/core/peer" - "golang.org/x/exp/maps" ) type syncTask struct { - request *network.BlockRequestMessage - resultCh chan<- *syncTaskResult + ignorePeer map[peer.ID]struct{} + request *network.BlockRequestMessage + resultCh chan<- *syncTaskResult } type syncWorkerPool struct { @@ -65,7 +65,6 @@ func (s *syncWorkerPool) useConnectedPeers() { // should remove them and use only peers who send us // block announcements ephemeralSyncWorker := newSyncWorker(s.ctx, connectedPeer, common.Hash{}, 0, s.network) - ephemeralSyncWorker.isEphemeral = true ephemeralSyncWorker.Start(s.taskQueue, &s.wg) s.workers[connectedPeer] = ephemeralSyncWorker } @@ -82,7 +81,6 @@ func (s *syncWorkerPool) addWorkerFromBlockAnnounce(who peer.ID, bestHash common worker, has := s.workers[who] if has { - worker.isEphemeral = false worker.update(bestHash, bestNumber) return nil } @@ -97,6 +95,18 @@ func (s *syncWorkerPool) addWorkerFromBlockAnnounce(who peer.ID, bestHash common func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, resultCh chan<- *syncTaskResult) { s.taskQueue <- &syncTask{ + ignorePeer: make(map[peer.ID]struct{}), + request: request, + resultCh: resultCh, + } +} + +func (s *syncWorkerPool) submitRequestIgnoring(request *network.BlockRequestMessage, toIgnore peer.ID, resultCh chan<- *syncTaskResult) { + s.taskQueue <- &syncTask{ + ignorePeer: map[peer.ID]struct { + }{ + toIgnore: {}, + }, request: request, resultCh: resultCh, } @@ -131,24 +141,6 @@ func (s *syncWorkerPool) shutdownWorker(who peer.ID, ignore bool) { } } -func (s *syncWorkerPool) stopEphemeralWorkers() { - s.l.Lock() - defer s.l.Unlock() - - workersKeys := maps.Keys(s.workers) - - for _, who := range workersKeys { - worker, has := s.workers[who] - if !has || !worker.isEphemeral { - continue - } - - worker.Stop() - delete(s.ignorePeers, who) - delete(s.workers, who) - } -} - func (s *syncWorkerPool) totalWorkers() (total uint) { s.l.RLock() defer s.l.RUnlock() From f9d7e43bb64ad1aeb9d16467bcf0bc3ce55f1d40 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 26 Apr 2023 08:37:32 -0400 Subject: [PATCH 014/140] chore: complete tip sync adjusts --- .../westend-local/westend-local-spec-raw.json | 4 +- dot/network/service.go | 2 +- dot/network/sync.go | 8 - dot/sync/bootstrap_syncer.go | 96 ----- dot/sync/bootstrap_syncer_integration_test.go | 135 ------ dot/sync/bootstrap_syncer_test.go | 113 ----- dot/sync/chain_sync.go | 180 +++++--- dot/sync/requests.go | 2 +- dot/sync/sync_worker.go | 22 +- dot/sync/syncer.go | 7 +- dot/sync/syncer_test.go | 6 +- dot/sync/tip_syncer.go | 219 ---------- dot/sync/tip_syncer_integration_test.go | 372 ---------------- dot/sync/tip_syncer_test.go | 401 ------------------ dot/sync/worker_pool.go | 25 +- 15 files changed, 141 insertions(+), 1451 deletions(-) delete mode 100644 
dot/sync/bootstrap_syncer.go delete mode 100644 dot/sync/bootstrap_syncer_integration_test.go delete mode 100644 dot/sync/bootstrap_syncer_test.go delete mode 100644 dot/sync/tip_syncer.go delete mode 100644 dot/sync/tip_syncer_integration_test.go delete mode 100644 dot/sync/tip_syncer_test.go diff --git a/chain/westend-local/westend-local-spec-raw.json b/chain/westend-local/westend-local-spec-raw.json index 129bc545c5..2e709e3b30 100644 --- a/chain/westend-local/westend-local-spec-raw.json +++ b/chain/westend-local/westend-local-spec-raw.json @@ -3,8 +3,8 @@ "id": "westend_local_testnet", "chainType": "Local", "bootNodes": [ - "/ip4/127.0.0.1/tcp/30337/p2p/12D3KooWR1Kt3XD9Z1TpXMdTrAS6kJzcVnPArG35nt5bnZHmdbAr", - "/ip4/127.0.0.1/tcp/30333/p2p/12D3KooWNqk6b381NG7BLQsFEvdEZjoUi1LJ7fjaci1asycY3Kx1" + "/ip4/127.0.0.1/tcp/30337/p2p/12D3KooWEFF1y4eJ5viWdU7Tn6UtdjVY6LiZfmg6dVdCyDocNc6q", + "/ip4/127.0.0.1/tcp/30333/p2p/12D3KooWCS3voVrcMs4xW1qaFQhRpMttncmjFfGCNpDHaNkTkVQk" ], "telemetryEndpoints": null, "protocolId": "dot", diff --git a/dot/network/service.go b/dot/network/service.go index 8b40006481..9d281840e7 100644 --- a/dot/network/service.go +++ b/dot/network/service.go @@ -248,7 +248,7 @@ func (s *Service) Start() error { s.ctx, s.cancel = context.WithCancel(context.Background()) } - s.host.registerStreamHandler(s.host.protocolID+syncID, s.handleWarpSyncStream) + s.host.registerStreamHandler(s.host.protocolID+syncID, s.handleSyncStream) // TODO: enable this protocol to receive request from other nodes //s.host.registerStreamHandler(s.host.protocolID+warpSync, s.handleSyncStream) s.host.registerStreamHandler(s.host.protocolID+lightID, s.handleLightStream) diff --git a/dot/network/sync.go b/dot/network/sync.go index 64e353ba2e..9f875991c3 100644 --- a/dot/network/sync.go +++ b/dot/network/sync.go @@ -148,14 +148,6 @@ func (s *Service) handleSyncStream(stream libp2pnetwork.Stream) { s.readStream(stream, decodeSyncMessage, s.handleSyncMessage, maxBlockResponseSize) } -func (s *Service) handleWarpSyncStream(stream libp2pnetwork.Stream) { - if stream == nil { - return - } - - fmt.Printf("====> %v\n", stream) -} - func decodeSyncMessage(in []byte, _ peer.ID, _ bool) (Message, error) { msg := new(BlockRequestMessage) err := msg.Decode(in) diff --git a/dot/sync/bootstrap_syncer.go b/dot/sync/bootstrap_syncer.go deleted file mode 100644 index 4b0b1ecc39..0000000000 --- a/dot/sync/bootstrap_syncer.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "errors" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/lib/common" -) - -var _ workHandler = &bootstrapSyncer{} - -var bootstrapRequestData = network.RequestedDataHeader + network.RequestedDataBody + network.RequestedDataJustification - -// bootstrapSyncer handles worker logic for bootstrap mode -type bootstrapSyncer struct { - blockState BlockState -} - -func newBootstrapSyncer(blockState BlockState) *bootstrapSyncer { - return &bootstrapSyncer{ - blockState: blockState, - } -} - -func (s *bootstrapSyncer) handleNewPeerState(ps *peerState) (*worker, error) { - head, err := s.blockState.BestBlockHeader() - if err != nil { - return nil, err - } - - if ps.number <= head.Number { - return nil, nil //nolint:nilnil - } - - return &worker{ - startNumber: uintPtr(head.Number + 1), - targetHash: ps.hash, - targetNumber: uintPtr(ps.number), - requestData: bootstrapRequestData, - direction: network.Ascending, - }, nil -} - 
-//nolint:nilnil -func (s *bootstrapSyncer) handleWorkerResult(res *worker) ( - workerToRetry *worker, err error) { - // if there is an error, potentially retry the worker - if res.err == nil { - return nil, nil - } - - // new worker should update start block and re-dispatch - head, err := s.blockState.BestBlockHeader() - if err != nil { - return nil, err - } - - // we've reached the target, return - if *res.targetNumber <= head.Number { - return nil, nil - } - - startNumber := head.Number + 1 - - // in the case we started a block producing node, we might have produced blocks - // before fully syncing (this should probably be fixed by connecting sync into BABE) - if errors.Is(res.err.err, errUnknownParent) { - fin, err := s.blockState.GetHighestFinalisedHeader() - if err != nil { - return nil, err - } - - startNumber = fin.Number - } - - return &worker{ - startHash: common.Hash{}, // for bootstrap, just use number - startNumber: uintPtr(startNumber), - targetHash: res.targetHash, - targetNumber: res.targetNumber, - requestData: res.requestData, - direction: res.direction, - }, nil -} - -func (*bootstrapSyncer) hasCurrentWorker(_ *worker, workers map[uint64]*worker) bool { - // we're in bootstrap mode, and there already is a worker, we don't need to dispatch another - return len(workers) != 0 -} - -func (*bootstrapSyncer) handleTick() ([]*worker, error) { - return nil, nil -} diff --git a/dot/sync/bootstrap_syncer_integration_test.go b/dot/sync/bootstrap_syncer_integration_test.go deleted file mode 100644 index 20a9e32d3e..0000000000 --- a/dot/sync/bootstrap_syncer_integration_test.go +++ /dev/null @@ -1,135 +0,0 @@ -//go:build integration - -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "testing" - - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/trie" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" -) - -func newTestBootstrapSyncer(t *testing.T) *bootstrapSyncer { - header := types.NewHeader( - common.NewHash([]byte{0}), trie.EmptyHash, - trie.EmptyHash, 100, types.NewDigest()) - - finHeader := types.NewHeader( - common.NewHash([]byte{0}), trie.EmptyHash, - trie.EmptyHash, 200, types.NewDigest()) - - ctrl := gomock.NewController(t) - bs := NewMockBlockState(ctrl) - bs.EXPECT().BestBlockHeader().Return(header, nil).AnyTimes() - bs.EXPECT().GetHighestFinalisedHeader().Return(finHeader, nil).AnyTimes() - - return newBootstrapSyncer(bs) -} - -func TestBootstrapSyncer_handleWork(t *testing.T) { - s := newTestBootstrapSyncer(t) - - // peer's state is equal or lower than ours - // should not create a worker for bootstrap mode - w, err := s.handleNewPeerState(&peerState{ - number: 100, - }) - require.NoError(t, err) - require.Nil(t, w) - - w, err = s.handleNewPeerState(&peerState{ - number: 99, - }) - require.NoError(t, err) - require.Nil(t, w) - - // if peer's number is highest, return worker w/ their block as target - expected := &worker{ - requestData: bootstrapRequestData, - startNumber: uintPtr(101), - targetHash: common.NewHash([]byte{1}), - targetNumber: uintPtr(101), - } - w, err = s.handleNewPeerState(&peerState{ - number: 101, - hash: common.NewHash([]byte{1}), - }) - require.NoError(t, err) - require.Equal(t, expected, w) - - expected = &worker{ - requestData: bootstrapRequestData, - startNumber: uintPtr(101), - targetHash: common.NewHash([]byte{1}), - targetNumber: uintPtr(9999), - } - w, err = 
s.handleNewPeerState(&peerState{ - number: 9999, - hash: common.NewHash([]byte{1}), - }) - require.NoError(t, err) - require.Equal(t, expected, w) -} - -func TestBootstrapSyncer_handleWorkerResult(t *testing.T) { - s := newTestBootstrapSyncer(t) - - // if the worker error is nil, then this function should do nothing - res := &worker{} - w, err := s.handleWorkerResult(res) - require.NoError(t, err) - require.Nil(t, w) - - // if there was a worker error, this should return a worker with - // startNumber = bestBlockNumber + 1 and the same target as previously - expected := &worker{ - requestData: bootstrapRequestData, - startNumber: uintPtr(101), - targetHash: common.NewHash([]byte{1}), - targetNumber: uintPtr(201), - } - - res = &worker{ - requestData: bootstrapRequestData, - targetHash: common.NewHash([]byte{1}), - targetNumber: uintPtr(201), - err: &workerError{}, - } - - w, err = s.handleWorkerResult(res) - require.NoError(t, err) - require.Equal(t, expected, w) -} - -func TestBootstrapSyncer_handleWorkerResult_errUnknownParent(t *testing.T) { - s := newTestBootstrapSyncer(t) - - // if there was a worker error, this should return a worker with - // startNumber = bestBlockNumber + 1 and the same target as previously - expected := &worker{ - requestData: bootstrapRequestData, - startNumber: uintPtr(200), - targetHash: common.NewHash([]byte{1}), - targetNumber: uintPtr(300), - } - - res := &worker{ - requestData: bootstrapRequestData, - targetHash: common.NewHash([]byte{1}), - targetNumber: uintPtr(300), - err: &workerError{ - err: errUnknownParent, - }, - } - - w, err := s.handleWorkerResult(res) - require.NoError(t, err) - require.Equal(t, expected, w) -} diff --git a/dot/sync/bootstrap_syncer_test.go b/dot/sync/bootstrap_syncer_test.go deleted file mode 100644 index 9d59f8dd27..0000000000 --- a/dot/sync/bootstrap_syncer_test.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "errors" - "testing" - - "github.com/ChainSafe/gossamer/dot/types" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" -) - -func Test_bootstrapSyncer_handleWorkerResult(t *testing.T) { - t.Parallel() - mockError := errors.New("mock testing error") - - tests := map[string]struct { - blockStateBuilder func(ctrl *gomock.Controller) BlockState - worker *worker - wantWorkerToRetry *worker - err error - }{ - "nil_worker.err_returns_nil": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - return NewMockBlockState(ctrl) - }, - worker: &worker{}, - }, - "best_block_header_error": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().BestBlockHeader().Return(nil, - mockError) - return mockBlockState - }, - worker: &worker{ - err: &workerError{}, - targetNumber: uintPtr(0), - }, - err: mockError, - }, - "targetNumber_<_bestBlockHeader_number_returns_nil": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{Number: 2}, nil) - return mockBlockState - }, - worker: &worker{ - err: &workerError{}, - targetNumber: uintPtr(0), - }, - }, - "targetNumber_>_bestBlockHeader_number_worker_errUnknownParent,_error_GetHighestFinalisedHeader": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - 
mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{Number: 2}, nil) - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(nil, mockError) - return mockBlockState - }, - worker: &worker{ - err: &workerError{err: errUnknownParent}, - targetNumber: uintPtr(3), - }, - err: mockError, - }, - "targetNumber_>_bestBlockHeader_number_worker_errUnknownParent_returns_worker": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{Number: 2}, nil) - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{Number: 1}, nil) - return mockBlockState - }, - worker: &worker{ - err: &workerError{err: errUnknownParent}, - targetNumber: uintPtr(3), - }, - wantWorkerToRetry: &worker{ - startNumber: uintPtr(1), - targetNumber: uintPtr(3), - }, - }, - "targetNumber_>_bestBlockHeader_number_returns_worker": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{Number: 2}, nil) - return mockBlockState - }, - worker: &worker{ - err: &workerError{}, - targetNumber: uintPtr(3), - }, - wantWorkerToRetry: &worker{ - startNumber: uintPtr(3), - targetNumber: uintPtr(3), - }, - }, - } - for testName, tt := range tests { - tt := tt - t.Run(testName, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - s := &bootstrapSyncer{ - blockState: tt.blockStateBuilder(ctrl), - } - gotWorkerToRetry, err := s.handleWorkerResult(tt.worker) - assert.ErrorIs(t, err, tt.err) - assert.Equal(t, tt.wantWorkerToRetry, gotWorkerToRetry) - }) - } -} diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index c6bd0f17d9..c28a42d1c6 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -51,8 +51,9 @@ func (s chainSyncState) String() string { } var ( - pendingBlocksLimit = maxResponseSize * 32 - isSyncedGauge = promauto.NewGauge(prometheus.GaugeOpts{ + bootstrapRequestData = network.RequestedDataHeader + network.RequestedDataBody + network.RequestedDataJustification + pendingBlocksLimit = maxResponseSize * 32 + isSyncedGauge = promauto.NewGauge(prometheus.GaugeOpts{ Namespace: "gossamer_network_syncer", Name: "is_synced", Help: "bool representing whether the node is synced to the head of the chain", @@ -108,8 +109,7 @@ type ChainSync interface { type announcedBlock struct { who peer.ID - hash common.Hash - number uint + header *types.Header } type chainSync struct { @@ -138,10 +138,6 @@ type chainSync struct { state chainSyncState - // handler is set to either `bootstrapSyncer` or `tipSyncer`, depending on the current - // chain sync state - handler workHandler - benchmarker *syncBenchmarker finalisedCh <-chan *types.FinalisationInfo @@ -197,7 +193,6 @@ func newChainSync(cfg chainSyncConfig) *chainSync { peerState: make(map[peer.ID]*peerState), pendingBlocks: cfg.pendingBlocks, state: bootstrap, - handler: newBootstrapSyncer(cfg.bs), benchmarker: newSyncBenchmarker(syncSamplesToKeep), finalisedCh: cfg.bs.GetFinalisedNotifierChannel(), minPeers: cfg.minPeers, @@ -334,8 +329,7 @@ func (cs *chainSync) setBlockAnnounce(who peer.ID, blockAnnounceHeader *types.He // it is likely they will receive this block and its ancestors before us. 
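Both versions of the executeTipSync loop (patch 013 above and the rewrite just below) lean on Go's Stop-then-drain timer idiom, so a slot timer that already fired cannot leave a stale tick behind when a block announce wins the select. The idiom in isolation:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        announces := make(chan string, 1)
        announces <- "block announce"

        for i := 0; i < 2; i++ {
            slot := time.NewTimer(50 * time.Millisecond)
            select {
            case <-slot.C:
                fmt.Println("slot elapsed: do the periodic work")
            case a := <-announces:
                // Stop reports false when the timer already fired; the tick
                // then still sits in slot.C, so it is drained to guarantee
                // the channel is empty before the handler runs.
                if !slot.Stop() {
                    <-slot.C
                }
                fmt.Println("handled:", a)
            }
        }
    }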
cs.blockAnnounceCh <- announcedBlock{ who: who, - hash: blockAnnounceHeaderHash, - number: blockAnnounceHeader.Number, + header: blockAnnounceHeader, } return nil } @@ -451,60 +445,116 @@ func (cs *chainSync) sync() { func (cs *chainSync) executeTipSync() error { for { + cs.workerPool.useConnectedPeers() slotDurationTimer := time.NewTimer(cs.slotDuration) select { - case <-slotDurationTimer.C: - err := cs.requestPendingBlocks() - if err != nil { - return fmt.Errorf("while requesting pending blocks") - } - case blockAnnouncement := <-cs.blockAnnounceCh: if !slotDurationTimer.Stop() { <-slotDurationTimer.C } who := blockAnnouncement.who - announcedHash := blockAnnouncement.hash - announcedNumber := blockAnnouncement.number + announcedHash := blockAnnouncement.header.Hash() + announcedNumber := blockAnnouncement.header.Number - bestBlockHeader, err := cs.blockState.BestBlockHeader() + has, err := cs.blockState.HasHeader(announcedHash) if err != nil { - return fmt.Errorf("getting best block header: %w", err) + return fmt.Errorf("checking if header exists: %w", err) } - if announcedNumber <= bestBlockHeader.Number { + if has { continue } - gapLength := uint32(announcedNumber - bestBlockHeader.Number) - var request *network.BlockRequestMessage - if gapLength > 1 { - request = descendingBlockRequest(announcedHash, gapLength, bootstrapRequestData) - logger.Debugf("received a block announce from %s in tip mode, requesting %d blocks, starting %s (#%d)", + bestBlockHeader, err := cs.blockState.BestBlockHeader() + if err != nil { + return fmt.Errorf("getting best block header: %w", err) + } + + // if the announced block has a lower number than our best + // block header, let's check if it is greater than our latest + // finalized header, if so this block is likely to be a fork + if announcedNumber < bestBlockHeader.Number { + highestFinalizedHeader, err := cs.blockState.GetHighestFinalisedHeader() + if err != nil { + return fmt.Errorf("getting highest finalized header: %w", err) + } + + // ignore the block if it has the same or lower number + if announcedNumber <= highestFinalizedHeader.Number { + continue + } + + logger.Debugf("block announce lower than best block %s (#%d) and greater than highest finalized %s (#%d)", + bestBlockHeader.Hash(), bestBlockHeader.Number, highestFinalizedHeader.Hash(), highestFinalizedHeader.Number) + + parentExists, err := cs.blockState.HasHeader(blockAnnouncement.header.ParentHash) + if err != nil && !errors.Is(err, chaindb.ErrKeyNotFound) { + return fmt.Errorf("while checking header exists: %w", err) + } + + gapLength := uint32(1) + startAtBlock := announcedNumber + var request *network.BlockRequestMessage + + if parentExists { + request = singleBlockRequest(announcedHash, bootstrapRequestData) + } else { + gapLength = uint32(announcedNumber - highestFinalizedHeader.Number) + startAtBlock = highestFinalizedHeader.Number + 1 + request = descendingBlockRequest(announcedHash, gapLength, bootstrapRequestData) + } + + logger.Debugf("received a block announce from %s, requesting %d blocks, starting %s (#%d)", who, gapLength, announcedHash, announcedNumber) + + resultsQueue := make(chan *syncTaskResult) + wg := sync.WaitGroup{} + + wg.Add(1) + go cs.handleWorkersResults(resultsQueue, startAtBlock, gapLength, &wg) + cs.workerPool.submitRequest(request, resultsQueue) + wg.Wait() } else { - gapLength = 1 - request = singleBlockRequest(announcedHash, bootstrapRequestData) - logger.Debugf("received a block announce from %s in tip mode, requesting %s (#%d)", - who, announcedHash,
announcedNumber) - } + gapLength := uint32(announcedNumber - bestBlockHeader.Number) + startAtBlock := announcedNumber + totalBlocks := uint32(1) + var request *network.BlockRequestMessage + if gapLength > 1 { + request = descendingBlockRequest(announcedHash, gapLength, bootstrapRequestData) + startAtBlock = announcedNumber - uint(*request.Max) + 1 + totalBlocks = *request.Max + + logger.Debugf("received a block announce from %s, requesting %d blocks, descending request from %s (#%d)", + who, gapLength, announcedHash, announcedNumber) + } else { + gapLength = 1 + request = singleBlockRequest(announcedHash, bootstrapRequestData) + logger.Debugf("received a block announce from %s, requesting a single block %s (#%d)", + who, announcedHash, announcedNumber) + } + + resultsQueue := make(chan *syncTaskResult) + wg := sync.WaitGroup{} - resultsQueue := make(chan *syncTaskResult) - wg := sync.WaitGroup{} + wg.Add(1) + go cs.handleWorkersResults(resultsQueue, startAtBlock, totalBlocks, &wg) + cs.workerPool.submitRequest(request, resultsQueue) + wg.Wait() + } - wg.Add(1) - go cs.handleWorkersResults(resultsQueue, bestBlockHeader.Number+1, gapLength, &wg) - cs.workerPool.submitRequest(request, resultsQueue) - wg.Wait() + err = cs.requestPendingBlocks() + if err != nil { + return fmt.Errorf("while requesting pending blocks: %w", err) + } } } } func (cs *chainSync) requestPendingBlocks() error { - logger.Debugf("handling tick, we have %d pending blocks", cs.pendingBlocks.size()) + logger.Info("requesting pending blocks") if cs.pendingBlocks.size() == 0 { return nil } @@ -514,28 +564,12 @@ func (cs *chainSync) requestPendingBlocks() error { return fmt.Errorf("getting highest finalised header: %w", err) } - requests := map[uint]*network.BlockRequestMessage{} for _, pendingBlock := range cs.pendingBlocks.getBlocks() { if pendingBlock.number <= highestFinalized.Number { cs.pendingBlocks.removeBlock(pendingBlock.hash) continue } - if pendingBlock.header == nil { - logger.Debugf("handling missing header: %s (#%d)", pendingBlock.hash, pendingBlock.number) - requestBlockFullData := singleBlockRequest(pendingBlock.hash, bootstrapRequestData) - requests[pendingBlock.number] = requestBlockFullData - continue - } - - if pendingBlock.body == nil { - logger.Debugf("handling missing body: %s (#%d)", pendingBlock.hash, pendingBlock.number) - requestBlockBody := singleBlockRequest(pendingBlock.hash, - network.RequestedDataBody+network.RequestedDataJustification) - requests[pendingBlock.number] = requestBlockBody - continue - } - parentExists, err := cs.blockState.HasHeader(pendingBlock.header.ParentHash) if err != nil { return fmt.Errorf("getting pending block parent header: %w", err) @@ -549,26 +583,30 @@ func (cs *chainSync) requestPendingBlocks() error { continue } - logger.Debugf("handling missing parent: %s (#%d)", pendingBlock.hash, pendingBlock.number) - gapLength := pendingBlock.number - highestFinalized.Number + if gapLength > 128 { + logger.Criticalf("GAP LENGTH: %d, GREATER THAN 128 blocks", gapLength) + gapLength = 128 + } + descendingGapRequest := descendingBlockRequest(pendingBlock.hash, uint32(gapLength), bootstrapRequestData) - requests[pendingBlock.number] = descendingGapRequest + startAtBlock := pendingBlock.number - uint(*descendingGapRequest.Max) + 1 - } - - wg := sync.WaitGroup{} - // the `requests` in the tip sync are not related
necessarily + // that is why we need to treat them separately + wg := sync.WaitGroup{} + wg.Add(1) + resultsQueue := make(chan *syncTaskResult) + + // TODO: we should handle the requests concurrently + // a way to achieve that is by constructing a new `handleWorkersResults` for + // handling only tip sync requests + go cs.handleWorkersResults(resultsQueue, startAtBlock, *descendingGapRequest.Max, &wg) + cs.workerPool.submitRequest(descendingGapRequest, resultsQueue) + wg.Wait() } - wg.Wait() + return nil } @@ -720,7 +758,7 @@ loop: if taskResult.err != nil { logger.Criticalf("task result error: %s", taskResult.err) if errors.Is(taskResult.err, network.ErrReceivedEmptyMessage) { - cs.workerPool.submitRequestIgnoring(taskResult.request, taskResult.who, workersResults) + cs.workerPool.submitRequest(taskResult.request, workersResults) continue } @@ -873,6 +911,7 @@ func (cs *chainSync) processBlockData(blockData types.BlockData) error { //nolin // while in bootstrap mode we don't need to broadcast block announcements announceImportedBlock := cs.state == tip if headerInState && bodyInState { + //logger.Infof("Process Block With State Header And Body in State: %s (#%d)", blockData.Hash.Short(), blockData.Number()) err = cs.processBlockDataWithStateHeaderAndBody(blockData, announceImportedBlock) if err != nil { return fmt.Errorf("processing block data with header and "+ @@ -883,14 +922,15 @@ func (cs *chainSync) processBlockData(blockData types.BlockData) error { //nolin if blockData.Header != nil { if blockData.Body != nil { + //logger.Infof("Process Block With Header And Body: %s (#%d)", blockData.Hash.Short(), blockData.Number()) err = cs.processBlockDataWithHeaderAndBody(blockData, announceImportedBlock) if err != nil { return fmt.Errorf("processing block data with header and body: %w", err) } - //logger.Debugf("block with hash %s processed", blockData.Hash) } if blockData.Justification != nil && len(*blockData.Justification) > 0 { + logger.Infof("Process Block Justification: %s (#%d)", blockData.Hash.Short(), blockData.Number()) err = cs.handleJustification(blockData.Header, *blockData.Justification) if err != nil { return fmt.Errorf("handling justification: %w", err) diff --git a/dot/sync/requests.go b/dot/sync/requests.go index b57e9263f0..b4c82406a8 100644 --- a/dot/sync/requests.go +++ b/dot/sync/requests.go @@ -11,7 +11,7 @@ func singleBlockRequest(blockHash common.Hash, requestedData byte) *network.Bloc return &network.BlockRequestMessage{ RequestedData: requestedData, StartingBlock: *variadic.MustNewUint32OrHash(blockHash), - Direction: network.Ascending, + Direction: network.Descending, Max: &one, } } diff --git a/dot/sync/sync_worker.go b/dot/sync/sync_worker.go index 0aaabbc296..0e28829f91 100644 --- a/dot/sync/sync_worker.go +++ b/dot/sync/sync_worker.go @@ -4,18 +4,10 @@ import ( "context" "sync" - "github.com/ChainSafe/gossamer/dot/network" "github.com/ChainSafe/gossamer/lib/common" "github.com/libp2p/go-libp2p/core/peer" ) -type syncTaskResult struct { - who peer.ID - request *network.BlockRequestMessage - response *network.BlockResponseMessage - err error -} - // syncWorker represents a available peer // for requesting blocks, once a peer is disconnected or is ignored // we can just disable its worker.
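The tip-sync paths above keep recomputing the same descending window: a request of max blocks ending at a given height covers [height-max+1, height], and gaps back to the finalised chain are capped at 128 blocks per request. A worked sketch of just that arithmetic, detached from the network types:

    package main

    import "fmt"

    const maxResponseSize = 128

    // descendingWindow returns where a descending request ending at `from`
    // starts, and how many blocks it asks for, capping the gap to the
    // finalised chain at 128 blocks, as requestPendingBlocks does above.
    func descendingWindow(from, finalised uint) (startAt uint, amount uint32) {
        gap := from - finalised
        if gap > maxResponseSize {
            gap = maxResponseSize
        }
        return from - gap + 1, uint32(gap)
    }

    func main() {
        // Announced block 1000, finalised 995: one request covering 996-1000.
        fmt.Println(descendingWindow(1000, 995)) // 996 5

        // Announced block 1000, finalised 500: the gap is capped at 128,
        // so this request covers 873-1000 and the rest is fetched later.
        fmt.Println(descendingWindow(1000, 500)) // 873 128
    }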
@@ -54,7 +46,7 @@ func (s *syncWorker) update(bestHash common.Hash, bestNumber uint) { s.bestNumber = bestNumber } -func (s *syncWorker) Start(tasks <-chan *syncTask, wg *sync.WaitGroup) { +func (s *syncWorker) Start(tasks chan *syncTask, wg *sync.WaitGroup) { wg.Add(1) go func() { @@ -70,11 +62,8 @@ func (s *syncWorker) Start(tasks <-chan *syncTask, wg *sync.WaitGroup) { select { case <-s.stopCh: return - case task := <-tasks: - if _, toIgnore := task.ignorePeer[s.who]; toIgnore { - continue - } + case task := <-tasks: request := task.request logger.Infof("[EXECUTING] worker %s: block request: %s", s.who, request) @@ -85,7 +74,12 @@ func (s *syncWorker) Start(tasks <-chan *syncTask, wg *sync.WaitGroup) { logger.Infof("[FINISHED] worker %s: block data amount: %d", s.who, len(response.BlockData)) } - task.resultCh <- &syncTaskResult{s.who, request, response, err} + task.resultCh <- &syncTaskResult{ + who: s.who, + request: request, + response: response, + err: err, + } } } }() diff --git a/dot/sync/syncer.go b/dot/sync/syncer.go index 3a24891f0c..488a03b0fa 100644 --- a/dot/sync/syncer.go +++ b/dot/sync/syncer.go @@ -90,7 +90,12 @@ func (s *Service) HandleBlockAnnounceHandshake(from peer.ID, msg *network.BlockA func (s *Service) HandleBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) error { logger.Debug("received BlockAnnounceMessage") header := types.NewHeader(msg.ParentHash, msg.StateRoot, msg.ExtrinsicsRoot, msg.Number, msg.Digest) - return s.chainSync.setBlockAnnounce(from, header) + err := s.chainSync.setBlockAnnounce(from, header) + if err != nil { + logger.Errorf("setting block announce: %s", err) + } + + return err } // IsSynced exposes the synced state diff --git a/dot/sync/syncer_test.go b/dot/sync/syncer_test.go index 89372bc3ba..32d387816a 100644 --- a/dot/sync/syncer_test.go +++ b/dot/sync/syncer_test.go @@ -252,8 +252,7 @@ func TestService_Start(t *testing.T) { }) service := Service{ - chainSync: chainSync, - chainProcessor: chainProcessor, + chainSync: chainSync, } err := service.Start() @@ -271,8 +270,7 @@ func TestService_Stop(t *testing.T) { chainProcessor.EXPECT().stop() service := &Service{ - chainSync: chainSync, - chainProcessor: chainProcessor, + chainSync: chainSync, } err := service.Stop() diff --git a/dot/sync/tip_syncer.go b/dot/sync/tip_syncer.go deleted file mode 100644 index 4379846c95..0000000000 --- a/dot/sync/tip_syncer.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "errors" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" -) - -var _ workHandler = &tipSyncer{} - -type handleReadyBlockFunc func(*types.BlockData) - -// tipSyncer handles workers when syncing at the tip of the chain -type tipSyncer struct { - blockState BlockState - pendingBlocks DisjointBlockSet - handleReadyBlock handleReadyBlockFunc -} - -func newTipSyncer(blockState BlockState, pendingBlocks DisjointBlockSet, - handleReadyBlock handleReadyBlockFunc) *tipSyncer { - return &tipSyncer{ - blockState: blockState, - pendingBlocks: pendingBlocks, - handleReadyBlock: handleReadyBlock, - } -} - -func (s *tipSyncer) handleNewPeerState(ps *peerState) (*worker, error) { - fin, err := s.blockState.GetHighestFinalisedHeader() - if err != nil { - return nil, err - } - - if ps.number <= fin.Number { - return nil, nil //nolint:nilnil - } - - return &worker{ - startHash: ps.hash, - startNumber: 
uintPtr(ps.number), - targetHash: ps.hash, - targetNumber: uintPtr(ps.number), - requestData: bootstrapRequestData, - }, nil -} - -//nolint:nilnil -func (s *tipSyncer) handleWorkerResult(res *worker) ( - workerToRetry *worker, err error) { - if res.err == nil { - return nil, nil - } - - if errors.Is(res.err.err, errUnknownParent) { - // handleTick will handle the errUnknownParent case - return nil, nil - } - - fin, err := s.blockState.GetHighestFinalisedHeader() - if err != nil { - return nil, err - } - - // don't retry if we're requesting blocks lower than finalised - switch res.direction { - case network.Ascending: - if *res.targetNumber <= fin.Number { - return nil, nil - } - - // if start is lower than finalised, increase it to finalised+1 - if *res.startNumber <= fin.Number { - *res.startNumber = fin.Number + 1 - res.startHash = common.Hash{} - } - case network.Descending: - if *res.startNumber <= fin.Number { - return nil, nil - } - - // if target is lower than finalised, increase it to finalised+1 - if *res.targetNumber <= fin.Number { - *res.targetNumber = fin.Number + 1 - res.targetHash = common.Hash{} - } - } - - return &worker{ - startHash: res.startHash, - startNumber: res.startNumber, - targetHash: res.targetHash, - targetNumber: res.targetNumber, - direction: res.direction, - requestData: res.requestData, - }, nil -} - -func (*tipSyncer) hasCurrentWorker(w *worker, workers map[uint64]*worker) bool { - if w == nil || w.startNumber == nil || w.targetNumber == nil { - return true - } - - for _, curr := range workers { - if w.direction != curr.direction || w.requestData != curr.requestData { - continue - } - - switch w.direction { - case network.Ascending: - if *w.targetNumber > *curr.targetNumber || - *w.startNumber < *curr.startNumber { - continue - } - case network.Descending: - if *w.targetNumber < *curr.targetNumber || - *w.startNumber > *curr.startNumber { - continue - } - } - - // worker (start, end) is within curr (start, end), if hashes are equal then the request is either - // for the same data or some subset of data that is covered by curr - if w.startHash == curr.startHash || w.targetHash == curr.targetHash { - return true - } - } - - return false -} - -// handleTick traverses the pending blocks set to find which forks still need to be requested -func (s *tipSyncer) handleTick() ([]*worker, error) { - logger.Debugf("handling tick, we have %d pending blocks", s.pendingBlocks.size()) - - if s.pendingBlocks.size() == 0 { - return nil, nil - } - - fin, err := s.blockState.GetHighestFinalisedHeader() - if err != nil { - return nil, err - } - - // cases for each block in pending set: - // 1. only hash and number are known; in this case, request the full block (and ancestor chain) - // 2. only header is known; in this case, request the block body - // 3. 
entire block is known; in this case, check if we have become aware of the parent - // if we have, move it to the ready blocks queue; otherwise, request the chain of ancestors - - var workers []*worker - - for _, block := range s.pendingBlocks.getBlocks() { - if block.number <= fin.Number { - // delete from pending set (this should not happen, it should have already been deleted) - s.pendingBlocks.removeBlock(block.hash) - continue - } - - logger.Tracef("handling pending block number %d with hash %s", block.number, block.hash) - - if block.header == nil { - // case 1 - workers = append(workers, &worker{ - startHash: block.hash, - startNumber: uintPtr(block.number), - targetHash: fin.Hash(), - targetNumber: uintPtr(fin.Number), - direction: network.Descending, - requestData: bootstrapRequestData, - pendingBlock: block, - }) - continue - } - - if block.body == nil { - // case 2 - workers = append(workers, &worker{ - startHash: block.hash, - startNumber: uintPtr(block.number), - targetHash: block.hash, - targetNumber: uintPtr(block.number), - requestData: network.RequestedDataBody + network.RequestedDataJustification, - pendingBlock: block, - }) - continue - } - - // case 3 - has, err := s.blockState.HasHeader(block.header.ParentHash) - if err != nil { - return nil, err - } - - if has { - // block is ready, as parent is known! - // also, move any pendingBlocks that are descendants of this block to the ready blocks queue - s.handleReadyBlock(block.toBlockData()) - continue - } - - // request descending chain from (parent of pending block) -> (last finalised block) - workers = append(workers, &worker{ - startHash: block.header.ParentHash, - startNumber: uintPtr(block.number - 1), - targetNumber: uintPtr(fin.Number), - direction: network.Descending, - requestData: bootstrapRequestData, - pendingBlock: block, - }) - } - - return workers, nil -} diff --git a/dot/sync/tip_syncer_integration_test.go b/dot/sync/tip_syncer_integration_test.go deleted file mode 100644 index 5e433d73d9..0000000000 --- a/dot/sync/tip_syncer_integration_test.go +++ /dev/null @@ -1,372 +0,0 @@ -//go:build integration - -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "context" - "testing" - "time" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/trie" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func newTestTipSyncer(t *testing.T) *tipSyncer { - finHeader := types.NewHeader(common.NewHash([]byte{0}), - trie.EmptyHash, trie.EmptyHash, 200, types.NewDigest()) - - ctrl := gomock.NewController(t) - bs := NewMockBlockState(ctrl) - bs.EXPECT().GetHighestFinalisedHeader().Return(finHeader, nil).AnyTimes() - bs.EXPECT().HasHeader(gomock.AssignableToTypeOf(common.Hash{})).Return(true, nil).AnyTimes() - - readyBlocks := newBlockQueue(maxResponseSize) - pendingBlocks := newDisjointBlockSet(pendingBlocksLimit) - return newTipSyncer(bs, pendingBlocks, readyBlocks, nil) -} - -func TestTipSyncer_handleNewPeerState(t *testing.T) { - s := newTestTipSyncer(t) - - // peer reports state lower than our highest finalised, we should ignore - ps := &peerState{ - number: 1, - } - - w, err := s.handleNewPeerState(ps) - require.NoError(t, err) - require.Nil(t, w) - - ps = &peerState{ - number: 201, - hash: common.Hash{0xa, 0xb}, - } - - // otherwise, return a worker - expected := &worker{ 
- startNumber: uintPtr(ps.number), - startHash: ps.hash, - targetNumber: uintPtr(ps.number), - targetHash: ps.hash, - requestData: bootstrapRequestData, - } - - w, err = s.handleNewPeerState(ps) - require.NoError(t, err) - require.Equal(t, expected, w) -} - -func TestTipSyncer_handleWorkerResult(t *testing.T) { - s := newTestTipSyncer(t) - - w, err := s.handleWorkerResult(&worker{}) - require.NoError(t, err) - require.Nil(t, w) - - w, err = s.handleWorkerResult(&worker{ - err: &workerError{ - err: errUnknownParent, - }, - }) - require.NoError(t, err) - require.Nil(t, w) - - // worker is for blocks lower than finalised - w, err = s.handleWorkerResult(&worker{ - targetNumber: uintPtr(199), - }) - require.NoError(t, err) - require.Nil(t, w) - - w, err = s.handleWorkerResult(&worker{ - direction: network.Descending, - startNumber: uintPtr(199), - }) - require.NoError(t, err) - require.Nil(t, w) - - // worker start is lower than finalised, start should be updated - expected := &worker{ - direction: network.Ascending, - startNumber: uintPtr(201), - targetNumber: uintPtr(300), - requestData: bootstrapRequestData, - } - - w, err = s.handleWorkerResult(&worker{ - direction: network.Ascending, - startNumber: uintPtr(199), - targetNumber: uintPtr(300), - requestData: bootstrapRequestData, - err: &workerError{}, - }) - require.NoError(t, err) - require.Equal(t, expected, w) - - expected = &worker{ - direction: network.Descending, - startNumber: uintPtr(300), - targetNumber: uintPtr(201), - requestData: bootstrapRequestData, - } - - w, err = s.handleWorkerResult(&worker{ - direction: network.Descending, - startNumber: uintPtr(300), - targetNumber: uintPtr(199), - requestData: bootstrapRequestData, - err: &workerError{}, - }) - require.NoError(t, err) - require.Equal(t, expected, w) - - // start and target are higher than finalised, don't modify - expected = &worker{ - direction: network.Descending, - startNumber: uintPtr(300), - startHash: common.Hash{0xa, 0xb}, - targetNumber: uintPtr(201), - targetHash: common.Hash{0xc, 0xd}, - requestData: bootstrapRequestData, - } - - w, err = s.handleWorkerResult(&worker{ - direction: network.Descending, - startNumber: uintPtr(300), - startHash: common.Hash{0xa, 0xb}, - targetNumber: uintPtr(201), - targetHash: common.Hash{0xc, 0xd}, - requestData: bootstrapRequestData, - err: &workerError{}, - }) - require.NoError(t, err) - require.Equal(t, expected, w) -} - -func TestTipSyncer_handleTick_case1(t *testing.T) { - s := newTestTipSyncer(t) - - w, err := s.handleTick() - require.NoError(t, err) - require.Nil(t, w) - - fin, _ := s.blockState.GetHighestFinalisedHeader() - - // add pending blocks w/ only hash and number, lower than finalised should be removed - s.pendingBlocks.addHashAndNumber(common.Hash{0xa}, fin.Number) - s.pendingBlocks.addHashAndNumber(common.Hash{0xb}, fin.Number+1) - - expected := []*worker{ - { - startHash: common.Hash{0xb}, - startNumber: uintPtr(fin.Number + 1), - targetHash: fin.Hash(), - targetNumber: uintPtr(fin.Number), - direction: network.Descending, - requestData: bootstrapRequestData, - pendingBlock: &pendingBlock{ - hash: common.Hash{0xb}, - number: 201, - clearAt: time.Unix(0, 0), - }, - }, - } - w, err = s.handleTick() - require.NoError(t, err) - require.NotEmpty(t, w) - assert.Greater(t, w[0].pendingBlock.clearAt, time.Now()) - w[0].pendingBlock.clearAt = time.Unix(0, 0) - require.Equal(t, expected, w) - require.False(t, s.pendingBlocks.(*disjointBlockSet).hasBlock(common.Hash{0xa})) - require.True(t, 
s.pendingBlocks.(*disjointBlockSet).hasBlock(common.Hash{0xb})) -} - -func TestTipSyncer_handleTick_case2(t *testing.T) { - s := newTestTipSyncer(t) - - fin, _ := s.blockState.GetHighestFinalisedHeader() - - // add pending blocks w/ only header - header := &types.Header{ - Number: fin.Number + 1, - } - s.pendingBlocks.addHeader(header) - - expected := []*worker{ - { - startHash: header.Hash(), - startNumber: uintPtr(header.Number), - targetHash: header.Hash(), - targetNumber: uintPtr(header.Number), - direction: network.Ascending, - requestData: network.RequestedDataBody + network.RequestedDataJustification, - pendingBlock: &pendingBlock{ - hash: header.Hash(), - number: 201, - header: header, - clearAt: time.Time{}, - }, - }, - } - w, err := s.handleTick() - require.NoError(t, err) - require.NotEmpty(t, w) - assert.Greater(t, w[0].pendingBlock.clearAt, time.Now()) - w[0].pendingBlock.clearAt = time.Time{} - require.Equal(t, expected, w) - require.True(t, s.pendingBlocks.(*disjointBlockSet).hasBlock(header.Hash())) -} -func TestTipSyncer_handleTick_case3(t *testing.T) { - s := newTestTipSyncer(t) - s.handleReadyBlock = func(data *types.BlockData) { - s.pendingBlocks.removeBlock(data.Hash) - s.readyBlocks.push(data) - } - fin, _ := s.blockState.GetHighestFinalisedHeader() - - // add pending block w/ full block, HasHeader will return true, so the block will be processed - header := &types.Header{ - Number: fin.Number + 1, - } - block := &types.Block{ - Header: *header, - Body: types.Body{}, - } - s.pendingBlocks.addBlock(block) - - w, err := s.handleTick() - require.NoError(t, err) - require.Equal(t, []*worker(nil), w) - require.False(t, s.pendingBlocks.(*disjointBlockSet).hasBlock(header.Hash())) - readyBlockData, err := s.readyBlocks.pop(context.Background()) - require.Equal(t, block.ToBlockData(), readyBlockData) - require.NoError(t, err) - - // add pending block w/ full block, but block is not ready as parent is unknown - ctrl := gomock.NewController(t) - bs := NewMockBlockState(ctrl) - bs.EXPECT().GetHighestFinalisedHeader().Return(fin, nil).Times(2) - bs.EXPECT().HasHeader(gomock.AssignableToTypeOf(common.Hash{})).Return(false, nil).Times(2) - s.blockState = bs - - header = &types.Header{ - Number: fin.Number + 100, - } - block = &types.Block{ - Header: *header, - Body: types.Body{}, - } - s.pendingBlocks.addBlock(block) - - expected := []*worker{ - { - startHash: header.ParentHash, - startNumber: uintPtr(header.Number - 1), - targetNumber: uintPtr(fin.Number), - direction: network.Descending, - requestData: bootstrapRequestData, - pendingBlock: &pendingBlock{ - hash: header.Hash(), - number: 300, - header: header, - body: &types.Body{}, - clearAt: time.Time{}, - }, - }, - } - - w, err = s.handleTick() - require.NoError(t, err) - require.NotEmpty(t, w) - assert.Greater(t, w[0].pendingBlock.clearAt, time.Now()) - w[0].pendingBlock.clearAt = time.Time{} - require.Equal(t, expected, w) - require.True(t, s.pendingBlocks.(*disjointBlockSet).hasBlock(header.Hash())) - - // add parent block to readyBlocks, should move block to readyBlocks - s.readyBlocks.push(&types.BlockData{ - Hash: header.ParentHash, - }) - w, err = s.handleTick() - require.NoError(t, err) - require.Equal(t, []*worker(nil), w) - require.False(t, s.pendingBlocks.(*disjointBlockSet).hasBlock(header.Hash())) - _, _ = s.readyBlocks.pop(context.Background()) // first pop will remove parent - readyBlockData, err = s.readyBlocks.pop(context.Background()) - require.NoError(t, err) - require.Equal(t, block.ToBlockData(), 
readyBlockData) -} - -func TestTipSyncer_hasCurrentWorker(t *testing.T) { - s := newTestTipSyncer(t) - require.False(t, s.hasCurrentWorker(&worker{ - startNumber: uintPtr(0), - targetNumber: uintPtr(0), - }, nil)) - - workers := make(map[uint64]*worker) - workers[0] = &worker{ - startNumber: uintPtr(1), - targetNumber: uintPtr(128), - } - require.False(t, s.hasCurrentWorker(&worker{ - startNumber: uintPtr(1), - targetNumber: uintPtr(129), - }, workers)) - require.True(t, s.hasCurrentWorker(&worker{ - startNumber: uintPtr(1), - targetNumber: uintPtr(128), - }, workers)) - require.True(t, s.hasCurrentWorker(&worker{ - startNumber: uintPtr(1), - targetNumber: uintPtr(127), - }, workers)) - - workers[0] = &worker{ - startNumber: uintPtr(128), - targetNumber: uintPtr(255), - } - require.False(t, s.hasCurrentWorker(&worker{ - startNumber: uintPtr(127), - targetNumber: uintPtr(255), - }, workers)) - require.True(t, s.hasCurrentWorker(&worker{ - startNumber: uintPtr(128), - targetNumber: uintPtr(255), - }, workers)) - - workers[0] = &worker{ - startNumber: uintPtr(128), - targetNumber: uintPtr(1), - direction: network.Descending, - } - require.False(t, s.hasCurrentWorker(&worker{ - startNumber: uintPtr(129), - targetNumber: uintPtr(1), - direction: network.Descending, - }, workers)) - require.True(t, s.hasCurrentWorker(&worker{ - startNumber: uintPtr(128), - targetNumber: uintPtr(1), - direction: network.Descending, - }, workers)) - require.True(t, s.hasCurrentWorker(&worker{ - startNumber: uintPtr(128), - targetNumber: uintPtr(2), - direction: network.Descending, - }, workers)) - require.True(t, s.hasCurrentWorker(&worker{ - startNumber: uintPtr(127), - targetNumber: uintPtr(1), - direction: network.Descending, - }, workers)) -} diff --git a/dot/sync/tip_syncer_test.go b/dot/sync/tip_syncer_test.go deleted file mode 100644 index 09ed2b9eb6..0000000000 --- a/dot/sync/tip_syncer_test.go +++ /dev/null @@ -1,401 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "testing" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" -) - -func Test_tipSyncer_handleNewPeerState(t *testing.T) { - t.Parallel() - - type fields struct { - blockStateBuilder func(ctrl *gomock.Controller) BlockState - pendingBlocks DisjointBlockSet - readyBlocks *blockQueue - } - tests := map[string]struct { - fields fields - peerState *peerState - want *worker - err error - }{ - "peer_state_number_<_final_block_number": { - fields: fields{ - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{ - Number: 2, - }, nil) - return mockBlockState - }, - }, - peerState: &peerState{number: 1}, - want: nil, - }, - "base_state": { - fields: fields{ - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{ - Number: 2, - }, nil) - return mockBlockState - }, - }, - peerState: &peerState{number: 3}, - want: &worker{ - startNumber: uintPtr(3), - targetNumber: uintPtr(3), - requestData: bootstrapRequestData, - }, - }, - } - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - s := &tipSyncer{ - 
blockState: tt.fields.blockStateBuilder(ctrl), - pendingBlocks: tt.fields.pendingBlocks, - readyBlocks: tt.fields.readyBlocks, - } - got, err := s.handleNewPeerState(tt.peerState) - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - assert.Equal(t, tt.want, got) - }) - } -} - -func Test_tipSyncer_handleTick(t *testing.T) { - t.Parallel() - - type fields struct { - blockStateBuilder func(ctrl *gomock.Controller) BlockState - pendingBlocksBuilder func(ctrl *gomock.Controller) DisjointBlockSet - readyBlocks *blockQueue - } - tests := map[string]struct { - fields fields - want []*worker - err error - }{ - "base_case": { - fields: fields{ - pendingBlocksBuilder: func(ctrl *gomock.Controller) DisjointBlockSet { - mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) - mockDisjointBlockSet.EXPECT().size().Return(1).Times(2) - mockDisjointBlockSet.EXPECT().getBlocks().Return([]*pendingBlock{ - {number: 2}, - {number: 3}, - {number: 4, - header: &types.Header{ - Number: 4, - }, - }, - {number: 5, - header: &types.Header{ - Number: 5, - }, - body: &types.Body{}, - }, - }) - mockDisjointBlockSet.EXPECT().removeBlock(common.Hash{}) - return mockDisjointBlockSet - }, - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{ - Number: 2, - }, nil) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - return mockBlockState - }, - readyBlocks: newBlockQueue(3), - }, - want: []*worker{ - { - startNumber: uintPtr(3), - targetNumber: uintPtr(2), - targetHash: common.Hash{5, 189, 204, 69, 79, 96, 160, 141, 66, 125, 5, 231, 241, - 159, 36, 15, 220, 57, 31, 87, 10, 183, 111, 203, 150, 236, 202, 11, 88, 35, 211, 191}, - pendingBlock: &pendingBlock{number: 3}, - requestData: bootstrapRequestData, - direction: network.Descending, - }, - { - startNumber: uintPtr(4), - targetNumber: uintPtr(4), - pendingBlock: &pendingBlock{ - number: 4, - header: &types.Header{ - Number: 4, - }, - }, - requestData: network.RequestedDataBody + network.RequestedDataJustification, - }, - { - startNumber: uintPtr(4), - targetNumber: uintPtr(2), - direction: network.Descending, - pendingBlock: &pendingBlock{ - number: 5, - header: &types.Header{ - Number: 5, - }, - body: &types.Body{}, - }, - requestData: bootstrapRequestData, - }, - }, - err: nil, - }, - } - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - s := &tipSyncer{ - blockState: tt.fields.blockStateBuilder(ctrl), - pendingBlocks: tt.fields.pendingBlocksBuilder(ctrl), - readyBlocks: tt.fields.readyBlocks, - } - got, err := s.handleTick() - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - assert.Equal(t, tt.want, got) - }) - } -} - -func Test_tipSyncer_handleWorkerResult(t *testing.T) { - t.Parallel() - - tests := map[string]struct { - blockStateBuilder func(ctrl *gomock.Controller) BlockState - res *worker - want *worker - err error - }{ - "worker_error_is_nil": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - return NewMockBlockState(ctrl) - }, - res: &worker{}, - want: nil, - err: nil, - }, - "worker_error_is_error_unknown_parent": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - return NewMockBlockState(ctrl) - }, - res: &worker{ - err: &workerError{ - err: errUnknownParent, - }, - }, - want: nil, - err: nil, - 
}, - "ascending,_target_number_<_finalised_number": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{ - Number: 2, - }, nil) - return mockBlockState - }, - res: &worker{ - targetNumber: uintPtr(1), - direction: network.Ascending, - err: &workerError{}, - }, - want: nil, - err: nil, - }, - "ascending,_start_number_<_finalised_number": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{ - Number: 2, - }, nil) - return mockBlockState - }, - res: &worker{ - startNumber: uintPtr(1), - targetNumber: uintPtr(3), - direction: network.Ascending, - err: &workerError{}, - }, - want: &worker{ - startNumber: uintPtr(3), - targetNumber: uintPtr(3), - }, - err: nil, - }, - "descending,_start_number_<_finalised_number": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{ - Number: 2, - }, nil) - return mockBlockState - }, - res: &worker{ - startNumber: uintPtr(1), - direction: network.Descending, - err: &workerError{}, - }, - want: nil, - err: nil, - }, - "descending,_target_number_<_finalised_number": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{ - Number: 2, - }, nil) - return mockBlockState - }, - res: &worker{ - startNumber: uintPtr(3), - targetNumber: uintPtr(1), - direction: network.Descending, - err: &workerError{}, - }, - want: &worker{ - startNumber: uintPtr(3), - targetNumber: uintPtr(3), - direction: network.Descending, - }, - err: nil, - }, - } - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - s := &tipSyncer{ - blockState: tt.blockStateBuilder(ctrl), - } - got, err := s.handleWorkerResult(tt.res) - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - assert.Equal(t, tt.want, got) - }) - } -} - -func Test_tipSyncer_hasCurrentWorker(t *testing.T) { - t.Parallel() - - type args struct { - w *worker - workers map[uint64]*worker - } - tests := map[string]struct { - args args - want bool - }{ - "worker_nil": { - want: true, - }, - "ascending,_false": { - args: args{ - w: &worker{ - direction: network.Ascending, - startNumber: uintPtr(2), - targetNumber: uintPtr(2), - }, - workers: map[uint64]*worker{ - 1: { - direction: network.Ascending, - targetNumber: uintPtr(3), - startNumber: uintPtr(3), - }, - }, - }, - want: false, - }, - "ascending,_true": { - args: args{ - w: &worker{ - direction: network.Ascending, - startNumber: uintPtr(2), - targetNumber: uintPtr(2), - }, - workers: map[uint64]*worker{ - 1: { - direction: network.Ascending, - targetNumber: uintPtr(3), - startNumber: uintPtr(1), - }, - }, - }, - want: true, - }, - "descending,_false": { - args: args{ - w: &worker{ - direction: network.Descending, - startNumber: uintPtr(2), - targetNumber: uintPtr(2), - }, - workers: map[uint64]*worker{ - 1: { - startNumber: uintPtr(3), - targetNumber: uintPtr(3), - direction: network.Descending, - }, - }, - }, - want: false, - }, - "descending,_true": { - args: args{ - w: &worker{ - direction: network.Descending, - startNumber: uintPtr(2), - 
targetNumber: uintPtr(2), - }, - workers: map[uint64]*worker{ - 1: { - startNumber: uintPtr(3), - targetNumber: uintPtr(1), - direction: network.Descending, - }, - }, - }, - want: true, - }, - } - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - s := &tipSyncer{} - got := s.hasCurrentWorker(tt.args.w, tt.args.workers) - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index a9d4440673..40a821c499 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -11,9 +11,15 @@ import ( ) type syncTask struct { - ignorePeer map[peer.ID]struct{} - request *network.BlockRequestMessage - resultCh chan<- *syncTaskResult + request *network.BlockRequestMessage + resultCh chan<- *syncTaskResult +} + +type syncTaskResult struct { + who peer.ID + request *network.BlockRequestMessage + response *network.BlockResponseMessage + err error } type syncWorkerPool struct { @@ -94,19 +100,10 @@ func (s *syncWorkerPool) addWorkerFromBlockAnnounce(who peer.ID, bestHash common } func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, resultCh chan<- *syncTaskResult) { - s.taskQueue <- &syncTask{ - ignorePeer: make(map[peer.ID]struct{}), - request: request, - resultCh: resultCh, - } -} + s.l.RLock() + defer s.l.RUnlock() -func (s *syncWorkerPool) submitRequestIgnoring(request *network.BlockRequestMessage, toIgnore peer.ID, resultCh chan<- *syncTaskResult) { s.taskQueue <- &syncTask{ - ignorePeer: map[peer.ID]struct { - }{ - toIgnore: {}, - }, request: request, resultCh: resultCh, } From 3ba7e4c4c5be2650d06025ba8a0c4f93d8ed8614 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 26 Apr 2023 08:42:17 -0400 Subject: [PATCH 015/140] chore: rollback modification on .yml files --- chain/westend-local/config-alice.toml | 15 ++-- .../westend-local/westend-local-spec-raw.json | 5 +- chain/westend/config.toml | 6 +- docker-compose.yml | 82 +++++++++---------- .../provisioning/dashboards/gossamer.json | 12 +-- docker/prometheus/prometheus.yml | 2 +- 6 files changed, 60 insertions(+), 62 deletions(-) diff --git a/chain/westend-local/config-alice.toml b/chain/westend-local/config-alice.toml index f63e8d2be8..556ef95a64 100644 --- a/chain/westend-local/config-alice.toml +++ b/chain/westend-local/config-alice.toml @@ -1,5 +1,5 @@ [global] -basepath = "./tmp/.gossamer/westend-local-alice" +basepath = "~/.gossamer/westend-local-alice" log = "info" metrics-address = ":9876" @@ -11,20 +11,21 @@ state = "" runtime = "" babe = "" grandpa = "" -sync = "trace" -digest = "trace" +sync = "" +digest = "" [init] genesis = "./chain/westend-local/westend-local-spec-raw.json" [account] -key = "" +key = "alice" unlock = "" [core] roles = 4 -babe-authority = false -grandpa-authority = false +babe-authority = true +grandpa-authority = true +babe-lead = true [network] port = 7001 @@ -51,7 +52,7 @@ modules = [ ws-port = 8546 [pprof] -enabled = true +enabled = false listening-address = "localhost:6060" block-rate = 0 mutex-rate = 0 diff --git a/chain/westend-local/westend-local-spec-raw.json b/chain/westend-local/westend-local-spec-raw.json index 2e709e3b30..c87badbea1 100644 --- a/chain/westend-local/westend-local-spec-raw.json +++ b/chain/westend-local/westend-local-spec-raw.json @@ -2,10 +2,7 @@ "name": "Westend Local Testnet", "id": "westend_local_testnet", "chainType": "Local", - "bootNodes": [ - "/ip4/127.0.0.1/tcp/30337/p2p/12D3KooWEFF1y4eJ5viWdU7Tn6UtdjVY6LiZfmg6dVdCyDocNc6q", - 
"/ip4/127.0.0.1/tcp/30333/p2p/12D3KooWCS3voVrcMs4xW1qaFQhRpMttncmjFfGCNpDHaNkTkVQk" - ], + "bootNodes": [], "telemetryEndpoints": null, "protocolId": "dot", "properties": null, diff --git a/chain/westend/config.toml b/chain/westend/config.toml index c535c126ff..f9403abcf2 100644 --- a/chain/westend/config.toml +++ b/chain/westend/config.toml @@ -7,12 +7,12 @@ metrics-address = "localhost:9876" core = "" network = "" rpc = "" -state = "debug" +state = "" runtime = "" babe = "" grandpa = "" -sync = "trace" -digest = "trace" +sync = "" +digest = "" [init] genesis = "./chain/westend/genesis.json" diff --git a/docker-compose.yml b/docker-compose.yml index 28ea00d54a..8a300720e2 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -11,50 +11,50 @@ # # To rebuild the Gossamer Docker image: `docker-compose build` -version: "3" +version: '3' services: - gossamer: - image: chainsafe/gossamer - build: . - volumes: - # Remove with: docker volume rm gossamer - - gossamer:/data/gossamer - command: - - --basepath=/data/gossamer - - --chain=kusama - - --log=info - - --publish-metrics - - --metrics-address=:9876 - - --pprofserver - ports: - - 6060:6060/tcp # Pprof server - - 7001:7001/tcp # Network port - - 8545:8545/tcp # RPC HTTP port - - 8546:8546/tcp # RPC Websocket port - expose: - - 9876/tcp # Prometheus metrics for Prometheus server + gossamer: + image: chainsafe/gossamer + build: . + volumes: + # Remove with: docker volume rm gossamer + - gossamer:/data/gossamer + command: + - --basepath=/data/gossamer + - --chain=kusama + - --log=info + - --publish-metrics + - --metrics-address=:9876 + - --pprofserver + ports: + - 6060:6060/tcp # Pprof server + - 7001:7001/tcp # Network port + - 8545:8545/tcp # RPC HTTP port + - 8546:8546/tcp # RPC Websocket port + expose: + - 9876/tcp # Prometheus metrics for Prometheus server - prometheus: - image: prom/prometheus - volumes: - - ./docker/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro - # The following line can be uncommented to persist metrics data. - # - gossamer-prometheus:/prometheus - expose: - - 9090/tcp # Prometheus metrics for Grafana + prometheus: + image: prom/prometheus + volumes: + - ./docker/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro + # The following line can be uncommented to persist metrics data. + # - gossamer-prometheus:/prometheus + expose: + - 9090/tcp # Prometheus metrics for Grafana - grafana: - image: grafana/grafana-oss - ports: - - 3000:3000/tcp # HTTP Web interface at http://localhost:3000/ - volumes: - - ./docker/grafana/grafana.ini:/etc/grafana/grafana.ini:ro - - ./docker/grafana/provisioning:/etc/grafana/provisioning:ro - # The following line can be uncommented to persist modifications. - # - gossamer-grafana:/var/lib/grafana + grafana: + image: grafana/grafana-oss + ports: + - 3000:3000/tcp # HTTP Web interface at http://localhost:3000/ + volumes: + - ./docker/grafana/grafana.ini:/etc/grafana/grafana.ini:ro + - ./docker/grafana/provisioning:/etc/grafana/provisioning:ro + # The following line can be uncommented to persist modifications. 
+ # - gossamer-grafana:/var/lib/grafana volumes: - gossamer: - gossamer-prometheus: - gossamer-grafana: + gossamer: + gossamer-prometheus: + gossamer-grafana: diff --git a/docker/grafana/provisioning/dashboards/gossamer.json b/docker/grafana/provisioning/dashboards/gossamer.json index 1c4ffeaaf5..315f253aff 100644 --- a/docker/grafana/provisioning/dashboards/gossamer.json +++ b/docker/grafana/provisioning/dashboards/gossamer.json @@ -90,7 +90,7 @@ "type": "prometheus", "uid": "prometheus_id" }, - "expr": "go_goroutines{instance=~\".*internal.*\"}", + "expr": "go_goroutines{instance=~\".*gossamer.*\"}", "refId": "A" } ], @@ -148,7 +148,7 @@ "type": "prometheus", "uid": "prometheus_id" }, - "expr": "go_threads{instance=~\".*internal.*\"}", + "expr": "go_threads{instance=~\".*gossamer.*\"}", "refId": "A" } ], @@ -207,7 +207,7 @@ "type": "prometheus", "uid": "prometheus_id" }, - "expr": "process_resident_memory_bytes{instance=~\".*internal.*\"}", + "expr": "process_resident_memory_bytes{instance=~\".*gossamer.*\"}", "refId": "A" } ], @@ -292,7 +292,7 @@ "uid": "prometheus_id" }, "editorMode": "code", - "expr": "process_resident_memory_bytes{instance=~\".*internal.*\"}", + "expr": "process_resident_memory_bytes{instance=~\".*gossamer.*\"}", "legendFormat": "Resident memory", "range": true, "refId": "A" @@ -303,7 +303,7 @@ "uid": "prometheus_id" }, "editorMode": "code", - "expr": "go_memstats_heap_inuse_bytes{instance=~\".*internal.*\"}", + "expr": "go_memstats_heap_inuse_bytes{instance=~\".*gossamer.*\"}", "hide": false, "legendFormat": "Heap", "range": true, @@ -315,7 +315,7 @@ "uid": "prometheus_id" }, "editorMode": "code", - "expr": "go_memstats_stack_inuse_bytes{instance=~\".*internal.*\"}", + "expr": "go_memstats_stack_inuse_bytes{instance=~\".*gossamer.*\"}", "hide": false, "legendFormat": "Stack", "range": true, diff --git a/docker/prometheus/prometheus.yml b/docker/prometheus/prometheus.yml index 7c847ca4df..88d165df8a 100644 --- a/docker/prometheus/prometheus.yml +++ b/docker/prometheus/prometheus.yml @@ -10,4 +10,4 @@ scrape_configs: - job_name: gossamer metrics_path: /metrics static_configs: - - targets: ["host.docker.internal:9876"] + - targets: ["gossamer:9876"] From 16e8ee66c2cc71da6cd054a2ea671fc17ba59b5b Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 26 Apr 2023 12:09:43 -0400 Subject: [PATCH 016/140] chore: rollback modifications to digest pkg --- dot/digest/digest.go | 2 +- dot/digest/digest_integration_test.go | 30 +++++++++++ dot/digest/digest_test.go | 71 --------------------------- go.mod | 1 - go.sum | 2 - 5 files changed, 31 insertions(+), 75 deletions(-) delete mode 100644 dot/digest/digest_test.go diff --git a/dot/digest/digest.go b/dot/digest/digest.go index f6f61d353c..a0fac2c4f6 100644 --- a/dot/digest/digest.go +++ b/dot/digest/digest.go @@ -96,6 +96,7 @@ func (h *Handler) HandleDigests(header *types.Header) error { // toConsensusDigests converts a slice of scale.VaryingDataType to a slice of types.ConsensusDigest. func (h *Handler) toConsensusDigests(scaleVaryingTypes []scale.VaryingDataType) []types.ConsensusDigest { consensusDigests := make([]types.ConsensusDigest, 0, len(scaleVaryingTypes)) + for _, d := range scaleVaryingTypes { digestValue, err := d.Value() if err != nil { @@ -163,7 +164,6 @@ func (h *Handler) handleConsensusDigest(d *types.ConsensusDigest, header *types. 
return err } - fmt.Printf("going to handle: %s\n", data.String()) return h.grandpaState.HandleGRANDPADigest(header, data) case types.BabeEngineID: data := types.NewBabeConsensusDigest() diff --git a/dot/digest/digest_integration_test.go b/dot/digest/digest_integration_test.go index 1ab523d9b7..402014417f 100644 --- a/dot/digest/digest_integration_test.go +++ b/dot/digest/digest_integration_test.go @@ -13,6 +13,7 @@ import ( "github.com/ChainSafe/gossamer/dot/state" "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/internal/log" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/crypto/ed25519" "github.com/ChainSafe/gossamer/lib/crypto/sr25519" @@ -23,6 +24,35 @@ import ( "github.com/stretchr/testify/require" ) +func newTestHandler(t *testing.T) (*Handler, *state.Service) { + testDatadirPath := t.TempDir() + + ctrl := gomock.NewController(t) + telemetryMock := NewMockTelemetry(ctrl) + telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() + + config := state.Config{ + Path: testDatadirPath, + Telemetry: telemetryMock, + } + stateSrvc := state.NewService(config) + stateSrvc.UseMemDB() + + gen, genesisTrie, genesisHeader := newWestendDevGenesisWithTrieAndHeader(t) + err := stateSrvc.Initialise(&gen, &genesisHeader, &genesisTrie) + require.NoError(t, err) + + err = stateSrvc.SetupBase() + require.NoError(t, err) + + err = stateSrvc.Start() + require.NoError(t, err) + + dh, err := NewHandler(log.Critical, stateSrvc.Block, stateSrvc.Epoch, stateSrvc.Grandpa) + require.NoError(t, err) + return dh, stateSrvc +} + func TestHandler_GrandpaScheduledChange(t *testing.T) { handler, _ := newTestHandler(t) handler.Start() diff --git a/dot/digest/digest_test.go b/dot/digest/digest_test.go deleted file mode 100644 index c77ccc8733..0000000000 --- a/dot/digest/digest_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package digest - -import ( - "testing" - - "github.com/ChainSafe/gossamer/dot/state" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/internal/log" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" -) - -func newTestHandler(t *testing.T) (*Handler, *state.Service) { - testDatadirPath := t.TempDir() - - ctrl := gomock.NewController(t) - telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() - - config := state.Config{ - Path: testDatadirPath, - Telemetry: telemetryMock, - } - stateSrvc := state.NewService(config) - stateSrvc.UseMemDB() - - gen, genesisTrie, genesisHeader := newWestendDevGenesisWithTrieAndHeader(t) - err := stateSrvc.Initialise(&gen, &genesisHeader, &genesisTrie) - require.NoError(t, err) - - err = stateSrvc.SetupBase() - require.NoError(t, err) - - err = stateSrvc.Start() - require.NoError(t, err) - - dh, err := NewHandler(log.Critical, stateSrvc.Block, stateSrvc.Epoch, stateSrvc.Grandpa) - require.NoError(t, err) - return dh, stateSrvc -} - -func TestDigestHashes(t *testing.T) { - - babePreRuntimeDigest := types.NewBABEPreRuntimeDigest(common.MustHexToBytes("0x02020000002fe4d90f00000000")) - grandpaConsensus1 := types.ConsensusDigest{ - ConsensusEngineID: types.GrandpaEngineID, - Data: 
common.MustHexToBytes("0x0118a8ddd0891e14725841cd1b5581d23806a97f41c28a25436db6473c86e15dcd4f01000000000000007ca58770eb41c1a68ef77e92255e4635fc11f665cb89aee469e920511c48343a010000000000000074bfb70627416e6e6c4785e928ced384c6c06e5c8dd173a094bc3118da7b673e0100000000000000d455d6778e7100787f0e51e42b86e6e3aac96b1f68aaab59678ab1dd28e5374f0100000000000000a694eb96e1674003ccff3309937bc3ab62ad1a66436f5b1dfad03fc81e8a4f700100000000000000786fc9c50f5d26a2c9f8028fc31f1a447d3425349eb5733550201c68e495a22d01000000000000005eee23b75c97a69e537632302d88870a0f48c05d6a3b11aeb5d3fdf8b579ba79"), - } - grandpaConsensus2 := types.ConsensusDigest{ - ConsensusEngineID: types.GrandpaEngineID, - Data: common.MustHexToBytes("0x02c59e1500189fc415cce1d0b2eed702c9e05f476217d23b46a8723fd56f08cddad650be7c2d0100000000000000feca0be2c87141f6074b221c919c0161a1c468d9173c5c1be59b68fab9a0ff930100000000000000fc9d33059580a69454179ffa41cbae6de2bc8d2bd2c3f1d018fe5484a5a919560100000000000000059ddb0eb77615669a1fc7962bbff119c20c18b58b4922788f842f3cd5b2813a010000000000000007d952daf2d0e2616e5344a6cff989a3fcc5a79a5799198c15ff1c06c51a1280010000000000000065c30e319f817c4392a7c2b98f1585541d53bf8d096bd64033cce6bacbde2952010000000000000005000000"), - } - - digests := types.NewDigest() - err := digests.Add(babePreRuntimeDigest) - require.NoError(t, err) - err = digests.Add(grandpaConsensus1) - require.NoError(t, err) - err = digests.Add(grandpaConsensus2) - require.NoError(t, err) - - header := types.NewHeader(common.Hash{}, common.Hash{}, common.Hash{}, 1, digests) - - handler, _ := newTestHandler(t) - err = handler.HandleDigests(header) - require.NoError(t, err) - - err = handler.grandpaState.ApplyForcedChanges(header) - require.NoError(t, err) -} diff --git a/go.mod b/go.mod index fc1f2ac959..ecc617943f 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,6 @@ require ( github.com/docker/docker v23.0.1+incompatible github.com/ethereum/go-ethereum v1.11.4 github.com/fatih/color v1.15.0 - github.com/gammazero/deque v0.2.1 github.com/go-playground/validator/v10 v10.12.0 github.com/golang/mock v1.6.0 github.com/google/go-cmp v0.5.9 diff --git a/go.sum b/go.sum index a1908a03d3..9ad1e2a273 100644 --- a/go.sum +++ b/go.sum @@ -147,8 +147,6 @@ github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiD github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/gammazero/deque v0.2.1 h1:qSdsbG6pgp6nL7A0+K/B7s12mcCY/5l5SIUpMOl+dC0= -github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= From 3eba164804a7334062aa0ba4c7aac88f6aabbe1d Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 26 Apr 2023 14:18:07 -0400 Subject: [PATCH 017/140] chore: rollback modification to network pkg --- dot/network/service.go | 8 +--- dot/network/sync.go | 61 +----------------------------- dot/network/warp_sync_message.go | 65 -------------------------------- dot/sync/interfaces.go | 2 +- dot/sync/mocks_test.go | 12 +++--- dot/sync/worker_pool.go | 2 +- 6 files changed, 12 insertions(+), 138 deletions(-) delete mode 100644 dot/network/warp_sync_message.go 
diff --git a/dot/network/service.go b/dot/network/service.go index 9d281840e7..85af2c7dc3 100644 --- a/dot/network/service.go +++ b/dot/network/service.go @@ -31,13 +31,11 @@ const ( // the following are sub-protocols used by the node syncID = "/sync/2" - warpSyncID = "/sync/warp" lightID = "/light/2" blockAnnounceID = "/block-announces/1" transactionsID = "/transactions/1" - warpSyncMaxResponseSize = 16 * 1024 * 1024 - maxMessageSize = 1024 * 64 // 64kb for now + maxMessageSize = 1024 * 64 // 64kb for now ) var ( @@ -249,8 +247,6 @@ func (s *Service) Start() error { } s.host.registerStreamHandler(s.host.protocolID+syncID, s.handleSyncStream) - // TODO: enable this protocol to receive request from other nodes - //s.host.registerStreamHandler(s.host.protocolID+warpSync, s.handleSyncStream) s.host.registerStreamHandler(s.host.protocolID+lightID, s.handleLightStream) // register block announce protocol @@ -590,7 +586,7 @@ func (s *Service) NetworkState() common.NetworkState { } } -func (s *Service) TotalConnectedPeers() []peer.ID { +func (s *Service) AllConnectedPeers() []peer.ID { return s.host.p2pHost.Network().Peers() } diff --git a/dot/network/sync.go b/dot/network/sync.go index 9f875991c3..8aeca4c272 100644 --- a/dot/network/sync.go +++ b/dot/network/sync.go @@ -14,37 +14,8 @@ import ( "github.com/libp2p/go-libp2p/core/peer" ) -var ( - blockRequestTimeout = time.Second * 20 -) - -func (s *Service) RequestWarpProof(to peer.ID, request *WarpSyncProofRequestMessage) (warpSyncResponse interface{}, err error) { - legacyWarpSyncID := s.host.protocolID + warpSyncID - - s.host.p2pHost.ConnManager().Protect(to, "") - defer s.host.p2pHost.ConnManager().Unprotect(to, "") - - ctx, cancel := context.WithTimeout(s.ctx, blockRequestTimeout) - defer cancel() - - stream, err := s.host.p2pHost.NewStream(ctx, to, legacyWarpSyncID) - if err != nil { - return nil, err - } - - defer func() { - err := stream.Close() - if err != nil { - logger.Warnf("failed to close stream: %s", err) - } - }() - - if err = s.host.writeToStream(stream, request); err != nil { - return nil, err - } - - return s.handleWarpSyncProofResponse(stream) -} +var blockRequestTimeout = time.Second * 20 +var ErrReceivedEmptyMessage = errors.New("received empty message") // DoBlockRequest sends a request to the given peer. // If a response is received within a certain time period, it is returned, @@ -77,34 +48,6 @@ func (s *Service) DoBlockRequest(to peer.ID, req *BlockRequestMessage) (*BlockRe return s.receiveBlockResponse(stream) } -func (s *Service) handleWarpSyncProofResponse(stream libp2pnetwork.Stream) (interface{}, error) { - s.blockResponseBufMu.Lock() - defer s.blockResponseBufMu.Unlock() - - // TODO: should we create another buffer pool for warp proof response buffers? 
- buf := s.blockResponseBuf - - n, err := readStream(stream, &buf, warpSyncMaxResponseSize) - if err != nil { - return nil, fmt.Errorf("reading warp sync stream: %w", err) - } - - if n == 0 { - return nil, fmt.Errorf("empty warp sync proof") - } - - fmt.Printf("WARP PROOF BYTES ---> %v\n", buf[:n]) - warpProof := new(WarpSyncProofResponse) - err = warpProof.Decode(buf[:n]) - if err != nil { - panic(fmt.Sprintf("failed to decode warp proof: %s", err)) - } - fmt.Printf("WARP PROOF ---> %v\n", warpProof) - return nil, nil -} - -var ErrReceivedEmptyMessage = errors.New("received empty message") - func (s *Service) receiveBlockResponse(stream libp2pnetwork.Stream) (*BlockResponseMessage, error) { // allocating a new (large) buffer every time slows down the syncing by a dramatic amount, // as malloc is one of the most CPU intensive tasks. diff --git a/dot/network/warp_sync_message.go b/dot/network/warp_sync_message.go deleted file mode 100644 index efcc628f57..0000000000 --- a/dot/network/warp_sync_message.go +++ /dev/null @@ -1,65 +0,0 @@ -package network - -import ( - "fmt" - - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/crypto/ed25519" - "github.com/ChainSafe/gossamer/pkg/scale" -) - -type WarpSyncProofRequestMessage struct { - Begin common.Hash -} - -func (w *WarpSyncProofRequestMessage) String() string { - return fmt.Sprintf("WarpSyncProofRequestMessage Begin=%v", w.Begin) -} - -func (w *WarpSyncProofRequestMessage) Encode() ([]byte, error) { - return scale.Marshal(*w) -} - -func (w *WarpSyncProofRequestMessage) Decode(in []byte) error { - panic("not implemented yet") -} - -type Vote struct { - Hash common.Hash - Number uint32 -} - -type SignedVote struct { - Vote Vote - Signature [64]byte - AuthorityID ed25519.PublicKeyBytes -} - -// Commit contains all the signed precommits for a given block -type Commit struct { - Hash common.Hash - Number uint32 - Precommits []SignedVote -} - -// Justification represents a finality justification for a block -type Justification struct { - Round uint64 - Commit Commit -} - -type WarpSyncFragment struct { - Header types.Header - Justification Justification -} - -type WarpSyncProofResponse struct { - Fragments []WarpSyncFragment - IsFinished bool -} - -func (w *WarpSyncProofResponse) Encode() ([]byte, error) { return nil, nil } -func (w *WarpSyncProofResponse) Decode(in []byte) error { - return scale.Unmarshal(in, w) -} diff --git a/dot/sync/interfaces.go b/dot/sync/interfaces.go index 558d7e6afb..8ea8a62a73 100644 --- a/dot/sync/interfaces.go +++ b/dot/sync/interfaces.go @@ -82,7 +82,7 @@ type Network interface { // ReportPeer reports peer based on the peer behaviour. ReportPeer(change peerset.ReputationChange, p peer.ID) - TotalConnectedPeers() []peer.ID + AllConnectedPeers() []peer.ID } // Telemetry is the telemetry client to send telemetry messages. diff --git a/dot/sync/mocks_test.go b/dot/sync/mocks_test.go index f4bbdfa564..b1a9812778 100644 --- a/dot/sync/mocks_test.go +++ b/dot/sync/mocks_test.go @@ -650,16 +650,16 @@ func (mr *MockNetworkMockRecorder) ReportPeer(arg0, arg1 interface{}) *gomock.Ca return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPeer", reflect.TypeOf((*MockNetwork)(nil).ReportPeer), arg0, arg1) } -// TotalConnectedPeers mocks base method. -func (m *MockNetwork) TotalConnectedPeers() []peer.ID { +// AllConnectedPeers mocks base method. 
+func (m *MockNetwork) AllConnectedPeers() []peer.ID { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "TotalConnectedPeers") + ret := m.ctrl.Call(m, "AllConnectedPeers") ret0, _ := ret[0].([]peer.ID) return ret0 } -// TotalConnectedPeers indicates an expected call of TotalConnectedPeers. -func (mr *MockNetworkMockRecorder) TotalConnectedPeers() *gomock.Call { +// AllConnectedPeers indicates an expected call of AllConnectedPeers. +func (mr *MockNetworkMockRecorder) AllConnectedPeers() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TotalConnectedPeers", reflect.TypeOf((*MockNetwork)(nil).TotalConnectedPeers)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllConnectedPeers", reflect.TypeOf((*MockNetwork)(nil).AllConnectedPeers)) } diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 40a821c499..b37108b215 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -47,7 +47,7 @@ func newSyncWorkerPool(net Network) *syncWorkerPool { const ignorePeerTimeout = 2 * time.Minute func (s *syncWorkerPool) useConnectedPeers() { - connectedPeers := s.network.TotalConnectedPeers() + connectedPeers := s.network.AllConnectedPeers() s.l.Lock() defer s.l.Unlock() From c30948af255ed304e4404349059a6ddc026fe79f Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 26 Apr 2023 14:25:10 -0400 Subject: [PATCH 018/140] chore: rollback modifications to auth set changes --- dot/state/grandpa.go | 10 ++-------- dot/state/grandpa_changes.go | 11 +---------- 2 files changed, 3 insertions(+), 18 deletions(-) diff --git a/dot/state/grandpa.go b/dot/state/grandpa.go index 777bcb204c..f88050d53d 100644 --- a/dot/state/grandpa.go +++ b/dot/state/grandpa.go @@ -92,7 +92,6 @@ func (s *GrandpaState) HandleGRANDPADigest(header *types.Header, digest scale.Va case types.GrandpaScheduledChange: return s.addScheduledChange(header, val) case types.GrandpaForcedChange: - fmt.Printf("adding a forced change\n") return s.addForcedChange(header, val) case types.GrandpaOnDisabled: return nil @@ -125,7 +124,6 @@ func (s *GrandpaState) addForcedChange(header *types.Header, fc types.GrandpaFor return fmt.Errorf("cannot import forced change: %w", err) } - fmt.Printf("there are now %d possible forced changes\n", s.forcedChanges.Len()) logger.Debugf("there are now %d possible forced changes", s.forcedChanges.Len()) return nil } @@ -259,12 +257,8 @@ func (s *GrandpaState) ApplyForcedChanges(importedBlockHeader *types.Header) err return fmt.Errorf("cannot set change set id at block") } - logger.Debug("reseting scheduled changes and forced changes") - s.scheduledChangeRoots.reset() - s.forcedChanges.reset() - - logger.Debugf("Applying authority set forced change on block #%d made at block #%d", - importedBlockHeader.Number, forcedChange.announcingHeader.Number) + logger.Debugf("Applying authority set forced change at block #%d", + forcedChange.announcingHeader.Number) return nil } diff --git a/dot/state/grandpa_changes.go b/dot/state/grandpa_changes.go index 425372f55e..e06f1fe0fa 100644 --- a/dot/state/grandpa_changes.go +++ b/dot/state/grandpa_changes.go @@ -132,10 +132,6 @@ func (oc *orderedPendingChanges) pruneChanges(hash common.Hash, isDescendantOf i return nil } -func (oc *orderedPendingChanges) reset() { - *oc = make([]pendingChange, 0) -} - type pendingChangeNode struct { change *pendingChange nodes []*pendingChangeNode @@ -289,8 +285,7 @@ func (ct changeTree) findApplicableChange(hash common.Hash, number uint, } if child.change.announcingHeader.Number 
<= number && isDescendant { - return false, fmt.Errorf("%w: %s (%d)", errUnfinalizedAncestor, - child.change.announcingHeader.Hash(), child.change.announcingHeader.Number) + return false, errUnfinalizedAncestor } } @@ -319,7 +314,3 @@ func (ct *changeTree) pruneChanges(hash common.Hash, isDescendantOf isDescendant *ct = onBranchChanges return nil } - -func (ct *changeTree) reset() { - *ct = []*pendingChangeNode{} -} From 57f433c25c3909dbdc173bb1207033b1bbd7b425 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 26 Apr 2023 15:28:35 -0400 Subject: [PATCH 019/140] chore: remove `internal/sync` pkg --- dot/sync/syncer.go | 7 +----- dot/sync/worker_pool.go | 20 ++++------------- internal/sync/service.go | 47 ---------------------------------------- internal/sync/warp.go | 18 --------------- 4 files changed, 5 insertions(+), 87 deletions(-) delete mode 100644 internal/sync/service.go delete mode 100644 internal/sync/warp.go diff --git a/dot/sync/syncer.go b/dot/sync/syncer.go index 488a03b0fa..3a24891f0c 100644 --- a/dot/sync/syncer.go +++ b/dot/sync/syncer.go @@ -90,12 +90,7 @@ func (s *Service) HandleBlockAnnounceHandshake(from peer.ID, msg *network.BlockA func (s *Service) HandleBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) error { logger.Debug("received BlockAnnounceMessage") header := types.NewHeader(msg.ParentHash, msg.StateRoot, msg.ExtrinsicsRoot, msg.Number, msg.Digest) - err := s.chainSync.setBlockAnnounce(from, header) - if err != nil { - logger.Errorf("setting block announce: %s", err) - } - - return err + return s.chainSync.setBlockAnnounce(from, header) } // IsSynced exposes the synced state diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index b37108b215..121fe0aaa8 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -67,12 +67,9 @@ func (s *syncWorkerPool) useConnectedPeers() { } } - // they are ephemeral because once we reach the tip we - // should remove them and use only peers who send us - // block announcements - ephemeralSyncWorker := newSyncWorker(s.ctx, connectedPeer, common.Hash{}, 0, s.network) - ephemeralSyncWorker.Start(s.taskQueue, &s.wg) - s.workers[connectedPeer] = ephemeralSyncWorker + worker := newSyncWorker(s.ctx, connectedPeer, common.Hash{}, 0, s.network) + worker.Start(s.taskQueue, &s.wg) + s.workers[connectedPeer] = worker } } @@ -100,9 +97,6 @@ func (s *syncWorkerPool) addWorkerFromBlockAnnounce(who peer.ID, bestHash common } func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, resultCh chan<- *syncTaskResult) { - s.l.RLock() - defer s.l.RUnlock() - s.taskQueue <- &syncTask{ request: request, resultCh: resultCh, @@ -141,11 +135,5 @@ func (s *syncWorkerPool) shutdownWorker(who peer.ID, ignore bool) { func (s *syncWorkerPool) totalWorkers() (total uint) { s.l.RLock() defer s.l.RUnlock() - - total = 0 - for range s.workers { - total++ - } - - return total + return uint(len(s.workers)) } diff --git a/internal/sync/service.go b/internal/sync/service.go deleted file mode 100644 index ed1ba4e9d1..0000000000 --- a/internal/sync/service.go +++ /dev/null @@ -1,47 +0,0 @@ -package sync - -import ( - "github.com/ChainSafe/gossamer/dot/network" - "github.com/libp2p/go-libp2p/core/peer" -) - -type Service struct { - blockState interface{} - chainSync interface{} - chainProcessor interface{} - network interface{} - - warpSync *WarpSync -} - -// Start begins the chainSync and chainProcessor modules. 
It begins syncing in bootstrap mode -func (s *Service) Start() error { - go s.warpSync.sync() - return nil -} - -// Stop stops the chainSync and chainProcessor modules -func (s *Service) Stop() error { - return nil -} - -// HandleBlockAnnounceHandshake notifies the `chainSync` module that -// we have received a BlockAnnounceHandshake from the given peer. -func (s *Service) HandleBlockAnnounceHandshake(from peer.ID, msg *network.BlockAnnounceHandshake) error { - return nil -} - -// HandleBlockAnnounce notifies the `chainSync` module that we have received a block announcement from the given peer. -func (s *Service) HandleBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) error { - return nil -} - -// IsSynced exposes the synced state -func (s *Service) IsSynced() bool { - return false -} - -// HighestBlock gets the highest known block number -func (s *Service) HighestBlock() uint { - return 0 -} diff --git a/internal/sync/warp.go b/internal/sync/warp.go deleted file mode 100644 index ffae43347e..0000000000 --- a/internal/sync/warp.go +++ /dev/null @@ -1,18 +0,0 @@ -package sync - -import "time" - -type WarpSync struct { - network interface{} -} - -func (w *WarpSync) sync() { - w.waitForConnections() - -} - -func (w *WarpSync) waitForConnections() { - // TODO: implement actual code to wait - // for the minimal amount of peers - time.Sleep(30 * time.Second) -} From a76c0e93cce3d516747c82601f00ad022bb5b934 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 26 Apr 2023 15:46:50 -0400 Subject: [PATCH 020/140] chore: remove unneeded comments --- dot/sync/syncer.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/dot/sync/syncer.go b/dot/sync/syncer.go index 3a24891f0c..b9f1a02c00 100644 --- a/dot/sync/syncer.go +++ b/dot/sync/syncer.go @@ -69,14 +69,12 @@ func NewService(cfg *Config) (*Service, error) { // Start begins the chainSync and chainProcessor modules. 
It begins syncing in bootstrap mode func (s *Service) Start() error { go s.chainSync.start() - //go s.chainProcessor.processReadyBlocks() return nil } // Stop stops the chainSync and chainProcessor modules func (s *Service) Stop() error { s.chainSync.stop() - //s.chainProcessor.stop() return nil } From 0bd8505f69cfe46cb84fc41627bc8f2d3abea106 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 27 Apr 2023 08:25:18 -0400 Subject: [PATCH 021/140] chore: remove unneeded fields from sync worker --- dot/sync/chain_sync.go | 17 +++++------------ dot/sync/sync_worker.go | 38 ++++++++++++-------------------------- dot/sync/worker_pool.go | 10 ++++------ go.mod | 1 + go.sum | 2 ++ 5 files changed, 24 insertions(+), 44 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index c28a42d1c6..1368fa6be6 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -119,7 +119,6 @@ type chainSync struct { blockState BlockState network Network - // to replace the worker queue workerPool *syncWorkerPool blockAnnounceCh chan announcedBlock @@ -134,17 +133,13 @@ type chainSync struct { pendingBlocks DisjointBlockSet pendingBlockDoneCh chan<- struct{} - // bootstrap or tip (near-head) - - state chainSyncState - + state chainSyncState benchmarker *syncBenchmarker finalisedCh <-chan *types.FinalisationInfo - minPeers int - maxWorkerRetries uint16 - slotDuration time.Duration + minPeers int + slotDuration time.Duration logSyncTicker *time.Ticker logSyncTickerC <-chan time.Time // channel as field for unit testing @@ -196,12 +191,12 @@ func newChainSync(cfg chainSyncConfig) *chainSync { benchmarker: newSyncBenchmarker(syncSamplesToKeep), finalisedCh: cfg.bs.GetFinalisedNotifierChannel(), minPeers: cfg.minPeers, - maxWorkerRetries: uint16(cfg.maxPeers), slotDuration: cfg.slotDuration, logSyncTicker: logSyncTicker, logSyncTickerC: logSyncTicker.C, logSyncDone: make(chan struct{}), workerPool: newSyncWorkerPool(cfg.net), + blockAnnounceCh: make(chan announcedBlock, cfg.maxPeers), } } @@ -224,8 +219,6 @@ func (cs *chainSync) start() { pendingBlockDoneCh := make(chan struct{}) cs.pendingBlockDoneCh = pendingBlockDoneCh - cs.blockAnnounceCh = make(chan announcedBlock, 50) - go cs.pendingBlocks.run(cs.finalisedCh, pendingBlockDoneCh) go cs.sync() cs.logSyncStarted = true @@ -336,7 +329,7 @@ func (cs *chainSync) setBlockAnnounce(who peer.ID, blockAnnounceHeader *types.He // setPeerHead sets a peer's best known block func (cs *chainSync) setPeerHead(who peer.ID, bestHash common.Hash, bestNumber uint) error { - err := cs.workerPool.addWorkerFromBlockAnnounce(who, bestHash, bestNumber) + err := cs.workerPool.addWorkerFromBlockAnnounce(who) if err != nil { logger.Errorf("adding a potential worker: %s", err) } diff --git a/dot/sync/sync_worker.go b/dot/sync/sync_worker.go index 0e28829f91..0b2f7d8a66 100644 --- a/dot/sync/sync_worker.go +++ b/dot/sync/sync_worker.go @@ -4,7 +4,6 @@ import ( "context" "sync" - "github.com/ChainSafe/gossamer/lib/common" "github.com/libp2p/go-libp2p/core/peer" ) @@ -19,33 +18,20 @@ type syncWorker struct { doneCh chan struct{} stopCh chan struct{} - who peer.ID - network Network - bestHash common.Hash - bestNumber uint + who peer.ID + network Network } -func newSyncWorker(ctx context.Context, who peer.ID, - bestHash common.Hash, bestNumber uint, network Network) *syncWorker { +func newSyncWorker(ctx context.Context, who peer.ID, network Network) *syncWorker { return &syncWorker{ - ctx: ctx, - who: who, - bestHash: bestHash, - bestNumber: bestNumber, - network: 
network, - doneCh: make(chan struct{}), - stopCh: make(chan struct{}), + ctx: ctx, + who: who, + network: network, + doneCh: make(chan struct{}), + stopCh: make(chan struct{}), } } -func (s *syncWorker) update(bestHash common.Hash, bestNumber uint) { - s.l.Lock() - defer s.l.Unlock() - - s.bestHash = bestHash - s.bestNumber = bestNumber -} - func (s *syncWorker) Start(tasks chan *syncTask, wg *sync.WaitGroup) { wg.Add(1) @@ -56,7 +42,7 @@ func (s *syncWorker) Start(tasks chan *syncTask, wg *sync.WaitGroup) { logger.Infof("[SHUTDOWN] worker %s", s.who) }() - logger.Infof("worker %s started, waiting for tasks...", s.who) + logger.Debugf("worker %s started, waiting for tasks...", s.who) for { select { @@ -65,13 +51,13 @@ func (s *syncWorker) Start(tasks chan *syncTask, wg *sync.WaitGroup) { case task := <-tasks: request := task.request - logger.Infof("[EXECUTING] worker %s: block request: %s", s.who, request) + logger.Debugf("[EXECUTING] worker %s: block request: %s", s.who, request) response, err := s.network.DoBlockRequest(s.who, request) if err != nil { - logger.Infof("[FINISHED] worker %s: err: %s", s.who, err) + logger.Debugf("[FINISHED] worker %s: err: %s", s.who, err) } else if response != nil { - logger.Infof("[FINISHED] worker %s: block data amount: %d", s.who, len(response.BlockData)) + logger.Debugf("[FINISHED] worker %s: block data amount: %d", s.who, len(response.BlockData)) } task.resultCh <- &syncTaskResult{ diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 121fe0aaa8..eb586ee41c 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -6,7 +6,6 @@ import ( "time" "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/lib/common" "github.com/libp2p/go-libp2p/core/peer" ) @@ -67,13 +66,13 @@ func (s *syncWorkerPool) useConnectedPeers() { } } - worker := newSyncWorker(s.ctx, connectedPeer, common.Hash{}, 0, s.network) + worker := newSyncWorker(s.ctx, connectedPeer, s.network) worker.Start(s.taskQueue, &s.wg) s.workers[connectedPeer] = worker } } -func (s *syncWorkerPool) addWorkerFromBlockAnnounce(who peer.ID, bestHash common.Hash, bestNumber uint) error { +func (s *syncWorkerPool) addWorkerFromBlockAnnounce(who peer.ID) error { s.l.Lock() defer s.l.Unlock() @@ -82,13 +81,12 @@ func (s *syncWorkerPool) addWorkerFromBlockAnnounce(who peer.ID, bestHash common delete(s.ignorePeers, who) } - worker, has := s.workers[who] + _, has = s.workers[who] if has { - worker.update(bestHash, bestNumber) return nil } - syncWorker := newSyncWorker(s.ctx, who, bestHash, bestNumber, s.network) + syncWorker := newSyncWorker(s.ctx, who, s.network) logger.Tracef("potential worker added, total in the pool %d", len(s.workers)) syncWorker.Start(s.taskQueue, &s.wg) diff --git a/go.mod b/go.mod index ecc617943f..245da86cf6 100644 --- a/go.mod +++ b/go.mod @@ -70,6 +70,7 @@ require ( github.com/elastic/gosigar v0.14.2 // indirect github.com/flynn/noise v1.0.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect + github.com/gammazero/deque v0.2.1 // indirect github.com/go-ole/go-ole v1.2.1 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect diff --git a/go.sum b/go.sum index 9ad1e2a273..a1908a03d3 100644 --- a/go.sum +++ b/go.sum @@ -147,6 +147,8 @@ github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiD github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.4.7/go.mod 
h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/gammazero/deque v0.2.1 h1:qSdsbG6pgp6nL7A0+K/B7s12mcCY/5l5SIUpMOl+dC0= +github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= From 5102e62502d4f29a5d9e0ef6db33e16100e08279 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 27 Apr 2023 08:44:13 -0400 Subject: [PATCH 022/140] chore: remove dead code + add errors wrappers --- dot/sync/chain_sync.go | 49 +++++++--------------------------- dot/sync/disjoint_block_set.go | 40 +++------------------------ 2 files changed, 14 insertions(+), 75 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 1368fa6be6..0ae3b2efa8 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -25,12 +25,8 @@ import ( "github.com/ChainSafe/gossamer/lib/common" ) -const ( - // maxWorkers is the maximum number of parallel sync workers - maxWorkers = 12 -) - var _ ChainSync = &chainSync{} +var errUnableToGetTarget = errors.New("unable to get target") type chainSyncState byte @@ -67,28 +63,6 @@ type peerState struct { number uint } -// workHandler handles new potential work (ie. reported peer state, block announces), results from dispatched workers, -// and stored pending work (ie. pending blocks set) -// workHandler should be implemented by `bootstrapSync` and `tipSync` -type workHandler interface { - // handleNewPeerState returns a new worker based on a peerState. - // The worker may be nil in which case we do nothing. - handleNewPeerState(*peerState) (*worker, error) - - // handleWorkerResult handles the result of a worker, which may be - // nil or error. It optionally returns a new worker to be dispatched. - handleWorkerResult(w *worker) (workerToRetry *worker, err error) - - // hasCurrentWorker is called before a worker is to be dispatched to - // check whether it is a duplicate. 
this function returns whether there is - // a worker that covers the scope of the proposed worker; if true, - // ignore the proposed worker - hasCurrentWorker(*worker, map[uint64]*worker) bool - - // handleTick handles a timer tick - handleTick() ([]*worker, error) -} - // ChainSync contains the methods used by the high-level service into the `chainSync` module type ChainSync interface { start() @@ -306,14 +280,14 @@ func (cs *chainSync) setBlockAnnounce(who peer.ID, blockAnnounceHeader *types.He } } - pendingBlock := cs.pendingBlocks.getBlock(blockAnnounceHeaderHash) - if pendingBlock != nil { - return fmt.Errorf("block %s (#%d) in the pending set", + hasPendingBlock := cs.pendingBlocks.hasBlock(blockAnnounceHeaderHash) + if hasPendingBlock { + return fmt.Errorf("block %s (#%d) already in the pending set", blockAnnounceHeaderHash, blockAnnounceHeader.Number) } if err = cs.pendingBlocks.addHeader(blockAnnounceHeader); err != nil { - return err + return fmt.Errorf("adding pending block header: %w", err) } // we assume that if a peer sends us a block announce for a certain block, @@ -421,10 +395,11 @@ func (cs *chainSync) sync() { return } - if cs.state == bootstrap { + switch { + case cs.state == bootstrap: logger.Infof("using bootstrap sync") err = cs.executeBootstrapSync() - } else { + case cs.state == tip: logger.Infof("using tip sync") err = cs.executeTipSync() } @@ -689,8 +664,6 @@ func (cs *chainSync) maybeSwitchMode() error { return nil } -var errUnableToGetTarget = errors.New("unable to get target") - // getTarget takes the average of all peer heads // TODO: should we just return the highest? could be an attack vector potentially, if a peer reports some very large // head block number, it would leave us in bootstrap mode forever @@ -723,7 +696,7 @@ func (cs *chainSync) getTarget() (uint, error) { func (cs *chainSync) handleWorkersResults(workersResults chan *syncTaskResult, startAtBlock uint, totalBlocks uint32, wg *sync.WaitGroup) { defer wg.Done() - logger.Infof("starting handleWorkersResults, waiting %d blocks", totalBlocks) + logger.Debugf("starting handleWorkersResults, waiting %d blocks", totalBlocks) syncingChain := make([]*types.BlockData, totalBlocks) loop: @@ -816,7 +789,7 @@ loop: } } - logger.Infof("synced %d blocks, starting process", len(syncingChain)) + logger.Debugf("synced %d blocks, starting process", totalBlocks) if len(syncingChain) >= 2 { // ensuring the parents are in the right place parentElement := syncingChain[0] @@ -872,8 +845,6 @@ func (cs *chainSync) handleReadyBlock(bd *types.BlockData) error { return nil } - //logger.Tracef("new ready block number %d with hash %s", bd.Header.Number, bd.Hash) - err := cs.processBlockData(*bd) if err != nil { // depending on the error, we might want to save this block for later diff --git a/dot/sync/disjoint_block_set.go b/dot/sync/disjoint_block_set.go index 4a28710bf9..136987b5bd 100644 --- a/dot/sync/disjoint_block_set.go +++ b/dot/sync/disjoint_block_set.go @@ -10,6 +10,7 @@ import ( "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" + "golang.org/x/exp/maps" ) const ( @@ -33,9 +34,9 @@ type DisjointBlockSet interface { addJustification(common.Hash, []byte) error removeBlock(common.Hash) removeLowerBlocks(num uint) - getBlock(common.Hash) *pendingBlock getBlocks() []*pendingBlock - getReadyDescendants(curr common.Hash, ready []*types.BlockData) []*types.BlockData + getBlock(common.Hash) *pendingBlock + hasBlock(common.Hash) bool size() int } @@ -272,12 +273,6 @@ func (s 
*disjointBlockSet) size() int { return len(s.blocks) } -func (s *disjointBlockSet) getChildren(hash common.Hash) map[common.Hash]struct{} { - s.RLock() - defer s.RUnlock() - return s.parentToChildren[hash] -} - func (s *disjointBlockSet) getBlock(hash common.Hash) *pendingBlock { s.RLock() defer s.RUnlock() @@ -288,32 +283,5 @@ func (s *disjointBlockSet) getBlocks() []*pendingBlock { s.RLock() defer s.RUnlock() - blocks := make([]*pendingBlock, len(s.blocks)) - i := 0 - for _, b := range s.blocks { - blocks[i] = b - i++ - } - return blocks -} - -// getReadyDescendants recursively checks for descendants that are ready to be processed -func (s *disjointBlockSet) getReadyDescendants(curr common.Hash, ready []*types.BlockData) []*types.BlockData { - children := s.getChildren(curr) - if len(children) == 0 { - return ready - } - - for c := range children { - b := s.getBlock(c) - if b == nil || b.header == nil || b.body == nil { - continue - } - - // if the entire block's data is known, it's ready! - ready = append(ready, b.toBlockData()) - ready = s.getReadyDescendants(c, ready) - } - - return ready + return maps.Values(s.blocks) } From c13b5978bf4ee9486fbcc40e2a712306b0b6c83f Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 27 Apr 2023 14:41:41 -0400 Subject: [PATCH 023/140] chore: address some points --- dot/sync/chain_sync.go | 37 +++++++++++++++++-------------------- dot/sync/message.go | 6 ++---- dot/sync/requests.go | 21 +++------------------ dot/sync/worker_pool.go | 10 +++++++--- 4 files changed, 29 insertions(+), 45 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 0ae3b2efa8..1cbfad31a5 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -442,7 +442,7 @@ func (cs *chainSync) executeTipSync() error { // if the announced block contains a lower number than our best // block header, let's check if it is greater than our latests - // finalized header, if so this block is likeli to be a fork + // finalized header, if so this block belongs to a fork chain if announcedNumber < bestBlockHeader.Number { highestFinalizedHeader, err := cs.blockState.GetHighestFinalisedHeader() if err != nil { @@ -594,36 +594,37 @@ func (cs *chainSync) executeBootstrapSync() error { // we build the set of requests based on the amount of available peers // in the worker pool, if we have more peers than `maxRequestAllowed` - // so we limit to `maxRequestAllowed` to avoid the error + // so we limit to `maxRequestAllowed` to avoid the error: // cannot reserve outbound connection: resource limit exceeded - availablePeers := cs.workerPool.totalWorkers() - if availablePeers > maxRequestAllowed { - availablePeers = maxRequestAllowed + availableWorkers := cs.workerPool.totalWorkers() + if availableWorkers > maxRequestAllowed { + availableWorkers = maxRequestAllowed } - targetBlockNumber := startRequestAt + uint(availablePeers)*128 - + // targetBlockNumber is the virtual target we will request, however + // we should bound it to the real target which is collected through + // block announces received from other peers + targetBlockNumber := startRequestAt + uint(availableWorkers)*128 realTarget, err := cs.getTarget() if err != nil { return fmt.Errorf("while getting target: %w", err) } if targetBlockNumber > realTarget { + // basically if our virtual target is beyond the real target + // that means we are fell requests far from the tip, then we + // calculate the correct amount of missing requests and then + // change to tip sync which should take care of the rest diff 
:= targetBlockNumber - realTarget
 		numOfRequestsToDrop := (diff / 128) + 1
 		targetBlockNumber = targetBlockNumber - (numOfRequestsToDrop * 128)
 		endBootstrapSync = true
 	}
 
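+	// a hypothetical worked example of the clamp above: with startRequestAt=1
+	// and 10 available workers the virtual target is 1 + 10*128 = 1281; if the
+	// real target is 1000 then diff=281, numOfRequestsToDrop is (281/128)+1 = 3
+	// and the virtual target is lowered to 1281 - 3*128 = 897
+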
-	requests, err := ascedingBlockRequests(
-		startRequestAt, targetBlockNumber, bootstrapRequestData)
-	if err != nil {
-		logger.Errorf("failed to setup ascending block requests: %s", err)
-	}
+	requests := ascedingBlockRequests(startRequestAt, targetBlockNumber, bootstrapRequestData)
+	expectedAmountOfBlocks := uint32(len(requests) * 128)
 
-	expectedAmountOfBlocks := totalRequestedBlocks(requests)
 	wg := sync.WaitGroup{}
-
 	resultsQueue := make(chan *syncTaskResult)
 
 	wg.Add(1)
diff --git a/dot/sync/message.go b/dot/sync/message.go
index 4c1ad50470..41853fe7fd 100644
--- a/dot/sync/message.go
+++ b/dot/sync/message.go
@@ -11,10 +11,8 @@ import (
 	"github.com/ChainSafe/gossamer/lib/common"
 )
 
-const (
-	// maxResponseSize is maximum number of block data a BlockResponse message can contain
-	maxResponseSize = 128
-)
+// maxResponseSize is maximum number of block data a BlockResponse message can contain
+const maxResponseSize = 128
 
 // CreateBlockResponse creates a block response message from a block request message
 func (s *Service) CreateBlockResponse(req *network.BlockRequestMessage) (*network.BlockResponseMessage, error) {
diff --git a/dot/sync/requests.go b/dot/sync/requests.go
index b4c82406a8..069f92cc65 100644
--- a/dot/sync/requests.go
+++ b/dot/sync/requests.go
@@ -25,11 +25,8 @@ func descendingBlockRequest(blockHash common.Hash, amount uint32, requestedData
 	}
 }
 
-func ascedingBlockRequests(startNumber uint, targetNumber uint, requestedData byte) ([]*network.BlockRequestMessage, error) {
+func ascedingBlockRequests(startNumber uint, targetNumber uint, requestedData byte) []*network.BlockRequestMessage {
 	diff := int(targetNumber) - int(startNumber)
-	if diff < 0 {
-		return nil, errInvalidDirection
-	}
 
 	// start and end block are the same, just request 1 block
 	if diff == 0 {
@@ -41,7 +38,7 @@ func ascedingBlockRequests(startNumber uint, targetNumber uint, requestedData by
 			Direction:     network.Ascending,
 			Max:           &one,
 		},
-	}, nil
+	}
 	}
 
 	numRequests := uint(diff) / maxResponseSize
@@ -66,17 +63,5 @@ func ascedingBlockRequests(startNumber uint, targetNumber uint, requestedData by
 		startNumber += uint(max)
 	}
 
-	return reqs, nil
-}
-
-func totalRequestedBlocks(requests []*network.BlockRequestMessage) uint32 {
-	acc := uint32(0)
-
-	for _, request := range requests {
-		if request.Max != nil {
-			acc += *request.Max
-		}
-	}
-
-	return acc
+	return reqs
 }
diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go
index eb586ee41c..3f72e74ce5 100644
--- a/dot/sync/worker_pool.go
+++ b/dot/sync/worker_pool.go
@@ -26,9 +26,13 @@ type syncWorkerPool struct {
 	l  sync.RWMutex
 	wg sync.WaitGroup
 
-	network   Network
-	taskQueue chan *syncTask
-	workers   map[peer.ID]*syncWorker
+	network     Network
+	taskQueue   chan *syncTask
+	workers     map[peer.ID]*syncWorker
+
+	// TODO add this worker in an ignorePeers list, implement some expiration time for
+	// peers added to it (peerJail where peers have a release date and maybe extend the punishment
+	// if they fail again and again, Jimmy's + Diego's idea)
 	ignorePeers map[peer.ID]time.Time
 }

From f0c667fd851e6df4aaf65e85b1c071e1f22baa19 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Tue, 2 May 2023 09:25:00 -0400
Subject: [PATCH 024/140] chore: fix missing blocks in ascedingBlockRequests

---
 dot/sync/disjoint_block_set.go |  2 +-
 dot/sync/requests.go           | 26 +++++++++++++++++++++-----
 2 files changed, 22 insertions(+), 6 deletions(-)

diff --git a/dot/sync/disjoint_block_set.go b/dot/sync/disjoint_block_set.go
index 136987b5bd..538f302ea2 100644
--- a/dot/sync/disjoint_block_set.go
+++ b/dot/sync/disjoint_block_set.go
@@ -34,8 +34,8 @@ type DisjointBlockSet interface {
 	addJustification(common.Hash, []byte) error
 	removeBlock(common.Hash)
 	removeLowerBlocks(num uint)
-	getBlocks() []*pendingBlock
 	getBlock(common.Hash) *pendingBlock
+	getBlocks() []*pendingBlock
 	hasBlock(common.Hash) bool
 	size() int
 }
diff --git a/dot/sync/requests.go b/dot/sync/requests.go
index 069f92cc65..9651419b6a 100644
--- a/dot/sync/requests.go
+++ b/dot/sync/requests.go
@@ -25,8 +25,12 @@ func descendingBlockRequest(blockHash common.Hash, amount uint32, requestedData
 	}
 }
 
-func ascedingBlockRequests(startNumber uint, targetNumber uint, requestedData byte) []*network.BlockRequestMessage {
-	diff := int(targetNumber) - int(startNumber)
+func ascedingBlockRequests(startNumber, targetNumber uint, requestedData byte) []*network.BlockRequestMessage {
+	if startNumber > targetNumber {
+		return []*network.BlockRequestMessage{}
+	}
+
+	diff := targetNumber - startNumber
 
 	// start and end block are the same, just request 1 block
 	if diff == 0 {
@@ -41,17 +45,29 @@ func ascedingBlockRequests(startNumber, targetNumber uint, requestedData byte) [
 		}
 	}
 
-	numRequests := uint(diff) / maxResponseSize
-	if diff%maxResponseSize != 0 {
+	numRequests := diff / maxResponseSize
+	// we should check if the diff is in the maxResponseSize bounds
+	// otherwise we should increase the numRequests by one, take this
+	// example, we want to sync from 0 to 259, the diff is 259
+	// then the num of requests is 2 (uint(259)/uint(128)) however two requests will
+	// retrieve only 256 blocks (each request can retrieve a max of 128 blocks), so we should
+	// create one more request to retrieve those missing blocks, 3 in this example.
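+	// following that example through the code below: missingBlocks is
+	// 259 % 128 = 3, numRequests becomes 3, and only the last request
+	// gets its Max capped to 3 via the lastIteration check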
+ missingBlocks := diff % maxResponseSize + if missingBlocks != 0 { numRequests++ } reqs := make([]*network.BlockRequestMessage, numRequests) - // check if we want to specify a size const max = uint32(maxResponseSize) for i := uint(0); i < numRequests; i++ { max := max + + lastIteration := numRequests - 1 + if i == lastIteration && missingBlocks != 0 { + max = uint32(missingBlocks) + } + start := variadic.MustNewUint32OrHash(startNumber) reqs[i] = &network.BlockRequestMessage{ From 2e640ceaf364e78b2aa8f2d4b19809ceb5d92305 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 2 May 2023 10:35:38 -0400 Subject: [PATCH 025/140] chore: delete chainprocessor.go from merge with development --- dot/sync/chain_processor.go | 300 ------------------------------------ 1 file changed, 300 deletions(-) delete mode 100644 dot/sync/chain_processor.go diff --git a/dot/sync/chain_processor.go b/dot/sync/chain_processor.go deleted file mode 100644 index ef060b6a2b..0000000000 --- a/dot/sync/chain_processor.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "bytes" - "context" - "errors" - "fmt" - - "github.com/ChainSafe/gossamer/dot/telemetry" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/blocktree" -) - -// ChainProcessor processes ready blocks. -// it is implemented by *chainProcessor -type ChainProcessor interface { - processReadyBlocks() - stop() -} - -type chainProcessor struct { - ctx context.Context - cancel context.CancelFunc - - chainSync ChainSync - - // blocks that are ready for processing. ie. their parent is known, or their parent is ahead - // of them within this channel and thus will be processed first - readyBlocks *blockQueue - - // set of block not yet ready to be processed. 
- // blocks are placed here if they fail to be processed due to missing parent block - pendingBlocks DisjointBlockSet - - blockState BlockState - storageState StorageState - transactionState TransactionState - babeVerifier BabeVerifier - finalityGadget FinalityGadget - blockImportHandler BlockImportHandler - telemetry Telemetry -} - -type chainProcessorConfig struct { - readyBlocks *blockQueue - pendingBlocks DisjointBlockSet - syncer ChainSync - blockState BlockState - storageState StorageState - transactionState TransactionState - babeVerifier BabeVerifier - finalityGadget FinalityGadget - blockImportHandler BlockImportHandler - telemetry Telemetry - badBlocks []string -} - -func newChainProcessor(cfg chainProcessorConfig) *chainProcessor { - ctx, cancel := context.WithCancel(context.Background()) - - return &chainProcessor{ - ctx: ctx, - cancel: cancel, - readyBlocks: cfg.readyBlocks, - pendingBlocks: cfg.pendingBlocks, - chainSync: cfg.syncer, - blockState: cfg.blockState, - storageState: cfg.storageState, - transactionState: cfg.transactionState, - babeVerifier: cfg.babeVerifier, - finalityGadget: cfg.finalityGadget, - blockImportHandler: cfg.blockImportHandler, - telemetry: cfg.telemetry, - } -} - -func (s *chainProcessor) stop() { - s.cancel() -} - -func (s *chainProcessor) processReadyBlocks() { - for { - bd, err := s.readyBlocks.pop(s.ctx) - if err != nil { - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return - } - panic(fmt.Sprintf("unhandled error: %s", err)) - } - - if err := s.processBlockData(*bd); err != nil { - // depending on the error, we might want to save this block for later - if !errors.Is(err, errFailedToGetParent) && !errors.Is(err, blocktree.ErrParentNotFound) { - logger.Errorf("block data processing for block with hash %s failed: %s", bd.Hash, err) - continue - } - - logger.Tracef("block data processing for block with hash %s failed: %s", bd.Hash, err) - if err := s.pendingBlocks.addBlock(&types.Block{ - Header: *bd.Header, - Body: *bd.Body, - }); err != nil { - logger.Debugf("failed to re-add block to pending blocks: %s", err) - } - } - } -} - -// processBlockData processes the BlockData from a BlockResponse and -// returns the index of the last BlockData it handled on success, -// or the index of the block data that errored on failure. 
-func (c *chainProcessor) processBlockData(blockData types.BlockData) error { //nolint:revive - logger.Debugf("processing block data with hash %s", blockData.Hash) - - headerInState, err := c.blockState.HasHeader(blockData.Hash) - if err != nil { - return fmt.Errorf("checking if block state has header: %w", err) - } - - bodyInState, err := c.blockState.HasBlockBody(blockData.Hash) - if err != nil { - return fmt.Errorf("checking if block state has body: %w", err) - } - - // while in bootstrap mode we don't need to broadcast block announcements - announceImportedBlock := c.chainSync.syncState() == tip - if headerInState && bodyInState { - err = c.processBlockDataWithStateHeaderAndBody(blockData, announceImportedBlock) - if err != nil { - return fmt.Errorf("processing block data with header and "+ - "body in block state: %w", err) - } - return nil - } - - if blockData.Header != nil { - if blockData.Body != nil { - err = c.processBlockDataWithHeaderAndBody(blockData, announceImportedBlock) - if err != nil { - return fmt.Errorf("processing block data with header and body: %w", err) - } - logger.Debugf("block with hash %s processed", blockData.Hash) - } - - if blockData.Justification != nil && len(*blockData.Justification) > 0 { - err = c.handleJustification(blockData.Header, *blockData.Justification) - if err != nil { - return fmt.Errorf("handling justification: %w", err) - } - } - } - - err = c.blockState.CompareAndSetBlockData(&blockData) - if err != nil { - return fmt.Errorf("comparing and setting block data: %w", err) - } - - return nil -} - -func (c *chainProcessor) processBlockDataWithStateHeaderAndBody(blockData types.BlockData, //nolint:revive - announceImportedBlock bool) (err error) { - // TODO: fix this; sometimes when the node shuts down the "best block" isn't stored properly, - // so when the node restarts it has blocks higher than what it thinks is the best, causing it not to sync - // if we update the node to only store finalised blocks in the database, this should be fixed and the entire - // code block can be removed (#1784) - block, err := c.blockState.GetBlockByHash(blockData.Hash) - if err != nil { - return fmt.Errorf("getting block by hash: %w", err) - } - - err = c.blockState.AddBlockToBlockTree(block) - if errors.Is(err, blocktree.ErrBlockExists) { - logger.Debugf( - "block number %d with hash %s already exists in block tree, skipping it.", - block.Header.Number, blockData.Hash) - return nil - } else if err != nil { - return fmt.Errorf("adding block to blocktree: %w", err) - } - - if blockData.Justification != nil && len(*blockData.Justification) > 0 { - err = c.handleJustification(&block.Header, *blockData.Justification) - if err != nil { - return fmt.Errorf("handling justification: %w", err) - } - } - - // TODO: this is probably unnecessary, since the state is already in the database - // however, this case shouldn't be hit often, since it's only hit if the node state - // is rewinded or if the node shuts down unexpectedly (#1784) - state, err := c.storageState.TrieState(&block.Header.StateRoot) - if err != nil { - return fmt.Errorf("loading trie state: %w", err) - } - - err = c.blockImportHandler.HandleBlockImport(block, state, announceImportedBlock) - if err != nil { - return fmt.Errorf("handling block import: %w", err) - } - - return nil -} - -func (c *chainProcessor) processBlockDataWithHeaderAndBody(blockData types.BlockData, //nolint:revive - announceImportedBlock bool) (err error) { - err = c.babeVerifier.VerifyBlock(blockData.Header) - if err != nil { - return 
fmt.Errorf("babe verifying block: %w", err) - } - - c.handleBody(blockData.Body) - - block := &types.Block{ - Header: *blockData.Header, - Body: *blockData.Body, - } - - err = c.handleBlock(block, announceImportedBlock) - if err != nil { - return fmt.Errorf("handling block: %w", err) - } - - return nil -} - -// handleHeader handles block bodies included in BlockResponses -func (s *chainProcessor) handleBody(body *types.Body) { - for _, ext := range *body { - s.transactionState.RemoveExtrinsic(ext) - } -} - -// handleHeader handles blocks (header+body) included in BlockResponses -func (s *chainProcessor) handleBlock(block *types.Block, announceImportedBlock bool) error { - parent, err := s.blockState.GetHeader(block.Header.ParentHash) - if err != nil { - return fmt.Errorf("%w: %s", errFailedToGetParent, err) - } - - s.storageState.Lock() - defer s.storageState.Unlock() - - ts, err := s.storageState.TrieState(&parent.StateRoot) - if err != nil { - return err - } - - root := ts.MustRoot() - if !bytes.Equal(parent.StateRoot[:], root[:]) { - panic("parent state root does not match snapshot state root") - } - - rt, err := s.blockState.GetRuntime(parent.Hash()) - if err != nil { - return err - } - - rt.SetContextStorage(ts) - - _, err = rt.ExecuteBlock(block) - if err != nil { - return fmt.Errorf("failed to execute block %d: %w", block.Header.Number, err) - } - - if err = s.blockImportHandler.HandleBlockImport(block, ts, announceImportedBlock); err != nil { - return err - } - - logger.Debugf("🔗 imported block number %d with hash %s", block.Header.Number, block.Header.Hash()) - - blockHash := block.Header.Hash() - s.telemetry.SendMessage(telemetry.NewBlockImport( - &blockHash, - block.Header.Number, - "NetworkInitialSync")) - - return nil -} - -func (s *chainProcessor) handleJustification(header *types.Header, justification []byte) (err error) { - logger.Debugf("handling justification for block %d...", header.Number) - - headerHash := header.Hash() - err = s.finalityGadget.VerifyBlockJustification(headerHash, justification) - if err != nil { - return fmt.Errorf("verifying block number %d justification: %w", header.Number, err) - } - - err = s.blockState.SetJustification(headerHash, justification) - if err != nil { - return fmt.Errorf("setting justification for block number %d: %w", header.Number, err) - } - - logger.Infof("🔨 finalised block number %d with hash %s", header.Number, headerHash) - return nil -} From 683e33166e8d3d740b1982be7edae4cb777fe13f Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 3 May 2023 08:41:17 -0400 Subject: [PATCH 026/140] chore: impl wait/release while validating a worker result --- dot/sync/chain_sync.go | 55 +++++----- dot/sync/chain_sync_test.go | 86 +++++++-------- dot/sync/errors.go | 1 + dot/sync/mock_chain_sync_test.go | 82 --------------- dot/sync/mock_disjoint_block_set_test.go | 22 ++-- dot/sync/mocks_generate_test.go | 3 +- dot/sync/mocks_test.go | 28 ++--- dot/sync/requests.go | 3 +- dot/sync/requests_test.go | 128 +++++++++++++++++++++++ dot/sync/sync_worker.go | 69 +++++++----- dot/sync/worker.go | 95 ----------------- dot/sync/worker_pool.go | 35 ++++--- 12 files changed, 290 insertions(+), 317 deletions(-) create mode 100644 dot/sync/requests_test.go delete mode 100644 dot/sync/worker.go diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index c34e41d81d..ea54f104d7 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -27,7 +27,6 @@ import ( ) var _ ChainSync = &chainSync{} -var errUnableToGetTarget = 
errors.New("unable to get target")
 
 type chainSyncState byte
 
@@ -57,8 +56,8 @@ var (
 	})
 )
 
-// peerState tracks our peers's best reported blocks
-type peerState struct {
+// peerView tracks our peers' best reported blocks
+type peerView struct {
 	who    peer.ID
 	hash   common.Hash
 	number uint
@@ -99,8 +98,8 @@ type chainSync struct {
 
 	// tracks the latest state we know of from our peers,
 	// ie. their best block hash and number
-	peerStateLock sync.RWMutex
-	peerState     map[peer.ID]*peerState
+	peerViewLock sync.RWMutex
+	peerView     map[peer.ID]*peerView
 
 	// disjoint set of blocks which are known but not ready to be processed
 	// ie. we only know the hash, number, or the parent block is unknown, or the body is unknown
@@ -162,7 +161,7 @@ func newChainSync(cfg chainSyncConfig) *chainSync {
 		cancel:             cancel,
 		blockState:         cfg.bs,
 		network:            cfg.net,
-		peerState:          make(map[peer.ID]*peerState),
+		peerView:           make(map[peer.ID]*peerView),
 		pendingBlocks:      cfg.pendingBlocks,
 		state:              bootstrap,
 		benchmarker:        newSyncBenchmarker(syncSamplesToKeep),
@@ -307,15 +306,15 @@ func (cs *chainSync) setBlockAnnounce(who peer.ID, blockAnnounceHeader *types.He
 
 // setPeerHead sets a peer's best known block
 func (cs *chainSync) setPeerHead(who peer.ID, bestHash common.Hash, bestNumber uint) error {
-	err := cs.workerPool.addWorkerFromBlockAnnounce(who)
+	err := cs.workerPool.fromBlockAnnounce(who)
 	if err != nil {
 		logger.Errorf("adding a potential worker: %s", err)
 	}
 
-	cs.peerStateLock.Lock()
-	defer cs.peerStateLock.Unlock()
+	cs.peerViewLock.Lock()
+	defer cs.peerViewLock.Unlock()
 
-	cs.peerState[who] = &peerState{
+	cs.peerView[who] = &peerView{
 		who:    who,
 		hash:   bestHash,
 		number: bestNumber,
@@ -616,7 +615,7 @@ func (cs *chainSync) executeBootstrapSync() error {
 
 	if targetBlockNumber > realTarget {
 		// basically if our virtual target is beyond the real target
-		// that means we are fell requests far from the tip, then we
+		// that means we are few requests far from the tip, then we
 		// calculate the correct amount of missing requests and then
 		// change to tip sync which should take care of the rest
 		diff := targetBlockNumber - realTarget
@@ -674,18 +673,18 @@ func (cs *chainSync) maybeSwitchMode() error {
 	return nil
 }
 
 // getTarget takes the average of all peer heads
 // TODO: should we just return the highest? could be an attack vector potentially, if a peer reports some very large
 // head block number, it would leave us in bootstrap mode forever
 // it would be better to have some sort of standard deviation calculation and discard any outliers (#1861)
 func (cs *chainSync) getTarget() (uint, error) {
-	cs.peerStateLock.RLock()
-	defer cs.peerStateLock.RUnlock()
+	cs.peerViewLock.RLock()
+	defer cs.peerViewLock.RUnlock()
 
 	// in practice, this shouldn't happen, as we only start the module once we have some peer states
-	if len(cs.peerState) == 0 {
+	if len(cs.peerView) == 0 {
 		// return max uint32 instead of 0, as returning 0 would switch us to tip mode unexpectedly
 		return 0, errUnableToGetTarget
 	}
 
 	// we are going to sort the data and remove the outliers then we will return the avg of all the valid elements
-	uintArr := make([]uint, 0, len(cs.peerState))
-	for _, ps := range cs.peerState {
+	uintArr := make([]uint, 0, len(cs.peerView))
+	for _, ps := range cs.peerView {
 		uintArr = append(uintArr, ps.number)
 	}
@@ -701,7 +700,7 @@ func (cs *chainSync) handleWorkersResults(workersResults chan *syncTaskResult,
 	startAtBlock uint, totalBlocks uint32, wg *sync.WaitGroup) {
 	defer wg.Done()
 
-	logger.Debugf("starting handleWorkersResults, waiting %d blocks", totalBlocks)
+	logger.Debugf("handling workers results, waiting for %d blocks", totalBlocks)
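+	// a failed task below is retried: its worker is shut down (and ignored
+	// by the pool) unless the error was just an empty message, and the
+	// request is resubmitted either way; a worker that responded is released
+	// back to the pool while its response is validated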
 	syncingChain := make([]*types.BlockData, totalBlocks)
 
 loop:
@@ -722,18 +721,17 @@ loop:
 				<-idleTimer.C
 			}
 
-			logger.Infof("task result: peer(%s), error: %v, hasResponse: %v",
+			logger.Debugf("task result: peer(%s), with error: %v, with response: %v",
 				taskResult.who, taskResult.err != nil, taskResult.response != nil)
 
 			if taskResult.err != nil {
-				logger.Criticalf("task result error: %s", taskResult.err)
+				logger.Errorf("task result: peer(%s) error: %s",
+					taskResult.who, taskResult.err)
 
-				if errors.Is(taskResult.err, network.ErrReceivedEmptyMessage) {
-					cs.workerPool.submitRequest(taskResult.request, workersResults)
-					continue
+				// if we receive an empty message from the stream we don't need to shut down the worker
+				if !errors.Is(taskResult.err, network.ErrReceivedEmptyMessage) {
+					cs.workerPool.shutdownWorker(taskResult.who, true)
 				}
-
-				cs.workerPool.shutdownWorker(taskResult.who, true)
 				cs.workerPool.submitRequest(taskResult.request, workersResults)
 				continue
 			}
@@ -765,6 +763,7 @@ loop:
 				continue
 			}
 
+			cs.workerPool.releaseWorker(who)
 			if len(response.BlockData) > 0 {
 				firstBlockInResponse := response.BlockData[0]
 				lastBlockInResponse := response.BlockData[len(response.BlockData)-1]
@@ -1166,14 +1165,14 @@ func (cs *chainSync) validateJustification(bd *types.BlockData) error {
 }
 
 func (cs *chainSync) getHighestBlock() (highestBlock uint, err error) {
-	cs.peerStateLock.RLock()
-	defer cs.peerStateLock.RUnlock()
+	cs.peerViewLock.RLock()
+	defer cs.peerViewLock.RUnlock()
 
-	if len(cs.peerState) == 0 {
+	if len(cs.peerView) == 0 {
 		return 0, errNoPeers
 	}
 
-	for _, ps := range cs.peerState {
+	for _, ps := range cs.peerView {
 		if ps.number < highestBlock {
 			continue
 		}
diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go
index 1633371d4c..c2d6d7234e 100644
--- a/dot/sync/chain_sync_test.go
+++ b/dot/sync/chain_sync_test.go
@@ -4,6 +4,8 @@
 package sync
 
 import (
+	"time"
+
 	"context"
 	"errors"
 	"testing"
@@ -72,15 +74,15 @@ func Test_chainSync_setPeerHead(t *testing.T) {
 		number                    uint
 		errWrapped                error
 		errMessage                string
-		expectedPeerIDToPeerState map[peer.ID]*peerState
-		expectedQueuedPeerStates  []*peerState
+		expectedPeerIDToPeerState map[peer.ID]*peerView
+		expectedQueuedPeerStates  []*peerView
 	}{
 		"best_block_header_error": {
 			chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync {
 				blockState := NewMockBlockState(ctrl)
 				blockState.EXPECT().BestBlockHeader().Return(nil, errTest)
 				return &chainSync{
-					peerState:  map[peer.ID]*peerState{},
+					peerView:   map[peer.ID]*peerView{},
 					blockState: blockState,
 				}
 			},
@@ -89,7 +91,7 @@ func Test_chainSync_setPeerHead(t *testing.T) {
 			number:     1,
 			errWrapped: errTest,
 			errMessage: "best block header: test error",
-			expectedPeerIDToPeerState: map[peer.ID]*peerState{
+			expectedPeerIDToPeerState: map[peer.ID]*peerView{
 				somePeer: {
 					who:    somePeer,
 					hash:   someHash,
@@ -105,7 +107,7 @@ func Test_chainSync_setPeerHead(t *testing.T) {
 				blockState.EXPECT().GetHashByNumber(uint(1)).
Return(common.Hash{}, errTest) return &chainSync{ - peerState: map[peer.ID]*peerState{}, + peerView: map[peer.ID]*peerView{}, blockState: blockState, } }, @@ -114,7 +116,7 @@ func Test_chainSync_setPeerHead(t *testing.T) { number: 1, errWrapped: errTest, errMessage: "get block hash by number: test error", - expectedPeerIDToPeerState: map[peer.ID]*peerState{ + expectedPeerIDToPeerState: map[peer.ID]*peerView{ somePeer: { who: somePeer, hash: someHash, @@ -129,14 +131,14 @@ func Test_chainSync_setPeerHead(t *testing.T) { blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) blockState.EXPECT().GetHashByNumber(uint(1)).Return(someHash, nil) return &chainSync{ - peerState: map[peer.ID]*peerState{}, + peerView: map[peer.ID]*peerView{}, blockState: blockState, } }, peerID: somePeer, hash: someHash, number: 1, - expectedPeerIDToPeerState: map[peer.ID]*peerState{ + expectedPeerIDToPeerState: map[peer.ID]*peerView{ somePeer: { who: somePeer, hash: someHash, @@ -153,7 +155,7 @@ func Test_chainSync_setPeerHead(t *testing.T) { Return(common.Hash{2}, nil) // other hash than someHash blockState.EXPECT().GetHighestFinalisedHeader().Return(nil, errTest) return &chainSync{ - peerState: map[peer.ID]*peerState{}, + peerView: map[peer.ID]*peerView{}, blockState: blockState, } }, @@ -162,7 +164,7 @@ func Test_chainSync_setPeerHead(t *testing.T) { number: 1, errWrapped: errTest, errMessage: "get highest finalised header: test error", - expectedPeerIDToPeerState: map[peer.ID]*peerState{ + expectedPeerIDToPeerState: map[peer.ID]*peerView{ somePeer: { who: somePeer, hash: someHash, @@ -185,7 +187,7 @@ func Test_chainSync_setPeerHead(t *testing.T) { Reason: peerset.BadBlockAnnouncementReason, }, somePeer) return &chainSync{ - peerState: map[peer.ID]*peerState{}, + peerView: map[peer.ID]*peerView{}, blockState: blockState, network: network, } @@ -195,7 +197,7 @@ func Test_chainSync_setPeerHead(t *testing.T) { number: 1, errWrapped: errPeerOnInvalidFork, errMessage: "peer is on an invalid fork: for peer ZiCa and block number 1", - expectedPeerIDToPeerState: map[peer.ID]*peerState{ + expectedPeerIDToPeerState: map[peer.ID]*peerView{ somePeer: { who: somePeer, hash: someHash, @@ -218,7 +220,7 @@ func Test_chainSync_setPeerHead(t *testing.T) { Reason: peerset.BadBlockAnnouncementReason, }, somePeer) return &chainSync{ - peerState: map[peer.ID]*peerState{}, + peerView: map[peer.ID]*peerView{}, blockState: blockState, network: network, } @@ -228,7 +230,7 @@ func Test_chainSync_setPeerHead(t *testing.T) { number: 1, errWrapped: errPeerOnInvalidFork, errMessage: "peer is on an invalid fork: for peer ZiCa and block number 1", - expectedPeerIDToPeerState: map[peer.ID]*peerState{ + expectedPeerIDToPeerState: map[peer.ID]*peerView{ somePeer: { who: somePeer, hash: someHash, @@ -249,7 +251,7 @@ func Test_chainSync_setPeerHead(t *testing.T) { blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) blockState.EXPECT().HasHeader(someHash).Return(false, errTest) return &chainSync{ - peerState: map[peer.ID]*peerState{}, + peerView: map[peer.ID]*peerView{}, blockState: blockState, } }, @@ -258,7 +260,7 @@ func Test_chainSync_setPeerHead(t *testing.T) { number: 2, errWrapped: errTest, errMessage: "has header: test error", - expectedPeerIDToPeerState: map[peer.ID]*peerState{ + expectedPeerIDToPeerState: map[peer.ID]*peerView{ somePeer: { who: somePeer, hash: someHash, @@ -279,14 +281,14 @@ func Test_chainSync_setPeerHead(t *testing.T) { 
blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) blockState.EXPECT().HasHeader(someHash).Return(true, nil) return &chainSync{ - peerState: map[peer.ID]*peerState{}, + peerView: map[peer.ID]*peerView{}, blockState: blockState, } }, peerID: somePeer, hash: someHash, number: 2, - expectedPeerIDToPeerState: map[peer.ID]*peerState{ + expectedPeerIDToPeerState: map[peer.ID]*peerView{ somePeer: { who: somePeer, hash: someHash, @@ -303,7 +305,7 @@ func Test_chainSync_setPeerHead(t *testing.T) { pendingBlocks.EXPECT().addHashAndNumber(someHash, uint(2)). Return(errTest) return &chainSync{ - peerState: map[peer.ID]*peerState{}, + peerView: map[peer.ID]*peerView{}, blockState: blockState, pendingBlocks: pendingBlocks, } @@ -313,7 +315,7 @@ func Test_chainSync_setPeerHead(t *testing.T) { number: 2, errWrapped: errTest, errMessage: "add hash and number: test error", - expectedPeerIDToPeerState: map[peer.ID]*peerState{ + expectedPeerIDToPeerState: map[peer.ID]*peerView{ somePeer: { who: somePeer, hash: someHash, @@ -330,25 +332,25 @@ func Test_chainSync_setPeerHead(t *testing.T) { pendingBlocks.EXPECT().addHashAndNumber(someHash, uint(2)). Return(nil) return &chainSync{ - peerState: map[peer.ID]*peerState{}, + peerView: map[peer.ID]*peerView{}, blockState: blockState, pendingBlocks: pendingBlocks, // buffered of 1 so setPeerHead can write to it // without a consumer of the channel on the other end. - workQueue: make(chan *peerState, 1), + workQueue: make(chan *peerView, 1), } }, peerID: somePeer, hash: someHash, number: 2, - expectedPeerIDToPeerState: map[peer.ID]*peerState{ + expectedPeerIDToPeerState: map[peer.ID]*peerView{ somePeer: { who: somePeer, hash: someHash, number: 2, }, }, - expectedQueuedPeerStates: []*peerState{ + expectedQueuedPeerStates: []*peerView{ { who: somePeer, hash: someHash, @@ -372,7 +374,7 @@ func Test_chainSync_setPeerHead(t *testing.T) { if testCase.errWrapped != nil { assert.EqualError(t, err, testCase.errMessage) } - assert.Equal(t, testCase.expectedPeerIDToPeerState, chainSync.peerState) + assert.Equal(t, testCase.expectedPeerIDToPeerState, chainSync.peerView) require.Equal(t, len(testCase.expectedQueuedPeerStates), len(chainSync.workQueue)) for _, expectedPeerState := range testCase.expectedQueuedPeerStates { @@ -410,11 +412,11 @@ func TestChainSync_sync_bootstrap_withWorkerError(t *testing.T) { defer cs.cancel() testPeer := peer.ID("noot") - cs.peerState[testPeer] = &peerState{ + cs.peerView[testPeer] = &peerView{ number: 1000, } - cs.workQueue <- cs.peerState[testPeer] + cs.workQueue <- cs.peerView[testPeer] select { case res := <-cs.resultQueue: @@ -452,11 +454,11 @@ func TestChainSync_sync_tip(t *testing.T) { defer cs.cancel() testPeer := peer.ID("noot") - cs.peerState[testPeer] = &peerState{ + cs.peerView[testPeer] = &peerView{ number: 999, } - cs.workQueue <- cs.peerState[testPeer] + cs.workQueue <- cs.peerView[testPeer] <-done require.Equal(t, tip, cs.state) } @@ -465,7 +467,7 @@ func TestChainSync_getTarget(t *testing.T) { ctrl := gomock.NewController(t) cs := newTestChainSync(ctrl) require.Equal(t, uint(1<<32-1), cs.getTarget()) - cs.peerState = map[peer.ID]*peerState{ + cs.peerView = map[peer.ID]*peerView{ "a": { number: 0, // outlier }, @@ -491,7 +493,7 @@ func TestChainSync_getTarget(t *testing.T) { require.Equal(t, uint(130), cs.getTarget()) // sum:650/count:5= avg:130 - cs.peerState = map[peer.ID]*peerState{ + cs.peerView = map[peer.ID]*peerView{ "testA": { number: 1000, }, @@ -924,7 +926,7 @@ func TestChainSync_doSync(t 
*testing.T) { require.NotNil(t, workerErr) require.Equal(t, errNoPeers, workerErr.err) - cs.peerState["noot"] = &peerState{ + cs.peerView["noot"] = &peerView{ number: 100, } @@ -1096,10 +1098,10 @@ func TestChainSync_determineSyncPeers(t *testing.T) { peersTried := make(map[peer.ID]struct{}) // test base case - cs.peerState[testPeerA] = &peerState{ + cs.peerView[testPeerA] = &peerView{ number: 129, } - cs.peerState[testPeerB] = &peerState{ + cs.peerView[testPeerB] = &peerView{ number: 257, } @@ -1347,12 +1349,12 @@ func Test_chainSync_setBlockAnnounce(t *testing.T) { return chainSync{ blockState: mockBlockState, pendingBlocks: mockDisjointBlockSet, - peerState: make(map[peer.ID]*peerState), + peerView: make(map[peer.ID]*peerView), // creating an buffered channel for this specific test // since it will put a work on the queue and an unbufered channel // will hang until we read on this channel and the goal is to // put the work on the channel and don't block - workQueue: make(chan *peerState, 1), + workQueue: make(chan *peerView, 1), } }, }, @@ -1382,7 +1384,7 @@ func Test_chainSync_getHighestBlock(t *testing.T) { tests := []struct { name string - peerState map[peer.ID]*peerState + peerState map[peer.ID]*peerView wantHighestBlock uint expectedError error }{ @@ -1392,7 +1394,7 @@ func Test_chainSync_getHighestBlock(t *testing.T) { }, { name: "base case", - peerState: map[peer.ID]*peerState{"1": {number: 2}}, + peerState: map[peer.ID]*peerView{"1": {number: 2}}, wantHighestBlock: 2, }, } @@ -1401,7 +1403,7 @@ func Test_chainSync_getHighestBlock(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() cs := &chainSync{ - peerState: tt.peerState, + peerView: tt.peerState, } gotHighestBlock, err := cs.getHighestBlock() if tt.expectedError != nil { @@ -1622,13 +1624,12 @@ func Test_chainSync_handleResult(t *testing.T) { } } -func newTestChainSyncWithReadyBlocks(ctrl *gomock.Controller, readyBlocks *blockQueue) *chainSync { +func newTestChainSyncWithReadyBlocks(ctrl *gomock.Controller) *chainSync { mockBlockState := NewMockBlockState(ctrl) mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) cfg := chainSyncConfig{ bs: mockBlockState, - readyBlocks: readyBlocks, pendingBlocks: newDisjointBlockSet(pendingBlocksLimit), minPeers: 1, maxPeers: 5, @@ -1639,6 +1640,5 @@ func newTestChainSyncWithReadyBlocks(ctrl *gomock.Controller, readyBlocks *block } func newTestChainSync(ctrl *gomock.Controller) *chainSync { - readyBlocks := newBlockQueue(maxResponseSize) - return newTestChainSyncWithReadyBlocks(ctrl, readyBlocks) + return newTestChainSyncWithReadyBlocks(ctrl) } diff --git a/dot/sync/errors.go b/dot/sync/errors.go index 1a90802d68..00b304a640 100644 --- a/dot/sync/errors.go +++ b/dot/sync/errors.go @@ -20,6 +20,7 @@ var ( errRequestStartTooHigh = errors.New("request start number is higher than our best block") // chainSync errors + errUnableToGetTarget = errors.New("unable to get target") errEmptyBlockData = errors.New("empty block data") errNilBlockData = errors.New("block data is nil") errNilResponse = errors.New("block response is nil") diff --git a/dot/sync/mock_chain_sync_test.go b/dot/sync/mock_chain_sync_test.go index f89250c252..c681244882 100644 --- a/dot/sync/mock_chain_sync_test.go +++ b/dot/sync/mock_chain_sync_test.go @@ -13,88 +13,6 @@ import ( peer "github.com/libp2p/go-libp2p/core/peer" ) -// MockworkHandler is a mock of workHandler interface. 
-type MockworkHandler struct { - ctrl *gomock.Controller - recorder *MockworkHandlerMockRecorder -} - -// MockworkHandlerMockRecorder is the mock recorder for MockworkHandler. -type MockworkHandlerMockRecorder struct { - mock *MockworkHandler -} - -// NewMockworkHandler creates a new mock instance. -func NewMockworkHandler(ctrl *gomock.Controller) *MockworkHandler { - mock := &MockworkHandler{ctrl: ctrl} - mock.recorder = &MockworkHandlerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockworkHandler) EXPECT() *MockworkHandlerMockRecorder { - return m.recorder -} - -// handleNewPeerState mocks base method. -func (m *MockworkHandler) handleNewPeerState(arg0 *peerState) (*worker, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "handleNewPeerState", arg0) - ret0, _ := ret[0].(*worker) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// handleNewPeerState indicates an expected call of handleNewPeerState. -func (mr *MockworkHandlerMockRecorder) handleNewPeerState(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "handleNewPeerState", reflect.TypeOf((*MockworkHandler)(nil).handleNewPeerState), arg0) -} - -// handleTick mocks base method. -func (m *MockworkHandler) handleTick() ([]*worker, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "handleTick") - ret0, _ := ret[0].([]*worker) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// handleTick indicates an expected call of handleTick. -func (mr *MockworkHandlerMockRecorder) handleTick() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "handleTick", reflect.TypeOf((*MockworkHandler)(nil).handleTick)) -} - -// handleWorkerResult mocks base method. -func (m *MockworkHandler) handleWorkerResult(w *worker) (*worker, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "handleWorkerResult", w) - ret0, _ := ret[0].(*worker) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// handleWorkerResult indicates an expected call of handleWorkerResult. -func (mr *MockworkHandlerMockRecorder) handleWorkerResult(w interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "handleWorkerResult", reflect.TypeOf((*MockworkHandler)(nil).handleWorkerResult), w) -} - -// hasCurrentWorker mocks base method. -func (m *MockworkHandler) hasCurrentWorker(arg0 *worker, arg1 map[uint64]*worker) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "hasCurrentWorker", arg0, arg1) - ret0, _ := ret[0].(bool) - return ret0 -} - -// hasCurrentWorker indicates an expected call of hasCurrentWorker. -func (mr *MockworkHandlerMockRecorder) hasCurrentWorker(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "hasCurrentWorker", reflect.TypeOf((*MockworkHandler)(nil).hasCurrentWorker), arg0, arg1) -} - // MockChainSync is a mock of ChainSync interface. 
type MockChainSync struct { ctrl *gomock.Controller diff --git a/dot/sync/mock_disjoint_block_set_test.go b/dot/sync/mock_disjoint_block_set_test.go index 07b5578dd9..d26ef0644a 100644 --- a/dot/sync/mock_disjoint_block_set_test.go +++ b/dot/sync/mock_disjoint_block_set_test.go @@ -119,18 +119,18 @@ func (mr *MockDisjointBlockSetMockRecorder) getBlocks() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getBlocks", reflect.TypeOf((*MockDisjointBlockSet)(nil).getBlocks)) } -// getReadyDescendants mocks base method. -func (m *MockDisjointBlockSet) getReadyDescendants(arg0 common.Hash, arg1 []*types.BlockData) []*types.BlockData { +// hasBlock mocks base method. +func (m *MockDisjointBlockSet) hasBlock(arg0 common.Hash) bool { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "getReadyDescendants", arg0, arg1) - ret0, _ := ret[0].([]*types.BlockData) + ret := m.ctrl.Call(m, "hasBlock", arg0) + ret0, _ := ret[0].(bool) return ret0 } -// getReadyDescendants indicates an expected call of getReadyDescendants. -func (mr *MockDisjointBlockSetMockRecorder) getReadyDescendants(arg0, arg1 interface{}) *gomock.Call { +// hasBlock indicates an expected call of hasBlock. +func (mr *MockDisjointBlockSetMockRecorder) hasBlock(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getReadyDescendants", reflect.TypeOf((*MockDisjointBlockSet)(nil).getReadyDescendants), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "hasBlock", reflect.TypeOf((*MockDisjointBlockSet)(nil).hasBlock), arg0) } // removeBlock mocks base method. @@ -158,15 +158,15 @@ func (mr *MockDisjointBlockSetMockRecorder) removeLowerBlocks(arg0 interface{}) } // run mocks base method. -func (m *MockDisjointBlockSet) run(arg0 <-chan struct{}) { +func (m *MockDisjointBlockSet) run(arg0 <-chan *types.FinalisationInfo, arg1 <-chan struct{}) { m.ctrl.T.Helper() - m.ctrl.Call(m, "run", arg0) + m.ctrl.Call(m, "run", arg0, arg1) } // run indicates an expected call of run. -func (mr *MockDisjointBlockSetMockRecorder) run(arg0 interface{}) *gomock.Call { +func (mr *MockDisjointBlockSetMockRecorder) run(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "run", reflect.TypeOf((*MockDisjointBlockSet)(nil).run), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "run", reflect.TypeOf((*MockDisjointBlockSet)(nil).run), arg0, arg1) } // size mocks base method. diff --git a/dot/sync/mocks_generate_test.go b/dot/sync/mocks_generate_test.go index 0889fd2845..f7015667b4 100644 --- a/dot/sync/mocks_generate_test.go +++ b/dot/sync/mocks_generate_test.go @@ -6,6 +6,5 @@ package sync //go:generate mockgen -destination=mocks_test.go -package=$GOPACKAGE . BlockState,StorageState,TransactionState,BabeVerifier,FinalityGadget,BlockImportHandler,Network //go:generate mockgen -destination=mock_telemetry_test.go -package $GOPACKAGE . Telemetry //go:generate mockgen -destination=mock_runtime_test.go -package $GOPACKAGE github.com/ChainSafe/gossamer/lib/runtime Instance -//go:generate mockgen -destination=mock_chain_processor_test.go -package=$GOPACKAGE . ChainProcessor -//go:generate mockgen -destination=mock_chain_sync_test.go -package $GOPACKAGE -source chain_sync.go . ChainSync,workHandler +//go:generate mockgen -destination=mock_chain_sync_test.go -package $GOPACKAGE -source chain_sync.go . ChainSync //go:generate mockgen -destination=mock_disjoint_block_set_test.go -package=$GOPACKAGE . 
DisjointBlockSet diff --git a/dot/sync/mocks_test.go b/dot/sync/mocks_test.go index b1a9812778..3c8cb43bfe 100644 --- a/dot/sync/mocks_test.go +++ b/dot/sync/mocks_test.go @@ -609,6 +609,20 @@ func (m *MockNetwork) EXPECT() *MockNetworkMockRecorder { return m.recorder } +// AllConnectedPeers mocks base method. +func (m *MockNetwork) AllConnectedPeers() []peer.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AllConnectedPeers") + ret0, _ := ret[0].([]peer.ID) + return ret0 +} + +// AllConnectedPeers indicates an expected call of AllConnectedPeers. +func (mr *MockNetworkMockRecorder) AllConnectedPeers() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllConnectedPeers", reflect.TypeOf((*MockNetwork)(nil).AllConnectedPeers)) +} + // DoBlockRequest mocks base method. func (m *MockNetwork) DoBlockRequest(arg0 peer.ID, arg1 *network.BlockRequestMessage) (*network.BlockResponseMessage, error) { m.ctrl.T.Helper() @@ -649,17 +663,3 @@ func (mr *MockNetworkMockRecorder) ReportPeer(arg0, arg1 interface{}) *gomock.Ca mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPeer", reflect.TypeOf((*MockNetwork)(nil).ReportPeer), arg0, arg1) } - -// AllConnectedPeers mocks base method. -func (m *MockNetwork) AllConnectedPeers() []peer.ID { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AllConnectedPeers") - ret0, _ := ret[0].([]peer.ID) - return ret0 -} - -// AllConnectedPeers indicates an expected call of AllConnectedPeers. -func (mr *MockNetworkMockRecorder) AllConnectedPeers() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllConnectedPeers", reflect.TypeOf((*MockNetwork)(nil).AllConnectedPeers)) -} diff --git a/dot/sync/requests.go b/dot/sync/requests.go index 9651419b6a..f73bba8444 100644 --- a/dot/sync/requests.go +++ b/dot/sync/requests.go @@ -59,9 +59,8 @@ func ascedingBlockRequests(startNumber, targetNumber uint, requestedData byte) [ reqs := make([]*network.BlockRequestMessage, numRequests) // check if we want to specify a size - const max = uint32(maxResponseSize) for i := uint(0); i < numRequests; i++ { - max := max + max := uint32(maxResponseSize) lastIteration := numRequests - 1 if i == lastIteration && missingBlocks != 0 { diff --git a/dot/sync/requests_test.go b/dot/sync/requests_test.go new file mode 100644 index 0000000000..008a61db88 --- /dev/null +++ b/dot/sync/requests_test.go @@ -0,0 +1,128 @@ +package sync + +import ( + "testing" + + "github.com/ChainSafe/gossamer/dot/network" + "github.com/ChainSafe/gossamer/lib/common/variadic" + "github.com/stretchr/testify/require" +) + +func TestAscendingBlockRequest(t *testing.T) { + one := uint32(1) + three := uint32(3) + maxResponseSize := uint32(maxResponseSize) + cases := map[string]struct { + startNumber, targetNumber uint + expectedBlockRequestMessage []*network.BlockRequestMessage + }{ + "start_greater_than_target": { + startNumber: 10, + targetNumber: 0, + expectedBlockRequestMessage: []*network.BlockRequestMessage{}, + }, + + "no_difference_between_start_and_target": { + startNumber: 10, + targetNumber: 10, + expectedBlockRequestMessage: []*network.BlockRequestMessage{ + { + RequestedData: bootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(uint32(10)), + Direction: network.Ascending, + Max: &one, + }, + }, + }, + + "requesting_128_blocks": { + startNumber: 0, + targetNumber: 128, + expectedBlockRequestMessage: []*network.BlockRequestMessage{ + { + RequestedData: bootstrapRequestData, + 
StartingBlock: *variadic.MustNewUint32OrHash(uint32(0)),
+					Direction:     network.Ascending,
+					Max:           &maxResponseSize,
+				},
+			},
+		},
+
+		"requesting_4_chunks_of_128_blocks": {
+			startNumber:  0,
+			targetNumber: 512, // 128 * 4
+			expectedBlockRequestMessage: []*network.BlockRequestMessage{
+				{
+					RequestedData: bootstrapRequestData,
+					StartingBlock: *variadic.MustNewUint32OrHash(uint32(0)),
+					Direction:     network.Ascending,
+					Max:           &maxResponseSize,
+				},
+				{
+					RequestedData: bootstrapRequestData,
+					StartingBlock: *variadic.MustNewUint32OrHash(uint32(128)),
+					Direction:     network.Ascending,
+					Max:           &maxResponseSize,
+				},
+				{
+					RequestedData: bootstrapRequestData,
+					StartingBlock: *variadic.MustNewUint32OrHash(uint32(256)),
+					Direction:     network.Ascending,
+					Max:           &maxResponseSize,
+				},
+				{
+					RequestedData: bootstrapRequestData,
+					StartingBlock: *variadic.MustNewUint32OrHash(uint32(384)),
+					Direction:     network.Ascending,
+					Max:           &maxResponseSize,
+				},
+			},
+		},
+
+		"requesting_4_chunks_of_128_plus_3_blocks": {
+			startNumber:  0,
+			targetNumber: 512 + 3, // 128 * 4 + 3
+			expectedBlockRequestMessage: []*network.BlockRequestMessage{
+				{
+					RequestedData: bootstrapRequestData,
+					StartingBlock: *variadic.MustNewUint32OrHash(uint32(0)),
+					Direction:     network.Ascending,
+					Max:           &maxResponseSize,
+				},
+				{
+					RequestedData: bootstrapRequestData,
+					StartingBlock: *variadic.MustNewUint32OrHash(uint32(128)),
+					Direction:     network.Ascending,
+					Max:           &maxResponseSize,
+				},
+				{
+					RequestedData: bootstrapRequestData,
+					StartingBlock: *variadic.MustNewUint32OrHash(uint32(256)),
+					Direction:     network.Ascending,
+					Max:           &maxResponseSize,
+				},
+				{
+					RequestedData: bootstrapRequestData,
+					StartingBlock: *variadic.MustNewUint32OrHash(uint32(384)),
+					Direction:     network.Ascending,
+					Max:           &maxResponseSize,
+				},
+				{
+					RequestedData: bootstrapRequestData,
+					StartingBlock: *variadic.MustNewUint32OrHash(uint32(512)),
+					Direction:     network.Ascending,
+					Max:           &three,
+				},
+			},
+		},
+	}
+
+	for tname, tt := range cases {
+		tt := tt
+
+		t.Run(tname, func(t *testing.T) {
+			requests := ascedingBlockRequests(tt.startNumber, tt.targetNumber, bootstrapRequestData)
+			require.Equal(t, tt.expectedBlockRequestMessage, requests)
+		})
+	}
+}
diff --git a/dot/sync/sync_worker.go b/dot/sync/sync_worker.go
index 0b2f7d8a66..662a19fed9 100644
--- a/dot/sync/sync_worker.go
+++ b/dot/sync/sync_worker.go
@@ -15,8 +15,9 @@ type syncWorker struct {
 	ctx context.Context
 	l   sync.RWMutex
 
-	doneCh chan struct{}
-	stopCh chan struct{}
+	releaseCh chan struct{}
+	doneCh    chan struct{}
+	stopCh    chan struct{}
 
 	who     peer.ID
 	network Network
@@ -24,15 +25,16 @@ type syncWorker struct {
 
 func newSyncWorker(ctx context.Context, who peer.ID, network Network) *syncWorker {
 	return &syncWorker{
-		ctx:     ctx,
-		who:     who,
-		network: network,
-		doneCh:  make(chan struct{}),
-		stopCh:  make(chan struct{}),
+		ctx:       ctx,
+		who:       who,
+		network:   network,
+		doneCh:    make(chan struct{}),
+		stopCh:    make(chan struct{}),
+		releaseCh: make(chan struct{}),
 	}
 }
 
-func (s *syncWorker) Start(tasks chan *syncTask, wg *sync.WaitGroup) {
+func (s *syncWorker) Start(tasks <-chan *syncTask, wg *sync.WaitGroup) {
 	wg.Add(1)
 
 	go func() {
@@ -45,32 +47,47 @@ func (s *syncWorker) Start(tasks chan *syncTask, wg *sync.WaitGroup) {
 		logger.Debugf("worker %s started, waiting for tasks...", s.who)
 
 		for {
+			s.waitForTasks(tasks)
+
+			logger.Debugf("[WAITING RELEASE] worker %s", s.who)
 			select {
+			case <-s.releaseCh:
 			case <-s.stopCh:
 				return
-
-			case task := <-tasks:
-				request := task.request
-
-				logger.Debugf("[EXECUTING] worker %s: block 
request: %s", s.who, request) - response, err := s.network.DoBlockRequest(s.who, request) - if err != nil { - logger.Debugf("[FINISHED] worker %s: err: %s", s.who, err) - } else if response != nil { - logger.Debugf("[FINISHED] worker %s: block data amount: %d", s.who, len(response.BlockData)) - } - - task.resultCh <- &syncTaskResult{ - who: s.who, - request: request, - response: response, - err: err, - } } } }() } +func (s *syncWorker) waitForTasks(tasks <-chan *syncTask) { + select { + case <-s.stopCh: + return + + case task := <-tasks: + request := task.request + + logger.Debugf("[EXECUTING] worker %s: block request: %s", s.who, request) + response, err := s.network.DoBlockRequest(s.who, request) + if err != nil { + logger.Debugf("[FINISHED] worker %s: err: %s", s.who, err) + } else if response != nil { + logger.Debugf("[FINISHED] worker %s: block data amount: %d", s.who, len(response.BlockData)) + } + + task.resultCh <- &syncTaskResult{ + who: s.who, + request: request, + response: response, + err: err, + } + } +} + +func (s *syncWorker) Release() { + s.releaseCh <- struct{}{} +} + func (s *syncWorker) Stop() { close(s.stopCh) <-s.doneCh diff --git a/dot/sync/worker.go b/dot/sync/worker.go deleted file mode 100644 index c597623089..0000000000 --- a/dot/sync/worker.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "context" - "sync" - "time" - - "github.com/libp2p/go-libp2p/core/peer" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/lib/common" -) - -// workerState helps track the current worker set and set the upcoming worker ID -type workerState struct { - ctx context.Context - cancel context.CancelFunc - - sync.Mutex - nextWorker uint64 - workers map[uint64]*worker -} - -func newWorkerState() *workerState { - ctx, cancel := context.WithCancel(context.Background()) - return &workerState{ - ctx: ctx, - cancel: cancel, - workers: make(map[uint64]*worker), - } -} - -func (s *workerState) add(w *worker) { - s.Lock() - defer s.Unlock() - - w.id = s.nextWorker - w.ctx = s.ctx - s.nextWorker++ - s.workers[w.id] = w -} - -func (s *workerState) delete(id uint64) { - s.Lock() - defer s.Unlock() - delete(s.workers, id) -} - -func (s *workerState) reset() { - s.cancel() - s.ctx, s.cancel = context.WithCancel(context.Background()) - - s.Lock() - defer s.Unlock() - - for id := range s.workers { - delete(s.workers, id) - } - s.nextWorker = 0 -} - -// worker respresents a process that is attempting to sync from the specified start block to target block -// if it fails for some reason, `err` is set. 
-// otherwise, we can assume all the blocks have been received and added to the `readyBlocks` queue -type worker struct { - ctx context.Context - id uint64 - retryCount uint16 - peersTried map[peer.ID]struct{} - - startHash common.Hash - startNumber *uint - targetHash common.Hash - targetNumber *uint - - // if this worker is tied to a specific pending block, this field is set - pendingBlock *pendingBlock - - // bitmap of fields to request - requestData byte - direction network.SyncDirection - - duration time.Duration - err *workerError -} - -type workerError struct { - err error - who peer.ID // whose response caused the error, if any -} - -func uintPtr(n uint) *uint { return &n } diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 3f72e74ce5..16b1c0902b 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -9,6 +9,11 @@ import ( "github.com/libp2p/go-libp2p/core/peer" ) +const ( + ignorePeerTimeout = 2 * time.Minute + maxRequestAllowed uint = 40 +) + type syncTask struct { request *network.BlockRequestMessage resultCh chan<- *syncTaskResult @@ -36,8 +41,6 @@ type syncWorkerPool struct { ignorePeers map[peer.ID]time.Time } -const maxRequestAllowed uint = 40 - func newSyncWorkerPool(net Network) *syncWorkerPool { return &syncWorkerPool{ network: net, @@ -47,8 +50,6 @@ func newSyncWorkerPool(net Network) *syncWorkerPool { } } -const ignorePeerTimeout = 2 * time.Minute - func (s *syncWorkerPool) useConnectedPeers() { connectedPeers := s.network.AllConnectedPeers() @@ -76,7 +77,7 @@ func (s *syncWorkerPool) useConnectedPeers() { } } -func (s *syncWorkerPool) addWorkerFromBlockAnnounce(who peer.ID) error { +func (s *syncWorkerPool) fromBlockAnnounce(who peer.ID) error { s.l.Lock() defer s.l.Unlock() @@ -111,23 +112,29 @@ func (s *syncWorkerPool) submitRequests(requests []*network.BlockRequestMessage, } } -func (s *syncWorkerPool) shutdownWorker(who peer.ID, ignore bool) { +func (s *syncWorkerPool) releaseWorker(who peer.ID) { s.l.Lock() - defer s.l.Unlock() - peer, has := s.workers[who] + s.l.Unlock() + if !has { return } - go func() { - logger.Warnf("trying to stop %s (ignore=%v)", who, ignore) - peer.Stop() - logger.Warnf("peer %s stopped", who) - }() + peer.Release() +} + +func (s *syncWorkerPool) shutdownWorker(who peer.ID, ignore bool) { + s.l.Lock() + peer, has := s.workers[who] + s.l.Unlock() - delete(s.workers, who) + if !has { + return + } + peer.Stop() + delete(s.workers, who) if ignore { ignorePeerTimeout := time.Now().Add(ignorePeerTimeout) s.ignorePeers[who] = ignorePeerTimeout From b3d4c8e96723737025b540fb53b3a9aee8ca7e68 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 5 May 2023 19:32:17 -0400 Subject: [PATCH 027/140] wip: tip sync concurrency for bounded requests --- chain/westend/config.toml | 6 +- dot/sync/chain_sync.go | 209 ++-- dot/sync/chain_sync_test._old | 1643 ++++++++++++++++++++++++++++++ dot/sync/chain_sync_test.go | 1631 +---------------------------- dot/sync/mock_chain_sync_test.go | 6 +- dot/sync/sync_worker.go | 94 -- dot/sync/syncer.go | 3 +- dot/sync/worker_pool.go | 233 +++-- 8 files changed, 1943 insertions(+), 1882 deletions(-) create mode 100644 dot/sync/chain_sync_test._old delete mode 100644 dot/sync/sync_worker.go diff --git a/chain/westend/config.toml b/chain/westend/config.toml index f9403abcf2..88ebb8fed3 100644 --- a/chain/westend/config.toml +++ b/chain/westend/config.toml @@ -1,5 +1,5 @@ [global] -basepath = "~/.gossamer/westend" +basepath = "./tmp/westend" log = "info" metrics-address = "localhost:9876" 
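# For context, a sketch of the [log] section these westend config hunks
# converge on; the remaining module levels are assumed to keep their empty
# defaults, which presumably fall back to the global "info" level above:
#
# [log]
# state = ""
# runtime = ""
# babe = ""
# grandpa = ""
# sync = "trace"
# digest = "trace"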
@@ -11,8 +11,8 @@ state = "" runtime = "" babe = "" grandpa = "" -sync = "" -digest = "" +sync = "trace" +digest = "trace" [init] genesis = "./chain/westend/genesis.json" diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index ea54f104d7..f84cc8b4fc 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -72,7 +72,7 @@ type ChainSync interface { setBlockAnnounce(from peer.ID, header *types.Header) error // called upon receiving a BlockAnnounceHandshake - setPeerHead(p peer.ID, hash common.Hash, number uint) error + setPeerHead(p peer.ID, hash common.Hash, number uint) // syncState returns the current syncing state syncState() chainSyncState @@ -89,6 +89,7 @@ type announcedBlock struct { type chainSync struct { ctx context.Context cancel context.CancelFunc + stopCh chan struct{} blockState BlockState network Network @@ -104,8 +105,7 @@ type chainSync struct { // disjoint set of blocks which are known but not ready to be processed // ie. we only know the hash, number, or the parent block is unknown, or the body is unknown // note: the block may have empty fields, as some data about it may be unknown - pendingBlocks DisjointBlockSet - pendingBlockDoneCh chan<- struct{} + pendingBlocks DisjointBlockSet state chainSyncState benchmarker *syncBenchmarker @@ -151,6 +151,7 @@ func newChainSync(cfg chainSyncConfig) *chainSync { logSyncTicker := time.NewTicker(logSyncPeriod) return &chainSync{ + stopCh: make(chan struct{}), storageState: cfg.storageState, transactionState: cfg.transactionState, babeVerifier: cfg.babeVerifier, @@ -193,19 +194,17 @@ func (cs *chainSync) start() { isSyncedGauge.Set(float64(cs.state)) - pendingBlockDoneCh := make(chan struct{}) - cs.pendingBlockDoneCh = pendingBlockDoneCh - - go cs.pendingBlocks.run(cs.finalisedCh, pendingBlockDoneCh) + go cs.pendingBlocks.run(cs.finalisedCh, cs.stopCh) + go cs.workerPool.listenForRequests(cs.stopCh) go cs.sync() cs.logSyncStarted = true go cs.logSyncSpeed() } func (cs *chainSync) stop() { - if cs.pendingBlockDoneCh != nil { - close(cs.pendingBlockDoneCh) - } + close(cs.stopCh) + <-cs.workerPool.doneCh + cs.cancel() if cs.logSyncStarted { <-cs.logSyncDone @@ -305,12 +304,8 @@ func (cs *chainSync) setBlockAnnounce(who peer.ID, blockAnnounceHeader *types.He } // setPeerHead sets a peer's best known block -func (cs *chainSync) setPeerHead(who peer.ID, bestHash common.Hash, bestNumber uint) error { - err := cs.workerPool.fromBlockAnnounce(who) - if err != nil { - logger.Errorf("adding a potential worker: %s", err) - } - +func (cs *chainSync) setPeerHead(who peer.ID, bestHash common.Hash, bestNumber uint) { + cs.workerPool.fromBlockAnnounce(who) cs.peerViewLock.Lock() defer cs.peerViewLock.Unlock() @@ -319,7 +314,6 @@ func (cs *chainSync) setPeerHead(who peer.ID, bestHash common.Hash, bestNumber u hash: bestHash, number: bestNumber, } - return nil } func (cs *chainSync) logSyncSpeed() { @@ -337,7 +331,7 @@ func (cs *chainSync) logSyncSpeed() { } select { - case <-cs.logSyncTickerC: // channel of cs.logSyncTicker + case <-cs.logSyncTickerC: case <-cs.ctx.Done(): return } @@ -416,109 +410,108 @@ func (cs *chainSync) sync() { func (cs *chainSync) executeTipSync() error { for { - cs.workerPool.useConnectedPeers() + //cs.workerPool.useConnectedPeers() slotDurationTimer := time.NewTimer(cs.slotDuration) - select { - case blockAnnouncement := <-cs.blockAnnounceCh: - if !slotDurationTimer.Stop() { - <-slotDurationTimer.C - } + blockAnnouncement := <-cs.blockAnnounceCh - who := blockAnnouncement.who - announcedHash := 
blockAnnouncement.header.Hash()
-			announcedNumber := blockAnnouncement.header.Number
+		if !slotDurationTimer.Stop() {
+			<-slotDurationTimer.C
+		}
 
-			has, err := cs.blockState.HasHeader(announcedHash)
-			if err != nil {
-				return fmt.Errorf("checking if header exists: %s", err)
-			}
+		peerWhoAnnounced := blockAnnouncement.who
+		announcedHash := blockAnnouncement.header.Hash()
+		announcedNumber := blockAnnouncement.header.Number
 
-			if has {
-				continue
-			}
+		has, err := cs.blockState.HasHeader(announcedHash)
+		if err != nil {
+			return fmt.Errorf("checking if header exists: %w", err)
+		}
+
+		if has {
+			continue
+		}
 
-			bestBlockHeader, err := cs.blockState.BestBlockHeader()
+		bestBlockHeader, err := cs.blockState.BestBlockHeader()
+		if err != nil {
+			return fmt.Errorf("getting best block header: %w", err)
+		}
+
+		// if the announced block contains a lower number than our best
+		// block header, let's check if it is greater than our latest
+		// finalised header, if so this block belongs to a fork chain
+		if announcedNumber < bestBlockHeader.Number {
+			highestFinalizedHeader, err := cs.blockState.GetHighestFinalisedHeader()
 			if err != nil {
-				return fmt.Errorf("getting best block header: %w", err)
+				return fmt.Errorf("getting highest finalized header: %w", err)
 			}
 
-			// if the announced block contains a lower number than our best
-			// block header, let's check if it is greater than our latests
-			// finalized header, if so this block belongs to a fork chain
-			if announcedNumber < bestBlockHeader.Number {
-				highestFinalizedHeader, err := cs.blockState.GetHighestFinalisedHeader()
-				if err != nil {
-					return fmt.Errorf("getting highest finalized header")
-				}
-
-				// ignore the block if it has the same or lower number
-				if announcedNumber <= highestFinalizedHeader.Number {
-					continue
-				}
+			// ignore the block if it has the same or lower number
+			if announcedNumber <= highestFinalizedHeader.Number {
+				continue
+			}
 
-				logger.Debugf("block announce lower than best block %s (#%d) and greater highest finalized %s (#%d)",
-					bestBlockHeader.Hash(), bestBlockHeader.Number, highestFinalizedHeader.Hash(), highestFinalizedHeader.Number)
+			logger.Debugf("block announce lower than best block %s (#%d) and greater than highest finalized %s (#%d)",
+				bestBlockHeader.Hash(), bestBlockHeader.Number, highestFinalizedHeader.Hash(), highestFinalizedHeader.Number)
 
-				parentExists, err := cs.blockState.HasHeader(blockAnnouncement.header.ParentHash)
-				if err != nil && !errors.Is(err, chaindb.ErrKeyNotFound) {
-					return fmt.Errorf("while checking header exists: %w", err)
-				}
+			parentExists, err := cs.blockState.HasHeader(blockAnnouncement.header.ParentHash)
+			if err != nil && !errors.Is(err, chaindb.ErrKeyNotFound) {
+				return fmt.Errorf("while checking header exists: %w", err)
+			}
 
-				gapLength := uint32(1)
-				startAtBlock := announcedNumber
-				var request *network.BlockRequestMessage
+			gapLength := uint32(1)
+			startAtBlock := announcedNumber
+			var request *network.BlockRequestMessage
 
-				if parentExists {
-					request = singleBlockRequest(announcedHash, bootstrapRequestData)
-				} else {
-					gapLength = uint32(announcedNumber - highestFinalizedHeader.Number)
-					startAtBlock = highestFinalizedHeader.Number + 1
-					request = descendingBlockRequest(announcedHash, gapLength, bootstrapRequestData)
-				}
+			if parentExists {
+				request = singleBlockRequest(announcedHash, bootstrapRequestData)
+			} else {
+				gapLength = uint32(announcedNumber - highestFinalizedHeader.Number)
+				startAtBlock = highestFinalizedHeader.Number + 1
+				request = descendingBlockRequest(announcedHash, 
gapLength, bootstrapRequestData)
+			}
 
-				logger.Debugf("received a block announce from %s, requesting %d blocks, starting %s (#%d)",
-					who, gapLength, announcedHash, announcedNumber)
+			logger.Debugf("received a block announce from %s, requesting %d blocks, starting %s (#%d)",
+				peerWhoAnnounced, gapLength, announcedHash, announcedNumber)
 
-				resultsQueue := make(chan *syncTaskResult)
-				wg := sync.WaitGroup{}
+			resultsQueue := make(chan *syncTaskResult)
+			wg := sync.WaitGroup{}
 
-				wg.Add(1)
-				go cs.handleWorkersResults(resultsQueue, startAtBlock, gapLength, &wg)
-				cs.workerPool.submitRequest(request, resultsQueue)
-				wg.Wait()
+			wg.Add(1)
+			go cs.handleWorkersResults(resultsQueue, startAtBlock, gapLength, &wg)
+			cs.workerPool.submitBoundedRequest(request, peerWhoAnnounced, resultsQueue)
+			wg.Wait()
+		} else {
+			gapLength := uint32(announcedNumber - bestBlockHeader.Number)
+			startAtBlock := announcedNumber
+			totalBlocks := uint32(1)
+			var request *network.BlockRequestMessage
+			if gapLength > 1 {
+				request = descendingBlockRequest(announcedHash, gapLength, bootstrapRequestData)
+				startAtBlock = announcedNumber - uint(*request.Max) + 1
+				totalBlocks = *request.Max
+
+				logger.Debugf("received a block announce from %s, requesting %d blocks, descending request from %s (#%d)",
+					peerWhoAnnounced, gapLength, announcedHash, announcedNumber)
 			} else {
-				gapLength := uint32(announcedNumber - bestBlockHeader.Number)
-				startAtBlock := announcedNumber
-				totalBlocks := uint32(1)
-				var request *network.BlockRequestMessage
-				if gapLength > 1 {
-					request = descendingBlockRequest(announcedHash, gapLength, bootstrapRequestData)
-					startAtBlock = announcedNumber - uint(*request.Max) + 1
-					totalBlocks = *request.Max
-
-					logger.Debugf("received a block announce from %s, requesting %d blocks, descending request from %s (#%d)",
-						who, gapLength, announcedHash, announcedNumber)
-				} else {
-					gapLength = 1
-					request = singleBlockRequest(announcedHash, bootstrapRequestData)
-					logger.Debugf("received a block announce from %s, requesting a single block %s (#%d)",
-						who, announcedHash, announcedNumber)
-				}
+				gapLength = 1
+				request = singleBlockRequest(announcedHash, bootstrapRequestData)
+				logger.Debugf("received a block announce from %s, requesting a single block %s (#%d)",
+					peerWhoAnnounced, announcedHash, announcedNumber)
+			}
 
-				resultsQueue := make(chan *syncTaskResult)
-				wg := sync.WaitGroup{}
+			resultsQueue := make(chan *syncTaskResult)
+			wg := sync.WaitGroup{}
 
-				wg.Add(1)
-				go cs.handleWorkersResults(resultsQueue, startAtBlock, totalBlocks, &wg)
-				cs.workerPool.submitRequest(request, resultsQueue)
-				wg.Wait()
-			}
+			wg.Add(1)
+			go cs.handleWorkersResults(resultsQueue, startAtBlock, totalBlocks, &wg)
+			cs.workerPool.submitBoundedRequest(request, peerWhoAnnounced, resultsQueue)
+			wg.Wait()
+		}
 
-			err = cs.requestPendingBlocks()
-			if err != nil {
-				return fmt.Errorf("while requesting pending blocks")
-			}
+		err = cs.requestPendingBlocks()
+		if err != nil {
+			return fmt.Errorf("while requesting pending blocks: %w", err)
 		}
 	}
 
@@ -600,8 +593,8 @@ func (cs *chainSync) executeBootstrapSync() error {
 	// so we limit to `maxRequestAllowed` to avoid the error:
 	// cannot reserve outbound connection: resource limit exceeded
 	availableWorkers := cs.workerPool.totalWorkers()
-	if availableWorkers > maxRequestAllowed {
-		availableWorkers = maxRequestAllowed
+	if availableWorkers > maxRequestsAllowed {
+		availableWorkers = maxRequestsAllowed
 	}
 
 	// targetBlockNumber is the virtual target we will request, however
@@ -730,7 +723,7 @@ loop:
 		// if we receive 
and empty message from the stream we don't need to shutdown the worker if !errors.Is(taskResult.err, network.ErrReceivedEmptyMessage) { - cs.workerPool.shutdownWorker(taskResult.who, true) + cs.workerPool.punishPeer(taskResult.who) } cs.workerPool.submitRequest(taskResult.request, workersResults) continue @@ -749,7 +742,7 @@ loop: switch { case errors.Is(err, errResponseIsNotChain): logger.Criticalf("response invalid: %s", err) - cs.workerPool.shutdownWorker(taskResult.who, true) + cs.workerPool.punishPeer(taskResult.who) cs.workerPool.submitRequest(taskResult.request, workersResults) continue case errors.Is(err, errEmptyBlockData): @@ -758,7 +751,7 @@ loop: case errors.Is(err, errUnknownParent): case err != nil: logger.Criticalf("response invalid: %s", err) - cs.workerPool.shutdownWorker(taskResult.who, true) + cs.workerPool.punishPeer(taskResult.who) cs.workerPool.submitRequest(taskResult.request, workersResults) continue } @@ -875,7 +868,6 @@ func (cs *chainSync) processBlockData(blockData types.BlockData) error { //nolin // while in bootstrap mode we don't need to broadcast block announcements announceImportedBlock := cs.state == tip if headerInState && bodyInState { - //logger.Infof("Process Block With State Header And Body in State: %s (#%d)", blockData.Hash.Short(), blockData.Number()) err = cs.processBlockDataWithStateHeaderAndBody(blockData, announceImportedBlock) if err != nil { return fmt.Errorf("processing block data with header and "+ @@ -886,7 +878,6 @@ func (cs *chainSync) processBlockData(blockData types.BlockData) error { //nolin if blockData.Header != nil { if blockData.Body != nil { - //logger.Infof("Process Block With Header And Body: %s (#%d)", blockData.Hash.Short(), blockData.Number()) err = cs.processBlockDataWithHeaderAndBody(blockData, announceImportedBlock) if err != nil { return fmt.Errorf("processing block data with header and body: %w", err) @@ -894,7 +885,7 @@ func (cs *chainSync) processBlockData(blockData types.BlockData) error { //nolin } if blockData.Justification != nil && len(*blockData.Justification) > 0 { - logger.Infof("Process Block Justification: %s (#%d)", blockData.Hash.Short(), blockData.Number()) + logger.Infof("handling justification for block %s (#%d)", blockData.Hash.Short(), blockData.Number()) err = cs.handleJustification(blockData.Header, *blockData.Justification) if err != nil { return fmt.Errorf("handling justification: %w", err) diff --git a/dot/sync/chain_sync_test._old b/dot/sync/chain_sync_test._old new file mode 100644 index 0000000000..6da080e929 --- /dev/null +++ b/dot/sync/chain_sync_test._old @@ -0,0 +1,1643 @@ +// // Copyright 2021 ChainSafe Systems (ON) +// // SPDX-License-Identifier: LGPL-3.0-only + +// package sync + +// import ( +// "time" + +// "context" +// "errors" +// "testing" + +// "github.com/ChainSafe/gossamer/dot/network" +// "github.com/ChainSafe/gossamer/dot/peerset" +// "github.com/ChainSafe/gossamer/dot/types" +// "github.com/ChainSafe/gossamer/lib/blocktree" +// "github.com/ChainSafe/gossamer/lib/common" +// "github.com/ChainSafe/gossamer/lib/common/variadic" +// "github.com/ChainSafe/gossamer/lib/trie" +// "github.com/golang/mock/gomock" +// "github.com/libp2p/go-libp2p/core/peer" +// "github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/require" +// ) + +// const defaultSlotDuration = 6 * time.Second + +// func Test_chainSyncState_String(t *testing.T) { +// t.Parallel() + +// tests := []struct { +// name string +// s chainSyncState +// want string +// }{ +// { +// name: "case_bootstrap", +// 
s: bootstrap, +// want: "bootstrap", +// }, +// { +// name: "case_tip", +// s: tip, +// want: "tip", +// }, +// { +// name: "case_unknown", +// s: 3, +// want: "unknown", +// }, +// } +// for _, tt := range tests { +// tt := tt +// t.Run(tt.name, func(t *testing.T) { +// t.Parallel() +// got := tt.s.String() +// assert.Equal(t, tt.want, got) +// }) +// } +// } + +// func Test_chainSync_setPeerHead(t *testing.T) { +// t.Parallel() + +// errTest := errors.New("test error") +// const somePeer = peer.ID("abc") +// someHash := common.Hash{1, 2, 3, 4} + +// testCases := map[string]struct { +// chainSyncBuilder func(ctrl *gomock.Controller) *chainSync +// peerID peer.ID +// hash common.Hash +// number uint +// errWrapped error +// errMessage string +// expectedPeerIDToPeerState map[peer.ID]*peerView +// expectedQueuedPeerStates []*peerView +// }{ +// "best_block_header_error": { +// chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { +// blockState := NewMockBlockState(ctrl) +// blockState.EXPECT().BestBlockHeader().Return(nil, errTest) +// return &chainSync{ +// peerView: map[peer.ID]*peerView{}, +// blockState: blockState, +// } +// }, +// peerID: somePeer, +// hash: someHash, +// number: 1, +// errWrapped: errTest, +// errMessage: "best block header: test error", +// expectedPeerIDToPeerState: map[peer.ID]*peerView{ +// somePeer: { +// who: somePeer, +// hash: someHash, +// number: 1, +// }, +// }, +// }, +// "number_smaller_than_best_block_number_get_hash_by_number_error": { +// chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { +// blockState := NewMockBlockState(ctrl) +// bestBlockHeader := &types.Header{Number: 2} +// blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) +// blockState.EXPECT().GetHashByNumber(uint(1)). +// Return(common.Hash{}, errTest) +// return &chainSync{ +// peerView: map[peer.ID]*peerView{}, +// blockState: blockState, +// } +// }, +// peerID: somePeer, +// hash: someHash, +// number: 1, +// errWrapped: errTest, +// errMessage: "get block hash by number: test error", +// expectedPeerIDToPeerState: map[peer.ID]*peerView{ +// somePeer: { +// who: somePeer, +// hash: someHash, +// number: 1, +// }, +// }, +// }, +// "number_smaller_than_best_block_number_and_same_hash": { +// chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { +// blockState := NewMockBlockState(ctrl) +// bestBlockHeader := &types.Header{Number: 2} +// blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) +// blockState.EXPECT().GetHashByNumber(uint(1)).Return(someHash, nil) +// return &chainSync{ +// peerView: map[peer.ID]*peerView{}, +// blockState: blockState, +// } +// }, +// peerID: somePeer, +// hash: someHash, +// number: 1, +// expectedPeerIDToPeerState: map[peer.ID]*peerView{ +// somePeer: { +// who: somePeer, +// hash: someHash, +// number: 1, +// }, +// }, +// }, +// "number_smaller_than_best_block_number_get_highest_finalised_header_error": { +// chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { +// blockState := NewMockBlockState(ctrl) +// bestBlockHeader := &types.Header{Number: 2} +// blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) +// blockState.EXPECT().GetHashByNumber(uint(1)). 
+// Return(common.Hash{2}, nil) // other hash than someHash +// blockState.EXPECT().GetHighestFinalisedHeader().Return(nil, errTest) +// return &chainSync{ +// peerView: map[peer.ID]*peerView{}, +// blockState: blockState, +// } +// }, +// peerID: somePeer, +// hash: someHash, +// number: 1, +// errWrapped: errTest, +// errMessage: "get highest finalised header: test error", +// expectedPeerIDToPeerState: map[peer.ID]*peerView{ +// somePeer: { +// who: somePeer, +// hash: someHash, +// number: 1, +// }, +// }, +// }, +// "number_smaller_than_best_block_number_and_finalised_number_equal_than_number": { +// chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { +// blockState := NewMockBlockState(ctrl) +// bestBlockHeader := &types.Header{Number: 2} +// blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) +// blockState.EXPECT().GetHashByNumber(uint(1)). +// Return(common.Hash{2}, nil) // other hash than someHash +// finalisedBlockHeader := &types.Header{Number: 1} +// blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) +// network := NewMockNetwork(ctrl) +// network.EXPECT().ReportPeer(peerset.ReputationChange{ +// Value: peerset.BadBlockAnnouncementValue, +// Reason: peerset.BadBlockAnnouncementReason, +// }, somePeer) +// return &chainSync{ +// peerView: map[peer.ID]*peerView{}, +// blockState: blockState, +// network: network, +// } +// }, +// peerID: somePeer, +// hash: someHash, +// number: 1, +// errWrapped: errPeerOnInvalidFork, +// errMessage: "peer is on an invalid fork: for peer ZiCa and block number 1", +// expectedPeerIDToPeerState: map[peer.ID]*peerView{ +// somePeer: { +// who: somePeer, +// hash: someHash, +// number: 1, +// }, +// }, +// }, +// "number_smaller_than_best_block_number_and_finalised_number_bigger_than_number": { +// chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { +// blockState := NewMockBlockState(ctrl) +// bestBlockHeader := &types.Header{Number: 2} +// blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) +// blockState.EXPECT().GetHashByNumber(uint(1)). +// Return(common.Hash{2}, nil) // other hash than someHash +// finalisedBlockHeader := &types.Header{Number: 2} +// blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) +// network := NewMockNetwork(ctrl) +// network.EXPECT().ReportPeer(peerset.ReputationChange{ +// Value: peerset.BadBlockAnnouncementValue, +// Reason: peerset.BadBlockAnnouncementReason, +// }, somePeer) +// return &chainSync{ +// peerView: map[peer.ID]*peerView{}, +// blockState: blockState, +// network: network, +// } +// }, +// peerID: somePeer, +// hash: someHash, +// number: 1, +// errWrapped: errPeerOnInvalidFork, +// errMessage: "peer is on an invalid fork: for peer ZiCa and block number 1", +// expectedPeerIDToPeerState: map[peer.ID]*peerView{ +// somePeer: { +// who: somePeer, +// hash: someHash, +// number: 1, +// }, +// }, +// }, +// "number smaller than best block number and " + +// "finalised number smaller than number and " + +// "has_header_error": { +// chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { +// blockState := NewMockBlockState(ctrl) +// bestBlockHeader := &types.Header{Number: 3} +// blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) +// blockState.EXPECT().GetHashByNumber(uint(2)). 
+// Return(common.Hash{2}, nil) // other hash than someHash +// finalisedBlockHeader := &types.Header{Number: 1} +// blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) +// blockState.EXPECT().HasHeader(someHash).Return(false, errTest) +// return &chainSync{ +// peerView: map[peer.ID]*peerView{}, +// blockState: blockState, +// } +// }, +// peerID: somePeer, +// hash: someHash, +// number: 2, +// errWrapped: errTest, +// errMessage: "has header: test error", +// expectedPeerIDToPeerState: map[peer.ID]*peerView{ +// somePeer: { +// who: somePeer, +// hash: someHash, +// number: 2, +// }, +// }, +// }, +// "number smaller than best block number and " + +// "finalised number smaller than number and " + +// "has_the_hash": { +// chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { +// blockState := NewMockBlockState(ctrl) +// bestBlockHeader := &types.Header{Number: 3} +// blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) +// blockState.EXPECT().GetHashByNumber(uint(2)). +// Return(common.Hash{2}, nil) // other hash than someHash +// finalisedBlockHeader := &types.Header{Number: 1} +// blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) +// blockState.EXPECT().HasHeader(someHash).Return(true, nil) +// return &chainSync{ +// peerView: map[peer.ID]*peerView{}, +// blockState: blockState, +// } +// }, +// peerID: somePeer, +// hash: someHash, +// number: 2, +// expectedPeerIDToPeerState: map[peer.ID]*peerView{ +// somePeer: { +// who: somePeer, +// hash: someHash, +// number: 2, +// }, +// }, +// }, +// "number_bigger_than_the_head_number_add_hash_and_number_error": { +// chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { +// blockState := NewMockBlockState(ctrl) +// bestBlockHeader := &types.Header{Number: 1} +// blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) +// pendingBlocks := NewMockDisjointBlockSet(ctrl) +// pendingBlocks.EXPECT().addHashAndNumber(someHash, uint(2)). +// Return(errTest) +// return &chainSync{ +// peerView: map[peer.ID]*peerView{}, +// blockState: blockState, +// pendingBlocks: pendingBlocks, +// } +// }, +// peerID: somePeer, +// hash: someHash, +// number: 2, +// errWrapped: errTest, +// errMessage: "add hash and number: test error", +// expectedPeerIDToPeerState: map[peer.ID]*peerView{ +// somePeer: { +// who: somePeer, +// hash: someHash, +// number: 2, +// }, +// }, +// }, +// "number_bigger_than_the_head_number_success": { +// chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { +// blockState := NewMockBlockState(ctrl) +// bestBlockHeader := &types.Header{Number: 1} +// blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) +// pendingBlocks := NewMockDisjointBlockSet(ctrl) +// pendingBlocks.EXPECT().addHashAndNumber(someHash, uint(2)). +// Return(nil) +// return &chainSync{ +// peerView: map[peer.ID]*peerView{}, +// blockState: blockState, +// pendingBlocks: pendingBlocks, +// // buffered of 1 so setPeerHead can write to it +// // without a consumer of the channel on the other end. 
+// workQueue: make(chan *peerView, 1), +// } +// }, +// peerID: somePeer, +// hash: someHash, +// number: 2, +// expectedPeerIDToPeerState: map[peer.ID]*peerView{ +// somePeer: { +// who: somePeer, +// hash: someHash, +// number: 2, +// }, +// }, +// expectedQueuedPeerStates: []*peerView{ +// { +// who: somePeer, +// hash: someHash, +// number: 2, +// }, +// }, +// }, +// } + +// for name, testCase := range testCases { +// testCase := testCase +// t.Run(name, func(t *testing.T) { +// t.Parallel() +// ctrl := gomock.NewController(t) + +// chainSync := testCase.chainSyncBuilder(ctrl) + +// err := chainSync.setPeerHead(testCase.peerID, testCase.hash, testCase.number) + +// assert.ErrorIs(t, err, testCase.errWrapped) +// if testCase.errWrapped != nil { +// assert.EqualError(t, err, testCase.errMessage) +// } +// assert.Equal(t, testCase.expectedPeerIDToPeerState, chainSync.peerView) + +// require.Equal(t, len(testCase.expectedQueuedPeerStates), len(chainSync.workQueue)) +// for _, expectedPeerState := range testCase.expectedQueuedPeerStates { +// peerState := <-chainSync.workQueue +// assert.Equal(t, expectedPeerState, peerState) +// } +// }) +// } +// } + +// func TestChainSync_sync_bootstrap_withWorkerError(t *testing.T) { +// t.Parallel() + +// ctrl := gomock.NewController(t) +// cs := newTestChainSync(ctrl) +// mockBlockState := NewMockBlockState(ctrl) +// mockHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, trie.EmptyHash, 0, +// types.NewDigest()) +// mockBlockState.EXPECT().BestBlockHeader().Return(mockHeader, nil).Times(2) +// cs.blockState = mockBlockState +// cs.handler = newBootstrapSyncer(mockBlockState) + +// mockNetwork := NewMockNetwork(ctrl) +// startingBlock := variadic.MustNewUint32OrHash(1) +// max := uint32(128) +// mockNetwork.EXPECT().DoBlockRequest(peer.ID("noot"), &network.BlockRequestMessage{ +// RequestedData: 19, +// StartingBlock: *startingBlock, +// Direction: 0, +// Max: &max, +// }) +// cs.network = mockNetwork + +// go cs.sync() +// defer cs.cancel() + +// testPeer := peer.ID("noot") +// cs.peerView[testPeer] = &peerView{ +// number: 1000, +// } + +// cs.workQueue <- cs.peerView[testPeer] + +// select { +// case res := <-cs.resultQueue: +// expected := &workerError{ +// err: errNilResponse, // since MockNetwork returns a nil response +// who: testPeer, +// } +// require.Equal(t, expected, res.err) +// case <-time.After(5 * time.Second): +// t.Fatal("did not get worker response") +// } + +// require.Equal(t, bootstrap, cs.state) +// } + +// func TestChainSync_sync_tip(t *testing.T) { +// t.Parallel() + +// done := make(chan struct{}) + +// ctrl := gomock.NewController(t) +// cs := newTestChainSync(ctrl) +// header := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, trie.EmptyHash, 1000, +// types.NewDigest()) + +// bs := NewMockBlockState(ctrl) +// bs.EXPECT().BestBlockHeader().Return(header, nil) +// bs.EXPECT().GetHighestFinalisedHeader().DoAndReturn(func() (*types.Header, error) { +// close(done) +// return header, nil +// }) +// cs.blockState = bs + +// go cs.sync() +// defer cs.cancel() + +// testPeer := peer.ID("noot") +// cs.peerView[testPeer] = &peerView{ +// number: 999, +// } + +// cs.workQueue <- cs.peerView[testPeer] +// <-done +// require.Equal(t, tip, cs.state) +// } + +// func TestChainSync_getTarget(t *testing.T) { +// ctrl := gomock.NewController(t) +// cs := newTestChainSync(ctrl) +// require.Equal(t, uint(1<<32-1), cs.getTarget()) +// cs.peerView = map[peer.ID]*peerView{ +// "a": { +// number: 0, // outlier +// }, 
+// "b": { +// number: 110, +// }, +// "c": { +// number: 120, +// }, +// "d": { +// number: 130, +// }, +// "e": { +// number: 140, +// }, +// "f": { +// number: 150, +// }, +// "g": { +// number: 1000, // outlier +// }, +// } + +// require.Equal(t, uint(130), cs.getTarget()) // sum:650/count:5= avg:130 + +// cs.peerView = map[peer.ID]*peerView{ +// "testA": { +// number: 1000, +// }, +// "testB": { +// number: 2000, +// }, +// } + +// require.Equal(t, uint(1500), cs.getTarget()) +// } + +// func TestWorkerToRequests(t *testing.T) { +// t.Parallel() + +// w := &worker{ +// startNumber: uintPtr(10), +// targetNumber: uintPtr(1), +// direction: network.Ascending, +// } +// _, err := workerToRequests(w) +// require.Equal(t, errInvalidDirection, err) + +// type testCase struct { +// w *worker +// expected []*network.BlockRequestMessage +// } + +// var ( +// max128 = uint32(128) +// max9 = uint32(9) +// max64 = uint32(64) +// ) + +// testCases := map[string]testCase{ +// "test_0": { +// w: &worker{ +// startNumber: uintPtr(1), +// targetNumber: uintPtr(1 + maxResponseSize), +// direction: network.Ascending, +// requestData: bootstrapRequestData, +// }, +// expected: []*network.BlockRequestMessage{ +// { +// RequestedData: bootstrapRequestData, +// StartingBlock: *variadic.MustNewUint32OrHash(1), +// Direction: network.Ascending, +// Max: &max128, +// }, +// }, +// }, +// "test_1": { +// w: &worker{ +// startNumber: uintPtr(1), +// targetNumber: uintPtr(1 + (maxResponseSize * 2)), +// direction: network.Ascending, +// requestData: bootstrapRequestData, +// }, +// expected: []*network.BlockRequestMessage{ +// { +// RequestedData: bootstrapRequestData, +// StartingBlock: *variadic.MustNewUint32OrHash(1), +// Direction: network.Ascending, +// Max: &max128, +// }, +// { +// RequestedData: network.RequestedDataHeader + network.RequestedDataBody + network.RequestedDataJustification, +// StartingBlock: *variadic.MustNewUint32OrHash(1 + maxResponseSize), +// Direction: network.Ascending, +// Max: &max128, +// }, +// }, +// }, +// "test_2": { +// w: &worker{ +// startNumber: uintPtr(1), +// targetNumber: uintPtr(10), +// direction: network.Ascending, +// requestData: bootstrapRequestData, +// }, +// expected: []*network.BlockRequestMessage{ +// { +// RequestedData: bootstrapRequestData, +// StartingBlock: *variadic.MustNewUint32OrHash(1), +// Direction: network.Ascending, +// Max: &max128, +// }, +// }, +// }, +// "test_3": { +// w: &worker{ +// startNumber: uintPtr(10), +// targetNumber: uintPtr(1), +// direction: network.Descending, +// requestData: bootstrapRequestData, +// }, +// expected: []*network.BlockRequestMessage{ +// { +// RequestedData: bootstrapRequestData, +// StartingBlock: *variadic.MustNewUint32OrHash(10), +// Direction: network.Descending, +// Max: &max9, +// }, +// }, +// }, +// "test_4": { +// w: &worker{ +// startNumber: uintPtr(1), +// targetNumber: uintPtr(1 + maxResponseSize + (maxResponseSize / 2)), +// direction: network.Ascending, +// requestData: bootstrapRequestData, +// }, +// expected: []*network.BlockRequestMessage{ +// { +// RequestedData: bootstrapRequestData, +// StartingBlock: *variadic.MustNewUint32OrHash(1), +// Direction: network.Ascending, +// Max: &max128, +// }, +// { +// RequestedData: network.RequestedDataHeader + network.RequestedDataBody + network.RequestedDataJustification, +// StartingBlock: *variadic.MustNewUint32OrHash(1 + maxResponseSize), +// Direction: network.Ascending, +// Max: &max128, +// }, +// }, +// }, +// "test_5": { +// w: &worker{ +// 
startNumber: uintPtr(1), +// targetNumber: uintPtr(10), +// targetHash: common.Hash{0xa}, +// direction: network.Ascending, +// requestData: bootstrapRequestData, +// }, +// expected: []*network.BlockRequestMessage{ +// { +// RequestedData: bootstrapRequestData, +// StartingBlock: *variadic.MustNewUint32OrHash(1), +// Direction: network.Ascending, +// Max: &max128, +// }, +// }, +// }, +// "test_6": { +// w: &worker{ +// startNumber: uintPtr(1), +// startHash: common.Hash{0xb}, +// targetNumber: uintPtr(10), +// targetHash: common.Hash{0xc}, +// direction: network.Ascending, +// requestData: bootstrapRequestData, +// }, +// expected: []*network.BlockRequestMessage{ +// { +// RequestedData: bootstrapRequestData, +// StartingBlock: *variadic.MustNewUint32OrHash(common.Hash{0xb}), +// Direction: network.Ascending, +// Max: &max128, +// }, +// }, +// }, +// "test_7": { +// w: &worker{ +// startNumber: uintPtr(10), +// targetNumber: uintPtr(10), +// direction: network.Ascending, +// requestData: bootstrapRequestData, +// }, +// expected: []*network.BlockRequestMessage{ +// { +// RequestedData: bootstrapRequestData, +// StartingBlock: *variadic.MustNewUint32OrHash(10), +// Direction: network.Ascending, +// Max: &max128, +// }, +// }, +// }, +// "test_8": { +// w: &worker{ +// startNumber: uintPtr(1 + maxResponseSize + (maxResponseSize / 2)), +// targetNumber: uintPtr(1), +// direction: network.Descending, +// requestData: bootstrapRequestData, +// }, +// expected: []*network.BlockRequestMessage{ +// { +// RequestedData: network.RequestedDataHeader + network.RequestedDataBody + network.RequestedDataJustification, +// StartingBlock: *variadic.MustNewUint32OrHash(1 + (maxResponseSize / 2)), +// Direction: network.Descending, +// Max: &max64, +// }, +// { +// RequestedData: bootstrapRequestData, +// StartingBlock: *variadic.MustNewUint32OrHash(1 + maxResponseSize + (maxResponseSize / 2)), +// Direction: network.Descending, +// Max: &max128, +// }, +// }, +// }, +// } + +// for name, tc := range testCases { +// tc := tc +// t.Run(name, func(t *testing.T) { +// t.Parallel() +// reqs, err := workerToRequests(tc.w) +// require.NoError(t, err) +// require.Equal(t, tc.expected, reqs) +// }) +// } +// } + +// func TestChainSync_validateResponse(t *testing.T) { +// t.Parallel() +// badBlockHash := common.NewHash([]byte("badblockhash")) + +// tests := map[string]struct { +// blockStateBuilder func(ctrl *gomock.Controller) BlockState +// networkBuilder func(ctrl *gomock.Controller) Network +// req *network.BlockRequestMessage +// resp *network.BlockResponseMessage +// expectedError error +// }{ +// "nil_req,_nil_resp": { +// blockStateBuilder: func(ctrl *gomock.Controller) BlockState { +// mockBlockState := NewMockBlockState(ctrl) +// mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) +// return mockBlockState +// }, +// networkBuilder: func(ctrl *gomock.Controller) Network { +// return NewMockNetwork(ctrl) +// }, +// expectedError: errEmptyBlockData, +// }, +// "handle_error_response_is_not_chain,_has_header": { +// blockStateBuilder: func(ctrl *gomock.Controller) BlockState { +// mockBlockState := NewMockBlockState(ctrl) +// mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) +// mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil) +// return mockBlockState +// }, +// networkBuilder: func(ctrl *gomock.Controller) Network { +// return NewMockNetwork(ctrl) +// }, +// req: &network.BlockRequestMessage{ +// 
RequestedData: network.RequestedDataHeader, +// }, +// resp: &network.BlockResponseMessage{ +// BlockData: []*types.BlockData{ +// { +// Header: &types.Header{ +// Number: 1, +// }, +// Body: &types.Body{}, +// }, +// { +// Header: &types.Header{ +// Number: 2, +// }, +// Body: &types.Body{}, +// }, +// }, +// }, +// expectedError: errResponseIsNotChain, +// }, +// "handle_justification-only_request,_unknown_block": { +// blockStateBuilder: func(ctrl *gomock.Controller) BlockState { +// mockBlockState := NewMockBlockState(ctrl) +// mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) +// mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) +// return mockBlockState +// }, +// networkBuilder: func(ctrl *gomock.Controller) Network { +// mockNetwork := NewMockNetwork(ctrl) +// mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{ +// Value: peerset.BadJustificationValue, +// Reason: peerset.BadJustificationReason, +// }, peer.ID("")) +// return mockNetwork +// }, +// req: &network.BlockRequestMessage{ +// RequestedData: network.RequestedDataJustification, +// }, +// resp: &network.BlockResponseMessage{ +// BlockData: []*types.BlockData{ +// { +// Justification: &[]byte{0}, +// }, +// }, +// }, +// expectedError: errUnknownBlockForJustification, +// }, +// "handle_error_unknown_parent": { +// blockStateBuilder: func(ctrl *gomock.Controller) BlockState { +// mockBlockState := NewMockBlockState(ctrl) +// mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) +// mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) +// return mockBlockState +// }, +// networkBuilder: func(ctrl *gomock.Controller) Network { +// return NewMockNetwork(ctrl) +// }, +// req: &network.BlockRequestMessage{ +// RequestedData: network.RequestedDataHeader, +// }, +// resp: &network.BlockResponseMessage{ +// BlockData: []*types.BlockData{ +// { +// Header: &types.Header{ +// Number: 1, +// }, +// Body: &types.Body{}, +// }, +// { +// Header: &types.Header{ +// Number: 2, +// }, +// Body: &types.Body{}, +// }, +// }, +// }, +// expectedError: errUnknownParent, +// }, +// "handle_error_bad_block": { +// blockStateBuilder: func(ctrl *gomock.Controller) BlockState { +// mockBlockState := NewMockBlockState(ctrl) +// mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) +// return mockBlockState +// }, +// networkBuilder: func(ctrl *gomock.Controller) Network { +// return NewMockNetwork(ctrl) +// }, +// req: &network.BlockRequestMessage{ +// RequestedData: network.RequestedDataHeader, +// }, +// resp: &network.BlockResponseMessage{ +// BlockData: []*types.BlockData{ +// { +// Hash: badBlockHash, +// Header: &types.Header{ +// Number: 2, +// }, +// Body: &types.Body{}, +// }, +// }, +// }, +// expectedError: errBadBlock, +// }, +// "no_error": { +// blockStateBuilder: func(ctrl *gomock.Controller) BlockState { +// mockBlockState := NewMockBlockState(ctrl) +// mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) +// mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil) +// return mockBlockState +// }, +// networkBuilder: func(ctrl *gomock.Controller) Network { +// return NewMockNetwork(ctrl) +// }, +// req: &network.BlockRequestMessage{ +// RequestedData: network.RequestedDataHeader, +// }, +// resp: &network.BlockResponseMessage{ +// BlockData: []*types.BlockData{ +// { +// Header: &types.Header{ +// Number: 2, +// 
}, +// Body: &types.Body{}, +// }, +// { +// Header: &types.Header{ +// ParentHash: (&types.Header{ +// Number: 2, +// }).Hash(), +// Number: 3, +// }, +// Body: &types.Body{}, +// }, +// }, +// }, +// }, +// } +// for name, tt := range tests { +// tt := tt +// t.Run(name, func(t *testing.T) { +// t.Parallel() +// ctrl := gomock.NewController(t) + +// cfg := chainSyncConfig{ +// bs: tt.blockStateBuilder(ctrl), +// pendingBlocks: newDisjointBlockSet(pendingBlocksLimit), +// readyBlocks: newBlockQueue(maxResponseSize), +// net: tt.networkBuilder(ctrl), +// badBlocks: []string{ +// badBlockHash.String(), +// }, +// } +// cs := newChainSync(cfg) + +// err := cs.validateResponse(tt.req, tt.resp, "") +// if tt.expectedError != nil { +// assert.EqualError(t, err, tt.expectedError.Error()) +// } else { +// assert.NoError(t, err) +// } +// }) +// } +// } + +// func TestChainSync_doSync(t *testing.T) { +// t.Parallel() + +// ctrl := gomock.NewController(t) +// readyBlocks := newBlockQueue(maxResponseSize) +// cs := newTestChainSyncWithReadyBlocks(ctrl, readyBlocks) + +// max := uint32(1) +// req := &network.BlockRequestMessage{ +// RequestedData: bootstrapRequestData, +// StartingBlock: *variadic.MustNewUint32OrHash(1), +// Direction: network.Ascending, +// Max: &max, +// } + +// mockBlockState := NewMockBlockState(ctrl) +// mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil).Times(2) +// cs.blockState = mockBlockState + +// workerErr := cs.doSync(req, make(map[peer.ID]struct{})) +// require.NotNil(t, workerErr) +// require.Equal(t, errNoPeers, workerErr.err) + +// cs.peerView["noot"] = &peerView{ +// number: 100, +// } + +// mockNetwork := NewMockNetwork(ctrl) +// startingBlock := variadic.MustNewUint32OrHash(1) +// max1 := uint32(1) +// mockNetwork.EXPECT().DoBlockRequest(peer.ID("noot"), &network.BlockRequestMessage{ +// RequestedData: 19, +// StartingBlock: *startingBlock, +// Direction: 0, +// Max: &max1, +// }) +// cs.network = mockNetwork + +// workerErr = cs.doSync(req, make(map[peer.ID]struct{})) +// require.NotNil(t, workerErr) +// require.Equal(t, errNilResponse, workerErr.err) + +// resp := &network.BlockResponseMessage{ +// BlockData: []*types.BlockData{ +// { +// Hash: common.Hash{0x1}, +// Header: &types.Header{ +// Number: 1, +// }, +// Body: &types.Body{}, +// }, +// }, +// } + +// mockNetwork = NewMockNetwork(ctrl) +// mockNetwork.EXPECT().DoBlockRequest(peer.ID("noot"), &network.BlockRequestMessage{ +// RequestedData: 19, +// StartingBlock: *startingBlock, +// Direction: 0, +// Max: &max1, +// }).Return(resp, nil) +// cs.network = mockNetwork + +// workerErr = cs.doSync(req, make(map[peer.ID]struct{})) +// require.Nil(t, workerErr) +// bd, err := readyBlocks.pop(context.Background()) +// require.NotNil(t, bd) +// require.NoError(t, err) +// require.Equal(t, resp.BlockData[0], bd) + +// parent := (&types.Header{ +// Number: 2, +// }).Hash() +// resp = &network.BlockResponseMessage{ +// BlockData: []*types.BlockData{ +// { +// Hash: common.Hash{0x3}, +// Header: &types.Header{ +// ParentHash: parent, +// Number: 3, +// }, +// Body: &types.Body{}, +// }, +// { +// Hash: common.Hash{0x2}, +// Header: &types.Header{ +// Number: 2, +// }, +// Body: &types.Body{}, +// }, +// }, +// } + +// // test to see if descending blocks get reversed +// req.Direction = network.Descending +// mockNetwork = NewMockNetwork(ctrl) +// mockNetwork.EXPECT().DoBlockRequest(peer.ID("noot"), &network.BlockRequestMessage{ +// RequestedData: 19, +// StartingBlock: *startingBlock, +// Direction: 
1, +// Max: &max1, +// }).Return(resp, nil) +// cs.network = mockNetwork +// workerErr = cs.doSync(req, make(map[peer.ID]struct{})) +// require.Nil(t, workerErr) + +// bd, err = readyBlocks.pop(context.Background()) +// require.NotNil(t, bd) +// require.Equal(t, resp.BlockData[0], bd) +// require.NoError(t, err) + +// bd, err = readyBlocks.pop(context.Background()) +// require.NotNil(t, bd) +// require.Equal(t, resp.BlockData[1], bd) +// require.NoError(t, err) +// } + +// func TestHandleReadyBlock(t *testing.T) { +// t.Parallel() + +// ctrl := gomock.NewController(t) +// readyBlocks := newBlockQueue(maxResponseSize) +// cs := newTestChainSyncWithReadyBlocks(ctrl, readyBlocks) + +// // test that descendant chain gets returned by getReadyDescendants on block 1 being ready +// header1 := &types.Header{ +// Number: 1, +// } +// block1 := &types.Block{ +// Header: *header1, +// Body: types.Body{}, +// } + +// header2 := &types.Header{ +// ParentHash: header1.Hash(), +// Number: 2, +// } +// block2 := &types.Block{ +// Header: *header2, +// Body: types.Body{}, +// } +// cs.pendingBlocks.addBlock(block2) + +// header3 := &types.Header{ +// ParentHash: header2.Hash(), +// Number: 3, +// } +// block3 := &types.Block{ +// Header: *header3, +// Body: types.Body{}, +// } +// cs.pendingBlocks.addBlock(block3) + +// header2NotDescendant := &types.Header{ +// ParentHash: common.Hash{0xff}, +// Number: 2, +// } +// block2NotDescendant := &types.Block{ +// Header: *header2NotDescendant, +// Body: types.Body{}, +// } +// cs.pendingBlocks.addBlock(block2NotDescendant) + +// cs.handleReadyBlock(block1.ToBlockData()) + +// require.False(t, cs.pendingBlocks.(*disjointBlockSet).hasBlock(header1.Hash())) +// require.False(t, cs.pendingBlocks.(*disjointBlockSet).hasBlock(header2.Hash())) +// require.False(t, cs.pendingBlocks.(*disjointBlockSet).hasBlock(header3.Hash())) +// require.True(t, cs.pendingBlocks.(*disjointBlockSet).hasBlock(header2NotDescendant.Hash())) + +// blockData1, err := readyBlocks.pop(context.Background()) +// require.NoError(t, err) +// require.Equal(t, block1.ToBlockData(), blockData1) + +// blockData2, err := readyBlocks.pop(context.Background()) +// require.NoError(t, err) +// require.Equal(t, block2.ToBlockData(), blockData2) + +// blockData3, err := readyBlocks.pop(context.Background()) +// require.NoError(t, err) +// require.Equal(t, block3.ToBlockData(), blockData3) +// } + +// func TestChainSync_determineSyncPeers(t *testing.T) { +// t.Parallel() + +// ctrl := gomock.NewController(t) +// cs := newTestChainSync(ctrl) + +// req := &network.BlockRequestMessage{} +// testPeerA := peer.ID("a") +// testPeerB := peer.ID("b") +// peersTried := make(map[peer.ID]struct{}) + +// // test base case +// cs.peerView[testPeerA] = &peerView{ +// number: 129, +// } +// cs.peerView[testPeerB] = &peerView{ +// number: 257, +// } + +// peers := cs.determineSyncPeers(req, peersTried) +// require.Equal(t, 2, len(peers)) +// require.Contains(t, peers, testPeerA) +// require.Contains(t, peers, testPeerB) + +// // test peer ignored case +// cs.ignorePeers[testPeerA] = struct{}{} +// peers = cs.determineSyncPeers(req, peersTried) +// require.Equal(t, 1, len(peers)) +// require.Equal(t, []peer.ID{testPeerB}, peers) + +// // test all peers ignored case +// cs.ignorePeers[testPeerB] = struct{}{} +// peers = cs.determineSyncPeers(req, peersTried) +// require.Equal(t, 2, len(peers)) +// require.Contains(t, peers, testPeerA) +// require.Contains(t, peers, testPeerB) +// require.Equal(t, 0, len(cs.ignorePeers)) + +// 
// test peer's best block below number case, shouldn't include that peer +// start, err := variadic.NewUint32OrHash(130) +// require.NoError(t, err) +// req.StartingBlock = *start +// peers = cs.determineSyncPeers(req, peersTried) +// require.Equal(t, 1, len(peers)) +// require.Equal(t, []peer.ID{testPeerB}, peers) + +// // test peer tried case, should ignore peer already tried +// peersTried[testPeerA] = struct{}{} +// req.StartingBlock = variadic.Uint32OrHash{} +// peers = cs.determineSyncPeers(req, peersTried) +// require.Equal(t, 1, len(peers)) +// require.Equal(t, []peer.ID{testPeerB}, peers) +// } + +// func Test_chainSync_logSyncSpeed(t *testing.T) { +// t.Parallel() + +// type fields struct { +// blockStateBuilder func(ctrl *gomock.Controller) BlockState +// networkBuilder func(ctrl *gomock.Controller) Network +// state chainSyncState +// benchmarker *syncBenchmarker +// } +// tests := []struct { +// name string +// fields fields +// }{ +// { +// name: "state_bootstrap", +// fields: fields{ +// blockStateBuilder: func(ctrl *gomock.Controller) BlockState { +// mockBlockState := NewMockBlockState(ctrl) +// mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{}, nil).Times(3) +// mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{}, nil) +// return mockBlockState +// }, +// networkBuilder: func(ctrl *gomock.Controller) Network { +// mockNetwork := NewMockNetwork(ctrl) +// mockNetwork.EXPECT().Peers().Return(nil) +// return mockNetwork +// }, +// benchmarker: newSyncBenchmarker(10), +// state: bootstrap, +// }, +// }, +// { +// name: "case_tip", +// fields: fields{ +// blockStateBuilder: func(ctrl *gomock.Controller) BlockState { +// mockBlockState := NewMockBlockState(ctrl) +// mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{}, nil).Times(3) +// mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{}, nil) +// return mockBlockState +// }, +// networkBuilder: func(ctrl *gomock.Controller) Network { +// mockNetwork := NewMockNetwork(ctrl) +// mockNetwork.EXPECT().Peers().Return(nil) +// return mockNetwork +// }, +// benchmarker: newSyncBenchmarker(10), +// state: tip, +// }, +// }, +// } +// for _, tt := range tests { +// tt := tt +// t.Run(tt.name, func(t *testing.T) { +// t.Parallel() +// ctrl := gomock.NewController(t) +// ctx, cancel := context.WithCancel(context.Background()) +// tickerChannel := make(chan time.Time) +// cs := &chainSync{ +// ctx: ctx, +// cancel: cancel, +// blockState: tt.fields.blockStateBuilder(ctrl), +// network: tt.fields.networkBuilder(ctrl), +// state: tt.fields.state, +// benchmarker: tt.fields.benchmarker, +// logSyncTickerC: tickerChannel, +// logSyncTicker: time.NewTicker(time.Hour), // just here to be stopped +// logSyncDone: make(chan struct{}), +// } + +// go cs.logSyncSpeed() + +// tickerChannel <- time.Time{} +// cs.cancel() +// <-cs.logSyncDone +// }) +// } +// } + +// func Test_chainSync_start(t *testing.T) { +// t.Parallel() + +// type fields struct { +// blockStateBuilder func(ctrl *gomock.Controller) BlockState +// disjointBlockSetBuilder func(ctrl *gomock.Controller, called chan<- struct{}) DisjointBlockSet +// benchmarker *syncBenchmarker +// } +// tests := []struct { +// name string +// fields fields +// }{ +// { +// name: "base_case", +// fields: fields{ +// blockStateBuilder: func(ctrl *gomock.Controller) BlockState { +// mockBlockState := NewMockBlockState(ctrl) +// mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{}, nil) +// return mockBlockState +// }, +// 
disjointBlockSetBuilder: func(ctrl *gomock.Controller, called chan<- struct{}) DisjointBlockSet { +// mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) +// mockDisjointBlockSet.EXPECT().run(gomock.AssignableToTypeOf(make(<-chan struct{}))). +// DoAndReturn(func(stop <-chan struct{}) { +// close(called) // test glue, ideally we would use a ready chan struct passed to run(). +// }) +// return mockDisjointBlockSet +// }, +// benchmarker: newSyncBenchmarker(1), +// }, +// }, +// } +// for _, tt := range tests { +// tt := tt +// t.Run(tt.name, func(t *testing.T) { +// t.Parallel() +// ctrl := gomock.NewController(t) +// ctx, cancel := context.WithCancel(context.Background()) +// disjointBlockSetCalled := make(chan struct{}) +// cs := &chainSync{ +// ctx: ctx, +// cancel: cancel, +// blockState: tt.fields.blockStateBuilder(ctrl), +// pendingBlocks: tt.fields.disjointBlockSetBuilder(ctrl, disjointBlockSetCalled), +// benchmarker: tt.fields.benchmarker, +// slotDuration: time.Hour, +// logSyncTicker: time.NewTicker(time.Hour), // just here to be closed +// logSyncDone: make(chan struct{}), +// } +// cs.start() +// <-disjointBlockSetCalled +// cs.stop() +// }) +// } +// } + +// func Test_chainSync_setBlockAnnounce(t *testing.T) { +// t.Parallel() + +// type args struct { +// from peer.ID +// header *types.Header +// } +// tests := map[string]struct { +// chainSyncBuilder func(*types.Header, *gomock.Controller) chainSync +// args args +// wantErr error +// }{ +// "base_case": { +// wantErr: blocktree.ErrBlockExists, +// args: args{ +// header: &types.Header{Number: 2}, +// }, +// chainSyncBuilder: func(_ *types.Header, ctrl *gomock.Controller) chainSync { +// mockBlockState := NewMockBlockState(ctrl) +// mockBlockState.EXPECT().HasHeader(common.MustHexToHash( +// "0x05bdcc454f60a08d427d05e7f19f240fdc391f570ab76fcb96ecca0b5823d3bf")).Return(true, nil) +// mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) +// return chainSync{ +// blockState: mockBlockState, +// pendingBlocks: mockDisjointBlockSet, +// } +// }, +// }, +// "err_when_calling_has_header": { +// wantErr: errors.New("checking header exists"), +// args: args{ +// header: &types.Header{Number: 2}, +// }, +// chainSyncBuilder: func(_ *types.Header, ctrl *gomock.Controller) chainSync { +// mockBlockState := NewMockBlockState(ctrl) +// mockBlockState.EXPECT(). +// HasHeader(common.MustHexToHash( +// "0x05bdcc454f60a08d427d05e7f19f240fdc391f570ab76fcb96ecca0b5823d3bf")). +// Return(false, errors.New("checking header exists")) +// mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) +// return chainSync{ +// blockState: mockBlockState, +// pendingBlocks: mockDisjointBlockSet, +// } +// }, +// }, +// "adding_block_header_to_pending_blocks": { +// args: args{ +// header: &types.Header{Number: 2}, +// }, +// chainSyncBuilder: func(expectedHeader *types.Header, ctrl *gomock.Controller) chainSync { +// argumentHeaderHash := common.MustHexToHash( +// "0x05bdcc454f60a08d427d05e7f19f240fdc391f570ab76fcb96ecca0b5823d3bf") + +// mockBlockState := NewMockBlockState(ctrl) +// mockBlockState.EXPECT(). +// HasHeader(argumentHeaderHash). +// Return(false, nil) + +// mockBlockState.EXPECT(). +// BestBlockHeader(). +// Return(&types.Header{Number: 1}, nil) + +// mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) +// mockDisjointBlockSet.EXPECT(). +// addHeader(expectedHeader). +// Return(nil) + +// mockDisjointBlockSet.EXPECT(). +// addHashAndNumber(argumentHeaderHash, uint(2)). 
+// Return(nil) + +// return chainSync{ +// blockState: mockBlockState, +// pendingBlocks: mockDisjointBlockSet, +// peerView: make(map[peer.ID]*peerView), +// // creating an buffered channel for this specific test +// // since it will put a work on the queue and an unbufered channel +// // will hang until we read on this channel and the goal is to +// // put the work on the channel and don't block +// workQueue: make(chan *peerView, 1), +// } +// }, +// }, +// } +// for name, tt := range tests { +// tt := tt +// t.Run(name, func(t *testing.T) { +// t.Parallel() +// ctrl := gomock.NewController(t) +// sync := tt.chainSyncBuilder(tt.args.header, ctrl) +// err := sync.setBlockAnnounce(tt.args.from, tt.args.header) +// if tt.wantErr != nil { +// assert.EqualError(t, err, tt.wantErr.Error()) +// } else { +// assert.NoError(t, err) +// } + +// if sync.workQueue != nil { +// assert.Equal(t, len(sync.workQueue), 1) +// } +// }) +// } +// } + +// func Test_chainSync_getHighestBlock(t *testing.T) { +// t.Parallel() + +// tests := []struct { +// name string +// peerState map[peer.ID]*peerView +// wantHighestBlock uint +// expectedError error +// }{ +// { +// name: "error no peers", +// expectedError: errors.New("no peers to sync with"), +// }, +// { +// name: "base case", +// peerState: map[peer.ID]*peerView{"1": {number: 2}}, +// wantHighestBlock: 2, +// }, +// } +// for _, tt := range tests { +// tt := tt +// t.Run(tt.name, func(t *testing.T) { +// t.Parallel() +// cs := &chainSync{ +// peerView: tt.peerState, +// } +// gotHighestBlock, err := cs.getHighestBlock() +// if tt.expectedError != nil { +// assert.EqualError(t, err, tt.expectedError.Error()) +// } else { +// assert.NoError(t, err) +// } +// assert.Equal(t, tt.wantHighestBlock, gotHighestBlock) +// }) +// } +// } + +// func Test_chainSync_handleResult(t *testing.T) { +// t.Parallel() +// mockError := errors.New("test mock error") +// tests := map[string]struct { +// chainSyncBuilder func(ctrl *gomock.Controller, result *worker) chainSync +// maxWorkerRetries uint16 +// res *worker +// err error +// }{ +// "res.err_==_nil": { +// chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { +// return chainSync{ +// workerState: newWorkerState(), +// } +// }, +// res: &worker{}, +// }, +// "res.err.err.Error()_==_context.Canceled": { +// chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { +// return chainSync{ +// workerState: newWorkerState(), +// } +// }, +// res: &worker{ +// ctx: context.Background(), +// err: &workerError{ +// err: context.Canceled, +// }, +// }, +// }, +// "res.err.err.Error()_==_context.DeadlineExceeded": { +// chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { +// mockNetwork := NewMockNetwork(ctrl) +// mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{Value: -1024, Reason: "Request timeout"}, +// peer.ID("")) +// mockWorkHandler := NewMockworkHandler(ctrl) +// mockWorkHandler.EXPECT().handleWorkerResult(result).Return(result, nil) +// return chainSync{ +// workerState: newWorkerState(), +// network: mockNetwork, +// handler: mockWorkHandler, +// } +// }, +// res: &worker{ +// ctx: context.Background(), +// err: &workerError{ +// err: context.DeadlineExceeded, +// }, +// }, +// }, +// "res.err.err.Error()_dial_backoff": { +// chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { +// return chainSync{ +// workerState: newWorkerState(), +// } +// }, +// res: &worker{ +// ctx: context.Background(), +// err: &workerError{ +// 
err: errors.New("dial backoff"), +// }, +// }, +// }, +// "res.err.err.Error()_==_errNoPeers": { +// chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { +// return chainSync{ +// workerState: newWorkerState(), +// } +// }, +// res: &worker{ +// ctx: context.Background(), +// err: &workerError{ +// err: errNoPeers, +// }, +// }, +// }, +// "res.err.err.Error()_==_protocol_not_supported": { +// chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { +// mockNetwork := NewMockNetwork(ctrl) +// mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{Value: -2147483648, +// Reason: "Unsupported protocol"}, +// peer.ID("")) +// return chainSync{ +// workerState: newWorkerState(), +// network: mockNetwork, +// } +// }, +// res: &worker{ +// ctx: context.Background(), +// err: &workerError{ +// err: errors.New("protocol not supported"), +// }, +// }, +// }, +// "no_error,_no_retries": { +// chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { +// mockWorkHandler := NewMockworkHandler(ctrl) +// mockWorkHandler.EXPECT().handleWorkerResult(result).Return(result, nil) +// return chainSync{ +// workerState: newWorkerState(), +// handler: mockWorkHandler, +// } +// }, +// res: &worker{ +// ctx: context.Background(), +// err: &workerError{ +// err: errors.New(""), +// }, +// }, +// }, +// "handle_work_result_error,_no_retries": { +// chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { +// mockWorkHandler := NewMockworkHandler(ctrl) +// mockWorkHandler.EXPECT().handleWorkerResult(result).Return(nil, mockError) +// return chainSync{ +// workerState: newWorkerState(), +// handler: mockWorkHandler, +// } +// }, +// res: &worker{ +// ctx: context.Background(), +// err: &workerError{ +// err: errors.New(""), +// }, +// }, +// err: mockError, +// }, +// "handle_work_result_nil,_no_retries": { +// chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { +// mockWorkHandler := NewMockworkHandler(ctrl) +// mockWorkHandler.EXPECT().handleWorkerResult(result).Return(nil, nil) +// return chainSync{ +// workerState: newWorkerState(), +// handler: mockWorkHandler, +// } +// }, +// res: &worker{ +// ctx: context.Background(), +// err: &workerError{ +// err: errors.New(""), +// }, +// }, +// }, +// "no_error,_maxWorkerRetries_2": { +// chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { +// mockWorkHandler := NewMockworkHandler(ctrl) +// mockWorkHandler.EXPECT().handleWorkerResult(result).Return(result, nil) +// mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) +// mockDisjointBlockSet.EXPECT().removeBlock(common.Hash{}) +// return chainSync{ +// workerState: newWorkerState(), +// handler: mockWorkHandler, +// pendingBlocks: mockDisjointBlockSet, +// } +// }, +// maxWorkerRetries: 2, +// res: &worker{ +// ctx: context.Background(), +// err: &workerError{ +// err: errors.New(""), +// }, +// pendingBlock: newPendingBlock(common.Hash{}, 1, nil, nil, time.Now()), +// }, +// }, +// "no_error": { +// chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { +// mockWorkHandler := NewMockworkHandler(ctrl) +// mockWorkHandler.EXPECT().handleWorkerResult(result).Return(result, nil) +// mockWorkHandler.EXPECT().hasCurrentWorker(&worker{ +// ctx: context.Background(), +// err: &workerError{ +// err: mockError, +// }, +// retryCount: 1, +// peersTried: map[peer.ID]struct{}{ +// "": {}, +// }, +// }, newWorkerState().workers).Return(true) +// return chainSync{ +// workerState: 
newWorkerState(), +// handler: mockWorkHandler, +// maxWorkerRetries: 2, +// } +// }, +// res: &worker{ +// ctx: context.Background(), +// err: &workerError{ +// err: mockError, +// }, +// }, +// }, +// } +// for testName, tt := range tests { +// tt := tt +// t.Run(testName, func(t *testing.T) { +// t.Parallel() +// ctrl := gomock.NewController(t) +// sync := tt.chainSyncBuilder(ctrl, tt.res) +// err := sync.handleResult(tt.res) +// if tt.err != nil { +// assert.EqualError(t, err, tt.err.Error()) +// } else { +// assert.NoError(t, err) +// } +// }) +// } +// } + +// func newTestChainSyncWithReadyBlocks(ctrl *gomock.Controller) *chainSync { +// mockBlockState := NewMockBlockState(ctrl) +// mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + +// cfg := chainSyncConfig{ +// bs: mockBlockState, +// pendingBlocks: newDisjointBlockSet(pendingBlocksLimit), +// minPeers: 1, +// maxPeers: 5, +// slotDuration: defaultSlotDuration, +// } + +// return newChainSync(cfg) +// } + +// func newTestChainSync(ctrl *gomock.Controller) *chainSync { +// return newTestChainSyncWithReadyBlocks(ctrl) +// } diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index c2d6d7234e..6fe5061444 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -1,1630 +1,53 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - package sync import ( - "time" - - "context" - "errors" "testing" "time" - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/dot/peerset" "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/blocktree" "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/common/variadic" - "github.com/ChainSafe/gossamer/lib/trie" "github.com/golang/mock/gomock" "github.com/libp2p/go-libp2p/core/peer" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) -const defaultSlotDuration = 6 * time.Second - -func Test_chainSyncState_String(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - s chainSyncState - want string - }{ - { - name: "case_bootstrap", - s: bootstrap, - want: "bootstrap", - }, - { - name: "case_tip", - s: tip, - want: "tip", - }, - { - name: "case_unknown", - s: 3, - want: "unknown", - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - got := tt.s.String() - assert.Equal(t, tt.want, got) - }) - } -} - -func Test_chainSync_setPeerHead(t *testing.T) { - t.Parallel() - - errTest := errors.New("test error") - const somePeer = peer.ID("abc") - someHash := common.Hash{1, 2, 3, 4} - - testCases := map[string]struct { - chainSyncBuilder func(ctrl *gomock.Controller) *chainSync - peerID peer.ID - hash common.Hash - number uint - errWrapped error - errMessage string - expectedPeerIDToPeerState map[peer.ID]*peerView - expectedQueuedPeerStates []*peerView - }{ - "best_block_header_error": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - blockState.EXPECT().BestBlockHeader().Return(nil, errTest) - return &chainSync{ - peerView: map[peer.ID]*peerView{}, - blockState: blockState, - } - }, - peerID: somePeer, - hash: someHash, - number: 1, - errWrapped: errTest, - errMessage: "best block header: test error", - expectedPeerIDToPeerState: map[peer.ID]*peerView{ - somePeer: { - who: somePeer, - hash: someHash, - number: 1, - }, - }, - }, - 
"number_smaller_than_best_block_number_get_hash_by_number_error": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 2} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - blockState.EXPECT().GetHashByNumber(uint(1)). - Return(common.Hash{}, errTest) - return &chainSync{ - peerView: map[peer.ID]*peerView{}, - blockState: blockState, - } - }, - peerID: somePeer, - hash: someHash, - number: 1, - errWrapped: errTest, - errMessage: "get block hash by number: test error", - expectedPeerIDToPeerState: map[peer.ID]*peerView{ - somePeer: { - who: somePeer, - hash: someHash, - number: 1, - }, - }, - }, - "number_smaller_than_best_block_number_and_same_hash": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 2} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - blockState.EXPECT().GetHashByNumber(uint(1)).Return(someHash, nil) - return &chainSync{ - peerView: map[peer.ID]*peerView{}, - blockState: blockState, - } - }, - peerID: somePeer, - hash: someHash, - number: 1, - expectedPeerIDToPeerState: map[peer.ID]*peerView{ - somePeer: { - who: somePeer, - hash: someHash, - number: 1, - }, - }, - }, - "number_smaller_than_best_block_number_get_highest_finalised_header_error": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 2} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - blockState.EXPECT().GetHashByNumber(uint(1)). - Return(common.Hash{2}, nil) // other hash than someHash - blockState.EXPECT().GetHighestFinalisedHeader().Return(nil, errTest) - return &chainSync{ - peerView: map[peer.ID]*peerView{}, - blockState: blockState, - } - }, - peerID: somePeer, - hash: someHash, - number: 1, - errWrapped: errTest, - errMessage: "get highest finalised header: test error", - expectedPeerIDToPeerState: map[peer.ID]*peerView{ - somePeer: { - who: somePeer, - hash: someHash, - number: 1, - }, - }, - }, - "number_smaller_than_best_block_number_and_finalised_number_equal_than_number": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 2} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - blockState.EXPECT().GetHashByNumber(uint(1)). 
- Return(common.Hash{2}, nil) // other hash than someHash - finalisedBlockHeader := &types.Header{Number: 1} - blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) - network := NewMockNetwork(ctrl) - network.EXPECT().ReportPeer(peerset.ReputationChange{ - Value: peerset.BadBlockAnnouncementValue, - Reason: peerset.BadBlockAnnouncementReason, - }, somePeer) - return &chainSync{ - peerView: map[peer.ID]*peerView{}, - blockState: blockState, - network: network, - } - }, - peerID: somePeer, - hash: someHash, - number: 1, - errWrapped: errPeerOnInvalidFork, - errMessage: "peer is on an invalid fork: for peer ZiCa and block number 1", - expectedPeerIDToPeerState: map[peer.ID]*peerView{ - somePeer: { - who: somePeer, - hash: someHash, - number: 1, - }, - }, - }, - "number_smaller_than_best_block_number_and_finalised_number_bigger_than_number": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 2} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - blockState.EXPECT().GetHashByNumber(uint(1)). - Return(common.Hash{2}, nil) // other hash than someHash - finalisedBlockHeader := &types.Header{Number: 2} - blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) - network := NewMockNetwork(ctrl) - network.EXPECT().ReportPeer(peerset.ReputationChange{ - Value: peerset.BadBlockAnnouncementValue, - Reason: peerset.BadBlockAnnouncementReason, - }, somePeer) - return &chainSync{ - peerView: map[peer.ID]*peerView{}, - blockState: blockState, - network: network, - } - }, - peerID: somePeer, - hash: someHash, - number: 1, - errWrapped: errPeerOnInvalidFork, - errMessage: "peer is on an invalid fork: for peer ZiCa and block number 1", - expectedPeerIDToPeerState: map[peer.ID]*peerView{ - somePeer: { - who: somePeer, - hash: someHash, - number: 1, - }, - }, - }, - "number smaller than best block number and " + - "finalised number smaller than number and " + - "has_header_error": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 3} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - blockState.EXPECT().GetHashByNumber(uint(2)). - Return(common.Hash{2}, nil) // other hash than someHash - finalisedBlockHeader := &types.Header{Number: 1} - blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) - blockState.EXPECT().HasHeader(someHash).Return(false, errTest) - return &chainSync{ - peerView: map[peer.ID]*peerView{}, - blockState: blockState, - } - }, - peerID: somePeer, - hash: someHash, - number: 2, - errWrapped: errTest, - errMessage: "has header: test error", - expectedPeerIDToPeerState: map[peer.ID]*peerView{ - somePeer: { - who: somePeer, - hash: someHash, - number: 2, - }, - }, - }, - "number smaller than best block number and " + - "finalised number smaller than number and " + - "has_the_hash": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 3} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - blockState.EXPECT().GetHashByNumber(uint(2)). 
- Return(common.Hash{2}, nil) // other hash than someHash - finalisedBlockHeader := &types.Header{Number: 1} - blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) - blockState.EXPECT().HasHeader(someHash).Return(true, nil) - return &chainSync{ - peerView: map[peer.ID]*peerView{}, - blockState: blockState, - } - }, - peerID: somePeer, - hash: someHash, - number: 2, - expectedPeerIDToPeerState: map[peer.ID]*peerView{ - somePeer: { - who: somePeer, - hash: someHash, - number: 2, - }, - }, - }, - "number_bigger_than_the_head_number_add_hash_and_number_error": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 1} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - pendingBlocks := NewMockDisjointBlockSet(ctrl) - pendingBlocks.EXPECT().addHashAndNumber(someHash, uint(2)). - Return(errTest) - return &chainSync{ - peerView: map[peer.ID]*peerView{}, - blockState: blockState, - pendingBlocks: pendingBlocks, - } - }, - peerID: somePeer, - hash: someHash, - number: 2, - errWrapped: errTest, - errMessage: "add hash and number: test error", - expectedPeerIDToPeerState: map[peer.ID]*peerView{ - somePeer: { - who: somePeer, - hash: someHash, - number: 2, - }, - }, - }, - "number_bigger_than_the_head_number_success": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 1} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - pendingBlocks := NewMockDisjointBlockSet(ctrl) - pendingBlocks.EXPECT().addHashAndNumber(someHash, uint(2)). - Return(nil) - return &chainSync{ - peerView: map[peer.ID]*peerView{}, - blockState: blockState, - pendingBlocks: pendingBlocks, - // buffered of 1 so setPeerHead can write to it - // without a consumer of the channel on the other end. 
- workQueue: make(chan *peerView, 1), - } - }, - peerID: somePeer, - hash: someHash, - number: 2, - expectedPeerIDToPeerState: map[peer.ID]*peerView{ - somePeer: { - who: somePeer, - hash: someHash, - number: 2, - }, - }, - expectedQueuedPeerStates: []*peerView{ - { - who: somePeer, - hash: someHash, - number: 2, - }, - }, - }, - } - - for name, testCase := range testCases { - testCase := testCase - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - - chainSync := testCase.chainSyncBuilder(ctrl) - - err := chainSync.setPeerHead(testCase.peerID, testCase.hash, testCase.number) - - assert.ErrorIs(t, err, testCase.errWrapped) - if testCase.errWrapped != nil { - assert.EqualError(t, err, testCase.errMessage) - } - assert.Equal(t, testCase.expectedPeerIDToPeerState, chainSync.peerView) - - require.Equal(t, len(testCase.expectedQueuedPeerStates), len(chainSync.workQueue)) - for _, expectedPeerState := range testCase.expectedQueuedPeerStates { - peerState := <-chainSync.workQueue - assert.Equal(t, expectedPeerState, peerState) - } - }) - } -} - -func TestChainSync_sync_bootstrap_withWorkerError(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - cs := newTestChainSync(ctrl) - mockBlockState := NewMockBlockState(ctrl) - mockHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, trie.EmptyHash, 0, - types.NewDigest()) - mockBlockState.EXPECT().BestBlockHeader().Return(mockHeader, nil).Times(2) - cs.blockState = mockBlockState - cs.handler = newBootstrapSyncer(mockBlockState) - - mockNetwork := NewMockNetwork(ctrl) - startingBlock := variadic.MustNewUint32OrHash(1) - max := uint32(128) - mockNetwork.EXPECT().DoBlockRequest(peer.ID("noot"), &network.BlockRequestMessage{ - RequestedData: 19, - StartingBlock: *startingBlock, - Direction: 0, - Max: &max, - }) - cs.network = mockNetwork - - go cs.sync() - defer cs.cancel() - - testPeer := peer.ID("noot") - cs.peerView[testPeer] = &peerView{ - number: 1000, - } - - cs.workQueue <- cs.peerView[testPeer] - - select { - case res := <-cs.resultQueue: - expected := &workerError{ - err: errNilResponse, // since MockNetwork returns a nil response - who: testPeer, - } - require.Equal(t, expected, res.err) - case <-time.After(5 * time.Second): - t.Fatal("did not get worker response") - } - - require.Equal(t, bootstrap, cs.state) -} - -func TestChainSync_sync_tip(t *testing.T) { - t.Parallel() - - done := make(chan struct{}) - - ctrl := gomock.NewController(t) - cs := newTestChainSync(ctrl) - header := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, trie.EmptyHash, 1000, - types.NewDigest()) - - bs := NewMockBlockState(ctrl) - bs.EXPECT().BestBlockHeader().Return(header, nil) - bs.EXPECT().GetHighestFinalisedHeader().DoAndReturn(func() (*types.Header, error) { - close(done) - return header, nil - }) - cs.blockState = bs - - go cs.sync() - defer cs.cancel() - - testPeer := peer.ID("noot") - cs.peerView[testPeer] = &peerView{ - number: 999, - } - - cs.workQueue <- cs.peerView[testPeer] - <-done - require.Equal(t, tip, cs.state) -} - -func TestChainSync_getTarget(t *testing.T) { - ctrl := gomock.NewController(t) - cs := newTestChainSync(ctrl) - require.Equal(t, uint(1<<32-1), cs.getTarget()) - cs.peerView = map[peer.ID]*peerView{ - "a": { - number: 0, // outlier - }, - "b": { - number: 110, - }, - "c": { - number: 120, - }, - "d": { - number: 130, - }, - "e": { - number: 140, - }, - "f": { - number: 150, - }, - "g": { - number: 1000, // outlier - }, - } - - require.Equal(t, uint(130), 
cs.getTarget()) // sum:650/count:5= avg:130 - - cs.peerView = map[peer.ID]*peerView{ - "testA": { - number: 1000, - }, - "testB": { - number: 2000, - }, - } - - require.Equal(t, uint(1500), cs.getTarget()) -} - -func TestWorkerToRequests(t *testing.T) { - t.Parallel() - - w := &worker{ - startNumber: uintPtr(10), - targetNumber: uintPtr(1), - direction: network.Ascending, - } - _, err := workerToRequests(w) - require.Equal(t, errInvalidDirection, err) - - type testCase struct { - w *worker - expected []*network.BlockRequestMessage - } - - var ( - max128 = uint32(128) - max9 = uint32(9) - max64 = uint32(64) - ) - - testCases := map[string]testCase{ - "test_0": { - w: &worker{ - startNumber: uintPtr(1), - targetNumber: uintPtr(1 + maxResponseSize), - direction: network.Ascending, - requestData: bootstrapRequestData, - }, - expected: []*network.BlockRequestMessage{ - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(1), - Direction: network.Ascending, - Max: &max128, - }, - }, - }, - "test_1": { - w: &worker{ - startNumber: uintPtr(1), - targetNumber: uintPtr(1 + (maxResponseSize * 2)), - direction: network.Ascending, - requestData: bootstrapRequestData, - }, - expected: []*network.BlockRequestMessage{ - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(1), - Direction: network.Ascending, - Max: &max128, - }, - { - RequestedData: network.RequestedDataHeader + network.RequestedDataBody + network.RequestedDataJustification, - StartingBlock: *variadic.MustNewUint32OrHash(1 + maxResponseSize), - Direction: network.Ascending, - Max: &max128, - }, - }, - }, - "test_2": { - w: &worker{ - startNumber: uintPtr(1), - targetNumber: uintPtr(10), - direction: network.Ascending, - requestData: bootstrapRequestData, - }, - expected: []*network.BlockRequestMessage{ - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(1), - Direction: network.Ascending, - Max: &max128, - }, - }, - }, - "test_3": { - w: &worker{ - startNumber: uintPtr(10), - targetNumber: uintPtr(1), - direction: network.Descending, - requestData: bootstrapRequestData, - }, - expected: []*network.BlockRequestMessage{ - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(10), - Direction: network.Descending, - Max: &max9, - }, - }, - }, - "test_4": { - w: &worker{ - startNumber: uintPtr(1), - targetNumber: uintPtr(1 + maxResponseSize + (maxResponseSize / 2)), - direction: network.Ascending, - requestData: bootstrapRequestData, - }, - expected: []*network.BlockRequestMessage{ - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(1), - Direction: network.Ascending, - Max: &max128, - }, - { - RequestedData: network.RequestedDataHeader + network.RequestedDataBody + network.RequestedDataJustification, - StartingBlock: *variadic.MustNewUint32OrHash(1 + maxResponseSize), - Direction: network.Ascending, - Max: &max128, - }, - }, - }, - "test_5": { - w: &worker{ - startNumber: uintPtr(1), - targetNumber: uintPtr(10), - targetHash: common.Hash{0xa}, - direction: network.Ascending, - requestData: bootstrapRequestData, - }, - expected: []*network.BlockRequestMessage{ - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(1), - Direction: network.Ascending, - Max: &max128, - }, - }, - }, - "test_6": { - w: &worker{ - startNumber: uintPtr(1), - startHash: common.Hash{0xb}, - targetNumber: uintPtr(10), - targetHash: common.Hash{0xc}, - 
direction: network.Ascending, - requestData: bootstrapRequestData, - }, - expected: []*network.BlockRequestMessage{ - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(common.Hash{0xb}), - Direction: network.Ascending, - Max: &max128, - }, - }, - }, - "test_7": { - w: &worker{ - startNumber: uintPtr(10), - targetNumber: uintPtr(10), - direction: network.Ascending, - requestData: bootstrapRequestData, - }, - expected: []*network.BlockRequestMessage{ - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(10), - Direction: network.Ascending, - Max: &max128, - }, - }, - }, - "test_8": { - w: &worker{ - startNumber: uintPtr(1 + maxResponseSize + (maxResponseSize / 2)), - targetNumber: uintPtr(1), - direction: network.Descending, - requestData: bootstrapRequestData, - }, - expected: []*network.BlockRequestMessage{ - { - RequestedData: network.RequestedDataHeader + network.RequestedDataBody + network.RequestedDataJustification, - StartingBlock: *variadic.MustNewUint32OrHash(1 + (maxResponseSize / 2)), - Direction: network.Descending, - Max: &max64, - }, - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(1 + maxResponseSize + (maxResponseSize / 2)), - Direction: network.Descending, - Max: &max128, - }, - }, - }, - } - - for name, tc := range testCases { - tc := tc - t.Run(name, func(t *testing.T) { - t.Parallel() - reqs, err := workerToRequests(tc.w) - require.NoError(t, err) - require.Equal(t, tc.expected, reqs) - }) - } -} - -func TestChainSync_validateResponse(t *testing.T) { - t.Parallel() - badBlockHash := common.NewHash([]byte("badblockhash")) - - tests := map[string]struct { - blockStateBuilder func(ctrl *gomock.Controller) BlockState - networkBuilder func(ctrl *gomock.Controller) Network - req *network.BlockRequestMessage - resp *network.BlockResponseMessage - expectedError error - }{ - "nil_req,_nil_resp": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - return mockBlockState - }, - networkBuilder: func(ctrl *gomock.Controller) Network { - return NewMockNetwork(ctrl) - }, - expectedError: errEmptyBlockData, - }, - "handle_error_response_is_not_chain,_has_header": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil) - return mockBlockState - }, - networkBuilder: func(ctrl *gomock.Controller) Network { - return NewMockNetwork(ctrl) - }, - req: &network.BlockRequestMessage{ - RequestedData: network.RequestedDataHeader, - }, - resp: &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Header: &types.Header{ - Number: 1, - }, - Body: &types.Body{}, - }, - { - Header: &types.Header{ - Number: 2, - }, - Body: &types.Body{}, - }, - }, - }, - expectedError: errResponseIsNotChain, - }, - "handle_justification-only_request,_unknown_block": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - return mockBlockState - }, - networkBuilder: func(ctrl *gomock.Controller) 
Network { - mockNetwork := NewMockNetwork(ctrl) - mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{ - Value: peerset.BadJustificationValue, - Reason: peerset.BadJustificationReason, - }, peer.ID("")) - return mockNetwork - }, - req: &network.BlockRequestMessage{ - RequestedData: network.RequestedDataJustification, - }, - resp: &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Justification: &[]byte{0}, - }, - }, - }, - expectedError: errUnknownBlockForJustification, - }, - "handle_error_unknown_parent": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - return mockBlockState - }, - networkBuilder: func(ctrl *gomock.Controller) Network { - return NewMockNetwork(ctrl) - }, - req: &network.BlockRequestMessage{ - RequestedData: network.RequestedDataHeader, - }, - resp: &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Header: &types.Header{ - Number: 1, - }, - Body: &types.Body{}, - }, - { - Header: &types.Header{ - Number: 2, - }, - Body: &types.Body{}, - }, - }, - }, - expectedError: errUnknownParent, - }, - "handle_error_bad_block": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - return mockBlockState - }, - networkBuilder: func(ctrl *gomock.Controller) Network { - return NewMockNetwork(ctrl) - }, - req: &network.BlockRequestMessage{ - RequestedData: network.RequestedDataHeader, - }, - resp: &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Hash: badBlockHash, - Header: &types.Header{ - Number: 2, - }, - Body: &types.Body{}, - }, - }, - }, - expectedError: errBadBlock, - }, - "no_error": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil) - return mockBlockState - }, - networkBuilder: func(ctrl *gomock.Controller) Network { - return NewMockNetwork(ctrl) - }, - req: &network.BlockRequestMessage{ - RequestedData: network.RequestedDataHeader, - }, - resp: &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Header: &types.Header{ - Number: 2, - }, - Body: &types.Body{}, - }, - { - Header: &types.Header{ - ParentHash: (&types.Header{ - Number: 2, - }).Hash(), - Number: 3, - }, - Body: &types.Body{}, - }, - }, - }, - }, - } - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - - cfg := chainSyncConfig{ - bs: tt.blockStateBuilder(ctrl), - pendingBlocks: newDisjointBlockSet(pendingBlocksLimit), - readyBlocks: newBlockQueue(maxResponseSize), - net: tt.networkBuilder(ctrl), - badBlocks: []string{ - badBlockHash.String(), - }, - } - cs := newChainSync(cfg) - - err := cs.validateResponse(tt.req, tt.resp, "") - if tt.expectedError != nil { - assert.EqualError(t, err, tt.expectedError.Error()) - } else { - assert.NoError(t, err) - } - }) - } -} - -func TestChainSync_doSync(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - readyBlocks := newBlockQueue(maxResponseSize) - cs := newTestChainSyncWithReadyBlocks(ctrl, 
readyBlocks) - - max := uint32(1) - req := &network.BlockRequestMessage{ - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(1), - Direction: network.Ascending, - Max: &max, - } - - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil).Times(2) - cs.blockState = mockBlockState - - workerErr := cs.doSync(req, make(map[peer.ID]struct{})) - require.NotNil(t, workerErr) - require.Equal(t, errNoPeers, workerErr.err) - - cs.peerView["noot"] = &peerView{ - number: 100, - } +func TestChainSync_setPeerHead(t *testing.T) { + const randomHashString = "0x580d77a9136035a0bc3c3cd86286172f7f81291164c5914266073a30466fba21" + randomHash := common.MustHexToHash(randomHashString) - mockNetwork := NewMockNetwork(ctrl) - startingBlock := variadic.MustNewUint32OrHash(1) - max1 := uint32(1) - mockNetwork.EXPECT().DoBlockRequest(peer.ID("noot"), &network.BlockRequestMessage{ - RequestedData: 19, - StartingBlock: *startingBlock, - Direction: 0, - Max: &max1, - }) - cs.network = mockNetwork - - workerErr = cs.doSync(req, make(map[peer.ID]struct{})) - require.NotNil(t, workerErr) - require.Equal(t, errNilResponse, workerErr.err) - - resp := &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Hash: common.Hash{0x1}, - Header: &types.Header{ - Number: 1, - }, - Body: &types.Body{}, - }, - }, - } - - mockNetwork = NewMockNetwork(ctrl) - mockNetwork.EXPECT().DoBlockRequest(peer.ID("noot"), &network.BlockRequestMessage{ - RequestedData: 19, - StartingBlock: *startingBlock, - Direction: 0, - Max: &max1, - }).Return(resp, nil) - cs.network = mockNetwork - - workerErr = cs.doSync(req, make(map[peer.ID]struct{})) - require.Nil(t, workerErr) - bd, err := readyBlocks.pop(context.Background()) - require.NotNil(t, bd) - require.NoError(t, err) - require.Equal(t, resp.BlockData[0], bd) - - parent := (&types.Header{ - Number: 2, - }).Hash() - resp = &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Hash: common.Hash{0x3}, - Header: &types.Header{ - ParentHash: parent, - Number: 3, - }, - Body: &types.Body{}, - }, - { - Hash: common.Hash{0x2}, - Header: &types.Header{ - Number: 2, - }, - Body: &types.Body{}, - }, - }, - } - - // test to see if descending blocks get reversed - req.Direction = network.Descending - mockNetwork = NewMockNetwork(ctrl) - mockNetwork.EXPECT().DoBlockRequest(peer.ID("noot"), &network.BlockRequestMessage{ - RequestedData: 19, - StartingBlock: *startingBlock, - Direction: 1, - Max: &max1, - }).Return(resp, nil) - cs.network = mockNetwork - workerErr = cs.doSync(req, make(map[peer.ID]struct{})) - require.Nil(t, workerErr) - - bd, err = readyBlocks.pop(context.Background()) - require.NotNil(t, bd) - require.Equal(t, resp.BlockData[0], bd) - require.NoError(t, err) - - bd, err = readyBlocks.pop(context.Background()) - require.NotNil(t, bd) - require.Equal(t, resp.BlockData[1], bd) - require.NoError(t, err) -} - -func TestHandleReadyBlock(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - readyBlocks := newBlockQueue(maxResponseSize) - cs := newTestChainSyncWithReadyBlocks(ctrl, readyBlocks) - - // test that descendant chain gets returned by getReadyDescendants on block 1 being ready - header1 := &types.Header{ - Number: 1, - } - block1 := &types.Block{ - Header: *header1, - Body: types.Body{}, - } - - header2 := &types.Header{ - ParentHash: header1.Hash(), - Number: 2, - } - block2 := &types.Block{ - Header: *header2, - Body: types.Body{}, - } - 
cs.pendingBlocks.addBlock(block2) - - header3 := &types.Header{ - ParentHash: header2.Hash(), - Number: 3, - } - block3 := &types.Block{ - Header: *header3, - Body: types.Body{}, - } - cs.pendingBlocks.addBlock(block3) - - header2NotDescendant := &types.Header{ - ParentHash: common.Hash{0xff}, - Number: 2, - } - block2NotDescendant := &types.Block{ - Header: *header2NotDescendant, - Body: types.Body{}, - } - cs.pendingBlocks.addBlock(block2NotDescendant) - - cs.handleReadyBlock(block1.ToBlockData()) - - require.False(t, cs.pendingBlocks.(*disjointBlockSet).hasBlock(header1.Hash())) - require.False(t, cs.pendingBlocks.(*disjointBlockSet).hasBlock(header2.Hash())) - require.False(t, cs.pendingBlocks.(*disjointBlockSet).hasBlock(header3.Hash())) - require.True(t, cs.pendingBlocks.(*disjointBlockSet).hasBlock(header2NotDescendant.Hash())) - - blockData1, err := readyBlocks.pop(context.Background()) - require.NoError(t, err) - require.Equal(t, block1.ToBlockData(), blockData1) - - blockData2, err := readyBlocks.pop(context.Background()) - require.NoError(t, err) - require.Equal(t, block2.ToBlockData(), blockData2) - - blockData3, err := readyBlocks.pop(context.Background()) - require.NoError(t, err) - require.Equal(t, block3.ToBlockData(), blockData3) -} - -func TestChainSync_determineSyncPeers(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - cs := newTestChainSync(ctrl) - - req := &network.BlockRequestMessage{} - testPeerA := peer.ID("a") - testPeerB := peer.ID("b") - peersTried := make(map[peer.ID]struct{}) - - // test base case - cs.peerView[testPeerA] = &peerView{ - number: 129, - } - cs.peerView[testPeerB] = &peerView{ - number: 257, - } - - peers := cs.determineSyncPeers(req, peersTried) - require.Equal(t, 2, len(peers)) - require.Contains(t, peers, testPeerA) - require.Contains(t, peers, testPeerB) - - // test peer ignored case - cs.ignorePeers[testPeerA] = struct{}{} - peers = cs.determineSyncPeers(req, peersTried) - require.Equal(t, 1, len(peers)) - require.Equal(t, []peer.ID{testPeerB}, peers) - - // test all peers ignored case - cs.ignorePeers[testPeerB] = struct{}{} - peers = cs.determineSyncPeers(req, peersTried) - require.Equal(t, 2, len(peers)) - require.Contains(t, peers, testPeerA) - require.Contains(t, peers, testPeerB) - require.Equal(t, 0, len(cs.ignorePeers)) - - // test peer's best block below number case, shouldn't include that peer - start, err := variadic.NewUint32OrHash(130) - require.NoError(t, err) - req.StartingBlock = *start - peers = cs.determineSyncPeers(req, peersTried) - require.Equal(t, 1, len(peers)) - require.Equal(t, []peer.ID{testPeerB}, peers) - - // test peer tried case, should ignore peer already tried - peersTried[testPeerA] = struct{}{} - req.StartingBlock = variadic.Uint32OrHash{} - peers = cs.determineSyncPeers(req, peersTried) - require.Equal(t, 1, len(peers)) - require.Equal(t, []peer.ID{testPeerB}, peers) -} - -func Test_chainSync_logSyncSpeed(t *testing.T) { - t.Parallel() - - type fields struct { - blockStateBuilder func(ctrl *gomock.Controller) BlockState - networkBuilder func(ctrl *gomock.Controller) Network - state chainSyncState - benchmarker *syncBenchmarker - } - tests := []struct { - name string - fields fields + testcases := map[string]struct { + newChainSync func(t *testing.T, ctrl *gomock.Controller) *chainSync + peerID peer.ID + bestHash common.Hash + bestNumber uint }{ - { - name: "state_bootstrap", - fields: fields{ - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := 
NewMockBlockState(ctrl) - mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{}, nil).Times(3) - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{}, nil) - return mockBlockState - }, - networkBuilder: func(ctrl *gomock.Controller) Network { - mockNetwork := NewMockNetwork(ctrl) - mockNetwork.EXPECT().Peers().Return(nil) - return mockNetwork - }, - benchmarker: newSyncBenchmarker(10), - state: bootstrap, - }, - }, - { - name: "case_tip", - fields: fields{ - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{}, nil).Times(3) - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{}, nil) - return mockBlockState - }, - networkBuilder: func(ctrl *gomock.Controller) Network { - mockNetwork := NewMockNetwork(ctrl) - mockNetwork.EXPECT().Peers().Return(nil) - return mockNetwork - }, - benchmarker: newSyncBenchmarker(10), - state: tip, - }, - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - ctx, cancel := context.WithCancel(context.Background()) - tickerChannel := make(chan time.Time) - cs := &chainSync{ - ctx: ctx, - cancel: cancel, - blockState: tt.fields.blockStateBuilder(ctrl), - network: tt.fields.networkBuilder(ctrl), - state: tt.fields.state, - benchmarker: tt.fields.benchmarker, - logSyncTickerC: tickerChannel, - logSyncTicker: time.NewTicker(time.Hour), // just here to be stopped - logSyncDone: make(chan struct{}), - } - - go cs.logSyncSpeed() - - tickerChannel <- time.Time{} - cs.cancel() - <-cs.logSyncDone - }) - } -} + "set_peer_head_with_new_peer": { + newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { + networkMock := NewMockNetwork(ctrl) + workerPool := newSyncWorkerPool(networkMock) -func Test_chainSync_start(t *testing.T) { - t.Parallel() - - type fields struct { - blockStateBuilder func(ctrl *gomock.Controller) BlockState - disjointBlockSetBuilder func(ctrl *gomock.Controller, called chan<- struct{}) DisjointBlockSet - benchmarker *syncBenchmarker - } - tests := []struct { - name string - fields fields - }{ - { - name: "base_case", - fields: fields{ - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{}, nil) - return mockBlockState - }, - disjointBlockSetBuilder: func(ctrl *gomock.Controller, called chan<- struct{}) DisjointBlockSet { - mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) - mockDisjointBlockSet.EXPECT().run(gomock.AssignableToTypeOf(make(<-chan struct{}))). - DoAndReturn(func(stop <-chan struct{}) { - close(called) // test glue, ideally we would use a ready chan struct passed to run(). 
- }) - return mockDisjointBlockSet - }, - benchmarker: newSyncBenchmarker(1), + cs := newChainSyncTest(t, ctrl) + cs.workerPool = workerPool + return cs }, + peerID: peer.ID("peer-test"), + bestHash: randomHash, + bestNumber: uint(20), }, } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - ctx, cancel := context.WithCancel(context.Background()) - disjointBlockSetCalled := make(chan struct{}) - cs := &chainSync{ - ctx: ctx, - cancel: cancel, - blockState: tt.fields.blockStateBuilder(ctrl), - pendingBlocks: tt.fields.disjointBlockSetBuilder(ctrl, disjointBlockSetCalled), - benchmarker: tt.fields.benchmarker, - slotDuration: time.Hour, - logSyncTicker: time.NewTicker(time.Hour), // just here to be closed - logSyncDone: make(chan struct{}), - } - cs.start() - <-disjointBlockSetCalled - cs.stop() - }) - } -} -func Test_chainSync_setBlockAnnounce(t *testing.T) { - t.Parallel() - - type args struct { - from peer.ID - header *types.Header - } - tests := map[string]struct { - chainSyncBuilder func(*types.Header, *gomock.Controller) chainSync - args args - wantErr error - }{ - "base_case": { - wantErr: blocktree.ErrBlockExists, - args: args{ - header: &types.Header{Number: 2}, - }, - chainSyncBuilder: func(_ *types.Header, ctrl *gomock.Controller) chainSync { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.MustHexToHash( - "0x05bdcc454f60a08d427d05e7f19f240fdc391f570ab76fcb96ecca0b5823d3bf")).Return(true, nil) - mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) - return chainSync{ - blockState: mockBlockState, - pendingBlocks: mockDisjointBlockSet, - } - }, - }, - "err_when_calling_has_header": { - wantErr: errors.New("checking header exists"), - args: args{ - header: &types.Header{Number: 2}, - }, - chainSyncBuilder: func(_ *types.Header, ctrl *gomock.Controller) chainSync { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT(). - HasHeader(common.MustHexToHash( - "0x05bdcc454f60a08d427d05e7f19f240fdc391f570ab76fcb96ecca0b5823d3bf")). - Return(false, errors.New("checking header exists")) - mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) - return chainSync{ - blockState: mockBlockState, - pendingBlocks: mockDisjointBlockSet, - } - }, - }, - "adding_block_header_to_pending_blocks": { - args: args{ - header: &types.Header{Number: 2}, - }, - chainSyncBuilder: func(expectedHeader *types.Header, ctrl *gomock.Controller) chainSync { - argumentHeaderHash := common.MustHexToHash( - "0x05bdcc454f60a08d427d05e7f19f240fdc391f570ab76fcb96ecca0b5823d3bf") - - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT(). - HasHeader(argumentHeaderHash). - Return(false, nil) - - mockBlockState.EXPECT(). - BestBlockHeader(). - Return(&types.Header{Number: 1}, nil) - - mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) - mockDisjointBlockSet.EXPECT(). - addHeader(expectedHeader). - Return(nil) - - mockDisjointBlockSet.EXPECT(). - addHashAndNumber(argumentHeaderHash, uint(2)). 
- Return(nil) - - return chainSync{ - blockState: mockBlockState, - pendingBlocks: mockDisjointBlockSet, - peerView: make(map[peer.ID]*peerView), - // creating an buffered channel for this specific test - // since it will put a work on the queue and an unbufered channel - // will hang until we read on this channel and the goal is to - // put the work on the channel and don't block - workQueue: make(chan *peerView, 1), - } - }, - }, - } - for name, tt := range tests { + for tname, tt := range testcases { tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() + t.Run(tname, func(t *testing.T) { ctrl := gomock.NewController(t) - sync := tt.chainSyncBuilder(tt.args.header, ctrl) - err := sync.setBlockAnnounce(tt.args.from, tt.args.header) - if tt.wantErr != nil { - assert.EqualError(t, err, tt.wantErr.Error()) - } else { - assert.NoError(t, err) - } - - if sync.workQueue != nil { - assert.Equal(t, len(sync.workQueue), 1) - } + cs := tt.newChainSync(t, ctrl) + cs.setPeerHead(tt.peerID, tt.bestHash, tt.bestNumber) }) } } -func Test_chainSync_getHighestBlock(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - peerState map[peer.ID]*peerView - wantHighestBlock uint - expectedError error - }{ - { - name: "error no peers", - expectedError: errors.New("no peers to sync with"), - }, - { - name: "base case", - peerState: map[peer.ID]*peerView{"1": {number: 2}}, - wantHighestBlock: 2, - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - cs := &chainSync{ - peerView: tt.peerState, - } - gotHighestBlock, err := cs.getHighestBlock() - if tt.expectedError != nil { - assert.EqualError(t, err, tt.expectedError.Error()) - } else { - assert.NoError(t, err) - } - assert.Equal(t, tt.wantHighestBlock, gotHighestBlock) - }) - } -} +func newChainSyncTest(t *testing.T, ctrl *gomock.Controller) *chainSync { + t.Helper() -func Test_chainSync_handleResult(t *testing.T) { - t.Parallel() - mockError := errors.New("test mock error") - tests := map[string]struct { - chainSyncBuilder func(ctrl *gomock.Controller, result *worker) chainSync - maxWorkerRetries uint16 - res *worker - err error - }{ - "res.err_==_nil": { - chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { - return chainSync{ - workerState: newWorkerState(), - } - }, - res: &worker{}, - }, - "res.err.err.Error()_==_context.Canceled": { - chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { - return chainSync{ - workerState: newWorkerState(), - } - }, - res: &worker{ - ctx: context.Background(), - err: &workerError{ - err: context.Canceled, - }, - }, - }, - "res.err.err.Error()_==_context.DeadlineExceeded": { - chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { - mockNetwork := NewMockNetwork(ctrl) - mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{Value: -1024, Reason: "Request timeout"}, - peer.ID("")) - mockWorkHandler := NewMockworkHandler(ctrl) - mockWorkHandler.EXPECT().handleWorkerResult(result).Return(result, nil) - return chainSync{ - workerState: newWorkerState(), - network: mockNetwork, - handler: mockWorkHandler, - } - }, - res: &worker{ - ctx: context.Background(), - err: &workerError{ - err: context.DeadlineExceeded, - }, - }, - }, - "res.err.err.Error()_dial_backoff": { - chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { - return chainSync{ - workerState: newWorkerState(), - } - }, - res: &worker{ - ctx: context.Background(), - err: &workerError{ - err: errors.New("dial 
backoff"), - }, - }, - }, - "res.err.err.Error()_==_errNoPeers": { - chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { - return chainSync{ - workerState: newWorkerState(), - } - }, - res: &worker{ - ctx: context.Background(), - err: &workerError{ - err: errNoPeers, - }, - }, - }, - "res.err.err.Error()_==_protocol_not_supported": { - chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { - mockNetwork := NewMockNetwork(ctrl) - mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{Value: -2147483648, - Reason: "Unsupported protocol"}, - peer.ID("")) - return chainSync{ - workerState: newWorkerState(), - network: mockNetwork, - } - }, - res: &worker{ - ctx: context.Background(), - err: &workerError{ - err: errors.New("protocol not supported"), - }, - }, - }, - "no_error,_no_retries": { - chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { - mockWorkHandler := NewMockworkHandler(ctrl) - mockWorkHandler.EXPECT().handleWorkerResult(result).Return(result, nil) - return chainSync{ - workerState: newWorkerState(), - handler: mockWorkHandler, - } - }, - res: &worker{ - ctx: context.Background(), - err: &workerError{ - err: errors.New(""), - }, - }, - }, - "handle_work_result_error,_no_retries": { - chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { - mockWorkHandler := NewMockworkHandler(ctrl) - mockWorkHandler.EXPECT().handleWorkerResult(result).Return(nil, mockError) - return chainSync{ - workerState: newWorkerState(), - handler: mockWorkHandler, - } - }, - res: &worker{ - ctx: context.Background(), - err: &workerError{ - err: errors.New(""), - }, - }, - err: mockError, - }, - "handle_work_result_nil,_no_retries": { - chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { - mockWorkHandler := NewMockworkHandler(ctrl) - mockWorkHandler.EXPECT().handleWorkerResult(result).Return(nil, nil) - return chainSync{ - workerState: newWorkerState(), - handler: mockWorkHandler, - } - }, - res: &worker{ - ctx: context.Background(), - err: &workerError{ - err: errors.New(""), - }, - }, - }, - "no_error,_maxWorkerRetries_2": { - chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { - mockWorkHandler := NewMockworkHandler(ctrl) - mockWorkHandler.EXPECT().handleWorkerResult(result).Return(result, nil) - mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) - mockDisjointBlockSet.EXPECT().removeBlock(common.Hash{}) - return chainSync{ - workerState: newWorkerState(), - handler: mockWorkHandler, - pendingBlocks: mockDisjointBlockSet, - } - }, - maxWorkerRetries: 2, - res: &worker{ - ctx: context.Background(), - err: &workerError{ - err: errors.New(""), - }, - pendingBlock: newPendingBlock(common.Hash{}, 1, nil, nil, time.Now()), - }, - }, - "no_error": { - chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { - mockWorkHandler := NewMockworkHandler(ctrl) - mockWorkHandler.EXPECT().handleWorkerResult(result).Return(result, nil) - mockWorkHandler.EXPECT().hasCurrentWorker(&worker{ - ctx: context.Background(), - err: &workerError{ - err: mockError, - }, - retryCount: 1, - peersTried: map[peer.ID]struct{}{ - "": {}, - }, - }, newWorkerState().workers).Return(true) - return chainSync{ - workerState: newWorkerState(), - handler: mockWorkHandler, - maxWorkerRetries: 2, - } - }, - res: &worker{ - ctx: context.Background(), - err: &workerError{ - err: mockError, - }, - }, - }, - } - for testName, tt := range tests { - tt := tt - t.Run(testName, func(t *testing.T) 
{ - t.Parallel() - ctrl := gomock.NewController(t) - sync := tt.chainSyncBuilder(ctrl, tt.res) - err := sync.handleResult(tt.res) - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - }) - } -} - -func newTestChainSyncWithReadyBlocks(ctrl *gomock.Controller) *chainSync { mockBlockState := NewMockBlockState(ctrl) mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) @@ -1633,12 +56,8 @@ func newTestChainSyncWithReadyBlocks(ctrl *gomock.Controller) *chainSync { pendingBlocks: newDisjointBlockSet(pendingBlocksLimit), minPeers: 1, maxPeers: 5, - slotDuration: defaultSlotDuration, + slotDuration: 6 * time.Second, } return newChainSync(cfg) } - -func newTestChainSync(ctrl *gomock.Controller) *chainSync { - return newTestChainSyncWithReadyBlocks(ctrl) -} diff --git a/dot/sync/mock_chain_sync_test.go b/dot/sync/mock_chain_sync_test.go index c681244882..b3a0decdab 100644 --- a/dot/sync/mock_chain_sync_test.go +++ b/dot/sync/mock_chain_sync_test.go @@ -66,11 +66,9 @@ func (mr *MockChainSyncMockRecorder) setBlockAnnounce(from, header interface{}) } // setPeerHead mocks base method. -func (m *MockChainSync) setPeerHead(p peer.ID, hash common.Hash, number uint) error { +func (m *MockChainSync) setPeerHead(p peer.ID, hash common.Hash, number uint) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "setPeerHead", p, hash, number) - ret0, _ := ret[0].(error) - return ret0 + m.ctrl.Call(m, "setPeerHead", p, hash, number) } // setPeerHead indicates an expected call of setPeerHead. diff --git a/dot/sync/sync_worker.go b/dot/sync/sync_worker.go deleted file mode 100644 index 662a19fed9..0000000000 --- a/dot/sync/sync_worker.go +++ /dev/null @@ -1,94 +0,0 @@ -package sync - -import ( - "context" - "sync" - - "github.com/libp2p/go-libp2p/core/peer" -) - -// syncWorker represents a available peer that could be a source -// for requesting blocks, once a peer is disconnected or is ignored -// we can just disable its worker. 
-type syncWorker struct { - // context shared between all workers - ctx context.Context - l sync.RWMutex - - releaseCh chan struct{} - doneCh chan struct{} - stopCh chan struct{} - - who peer.ID - network Network -} - -func newSyncWorker(ctx context.Context, who peer.ID, network Network) *syncWorker { - return &syncWorker{ - ctx: ctx, - who: who, - network: network, - doneCh: make(chan struct{}), - stopCh: make(chan struct{}), - releaseCh: make(chan struct{}), - } -} - -func (s *syncWorker) Start(tasks <-chan *syncTask, wg *sync.WaitGroup) { - wg.Add(1) - - go func() { - defer func() { - wg.Done() - close(s.doneCh) - logger.Infof("[SHUTDOWN] worker %s", s.who) - }() - - logger.Debugf("worker %s started, waiting for tasks...", s.who) - - for { - s.waitForTasks(tasks) - - logger.Debugf("[WAITING RELEASE] worker %s", s.who) - select { - case <-s.releaseCh: - case <-s.stopCh: - return - } - } - }() -} - -func (s *syncWorker) waitForTasks(tasks <-chan *syncTask) { - select { - case <-s.stopCh: - return - - case task := <-tasks: - request := task.request - - logger.Debugf("[EXECUTING] worker %s: block request: %s", s.who, request) - response, err := s.network.DoBlockRequest(s.who, request) - if err != nil { - logger.Debugf("[FINISHED] worker %s: err: %s", s.who, err) - } else if response != nil { - logger.Debugf("[FINISHED] worker %s: block data amount: %d", s.who, len(response.BlockData)) - } - - task.resultCh <- &syncTaskResult{ - who: s.who, - request: request, - response: response, - err: err, - } - } -} - -func (s *syncWorker) Release() { - s.releaseCh <- struct{}{} -} - -func (s *syncWorker) Stop() { - close(s.stopCh) - <-s.doneCh -} diff --git a/dot/sync/syncer.go b/dot/sync/syncer.go index 7410c60131..1a8d4aa2a8 100644 --- a/dot/sync/syncer.go +++ b/dot/sync/syncer.go @@ -83,7 +83,8 @@ func (s *Service) Stop() error { // HandleBlockAnnounceHandshake notifies the `chainSync` module that // we have received a BlockAnnounceHandshake from the given peer. func (s *Service) HandleBlockAnnounceHandshake(from peer.ID, msg *network.BlockAnnounceHandshake) error { - return s.chainSync.setPeerHead(from, msg.BestBlockHash, uint(msg.BestBlockNumber)) + s.chainSync.setPeerHead(from, msg.BestBlockHash, uint(msg.BestBlockNumber)) + return nil } // HandleBlockAnnounce notifies the `chainSync` module that we have received a block announcement from the given peer. 
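
For review context, the worker_pool.go changes below drop the goroutine-per-peer `syncWorker` deleted above in favour of passive per-peer status records guarded by the pool's lock. A minimal, self-contained sketch of the resulting state machine follows; the `peerSyncWorker` struct and the status constants mirror the patch, while the `canWork` helper and the `main` function are illustrative assumptions only, not code from this series.

package main

import (
	"fmt"
	"time"
)

// Status values mirror the constants introduced in worker_pool.go.
const (
	available byte = iota
	busy
	punished
)

// peerSyncWorker mirrors the struct added by this patch: a peer is a
// passive record in a map instead of a long-lived goroutine.
type peerSyncWorker struct {
	status       byte
	punishedTime time.Time
}

// canWork is an illustrative helper: a peer may take a task when it is
// available, or when its punishment deadline has already passed.
func (p peerSyncWorker) canWork(now time.Time) bool {
	switch p.status {
	case available:
		return true
	case punished:
		return p.punishedTime.Before(now)
	default: // busy
		return false
	}
}

func main() {
	expired := peerSyncWorker{status: punished, punishedTime: time.Now().Add(-time.Minute)}
	active := peerSyncWorker{status: punished, punishedTime: time.Now().Add(time.Minute)}
	fmt.Println(expired.canWork(time.Now()), active.canWork(time.Now())) // true false
}

Under this model nothing blocks per peer: a worker is just a map entry, and dispatch is centralised in the pool's listenForRequests loop added further down.
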
diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 16b1c0902b..03d983ed32 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -1,7 +1,7 @@ package sync import ( - "context" + "errors" "sync" "time" @@ -10,11 +10,18 @@ import ( ) const ( - ignorePeerTimeout = 2 * time.Minute - maxRequestAllowed uint = 40 + available byte = iota + busy + punished +) + +const ( + ignorePeerTimeout = 2 * time.Minute + maxRequestsAllowed uint = 45 ) type syncTask struct { + boundTo *peer.ID request *network.BlockRequestMessage resultCh chan<- *syncTaskResult } @@ -26,77 +33,54 @@ type syncTaskResult struct { err error } +type peerSyncWorker struct { + status byte + punishedTime time.Time +} + type syncWorkerPool struct { - ctx context.Context - l sync.RWMutex - wg sync.WaitGroup + wg sync.WaitGroup + l sync.RWMutex + doneCh chan struct{} network Network taskQueue chan *syncTask - workers map[peer.ID]*syncWorker + workers map[peer.ID]*peerSyncWorker - // TODO add this worker in a ignorePeers list, implement some expiration time for - // peers added to it (peerJail where peers have a release date and maybe extend the punishment - // if fail again ang again Jimmy's + Diego's idea) - ignorePeers map[peer.ID]time.Time + waiting bool + waitingBounded bool + availablePeerCh chan peer.ID } func newSyncWorkerPool(net Network) *syncWorkerPool { return &syncWorkerPool{ - network: net, - workers: make(map[peer.ID]*syncWorker), - taskQueue: make(chan *syncTask, maxRequestAllowed+1), - ignorePeers: make(map[peer.ID]time.Time), + network: net, + waiting: false, + doneCh: make(chan struct{}), + availablePeerCh: make(chan peer.ID), + workers: make(map[peer.ID]*peerSyncWorker), + taskQueue: make(chan *syncTask, maxRequestsAllowed), } } func (s *syncWorkerPool) useConnectedPeers() { connectedPeers := s.network.AllConnectedPeers() - - s.l.Lock() - defer s.l.Unlock() - for _, connectedPeer := range connectedPeers { - _, has := s.workers[connectedPeer] - if has { - continue - } - - releaseTime, has := s.ignorePeers[connectedPeer] - if has { - if time.Now().Before(releaseTime) { - continue - } else { - delete(s.ignorePeers, connectedPeer) - } - } - - worker := newSyncWorker(s.ctx, connectedPeer, s.network) - worker.Start(s.taskQueue, &s.wg) - s.workers[connectedPeer] = worker + s.releaseWorker(connectedPeer) } } -func (s *syncWorkerPool) fromBlockAnnounce(who peer.ID) error { - s.l.Lock() - defer s.l.Unlock() - - _, has := s.ignorePeers[who] - if has { - delete(s.ignorePeers, who) - } - - _, has = s.workers[who] - if has { - return nil - } - - syncWorker := newSyncWorker(s.ctx, who, s.network) +func (s *syncWorkerPool) fromBlockAnnounce(who peer.ID) { + s.releaseWorker(who) logger.Tracef("potential worker added, total in the pool %d", len(s.workers)) +} - syncWorker.Start(s.taskQueue, &s.wg) - s.workers[who] = syncWorker - return nil +func (s *syncWorkerPool) submitBoundedRequest(request *network.BlockRequestMessage, who peer.ID, resultCh chan<- *syncTaskResult) { + s.taskQueue <- &syncTask{ + boundTo: &who, + request: request, + resultCh: resultCh, + } } func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, resultCh chan<- *syncTaskResult) { @@ -114,30 +98,37 @@ func (s *syncWorkerPool) submitRequests(requests []*network.BlockRequestMessage, func (s *syncWorkerPool) releaseWorker(who peer.ID) { s.l.Lock() - peer, has := s.workers[who] - s.l.Unlock() + defer s.l.Unlock() + peerSync, has := s.workers[who] if !has { + peerSync = &peerSyncWorker{status: available} + } + + // if 
the punishment is still valid we do nothing + if peerSync.status == punished && peerSync.punishedTime.After(time.Now()) { return } - peer.Release() + s.workers[who] = &peerSyncWorker{status: available} + if s.waiting { + s.waiting = false + s.availablePeerCh <- who + } } -func (s *syncWorkerPool) shutdownWorker(who peer.ID, ignore bool) { +func (s *syncWorkerPool) punishPeer(who peer.ID) { s.l.Lock() - peer, has := s.workers[who] - s.l.Unlock() + defer s.l.Unlock() + _, has := s.workers[who] if !has { return } - peer.Stop() - delete(s.workers, who) - if ignore { - ignorePeerTimeout := time.Now().Add(ignorePeerTimeout) - s.ignorePeers[who] = ignorePeerTimeout + s.workers[who] = &peerSyncWorker{ + status: punished, + punishedTime: time.Now().Add(ignorePeerTimeout), } } @@ -146,3 +137,115 @@ func (s *syncWorkerPool) totalWorkers() (total uint) { defer s.l.RUnlock() return uint(len(s.workers)) } + +var errPeerNotFound = errors.New("peer not found") +var errNoPeersAvailable = errors.New("no peers available") + +// getAvailablePeer returns the very first peer available and changes +// its status from available to busy, if there is no peer avaible then +// the caller should wait for availablePeerCh +func (s *syncWorkerPool) searchForAvailable() (peer.ID, error) { + s.l.RLock() + defer s.l.RUnlock() + + for peerID, peerSync := range s.workers { + switch peerSync.status { + case punished: + // if the punishedTime has passed then we mark it + // as available and notify it availability if needed + // otherwise we keep the peer in the punishment and don't notify + if peerSync.punishedTime.Before(time.Now()) { + peerSync.status = busy + s.workers[peerID] = peerSync + return peerID, nil + } + case available: + peerSync.status = busy + s.workers[peerID] = peerSync + return peerID, nil + default: + } + } + + s.waiting = true + return peer.ID(""), errNoPeersAvailable //could not found an available peer to dispatch +} + +func (s *syncWorkerPool) isPeerAvailable(peerID peer.ID) (bool, error) { + s.l.RLock() + defer s.l.RUnlock() + peerSync, has := s.workers[peerID] + if !has { + return false, errPeerNotFound + } + + return peerSync.status == available, nil +} + +func (s *syncWorkerPool) listenForRequests(stopCh chan struct{}) { + defer close(s.doneCh) + for { + select { + case <-stopCh: + //wait for ongoing requests to be finished before returning + s.wg.Wait() + return + + case task := <-s.taskQueue: + var availablePeer peer.ID + if task.boundTo != nil { + isAvailable, err := s.isPeerAvailable(*task.boundTo) + if err != nil { + if errors.Is(err, errPeerNotFound) { + // TODO: check if there is a better solution + continue + } + } + + if isAvailable { + availablePeer = *task.boundTo + } else { + logger.Debugf("[WAITING] task in idle state: %s", task.request) + availablePeer = <-s.availablePeerCh + logger.Debugf("[WAITING] got the peer %s to handle task: %s", availablePeer, task) + } + } else { + var err error + availablePeer, err = s.searchForAvailable() + if err != nil { + if errors.Is(err, errNoPeersAvailable) { + logger.Debugf("[WAITING] task in idle state: %s", task.request) + availablePeer = <-s.availablePeerCh + logger.Debugf("[WAITING] got the peer %s to handle task: %s", availablePeer, task) + } else { + // TODO: check if there is a better solution + continue + } + } + } + + s.wg.Add(1) + go executeRequest(s.network, availablePeer, task, &s.wg) + } + } +} + +func executeRequest(network Network, who peer.ID, task *syncTask, wg *sync.WaitGroup) { + defer wg.Done() + request := task.request + + 
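+	// executeRequest performs one block request against a single peer and
+	// always sends a syncTaskResult back on task.resultCh, whether the
+	// request succeeded or failed, so the submitter can account for every
+	// dispatched task.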
logger.Debugf("[EXECUTING] worker %s: block request: %s", who, request) + response, err := network.DoBlockRequest(who, request) + if err != nil { + logger.Debugf("[FINISHED] worker %s: err: %s", who, err) + } else if response != nil { + logger.Debugf("[FINISHED] worker %s: block data amount: %d", who, len(response.BlockData)) + } + + task.resultCh <- &syncTaskResult{ + who: who, + request: request, + response: response, + err: err, + } +} From c446fc053dd239ff24e3628b39a0de3152dc39a3 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 5 May 2023 21:04:26 -0400 Subject: [PATCH 028/140] wip: bounded requests without blocking the task queue listener --- chain/westend-local/config-alice.toml | 10 +-- .../westend-local/westend-local-spec-raw.json | 5 +- dot/sync/chain_sync.go | 15 ++-- dot/sync/requests.go | 10 +++ dot/sync/worker_pool.go | 77 ++++++++++++++----- 5 files changed, 85 insertions(+), 32 deletions(-) diff --git a/chain/westend-local/config-alice.toml b/chain/westend-local/config-alice.toml index 592980d240..897eeeed3c 100644 --- a/chain/westend-local/config-alice.toml +++ b/chain/westend-local/config-alice.toml @@ -11,20 +11,20 @@ state = "" runtime = "" babe = "" grandpa = "" -sync = "" -digest = "" +sync = "trace" +digest = "trace" [init] genesis = "./chain/westend-local/westend-local-spec-raw.json" [account] -key = "alice" +key = "" unlock = "" [core] roles = 4 -babe-authority = true -grandpa-authority = true +babe-authority = false +grandpa-authority = false [network] port = 7001 diff --git a/chain/westend-local/westend-local-spec-raw.json b/chain/westend-local/westend-local-spec-raw.json index c87badbea1..401f7efa72 100644 --- a/chain/westend-local/westend-local-spec-raw.json +++ b/chain/westend-local/westend-local-spec-raw.json @@ -2,7 +2,10 @@ "name": "Westend Local Testnet", "id": "westend_local_testnet", "chainType": "Local", - "bootNodes": [], + "bootNodes": [ + "/ip4/127.0.0.1/tcp/30333/p2p/12D3KooWHXRStPGYZvPvoRPVm8hUT88VwWrgUYymbYABumkLV34M", + "/ip4/127.0.0.1/tcp/30331/p2p/12D3KooWMCjJg4jdms2a7FR5ZsbJHzZUv52JSrDBkJCWxfH31ApD" + ], "telemetryEndpoints": null, "protocolId": "dot", "properties": null, diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index f84cc8b4fc..7c95ee22a7 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -296,10 +296,12 @@ func (cs *chainSync) setBlockAnnounce(who peer.ID, blockAnnounceHeader *types.He // that is also has the chain up until and including that block. // this may not be a valid assumption, but perhaps we can assume that // it is likely they will receive this block and its ancestors before us. 
- cs.blockAnnounceCh <- announcedBlock{ - who: who, - header: blockAnnounceHeader, - } + + // * disabling for deployment in staging + // cs.blockAnnounceCh <- announcedBlock{ + // who: who, + // header: blockAnnounceHeader, + // } return nil } @@ -514,11 +516,10 @@ func (cs *chainSync) executeTipSync() error { return fmt.Errorf("while requesting pending blocks") } } - } func (cs *chainSync) requestPendingBlocks() error { - logger.Info("starting request pending blocks") + logger.Infof("total of pending blocks: %d", cs.pendingBlocks.size()) if cs.pendingBlocks.size() == 0 { return nil } @@ -618,7 +619,7 @@ func (cs *chainSync) executeBootstrapSync() error { } requests := ascedingBlockRequests(startRequestAt, targetBlockNumber, bootstrapRequestData) - expectedAmountOfBlocks := uint32(len(requests) * 128) + expectedAmountOfBlocks := totalOfBlocksRequested(requests) wg := sync.WaitGroup{} resultsQueue := make(chan *syncTaskResult) diff --git a/dot/sync/requests.go b/dot/sync/requests.go index f73bba8444..af22c7d477 100644 --- a/dot/sync/requests.go +++ b/dot/sync/requests.go @@ -80,3 +80,13 @@ func ascedingBlockRequests(startNumber, targetNumber uint, requestedData byte) [ return reqs } + +func totalOfBlocksRequested(requests []*network.BlockRequestMessage) (total uint32) { + for _, request := range requests { + if request.Max != nil { + total += *request.Max + } + } + + return total +} diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 03d983ed32..831f079c51 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -48,18 +48,22 @@ type syncWorkerPool struct { workers map[peer.ID]*peerSyncWorker waiting bool - waitingBounded bool availablePeerCh chan peer.ID + + waitingBounded *peer.ID + availableBounded chan struct{} + waitBoundedLock sync.Mutex } func newSyncWorkerPool(net Network) *syncWorkerPool { return &syncWorkerPool{ - network: net, - waiting: false, - doneCh: make(chan struct{}), - availablePeerCh: make(chan peer.ID), - workers: make(map[peer.ID]*peerSyncWorker), - taskQueue: make(chan *syncTask, maxRequestsAllowed), + network: net, + waiting: false, + doneCh: make(chan struct{}), + availablePeerCh: make(chan peer.ID), + availableBounded: make(chan struct{}), + workers: make(map[peer.ID]*peerSyncWorker), + taskQueue: make(chan *syncTask, maxRequestsAllowed), } } @@ -110,7 +114,15 @@ func (s *syncWorkerPool) releaseWorker(who peer.ID) { return } + if s.waitingBounded != nil && *s.waitingBounded == who { + s.waitingBounded = nil + s.workers[who] = &peerSyncWorker{status: busy} + s.availableBounded <- struct{}{} + return + } + s.workers[who] = &peerSyncWorker{status: available} + if s.waiting { s.waiting = false s.availablePeerCh <- who @@ -171,7 +183,7 @@ func (s *syncWorkerPool) searchForAvailable() (peer.ID, error) { return peer.ID(""), errNoPeersAvailable //could not found an available peer to dispatch } -func (s *syncWorkerPool) isPeerAvailable(peerID peer.ID) (bool, error) { +func (s *syncWorkerPool) searchForExactAvailable(peerID peer.ID) (bool, error) { s.l.RLock() defer s.l.RUnlock() peerSync, has := s.workers[peerID] @@ -179,9 +191,38 @@ func (s *syncWorkerPool) isPeerAvailable(peerID peer.ID) (bool, error) { return false, errPeerNotFound } + switch peerSync.status { + case punished: + // if the punishedTime has passed then we mark it + // as available and notify it availability if needed + // otherwise we keep the peer in the punishment and don't notify + if peerSync.punishedTime.Before(time.Now()) { + peerSync.status = busy + s.workers[peerID] = 
peerSync + return true, nil + } + case available: + peerSync.status = busy + s.workers[peerID] = peerSync + return true, nil + default: + } + return peerSync.status == available, nil } +func (s *syncWorkerPool) waitPeerAndExecute(network Network, who peer.ID, availableBounded <-chan struct{}, task *syncTask, wg *sync.WaitGroup) { + s.waitBoundedLock.Lock() + s.waitingBounded = &who + + logger.Debugf("[WAITING] bounded task to peer %s in idle state: %s", who, task.request) + <-availableBounded + logger.Debugf("[WAITING] got the peer %s to handle task: %s", who, task) + s.waitBoundedLock.Unlock() + + executeRequest(network, who, task, wg) +} + func (s *syncWorkerPool) listenForRequests(stopCh chan struct{}) { defer close(s.doneCh) for { @@ -194,20 +235,19 @@ func (s *syncWorkerPool) listenForRequests(stopCh chan struct{}) { case task := <-s.taskQueue: var availablePeer peer.ID if task.boundTo != nil { - isAvailable, err := s.isPeerAvailable(*task.boundTo) + isAvailable, err := s.searchForExactAvailable(*task.boundTo) if err != nil { - if errors.Is(err, errPeerNotFound) { - // TODO: check if there is a better solution - continue - } + logger.Errorf("while checking peer %s available: %s", + *task.boundTo, task.request) + continue } if isAvailable { availablePeer = *task.boundTo } else { - logger.Debugf("[WAITING] task in idle state: %s", task.request) - availablePeer = <-s.availablePeerCh - logger.Debugf("[WAITING] got the peer %s to handle task: %s", availablePeer, task) + s.wg.Add(1) + go s.waitPeerAndExecute(s.network, *task.boundTo, s.availableBounded, task, &s.wg) + continue } } else { var err error @@ -216,10 +256,9 @@ func (s *syncWorkerPool) listenForRequests(stopCh chan struct{}) { if errors.Is(err, errNoPeersAvailable) { logger.Debugf("[WAITING] task in idle state: %s", task.request) availablePeer = <-s.availablePeerCh - logger.Debugf("[WAITING] got the peer %s to handle task: %s", availablePeer, task) + logger.Debugf("[WAITING] got the peer %s to handle task: %s", availablePeer, task) } else { - // TODO: check if there is a better solution - continue + logger.Errorf("while searching for available peer: %s", task.request) } } } From c8f47fa15f3ee2a3e4b758b9f36b13e96ab5c53f Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 5 May 2023 21:09:18 -0400 Subject: [PATCH 029/140] chore: use versioned config data + enable traces --- chain/westend-local/config-alice.toml | 4 ++-- chain/westend-local/westend-local-spec-raw.json | 5 +---- chain/westend/config.toml | 6 +++--- 3 files changed, 6 insertions(+), 9 deletions(-) diff --git a/chain/westend-local/config-alice.toml b/chain/westend-local/config-alice.toml index 897eeeed3c..6b823ea97a 100644 --- a/chain/westend-local/config-alice.toml +++ b/chain/westend-local/config-alice.toml @@ -11,8 +11,8 @@ state = "" runtime = "" babe = "" grandpa = "" -sync = "trace" -digest = "trace" +sync = "" +digest = "" [init] genesis = "./chain/westend-local/westend-local-spec-raw.json" diff --git a/chain/westend-local/westend-local-spec-raw.json b/chain/westend-local/westend-local-spec-raw.json index 401f7efa72..c87badbea1 100644 --- a/chain/westend-local/westend-local-spec-raw.json +++ b/chain/westend-local/westend-local-spec-raw.json @@ -2,10 +2,7 @@ "name": "Westend Local Testnet", "id": "westend_local_testnet", "chainType": "Local", - "bootNodes": [ - "/ip4/127.0.0.1/tcp/30333/p2p/12D3KooWHXRStPGYZvPvoRPVm8hUT88VwWrgUYymbYABumkLV34M", - "/ip4/127.0.0.1/tcp/30331/p2p/12D3KooWMCjJg4jdms2a7FR5ZsbJHzZUv52JSrDBkJCWxfH31ApD" - ], + "bootNodes": [], 
"telemetryEndpoints": null, "protocolId": "dot", "properties": null, diff --git a/chain/westend/config.toml b/chain/westend/config.toml index 88ebb8fed3..5cbef73da7 100644 --- a/chain/westend/config.toml +++ b/chain/westend/config.toml @@ -1,5 +1,5 @@ [global] -basepath = "./tmp/westend" +basepath = "~/.gossamer/westend" log = "info" metrics-address = "localhost:9876" @@ -7,10 +7,10 @@ metrics-address = "localhost:9876" core = "" network = "" rpc = "" -state = "" +state = "trace" runtime = "" babe = "" -grandpa = "" +grandpa = "trace" sync = "trace" digest = "trace" From ce40f145d92589d5a49c8525a76899b6a303aae0 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 5 May 2023 21:21:12 -0400 Subject: [PATCH 030/140] chore: remove state trace --- chain/westend-local/config-alice.toml | 4 ++-- chain/westend-local/westend-local-spec-raw.json | 5 ++++- chain/westend/config.toml | 2 +- dot/sync/chain_sync.go | 10 ++++------ 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/chain/westend-local/config-alice.toml b/chain/westend-local/config-alice.toml index 6b823ea97a..897eeeed3c 100644 --- a/chain/westend-local/config-alice.toml +++ b/chain/westend-local/config-alice.toml @@ -11,8 +11,8 @@ state = "" runtime = "" babe = "" grandpa = "" -sync = "" -digest = "" +sync = "trace" +digest = "trace" [init] genesis = "./chain/westend-local/westend-local-spec-raw.json" diff --git a/chain/westend-local/westend-local-spec-raw.json b/chain/westend-local/westend-local-spec-raw.json index c87badbea1..305901804c 100644 --- a/chain/westend-local/westend-local-spec-raw.json +++ b/chain/westend-local/westend-local-spec-raw.json @@ -2,7 +2,10 @@ "name": "Westend Local Testnet", "id": "westend_local_testnet", "chainType": "Local", - "bootNodes": [], + "bootNodes": [ + "/ip4/127.0.0.1/tcp/30333/p2p/12D3KooWJ6ykeQHZAC95vFfVVATuqF41Z8xt8kjdmyzHo8NjcBsA", + "/ip4/127.0.0.1/tcp/30331/p2p/12D3KooWBYjPKQ5Q8atgKgFB7qMZhbQYTPREjkHWn2xpNSHuivgT" + ], "telemetryEndpoints": null, "protocolId": "dot", "properties": null, diff --git a/chain/westend/config.toml b/chain/westend/config.toml index 5cbef73da7..bd6e0dd2db 100644 --- a/chain/westend/config.toml +++ b/chain/westend/config.toml @@ -7,7 +7,7 @@ metrics-address = "localhost:9876" core = "" network = "" rpc = "" -state = "trace" +state = "" runtime = "" babe = "" grandpa = "trace" diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 7c95ee22a7..da277cdbde 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -296,12 +296,10 @@ func (cs *chainSync) setBlockAnnounce(who peer.ID, blockAnnounceHeader *types.He // that is also has the chain up until and including that block. // this may not be a valid assumption, but perhaps we can assume that // it is likely they will receive this block and its ancestors before us. 
- - // * disabling for deployment in staging - // cs.blockAnnounceCh <- announcedBlock{ - // who: who, - // header: blockAnnounceHeader, - // } + cs.blockAnnounceCh <- announcedBlock{ + who: who, + header: blockAnnounceHeader, + } return nil } From e33c444edda5ed471e81fcff0794962bef936782 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Sat, 6 May 2023 08:29:36 -0400 Subject: [PATCH 031/140] chore: revert changes from `westend-local-spec.json` --- chain/westend-local/config-alice.toml | 10 +++++----- chain/westend-local/westend-local-spec-raw.json | 5 +---- chain/westend/config.toml | 6 +++--- 3 files changed, 9 insertions(+), 12 deletions(-) diff --git a/chain/westend-local/config-alice.toml b/chain/westend-local/config-alice.toml index 897eeeed3c..592980d240 100644 --- a/chain/westend-local/config-alice.toml +++ b/chain/westend-local/config-alice.toml @@ -11,20 +11,20 @@ state = "" runtime = "" babe = "" grandpa = "" -sync = "trace" -digest = "trace" +sync = "" +digest = "" [init] genesis = "./chain/westend-local/westend-local-spec-raw.json" [account] -key = "" +key = "alice" unlock = "" [core] roles = 4 -babe-authority = false -grandpa-authority = false +babe-authority = true +grandpa-authority = true [network] port = 7001 diff --git a/chain/westend-local/westend-local-spec-raw.json b/chain/westend-local/westend-local-spec-raw.json index 305901804c..c87badbea1 100644 --- a/chain/westend-local/westend-local-spec-raw.json +++ b/chain/westend-local/westend-local-spec-raw.json @@ -2,10 +2,7 @@ "name": "Westend Local Testnet", "id": "westend_local_testnet", "chainType": "Local", - "bootNodes": [ - "/ip4/127.0.0.1/tcp/30333/p2p/12D3KooWJ6ykeQHZAC95vFfVVATuqF41Z8xt8kjdmyzHo8NjcBsA", - "/ip4/127.0.0.1/tcp/30331/p2p/12D3KooWBYjPKQ5Q8atgKgFB7qMZhbQYTPREjkHWn2xpNSHuivgT" - ], + "bootNodes": [], "telemetryEndpoints": null, "protocolId": "dot", "properties": null, diff --git a/chain/westend/config.toml b/chain/westend/config.toml index bd6e0dd2db..f9403abcf2 100644 --- a/chain/westend/config.toml +++ b/chain/westend/config.toml @@ -10,9 +10,9 @@ rpc = "" state = "" runtime = "" babe = "" -grandpa = "trace" -sync = "trace" -digest = "trace" +grandpa = "" +sync = "" +digest = "" [init] genesis = "./chain/westend/genesis.json" From f00a2421225fedf9927c75fb5b65189957fa1a85 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Sat, 6 May 2023 12:36:22 -0400 Subject: [PATCH 032/140] chore: enable trace to `sync` and `digest` --- chain/westend/config.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/chain/westend/config.toml b/chain/westend/config.toml index f9403abcf2..c40a398546 100644 --- a/chain/westend/config.toml +++ b/chain/westend/config.toml @@ -11,8 +11,8 @@ state = "" runtime = "" babe = "" grandpa = "" -sync = "" -digest = "" +sync = "trace" +digest = "trace" [init] genesis = "./chain/westend/genesis.json" From 6e3d2d0f630254e87fba12ac9d7b2fba2f39cdec Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 8 May 2023 08:37:21 -0400 Subject: [PATCH 033/140] chore: enable pprof to staging env --- chain/westend/config.toml | 1 + dot/sync/chain_sync.go | 7 ++++--- dot/sync/worker_pool.go | 24 +++++++++++++++++++----- 3 files changed, 24 insertions(+), 8 deletions(-) diff --git a/chain/westend/config.toml b/chain/westend/config.toml index c40a398546..6fd864236e 100644 --- a/chain/westend/config.toml +++ b/chain/westend/config.toml @@ -54,6 +54,7 @@ ws-external = false [pprof] +enabled = true listening-address = "localhost:6060" block-rate = 0 mutex-rate = 0 diff 
--git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index da277cdbde..cc1ca87225 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -722,8 +722,9 @@ loop: // if we receive and empty message from the stream we don't need to shutdown the worker if !errors.Is(taskResult.err, network.ErrReceivedEmptyMessage) { - cs.workerPool.punishPeer(taskResult.who) + cs.workerPool.punishPeer(taskResult.who, true) } + cs.workerPool.submitRequest(taskResult.request, workersResults) continue } @@ -741,7 +742,7 @@ loop: switch { case errors.Is(err, errResponseIsNotChain): logger.Criticalf("response invalid: %s", err) - cs.workerPool.punishPeer(taskResult.who) + cs.workerPool.punishPeer(taskResult.who, false) cs.workerPool.submitRequest(taskResult.request, workersResults) continue case errors.Is(err, errEmptyBlockData): @@ -750,7 +751,7 @@ loop: case errors.Is(err, errUnknownParent): case err != nil: logger.Criticalf("response invalid: %s", err) - cs.workerPool.punishPeer(taskResult.who) + cs.workerPool.punishPeer(taskResult.who, false) cs.workerPool.submitRequest(taskResult.request, workersResults) continue } diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 831f079c51..31ab8b205c 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -17,7 +17,7 @@ const ( const ( ignorePeerTimeout = 2 * time.Minute - maxRequestsAllowed uint = 45 + maxRequestsAllowed uint = 40 ) type syncTask struct { @@ -43,9 +43,10 @@ type syncWorkerPool struct { l sync.RWMutex doneCh chan struct{} - network Network - taskQueue chan *syncTask - workers map[peer.ID]*peerSyncWorker + network Network + taskQueue chan *syncTask + workers map[peer.ID]*peerSyncWorker + ignorePeers map[peer.ID]struct{} waiting bool availablePeerCh chan peer.ID @@ -64,6 +65,7 @@ func newSyncWorkerPool(net Network) *syncWorkerPool { availableBounded: make(chan struct{}), workers: make(map[peer.ID]*peerSyncWorker), taskQueue: make(chan *syncTask, maxRequestsAllowed), + ignorePeers: make(map[peer.ID]struct{}), } } @@ -104,6 +106,12 @@ func (s *syncWorkerPool) releaseWorker(who peer.ID) { s.l.Lock() defer s.l.Unlock() + _, toIgnore := s.ignorePeers[who] + if toIgnore { + delete(s.workers, who) + return + } + peerSync, has := s.workers[who] if !has { peerSync = &peerSyncWorker{status: available} @@ -129,10 +137,16 @@ func (s *syncWorkerPool) releaseWorker(who peer.ID) { } } -func (s *syncWorkerPool) punishPeer(who peer.ID) { +func (s *syncWorkerPool) punishPeer(who peer.ID, ignore bool) { s.l.Lock() defer s.l.Unlock() + if ignore { + s.ignorePeers[who] = struct{}{} + delete(s.workers, who) + return + } + _, has := s.workers[who] if !has { return From 023df2c8f1357b702875e28f923b77e758439276 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 8 May 2023 09:48:18 -0400 Subject: [PATCH 034/140] chore: use `l.Lock()` instead of `l.RLock()` --- dot/sync/worker_pool.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 31ab8b205c..e514895abf 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -171,8 +171,8 @@ var errNoPeersAvailable = errors.New("no peers available") // its status from available to busy, if there is no peer avaible then // the caller should wait for availablePeerCh func (s *syncWorkerPool) searchForAvailable() (peer.ID, error) { - s.l.RLock() - defer s.l.RUnlock() + s.l.Lock() + defer s.l.Unlock() for peerID, peerSync := range s.workers { switch peerSync.status { @@ -198,8 +198,9 @@ func (s 
*syncWorkerPool) searchForAvailable() (peer.ID, error) { } func (s *syncWorkerPool) searchForExactAvailable(peerID peer.ID) (bool, error) { - s.l.RLock() - defer s.l.RUnlock() + s.l.Lock() + defer s.l.Unlock() + peerSync, has := s.workers[peerID] if !has { return false, errPeerNotFound From 0f661debbcd0d3832052a8c40913619c8a42a106 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 8 May 2023 11:27:07 -0400 Subject: [PATCH 035/140] chore: change the worker_pool --- dot/sync/chain_sync.go | 3 +- dot/sync/worker_pool.go | 209 ++++++++++++++-------------------------- 2 files changed, 71 insertions(+), 141 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index cc1ca87225..7430db5ec9 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -305,7 +305,7 @@ func (cs *chainSync) setBlockAnnounce(who peer.ID, blockAnnounceHeader *types.He // setPeerHead sets a peer's best known block func (cs *chainSync) setPeerHead(who peer.ID, bestHash common.Hash, bestNumber uint) { - cs.workerPool.fromBlockAnnounce(who) + cs.workerPool.newPeer(who) cs.peerViewLock.Lock() defer cs.peerViewLock.Unlock() @@ -756,7 +756,6 @@ loop: continue } - cs.workerPool.releaseWorker(who) if len(response.BlockData) > 0 { firstBlockInResponse := response.BlockData[0] lastBlockInResponse := response.BlockData[len(response.BlockData)-1] diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index e514895abf..ded757ec6f 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -1,7 +1,6 @@ package sync import ( - "errors" "sync" "time" @@ -39,46 +38,58 @@ type peerSyncWorker struct { } type syncWorkerPool struct { - wg sync.WaitGroup - l sync.RWMutex - doneCh chan struct{} + wg sync.WaitGroup + l sync.RWMutex + doneCh chan struct{} + availableCond *sync.Cond network Network taskQueue chan *syncTask workers map[peer.ID]*peerSyncWorker ignorePeers map[peer.ID]struct{} - - waiting bool - availablePeerCh chan peer.ID - - waitingBounded *peer.ID - availableBounded chan struct{} - waitBoundedLock sync.Mutex } func newSyncWorkerPool(net Network) *syncWorkerPool { - return &syncWorkerPool{ - network: net, - waiting: false, - doneCh: make(chan struct{}), - availablePeerCh: make(chan peer.ID), - availableBounded: make(chan struct{}), - workers: make(map[peer.ID]*peerSyncWorker), - taskQueue: make(chan *syncTask, maxRequestsAllowed), - ignorePeers: make(map[peer.ID]struct{}), + swp := &syncWorkerPool{ + network: net, + doneCh: make(chan struct{}), + workers: make(map[peer.ID]*peerSyncWorker), + taskQueue: make(chan *syncTask, maxRequestsAllowed), + ignorePeers: make(map[peer.ID]struct{}), } + + swp.availableCond = sync.NewCond(&swp.l) + return swp } func (s *syncWorkerPool) useConnectedPeers() { connectedPeers := s.network.AllConnectedPeers() for _, connectedPeer := range connectedPeers { - s.releaseWorker(connectedPeer) + s.newPeer(connectedPeer) } } -func (s *syncWorkerPool) fromBlockAnnounce(who peer.ID) { - s.releaseWorker(who) - logger.Tracef("potential worker added, total in the pool %d", len(s.workers)) +func (s *syncWorkerPool) newPeer(who peer.ID) { + s.l.Lock() + defer s.l.Unlock() + + _, toIgnore := s.ignorePeers[who] + if toIgnore { + return + } + + peerSync, has := s.workers[who] + if !has { + peerSync = &peerSyncWorker{status: available} + s.workers[who] = peerSync + + logger.Tracef("potential worker added, total in the pool %d", len(s.workers)) + } + + // check if the punishment is not valid + if peerSync.status == punished && 
peerSync.punishedTime.Before(time.Now()) { + s.workers[who] = &peerSyncWorker{status: available} + } } func (s *syncWorkerPool) submitBoundedRequest(request *network.BlockRequestMessage, who peer.ID, resultCh chan<- *syncTaskResult) { @@ -102,41 +113,6 @@ func (s *syncWorkerPool) submitRequests(requests []*network.BlockRequestMessage, } } -func (s *syncWorkerPool) releaseWorker(who peer.ID) { - s.l.Lock() - defer s.l.Unlock() - - _, toIgnore := s.ignorePeers[who] - if toIgnore { - delete(s.workers, who) - return - } - - peerSync, has := s.workers[who] - if !has { - peerSync = &peerSyncWorker{status: available} - } - - // if the punishment is still valid we do nothing - if peerSync.status == punished && peerSync.punishedTime.After(time.Now()) { - return - } - - if s.waitingBounded != nil && *s.waitingBounded == who { - s.waitingBounded = nil - s.workers[who] = &peerSyncWorker{status: busy} - s.availableBounded <- struct{}{} - return - } - - s.workers[who] = &peerSyncWorker{status: available} - - if s.waiting { - s.waiting = false - s.availablePeerCh <- who - } -} - func (s *syncWorkerPool) punishPeer(who peer.ID, ignore bool) { s.l.Lock() defer s.l.Unlock() @@ -164,16 +140,10 @@ func (s *syncWorkerPool) totalWorkers() (total uint) { return uint(len(s.workers)) } -var errPeerNotFound = errors.New("peer not found") -var errNoPeersAvailable = errors.New("no peers available") - // getAvailablePeer returns the very first peer available and changes // its status from available to busy, if there is no peer avaible then // the caller should wait for availablePeerCh -func (s *syncWorkerPool) searchForAvailable() (peer.ID, error) { - s.l.Lock() - defer s.l.Unlock() - +func (s *syncWorkerPool) getAvailablePeer() peer.ID { for peerID, peerSync := range s.workers { switch peerSync.status { case punished: @@ -181,61 +151,25 @@ func (s *syncWorkerPool) searchForAvailable() (peer.ID, error) { // as available and notify it availability if needed // otherwise we keep the peer in the punishment and don't notify if peerSync.punishedTime.Before(time.Now()) { - peerSync.status = busy - s.workers[peerID] = peerSync - return peerID, nil + return peerID } case available: - peerSync.status = busy - s.workers[peerID] = peerSync - return peerID, nil + return peerID default: } } - s.waiting = true - return peer.ID(""), errNoPeersAvailable //could not found an available peer to dispatch + //could not found an available peer to dispatch + return peer.ID("") } -func (s *syncWorkerPool) searchForExactAvailable(peerID peer.ID) (bool, error) { - s.l.Lock() - defer s.l.Unlock() - +func (s *syncWorkerPool) getPeerByID(peerID peer.ID) *peerSyncWorker { peerSync, has := s.workers[peerID] if !has { - return false, errPeerNotFound - } - - switch peerSync.status { - case punished: - // if the punishedTime has passed then we mark it - // as available and notify it availability if needed - // otherwise we keep the peer in the punishment and don't notify - if peerSync.punishedTime.Before(time.Now()) { - peerSync.status = busy - s.workers[peerID] = peerSync - return true, nil - } - case available: - peerSync.status = busy - s.workers[peerID] = peerSync - return true, nil - default: + return nil } - return peerSync.status == available, nil -} - -func (s *syncWorkerPool) waitPeerAndExecute(network Network, who peer.ID, availableBounded <-chan struct{}, task *syncTask, wg *sync.WaitGroup) { - s.waitBoundedLock.Lock() - s.waitingBounded = &who - - logger.Debugf("[WAITING] bounded task to peer %s in idle state: %s", who, task.request) - 
<-availableBounded - logger.Debugf("[WAITING] got the peer %s to handle task: %s", who, task) - s.waitBoundedLock.Unlock() - - executeRequest(network, who, task, wg) + return peerSync } func (s *syncWorkerPool) listenForRequests(stopCh chan struct{}) { @@ -248,48 +182,45 @@ func (s *syncWorkerPool) listenForRequests(stopCh chan struct{}) { return case task := <-s.taskQueue: - var availablePeer peer.ID - if task.boundTo != nil { - isAvailable, err := s.searchForExactAvailable(*task.boundTo) - if err != nil { - logger.Errorf("while checking peer %s available: %s", - *task.boundTo, task.request) - continue + s.l.Lock() + for { + var peerID peer.ID + if task.boundTo != nil { + peerSync := s.getPeerByID(*task.boundTo) + if peerSync != nil && peerSync.status == available { + peerID = *task.boundTo + } + } else { + peerID = s.getAvailablePeer() } - if isAvailable { - availablePeer = *task.boundTo - } else { + if peerID != peer.ID("") { + s.workers[peerID] = &peerSyncWorker{status: busy} + s.l.Unlock() + s.wg.Add(1) - go s.waitPeerAndExecute(s.network, *task.boundTo, s.availableBounded, task, &s.wg) - continue - } - } else { - var err error - availablePeer, err = s.searchForAvailable() - if err != nil { - if errors.Is(err, errNoPeersAvailable) { - logger.Debugf("[WAITING] task in idle state: %s", task.request) - availablePeer = <-s.availablePeerCh - logger.Debugf("[WAITING] got the peer %s to handle task: %s", availablePeer, task) - } else { - logger.Errorf("while searching for available peer: %s", task.request) - } + go s.executeRequest(peerID, task, &s.wg) + break + } else { + s.availableCond.Wait() } } - - s.wg.Add(1) - go executeRequest(s.network, availablePeer, task, &s.wg) } } } -func executeRequest(network Network, who peer.ID, task *syncTask, wg *sync.WaitGroup) { - defer wg.Done() +func (s *syncWorkerPool) executeRequest(who peer.ID, task *syncTask, wg *sync.WaitGroup) { + defer func() { + s.l.Lock() + s.workers[who] = &peerSyncWorker{status: available} + s.l.Unlock() + s.availableCond.Signal() + wg.Done() + }() request := task.request logger.Debugf("[EXECUTING] worker %s: block request: %s", who, request) - response, err := network.DoBlockRequest(who, request) + response, err := s.network.DoBlockRequest(who, request) if err != nil { logger.Debugf("[FINISHED] worker %s: err: %s", who, err) } else if response != nil { From 924d17255cd79ab4bdf3a9a51e6bcc01e075df7e Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 9 May 2023 08:50:14 -0400 Subject: [PATCH 036/140] chore: small fixes to the `executeRequest` method --- chain/westend/config.toml | 5 ++--- dot/sync/chain_sync.go | 12 ++++++++---- dot/sync/worker_pool.go | 18 ++++++++++++------ 3 files changed, 22 insertions(+), 13 deletions(-) diff --git a/chain/westend/config.toml b/chain/westend/config.toml index 6fd864236e..f9403abcf2 100644 --- a/chain/westend/config.toml +++ b/chain/westend/config.toml @@ -11,8 +11,8 @@ state = "" runtime = "" babe = "" grandpa = "" -sync = "trace" -digest = "trace" +sync = "" +digest = "" [init] genesis = "./chain/westend/genesis.json" @@ -54,7 +54,6 @@ ws-external = false [pprof] -enabled = true listening-address = "localhost:6060" block-rate = 0 mutex-rate = 0 diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 7430db5ec9..7cecf5a7a9 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -305,7 +305,8 @@ func (cs *chainSync) setBlockAnnounce(who peer.ID, blockAnnounceHeader *types.He // setPeerHead sets a peer's best known block func (cs *chainSync) setPeerHead(who 
peer.ID, bestHash common.Hash, bestNumber uint) { - cs.workerPool.newPeer(who) + cs.workerPool.fromBlockAnnounce(who) + cs.peerViewLock.Lock() defer cs.peerViewLock.Unlock() @@ -317,8 +318,10 @@ func (cs *chainSync) setPeerHead(who peer.ID, bestHash common.Hash, bestNumber u } func (cs *chainSync) logSyncSpeed() { - defer close(cs.logSyncDone) - defer cs.logSyncTicker.Stop() + defer func() { + cs.logSyncTicker.Stop() + close(cs.logSyncDone) + }() for { before, err := cs.blockState.BestBlockHeader() @@ -580,12 +583,13 @@ func (cs *chainSync) executeBootstrapSync() error { return nil } + cs.workerPool.useConnectedPeers() + bestBlockHeader, err := cs.blockState.BestBlockHeader() if err != nil { return fmt.Errorf("getting best block header while syncing: %w", err) } startRequestAt := bestBlockHeader.Number + 1 - cs.workerPool.useConnectedPeers() // we build the set of requests based on the amount of available peers // in the worker pool, if we have more peers than `maxRequestAllowed` diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index ded757ec6f..fa17423092 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -54,7 +54,7 @@ func newSyncWorkerPool(net Network) *syncWorkerPool { network: net, doneCh: make(chan struct{}), workers: make(map[peer.ID]*peerSyncWorker), - taskQueue: make(chan *syncTask, maxRequestsAllowed), + taskQueue: make(chan *syncTask), ignorePeers: make(map[peer.ID]struct{}), } @@ -64,15 +64,21 @@ func newSyncWorkerPool(net Network) *syncWorkerPool { func (s *syncWorkerPool) useConnectedPeers() { connectedPeers := s.network.AllConnectedPeers() + s.l.Lock() + defer s.l.Unlock() + for _, connectedPeer := range connectedPeers { s.newPeer(connectedPeer) } } -func (s *syncWorkerPool) newPeer(who peer.ID) { +func (s *syncWorkerPool) fromBlockAnnounce(who peer.ID) { s.l.Lock() defer s.l.Unlock() + s.newPeer(who) +} +func (s *syncWorkerPool) newPeer(who peer.ID) { _, toIgnore := s.ignorePeers[who] if toIgnore { return @@ -178,7 +184,7 @@ func (s *syncWorkerPool) listenForRequests(stopCh chan struct{}) { select { case <-stopCh: //wait for ongoing requests to be finished before returning - s.wg.Wait() + //s.wg.Wait() return case task := <-s.taskQueue: @@ -199,7 +205,7 @@ func (s *syncWorkerPool) listenForRequests(stopCh chan struct{}) { s.l.Unlock() s.wg.Add(1) - go s.executeRequest(peerID, task, &s.wg) + go s.executeRequest(peerID, task) break } else { s.availableCond.Wait() @@ -209,13 +215,13 @@ func (s *syncWorkerPool) listenForRequests(stopCh chan struct{}) { } } -func (s *syncWorkerPool) executeRequest(who peer.ID, task *syncTask, wg *sync.WaitGroup) { +func (s *syncWorkerPool) executeRequest(who peer.ID, task *syncTask) { defer func() { s.l.Lock() s.workers[who] = &peerSyncWorker{status: available} s.l.Unlock() s.availableCond.Signal() - wg.Done() + s.wg.Done() }() request := task.request From 6b481c19ad0c8e253c28db4059398b017176ed25 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 9 May 2023 11:25:55 -0400 Subject: [PATCH 037/140] chore: remove unneeded comment line + improve func name --- dot/sync/chain_sync.go | 2 +- dot/sync/requests.go | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 7cecf5a7a9..ac3e8439ee 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -621,7 +621,7 @@ func (cs *chainSync) executeBootstrapSync() error { } requests := ascedingBlockRequests(startRequestAt, targetBlockNumber, bootstrapRequestData) - expectedAmountOfBlocks := 
totalOfBlocksRequested(requests)
+	expectedAmountOfBlocks := totalBlocksRequested(requests)
 
 	wg := sync.WaitGroup{}
 	resultsQueue := make(chan *syncTaskResult)
diff --git a/dot/sync/requests.go b/dot/sync/requests.go
index af22c7d477..a45eb547a2 100644
--- a/dot/sync/requests.go
+++ b/dot/sync/requests.go
@@ -58,7 +58,6 @@ func ascedingBlockRequests(startNumber, targetNumber uint, requestedData byte) [
 	}
 
 	reqs := make([]*network.BlockRequestMessage, numRequests)
-	// check if we want to specify a size
 	for i := uint(0); i < numRequests; i++ {
 		max := uint32(maxResponseSize)
@@ -81,7 +80,7 @@ func ascedingBlockRequests(startNumber, targetNumber uint, requestedData byte) [
 	return reqs
 }
 
-func totalOfBlocksRequested(requests []*network.BlockRequestMessage) (total uint32) {
+func totalBlocksRequested(requests []*network.BlockRequestMessage) (total uint32) {
 	for _, request := range requests {
 		if request.Max != nil {
 			total += *request.Max

From 35c9eea826a431f47d335ccb00a6d44beaa88f73 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Mon, 22 May 2023 08:43:42 -0400
Subject: [PATCH 038/140] chore: re-written `Test_chainSync_setBlockAnnounce`
 test

---
 dot/sync/chain_sync.go                        |  15 +-
 dot/sync/chain_sync_test.go                   | 394 +++++++++++++++++-
 .../disjoint_block_set_integration_test.go    |  96 -----
 dot/sync/errors.go                            |   3 +-
 dot/sync/syncer_test.go                       |   9 -
 5 files changed, 391 insertions(+), 126 deletions(-)

diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go
index ac3e8439ee..86bd2e39a6 100644
--- a/dot/sync/chain_sync.go
+++ b/dot/sync/chain_sync.go
@@ -217,16 +217,6 @@ func (cs *chainSync) syncState() chainSyncState {
 func (cs *chainSync) setBlockAnnounce(who peer.ID, blockAnnounceHeader *types.Header) error {
 	blockAnnounceHeaderHash := blockAnnounceHeader.Hash()
-	// check if we already know of this block, if not,
-	// add to pendingBlocks set
-	has, err := cs.blockState.HasHeader(blockAnnounceHeaderHash)
-	if err != nil {
-		return err
-	}
-
-	if has {
-		return blocktree.ErrBlockExists
-	}
 
 	// if the peer reports a lower or equal best block number than us,
 	// check if they are on a fork or not
@@ -238,6 +228,7 @@ func (cs *chainSync) setBlockAnnounce(who peer.ID, blockAnnounceHeader *types.He
 	if blockAnnounceHeader.Number <= bestBlockHeader.Number {
 		// check if our block hash for that number is the same, if so, do nothing
 		// as we already have that block
+		// TODO: check what happens when get hash by number returns nothing or ErrNotExists
 		ourHash, err := cs.blockState.GetHashByNumber(blockAnnounceHeader.Number)
 		if err != nil {
 			return fmt.Errorf("get block hash by number: %w", err)
@@ -284,8 +275,8 @@ func (cs *chainSync) setBlockAnnounce(who peer.ID, blockAnnounceHeader *types.He
 
 	hasPendingBlock := cs.pendingBlocks.hasBlock(blockAnnounceHeaderHash)
 	if hasPendingBlock {
-		return fmt.Errorf("block %s (#%d) already in the pending set",
-			blockAnnounceHeaderHash, blockAnnounceHeader.Number)
+		return fmt.Errorf("%w: block %s (#%d)",
+			errAlreadyInDisjointSet, blockAnnounceHeaderHash, blockAnnounceHeader.Number)
 	}
 
 	if err = cs.pendingBlocks.addHeader(blockAnnounceHeader); err != nil {
diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go
index 6fe5061444..79e202b6b7 100644
--- a/dot/sync/chain_sync_test.go
+++ b/dot/sync/chain_sync_test.go
@@ -1,24 +1,328 @@
 package sync
 
 import (
+	"errors"
+	"fmt"
 	"testing"
 	"time"
 
+	"github.com/ChainSafe/gossamer/dot/peerset"
 	"github.com/ChainSafe/gossamer/dot/types"
 	"github.com/ChainSafe/gossamer/lib/common"
+	"github.com/ChainSafe/gossamer/pkg/scale"
"github.com/golang/mock/gomock" "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) +const defaultSlotDuration = 6 * time.Second + +func newTestChainSyncWithReadyBlocks(ctrl *gomock.Controller) *chainSync { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + + cfg := chainSyncConfig{ + bs: mockBlockState, + pendingBlocks: newDisjointBlockSet(pendingBlocksLimit), + minPeers: 1, + maxPeers: 5, + slotDuration: defaultSlotDuration, + } + + return newChainSync(cfg) +} + +func newTestChainSync(ctrl *gomock.Controller) *chainSync { + return newTestChainSyncWithReadyBlocks(ctrl) +} + +func Test_chainSyncState_String(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + s chainSyncState + want string + }{ + { + name: "case_bootstrap", + s: bootstrap, + want: "bootstrap", + }, + { + name: "case_tip", + s: tip, + want: "tip", + }, + { + name: "case_unknown", + s: 3, + want: "unknown", + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + got := tt.s.String() + assert.Equal(t, tt.want, got) + }) + } +} + +func Test_chainSync_setBlockAnnounce(t *testing.T) { + t.Parallel() + + errTest := errors.New("test error") + const somePeer = peer.ID("abc") + + block1AnnounceHeader := types.NewHeader(common.Hash{}, common.Hash{}, + common.Hash{}, 1, scale.VaryingDataTypeSlice{}) + block2AnnounceHeader := types.NewHeader(common.Hash{}, common.Hash{}, + common.Hash{}, 2, scale.VaryingDataTypeSlice{}) + + testCases := map[string]struct { + chainSyncBuilder func(ctrl *gomock.Controller) *chainSync + peerID peer.ID + blockAnnounceHeader *types.Header + errWrapped error + errMessage string + expectedQueuedBlockAnnounce *announcedBlock + }{ + "best_block_header_error": { + chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { + blockState := NewMockBlockState(ctrl) + blockState.EXPECT().BestBlockHeader().Return(nil, errTest) + return &chainSync{ + peerView: map[peer.ID]*peerView{}, + blockState: blockState, + } + }, + peerID: somePeer, + blockAnnounceHeader: block1AnnounceHeader, + errWrapped: errTest, + errMessage: "best block header: test error", + }, + "number_smaller_than_best_block_number_get_hash_by_number_error": { + chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { + blockState := NewMockBlockState(ctrl) + bestBlockHeader := &types.Header{Number: 2} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + blockState.EXPECT().GetHashByNumber(uint(1)).Return(common.Hash{}, errTest) + return &chainSync{ + peerView: map[peer.ID]*peerView{}, + blockState: blockState, + } + }, + peerID: somePeer, + blockAnnounceHeader: block1AnnounceHeader, + errWrapped: errTest, + errMessage: "get block hash by number: test error", + }, + "number_smaller_than_best_block_number_and_same_hash": { + chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { + blockState := NewMockBlockState(ctrl) + bestBlockHeader := &types.Header{Number: 2} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + blockState.EXPECT().GetHashByNumber(uint(1)).Return(block1AnnounceHeader.Hash(), nil) + return &chainSync{ + peerView: map[peer.ID]*peerView{}, + blockState: blockState, + } + }, + peerID: somePeer, + blockAnnounceHeader: block1AnnounceHeader, + }, + "number_smaller_than_best_block_number_get_highest_finalised_header_error": { + chainSyncBuilder: func(ctrl 
*gomock.Controller) *chainSync {
+				blockState := NewMockBlockState(ctrl)
+				bestBlockHeader := &types.Header{Number: 2}
+				blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil)
+				blockState.EXPECT().GetHashByNumber(uint(1)).Return(common.Hash{2}, nil)
+				blockState.EXPECT().GetHighestFinalisedHeader().Return(nil, errTest)
+				return &chainSync{
+					peerView:   map[peer.ID]*peerView{},
+					blockState: blockState,
+				}
+			},
+			peerID:              somePeer,
+			blockAnnounceHeader: block1AnnounceHeader,
+			errWrapped:          errTest,
+			errMessage:          "get highest finalised header: test error",
+		},
+		"number_smaller_than_best_block_announced_number_equals_finalised_number": {
+			chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync {
+				blockState := NewMockBlockState(ctrl)
+
+				bestBlockHeader := &types.Header{Number: 2}
+				blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil)
+				blockState.EXPECT().GetHashByNumber(uint(1)).
+					Return(common.Hash{2}, nil) // other hash than someHash
+				finalisedBlockHeader := &types.Header{Number: 1}
+				blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil)
+				network := NewMockNetwork(ctrl)
+				network.EXPECT().ReportPeer(peerset.ReputationChange{
+					Value:  peerset.BadBlockAnnouncementValue,
+					Reason: peerset.BadBlockAnnouncementReason,
+				}, somePeer)
+				return &chainSync{
+					peerView:   map[peer.ID]*peerView{},
+					blockState: blockState,
+					network:    network,
+				}
+			},
+			peerID:              somePeer,
+			blockAnnounceHeader: block1AnnounceHeader,
+			errWrapped:          errPeerOnInvalidFork,
+			errMessage:          "peer is on an invalid fork: for peer ZiCa and block number 1",
+		},
+		"number_smaller_than_best_block_number_and_finalised_number_bigger_than_number": {
+			chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync {
+				blockState := NewMockBlockState(ctrl)
+				bestBlockHeader := &types.Header{Number: 2}
+				blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil)
+				blockState.EXPECT().GetHashByNumber(uint(1)).
+					Return(common.Hash{2}, nil) // other hash than someHash
+				finalisedBlockHeader := &types.Header{Number: 2}
+				blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil)
+				network := NewMockNetwork(ctrl)
+				network.EXPECT().ReportPeer(peerset.ReputationChange{
+					Value:  peerset.BadBlockAnnouncementValue,
+					Reason: peerset.BadBlockAnnouncementReason,
+				}, somePeer)
+				return &chainSync{
+					peerView:   map[peer.ID]*peerView{},
+					blockState: blockState,
+					network:    network,
+				}
+			},
+			peerID:              somePeer,
+			blockAnnounceHeader: block1AnnounceHeader,
+			errWrapped:          errPeerOnInvalidFork,
+			errMessage:          "peer is on an invalid fork: for peer ZiCa and block number 1",
+		},
+		"number_smaller_than_best_block_number_and_" +
+			"finalised_number_smaller_than_number_and_" +
+			"has_header_error": {
+			chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync {
+				blockState := NewMockBlockState(ctrl)
+				bestBlockHeader := &types.Header{Number: 3}
+				blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil)
+				blockState.EXPECT().GetHashByNumber(uint(2)).
+ Return(common.Hash{5, 1, 2}, nil) // other hash than block2AnnounceHeader hash + finalisedBlockHeader := &types.Header{Number: 1} + blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) + blockState.EXPECT().HasHeader(block2AnnounceHeader.Hash()).Return(false, errTest) + return &chainSync{ + peerView: map[peer.ID]*peerView{}, + blockState: blockState, + } + }, + peerID: somePeer, + blockAnnounceHeader: block2AnnounceHeader, + errWrapped: errTest, + errMessage: "has header: test error", + }, + "number_smaller_than_best_block_number_and_" + + "finalised_number_smaller_than_number_and_" + + "has_the_hash": { + chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { + blockState := NewMockBlockState(ctrl) + bestBlockHeader := &types.Header{Number: 3} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + blockState.EXPECT().GetHashByNumber(uint(2)). + Return(common.Hash{2}, nil) // other hash than someHash + finalisedBlockHeader := &types.Header{Number: 1} + blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) + blockState.EXPECT().HasHeader(block2AnnounceHeader.Hash()).Return(true, nil) + return &chainSync{ + peerView: map[peer.ID]*peerView{}, + blockState: blockState, + } + }, + peerID: somePeer, + blockAnnounceHeader: block2AnnounceHeader, + }, + "number_bigger_than_best_block_number_already_exists_in_disjoint_set": { + chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { + blockState := NewMockBlockState(ctrl) + bestBlockHeader := &types.Header{Number: 1} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + pendingBlocks := NewMockDisjointBlockSet(ctrl) + pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(true) + return &chainSync{ + peerView: map[peer.ID]*peerView{}, + blockState: blockState, + pendingBlocks: pendingBlocks, + } + }, + peerID: somePeer, + blockAnnounceHeader: block2AnnounceHeader, + errWrapped: errAlreadyInDisjointSet, + errMessage: fmt.Sprintf("already in disjoint set: block %s (#%d)", + block2AnnounceHeader.Hash(), block2AnnounceHeader.Number), + }, + "number_bigger_than_best_block_number_added_in_disjoint_set_with_success": { + chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { + blockState := NewMockBlockState(ctrl) + bestBlockHeader := &types.Header{Number: 1} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + pendingBlocks := NewMockDisjointBlockSet(ctrl) + pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) + pendingBlocks.EXPECT().addHeader(block2AnnounceHeader).Return(nil) + return &chainSync{ + peerView: map[peer.ID]*peerView{}, + blockState: blockState, + pendingBlocks: pendingBlocks, + // buffered of 1 so setBlockAnnounce can write to it + // without a consumer of the channel on the other end. 
+ blockAnnounceCh: make(chan announcedBlock, 1),
+ }
+ },
+ peerID: somePeer,
+ blockAnnounceHeader: block2AnnounceHeader,
+ expectedQueuedBlockAnnounce: &announcedBlock{
+ who: somePeer,
+ header: block2AnnounceHeader,
+ },
+ },
+ }
+
+ for name, testCase := range testCases {
+ testCase := testCase
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+ ctrl := gomock.NewController(t)
+
+ chainSync := testCase.chainSyncBuilder(ctrl)
+
+ err := chainSync.setBlockAnnounce(testCase.peerID, testCase.blockAnnounceHeader)
+
+ assert.ErrorIs(t, err, testCase.errWrapped)
+ if testCase.errWrapped != nil {
+ assert.EqualError(t, err, testCase.errMessage)
+ }
+
+ if testCase.expectedQueuedBlockAnnounce != nil {
+ queuedBlockAnnounce := <-chainSync.blockAnnounceCh
+ assert.Equal(t, *testCase.expectedQueuedBlockAnnounce, queuedBlockAnnounce)
+ }
+ })
+ }
+}
+
 func TestChainSync_setPeerHead(t *testing.T) {
 const randomHashString = "0x580d77a9136035a0bc3c3cd86286172f7f81291164c5914266073a30466fba21"
 randomHash := common.MustHexToHash(randomHashString)
 testcases := map[string]struct {
- newChainSync func(t *testing.T, ctrl *gomock.Controller) *chainSync
- peerID peer.ID
- bestHash common.Hash
- bestNumber uint
+ newChainSync func(t *testing.T, ctrl *gomock.Controller) *chainSync
+ peerID peer.ID
+ bestHash common.Hash
+ bestNumber uint
+ shouldBeAndWorker bool
+ workerStatus byte
 }{
 "set_peer_head_with_new_peer": {
 newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync {
@@ -29,9 +333,70 @@ func TestChainSync_setPeerHead(t *testing.T) {
 cs.workerPool = workerPool
 return cs
 },
- peerID: peer.ID("peer-test"),
- bestHash: randomHash,
- bestNumber: uint(20),
+ peerID: peer.ID("peer-test"),
+ bestHash: randomHash,
+ bestNumber: uint(20),
+ shouldBeAndWorker: true,
+ workerStatus: available,
+ },
+ "set_peer_head_with_an_ignored_peer_should_not_be_included_in_the_workerpool": {
+ newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync {
+ networkMock := NewMockNetwork(ctrl)
+ workerPool := newSyncWorkerPool(networkMock)
+ workerPool.ignorePeers = map[peer.ID]struct{}{
+ peer.ID("peer-test"): {},
+ }
+
+ cs := newChainSyncTest(t, ctrl)
+ cs.workerPool = workerPool
+ return cs
+ },
+ peerID: peer.ID("peer-test"),
+ bestHash: randomHash,
+ bestNumber: uint(20),
+ shouldBeAndWorker: false,
+ },
+ "set_peer_head_that_is_still_punished_in_the_worker_pool": {
+ newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync {
+ networkMock := NewMockNetwork(ctrl)
+ workerPool := newSyncWorkerPool(networkMock)
+ workerPool.workers = map[peer.ID]*peerSyncWorker{
+ peer.ID("peer-test"): {
+ status: punished,
+ punishedTime: time.Now().Add(3 * time.Hour),
+ },
+ }
+
+ cs := newChainSyncTest(t, ctrl)
+ cs.workerPool = workerPool
+ return cs
+ },
+ peerID: peer.ID("peer-test"),
+ bestHash: randomHash,
+ bestNumber: uint(20),
+ shouldBeAndWorker: true,
+ workerStatus: punished,
+ },
+ "set_peer_head_whose_punishment_is_no_longer_valid_in_the_worker_pool": {
+ newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync {
+ networkMock := NewMockNetwork(ctrl)
+ workerPool := newSyncWorkerPool(networkMock)
+ workerPool.workers = map[peer.ID]*peerSyncWorker{
+ peer.ID("peer-test"): {
+ status: punished,
+ punishedTime: time.Now().Add(-3 * time.Hour),
+ },
+ }
+
+ cs := newChainSyncTest(t, ctrl)
+ cs.workerPool = workerPool
+ return cs
+ },
+ peerID: peer.ID("peer-test"),
+ bestHash: randomHash,
+ bestNumber: uint(20),
+ shouldBeAndWorker: true,
+ workerStatus: available,
+ },
 }
@@ -41,6 +406,21 @@ func 
TestChainSync_setPeerHead(t *testing.T) { ctrl := gomock.NewController(t) cs := tt.newChainSync(t, ctrl) cs.setPeerHead(tt.peerID, tt.bestHash, tt.bestNumber) + + view, exists := cs.peerView[tt.peerID] + require.True(t, exists) + require.Equal(t, tt.peerID, view.who) + require.Equal(t, tt.bestHash, view.hash) + require.Equal(t, tt.bestNumber, view.number) + + if tt.shouldBeAndWorker { + syncWorker, exists := cs.workerPool.workers[tt.peerID] + require.True(t, exists) + require.Equal(t, tt.workerStatus, syncWorker.status) + } else { + _, exists := cs.workerPool.workers[tt.peerID] + require.False(t, exists) + } }) } } diff --git a/dot/sync/disjoint_block_set_integration_test.go b/dot/sync/disjoint_block_set_integration_test.go index 2497b8f290..ec6745ba56 100644 --- a/dot/sync/disjoint_block_set_integration_test.go +++ b/dot/sync/disjoint_block_set_integration_test.go @@ -113,102 +113,6 @@ func TestPendingBlock_toBlockData(t *testing.T) { require.Equal(t, expected, pb.toBlockData()) } -func TestDisjointBlockSet_getReadyDescendants(t *testing.T) { - s := newDisjointBlockSet(pendingBlocksLimit) - - // test that descendant chain gets returned by getReadyDescendants on block 1 being ready - header1 := &types.Header{ - Number: 1, - } - block1 := &types.Block{ - Header: *header1, - Body: types.Body{}, - } - - header2 := &types.Header{ - ParentHash: header1.Hash(), - Number: 2, - } - block2 := &types.Block{ - Header: *header2, - Body: types.Body{}, - } - s.addBlock(block2) - - header3 := &types.Header{ - ParentHash: header2.Hash(), - Number: 3, - } - block3 := &types.Block{ - Header: *header3, - Body: types.Body{}, - } - s.addBlock(block3) - - header2NotDescendant := &types.Header{ - ParentHash: common.Hash{0xff}, - Number: 2, - } - block2NotDescendant := &types.Block{ - Header: *header2NotDescendant, - Body: types.Body{}, - } - s.addBlock(block2NotDescendant) - - ready := []*types.BlockData{block1.ToBlockData()} - ready = s.getReadyDescendants(header1.Hash(), ready) - require.Equal(t, 3, len(ready)) - require.Equal(t, block1.ToBlockData(), ready[0]) - require.Equal(t, block2.ToBlockData(), ready[1]) - require.Equal(t, block3.ToBlockData(), ready[2]) -} - -func TestDisjointBlockSet_getReadyDescendants_blockNotComplete(t *testing.T) { - s := newDisjointBlockSet(pendingBlocksLimit) - - // test that descendant chain gets returned by getReadyDescendants on block 1 being ready - // the ready list should contain only block 1 and 2, as block 3 is incomplete (body is missing) - header1 := &types.Header{ - Number: 1, - } - block1 := &types.Block{ - Header: *header1, - Body: types.Body{}, - } - - header2 := &types.Header{ - ParentHash: header1.Hash(), - Number: 2, - } - block2 := &types.Block{ - Header: *header2, - Body: types.Body{}, - } - s.addBlock(block2) - - header3 := &types.Header{ - ParentHash: header2.Hash(), - Number: 3, - } - s.addHeader(header3) - - header2NotDescendant := &types.Header{ - ParentHash: common.Hash{0xff}, - Number: 2, - } - block2NotDescendant := &types.Block{ - Header: *header2NotDescendant, - Body: types.Body{}, - } - s.addBlock(block2NotDescendant) - - ready := []*types.BlockData{block1.ToBlockData()} - ready = s.getReadyDescendants(header1.Hash(), ready) - require.Equal(t, 2, len(ready)) - require.Equal(t, block1.ToBlockData(), ready[0]) - require.Equal(t, block2.ToBlockData(), ready[1]) -} - func TestDisjointBlockSet_ClearBlocks(t *testing.T) { s := newDisjointBlockSet(pendingBlocksLimit) diff --git a/dot/sync/errors.go b/dot/sync/errors.go index 00b304a640..d44fc045dc 100644 
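The `setPeerHead` cases above pin down how the pool treats ignored and punished peers: an ignored peer never becomes a worker, a punishment whose deadline still lies in the future is preserved, and an expired punishment readmits the worker as available. Below is a minimal sketch of that readmission rule; the `peerSyncWorker` fields and the `available`/`punished` statuses are taken from the tests, while the `admit` helper and the trimmed-down pool struct are illustrative assumptions, not the repository's implementation:

```go
package sync

import (
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
)

const (
	available byte = iota
	punished
)

type peerSyncWorker struct {
	status       byte
	punishedTime time.Time
}

type syncWorkerPool struct {
	ignorePeers map[peer.ID]struct{}
	workers     map[peer.ID]*peerSyncWorker
}

// admit applies the rule the tests encode: ignored peers never join the
// pool, a punishment still in force is kept untouched, and an expired
// punishment makes the worker available again.
func (s *syncWorkerPool) admit(who peer.ID) {
	if _, ignored := s.ignorePeers[who]; ignored {
		return
	}
	if w, ok := s.workers[who]; ok && w.status == punished && w.punishedTime.After(time.Now()) {
		return // deadline not reached yet, keep the punished status
	}
	s.workers[who] = &peerSyncWorker{status: available}
}
```

Keeping the expiry check inside admission (rather than in a background sweeper) means a punished peer is re-evaluated exactly when it next announces a block, which is the behaviour the last two test cases assert.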
--- a/dot/sync/errors.go +++ b/dot/sync/errors.go @@ -23,17 +23,16 @@ var ( errUnableToGetTarget = errors.New("unable to get target") errEmptyBlockData = errors.New("empty block data") errNilBlockData = errors.New("block data is nil") - errNilResponse = errors.New("block response is nil") errNilHeaderInResponse = errors.New("expected header, received none") errNilBodyInResponse = errors.New("expected body, received none") errNoPeers = errors.New("no peers to sync with") errResponseIsNotChain = errors.New("block response does not form a chain") errPeerOnInvalidFork = errors.New("peer is on an invalid fork") - errInvalidDirection = errors.New("direction of request does not match specified start and target") errUnknownParent = errors.New("parent of first block in block response is unknown") errUnknownBlockForJustification = errors.New("received justification for unknown block") errFailedToGetParent = errors.New("failed to get parent header") errStartAndEndMismatch = errors.New("request start and end hash are not on the same chain") errFailedToGetDescendant = errors.New("failed to find descendant block") errBadBlock = errors.New("known bad block") + errAlreadyInDisjointSet = errors.New("already in disjoint set") ) diff --git a/dot/sync/syncer_test.go b/dot/sync/syncer_test.go index 32d387816a..302a8a45db 100644 --- a/dot/sync/syncer_test.go +++ b/dot/sync/syncer_test.go @@ -245,12 +245,6 @@ func TestService_Start(t *testing.T) { allCalled.Done() }) - chainProcessor := NewMockChainProcessor(ctrl) - allCalled.Add(1) - chainProcessor.EXPECT().processReadyBlocks().DoAndReturn(func() { - allCalled.Done() - }) - service := Service{ chainSync: chainSync, } @@ -266,9 +260,6 @@ func TestService_Stop(t *testing.T) { chainSync := NewMockChainSync(ctrl) chainSync.EXPECT().stop() - chainProcessor := NewMockChainProcessor(ctrl) - chainProcessor.EXPECT().stop() - service := &Service{ chainSync: chainSync, } From 443045c9e990ce3474c2f6cf44eef93c3b9101c8 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 23 May 2023 19:38:59 -0400 Subject: [PATCH 039/140] chore: include a bootstrap sync test with 2 workers --- dot/sync/chain_sync.go | 182 ++++++++++-------------- dot/sync/chain_sync_test.go | 274 ++++++++++++++++++++++++++++++++++++ dot/sync/worker_pool.go | 7 +- 3 files changed, 350 insertions(+), 113 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 86bd2e39a6..1977b29207 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -568,61 +568,54 @@ func (cs *chainSync) requestPendingBlocks() error { } func (cs *chainSync) executeBootstrapSync() error { - endBootstrapSync := false - for { - if endBootstrapSync { - return nil - } + cs.workerPool.useConnectedPeers() - cs.workerPool.useConnectedPeers() - - bestBlockHeader, err := cs.blockState.BestBlockHeader() - if err != nil { - return fmt.Errorf("getting best block header while syncing: %w", err) - } - startRequestAt := bestBlockHeader.Number + 1 - - // we build the set of requests based on the amount of available peers - // in the worker pool, if we have more peers than `maxRequestAllowed` - // so we limit to `maxRequestAllowed` to avoid the error: - // cannot reserve outbound connection: resource limit exceeded - availableWorkers := cs.workerPool.totalWorkers() - if availableWorkers > maxRequestsAllowed { - availableWorkers = maxRequestsAllowed - } + bestBlockHeader, err := cs.blockState.BestBlockHeader() + if err != nil { + return fmt.Errorf("getting best block header while syncing: %w", err) + } + 
startRequestAt := bestBlockHeader.Number + 1
+
+ // we build the set of requests based on the number of available peers
+ // in the worker pool; if we have more peers than `maxRequestsAllowed`
+ // we cap the amount at `maxRequestsAllowed` to avoid the error:
+ // cannot reserve outbound connection: resource limit exceeded
+ availableWorkers := cs.workerPool.totalWorkers()
+ if availableWorkers > maxRequestsAllowed {
+ availableWorkers = maxRequestsAllowed
+ }
- // targetBlockNumber is the virtual target we will request, however
- // we should bound it to the real target which is collected through
- // block announces received from other peers
- targetBlockNumber := startRequestAt + uint(availableWorkers)*128
- realTarget, err := cs.getTarget()
- if err != nil {
- return fmt.Errorf("while getting target: %w", err)
- }
+ // targetBlockNumber is the virtual target we will request; however,
+ // we should bound it to the real target, which is collected through
+ // block announces received from other peers
+ targetBlockNumber := startRequestAt + uint(availableWorkers)*128
+ realTarget, err := cs.getTarget()
+ if err != nil {
+ return fmt.Errorf("while getting target: %w", err)
+ }
- if targetBlockNumber > realTarget {
- // basically if our virtual target is beyond the real target
- // that means we are few requests far from the tip, then we
- // calculate the correct amount of missing requests and then
- // change to tip sync which should take care of the rest
- diff := targetBlockNumber - realTarget
- numOfRequestsToDrop := (diff / 128) + 1
- targetBlockNumber = targetBlockNumber - (numOfRequestsToDrop * 128)
- endBootstrapSync = true
- }
+ if targetBlockNumber > realTarget {
+ // if our virtual target is beyond the real target, we are only a
+ // few requests away from the tip, so we drop the surplus requests
+ // and let tip sync take care of the rest
+ diff := targetBlockNumber - realTarget
+ numOfRequestsToDrop := (diff / 128) + 1
+ targetBlockNumber = targetBlockNumber - (numOfRequestsToDrop * 128)
+ }
- requests := ascedingBlockRequests(startRequestAt, targetBlockNumber, bootstrapRequestData)
- expectedAmountOfBlocks := totalBlocksRequested(requests)
+ requests := ascedingBlockRequests(startRequestAt, targetBlockNumber, bootstrapRequestData)
+ expectedAmountOfBlocks := totalBlocksRequested(requests)
- wg := sync.WaitGroup{}
- resultsQueue := make(chan *syncTaskResult)
+ wg := sync.WaitGroup{}
+ resultsQueue := make(chan *syncTaskResult)
- wg.Add(1)
- go cs.handleWorkersResults(resultsQueue, startRequestAt, expectedAmountOfBlocks, &wg)
- cs.workerPool.submitRequests(requests, resultsQueue)
+ wg.Add(1)
+ go cs.handleWorkersResults(resultsQueue, startRequestAt, expectedAmountOfBlocks, &wg)
+ cs.workerPool.submitRequests(requests, resultsQueue)
+ wg.Wait()
- wg.Wait()
- }
+ return nil
 }
 func (cs *chainSync) maybeSwitchMode() error {
@@ -824,12 +817,12 @@ func (cs *chainSync) handleReadyBlock(bd *types.BlockData) error {
 return err
 }
- bd.Header = block.header
- }
+ if block.header == nil {
+ logger.Errorf("new ready block number (unknown) with hash %s", bd.Hash)
+ return nil
+ }
- if bd.Header == nil {
- logger.Errorf("new ready block number (unknown) with hash %s", bd.Hash)
- return nil
+ bd.Header = block.header
 }
 err := cs.processBlockData(*bd)
@@ -847,8 +840,6 @@ func (cs *chainSync) handleReadyBlock(bd *types.BlockData) error {
 // returns the index of the last BlockData it handled on success,
 // or the index of the block data 
that errored on failure. func (cs *chainSync) processBlockData(blockData types.BlockData) error { //nolint:revive - // logger.Debugf("processing block data with hash %s", blockData.Hash) - headerInState, err := cs.blockState.HasHeader(blockData.Hash) if err != nil { return fmt.Errorf("checking if block state has header: %w", err) @@ -1022,8 +1013,6 @@ func (cs *chainSync) handleBlock(block *types.Block, announceImportedBlock bool) return err } - //logger.Debugf("🔗 imported block number %d with hash %s", block.Header.Number, block.Header.Hash()) - blockHash := block.Header.Hash() cs.telemetry.SendMessage(telemetry.NewBlockImport( &blockHash, @@ -1048,55 +1037,45 @@ func (cs *chainSync) validateResponse(req *network.BlockRequestMessage, logger.Tracef("validating block response starting at block hash %s", resp.BlockData[0].Hash) - var ( - prev, curr *types.Header - err error - ) headerRequested := (req.RequestedData & network.RequestedDataHeader) == 1 + firstItem := resp.BlockData[0] - for i, bd := range resp.BlockData { - if err = cs.validateBlockData(req, bd, p); err != nil { + // check that we know the parent of the first block (or it's in the ready queue) + fmt.Printf("checking the header of: %s\n", firstItem.Header.ParentHash) + has, err := cs.blockState.HasHeader(firstItem.Header.ParentHash) + if err != nil { + return fmt.Errorf("while checking ancestry: %w", err) + } + + if !has { + return errUnknownParent + } + + previousBlockData := firstItem + for _, currBlockData := range resp.BlockData[1:] { + if err := cs.validateBlockData(req, currBlockData, p); err != nil { return err } if headerRequested { - curr = bd.Header - } else { + previousHash := previousBlockData.Header.Hash() + if previousHash != currBlockData.Header.ParentHash || + currBlockData.Header.Number != (previousBlockData.Header.Number+1) { + return errResponseIsNotChain + } + } else if currBlockData.Justification != nil { // if this is a justification-only request, make sure we have the block for the justification - if err = cs.validateJustification(bd); err != nil { + has, _ := cs.blockState.HasHeader(currBlockData.Hash) + if !has { cs.network.ReportPeer(peerset.ReputationChange{ Value: peerset.BadJustificationValue, Reason: peerset.BadJustificationReason, }, p) - return err + return errUnknownBlockForJustification } - continue } - if curr == nil { - logger.Critical(">>>>>>>>>>>>>>>> CURR IS NIL!!") - } - - // check that parent of first block in response is known (either in our db or in the ready queue) - if i == 0 { - prev = curr - - // check that we know the parent of the first block (or it's in the ready queue) - has, _ := cs.blockState.HasHeader(curr.ParentHash) - if has { - continue - } - - return errUnknownParent - } - - // otherwise, check that this response forms a chain - // ie. curr's parent hash is hash of previous header, and curr's number is previous number + 1 - if prev.Hash() != curr.ParentHash || curr.Number != prev.Number+1 { - return errResponseIsNotChain - } - - prev = curr + previousBlockData = currBlockData } return nil @@ -1130,25 +1109,6 @@ func (cs *chainSync) validateBlockData(req *network.BlockRequestMessage, bd *typ return nil } -func (cs *chainSync) validateJustification(bd *types.BlockData) error { - if bd == nil { - return errNilBlockData - } - - // this is ok, since the remote peer doesn't need to provide the info we request from them - // especially with justifications, it's common that they don't have them. 
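The rewritten `validateResponse` above replaces the index-juggling loop with two separate steps: first it checks that the parent of the first block is already known locally, then it walks the remaining items verifying that each one extends its predecessor. A condensed sketch of that linkage predicate, with a trimmed-down header type standing in for the repository's `types.Header` (the `formsChain` name is an assumption for illustration):

```go
package sync

import "github.com/ChainSafe/gossamer/lib/common"

// sketchHeader keeps only the fields the linkage check needs; the real
// code works on *types.Header and computes the hash on demand.
type sketchHeader struct {
	hash       common.Hash
	parentHash common.Hash
	number     uint
}

// formsChain reports whether every header extends the previous one:
// its parent hash must equal the predecessor's hash and its number must
// be the predecessor's number plus one, which is the inverse of the
// errResponseIsNotChain condition above.
func formsChain(headers []sketchHeader) bool {
	for i := 1; i < len(headers); i++ {
		prev, curr := headers[i-1], headers[i]
		if curr.parentHash != prev.hash || curr.number != prev.number+1 {
			return false
		}
	}
	return true
}
```

Separating the first-item check also lets the justification-only path skip the linkage test entirely, which is what the `headerRequested` branch above does.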
- if bd.Justification == nil { - return nil - } - - has, _ := cs.blockState.HasHeader(bd.Hash) - if !has { - return errUnknownBlockForJustification - } - - return nil -} - func (cs *chainSync) getHighestBlock() (highestBlock uint, err error) { cs.peerViewLock.RLock() defer cs.peerViewLock.RUnlock() diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index 79e202b6b7..3ed750a6ce 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -6,9 +6,14 @@ import ( "testing" "time" + "github.com/ChainSafe/gossamer/dot/network" "github.com/ChainSafe/gossamer/dot/peerset" + "github.com/ChainSafe/gossamer/dot/telemetry" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/common/variadic" + "github.com/ChainSafe/gossamer/lib/runtime/storage" + "github.com/ChainSafe/gossamer/lib/trie" "github.com/ChainSafe/gossamer/pkg/scale" "github.com/golang/mock/gomock" "github.com/libp2p/go-libp2p/core/peer" @@ -441,3 +446,272 @@ func newChainSyncTest(t *testing.T, ctrl *gomock.Controller) *chainSync { return newChainSync(cfg) } + +func setupChainSyncToBootstrapMode(t *testing.T, blocksAhead uint, + bs BlockState, net Network, babeVerifier BabeVerifier, + storageState StorageState, blockImportHandler BlockImportHandler, telemetry Telemetry) *chainSync { + t.Helper() + mockedPeerID := []peer.ID{ + peer.ID("some_peer_1"), + peer.ID("some_peer_2"), + peer.ID("some_peer_3"), + } + + peerViewMap := map[peer.ID]*peerView{} + for _, p := range mockedPeerID { + peerViewMap[p] = &peerView{ + who: p, + hash: common.Hash{1, 2, 3}, + number: blocksAhead, + } + } + + cfg := chainSyncConfig{ + pendingBlocks: newDisjointBlockSet(pendingBlocksLimit), + minPeers: 1, + maxPeers: 5, + slotDuration: 6 * time.Second, + bs: bs, + net: net, + babeVerifier: babeVerifier, + storageState: storageState, + blockImportHandler: blockImportHandler, + telemetry: telemetry, + } + + chainSync := newChainSync(cfg) + chainSync.peerView = peerViewMap + + return chainSync +} + +func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, + trie.EmptyHash, 0, types.NewDigest()) + mockBlockState.EXPECT().BestBlockHeader().Return(mockedGenesisHeader, nil).Times(2) + mockBlockState.EXPECT().HasHeader(mockedGenesisHeader.Hash()).Return(true, nil) + + mockNetwork := NewMockNetwork(ctrl) + startingBlock := variadic.MustNewUint32OrHash(1) + max := uint32(128) + workerPeerID := peer.ID("noot") + + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockStorageState := NewMockStorageState(ctrl) + mockImportHandler := NewMockBlockImportHandler(ctrl) + mockTelemetry := NewMockTelemetry(ctrl) + + blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 128) + // setup mocks for new synced blocks that doesn't exists in our local database + ensureBlockImportFlow(t, mockedGenesisHeader, blockResponse.BlockData, mockBlockState, + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry) + + mockNetwork.EXPECT().DoBlockRequest(workerPeerID, &network.BlockRequestMessage{ + RequestedData: bootstrapRequestData, + StartingBlock: *startingBlock, + Direction: network.Ascending, + Max: &max, + }).Return(blockResponse, nil) + 
mockNetwork.EXPECT().AllConnectedPeers().Return([]peer.ID{}) + + // setup a chain sync which holds in its peer view map + // 3 peers, each one announce block 129 as its best block number. + // We start this test with genesis block being our best block, so + // we're far behind by 128 blocks, we should execute a bootstrap + // sync request those blocks + const blocksAhead = 129 + cs := setupChainSyncToBootstrapMode(t, blocksAhead, + mockBlockState, mockNetwork, mockBabeVerifier, + mockStorageState, mockImportHandler, mockTelemetry) + + target, err := cs.getTarget() + require.NoError(t, err) + require.Equal(t, uint(129), target) + + // include a new worker in the worker pool set, this worker + // should be an available peer that will receive a block request + // the worker pool executes the workers management + cs.workerPool.fromBlockAnnounce(workerPeerID) + + stopCh := make(chan struct{}) + go cs.workerPool.listenForRequests(stopCh) + + err = cs.executeBootstrapSync() + require.NoError(t, err) + + close(stopCh) + <-cs.workerPool.doneCh +} + +func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, + trie.EmptyHash, 0, types.NewDigest()) + mockBlockState.EXPECT().BestBlockHeader().Return(mockedGenesisHeader, nil) + + mockNetwork := NewMockNetwork(ctrl) + + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockStorageState := NewMockStorageState(ctrl) + mockImportHandler := NewMockBlockImportHandler(ctrl) + mockTelemetry := NewMockTelemetry(ctrl) + + // this test expects two workers responding each request with 128 blocks which means + // we should import 256 blocks in total + blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256) + + // here we split the whole set in two parts each one will be the "response" for each peer + worker1Response := &network.BlockResponseMessage{ + BlockData: blockResponse.BlockData[:128], + } + // the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow + // will setup the expectations starting from the genesis header until block 128 + ensureBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState, + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry) + + worker2Response := &network.BlockResponseMessage{ + BlockData: blockResponse.BlockData[128:], + } + // the worker 2 will respond from block 129 to 256 so the ensureBlockImportFlow + // will setup the expectations starting from block 128, from previous worker, until block 256 + parent := worker1Response.BlockData[127] + ensureBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry) + + // we use gomock.Any since I cannot guarantee which peer picks which request + // but the first call to DoBlockRequest will return the first set and the second + // call will return the second set + mockNetwork.EXPECT().DoBlockRequest(gomock.Any(), gomock.Any()). + Return(worker1Response, nil) + mockNetwork.EXPECT().DoBlockRequest(gomock.Any(), gomock.Any()). 
+ Return(worker2Response, nil) + + mockNetwork.EXPECT().AllConnectedPeers().Return([]peer.ID{}) + // setup a chain sync which holds in its peer view map + // 3 peers, each one announce block 129 as its best block number. + // We start this test with genesis block being our best block, so + // we're far behind by 128 blocks, we should execute a bootstrap + // sync request those blocks + const blocksAhead = 257 + cs := setupChainSyncToBootstrapMode(t, blocksAhead, + mockBlockState, mockNetwork, mockBabeVerifier, + mockStorageState, mockImportHandler, mockTelemetry) + + target, err := cs.getTarget() + require.NoError(t, err) + require.Equal(t, uint(blocksAhead), target) + + // include a new worker in the worker pool set, this worker + // should be an available peer that will receive a block request + // the worker pool executes the workers management + cs.workerPool.fromBlockAnnounce(peer.ID("noot")) + cs.workerPool.fromBlockAnnounce(peer.ID("noot2")) + + stopCh := make(chan struct{}) + go cs.workerPool.listenForRequests(stopCh) + + err = cs.executeBootstrapSync() + require.NoError(t, err) + + close(stopCh) + <-cs.workerPool.doneCh +} + +func createSuccesfullBlockResponse(t *testing.T, genesisHash common.Hash, startingAt, numBlocks int) *network.BlockResponseMessage { + response := new(network.BlockResponseMessage) + response.BlockData = make([]*types.BlockData, numBlocks) + + emptyTrieState := storage.NewTrieState(nil) + tsRoot := emptyTrieState.MustRoot() + + firstHeader := types.NewHeader(genesisHash, tsRoot, common.Hash{}, + uint(startingAt), scale.VaryingDataTypeSlice{}) + response.BlockData[0] = &types.BlockData{ + Hash: firstHeader.Hash(), + Header: firstHeader, + Body: types.NewBody([]types.Extrinsic{}), + Justification: nil, + } + + parentHash := firstHeader.Hash() + for idx := 1; idx < numBlocks; idx++ { + blockNumber := idx + startingAt + header := types.NewHeader(parentHash, tsRoot, common.Hash{}, + uint(blockNumber), scale.VaryingDataTypeSlice{}) + response.BlockData[idx] = &types.BlockData{ + Hash: header.Hash(), + Header: header, + Body: types.NewBody([]types.Extrinsic{}), + Justification: nil, + } + parentHash = header.Hash() + } + + return response +} + +// ensureBlockImportFlow will setup the expectations for method calls that happens while chain sync imports a block +func ensureBlockImportFlow(t *testing.T, parentHeader *types.Header, blocksReceived []*types.BlockData, mockBlockState *MockBlockState, + mockBabeVerifier *MockBabeVerifier, mockStorageState *MockStorageState, + mockImportHandler *MockBlockImportHandler, mockTelemetry *MockTelemetry) { + t.Helper() + + mockBlockState.EXPECT().HasHeader(parentHeader.Hash()).Return(true, nil) + + for idx, blockData := range blocksReceived { + mockBlockState.EXPECT().HasHeader(blockData.Header.Hash()).Return(false, nil) + mockBlockState.EXPECT().HasBlockBody(blockData.Header.Hash()).Return(false, nil) + mockBabeVerifier.EXPECT().VerifyBlock(blockData.Header).Return(nil) + + var previousHeader *types.Header + if idx == 0 { + previousHeader = parentHeader + } else { + previousHeader = blocksReceived[idx-1].Header + } + + mockBlockState.EXPECT().GetHeader(blockData.Header.ParentHash).Return(previousHeader, nil) + mockStorageState.EXPECT().Lock() + mockStorageState.EXPECT().Unlock() + + emptyTrieState := storage.NewTrieState(nil) + parentStateRoot := previousHeader.StateRoot + mockStorageState.EXPECT().TrieState(&parentStateRoot). 
+ Return(emptyTrieState, nil)
+
+ ctrl := gomock.NewController(t)
+ mockRuntimeInstance := NewMockInstance(ctrl)
+ mockBlockState.EXPECT().GetRuntime(previousHeader.Hash()).
+ Return(mockRuntimeInstance, nil)
+
+ expectedBlock := &types.Block{
+ Header: *blockData.Header,
+ Body: *blockData.Body,
+ }
+
+ mockRuntimeInstance.EXPECT().SetContextStorage(emptyTrieState)
+ mockRuntimeInstance.EXPECT().ExecuteBlock(expectedBlock).
+ Return(nil, nil)
+
+ mockImportHandler.EXPECT().HandleBlockImport(expectedBlock, emptyTrieState, false).
+ Return(nil)
+
+ blockHash := blockData.Header.Hash()
+ expectedTelemetryMessage := telemetry.NewBlockImport(
+ &blockHash,
+ blockData.Header.Number,
+ "NetworkInitialSync")
+ mockTelemetry.EXPECT().SendMessage(expectedTelemetryMessage)
+
+ mockBlockState.EXPECT().CompareAndSetBlockData(blockData).Return(nil)
+ }
+}
diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go
index fa17423092..8bf9ec1427 100644
--- a/dot/sync/worker_pool.go
+++ b/dot/sync/worker_pool.go
@@ -64,9 +64,12 @@ func newSyncWorkerPool(net Network) *syncWorkerPool {
 func (s *syncWorkerPool) useConnectedPeers() {
 connectedPeers := s.network.AllConnectedPeers()
+ if len(connectedPeers) < 1 {
+ return
+ }
+
 s.l.Lock()
 defer s.l.Unlock()
-
 for _, connectedPeer := range connectedPeers {
 s.newPeer(connectedPeer)
 }
@@ -184,7 +187,7 @@ func (s *syncWorkerPool) listenForRequests(stopCh chan struct{}) {
 select {
 case <-stopCh:
 //wait for ongoing requests to be finished before returning
- //s.wg.Wait()
+ s.wg.Wait()
 return
 case task := <-s.taskQueue:
From 2432324c119140cd94df9cfa7b49b4cd8f8b027c Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Wed, 24 May 2023 10:56:35 -0400
Subject: [PATCH 040/140] chore: testing successful sync with workers failing

---
 dot/sync/chain_sync.go | 17 ++--
 dot/sync/chain_sync_test.go | 168 ++++++++++++++++++++++++++------
 2 files changed, 152 insertions(+), 33 deletions(-)

diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go
index 1977b29207..a236ac4af1 100644
--- a/dot/sync/chain_sync.go
+++ b/dot/sync/chain_sync.go
@@ -683,6 +683,9 @@ func (cs *chainSync) handleWorkersResults(workersResults chan *syncTaskResult, s
 logger.Debugf("handling workers results, waiting for %d blocks", totalBlocks)
 syncingChain := make([]*types.BlockData, totalBlocks)
+ // the total number of blocks still missing from the syncing chain
+ waitingBlocks := totalBlocks
+
loop:
 for {
 // in a case where we don't handle workers results we should check the pool
@@ -737,6 +740,10 @@ loop:
 cs.workerPool.submitRequest(taskResult.request, workersResults)
 continue
 case errors.Is(err, errUnknownParent):
+ case errors.Is(err, errBadBlock):
+ logger.Warnf("peer %s sent a bad block: %s", who, err)
+ cs.workerPool.punishPeer(taskResult.who, true)
+ cs.workerPool.submitRequest(taskResult.request, workersResults)
 case err != nil:
 logger.Criticalf("response invalid: %s", err)
 cs.workerPool.punishPeer(taskResult.who, false)
@@ -761,12 +768,10 @@ loop:
 // we need to check if we've filled all positions
 // otherwise we should wait for more responses
- for _, element := range syncingChain {
- if element == nil {
- continue loop
- }
+ waitingBlocks -= uint32(len(response.BlockData))
+ if waitingBlocks == 0 {
+ break loop
 }
- break loop
 }
 }
@@ -1040,8 +1045,6 @@ func (cs *chainSync) validateResponse(req *network.BlockRequestMessage,
 headerRequested := (req.RequestedData & network.RequestedDataHeader) == 1
 firstItem := resp.BlockData[0]
- // check that we know the parent of the first block (or it's in 
the ready queue) - fmt.Printf("checking the header of: %s\n", firstItem.Header.ParentHash) has, err := cs.blockState.HasHeader(firstItem.Header.ParentHash) if err != nil { return fmt.Errorf("while checking ancestry: %w", err) diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index 3ed750a6ce..d35d59b0c4 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -487,46 +487,48 @@ func setupChainSyncToBootstrapMode(t *testing.T, blocksAhead uint, func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker(t *testing.T) { t.Parallel() - ctrl := gomock.NewController(t) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, trie.EmptyHash, 0, types.NewDigest()) - mockBlockState.EXPECT().BestBlockHeader().Return(mockedGenesisHeader, nil).Times(2) - mockBlockState.EXPECT().HasHeader(mockedGenesisHeader.Hash()).Return(true, nil) - mockNetwork := NewMockNetwork(ctrl) + const blocksAhead = 129 + totalBlockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, int(blocksAhead)-1) + mockedNetwork := NewMockNetwork(ctrl) + + workerPeerID := peer.ID("noot") startingBlock := variadic.MustNewUint32OrHash(1) max := uint32(128) - workerPeerID := peer.ID("noot") + + mockedNetwork.EXPECT().DoBlockRequest(workerPeerID, &network.BlockRequestMessage{ + RequestedData: bootstrapRequestData, + StartingBlock: *startingBlock, + Direction: network.Ascending, + Max: &max, + }).Return(totalBlockResponse, nil) + mockedNetwork.EXPECT().AllConnectedPeers().Return([]peer.ID{}) + + mockedBlockState := NewMockBlockState(ctrl) + mockedBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + + mockedBlockState.EXPECT().BestBlockHeader().Return(mockedGenesisHeader, nil) mockBabeVerifier := NewMockBabeVerifier(ctrl) mockStorageState := NewMockStorageState(ctrl) mockImportHandler := NewMockBlockImportHandler(ctrl) mockTelemetry := NewMockTelemetry(ctrl) - blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 128) // setup mocks for new synced blocks that doesn't exists in our local database - ensureBlockImportFlow(t, mockedGenesisHeader, blockResponse.BlockData, mockBlockState, + ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, totalBlockResponse.BlockData, mockedBlockState, mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry) - mockNetwork.EXPECT().DoBlockRequest(workerPeerID, &network.BlockRequestMessage{ - RequestedData: bootstrapRequestData, - StartingBlock: *startingBlock, - Direction: network.Ascending, - Max: &max, - }).Return(blockResponse, nil) - mockNetwork.EXPECT().AllConnectedPeers().Return([]peer.ID{}) - // setup a chain sync which holds in its peer view map - // 3 peers, each one announce block 129 as its best block number. + // 3 peers, each one announce block X as its best block number. 
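Patch 040 above also changes how `handleWorkersResults` decides it is finished: instead of rescanning `syncingChain` for nil slots after every response, it counts a `waitingBlocks` counter down and exits the loop when it reaches zero. A self-contained sketch of that bookkeeping, with simplified stand-in types (the real loop additionally validates each response and slots its blocks into position before decrementing):

```go
package sync

// sketchResult stands in for a successful *syncTaskResult.
type sketchResult struct {
	blocksReceived uint32
}

// drainResults mirrors the counting scheme: start from the total number
// of requested blocks and stop as soon as every block is accounted for,
// without walking the whole syncing chain on each iteration.
func drainResults(results <-chan sketchResult, totalBlocks uint32) {
	waitingBlocks := totalBlocks
	for waitingBlocks > 0 {
		res := <-results
		waitingBlocks -= res.blocksReceived
	}
}
```

This turns the per-response cost from O(totalBlocks) into O(1), at the price of trusting that failed requests are always resubmitted so the counter eventually drains.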
// We start this test with genesis block being our best block, so - // we're far behind by 128 blocks, we should execute a bootstrap + // we're far behind by X blocks, we should execute a bootstrap // sync request those blocks - const blocksAhead = 129 cs := setupChainSyncToBootstrapMode(t, blocksAhead, - mockBlockState, mockNetwork, mockBabeVerifier, + mockedBlockState, mockedNetwork, mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry) target, err := cs.getTarget() @@ -536,7 +538,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker(t *testing.T) { // include a new worker in the worker pool set, this worker // should be an available peer that will receive a block request // the worker pool executes the workers management - cs.workerPool.fromBlockAnnounce(workerPeerID) + cs.workerPool.fromBlockAnnounce(peer.ID("noot")) stopCh := make(chan struct{}) go cs.workerPool.listenForRequests(stopCh) @@ -546,6 +548,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker(t *testing.T) { close(stopCh) <-cs.workerPool.doneCh + } func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) { @@ -575,7 +578,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) { } // the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow // will setup the expectations starting from the genesis header until block 128 - ensureBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState, + ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState, mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry) worker2Response := &network.BlockResponseMessage{ @@ -584,7 +587,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) { // the worker 2 will respond from block 129 to 256 so the ensureBlockImportFlow // will setup the expectations starting from block 128, from previous worker, until block 256 parent := worker1Response.BlockData[127] - ensureBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, + ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry) // we use gomock.Any since I cannot guarantee which peer picks which request @@ -626,6 +629,119 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) { <-cs.workerPool.doneCh } +func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker_Failing(t *testing.T) { + + t.Parallel() + + ctrl := gomock.NewController(t) + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, + trie.EmptyHash, 0, types.NewDigest()) + mockBlockState.EXPECT().BestBlockHeader().Return(mockedGenesisHeader, nil) + + mockNetwork := NewMockNetwork(ctrl) + + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockStorageState := NewMockStorageState(ctrl) + mockImportHandler := NewMockBlockImportHandler(ctrl) + mockTelemetry := NewMockTelemetry(ctrl) + + // this test expects two workers responding each request with 128 blocks which means + // we should import 256 blocks in total + blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256) + + // here we split the whole set in two parts each one will be the "response" for each peer + worker1Response := 
&network.BlockResponseMessage{
+ BlockData: blockResponse.BlockData[:128],
+ }
+ // the first peer will respond with blocks 1 to 128, so the ensureBlockImportFlow
+ // will set up the expectations starting from the genesis header until block 128
+ ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState,
+ mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry)
+
+ worker2Response := &network.BlockResponseMessage{
+ BlockData: blockResponse.BlockData[128:],
+ }
+ // the worker 2 will respond with blocks 129 to 256, so the ensureBlockImportFlow
+ // will set up the expectations starting from block 128, from the previous worker, until block 256
+ parent := worker1Response.BlockData[127]
+ ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState,
+ mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry)
+
+ // we use gomock.Any since we cannot guarantee which peer picks which request,
+ // but the first call to DoBlockRequest will return the first set and the second
+ // call will return the second set
+ doBlockRequestCount := 0
+ mockNetwork.EXPECT().DoBlockRequest(gomock.Any(), gomock.Any()).
+ DoAndReturn(func(peerID, _ any) (any, any) {
+ // this logic ensures that DoBlockRequest is called by
+ // peer.ID(alice) and peer.ID(bob). When bob calls, this method fails,
+ // so alice should pick up the failed request and re-execute it, which
+ // will be the third call
+
+ defer func() { doBlockRequestCount++ }()
+
+ pID := peerID.(peer.ID) // cast to peer ID
+ switch doBlockRequestCount {
+ case 0, 1:
+ if pID == peer.ID("alice") {
+ return worker1Response, nil
+ }
+
+ if pID == peer.ID("bob") {
+ return nil, errors.New("a bad error while getting a response")
+ }
+
+ require.FailNow(t, "expected calls by %s and %s, got: %s",
+ peer.ID("alice"), peer.ID("bob"), pID)
+ default:
+ // ensure the third call will be made by peer.ID("alice")
+ require.Equalf(t, pID, peer.ID("alice"),
+ "expected the third call to be made by %s, got: %s", peer.ID("alice"), pID)
+ }
+
+ return worker2Response, nil
+ }).Times(3)
+
+ mockNetwork.EXPECT().AllConnectedPeers().Return([]peer.ID{})
+ // setup a chain sync which holds in its peer view map
+ // 3 peers, each one announces block 129 as its best block number. 
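The `DoAndReturn` stub above makes bob fail exactly once; the assertions at the end of this test (bob present in `ignorePeers`, absent from `workers`) imply what punishing a peer with the ignore flag does to the pool. A minimal sketch of that failure path under those assumptions; `banAndRetry` and `resubmit` are hypothetical names used only for illustration:

```go
package sync

import "github.com/libp2p/go-libp2p/core/peer"

// pool holds just the two sets the test asserts on at the end.
type pool struct {
	workers     map[peer.ID]struct{}
	ignorePeers map[peer.ID]struct{}
}

// banAndRetry captures the behaviour the failing-worker test relies on:
// the erroring peer is dropped from the worker set, remembered in the
// ignore set so it is never readmitted, and the failed request is
// resubmitted so a healthy worker serves it - hence two requests plus
// one retry add up to the Times(3) expectation above.
func (p *pool) banAndRetry(who peer.ID, resubmit func()) {
	delete(p.workers, who)
	p.ignorePeers[who] = struct{}{}
	resubmit()
}
```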
+ // We start this test with genesis block being our best block, so + // we're far behind by 128 blocks, we should execute a bootstrap + // sync request those blocks + const blocksAhead = 257 + cs := setupChainSyncToBootstrapMode(t, blocksAhead, + mockBlockState, mockNetwork, mockBabeVerifier, + mockStorageState, mockImportHandler, mockTelemetry) + + target, err := cs.getTarget() + require.NoError(t, err) + require.Equal(t, uint(blocksAhead), target) + + // include a new worker in the worker pool set, this worker + // should be an available peer that will receive a block request + // the worker pool executes the workers management + cs.workerPool.fromBlockAnnounce(peer.ID("alice")) + cs.workerPool.fromBlockAnnounce(peer.ID("bob")) + + stopCh := make(chan struct{}) + go cs.workerPool.listenForRequests(stopCh) + + err = cs.executeBootstrapSync() + require.NoError(t, err) + + close(stopCh) + <-cs.workerPool.doneCh + + // peer should be in the ignore set + _, ok := cs.workerPool.ignorePeers[peer.ID("bob")] + require.True(t, ok) + + _, ok = cs.workerPool.workers[peer.ID("bob")] + require.False(t, ok) +} + func createSuccesfullBlockResponse(t *testing.T, genesisHash common.Hash, startingAt, numBlocks int) *network.BlockResponseMessage { response := new(network.BlockResponseMessage) response.BlockData = make([]*types.BlockData, numBlocks) @@ -659,8 +775,8 @@ func createSuccesfullBlockResponse(t *testing.T, genesisHash common.Hash, starti return response } -// ensureBlockImportFlow will setup the expectations for method calls that happens while chain sync imports a block -func ensureBlockImportFlow(t *testing.T, parentHeader *types.Header, blocksReceived []*types.BlockData, mockBlockState *MockBlockState, +// ensureSuccessfulBlockImportFlow will setup the expectations for method calls that happens while chain sync imports a block +func ensureSuccessfulBlockImportFlow(t *testing.T, parentHeader *types.Header, blocksReceived []*types.BlockData, mockBlockState *MockBlockState, mockBabeVerifier *MockBabeVerifier, mockStorageState *MockStorageState, mockImportHandler *MockBlockImportHandler, mockTelemetry *MockTelemetry) { t.Helper() From 5c7f98373e1e97c7083f1c6021673f0f6474e2d5 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 24 May 2023 13:41:37 -0400 Subject: [PATCH 041/140] chore: adjust mocks --- dot/sync/chain_sync_test._old | 1643 --------------------------------- dot/sync/chain_sync_test.go | 3 + dot/sync/requests.go | 3 + dot/sync/requests_test.go | 3 + dot/sync/worker_pool.go | 10 + 5 files changed, 19 insertions(+), 1643 deletions(-) delete mode 100644 dot/sync/chain_sync_test._old diff --git a/dot/sync/chain_sync_test._old b/dot/sync/chain_sync_test._old deleted file mode 100644 index 6da080e929..0000000000 --- a/dot/sync/chain_sync_test._old +++ /dev/null @@ -1,1643 +0,0 @@ -// // Copyright 2021 ChainSafe Systems (ON) -// // SPDX-License-Identifier: LGPL-3.0-only - -// package sync - -// import ( -// "time" - -// "context" -// "errors" -// "testing" - -// "github.com/ChainSafe/gossamer/dot/network" -// "github.com/ChainSafe/gossamer/dot/peerset" -// "github.com/ChainSafe/gossamer/dot/types" -// "github.com/ChainSafe/gossamer/lib/blocktree" -// "github.com/ChainSafe/gossamer/lib/common" -// "github.com/ChainSafe/gossamer/lib/common/variadic" -// "github.com/ChainSafe/gossamer/lib/trie" -// "github.com/golang/mock/gomock" -// "github.com/libp2p/go-libp2p/core/peer" -// "github.com/stretchr/testify/assert" -// "github.com/stretchr/testify/require" -// ) - -// const 
defaultSlotDuration = 6 * time.Second - -// func Test_chainSyncState_String(t *testing.T) { -// t.Parallel() - -// tests := []struct { -// name string -// s chainSyncState -// want string -// }{ -// { -// name: "case_bootstrap", -// s: bootstrap, -// want: "bootstrap", -// }, -// { -// name: "case_tip", -// s: tip, -// want: "tip", -// }, -// { -// name: "case_unknown", -// s: 3, -// want: "unknown", -// }, -// } -// for _, tt := range tests { -// tt := tt -// t.Run(tt.name, func(t *testing.T) { -// t.Parallel() -// got := tt.s.String() -// assert.Equal(t, tt.want, got) -// }) -// } -// } - -// func Test_chainSync_setPeerHead(t *testing.T) { -// t.Parallel() - -// errTest := errors.New("test error") -// const somePeer = peer.ID("abc") -// someHash := common.Hash{1, 2, 3, 4} - -// testCases := map[string]struct { -// chainSyncBuilder func(ctrl *gomock.Controller) *chainSync -// peerID peer.ID -// hash common.Hash -// number uint -// errWrapped error -// errMessage string -// expectedPeerIDToPeerState map[peer.ID]*peerView -// expectedQueuedPeerStates []*peerView -// }{ -// "best_block_header_error": { -// chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { -// blockState := NewMockBlockState(ctrl) -// blockState.EXPECT().BestBlockHeader().Return(nil, errTest) -// return &chainSync{ -// peerView: map[peer.ID]*peerView{}, -// blockState: blockState, -// } -// }, -// peerID: somePeer, -// hash: someHash, -// number: 1, -// errWrapped: errTest, -// errMessage: "best block header: test error", -// expectedPeerIDToPeerState: map[peer.ID]*peerView{ -// somePeer: { -// who: somePeer, -// hash: someHash, -// number: 1, -// }, -// }, -// }, -// "number_smaller_than_best_block_number_get_hash_by_number_error": { -// chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { -// blockState := NewMockBlockState(ctrl) -// bestBlockHeader := &types.Header{Number: 2} -// blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) -// blockState.EXPECT().GetHashByNumber(uint(1)). -// Return(common.Hash{}, errTest) -// return &chainSync{ -// peerView: map[peer.ID]*peerView{}, -// blockState: blockState, -// } -// }, -// peerID: somePeer, -// hash: someHash, -// number: 1, -// errWrapped: errTest, -// errMessage: "get block hash by number: test error", -// expectedPeerIDToPeerState: map[peer.ID]*peerView{ -// somePeer: { -// who: somePeer, -// hash: someHash, -// number: 1, -// }, -// }, -// }, -// "number_smaller_than_best_block_number_and_same_hash": { -// chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { -// blockState := NewMockBlockState(ctrl) -// bestBlockHeader := &types.Header{Number: 2} -// blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) -// blockState.EXPECT().GetHashByNumber(uint(1)).Return(someHash, nil) -// return &chainSync{ -// peerView: map[peer.ID]*peerView{}, -// blockState: blockState, -// } -// }, -// peerID: somePeer, -// hash: someHash, -// number: 1, -// expectedPeerIDToPeerState: map[peer.ID]*peerView{ -// somePeer: { -// who: somePeer, -// hash: someHash, -// number: 1, -// }, -// }, -// }, -// "number_smaller_than_best_block_number_get_highest_finalised_header_error": { -// chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { -// blockState := NewMockBlockState(ctrl) -// bestBlockHeader := &types.Header{Number: 2} -// blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) -// blockState.EXPECT().GetHashByNumber(uint(1)). 
-// Return(common.Hash{2}, nil) // other hash than someHash -// blockState.EXPECT().GetHighestFinalisedHeader().Return(nil, errTest) -// return &chainSync{ -// peerView: map[peer.ID]*peerView{}, -// blockState: blockState, -// } -// }, -// peerID: somePeer, -// hash: someHash, -// number: 1, -// errWrapped: errTest, -// errMessage: "get highest finalised header: test error", -// expectedPeerIDToPeerState: map[peer.ID]*peerView{ -// somePeer: { -// who: somePeer, -// hash: someHash, -// number: 1, -// }, -// }, -// }, -// "number_smaller_than_best_block_number_and_finalised_number_equal_than_number": { -// chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { -// blockState := NewMockBlockState(ctrl) -// bestBlockHeader := &types.Header{Number: 2} -// blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) -// blockState.EXPECT().GetHashByNumber(uint(1)). -// Return(common.Hash{2}, nil) // other hash than someHash -// finalisedBlockHeader := &types.Header{Number: 1} -// blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) -// network := NewMockNetwork(ctrl) -// network.EXPECT().ReportPeer(peerset.ReputationChange{ -// Value: peerset.BadBlockAnnouncementValue, -// Reason: peerset.BadBlockAnnouncementReason, -// }, somePeer) -// return &chainSync{ -// peerView: map[peer.ID]*peerView{}, -// blockState: blockState, -// network: network, -// } -// }, -// peerID: somePeer, -// hash: someHash, -// number: 1, -// errWrapped: errPeerOnInvalidFork, -// errMessage: "peer is on an invalid fork: for peer ZiCa and block number 1", -// expectedPeerIDToPeerState: map[peer.ID]*peerView{ -// somePeer: { -// who: somePeer, -// hash: someHash, -// number: 1, -// }, -// }, -// }, -// "number_smaller_than_best_block_number_and_finalised_number_bigger_than_number": { -// chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { -// blockState := NewMockBlockState(ctrl) -// bestBlockHeader := &types.Header{Number: 2} -// blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) -// blockState.EXPECT().GetHashByNumber(uint(1)). -// Return(common.Hash{2}, nil) // other hash than someHash -// finalisedBlockHeader := &types.Header{Number: 2} -// blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) -// network := NewMockNetwork(ctrl) -// network.EXPECT().ReportPeer(peerset.ReputationChange{ -// Value: peerset.BadBlockAnnouncementValue, -// Reason: peerset.BadBlockAnnouncementReason, -// }, somePeer) -// return &chainSync{ -// peerView: map[peer.ID]*peerView{}, -// blockState: blockState, -// network: network, -// } -// }, -// peerID: somePeer, -// hash: someHash, -// number: 1, -// errWrapped: errPeerOnInvalidFork, -// errMessage: "peer is on an invalid fork: for peer ZiCa and block number 1", -// expectedPeerIDToPeerState: map[peer.ID]*peerView{ -// somePeer: { -// who: somePeer, -// hash: someHash, -// number: 1, -// }, -// }, -// }, -// "number smaller than best block number and " + -// "finalised number smaller than number and " + -// "has_header_error": { -// chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { -// blockState := NewMockBlockState(ctrl) -// bestBlockHeader := &types.Header{Number: 3} -// blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) -// blockState.EXPECT().GetHashByNumber(uint(2)). 
-// Return(common.Hash{2}, nil) // other hash than someHash -// finalisedBlockHeader := &types.Header{Number: 1} -// blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) -// blockState.EXPECT().HasHeader(someHash).Return(false, errTest) -// return &chainSync{ -// peerView: map[peer.ID]*peerView{}, -// blockState: blockState, -// } -// }, -// peerID: somePeer, -// hash: someHash, -// number: 2, -// errWrapped: errTest, -// errMessage: "has header: test error", -// expectedPeerIDToPeerState: map[peer.ID]*peerView{ -// somePeer: { -// who: somePeer, -// hash: someHash, -// number: 2, -// }, -// }, -// }, -// "number smaller than best block number and " + -// "finalised number smaller than number and " + -// "has_the_hash": { -// chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { -// blockState := NewMockBlockState(ctrl) -// bestBlockHeader := &types.Header{Number: 3} -// blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) -// blockState.EXPECT().GetHashByNumber(uint(2)). -// Return(common.Hash{2}, nil) // other hash than someHash -// finalisedBlockHeader := &types.Header{Number: 1} -// blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) -// blockState.EXPECT().HasHeader(someHash).Return(true, nil) -// return &chainSync{ -// peerView: map[peer.ID]*peerView{}, -// blockState: blockState, -// } -// }, -// peerID: somePeer, -// hash: someHash, -// number: 2, -// expectedPeerIDToPeerState: map[peer.ID]*peerView{ -// somePeer: { -// who: somePeer, -// hash: someHash, -// number: 2, -// }, -// }, -// }, -// "number_bigger_than_the_head_number_add_hash_and_number_error": { -// chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { -// blockState := NewMockBlockState(ctrl) -// bestBlockHeader := &types.Header{Number: 1} -// blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) -// pendingBlocks := NewMockDisjointBlockSet(ctrl) -// pendingBlocks.EXPECT().addHashAndNumber(someHash, uint(2)). -// Return(errTest) -// return &chainSync{ -// peerView: map[peer.ID]*peerView{}, -// blockState: blockState, -// pendingBlocks: pendingBlocks, -// } -// }, -// peerID: somePeer, -// hash: someHash, -// number: 2, -// errWrapped: errTest, -// errMessage: "add hash and number: test error", -// expectedPeerIDToPeerState: map[peer.ID]*peerView{ -// somePeer: { -// who: somePeer, -// hash: someHash, -// number: 2, -// }, -// }, -// }, -// "number_bigger_than_the_head_number_success": { -// chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { -// blockState := NewMockBlockState(ctrl) -// bestBlockHeader := &types.Header{Number: 1} -// blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) -// pendingBlocks := NewMockDisjointBlockSet(ctrl) -// pendingBlocks.EXPECT().addHashAndNumber(someHash, uint(2)). -// Return(nil) -// return &chainSync{ -// peerView: map[peer.ID]*peerView{}, -// blockState: blockState, -// pendingBlocks: pendingBlocks, -// // buffered of 1 so setPeerHead can write to it -// // without a consumer of the channel on the other end. 
-// workQueue: make(chan *peerView, 1), -// } -// }, -// peerID: somePeer, -// hash: someHash, -// number: 2, -// expectedPeerIDToPeerState: map[peer.ID]*peerView{ -// somePeer: { -// who: somePeer, -// hash: someHash, -// number: 2, -// }, -// }, -// expectedQueuedPeerStates: []*peerView{ -// { -// who: somePeer, -// hash: someHash, -// number: 2, -// }, -// }, -// }, -// } - -// for name, testCase := range testCases { -// testCase := testCase -// t.Run(name, func(t *testing.T) { -// t.Parallel() -// ctrl := gomock.NewController(t) - -// chainSync := testCase.chainSyncBuilder(ctrl) - -// err := chainSync.setPeerHead(testCase.peerID, testCase.hash, testCase.number) - -// assert.ErrorIs(t, err, testCase.errWrapped) -// if testCase.errWrapped != nil { -// assert.EqualError(t, err, testCase.errMessage) -// } -// assert.Equal(t, testCase.expectedPeerIDToPeerState, chainSync.peerView) - -// require.Equal(t, len(testCase.expectedQueuedPeerStates), len(chainSync.workQueue)) -// for _, expectedPeerState := range testCase.expectedQueuedPeerStates { -// peerState := <-chainSync.workQueue -// assert.Equal(t, expectedPeerState, peerState) -// } -// }) -// } -// } - -// func TestChainSync_sync_bootstrap_withWorkerError(t *testing.T) { -// t.Parallel() - -// ctrl := gomock.NewController(t) -// cs := newTestChainSync(ctrl) -// mockBlockState := NewMockBlockState(ctrl) -// mockHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, trie.EmptyHash, 0, -// types.NewDigest()) -// mockBlockState.EXPECT().BestBlockHeader().Return(mockHeader, nil).Times(2) -// cs.blockState = mockBlockState -// cs.handler = newBootstrapSyncer(mockBlockState) - -// mockNetwork := NewMockNetwork(ctrl) -// startingBlock := variadic.MustNewUint32OrHash(1) -// max := uint32(128) -// mockNetwork.EXPECT().DoBlockRequest(peer.ID("noot"), &network.BlockRequestMessage{ -// RequestedData: 19, -// StartingBlock: *startingBlock, -// Direction: 0, -// Max: &max, -// }) -// cs.network = mockNetwork - -// go cs.sync() -// defer cs.cancel() - -// testPeer := peer.ID("noot") -// cs.peerView[testPeer] = &peerView{ -// number: 1000, -// } - -// cs.workQueue <- cs.peerView[testPeer] - -// select { -// case res := <-cs.resultQueue: -// expected := &workerError{ -// err: errNilResponse, // since MockNetwork returns a nil response -// who: testPeer, -// } -// require.Equal(t, expected, res.err) -// case <-time.After(5 * time.Second): -// t.Fatal("did not get worker response") -// } - -// require.Equal(t, bootstrap, cs.state) -// } - -// func TestChainSync_sync_tip(t *testing.T) { -// t.Parallel() - -// done := make(chan struct{}) - -// ctrl := gomock.NewController(t) -// cs := newTestChainSync(ctrl) -// header := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, trie.EmptyHash, 1000, -// types.NewDigest()) - -// bs := NewMockBlockState(ctrl) -// bs.EXPECT().BestBlockHeader().Return(header, nil) -// bs.EXPECT().GetHighestFinalisedHeader().DoAndReturn(func() (*types.Header, error) { -// close(done) -// return header, nil -// }) -// cs.blockState = bs - -// go cs.sync() -// defer cs.cancel() - -// testPeer := peer.ID("noot") -// cs.peerView[testPeer] = &peerView{ -// number: 999, -// } - -// cs.workQueue <- cs.peerView[testPeer] -// <-done -// require.Equal(t, tip, cs.state) -// } - -// func TestChainSync_getTarget(t *testing.T) { -// ctrl := gomock.NewController(t) -// cs := newTestChainSync(ctrl) -// require.Equal(t, uint(1<<32-1), cs.getTarget()) -// cs.peerView = map[peer.ID]*peerView{ -// "a": { -// number: 0, // outlier -// }, 
-// "b": { -// number: 110, -// }, -// "c": { -// number: 120, -// }, -// "d": { -// number: 130, -// }, -// "e": { -// number: 140, -// }, -// "f": { -// number: 150, -// }, -// "g": { -// number: 1000, // outlier -// }, -// } - -// require.Equal(t, uint(130), cs.getTarget()) // sum:650/count:5= avg:130 - -// cs.peerView = map[peer.ID]*peerView{ -// "testA": { -// number: 1000, -// }, -// "testB": { -// number: 2000, -// }, -// } - -// require.Equal(t, uint(1500), cs.getTarget()) -// } - -// func TestWorkerToRequests(t *testing.T) { -// t.Parallel() - -// w := &worker{ -// startNumber: uintPtr(10), -// targetNumber: uintPtr(1), -// direction: network.Ascending, -// } -// _, err := workerToRequests(w) -// require.Equal(t, errInvalidDirection, err) - -// type testCase struct { -// w *worker -// expected []*network.BlockRequestMessage -// } - -// var ( -// max128 = uint32(128) -// max9 = uint32(9) -// max64 = uint32(64) -// ) - -// testCases := map[string]testCase{ -// "test_0": { -// w: &worker{ -// startNumber: uintPtr(1), -// targetNumber: uintPtr(1 + maxResponseSize), -// direction: network.Ascending, -// requestData: bootstrapRequestData, -// }, -// expected: []*network.BlockRequestMessage{ -// { -// RequestedData: bootstrapRequestData, -// StartingBlock: *variadic.MustNewUint32OrHash(1), -// Direction: network.Ascending, -// Max: &max128, -// }, -// }, -// }, -// "test_1": { -// w: &worker{ -// startNumber: uintPtr(1), -// targetNumber: uintPtr(1 + (maxResponseSize * 2)), -// direction: network.Ascending, -// requestData: bootstrapRequestData, -// }, -// expected: []*network.BlockRequestMessage{ -// { -// RequestedData: bootstrapRequestData, -// StartingBlock: *variadic.MustNewUint32OrHash(1), -// Direction: network.Ascending, -// Max: &max128, -// }, -// { -// RequestedData: network.RequestedDataHeader + network.RequestedDataBody + network.RequestedDataJustification, -// StartingBlock: *variadic.MustNewUint32OrHash(1 + maxResponseSize), -// Direction: network.Ascending, -// Max: &max128, -// }, -// }, -// }, -// "test_2": { -// w: &worker{ -// startNumber: uintPtr(1), -// targetNumber: uintPtr(10), -// direction: network.Ascending, -// requestData: bootstrapRequestData, -// }, -// expected: []*network.BlockRequestMessage{ -// { -// RequestedData: bootstrapRequestData, -// StartingBlock: *variadic.MustNewUint32OrHash(1), -// Direction: network.Ascending, -// Max: &max128, -// }, -// }, -// }, -// "test_3": { -// w: &worker{ -// startNumber: uintPtr(10), -// targetNumber: uintPtr(1), -// direction: network.Descending, -// requestData: bootstrapRequestData, -// }, -// expected: []*network.BlockRequestMessage{ -// { -// RequestedData: bootstrapRequestData, -// StartingBlock: *variadic.MustNewUint32OrHash(10), -// Direction: network.Descending, -// Max: &max9, -// }, -// }, -// }, -// "test_4": { -// w: &worker{ -// startNumber: uintPtr(1), -// targetNumber: uintPtr(1 + maxResponseSize + (maxResponseSize / 2)), -// direction: network.Ascending, -// requestData: bootstrapRequestData, -// }, -// expected: []*network.BlockRequestMessage{ -// { -// RequestedData: bootstrapRequestData, -// StartingBlock: *variadic.MustNewUint32OrHash(1), -// Direction: network.Ascending, -// Max: &max128, -// }, -// { -// RequestedData: network.RequestedDataHeader + network.RequestedDataBody + network.RequestedDataJustification, -// StartingBlock: *variadic.MustNewUint32OrHash(1 + maxResponseSize), -// Direction: network.Ascending, -// Max: &max128, -// }, -// }, -// }, -// "test_5": { -// w: &worker{ -// 
startNumber: uintPtr(1), -// targetNumber: uintPtr(10), -// targetHash: common.Hash{0xa}, -// direction: network.Ascending, -// requestData: bootstrapRequestData, -// }, -// expected: []*network.BlockRequestMessage{ -// { -// RequestedData: bootstrapRequestData, -// StartingBlock: *variadic.MustNewUint32OrHash(1), -// Direction: network.Ascending, -// Max: &max128, -// }, -// }, -// }, -// "test_6": { -// w: &worker{ -// startNumber: uintPtr(1), -// startHash: common.Hash{0xb}, -// targetNumber: uintPtr(10), -// targetHash: common.Hash{0xc}, -// direction: network.Ascending, -// requestData: bootstrapRequestData, -// }, -// expected: []*network.BlockRequestMessage{ -// { -// RequestedData: bootstrapRequestData, -// StartingBlock: *variadic.MustNewUint32OrHash(common.Hash{0xb}), -// Direction: network.Ascending, -// Max: &max128, -// }, -// }, -// }, -// "test_7": { -// w: &worker{ -// startNumber: uintPtr(10), -// targetNumber: uintPtr(10), -// direction: network.Ascending, -// requestData: bootstrapRequestData, -// }, -// expected: []*network.BlockRequestMessage{ -// { -// RequestedData: bootstrapRequestData, -// StartingBlock: *variadic.MustNewUint32OrHash(10), -// Direction: network.Ascending, -// Max: &max128, -// }, -// }, -// }, -// "test_8": { -// w: &worker{ -// startNumber: uintPtr(1 + maxResponseSize + (maxResponseSize / 2)), -// targetNumber: uintPtr(1), -// direction: network.Descending, -// requestData: bootstrapRequestData, -// }, -// expected: []*network.BlockRequestMessage{ -// { -// RequestedData: network.RequestedDataHeader + network.RequestedDataBody + network.RequestedDataJustification, -// StartingBlock: *variadic.MustNewUint32OrHash(1 + (maxResponseSize / 2)), -// Direction: network.Descending, -// Max: &max64, -// }, -// { -// RequestedData: bootstrapRequestData, -// StartingBlock: *variadic.MustNewUint32OrHash(1 + maxResponseSize + (maxResponseSize / 2)), -// Direction: network.Descending, -// Max: &max128, -// }, -// }, -// }, -// } - -// for name, tc := range testCases { -// tc := tc -// t.Run(name, func(t *testing.T) { -// t.Parallel() -// reqs, err := workerToRequests(tc.w) -// require.NoError(t, err) -// require.Equal(t, tc.expected, reqs) -// }) -// } -// } - -// func TestChainSync_validateResponse(t *testing.T) { -// t.Parallel() -// badBlockHash := common.NewHash([]byte("badblockhash")) - -// tests := map[string]struct { -// blockStateBuilder func(ctrl *gomock.Controller) BlockState -// networkBuilder func(ctrl *gomock.Controller) Network -// req *network.BlockRequestMessage -// resp *network.BlockResponseMessage -// expectedError error -// }{ -// "nil_req,_nil_resp": { -// blockStateBuilder: func(ctrl *gomock.Controller) BlockState { -// mockBlockState := NewMockBlockState(ctrl) -// mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) -// return mockBlockState -// }, -// networkBuilder: func(ctrl *gomock.Controller) Network { -// return NewMockNetwork(ctrl) -// }, -// expectedError: errEmptyBlockData, -// }, -// "handle_error_response_is_not_chain,_has_header": { -// blockStateBuilder: func(ctrl *gomock.Controller) BlockState { -// mockBlockState := NewMockBlockState(ctrl) -// mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) -// mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil) -// return mockBlockState -// }, -// networkBuilder: func(ctrl *gomock.Controller) Network { -// return NewMockNetwork(ctrl) -// }, -// req: &network.BlockRequestMessage{ -// 
RequestedData: network.RequestedDataHeader, -// }, -// resp: &network.BlockResponseMessage{ -// BlockData: []*types.BlockData{ -// { -// Header: &types.Header{ -// Number: 1, -// }, -// Body: &types.Body{}, -// }, -// { -// Header: &types.Header{ -// Number: 2, -// }, -// Body: &types.Body{}, -// }, -// }, -// }, -// expectedError: errResponseIsNotChain, -// }, -// "handle_justification-only_request,_unknown_block": { -// blockStateBuilder: func(ctrl *gomock.Controller) BlockState { -// mockBlockState := NewMockBlockState(ctrl) -// mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) -// mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) -// return mockBlockState -// }, -// networkBuilder: func(ctrl *gomock.Controller) Network { -// mockNetwork := NewMockNetwork(ctrl) -// mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{ -// Value: peerset.BadJustificationValue, -// Reason: peerset.BadJustificationReason, -// }, peer.ID("")) -// return mockNetwork -// }, -// req: &network.BlockRequestMessage{ -// RequestedData: network.RequestedDataJustification, -// }, -// resp: &network.BlockResponseMessage{ -// BlockData: []*types.BlockData{ -// { -// Justification: &[]byte{0}, -// }, -// }, -// }, -// expectedError: errUnknownBlockForJustification, -// }, -// "handle_error_unknown_parent": { -// blockStateBuilder: func(ctrl *gomock.Controller) BlockState { -// mockBlockState := NewMockBlockState(ctrl) -// mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) -// mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) -// return mockBlockState -// }, -// networkBuilder: func(ctrl *gomock.Controller) Network { -// return NewMockNetwork(ctrl) -// }, -// req: &network.BlockRequestMessage{ -// RequestedData: network.RequestedDataHeader, -// }, -// resp: &network.BlockResponseMessage{ -// BlockData: []*types.BlockData{ -// { -// Header: &types.Header{ -// Number: 1, -// }, -// Body: &types.Body{}, -// }, -// { -// Header: &types.Header{ -// Number: 2, -// }, -// Body: &types.Body{}, -// }, -// }, -// }, -// expectedError: errUnknownParent, -// }, -// "handle_error_bad_block": { -// blockStateBuilder: func(ctrl *gomock.Controller) BlockState { -// mockBlockState := NewMockBlockState(ctrl) -// mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) -// return mockBlockState -// }, -// networkBuilder: func(ctrl *gomock.Controller) Network { -// return NewMockNetwork(ctrl) -// }, -// req: &network.BlockRequestMessage{ -// RequestedData: network.RequestedDataHeader, -// }, -// resp: &network.BlockResponseMessage{ -// BlockData: []*types.BlockData{ -// { -// Hash: badBlockHash, -// Header: &types.Header{ -// Number: 2, -// }, -// Body: &types.Body{}, -// }, -// }, -// }, -// expectedError: errBadBlock, -// }, -// "no_error": { -// blockStateBuilder: func(ctrl *gomock.Controller) BlockState { -// mockBlockState := NewMockBlockState(ctrl) -// mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) -// mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil) -// return mockBlockState -// }, -// networkBuilder: func(ctrl *gomock.Controller) Network { -// return NewMockNetwork(ctrl) -// }, -// req: &network.BlockRequestMessage{ -// RequestedData: network.RequestedDataHeader, -// }, -// resp: &network.BlockResponseMessage{ -// BlockData: []*types.BlockData{ -// { -// Header: &types.Header{ -// Number: 2, -// 
}, -// Body: &types.Body{}, -// }, -// { -// Header: &types.Header{ -// ParentHash: (&types.Header{ -// Number: 2, -// }).Hash(), -// Number: 3, -// }, -// Body: &types.Body{}, -// }, -// }, -// }, -// }, -// } -// for name, tt := range tests { -// tt := tt -// t.Run(name, func(t *testing.T) { -// t.Parallel() -// ctrl := gomock.NewController(t) - -// cfg := chainSyncConfig{ -// bs: tt.blockStateBuilder(ctrl), -// pendingBlocks: newDisjointBlockSet(pendingBlocksLimit), -// readyBlocks: newBlockQueue(maxResponseSize), -// net: tt.networkBuilder(ctrl), -// badBlocks: []string{ -// badBlockHash.String(), -// }, -// } -// cs := newChainSync(cfg) - -// err := cs.validateResponse(tt.req, tt.resp, "") -// if tt.expectedError != nil { -// assert.EqualError(t, err, tt.expectedError.Error()) -// } else { -// assert.NoError(t, err) -// } -// }) -// } -// } - -// func TestChainSync_doSync(t *testing.T) { -// t.Parallel() - -// ctrl := gomock.NewController(t) -// readyBlocks := newBlockQueue(maxResponseSize) -// cs := newTestChainSyncWithReadyBlocks(ctrl, readyBlocks) - -// max := uint32(1) -// req := &network.BlockRequestMessage{ -// RequestedData: bootstrapRequestData, -// StartingBlock: *variadic.MustNewUint32OrHash(1), -// Direction: network.Ascending, -// Max: &max, -// } - -// mockBlockState := NewMockBlockState(ctrl) -// mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil).Times(2) -// cs.blockState = mockBlockState - -// workerErr := cs.doSync(req, make(map[peer.ID]struct{})) -// require.NotNil(t, workerErr) -// require.Equal(t, errNoPeers, workerErr.err) - -// cs.peerView["noot"] = &peerView{ -// number: 100, -// } - -// mockNetwork := NewMockNetwork(ctrl) -// startingBlock := variadic.MustNewUint32OrHash(1) -// max1 := uint32(1) -// mockNetwork.EXPECT().DoBlockRequest(peer.ID("noot"), &network.BlockRequestMessage{ -// RequestedData: 19, -// StartingBlock: *startingBlock, -// Direction: 0, -// Max: &max1, -// }) -// cs.network = mockNetwork - -// workerErr = cs.doSync(req, make(map[peer.ID]struct{})) -// require.NotNil(t, workerErr) -// require.Equal(t, errNilResponse, workerErr.err) - -// resp := &network.BlockResponseMessage{ -// BlockData: []*types.BlockData{ -// { -// Hash: common.Hash{0x1}, -// Header: &types.Header{ -// Number: 1, -// }, -// Body: &types.Body{}, -// }, -// }, -// } - -// mockNetwork = NewMockNetwork(ctrl) -// mockNetwork.EXPECT().DoBlockRequest(peer.ID("noot"), &network.BlockRequestMessage{ -// RequestedData: 19, -// StartingBlock: *startingBlock, -// Direction: 0, -// Max: &max1, -// }).Return(resp, nil) -// cs.network = mockNetwork - -// workerErr = cs.doSync(req, make(map[peer.ID]struct{})) -// require.Nil(t, workerErr) -// bd, err := readyBlocks.pop(context.Background()) -// require.NotNil(t, bd) -// require.NoError(t, err) -// require.Equal(t, resp.BlockData[0], bd) - -// parent := (&types.Header{ -// Number: 2, -// }).Hash() -// resp = &network.BlockResponseMessage{ -// BlockData: []*types.BlockData{ -// { -// Hash: common.Hash{0x3}, -// Header: &types.Header{ -// ParentHash: parent, -// Number: 3, -// }, -// Body: &types.Body{}, -// }, -// { -// Hash: common.Hash{0x2}, -// Header: &types.Header{ -// Number: 2, -// }, -// Body: &types.Body{}, -// }, -// }, -// } - -// // test to see if descending blocks get reversed -// req.Direction = network.Descending -// mockNetwork = NewMockNetwork(ctrl) -// mockNetwork.EXPECT().DoBlockRequest(peer.ID("noot"), &network.BlockRequestMessage{ -// RequestedData: 19, -// StartingBlock: *startingBlock, -// Direction: 
1, -// Max: &max1, -// }).Return(resp, nil) -// cs.network = mockNetwork -// workerErr = cs.doSync(req, make(map[peer.ID]struct{})) -// require.Nil(t, workerErr) - -// bd, err = readyBlocks.pop(context.Background()) -// require.NotNil(t, bd) -// require.Equal(t, resp.BlockData[0], bd) -// require.NoError(t, err) - -// bd, err = readyBlocks.pop(context.Background()) -// require.NotNil(t, bd) -// require.Equal(t, resp.BlockData[1], bd) -// require.NoError(t, err) -// } - -// func TestHandleReadyBlock(t *testing.T) { -// t.Parallel() - -// ctrl := gomock.NewController(t) -// readyBlocks := newBlockQueue(maxResponseSize) -// cs := newTestChainSyncWithReadyBlocks(ctrl, readyBlocks) - -// // test that descendant chain gets returned by getReadyDescendants on block 1 being ready -// header1 := &types.Header{ -// Number: 1, -// } -// block1 := &types.Block{ -// Header: *header1, -// Body: types.Body{}, -// } - -// header2 := &types.Header{ -// ParentHash: header1.Hash(), -// Number: 2, -// } -// block2 := &types.Block{ -// Header: *header2, -// Body: types.Body{}, -// } -// cs.pendingBlocks.addBlock(block2) - -// header3 := &types.Header{ -// ParentHash: header2.Hash(), -// Number: 3, -// } -// block3 := &types.Block{ -// Header: *header3, -// Body: types.Body{}, -// } -// cs.pendingBlocks.addBlock(block3) - -// header2NotDescendant := &types.Header{ -// ParentHash: common.Hash{0xff}, -// Number: 2, -// } -// block2NotDescendant := &types.Block{ -// Header: *header2NotDescendant, -// Body: types.Body{}, -// } -// cs.pendingBlocks.addBlock(block2NotDescendant) - -// cs.handleReadyBlock(block1.ToBlockData()) - -// require.False(t, cs.pendingBlocks.(*disjointBlockSet).hasBlock(header1.Hash())) -// require.False(t, cs.pendingBlocks.(*disjointBlockSet).hasBlock(header2.Hash())) -// require.False(t, cs.pendingBlocks.(*disjointBlockSet).hasBlock(header3.Hash())) -// require.True(t, cs.pendingBlocks.(*disjointBlockSet).hasBlock(header2NotDescendant.Hash())) - -// blockData1, err := readyBlocks.pop(context.Background()) -// require.NoError(t, err) -// require.Equal(t, block1.ToBlockData(), blockData1) - -// blockData2, err := readyBlocks.pop(context.Background()) -// require.NoError(t, err) -// require.Equal(t, block2.ToBlockData(), blockData2) - -// blockData3, err := readyBlocks.pop(context.Background()) -// require.NoError(t, err) -// require.Equal(t, block3.ToBlockData(), blockData3) -// } - -// func TestChainSync_determineSyncPeers(t *testing.T) { -// t.Parallel() - -// ctrl := gomock.NewController(t) -// cs := newTestChainSync(ctrl) - -// req := &network.BlockRequestMessage{} -// testPeerA := peer.ID("a") -// testPeerB := peer.ID("b") -// peersTried := make(map[peer.ID]struct{}) - -// // test base case -// cs.peerView[testPeerA] = &peerView{ -// number: 129, -// } -// cs.peerView[testPeerB] = &peerView{ -// number: 257, -// } - -// peers := cs.determineSyncPeers(req, peersTried) -// require.Equal(t, 2, len(peers)) -// require.Contains(t, peers, testPeerA) -// require.Contains(t, peers, testPeerB) - -// // test peer ignored case -// cs.ignorePeers[testPeerA] = struct{}{} -// peers = cs.determineSyncPeers(req, peersTried) -// require.Equal(t, 1, len(peers)) -// require.Equal(t, []peer.ID{testPeerB}, peers) - -// // test all peers ignored case -// cs.ignorePeers[testPeerB] = struct{}{} -// peers = cs.determineSyncPeers(req, peersTried) -// require.Equal(t, 2, len(peers)) -// require.Contains(t, peers, testPeerA) -// require.Contains(t, peers, testPeerB) -// require.Equal(t, 0, len(cs.ignorePeers)) - -// 
// test peer's best block below number case, shouldn't include that peer -// start, err := variadic.NewUint32OrHash(130) -// require.NoError(t, err) -// req.StartingBlock = *start -// peers = cs.determineSyncPeers(req, peersTried) -// require.Equal(t, 1, len(peers)) -// require.Equal(t, []peer.ID{testPeerB}, peers) - -// // test peer tried case, should ignore peer already tried -// peersTried[testPeerA] = struct{}{} -// req.StartingBlock = variadic.Uint32OrHash{} -// peers = cs.determineSyncPeers(req, peersTried) -// require.Equal(t, 1, len(peers)) -// require.Equal(t, []peer.ID{testPeerB}, peers) -// } - -// func Test_chainSync_logSyncSpeed(t *testing.T) { -// t.Parallel() - -// type fields struct { -// blockStateBuilder func(ctrl *gomock.Controller) BlockState -// networkBuilder func(ctrl *gomock.Controller) Network -// state chainSyncState -// benchmarker *syncBenchmarker -// } -// tests := []struct { -// name string -// fields fields -// }{ -// { -// name: "state_bootstrap", -// fields: fields{ -// blockStateBuilder: func(ctrl *gomock.Controller) BlockState { -// mockBlockState := NewMockBlockState(ctrl) -// mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{}, nil).Times(3) -// mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{}, nil) -// return mockBlockState -// }, -// networkBuilder: func(ctrl *gomock.Controller) Network { -// mockNetwork := NewMockNetwork(ctrl) -// mockNetwork.EXPECT().Peers().Return(nil) -// return mockNetwork -// }, -// benchmarker: newSyncBenchmarker(10), -// state: bootstrap, -// }, -// }, -// { -// name: "case_tip", -// fields: fields{ -// blockStateBuilder: func(ctrl *gomock.Controller) BlockState { -// mockBlockState := NewMockBlockState(ctrl) -// mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{}, nil).Times(3) -// mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{}, nil) -// return mockBlockState -// }, -// networkBuilder: func(ctrl *gomock.Controller) Network { -// mockNetwork := NewMockNetwork(ctrl) -// mockNetwork.EXPECT().Peers().Return(nil) -// return mockNetwork -// }, -// benchmarker: newSyncBenchmarker(10), -// state: tip, -// }, -// }, -// } -// for _, tt := range tests { -// tt := tt -// t.Run(tt.name, func(t *testing.T) { -// t.Parallel() -// ctrl := gomock.NewController(t) -// ctx, cancel := context.WithCancel(context.Background()) -// tickerChannel := make(chan time.Time) -// cs := &chainSync{ -// ctx: ctx, -// cancel: cancel, -// blockState: tt.fields.blockStateBuilder(ctrl), -// network: tt.fields.networkBuilder(ctrl), -// state: tt.fields.state, -// benchmarker: tt.fields.benchmarker, -// logSyncTickerC: tickerChannel, -// logSyncTicker: time.NewTicker(time.Hour), // just here to be stopped -// logSyncDone: make(chan struct{}), -// } - -// go cs.logSyncSpeed() - -// tickerChannel <- time.Time{} -// cs.cancel() -// <-cs.logSyncDone -// }) -// } -// } - -// func Test_chainSync_start(t *testing.T) { -// t.Parallel() - -// type fields struct { -// blockStateBuilder func(ctrl *gomock.Controller) BlockState -// disjointBlockSetBuilder func(ctrl *gomock.Controller, called chan<- struct{}) DisjointBlockSet -// benchmarker *syncBenchmarker -// } -// tests := []struct { -// name string -// fields fields -// }{ -// { -// name: "base_case", -// fields: fields{ -// blockStateBuilder: func(ctrl *gomock.Controller) BlockState { -// mockBlockState := NewMockBlockState(ctrl) -// mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{}, nil) -// return mockBlockState -// }, -// 
disjointBlockSetBuilder: func(ctrl *gomock.Controller, called chan<- struct{}) DisjointBlockSet { -// mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) -// mockDisjointBlockSet.EXPECT().run(gomock.AssignableToTypeOf(make(<-chan struct{}))). -// DoAndReturn(func(stop <-chan struct{}) { -// close(called) // test glue, ideally we would use a ready chan struct passed to run(). -// }) -// return mockDisjointBlockSet -// }, -// benchmarker: newSyncBenchmarker(1), -// }, -// }, -// } -// for _, tt := range tests { -// tt := tt -// t.Run(tt.name, func(t *testing.T) { -// t.Parallel() -// ctrl := gomock.NewController(t) -// ctx, cancel := context.WithCancel(context.Background()) -// disjointBlockSetCalled := make(chan struct{}) -// cs := &chainSync{ -// ctx: ctx, -// cancel: cancel, -// blockState: tt.fields.blockStateBuilder(ctrl), -// pendingBlocks: tt.fields.disjointBlockSetBuilder(ctrl, disjointBlockSetCalled), -// benchmarker: tt.fields.benchmarker, -// slotDuration: time.Hour, -// logSyncTicker: time.NewTicker(time.Hour), // just here to be closed -// logSyncDone: make(chan struct{}), -// } -// cs.start() -// <-disjointBlockSetCalled -// cs.stop() -// }) -// } -// } - -// func Test_chainSync_setBlockAnnounce(t *testing.T) { -// t.Parallel() - -// type args struct { -// from peer.ID -// header *types.Header -// } -// tests := map[string]struct { -// chainSyncBuilder func(*types.Header, *gomock.Controller) chainSync -// args args -// wantErr error -// }{ -// "base_case": { -// wantErr: blocktree.ErrBlockExists, -// args: args{ -// header: &types.Header{Number: 2}, -// }, -// chainSyncBuilder: func(_ *types.Header, ctrl *gomock.Controller) chainSync { -// mockBlockState := NewMockBlockState(ctrl) -// mockBlockState.EXPECT().HasHeader(common.MustHexToHash( -// "0x05bdcc454f60a08d427d05e7f19f240fdc391f570ab76fcb96ecca0b5823d3bf")).Return(true, nil) -// mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) -// return chainSync{ -// blockState: mockBlockState, -// pendingBlocks: mockDisjointBlockSet, -// } -// }, -// }, -// "err_when_calling_has_header": { -// wantErr: errors.New("checking header exists"), -// args: args{ -// header: &types.Header{Number: 2}, -// }, -// chainSyncBuilder: func(_ *types.Header, ctrl *gomock.Controller) chainSync { -// mockBlockState := NewMockBlockState(ctrl) -// mockBlockState.EXPECT(). -// HasHeader(common.MustHexToHash( -// "0x05bdcc454f60a08d427d05e7f19f240fdc391f570ab76fcb96ecca0b5823d3bf")). -// Return(false, errors.New("checking header exists")) -// mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) -// return chainSync{ -// blockState: mockBlockState, -// pendingBlocks: mockDisjointBlockSet, -// } -// }, -// }, -// "adding_block_header_to_pending_blocks": { -// args: args{ -// header: &types.Header{Number: 2}, -// }, -// chainSyncBuilder: func(expectedHeader *types.Header, ctrl *gomock.Controller) chainSync { -// argumentHeaderHash := common.MustHexToHash( -// "0x05bdcc454f60a08d427d05e7f19f240fdc391f570ab76fcb96ecca0b5823d3bf") - -// mockBlockState := NewMockBlockState(ctrl) -// mockBlockState.EXPECT(). -// HasHeader(argumentHeaderHash). -// Return(false, nil) - -// mockBlockState.EXPECT(). -// BestBlockHeader(). -// Return(&types.Header{Number: 1}, nil) - -// mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) -// mockDisjointBlockSet.EXPECT(). -// addHeader(expectedHeader). -// Return(nil) - -// mockDisjointBlockSet.EXPECT(). -// addHashAndNumber(argumentHeaderHash, uint(2)). 
-// Return(nil) - -// return chainSync{ -// blockState: mockBlockState, -// pendingBlocks: mockDisjointBlockSet, -// peerView: make(map[peer.ID]*peerView), -// // creating an buffered channel for this specific test -// // since it will put a work on the queue and an unbufered channel -// // will hang until we read on this channel and the goal is to -// // put the work on the channel and don't block -// workQueue: make(chan *peerView, 1), -// } -// }, -// }, -// } -// for name, tt := range tests { -// tt := tt -// t.Run(name, func(t *testing.T) { -// t.Parallel() -// ctrl := gomock.NewController(t) -// sync := tt.chainSyncBuilder(tt.args.header, ctrl) -// err := sync.setBlockAnnounce(tt.args.from, tt.args.header) -// if tt.wantErr != nil { -// assert.EqualError(t, err, tt.wantErr.Error()) -// } else { -// assert.NoError(t, err) -// } - -// if sync.workQueue != nil { -// assert.Equal(t, len(sync.workQueue), 1) -// } -// }) -// } -// } - -// func Test_chainSync_getHighestBlock(t *testing.T) { -// t.Parallel() - -// tests := []struct { -// name string -// peerState map[peer.ID]*peerView -// wantHighestBlock uint -// expectedError error -// }{ -// { -// name: "error no peers", -// expectedError: errors.New("no peers to sync with"), -// }, -// { -// name: "base case", -// peerState: map[peer.ID]*peerView{"1": {number: 2}}, -// wantHighestBlock: 2, -// }, -// } -// for _, tt := range tests { -// tt := tt -// t.Run(tt.name, func(t *testing.T) { -// t.Parallel() -// cs := &chainSync{ -// peerView: tt.peerState, -// } -// gotHighestBlock, err := cs.getHighestBlock() -// if tt.expectedError != nil { -// assert.EqualError(t, err, tt.expectedError.Error()) -// } else { -// assert.NoError(t, err) -// } -// assert.Equal(t, tt.wantHighestBlock, gotHighestBlock) -// }) -// } -// } - -// func Test_chainSync_handleResult(t *testing.T) { -// t.Parallel() -// mockError := errors.New("test mock error") -// tests := map[string]struct { -// chainSyncBuilder func(ctrl *gomock.Controller, result *worker) chainSync -// maxWorkerRetries uint16 -// res *worker -// err error -// }{ -// "res.err_==_nil": { -// chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { -// return chainSync{ -// workerState: newWorkerState(), -// } -// }, -// res: &worker{}, -// }, -// "res.err.err.Error()_==_context.Canceled": { -// chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { -// return chainSync{ -// workerState: newWorkerState(), -// } -// }, -// res: &worker{ -// ctx: context.Background(), -// err: &workerError{ -// err: context.Canceled, -// }, -// }, -// }, -// "res.err.err.Error()_==_context.DeadlineExceeded": { -// chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { -// mockNetwork := NewMockNetwork(ctrl) -// mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{Value: -1024, Reason: "Request timeout"}, -// peer.ID("")) -// mockWorkHandler := NewMockworkHandler(ctrl) -// mockWorkHandler.EXPECT().handleWorkerResult(result).Return(result, nil) -// return chainSync{ -// workerState: newWorkerState(), -// network: mockNetwork, -// handler: mockWorkHandler, -// } -// }, -// res: &worker{ -// ctx: context.Background(), -// err: &workerError{ -// err: context.DeadlineExceeded, -// }, -// }, -// }, -// "res.err.err.Error()_dial_backoff": { -// chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { -// return chainSync{ -// workerState: newWorkerState(), -// } -// }, -// res: &worker{ -// ctx: context.Background(), -// err: &workerError{ -// 
err: errors.New("dial backoff"), -// }, -// }, -// }, -// "res.err.err.Error()_==_errNoPeers": { -// chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { -// return chainSync{ -// workerState: newWorkerState(), -// } -// }, -// res: &worker{ -// ctx: context.Background(), -// err: &workerError{ -// err: errNoPeers, -// }, -// }, -// }, -// "res.err.err.Error()_==_protocol_not_supported": { -// chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { -// mockNetwork := NewMockNetwork(ctrl) -// mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{Value: -2147483648, -// Reason: "Unsupported protocol"}, -// peer.ID("")) -// return chainSync{ -// workerState: newWorkerState(), -// network: mockNetwork, -// } -// }, -// res: &worker{ -// ctx: context.Background(), -// err: &workerError{ -// err: errors.New("protocol not supported"), -// }, -// }, -// }, -// "no_error,_no_retries": { -// chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { -// mockWorkHandler := NewMockworkHandler(ctrl) -// mockWorkHandler.EXPECT().handleWorkerResult(result).Return(result, nil) -// return chainSync{ -// workerState: newWorkerState(), -// handler: mockWorkHandler, -// } -// }, -// res: &worker{ -// ctx: context.Background(), -// err: &workerError{ -// err: errors.New(""), -// }, -// }, -// }, -// "handle_work_result_error,_no_retries": { -// chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { -// mockWorkHandler := NewMockworkHandler(ctrl) -// mockWorkHandler.EXPECT().handleWorkerResult(result).Return(nil, mockError) -// return chainSync{ -// workerState: newWorkerState(), -// handler: mockWorkHandler, -// } -// }, -// res: &worker{ -// ctx: context.Background(), -// err: &workerError{ -// err: errors.New(""), -// }, -// }, -// err: mockError, -// }, -// "handle_work_result_nil,_no_retries": { -// chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { -// mockWorkHandler := NewMockworkHandler(ctrl) -// mockWorkHandler.EXPECT().handleWorkerResult(result).Return(nil, nil) -// return chainSync{ -// workerState: newWorkerState(), -// handler: mockWorkHandler, -// } -// }, -// res: &worker{ -// ctx: context.Background(), -// err: &workerError{ -// err: errors.New(""), -// }, -// }, -// }, -// "no_error,_maxWorkerRetries_2": { -// chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { -// mockWorkHandler := NewMockworkHandler(ctrl) -// mockWorkHandler.EXPECT().handleWorkerResult(result).Return(result, nil) -// mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) -// mockDisjointBlockSet.EXPECT().removeBlock(common.Hash{}) -// return chainSync{ -// workerState: newWorkerState(), -// handler: mockWorkHandler, -// pendingBlocks: mockDisjointBlockSet, -// } -// }, -// maxWorkerRetries: 2, -// res: &worker{ -// ctx: context.Background(), -// err: &workerError{ -// err: errors.New(""), -// }, -// pendingBlock: newPendingBlock(common.Hash{}, 1, nil, nil, time.Now()), -// }, -// }, -// "no_error": { -// chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { -// mockWorkHandler := NewMockworkHandler(ctrl) -// mockWorkHandler.EXPECT().handleWorkerResult(result).Return(result, nil) -// mockWorkHandler.EXPECT().hasCurrentWorker(&worker{ -// ctx: context.Background(), -// err: &workerError{ -// err: mockError, -// }, -// retryCount: 1, -// peersTried: map[peer.ID]struct{}{ -// "": {}, -// }, -// }, newWorkerState().workers).Return(true) -// return chainSync{ -// workerState: 
newWorkerState(), -// handler: mockWorkHandler, -// maxWorkerRetries: 2, -// } -// }, -// res: &worker{ -// ctx: context.Background(), -// err: &workerError{ -// err: mockError, -// }, -// }, -// }, -// } -// for testName, tt := range tests { -// tt := tt -// t.Run(testName, func(t *testing.T) { -// t.Parallel() -// ctrl := gomock.NewController(t) -// sync := tt.chainSyncBuilder(ctrl, tt.res) -// err := sync.handleResult(tt.res) -// if tt.err != nil { -// assert.EqualError(t, err, tt.err.Error()) -// } else { -// assert.NoError(t, err) -// } -// }) -// } -// } - -// func newTestChainSyncWithReadyBlocks(ctrl *gomock.Controller) *chainSync { -// mockBlockState := NewMockBlockState(ctrl) -// mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - -// cfg := chainSyncConfig{ -// bs: mockBlockState, -// pendingBlocks: newDisjointBlockSet(pendingBlocksLimit), -// minPeers: 1, -// maxPeers: 5, -// slotDuration: defaultSlotDuration, -// } - -// return newChainSync(cfg) -// } - -// func newTestChainSync(ctrl *gomock.Controller) *chainSync { -// return newTestChainSyncWithReadyBlocks(ctrl) -// } diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index d35d59b0c4..15e07346e3 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -1,3 +1,6 @@ +// Copyright 2023 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + package sync import ( diff --git a/dot/sync/requests.go b/dot/sync/requests.go index a45eb547a2..7e237aad0e 100644 --- a/dot/sync/requests.go +++ b/dot/sync/requests.go @@ -1,3 +1,6 @@ +// Copyright 2023 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + package sync import ( diff --git a/dot/sync/requests_test.go b/dot/sync/requests_test.go index 008a61db88..893c611444 100644 --- a/dot/sync/requests_test.go +++ b/dot/sync/requests_test.go @@ -1,3 +1,6 @@ +// Copyright 2023 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + package sync import ( diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 8bf9ec1427..576a62715f 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -1,3 +1,6 @@ +// Copyright 2023 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + package sync import ( @@ -191,6 +194,12 @@ func (s *syncWorkerPool) listenForRequests(stopCh chan struct{}) { return case task := <-s.taskQueue: + // whenever a task arrives we try to find an available peer. + // if the task is directed at a specific peer then we wait for + // that peer to become available; the same happens when a normal + // task arrives and there is no available peer: we wait for + // someone to become free and then use it.
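The comment above describes a classic condition-variable wait loop. The following standalone sketch illustrates that pattern under simplified, assumed types: pool, acquire and release are illustrative names, not the diff's actual API, and the availableCond.Signal() call added in the next hunk plays the role of release here.

package workerpool

import "sync"

type workerStatus int

const (
	available workerStatus = iota
	busy
)

// pool is an illustrative stand-in for a peer worker table: a mutex-protected
// map plus a condition variable that announces availability.
type pool struct {
	mu      sync.Mutex
	cond    *sync.Cond
	workers map[string]workerStatus
}

func newPool(peers ...string) *pool {
	p := &pool{workers: make(map[string]workerStatus)}
	for _, peer := range peers {
		p.workers[peer] = available
	}
	p.cond = sync.NewCond(&p.mu)
	return p
}

// acquire blocks until the requested peer (or any peer, when who is empty)
// is available, marks it busy and returns the chosen ID.
func (p *pool) acquire(who string) string {
	p.mu.Lock()
	defer p.mu.Unlock()
	for {
		if who != "" {
			if status, ok := p.workers[who]; ok && status == available {
				p.workers[who] = busy
				return who
			}
		} else {
			for id, status := range p.workers {
				if status == available {
					p.workers[id] = busy
					return id
				}
			}
		}
		// nothing suitable yet: Wait atomically unlocks, sleeps and relocks
		p.cond.Wait()
	}
}

// release makes a worker available again and wakes one blocked acquire.
func (p *pool) release(who string) {
	p.mu.Lock()
	p.workers[who] = available
	p.mu.Unlock()
	p.cond.Signal()
}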
+ s.l.Lock() for { var peerID peer.ID @@ -223,6 +232,7 @@ func (s *syncWorkerPool) executeRequest(who peer.ID, task *syncTask) { s.l.Lock() s.workers[who] = &peerSyncWorker{status: available} s.l.Unlock() + s.availableCond.Signal() s.wg.Done() }() From 17b03b775f66c97cc445a55505846f93888fcdc0 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 24 May 2023 14:32:57 -0400 Subject: [PATCH 042/140] chore: avoid ignoring peers whenever we failed to read response bytes --- chain/westend/defaults.go | 2 ++ dot/network/errors.go | 1 + dot/network/utils.go | 2 +- dot/sync/chain_sync.go | 16 +++++++++++----- 4 files changed, 15 insertions(+), 6 deletions(-) diff --git a/chain/westend/defaults.go b/chain/westend/defaults.go index f6dcb2bfe5..0e24657e54 100644 --- a/chain/westend/defaults.go +++ b/chain/westend/defaults.go @@ -29,6 +29,8 @@ func DefaultConfig() *cfg.Config { config.Core.GrandpaAuthority = false config.Core.Role = 1 config.Network.NoMDNS = false + config.Log.Digest = "trace" + config.Log.Sync = "trace" return config } diff --git a/dot/network/errors.go b/dot/network/errors.go index d22d4d5d88..c52ecdc0a8 100644 --- a/dot/network/errors.go +++ b/dot/network/errors.go @@ -16,4 +16,5 @@ var ( errInvalidStartingBlockType = errors.New("invalid StartingBlock in messsage") errInboundHanshakeExists = errors.New("an inbound handshake already exists for given peer") errInvalidRole = errors.New("invalid role") + ErrFailedToReadEntireMessage = errors.New("failed to read entire message") ) diff --git a/dot/network/utils.go b/dot/network/utils.go index e5fd8da6ef..a2b7a626bc 100644 --- a/dot/network/utils.go +++ b/dot/network/utils.go @@ -230,7 +230,7 @@ func readStream(stream libp2pnetwork.Stream, bufPointer *[]byte, maxSize uint64) } if tot != int(length) { - return tot, fmt.Errorf("failed to read entire message: expected %d bytes, received %d bytes", length, tot) + return tot, fmt.Errorf("%w: expected %d bytes, received %d bytes", ErrFailedToReadEntireMessage, length, tot) } return tot, nil diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index a236ac4af1..0d8fa30d01 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -678,11 +678,16 @@ func (cs *chainSync) getTarget() (uint, error) { // any error from a worker we should evaluate the error and re-insert the request // in the queue and wait for it to completes func (cs *chainSync) handleWorkersResults(workersResults chan *syncTaskResult, startAtBlock uint, totalBlocks uint32, wg *sync.WaitGroup) { - defer wg.Done() + startTime := time.Now() + defer func() { + tookSeconds := time.Since(startTime).Seconds() + bps := float64(totalBlocks) / tookSeconds + logger.Debugf("⛓️ synced %d blocks, took: %.2f seconds, bps: %.2f blocks/second", totalBlocks, tookSeconds, bps) + wg.Done() + }() - logger.Debugf("handling workers results, waiting for %d blocks", totalBlocks) + logger.Debugf("waiting for %d blocks", totalBlocks) syncingChain := make([]*types.BlockData, totalBlocks) - // the total numbers of blocks is missing in the syncing chain waitingBlocks := totalBlocks @@ -711,8 +716,9 @@ loop: logger.Errorf("task result: peer(%s) error: %s", taskResult.who, taskResult.err) - // if we receive and empty message from the stream we don't need to shutdown the worker - if !errors.Is(taskResult.err, network.ErrReceivedEmptyMessage) { + if errors.Is(taskResult.err, network.ErrFailedToReadEntireMessage) { + cs.workerPool.punishPeer(taskResult.who, false) + } else if !errors.Is(taskResult.err, network.ErrReceivedEmptyMessage) {
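// an incomplete read (network.ErrFailedToReadEntireMessage, handled above)
// only deducts reputation via punishPeer(who, false), keeping the peer
// eligible for new tasks; every other error, apart from an empty message,
// falls through to this branch, where punishPeer(who, true) additionally
// ignores the peer, which is the behaviour this patch's subject refers to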
cs.workerPool.punishPeer(taskResult.who, true) } From 0f695203030bffadfd30f4bdd5a5cfc7fc6796b9 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 25 May 2023 15:30:40 -0400 Subject: [PATCH 043/140] chore: add export comment to `AllConnectedPeers` --- dot/network/service.go | 1 + 1 file changed, 1 insertion(+) diff --git a/dot/network/service.go b/dot/network/service.go index b0d3d65d28..d09281f8a3 100644 --- a/dot/network/service.go +++ b/dot/network/service.go @@ -594,6 +594,7 @@ func (s *Service) NetworkState() common.NetworkState { } } +// AllConnectedPeers returns all the peers connected to the node instance func (s *Service) AllConnectedPeers() []peer.ID { return s.host.p2pHost.Network().Peers() } From e78093716437a186c828089f97a209fcc092c6a5 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 29 May 2023 13:38:02 -0400 Subject: [PATCH 044/140] chore: improve readability + ignore peers management --- dot/sync/benchmark.go | 67 ----- dot/sync/benchmark_test.go | 239 ---------------- dot/sync/chain_sync.go | 449 ++++++++++--------------------- dot/sync/chain_sync_test.go | 276 ++++--------------- dot/sync/mock_chain_sync_test.go | 13 +- dot/sync/syncer.go | 71 ++++- dot/sync/syncer_test.go | 237 +++++++++++++--- dot/sync/worker_pool.go | 16 +- 8 files changed, 479 insertions(+), 889 deletions(-) delete mode 100644 dot/sync/benchmark.go delete mode 100644 dot/sync/benchmark_test.go diff --git a/dot/sync/benchmark.go b/dot/sync/benchmark.go deleted file mode 100644 index 018cb8b1e4..0000000000 --- a/dot/sync/benchmark.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "container/ring" - "time" -) - -type syncBenchmarker struct { - start time.Time - startBlock uint - blocksPerSecond *ring.Ring - samplesToKeep int -} - -func newSyncBenchmarker(samplesToKeep int) *syncBenchmarker { - if samplesToKeep == 0 { - panic("cannot have 0 samples to keep") - } - - return &syncBenchmarker{ - blocksPerSecond: ring.New(samplesToKeep), - samplesToKeep: samplesToKeep, - } -} - -func (b *syncBenchmarker) begin(now time.Time, block uint) { - b.start = now - b.startBlock = block -} - -func (b *syncBenchmarker) end(now time.Time, block uint) { - duration := now.Sub(b.start) - blocks := block - b.startBlock - bps := float64(blocks) / duration.Seconds() - b.blocksPerSecond.Value = bps - b.blocksPerSecond = b.blocksPerSecond.Next() -} - -func (b *syncBenchmarker) average() float64 { - var sum float64 - var elementsSet int - b.blocksPerSecond.Do(func(x interface{}) { - if x == nil { - return - } - bps := x.(float64) - sum += bps - elementsSet++ - }) - - if elementsSet == 0 { - return 0 - } - - return sum / float64(elementsSet) -} - -func (b *syncBenchmarker) mostRecentAverage() float64 { - value := b.blocksPerSecond.Prev().Value - if value == nil { - return 0 - } - return value.(float64) -} diff --git a/dot/sync/benchmark_test.go b/dot/sync/benchmark_test.go deleted file mode 100644 index 47050cebcf..0000000000 --- a/dot/sync/benchmark_test.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2022 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "container/ring" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func Test_newSyncBenchmarker(t *testing.T) { - t.Parallel() - - t.Run("10 samples to keep", func(t *testing.T) { - t.Parallel() - const samplesToKeep = 10 - actual := newSyncBenchmarker(samplesToKeep) - - expected := &syncBenchmarker{ -
blocksPerSecond: ring.New(samplesToKeep), - samplesToKeep: samplesToKeep, - } - - assert.Equal(t, expected, actual) - }) - - t.Run("panics on 0 sample to keep", func(t *testing.T) { - t.Parallel() - const samplesToKeep = 0 - assert.PanicsWithValue(t, "cannot have 0 samples to keep", func() { - newSyncBenchmarker(samplesToKeep) - }) - }) -} - -func Test_syncBenchmarker_begin(t *testing.T) { - t.Parallel() - - const startSec = 1000 - start := time.Unix(startSec, 0) - const startBlock = 10 - - b := syncBenchmarker{} - b.begin(start, startBlock) - - expected := syncBenchmarker{ - start: start, - startBlock: startBlock, - } - - assert.Equal(t, expected, b) -} - -func Test_syncBenchmarker_end(t *testing.T) { - t.Parallel() - - const startSec = 1000 - start := time.Unix(startSec, 0) - - const nowSec = 1010 - now := time.Unix(nowSec, 0) - - const ( - startBlock = 10 - endBlock = 12 - ) - - const ringCap = 3 - - blocksPerSecond := ring.New(ringCap) - blocksPerSecond.Value = 1.00 - blocksPerSecond = blocksPerSecond.Next() - - b := syncBenchmarker{ - start: start, - startBlock: startBlock, - blocksPerSecond: blocksPerSecond, - } - b.end(now, endBlock) - - expectedBlocksPerSecond := ring.New(ringCap) - expectedBlocksPerSecond.Value = 1.00 - expectedBlocksPerSecond = expectedBlocksPerSecond.Next() - expectedBlocksPerSecond.Value = 0.2 - expectedBlocksPerSecond = expectedBlocksPerSecond.Next() - - expected := syncBenchmarker{ - start: start, - startBlock: startBlock, - blocksPerSecond: expectedBlocksPerSecond, - } - - assert.Equal(t, expected, b) -} - -func Test_syncBenchmarker_average(t *testing.T) { - t.Parallel() - - testCases := map[string]struct { - values []float64 - ringCap int - average float64 - }{ - // zero size ring is not possible due to constructor check - "empty_ring": { - ringCap: 1, - }, - "single_element_in_one-size_ring": { - values: []float64{1.1}, - ringCap: 1, - average: 1.1, - }, - "single_element_in_two-size_ring": { - values: []float64{1.1}, - ringCap: 2, - average: 1.1, - }, - "two_elements_in_two-size_ring": { - values: []float64{1.0, 2.0}, - ringCap: 2, - average: 1.5, - }, - } - - for name, testCase := range testCases { - testCase := testCase - t.Run(name, func(t *testing.T) { - t.Parallel() - - blocksPerSecond := ring.New(testCase.ringCap) - for _, value := range testCase.values { - blocksPerSecond.Value = value - blocksPerSecond = blocksPerSecond.Next() - } - - benchmarker := syncBenchmarker{ - blocksPerSecond: blocksPerSecond, - samplesToKeep: testCase.ringCap, - } - - avg := benchmarker.average() - - assert.Equal(t, testCase.average, avg) - }) - } -} - -func Test_syncBenchmarker_mostRecentAverage(t *testing.T) { - t.Parallel() - - testCases := map[string]struct { - values []float64 - ringCap int - average float64 - }{ - // zero size ring is not possible due to constructor check - "empty_ring": { - ringCap: 1, - }, - "single_element_in_one-size_ring": { - values: []float64{1.1}, - ringCap: 1, - average: 1.1, - }, - "single_element_in_two-size_ring": { - values: []float64{1.1}, - ringCap: 2, - average: 1.1, - }, - "two_elements_in_two-size_ring": { - values: []float64{1.0, 2.0}, - ringCap: 2, - average: 2.0, - }, - "three_elements_in_two-size_ring": { - values: []float64{1.0, 2.0, 3.0}, - ringCap: 2, - average: 3.0, - }, - } - - for name, testCase := range testCases { - testCase := testCase - t.Run(name, func(t *testing.T) { - t.Parallel() - - blocksPerSecond := ring.New(testCase.ringCap) - for _, value := range testCase.values { - blocksPerSecond.Value = value - 
blocksPerSecond = blocksPerSecond.Next() - } - - benchmarker := syncBenchmarker{ - blocksPerSecond: blocksPerSecond, - } - - avg := benchmarker.mostRecentAverage() - - assert.Equal(t, testCase.average, avg) - }) - } -} - -func Test_syncBenchmarker(t *testing.T) { - t.Parallel() - - const samplesToKeep = 5 - benchmarker := newSyncBenchmarker(samplesToKeep) - - const initialBlock = 10 - timeZero := time.Unix(0, 0) - const timeIncrement = time.Second - const baseBlocksIncrement uint = 1 - - startTime := timeZero - endTime := startTime.Add(timeIncrement) - var block uint = initialBlock - - const samples = 10 - for i := 0; i < samples; i++ { - benchmarker.begin(startTime, block) - block += baseBlocksIncrement + uint(i) - benchmarker.end(endTime, block) - - startTime = startTime.Add(timeIncrement) - endTime = startTime.Add(timeIncrement) - } - - avg := benchmarker.average() - const expectedAvg = 8.0 - assert.Equal(t, expectedAvg, avg) - - mostRecentAvg := benchmarker.mostRecentAverage() - const expectedMostRecentAvg = 10.0 - assert.Equal(t, expectedMostRecentAvg, mostRecentAvg) -} diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 0d8fa30d01..6f2ba15791 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -5,11 +5,11 @@ package sync import ( "bytes" - "context" "errors" "fmt" "math/big" "sync" + "sync/atomic" "time" "github.com/ChainSafe/chaindb" @@ -68,9 +68,6 @@ type ChainSync interface { start() stop() - // called upon receiving a BlockAnnounce - setBlockAnnounce(from peer.ID, header *types.Header) error - // called upon receiving a BlockAnnounceHandshake setPeerHead(p peer.ID, hash common.Hash, number uint) @@ -79,6 +76,8 @@ type ChainSync interface { // getHighestBlock returns the highest block or an error getHighestBlock() (highestBlock uint, err error) + + onImportBlock(announcedBlock) error } type announcedBlock struct { @@ -87,8 +86,6 @@ type announcedBlock struct { } type chainSync struct { - ctx context.Context - cancel context.CancelFunc stopCh chan struct{} blockState BlockState @@ -107,19 +104,13 @@ type chainSync struct { // note: the block may have empty fields, as some data about it may be unknown pendingBlocks DisjointBlockSet - state chainSyncState - benchmarker *syncBenchmarker + state atomic.Value finalisedCh <-chan *types.FinalisationInfo minPeers int slotDuration time.Duration - logSyncTicker *time.Ticker - logSyncTickerC <-chan time.Time // channel as field for unit testing - logSyncStarted bool - logSyncDone chan struct{} - storageState StorageState transactionState TransactionState babeVerifier BabeVerifier @@ -145,11 +136,9 @@ type chainSyncConfig struct { } func newChainSync(cfg chainSyncConfig) *chainSync { - ctx, cancel := context.WithCancel(context.Background()) - const syncSamplesToKeep = 30 - const logSyncPeriod = 3 * time.Second - logSyncTicker := time.NewTicker(logSyncPeriod) + atomicState := atomic.Value{} + atomicState.Store(bootstrap) return &chainSync{ stopCh: make(chan struct{}), storageState: cfg.storageState, @@ -158,20 +147,14 @@ func newChainSync(cfg chainSyncConfig) *chainSync { finalityGadget: cfg.finalityGadget, blockImportHandler: cfg.blockImportHandler, telemetry: cfg.telemetry, - ctx: ctx, - cancel: cancel, blockState: cfg.bs, network: cfg.net, peerView: make(map[peer.ID]*peerView), pendingBlocks: cfg.pendingBlocks, - state: bootstrap, - benchmarker: newSyncBenchmarker(syncSamplesToKeep), + state: atomicState, finalisedCh: cfg.bs.GetFinalisedNotifierChannel(), minPeers: cfg.minPeers, slotDuration: cfg.slotDuration, 
- logSyncTicker: logSyncTicker, - logSyncTickerC: logSyncTicker.C, - logSyncDone: make(chan struct{}), workerPool: newSyncWorkerPool(cfg.net), blockAnnounceCh: make(chan announcedBlock, cfg.maxPeers), badBlocks: cfg.badBlocks, @@ -192,106 +175,60 @@ func (cs *chainSync) start() { time.Sleep(time.Millisecond * 100) } - isSyncedGauge.Set(float64(cs.state)) - + isSyncedGauge.Set(0) go cs.pendingBlocks.run(cs.finalisedCh, cs.stopCh) go cs.workerPool.listenForRequests(cs.stopCh) go cs.sync() - cs.logSyncStarted = true - go cs.logSyncSpeed() } func (cs *chainSync) stop() { close(cs.stopCh) <-cs.workerPool.doneCh - - cs.cancel() - if cs.logSyncStarted { - <-cs.logSyncDone - } -} - -func (cs *chainSync) syncState() chainSyncState { - return cs.state } -func (cs *chainSync) setBlockAnnounce(who peer.ID, blockAnnounceHeader *types.Header) error { - blockAnnounceHeaderHash := blockAnnounceHeader.Hash() - - // if the peer reports a lower or equal best block number than us, - // check if they are on a fork or not - bestBlockHeader, err := cs.blockState.BestBlockHeader() - if err != nil { - return fmt.Errorf("best block header: %w", err) - } - - if blockAnnounceHeader.Number <= bestBlockHeader.Number { - // check if our block hash for that number is the same, if so, do nothing - // as we already have that block - // TODO: check what happens when get hash by number retuns nothing or ErrNotExists - ourHash, err := cs.blockState.GetHashByNumber(blockAnnounceHeader.Number) +func (cs *chainSync) sync() { + for { + bestBlockHeader, err := cs.blockState.BestBlockHeader() if err != nil { - return fmt.Errorf("get block hash by number: %w", err) - } - - if ourHash == blockAnnounceHeaderHash { - return nil + logger.Criticalf("getting best block header: %s", err) + return } - // check if their best block is on an invalid chain, if it is, - // potentially downscore them - // for now, we can remove them from the syncing peers set - fin, err := cs.blockState.GetHighestFinalisedHeader() + syncTarget, err := cs.getTarget() if err != nil { - return fmt.Errorf("get highest finalised header: %w", err) + logger.Criticalf("getting target: %w", err) + return } - // their block hash doesn't match ours for that number (ie. they are on a different - // chain), and also the highest finalised block is higher than that number. - // thus the peer is on an invalid chain - if fin.Number >= blockAnnounceHeader.Number { - // TODO: downscore this peer, or temporarily don't sync from them? (#1399) - // perhaps we need another field in `peerState` to mark whether the state is valid or not - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.BadBlockAnnouncementValue, - Reason: peerset.BadBlockAnnouncementReason, - }, who) - return fmt.Errorf("%w: for peer %s and block number %d", - errPeerOnInvalidFork, who, blockAnnounceHeader.Number) - } + bestBlockNumber := bestBlockHeader.Number + isFarFromTarget := bestBlockNumber+maxResponseSize < syncTarget - // peer is on a fork, check if we have processed the fork already or not - // ie. is their block written to our db? 
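The new sync() loop above collapses mode selection into a single predicate (best + maxResponseSize < target) and keeps the current mode in an atomic.Value, so only the goroutine whose CompareAndSwap actually wins reports the transition. Below is a minimal sketch of that state machine under assumed names: mode, update and current are illustrative, and maxResponseSize = 128 mirrors the constant the diff relies on.

package syncmode

import "sync/atomic"

// chainSyncState mirrors the two modes used in the diff; the concrete
// values here are assumptions for this sketch.
type chainSyncState uint

const (
	bootstrap chainSyncState = iota
	tip

	maxResponseSize = 128
)

type mode struct{ state atomic.Value }

func newMode() *mode {
	m := &mode{}
	// Store an initial value before any Load, as newChainSync does, so
	// Load never returns nil and CompareAndSwap sees a consistent type.
	m.state.Store(bootstrap)
	return m
}

// update applies the same rule as the sync() loop: far from the target
// means bootstrap, otherwise tip. It reports whether this call performed
// the transition, which is what gates the "switched sync mode" log line.
func (m *mode) update(best, target uint) (switched bool) {
	if best+maxResponseSize < target {
		return m.state.CompareAndSwap(tip, bootstrap)
	}
	return m.state.CompareAndSwap(bootstrap, tip)
}

func (m *mode) current() chainSyncState {
	return m.state.Load().(chainSyncState)
}

If the state is already bootstrap, CompareAndSwap(tip, bootstrap) finds a non-matching old value and returns false, so repeated loop iterations in the same mode stay silent; only a genuine flip returns true.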
- has, err := cs.blockState.HasHeader(blockAnnounceHeaderHash) - if err != nil { - return fmt.Errorf("has header: %w", err) - } - - // if so, do nothing, as we already have their fork - if has { - return nil - } - } - - hasPendingBlock := cs.pendingBlocks.hasBlock(blockAnnounceHeaderHash) - if hasPendingBlock { - return fmt.Errorf("%w: block %s (#%d)", - errAlreadyInDisjointSet, blockAnnounceHeaderHash, blockAnnounceHeader.Number) - } - - if err = cs.pendingBlocks.addHeader(blockAnnounceHeader); err != nil { - return fmt.Errorf("adding pending block header: %w", err) - } - - // we assume that if a peer sends us a block announce for a certain block, - // that is also has the chain up until and including that block. - // this may not be a valid assumption, but perhaps we can assume that - // it is likely they will receive this block and its ancestors before us. - cs.blockAnnounceCh <- announcedBlock{ - who: who, - header: blockAnnounceHeader, + err := cs.pendingBlocks.addHeader(announced.header) + if err != nil { + return fmt.Errorf("while adding pending block header: %w", err) + } + + syncState := cs.state.Load().(chainSyncState) + switch syncState { + case tip: + return cs.requestImportedBlock(announced) } - return nil +} + +func (cs *chainSync) requestImportedBlock(announce announcedBlock) error { + peerWhoAnnounced := announce.who + announcedHash := announce.header.Hash() + announcedNumber := announce.header.Number + + has, err := cs.blockState.HasHeader(announcedHash) + if err != nil { + return fmt.Errorf("checking if header exists: %w", err) + } + + if has { + return nil + } + + bestBlockHeader, err := cs.blockState.BestBlockHeader() + if err != nil { + return fmt.Errorf("getting best block header: %w", err) } -func (cs *chainSync) logSyncSpeed() { - defer func() { - cs.logSyncTicker.Stop() - close(cs.logSyncDone) - }() - - for { - before, err := cs.blockState.BestBlockHeader() - if err != nil { - continue - } - - if cs.state == bootstrap { - cs.benchmarker.begin(time.Now(), before.Number) - } - - select { - case <-cs.logSyncTickerC: - case <-cs.ctx.Done(): - return - } - - finalised, err := cs.blockState.GetHighestFinalisedHeader() - if err != nil { - continue - } - - after, err := cs.blockState.BestBlockHeader() - if err != nil { - continue - } - - totalWorkers := cs.workerPool.totalWorkers() - - switch cs.state { - case bootstrap: - cs.benchmarker.end(time.Now(), after.Number) - target, err := cs.getTarget() - if errors.Is(err, errUnableToGetTarget) { - continue - } else if err != nil { - logger.Errorf("while getting target: %s", err) - continue - } - - logger.Infof( - "🔗 imported blocks from %d to %d (hashes [%s ...
%s])", - before.Number, after.Number, before.Hash(), after.Hash()) - - logger.Infof( - "🚣 currently syncing, %d connected peers, %d peers available to sync, "+ - "target block number %d, %.2f average blocks/second, "+ - "%.2f overall average, finalised block number %d with hash %s", - len(cs.network.Peers()), - totalWorkers, - target, cs.benchmarker.mostRecentAverage(), - cs.benchmarker.average(), finalised.Number, finalised.Hash()) - case tip: - logger.Infof( - "💤 node waiting, %d connected peers, %d peers available to sync, "+ - "head block number %d with hash %s, "+ - "finalised block number %d with hash %s", - len(cs.network.Peers()), - totalWorkers, - after.Number, after.Hash(), - finalised.Number, finalised.Hash()) - } + bestBlockHeader, err := cs.blockState.BestBlockHeader() + if err != nil { + return fmt.Errorf("getting best block header: %w", err) } -} -func (cs *chainSync) sync() { - for { - err := cs.maybeSwitchMode() + // if the announced block contains a lower number than our best + // block header, let's check if it is greater than our latests + // finalized header, if so this block belongs to a fork chain + if announcedNumber < bestBlockHeader.Number { + highestFinalizedHeader, err := cs.blockState.GetHighestFinalisedHeader() if err != nil { - logger.Errorf("trying to switch mode: %w", err) - return + return fmt.Errorf("getting highest finalized header") } - switch { - case cs.state == bootstrap: - logger.Infof("using bootstrap sync") - err = cs.executeBootstrapSync() - case cs.state == tip: - logger.Infof("using tip sync") - err = cs.executeTipSync() + // ignore the block if it has the same or lower number + if announcedNumber <= highestFinalizedHeader.Number { + return nil } - if err != nil { - logger.Errorf("executing bootstrap sync: %s", err) - continue - } + return cs.requestForkBlocks(bestBlockHeader, highestFinalizedHeader, announce.header, announce.who) } -} - -func (cs *chainSync) executeTipSync() error { - for { - //cs.workerPool.useConnectedPeers() - slotDurationTimer := time.NewTimer(cs.slotDuration) - - blockAnnouncement := <-cs.blockAnnounceCh - - if !slotDurationTimer.Stop() { - <-slotDurationTimer.C - } - peerWhoAnnounced := blockAnnouncement.who - announcedHash := blockAnnouncement.header.Hash() - announcedNumber := blockAnnouncement.header.Number + cs.requestChainBlocks(announce.header, bestBlockHeader, peerWhoAnnounced) - has, err := cs.blockState.HasHeader(announcedHash) - if err != nil { - return fmt.Errorf("checking if header exists: %s", err) - } - - if has { - continue - } - - bestBlockHeader, err := cs.blockState.BestBlockHeader() - if err != nil { - return fmt.Errorf("getting best block header: %w", err) - } + err = cs.requestPendingBlocks() + if err != nil { + return fmt.Errorf("while requesting pending blocks") + } - // if the announced block contains a lower number than our best - // block header, let's check if it is greater than our latests - // finalized header, if so this block belongs to a fork chain - if announcedNumber < bestBlockHeader.Number { - highestFinalizedHeader, err := cs.blockState.GetHighestFinalisedHeader() - if err != nil { - return fmt.Errorf("getting highest finalized header") - } + return nil +} - // ignore the block if it has the same or lower number - if announcedNumber <= highestFinalizedHeader.Number { - continue - } +func (cs *chainSync) requestChainBlocks(announcedHeader, bestBlockHeader *types.Header, peerWhoAnnounced peer.ID) { + gapLength := uint32(announcedHeader.Number - bestBlockHeader.Number) + startAtBlock := 
+func (cs *chainSync) requestChainBlocks(announcedHeader, bestBlockHeader *types.Header, peerWhoAnnounced peer.ID) { + gapLength := uint32(announcedHeader.Number - bestBlockHeader.Number) + startAtBlock := announcedHeader.Number + totalBlocks := uint32(1) + var request *network.BlockRequestMessage + if gapLength > 1 { + request = descendingBlockRequest(announcedHeader.Hash(), gapLength, bootstrapRequestData) + startAtBlock = announcedHeader.Number - uint(*request.Max) + 1 + totalBlocks = *request.Max + + logger.Debugf("received a block announce from %s, requesting %d blocks, descending request from %s (#%d)", + peerWhoAnnounced, gapLength, announcedHeader.Hash(), announcedHeader.Number) + } else { + gapLength = 1 + request = singleBlockRequest(announcedHeader.Hash(), bootstrapRequestData) + logger.Debugf("received a block announce from %s, requesting a single block %s (#%d)", + peerWhoAnnounced, announcedHeader.Hash(), announcedHeader.Number) + } - logger.Debugf("block announce lower than best block %s (#%d) and greater highest finalized %s (#%d)", - bestBlockHeader.Hash(), bestBlockHeader.Number, highestFinalizedHeader.Hash(), highestFinalizedHeader.Number) + resultsQueue := make(chan *syncTaskResult) + wg := sync.WaitGroup{} - parentExists, err := cs.blockState.HasHeader(blockAnnouncement.header.ParentHash) - if err != nil && !errors.Is(err, chaindb.ErrKeyNotFound) { - return fmt.Errorf("while checking header exists: %w", err) - } + wg.Add(1) + go cs.handleWorkersResults(resultsQueue, startAtBlock, totalBlocks, &wg) + cs.workerPool.submitBoundedRequest(request, peerWhoAnnounced, resultsQueue) + wg.Wait() +} - gapLength := uint32(1) - startAtBlock := announcedNumber - var request *network.BlockRequestMessage +func (cs *chainSync) requestForkBlocks(bestBlockHeader, highestFinalizedHeader, announcedHeader *types.Header, + peerWhoAnnounced peer.ID) error { + logger.Debugf("block announce lower than best block %s (#%d) and greater than the highest finalized %s (#%d)", + bestBlockHeader.Hash(), bestBlockHeader.Number, highestFinalizedHeader.Hash(), highestFinalizedHeader.Number) - if parentExists { - request = singleBlockRequest(announcedHash, bootstrapRequestData) - } else { - gapLength = uint32(announcedNumber - highestFinalizedHeader.Number) - startAtBlock = highestFinalizedHeader.Number + 1 - request = descendingBlockRequest(announcedHash, gapLength, bootstrapRequestData) - } + parentExists, err := cs.blockState.HasHeader(announcedHeader.ParentHash) + if err != nil && !errors.Is(err, chaindb.ErrKeyNotFound) { + return fmt.Errorf("while checking header exists: %w", err) + } - logger.Debugf("received a block announce from %s, requesting %d blocks, starting %s (#%d)", - peerWhoAnnounced, gapLength, announcedHash, announcedNumber) + gapLength := uint32(1) + startAtBlock := announcedHeader.Number + announcedHash := announcedHeader.Hash() + var request *network.BlockRequestMessage - resultsQueue := make(chan *syncTaskResult) - wg := sync.WaitGroup{} + if parentExists { + request = singleBlockRequest(announcedHash, bootstrapRequestData) + } else { + gapLength = uint32(announcedHeader.Number - highestFinalizedHeader.Number) + startAtBlock = highestFinalizedHeader.Number + 1 + request = descendingBlockRequest(announcedHash, gapLength, bootstrapRequestData) + } - wg.Add(1) - go cs.handleWorkersResults(resultsQueue, startAtBlock, gapLength, &wg) - cs.workerPool.submitBoundedRequest(request, peerWhoAnnounced, resultsQueue) - wg.Wait() - } else { - gapLength := uint32(announcedNumber - bestBlockHeader.Number) - startAtBlock := announcedNumber - totalBlocks := uint32(1) - var request *network.BlockRequestMessage - if gapLength > 1 { - request = descendingBlockRequest(announcedHash, gapLength, bootstrapRequestData)
- startAtBlock = announcedNumber - uint(*request.Max) + 1 - totalBlocks = *request.Max - - logger.Debugf("received a block announce from %s, requesting %d blocks, descending request from %s (#%d)", - peerWhoAnnounced, gapLength, announcedHash, announcedNumber) - } else { - gapLength = 1 - request = singleBlockRequest(announcedHash, bootstrapRequestData) - logger.Debugf("received a block announce from %s, requesting a single block %s (#%d)", - peerWhoAnnounced, announcedHash, announcedNumber) - } + logger.Debugf("received a block announce from %s, requesting %d blocks, starting %s (#%d)", + peerWhoAnnounced, gapLength, announcedHash, announcedHeader.Number) - resultsQueue := make(chan *syncTaskResult) - wg := sync.WaitGroup{} + resultsQueue := make(chan *syncTaskResult) + wg := sync.WaitGroup{} - wg.Add(1) - go cs.handleWorkersResults(resultsQueue, startAtBlock, totalBlocks, &wg) - cs.workerPool.submitBoundedRequest(request, peerWhoAnnounced, resultsQueue) - wg.Wait() - } + wg.Add(1) + go cs.handleWorkersResults(resultsQueue, startAtBlock, gapLength, &wg) + cs.workerPool.submitBoundedRequest(request, peerWhoAnnounced, resultsQueue) + wg.Wait() - err = cs.requestPendingBlocks() - if err != nil { - return fmt.Errorf("while requesting pending blocks") - } - } + return nil } func (cs *chainSync) requestPendingBlocks() error { @@ -521,7 +387,8 @@ func (cs *chainSync) requestPendingBlocks() error { return fmt.Errorf("getting highest finalised header: %w", err) } - for _, pendingBlock := range cs.pendingBlocks.getBlocks() { + pendingBlocks := cs.pendingBlocks.getBlocks() + for _, pendingBlock := range pendingBlocks { if pendingBlock.number <= highestFinalized.Number { cs.pendingBlocks.removeBlock(pendingBlock.hash) continue @@ -551,7 +418,7 @@ func (cs *chainSync) requestPendingBlocks() error { startAtBlock := pendingBlock.number - uint(*descendingGapRequest.Max) + 1 // the `requests` in the tip sync are not related necessarily - // the is why we need to treat them separately + // this is why we need to treat them separately wg := sync.WaitGroup{} wg.Add(1) resultsQueue := make(chan *syncTaskResult) @@ -618,36 +485,6 @@ func (cs *chainSync) executeBootstrapSync() error { return nil } -func (cs *chainSync) maybeSwitchMode() error { - head, err := cs.blockState.BestBlockHeader() - if err != nil { - return fmt.Errorf("getting best block header: %w", err) - } - - target, err := cs.getTarget() - if err != nil { - return fmt.Errorf("getting target: %w", err) - } - - switch { - case head.Number+maxResponseSize < target: - // we are at least 128 blocks behind the head, switch to bootstrap - cs.state = bootstrap - isSyncedGauge.Set(float64(cs.state)) - logger.Debugf("switched sync mode to %d", cs.state) - - case head.Number+maxResponseSize > target: - cs.state = tip - isSyncedGauge.Set(float64(cs.state)) - logger.Debugf("switched sync mode to %d", cs.state) - - default: - // head is between (target-128, target), and we don't want to switch modes. - } - - return nil -} - // getTarget takes the average of all peer heads // TODO: should we just return the highest? 
could be an attack vector potentially, if a peer reports some very large // head block number, it would leave us in bootstrap mode forever @@ -862,7 +699,7 @@ func (cs *chainSync) processBlockData(blockData types.BlockData) error { //nolin } // while in bootstrap mode we don't need to broadcast block announcements - announceImportedBlock := cs.state == tip + announceImportedBlock := cs.state.Load().(chainSyncState) == tip if headerInState && bodyInState { err = cs.processBlockDataWithStateHeaderAndBody(blockData, announceImportedBlock) if err != nil { diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index 15e07346e3..09a62995de 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -10,7 +10,6 @@ import ( "time" "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/dot/peerset" "github.com/ChainSafe/gossamer/dot/telemetry" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" @@ -79,189 +78,26 @@ func Test_chainSyncState_String(t *testing.T) { } } -func Test_chainSync_setBlockAnnounce(t *testing.T) { +func Test_chainSync_onImportBlock(t *testing.T) { t.Parallel() - - errTest := errors.New("test error") const somePeer = peer.ID("abc") - block1AnnounceHeader := types.NewHeader(common.Hash{}, common.Hash{}, - common.Hash{}, 1, scale.VaryingDataTypeSlice{}) + errTest := errors.New("test error") block2AnnounceHeader := types.NewHeader(common.Hash{}, common.Hash{}, common.Hash{}, 2, scale.VaryingDataTypeSlice{}) testCases := map[string]struct { - chainSyncBuilder func(ctrl *gomock.Controller) *chainSync - peerID peer.ID - blockAnnounceHeader *types.Header - errWrapped error - errMessage string - expectedQueuedBlockAnnounce *announcedBlock + chainSyncBuilder func(ctrl *gomock.Controller) *chainSync + peerID peer.ID + blockAnnounceHeader *types.Header + errWrapped error + errMessage string }{ - "best_block_header_error": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - blockState.EXPECT().BestBlockHeader().Return(nil, errTest) - return &chainSync{ - peerView: map[peer.ID]*peerView{}, - blockState: blockState, - } - }, - peerID: somePeer, - blockAnnounceHeader: block1AnnounceHeader, - errWrapped: errTest, - errMessage: "best block header: test error", - }, - "number_smaller_than_best_block_number_get_hash_by_number_error": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 2} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - blockState.EXPECT().GetHashByNumber(uint(1)).Return(common.Hash{}, errTest) - return &chainSync{ - peerView: map[peer.ID]*peerView{}, - blockState: blockState, - } - }, - peerID: somePeer, - blockAnnounceHeader: block1AnnounceHeader, - errWrapped: errTest, - errMessage: "get block hash by number: test error", - }, - "number_smaller_than_best_block_number_and_same_hash": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 2} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - blockState.EXPECT().GetHashByNumber(uint(1)).Return(block1AnnounceHeader.Hash(), nil) - return &chainSync{ - peerView: map[peer.ID]*peerView{}, - blockState: blockState, - } - }, - peerID: somePeer, - blockAnnounceHeader: block1AnnounceHeader, - }, - "number_smaller_than_best_block_number_get_highest_finalised_header_error": 
{ - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 2} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - blockState.EXPECT().GetHashByNumber(uint(1)).Return(common.Hash{2}, nil) - blockState.EXPECT().GetHighestFinalisedHeader().Return(nil, errTest) - return &chainSync{ - peerView: map[peer.ID]*peerView{}, - blockState: blockState, - } - }, - peerID: somePeer, - blockAnnounceHeader: block1AnnounceHeader, - errWrapped: errTest, - errMessage: "get highest finalised header: test error", - }, - "number_smaller_than_best_block_announced_number_equaks_finalised_number": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - - bestBlockHeader := &types.Header{Number: 2} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - blockState.EXPECT().GetHashByNumber(uint(1)). - Return(common.Hash{2}, nil) // other hash than someHash - finalisedBlockHeader := &types.Header{Number: 1} - blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) - network := NewMockNetwork(ctrl) - network.EXPECT().ReportPeer(peerset.ReputationChange{ - Value: peerset.BadBlockAnnouncementValue, - Reason: peerset.BadBlockAnnouncementReason, - }, somePeer) - return &chainSync{ - peerView: map[peer.ID]*peerView{}, - blockState: blockState, - network: network, - } - }, - peerID: somePeer, - blockAnnounceHeader: block1AnnounceHeader, - errWrapped: errPeerOnInvalidFork, - errMessage: "peer is on an invalid fork: for peer ZiCa and block number 1", - }, - "number_smaller_than_best_block_number_and_finalised_number_bigger_than_number": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 2} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - blockState.EXPECT().GetHashByNumber(uint(1)). - Return(common.Hash{2}, nil) // other hash than someHash - finalisedBlockHeader := &types.Header{Number: 2} - blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) - network := NewMockNetwork(ctrl) - network.EXPECT().ReportPeer(peerset.ReputationChange{ - Value: peerset.BadBlockAnnouncementValue, - Reason: peerset.BadBlockAnnouncementReason, - }, somePeer) - return &chainSync{ - peerView: map[peer.ID]*peerView{}, - blockState: blockState, - network: network, - } - }, - peerID: somePeer, - blockAnnounceHeader: block1AnnounceHeader, - errWrapped: errPeerOnInvalidFork, - errMessage: "peer is on an invalid fork: for peer ZiCa and block number 1", - }, - "number_smaller_than_best_block_number_and_" + - "finalised_number_smaller_than_number_and_" + - "has_header_error": { + "announced_block_already_exists_in_disjoint_set": { chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 3} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - blockState.EXPECT().GetHashByNumber(uint(2)). 
- Return(common.Hash{5, 1, 2}, nil) // other hash than block2AnnounceHeader hash - finalisedBlockHeader := &types.Header{Number: 1} - blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) - blockState.EXPECT().HasHeader(block2AnnounceHeader.Hash()).Return(false, errTest) - return &chainSync{ - peerView: map[peer.ID]*peerView{}, - blockState: blockState, - } - }, - peerID: somePeer, - blockAnnounceHeader: block2AnnounceHeader, - errWrapped: errTest, - errMessage: "has header: test error", - }, - "number_smaller_than_best_block_number_and_" + - "finalised_number_smaller_than_number_and_" + - "has_the_hash": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 3} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - blockState.EXPECT().GetHashByNumber(uint(2)). - Return(common.Hash{2}, nil) // other hash than someHash - finalisedBlockHeader := &types.Header{Number: 1} - blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) - blockState.EXPECT().HasHeader(block2AnnounceHeader.Hash()).Return(true, nil) - return &chainSync{ - peerView: map[peer.ID]*peerView{}, - blockState: blockState, - } - }, - peerID: somePeer, - blockAnnounceHeader: block2AnnounceHeader, - }, - "number_bigger_than_best_block_number_already_exists_in_disjoint_set": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 1} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) pendingBlocks := NewMockDisjointBlockSet(ctrl) pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(true) return &chainSync{ - peerView: map[peer.ID]*peerView{}, - blockState: blockState, pendingBlocks: pendingBlocks, } }, @@ -271,50 +107,42 @@ func Test_chainSync_setBlockAnnounce(t *testing.T) { errMessage: fmt.Sprintf("already in disjoint set: block %s (#%d)", block2AnnounceHeader.Hash(), block2AnnounceHeader.Number), }, - "number_bigger_than_best_block_number_added_in_disjoint_set_with_success": { + "failed_to_add_announced_block_in_disjoint_set": { chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 1} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) pendingBlocks := NewMockDisjointBlockSet(ctrl) pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) - pendingBlocks.EXPECT().addHeader(block2AnnounceHeader).Return(nil) + pendingBlocks.EXPECT().addHeader(block2AnnounceHeader).Return(errTest) + return &chainSync{ - peerView: map[peer.ID]*peerView{}, - blockState: blockState, pendingBlocks: pendingBlocks, - // buffered of 1 so setBlockAnnounce can write to it - // without a consumer of the channel on the other end. 
- blockAnnounceCh: make(chan announcedBlock, 1), } }, peerID: somePeer, blockAnnounceHeader: block2AnnounceHeader, - expectedQueuedBlockAnnounce: &announcedBlock{ - who: somePeer, - header: block2AnnounceHeader, - }, + errWrapped: errTest, + errMessage: "while adding pending block header: test error", }, + //"announced_block_while_in_bootstrap_mode": {}, + //"announced_block_while_in_tip_mode": {}, } - for name, testCase := range testCases { - testCase := testCase + for name, tt := range testCases { + tt := tt t.Run(name, func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) - chainSync := testCase.chainSyncBuilder(ctrl) - - err := chainSync.setBlockAnnounce(testCase.peerID, testCase.blockAnnounceHeader) + chainSync := tt.chainSyncBuilder(ctrl) - assert.ErrorIs(t, err, testCase.errWrapped) - if testCase.errWrapped != nil { - assert.EqualError(t, err, testCase.errMessage) + announced := announcedBlock{ + who: tt.peerID, + header: tt.blockAnnounceHeader, } + err := chainSync.onImportBlock(announced) - if testCase.expectedQueuedBlockAnnounce != nil { - queuedBlockAnnounce := <-chainSync.blockAnnounceCh - assert.Equal(t, *testCase.expectedQueuedBlockAnnounce, queuedBlockAnnounce) + assert.ErrorIs(t, err, tt.errWrapped) + if tt.errWrapped != nil { + assert.EqualError(t, err, tt.errMessage) } }) } @@ -325,12 +153,12 @@ func TestChainSync_setPeerHead(t *testing.T) { randomHash := common.MustHexToHash(randomHashString) testcases := map[string]struct { - newChainSync func(t *testing.T, ctrl *gomock.Controller) *chainSync - peerID peer.ID - bestHash common.Hash - bestNumber uint - shouldBeAndWorker bool - workerStatus byte + newChainSync func(t *testing.T, ctrl *gomock.Controller) *chainSync + peerID peer.ID + bestHash common.Hash + bestNumber uint + shouldBeAWorker bool + workerStatus byte }{ "set_peer_head_with_new_peer": { newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { @@ -341,13 +169,13 @@ func TestChainSync_setPeerHead(t *testing.T) { cs.workerPool = workerPool return cs }, - peerID: peer.ID("peer-test"), - bestHash: randomHash, - bestNumber: uint(20), - shouldBeAndWorker: true, - workerStatus: available, + peerID: peer.ID("peer-test"), + bestHash: randomHash, + bestNumber: uint(20), + shouldBeAWorker: true, + workerStatus: available, }, - "set_peer_head_with_a_to_ignore_peer_should_not_be_included_in_the_workerpoll": { + "set_peer_head_with_a_to_ignore_peer_should_be_included_in_the_workerpool": { newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { networkMock := NewMockNetwork(ctrl) workerPool := newSyncWorkerPool(networkMock) workerPool.ignorePeers = map[peer.ID]struct{}{ peer.ID("peer-test"): {}, } cs := newTestChainSync(ctrl) cs.workerPool = workerPool return cs }, - peerID: peer.ID("peer-test"), - bestHash: randomHash, - bestNumber: uint(20), - shouldBeAndWorker: false, + peerID: peer.ID("peer-test"), + bestHash: randomHash, + bestNumber: uint(20), + shouldBeAWorker: true, }, "set_peer_head_that_stills_punished_in_the_worker_poll": { newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { @@ -379,11 +207,11 @@ func TestChainSync_setPeerHead(t *testing.T) { cs.workerPool = workerPool return cs }, - peerID: peer.ID("peer-test"), - bestHash: randomHash, - bestNumber: uint(20), - shouldBeAndWorker: true, - workerStatus: punished, + peerID: peer.ID("peer-test"), + bestHash: randomHash, + bestNumber: uint(20), + shouldBeAWorker: true, + workerStatus: punished, }, "set_peer_head_that_punishment_isnot_valid_in_the_worker_poll": { newChainSync: func(t *testing.T,
ctrl *gomock.Controller) *chainSync { @@ -400,11 +228,11 @@ func TestChainSync_setPeerHead(t *testing.T) { cs.workerPool = workerPool return cs }, - peerID: peer.ID("peer-test"), - bestHash: randomHash, - bestNumber: uint(20), - shouldBeAndWorker: true, - workerStatus: available, + peerID: peer.ID("peer-test"), + bestHash: randomHash, + bestNumber: uint(20), + shouldBeAWorker: true, + workerStatus: available, }, } @@ -421,7 +249,7 @@ func TestChainSync_setPeerHead(t *testing.T) { require.Equal(t, tt.bestHash, view.hash) require.Equal(t, tt.bestNumber, view.number) - if tt.shouldBeAndWorker { + if tt.shouldBeAWorker { syncWorker, exists := cs.workerPool.workers[tt.peerID] require.True(t, exists) require.Equal(t, tt.workerStatus, syncWorker.status) diff --git a/dot/sync/mock_chain_sync_test.go b/dot/sync/mock_chain_sync_test.go index b3a0decdab..7a59eae49c 100644 --- a/dot/sync/mock_chain_sync_test.go +++ b/dot/sync/mock_chain_sync_test.go @@ -7,7 +7,6 @@ package sync import ( reflect "reflect" - types "github.com/ChainSafe/gossamer/dot/types" common "github.com/ChainSafe/gossamer/lib/common" gomock "github.com/golang/mock/gomock" peer "github.com/libp2p/go-libp2p/core/peer" @@ -51,18 +50,18 @@ func (mr *MockChainSyncMockRecorder) getHighestBlock() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getHighestBlock", reflect.TypeOf((*MockChainSync)(nil).getHighestBlock)) } -// setBlockAnnounce mocks base method. -func (m *MockChainSync) setBlockAnnounce(from peer.ID, header *types.Header) error { +// onImportBlock mocks base method. +func (m *MockChainSync) onImportBlock(arg0 announcedBlock) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "setBlockAnnounce", from, header) + ret := m.ctrl.Call(m, "onImportBlock", arg0) ret0, _ := ret[0].(error) return ret0 } -// setBlockAnnounce indicates an expected call of setBlockAnnounce. -func (mr *MockChainSyncMockRecorder) setBlockAnnounce(from, header interface{}) *gomock.Call { +// onImportBlock indicates an expected call of onImportBlock. +func (mr *MockChainSyncMockRecorder) onImportBlock(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "setBlockAnnounce", reflect.TypeOf((*MockChainSync)(nil).setBlockAnnounce), from, header) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "onImportBlock", reflect.TypeOf((*MockChainSync)(nil).onImportBlock), arg0) } // setPeerHead mocks base method. diff --git a/dot/sync/syncer.go b/dot/sync/syncer.go index 1a8d4aa2a8..2feb455d8c 100644 --- a/dot/sync/syncer.go +++ b/dot/sync/syncer.go @@ -4,9 +4,11 @@ package sync import ( + "fmt" "time" "github.com/ChainSafe/gossamer/dot/network" + "github.com/ChainSafe/gossamer/dot/peerset" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/internal/log" @@ -90,8 +92,73 @@ func (s *Service) HandleBlockAnnounceHandshake(from peer.ID, msg *network.BlockA // HandleBlockAnnounce notifies the `chainSync` module that we have received a block announcement from the given peer. 
func (s *Service) HandleBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) error { logger.Debug("received BlockAnnounceMessage") - header := types.NewHeader(msg.ParentHash, msg.StateRoot, msg.ExtrinsicsRoot, msg.Number, msg.Digest) - return s.chainSync.setBlockAnnounce(from, header) + blockAnnounceHeader := types.NewHeader(msg.ParentHash, msg.StateRoot, msg.ExtrinsicsRoot, msg.Number, msg.Digest) + blockAnnounceHeaderHash := blockAnnounceHeader.Hash() + + // if the peer reports a best block number lower than or equal to ours, + // check if they are on a fork or not + bestBlockHeader, err := s.blockState.BestBlockHeader() + if err != nil { + return fmt.Errorf("best block header: %w", err) + } + + if blockAnnounceHeader.Number <= bestBlockHeader.Number { + // check if our block hash for that number is the same, if so, do nothing + // as we already have that block + // TODO: check what happens when get hash by number returns nothing or ErrNotExists + ourHash, err := s.blockState.GetHashByNumber(blockAnnounceHeader.Number) + if err != nil { + return fmt.Errorf("get block hash by number: %w", err) + } + + if ourHash == blockAnnounceHeaderHash { + return nil + } + + // check if their best block is on an invalid chain, if it is, + // potentially downscore them + // for now, we can remove them from the syncing peers set + fin, err := s.blockState.GetHighestFinalisedHeader() + if err != nil { + return fmt.Errorf("get highest finalised header: %w", err) + } + + // their block hash doesn't match ours for that number (ie. they are on a different + // chain), and also the highest finalised block is higher than that number. + // thus the peer is on an invalid chain + if fin.Number >= blockAnnounceHeader.Number { + // TODO: downscore this peer, or temporarily don't sync from them? (#1399) + // perhaps we need another field in `peerState` to mark whether the state is valid or not + s.network.ReportPeer(peerset.ReputationChange{ + Value: peerset.BadBlockAnnouncementValue, + Reason: peerset.BadBlockAnnouncementReason, + }, from) + return fmt.Errorf("%w: for peer %s and block number %d", + errPeerOnInvalidFork, from, blockAnnounceHeader.Number) + } + + // peer is on a fork, check if we have processed the fork already or not + // ie. is their block written to our db? + has, err := s.blockState.HasHeader(blockAnnounceHeaderHash) + if err != nil { + return fmt.Errorf("while checking if header exists: %w", err) + } + + // if so, do nothing, as we already have their fork + if has { + return nil + } + } + + // we assume that if a peer sends us a block announce for a certain block, + // that it also has the chain up until and including that block. + // this may not be a valid assumption, but perhaps we can assume that + // it is likely they will receive this block and its ancestors before us.
+ announcedBlock := announcedBlock{ + who: from, + header: blockAnnounceHeader, + } + return s.chainSync.onImportBlock(announcedBlock) } // IsSynced exposes the synced state diff --git a/dot/sync/syncer_test.go b/dot/sync/syncer_test.go index 302a8a45db..3371037d83 100644 --- a/dot/sync/syncer_test.go +++ b/dot/sync/syncer_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/ChainSafe/gossamer/dot/network" + "github.com/ChainSafe/gossamer/dot/peerset" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/pkg/scale" @@ -63,49 +64,213 @@ func TestNewService(t *testing.T) { func TestService_HandleBlockAnnounce(t *testing.T) { t.Parallel() - ctrl := gomock.NewController(t) + errTest := errors.New("test error") + const somePeer = peer.ID("abc") - type fields struct { - chainSync ChainSync - } - type args struct { - from peer.ID - msg *network.BlockAnnounceMessage - } - tests := []struct { - name string - fields fields - args args - wantErr bool + block1AnnounceHeader := types.NewHeader(common.Hash{}, common.Hash{}, + common.Hash{}, 1, scale.VaryingDataTypeSlice{}) + block2AnnounceHeader := types.NewHeader(common.Hash{}, common.Hash{}, + common.Hash{}, 2, scale.VaryingDataTypeSlice{}) + + testCases := map[string]struct { + serviceBuilder func(ctrl *gomock.Controller) *Service + peerID peer.ID + blockAnnounceHeader *types.Header + errWrapped error + errMessage string }{ - { - name: "working_example", - fields: fields{ - chainSync: newMockChainSync(ctrl), + "best_block_header_error": { + serviceBuilder: func(ctrl *gomock.Controller) *Service { + blockState := NewMockBlockState(ctrl) + blockState.EXPECT().BestBlockHeader().Return(nil, errTest) + return &Service{ + blockState: blockState, + } }, - args: args{ - from: peer.ID("1"), - msg: &network.BlockAnnounceMessage{ - ParentHash: common.Hash{}, - Number: 1, - StateRoot: common.Hash{}, - ExtrinsicsRoot: common.Hash{}, - Digest: scale.VaryingDataTypeSlice{}, - BestBlock: false, - }, + peerID: somePeer, + blockAnnounceHeader: block1AnnounceHeader, + errWrapped: errTest, + errMessage: "best block header: test error", + }, + "number_smaller_than_best_block_number_get_hash_by_number_error": { + serviceBuilder: func(ctrl *gomock.Controller) *Service { + blockState := NewMockBlockState(ctrl) + bestBlockHeader := &types.Header{Number: 2} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + blockState.EXPECT().GetHashByNumber(uint(1)).Return(common.Hash{}, errTest) + + return &Service{ + blockState: blockState, + } + }, + peerID: somePeer, + blockAnnounceHeader: block1AnnounceHeader, + errWrapped: errTest, + errMessage: "get block hash by number: test error", + }, + "number_smaller_than_best_block_number_and_same_hash": { + serviceBuilder: func(ctrl *gomock.Controller) *Service { + blockState := NewMockBlockState(ctrl) + bestBlockHeader := &types.Header{Number: 2} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + blockState.EXPECT().GetHashByNumber(uint(1)).Return(block1AnnounceHeader.Hash(), nil) + return &Service{ + blockState: blockState, + } + }, + peerID: somePeer, + blockAnnounceHeader: block1AnnounceHeader, + }, + "number_smaller_than_best_block_number_get_highest_finalised_header_error": { + serviceBuilder: func(ctrl *gomock.Controller) *Service { + blockState := NewMockBlockState(ctrl) + bestBlockHeader := &types.Header{Number: 2} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + 
blockState.EXPECT().GetHashByNumber(uint(1)).Return(common.Hash{2}, nil) + blockState.EXPECT().GetHighestFinalisedHeader().Return(nil, errTest) + return &Service{ + blockState: blockState, + } + }, + peerID: somePeer, + blockAnnounceHeader: block1AnnounceHeader, + errWrapped: errTest, + errMessage: "get highest finalised header: test error", + }, + "number_smaller_than_best_block_announced_number_equals_finalised_number": { + serviceBuilder: func(ctrl *gomock.Controller) *Service { + blockState := NewMockBlockState(ctrl) + + bestBlockHeader := &types.Header{Number: 2} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + blockState.EXPECT().GetHashByNumber(uint(1)). + Return(common.Hash{2}, nil) // other hash than someHash + finalisedBlockHeader := &types.Header{Number: 1} + blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) + network := NewMockNetwork(ctrl) + network.EXPECT().ReportPeer(peerset.ReputationChange{ + Value: peerset.BadBlockAnnouncementValue, + Reason: peerset.BadBlockAnnouncementReason, + }, somePeer) + return &Service{ + blockState: blockState, + network: network, + } }, - wantErr: false, + peerID: somePeer, + blockAnnounceHeader: block1AnnounceHeader, + errWrapped: errPeerOnInvalidFork, + errMessage: "peer is on an invalid fork: for peer ZiCa and block number 1", + }, + "number_smaller_than_best_block_number_and_finalised_number_bigger_than_number": { + serviceBuilder: func(ctrl *gomock.Controller) *Service { + blockState := NewMockBlockState(ctrl) + bestBlockHeader := &types.Header{Number: 2} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + blockState.EXPECT().GetHashByNumber(uint(1)). + Return(common.Hash{2}, nil) // other hash than someHash + finalisedBlockHeader := &types.Header{Number: 2} + blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) + network := NewMockNetwork(ctrl) + network.EXPECT().ReportPeer(peerset.ReputationChange{ + Value: peerset.BadBlockAnnouncementValue, + Reason: peerset.BadBlockAnnouncementReason, + }, somePeer) + return &Service{ + blockState: blockState, + network: network, + } + }, + peerID: somePeer, + blockAnnounceHeader: block1AnnounceHeader, + errWrapped: errPeerOnInvalidFork, + errMessage: "peer is on an invalid fork: for peer ZiCa and block number 1", + }, + "number_smaller_than_best_block_number_and_" + + "finalised_number_smaller_than_number_and_" + + "has_header_error": { + serviceBuilder: func(ctrl *gomock.Controller) *Service { + blockState := NewMockBlockState(ctrl) + bestBlockHeader := &types.Header{Number: 3} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + blockState.EXPECT().GetHashByNumber(uint(2)).
+ Return(common.Hash{5, 1, 2}, nil) // other hash than block2AnnounceHeader hash + finalisedBlockHeader := &types.Header{Number: 1} + blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) + blockState.EXPECT().HasHeader(block2AnnounceHeader.Hash()).Return(false, errTest) + return &Service{ + blockState: blockState, + } + }, + peerID: somePeer, + blockAnnounceHeader: block2AnnounceHeader, + errWrapped: errTest, + errMessage: "has header: test error", + }, + "number_smaller_than_best_block_number_and_" + + "finalised_number_smaller_than_number_and_" + + "has_the_hash": { + serviceBuilder: func(ctrl *gomock.Controller) *Service { + blockState := NewMockBlockState(ctrl) + bestBlockHeader := &types.Header{Number: 3} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + blockState.EXPECT().GetHashByNumber(uint(2)). + Return(common.Hash{2}, nil) // other hash than someHash + finalisedBlockHeader := &types.Header{Number: 1} + blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) + blockState.EXPECT().HasHeader(block2AnnounceHeader.Hash()).Return(true, nil) + return &Service{ + blockState: blockState, + } + }, + peerID: somePeer, + blockAnnounceHeader: block2AnnounceHeader, + }, + "number_bigger_than_best_block_number_added_in_disjoint_set_with_success": { + serviceBuilder: func(ctrl *gomock.Controller) *Service { + + blockState := NewMockBlockState(ctrl) + bestBlockHeader := &types.Header{Number: 1} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + chainSyncMock := NewMockChainSync(ctrl) + + expectedAnnouncedBlock := announcedBlock{ + who: somePeer, + header: block2AnnounceHeader, + } + + chainSyncMock.EXPECT().onImportBlock(expectedAnnouncedBlock).Return(nil) + + return &Service{ + blockState: blockState, + chainSync: chainSyncMock, + } + }, + peerID: somePeer, + blockAnnounceHeader: block2AnnounceHeader, }, } - for _, tt := range tests { + + for name, tt := range testCases { tt := tt - t.Run(tt.name, func(t *testing.T) { + t.Run(name, func(t *testing.T) { t.Parallel() - s := &Service{ - chainSync: tt.fields.chainSync, + ctrl := gomock.NewController(t) + + service := tt.serviceBuilder(ctrl) + + blockAnnounceMessage := &network.BlockAnnounceMessage{ + ParentHash: tt.blockAnnounceHeader.ParentHash, + Number: tt.blockAnnounceHeader.Number, + StateRoot: tt.blockAnnounceHeader.StateRoot, + ExtrinsicsRoot: tt.blockAnnounceHeader.ExtrinsicsRoot, + Digest: tt.blockAnnounceHeader.Digest, + BestBlock: true, } - if err := s.HandleBlockAnnounce(tt.args.from, tt.args.msg); (err != nil) != tt.wantErr { - t.Errorf("HandleBlockAnnounce() error = %v, wantErr %v", err, tt.wantErr) + err := service.HandleBlockAnnounce(tt.peerID, blockAnnounceMessage) + assert.ErrorIs(t, err, tt.errWrapped) + if tt.errWrapped != nil { + assert.EqualError(t, err, tt.errMessage) } }) } @@ -113,10 +278,6 @@ func TestService_HandleBlockAnnounce(t *testing.T) { func newMockChainSync(ctrl *gomock.Controller) ChainSync { mock := NewMockChainSync(ctrl) - header := types.NewHeader(common.Hash{}, common.Hash{}, common.Hash{}, 1, - scale.VaryingDataTypeSlice{}) - - mock.EXPECT().setBlockAnnounce(peer.ID("1"), header).Return(nil).AnyTimes() mock.EXPECT().setPeerHead(peer.ID("1"), common.Hash{}, uint(0)).Return(nil).AnyTimes() mock.EXPECT().syncState().Return(bootstrap).AnyTimes() mock.EXPECT().start().AnyTimes() diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 576a62715f..7aab436ea5 100644 --- a/dot/sync/worker_pool.go +++ 
b/dot/sync/worker_pool.go @@ -74,20 +74,24 @@ func (s *syncWorkerPool) useConnectedPeers() { s.l.Lock() defer s.l.Unlock() for _, connectedPeer := range connectedPeers { - s.newPeer(connectedPeer) + s.newPeer(connectedPeer, false) } } func (s *syncWorkerPool) fromBlockAnnounce(who peer.ID) { s.l.Lock() defer s.l.Unlock() - s.newPeer(who) + s.newPeer(who, true) } -func (s *syncWorkerPool) newPeer(who peer.ID) { - _, toIgnore := s.ignorePeers[who] - if toIgnore { - return +func (s *syncWorkerPool) newPeer(who peer.ID, isFromBlockAnnounce bool) { + _, exists := s.ignorePeers[who] + if exists { + if isFromBlockAnnounce { + delete(s.ignorePeers, who) + } else { + return + } } peerSync, has := s.workers[who] From ae8b3670a454ba07f5b3baa6a93412ed8536147c Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 29 May 2023 17:58:09 -0400 Subject: [PATCH 045/140] chore: run the peer discovery every time --- chain/westend/defaults.go | 2 +- dot/network/discovery.go | 10 +++++----- dot/sync/chain_sync.go | 18 ++++++++++------- dot/sync/worker_pool.go | 41 ++++++++++++++++++++------------------- 4 files changed, 38 insertions(+), 33 deletions(-) diff --git a/chain/westend/defaults.go b/chain/westend/defaults.go index 0e24657e54..80be61e83a 100644 --- a/chain/westend/defaults.go +++ b/chain/westend/defaults.go @@ -13,7 +13,7 @@ var ( // defaultID Default chain ID defaultID = "westend2" // defaultBasePath Default node base directory path - defaultBasePath = "~/.gossamer/westend" + defaultBasePath = "/Volumes/SDD01/gossamer/westend" // defaultChainSpec is the default chain specification path defaultChainSpec = "./chain/westend/genesis.json" ) diff --git a/dot/network/discovery.go b/dot/network/discovery.go index e8cb8212c1..69e739b650 100644 --- a/dot/network/discovery.go +++ b/dot/network/discovery.go @@ -28,7 +28,7 @@ var ( startDHTTimeout = time.Second * 10 initialAdvertisementTimeout = time.Millisecond tryAdvertiseTimeout = time.Second * 30 - connectToPeersTimeout = time.Minute * 5 + connectToPeersTimeout = time.Minute findPeersTimeout = time.Minute ) @@ -183,9 +183,9 @@ func (d *discovery) checkPeerCount() { case <-d.ctx.Done(): return case <-ticker.C: - if len(d.h.Network().Peers()) > d.minPeers { - continue - } + // if len(d.h.Network().Peers()) > d.minPeers { + // continue + // } d.findPeers() } @@ -212,7 +212,7 @@ func (d *discovery) findPeers() { continue } - logger.Tracef("found new peer %s via DHT", peer.ID) + logger.Infof("found new peer %s via DHT", peer.ID) d.h.Peerstore().AddAddrs(peer.ID, peer.Addrs, peerstore.PermanentAddrTTL) d.handler.AddPeer(0, peer.ID) } diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 6f2ba15791..9bf3ad956e 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -535,6 +535,9 @@ loop: idleTimer := time.NewTimer(idleDuration) select { + case <-cs.stopCh: + return + case <-idleTimer.C: logger.Warnf("idle ticker triggered! 
checking pool") cs.workerPool.useConnectedPeers() @@ -553,10 +556,8 @@ loop: logger.Errorf("task result: peer(%s) error: %s", taskResult.who, taskResult.err) - if errors.Is(taskResult.err, network.ErrFailedToReadEntireMessage) { - cs.workerPool.punishPeer(taskResult.who, false) - } else if !errors.Is(taskResult.err, network.ErrReceivedEmptyMessage) { - cs.workerPool.punishPeer(taskResult.who, true) + if !errors.Is(taskResult.err, network.ErrReceivedEmptyMessage) { + cs.workerPool.punishPeer(taskResult.who) } cs.workerPool.submitRequest(taskResult.request, workersResults) @@ -576,20 +577,22 @@ loop: switch { case errors.Is(err, errResponseIsNotChain): logger.Criticalf("response invalid: %s", err) - cs.workerPool.punishPeer(taskResult.who, false) + cs.workerPool.punishPeer(taskResult.who) cs.workerPool.submitRequest(taskResult.request, workersResults) continue case errors.Is(err, errEmptyBlockData): + cs.workerPool.punishPeer(taskResult.who) cs.workerPool.submitRequest(taskResult.request, workersResults) continue case errors.Is(err, errUnknownParent): case errors.Is(err, errBadBlock): logger.Warnf("peer %s sent a bad block: %s", who, err) - cs.workerPool.punishPeer(taskResult.who, true) + cs.workerPool.ignorePeerAsWorker(taskResult.who) cs.workerPool.submitRequest(taskResult.request, workersResults) + continue case err != nil: logger.Criticalf("response invalid: %s", err) - cs.workerPool.punishPeer(taskResult.who, false) + cs.workerPool.punishPeer(taskResult.who) cs.workerPool.submitRequest(taskResult.request, workersResults) continue } @@ -611,6 +614,7 @@ loop: // we need to check if we've filled all positions // otherwise we should wait for more responses + fmt.Printf("actual: %d, next state: %d\n", waitingBlocks, waitingBlocks-uint32(len(response.BlockData))) waitingBlocks -= uint32(len(response.BlockData)) if waitingBlocks == 0 { break loop diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 7aab436ea5..59524925fb 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -18,8 +18,8 @@ const ( ) const ( - ignorePeerTimeout = 2 * time.Minute - maxRequestsAllowed uint = 40 + ignorePeerBaseTimeout = time.Minute + maxRequestsAllowed uint = 40 ) type syncTask struct { @@ -85,15 +85,6 @@ func (s *syncWorkerPool) fromBlockAnnounce(who peer.ID) { } func (s *syncWorkerPool) newPeer(who peer.ID, isFromBlockAnnounce bool) { - _, exists := s.ignorePeers[who] - if exists { - if isFromBlockAnnounce { - delete(s.ignorePeers, who) - } else { - return - } - } - peerSync, has := s.workers[who] if !has { peerSync = &peerSyncWorker{status: available} @@ -129,16 +120,10 @@ func (s *syncWorkerPool) submitRequests(requests []*network.BlockRequestMessage, } } -func (s *syncWorkerPool) punishPeer(who peer.ID, ignore bool) { +func (s *syncWorkerPool) punishPeer(who peer.ID) { s.l.Lock() defer s.l.Unlock() - if ignore { - s.ignorePeers[who] = struct{}{} - delete(s.workers, who) - return - } - _, has := s.workers[who] if !has { return @@ -146,14 +131,30 @@ func (s *syncWorkerPool) punishPeer(who peer.ID, ignore bool) { s.workers[who] = &peerSyncWorker{ status: punished, - punishedTime: time.Now().Add(ignorePeerTimeout), + punishedTime: time.Now().Add(ignorePeerBaseTimeout), } } +func (s *syncWorkerPool) ignorePeerAsWorker(who peer.ID) { + s.l.Lock() + defer s.l.Unlock() + + delete(s.workers, who) + s.ignorePeers[who] = struct{}{} +} + +// totalWorkers only returns available or busy workers func (s *syncWorkerPool) totalWorkers() (total uint) { s.l.RLock() defer s.l.RUnlock() - return 
uint(len(s.workers)) + + for _, worker := range s.workers { + if worker.status != punished { + total += 1 + } + } + + return total } // getAvailablePeer returns the very first peer available and changes From e1b4bd4cea88131a53706f667ae83632829346ef Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 30 May 2023 08:42:13 -0400 Subject: [PATCH 046/140] chore: use a resource manager to autoscale outbound requests --- chain/westend/defaults.go | 3 ++- dot/network/discovery.go | 9 ++++--- dot/network/host.go | 8 ++++++ dot/sync/chain_sync.go | 49 ++++++++++++++++++++++++++++--------- dot/sync/chain_sync_test.go | 8 +++--- dot/sync/worker_pool.go | 28 ++++++++++++++------- 6 files changed, 75 insertions(+), 30 deletions(-) diff --git a/chain/westend/defaults.go b/chain/westend/defaults.go index 80be61e83a..3f9e0ade53 100644 --- a/chain/westend/defaults.go +++ b/chain/westend/defaults.go @@ -13,7 +13,7 @@ var ( // defaultID Default chain ID defaultID = "westend2" // defaultBasePath Default node base directory path - defaultBasePath = "/Volumes/SDD01/gossamer/westend" + defaultBasePath = "~/.gossamer/westend" // defaultChainSpec is the default chain specification path defaultChainSpec = "./chain/westend/genesis.json" ) @@ -31,6 +31,7 @@ func DefaultConfig() *cfg.Config { config.Network.NoMDNS = false config.Log.Digest = "trace" config.Log.Sync = "trace" + config.Network.MaxPeers = 1024 return config } diff --git a/dot/network/discovery.go b/dot/network/discovery.go index 69e739b650..1de63ae49a 100644 --- a/dot/network/discovery.go +++ b/dot/network/discovery.go @@ -183,9 +183,9 @@ func (d *discovery) checkPeerCount() { case <-d.ctx.Done(): return case <-ticker.C: - // if len(d.h.Network().Peers()) > d.minPeers { - // continue - // } + if len(d.h.Network().Peers()) >= d.maxPeers { + continue + } d.findPeers() } @@ -212,7 +212,8 @@ func (d *discovery) findPeers() { continue } - logger.Infof("found new peer %s via DHT", peer.ID) + //fmt.Printf("%v\n", peer.Addrs) + //logger.Infof("found new peer %s via DHT", peer.ID) d.h.Peerstore().AddAddrs(peer.ID, peer.Addrs, peerstore.PermanentAddrTTL) d.handler.AddPeer(0, peer.ID) } diff --git a/dot/network/host.go b/dot/network/host.go index 1374cb672f..3217ae3d6b 100644 --- a/dot/network/host.go +++ b/dot/network/host.go @@ -26,6 +26,7 @@ import ( "github.com/libp2p/go-libp2p/core/peerstore" "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds" + rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" ma "github.com/multiformats/go-multiaddr" ) @@ -178,8 +179,15 @@ func newHost(ctx context.Context, cfg *Config) (*host, error) { return nil, fmt.Errorf("failed to create peerstore: %w", err) } + limiter := rcmgr.NewFixedLimiter(rcmgr.DefaultLimits.AutoScale()) + rcmanager, err := rcmgr.NewResourceManager(limiter) + if err != nil { + return nil, fmt.Errorf("while creating the resource manager: %w", err) + } + // set libp2p host options opts := []libp2p.Option{ + libp2p.ResourceManager(rcmanager), libp2p.ListenAddrs(addr), libp2p.DisableRelay(), libp2p.Identity(cfg.privateKey), diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 9bf3ad956e..fd2627d864 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "math/big" + "strings" "sync" "sync/atomic" "time" @@ -200,6 +201,20 @@ func (cs *chainSync) sync() { return } + finalisedHeader, err := cs.blockState.GetHighestFinalisedHeader() + if err != nil { + logger.Criticalf("getting finalised 
block header: %s", err) + return + } + logger.Infof( + "🚣 currently syncing, %d peers connected, "+ + "%d available workers, "+ + "target block number %d, "+ + "finalised block number %d with hash %s", + len(cs.network.Peers()), + cs.workerPool.totalWorkers(), + syncTarget, finalisedHeader.Number, finalisedHeader.Hash()) + bestBlockNumber := bestBlockHeader.Number isFarFromTarget := bestBlockNumber+maxResponseSize < syncTarget @@ -517,19 +532,18 @@ func (cs *chainSync) getTarget() (uint, error) { func (cs *chainSync) handleWorkersResults(workersResults chan *syncTaskResult, startAtBlock uint, totalBlocks uint32, wg *sync.WaitGroup) { startTime := time.Now() defer func() { - tookSeeconds := time.Since(startTime).Seconds() - bps := float64(totalBlocks) / tookSeeconds - logger.Debugf("⛓️ synced %d blocks, took: %.2f seconds, bps: %.2f blocks/second", totalBlocks, tookSeeconds, bps) + totalSyncAndImportSeconds := time.Since(startTime).Seconds() + bps := float64(totalBlocks) / totalSyncAndImportSeconds + logger.Debugf("⛓️ synced %d blocks, took: %.2f seconds, bps: %.2f blocks/second", totalBlocks, totalSyncAndImportSeconds, bps) wg.Done() }() - logger.Debugf("waiting for %d blocks", totalBlocks) + logger.Debugf("💤 waiting for %d blocks", totalBlocks) syncingChain := make([]*types.BlockData, totalBlocks) // the total numbers of blocks is missing in the syncing chain waitingBlocks := totalBlocks -loop: - for { + for waitingBlocks > 0 { // in a case where we don't handle workers results we should check the pool idleDuration := time.Minute idleTimer := time.NewTimer(idleDuration) @@ -557,7 +571,16 @@ loop: taskResult.who, taskResult.err) if !errors.Is(taskResult.err, network.ErrReceivedEmptyMessage) { - cs.workerPool.punishPeer(taskResult.who) + switch { + case strings.Contains(taskResult.err.Error(), "protocols not supported"): + cs.network.ReportPeer(peerset.ReputationChange{ + Value: peerset.BadProtocolValue, + Reason: peerset.BadProtocolReason, + }, taskResult.who) + cs.workerPool.ignorePeerAsWorker(taskResult.who) + default: + cs.workerPool.punishPeer(taskResult.who) + } } cs.workerPool.submitRequest(taskResult.request, workersResults) @@ -607,6 +630,11 @@ loop: lastBlockInResponse.Header.Number, lastBlockInResponse.Hash) } + cs.network.ReportPeer(peerset.ReputationChange{ + Value: peerset.GossipSuccessValue, + Reason: peerset.GossipSuccessReason, + }, taskResult.who) + for _, blockInResponse := range response.BlockData { blockExactIndex := blockInResponse.Header.Number - startAtBlock syncingChain[blockExactIndex] = blockInResponse @@ -614,15 +642,12 @@ loop: // we need to check if we've filled all positions // otherwise we should wait for more responses - fmt.Printf("actual: %d, next state: %d\n", waitingBlocks, waitingBlocks-uint32(len(response.BlockData))) waitingBlocks -= uint32(len(response.BlockData)) - if waitingBlocks == 0 { - break loop - } } } - logger.Debugf("synced %d blocks, starting process", totalBlocks) + retreiveBlocksSeconds := time.Since(startTime).Seconds() + logger.Debugf("🔽 retrieved %d blocks, took: %.2f seconds, starting process...", totalBlocks, retreiveBlocksSeconds) if len(syncingChain) >= 2 { // ensuring the parents are in the right place parentElement := syncingChain[0] diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index 09a62995de..68be2dbac7 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -198,8 +198,8 @@ func TestChainSync_setPeerHead(t *testing.T) { workerPool := newSyncWorkerPool(networkMock) 
diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index 09a62995de..68be2dbac7 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -198,8 +198,8 @@ func TestChainSync_setPeerHead(t *testing.T) { workerPool := newSyncWorkerPool(networkMock) workerPool.workers = map[peer.ID]*peerSyncWorker{ peer.ID("peer-test"): { - status: punished, - punishedTime: time.Now().Add(3 * time.Hour), + status: punished, + punishmentTime: time.Now().Add(3 * time.Hour), }, } @@ -219,8 +219,8 @@ func TestChainSync_setPeerHead(t *testing.T) { workerPool := newSyncWorkerPool(networkMock) workerPool.workers = map[peer.ID]*peerSyncWorker{ peer.ID("peer-test"): { - status: punished, - punishedTime: time.Now().Add(-3 * time.Hour), + status: punished, + punishmentTime: time.Now().Add(-3 * time.Hour), }, } diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 59524925fb..7d7c375b3f 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -18,8 +18,8 @@ const ( ) const ( - ignorePeerBaseTimeout = time.Minute - maxRequestsAllowed uint = 40 + punishmentBaseTimeout = 5 * time.Minute + maxRequestsAllowed uint = 60 ) type syncTask struct { @@ -36,8 +36,9 @@ type syncTaskResult struct { } type peerSyncWorker struct { - status byte - punishedTime time.Time + status byte + timesPunished int + punishmentTime time.Time } type syncWorkerPool struct { @@ -85,15 +86,6 @@ func (s *syncWorkerPool) fromBlockAnnounce(who peer.ID) { } func (s *syncWorkerPool) newPeer(who peer.ID, isFromBlockAnnounce bool) { + if _, ok := s.ignorePeers[who]; ok { + return + } + peerSync, has := s.workers[who] if !has { peerSync = &peerSyncWorker{status: available} @@ -94,7 +99,7 @@ func (s *syncWorkerPool) newPeer(who peer.ID, isFromBlockAnnounce bool) { } // check if the punishment is not valid - if peerSync.status == punished && peerSync.punishedTime.Before(time.Now()) { + if peerSync.status == punished && peerSync.punishmentTime.Before(time.Now()) { s.workers[who] = &peerSyncWorker{status: available} } } @@ -124,14 +129,19 @@ func (s *syncWorkerPool) punishPeer(who peer.ID) { s.l.Lock() defer s.l.Unlock() - _, has := s.workers[who] + worker, has := s.workers[who] if !has { return } + timesPunished := worker.timesPunished + 1 + punishmentTime := time.Duration(timesPunished) * punishmentBaseTimeout + logger.Debugf("⏱️ punishment time for peer %s: %.2fs", who, punishmentTime.Seconds()) + s.workers[who] = &peerSyncWorker{ - status: punished, - punishedTime: time.Now().Add(ignorePeerBaseTimeout), + status: punished, + timesPunished: timesPunished, + punishmentTime: time.Now().Add(punishmentTime), } } @@ -167,7 +177,7 @@ func (s *syncWorkerPool) getAvailablePeer() peer.ID { // if the punishedTime has passed then we mark it // as available and notify it availability if needed // otherwise we keep the peer in the punishment and don't notify - if peerSync.punishedTime.Before(time.Now()) { + if peerSync.punishmentTime.Before(time.Now()) { return peerID } case available: From a26b8456be4fa8b5024b55528d4d281c093db79b Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 30 May 2023 11:53:22 -0400 Subject: [PATCH 047/140] chore: remove `maxRequestsAllowed` restriction --- dot/sync/chain_sync.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index fd2627d864..43c509456d 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -463,9 +463,10 @@ func (cs *chainSync) executeBootstrapSync() error { // so we limit to `maxRequestAllowed` to avoid the error: // cannot reserve outbound connection: resource limit exceeded availableWorkers := cs.workerPool.totalWorkers() - if availableWorkers > maxRequestsAllowed { - availableWorkers = maxRequestsAllowed - } + + // if availableWorkers > 
maxRequestsAllowed { + // availableWorkers = maxRequestsAllowed + // } // targetBlockNumber is the virtual target we will request, however // we should bound it to the real target which is collected through From 90606abbaa0e8244b818cd6c0072d2e5df432055 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 30 May 2023 16:32:49 -0400 Subject: [PATCH 048/140] chore: remove unneeded deltas on westend chain --- chain/westend/defaults.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/chain/westend/defaults.go b/chain/westend/defaults.go index 3f9e0ade53..f6dcb2bfe5 100644 --- a/chain/westend/defaults.go +++ b/chain/westend/defaults.go @@ -29,9 +29,6 @@ func DefaultConfig() *cfg.Config { config.Core.GrandpaAuthority = false config.Core.Role = 1 config.Network.NoMDNS = false - config.Log.Digest = "trace" - config.Log.Sync = "trace" - config.Network.MaxPeers = 1024 return config } From bc880641bff2e591704d128442905ded99ec5299 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 30 May 2023 17:47:21 -0400 Subject: [PATCH 049/140] chore: addressing comments --- dot/network/discovery.go | 3 +-- dot/sync/chain_sync.go | 7 +------ dot/sync/chain_sync_test.go | 2 +- 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/dot/network/discovery.go b/dot/network/discovery.go index 1de63ae49a..ca9d47adb2 100644 --- a/dot/network/discovery.go +++ b/dot/network/discovery.go @@ -212,8 +212,7 @@ func (d *discovery) findPeers() { continue } - //fmt.Printf("%v\n", peer.Addrs) - //logger.Infof("found new peer %s via DHT", peer.ID) + logger.Tracef("found new peer %s via DHT", peer.ID) d.h.Peerstore().AddAddrs(peer.ID, peer.Addrs, peerstore.PermanentAddrTTL) d.handler.AddPeer(0, peer.ID) } diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 43c509456d..56ed246836 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -464,10 +464,6 @@ func (cs *chainSync) executeBootstrapSync() error { // cannot reserve outbound connection: resource limit exceeded availableWorkers := cs.workerPool.totalWorkers() - // if availableWorkers > maxRequestsAllowed { - // availableWorkers = maxRequestsAllowed - // } - // targetBlockNumber is the virtual target we will request, however // we should bound it to the real target which is collected through // block announces received from other peers @@ -479,7 +475,7 @@ func (cs *chainSync) executeBootstrapSync() error { if targetBlockNumber > realTarget { // basically if our virtual target is beyond the real target - // that means we are few requests far from the tip, then we + // that means we are only a few requests away, then we // calculate the correct amount of missing requests and then // change to tip sync which should take care of the rest diff := targetBlockNumber - realTarget @@ -558,7 +554,6 @@ func (cs *chainSync) handleWorkersResults(workersResults chan *syncTaskResult, s cs.workerPool.useConnectedPeers() continue - // TODO: implement a case to stop case taskResult := <-workersResults: if !idleTimer.Stop() { <-idleTimer.C diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index 68be2dbac7..94439bf56d 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -1,4 +1,4 @@ -// Copyright 2023 ChainSafe Systems (ON) +// Copyright 2021 ChainSafe Systems (ON) // SPDX-License-Identifier: LGPL-3.0-only package sync From 0e3318883239b66e18076da071aa7677f015707a Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 30 May 2023 19:35:13 -0400 Subject: [PATCH 050/140] chore: reduce number of max 
peers --- chain/westend/defaults.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/chain/westend/defaults.go b/chain/westend/defaults.go index f6dcb2bfe5..fdd296ac22 100644 --- a/chain/westend/defaults.go +++ b/chain/westend/defaults.go @@ -29,6 +29,9 @@ func DefaultConfig() *cfg.Config { config.Core.GrandpaAuthority = false config.Core.Role = 1 config.Network.NoMDNS = false + config.Log.Sync = "trace" + config.Log.Digest = "trace" + config.Network.MaxPeers = 256 return config } From 6982f2a5a601ad87dd62061532c6fb683127411b Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 31 May 2023 10:42:37 -0400 Subject: [PATCH 051/140] chore: make target number clear on `requesting_4_chunks_of_128_plus_3_blocks` test --- chain/westend/defaults.go | 3 --- dot/sync/requests_test.go | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/chain/westend/defaults.go b/chain/westend/defaults.go index fdd296ac22..f6dcb2bfe5 100644 --- a/chain/westend/defaults.go +++ b/chain/westend/defaults.go @@ -29,9 +29,6 @@ func DefaultConfig() *cfg.Config { config.Core.GrandpaAuthority = false config.Core.Role = 1 config.Network.NoMDNS = false - config.Log.Sync = "trace" - config.Log.Digest = "trace" - config.Network.MaxPeers = 256 return config } diff --git a/dot/sync/requests_test.go b/dot/sync/requests_test.go index 893c611444..15ac0fbbc9 100644 --- a/dot/sync/requests_test.go +++ b/dot/sync/requests_test.go @@ -84,7 +84,7 @@ func TestAscendingBlockRequest(t *testing.T) { "requesting_4_chunks_of_128_plus_3_blocks": { startNumber: 0, - targetNumber: 512 + 3, // 128 * 4 + targetNumber: (128 * 4) + 3, expectedBlockRequestMessage: []*network.BlockRequestMessage{ { RequestedData: bootstrapRequestData, From 22f62d7edac2dc3fb3ba49d39dab599147a21dbd Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 31 May 2023 10:48:03 -0400 Subject: [PATCH 052/140] chore: remove unneeded `blockAnnounceCh` --- dot/sync/chain_sync.go | 5 ++--- dot/sync/worker_pool.go | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 56ed246836..f9c5a550ae 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -92,8 +92,7 @@ type chainSync struct { blockState BlockState network Network - workerPool *syncWorkerPool - blockAnnounceCh chan announcedBlock + workerPool *syncWorkerPool // tracks the latest state we know of from our peers, // ie. their best block hash and number @@ -157,7 +156,6 @@ func newChainSync(cfg chainSyncConfig) *chainSync { minPeers: cfg.minPeers, slotDuration: cfg.slotDuration, workerPool: newSyncWorkerPool(cfg.net), - blockAnnounceCh: make(chan announcedBlock, cfg.maxPeers), badBlocks: cfg.badBlocks, } } @@ -309,6 +307,7 @@ func (cs *chainSync) requestImportedBlock(announce announcedBlock) error { } // ignore the block if it has the same or lower number + // TODO: is it following the protocol to send a blockAnnounce with number < highestFinalized number? 
if announcedNumber <= highestFinalizedHeader.Number { return nil } diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 7d7c375b3f..d33b235217 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -186,7 +186,7 @@ func (s *syncWorkerPool) getAvailablePeer() peer.ID { } } - //could not found an available peer to dispatch + //could not find an available peer to dispatch return peer.ID("") } From 35197c565ef23f5d302aa68f06d206d9b849db53 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 31 May 2023 20:09:03 -0400 Subject: [PATCH 053/140] chore: reduce the amount of blocks synced at the same time --- chain/westend/defaults.go | 3 +++ dot/sync/chain_sync.go | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/chain/westend/defaults.go b/chain/westend/defaults.go index f6dcb2bfe5..241ad8cc2a 100644 --- a/chain/westend/defaults.go +++ b/chain/westend/defaults.go @@ -30,5 +30,8 @@ func DefaultConfig() *cfg.Config { config.Core.Role = 1 config.Network.NoMDNS = false + config.Log.Digest = "trace" + config.Log.Sync = "trace" + return config } diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index f9c5a550ae..2c4cf3f37e 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -457,11 +457,15 @@ func (cs *chainSync) executeBootstrapSync() error { } startRequestAt := bestBlockHeader.Number + 1 + const maxRequestsAllowed = 50 // we build the set of requests based on the amount of available peers // in the worker pool; if we have more peers than `maxRequestsAllowed` // we limit to `maxRequestsAllowed` to avoid the error: // cannot reserve outbound connection: resource limit exceeded availableWorkers := cs.workerPool.totalWorkers() + if availableWorkers > maxRequestsAllowed { + availableWorkers = maxRequestsAllowed + } From 1b991b3cfda3bb3fb758b3a0c15fd88ce6d8da25 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 5 Jun 2023 11:04:33 -0400 Subject: [PATCH 054/140] chore: merge development --- cmd/testcases/main.go | 9 +- dot/core/messages_integration_test.go | 7 +- dot/core/service_test.go | 82 +- dot/rpc/modules/author_integration_test.go | 1 - dot/rpc/modules/author_test.go | 2 +- dot/rpc/modules/chain_test.go | 2 +- dot/rpc/modules/childstate_test.go | 18 +- dot/rpc/modules/offchain_integration_test.go | 12 +- dot/rpc/modules/payment_integration_test.go | 8 +- dot/rpc/modules/payment_test.go | 6 +- dot/rpc/modules/state_integration_test.go | 6 +- dot/rpc/modules/state_test.go | 2 +- dot/rpc/modules/system_integration_test.go | 6 +- dot/rpc/modules/system_test.go | 4 +- .../listeners_integration_test.go | 5 +- .../websocket_integration_test.go | 3 +- dot/rpc/websocket_integration_test.go | 5 +- dot/services_integration_test.go | 15 +- dot/state/block_test.go | 18 +- dot/state/grandpa_test.go | 6 +- dot/state/service_integration_test.go | 8 +- dot/state/transaction_test.go | 2 +- dot/sync/benchmark_test.go | 239 ++++ dot/sync/block_queue_test.go | 252 ++++ dot/sync/chain_processor_test.go | 1181 +++++++++++++++++ internal/trie/node/branch_encode_test.go | 2 +- internal/trie/node/header_test.go | 2 +- lib/babe/verify_test.go | 4 +- lib/grandpa/commits_tracker_test.go | 8 +- .../message_handler_integration_test.go | 21 - lib/grandpa/network_integration_test.go | 6 - lib/grandpa/votes_tracker_test.go | 12 +- lib/runtime/wasmer/config_test.go | 4 +- lib/trie/trie_test.go | 6 +- pkg/scale/encode_test.go |
2 +- tests/rpc/rpc_05-state_test.go | 16 +- 36 files changed, 1813 insertions(+), 169 deletions(-) create mode 100644 dot/sync/benchmark_test.go create mode 100644 dot/sync/block_queue_test.go create mode 100644 dot/sync/chain_processor_test.go diff --git a/cmd/testcases/main.go b/cmd/testcases/main.go index b01774a2e9..1887432f50 100644 --- a/cmd/testcases/main.go +++ b/cmd/testcases/main.go @@ -23,9 +23,10 @@ func main() { } var ( - regexMapStringKeyWithSpaces = regexp.MustCompile(`\t".+ .+"?: \{`) - regexSliceStringWithSpaces = regexp.MustCompile(`(name|test)( |\t)*: ".+ .+",`) - regexStringWithSpaces = regexp.MustCompile(`".+( .+)+"`) + regexSubtestStringWithSpaces = regexp.MustCompile(`\tt\.Run\(".+ .+"?\)`) + regexMapStringKeyWithSpaces = regexp.MustCompile(`\t".+ .+"?: \{`) + regexSliceStringWithSpaces = regexp.MustCompile(`(name|test)( |\t)*: ".+ .+",`) + regexStringWithSpaces = regexp.MustCompile(`".+( .+)+"`) ) func walk(path string, entry fs.DirEntry, err error) error { @@ -60,6 +61,8 @@ func walk(path string, entry fs.DirEntry, err error) error { var toReplace string switch { + case regexSubtestStringWithSpaces.MatchString(line): + toReplace = regexSubtestStringWithSpaces.FindString(line) case regexMapStringKeyWithSpaces.MatchString(line): toReplace = regexMapStringKeyWithSpaces.FindString(line) case regexSliceStringWithSpaces.MatchString(line): diff --git a/dot/core/messages_integration_test.go b/dot/core/messages_integration_test.go index aa2c0e2968..038e5dfb9b 100644 --- a/dot/core/messages_integration_test.go +++ b/dot/core/messages_integration_test.go @@ -137,15 +137,14 @@ func TestService_HandleTransactionMessage(t *testing.T) { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() + telemetryMock.EXPECT().SendMessage(gomock.Any()) net := NewMockNetwork(ctrl) - net.EXPECT().GossipMessage(gomock.AssignableToTypeOf(new(network.TransactionMessage))).AnyTimes() - net.EXPECT().IsSynced().Return(true).AnyTimes() + net.EXPECT().IsSynced().Return(true).Times(2) net.EXPECT().ReportPeer( gomock.AssignableToTypeOf(peerset.ReputationChange{}), gomock.AssignableToTypeOf(peer.ID("")), - ).AnyTimes() + ) cfg := &Config{ Keystore: ks, diff --git a/dot/core/service_test.go b/dot/core/service_test.go index b7dffb664e..66ee3a8485 100644 --- a/dot/core/service_test.go +++ b/dot/core/service_test.go @@ -344,13 +344,13 @@ func Test_Service_handleBlock(t *testing.T) { } } - t.Run("nil input", func(t *testing.T) { + t.Run("nil_input", func(t *testing.T) { t.Parallel() service := &Service{} execTest(t, service, nil, nil, ErrNilBlockHandlerParameter) }) - t.Run("storeTrie error", func(t *testing.T) { + t.Run("storeTrie_error", func(t *testing.T) { t.Parallel() trieState := rtstorage.NewTrieState(nil) @@ -366,7 +366,7 @@ func Test_Service_handleBlock(t *testing.T) { execTest(t, service, &block, trieState, errTestDummyError) }) - t.Run("addBlock quit error", func(t *testing.T) { + t.Run("addBlock_quit_error", func(t *testing.T) { t.Parallel() trieState := rtstorage.NewTrieState(nil) @@ -387,7 +387,7 @@ func Test_Service_handleBlock(t *testing.T) { execTest(t, service, &block, trieState, errTestDummyError) }) - t.Run("addBlock parent not found error", func(t *testing.T) { + t.Run("addBlock_parent_not_found_error", func(t *testing.T) { t.Parallel() trieState := rtstorage.NewTrieState(nil) @@ -408,7 +408,7 @@ func Test_Service_handleBlock(t *testing.T) { execTest(t, service, &block, trieState, blocktree.ErrParentNotFound) 
}) - t.Run("addBlock error continue", func(t *testing.T) { + t.Run("addBlock_error_continue", func(t *testing.T) { t.Parallel() trieState := rtstorage.NewTrieState(nil) @@ -430,7 +430,7 @@ func Test_Service_handleBlock(t *testing.T) { execTest(t, service, &block, trieState, errTestDummyError) }) - t.Run("handle runtime changes error", func(t *testing.T) { + t.Run("handle_runtime_changes_error", func(t *testing.T) { t.Parallel() trieState := rtstorage.NewTrieState(nil) @@ -455,7 +455,7 @@ func Test_Service_handleBlock(t *testing.T) { execTest(t, service, &block, trieState, errTestDummyError) }) - t.Run("code substitution ok", func(t *testing.T) { + t.Run("code_substitution_ok", func(t *testing.T) { t.Parallel() trieState := rtstorage.NewTrieState(nil) @@ -490,13 +490,13 @@ func Test_Service_HandleBlockProduced(t *testing.T) { assert.EqualError(t, err, "handling block: "+expErr.Error()) } } - t.Run("nil input", func(t *testing.T) { + t.Run("nil_input", func(t *testing.T) { t.Parallel() service := &Service{} execTest(t, service, nil, nil, ErrNilBlockHandlerParameter) }) - t.Run("happy path", func(t *testing.T) { + t.Run("happy_path", func(t *testing.T) { t.Parallel() trieState := rtstorage.NewTrieState(nil) @@ -544,7 +544,7 @@ func Test_Service_HandleBlockProduced(t *testing.T) { func Test_Service_maintainTransactionPool(t *testing.T) { t.Parallel() - t.Run("Validate Transaction err", func(t *testing.T) { + t.Run("Validate_Transaction_err", func(t *testing.T) { t.Parallel() testHeader := types.NewEmptyHeader() block := types.NewBlock(*testHeader, *types.NewBody([]types.Extrinsic{[]byte{21}})) @@ -608,7 +608,7 @@ func Test_Service_maintainTransactionPool(t *testing.T) { require.NoError(t, err) }) - t.Run("Validate Transaction ok", func(t *testing.T) { + t.Run("Validate_Transaction_ok", func(t *testing.T) { t.Parallel() testHeader := types.NewEmptyHeader() block := types.NewBlock(*testHeader, *types.NewBody([]types.Extrinsic{[]byte{21}})) @@ -678,7 +678,7 @@ func Test_Service_maintainTransactionPool(t *testing.T) { func Test_Service_handleBlocksAsync(t *testing.T) { t.Parallel() - t.Run("cancelled context", func(t *testing.T) { + t.Run("cancelled_context", func(t *testing.T) { t.Parallel() blockAddChan := make(chan *types.Block) ctx, cancel := context.WithCancel(context.Background()) @@ -690,7 +690,7 @@ func Test_Service_handleBlocksAsync(t *testing.T) { service.handleBlocksAsync() }) - t.Run("channel not ok", func(t *testing.T) { + t.Run("channel_not_ok", func(t *testing.T) { t.Parallel() blockAddChan := make(chan *types.Block) close(blockAddChan) @@ -701,7 +701,7 @@ func Test_Service_handleBlocksAsync(t *testing.T) { service.handleBlocksAsync() }) - t.Run("nil block", func(t *testing.T) { + t.Run("nil_block", func(t *testing.T) { t.Parallel() blockAddChan := make(chan *types.Block) go func() { @@ -715,7 +715,7 @@ func Test_Service_handleBlocksAsync(t *testing.T) { service.handleBlocksAsync() }) - t.Run("handleChainReorg error", func(t *testing.T) { + t.Run("handleChainReorg_error", func(t *testing.T) { t.Parallel() testHeader := types.NewEmptyHeader() @@ -763,7 +763,7 @@ func TestService_handleChainReorg(t *testing.T) { testValidity := &transaction.Validity{Propagate: true} vtx := transaction.NewValidTransaction(ext, testValidity) - t.Run("highest common ancestor err", func(t *testing.T) { + t.Run("highest_common_ancestor_err", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) mockBlockState := NewMockBlockState(ctrl) @@ -776,7 +776,7 @@ func TestService_handleChainReorg(t 
*testing.T) { execTest(t, service, testPrevHash, testCurrentHash, errDummyErr) }) - t.Run("highest common ancestor err", func(t *testing.T) { + t.Run("highest_common_ancestor_err", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) mockBlockState := NewMockBlockState(ctrl) @@ -789,7 +789,7 @@ func TestService_handleChainReorg(t *testing.T) { execTest(t, service, testPrevHash, testCurrentHash, errDummyErr) }) - t.Run("ancestor eq priv", func(t *testing.T) { + t.Run("ancestor_eq_priv", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) mockBlockState := NewMockBlockState(ctrl) @@ -802,7 +802,7 @@ func TestService_handleChainReorg(t *testing.T) { execTest(t, service, testPrevHash, testCurrentHash, nil) }) - t.Run("subchain err", func(t *testing.T) { + t.Run("subchain_err", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) mockBlockState := NewMockBlockState(ctrl) @@ -816,7 +816,7 @@ func TestService_handleChainReorg(t *testing.T) { execTest(t, service, testPrevHash, testCurrentHash, errDummyErr) }) - t.Run("empty subchain", func(t *testing.T) { + t.Run("empty_subchain", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) mockBlockState := NewMockBlockState(ctrl) @@ -830,7 +830,7 @@ func TestService_handleChainReorg(t *testing.T) { execTest(t, service, testPrevHash, testCurrentHash, nil) }) - t.Run("get runtime err", func(t *testing.T) { + t.Run("get_runtime_err", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) mockBlockState := NewMockBlockState(ctrl) @@ -846,7 +846,7 @@ func TestService_handleChainReorg(t *testing.T) { execTest(t, service, testPrevHash, testCurrentHash, errDummyErr) }) - t.Run("invalid transaction", func(t *testing.T) { + t.Run("invalid_transaction", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) @@ -886,7 +886,7 @@ func TestService_handleChainReorg(t *testing.T) { execTest(t, service, testPrevHash, testCurrentHash, nil) }) - t.Run("happy path", func(t *testing.T) { + t.Run("happy_path", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) runtimeMockOk := NewMockInstance(ctrl) @@ -1051,7 +1051,7 @@ func TestService_DecodeSessionKeys(t *testing.T) { assert.Equal(t, exp, res) } - t.Run("ok case", func(t *testing.T) { + t.Run("ok_case", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) runtimeMock := NewMockInstance(ctrl) @@ -1065,7 +1065,7 @@ func TestService_DecodeSessionKeys(t *testing.T) { execTest(t, service, testEncKeys, testEncKeys, nil) }) - t.Run("err case", func(t *testing.T) { + t.Run("err_case", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) mockBlockState := NewMockBlockState(ctrl) @@ -1105,7 +1105,7 @@ func TestServiceGetRuntimeVersion(t *testing.T) { assert.Equal(t, exp, res) } - t.Run("get state root err", func(t *testing.T) { + t.Run("get_state_root_err", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) mockStorageState := NewMockStorageState(ctrl) @@ -1117,7 +1117,7 @@ func TestServiceGetRuntimeVersion(t *testing.T) { execTest(t, service, &common.Hash{}, runtime.Version{}, errDummyErr, expectedErrMessage) }) - t.Run("trie state err", func(t *testing.T) { + t.Run("trie_state_err", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) mockStorageState := NewMockStorageState(ctrl) @@ -1130,7 +1130,7 @@ func TestServiceGetRuntimeVersion(t *testing.T) { execTest(t, service, &common.Hash{}, runtime.Version{}, errDummyErr, expectedErrMessage) }) - t.Run("get runtime err", func(t *testing.T) { + 
t.Run("get_runtime_err", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) mockStorageState := NewMockStorageState(ctrl) @@ -1147,7 +1147,7 @@ func TestServiceGetRuntimeVersion(t *testing.T) { execTest(t, service, &common.Hash{}, runtime.Version{}, errDummyErr, expectedErrMessage) }) - t.Run("happy path", func(t *testing.T) { + t.Run("happy_path", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) mockStorageState := NewMockStorageState(ctrl) @@ -1184,13 +1184,13 @@ func TestServiceHandleSubmittedExtrinsic(t *testing.T) { } } - t.Run("nil network", func(t *testing.T) { + t.Run("nil_network", func(t *testing.T) { t.Parallel() service := &Service{} execTest(t, service, nil, nil) }) - t.Run("trie state err", func(t *testing.T) { + t.Run("trie_state_err", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) mockStorageState := NewMockStorageState(ctrl) @@ -1210,7 +1210,7 @@ func TestServiceHandleSubmittedExtrinsic(t *testing.T) { execTest(t, service, nil, errDummyErr) }) - t.Run("get runtime err", func(t *testing.T) { + t.Run("get_runtime_err", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) @@ -1233,7 +1233,7 @@ func TestServiceHandleSubmittedExtrinsic(t *testing.T) { execTest(t, service, nil, errDummyErr) }) - t.Run("validate txn err", func(t *testing.T) { + t.Run("validate_txn_err", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) mockBlockState := NewMockBlockState(ctrl) @@ -1273,7 +1273,7 @@ func TestServiceHandleSubmittedExtrinsic(t *testing.T) { execTest(t, service, types.Extrinsic{}, errDummyErr) }) - t.Run("happy path", func(t *testing.T) { + t.Run("happy_path", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) @@ -1330,7 +1330,7 @@ func TestServiceGetMetadata(t *testing.T) { assert.Equal(t, exp, res) } - t.Run("get state root error", func(t *testing.T) { + t.Run("get_state_root_error", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) mockStorageState := NewMockStorageState(ctrl) @@ -1342,7 +1342,7 @@ func TestServiceGetMetadata(t *testing.T) { execTest(t, service, &common.Hash{}, nil, errDummyErr, expectedErrMessage) }) - t.Run("trie state error", func(t *testing.T) { + t.Run("trie_state_error", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) mockStorageState := NewMockStorageState(ctrl) @@ -1354,7 +1354,7 @@ func TestServiceGetMetadata(t *testing.T) { execTest(t, service, nil, nil, errDummyErr, expectedErrMessage) }) - t.Run("get runtime error", func(t *testing.T) { + t.Run("get_runtime_error", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) mockStorageState := NewMockStorageState(ctrl) @@ -1370,7 +1370,7 @@ func TestServiceGetMetadata(t *testing.T) { execTest(t, service, nil, nil, errDummyErr, expectedErrMessage) }) - t.Run("happy path", func(t *testing.T) { + t.Run("happy_path", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) mockStorageState := NewMockStorageState(ctrl) @@ -1403,7 +1403,7 @@ func TestService_GetReadProofAt(t *testing.T) { assert.Equal(t, expProofForKeys, resProofForKeys) } - t.Run("get block state root error", func(t *testing.T) { + t.Run("get_block_state_root_error", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) mockBlockState := NewMockBlockState(ctrl) @@ -1415,7 +1415,7 @@ func TestService_GetReadProofAt(t *testing.T) { execTest(t, service, common.Hash{}, nil, common.Hash{}, nil, errDummyErr) }) - t.Run("generate trie proof error", func(t *testing.T) { + 
t.Run("generate_trie_proof_error", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) mockBlockState := NewMockBlockState(ctrl) @@ -1431,7 +1431,7 @@ func TestService_GetReadProofAt(t *testing.T) { execTest(t, service, common.Hash{}, [][]byte{{1}}, common.Hash{}, nil, errDummyErr) }) - t.Run("happy path", func(t *testing.T) { + t.Run("happy_path", func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) mockBlockState := NewMockBlockState(ctrl) diff --git a/dot/rpc/modules/author_integration_test.go b/dot/rpc/modules/author_integration_test.go index b58c90f118..dba6a25ef7 100644 --- a/dot/rpc/modules/author_integration_test.go +++ b/dot/rpc/modules/author_integration_test.go @@ -236,7 +236,6 @@ func TestAuthorModule_SubmitExtrinsic_bad_proof(t *testing.T) { ctrl := gomock.NewController(t) net2test := NewMockNetwork(ctrl) - net2test.EXPECT().GossipMessage(nil).MaxTimes(0) integrationTestController.network = net2test diff --git a/dot/rpc/modules/author_test.go b/dot/rpc/modules/author_test.go index d2dd08b623..8abf606b95 100644 --- a/dot/rpc/modules/author_test.go +++ b/dot/rpc/modules/author_test.go @@ -44,7 +44,7 @@ func TestAuthorModule_HasSessionKeys(t *testing.T) { coreMockAPIOk := mocks.NewMockCoreAPI(ctrl) coreMockAPIOk.EXPECT().DecodeSessionKeys(pkeys).Return(data, nil) coreMockAPIOk.EXPECT().HasKey(gomock.Any(), gomock.Any()). - Return(true, nil).AnyTimes() + Return(true, nil).Times(4) coreMockAPIErr := mocks.NewMockCoreAPI(ctrl) coreMockAPIErr.EXPECT().DecodeSessionKeys(pkeys).Return(data, nil) diff --git a/dot/rpc/modules/chain_test.go b/dot/rpc/modules/chain_test.go index 71607b36c3..61a20b7c1c 100644 --- a/dot/rpc/modules/chain_test.go +++ b/dot/rpc/modules/chain_test.go @@ -126,7 +126,7 @@ func TestChainModule_GetBlockHash(t *testing.T) { mockBlockAPI := mocks.NewMockBlockAPI(ctrl) mockBlockAPI.EXPECT().BestBlockHash().Return(testHash) mockBlockAPI.EXPECT().GetHashByNumber(uint(21)). - Return(testHash, nil).AnyTimes() + Return(testHash, nil).Times(2) mockBlockAPIErr := mocks.NewMockBlockAPI(ctrl) mockBlockAPIErr.EXPECT().GetHashByNumber(uint(21)). diff --git a/dot/rpc/modules/childstate_test.go b/dot/rpc/modules/childstate_test.go index 3b17ec7357..c84df630ce 100644 --- a/dot/rpc/modules/childstate_test.go +++ b/dot/rpc/modules/childstate_test.go @@ -58,11 +58,11 @@ func TestChildStateModule_GetKeys(t *testing.T) { mockBlockAPI := apimocks.NewMockBlockAPI(ctrl) hash := common.MustHexToHash("0x3aa96b0149b6ca3688878bdbd19464448624136398e3ce45b9e755d3ab61355a") - mockBlockAPI.EXPECT().BestBlockHash().Return(hash).AnyTimes() + mockBlockAPI.EXPECT().BestBlockHash().Return(hash).Times(2) - mockStorageAPI.EXPECT().GetStateRootFromBlock(&hash).Return(&sr, nil).AnyTimes() + mockStorageAPI.EXPECT().GetStateRootFromBlock(&hash).Return(&sr, nil).Times(2) mockStorageAPI.EXPECT().GetStorageChild(&sr, []byte(":child_storage_key")). - Return(tr, nil).AnyTimes() + Return(tr, nil).Times(2) mockErrorStorageAPI1.EXPECT().GetStateRootFromBlock(&common.Hash{}).Return(nil, nil) mockErrorStorageAPI1.EXPECT().GetStorageChild((*common.Hash)(nil), []byte(nil)). 
@@ -171,9 +171,9 @@ func TestChildStateModule_GetStorageSize(t *testing.T) { hash := common.MustHexToHash("0x3aa96b0149b6ca3688878bdbd19464448624136398e3ce45b9e755d3ab61355a") mockBlockAPI.EXPECT().BestBlockHash().Return(hash) - mockStorageAPI.EXPECT().GetStateRootFromBlock(&hash).Return(&sr, nil).AnyTimes() + mockStorageAPI.EXPECT().GetStateRootFromBlock(&hash).Return(&sr, nil).Times(2) mockStorageAPI.EXPECT().GetStorageFromChild(&sr, []byte(":child_storage_key"), []byte(":child_first")). - Return([]byte(""), nil).AnyTimes() + Return([]byte(""), nil).Times(2) mockErrorStorageAPI1.EXPECT().GetStateRootFromBlock(&hash).Return(nil, nil) mockErrorStorageAPI1.EXPECT().GetStorageFromChild((*common.Hash)(nil), []byte(nil), []byte(nil)). @@ -284,9 +284,9 @@ func TestChildStateModule_GetStorageHash(t *testing.T) { hash := common.MustHexToHash("0x3aa96b0149b6ca3688878bdbd19464448624136398e3ce45b9e755d3ab61355a") mockBlockAPI.EXPECT().BestBlockHash().Return(hash) - mockStorageAPI.EXPECT().GetStateRootFromBlock(&hash).Return(&sr, nil).AnyTimes() + mockStorageAPI.EXPECT().GetStateRootFromBlock(&hash).Return(&sr, nil).Times(2) mockStorageAPI.EXPECT().GetStorageFromChild(&sr, []byte(":child_storage_key"), []byte(":child_first")). - Return([]byte(""), nil).AnyTimes() + Return([]byte(""), nil).Times(2) mockErrorStorageAPI1.EXPECT().GetStateRootFromBlock(&hash).Return(nil, nil) mockErrorStorageAPI1.EXPECT().GetStorageFromChild((*common.Hash)(nil), []byte(nil), []byte(nil)). @@ -397,9 +397,9 @@ func TestChildStateModule_GetStorage(t *testing.T) { hash := common.MustHexToHash("0x3aa96b0149b6ca3688878bdbd19464448624136398e3ce45b9e755d3ab61355a") mockBlockAPI.EXPECT().BestBlockHash().Return(hash) - mockStorageAPI.EXPECT().GetStateRootFromBlock(&hash).Return(&sr, nil).AnyTimes() + mockStorageAPI.EXPECT().GetStateRootFromBlock(&hash).Return(&sr, nil).Times(2) mockStorageAPI.EXPECT().GetStorageFromChild(&sr, []byte(":child_storage_key"), []byte(":child_first")). - Return([]byte("test"), nil).AnyTimes() + Return([]byte("test"), nil).Times(2) mockErrorStorageAPI1.EXPECT().GetStateRootFromBlock(&hash).Return(nil, nil) mockErrorStorageAPI1.EXPECT().GetStorageFromChild((*common.Hash)(nil), []byte(nil), []byte(nil)). 
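A recurring change in this merge commit is tightening gomock expectations from the open-ended `AnyTimes()` to exact `Times(n)` counts. Below is a minimal sketch of why the exact count is stricter; it assumes a mockgen-generated `MockBlockAPI` like the ones these tests already use, and the test name and expected hash are illustrative rather than taken from this patch:

package modules

import (
	"testing"

	"github.com/ChainSafe/gossamer/lib/common"
	"github.com/golang/mock/gomock"
)

func TestBestBlockHashExactCallCount(t *testing.T) {
	ctrl := gomock.NewController(t)

	// assumed mockgen-generated mock, as elsewhere in this package
	blockAPI := NewMockBlockAPI(ctrl)

	// With AnyTimes(), a regression that silently dropped one of the
	// two expected calls would still pass. With Times(2), gomock fails
	// the test on a third call, and the controller's cleanup (registered
	// by gomock.NewController when given a *testing.T) fails it if the
	// method was called fewer than two times.
	blockAPI.EXPECT().BestBlockHash().Return(common.Hash{1}).Times(2)

	_ = blockAPI.BestBlockHash()
	_ = blockAPI.BestBlockHash()
}

The counts chosen in the diffs above (for example `Times(2)` on `GetStateRootFromBlock`) simply pin the number of calls each code path is known to make, so an unintended extra lookup surfaces as a test failure instead of passing silently.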
diff --git a/dot/rpc/modules/offchain_integration_test.go b/dot/rpc/modules/offchain_integration_test.go index ffdac98cfb..a1d1dec1d8 100644 --- a/dot/rpc/modules/offchain_integration_test.go +++ b/dot/rpc/modules/offchain_integration_test.go @@ -17,7 +17,7 @@ import ( ) func Test_OffchainModule_LocalStorageGet(t *testing.T) { - t.Run("get local error", func(t *testing.T) { + t.Run("get_local_error", func(t *testing.T) { ctrl := gomock.NewController(t) runtimeStorage := mocks.NewMockRuntimeStorageAPI(ctrl) @@ -38,7 +38,7 @@ func Test_OffchainModule_LocalStorageGet(t *testing.T) { assert.ErrorIs(t, err, errTest) }) - t.Run("local kind", func(t *testing.T) { + t.Run("local_kind", func(t *testing.T) { ctrl := gomock.NewController(t) runtimeStorage := mocks.NewMockRuntimeStorageAPI(ctrl) @@ -60,7 +60,7 @@ func Test_OffchainModule_LocalStorageGet(t *testing.T) { assert.Equal(t, response, expectedResponse) }) - t.Run("persistent kind", func(t *testing.T) { + t.Run("persistent_kind", func(t *testing.T) { ctrl := gomock.NewController(t) runtimeStorage := mocks.NewMockRuntimeStorageAPI(ctrl) @@ -104,7 +104,7 @@ func TestOffchainStorage_OtherKind(t *testing.T) { func Test_OffchainModule_LocalStorageSet(t *testing.T) { const keyHex, valueHex = "0x11111111111111", "0x22222222222222" - t.Run("set local error", func(t *testing.T) { + t.Run("set_local_error", func(t *testing.T) { ctrl := gomock.NewController(t) runtimeStorage := mocks.NewMockRuntimeStorageAPI(ctrl) @@ -126,7 +126,7 @@ func Test_OffchainModule_LocalStorageSet(t *testing.T) { assert.ErrorIs(t, err, errTest) }) - t.Run("local kind", func(t *testing.T) { + t.Run("local_kind", func(t *testing.T) { ctrl := gomock.NewController(t) runtimeStorage := mocks.NewMockRuntimeStorageAPI(ctrl) @@ -148,7 +148,7 @@ func Test_OffchainModule_LocalStorageSet(t *testing.T) { assert.Empty(t, response) }) - t.Run("persistent kind", func(t *testing.T) { + t.Run("persistent_kind", func(t *testing.T) { ctrl := gomock.NewController(t) runtimeStorage := mocks.NewMockRuntimeStorageAPI(ctrl) diff --git a/dot/rpc/modules/payment_integration_test.go b/dot/rpc/modules/payment_integration_test.go index 731f7b72cc..ae24fdbbf3 100644 --- a/dot/rpc/modules/payment_integration_test.go +++ b/dot/rpc/modules/payment_integration_test.go @@ -23,7 +23,7 @@ func TestPaymentQueryInfo(t *testing.T) { state := newTestStateService(t) bestBlockHash := state.Block.BestBlockHash() - t.Run("When there is no errors", func(t *testing.T) { + t.Run("When_there_is_no_errors", func(t *testing.T) { ctrl := gomock.NewController(t) mockedQueryInfo := &types.RuntimeDispatchInfo{ @@ -61,7 +61,7 @@ func TestPaymentQueryInfo(t *testing.T) { require.Equal(t, expected, res) }) - t.Run("When could not get runtime", func(t *testing.T) { + t.Run("When_could_not_get_runtime", func(t *testing.T) { ctrl := gomock.NewController(t) blockAPIMock := mocks.NewMockBlockAPI(ctrl) @@ -85,7 +85,7 @@ func TestPaymentQueryInfo(t *testing.T) { require.Equal(t, res, PaymentQueryInfoResponse{}) }) - t.Run("When PaymentQueryInfo returns error", func(t *testing.T) { + t.Run("When_PaymentQueryInfo_returns_error", func(t *testing.T) { ctrl := gomock.NewController(t) runtimeMock := mocksruntime.NewMockInstance(ctrl) @@ -110,7 +110,7 @@ func TestPaymentQueryInfo(t *testing.T) { require.Equal(t, res, PaymentQueryInfoResponse{}) }) - t.Run("When PaymentQueryInfo returns a nil info", func(t *testing.T) { + t.Run("When_PaymentQueryInfo_returns_a_nil_info", func(t *testing.T) { ctrl := gomock.NewController(t) runtimeMock := 
mocksruntime.NewMockInstance(ctrl) diff --git a/dot/rpc/modules/payment_test.go b/dot/rpc/modules/payment_test.go index 67733c42d1..d5cc0434ff 100644 --- a/dot/rpc/modules/payment_test.go +++ b/dot/rpc/modules/payment_test.go @@ -36,8 +36,8 @@ func TestPaymentModule_QueryInfo(t *testing.T) { blockErrorAPIMock1 := mocks.NewMockBlockAPI(ctrl) blockErrorAPIMock2 := mocks.NewMockBlockAPI(ctrl) - blockAPIMock.EXPECT().BestBlockHash().Return(testHash).AnyTimes() - blockAPIMock.EXPECT().GetRuntime(testHash).Return(runtimeMock, nil).AnyTimes() + blockAPIMock.EXPECT().BestBlockHash().Return(testHash).Times(2) + blockAPIMock.EXPECT().GetRuntime(testHash).Return(runtimeMock, nil).Times(3) blockAPIMock2.EXPECT().GetRuntime(testHash).Return(runtimeMock2, nil) @@ -45,7 +45,7 @@ func TestPaymentModule_QueryInfo(t *testing.T) { blockErrorAPIMock2.EXPECT().GetRuntime(testHash).Return(nil, errors.New("GetRuntime error")) - runtimeMock.EXPECT().PaymentQueryInfo(common.MustHexToBytes("0x0000")).Return(nil, nil).AnyTimes() + runtimeMock.EXPECT().PaymentQueryInfo(common.MustHexToBytes("0x0000")).Return(nil, nil).Times(2) runtimeMock2.EXPECT().PaymentQueryInfo(common.MustHexToBytes("0x0000")).Return(&types.RuntimeDispatchInfo{ Weight: uint64(21), Class: 21, diff --git a/dot/rpc/modules/state_integration_test.go b/dot/rpc/modules/state_integration_test.go index 7b7c2e5455..31ba029260 100644 --- a/dot/rpc/modules/state_integration_test.go +++ b/dot/rpc/modules/state_integration_test.go @@ -313,7 +313,7 @@ func TestStateModule_GetStorageSize(t *testing.T) { } func TestStateModule_QueryStorage(t *testing.T) { - t.Run("When starting block is empty", func(t *testing.T) { + t.Run("When_starting_block_is_empty", func(t *testing.T) { module := new(StateModule) req := new(StateStorageQueryRangeRequest) @@ -322,7 +322,7 @@ func TestStateModule_QueryStorage(t *testing.T) { require.Error(t, err, "the start block hash cannot be an empty value") }) - t.Run("When blockAPI returns error", func(t *testing.T) { + t.Run("When_blockAPI_returns_error", func(t *testing.T) { mockError := errors.New("mock test error") ctrl := gomock.NewController(t) mockBlockAPI := NewMockBlockAPI(ctrl) @@ -338,7 +338,7 @@ func TestStateModule_QueryStorage(t *testing.T) { assert.ErrorIs(t, err, mockError) }) - t.Run("When QueryStorage returns data", func(t *testing.T) { + t.Run("When_QueryStorage_returns_data", func(t *testing.T) { expectedChanges := [][2]*string{ makeChange("0x90", stringToHex("value")), makeChange("0x80", stringToHex("another value")), diff --git a/dot/rpc/modules/state_test.go b/dot/rpc/modules/state_test.go index a216d5d844..b7b0f6d31f 100644 --- a/dot/rpc/modules/state_test.go +++ b/dot/rpc/modules/state_test.go @@ -45,7 +45,7 @@ func TestStateModuleGetPairs(t *testing.T) { mockStorageAPI := mocks.NewMockStorageAPI(ctrl) mockStorageAPI.EXPECT().GetStateRootFromBlock(&hash).Return(&hash, nil) mockStorageAPI.EXPECT().GetKeysWithPrefix(&hash, common.MustHexToBytes(str)).Return([][]byte{{1}, {1}}, nil) - mockStorageAPI.EXPECT().GetStorage(&hash, []byte{1}).Return([]byte{21}, nil).AnyTimes() + mockStorageAPI.EXPECT().GetStorage(&hash, []byte{1}).Return([]byte{21}, nil).Times(2) mockStorageAPINil := mocks.NewMockStorageAPI(ctrl) mockStorageAPINil.EXPECT().GetStateRootFromBlock(&hash).Return(&hash, nil) diff --git a/dot/rpc/modules/system_integration_test.go b/dot/rpc/modules/system_integration_test.go index aab4313fb3..c7c7b90a15 100644 --- a/dot/rpc/modules/system_integration_test.go +++ b/dot/rpc/modules/system_integration_test.go @@ 
-496,7 +496,7 @@ func TestLocalPeerId(t *testing.T) { } func TestAddReservedPeer(t *testing.T) { - t.Run("Test Add and Remove reserved peers with success", func(t *testing.T) { + t.Run("Test_Add_and_Remove_reserved_peers_with_success", func(t *testing.T) { ctrl := gomock.NewController(t) networkMock := mocks.NewMockNetworkAPI(ctrl) @@ -519,7 +519,7 @@ func TestAddReservedPeer(t *testing.T) { require.Nil(t, b) }) - t.Run("Test Add and Remove reserved peers without success", func(t *testing.T) { + t.Run("Test_Add_and_Remove_reserved_peers_without_success", func(t *testing.T) { ctrl := gomock.NewController(t) networkMock := mocks.NewMockNetworkAPI(ctrl) @@ -546,7 +546,7 @@ func TestAddReservedPeer(t *testing.T) { require.Nil(t, b) }) - t.Run("Test trying to add or remove peers with empty or white space request", func(t *testing.T) { + t.Run("Test_trying_to_add_or_remove_peers_with_empty_or_white_space_request", func(t *testing.T) { sysModule := &SystemModule{} require.Error(t, sysModule.AddReservedPeer(nil, &StringRequest{String: ""}, nil)) require.Error(t, sysModule.RemoveReservedPeer(nil, &StringRequest{String: " "}, nil)) diff --git a/dot/rpc/modules/system_test.go b/dot/rpc/modules/system_test.go index 0d7e56cf5f..e3b3bd1342 100644 --- a/dot/rpc/modules/system_test.go +++ b/dot/rpc/modules/system_test.go @@ -242,11 +242,11 @@ func TestSystemModule_AccountNextIndex(t *testing.T) { } mockTxStateAPI := mocks.NewMockTransactionStateAPI(ctrl) - mockTxStateAPI.EXPECT().Pending().Return(v).AnyTimes() + mockTxStateAPI.EXPECT().Pending().Return(v).Times(5) mockCoreAPI := mocks.NewMockCoreAPI(ctrl) mockCoreAPI.EXPECT().GetMetadata((*common.Hash)(nil)). - Return(common.MustHexToBytes(testdata.NewTestMetadata()), nil).AnyTimes() + Return(common.MustHexToBytes(testdata.NewTestMetadata()), nil).Times(2) mockCoreAPIErr := mocks.NewMockCoreAPI(ctrl) mockCoreAPIErr.EXPECT().GetMetadata((*common.Hash)(nil)). 
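Patch 051 above rewrites the test target as `(128 * 4) + 3` so the decomposition named by `requesting_4_chunks_of_128_plus_3_blocks` is visible at a glance: with responses capped at 128 blocks, the range splits into four full requests plus a final request of three blocks. A standalone sketch of that arithmetic, assuming the target is treated as the count of blocks to fetch (the names below are illustrative, not gossamer's):

package main

import "fmt"

// maxBlocksPerRequest mirrors the 128-block response cap assumed by
// the ascending block requests in this series.
const maxBlocksPerRequest = 128

// splitRange reports how many full requests and how many leftover
// blocks are needed to cover `total` blocks in ascending order.
func splitRange(total uint) (fullRequests, remainder uint) {
	return total / maxBlocksPerRequest, total % maxBlocksPerRequest
}

func main() {
	const targetNumber = (128 * 4) + 3
	full, rest := splitRange(targetNumber)
	fmt.Printf("%d requests of %d blocks, plus one request of %d blocks\n",
		full, maxBlocksPerRequest, rest)
	// prints: 4 requests of 128 blocks, plus one request of 3 blocks
}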
diff --git a/dot/rpc/subscription/listeners_integration_test.go b/dot/rpc/subscription/listeners_integration_test.go index 7f0102bf73..a6d415e1ba 100644 --- a/dot/rpc/subscription/listeners_integration_test.go +++ b/dot/rpc/subscription/listeners_integration_test.go @@ -189,8 +189,7 @@ func TestExtrinsicSubmitListener_Listen(t *testing.T) { wsconn.BlockAPI = BlockAPI TxStateAPI := NewMockTransactionStateAPI(ctrl) - TxStateAPI.EXPECT().FreeStatusNotifierChannel(gomock.Any()).AnyTimes() - TxStateAPI.EXPECT().GetStatusNotifierChannel(gomock.Any()).Return(make(chan transaction.Status)).AnyTimes() + TxStateAPI.EXPECT().FreeStatusNotifierChannel(gomock.Any()) wsconn.TxStateAPI = TxStateAPI esl := ExtrinsicSubmitListener{ @@ -244,7 +243,7 @@ func TestExtrinsicSubmitListener_Listen(t *testing.T) { } func TestGrandpaJustification_Listen(t *testing.T) { - t.Run("When justification doesnt returns error", func(t *testing.T) { + t.Run("When_justification_doesnt_returns_error", func(t *testing.T) { ctrl := gomock.NewController(t) wsconn, ws, cancel := setupWSConn(t) diff --git a/dot/rpc/subscription/websocket_integration_test.go b/dot/rpc/subscription/websocket_integration_test.go index a427da8859..86c014d6d6 100644 --- a/dot/rpc/subscription/websocket_integration_test.go +++ b/dot/rpc/subscription/websocket_integration_test.go @@ -216,8 +216,7 @@ func TestWSConn_HandleConn(t *testing.T) { wsconn.BlockAPI = nil transactionStateAPI := NewMockTransactionStateAPI(ctrl) - transactionStateAPI.EXPECT().FreeStatusNotifierChannel(gomock.Any()).AnyTimes() - transactionStateAPI.EXPECT().GetStatusNotifierChannel(gomock.Any()).Return(make(chan transaction.Status)).AnyTimes() + transactionStateAPI.EXPECT().GetStatusNotifierChannel(gomock.Any()).Return(make(chan transaction.Status)).Times(2) wsconn.TxStateAPI = transactionStateAPI listner, err := wsconn.initExtrinsicWatch(0, []string{"NotHex"}) diff --git a/dot/rpc/websocket_integration_test.go b/dot/rpc/websocket_integration_test.go index 5f14e1a13b..adbadc867f 100644 --- a/dot/rpc/websocket_integration_test.go +++ b/dot/rpc/websocket_integration_test.go @@ -15,7 +15,6 @@ import ( "github.com/ChainSafe/gossamer/dot/rpc/modules" "github.com/ChainSafe/gossamer/dot/system" "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/transaction" "github.com/golang/mock/gomock" "github.com/gorilla/websocket" @@ -76,9 +75,7 @@ func TestHTTPServer_ServeHTTP(t *testing.T) { sAPI := modules.NewMockAnyStorageAPI(ctrl) TxStateAPI := NewMockTransactionStateAPI(ctrl) - TxStateAPI.EXPECT().FreeStatusNotifierChannel(gomock.Any()).AnyTimes() - TxStateAPI.EXPECT().GetStatusNotifierChannel(gomock.Any()).Return(make(chan transaction.Status)).AnyTimes() - TxStateAPI.EXPECT().AddToPool(gomock.Any()).Return(common.Hash{}).AnyTimes() + TxStateAPI.EXPECT().GetStatusNotifierChannel(gomock.Any()).Return(make(chan transaction.Status)) cfg := &HTTPServerConfig{ Modules: []string{"system", "chain"}, diff --git a/dot/services_integration_test.go b/dot/services_integration_test.go index 14297fba8e..bcf9d1d596 100644 --- a/dot/services_integration_test.go +++ b/dot/services_integration_test.go @@ -98,12 +98,15 @@ func Test_nodeBuilder_createBABEService(t *testing.T) { stateSrvc := newStateService(t, ctrl) mockBabeBuilder := NewMockServiceBuilder(ctrl) - mockBabeBuilder.EXPECT().NewServiceIFace( - gomock.AssignableToTypeOf(&babe.ServiceConfig{})). 
- DoAndReturn( - func(cfg *babe.ServiceConfig) (*babe.Service, error) { - return &babe.Service{}, nil - }).AnyTimes() + if tt.err == nil { + mockBabeBuilder.EXPECT().NewServiceIFace( + gomock.AssignableToTypeOf(&babe.ServiceConfig{})). + DoAndReturn( + func(cfg *babe.ServiceConfig) (*babe.Service, error) { + return &babe.Service{}, nil + }) + } + builder := nodeBuilder{} var got *babe.Service if tt.args.initStateService { diff --git a/dot/state/block_test.go b/dot/state/block_test.go index 6a4a1d1478..af05b5c9c3 100644 --- a/dot/state/block_test.go +++ b/dot/state/block_test.go @@ -745,7 +745,7 @@ func TestRange(t *testing.T) { newBlockState: func(t *testing.T, ctrl *gomock.Controller, genesisHeader *types.Header) *BlockState { telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() + telemetryMock.EXPECT().SendMessage(gomock.Any()).Times(2) db := NewInMemoryDB(t) @@ -775,7 +775,7 @@ func TestRange(t *testing.T) { newBlockState: func(t *testing.T, ctrl *gomock.Controller, genesisHeader *types.Header) *BlockState { telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() + telemetryMock.EXPECT().SendMessage(gomock.Any()) db := NewInMemoryDB(t) @@ -805,7 +805,7 @@ func TestRange(t *testing.T) { newBlockState: func(t *testing.T, ctrl *gomock.Controller, genesisHeader *types.Header) *BlockState { telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() + telemetryMock.EXPECT().SendMessage(gomock.Any()).Times(2) db := NewInMemoryDB(t) @@ -837,7 +837,7 @@ func TestRange(t *testing.T) { newBlockState: func(t *testing.T, ctrl *gomock.Controller, genesisHeader *types.Header) *BlockState { telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() + telemetryMock.EXPECT().SendMessage(gomock.Any()) db := NewInMemoryDB(t) blockState, err := NewBlockStateFromGenesis(db, newTriesEmpty(), genesisHeader, telemetryMock) @@ -872,7 +872,7 @@ func TestRange(t *testing.T) { newBlockState: func(t *testing.T, ctrl *gomock.Controller, genesisHeader *types.Header) *BlockState { telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() + telemetryMock.EXPECT().SendMessage(gomock.Any()) db := NewInMemoryDB(t) @@ -904,7 +904,7 @@ func TestRange(t *testing.T) { newBlockState: func(t *testing.T, ctrl *gomock.Controller, genesisHeader *types.Header) *BlockState { telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() + telemetryMock.EXPECT().SendMessage(gomock.Any()).Times(2) db := NewInMemoryDB(t) @@ -937,7 +937,7 @@ func TestRange(t *testing.T) { newBlockState: func(t *testing.T, ctrl *gomock.Controller, genesisHeader *types.Header) *BlockState { telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() + telemetryMock.EXPECT().SendMessage(gomock.Any()) db := NewInMemoryDB(t) @@ -970,7 +970,7 @@ func TestRange(t *testing.T) { newBlockState: func(t *testing.T, ctrl *gomock.Controller, genesisHeader *types.Header) *BlockState { telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() + telemetryMock.EXPECT().SendMessage(gomock.Any()).Times(2) db := NewInMemoryDB(t) @@ -1062,7 +1062,7 @@ func Test_loadHeaderFromDisk_WithGenesisBlock(t *testing.T) { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - 
telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() + telemetryMock.EXPECT().SendMessage(gomock.Any()) db := NewInMemoryDB(t) diff --git a/dot/state/grandpa_test.go b/dot/state/grandpa_test.go index 1b8531df29..53b3c5d1b8 100644 --- a/dot/state/grandpa_test.go +++ b/dot/state/grandpa_test.go @@ -127,7 +127,7 @@ func TestGrandpaState_LatestRound(t *testing.T) { func testBlockState(t *testing.T, db *chaindb.BadgerDB) *BlockState { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.AssignableToTypeOf(&telemetry.NotifyFinalized{})).Times(1) + telemetryMock.EXPECT().SendMessage(gomock.AssignableToTypeOf(&telemetry.NotifyFinalized{})) header := testGenesisHeader bs, err := NewBlockStateFromGenesis(db, newTriesEmpty(), header, telemetryMock) @@ -656,7 +656,7 @@ func TestApplyForcedChanges(t *testing.T) { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Eq(&telemetry.AfgApplyingForcedAuthoritySetChange{Block: "8"})).Times(1) + telemetryMock.EXPECT().SendMessage(gomock.Eq(&telemetry.AfgApplyingForcedAuthoritySetChange{Block: "8"})) return telemetryMock }(), @@ -1379,7 +1379,7 @@ func TestApplyScheduledChange(t *testing.T) { telemetryMock := NewMockTelemetry(ctrl) telemetryMock.EXPECT().SendMessage( gomock.Eq(&telemetry.AfgApplyingScheduledAuthoritySetChange{Block: "6"}), - ).Times(1) + ) return telemetryMock }(), diff --git a/dot/state/service_integration_test.go b/dot/state/service_integration_test.go index 9122ed01df..6ee2f9aa81 100644 --- a/dot/state/service_integration_test.go +++ b/dot/state/service_integration_test.go @@ -172,7 +172,7 @@ func TestService_StorageTriePruning(t *testing.T) { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() + telemetryMock.EXPECT().SendMessage(gomock.Any()) const retainBlocks uint = 2 config := Config{ @@ -226,7 +226,7 @@ func TestService_StorageTriePruning(t *testing.T) { func TestService_PruneStorage(t *testing.T) { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() + telemetryMock.EXPECT().SendMessage(gomock.Any()).Times(2) config := Config{ Path: t.TempDir(), @@ -307,7 +307,7 @@ func TestService_PruneStorage(t *testing.T) { func TestService_Rewind(t *testing.T) { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() + telemetryMock.EXPECT().SendMessage(gomock.Any()).Times(3) config := Config{ Path: t.TempDir(), @@ -365,7 +365,7 @@ func TestService_Rewind(t *testing.T) { func TestService_Import(t *testing.T) { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() + telemetryMock.EXPECT().SendMessage(gomock.Any()) config := Config{ Path: t.TempDir(), diff --git a/dot/state/transaction_test.go b/dot/state/transaction_test.go index 33b0967f06..af1b05844e 100644 --- a/dot/state/transaction_test.go +++ b/dot/state/transaction_test.go @@ -20,7 +20,7 @@ import ( func TestTransactionState_Pending(t *testing.T) { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() + telemetryMock.EXPECT().SendMessage(gomock.Any()).Times(5) ts := NewTransactionState(telemetryMock) diff --git a/dot/sync/benchmark_test.go 
b/dot/sync/benchmark_test.go new file mode 100644 index 0000000000..eae329300e --- /dev/null +++ b/dot/sync/benchmark_test.go @@ -0,0 +1,239 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package sync + +import ( + "container/ring" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func Test_newSyncBenchmarker(t *testing.T) { + t.Parallel() + + t.Run("10_samples_to_keep", func(t *testing.T) { + t.Parallel() + const samplesToKeep = 10 + actual := newSyncBenchmarker(samplesToKeep) + + expected := &syncBenchmarker{ + blocksPerSecond: ring.New(samplesToKeep), + samplesToKeep: samplesToKeep, + } + + assert.Equal(t, expected, actual) + }) + + t.Run("panics_on_0_sample_to_keep", func(t *testing.T) { + t.Parallel() + const samplesToKeep = 0 + assert.PanicsWithValue(t, "cannot have 0 samples to keep", func() { + newSyncBenchmarker(samplesToKeep) + }) + }) +} + +func Test_syncBenchmarker_begin(t *testing.T) { + t.Parallel() + + const startSec = 1000 + start := time.Unix(startSec, 0) + const startBlock = 10 + + b := syncBenchmarker{} + b.begin(start, startBlock) + + expected := syncBenchmarker{ + start: start, + startBlock: startBlock, + } + + assert.Equal(t, expected, b) +} + +func Test_syncBenchmarker_end(t *testing.T) { + t.Parallel() + + const startSec = 1000 + start := time.Unix(startSec, 0) + + const nowSec = 1010 + now := time.Unix(nowSec, 0) + + const ( + startBlock = 10 + endBlock = 12 + ) + + const ringCap = 3 + + blocksPerSecond := ring.New(ringCap) + blocksPerSecond.Value = 1.00 + blocksPerSecond = blocksPerSecond.Next() + + b := syncBenchmarker{ + start: start, + startBlock: startBlock, + blocksPerSecond: blocksPerSecond, + } + b.end(now, endBlock) + + expectedBlocksPerSecond := ring.New(ringCap) + expectedBlocksPerSecond.Value = 1.00 + expectedBlocksPerSecond = expectedBlocksPerSecond.Next() + expectedBlocksPerSecond.Value = 0.2 + expectedBlocksPerSecond = expectedBlocksPerSecond.Next() + + expected := syncBenchmarker{ + start: start, + startBlock: startBlock, + blocksPerSecond: expectedBlocksPerSecond, + } + + assert.Equal(t, expected, b) +} + +func Test_syncBenchmarker_average(t *testing.T) { + t.Parallel() + + testCases := map[string]struct { + values []float64 + ringCap int + average float64 + }{ + // zero size ring is not possible due to constructor check + "empty_ring": { + ringCap: 1, + }, + "single_element_in_one-size_ring": { + values: []float64{1.1}, + ringCap: 1, + average: 1.1, + }, + "single_element_in_two-size_ring": { + values: []float64{1.1}, + ringCap: 2, + average: 1.1, + }, + "two_elements_in_two-size_ring": { + values: []float64{1.0, 2.0}, + ringCap: 2, + average: 1.5, + }, + } + + for name, testCase := range testCases { + testCase := testCase + t.Run(name, func(t *testing.T) { + t.Parallel() + + blocksPerSecond := ring.New(testCase.ringCap) + for _, value := range testCase.values { + blocksPerSecond.Value = value + blocksPerSecond = blocksPerSecond.Next() + } + + benchmarker := syncBenchmarker{ + blocksPerSecond: blocksPerSecond, + samplesToKeep: testCase.ringCap, + } + + avg := benchmarker.average() + + assert.Equal(t, testCase.average, avg) + }) + } +} + +func Test_syncBenchmarker_mostRecentAverage(t *testing.T) { + t.Parallel() + + testCases := map[string]struct { + values []float64 + ringCap int + average float64 + }{ + // zero size ring is not possible due to constructor check + "empty_ring": { + ringCap: 1, + }, + "single_element_in_one-size_ring": { + values: []float64{1.1}, + ringCap: 1, + average: 
1.1, + }, + "single_element_in_two-size_ring": { + values: []float64{1.1}, + ringCap: 2, + average: 1.1, + }, + "two_elements_in_two-size_ring": { + values: []float64{1.0, 2.0}, + ringCap: 2, + average: 2.0, + }, + "three_elements_in_two-size_ring": { + values: []float64{1.0, 2.0, 3.0}, + ringCap: 2, + average: 3.0, + }, + } + + for name, testCase := range testCases { + testCase := testCase + t.Run(name, func(t *testing.T) { + t.Parallel() + + blocksPerSecond := ring.New(testCase.ringCap) + for _, value := range testCase.values { + blocksPerSecond.Value = value + blocksPerSecond = blocksPerSecond.Next() + } + + benchmarker := syncBenchmarker{ + blocksPerSecond: blocksPerSecond, + } + + avg := benchmarker.mostRecentAverage() + + assert.Equal(t, testCase.average, avg) + }) + } +} + +func Test_syncBenchmarker(t *testing.T) { + t.Parallel() + + const samplesToKeep = 5 + benchmarker := newSyncBenchmarker(samplesToKeep) + + const initialBlock = 10 + timeZero := time.Unix(0, 0) + const timeIncrement = time.Second + const baseBlocksIncrement uint = 1 + + startTime := timeZero + endTime := startTime.Add(timeIncrement) + var block uint = initialBlock + + const samples = 10 + for i := 0; i < samples; i++ { + benchmarker.begin(startTime, block) + block += baseBlocksIncrement + uint(i) + benchmarker.end(endTime, block) + + startTime = startTime.Add(timeIncrement) + endTime = startTime.Add(timeIncrement) + } + + avg := benchmarker.average() + const expectedAvg = 8.0 + assert.Equal(t, expectedAvg, avg) + + mostRecentAvg := benchmarker.mostRecentAverage() + const expectedMostRecentAvg = 10.0 + assert.Equal(t, expectedMostRecentAvg, mostRecentAvg) +} diff --git a/dot/sync/block_queue_test.go b/dot/sync/block_queue_test.go new file mode 100644 index 0000000000..cff9b181b3 --- /dev/null +++ b/dot/sync/block_queue_test.go @@ -0,0 +1,252 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package sync + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_newBlockQueue(t *testing.T) { + t.Parallel() + + const capacity = 1 + bq := newBlockQueue(capacity) + + require.NotNil(t, bq.queue) + assert.Equal(t, 1, cap(bq.queue)) + assert.Equal(t, 0, len(bq.queue)) + bq.queue = nil + + expectedBlockQueue := &blockQueue{ + hashesSet: make(map[common.Hash]struct{}, capacity), + } + assert.Equal(t, expectedBlockQueue, bq) +} + +func Test_blockQueue_push(t *testing.T) { + t.Parallel() + + const capacity = 1 + bq := newBlockQueue(capacity) + blockData := &types.BlockData{ + Hash: common.Hash{1}, + } + + bq.push(blockData) + + // cannot compare channels + require.NotNil(t, bq.queue) + assert.Len(t, bq.queue, 1) + + receivedBlockData := <-bq.queue + expectedBlockData := &types.BlockData{ + Hash: common.Hash{1}, + } + assert.Equal(t, expectedBlockData, receivedBlockData) + + bq.queue = nil + expectedBlockQueue := &blockQueue{ + hashesSet: map[common.Hash]struct{}{{1}: {}}, + } + assert.Equal(t, expectedBlockQueue, bq) +} + +func Test_blockQueue_pop(t *testing.T) { + t.Parallel() + + t.Run("context_canceled", func(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + const capacity = 1 + bq := newBlockQueue(capacity) + + blockData, err := bq.pop(ctx) + assert.Nil(t, blockData) + assert.ErrorIs(t, err, context.Canceled) + }) + + 
t.Run("get_block_data_after_waiting", func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + const capacity = 1 + bq := newBlockQueue(capacity) + + const afterDuration = 5 * time.Millisecond + time.AfterFunc(afterDuration, func() { + blockData := &types.BlockData{ + Hash: common.Hash{1}, + } + bq.push(blockData) + }) + + blockData, err := bq.pop(ctx) + + expectedBlockData := &types.BlockData{ + Hash: common.Hash{1}, + } + assert.Equal(t, expectedBlockData, blockData) + assert.NoError(t, err) + + assert.Len(t, bq.queue, 0) + bq.queue = nil + expectedBlockQueue := &blockQueue{ + hashesSet: map[common.Hash]struct{}{}, + } + assert.Equal(t, expectedBlockQueue, bq) + }) +} + +func Test_blockQueue_has(t *testing.T) { + t.Parallel() + + testCases := map[string]struct { + blockQueue *blockQueue + blockHash common.Hash + has bool + }{ + "absent": { + blockQueue: &blockQueue{ + hashesSet: map[common.Hash]struct{}{}, + }, + blockHash: common.Hash{1}, + }, + "exists": { + blockQueue: &blockQueue{ + hashesSet: map[common.Hash]struct{}{{1}: {}}, + }, + blockHash: common.Hash{1}, + has: true, + }, + } + + for name, tc := range testCases { + testCase := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + has := testCase.blockQueue.has(testCase.blockHash) + assert.Equal(t, testCase.has, has) + }) + } +} + +func Test_lockQueue_endToEnd(t *testing.T) { + t.Parallel() + + const capacity = 10 + blockQueue := newBlockQueue(capacity) + + newBlockData := func(i byte) *types.BlockData { + return &types.BlockData{ + Hash: common.Hash{i}, + } + } + + blockQueue.push(newBlockData(1)) + blockQueue.push(newBlockData(2)) + blockQueue.push(newBlockData(3)) + + blockData, err := blockQueue.pop(context.Background()) + assert.Equal(t, newBlockData(1), blockData) + assert.NoError(t, err) + + has := blockQueue.has(newBlockData(2).Hash) + assert.True(t, has) + has = blockQueue.has(newBlockData(3).Hash) + assert.True(t, has) + + blockQueue.push(newBlockData(4)) + + has = blockQueue.has(newBlockData(4).Hash) + assert.True(t, has) + + blockData, err = blockQueue.pop(context.Background()) + assert.Equal(t, newBlockData(2), blockData) + assert.NoError(t, err) + + // drain queue + for len(blockQueue.queue) > 0 { + <-blockQueue.queue + } +} + +func Test_lockQueue_threadSafety(t *testing.T) { + // This test consists in checking for concurrent access + // using the -race detector. + t.Parallel() + + var startWg, endWg sync.WaitGroup + ctx, cancel := context.WithCancel(context.Background()) + + const operations = 3 + const parallelism = 3 + const goroutines = parallelism * operations + startWg.Add(goroutines) + endWg.Add(goroutines) + + const testDuration = 50 * time.Millisecond + go func() { + timer := time.NewTimer(time.Hour) + startWg.Wait() + _ = timer.Reset(testDuration) + <-timer.C + cancel() + }() + + runInLoop := func(f func()) { + defer endWg.Done() + startWg.Done() + startWg.Wait() + for ctx.Err() == nil { + f() + } + } + + const capacity = 10 + blockQueue := newBlockQueue(capacity) + blockData := &types.BlockData{ + Hash: common.Hash{1}, + } + blockHash := common.Hash{1} + + endWg.Add(1) + go func() { + defer endWg.Done() + <-ctx.Done() + // Empty queue channel to make sure `push` does not block + // when the context is cancelled. 
+ for len(blockQueue.queue) > 0 { + <-blockQueue.queue + } + }() + + for i := 0; i < parallelism; i++ { + go runInLoop(func() { + blockQueue.push(blockData) + }) + + go runInLoop(func() { + _, _ = blockQueue.pop(ctx) + }) + + go runInLoop(func() { + _ = blockQueue.has(blockHash) + }) + } + + endWg.Wait() +} diff --git a/dot/sync/chain_processor_test.go b/dot/sync/chain_processor_test.go new file mode 100644 index 0000000000..8e794767ab --- /dev/null +++ b/dot/sync/chain_processor_test.go @@ -0,0 +1,1181 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package sync + +import ( + "context" + "errors" + "testing" + + "github.com/ChainSafe/gossamer/dot/telemetry" + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/blocktree" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/runtime/storage" + "github.com/ChainSafe/gossamer/lib/trie" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func Test_chainProcessor_handleBlock(t *testing.T) { + t.Parallel() + mockError := errors.New("test mock error") + testHash := common.MustHexToHash("0x03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314") + testParentHash := common.MustHexToHash("0x7db9db5ed9967b80143100189ba69d9e4deab85ac3570e5df25686cabe32964a") + + tests := map[string]struct { + chainProcessorBuilder func(ctrl *gomock.Controller) chainProcessor + block *types.Block + announce bool + wantErr error + }{ + "handle_getHeader_error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(nil, mockError) + chainProcessor.blockState = mockBlockState + return + }, + block: &types.Block{ + Body: types.Body{}, + }, + wantErr: errFailedToGetParent, + }, + "handle_trieState_error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{}, nil) + chainProcessor.blockState = mockBlockState + mockStorageState := NewMockStorageState(ctrl) + mockStorageState.EXPECT().Lock() + mockStorageState.EXPECT().TrieState(&common.Hash{}).Return(nil, mockError) + mockStorageState.EXPECT().Unlock() + chainProcessor.storageState = mockStorageState + return + }, + block: &types.Block{ + Body: types.Body{}, + }, + wantErr: mockError, + }, + "handle_getRuntime_error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ + StateRoot: testHash, + }, nil) + mockBlockState.EXPECT().GetRuntime(testParentHash).Return(nil, mockError) + chainProcessor.blockState = mockBlockState + trieState := storage.NewTrieState(nil) + mockStorageState := NewMockStorageState(ctrl) + mockStorageState.EXPECT().Lock() + mockStorageState.EXPECT().TrieState(&testHash).Return(trieState, nil) + mockStorageState.EXPECT().Unlock() + chainProcessor.storageState = mockStorageState + return + }, + block: &types.Block{ + Body: types.Body{}, + }, + wantErr: mockError, + }, + "handle_runtime_ExecuteBlock_error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { + trieState := storage.NewTrieState(nil) + mockBlockState := NewMockBlockState(ctrl) + 
mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ + StateRoot: testHash, + }, nil) + mockInstance := NewMockInstance(ctrl) + mockInstance.EXPECT().SetContextStorage(trieState) + mockInstance.EXPECT().ExecuteBlock(&types.Block{Body: types.Body{}}).Return(nil, mockError) + mockBlockState.EXPECT().GetRuntime(testParentHash).Return(mockInstance, nil) + chainProcessor.blockState = mockBlockState + mockStorageState := NewMockStorageState(ctrl) + mockStorageState.EXPECT().Lock() + mockStorageState.EXPECT().TrieState(&testHash).Return(trieState, nil) + mockStorageState.EXPECT().Unlock() + chainProcessor.storageState = mockStorageState + return + }, + block: &types.Block{ + Body: types.Body{}, + }, + wantErr: mockError, + }, + "handle_block_import_error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { + trieState := storage.NewTrieState(nil) + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ + StateRoot: testHash, + }, nil) + mockBlock := &types.Block{Body: types.Body{}} + mockInstance := NewMockInstance(ctrl) + mockInstance.EXPECT().SetContextStorage(trieState) + mockInstance.EXPECT().ExecuteBlock(mockBlock).Return(nil, nil) + mockBlockState.EXPECT().GetRuntime(testParentHash).Return(mockInstance, nil) + chainProcessor.blockState = mockBlockState + mockStorageState := NewMockStorageState(ctrl) + mockStorageState.EXPECT().Lock() + mockStorageState.EXPECT().TrieState(&testHash).Return(trieState, nil) + mockStorageState.EXPECT().Unlock() + chainProcessor.storageState = mockStorageState + mockBlockImportHandler := NewMockBlockImportHandler(ctrl) + mockBlockImportHandler.EXPECT().HandleBlockImport(mockBlock, + trieState, false).Return(mockError) + chainProcessor.blockImportHandler = mockBlockImportHandler + return + }, + block: &types.Block{ + Body: types.Body{}, + }, + wantErr: mockError, + }, + "base_case": { + chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { + mockBlock := &types.Block{ + Body: types.Body{}, // empty slice of extrinsics + } + trieState := storage.NewTrieState(nil) + mockBlockState := NewMockBlockState(ctrl) + mockHeader := &types.Header{ + Number: 0, + StateRoot: trie.EmptyHash, + } + mockHeaderHash := mockHeader.Hash() + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(mockHeader, nil) + + mockInstance := NewMockInstance(ctrl) + mockInstance.EXPECT().SetContextStorage(trieState) + mockInstance.EXPECT().ExecuteBlock(mockBlock).Return(nil, nil) + mockBlockState.EXPECT().GetRuntime(mockHeaderHash).Return(mockInstance, nil) + chainProcessor.blockState = mockBlockState + mockStorageState := NewMockStorageState(ctrl) + mockStorageState.EXPECT().Lock() + mockStorageState.EXPECT().Unlock() + mockStorageState.EXPECT().TrieState(&trie.EmptyHash).Return(trieState, nil) + chainProcessor.storageState = mockStorageState + mockBlockImportHandler := NewMockBlockImportHandler(ctrl) + mockBlockImportHandler.EXPECT().HandleBlockImport(mockBlock, trieState, false).Return(nil) + chainProcessor.blockImportHandler = mockBlockImportHandler + mockTelemetry := NewMockTelemetry(ctrl) + mockTelemetry.EXPECT().SendMessage(gomock.Any()) + chainProcessor.telemetry = mockTelemetry + return + }, + block: &types.Block{ + Header: types.Header{ + Number: 0, + }, + Body: types.Body{}, + }, + }, + "import_block_and_announce": { + announce: true, + chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { + mockBlock 
:= &types.Block{
+					Body: types.Body{}, // empty slice of extrinsics
+				}
+				trieState := storage.NewTrieState(nil)
+				mockBlockState := NewMockBlockState(ctrl)
+				mockHeader := &types.Header{
+					Number:    0,
+					StateRoot: trie.EmptyHash,
+				}
+				mockHeaderHash := mockHeader.Hash()
+				mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(mockHeader, nil)
+
+				mockInstance := NewMockInstance(ctrl)
+				mockInstance.EXPECT().SetContextStorage(trieState)
+				mockInstance.EXPECT().ExecuteBlock(mockBlock).Return(nil, nil)
+				mockBlockState.EXPECT().GetRuntime(mockHeaderHash).Return(mockInstance, nil)
+				chainProcessor.blockState = mockBlockState
+				mockStorageState := NewMockStorageState(ctrl)
+				mockStorageState.EXPECT().Lock()
+				mockStorageState.EXPECT().Unlock()
+				mockStorageState.EXPECT().TrieState(&trie.EmptyHash).Return(trieState, nil)
+				chainProcessor.storageState = mockStorageState
+				mockBlockImportHandler := NewMockBlockImportHandler(ctrl)
+				mockBlockImportHandler.EXPECT().HandleBlockImport(mockBlock, trieState, true).Return(nil)
+				chainProcessor.blockImportHandler = mockBlockImportHandler
+				mockTelemetry := NewMockTelemetry(ctrl)
+				mockTelemetry.EXPECT().SendMessage(gomock.Any())
+				chainProcessor.telemetry = mockTelemetry
+				return
+			},
+			block: &types.Block{
+				Header: types.Header{
+					Number: 0,
+				},
+				Body: types.Body{},
+			},
+		},
+	}
+	for name, tt := range tests {
+		tt := tt
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
+			ctrl := gomock.NewController(t)
+			s := tt.chainProcessorBuilder(ctrl)
+
+			err := s.handleBlock(tt.block, tt.announce)
+			assert.ErrorIs(t, err, tt.wantErr)
+		})
+	}
+	t.Run("panics_on_different_parent_state_root", func(t *testing.T) {
+		t.Parallel()
+		ctrl := gomock.NewController(t)
+		block := &types.Block{
+			Header: types.Header{
+				ParentHash: common.Hash{1},
+			},
+		}
+		blockState := NewMockBlockState(ctrl)
+		blockState.EXPECT().GetHeader(common.Hash{1}).
+			Return(&types.Header{StateRoot: common.Hash{2}}, nil)
+		trieState := storage.NewTrieState(nil)
+		storageState := NewMockStorageState(ctrl)
+		lockCall := storageState.EXPECT().Lock()
+		trieStateCall := storageState.EXPECT().TrieState(&common.Hash{2}).
+			Return(trieState, nil).After(lockCall)
+		storageState.EXPECT().Unlock().After(trieStateCall)
+		chainProcessor := &chainProcessor{
+			blockState:   blockState,
+			storageState: storageState,
+		}
+		const expectedPanicValue = "parent state root does not match snapshot state root"
+		assert.PanicsWithValue(t, expectedPanicValue, func() {
+			_ = chainProcessor.handleBlock(block, false)
+		})
+	})
+}
+
+func Test_chainProcessor_handleBody(t *testing.T) {
+	t.Parallel()
+
+	testExtrinsics := []types.Extrinsic{{1, 2, 3}, {7, 8, 9, 0}, {0xa, 0xb}}
+	testBody := types.NewBody(testExtrinsics)
+
+	t.Run("base_case", func(t *testing.T) {
+		t.Parallel()
+		ctrl := gomock.NewController(t)
+
+		mockTransactionState := NewMockTransactionState(ctrl)
+		mockTransactionState.EXPECT().RemoveExtrinsic(testExtrinsics[0])
+		mockTransactionState.EXPECT().RemoveExtrinsic(testExtrinsics[1])
+		mockTransactionState.EXPECT().RemoveExtrinsic(testExtrinsics[2])
+		processor := chainProcessor{
+			transactionState: mockTransactionState,
+		}
+		processor.handleBody(testBody)
+	})
+}
+
+func Test_chainProcessor_handleJustification(t *testing.T) {
+	t.Parallel()
+
+	header := &types.Header{
+		Number: 2,
+	}
+	headerHash := header.Hash()
+	errTest := errors.New("test error")
+
+	type args struct {
+		header        *types.Header
+		justification []byte
+	}
+	tests := map[string]struct {
+		chainProcessorBuilder func(ctrl *gomock.Controller) chainProcessor
+		args                  args
+		sentinelError         error
+		errorMessage          string
+	}{
+		"invalid_justification": {
+			chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor {
+				mockFinalityGadget := NewMockFinalityGadget(ctrl)
+				mockFinalityGadget.EXPECT().VerifyBlockJustification(headerHash,
+					[]byte(`x`)).Return(errTest)
+				return chainProcessor{
+					finalityGadget: mockFinalityGadget,
+				}
+			},
+			args: args{
+				header:        header,
+				justification: []byte(`x`),
+			},
+			sentinelError: errTest,
+			errorMessage:  "verifying block number 2 justification: test error",
+		},
+		"set_justification_error": {
+			chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor {
+				mockBlockState := NewMockBlockState(ctrl)
+				mockBlockState.EXPECT().SetJustification(headerHash, []byte(`xx`)).Return(errTest)
+				mockFinalityGadget := NewMockFinalityGadget(ctrl)
+				mockFinalityGadget.EXPECT().VerifyBlockJustification(headerHash, []byte(`xx`)).Return(nil)
+				return chainProcessor{
+					blockState:     mockBlockState,
+					finalityGadget: mockFinalityGadget,
+				}
+			},
+			args: args{
+				header:        header,
+				justification: []byte(`xx`),
+			},
+			sentinelError: errTest,
+			errorMessage:  "setting justification for block number 2: test error",
+		},
+		"base_case_set": {
+			chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor {
+				mockBlockState := NewMockBlockState(ctrl)
+				mockBlockState.EXPECT().SetJustification(headerHash, []byte(`1234`)).Return(nil)
+				mockFinalityGadget := NewMockFinalityGadget(ctrl)
+				mockFinalityGadget.EXPECT().VerifyBlockJustification(headerHash, []byte(`1234`)).Return(nil)
+				return chainProcessor{
+					blockState:     mockBlockState,
+					finalityGadget: mockFinalityGadget,
+				}
+			},
+			args: args{
+				header:        header,
+				justification: []byte(`1234`),
+			},
+		},
+	}
+	for name, tt := range tests {
+		tt := tt
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
+			ctrl := gomock.NewController(t)
+
+			processor := tt.chainProcessorBuilder(ctrl)
+
+			err := processor.handleJustification(tt.args.header, tt.args.justification)
+
+			assert.ErrorIs(t, err, tt.sentinelError)
+			if tt.sentinelError != nil {
+				assert.EqualError(t, err, tt.errorMessage)
+			}
+		})
+ } +} + +func Test_chainProcessor_processBlockData(t *testing.T) { + t.Parallel() + + mockError := errors.New("mock test error") + + tests := map[string]struct { + chainProcessorBuilder func(ctrl *gomock.Controller) chainProcessor + blockData types.BlockData + expectedError error + }{ + "handle_has_header_error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, mockError) + + return chainProcessor{ + blockState: mockBlockState, + } + }, + blockData: types.BlockData{}, + expectedError: mockError, + }, + "handle_has_block_body_error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, mockError) + return chainProcessor{ + blockState: mockBlockState, + } + }, + blockData: types.BlockData{}, + expectedError: mockError, + }, + "handle_getBlockByHash_error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(true, nil) + mockBlockState.EXPECT().GetBlockByHash(common.Hash{}).Return(nil, mockError) + + mockChainSync := NewMockChainSync(ctrl) + mockChainSync.EXPECT().syncState().Return(bootstrap) + return chainProcessor{ + blockState: mockBlockState, + chainSync: mockChainSync, + } + }, + blockData: types.BlockData{}, + expectedError: mockError, + }, + "handle_block_data_justification_!=_nil": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + mockBlock := &types.Block{ + Header: types.Header{ + Number: uint(1), + }, + } + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(true, nil) + mockBlockState.EXPECT().GetBlockByHash(common.Hash{}).Return(mockBlock, nil) + mockBlockState.EXPECT().AddBlockToBlockTree(&types.Block{ + Header: types.Header{Number: 1}}).Return(nil) + mockBlockState.EXPECT().SetJustification(common.MustHexToHash( + "0x6443a0b46e0412e626363028115a9f2cf963eeed526b8b33e5316f08b50d0dc3"), []byte{1, 2, 3}) + mockFinalityGadget := NewMockFinalityGadget(ctrl) + mockFinalityGadget.EXPECT().VerifyBlockJustification(common.MustHexToHash( + "0x6443a0b46e0412e626363028115a9f2cf963eeed526b8b33e5316f08b50d0dc3"), []byte{1, 2, + 3}).Return(nil) + mockStorageState := NewMockStorageState(ctrl) + mockStorageState.EXPECT().TrieState(&common.Hash{}).Return(nil, nil) + + // given our current chain sync state is `tip` + // the `HandleBlockImport` method should expect + // true as the announce parameter + mockChainSync := NewMockChainSync(ctrl) + mockChainSync.EXPECT().syncState().Return(tip) + + mockBlockImportHandler := NewMockBlockImportHandler(ctrl) + mockBlockImportHandler.EXPECT().HandleBlockImport(mockBlock, + nil, true).Return(nil) + + return chainProcessor{ + chainSync: mockChainSync, + blockState: mockBlockState, + finalityGadget: mockFinalityGadget, + storageState: mockStorageState, + blockImportHandler: mockBlockImportHandler, + } + }, + blockData: types.BlockData{ + Justification: &[]byte{1, 2, 3}, + }, + }, + "handle_babe_verify_block_error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + 
mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}).Return(mockError) + + mockChainSync := NewMockChainSync(ctrl) + mockChainSync.EXPECT().syncState().Return(bootstrap) + + return chainProcessor{ + chainSync: mockChainSync, + blockState: mockBlockState, + babeVerifier: mockBabeVerifier, + } + }, + blockData: types.BlockData{ + Header: &types.Header{}, + Body: &types.Body{}, + }, + expectedError: mockError, + }, + "no_header_and_body_-_fail_to_handle_justification": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + blockState := NewMockBlockState(ctrl) + blockState.EXPECT().HasHeader(common.Hash{1}).Return(false, nil) + blockState.EXPECT().HasBlockBody(common.Hash{1}).Return(true, nil) + + finalityGadget := NewMockFinalityGadget(ctrl) + expectedBlockDataHeader := &types.Header{Number: 2} + expectedBlockDataHeaderHash := expectedBlockDataHeader.Hash() + finalityGadget.EXPECT(). + VerifyBlockJustification(expectedBlockDataHeaderHash, []byte{1, 2, 3}). + Return(mockError) + + mockChainSync := NewMockChainSync(ctrl) + mockChainSync.EXPECT().syncState().Return(bootstrap) + + return chainProcessor{ + chainSync: mockChainSync, + blockState: blockState, + finalityGadget: finalityGadget, + } + }, + blockData: types.BlockData{ + Hash: common.Hash{1}, + Header: &types.Header{Number: 2}, + Justification: &[]byte{1, 2, 3}, + }, + expectedError: mockError, + }, + "handle_compareAndSetBlockData_error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().CompareAndSetBlockData(&types.BlockData{}).Return(mockError) + + mockChainSync := NewMockChainSync(ctrl) + mockChainSync.EXPECT().syncState().Return(bootstrap) + return chainProcessor{ + chainSync: mockChainSync, + blockState: mockBlockState, + } + }, + blockData: types.BlockData{}, + expectedError: mockError, + }, + "success_with_justification": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + stateRootHash := common.MustHexToHash("0x03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314") + runtimeHash := common.MustHexToHash("0x7db9db5ed9967b80143100189ba69d9e4deab85ac3570e5df25686cabe32964a") + mockTrieState := storage.NewTrieState(nil) + mockBlock := &types.Block{Header: types.Header{}, Body: types.Body{}} + + mockInstance := NewMockInstance(ctrl) + mockInstance.EXPECT().SetContextStorage(mockTrieState) + mockInstance.EXPECT().ExecuteBlock(mockBlock).Return(nil, nil) + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ + Number: 0, + StateRoot: stateRootHash, + }, nil) + mockBlockState.EXPECT().SetJustification( + common.MustHexToHash("0xdcdd89927d8a348e00257e1ecc8617f45edb5118efff3ea2f9961b2ad9b7690a"), []byte{1, 2, 3}) + mockBlockState.EXPECT().CompareAndSetBlockData(gomock.AssignableToTypeOf(&types.BlockData{})) + mockBlockState.EXPECT().GetRuntime(runtimeHash).Return(mockInstance, nil) + mockBabeVerifier := 
NewMockBabeVerifier(ctrl) + mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}) + mockStorageState := NewMockStorageState(ctrl) + mockStorageState.EXPECT().Lock() + mockStorageState.EXPECT().TrieState(&stateRootHash).Return(mockTrieState, nil) + mockStorageState.EXPECT().Unlock() + + mockChainSync := NewMockChainSync(ctrl) + mockChainSync.EXPECT().syncState().Return(bootstrap) + + mockBlockImportHandler := NewMockBlockImportHandler(ctrl) + mockBlockImportHandler.EXPECT().HandleBlockImport(mockBlock, mockTrieState, false) + + mockTelemetry := NewMockTelemetry(ctrl) + mockTelemetry.EXPECT().SendMessage(gomock.Any()) + mockFinalityGadget := NewMockFinalityGadget(ctrl) + mockFinalityGadget.EXPECT().VerifyBlockJustification( + common.MustHexToHash("0xdcdd89927d8a348e00257e1ecc8617f45edb5118efff3ea2f9961b2ad9b7690a"), + []byte{1, 2, 3}).Return(nil) + return chainProcessor{ + chainSync: mockChainSync, + blockState: mockBlockState, + babeVerifier: mockBabeVerifier, + storageState: mockStorageState, + blockImportHandler: mockBlockImportHandler, + telemetry: mockTelemetry, + finalityGadget: mockFinalityGadget, + } + }, + blockData: types.BlockData{ + Header: &types.Header{ + Number: 0, + }, + Body: &types.Body{}, + Justification: &[]byte{1, 2, 3}, + }, + }, + } + + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + processor := tt.chainProcessorBuilder(ctrl) + err := processor.processBlockData(tt.blockData) + assert.ErrorIs(t, err, tt.expectedError) + }) + } +} + +func Test_chainProcessor_processBlockDataWithStateHeaderAndBody(t *testing.T) { + t.Parallel() + + errTest := errors.New("test error") + + testCases := map[string]struct { + chainProcessorBuilder func(ctrl *gomock.Controller) chainProcessor + blockData types.BlockData + announceImportedBlock bool + sentinelError error + errorMessage string + }{ + "get_block_by_hash_error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + blockState := NewMockBlockState(ctrl) + blockState.EXPECT().GetBlockByHash(common.Hash{1}). 
+ Return(nil, errTest) + return chainProcessor{ + blockState: blockState, + } + }, + blockData: types.BlockData{Hash: common.Hash{1}}, + sentinelError: errTest, + errorMessage: "getting block by hash: test error", + }, + "block_already_exists_in_blocktree": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + blockState := NewMockBlockState(ctrl) + block := &types.Block{Header: types.Header{Number: 2}} + blockState.EXPECT().GetBlockByHash(common.Hash{1}).Return(block, nil) + blockState.EXPECT().AddBlockToBlockTree(block).Return(blocktree.ErrBlockExists) + return chainProcessor{ + blockState: blockState, + } + }, + blockData: types.BlockData{Hash: common.Hash{1}}, + }, + "add_block_to_blocktree_error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + blockState := NewMockBlockState(ctrl) + block := &types.Block{Header: types.Header{Number: 2}} + blockState.EXPECT().GetBlockByHash(common.Hash{1}).Return(block, nil) + blockState.EXPECT().AddBlockToBlockTree(block).Return(errTest) + return chainProcessor{ + blockState: blockState, + } + }, + blockData: types.BlockData{Hash: common.Hash{1}}, + sentinelError: errTest, + errorMessage: "adding block to blocktree: test error", + }, + "handle_justification_error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + blockState := NewMockBlockState(ctrl) + blockHeader := types.Header{Number: 2} + blockHeaderHash := blockHeader.Hash() + block := &types.Block{Header: blockHeader} + blockState.EXPECT().GetBlockByHash(common.Hash{1}).Return(block, nil) + blockState.EXPECT().AddBlockToBlockTree(block).Return(nil) + + finalityGadget := NewMockFinalityGadget(ctrl) + finalityGadget.EXPECT(). + VerifyBlockJustification(blockHeaderHash, []byte{3}). + Return(errTest) + + return chainProcessor{ + blockState: blockState, + finalityGadget: finalityGadget, + } + }, + blockData: types.BlockData{ + Hash: common.Hash{1}, + Justification: &[]byte{3}, + }, + sentinelError: errTest, + errorMessage: "handling justification: verifying block number 2 justification: test error", + }, + "trie_state_error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + blockState := NewMockBlockState(ctrl) + blockHeader := types.Header{StateRoot: common.Hash{2}} + block := &types.Block{Header: blockHeader} + blockState.EXPECT().GetBlockByHash(common.Hash{1}).Return(block, nil) + blockState.EXPECT().AddBlockToBlockTree(block).Return(nil) + + storageState := NewMockStorageState(ctrl) + storageState.EXPECT().TrieState(&common.Hash{2}). + Return(nil, errTest) + + return chainProcessor{ + blockState: blockState, + storageState: storageState, + } + }, + blockData: types.BlockData{ + Hash: common.Hash{1}, + }, + sentinelError: errTest, + errorMessage: "loading trie state: test error", + }, + "handle_block_import_error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + blockState := NewMockBlockState(ctrl) + blockHeader := types.Header{StateRoot: common.Hash{2}} + block := &types.Block{Header: blockHeader} + blockState.EXPECT().GetBlockByHash(common.Hash{1}).Return(block, nil) + blockState.EXPECT().AddBlockToBlockTree(block).Return(nil) + + storageState := NewMockStorageState(ctrl) + trieState := storage.NewTrieState(nil) + storageState.EXPECT().TrieState(&common.Hash{2}). 
+ Return(trieState, nil) + + blockImportHandler := NewMockBlockImportHandler(ctrl) + const announceImportedBlock = true + blockImportHandler.EXPECT().HandleBlockImport(block, trieState, announceImportedBlock). + Return(errTest) + + return chainProcessor{ + blockState: blockState, + storageState: storageState, + blockImportHandler: blockImportHandler, + } + }, + blockData: types.BlockData{ + Hash: common.Hash{1}, + }, + announceImportedBlock: true, + sentinelError: errTest, + errorMessage: "handling block import: test error", + }, + "success": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + blockState := NewMockBlockState(ctrl) + blockHeader := types.Header{StateRoot: common.Hash{2}} + block := &types.Block{Header: blockHeader} + blockState.EXPECT().GetBlockByHash(common.Hash{1}).Return(block, nil) + blockState.EXPECT().AddBlockToBlockTree(block).Return(nil) + + storageState := NewMockStorageState(ctrl) + trieState := storage.NewTrieState(nil) + storageState.EXPECT().TrieState(&common.Hash{2}). + Return(trieState, nil) + + blockImportHandler := NewMockBlockImportHandler(ctrl) + const announceImportedBlock = true + blockImportHandler.EXPECT().HandleBlockImport(block, trieState, announceImportedBlock). + Return(nil) + + return chainProcessor{ + blockState: blockState, + storageState: storageState, + blockImportHandler: blockImportHandler, + } + }, + blockData: types.BlockData{ + Hash: common.Hash{1}, + }, + announceImportedBlock: true, + }, + } + + for name, testCase := range testCases { + testCase := testCase + t.Run(name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + + processor := testCase.chainProcessorBuilder(ctrl) + + err := processor.processBlockDataWithStateHeaderAndBody( + testCase.blockData, testCase.announceImportedBlock) + + assert.ErrorIs(t, err, testCase.sentinelError) + if testCase.sentinelError != nil { + assert.EqualError(t, err, testCase.errorMessage) + } + }) + } +} + +func Test_chainProcessor_processBlockDataWithHeaderAndBody(t *testing.T) { + t.Parallel() + + errTest := errors.New("test error") + + testCases := map[string]struct { + chainProcessorBuilder func(ctrl *gomock.Controller) chainProcessor + blockData types.BlockData + announceImportedBlock bool + sentinelError error + errorMessage string + }{ + "verify_block_error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + babeVerifier := NewMockBabeVerifier(ctrl) + babeVerifier.EXPECT().VerifyBlock(&types.Header{Number: 1}). + Return(errTest) + + return chainProcessor{ + babeVerifier: babeVerifier, + } + }, + blockData: types.BlockData{ + Header: &types.Header{Number: 1}, + }, + sentinelError: errTest, + errorMessage: "babe verifying block: test error", + }, + "handle_block_error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + babeVerifier := NewMockBabeVerifier(ctrl) + expectedHeader := &types.Header{ParentHash: common.Hash{1}} + babeVerifier.EXPECT().VerifyBlock(expectedHeader). + Return(nil) + + transactionState := NewMockTransactionState(ctrl) + transactionState.EXPECT().RemoveExtrinsic(types.Extrinsic{2}) + + blockState := NewMockBlockState(ctrl) + blockState.EXPECT().GetHeader(common.Hash{1}). 
+ Return(nil, errTest) + + return chainProcessor{ + babeVerifier: babeVerifier, + transactionState: transactionState, + blockState: blockState, + } + }, + blockData: types.BlockData{ + Header: &types.Header{ParentHash: common.Hash{1}}, + Body: &types.Body{{2}}, + }, + sentinelError: errFailedToGetParent, + errorMessage: "handling block: failed to get parent header: test error", + }, + "success": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + babeVerifier := NewMockBabeVerifier(ctrl) + expectedHeader := &types.Header{ + ParentHash: common.Hash{1}, + Number: 5, + } + babeVerifier.EXPECT().VerifyBlock(expectedHeader). + Return(nil) + + transactionState := NewMockTransactionState(ctrl) + transactionState.EXPECT().RemoveExtrinsic(types.Extrinsic{2}) + + blockState := NewMockBlockState(ctrl) + parentHeader := &types.Header{StateRoot: trie.EmptyHash} + blockState.EXPECT().GetHeader(common.Hash{1}). + Return(parentHeader, nil) + + storageState := NewMockStorageState(ctrl) + lockCall := storageState.EXPECT().Lock() + storageState.EXPECT().Unlock().After(lockCall) + trieState := storage.NewTrieState(nil) + storageState.EXPECT().TrieState(&trie.EmptyHash). + Return(trieState, nil) + + parentHeaderHash := parentHeader.Hash() + instance := NewMockInstance(ctrl) + blockState.EXPECT().GetRuntime(parentHeaderHash). + Return(instance, nil) + + instance.EXPECT().SetContextStorage(trieState) + block := &types.Block{ + Header: *expectedHeader, + Body: types.Body{{2}}, + } + instance.EXPECT().ExecuteBlock(block).Return(nil, nil) + + blockImportHandler := NewMockBlockImportHandler(ctrl) + const announceImportedBlock = true + blockImportHandler.EXPECT().HandleBlockImport(block, trieState, announceImportedBlock). + Return(nil) + + telemetryClient := NewMockTelemetry(ctrl) + headerHash := common.MustHexToHash("0x18d21d2901e4a4ac6a8c6431da2dfee1b8701f31a9e49283a082e6c744d4117c") + message := telemetry.NewBlockImport(&headerHash, expectedHeader.Number, "NetworkInitialSync") + telemetryClient.EXPECT().SendMessage(message) + + return chainProcessor{ + babeVerifier: babeVerifier, + transactionState: transactionState, + blockState: blockState, + storageState: storageState, + blockImportHandler: blockImportHandler, + telemetry: telemetryClient, + } + }, + blockData: types.BlockData{ + Header: &types.Header{ + ParentHash: common.Hash{1}, + Number: 5, + }, + Body: &types.Body{{2}}, + }, + announceImportedBlock: true, + }, + } + + for name, testCase := range testCases { + testCase := testCase + t.Run(name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + + processor := testCase.chainProcessorBuilder(ctrl) + + err := processor.processBlockDataWithHeaderAndBody( + testCase.blockData, testCase.announceImportedBlock) + + assert.ErrorIs(t, err, testCase.sentinelError) + if testCase.sentinelError != nil { + assert.EqualError(t, err, testCase.errorMessage) + } + }) + } +} + +func Test_chainProcessor_processReadyBlocks(t *testing.T) { + t.Parallel() + mockError := errors.New("test mock error") + tests := map[string]struct { + chainSyncBuilder func(ctrl *gomock.Controller) ChainSync + blockStateBuilder func(ctrl *gomock.Controller, done chan struct{}) BlockState + blockData *types.BlockData + babeVerifierBuilder func(ctrl *gomock.Controller) BabeVerifier + pendingBlockBuilder func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet + storageStateBuilder func(ctrl *gomock.Controller, done chan struct{}) StorageState + }{ + "base_case": { + chainSyncBuilder: func(ctrl 
*gomock.Controller) ChainSync { + cs := NewMockChainSync(ctrl) + cs.EXPECT().syncState().Return(bootstrap) + return cs + }, + blockStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().CompareAndSetBlockData(&types.BlockData{}).DoAndReturn(func(*types. + BlockData) error { + close(done) + return nil + }) + return mockBlockState + }, + blockData: &types.BlockData{ + Hash: common.Hash{}, + }, + babeVerifierBuilder: func(ctrl *gomock.Controller) BabeVerifier { + return nil + }, + pendingBlockBuilder: func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet { + return nil + }, + storageStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) StorageState { + return nil + }, + }, + "add_block": { + chainSyncBuilder: func(ctrl *gomock.Controller) ChainSync { + cs := NewMockChainSync(ctrl) + cs.EXPECT().syncState().Return(bootstrap) + return cs + }, + blockStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(nil, mockError) + return mockBlockState + }, + blockData: &types.BlockData{ + Hash: common.Hash{}, + Header: &types.Header{}, + Body: &types.Body{}, + }, + babeVerifierBuilder: func(ctrl *gomock.Controller) BabeVerifier { + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}).Return(nil) + return mockBabeVerifier + }, + pendingBlockBuilder: func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet { + mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) + mockDisjointBlockSet.EXPECT().addBlock(&types.Block{ + Header: types.Header{}, + Body: types.Body{}, + }).DoAndReturn(func(block *types.Block) error { + close(done) + return nil + }) + return mockDisjointBlockSet + }, + storageStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) StorageState { + return nil + }, + }, + "error_in_process_block": { + chainSyncBuilder: func(ctrl *gomock.Controller) ChainSync { + cs := NewMockChainSync(ctrl) + cs.EXPECT().syncState().Return(bootstrap) + return cs + }, + blockStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{}, nil) + return mockBlockState + }, + blockData: &types.BlockData{ + Hash: common.Hash{}, + Header: &types.Header{}, + Body: &types.Body{}, + }, + babeVerifierBuilder: func(ctrl *gomock.Controller) BabeVerifier { + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}).Return(nil) + return mockBabeVerifier + }, + pendingBlockBuilder: func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet { + return nil + }, + storageStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) StorageState { + mockStorageState := NewMockStorageState(ctrl) + mockStorageState.EXPECT().Lock() + mockStorageState.EXPECT().Unlock() + 
mockStorageState.EXPECT().TrieState(&common.Hash{}).DoAndReturn(func(hash *common.Hash) (*storage. + TrieState, error) { + close(done) + return nil, mockError + }) + return mockStorageState + }, + }, + "add_block_error": { + chainSyncBuilder: func(ctrl *gomock.Controller) ChainSync { + cs := NewMockChainSync(ctrl) + cs.EXPECT().syncState().Return(bootstrap) + return cs + }, + blockStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(nil, mockError) + return mockBlockState + }, + blockData: &types.BlockData{ + Hash: common.Hash{}, + Header: &types.Header{}, + Body: &types.Body{}, + }, + babeVerifierBuilder: func(ctrl *gomock.Controller) BabeVerifier { + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}).Return(nil) + return mockBabeVerifier + }, + pendingBlockBuilder: func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet { + mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) + mockDisjointBlockSet.EXPECT().addBlock(&types.Block{ + Header: types.Header{}, + Body: types.Body{}, + }).DoAndReturn(func(block *types.Block) error { + close(done) + return mockError + }) + return mockDisjointBlockSet + }, + storageStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) StorageState { + return nil + }, + }, + } + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + ctx, cancel := context.WithCancel(context.Background()) + readyBlock := newBlockQueue(5) + done := make(chan struct{}) + + s := &chainProcessor{ + ctx: ctx, + cancel: cancel, + readyBlocks: readyBlock, + chainSync: tt.chainSyncBuilder(ctrl), + blockState: tt.blockStateBuilder(ctrl, done), + babeVerifier: tt.babeVerifierBuilder(ctrl), + pendingBlocks: tt.pendingBlockBuilder(ctrl, done), + storageState: tt.storageStateBuilder(ctrl, done), + } + + go s.processReadyBlocks() + + readyBlock.push(tt.blockData) + <-done + s.cancel() + }) + } +} + +func Test_newChainProcessor(t *testing.T) { + t.Parallel() + + mockReadyBlock := newBlockQueue(5) + mockDisjointBlockSet := NewMockDisjointBlockSet(nil) + mockBlockState := NewMockBlockState(nil) + mockStorageState := NewMockStorageState(nil) + mockTransactionState := NewMockTransactionState(nil) + mockBabeVerifier := NewMockBabeVerifier(nil) + mockFinalityGadget := NewMockFinalityGadget(nil) + mockBlockImportHandler := NewMockBlockImportHandler(nil) + + type args struct { + readyBlocks *blockQueue + pendingBlocks DisjointBlockSet + blockState BlockState + storageState StorageState + transactionState TransactionState + babeVerifier BabeVerifier + finalityGadget FinalityGadget + blockImportHandler BlockImportHandler + } + tests := []struct { + name string + args args + want *chainProcessor + }{ + { + name: "with_args", + args: args{ + readyBlocks: mockReadyBlock, + pendingBlocks: mockDisjointBlockSet, + blockState: mockBlockState, + storageState: mockStorageState, + transactionState: mockTransactionState, + babeVerifier: mockBabeVerifier, + finalityGadget: mockFinalityGadget, + blockImportHandler: mockBlockImportHandler, + }, + want: &chainProcessor{ + readyBlocks: mockReadyBlock, + pendingBlocks: mockDisjointBlockSet, + blockState: mockBlockState, + storageState: mockStorageState, + 
transactionState: mockTransactionState, + babeVerifier: mockBabeVerifier, + finalityGadget: mockFinalityGadget, + blockImportHandler: mockBlockImportHandler, + }, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + cpCfg := chainProcessorConfig{ + readyBlocks: tt.args.readyBlocks, + pendingBlocks: tt.args.pendingBlocks, + blockState: tt.args.blockState, + storageState: tt.args.storageState, + transactionState: tt.args.transactionState, + babeVerifier: tt.args.babeVerifier, + finalityGadget: tt.args.finalityGadget, + blockImportHandler: tt.args.blockImportHandler, + } + + got := newChainProcessor(cpCfg) + assert.NotNil(t, got.ctx) + got.ctx = nil + assert.NotNil(t, got.cancel) + got.cancel = nil + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/internal/trie/node/branch_encode_test.go b/internal/trie/node/branch_encode_test.go index 46a1f89ce5..7ef2ec4c0f 100644 --- a/internal/trie/node/branch_encode_test.go +++ b/internal/trie/node/branch_encode_test.go @@ -150,7 +150,7 @@ func Test_encodeChildrenOpportunisticParallel(t *testing.T) { }) } - t.Run("opportunist parallel branch encoding", func(t *testing.T) { + t.Run("opportunist_parallel_branch_encoding", func(t *testing.T) { t.Parallel() children := make([]*Node, ChildrenCapacity) diff --git a/internal/trie/node/header_test.go b/internal/trie/node/header_test.go index 5258158b1d..934131df93 100644 --- a/internal/trie/node/header_test.go +++ b/internal/trie/node/header_test.go @@ -226,7 +226,7 @@ func Test_encodeHeader(t *testing.T) { }) } - t.Run("partial key length is too big", func(t *testing.T) { + t.Run("partial_key_length_is_too_big", func(t *testing.T) { t.Parallel() const keyLength = uint(maxPartialKeyLength) + 1 diff --git a/lib/babe/verify_test.go b/lib/babe/verify_test.go index e889d66ecc..ef0a9a5ba0 100644 --- a/lib/babe/verify_test.go +++ b/lib/babe/verify_test.go @@ -771,7 +771,7 @@ func Test_verifier_verifyBlockEquivocation(t *testing.T) { SecondHeader: *testHeader5, } - mockRuntime.EXPECT().BabeGenerateKeyOwnershipProof(slot, offenderPublicKey).Return(keyOwnershipProof, nil).Times(1) + mockRuntime.EXPECT().BabeGenerateKeyOwnershipProof(slot, offenderPublicKey).Return(keyOwnershipProof, nil) mockRuntime.EXPECT().BabeSubmitReportEquivocationUnsignedExtrinsic(equivocationProof, keyOwnershipProof).Return(nil) mockBlockState5.EXPECT().GetRuntime(existingHeader.Hash()).Return(mockRuntime, nil) @@ -917,7 +917,7 @@ func Test_verifier_submitAndReportEquivocation(t *testing.T) { FirstHeader: *firstHeader, SecondHeader: *secondHeader, } - mockRuntime.EXPECT().BabeGenerateKeyOwnershipProof(slot, offenderPublicKey).Return(keyOwnershipProof, nil).Times(1) + mockRuntime.EXPECT().BabeGenerateKeyOwnershipProof(slot, offenderPublicKey).Return(keyOwnershipProof, nil) mockRuntime.EXPECT().BabeSubmitReportEquivocationUnsignedExtrinsic(equivocationProof, keyOwnershipProof).Return(nil) mockBlockState.EXPECT().BestBlockHash().Return(firstHash).Times(2) diff --git a/lib/grandpa/commits_tracker_test.go b/lib/grandpa/commits_tracker_test.go index 7f69d17da9..8a948d5669 100644 --- a/lib/grandpa/commits_tracker_test.go +++ b/lib/grandpa/commits_tracker_test.go @@ -97,7 +97,7 @@ func Test_commitsTracker_cleanup(t *testing.T) { func Test_commitsTracker_overriding(t *testing.T) { t.Parallel() - t.Run("override oldest", func(t *testing.T) { + t.Run("override_oldest", func(t *testing.T) { t.Parallel() const capacity = 2 @@ -122,7 +122,7 @@ func Test_commitsTracker_overriding(t *testing.T) { }) }) - 
t.Run("override newest", func(t *testing.T) { + t.Run("override_newest", func(t *testing.T) { t.Parallel() const capacity = 2 @@ -151,7 +151,7 @@ func Test_commitsTracker_overriding(t *testing.T) { func Test_commitsTracker_delete(t *testing.T) { t.Parallel() - t.Run("non existing block hash", func(t *testing.T) { + t.Run("non_existing_block_hash", func(t *testing.T) { t.Parallel() const capacity = 2 @@ -170,7 +170,7 @@ func Test_commitsTracker_delete(t *testing.T) { }) }) - t.Run("existing block hash", func(t *testing.T) { + t.Run("existing_block_hash", func(t *testing.T) { t.Parallel() const capacity = 2 diff --git a/lib/grandpa/message_handler_integration_test.go b/lib/grandpa/message_handler_integration_test.go index f58f98a481..831d863e64 100644 --- a/lib/grandpa/message_handler_integration_test.go +++ b/lib/grandpa/message_handler_integration_test.go @@ -192,7 +192,6 @@ func TestMessageHandler_VoteMessage(t *testing.T) { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() h := NewMessageHandler(gs, st.Block, telemetryMock) out, err := h.handleMessage("", vm) @@ -216,7 +215,6 @@ func TestMessageHandler_NeighbourMessage(t *testing.T) { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() h := NewMessageHandler(gs, st.Block, telemetryMock) @@ -273,7 +271,6 @@ func TestMessageHandler_VerifyJustification_InvalidSig(t *testing.T) { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() // scale encode the message to assert the wrapped error message expectedFullVote := FullVote{ @@ -325,10 +322,6 @@ func TestMessageHandler_CommitMessage_NoCatchUpRequest_ValidSig(t *testing.T) { err = st.Block.AddBlock(block) require.NoError(t, err) - ctrl := gomock.NewController(t) - telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() - out, err := gs.messageHandler.handleMessage("", fm) require.NoError(t, err) require.Nil(t, out) @@ -359,7 +352,6 @@ func TestMessageHandler_CommitMessage_NoCatchUpRequest_MinVoteError(t *testing.T ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() h := NewMessageHandler(gs, st.Block, telemetryMock) out, err := h.handleMessage("", fm) @@ -400,7 +392,6 @@ func TestMessageHandler_CommitMessage_WithCatchUpRequest(t *testing.T) { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() h := NewMessageHandler(gs, st.Block, telemetryMock) _, err = h.handleMessage("", fm) @@ -419,7 +410,6 @@ func TestMessageHandler_CatchUpRequest_InvalidRound(t *testing.T) { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() h := NewMessageHandler(gs, st.Block, telemetryMock) _, err = h.handleMessage("", req) @@ -438,7 +428,6 @@ func TestMessageHandler_CatchUpRequest_InvalidSetID(t *testing.T) { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() h := NewMessageHandler(gs, st.Block, telemetryMock) _, err = h.handleMessage("", req) @@ -513,7 +502,6 @@ func TestMessageHandler_CatchUpRequest_WithResponse(t *testing.T) { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - 
telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() h := NewMessageHandler(gs, st.Block, telemetryMock) out, err := h.handleMessage("", req) @@ -530,7 +518,6 @@ func TestVerifyJustification(t *testing.T) { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() gs, st := newTestService(t, aliceKeyPair) h := NewMessageHandler(gs, st.Block, telemetryMock) @@ -555,7 +542,6 @@ func TestVerifyJustification_InvalidSignature(t *testing.T) { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() gs, st := newTestService(t, aliceKeyPair) h := NewMessageHandler(gs, st.Block, telemetryMock) @@ -591,7 +577,6 @@ func TestVerifyJustification_InvalidAuthority(t *testing.T) { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() gs, st := newTestService(t, aliceKeyPair) h := NewMessageHandler(gs, st.Block, telemetryMock) @@ -625,7 +610,6 @@ func TestMessageHandler_VerifyPreVoteJustification(t *testing.T) { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() gs, st := newTestService(t, aliceKeyPair) @@ -661,7 +645,6 @@ func TestMessageHandler_VerifyPreCommitJustification(t *testing.T) { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() gs, st := newTestService(t, aliceKeyPair) @@ -706,7 +689,6 @@ func TestMessageHandler_HandleCatchUpResponse(t *testing.T) { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() h := NewMessageHandler(gs, st.Block, telemetryMock) @@ -1248,7 +1230,6 @@ func Test_VerifyCommitMessageJustification_ShouldRemoveEquivocatoryVotes(t *test gs, st := newTestService(t, aliceKeyPair) ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() h := NewMessageHandler(gs, st.Block, telemetryMock) @@ -1320,7 +1301,6 @@ func Test_VerifyPrevoteJustification_CountEquivocatoryVoters(t *testing.T) { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() gs, st := newTestService(t, aliceKeyPair) h := NewMessageHandler(gs, st.Block, telemetryMock) @@ -1400,7 +1380,6 @@ func Test_VerifyPreCommitJustification(t *testing.T) { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() telemetryMock. EXPECT(). diff --git a/lib/grandpa/network_integration_test.go b/lib/grandpa/network_integration_test.go index 7a902b2dcf..8a47516201 100644 --- a/lib/grandpa/network_integration_test.go +++ b/lib/grandpa/network_integration_test.go @@ -61,12 +61,6 @@ func TestHandleNetworkMessage(t *testing.T) { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) - telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() - - telemetryMock. - EXPECT(). - SendMessage(gomock.Any()). 
- AnyTimes() h := NewMessageHandler(gs, st.Block, telemetryMock) gs.messageHandler = h diff --git a/lib/grandpa/votes_tracker_test.go b/lib/grandpa/votes_tracker_test.go index ecfd134ca8..91a9b220b8 100644 --- a/lib/grandpa/votes_tracker_test.go +++ b/lib/grandpa/votes_tracker_test.go @@ -85,7 +85,7 @@ func Test_newVotesTracker(t *testing.T) { func Test_votesTracker_cleanup(t *testing.T) { t.Parallel() - t.Run("in same block", func(t *testing.T) { + t.Run("in_same_block", func(t *testing.T) { t.Parallel() const capacity = 2 @@ -117,7 +117,7 @@ func Test_votesTracker_cleanup(t *testing.T) { }) }) - t.Run("remove entire block", func(t *testing.T) { + t.Run("remove_entire_block", func(t *testing.T) { t.Parallel() const capacity = 2 @@ -157,7 +157,7 @@ func Test_votesTracker_cleanup(t *testing.T) { func Test_votesTracker_overriding(t *testing.T) { t.Parallel() - t.Run("override oldest", func(t *testing.T) { + t.Run("override_oldest", func(t *testing.T) { t.Parallel() const capacity = 2 @@ -188,7 +188,7 @@ func Test_votesTracker_overriding(t *testing.T) { }) }) - t.Run("override newest", func(t *testing.T) { + t.Run("override_newest", func(t *testing.T) { t.Parallel() const capacity = 2 @@ -223,7 +223,7 @@ func Test_votesTracker_overriding(t *testing.T) { func Test_votesTracker_delete(t *testing.T) { t.Parallel() - t.Run("non existing block hash", func(t *testing.T) { + t.Run("non_existing_block_hash", func(t *testing.T) { t.Parallel() const capacity = 2 @@ -248,7 +248,7 @@ func Test_votesTracker_delete(t *testing.T) { }) }) - t.Run("existing block hash", func(t *testing.T) { + t.Run("existing_block_hash", func(t *testing.T) { t.Parallel() const capacity = 2 diff --git a/lib/runtime/wasmer/config_test.go b/lib/runtime/wasmer/config_test.go index cdf2e2dcad..c543e4bbe5 100644 --- a/lib/runtime/wasmer/config_test.go +++ b/lib/runtime/wasmer/config_test.go @@ -12,7 +12,7 @@ import ( ) func Test_Config_SetTestVersion(t *testing.T) { - t.Run("panics with nil *testing.T", func(t *testing.T) { + t.Run("panics_with_nil_*testing.T", func(t *testing.T) { var c Config assert.PanicsWithValue(t, "*testing.T argument cannot be nil. 
Please don't use this function outside of Go tests.", @@ -21,7 +21,7 @@ func Test_Config_SetTestVersion(t *testing.T) { }) }) - t.Run("set test version", func(t *testing.T) { + t.Run("set_test_version", func(t *testing.T) { var c Config testVersion := runtime.Version{ StateVersion: 1, diff --git a/lib/trie/trie_test.go b/lib/trie/trie_test.go index 0f6544ca74..68f907f650 100644 --- a/lib/trie/trie_test.go +++ b/lib/trie/trie_test.go @@ -599,7 +599,7 @@ func entriesMatch(t *testing.T, expected, actual map[string][]byte) { func Test_Trie_Entries(t *testing.T) { t.Parallel() - t.Run("simple root", func(t *testing.T) { + t.Run("simple_root", func(t *testing.T) { t.Parallel() root := &Node{ @@ -632,7 +632,7 @@ func Test_Trie_Entries(t *testing.T) { entriesMatch(t, expectedEntries, entries) }) - t.Run("custom root", func(t *testing.T) { + t.Run("custom_root", func(t *testing.T) { t.Parallel() root := &Node{ @@ -689,7 +689,7 @@ func Test_Trie_Entries(t *testing.T) { entriesMatch(t, expectedEntries, entries) }) - t.Run("end to end", func(t *testing.T) { + t.Run("end_to_end", func(t *testing.T) { t.Parallel() trie := Trie{ diff --git a/pkg/scale/encode_test.go b/pkg/scale/encode_test.go index 1d70a7fc53..92de411919 100644 --- a/pkg/scale/encode_test.go +++ b/pkg/scale/encode_test.go @@ -82,7 +82,7 @@ func Test_MustMarshal(t *testing.T) { assert.Equal(t, []byte{4, 1}, b) }) - t.Run("panics on error", func(t *testing.T) { + t.Run("panics_on_error", func(t *testing.T) { t.Parallel() const expected = "unsupported type: chan struct {}" diff --git a/tests/rpc/rpc_05-state_test.go b/tests/rpc/rpc_05-state_test.go index 66ae887f36..734e65774b 100644 --- a/tests/rpc/rpc_05-state_test.go +++ b/tests/rpc/rpc_05-state_test.go @@ -88,7 +88,7 @@ func TestStateRPCResponseValidation(t *testing.T) { //nolint:tparallel // TODO assert response }) - t.Run("valid block hash state_getPairs", func(t *testing.T) { + t.Run("valid_block_hash_state_getPairs", func(t *testing.T) { t.Parallel() params := fmt.Sprintf(`["0x", "%s"]`, blockHash) @@ -99,7 +99,7 @@ func TestStateRPCResponseValidation(t *testing.T) { //nolint:tparallel // TODO assert response }) - t.Run("valid block hash state_getMetadata", func(t *testing.T) { + t.Run("valid_block_hash_state_getMetadata", func(t *testing.T) { t.Parallel() params := fmt.Sprintf(`["%s"]`, blockHash) @@ -110,7 +110,7 @@ func TestStateRPCResponseValidation(t *testing.T) { //nolint:tparallel // TODO assert response }) - t.Run("valid block hash state_getRuntimeVersion", func(t *testing.T) { + t.Run("valid_block_hash_state_getRuntimeVersion", func(t *testing.T) { t.Parallel() var response modules.StateRuntimeVersionResponse @@ -120,7 +120,7 @@ func TestStateRPCResponseValidation(t *testing.T) { //nolint:tparallel // TODO assert response }) - t.Run("optional params hash state_getPairs", func(t *testing.T) { + t.Run("optional_params_hash_state_getPairs", func(t *testing.T) { t.Parallel() var response modules.StatePairResponse @@ -130,7 +130,7 @@ func TestStateRPCResponseValidation(t *testing.T) { //nolint:tparallel // TODO assert response }) - t.Run("optional param hash state_getMetadata", func(t *testing.T) { + t.Run("optional_param_hash_state_getMetadata", func(t *testing.T) { t.Parallel() var response modules.StateMetadataResponse @@ -140,7 +140,7 @@ func TestStateRPCResponseValidation(t *testing.T) { //nolint:tparallel // TODO assert response }) - t.Run("optional param value as null state_getRuntimeVersion", func(t *testing.T) { + 
t.Run("optional_param_value_as_null_state_getRuntimeVersion", func(t *testing.T) { t.Parallel() var response modules.StateRuntimeVersionResponse @@ -150,7 +150,7 @@ func TestStateRPCResponseValidation(t *testing.T) { //nolint:tparallel // TODO assert response }) - t.Run("optional param value as null state_getMetadata", func(t *testing.T) { + t.Run("optional_param_value_as_null_state_getMetadata", func(t *testing.T) { t.Parallel() var response modules.StateMetadataResponse @@ -160,7 +160,7 @@ func TestStateRPCResponseValidation(t *testing.T) { //nolint:tparallel // TODO assert response }) - t.Run("optional param value as null state_getPairs", func(t *testing.T) { + t.Run("optional_param_value_as_null_state_getPairs", func(t *testing.T) { t.Parallel() var response modules.StatePairResponse From d9b5d19cd8865ecd197b77c2cc4822f0c2e5b86a Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 5 Jun 2023 11:06:24 -0400 Subject: [PATCH 055/140] chore: remove unneeded sync files --- dot/sync/benchmark_test.go | 239 ------ dot/sync/block_queue_test.go | 252 ------- dot/sync/chain_processor.go | 300 -------- dot/sync/chain_processor_test.go | 1181 ------------------------------ 4 files changed, 1972 deletions(-) delete mode 100644 dot/sync/benchmark_test.go delete mode 100644 dot/sync/block_queue_test.go delete mode 100644 dot/sync/chain_processor.go delete mode 100644 dot/sync/chain_processor_test.go diff --git a/dot/sync/benchmark_test.go b/dot/sync/benchmark_test.go deleted file mode 100644 index eae329300e..0000000000 --- a/dot/sync/benchmark_test.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2022 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "container/ring" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func Test_newSyncBenchmarker(t *testing.T) { - t.Parallel() - - t.Run("10_samples_to_keep", func(t *testing.T) { - t.Parallel() - const samplesToKeep = 10 - actual := newSyncBenchmarker(samplesToKeep) - - expected := &syncBenchmarker{ - blocksPerSecond: ring.New(samplesToKeep), - samplesToKeep: samplesToKeep, - } - - assert.Equal(t, expected, actual) - }) - - t.Run("panics_on_0_sample_to_keep", func(t *testing.T) { - t.Parallel() - const samplesToKeep = 0 - assert.PanicsWithValue(t, "cannot have 0 samples to keep", func() { - newSyncBenchmarker(samplesToKeep) - }) - }) -} - -func Test_syncBenchmarker_begin(t *testing.T) { - t.Parallel() - - const startSec = 1000 - start := time.Unix(startSec, 0) - const startBlock = 10 - - b := syncBenchmarker{} - b.begin(start, startBlock) - - expected := syncBenchmarker{ - start: start, - startBlock: startBlock, - } - - assert.Equal(t, expected, b) -} - -func Test_syncBenchmarker_end(t *testing.T) { - t.Parallel() - - const startSec = 1000 - start := time.Unix(startSec, 0) - - const nowSec = 1010 - now := time.Unix(nowSec, 0) - - const ( - startBlock = 10 - endBlock = 12 - ) - - const ringCap = 3 - - blocksPerSecond := ring.New(ringCap) - blocksPerSecond.Value = 1.00 - blocksPerSecond = blocksPerSecond.Next() - - b := syncBenchmarker{ - start: start, - startBlock: startBlock, - blocksPerSecond: blocksPerSecond, - } - b.end(now, endBlock) - - expectedBlocksPerSecond := ring.New(ringCap) - expectedBlocksPerSecond.Value = 1.00 - expectedBlocksPerSecond = expectedBlocksPerSecond.Next() - expectedBlocksPerSecond.Value = 0.2 - expectedBlocksPerSecond = expectedBlocksPerSecond.Next() - - expected := syncBenchmarker{ - start: start, - startBlock: startBlock, - blocksPerSecond: 
expectedBlocksPerSecond, - } - - assert.Equal(t, expected, b) -} - -func Test_syncBenchmarker_average(t *testing.T) { - t.Parallel() - - testCases := map[string]struct { - values []float64 - ringCap int - average float64 - }{ - // zero size ring is not possible due to constructor check - "empty_ring": { - ringCap: 1, - }, - "single_element_in_one-size_ring": { - values: []float64{1.1}, - ringCap: 1, - average: 1.1, - }, - "single_element_in_two-size_ring": { - values: []float64{1.1}, - ringCap: 2, - average: 1.1, - }, - "two_elements_in_two-size_ring": { - values: []float64{1.0, 2.0}, - ringCap: 2, - average: 1.5, - }, - } - - for name, testCase := range testCases { - testCase := testCase - t.Run(name, func(t *testing.T) { - t.Parallel() - - blocksPerSecond := ring.New(testCase.ringCap) - for _, value := range testCase.values { - blocksPerSecond.Value = value - blocksPerSecond = blocksPerSecond.Next() - } - - benchmarker := syncBenchmarker{ - blocksPerSecond: blocksPerSecond, - samplesToKeep: testCase.ringCap, - } - - avg := benchmarker.average() - - assert.Equal(t, testCase.average, avg) - }) - } -} - -func Test_syncBenchmarker_mostRecentAverage(t *testing.T) { - t.Parallel() - - testCases := map[string]struct { - values []float64 - ringCap int - average float64 - }{ - // zero size ring is not possible due to constructor check - "empty_ring": { - ringCap: 1, - }, - "single_element_in_one-size_ring": { - values: []float64{1.1}, - ringCap: 1, - average: 1.1, - }, - "single_element_in_two-size_ring": { - values: []float64{1.1}, - ringCap: 2, - average: 1.1, - }, - "two_elements_in_two-size_ring": { - values: []float64{1.0, 2.0}, - ringCap: 2, - average: 2.0, - }, - "three_elements_in_two-size_ring": { - values: []float64{1.0, 2.0, 3.0}, - ringCap: 2, - average: 3.0, - }, - } - - for name, testCase := range testCases { - testCase := testCase - t.Run(name, func(t *testing.T) { - t.Parallel() - - blocksPerSecond := ring.New(testCase.ringCap) - for _, value := range testCase.values { - blocksPerSecond.Value = value - blocksPerSecond = blocksPerSecond.Next() - } - - benchmarker := syncBenchmarker{ - blocksPerSecond: blocksPerSecond, - } - - avg := benchmarker.mostRecentAverage() - - assert.Equal(t, testCase.average, avg) - }) - } -} - -func Test_syncBenchmarker(t *testing.T) { - t.Parallel() - - const samplesToKeep = 5 - benchmarker := newSyncBenchmarker(samplesToKeep) - - const initialBlock = 10 - timeZero := time.Unix(0, 0) - const timeIncrement = time.Second - const baseBlocksIncrement uint = 1 - - startTime := timeZero - endTime := startTime.Add(timeIncrement) - var block uint = initialBlock - - const samples = 10 - for i := 0; i < samples; i++ { - benchmarker.begin(startTime, block) - block += baseBlocksIncrement + uint(i) - benchmarker.end(endTime, block) - - startTime = startTime.Add(timeIncrement) - endTime = startTime.Add(timeIncrement) - } - - avg := benchmarker.average() - const expectedAvg = 8.0 - assert.Equal(t, expectedAvg, avg) - - mostRecentAvg := benchmarker.mostRecentAverage() - const expectedMostRecentAvg = 10.0 - assert.Equal(t, expectedMostRecentAvg, mostRecentAvg) -} diff --git a/dot/sync/block_queue_test.go b/dot/sync/block_queue_test.go deleted file mode 100644 index cff9b181b3..0000000000 --- a/dot/sync/block_queue_test.go +++ /dev/null @@ -1,252 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "context" - "sync" - "testing" - "time" - - "github.com/ChainSafe/gossamer/dot/types" - 
"github.com/ChainSafe/gossamer/lib/common" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func Test_newBlockQueue(t *testing.T) { - t.Parallel() - - const capacity = 1 - bq := newBlockQueue(capacity) - - require.NotNil(t, bq.queue) - assert.Equal(t, 1, cap(bq.queue)) - assert.Equal(t, 0, len(bq.queue)) - bq.queue = nil - - expectedBlockQueue := &blockQueue{ - hashesSet: make(map[common.Hash]struct{}, capacity), - } - assert.Equal(t, expectedBlockQueue, bq) -} - -func Test_blockQueue_push(t *testing.T) { - t.Parallel() - - const capacity = 1 - bq := newBlockQueue(capacity) - blockData := &types.BlockData{ - Hash: common.Hash{1}, - } - - bq.push(blockData) - - // cannot compare channels - require.NotNil(t, bq.queue) - assert.Len(t, bq.queue, 1) - - receivedBlockData := <-bq.queue - expectedBlockData := &types.BlockData{ - Hash: common.Hash{1}, - } - assert.Equal(t, expectedBlockData, receivedBlockData) - - bq.queue = nil - expectedBlockQueue := &blockQueue{ - hashesSet: map[common.Hash]struct{}{{1}: {}}, - } - assert.Equal(t, expectedBlockQueue, bq) -} - -func Test_blockQueue_pop(t *testing.T) { - t.Parallel() - - t.Run("context_canceled", func(t *testing.T) { - t.Parallel() - - ctx, cancel := context.WithCancel(context.Background()) - cancel() - - const capacity = 1 - bq := newBlockQueue(capacity) - - blockData, err := bq.pop(ctx) - assert.Nil(t, blockData) - assert.ErrorIs(t, err, context.Canceled) - }) - - t.Run("get_block_data_after_waiting", func(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - const capacity = 1 - bq := newBlockQueue(capacity) - - const afterDuration = 5 * time.Millisecond - time.AfterFunc(afterDuration, func() { - blockData := &types.BlockData{ - Hash: common.Hash{1}, - } - bq.push(blockData) - }) - - blockData, err := bq.pop(ctx) - - expectedBlockData := &types.BlockData{ - Hash: common.Hash{1}, - } - assert.Equal(t, expectedBlockData, blockData) - assert.NoError(t, err) - - assert.Len(t, bq.queue, 0) - bq.queue = nil - expectedBlockQueue := &blockQueue{ - hashesSet: map[common.Hash]struct{}{}, - } - assert.Equal(t, expectedBlockQueue, bq) - }) -} - -func Test_blockQueue_has(t *testing.T) { - t.Parallel() - - testCases := map[string]struct { - blockQueue *blockQueue - blockHash common.Hash - has bool - }{ - "absent": { - blockQueue: &blockQueue{ - hashesSet: map[common.Hash]struct{}{}, - }, - blockHash: common.Hash{1}, - }, - "exists": { - blockQueue: &blockQueue{ - hashesSet: map[common.Hash]struct{}{{1}: {}}, - }, - blockHash: common.Hash{1}, - has: true, - }, - } - - for name, tc := range testCases { - testCase := tc - t.Run(name, func(t *testing.T) { - t.Parallel() - - has := testCase.blockQueue.has(testCase.blockHash) - assert.Equal(t, testCase.has, has) - }) - } -} - -func Test_lockQueue_endToEnd(t *testing.T) { - t.Parallel() - - const capacity = 10 - blockQueue := newBlockQueue(capacity) - - newBlockData := func(i byte) *types.BlockData { - return &types.BlockData{ - Hash: common.Hash{i}, - } - } - - blockQueue.push(newBlockData(1)) - blockQueue.push(newBlockData(2)) - blockQueue.push(newBlockData(3)) - - blockData, err := blockQueue.pop(context.Background()) - assert.Equal(t, newBlockData(1), blockData) - assert.NoError(t, err) - - has := blockQueue.has(newBlockData(2).Hash) - assert.True(t, has) - has = blockQueue.has(newBlockData(3).Hash) - assert.True(t, has) - - blockQueue.push(newBlockData(4)) - - has = blockQueue.has(newBlockData(4).Hash) - assert.True(t, has) - - blockData, err = 
blockQueue.pop(context.Background()) - assert.Equal(t, newBlockData(2), blockData) - assert.NoError(t, err) - - // drain queue - for len(blockQueue.queue) > 0 { - <-blockQueue.queue - } -} - -func Test_lockQueue_threadSafety(t *testing.T) { - // This test consists in checking for concurrent access - // using the -race detector. - t.Parallel() - - var startWg, endWg sync.WaitGroup - ctx, cancel := context.WithCancel(context.Background()) - - const operations = 3 - const parallelism = 3 - const goroutines = parallelism * operations - startWg.Add(goroutines) - endWg.Add(goroutines) - - const testDuration = 50 * time.Millisecond - go func() { - timer := time.NewTimer(time.Hour) - startWg.Wait() - _ = timer.Reset(testDuration) - <-timer.C - cancel() - }() - - runInLoop := func(f func()) { - defer endWg.Done() - startWg.Done() - startWg.Wait() - for ctx.Err() == nil { - f() - } - } - - const capacity = 10 - blockQueue := newBlockQueue(capacity) - blockData := &types.BlockData{ - Hash: common.Hash{1}, - } - blockHash := common.Hash{1} - - endWg.Add(1) - go func() { - defer endWg.Done() - <-ctx.Done() - // Empty queue channel to make sure `push` does not block - // when the context is cancelled. - for len(blockQueue.queue) > 0 { - <-blockQueue.queue - } - }() - - for i := 0; i < parallelism; i++ { - go runInLoop(func() { - blockQueue.push(blockData) - }) - - go runInLoop(func() { - _, _ = blockQueue.pop(ctx) - }) - - go runInLoop(func() { - _ = blockQueue.has(blockHash) - }) - } - - endWg.Wait() -} diff --git a/dot/sync/chain_processor.go b/dot/sync/chain_processor.go deleted file mode 100644 index c90e9b7159..0000000000 --- a/dot/sync/chain_processor.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "bytes" - "context" - "errors" - "fmt" - - "github.com/ChainSafe/gossamer/dot/telemetry" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/blocktree" -) - -// ChainProcessor processes ready blocks. -// it is implemented by *chainProcessor -type ChainProcessor interface { - processReadyBlocks() - stop() -} - -type chainProcessor struct { - ctx context.Context - cancel context.CancelFunc - - chainSync ChainSync - - // blocks that are ready for processing. ie. their parent is known, or their parent is ahead - // of them within this channel and thus will be processed first - readyBlocks *blockQueue - - // set of block not yet ready to be processed. 
- // blocks are placed here if they fail to be processed due to missing parent block - pendingBlocks DisjointBlockSet - - blockState BlockState - storageState StorageState - transactionState TransactionState - babeVerifier BabeVerifier - finalityGadget FinalityGadget - blockImportHandler BlockImportHandler - telemetry Telemetry -} - -type chainProcessorConfig struct { - readyBlocks *blockQueue - pendingBlocks DisjointBlockSet - syncer ChainSync - blockState BlockState - storageState StorageState - transactionState TransactionState - babeVerifier BabeVerifier - finalityGadget FinalityGadget - blockImportHandler BlockImportHandler - telemetry Telemetry - badBlocks []string -} - -func newChainProcessor(cfg chainProcessorConfig) *chainProcessor { - ctx, cancel := context.WithCancel(context.Background()) - - return &chainProcessor{ - ctx: ctx, - cancel: cancel, - readyBlocks: cfg.readyBlocks, - pendingBlocks: cfg.pendingBlocks, - chainSync: cfg.syncer, - blockState: cfg.blockState, - storageState: cfg.storageState, - transactionState: cfg.transactionState, - babeVerifier: cfg.babeVerifier, - finalityGadget: cfg.finalityGadget, - blockImportHandler: cfg.blockImportHandler, - telemetry: cfg.telemetry, - } -} - -func (s *chainProcessor) stop() { - s.cancel() -} - -func (s *chainProcessor) processReadyBlocks() { - for { - bd, err := s.readyBlocks.pop(s.ctx) - if err != nil { - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return - } - panic(fmt.Sprintf("unhandled error: %s", err)) - } - - if err := s.processBlockData(*bd); err != nil { - // depending on the error, we might want to save this block for later - if !errors.Is(err, errFailedToGetParent) && !errors.Is(err, blocktree.ErrParentNotFound) { - logger.Errorf("block data processing for block with hash %s failed: %s", bd.Hash, err) - continue - } - - logger.Tracef("block data processing for block with hash %s failed: %s", bd.Hash, err) - if err := s.pendingBlocks.addBlock(&types.Block{ - Header: *bd.Header, - Body: *bd.Body, - }); err != nil { - logger.Debugf("failed to re-add block to pending blocks: %s", err) - } - } - } -} - -// processBlockData processes the BlockData from a BlockResponse and -// returns the index of the last BlockData it handled on success, -// or the index of the block data that errored on failure. 
-func (c *chainProcessor) processBlockData(blockData types.BlockData) error { - logger.Debugf("processing block data with hash %s", blockData.Hash) - - headerInState, err := c.blockState.HasHeader(blockData.Hash) - if err != nil { - return fmt.Errorf("checking if block state has header: %w", err) - } - - bodyInState, err := c.blockState.HasBlockBody(blockData.Hash) - if err != nil { - return fmt.Errorf("checking if block state has body: %w", err) - } - - // while in bootstrap mode we don't need to broadcast block announcements - announceImportedBlock := c.chainSync.syncState() == tip - if headerInState && bodyInState { - err = c.processBlockDataWithStateHeaderAndBody(blockData, announceImportedBlock) - if err != nil { - return fmt.Errorf("processing block data with header and "+ - "body in block state: %w", err) - } - return nil - } - - if blockData.Header != nil { - if blockData.Body != nil { - err = c.processBlockDataWithHeaderAndBody(blockData, announceImportedBlock) - if err != nil { - return fmt.Errorf("processing block data with header and body: %w", err) - } - logger.Debugf("block with hash %s processed", blockData.Hash) - } - - if blockData.Justification != nil && len(*blockData.Justification) > 0 { - err = c.handleJustification(blockData.Header, *blockData.Justification) - if err != nil { - return fmt.Errorf("handling justification: %w", err) - } - } - } - - err = c.blockState.CompareAndSetBlockData(&blockData) - if err != nil { - return fmt.Errorf("comparing and setting block data: %w", err) - } - - return nil -} - -func (c *chainProcessor) processBlockDataWithStateHeaderAndBody(blockData types.BlockData, - announceImportedBlock bool) (err error) { - // TODO: fix this; sometimes when the node shuts down the "best block" isn't stored properly, - // so when the node restarts it has blocks higher than what it thinks is the best, causing it not to sync - // if we update the node to only store finalised blocks in the database, this should be fixed and the entire - // code block can be removed (#1784) - block, err := c.blockState.GetBlockByHash(blockData.Hash) - if err != nil { - return fmt.Errorf("getting block by hash: %w", err) - } - - err = c.blockState.AddBlockToBlockTree(block) - if errors.Is(err, blocktree.ErrBlockExists) { - logger.Debugf( - "block number %d with hash %s already exists in block tree, skipping it.", - block.Header.Number, blockData.Hash) - return nil - } else if err != nil { - return fmt.Errorf("adding block to blocktree: %w", err) - } - - if blockData.Justification != nil && len(*blockData.Justification) > 0 { - err = c.handleJustification(&block.Header, *blockData.Justification) - if err != nil { - return fmt.Errorf("handling justification: %w", err) - } - } - - // TODO: this is probably unnecessary, since the state is already in the database - // however, this case shouldn't be hit often, since it's only hit if the node state - // is rewinded or if the node shuts down unexpectedly (#1784) - state, err := c.storageState.TrieState(&block.Header.StateRoot) - if err != nil { - return fmt.Errorf("loading trie state: %w", err) - } - - err = c.blockImportHandler.HandleBlockImport(block, state, announceImportedBlock) - if err != nil { - return fmt.Errorf("handling block import: %w", err) - } - - return nil -} - -func (c *chainProcessor) processBlockDataWithHeaderAndBody(blockData types.BlockData, - announceImportedBlock bool) (err error) { - err = c.babeVerifier.VerifyBlock(blockData.Header) - if err != nil { - return fmt.Errorf("babe verifying block: %w", err) - } - 
- c.handleBody(blockData.Body) - - block := &types.Block{ - Header: *blockData.Header, - Body: *blockData.Body, - } - - err = c.handleBlock(block, announceImportedBlock) - if err != nil { - return fmt.Errorf("handling block: %w", err) - } - - return nil -} - -// handleHeader handles block bodies included in BlockResponses -func (s *chainProcessor) handleBody(body *types.Body) { - for _, ext := range *body { - s.transactionState.RemoveExtrinsic(ext) - } -} - -// handleHeader handles blocks (header+body) included in BlockResponses -func (s *chainProcessor) handleBlock(block *types.Block, announceImportedBlock bool) error { - parent, err := s.blockState.GetHeader(block.Header.ParentHash) - if err != nil { - return fmt.Errorf("%w: %s", errFailedToGetParent, err) - } - - s.storageState.Lock() - defer s.storageState.Unlock() - - ts, err := s.storageState.TrieState(&parent.StateRoot) - if err != nil { - return err - } - - root := ts.MustRoot() - if !bytes.Equal(parent.StateRoot[:], root[:]) { - panic("parent state root does not match snapshot state root") - } - - rt, err := s.blockState.GetRuntime(parent.Hash()) - if err != nil { - return err - } - - rt.SetContextStorage(ts) - - _, err = rt.ExecuteBlock(block) - if err != nil { - return fmt.Errorf("failed to execute block %d: %w", block.Header.Number, err) - } - - if err = s.blockImportHandler.HandleBlockImport(block, ts, announceImportedBlock); err != nil { - return err - } - - logger.Debugf("🔗 imported block number %d with hash %s", block.Header.Number, block.Header.Hash()) - - blockHash := block.Header.Hash() - s.telemetry.SendMessage(telemetry.NewBlockImport( - &blockHash, - block.Header.Number, - "NetworkInitialSync")) - - return nil -} - -func (s *chainProcessor) handleJustification(header *types.Header, justification []byte) (err error) { - logger.Debugf("handling justification for block %d...", header.Number) - - headerHash := header.Hash() - err = s.finalityGadget.VerifyBlockJustification(headerHash, justification) - if err != nil { - return fmt.Errorf("verifying block number %d justification: %w", header.Number, err) - } - - err = s.blockState.SetJustification(headerHash, justification) - if err != nil { - return fmt.Errorf("setting justification for block number %d: %w", header.Number, err) - } - - logger.Infof("🔨 finalised block number %d with hash %s", header.Number, headerHash) - return nil -} diff --git a/dot/sync/chain_processor_test.go b/dot/sync/chain_processor_test.go deleted file mode 100644 index 8e794767ab..0000000000 --- a/dot/sync/chain_processor_test.go +++ /dev/null @@ -1,1181 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "context" - "errors" - "testing" - - "github.com/ChainSafe/gossamer/dot/telemetry" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/blocktree" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/runtime/storage" - "github.com/ChainSafe/gossamer/lib/trie" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" -) - -func Test_chainProcessor_handleBlock(t *testing.T) { - t.Parallel() - mockError := errors.New("test mock error") - testHash := common.MustHexToHash("0x03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314") - testParentHash := common.MustHexToHash("0x7db9db5ed9967b80143100189ba69d9e4deab85ac3570e5df25686cabe32964a") - - tests := map[string]struct { - chainProcessorBuilder func(ctrl *gomock.Controller) chainProcessor - block 
*types.Block - announce bool - wantErr error - }{ - "handle_getHeader_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(nil, mockError) - chainProcessor.blockState = mockBlockState - return - }, - block: &types.Block{ - Body: types.Body{}, - }, - wantErr: errFailedToGetParent, - }, - "handle_trieState_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{}, nil) - chainProcessor.blockState = mockBlockState - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().Lock() - mockStorageState.EXPECT().TrieState(&common.Hash{}).Return(nil, mockError) - mockStorageState.EXPECT().Unlock() - chainProcessor.storageState = mockStorageState - return - }, - block: &types.Block{ - Body: types.Body{}, - }, - wantErr: mockError, - }, - "handle_getRuntime_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ - StateRoot: testHash, - }, nil) - mockBlockState.EXPECT().GetRuntime(testParentHash).Return(nil, mockError) - chainProcessor.blockState = mockBlockState - trieState := storage.NewTrieState(nil) - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().Lock() - mockStorageState.EXPECT().TrieState(&testHash).Return(trieState, nil) - mockStorageState.EXPECT().Unlock() - chainProcessor.storageState = mockStorageState - return - }, - block: &types.Block{ - Body: types.Body{}, - }, - wantErr: mockError, - }, - "handle_runtime_ExecuteBlock_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { - trieState := storage.NewTrieState(nil) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ - StateRoot: testHash, - }, nil) - mockInstance := NewMockInstance(ctrl) - mockInstance.EXPECT().SetContextStorage(trieState) - mockInstance.EXPECT().ExecuteBlock(&types.Block{Body: types.Body{}}).Return(nil, mockError) - mockBlockState.EXPECT().GetRuntime(testParentHash).Return(mockInstance, nil) - chainProcessor.blockState = mockBlockState - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().Lock() - mockStorageState.EXPECT().TrieState(&testHash).Return(trieState, nil) - mockStorageState.EXPECT().Unlock() - chainProcessor.storageState = mockStorageState - return - }, - block: &types.Block{ - Body: types.Body{}, - }, - wantErr: mockError, - }, - "handle_block_import_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { - trieState := storage.NewTrieState(nil) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ - StateRoot: testHash, - }, nil) - mockBlock := &types.Block{Body: types.Body{}} - mockInstance := NewMockInstance(ctrl) - mockInstance.EXPECT().SetContextStorage(trieState) - mockInstance.EXPECT().ExecuteBlock(mockBlock).Return(nil, nil) - mockBlockState.EXPECT().GetRuntime(testParentHash).Return(mockInstance, nil) - chainProcessor.blockState = mockBlockState - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().Lock() - 
mockStorageState.EXPECT().TrieState(&testHash).Return(trieState, nil) - mockStorageState.EXPECT().Unlock() - chainProcessor.storageState = mockStorageState - mockBlockImportHandler := NewMockBlockImportHandler(ctrl) - mockBlockImportHandler.EXPECT().HandleBlockImport(mockBlock, - trieState, false).Return(mockError) - chainProcessor.blockImportHandler = mockBlockImportHandler - return - }, - block: &types.Block{ - Body: types.Body{}, - }, - wantErr: mockError, - }, - "base_case": { - chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { - mockBlock := &types.Block{ - Body: types.Body{}, // empty slice of extrinsics - } - trieState := storage.NewTrieState(nil) - mockBlockState := NewMockBlockState(ctrl) - mockHeader := &types.Header{ - Number: 0, - StateRoot: trie.EmptyHash, - } - mockHeaderHash := mockHeader.Hash() - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(mockHeader, nil) - - mockInstance := NewMockInstance(ctrl) - mockInstance.EXPECT().SetContextStorage(trieState) - mockInstance.EXPECT().ExecuteBlock(mockBlock).Return(nil, nil) - mockBlockState.EXPECT().GetRuntime(mockHeaderHash).Return(mockInstance, nil) - chainProcessor.blockState = mockBlockState - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().Lock() - mockStorageState.EXPECT().Unlock() - mockStorageState.EXPECT().TrieState(&trie.EmptyHash).Return(trieState, nil) - chainProcessor.storageState = mockStorageState - mockBlockImportHandler := NewMockBlockImportHandler(ctrl) - mockBlockImportHandler.EXPECT().HandleBlockImport(mockBlock, trieState, false).Return(nil) - chainProcessor.blockImportHandler = mockBlockImportHandler - mockTelemetry := NewMockTelemetry(ctrl) - mockTelemetry.EXPECT().SendMessage(gomock.Any()) - chainProcessor.telemetry = mockTelemetry - return - }, - block: &types.Block{ - Header: types.Header{ - Number: 0, - }, - Body: types.Body{}, - }, - }, - "import_block_and_announce": { - announce: true, - chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { - mockBlock := &types.Block{ - Body: types.Body{}, // empty slice of extrinsics - } - trieState := storage.NewTrieState(nil) - mockBlockState := NewMockBlockState(ctrl) - mockHeader := &types.Header{ - Number: 0, - StateRoot: trie.EmptyHash, - } - mockHeaderHash := mockHeader.Hash() - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(mockHeader, nil) - - mockInstance := NewMockInstance(ctrl) - mockInstance.EXPECT().SetContextStorage(trieState) - mockInstance.EXPECT().ExecuteBlock(mockBlock).Return(nil, nil) - mockBlockState.EXPECT().GetRuntime(mockHeaderHash).Return(mockInstance, nil) - chainProcessor.blockState = mockBlockState - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().Lock() - mockStorageState.EXPECT().Unlock() - mockStorageState.EXPECT().TrieState(&trie.EmptyHash).Return(trieState, nil) - chainProcessor.storageState = mockStorageState - mockBlockImportHandler := NewMockBlockImportHandler(ctrl) - mockBlockImportHandler.EXPECT().HandleBlockImport(mockBlock, trieState, true).Return(nil) - chainProcessor.blockImportHandler = mockBlockImportHandler - mockTelemetry := NewMockTelemetry(ctrl) - mockTelemetry.EXPECT().SendMessage(gomock.Any()) - chainProcessor.telemetry = mockTelemetry - return - }, - block: &types.Block{ - Header: types.Header{ - Number: 0, - }, - Body: types.Body{}, - }, - }, - } - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - s := 
tt.chainProcessorBuilder(ctrl) - - err := s.handleBlock(tt.block, tt.announce) - assert.ErrorIs(t, err, tt.wantErr) - }) - } - t.Run("panics_on_different_parent_state_root", func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - bock := &types.Block{ - Header: types.Header{ - ParentHash: common.Hash{1}, - }, - } - blockState := NewMockBlockState(ctrl) - blockState.EXPECT().GetHeader(common.Hash{1}). - Return(&types.Header{StateRoot: common.Hash{2}}, nil) - trieState := storage.NewTrieState(nil) - storageState := NewMockStorageState(ctrl) - lockCall := storageState.EXPECT().Lock() - trieStateCall := storageState.EXPECT().TrieState(&common.Hash{2}). - Return(trieState, nil).After(lockCall) - storageState.EXPECT().Unlock().After(trieStateCall) - chainProcessor := &chainProcessor{ - blockState: blockState, - storageState: storageState, - } - const expectedPanicValue = "parent state root does not match snapshot state root" - assert.PanicsWithValue(t, expectedPanicValue, func() { - _ = chainProcessor.handleBlock(bock, false) - }) - }) -} - -func Test_chainProcessor_handleBody(t *testing.T) { - t.Parallel() - - testExtrinsics := []types.Extrinsic{{1, 2, 3}, {7, 8, 9, 0}, {0xa, 0xb}} - testBody := types.NewBody(testExtrinsics) - - t.Run("base_case", func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - - mockTransactionState := NewMockTransactionState(ctrl) - mockTransactionState.EXPECT().RemoveExtrinsic(testExtrinsics[0]) - mockTransactionState.EXPECT().RemoveExtrinsic(testExtrinsics[1]) - mockTransactionState.EXPECT().RemoveExtrinsic(testExtrinsics[2]) - processor := chainProcessor{ - transactionState: mockTransactionState, - } - processor.handleBody(testBody) - }) -} - -func Test_chainProcessor_handleJustification(t *testing.T) { - t.Parallel() - - header := &types.Header{ - Number: 2, - } - headerHash := header.Hash() - errTest := errors.New("test error") - - type args struct { - header *types.Header - justification []byte - } - tests := map[string]struct { - chainProcessorBuilder func(ctrl *gomock.Controller) chainProcessor - args args - sentinelError error - errorMessage string - }{ - "invalid_justification": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockFinalityGadget := NewMockFinalityGadget(ctrl) - mockFinalityGadget.EXPECT().VerifyBlockJustification(headerHash, - []byte(`x`)).Return(errTest) - return chainProcessor{ - finalityGadget: mockFinalityGadget, - } - }, - args: args{ - header: header, - justification: []byte(`x`), - }, - sentinelError: errTest, - errorMessage: "verifying block number 2 justification: test error", - }, - "set_justification_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().SetJustification(headerHash, []byte(`xx`)).Return(errTest) - mockFinalityGadget := NewMockFinalityGadget(ctrl) - mockFinalityGadget.EXPECT().VerifyBlockJustification(headerHash, []byte(`xx`)).Return(nil) - return chainProcessor{ - blockState: mockBlockState, - finalityGadget: mockFinalityGadget, - } - }, - args: args{ - header: header, - justification: []byte(`xx`), - }, - sentinelError: errTest, - errorMessage: "setting justification for block number 2: test error", - }, - "base_case_set": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().SetJustification(headerHash, []byte(`1234`)).Return(nil) - mockFinalityGadget := 
NewMockFinalityGadget(ctrl) - mockFinalityGadget.EXPECT().VerifyBlockJustification(headerHash, []byte(`1234`)).Return(nil) - return chainProcessor{ - blockState: mockBlockState, - finalityGadget: mockFinalityGadget, - } - }, - args: args{ - header: header, - justification: []byte(`1234`), - }, - }, - } - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - - processor := tt.chainProcessorBuilder(ctrl) - - err := processor.handleJustification(tt.args.header, tt.args.justification) - - assert.ErrorIs(t, err, tt.sentinelError) - if tt.sentinelError != nil { - assert.EqualError(t, err, tt.errorMessage) - } - }) - } -} - -func Test_chainProcessor_processBlockData(t *testing.T) { - t.Parallel() - - mockError := errors.New("mock test error") - - tests := map[string]struct { - chainProcessorBuilder func(ctrl *gomock.Controller) chainProcessor - blockData types.BlockData - expectedError error - }{ - "handle_has_header_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, mockError) - - return chainProcessor{ - blockState: mockBlockState, - } - }, - blockData: types.BlockData{}, - expectedError: mockError, - }, - "handle_has_block_body_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, mockError) - return chainProcessor{ - blockState: mockBlockState, - } - }, - blockData: types.BlockData{}, - expectedError: mockError, - }, - "handle_getBlockByHash_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(true, nil) - mockBlockState.EXPECT().GetBlockByHash(common.Hash{}).Return(nil, mockError) - - mockChainSync := NewMockChainSync(ctrl) - mockChainSync.EXPECT().syncState().Return(bootstrap) - return chainProcessor{ - blockState: mockBlockState, - chainSync: mockChainSync, - } - }, - blockData: types.BlockData{}, - expectedError: mockError, - }, - "handle_block_data_justification_!=_nil": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockBlock := &types.Block{ - Header: types.Header{ - Number: uint(1), - }, - } - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(true, nil) - mockBlockState.EXPECT().GetBlockByHash(common.Hash{}).Return(mockBlock, nil) - mockBlockState.EXPECT().AddBlockToBlockTree(&types.Block{ - Header: types.Header{Number: 1}}).Return(nil) - mockBlockState.EXPECT().SetJustification(common.MustHexToHash( - "0x6443a0b46e0412e626363028115a9f2cf963eeed526b8b33e5316f08b50d0dc3"), []byte{1, 2, 3}) - mockFinalityGadget := NewMockFinalityGadget(ctrl) - mockFinalityGadget.EXPECT().VerifyBlockJustification(common.MustHexToHash( - "0x6443a0b46e0412e626363028115a9f2cf963eeed526b8b33e5316f08b50d0dc3"), []byte{1, 2, - 3}).Return(nil) - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().TrieState(&common.Hash{}).Return(nil, nil) - - // given our current chain sync state is `tip` - // the `HandleBlockImport` method 
should expect - // true as the announce parameter - mockChainSync := NewMockChainSync(ctrl) - mockChainSync.EXPECT().syncState().Return(tip) - - mockBlockImportHandler := NewMockBlockImportHandler(ctrl) - mockBlockImportHandler.EXPECT().HandleBlockImport(mockBlock, - nil, true).Return(nil) - - return chainProcessor{ - chainSync: mockChainSync, - blockState: mockBlockState, - finalityGadget: mockFinalityGadget, - storageState: mockStorageState, - blockImportHandler: mockBlockImportHandler, - } - }, - blockData: types.BlockData{ - Justification: &[]byte{1, 2, 3}, - }, - }, - "handle_babe_verify_block_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}).Return(mockError) - - mockChainSync := NewMockChainSync(ctrl) - mockChainSync.EXPECT().syncState().Return(bootstrap) - - return chainProcessor{ - chainSync: mockChainSync, - blockState: mockBlockState, - babeVerifier: mockBabeVerifier, - } - }, - blockData: types.BlockData{ - Header: &types.Header{}, - Body: &types.Body{}, - }, - expectedError: mockError, - }, - "no_header_and_body_-_fail_to_handle_justification": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - blockState := NewMockBlockState(ctrl) - blockState.EXPECT().HasHeader(common.Hash{1}).Return(false, nil) - blockState.EXPECT().HasBlockBody(common.Hash{1}).Return(true, nil) - - finalityGadget := NewMockFinalityGadget(ctrl) - expectedBlockDataHeader := &types.Header{Number: 2} - expectedBlockDataHeaderHash := expectedBlockDataHeader.Hash() - finalityGadget.EXPECT(). - VerifyBlockJustification(expectedBlockDataHeaderHash, []byte{1, 2, 3}). 
- Return(mockError) - - mockChainSync := NewMockChainSync(ctrl) - mockChainSync.EXPECT().syncState().Return(bootstrap) - - return chainProcessor{ - chainSync: mockChainSync, - blockState: blockState, - finalityGadget: finalityGadget, - } - }, - blockData: types.BlockData{ - Hash: common.Hash{1}, - Header: &types.Header{Number: 2}, - Justification: &[]byte{1, 2, 3}, - }, - expectedError: mockError, - }, - "handle_compareAndSetBlockData_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().CompareAndSetBlockData(&types.BlockData{}).Return(mockError) - - mockChainSync := NewMockChainSync(ctrl) - mockChainSync.EXPECT().syncState().Return(bootstrap) - return chainProcessor{ - chainSync: mockChainSync, - blockState: mockBlockState, - } - }, - blockData: types.BlockData{}, - expectedError: mockError, - }, - "success_with_justification": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - stateRootHash := common.MustHexToHash("0x03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314") - runtimeHash := common.MustHexToHash("0x7db9db5ed9967b80143100189ba69d9e4deab85ac3570e5df25686cabe32964a") - mockTrieState := storage.NewTrieState(nil) - mockBlock := &types.Block{Header: types.Header{}, Body: types.Body{}} - - mockInstance := NewMockInstance(ctrl) - mockInstance.EXPECT().SetContextStorage(mockTrieState) - mockInstance.EXPECT().ExecuteBlock(mockBlock).Return(nil, nil) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ - Number: 0, - StateRoot: stateRootHash, - }, nil) - mockBlockState.EXPECT().SetJustification( - common.MustHexToHash("0xdcdd89927d8a348e00257e1ecc8617f45edb5118efff3ea2f9961b2ad9b7690a"), []byte{1, 2, 3}) - mockBlockState.EXPECT().CompareAndSetBlockData(gomock.AssignableToTypeOf(&types.BlockData{})) - mockBlockState.EXPECT().GetRuntime(runtimeHash).Return(mockInstance, nil) - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}) - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().Lock() - mockStorageState.EXPECT().TrieState(&stateRootHash).Return(mockTrieState, nil) - mockStorageState.EXPECT().Unlock() - - mockChainSync := NewMockChainSync(ctrl) - mockChainSync.EXPECT().syncState().Return(bootstrap) - - mockBlockImportHandler := NewMockBlockImportHandler(ctrl) - mockBlockImportHandler.EXPECT().HandleBlockImport(mockBlock, mockTrieState, false) - - mockTelemetry := NewMockTelemetry(ctrl) - mockTelemetry.EXPECT().SendMessage(gomock.Any()) - mockFinalityGadget := NewMockFinalityGadget(ctrl) - mockFinalityGadget.EXPECT().VerifyBlockJustification( - common.MustHexToHash("0xdcdd89927d8a348e00257e1ecc8617f45edb5118efff3ea2f9961b2ad9b7690a"), - []byte{1, 2, 3}).Return(nil) - return chainProcessor{ - chainSync: mockChainSync, - blockState: mockBlockState, - babeVerifier: mockBabeVerifier, - storageState: mockStorageState, - blockImportHandler: mockBlockImportHandler, - telemetry: mockTelemetry, - finalityGadget: mockFinalityGadget, - } - }, - blockData: types.BlockData{ - Header: &types.Header{ - Number: 0, - }, - Body: &types.Body{}, - 
Justification: &[]byte{1, 2, 3}, - }, - }, - } - - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - processor := tt.chainProcessorBuilder(ctrl) - err := processor.processBlockData(tt.blockData) - assert.ErrorIs(t, err, tt.expectedError) - }) - } -} - -func Test_chainProcessor_processBlockDataWithStateHeaderAndBody(t *testing.T) { - t.Parallel() - - errTest := errors.New("test error") - - testCases := map[string]struct { - chainProcessorBuilder func(ctrl *gomock.Controller) chainProcessor - blockData types.BlockData - announceImportedBlock bool - sentinelError error - errorMessage string - }{ - "get_block_by_hash_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - blockState := NewMockBlockState(ctrl) - blockState.EXPECT().GetBlockByHash(common.Hash{1}). - Return(nil, errTest) - return chainProcessor{ - blockState: blockState, - } - }, - blockData: types.BlockData{Hash: common.Hash{1}}, - sentinelError: errTest, - errorMessage: "getting block by hash: test error", - }, - "block_already_exists_in_blocktree": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - blockState := NewMockBlockState(ctrl) - block := &types.Block{Header: types.Header{Number: 2}} - blockState.EXPECT().GetBlockByHash(common.Hash{1}).Return(block, nil) - blockState.EXPECT().AddBlockToBlockTree(block).Return(blocktree.ErrBlockExists) - return chainProcessor{ - blockState: blockState, - } - }, - blockData: types.BlockData{Hash: common.Hash{1}}, - }, - "add_block_to_blocktree_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - blockState := NewMockBlockState(ctrl) - block := &types.Block{Header: types.Header{Number: 2}} - blockState.EXPECT().GetBlockByHash(common.Hash{1}).Return(block, nil) - blockState.EXPECT().AddBlockToBlockTree(block).Return(errTest) - return chainProcessor{ - blockState: blockState, - } - }, - blockData: types.BlockData{Hash: common.Hash{1}}, - sentinelError: errTest, - errorMessage: "adding block to blocktree: test error", - }, - "handle_justification_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - blockState := NewMockBlockState(ctrl) - blockHeader := types.Header{Number: 2} - blockHeaderHash := blockHeader.Hash() - block := &types.Block{Header: blockHeader} - blockState.EXPECT().GetBlockByHash(common.Hash{1}).Return(block, nil) - blockState.EXPECT().AddBlockToBlockTree(block).Return(nil) - - finalityGadget := NewMockFinalityGadget(ctrl) - finalityGadget.EXPECT(). - VerifyBlockJustification(blockHeaderHash, []byte{3}). - Return(errTest) - - return chainProcessor{ - blockState: blockState, - finalityGadget: finalityGadget, - } - }, - blockData: types.BlockData{ - Hash: common.Hash{1}, - Justification: &[]byte{3}, - }, - sentinelError: errTest, - errorMessage: "handling justification: verifying block number 2 justification: test error", - }, - "trie_state_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - blockState := NewMockBlockState(ctrl) - blockHeader := types.Header{StateRoot: common.Hash{2}} - block := &types.Block{Header: blockHeader} - blockState.EXPECT().GetBlockByHash(common.Hash{1}).Return(block, nil) - blockState.EXPECT().AddBlockToBlockTree(block).Return(nil) - - storageState := NewMockStorageState(ctrl) - storageState.EXPECT().TrieState(&common.Hash{2}). 
- Return(nil, errTest) - - return chainProcessor{ - blockState: blockState, - storageState: storageState, - } - }, - blockData: types.BlockData{ - Hash: common.Hash{1}, - }, - sentinelError: errTest, - errorMessage: "loading trie state: test error", - }, - "handle_block_import_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - blockState := NewMockBlockState(ctrl) - blockHeader := types.Header{StateRoot: common.Hash{2}} - block := &types.Block{Header: blockHeader} - blockState.EXPECT().GetBlockByHash(common.Hash{1}).Return(block, nil) - blockState.EXPECT().AddBlockToBlockTree(block).Return(nil) - - storageState := NewMockStorageState(ctrl) - trieState := storage.NewTrieState(nil) - storageState.EXPECT().TrieState(&common.Hash{2}). - Return(trieState, nil) - - blockImportHandler := NewMockBlockImportHandler(ctrl) - const announceImportedBlock = true - blockImportHandler.EXPECT().HandleBlockImport(block, trieState, announceImportedBlock). - Return(errTest) - - return chainProcessor{ - blockState: blockState, - storageState: storageState, - blockImportHandler: blockImportHandler, - } - }, - blockData: types.BlockData{ - Hash: common.Hash{1}, - }, - announceImportedBlock: true, - sentinelError: errTest, - errorMessage: "handling block import: test error", - }, - "success": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - blockState := NewMockBlockState(ctrl) - blockHeader := types.Header{StateRoot: common.Hash{2}} - block := &types.Block{Header: blockHeader} - blockState.EXPECT().GetBlockByHash(common.Hash{1}).Return(block, nil) - blockState.EXPECT().AddBlockToBlockTree(block).Return(nil) - - storageState := NewMockStorageState(ctrl) - trieState := storage.NewTrieState(nil) - storageState.EXPECT().TrieState(&common.Hash{2}). - Return(trieState, nil) - - blockImportHandler := NewMockBlockImportHandler(ctrl) - const announceImportedBlock = true - blockImportHandler.EXPECT().HandleBlockImport(block, trieState, announceImportedBlock). - Return(nil) - - return chainProcessor{ - blockState: blockState, - storageState: storageState, - blockImportHandler: blockImportHandler, - } - }, - blockData: types.BlockData{ - Hash: common.Hash{1}, - }, - announceImportedBlock: true, - }, - } - - for name, testCase := range testCases { - testCase := testCase - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - - processor := testCase.chainProcessorBuilder(ctrl) - - err := processor.processBlockDataWithStateHeaderAndBody( - testCase.blockData, testCase.announceImportedBlock) - - assert.ErrorIs(t, err, testCase.sentinelError) - if testCase.sentinelError != nil { - assert.EqualError(t, err, testCase.errorMessage) - } - }) - } -} - -func Test_chainProcessor_processBlockDataWithHeaderAndBody(t *testing.T) { - t.Parallel() - - errTest := errors.New("test error") - - testCases := map[string]struct { - chainProcessorBuilder func(ctrl *gomock.Controller) chainProcessor - blockData types.BlockData - announceImportedBlock bool - sentinelError error - errorMessage string - }{ - "verify_block_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - babeVerifier := NewMockBabeVerifier(ctrl) - babeVerifier.EXPECT().VerifyBlock(&types.Header{Number: 1}). 
- Return(errTest) - - return chainProcessor{ - babeVerifier: babeVerifier, - } - }, - blockData: types.BlockData{ - Header: &types.Header{Number: 1}, - }, - sentinelError: errTest, - errorMessage: "babe verifying block: test error", - }, - "handle_block_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - babeVerifier := NewMockBabeVerifier(ctrl) - expectedHeader := &types.Header{ParentHash: common.Hash{1}} - babeVerifier.EXPECT().VerifyBlock(expectedHeader). - Return(nil) - - transactionState := NewMockTransactionState(ctrl) - transactionState.EXPECT().RemoveExtrinsic(types.Extrinsic{2}) - - blockState := NewMockBlockState(ctrl) - blockState.EXPECT().GetHeader(common.Hash{1}). - Return(nil, errTest) - - return chainProcessor{ - babeVerifier: babeVerifier, - transactionState: transactionState, - blockState: blockState, - } - }, - blockData: types.BlockData{ - Header: &types.Header{ParentHash: common.Hash{1}}, - Body: &types.Body{{2}}, - }, - sentinelError: errFailedToGetParent, - errorMessage: "handling block: failed to get parent header: test error", - }, - "success": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - babeVerifier := NewMockBabeVerifier(ctrl) - expectedHeader := &types.Header{ - ParentHash: common.Hash{1}, - Number: 5, - } - babeVerifier.EXPECT().VerifyBlock(expectedHeader). - Return(nil) - - transactionState := NewMockTransactionState(ctrl) - transactionState.EXPECT().RemoveExtrinsic(types.Extrinsic{2}) - - blockState := NewMockBlockState(ctrl) - parentHeader := &types.Header{StateRoot: trie.EmptyHash} - blockState.EXPECT().GetHeader(common.Hash{1}). - Return(parentHeader, nil) - - storageState := NewMockStorageState(ctrl) - lockCall := storageState.EXPECT().Lock() - storageState.EXPECT().Unlock().After(lockCall) - trieState := storage.NewTrieState(nil) - storageState.EXPECT().TrieState(&trie.EmptyHash). - Return(trieState, nil) - - parentHeaderHash := parentHeader.Hash() - instance := NewMockInstance(ctrl) - blockState.EXPECT().GetRuntime(parentHeaderHash). - Return(instance, nil) - - instance.EXPECT().SetContextStorage(trieState) - block := &types.Block{ - Header: *expectedHeader, - Body: types.Body{{2}}, - } - instance.EXPECT().ExecuteBlock(block).Return(nil, nil) - - blockImportHandler := NewMockBlockImportHandler(ctrl) - const announceImportedBlock = true - blockImportHandler.EXPECT().HandleBlockImport(block, trieState, announceImportedBlock). 
- Return(nil) - - telemetryClient := NewMockTelemetry(ctrl) - headerHash := common.MustHexToHash("0x18d21d2901e4a4ac6a8c6431da2dfee1b8701f31a9e49283a082e6c744d4117c") - message := telemetry.NewBlockImport(&headerHash, expectedHeader.Number, "NetworkInitialSync") - telemetryClient.EXPECT().SendMessage(message) - - return chainProcessor{ - babeVerifier: babeVerifier, - transactionState: transactionState, - blockState: blockState, - storageState: storageState, - blockImportHandler: blockImportHandler, - telemetry: telemetryClient, - } - }, - blockData: types.BlockData{ - Header: &types.Header{ - ParentHash: common.Hash{1}, - Number: 5, - }, - Body: &types.Body{{2}}, - }, - announceImportedBlock: true, - }, - } - - for name, testCase := range testCases { - testCase := testCase - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - - processor := testCase.chainProcessorBuilder(ctrl) - - err := processor.processBlockDataWithHeaderAndBody( - testCase.blockData, testCase.announceImportedBlock) - - assert.ErrorIs(t, err, testCase.sentinelError) - if testCase.sentinelError != nil { - assert.EqualError(t, err, testCase.errorMessage) - } - }) - } -} - -func Test_chainProcessor_processReadyBlocks(t *testing.T) { - t.Parallel() - mockError := errors.New("test mock error") - tests := map[string]struct { - chainSyncBuilder func(ctrl *gomock.Controller) ChainSync - blockStateBuilder func(ctrl *gomock.Controller, done chan struct{}) BlockState - blockData *types.BlockData - babeVerifierBuilder func(ctrl *gomock.Controller) BabeVerifier - pendingBlockBuilder func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet - storageStateBuilder func(ctrl *gomock.Controller, done chan struct{}) StorageState - }{ - "base_case": { - chainSyncBuilder: func(ctrl *gomock.Controller) ChainSync { - cs := NewMockChainSync(ctrl) - cs.EXPECT().syncState().Return(bootstrap) - return cs - }, - blockStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().CompareAndSetBlockData(&types.BlockData{}).DoAndReturn(func(*types. 
- BlockData) error { - close(done) - return nil - }) - return mockBlockState - }, - blockData: &types.BlockData{ - Hash: common.Hash{}, - }, - babeVerifierBuilder: func(ctrl *gomock.Controller) BabeVerifier { - return nil - }, - pendingBlockBuilder: func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet { - return nil - }, - storageStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) StorageState { - return nil - }, - }, - "add_block": { - chainSyncBuilder: func(ctrl *gomock.Controller) ChainSync { - cs := NewMockChainSync(ctrl) - cs.EXPECT().syncState().Return(bootstrap) - return cs - }, - blockStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(nil, mockError) - return mockBlockState - }, - blockData: &types.BlockData{ - Hash: common.Hash{}, - Header: &types.Header{}, - Body: &types.Body{}, - }, - babeVerifierBuilder: func(ctrl *gomock.Controller) BabeVerifier { - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}).Return(nil) - return mockBabeVerifier - }, - pendingBlockBuilder: func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet { - mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) - mockDisjointBlockSet.EXPECT().addBlock(&types.Block{ - Header: types.Header{}, - Body: types.Body{}, - }).DoAndReturn(func(block *types.Block) error { - close(done) - return nil - }) - return mockDisjointBlockSet - }, - storageStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) StorageState { - return nil - }, - }, - "error_in_process_block": { - chainSyncBuilder: func(ctrl *gomock.Controller) ChainSync { - cs := NewMockChainSync(ctrl) - cs.EXPECT().syncState().Return(bootstrap) - return cs - }, - blockStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{}, nil) - return mockBlockState - }, - blockData: &types.BlockData{ - Hash: common.Hash{}, - Header: &types.Header{}, - Body: &types.Body{}, - }, - babeVerifierBuilder: func(ctrl *gomock.Controller) BabeVerifier { - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}).Return(nil) - return mockBabeVerifier - }, - pendingBlockBuilder: func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet { - return nil - }, - storageStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) StorageState { - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().Lock() - mockStorageState.EXPECT().Unlock() - mockStorageState.EXPECT().TrieState(&common.Hash{}).DoAndReturn(func(hash *common.Hash) (*storage. 
- TrieState, error) { - close(done) - return nil, mockError - }) - return mockStorageState - }, - }, - "add_block_error": { - chainSyncBuilder: func(ctrl *gomock.Controller) ChainSync { - cs := NewMockChainSync(ctrl) - cs.EXPECT().syncState().Return(bootstrap) - return cs - }, - blockStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(nil, mockError) - return mockBlockState - }, - blockData: &types.BlockData{ - Hash: common.Hash{}, - Header: &types.Header{}, - Body: &types.Body{}, - }, - babeVerifierBuilder: func(ctrl *gomock.Controller) BabeVerifier { - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}).Return(nil) - return mockBabeVerifier - }, - pendingBlockBuilder: func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet { - mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) - mockDisjointBlockSet.EXPECT().addBlock(&types.Block{ - Header: types.Header{}, - Body: types.Body{}, - }).DoAndReturn(func(block *types.Block) error { - close(done) - return mockError - }) - return mockDisjointBlockSet - }, - storageStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) StorageState { - return nil - }, - }, - } - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - ctx, cancel := context.WithCancel(context.Background()) - readyBlock := newBlockQueue(5) - done := make(chan struct{}) - - s := &chainProcessor{ - ctx: ctx, - cancel: cancel, - readyBlocks: readyBlock, - chainSync: tt.chainSyncBuilder(ctrl), - blockState: tt.blockStateBuilder(ctrl, done), - babeVerifier: tt.babeVerifierBuilder(ctrl), - pendingBlocks: tt.pendingBlockBuilder(ctrl, done), - storageState: tt.storageStateBuilder(ctrl, done), - } - - go s.processReadyBlocks() - - readyBlock.push(tt.blockData) - <-done - s.cancel() - }) - } -} - -func Test_newChainProcessor(t *testing.T) { - t.Parallel() - - mockReadyBlock := newBlockQueue(5) - mockDisjointBlockSet := NewMockDisjointBlockSet(nil) - mockBlockState := NewMockBlockState(nil) - mockStorageState := NewMockStorageState(nil) - mockTransactionState := NewMockTransactionState(nil) - mockBabeVerifier := NewMockBabeVerifier(nil) - mockFinalityGadget := NewMockFinalityGadget(nil) - mockBlockImportHandler := NewMockBlockImportHandler(nil) - - type args struct { - readyBlocks *blockQueue - pendingBlocks DisjointBlockSet - blockState BlockState - storageState StorageState - transactionState TransactionState - babeVerifier BabeVerifier - finalityGadget FinalityGadget - blockImportHandler BlockImportHandler - } - tests := []struct { - name string - args args - want *chainProcessor - }{ - { - name: "with_args", - args: args{ - readyBlocks: mockReadyBlock, - pendingBlocks: mockDisjointBlockSet, - blockState: mockBlockState, - storageState: mockStorageState, - transactionState: mockTransactionState, - babeVerifier: mockBabeVerifier, - finalityGadget: mockFinalityGadget, - blockImportHandler: mockBlockImportHandler, - }, - want: &chainProcessor{ - readyBlocks: mockReadyBlock, - pendingBlocks: mockDisjointBlockSet, - blockState: mockBlockState, - storageState: mockStorageState, - transactionState: mockTransactionState, - babeVerifier: mockBabeVerifier, - finalityGadget: 
mockFinalityGadget, - blockImportHandler: mockBlockImportHandler, - }, - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - cpCfg := chainProcessorConfig{ - readyBlocks: tt.args.readyBlocks, - pendingBlocks: tt.args.pendingBlocks, - blockState: tt.args.blockState, - storageState: tt.args.storageState, - transactionState: tt.args.transactionState, - babeVerifier: tt.args.babeVerifier, - finalityGadget: tt.args.finalityGadget, - blockImportHandler: tt.args.blockImportHandler, - } - - got := newChainProcessor(cpCfg) - assert.NotNil(t, got.ctx) - got.ctx = nil - assert.NotNil(t, got.cancel) - got.cancel = nil - assert.Equal(t, tt.want, got) - }) - } -} From 5bc45f92437b916462aaa8e6204b13bbdaec0fdd Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 5 Jun 2023 20:27:20 -0400 Subject: [PATCH 056/140] From 5fb3070c817190e74dc6048d4f55da933a55aaf3 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 6 Jun 2023 09:57:09 -0400 Subject: [PATCH 057/140] chore: use highest finalized header instead of best block number --- chain/westend/defaults.go | 4 ++++ dot/sync/chain_sync.go | 30 +++++++++++++----------------- lib/trie/database.go | 4 ++++ 3 files changed, 21 insertions(+), 17 deletions(-) diff --git a/chain/westend/defaults.go b/chain/westend/defaults.go index 241ad8cc2a..2712331cb6 100644 --- a/chain/westend/defaults.go +++ b/chain/westend/defaults.go @@ -32,6 +32,10 @@ func DefaultConfig() *cfg.Config { config.Log.Digest = "trace" config.Log.Sync = "trace" + config.Pprof.Enabled = true + config.Pprof.ListeningAddress = "localhost:6060" + config.PrometheusExternal = true + config.PrometheusPort = 9876 return config } diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 2c4cf3f37e..288d78152d 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -225,7 +225,7 @@ func (cs *chainSync) sync() { logger.Debugf("switched sync mode to %d", bootstrap) } - cs.executeBootstrapSync() + cs.executeBootstrapSync(finalisedHeader) } else { // we are less than 128 blocks behind the target we can use tip sync swapped := cs.state.CompareAndSwap(bootstrap, tip) @@ -235,7 +235,7 @@ func (cs *chainSync) sync() { logger.Debugf("switched sync mode to %d", tip) } - cs.requestPendingBlocks() + cs.requestPendingBlocks(finalisedHeader) } } } @@ -317,7 +317,12 @@ func (cs *chainSync) requestImportedBlock(announce announcedBlock) error { cs.requestChainBlocks(announce.header, bestBlockHeader, peerWhoAnnounced) - err = cs.requestPendingBlocks() + highestFinalizedHeader, err := cs.blockState.GetHighestFinalisedHeader() + if err != nil { + return fmt.Errorf("while getting highest finalized header: %w", err) + } + + err = cs.requestPendingBlocks(highestFinalizedHeader) if err != nil { return fmt.Errorf("while requesting pending blocks") } @@ -390,20 +395,15 @@ func (cs *chainSync) requestForkBlocks(bestBlockHeader, highestFinalizedHeader, return nil } -func (cs *chainSync) requestPendingBlocks() error { +func (cs *chainSync) requestPendingBlocks(highestFinalizedHeader *types.Header) error { logger.Infof("total of pending blocks: %d", cs.pendingBlocks.size()) if cs.pendingBlocks.size() == 0 { return nil } - highestFinalized, err := cs.blockState.GetHighestFinalisedHeader() - if err != nil { - return fmt.Errorf("getting highest finalised header: %w", err) - } - pendingBlocks := cs.pendingBlocks.getBlocks() for _, pendingBlock := range pendingBlocks { - if pendingBlock.number <= highestFinalized.Number { + if pendingBlock.number <= 
highestFinalized.Number {
+		if pendingBlock.number <= highestFinalizedHeader.Number {
 			cs.pendingBlocks.removeBlock(pendingBlock.hash)
 			continue
 		}
@@ -421,7 +421,7 @@ func (cs *chainSync) requestPendingBlocks() error {
 			continue
 		}
 
-		gapLength := pendingBlock.number - highestFinalized.Number
+		gapLength := pendingBlock.number - highestFinalizedHeader.Number
 		if gapLength > 128 {
 			logger.Criticalf("GAP LENGTH: %d, GREATER THAN 128 blocks", gapLength)
 			gapLength = 128
@@ -448,14 +448,10 @@ func (cs *chainSync) requestPendingBlocks() error {
 	return nil
 }
 
-func (cs *chainSync) executeBootstrapSync() error {
+func (cs *chainSync) executeBootstrapSync(highestFinalizedHeader *types.Header) error {
 	cs.workerPool.useConnectedPeers()
 
-	bestBlockHeader, err := cs.blockState.BestBlockHeader()
-	if err != nil {
-		return fmt.Errorf("getting best block header while syncing: %w", err)
-	}
-	startRequestAt := bestBlockHeader.Number + 1
+	startRequestAt := highestFinalizedHeader.Number + 1
 	const maxRequestsAllowed = 50
 
 	// we build the set of requests based on the number of available peers
diff --git a/lib/trie/database.go b/lib/trie/database.go
index 2c1f5b9300..e075fad78e 100644
--- a/lib/trie/database.go
+++ b/lib/trie/database.go
@@ -79,6 +79,10 @@ func (t *Trie) loadNode(db Getter, n *Node) error {
 		}
 		nodeHash := merkleValue
 
+		if len(nodeHash) < 1 {
+			fmt.Printf(">>>>>>>>>> trie loadNode, empty node hash\n")
+		}
+
 		encodedNode, err := db.Get(nodeHash)
 		if err != nil {
 			return fmt.Errorf("cannot find child node key 0x%x in database: %w", nodeHash, err)
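The commit above moves the bootstrap restart point from the best block header to the highest finalised header, and the commit below refines this into a checkpoint that only falls back to the finalised header after a failed round, since everything at or below the finalised block is known to be valid. The following stand-alone sketch illustrates that fallback flow; the header struct and bootstrapFrom function are simplified, hypothetical stand-ins for Gossamer's *types.Header and chainSync.executeBootstrapSync, not the real API.

package main

import (
	"errors"
	"fmt"
)

// header is a hypothetical, trimmed-down stand-in for types.Header.
type header struct {
	number uint
}

// bootstrapFrom stands in for executeBootstrapSync: it would request blocks
// starting right after the checkpoint. Here it simply fails when started
// above the finalised head, so that the fallback path below is exercised.
func bootstrapFrom(checkpoint, finalised header) error {
	if checkpoint.number > finalised.number {
		return errors.New("cannot import blocks above the finalised head")
	}
	fmt.Printf("requesting blocks starting at %d\n", checkpoint.number+1)
	return nil
}

func main() {
	best := header{number: 100}
	finalised := header{number: 90}

	// The first round restarts from the best block header; after a failure
	// the next round falls back to the finalised header, which is known-good.
	useFinalised := false
	for attempt := 1; attempt <= 2; attempt++ {
		checkpoint := best
		if useFinalised {
			checkpoint = finalised
		}

		if err := bootstrapFrom(checkpoint, finalised); err != nil {
			fmt.Printf("bootstrap attempt %d failed: %s\n", attempt, err)
			useFinalised = true
			continue
		}
		break
	}
}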
From c7e145bcfd4a5b2f5e6a718c66178dab4c7e4a4e Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Tue, 6 Jun 2023 15:41:22 -0400
Subject: [PATCH 058/140] chore: use a checkpoint header every time we failed
 to import blocks

---
 dot/sync/chain_sync.go  | 274 ++++++++++++++++++++++------------------
 dot/sync/worker_pool.go |   8 +-
 2 files changed, 157 insertions(+), 125 deletions(-)

diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go
index 288d78152d..e78e99dab4 100644
--- a/dot/sync/chain_sync.go
+++ b/dot/sync/chain_sync.go
@@ -186,6 +186,8 @@ func (cs *chainSync) stop() {
 }
 
 func (cs *chainSync) sync() {
+	var useFinalisedHeader bool
+
 	for {
 		bestBlockHeader, err := cs.blockState.BestBlockHeader()
 		if err != nil {
@@ -225,7 +227,19 @@ func (cs *chainSync) sync() {
 				logger.Debugf("switched sync mode to %d", bootstrap)
 			}
 
-			cs.executeBootstrapSync(finalisedHeader)
+			checkPointHeader := bestBlockHeader
+			if useFinalisedHeader {
+				checkPointHeader = finalisedHeader
+			}
+			err := cs.executeBootstrapSync(checkPointHeader)
+
+			if err != nil {
+				logger.Errorf("while executing bootstrap sync: %s", err)
+				useFinalisedHeader = true
+				continue
+			}
+
+			useFinalisedHeader = false
 		} else {
 			// we are less than 128 blocks behind the target we can use tip sync
 			swapped := cs.state.CompareAndSwap(bootstrap, tip)
@@ -448,12 +462,12 @@ func (cs *chainSync) requestPendingBlocks(highestFinalizedHeader *types.Header)
 	return nil
 }
 
-func (cs *chainSync) executeBootstrapSync(highestFinalizedHeader *types.Header) error {
+func (cs *chainSync) executeBootstrapSync(checkPointHeader *types.Header) error {
 	cs.workerPool.useConnectedPeers()
 
-	startRequestAt := highestFinalizedHeader.Number + 1
+	startRequestAt := checkPointHeader.Number + 1
 
-	const maxRequestsAllowed = 50
+	const maxRequestsAllowed = 40
 	// we build the set of requests based on the number of available peers
 	// in the worker pool; if we have more peers than `maxRequestsAllowed`
 	// we limit to `maxRequestsAllowed` to avoid the error:
@@ -489,10 +503,14 @@ func (cs *chainSync) executeBootstrapSync(highestFinalizedHeader *types.Header)
 	resultsQueue := make(chan *syncTaskResult)
 
 	wg.Add(1)
-	go cs.handleWorkersResults(resultsQueue, startRequestAt, expectedAmountOfBlocks, &wg)
+	resultErrCh := cs.handleWorkersResults(resultsQueue, startRequestAt, expectedAmountOfBlocks, &wg)
 	cs.workerPool.submitRequests(requests, resultsQueue)
 
 	wg.Wait()
+	if err := <-resultErrCh; err != nil {
+		return fmt.Errorf("while handling workers results: %w", err)
+	}
+
 	return nil
 }
 
@@ -525,144 +543,154 @@ func (cs *chainSync) getTarget() (uint, error) {
 // and every cycle we should end up with a complete chain; whenever we identify
 // any error from a worker we should evaluate the error and re-insert the request
 // in the queue and wait for it to complete
-func (cs *chainSync) handleWorkersResults(workersResults chan *syncTaskResult, startAtBlock uint, totalBlocks uint32, wg *sync.WaitGroup) {
-	startTime := time.Now()
-	defer func() {
-		totalSyncAndImportSeconds := time.Since(startTime).Seconds()
-		bps := float64(totalBlocks) / totalSyncAndImportSeconds
-		logger.Debugf("⛓️ synced %d blocks, took: %.2f seconds, bps: %.2f blocks/second", totalBlocks, totalSyncAndImportSeconds, bps)
-		wg.Done()
-	}()
-
-	logger.Debugf("💤 waiting for %d blocks", totalBlocks)
-	syncingChain := make([]*types.BlockData, totalBlocks)
-	// the total number of blocks still missing in the syncing chain
-	waitingBlocks := totalBlocks
+func (cs *chainSync) handleWorkersResults(
+	workersResults chan *syncTaskResult, startAtBlock uint, totalBlocks uint32, wg *sync.WaitGroup) chan error {
+	errCh := make(chan error)
+
+	go func() {
+		startTime := time.Now()
+		defer func() {
+			totalSyncAndImportSeconds := time.Since(startTime).Seconds()
+			bps := float64(totalBlocks) / totalSyncAndImportSeconds
+			logger.Debugf("⛓️ synced %d blocks, took: %.2f seconds, bps: %.2f blocks/second", totalBlocks, totalSyncAndImportSeconds, bps)
+
+			close(errCh)
+			wg.Done()
+		}()
+
+		logger.Debugf("💤 waiting for %d blocks", totalBlocks)
+		syncingChain := make([]*types.BlockData, totalBlocks)
+		// the total number of blocks still missing in the syncing chain
+		waitingBlocks := totalBlocks
+
+		for waitingBlocks > 0 {
+			// in a case where we don't handle workers results we should check the pool
+			idleDuration := time.Minute
+			idleTimer := time.NewTimer(idleDuration)
+
+			select {
+			case <-cs.stopCh:
+				return
+
+			case <-idleTimer.C:
+				logger.Warnf("idle ticker triggered! checking pool")
+				cs.workerPool.useConnectedPeers()
+				continue
+
+			case taskResult := <-workersResults:
+				if !idleTimer.Stop() {
+					<-idleTimer.C
+				}
+
+				logger.Debugf("task result: peer(%s), with error: %v, with response: %v",
+					taskResult.who, taskResult.err != nil, taskResult.response != nil)
+
+				if taskResult.err != nil {
+					logger.Errorf("task result: peer(%s) error: %s",
+						taskResult.who, taskResult.err)
+
+					if !errors.Is(taskResult.err, network.ErrReceivedEmptyMessage) {
+						switch {
+						case strings.Contains(taskResult.err.Error(), "protocols not supported"):
+							cs.network.ReportPeer(peerset.ReputationChange{
+								Value:  peerset.BadProtocolValue,
+								Reason: peerset.BadProtocolReason,
+							}, taskResult.who)
+							cs.workerPool.ignorePeerAsWorker(taskResult.who)
+						default:
+							cs.workerPool.punishPeer(taskResult.who)
+						}
+					}
+
+					cs.workerPool.submitRequest(taskResult.request, workersResults)
+					continue
+				}
+
+				who := taskResult.who
+				request := taskResult.request
+				response := taskResult.response
+
+				if request.Direction == network.Descending {
+					// reverse blocks before pre-validating and placing in ready queue
+					reverseBlockData(response.BlockData)
+				}
+
+				err := cs.validateResponse(request, response, who)
+				switch {
+				case errors.Is(err, errResponseIsNotChain):
+					logger.Criticalf("response invalid: %s", err)
+					cs.workerPool.punishPeer(taskResult.who)
+					cs.workerPool.submitRequest(taskResult.request, workersResults)
+					continue
+				case errors.Is(err, errEmptyBlockData):
+					cs.workerPool.punishPeer(taskResult.who)
+					cs.workerPool.submitRequest(taskResult.request, workersResults)
+					continue
+				case errors.Is(err, errUnknownParent):
+				case errors.Is(err, errBadBlock):
+					logger.Warnf("peer %s sent a bad block: %s", who, err)
+					cs.workerPool.ignorePeerAsWorker(taskResult.who)
+					cs.workerPool.submitRequest(taskResult.request, workersResults)
+					continue
+				case err != nil:
+					logger.Criticalf("response invalid: %s", err)
+					cs.workerPool.punishPeer(taskResult.who)
+					cs.workerPool.submitRequest(taskResult.request, workersResults)
+					continue
+				}
+
+				if len(response.BlockData) > 0 {
+					firstBlockInResponse := response.BlockData[0]
+					lastBlockInResponse := response.BlockData[len(response.BlockData)-1]
+
+					logger.Tracef("processing %d blocks: %d (%s) to %d (%s)",
+						len(response.BlockData),
+						firstBlockInResponse.Header.Number, firstBlockInResponse.Hash,
+						lastBlockInResponse.Header.Number, lastBlockInResponse.Hash)
+				}
-	for waitingBlocks > 0 {
-		// in a case where we don't handle workers results we should check the pool
-		idleDuration := time.Minute
-		idleTimer := time.NewTimer(idleDuration)
-
-		select {
-		case <-cs.stopCh:
-			return
-
-		case <-idleTimer.C:
-			logger.Warnf("idle ticker triggered! 
checking pool") - cs.workerPool.useConnectedPeers() - continue + cs.workerPool.submitRequest(taskResult.request, workersResults) + continue + } - case taskResult := <-workersResults: - if !idleTimer.Stop() { - <-idleTimer.C - } + who := taskResult.who + request := taskResult.request + response := taskResult.response - logger.Debugf("task result: peer(%s), with error: %v, with response: %v", - taskResult.who, taskResult.err != nil, taskResult.response != nil) - - if taskResult.err != nil { - logger.Errorf("task result: peer(%s) error: %s", - taskResult.who, taskResult.err) - - if !errors.Is(taskResult.err, network.ErrReceivedEmptyMessage) { - switch { - case strings.Contains(taskResult.err.Error(), "protocols not supported"): - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.BadProtocolValue, - Reason: peerset.BadProtocolReason, - }, taskResult.who) - cs.workerPool.ignorePeerAsWorker(taskResult.who) - default: - cs.workerPool.punishPeer(taskResult.who) - } + if request.Direction == network.Descending { + // reverse blocks before pre-validating and placing in ready queue + reverseBlockData(response.BlockData) } - cs.workerPool.submitRequest(taskResult.request, workersResults) - continue - } + err := cs.validateResponse(request, response, who) + switch { + case errors.Is(err, errResponseIsNotChain): + logger.Criticalf("response invalid: %s", err) + cs.workerPool.punishPeer(taskResult.who) + cs.workerPool.submitRequest(taskResult.request, workersResults) + continue + case errors.Is(err, errEmptyBlockData): + cs.workerPool.punishPeer(taskResult.who) + cs.workerPool.submitRequest(taskResult.request, workersResults) + continue + case errors.Is(err, errUnknownParent): + case errors.Is(err, errBadBlock): + logger.Warnf("peer %s sent a bad block: %s", who, err) + cs.workerPool.ignorePeerAsWorker(taskResult.who) + cs.workerPool.submitRequest(taskResult.request, workersResults) + continue + case err != nil: + logger.Criticalf("response invalid: %s", err) + cs.workerPool.punishPeer(taskResult.who) + cs.workerPool.submitRequest(taskResult.request, workersResults) + continue + } - who := taskResult.who - request := taskResult.request - response := taskResult.response + if len(response.BlockData) > 0 { + firstBlockInResponse := response.BlockData[0] + lastBlockInResponse := response.BlockData[len(response.BlockData)-1] - if request.Direction == network.Descending { - // reverse blocks before pre-validating and placing in ready queue - reverseBlockData(response.BlockData) - } + logger.Tracef("processing %d blocks: %d (%s) to %d (%s)", + len(response.BlockData), + firstBlockInResponse.Header.Number, firstBlockInResponse.Hash, + lastBlockInResponse.Header.Number, lastBlockInResponse.Hash) + } - err := cs.validateResponse(request, response, who) - switch { - case errors.Is(err, errResponseIsNotChain): - logger.Criticalf("response invalid: %s", err) - cs.workerPool.punishPeer(taskResult.who) - cs.workerPool.submitRequest(taskResult.request, workersResults) - continue - case errors.Is(err, errEmptyBlockData): - cs.workerPool.punishPeer(taskResult.who) - cs.workerPool.submitRequest(taskResult.request, workersResults) - continue - case errors.Is(err, errUnknownParent): - case errors.Is(err, errBadBlock): - logger.Warnf("peer %s sent a bad block: %s", who, err) - cs.workerPool.ignorePeerAsWorker(taskResult.who) - cs.workerPool.submitRequest(taskResult.request, workersResults) - continue - case err != nil: - logger.Criticalf("response invalid: %s", err) - cs.workerPool.punishPeer(taskResult.who) - 
cs.workerPool.submitRequest(taskResult.request, workersResults) - continue - } + cs.network.ReportPeer(peerset.ReputationChange{ + Value: peerset.GossipSuccessValue, + Reason: peerset.GossipSuccessReason, + }, taskResult.who) - if len(response.BlockData) > 0 { - firstBlockInResponse := response.BlockData[0] - lastBlockInResponse := response.BlockData[len(response.BlockData)-1] + for _, blockInResponse := range response.BlockData { + blockExactIndex := blockInResponse.Header.Number - startAtBlock + syncingChain[blockExactIndex] = blockInResponse + } - logger.Tracef("processing %d blocks: %d (%s) to %d (%s)", - len(response.BlockData), - firstBlockInResponse.Header.Number, firstBlockInResponse.Hash, - lastBlockInResponse.Header.Number, lastBlockInResponse.Hash) + // we need to check if we've filled all positions + // otherwise we should wait for more responses + waitingBlocks -= uint32(len(response.BlockData)) } + } - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.GossipSuccessValue, - Reason: peerset.GossipSuccessReason, - }, taskResult.who) - - for _, blockInResponse := range response.BlockData { - blockExactIndex := blockInResponse.Header.Number - startAtBlock - syncingChain[blockExactIndex] = blockInResponse + retreiveBlocksSeconds := time.Since(startTime).Seconds() + logger.Debugf("🔽 retrieved %d blocks, took: %.2f seconds, starting process...", totalBlocks, retreiveBlocksSeconds) + if len(syncingChain) >= 2 { + // ensuring the parents are in the right place + parentElement := syncingChain[0] + for _, element := range syncingChain[1:] { + if parentElement.Header.Hash() != element.Header.ParentHash { + panic(fmt.Sprintf("expected %s be parent of %s", + parentElement.Header.Hash(), element.Header.ParentHash)) + } + parentElement = element } - - // we need to check if we've filled all positions - // otherwise we should wait for more responses - waitingBlocks -= uint32(len(response.BlockData)) } - } - retreiveBlocksSeconds := time.Since(startTime).Seconds() - logger.Debugf("🔽 retrieved %d blocks, took: %.2f seconds, starting process...", totalBlocks, retreiveBlocksSeconds) - if len(syncingChain) >= 2 { - // ensuring the parents are in the right place - parentElement := syncingChain[0] - for _, element := range syncingChain[1:] { - if parentElement.Header.Hash() != element.Header.ParentHash { - panic(fmt.Sprintf("expected %s be parent of %s", - parentElement.Header.Hash(), element.Header.ParentHash)) + // response was validated! place into ready block queue + for _, bd := range syncingChain { + // block is ready to be processed! + if err := cs.handleReadyBlock(bd); err != nil { + logger.Criticalf("error while handling a ready block: %s", err) + errCh <- err + return } - parentElement = element } - } + }() - // response was validated! place into ready block queue - for _, bd := range syncingChain { - // block is ready to be processed! 
-		if err := cs.handleReadyBlock(bd); err != nil {
-			logger.Criticalf("error while handling a ready block: %s", err)
-			return
-		}
-	}
+	return errCh
 }
 
 func (cs *chainSync) handleReadyBlock(bd *types.BlockData) error {
diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go
index d33b235217..4551e1f875 100644
--- a/dot/sync/worker_pool.go
+++ b/dot/sync/worker_pool.go
@@ -100,7 +100,7 @@ func (s *syncWorkerPool) newPeer(who peer.ID, isFromBlockAnnounce bool) {
 
 // check if the punishment is not valid
 if peerSync.status == punished && peerSync.punishmentTime.Before(time.Now()) {
-		s.workers[who] = &peerSyncWorker{status: available}
+		s.workers[who] = &peerSyncWorker{status: available, timesPunished: peerSync.timesPunished}
 }
 }
 
@@ -245,7 +245,11 @@ func (s *syncWorkerPool) listenForRequests(stopCh chan struct{}) {
 func (s *syncWorkerPool) executeRequest(who peer.ID, task *syncTask) {
 defer func() {
 s.l.Lock()
-		s.workers[who] = &peerSyncWorker{status: available}
+		peerSync, has := s.workers[who]
+		if has {
+			peerSync.status = available
+			s.workers[who] = peerSync
+		}
 s.l.Unlock()
 
 s.availableCond.Signal()

From a6086d6a6b86ef059c7ba4f14503c54bf604533d Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Tue, 6 Jun 2023 22:10:07 -0400
Subject: [PATCH 059/140] chore: fix `all goroutines are asleep - deadlock`
 problem

---
 dot/sync/chain_sync.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go
index e78e99dab4..69e25d3539 100644
--- a/dot/sync/chain_sync.go
+++ b/dot/sync/chain_sync.go
@@ -545,7 +545,7 @@ func (cs *chainSync) getTarget() (uint, error) {
 // in the queue and wait for it to complete
 func (cs *chainSync) handleWorkersResults(
 workersResults chan *syncTaskResult, startAtBlock uint, totalBlocks uint32, wg *sync.WaitGroup) chan error {
-	errCh := make(chan error)
+	errCh := make(chan error, 1)
 
 go func() {
 startTime := time.Now()

From 22699e856260d0cbdb6da66278e2ae31aa1128f1 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Wed, 7 Jun 2023 17:03:39 -0400
Subject: [PATCH 060/140] chore: fix punishment

---
 chain/westend/defaults.go | 4 +++-
 dot/sync/chain_sync.go | 12 +++++++++++-
 dot/sync/worker_pool.go | 10 +++++++---
 3 files changed, 21 insertions(+), 5 deletions(-)

diff --git a/chain/westend/defaults.go b/chain/westend/defaults.go
index 2712331cb6..30f378d0ce 100644
--- a/chain/westend/defaults.go
+++ b/chain/westend/defaults.go
@@ -13,7 +13,7 @@ var (
 // defaultID Default chain ID
 defaultID = "westend2"
 // defaultBasePath Default node base directory path
-	defaultBasePath = "~/.gossamer/westend"
+	defaultBasePath = "/Volumes/SDD01/gossamer/westend"
 // defaultChainSpec is the default chain specification path
 defaultChainSpec = "./chain/westend/genesis.json"
 )
@@ -30,6 +30,8 @@ func DefaultConfig() *cfg.Config {
 config.Core.Role = 1
 config.Network.NoMDNS = false
 
+	config.Network.MaxPeers = 128
+
+	config.Log.Digest = "trace"
 config.Log.Sync = "trace"
 config.Pprof.Enabled = true
diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go
index 69e25d3539..3d88c0482b 100644
--- a/dot/sync/chain_sync.go
+++ b/dot/sync/chain_sync.go
@@ -55,6 +55,12 @@ var (
 Name: "is_synced",
 Help: "bool representing whether the node is synced to the head of the chain",
 })
+
+	blockSizeGauge = promauto.NewGauge(prometheus.GaugeOpts{
+		Namespace: "gossamer_sync",
+		Name: "block_size",
+		Help: "represents the size of blocks synced",
+	})
 )
 
 // peerView tracks our peers's best reported blocks
@@ -467,7 +473,6 @@ func (cs *chainSync) 
executeBootstrapSync(checkPointHeader *types.Header) error startRequestAt := checkPointHeader.Number + 1 - const maxRequestsAllowed = 40 // we build the set of requests based on the amount of available peers // in the worker pool, if we have more peers than `maxRequestAllowed` // so we limit to `maxRequestAllowed` to avoid the error: @@ -854,9 +859,14 @@ func (cs *chainSync) processBlockDataWithHeaderAndBody(blockData types.BlockData // handleHeader handles block bodies included in BlockResponses func (cs *chainSync) handleBody(body *types.Body) { + acc := 0 for _, ext := range *body { + acc += len(ext) cs.transactionState.RemoveExtrinsic(ext) } + + blockSizeGauge.Set(float64(acc)) + //logger.Infof("📦 roughly body size: %d, sum of extrinsics size: %d", len(*body), acc) } func (cs *chainSync) handleJustification(header *types.Header, justification []byte) (err error) { diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 4551e1f875..cd4dccb774 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -58,7 +58,7 @@ func newSyncWorkerPool(net Network) *syncWorkerPool { network: net, doneCh: make(chan struct{}), workers: make(map[peer.ID]*peerSyncWorker), - taskQueue: make(chan *syncTask), + taskQueue: make(chan *syncTask, maxRequestsAllowed+1), ignorePeers: make(map[peer.ID]struct{}), } @@ -100,7 +100,8 @@ func (s *syncWorkerPool) newPeer(who peer.ID, isFromBlockAnnounce bool) { // check if the punishment is not valid if peerSync.status == punished && peerSync.punishmentTime.Before(time.Now()) { - s.workers[who] = &peerSyncWorker{status: available, timesPunished: peerSync.timesPunished} + peerSync.status = available + s.workers[who] = peerSync } } @@ -228,7 +229,10 @@ func (s *syncWorkerPool) listenForRequests(stopCh chan struct{}) { } if peerID != peer.ID("") { - s.workers[peerID] = &peerSyncWorker{status: busy} + peerSync := s.workers[peerID] + peerSync.status = busy + s.workers[peerID] = peerSync + s.l.Unlock() s.wg.Add(1) From 845e42eaf8d9ac913c4312856499964394830e1d Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 16 Jun 2023 09:17:57 -0400 Subject: [PATCH 061/140] chore: fix `executeBootstrapSync` tests --- chain/westend/defaults.go | 13 ++++++------- dot/sync/chain_sync.go | 31 +++++++++++-------------------- dot/sync/chain_sync_test.go | 6 +++--- 3 files changed, 20 insertions(+), 30 deletions(-) diff --git a/chain/westend/defaults.go b/chain/westend/defaults.go index 30f378d0ce..6603e25d54 100644 --- a/chain/westend/defaults.go +++ b/chain/westend/defaults.go @@ -13,7 +13,7 @@ var ( // defaultID Default chain ID defaultID = "westend2" // defaultBasePath Default node base directory path - defaultBasePath = "/Volumes/SDD01/gossamer/westend" + defaultBasePath = "~/.gossamer/westend" // defaultChainSpec is the default chain specification path defaultChainSpec = "./chain/westend/genesis.json" ) @@ -29,15 +29,14 @@ func DefaultConfig() *cfg.Config { config.Core.GrandpaAuthority = false config.Core.Role = 1 config.Network.NoMDNS = false - config.Network.MaxPeers = 128 - - config.Log.Digest = "trace" - config.Log.Sync = "trace" - config.Pprof.Enabled = true - config.Pprof.ListeningAddress = "localhost:6060" config.PrometheusExternal = true config.PrometheusPort = 9876 + config.Log.Sync = "trace" + config.Log.Digest = "trace" + + config.Pprof.Enabled = true + config.Pprof.ListeningAddress = "0.0.0.0:6060" return config } diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 3d88c0482b..1ac9858053 100644 --- a/dot/sync/chain_sync.go +++ 
b/dot/sync/chain_sync.go
@@ -192,15 +192,7 @@ func (cs *chainSync) stop() {
 }
 
 func (cs *chainSync) sync() {
-	var useFinalisedHeader bool
-
 for {
-		bestBlockHeader, err := cs.blockState.BestBlockHeader()
-		if err != nil {
-			logger.Criticalf("getting best block header: %s", err)
-			return
-		}
-
 syncTarget, err := cs.getTarget()
 if err != nil {
 logger.Criticalf("getting target: %w", err)
@@ -212,6 +204,7 @@ func (cs *chainSync) sync() {
 logger.Criticalf("getting finalised block header: %s", err)
 return
 }
+
 logger.Infof(
 "🚣 currently syncing, %d peers connected, "+
 "%d available workers, "+
@@ -221,11 +214,17 @@ func (cs *chainSync) sync() {
 cs.workerPool.totalWorkers(),
 syncTarget, finalisedHeader.Number, finalisedHeader.Hash())
 
+		bestBlockHeader, err := cs.blockState.BestBlockHeader()
+		if err != nil {
+			logger.Criticalf("getting best block header: %s", err)
+			return
+		}
+
 bestBlockNumber := bestBlockHeader.Number
 isFarFromTarget := bestBlockNumber+maxResponseSize < syncTarget
 
 if isFarFromTarget {
-			// we are at least 128 blocks behind the head, switch to bootstrap
+			// we are more than 128 blocks behind the head, switch to bootstrap
 swapped := cs.state.CompareAndSwap(tip, bootstrap)
 isSyncedGauge.Set(0)
 
@@ -233,19 +232,11 @@ func (cs *chainSync) sync() {
 logger.Debugf("switched sync mode to %d", bootstrap)
 }
 
-			checkPointHeader := bestBlockHeader
-			if useFinalisedHeader {
-				checkPointHeader = finalisedHeader
-			}
-			err := cs.executeBootstrapSync(checkPointHeader)
-
+			err := cs.executeBootstrapSync(bestBlockHeader)
 if err != nil {
 logger.Errorf("while executing bootstrap sync: %s", err)
-				useFinalisedHeader = true
-				continue
 }
 
-			useFinalisedHeader = false
 } else {
 // we are less than 128 blocks behind the target we can use tip sync
 swapped := cs.state.CompareAndSwap(bootstrap, tip)
@@ -468,10 +459,10 @@ func (cs *chainSync) requestPendingBlocks(highestFinalizedHeader *types.Header)
 return nil
 }
 
-func (cs *chainSync) executeBootstrapSync(checkPointHeader *types.Header) error {
+func (cs *chainSync) executeBootstrapSync(bestBlockHeader *types.Header) error {
 cs.workerPool.useConnectedPeers()
 
-	startRequestAt := checkPointHeader.Number + 1
+	startRequestAt := bestBlockHeader.Number + 1
 
 // we build the set of requests based on the amount of available peers
 // in the worker pool, if we have more peers than `maxRequestAllowed`
diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go
index 94439bf56d..3fc3509fe3 100644
--- a/dot/sync/chain_sync_test.go
+++ b/dot/sync/chain_sync_test.go
@@ -374,7 +374,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker(t *testing.T) {
 stopCh := make(chan struct{})
 go cs.workerPool.listenForRequests(stopCh)
 
-	err = cs.executeBootstrapSync()
+	err = cs.executeBootstrapSync(mockedGenesisHeader)
 require.NoError(t, err)
 
 close(stopCh)
@@ -453,7 +453,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) {
 stopCh := make(chan struct{})
 go cs.workerPool.listenForRequests(stopCh)
 
-	err = cs.executeBootstrapSync()
+	err = cs.executeBootstrapSync(mockedGenesisHeader)
 require.NoError(t, err)
 
 close(stopCh)
@@ -559,7 +559,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker_Failing(t *testing
 stopCh := make(chan struct{})
 go cs.workerPool.listenForRequests(stopCh)
 
-	err = cs.executeBootstrapSync()
+	err = cs.executeBootstrapSync(mockedGenesisHeader)
 require.NoError(t, err)
 
 close(stopCh)

From 61c1a513afd84a4e45fb2a6b794fdef197f52804 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Fri, 16 Jun 2023 09:45:46 
-0400
Subject: [PATCH 062/140] chore: revert libp2p resource manager additions

---
 dot/network/host.go | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/dot/network/host.go b/dot/network/host.go
index 3217ae3d6b..1374cb672f 100644
--- a/dot/network/host.go
+++ b/dot/network/host.go
@@ -26,7 +26,6 @@ import (
 "github.com/libp2p/go-libp2p/core/peerstore"
 "github.com/libp2p/go-libp2p/core/protocol"
 "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds"
-	rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
 ma "github.com/multiformats/go-multiaddr"
 )
 
@@ -179,15 +178,8 @@ func newHost(ctx context.Context, cfg *Config) (*host, error) {
 return nil, fmt.Errorf("failed to create peerstore: %w", err)
 }
 
-	limiter := rcmgr.NewFixedLimiter(rcmgr.DefaultLimits.AutoScale())
-	rcmanager, err := rcmgr.NewResourceManager(limiter)
-	if err != nil {
-		return nil, fmt.Errorf("while creating the resource manager: %w", err)
-	}
-
 // set libp2p host options
 opts := []libp2p.Option{
-		libp2p.ResourceManager(rcmanager),
 libp2p.ListenAddrs(addr),
 libp2p.DisableRelay(),
 libp2p.Identity(cfg.privateKey),

From f415b90490fded702e9307018cb6e6d8772ff341 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Fri, 16 Jun 2023 09:53:21 -0400
Subject: [PATCH 063/140] chore: rename `AllConnectedPeers` to
 `AllConnectedPeersID`

---
 dot/network/service.go | 4 ++--
 dot/sync/interfaces.go | 2 +-
 dot/sync/mocks_test.go | 12 ++++++------
 dot/sync/worker_pool.go | 2 +-
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/dot/network/service.go b/dot/network/service.go
index eadeb61708..48fdf35d57 100644
--- a/dot/network/service.go
+++ b/dot/network/service.go
@@ -593,8 +593,8 @@ func (s *Service) NetworkState() common.NetworkState {
 }
 }
 
-// AllConnectedPeers returns all the connected to the node instance
-func (s *Service) AllConnectedPeers() []peer.ID {
+// AllConnectedPeersID returns all peers connected to the node instance
+func (s *Service) AllConnectedPeersID() []peer.ID {
 return s.host.p2pHost.Network().Peers()
 }
 
diff --git a/dot/sync/interfaces.go b/dot/sync/interfaces.go
index 02ea7e113e..0e21ee5ce4 100644
--- a/dot/sync/interfaces.go
+++ b/dot/sync/interfaces.go
@@ -82,7 +82,7 @@ type Network interface {
 // ReportPeer reports peer based on the peer behaviour.
 ReportPeer(change peerset.ReputationChange, p peer.ID)
 
-	AllConnectedPeers() []peer.ID
+	AllConnectedPeersID() []peer.ID
 }
 
 // Telemetry is the telemetry client to send telemetry messages.
diff --git a/dot/sync/mocks_test.go b/dot/sync/mocks_test.go
index f62a830693..e211e5dc2a 100644
--- a/dot/sync/mocks_test.go
+++ b/dot/sync/mocks_test.go
@@ -609,18 +609,18 @@ func (m *MockNetwork) EXPECT() *MockNetworkMockRecorder {
 return m.recorder
 }
 
-// AllConnectedPeers mocks base method.
-func (m *MockNetwork) AllConnectedPeers() []peer.ID {
+// AllConnectedPeersID mocks base method.
+func (m *MockNetwork) AllConnectedPeersID() []peer.ID {
 m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "AllConnectedPeers")
+	ret := m.ctrl.Call(m, "AllConnectedPeersID")
 ret0, _ := ret[0].([]peer.ID)
 return ret0
 }
 
-// AllConnectedPeers indicates an expected call of AllConnectedPeers.
+// AllConnectedPeersID indicates an expected call of AllConnectedPeersID. 
+func (mr *MockNetworkMockRecorder) AllConnectedPeersID() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllConnectedPeers", reflect.TypeOf((*MockNetwork)(nil).AllConnectedPeers)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllConnectedPeersID", reflect.TypeOf((*MockNetwork)(nil).AllConnectedPeersID)) } // DoBlockRequest mocks base method. diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index cd4dccb774..489aa99415 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -67,7 +67,7 @@ func newSyncWorkerPool(net Network) *syncWorkerPool { } func (s *syncWorkerPool) useConnectedPeers() { - connectedPeers := s.network.AllConnectedPeers() + connectedPeers := s.network.AllConnectedPeersID() if len(connectedPeers) < 1 { return } From fe132b4a08c11932018ec3dbab23c68dd2616213 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 16 Jun 2023 09:58:46 -0400 Subject: [PATCH 064/140] chore: move `ErrReceivedEmptyMessage` to errors file --- dot/network/errors.go | 2 ++ dot/network/sync.go | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/dot/network/errors.go b/dot/network/errors.go index ef895a6735..640f15c4ba 100644 --- a/dot/network/errors.go +++ b/dot/network/errors.go @@ -8,6 +8,8 @@ import ( ) var ( + ErrReceivedEmptyMessage = errors.New("received empty message") + errCannotValidateHandshake = errors.New("failed to validate handshake") errMessageTypeNotValid = errors.New("message type is not valid") errInvalidHandshakeForPeer = errors.New("peer previously sent invalid handshake") diff --git a/dot/network/sync.go b/dot/network/sync.go index 8aeca4c272..4cddacb7c4 100644 --- a/dot/network/sync.go +++ b/dot/network/sync.go @@ -5,7 +5,6 @@ package network import ( "context" - "errors" "fmt" "time" @@ -15,7 +14,6 @@ import ( ) var blockRequestTimeout = time.Second * 20 -var ErrReceivedEmptyMessage = errors.New("received empty message") // DoBlockRequest sends a request to the given peer. // If a response is received within a certain time period, it is returned, From 1df21a3c320133a905621ba8318f383d345bfda9 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 16 Jun 2023 10:06:28 -0400 Subject: [PATCH 065/140] chore: rename `<-done` to `<-stop` --- dot/sync/chain_sync_test.go | 6 +++--- dot/sync/disjoint_block_set.go | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index 3fc3509fe3..559e0e3e39 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -337,7 +337,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker(t *testing.T) { Direction: network.Ascending, Max: &max, }).Return(totalBlockResponse, nil) - mockedNetwork.EXPECT().AllConnectedPeers().Return([]peer.ID{}) + mockedNetwork.EXPECT().AllConnectedPeersID().Return([]peer.ID{}) mockedBlockState := NewMockBlockState(ctrl) mockedBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) @@ -429,7 +429,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) { mockNetwork.EXPECT().DoBlockRequest(gomock.Any(), gomock.Any()). Return(worker2Response, nil) - mockNetwork.EXPECT().AllConnectedPeers().Return([]peer.ID{}) + mockNetwork.EXPECT().AllConnectedPeersID().Return([]peer.ID{}) // setup a chain sync which holds in its peer view map // 3 peers, each one announce block 129 as its best block number. 
// We start this test with genesis block being our best block, so @@ -535,7 +535,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker_Failing(t *testing return worker2Response, nil }).Times(3) - mockNetwork.EXPECT().AllConnectedPeers().Return([]peer.ID{}) + mockNetwork.EXPECT().AllConnectedPeersID().Return([]peer.ID{}) // setup a chain sync which holds in its peer view map // 3 peers, each one announce block 129 as its best block number. // We start this test with genesis block being our best block, so diff --git a/dot/sync/disjoint_block_set.go b/dot/sync/disjoint_block_set.go index 538f302ea2..400b3877e9 100644 --- a/dot/sync/disjoint_block_set.go +++ b/dot/sync/disjoint_block_set.go @@ -27,7 +27,7 @@ var ( // DisjointBlockSet represents a set of incomplete blocks, or blocks // with an unknown parent. it is implemented by *disjointBlockSet type DisjointBlockSet interface { - run(finalisedCh <-chan *types.FinalisationInfo, done <-chan struct{}) + run(finalisedCh <-chan *types.FinalisationInfo, stop <-chan struct{}) addHashAndNumber(hash common.Hash, number uint) error addHeader(*types.Header) error addBlock(*types.Block) error @@ -114,7 +114,7 @@ func newDisjointBlockSet(limit int) *disjointBlockSet { } } -func (s *disjointBlockSet) run(finalisedCh <-chan *types.FinalisationInfo, done <-chan struct{}) { +func (s *disjointBlockSet) run(finalisedCh <-chan *types.FinalisationInfo, stop <-chan struct{}) { ticker := time.NewTicker(clearBlocksInterval) defer ticker.Stop() @@ -124,7 +124,7 @@ func (s *disjointBlockSet) run(finalisedCh <-chan *types.FinalisationInfo, done s.clearBlocks() case finalisedInfo := <-finalisedCh: s.removeLowerBlocks(finalisedInfo.Header.Number) - case <-done: + case <-stop: return } } From eeb5118a598720491c941ddf735d7148e1192a53 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 20 Jun 2023 09:54:30 -0400 Subject: [PATCH 066/140] chore: create `TestSyncWorkerPool_useConnectedPeers` test --- dot/sync/worker_pool.go | 6 +- dot/sync/worker_pool_test.go | 139 +++++++++++++++++++++++++++++++++++ 2 files changed, 142 insertions(+), 3 deletions(-) create mode 100644 dot/sync/worker_pool_test.go diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 489aa99415..7b5e925fac 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -75,17 +75,17 @@ func (s *syncWorkerPool) useConnectedPeers() { s.l.Lock() defer s.l.Unlock() for _, connectedPeer := range connectedPeers { - s.newPeer(connectedPeer, false) + s.newPeer(connectedPeer) } } func (s *syncWorkerPool) fromBlockAnnounce(who peer.ID) { s.l.Lock() defer s.l.Unlock() - s.newPeer(who, true) + s.newPeer(who) } -func (s *syncWorkerPool) newPeer(who peer.ID, isFromBlockAnnounce bool) { +func (s *syncWorkerPool) newPeer(who peer.ID) { if _, ok := s.ignorePeers[who]; ok { return } diff --git a/dot/sync/worker_pool_test.go b/dot/sync/worker_pool_test.go new file mode 100644 index 0000000000..d6f097699e --- /dev/null +++ b/dot/sync/worker_pool_test.go @@ -0,0 +1,139 @@ +package sync + +import ( + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" +) + +func TestSyncWorkerPool_useConnectedPeers(t *testing.T) { + t.Parallel() + stablePunishmentTime := time.Now().Add(time.Minute * 2) + + cases := map[string]struct { + setupWorkerPool func(t *testing.T) *syncWorkerPool + expectedPool map[peer.ID]*peerSyncWorker + }{ + "no_connected_peers": { + setupWorkerPool: func(t *testing.T) *syncWorkerPool { + ctrl 
:= gomock.NewController(t) + networkMock := NewMockNetwork(ctrl) + networkMock.EXPECT(). + AllConnectedPeersID(). + Return([]peer.ID{}) + + return newSyncWorkerPool(networkMock) + }, + expectedPool: make(map[peer.ID]*peerSyncWorker), + }, + "3_available_peers": { + setupWorkerPool: func(t *testing.T) *syncWorkerPool { + ctrl := gomock.NewController(t) + networkMock := NewMockNetwork(ctrl) + networkMock.EXPECT(). + AllConnectedPeersID(). + Return([]peer.ID{ + peer.ID("available-1"), + peer.ID("available-2"), + peer.ID("available-3"), + }) + return newSyncWorkerPool(networkMock) + }, + expectedPool: map[peer.ID]*peerSyncWorker{ + peer.ID("available-1"): {status: available}, + peer.ID("available-2"): {status: available}, + peer.ID("available-3"): {status: available}, + }, + }, + "2_available_peers_1_to_ignore": { + setupWorkerPool: func(t *testing.T) *syncWorkerPool { + ctrl := gomock.NewController(t) + networkMock := NewMockNetwork(ctrl) + networkMock.EXPECT(). + AllConnectedPeersID(). + Return([]peer.ID{ + peer.ID("available-1"), + peer.ID("available-2"), + peer.ID("available-3"), + }) + workerPool := newSyncWorkerPool(networkMock) + workerPool.ignorePeers[peer.ID("available-3")] = struct{}{} + return workerPool + }, + expectedPool: map[peer.ID]*peerSyncWorker{ + peer.ID("available-1"): {status: available}, + peer.ID("available-2"): {status: available}, + }, + }, + "peer_punishment_not_valid_anymore": { + setupWorkerPool: func(t *testing.T) *syncWorkerPool { + ctrl := gomock.NewController(t) + networkMock := NewMockNetwork(ctrl) + networkMock.EXPECT(). + AllConnectedPeersID(). + Return([]peer.ID{ + peer.ID("available-1"), + peer.ID("available-2"), + peer.ID("available-3"), + }) + workerPool := newSyncWorkerPool(networkMock) + workerPool.workers[peer.ID("available-3")] = &peerSyncWorker{ + status: punished, + punishmentTime: time.Unix(1000, 0), //arbitrary unix value + } + return workerPool + }, + expectedPool: map[peer.ID]*peerSyncWorker{ + peer.ID("available-1"): {status: available}, + peer.ID("available-2"): {status: available}, + peer.ID("available-3"): { + status: available, + punishmentTime: time.Unix(1000, 0), + }, + }, + }, + "peer_punishment_still_valid": { + setupWorkerPool: func(t *testing.T) *syncWorkerPool { + ctrl := gomock.NewController(t) + networkMock := NewMockNetwork(ctrl) + networkMock.EXPECT(). + AllConnectedPeersID(). 
+					Return([]peer.ID{
+						peer.ID("available-1"),
+						peer.ID("available-2"),
+						peer.ID("available-3"),
+					})
+				workerPool := newSyncWorkerPool(networkMock)
+				workerPool.workers[peer.ID("available-3")] = &peerSyncWorker{
+					status: punished,
+					punishmentTime: stablePunishmentTime,
+				}
+				return workerPool
+			},
+			expectedPool: map[peer.ID]*peerSyncWorker{
+				peer.ID("available-1"): {status: available},
+				peer.ID("available-2"): {status: available},
+				peer.ID("available-3"): {
+					status: punished,
+					punishmentTime: stablePunishmentTime,
+				},
+			},
+		},
+	}
+
+	for tname, tt := range cases {
+		tt := tt
+		t.Run(tname, func(t *testing.T) {
+			t.Parallel()
+
+			workerPool := tt.setupWorkerPool(t)
+			workerPool.useConnectedPeers()
+
+			require.Equal(t, workerPool.workers, tt.expectedPool)
+		})
+	}
+
+}

From 10dd486bfb1d88ef43f82c05c29a74f756f94982 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Tue, 20 Jun 2023 10:09:57 -0400
Subject: [PATCH 067/140] chore: add methods comments

---
 dot/sync/worker_pool.go | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go
index 489aa99415..005224bc04 100644
--- a/dot/sync/worker_pool.go
+++ b/dot/sync/worker_pool.go
@@ -66,6 +66,8 @@ func newSyncWorkerPool(net Network) *syncWorkerPool {
 return swp
 }
 
+// useConnectedPeers will retrieve all connected peers
+// through the network layer and use them as sources of blocks
 func (s *syncWorkerPool) useConnectedPeers() {
 connectedPeers := s.network.AllConnectedPeersID()
 if len(connectedPeers) < 1 {
@@ -85,6 +87,8 @@ func (s *syncWorkerPool) fromBlockAnnounce(who peer.ID) {
 s.newPeer(who)
 }
 
+// newPeer includes a new peer in the worker
+// pool if it is not an ignored peer and is not currently punished
 func (s *syncWorkerPool) newPeer(who peer.ID) {
 if _, ok := s.ignorePeers[who]; ok {
 return
@@ -105,6 +109,9 @@ func (s *syncWorkerPool) newPeer(who peer.ID) {
 }
 }
 
+// submitBoundedRequest given a request the worker pool will drive it
+// to the given peer.ID, used for tip sync when we receive a block announce
+// from a peer and we want to use the exact same peer to request blocks
 func (s *syncWorkerPool) submitBoundedRequest(request *network.BlockRequestMessage, who peer.ID, resultCh chan<- *syncTaskResult) {
 s.taskQueue <- &syncTask{
 boundTo: &who,
@@ -113,6 +120,8 @@ func (s *syncWorkerPool) submitBoundedRequest(request *network.BlockRequestMessa
 }
 }
 
+// submitRequest given a request the worker pool will get the very first available worker
+// to perform the request, the response will be dispatched to the resultCh
 func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, resultCh chan<- *syncTaskResult) {
 s.taskQueue <- &syncTask{
 request: request,
@@ -120,12 +129,17 @@ func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, res
 }
 }
 
+// submitRequests takes a set of requests and submits them to the pool through submitRequest,
+// the responses will be dispatched to the resultCh
 func (s *syncWorkerPool) submitRequests(requests []*network.BlockRequestMessage, resultCh chan<- *syncTaskResult) {
 for _, request := range requests {
 s.submitRequest(request, resultCh)
 }
 }
 
+// punishPeer given a peer.ID we increase its times punished
+// and apply the punishment time using the base timeout of 5m, so
+// each time a peer is punished its timeout will increase by 5m
 func (s *syncWorkerPool) punishPeer(who peer.ID) {
 s.l.Lock()
 defer s.l.Unlock()

From 9d2b32d05695fe2e29130244005c7d13fe9eb369 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Tue, 20 Jun 2023 10:14:48 -0400
Subject: [PATCH 068/140] chore: include `TestSyncWorkerPool_newPeer` test

---
 dot/sync/worker_pool_test.go | 76 ++++++++++++++++++++++++++++++++
 1 file changed, 76 insertions(+)

diff --git a/dot/sync/worker_pool_test.go b/dot/sync/worker_pool_test.go
index d6f097699e..b13c6421cd 100644
--- a/dot/sync/worker_pool_test.go
+++ b/dot/sync/worker_pool_test.go
@@ -135,5 +135,81 @@ func TestSyncWorkerPool_useConnectedPeers(t *testing.T) {
 require.Equal(t, workerPool.workers, tt.expectedPool)
 })
 }
+}
+
+func TestSyncWorkerPool_newPeer(t *testing.T) {
+	t.Parallel()
+	stablePunishmentTime := time.Now().Add(time.Minute * 2)
+
+	cases := map[string]struct {
+		peerID peer.ID
+		setupWorkerPool func(t *testing.T) *syncWorkerPool
+		expectedPool map[peer.ID]*peerSyncWorker
+	}{
+		"very_first_entry": {
+			peerID: peer.ID("peer-1"),
+			setupWorkerPool: func(*testing.T) *syncWorkerPool {
+				return newSyncWorkerPool(nil)
+			},
+			expectedPool: map[peer.ID]*peerSyncWorker{
+				peer.ID("peer-1"): {status: available},
+			},
+		},
+		"peer_to_ignore": {
+			peerID: peer.ID("to-ignore"),
+			setupWorkerPool: func(*testing.T) *syncWorkerPool {
+				workerPool := newSyncWorkerPool(nil)
+				workerPool.ignorePeers[peer.ID("to-ignore")] = struct{}{}
+				return workerPool
+			},
+			expectedPool: map[peer.ID]*peerSyncWorker{},
+		},
+		"peer_punishment_not_valid_anymore": {
+			peerID: peer.ID("free-again"),
+			setupWorkerPool: func(*testing.T) *syncWorkerPool {
+				workerPool := newSyncWorkerPool(nil)
+				workerPool.workers[peer.ID("free-again")] = &peerSyncWorker{
+					status: punished,
+					punishmentTime: time.Unix(1000, 0), //arbitrary unix value
+				}
+				return workerPool
+			},
+			expectedPool: map[peer.ID]*peerSyncWorker{
+				peer.ID("free-again"): {
+					status: available,
+					punishmentTime: time.Unix(1000, 0),
+				},
+			},
+		},
+		"peer_punishment_still_valid": {
+			peerID: peer.ID("peer_punished"),
+			setupWorkerPool: func(*testing.T) *syncWorkerPool {
+
+				workerPool := newSyncWorkerPool(nil)
+				workerPool.workers[peer.ID("peer_punished")] = &peerSyncWorker{
+					status: punished,
+					punishmentTime: stablePunishmentTime,
+				}
+				return workerPool
+			},
+			expectedPool: map[peer.ID]*peerSyncWorker{
+				peer.ID("peer_punished"): {
+					status: punished,
+					punishmentTime: stablePunishmentTime,
+				},
+			},
+		},
+	}
+
+	for tname, tt := range cases {
+		tt := tt
+		t.Run(tname, func(t *testing.T) {
+			t.Parallel()
+
+			workerPool := tt.setupWorkerPool(t)
+			workerPool.newPeer(tt.peerID)
+
+			require.Equal(t, workerPool.workers, tt.expectedPool)
+		})
+	}
+}

From e59228ab81f689ea412a7642e6c60de275fc515e Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Tue, 20 Jun 2023 10:21:15 -0400
Subject: [PATCH 069/140] chore: rename `l` to `mtx`

---
 dot/sync/worker_pool.go | 44 ++++++++++++++++++++---------------------
 1 file changed, 21 insertions(+), 23 deletions(-)

diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go
index 005224bc04..2f6489efc0 100644
--- a/dot/sync/worker_pool.go
+++ b/dot/sync/worker_pool.go
@@ -43,7 +43,7 @@ type peerSyncWorker struct {
 
 type syncWorkerPool struct {
 wg sync.WaitGroup
-	l sync.RWMutex
+	mtx sync.RWMutex
 
 doneCh chan struct{}
 availableCond *sync.Cond
@@ -62,7 +62,7 @@ func newSyncWorkerPool(net Network) *syncWorkerPool {
 ignorePeers: make(map[peer.ID]struct{}),
 }
 
-	swp.availableCond = sync.NewCond(&swp.l)
+	swp.availableCond = sync.NewCond(&swp.mtx)
 
 return swp
 }
@@ -74,16 +74,16 @@ func (s *syncWorkerPool) useConnectedPeers() {
 return
 }
 
-	s.l.Lock()
-	defer s.l.Unlock()
+	s.mtx.Lock()
+	defer s.mtx.Unlock()
 for _, 
connectedPeer := range connectedPeers {
 s.newPeer(connectedPeer)
 }
 }
 
 func (s *syncWorkerPool) fromBlockAnnounce(who peer.ID) {
-	s.l.Lock()
-	defer s.l.Unlock()
+	s.mtx.Lock()
+	defer s.mtx.Unlock()
 s.newPeer(who)
 }
 
@@ -141,8 +141,8 @@ func (s *syncWorkerPool) submitRequests(requests []*network.BlockRequestMessage,
 // and apply the punishment time using the base timeout of 5m, so
 // each time a peer is punished its timeout will increase by 5m
 func (s *syncWorkerPool) punishPeer(who peer.ID) {
-	s.l.Lock()
-	defer s.l.Unlock()
+	s.mtx.Lock()
+	defer s.mtx.Unlock()
 
 worker, has := s.workers[who]
 if !has {
@@ -161,8 +161,8 @@
 }
 
 func (s *syncWorkerPool) ignorePeerAsWorker(who peer.ID) {
-	s.l.Lock()
-	defer s.l.Unlock()
+	s.mtx.Lock()
+	defer s.mtx.Unlock()
 
 delete(s.workers, who)
 s.ignorePeers[who] = struct{}{}
@@ -170,11 +170,11 @@
 // totalWorkers only returns the available workers
 func (s *syncWorkerPool) totalWorkers() (total uint) {
-	s.l.RLock()
-	defer s.l.RUnlock()
+	s.mtx.RLock()
+	defer s.mtx.RUnlock()
 
 for _, worker := range s.workers {
-		if worker.status != punished {
+		if worker.status == available {
 total += 1
 }
 }
@@ -182,16 +182,14 @@
 return total
 }
 
-// getAvailablePeer returns the very first peer available and changes
-// its status from available to busy, if there is no peer avaible then
-// the caller should wait for availablePeerCh
+// getAvailablePeer returns the very first peer available, if there
+// is no peer available then the caller should wait for availablePeerCh
 func (s *syncWorkerPool) getAvailablePeer() peer.ID {
 for peerID, peerSync := range s.workers {
 switch peerSync.status {
 case punished:
-			// if the punishedTime has passed then we mark it
-			// as available and notify it availability if needed
-			// otherwise we keep the peer in the punishment and don't notify
+			// if the punishedTime has passed then we can
+			// use it as an available peer
 if peerSync.punishmentTime.Before(time.Now()) {
 return peerID
 }
@@ -230,7 +228,7 @@ func (s *syncWorkerPool) listenForRequests(stopCh chan struct{}) {
 // arrives and there is no available peer, then we should wait
 // for someone to become free and then use it.
-	s.l.Lock()
+	s.mtx.Lock()
 for {
 var peerID peer.ID
 if task.boundTo != nil {
@@ -247,7 +245,7 @@
 peerSync.status = busy
 s.workers[peerID] = peerSync
 
-			s.l.Unlock()
+			s.mtx.Unlock()
 
 s.wg.Add(1)
 go s.executeRequest(peerID, task)
@@ -262,13 +260,13 @@
 
 func (s *syncWorkerPool) executeRequest(who peer.ID, task *syncTask) {
 defer func() {
-		s.l.Lock()
+		s.mtx.Lock()
 peerSync, has := s.workers[who]
 if has {
 peerSync.status = available
 s.workers[who] = peerSync
 }
-		s.l.Unlock()
+		s.mtx.Unlock()
 
 s.availableCond.Signal()
 s.wg.Done()

From 3e7f42081f6ba4d4dda297542894db0825e2dd66 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Tue, 20 Jun 2023 10:21:40 -0400
Subject: [PATCH 070/140] chore: remove unneeded variable

---
 dot/sync/syncer.go | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/dot/sync/syncer.go b/dot/sync/syncer.go
index 2feb455d8c..809b07b2db 100644
--- a/dot/sync/syncer.go
+++ b/dot/sync/syncer.go
@@ -154,11 +154,10 @@ func (s *Service) HandleBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMe
 // that is also has the chain up until and including that block.
 // this may not be a valid assumption, but perhaps we can assume that
 // it is likely they will receive this block and its ancestors before us.
-	announcedBlock := announcedBlock{
+	return s.chainSync.onImportBlock(announcedBlock{
 who: from,
 header: blockAnnounceHeader,
-	}
-	return s.chainSync.onImportBlock(announcedBlock)
+	})
 }
 
 // IsSynced exposes the synced state

From 19592314b73c0591122f605a645a51f2e7ba7206 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Tue, 20 Jun 2023 11:36:27 -0400
Subject: [PATCH 071/140] chore: add `TestSyncWorkerPool__listenForRequests_submitRequest` test

---
 dot/sync/worker_pool_test.go | 67 ++++++++++++++++++++++++++++++
 1 file changed, 67 insertions(+)

diff --git a/dot/sync/worker_pool_test.go b/dot/sync/worker_pool_test.go
index b13c6421cd..27d2022d17 100644
--- a/dot/sync/worker_pool_test.go
+++ b/dot/sync/worker_pool_test.go
@@ -4,6 +4,9 @@ import (
 "testing"
 "time"
 
+	"github.com/ChainSafe/gossamer/dot/network"
+	"github.com/ChainSafe/gossamer/dot/types"
+	"github.com/ChainSafe/gossamer/lib/common"
 "github.com/golang/mock/gomock"
 "github.com/libp2p/go-libp2p/core/peer"
 "github.com/stretchr/testify/require"
)
@@ -213,3 +216,67 @@
 })
 }
}
+
+func TestSyncWorkerPool__listenForRequests_submitRequest(t *testing.T) {
+	t.Parallel()
+
+	ctrl := gomock.NewController(t)
+	networkMock := NewMockNetwork(ctrl)
+	workerPool := newSyncWorkerPool(networkMock)
+
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	go workerPool.listenForRequests(stopCh)
+
+	availablePeer := peer.ID("available-peer")
+	workerPool.newPeer(availablePeer)
+
+	blockRequest, mockedBlockResponse := singleBlockRequestAndResponse()
+
+	// introduce a timeout of 5s so we can test the
+	// peer status change to busy
+	networkMock.EXPECT().
+		DoBlockRequest(availablePeer, blockRequest).
+		DoAndReturn(func(any, any) (any, any) {
+			time.Sleep(5 * time.Second)
+			return mockedBlockResponse, nil
+		})
+
+	resultCh := make(chan *syncTaskResult)
+	workerPool.submitRequest(blockRequest, resultCh)
+
+	// ensure the task is in the pool and was already
+	// assigned to the peer
+	time.Sleep(time.Second)
+
+	totalWorkers := workerPool.totalWorkers()
+	require.Zero(t, totalWorkers)
+
+	peerSync := workerPool.getPeerByID(availablePeer)
+	require.Equal(t, peerSync.status, busy)
+
+	syncTaskResult := <-resultCh
+	require.NoError(t, syncTaskResult.err)
+	require.Equal(t, syncTaskResult.who, availablePeer)
+	require.Equal(t, syncTaskResult.request, blockRequest)
+	require.Equal(t, syncTaskResult.response, mockedBlockResponse)
+}
+
+// singleBlockRequestAndResponse creates a static block request and response for a single block
+func singleBlockRequestAndResponse() (*network.BlockRequestMessage, *network.BlockResponseMessage) {
+	blockHash := common.MustHexToHash("0x750646b852a29e5f3668959916a03d6243a3137e91d0cd36870364931030f707")
+	blockRequest := singleBlockRequest(blockHash, bootstrapRequestData)
+	mockedBlockResponse := &network.BlockResponseMessage{
+		BlockData: []*types.BlockData{
+			{
+				Hash: blockHash,
+				Header: &types.Header{
+					ParentHash: common.
+						MustHexToHash("0x5895897f12e1a670609929433ac7a69dcae90e0cc2d9c32c0dce0e2a5e5e614e"),
+				},
+			},
+		},
+	}
+
+	return blockRequest, mockedBlockResponse
+}

From 4ea26dc46c13938e43df97f953ca1d635798a9e1 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Tue, 20 Jun 2023 14:42:51 -0400
Subject: [PATCH 072/140] chore: add `TestSyncWorkerPool_listenForRequests_busyWorkers` test

---
 dot/sync/worker_pool_test.go | 95 ++++++++++++++++++++++++++++++++----
 1 file changed, 86 insertions(+), 9 deletions(-)

diff --git a/dot/sync/worker_pool_test.go b/dot/sync/worker_pool_test.go
index 27d2022d17..1a71f49f96 100644
--- a/dot/sync/worker_pool_test.go
+++ b/dot/sync/worker_pool_test.go
@@ -217,7 +217,7 @@ func TestSyncWorkerPool_newPeer(t *testing.T) {
 }
 }
 
-func TestSyncWorkerPool__listenForRequests_submitRequest(t *testing.T) {
+func TestSyncWorkerPool_listenForRequests_submitRequest(t *testing.T) {
 t.Parallel()
 
 ctrl := gomock.NewController(t)
 networkMock := NewMockNetwork(ctrl)
 workerPool := newSyncWorkerPool(networkMock)
 
 stopCh := make(chan struct{})
 defer close(stopCh)
 go workerPool.listenForRequests(stopCh)
 
 availablePeer := peer.ID("available-peer")
 workerPool.newPeer(availablePeer)
 
-	blockRequest, mockedBlockResponse := singleBlockRequestAndResponse()
+	blockHash := common.MustHexToHash("0x750646b852a29e5f3668959916a03d6243a3137e91d0cd36870364931030f707")
+	blockRequest := singleBlockRequest(blockHash, bootstrapRequestData)
+	mockedBlockResponse := &network.BlockResponseMessage{
+		BlockData: []*types.BlockData{
+			{
+				Hash: blockHash,
+				Header: &types.Header{
+					ParentHash: common.
+						MustHexToHash("0x5895897f12e1a670609929433ac7a69dcae90e0cc2d9c32c0dce0e2a5e5e614e"),
+				},
+			},
+		},
+	}
 
 // introduce a timeout of 5s so we can test the
 // peer status change to busy
 networkMock.EXPECT().
 DoBlockRequest(availablePeer, blockRequest).
@@ -262,14 +274,30 @@ func TestSyncWorkerPool_listenForRequests_submitRequest(t *testing.T) {
 require.Equal(t, syncTaskResult.response, mockedBlockResponse)
 }
 
-// singleBlockRequestAndResponse creates a static block request and response for a single block
-func singleBlockRequestAndResponse() (*network.BlockRequestMessage, *network.BlockResponseMessage) {
-	blockHash := common.MustHexToHash("0x750646b852a29e5f3668959916a03d6243a3137e91d0cd36870364931030f707")
-	blockRequest := singleBlockRequest(blockHash, bootstrapRequestData)
-	mockedBlockResponse := &network.BlockResponseMessage{
+func TestSyncWorkerPool_listenForRequests_busyWorkers(t *testing.T) {
+	t.Parallel()
+
+	ctrl := gomock.NewController(t)
+	networkMock := NewMockNetwork(ctrl)
+	workerPool := newSyncWorkerPool(networkMock)
+
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	go workerPool.listenForRequests(stopCh)
+
+	availablePeer := peer.ID("available-peer")
+	workerPool.newPeer(availablePeer)
+
+	firstRequestBlockHash := common.MustHexToHash("0x750646b852a29e5f3668959916a03d6243a3137e91d0cd36870364931030f707")
+	firstBlockRequest := singleBlockRequest(firstRequestBlockHash, bootstrapRequestData)
+
+	secondRequestBlockHash := common.MustHexToHash("0x897646b852a29e5f3668959916a03d6243a3137e91d0cd36870364931030f707")
+	secondBlockRequest := singleBlockRequest(secondRequestBlockHash, bootstrapRequestData)
+
+	firstMockedBlockResponse := &network.BlockResponseMessage{
 BlockData: []*types.BlockData{
 {
-				Hash: blockHash,
+				Hash: firstRequestBlockHash,
 Header: &types.Header{
 ParentHash: common.
 MustHexToHash("0x5895897f12e1a670609929433ac7a69dcae90e0cc2d9c32c0dce0e2a5e5e614e"),
 },
 },
 },
 }
 
+	secondMockedBlockResponse := &network.BlockResponseMessage{
+		BlockData: []*types.BlockData{
+			{
+				Hash: secondRequestBlockHash,
+				Header: &types.Header{
+					ParentHash: common.
+						MustHexToHash("0x8965897f12e1a670609929433ac7a69dcae90e0cc2d9c32c0dce0e2a5e5e614e"),
+				},
+			},
+		},
+	}
+
+	// introduce a timeout of 5s so we can
+	// simulate a busy peer
+	networkMock.EXPECT().
+		DoBlockRequest(availablePeer, firstBlockRequest).
+		DoAndReturn(func(any, any) (any, any) {
+			time.Sleep(5 * time.Second)
+			return firstMockedBlockResponse, nil
+		})
+
+	networkMock.EXPECT().
+		DoBlockRequest(availablePeer, secondBlockRequest).
+		DoAndReturn(func(any, any) (any, any) {
+			return secondMockedBlockResponse, nil
+		})
+
+	resultCh := make(chan *syncTaskResult)
+
+	workerPool.submitRequests(
+		[]*network.BlockRequestMessage{firstBlockRequest, secondBlockRequest}, resultCh)
+
+	// ensure the task is in the pool and was already
+	// assigned to the peer
+	time.Sleep(time.Second)
+	require.Zero(t, workerPool.totalWorkers())
+
+	syncTaskResult := <-resultCh
+	require.NoError(t, syncTaskResult.err)
+	require.Equal(t, syncTaskResult.who, availablePeer)
+	require.Equal(t, syncTaskResult.request, firstBlockRequest)
+	require.Equal(t, syncTaskResult.response, firstMockedBlockResponse)
+
+	syncTaskResult = <-resultCh
+	require.NoError(t, syncTaskResult.err)
+	require.Equal(t, syncTaskResult.who, availablePeer)
+	require.Equal(t, syncTaskResult.request, secondBlockRequest)
+	require.Equal(t, syncTaskResult.response, secondMockedBlockResponse)
+
+	require.Equal(t, uint(1), workerPool.totalWorkers())
 }

From 16de56f9e7bea1f00780e86908fe6cb3cddab948 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Tue, 20 Jun 2023 16:41:20 -0400
Subject: [PATCH 073/140] chore: move request message creation to `network` pkg

---
 dot/network/message.go | 82 ++++++++++++++++++++++
 dot/network/message_test.go | 119 +++++++++++++++++++++++++++++++
 dot/sync/chain_sync.go | 30 ++++----
 dot/sync/message.go | 11 ++-
 dot/sync/requests.go | 94 -------------------------
 dot/sync/requests_test.go | 131 -----------------------------------
 dot/sync/worker_pool.go | 6 +-
 dot/sync/worker_pool_test.go | 6 +-
 8 files changed, 229 insertions(+), 250 deletions(-)
 delete mode 100644 dot/sync/requests.go
 delete mode 100644 dot/sync/requests_test.go

diff --git a/dot/network/message.go b/dot/network/message.go
index 7afa554738..9a14e42202 100644
--- a/dot/network/message.go
+++ b/dot/network/message.go
@@ -17,6 +17,9 @@ import (
 "github.com/ChainSafe/gossamer/pkg/scale"
 )
 
+// MaxBlockResponseSize is the maximum number of block data a BlockResponse message can contain
+const MaxBlockResponseSize = 128
+
 type MessageType byte
 
 // Message types for notifications protocol messages. Used internally to map message to protocol.
@@ -44,6 +47,9 @@ const (
 RequestedDataReceipt = byte(4)
 RequestedDataMessageQueue = byte(8)
 RequestedDataJustification = byte(16)
+	BootstrapRequestData = RequestedDataHeader +
+		RequestedDataBody +
+		RequestedDataJustification
 )
 
 var _ Message = &BlockRequestMessage{}
@@ -354,3 +360,79 @@ func (cm *ConsensusMessage) Hash() (common.Hash, error) {
 }
 return common.Blake2bHash(encMsg)
 }
+
+// NewSingleBlockRequestMessage returns a request to retrieve a single block
+func NewSingleBlockRequestMessage(blockHash common.Hash, requestedData byte) *BlockRequestMessage {
+	one := uint32(1)
+	return &BlockRequestMessage{
+		RequestedData: requestedData,
+		StartingBlock: *variadic.MustNewUint32OrHash(blockHash),
+		Direction: Descending,
+		Max: &one,
+	}
+}
+
+// NewDescendingBlockRequest returns a descending block request message
+func NewDescendingBlockRequest(blockHash common.Hash, amount uint32, requestedData byte) *BlockRequestMessage {
+	return &BlockRequestMessage{
+		RequestedData: requestedData,
+		StartingBlock: *variadic.MustNewUint32OrHash(blockHash),
+		Direction: Descending,
+		Max: &amount,
+	}
+}
+
+func NewAscedingBlockRequests(startNumber, targetNumber uint, requestedData byte) []*BlockRequestMessage {
+	if startNumber > targetNumber {
+		return []*BlockRequestMessage{}
+	}
+
+	diff := targetNumber - startNumber
+
+	// start and end block are the same, just request 1 block
+	if diff == 0 {
+		one := uint32(1)
+		return []*BlockRequestMessage{
+			{
+				RequestedData: requestedData,
+				StartingBlock: *variadic.MustNewUint32OrHash(uint32(startNumber)),
+				Direction: Ascending,
+				Max: &one,
+			},
+		}
+	}
+
+	numRequests := diff / MaxBlockResponseSize
+	// we should check if the diff is in the MaxBlockResponseSize bounds
+	// otherwise we should increase the numRequests by one, take this
+	// example, we want to sync from 0 to 259, the diff is 259
+	// then the num of requests is 2 (uint(259)/uint(128)) however two requests will
+	// retrieve only 256 blocks (each request can retrieve a max of 128 blocks), so we should
+	// create one more request to retrieve those missing blocks, 3 in this example.
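+	// worked arithmetic for that example: numRequests = 259/128 = 2 and
+	// missingBlocks = 259%128 = 3, so numRequests is bumped to 3 below,
+	// giving two full requests of 128 blocks and a final request of 3 blocks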
+ missingBlocks := diff % MaxBlockResponseSize + if missingBlocks != 0 { + numRequests++ + } + + reqs := make([]*BlockRequestMessage, numRequests) + for i := uint(0); i < numRequests; i++ { + max := uint32(MaxBlockResponseSize) + + lastIteration := numRequests - 1 + if i == lastIteration && missingBlocks != 0 { + max = uint32(missingBlocks) + } + + start := variadic.MustNewUint32OrHash(startNumber) + + reqs[i] = &BlockRequestMessage{ + RequestedData: requestedData, + StartingBlock: *start, + Direction: Ascending, + Max: &max, + } + startNumber += uint(max) + } + + return reqs +} diff --git a/dot/network/message_test.go b/dot/network/message_test.go index 44aed673a9..53f6c25d14 100644 --- a/dot/network/message_test.go +++ b/dot/network/message_test.go @@ -422,3 +422,122 @@ func TestDecodeConsensusMessage(t *testing.T) { require.NoError(t, err) require.Equal(t, encMsg, encodedMessage) } + +func TestAscendingBlockRequest(t *testing.T) { + one := uint32(1) + three := uint32(3) + maxResponseSize := uint32(MaxBlockResponseSize) + cases := map[string]struct { + startNumber, targetNumber uint + expectedBlockRequestMessage []*BlockRequestMessage + }{ + "start_greater_than_target": { + startNumber: 10, + targetNumber: 0, + expectedBlockRequestMessage: []*BlockRequestMessage{}, + }, + + "no_difference_between_start_and_target": { + startNumber: 10, + targetNumber: 10, + expectedBlockRequestMessage: []*BlockRequestMessage{ + { + RequestedData: BootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(uint32(10)), + Direction: Ascending, + Max: &one, + }, + }, + }, + + "requesting_128_blocks": { + startNumber: 0, + targetNumber: 128, + expectedBlockRequestMessage: []*BlockRequestMessage{ + { + RequestedData: BootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(uint32(0)), + Direction: Ascending, + Max: &maxResponseSize, + }, + }, + }, + + "requesting_4_chunks_of_128_blocks": { + startNumber: 0, + targetNumber: 512, // 128 * 4 + expectedBlockRequestMessage: []*BlockRequestMessage{ + { + RequestedData: BootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(uint32(0)), + Direction: Ascending, + Max: &maxResponseSize, + }, + { + RequestedData: BootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(uint32(128)), + Direction: Ascending, + Max: &maxResponseSize, + }, + { + RequestedData: BootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(uint32(256)), + Direction: Ascending, + Max: &maxResponseSize, + }, + { + RequestedData: BootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(uint32(384)), + Direction: Ascending, + Max: &maxResponseSize, + }, + }, + }, + + "requesting_4_chunks_of_128_plus_3_blocks": { + startNumber: 0, + targetNumber: (128 * 4) + 3, + expectedBlockRequestMessage: []*BlockRequestMessage{ + { + RequestedData: BootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(uint32(0)), + Direction: Ascending, + Max: &maxResponseSize, + }, + { + RequestedData: BootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(uint32(128)), + Direction: Ascending, + Max: &maxResponseSize, + }, + { + RequestedData: BootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(uint32(256)), + Direction: Ascending, + Max: &maxResponseSize, + }, + { + RequestedData: BootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(uint32(384)), + Direction: Ascending, + Max: &maxResponseSize, + }, + { + RequestedData: BootstrapRequestData, + StartingBlock: 
*variadic.MustNewUint32OrHash(uint32(512)), + Direction: Ascending, + Max: &three, + }, + }, + }, + } + + for tname, tt := range cases { + tt := tt + + t.Run(tname, func(t *testing.T) { + requests := NewAscedingBlockRequests(tt.startNumber, tt.targetNumber, BootstrapRequestData) + require.Equal(t, requests, tt.expectedBlockRequestMessage) + }) + } +} diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 1ac9858053..5a20e26dea 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -48,9 +48,8 @@ func (s chainSyncState) String() string { } var ( - bootstrapRequestData = network.RequestedDataHeader + network.RequestedDataBody + network.RequestedDataJustification - pendingBlocksLimit = maxResponseSize * 32 - isSyncedGauge = promauto.NewGauge(prometheus.GaugeOpts{ + pendingBlocksLimit = network.MaxBlockResponseSize * 32 + isSyncedGauge = promauto.NewGauge(prometheus.GaugeOpts{ Namespace: "gossamer_network_syncer", Name: "is_synced", Help: "bool representing whether the node is synced to the head of the chain", @@ -221,7 +220,7 @@ func (cs *chainSync) sync() { } bestBlockNumber := bestBlockHeader.Number - isFarFromTarget := bestBlockNumber+maxResponseSize < syncTarget + isFarFromTarget := bestBlockNumber+network.MaxBlockResponseSize < syncTarget if isFarFromTarget { // we are more than 128 blocks behind the head, switch to bootstrap @@ -347,7 +346,7 @@ func (cs *chainSync) requestChainBlocks(announcedHeader, bestBlockHeader *types. totalBlocks := uint32(1) var request *network.BlockRequestMessage if gapLength > 1 { - request = descendingBlockRequest(announcedHeader.Hash(), gapLength, bootstrapRequestData) + request = network.NewDescendingBlockRequest(announcedHeader.Hash(), gapLength, network.BootstrapRequestData) startAtBlock = announcedHeader.Number - uint(*request.Max) + 1 totalBlocks = *request.Max @@ -355,7 +354,7 @@ func (cs *chainSync) requestChainBlocks(announcedHeader, bestBlockHeader *types. 
peerWhoAnnounced, gapLength, announcedHeader.Hash(), announcedHeader.Number) } else { gapLength = 1 - request = singleBlockRequest(announcedHeader.Hash(), bootstrapRequestData) + request = network.NewSingleBlockRequestMessage(announcedHeader.Hash(), network.BootstrapRequestData) logger.Debugf("received a block announce from %s, requesting a single block %s (#%d)", peerWhoAnnounced, announcedHeader.Hash(), announcedHeader.Number) } @@ -385,11 +384,11 @@ func (cs *chainSync) requestForkBlocks(bestBlockHeader, highestFinalizedHeader, var request *network.BlockRequestMessage if parentExists { - request = singleBlockRequest(announcedHash, bootstrapRequestData) + request = network.NewSingleBlockRequestMessage(announcedHash, network.BootstrapRequestData) } else { gapLength = uint32(announcedHeader.Number - highestFinalizedHeader.Number) startAtBlock = highestFinalizedHeader.Number + 1 - request = descendingBlockRequest(announcedHash, gapLength, bootstrapRequestData) + request = network.NewDescendingBlockRequest(announcedHash, gapLength, network.BootstrapRequestData) } logger.Debugf("received a block announce from %s, requesting %d blocks, starting %s (#%d)", @@ -438,8 +437,8 @@ func (cs *chainSync) requestPendingBlocks(highestFinalizedHeader *types.Header) gapLength = 128 } - descendingGapRequest := descendingBlockRequest(pendingBlock.hash, - uint32(gapLength), bootstrapRequestData) + descendingGapRequest := network.NewDescendingBlockRequest(pendingBlock.hash, + uint32(gapLength), network.BootstrapRequestData) startAtBlock := pendingBlock.number - uint(*descendingGapRequest.Max) + 1 // the `requests` in the tip sync are not related necessarily @@ -492,8 +491,15 @@ func (cs *chainSync) executeBootstrapSync(bestBlockHeader *types.Header) error { targetBlockNumber = targetBlockNumber - (numOfRequestsToDrop * 128) } - requests := ascedingBlockRequests(startRequestAt, targetBlockNumber, bootstrapRequestData) - expectedAmountOfBlocks := totalBlocksRequested(requests) + requests := network.NewAscedingBlockRequests(startRequestAt, targetBlockNumber, + network.BootstrapRequestData) + + var expectedAmountOfBlocks uint32 + for _, request := range requests { + if request.Max != nil { + expectedAmountOfBlocks += *request.Max + } + } wg := sync.WaitGroup{} resultsQueue := make(chan *syncTaskResult) diff --git a/dot/sync/message.go b/dot/sync/message.go index 41853fe7fd..a7709f359d 100644 --- a/dot/sync/message.go +++ b/dot/sync/message.go @@ -11,9 +11,6 @@ import ( "github.com/ChainSafe/gossamer/lib/common" ) -// maxResponseSize is maximum number of block data a BlockResponse message can contain -const maxResponseSize = 128 - // CreateBlockResponse creates a block response message from a block request message func (s *Service) CreateBlockResponse(req *network.BlockRequestMessage) (*network.BlockResponseMessage, error) { switch req.Direction { @@ -28,13 +25,13 @@ func (s *Service) CreateBlockResponse(req *network.BlockRequestMessage) (*networ func (s *Service) handleAscendingRequest(req *network.BlockRequestMessage) (*network.BlockResponseMessage, error) { var ( - max uint = maxResponseSize + max uint = network.MaxBlockResponseSize startHash *common.Hash startNumber uint ) // determine maximum response size - if req.Max != nil && *req.Max < maxResponseSize { + if req.Max != nil && *req.Max < network.MaxBlockResponseSize { max = uint(*req.Max) } @@ -105,11 +102,11 @@ func (s *Service) handleDescendingRequest(req *network.BlockRequestMessage) (*ne var ( startHash *common.Hash startNumber uint - max uint = 
maxResponseSize + max uint = network.MaxBlockResponseSize ) // determine maximum response size - if req.Max != nil && *req.Max < maxResponseSize { + if req.Max != nil && *req.Max < network.MaxBlockResponseSize { max = uint(*req.Max) } diff --git a/dot/sync/requests.go b/dot/sync/requests.go deleted file mode 100644 index 7e237aad0e..0000000000 --- a/dot/sync/requests.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2023 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/common/variadic" -) - -func singleBlockRequest(blockHash common.Hash, requestedData byte) *network.BlockRequestMessage { - one := uint32(1) - return &network.BlockRequestMessage{ - RequestedData: requestedData, - StartingBlock: *variadic.MustNewUint32OrHash(blockHash), - Direction: network.Descending, - Max: &one, - } -} - -func descendingBlockRequest(blockHash common.Hash, amount uint32, requestedData byte) *network.BlockRequestMessage { - return &network.BlockRequestMessage{ - RequestedData: requestedData, - StartingBlock: *variadic.MustNewUint32OrHash(blockHash), - Direction: network.Descending, - Max: &amount, - } -} - -func ascedingBlockRequests(startNumber, targetNumber uint, requestedData byte) []*network.BlockRequestMessage { - if startNumber > targetNumber { - return []*network.BlockRequestMessage{} - } - - diff := targetNumber - startNumber - - // start and end block are the same, just request 1 block - if diff == 0 { - one := uint32(1) - return []*network.BlockRequestMessage{ - { - RequestedData: requestedData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(startNumber)), - Direction: network.Ascending, - Max: &one, - }, - } - } - - numRequests := diff / maxResponseSize - // we should check if the diff is in the maxResponseSize bounds - // otherwise we should increase the numRequests by one, take this - // example, we want to sync from 0 to 259, the diff is 259 - // then the num of requests is 2 (uint(259)/uint(128)) however two requests will - // retrieve only 256 blocks (each request can retrive a max of 128 blocks), so we should - // create one more request to retrive those missing blocks, 3 in this example. 
- missingBlocks := diff % maxResponseSize - if missingBlocks != 0 { - numRequests++ - } - - reqs := make([]*network.BlockRequestMessage, numRequests) - for i := uint(0); i < numRequests; i++ { - max := uint32(maxResponseSize) - - lastIteration := numRequests - 1 - if i == lastIteration && missingBlocks != 0 { - max = uint32(missingBlocks) - } - - start := variadic.MustNewUint32OrHash(startNumber) - - reqs[i] = &network.BlockRequestMessage{ - RequestedData: requestedData, - StartingBlock: *start, - Direction: network.Ascending, - Max: &max, - } - startNumber += uint(max) - } - - return reqs -} - -func totalBlocksRequested(requests []*network.BlockRequestMessage) (total uint32) { - for _, request := range requests { - if request.Max != nil { - total += *request.Max - } - } - - return total -} diff --git a/dot/sync/requests_test.go b/dot/sync/requests_test.go deleted file mode 100644 index 15ac0fbbc9..0000000000 --- a/dot/sync/requests_test.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2023 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "testing" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/lib/common/variadic" - "github.com/stretchr/testify/require" -) - -func TestAscendingBlockRequest(t *testing.T) { - one := uint32(1) - three := uint32(3) - maxResponseSize := uint32(maxResponseSize) - cases := map[string]struct { - startNumber, targetNumber uint - expectedBlockRequestMessage []*network.BlockRequestMessage - }{ - "start_greater_than_target": { - startNumber: 10, - targetNumber: 0, - expectedBlockRequestMessage: []*network.BlockRequestMessage{}, - }, - - "no_difference_between_start_and_target": { - startNumber: 10, - targetNumber: 10, - expectedBlockRequestMessage: []*network.BlockRequestMessage{ - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(10)), - Direction: network.Ascending, - Max: &one, - }, - }, - }, - - "requesting_128_blocks": { - startNumber: 0, - targetNumber: 128, - expectedBlockRequestMessage: []*network.BlockRequestMessage{ - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(0)), - Direction: network.Ascending, - Max: &maxResponseSize, - }, - }, - }, - - "requesting_4_chunks_of_128_blocks": { - startNumber: 0, - targetNumber: 512, // 128 * 4 - expectedBlockRequestMessage: []*network.BlockRequestMessage{ - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(0)), - Direction: network.Ascending, - Max: &maxResponseSize, - }, - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(128)), - Direction: network.Ascending, - Max: &maxResponseSize, - }, - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(256)), - Direction: network.Ascending, - Max: &maxResponseSize, - }, - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(384)), - Direction: network.Ascending, - Max: &maxResponseSize, - }, - }, - }, - - "requesting_4_chunks_of_128_plus_3_blocks": { - startNumber: 0, - targetNumber: (128 * 4) + 3, - expectedBlockRequestMessage: []*network.BlockRequestMessage{ - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(0)), - Direction: network.Ascending, - Max: &maxResponseSize, - }, - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(128)), - Direction: 
network.Ascending, - Max: &maxResponseSize, - }, - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(256)), - Direction: network.Ascending, - Max: &maxResponseSize, - }, - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(384)), - Direction: network.Ascending, - Max: &maxResponseSize, - }, - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(512)), - Direction: network.Ascending, - Max: &three, - }, - }, - }, - } - - for tname, tt := range cases { - tt := tt - - t.Run(tname, func(t *testing.T) { - requests := ascedingBlockRequests(tt.startNumber, tt.targetNumber, bootstrapRequestData) - require.Equal(t, requests, tt.expectedBlockRequestMessage) - }) - } -} diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 2f6489efc0..64201db18d 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -273,12 +273,12 @@ func (s *syncWorkerPool) executeRequest(who peer.ID, task *syncTask) { }() request := task.request - logger.Debugf("[EXECUTING] worker %s: block request: %s", who, request) + logger.Debugf("[EXECUTING] worker %s, block request: %s", who, request) response, err := s.network.DoBlockRequest(who, request) if err != nil { - logger.Debugf("[FINISHED] worker %s: err: %s", who, err) + logger.Debugf("[FINISHED] worker %s, err: %s", who, err) } else if response != nil { - logger.Debugf("[FINISHED] worker %s: block data amount: %d", who, len(response.BlockData)) + logger.Debugf("[FINISHED] worker %s, block data amount: %d", who, len(response.BlockData)) } task.resultCh <- &syncTaskResult{ diff --git a/dot/sync/worker_pool_test.go b/dot/sync/worker_pool_test.go index 1a71f49f96..e437ff94a8 100644 --- a/dot/sync/worker_pool_test.go +++ b/dot/sync/worker_pool_test.go @@ -232,7 +232,7 @@ func TestSyncWorkerPool_listenForRequests_submitRequest(t *testing.T) { workerPool.newPeer(availablePeer) blockHash := common.MustHexToHash("0x750646b852a29e5f3668959916a03d6243a3137e91d0cd36870364931030f707") - blockRequest := singleBlockRequest(blockHash, bootstrapRequestData) + blockRequest := network.NewSingleBlockRequestMessage(blockHash, network.BootstrapRequestData) mockedBlockResponse := &network.BlockResponseMessage{ BlockData: []*types.BlockData{ { @@ -289,10 +289,10 @@ func TestSyncWorkerPool_listenForRequests_busyWorkers(t *testing.T) { workerPool.newPeer(availablePeer) firstRequestBlockHash := common.MustHexToHash("0x750646b852a29e5f3668959916a03d6243a3137e91d0cd36870364931030f707") - firstBlockRequest := singleBlockRequest(firstRequestBlockHash, bootstrapRequestData) + firstBlockRequest := network.NewSingleBlockRequestMessage(firstRequestBlockHash, network.BootstrapRequestData) secondRequestBlockHash := common.MustHexToHash("0x897646b852a29e5f3668959916a03d6243a3137e91d0cd36870364931030f707") - secondBlockRequest := singleBlockRequest(firstRequestBlockHash, bootstrapRequestData) + secondBlockRequest := network.NewSingleBlockRequestMessage(firstRequestBlockHash, network.BootstrapRequestData) firstMockedBlockResponse := &network.BlockResponseMessage{ BlockData: []*types.BlockData{ From 3c2b14fc249aee76d6a697d9031dbd6136d6714f Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 20 Jun 2023 19:49:43 -0400 Subject: [PATCH 074/140] chore: fix tests to use `network.MaxBlockResponseSize` --- dot/sync/chain_sync_integration_test.go | 4 ++-- dot/sync/chain_sync_test.go | 2 +- dot/sync/message_integration_test.go | 24 ++++++++++++------------ 3 
files changed, 15 insertions(+), 15 deletions(-) diff --git a/dot/sync/chain_sync_integration_test.go b/dot/sync/chain_sync_integration_test.go index 375059cb34..2cea6501c7 100644 --- a/dot/sync/chain_sync_integration_test.go +++ b/dot/sync/chain_sync_integration_test.go @@ -29,7 +29,7 @@ func TestValidateBlockData(t *testing.T) { cs.network = mockNetwork req := &network.BlockRequestMessage{ - RequestedData: bootstrapRequestData, + RequestedData: network.BootstrapRequestData, } err := cs.validateBlockData(req, nil, "") @@ -58,7 +58,7 @@ func TestChainSync_validateResponse_firstBlock_Integration(t *testing.T) { cs.blockState = bs req := &network.BlockRequestMessage{ - RequestedData: bootstrapRequestData, + RequestedData: network.BootstrapRequestData, } header := &types.Header{ diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index 559e0e3e39..b59a42666f 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -332,7 +332,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker(t *testing.T) { max := uint32(128) mockedNetwork.EXPECT().DoBlockRequest(workerPeerID, &network.BlockRequestMessage{ - RequestedData: bootstrapRequestData, + RequestedData: network.BootstrapRequestData, StartingBlock: *startingBlock, Direction: network.Ascending, Max: &max, diff --git a/dot/sync/message_integration_test.go b/dot/sync/message_integration_test.go index a030f1593a..3d0d44d2c6 100644 --- a/dot/sync/message_integration_test.go +++ b/dot/sync/message_integration_test.go @@ -48,7 +48,7 @@ func addTestBlocksToState(t *testing.T, depth uint, blockState BlockState) { func TestService_CreateBlockResponse_MaxSize(t *testing.T) { s := newTestSyncer(t) - addTestBlocksToState(t, maxResponseSize*2, s.blockState) + addTestBlocksToState(t, network.MaxBlockResponseSize*2, s.blockState) // test ascending start, err := variadic.NewUint32OrHash(1) @@ -63,11 +63,11 @@ func TestService_CreateBlockResponse_MaxSize(t *testing.T) { resp, err := s.CreateBlockResponse(req) require.NoError(t, err) - require.Equal(t, int(maxResponseSize), len(resp.BlockData)) + require.Equal(t, int(network.MaxBlockResponseSize), len(resp.BlockData)) require.Equal(t, uint(1), resp.BlockData[0].Number()) require.Equal(t, uint(128), resp.BlockData[127].Number()) - max := uint32(maxResponseSize + 100) + max := uint32(network.MaxBlockResponseSize + 100) req = &network.BlockRequestMessage{ RequestedData: 3, StartingBlock: *start, @@ -77,7 +77,7 @@ func TestService_CreateBlockResponse_MaxSize(t *testing.T) { resp, err = s.CreateBlockResponse(req) require.NoError(t, err) - require.Equal(t, int(maxResponseSize), len(resp.BlockData)) + require.Equal(t, int(network.MaxBlockResponseSize), len(resp.BlockData)) require.Equal(t, uint(1), resp.BlockData[0].Number()) require.Equal(t, uint(128), resp.BlockData[127].Number()) @@ -108,11 +108,11 @@ func TestService_CreateBlockResponse_MaxSize(t *testing.T) { resp, err = s.CreateBlockResponse(req) require.NoError(t, err) - require.Equal(t, int(maxResponseSize), len(resp.BlockData)) + require.Equal(t, int(network.MaxBlockResponseSize), len(resp.BlockData)) require.Equal(t, uint(128), resp.BlockData[0].Number()) require.Equal(t, uint(1), resp.BlockData[127].Number()) - max = uint32(maxResponseSize + 100) + max = uint32(network.MaxBlockResponseSize + 100) start, err = variadic.NewUint32OrHash(uint32(256)) require.NoError(t, err) @@ -125,7 +125,7 @@ func TestService_CreateBlockResponse_MaxSize(t *testing.T) { resp, err = s.CreateBlockResponse(req) require.NoError(t, err) 
- require.Equal(t, int(maxResponseSize), len(resp.BlockData)) + require.Equal(t, int(network.MaxBlockResponseSize), len(resp.BlockData)) require.Equal(t, uint(256), resp.BlockData[0].Number()) require.Equal(t, uint(129), resp.BlockData[127].Number()) @@ -146,7 +146,7 @@ func TestService_CreateBlockResponse_MaxSize(t *testing.T) { func TestService_CreateBlockResponse_StartHash(t *testing.T) { s := newTestSyncer(t) - addTestBlocksToState(t, uint(maxResponseSize*2), s.blockState) + addTestBlocksToState(t, uint(network.MaxBlockResponseSize*2), s.blockState) // test ascending with nil endBlockHash startHash, err := s.blockState.GetHashByNumber(1) @@ -164,7 +164,7 @@ func TestService_CreateBlockResponse_StartHash(t *testing.T) { resp, err := s.CreateBlockResponse(req) require.NoError(t, err) - require.Equal(t, int(maxResponseSize), len(resp.BlockData)) + require.Equal(t, int(network.MaxBlockResponseSize), len(resp.BlockData)) require.Equal(t, uint(1), resp.BlockData[0].Number()) require.Equal(t, uint(128), resp.BlockData[127].Number()) @@ -201,7 +201,7 @@ func TestService_CreateBlockResponse_StartHash(t *testing.T) { require.Equal(t, uint(16), resp.BlockData[0].Number()) require.Equal(t, uint(1), resp.BlockData[15].Number()) - // test descending with nil endBlockHash and start > maxResponseSize + // test descending with nil endBlockHash and start > network.MaxBlockResponseSize startHash, err = s.blockState.GetHashByNumber(256) require.NoError(t, err) @@ -217,7 +217,7 @@ func TestService_CreateBlockResponse_StartHash(t *testing.T) { resp, err = s.CreateBlockResponse(req) require.NoError(t, err) - require.Equal(t, int(maxResponseSize), len(resp.BlockData)) + require.Equal(t, int(network.MaxBlockResponseSize), len(resp.BlockData)) require.Equal(t, uint(256), resp.BlockData[0].Number()) require.Equal(t, uint(129), resp.BlockData[127].Number()) @@ -236,7 +236,7 @@ func TestService_CreateBlockResponse_StartHash(t *testing.T) { resp, err = s.CreateBlockResponse(req) require.NoError(t, err) - require.Equal(t, maxResponseSize, len(resp.BlockData)) + require.Equal(t, network.MaxBlockResponseSize, len(resp.BlockData)) require.Equal(t, uint(128), resp.BlockData[0].Number()) require.Equal(t, uint(1), resp.BlockData[127].Number()) } From beb7f8fdd534f36e8a0babdff2e0332ea345721f Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 20 Jun 2023 19:59:03 -0400 Subject: [PATCH 075/140] chore: fix lint warns --- dot/network/message.go | 4 ++-- dot/sync/chain_sync.go | 22 +++++++++++++--------- dot/sync/chain_sync_test.go | 9 ++++++--- dot/sync/syncer.go | 2 +- dot/sync/syncer_test.go | 11 ----------- dot/sync/worker_pool.go | 3 ++- 6 files changed, 24 insertions(+), 27 deletions(-) diff --git a/dot/network/message.go b/dot/network/message.go index 9a14e42202..9cdf1978a8 100644 --- a/dot/network/message.go +++ b/dot/network/message.go @@ -407,8 +407,8 @@ func NewAscedingBlockRequests(startNumber, targetNumber uint, requestedData byte // otherwise we should increase the numRequests by one, take this // example, we want to sync from 0 to 259, the diff is 259 // then the num of requests is 2 (uint(259)/uint(128)) however two requests will - // retrieve only 256 blocks (each request can retrive a max of 128 blocks), so we should - // create one more request to retrive those missing blocks, 3 in this example. + // retrieve only 256 blocks (each request can retrieve a max of 128 blocks), so we should + // create one more request to retrieve those missing blocks, 3 in this example. 
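// Worked example of the arithmetic above: for startNumber=0 and
// targetNumber=259, diff=259, so numRequests=259/128=2 and
// missingBlocks=259%128=3, and the loop below produces three requests:
//
//	{StartingBlock: 0,   Max: 128} // blocks 0..127
//	{StartingBlock: 128, Max: 128} // blocks 128..255
//	{StartingBlock: 256, Max: 3}   // blocks 256..258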
 	missingBlocks := diff % MaxBlockResponseSize
 	if missingBlocks != 0 {
 		numRequests++
diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go
index 5a20e26dea..5eb05f0df7 100644
--- a/dot/sync/chain_sync.go
+++ b/dot/sync/chain_sync.go
@@ -235,7 +235,6 @@ func (cs *chainSync) sync() {
 			if err != nil {
 				logger.Errorf("while executing bootsrap sync: %s", err)
 			}
-
 		} else {
 			// we are less than 128 blocks behind the target we can use tip sync
 			swapped := cs.state.CompareAndSwap(bootstrap, tip)
@@ -245,7 +244,10 @@
 				logger.Debugf("switched sync mode to %d", tip)
 			}
 
-			cs.requestPendingBlocks(finalisedHeader)
+			err := cs.requestPendingBlocks(finalisedHeader)
+			if err != nil {
+				logger.Errorf("while requesting pending blocks: %s", err)
+			}
 		}
 	}
 }
@@ -344,6 +346,7 @@ func (cs *chainSync) requestChainBlocks(announcedHeader, bestBlockHeader *types.
 	gapLength := uint32(announcedHeader.Number - bestBlockHeader.Number)
 	startAtBlock := announcedHeader.Number
 	totalBlocks := uint32(1)
+
 	var request *network.BlockRequestMessage
 	if gapLength > 1 {
 		request = network.NewDescendingBlockRequest(announcedHeader.Hash(), gapLength, network.BootstrapRequestData)
@@ -353,7 +356,6 @@
 		logger.Debugf("received a block announce from %s, requesting %d blocks, descending request from %s (#%d)",
 			peerWhoAnnounced, gapLength, announcedHeader.Hash(), announcedHeader.Number)
 	} else {
-		gapLength = 1
 		request = network.NewSingleBlockRequestMessage(announcedHeader.Hash(), network.BootstrapRequestData)
 		logger.Debugf("received a block announce from %s, requesting a single block %s (#%d)",
 			peerWhoAnnounced, announcedHeader.Hash(), announcedHeader.Number)
@@ -433,7 +435,7 @@ func (cs *chainSync) requestPendingBlocks(highestFinalizedHeader *types.Header)
 		gapLength := pendingBlock.number - highestFinalizedHeader.Number
 		if gapLength > 128 {
-			logger.Criticalf("GAP LENGHT: %d, GREATER THAN 128 block", gapLength)
+			logger.Criticalf("gap of %d blocks, max expected: 128 blocks", gapLength)
 			gapLength = 128
 		}
@@ -475,7 +477,7 @@ func (cs *chainSync) executeBootstrapSync(bestBlockHeader *types.Header) error {
 	// targetBlockNumber is the virtual target we will request, however
 	// we should bound it to the real target which is collected through
 	// block announces received from other peers
-	targetBlockNumber := startRequestAt + uint(availableWorkers)*128
+	targetBlockNumber := startRequestAt + availableWorkers*128
 	realTarget, err := cs.getTarget()
 	if err != nil {
 		return fmt.Errorf("while getting target: %w", err)
@@ -554,7 +556,9 @@ func (cs *chainSync) handleWorkersResults(
 	defer func() {
 		totalSyncAndImportSeconds := time.Since(startTime).Seconds()
 		bps := float64(totalBlocks) / totalSyncAndImportSeconds
-		logger.Debugf("⛓️ synced %d blocks, took: %.2f seconds, bps: %.2f blocks/second", totalBlocks, totalSyncAndImportSeconds, bps)
+		logger.Debugf("⛓️ synced %d blocks, "+
+			"took: %.2f seconds, bps: %.2f blocks/second",
+			totalBlocks, totalSyncAndImportSeconds, bps)
 
 		close(errCh)
 		wg.Done()
@@ -741,7 +745,7 @@ func (cs *chainSync) handleReadyBlock(bd *types.BlockData) error {
 // processBlockData processes the BlockData from a BlockResponse and
 // returns the index of the last BlockData it handled on success,
 // or the index of the block data that errored on failure.
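// In outline (a sketch inferred from the helper names this series touches,
// not a verbatim restatement of the function body): a block whose header is
// already in state is re-executed and imported via
// processBlockDataWithStateHeaderAndBody, a new header+body pair is
// BABE-verified, executed and imported via processBlockDataWithHeaderAndBody,
// and any attached justification is verified and stored via
// handleJustification.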
-func (cs *chainSync) processBlockData(blockData types.BlockData) error { //nolint:revive +func (cs *chainSync) processBlockData(blockData types.BlockData) error { headerInState, err := cs.blockState.HasHeader(blockData.Hash) if err != nil { return fmt.Errorf("checking if block state has header: %w", err) @@ -788,7 +792,7 @@ func (cs *chainSync) processBlockData(blockData types.BlockData) error { //nolin return nil } -func (cs *chainSync) processBlockDataWithStateHeaderAndBody(blockData types.BlockData, //nolint:revive +func (cs *chainSync) processBlockDataWithStateHeaderAndBody(blockData types.BlockData, announceImportedBlock bool) (err error) { // TODO: fix this; sometimes when the node shuts down the "best block" isn't stored properly, // so when the node restarts it has blocks higher than what it thinks is the best, causing it not to sync @@ -832,7 +836,7 @@ func (cs *chainSync) processBlockDataWithStateHeaderAndBody(blockData types.Bloc return nil } -func (cs *chainSync) processBlockDataWithHeaderAndBody(blockData types.BlockData, //nolint:revive +func (cs *chainSync) processBlockDataWithHeaderAndBody(blockData types.BlockData, announceImportedBlock bool) (err error) { err = cs.babeVerifier.VerifyBlock(blockData.Header) if err != nil { diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index b59a42666f..48d5d2fd6e 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -573,7 +573,8 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker_Failing(t *testing require.False(t, ok) } -func createSuccesfullBlockResponse(t *testing.T, genesisHash common.Hash, startingAt, numBlocks int) *network.BlockResponseMessage { +func createSuccesfullBlockResponse(_ *testing.T, genesisHash common.Hash, + startingAt, numBlocks int) *network.BlockResponseMessage { response := new(network.BlockResponseMessage) response.BlockData = make([]*types.BlockData, numBlocks) @@ -606,8 +607,10 @@ func createSuccesfullBlockResponse(t *testing.T, genesisHash common.Hash, starti return response } -// ensureSuccessfulBlockImportFlow will setup the expectations for method calls that happens while chain sync imports a block -func ensureSuccessfulBlockImportFlow(t *testing.T, parentHeader *types.Header, blocksReceived []*types.BlockData, mockBlockState *MockBlockState, +// ensureSuccessfulBlockImportFlow will setup the expectations for method calls +// that happens while chain sync imports a block +func ensureSuccessfulBlockImportFlow(t *testing.T, parentHeader *types.Header, + blocksReceived []*types.BlockData, mockBlockState *MockBlockState, mockBabeVerifier *MockBabeVerifier, mockStorageState *MockStorageState, mockImportHandler *MockBlockImportHandler, mockTelemetry *MockTelemetry) { t.Helper() diff --git a/dot/sync/syncer.go b/dot/sync/syncer.go index 809b07b2db..c96fd1efbf 100644 --- a/dot/sync/syncer.go +++ b/dot/sync/syncer.go @@ -105,7 +105,7 @@ func (s *Service) HandleBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMe if blockAnnounceHeader.Number <= bestBlockHeader.Number { // check if our block hash for that number is the same, if so, do nothing // as we already have that block - // TODO: check what happens when get hash by number retuns nothing or ErrNotExists + // TODO: check what happens when get hash by number returns nothing or ErrNotExists ourHash, err := s.blockState.GetHashByNumber(blockAnnounceHeader.Number) if err != nil { return fmt.Errorf("get block hash by number: %w", err) diff --git a/dot/sync/syncer_test.go b/dot/sync/syncer_test.go index 
3371037d83..a987982dec 100644 --- a/dot/sync/syncer_test.go +++ b/dot/sync/syncer_test.go @@ -276,17 +276,6 @@ func TestService_HandleBlockAnnounce(t *testing.T) { } } -func newMockChainSync(ctrl *gomock.Controller) ChainSync { - mock := NewMockChainSync(ctrl) - mock.EXPECT().setPeerHead(peer.ID("1"), common.Hash{}, uint(0)).Return(nil).AnyTimes() - mock.EXPECT().syncState().Return(bootstrap).AnyTimes() - mock.EXPECT().start().AnyTimes() - mock.EXPECT().stop().AnyTimes() - mock.EXPECT().getHighestBlock().Return(uint(2), nil).AnyTimes() - - return mock -} - func Test_Service_HandleBlockAnnounceHandshake(t *testing.T) { t.Parallel() diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 64201db18d..a377dcb9c5 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -112,7 +112,8 @@ func (s *syncWorkerPool) newPeer(who peer.ID) { // submitBoundedRequest given a request the worker pool will driven it // to the given peer.ID, used for tip sync when we receive a block announce // from a peer and we want to use the exact same peer to request blocks -func (s *syncWorkerPool) submitBoundedRequest(request *network.BlockRequestMessage, who peer.ID, resultCh chan<- *syncTaskResult) { +func (s *syncWorkerPool) submitBoundedRequest(request *network.BlockRequestMessage, + who peer.ID, resultCh chan<- *syncTaskResult) { s.taskQueue <- &syncTask{ boundTo: &who, request: request, From 6d7d5644bd04cdc793490d68f214e0f7954a9639 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 20 Jun 2023 19:59:37 -0400 Subject: [PATCH 076/140] chore: adding license to `worker_pool_test.go` --- dot/sync/worker_pool_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/dot/sync/worker_pool_test.go b/dot/sync/worker_pool_test.go index e437ff94a8..a7f8997eb2 100644 --- a/dot/sync/worker_pool_test.go +++ b/dot/sync/worker_pool_test.go @@ -1,3 +1,6 @@ +// Copyright 2023 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + package sync import ( From 4096823ef6ed26e4f82e4c9cac67778ddaa089f5 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 20 Jun 2023 20:03:03 -0400 Subject: [PATCH 077/140] chore: remove unneeded else branch --- dot/sync/worker_pool.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index a377dcb9c5..4e1bd107f0 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -251,9 +251,9 @@ func (s *syncWorkerPool) listenForRequests(stopCh chan struct{}) { s.wg.Add(1) go s.executeRequest(peerID, task) break - } else { - s.availableCond.Wait() } + + s.availableCond.Wait() } } } From 060dbc602fc79678c2c541946cbb2038725b60e3 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 20 Jun 2023 20:05:23 -0400 Subject: [PATCH 078/140] chore: fix deepsource warns --- dot/sync/chain_sync.go | 4 +--- dot/sync/chain_sync_test.go | 2 +- dot/sync/worker_pool.go | 3 +-- dot/sync/worker_pool_test.go | 10 ++++++---- 4 files changed, 9 insertions(+), 10 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 5eb05f0df7..0074b1d54b 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -282,8 +282,7 @@ func (cs *chainSync) onImportBlock(announced announcedBlock) error { } syncState := cs.state.Load().(chainSyncState) - switch syncState { - case tip: + if syncState == tip { return cs.requestImportedBlock(announced) } @@ -867,7 +866,6 @@ func (cs *chainSync) handleBody(body *types.Body) { } blockSizeGauge.Set(float64(acc)) - //logger.Infof("📦 roughly 
body size: %d, sum of extrinsics size: %d", len(*body), acc)
 }
 
 func (cs *chainSync) handleJustification(header *types.Header, justification []byte) (err error) {
diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go
index 48d5d2fd6e..70462e5327 100644
--- a/dot/sync/chain_sync_test.go
+++ b/dot/sync/chain_sync_test.go
@@ -527,7 +527,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker_Failing(t *testing
 				require.FailNow(t, "expected calls by %s and %s, got: %s",
 					peer.ID("alice"), peer.ID("bob"), pID)
 			default:
-				//ensure the the third call will be made by peer.ID("alice")
+				// ensure the third call will be made by peer.ID("alice")
 				require.Equalf(t, pID, peer.ID("alice"),
 					"expect third call be made by %s, got: %s", peer.ID("alice"), pID)
 			}
diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go
index 4e1bd107f0..1b1bd69f00 100644
--- a/dot/sync/worker_pool.go
+++ b/dot/sync/worker_pool.go
@@ -200,7 +200,6 @@ func (s *syncWorkerPool) getAvailablePeer() peer.ID {
 		}
 	}
 
-	//could not find an available peer to dispatch
 	return peer.ID("")
 }
 
@@ -218,7 +217,7 @@ func (s *syncWorkerPool) listenForRequests(stopCh chan struct{}) {
 	for {
 		select {
 		case <-stopCh:
-			//wait for ongoing requests to be finished before returning
+			// wait for ongoing requests to be finished before returning
 			s.wg.Wait()
 			return
diff --git a/dot/sync/worker_pool_test.go b/dot/sync/worker_pool_test.go
index a7f8997eb2..a7c488859f 100644
--- a/dot/sync/worker_pool_test.go
+++ b/dot/sync/worker_pool_test.go
@@ -87,8 +87,9 @@ func TestSyncWorkerPool_useConnectedPeers(t *testing.T) {
 			})
 			workerPool := newSyncWorkerPool(networkMock)
 			workerPool.workers[peer.ID("available-3")] = &peerSyncWorker{
-				status:         punished,
-				punishmentTime: time.Unix(1000, 0), //arbitrary unix value
+				status: punished,
+				// arbitrary unix value
+				punishmentTime: time.Unix(1000, 0),
 			}
 			return workerPool
 		},
@@ -175,8 +176,9 @@ func TestSyncWorkerPool_newPeer(t *testing.T) {
 		setupWorkerPool: func(*testing.T) *syncWorkerPool {
 			workerPool := newSyncWorkerPool(nil)
 			workerPool.workers[peer.ID("free-again")] = &peerSyncWorker{
-				status:         punished,
-				punishmentTime: time.Unix(1000, 0), //arbitrary unix value
+				status: punished,
+				// arbitrary unix value
+				punishmentTime: time.Unix(1000, 0),
 			}
 			return workerPool
 		},

From dc82d933506c7de0bf5bf6db6600bed629803cf9 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Tue, 20 Jun 2023 20:16:44 -0400
Subject: [PATCH 079/140] chore: wip `announced_block_while_in_tip_mode` test

---
 dot/sync/chain_sync.go      |  2 +-
 dot/sync/chain_sync_test.go | 25 ++++++++++++++++++++-----
 2 files changed, 21 insertions(+), 6 deletions(-)

diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go
index 0074b1d54b..77ca6230e4 100644
--- a/dot/sync/chain_sync.go
+++ b/dot/sync/chain_sync.go
@@ -392,7 +392,7 @@ func (cs *chainSync) requestForkBlocks(bestBlockHeader, highestFinalizedHeader,
 		request = network.NewDescendingBlockRequest(announcedHash, gapLength, network.BootstrapRequestData)
 	}
 
-	logger.Debugf("received a block announce from %s, requesting %d blocks, starting %s (#%d)",
+	logger.Debugf("peer %s announced a fork, requesting %d fork blocks, starting at %s (#%d)",
 		peerWhoAnnounced, gapLength, announcedHash, announcedHeader.Number)
 
 	resultsQueue := make(chan *syncTaskResult)
diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go
index 70462e5327..6f5ed2d74f 100644
--- a/dot/sync/chain_sync_test.go
+++ b/dot/sync/chain_sync_test.go
@@ -6,6 +6,7 @@ package sync
 import (
 	"errors"
 	"fmt"
+	"sync/atomic"
 	"testing"
 	"time"
 
@@ 
-122,7 +123,23 @@ func Test_chainSync_onImportBlock(t *testing.T) { errWrapped: errTest, errMessage: "while adding pending block header: test error", }, - //"announced_block_while_in_bootstrap_mode": {}, + "announced_block_while_in_bootstrap_mode": { + chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { + pendingBlocks := NewMockDisjointBlockSet(ctrl) + pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) + pendingBlocks.EXPECT().addHeader(block2AnnounceHeader).Return(nil) + + state := atomic.Value{} + state.Store(bootstrap) + + return &chainSync{ + pendingBlocks: pendingBlocks, + state: state, + } + }, + peerID: somePeer, + blockAnnounceHeader: block2AnnounceHeader, + }, //"announced_block_while_in_tip_mode": {}, } @@ -133,12 +150,10 @@ func Test_chainSync_onImportBlock(t *testing.T) { ctrl := gomock.NewController(t) chainSync := tt.chainSyncBuilder(ctrl) - - announced := announcedBlock{ + err := chainSync.onImportBlock(announcedBlock{ who: tt.peerID, header: tt.blockAnnounceHeader, - } - err := chainSync.onImportBlock(announced) + }) assert.ErrorIs(t, err, tt.errWrapped) if tt.errWrapped != nil { From 4a51be3f6c1f036d13fde7cfd96c7fe3e1efddc6 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 21 Jun 2023 16:12:55 -0400 Subject: [PATCH 080/140] chore: add `announced_block_while_in_tip_mode` test --- dot/sync/chain_sync.go | 22 +++---- dot/sync/chain_sync_test.go | 107 +++++++++++++++++++++++++++---- dot/sync/mock_chain_sync_test.go | 12 ++-- dot/sync/syncer.go | 2 +- dot/sync/syncer_test.go | 2 +- 5 files changed, 113 insertions(+), 32 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 77ca6230e4..3c7bc74fbe 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -83,7 +83,7 @@ type ChainSync interface { // getHighestBlock returns the highest block or an error getHighestBlock() (highestBlock uint, err error) - onImportBlock(announcedBlock) error + onBlockAnnounce(announcedBlock) error } type announcedBlock struct { @@ -270,7 +270,7 @@ func (cs *chainSync) setPeerHead(who peer.ID, bestHash common.Hash, bestNumber u } } -func (cs *chainSync) onImportBlock(announced announcedBlock) error { +func (cs *chainSync) onBlockAnnounce(announced announcedBlock) error { if cs.pendingBlocks.hasBlock(announced.header.Hash()) { return fmt.Errorf("%w: block %s (#%d)", errAlreadyInDisjointSet, announced.header.Hash(), announced.header.Number) @@ -282,14 +282,14 @@ func (cs *chainSync) onImportBlock(announced announcedBlock) error { } syncState := cs.state.Load().(chainSyncState) - if syncState == tip { - return cs.requestImportedBlock(announced) + if syncState != tip { + return nil } - return nil + return cs.requestAnnouncedBlock(announced) } -func (cs *chainSync) requestImportedBlock(announce announcedBlock) error { +func (cs *chainSync) requestAnnouncedBlock(announce announcedBlock) error { peerWhoAnnounced := announce.who announcedHash := announce.header.Hash() announcedNumber := announce.header.Number @@ -407,8 +407,9 @@ func (cs *chainSync) requestForkBlocks(bestBlockHeader, highestFinalizedHeader, } func (cs *chainSync) requestPendingBlocks(highestFinalizedHeader *types.Header) error { - logger.Infof("total of pending blocks: %d", cs.pendingBlocks.size()) - if cs.pendingBlocks.size() == 0 { + pendingBlocksTotal := cs.pendingBlocks.size() + logger.Infof("total of pending blocks: %d", pendingBlocksTotal) + if pendingBlocksTotal < 1 { return nil } @@ -654,11 +655,6 @@ func (cs *chainSync) handleWorkersResults( 
lastBlockInResponse.Header.Number, lastBlockInResponse.Hash) } - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.GossipSuccessValue, - Reason: peerset.GossipSuccessReason, - }, taskResult.who) - for _, blockInResponse := range response.BlockData { blockExactIndex := blockInResponse.Header.Number - startAtBlock syncingChain[blockExactIndex] = blockInResponse diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index 6f5ed2d74f..ceca8c8339 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -84,10 +84,14 @@ func Test_chainSync_onImportBlock(t *testing.T) { const somePeer = peer.ID("abc") errTest := errors.New("test error") - block2AnnounceHeader := types.NewHeader(common.Hash{}, common.Hash{}, + emptyTrieState := storage.NewTrieState(nil) + block1AnnounceHeader := types.NewHeader(common.Hash{}, emptyTrieState.MustRoot(), + common.Hash{}, 1, scale.VaryingDataTypeSlice{}) + block2AnnounceHeader := types.NewHeader(block1AnnounceHeader.Hash(), emptyTrieState.MustRoot(), common.Hash{}, 2, scale.VaryingDataTypeSlice{}) testCases := map[string]struct { + listenForRequests bool chainSyncBuilder func(ctrl *gomock.Controller) *chainSync peerID peer.ID blockAnnounceHeader *types.Header @@ -140,7 +144,79 @@ func Test_chainSync_onImportBlock(t *testing.T) { peerID: somePeer, blockAnnounceHeader: block2AnnounceHeader, }, - //"announced_block_while_in_tip_mode": {}, + "announced_block_while_in_tip_mode": { + chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { + pendingBlocksMock := NewMockDisjointBlockSet(ctrl) + pendingBlocksMock.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) + pendingBlocksMock.EXPECT().addHeader(block2AnnounceHeader).Return(nil) + pendingBlocksMock.EXPECT().removeBlock(block2AnnounceHeader.Hash()) + pendingBlocksMock.EXPECT().size().Return(int(0)) + + blockStateMock := NewMockBlockState(ctrl) + blockStateMock.EXPECT(). + HasHeader(block2AnnounceHeader.Hash()). + Return(false, nil) + + blockStateMock.EXPECT(). + BestBlockHeader(). + Return(block1AnnounceHeader, nil) + + blockStateMock.EXPECT(). + GetHighestFinalisedHeader(). + Return(block2AnnounceHeader, nil) + + expectedRequest := network.NewSingleBlockRequestMessage(block2AnnounceHeader.Hash(), + network.BootstrapRequestData) + + fakeBlockBody := types.Body([]types.Extrinsic{}) + mockedBlockResponse := &network.BlockResponseMessage{ + BlockData: []*types.BlockData{ + { + Hash: block2AnnounceHeader.Hash(), + Header: block2AnnounceHeader, + Body: &fakeBlockBody, + }, + }, + } + + networkMock := NewMockNetwork(ctrl) + networkMock.EXPECT(). + DoBlockRequest(somePeer, expectedRequest). 
+ Return(mockedBlockResponse, nil) + + babeVerifierMock := NewMockBabeVerifier(ctrl) + storageStateMock := NewMockStorageState(ctrl) + importHandlerMock := NewMockBlockImportHandler(ctrl) + telemetryMock := NewMockTelemetry(ctrl) + + const announceBlock = true + ensureSuccessfulBlockImportFlow(t, block1AnnounceHeader, mockedBlockResponse.BlockData, + blockStateMock, babeVerifierMock, storageStateMock, importHandlerMock, telemetryMock, + announceBlock) + + workerPool := newSyncWorkerPool(networkMock) + // include the peer who announced the block in the pool + workerPool.newPeer(somePeer) + + state := atomic.Value{} + state.Store(tip) + + return &chainSync{ + pendingBlocks: pendingBlocksMock, + state: state, + workerPool: workerPool, + network: networkMock, + blockState: blockStateMock, + babeVerifier: babeVerifierMock, + telemetry: telemetryMock, + storageState: storageStateMock, + blockImportHandler: importHandlerMock, + } + }, + listenForRequests: true, + peerID: somePeer, + blockAnnounceHeader: block2AnnounceHeader, + }, } for name, tt := range testCases { @@ -150,7 +226,13 @@ func Test_chainSync_onImportBlock(t *testing.T) { ctrl := gomock.NewController(t) chainSync := tt.chainSyncBuilder(ctrl) - err := chainSync.onImportBlock(announcedBlock{ + if tt.listenForRequests { + stopCh := make(chan struct{}) + defer close(stopCh) + go chainSync.workerPool.listenForRequests(stopCh) + } + + err := chainSync.onBlockAnnounce(announcedBlock{ who: tt.peerID, header: tt.blockAnnounceHeader, }) @@ -363,10 +445,10 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker(t *testing.T) { mockStorageState := NewMockStorageState(ctrl) mockImportHandler := NewMockBlockImportHandler(ctrl) mockTelemetry := NewMockTelemetry(ctrl) - + const announceBlock = false // setup mocks for new synced blocks that doesn't exists in our local database ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, totalBlockResponse.BlockData, mockedBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry) + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) // setup a chain sync which holds in its peer view map // 3 peers, each one announce block X as its best block number. 
@@ -422,10 +504,11 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) { worker1Response := &network.BlockResponseMessage{ BlockData: blockResponse.BlockData[:128], } + const announceBlock = false // the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow // will setup the expectations starting from the genesis header until block 128 ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry) + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) worker2Response := &network.BlockResponseMessage{ BlockData: blockResponse.BlockData[128:], @@ -434,7 +517,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) { // will setup the expectations starting from block 128, from previous worker, until block 256 parent := worker1Response.BlockData[127] ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry) + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) // we use gomock.Any since I cannot guarantee which peer picks which request // but the first call to DoBlockRequest will return the first set and the second @@ -496,15 +579,17 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker_Failing(t *testing // this test expects two workers responding each request with 128 blocks which means // we should import 256 blocks in total blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256) + const announceBlock = false // here we split the whole set in two parts each one will be the "response" for each peer worker1Response := &network.BlockResponseMessage{ BlockData: blockResponse.BlockData[:128], } + // the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow // will setup the expectations starting from the genesis header until block 128 ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry) + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) worker2Response := &network.BlockResponseMessage{ BlockData: blockResponse.BlockData[128:], @@ -513,7 +598,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker_Failing(t *testing // will setup the expectations starting from block 128, from previous worker, until block 256 parent := worker1Response.BlockData[127] ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, - mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry) + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) // we use gomock.Any since I cannot guarantee which peer picks which request // but the first call to DoBlockRequest will return the first set and the second @@ -627,7 +712,7 @@ func createSuccesfullBlockResponse(_ *testing.T, genesisHash common.Hash, func ensureSuccessfulBlockImportFlow(t *testing.T, parentHeader *types.Header, blocksReceived []*types.BlockData, mockBlockState *MockBlockState, mockBabeVerifier *MockBabeVerifier, mockStorageState *MockStorageState, - mockImportHandler *MockBlockImportHandler, mockTelemetry *MockTelemetry) { + mockImportHandler *MockBlockImportHandler, mockTelemetry *MockTelemetry, 
announceBlock bool) { t.Helper() mockBlockState.EXPECT().HasHeader(parentHeader.Hash()).Return(true, nil) @@ -667,7 +752,7 @@ func ensureSuccessfulBlockImportFlow(t *testing.T, parentHeader *types.Header, mockRuntimeInstance.EXPECT().ExecuteBlock(expectedBlock). Return(nil, nil) - mockImportHandler.EXPECT().HandleBlockImport(expectedBlock, emptyTrieState, false). + mockImportHandler.EXPECT().HandleBlockImport(expectedBlock, emptyTrieState, announceBlock). Return(nil) blockHash := blockData.Header.Hash() diff --git a/dot/sync/mock_chain_sync_test.go b/dot/sync/mock_chain_sync_test.go index 7a59eae49c..ed52017a39 100644 --- a/dot/sync/mock_chain_sync_test.go +++ b/dot/sync/mock_chain_sync_test.go @@ -50,18 +50,18 @@ func (mr *MockChainSyncMockRecorder) getHighestBlock() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getHighestBlock", reflect.TypeOf((*MockChainSync)(nil).getHighestBlock)) } -// onImportBlock mocks base method. -func (m *MockChainSync) onImportBlock(arg0 announcedBlock) error { +// onBlockAnnounce mocks base method. +func (m *MockChainSync) onBlockAnnounce(arg0 announcedBlock) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "onImportBlock", arg0) + ret := m.ctrl.Call(m, "onBlockAnnounce", arg0) ret0, _ := ret[0].(error) return ret0 } -// onImportBlock indicates an expected call of onImportBlock. -func (mr *MockChainSyncMockRecorder) onImportBlock(arg0 interface{}) *gomock.Call { +// onBlockAnnounce indicates an expected call of onBlockAnnounce. +func (mr *MockChainSyncMockRecorder) onBlockAnnounce(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "onImportBlock", reflect.TypeOf((*MockChainSync)(nil).onImportBlock), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "onBlockAnnounce", reflect.TypeOf((*MockChainSync)(nil).onBlockAnnounce), arg0) } // setPeerHead mocks base method. diff --git a/dot/sync/syncer.go b/dot/sync/syncer.go index c96fd1efbf..e4095a36f7 100644 --- a/dot/sync/syncer.go +++ b/dot/sync/syncer.go @@ -154,7 +154,7 @@ func (s *Service) HandleBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMe // that is also has the chain up until and including that block. // this may not be a valid assumption, but perhaps we can assume that // it is likely they will receive this block and its ancestors before us. 
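// For instance (an illustration of the assumption above, using the bounded
// worker-pool behaviour described elsewhere in this series): if a peer
// announces block #105 while our best block is #100, tip sync asks that same
// announcing peer for the gap #101..#105 through the worker pool's bounded
// request path, on the assumption that the announcer holds those ancestors.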
- return s.chainSync.onImportBlock(announcedBlock{ + return s.chainSync.onBlockAnnounce(announcedBlock{ who: from, header: blockAnnounceHeader, }) diff --git a/dot/sync/syncer_test.go b/dot/sync/syncer_test.go index a987982dec..0daca41b6d 100644 --- a/dot/sync/syncer_test.go +++ b/dot/sync/syncer_test.go @@ -239,7 +239,7 @@ func TestService_HandleBlockAnnounce(t *testing.T) { header: block2AnnounceHeader, } - chainSyncMock.EXPECT().onImportBlock(expectedAnnouncedBlock).Return(nil) + chainSyncMock.EXPECT().onBlockAnnounce(expectedAnnouncedBlock).Return(nil) return &Service{ blockState: blockState, From 3d37f61e2fe47f1157296529f562047ac12ddd4e Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 21 Jun 2023 16:46:07 -0400 Subject: [PATCH 081/140] chore: add `TestChainSync_validateResponse` test --- dot/sync/chain_sync.go | 18 +-- dot/sync/chain_sync_integration_test.go | 39 ------ dot/sync/chain_sync_test.go | 174 ++++++++++++++++++++++++ dot/sync/syncer_test.go | 2 +- 4 files changed, 184 insertions(+), 49 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 3c7bc74fbe..9572256118 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -951,7 +951,7 @@ func (cs *chainSync) validateResponse(req *network.BlockRequestMessage, } if !has { - return errUnknownParent + return fmt.Errorf("%w: %s", errUnknownParent, firstItem.Header.ParentHash) } previousBlockData := firstItem @@ -991,22 +991,22 @@ func (cs *chainSync) validateBlockData(req *network.BlockRequestMessage, bd *typ } requestedData := req.RequestedData - - if slices.Contains(cs.badBlocks, bd.Hash.String()) { - logger.Errorf("Rejecting known bad block Number: %d Hash: %s", bd.Number(), bd.Hash) - return errBadBlock - } - if (requestedData&network.RequestedDataHeader) == 1 && bd.Header == nil { cs.network.ReportPeer(peerset.ReputationChange{ Value: peerset.IncompleteHeaderValue, Reason: peerset.IncompleteHeaderReason, }, p) - return errNilHeaderInResponse + return fmt.Errorf("%w: %s", errNilHeaderInResponse, bd.Hash) } if (requestedData&network.RequestedDataBody>>1) == 1 && bd.Body == nil { - return fmt.Errorf("%w: hash=%s", errNilBodyInResponse, bd.Hash) + // TODO: report peer + return fmt.Errorf("%w: %s", errNilBodyInResponse, bd.Hash) + } + + if slices.Contains(cs.badBlocks, bd.Hash.String()) { + logger.Errorf("Rejecting known bad block Number: %d Hash: %s", bd.Number(), bd.Hash.String()) + return fmt.Errorf("%w: %s", errBadBlock, bd.Hash.String()) } return nil diff --git a/dot/sync/chain_sync_integration_test.go b/dot/sync/chain_sync_integration_test.go index 2cea6501c7..803a04ba9b 100644 --- a/dot/sync/chain_sync_integration_test.go +++ b/dot/sync/chain_sync_integration_test.go @@ -6,13 +6,11 @@ package sync import ( - "errors" "testing" "github.com/ChainSafe/gossamer/dot/network" "github.com/ChainSafe/gossamer/dot/peerset" "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" "github.com/golang/mock/gomock" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" @@ -49,40 +47,3 @@ func TestValidateBlockData(t *testing.T) { }, "") require.NoError(t, err) } - -func TestChainSync_validateResponse_firstBlock_Integration(t *testing.T) { - ctrl := gomock.NewController(t) - cs := newTestChainSync(ctrl) - bs := NewMockBlockState(ctrl) - bs.EXPECT().HasHeader(gomock.AssignableToTypeOf(common.Hash{})).Return(false, nil) - cs.blockState = bs - - req := &network.BlockRequestMessage{ - RequestedData: network.BootstrapRequestData, - } - - header := 
&types.Header{ - Number: 2, - } - - resp := &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Hash: header.Hash(), - Header: &types.Header{ - Number: 2, - }, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - }, - } - - err := cs.validateResponse(req, resp, "") - require.True(t, errors.Is(err, errUnknownParent)) - require.True(t, cs.pendingBlocks.(*disjointBlockSet).hasBlock(header.Hash())) - bd := cs.pendingBlocks.getBlock(header.Hash()) - require.NotNil(t, bd.header) - require.NotNil(t, bd.body) - require.NotNil(t, bd.justification) -} diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index ceca8c8339..df52bcfda3 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -11,6 +11,7 @@ import ( "time" "github.com/ChainSafe/gossamer/dot/network" + "github.com/ChainSafe/gossamer/dot/peerset" "github.com/ChainSafe/gossamer/dot/telemetry" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" @@ -765,3 +766,176 @@ func ensureSuccessfulBlockImportFlow(t *testing.T, parentHeader *types.Header, mockBlockState.EXPECT().CompareAndSetBlockData(blockData).Return(nil) } } + +func TestChainSync_validateResponse(t *testing.T) { + t.Parallel() + + block1Header := &types.Header{ + ParentHash: common.MustHexToHash("0x00597cb4bb4cc13bf119f6613aec7642d4c06a2e453de53d34aea6f3f1eeb504"), + Number: 2, + } + + block2Header := &types.Header{ + ParentHash: block1Header.Hash(), + Number: 3, + } + + cases := map[string]struct { + wantErr error + errString string + setupChainSync func(t *testing.T) *chainSync + blockRequest *network.BlockRequestMessage + blockResponse *network.BlockResponseMessage + }{ + "first_item_unkown_parent": { + wantErr: errUnknownParent, + errString: "parent of first block in block response is unknown: " + + block1Header.ParentHash.String(), + blockRequest: &network.BlockRequestMessage{ + RequestedData: network.BootstrapRequestData, + }, + blockResponse: &network.BlockResponseMessage{ + BlockData: []*types.BlockData{ + { + Hash: block1Header.Hash(), + Header: block1Header, + Body: &types.Body{}, + Justification: &[]byte{0}, + }, + }, + }, + setupChainSync: func(t *testing.T) *chainSync { + ctrl := gomock.NewController(t) + blockStateMock := NewMockBlockState(ctrl) + blockStateMock.EXPECT().HasHeader(block1Header.ParentHash).Return(false, nil) + + return &chainSync{ + blockState: blockStateMock, + } + }, + }, + "bad_block": { + wantErr: errBadBlock, + errString: "known bad block: " + + block2Header.Hash().String(), + blockRequest: &network.BlockRequestMessage{ + RequestedData: network.BootstrapRequestData, + }, + blockResponse: &network.BlockResponseMessage{ + BlockData: []*types.BlockData{ + { + Hash: block1Header.Hash(), + Header: block1Header, + Body: &types.Body{}, + Justification: &[]byte{0}, + }, + { + Hash: block2Header.Hash(), + Header: block2Header, + Body: &types.Body{}, + Justification: &[]byte{0}, + }, + }, + }, + setupChainSync: func(t *testing.T) *chainSync { + ctrl := gomock.NewController(t) + blockStateMock := NewMockBlockState(ctrl) + blockStateMock.EXPECT().HasHeader(block1Header.ParentHash).Return(true, nil) + + return &chainSync{ + blockState: blockStateMock, + badBlocks: []string{block2Header.Hash().String()}, + } + }, + }, + "requested_bootstrap_data_but_got_nil_header": { + wantErr: errNilHeaderInResponse, + errString: "expected header, received none: " + + block2Header.Hash().String(), + blockRequest: &network.BlockRequestMessage{ + RequestedData: 
network.BootstrapRequestData, + }, + blockResponse: &network.BlockResponseMessage{ + BlockData: []*types.BlockData{ + { + Hash: block1Header.Hash(), + Header: block1Header, + Body: &types.Body{}, + Justification: &[]byte{0}, + }, + { + Hash: block2Header.Hash(), + Header: nil, + Body: &types.Body{}, + Justification: &[]byte{0}, + }, + }, + }, + setupChainSync: func(t *testing.T) *chainSync { + ctrl := gomock.NewController(t) + blockStateMock := NewMockBlockState(ctrl) + blockStateMock.EXPECT().HasHeader(block1Header.ParentHash).Return(true, nil) + + networkMock := NewMockNetwork(ctrl) + networkMock.EXPECT().ReportPeer(peerset.ReputationChange{ + Value: peerset.IncompleteHeaderValue, + Reason: peerset.IncompleteHeaderReason, + }, peer.ID("peer")) + + return &chainSync{ + blockState: blockStateMock, + badBlocks: []string{block2Header.Hash().String()}, + network: networkMock, + } + }, + }, + "requested_bootstrap_data_but_got_nil_body": { + wantErr: errNilBodyInResponse, + errString: "expected body, received none: " + + block2Header.Hash().String(), + blockRequest: &network.BlockRequestMessage{ + RequestedData: network.BootstrapRequestData, + }, + blockResponse: &network.BlockResponseMessage{ + BlockData: []*types.BlockData{ + { + Hash: block1Header.Hash(), + Header: block1Header, + Body: &types.Body{}, + Justification: &[]byte{0}, + }, + { + Hash: block2Header.Hash(), + Header: block2Header, + Body: nil, + Justification: &[]byte{0}, + }, + }, + }, + setupChainSync: func(t *testing.T) *chainSync { + ctrl := gomock.NewController(t) + blockStateMock := NewMockBlockState(ctrl) + blockStateMock.EXPECT().HasHeader(block1Header.ParentHash).Return(true, nil) + networkMock := NewMockNetwork(ctrl) + + return &chainSync{ + blockState: blockStateMock, + badBlocks: []string{block2Header.Hash().String()}, + network: networkMock, + } + }, + }, + } + + for tname, tt := range cases { + tt := tt + t.Run(tname, func(t *testing.T) { + chainSync := tt.setupChainSync(t) + err := chainSync.validateResponse(tt.blockRequest, tt.blockResponse, peer.ID("peer")) + require.ErrorIs(t, err, tt.wantErr) + if tt.errString != "" { + require.EqualError(t, err, tt.errString) + } + }) + } +} diff --git a/dot/sync/syncer_test.go b/dot/sync/syncer_test.go index 0daca41b6d..b634726320 100644 --- a/dot/sync/syncer_test.go +++ b/dot/sync/syncer_test.go @@ -205,7 +205,7 @@ func TestService_HandleBlockAnnounce(t *testing.T) { peerID: somePeer, blockAnnounceHeader: block2AnnounceHeader, errWrapped: errTest, - errMessage: "has header: test error", + errMessage: "while checking if header exists: test error", }, "number_smaller_than_best_block_number_and_" + "finalised_number_smaller_than_number_and_" + From b09cdaaa5ab6af00585c4f932b93c3fb641622d3 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 21 Jun 2023 19:55:38 -0400 Subject: [PATCH 082/140] chore: fix `validateBlockResponse` checks --- dot/sync/chain_sync.go | 181 ++++++++---------- dot/sync/chain_sync_integration_test.go | 49 ----- dot/sync/chain_sync_test.go | 239 ++++++++++++------------ dot/sync/errors.go | 26 ++- 4 files changed, 204 insertions(+), 291 deletions(-) delete mode 100644 dot/sync/chain_sync_integration_test.go diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 9572256118..d6ba858712 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -547,28 +547,30 @@ func (cs *chainSync) getTarget() (uint, error) { // and every cicle we should endup with a complete chain, whenever we identify // any error from a worker we should evaluate 
the error and re-insert the request // in the queue and wait for it to completes +// TODO: handle only justification requests func (cs *chainSync) handleWorkersResults( - workersResults chan *syncTaskResult, startAtBlock uint, totalBlocks uint32, wg *sync.WaitGroup) chan error { + workersResults chan *syncTaskResult, startAtBlock uint, expectedSyncedBlocks uint32, wg *sync.WaitGroup) chan error { errCh := make(chan error, 1) go func() { startTime := time.Now() defer func() { totalSyncAndImportSeconds := time.Since(startTime).Seconds() - bps := float64(totalBlocks) / totalSyncAndImportSeconds + bps := float64(expectedSyncedBlocks) / totalSyncAndImportSeconds logger.Debugf("⛓️ synced %d blocks, "+ "took: %.2f seconds, bps: %.2f blocks/second", - totalBlocks, totalSyncAndImportSeconds, bps) + expectedSyncedBlocks, totalSyncAndImportSeconds, bps) close(errCh) wg.Done() }() - logger.Debugf("💤 waiting for %d blocks", totalBlocks) - syncingChain := make([]*types.BlockData, totalBlocks) + logger.Debugf("💤 waiting for %d blocks", expectedSyncedBlocks) + syncingChain := make([]*types.BlockData, expectedSyncedBlocks) // the total numbers of blocks is missing in the syncing chain - waitingBlocks := totalBlocks + waitingBlocks := expectedSyncedBlocks + taskResultLoop: for waitingBlocks > 0 { // in a case where we don't handle workers results we should check the pool idleDuration := time.Minute @@ -621,41 +623,56 @@ func (cs *chainSync) handleWorkersResults( reverseBlockData(response.BlockData) } - err := cs.validateResponse(request, response, who) - switch { - case errors.Is(err, errResponseIsNotChain): - logger.Criticalf("response invalid: %s", err) - cs.workerPool.punishPeer(taskResult.who) - cs.workerPool.submitRequest(taskResult.request, workersResults) - continue - case errors.Is(err, errEmptyBlockData): - cs.workerPool.punishPeer(taskResult.who) - cs.workerPool.submitRequest(taskResult.request, workersResults) - continue - case errors.Is(err, errUnknownParent): - case errors.Is(err, errBadBlock): - logger.Warnf("peer %s sent a bad block: %s", who, err) - cs.workerPool.ignorePeerAsWorker(taskResult.who) - cs.workerPool.submitRequest(taskResult.request, workersResults) - continue - case err != nil: - logger.Criticalf("response invalid: %s", err) - cs.workerPool.punishPeer(taskResult.who) - cs.workerPool.submitRequest(taskResult.request, workersResults) - continue - } - if len(response.BlockData) > 0 { firstBlockInResponse := response.BlockData[0] lastBlockInResponse := response.BlockData[len(response.BlockData)-1] - logger.Tracef("processing %d blocks: %d (%s) to %d (%s)", + logger.Tracef("processing %d blocks: %s (#%d) to %s (#%d)", len(response.BlockData), - firstBlockInResponse.Header.Number, firstBlockInResponse.Hash, - lastBlockInResponse.Header.Number, lastBlockInResponse.Hash) + firstBlockInResponse.Hash.Short(), firstBlockInResponse.Header.Number, + lastBlockInResponse.Hash.Short(), lastBlockInResponse.Header.Number) + } + + isChain := isResponseAChain(response.BlockData) + if !isChain { + logger.Criticalf("response from %s is not a chain", who) + cs.workerPool.punishPeer(taskResult.who) + cs.workerPool.submitRequest(taskResult.request, workersResults) + continue taskResultLoop } for _, blockInResponse := range response.BlockData { + err := validateResponseFields(request.RequestedData, blockInResponse) + if err != nil { + logger.Criticalf("validating fields: %s", err) + // TODO: check the reputation change for nil body in response + // and nil justification in response + if errors.Is(err, 
errNilHeaderInResponse) { + cs.network.ReportPeer(peerset.ReputationChange{ + Value: peerset.IncompleteHeaderValue, + Reason: peerset.IncompleteHeaderReason, + }, who) + } + + cs.workerPool.punishPeer(taskResult.who) + cs.workerPool.submitRequest(taskResult.request, workersResults) + continue taskResultLoop + } + + if slices.Contains(cs.badBlocks, blockInResponse.Hash.String()) { + logger.Criticalf("%s sent a known bad block: %s (#%d)", + who, blockInResponse.Hash.String(), blockInResponse.Number()) + + cs.network.ReportPeer(peerset.ReputationChange{ + Value: peerset.BadBlockAnnouncementValue, + Reason: peerset.BadBlockAnnouncementReason, + }, who) + + cs.workerPool.ignorePeerAsWorker(taskResult.who) + cs.workerPool.submitRequest(taskResult.request, workersResults) + continue taskResultLoop + } + blockExactIndex := blockInResponse.Header.Number - startAtBlock syncingChain[blockExactIndex] = blockInResponse } @@ -666,10 +683,8 @@ func (cs *chainSync) handleWorkersResults( } } - retreiveBlocksSeconds := time.Since(startTime).Seconds() - logger.Debugf("🔽 retrieved %d blocks, took: %.2f seconds, starting process...", totalBlocks, retreiveBlocksSeconds) if len(syncingChain) >= 2 { - // ensuring the parents are in the right place + // ensure the acquired block set forms an actual chain parentElement := syncingChain[0] for _, element := range syncingChain[1:] { if parentElement.Header.Hash() != element.Header.ParentHash { @@ -680,6 +695,9 @@ func (cs *chainSync) handleWorkersResults( } } + retreiveBlocksSeconds := time.Since(startTime).Seconds() + logger.Debugf("🔽 retrieved %d blocks, took: %.2f seconds, starting process...", expectedSyncedBlocks, retreiveBlocksSeconds) + // response was validated! place into ready block queue for _, bd := range syncingChain { // block is ready to be processed! @@ -927,89 +945,46 @@ func (cs *chainSync) handleBlock(block *types.Block, announceImportedBlock bool) return nil } -// validateResponse performs pre-validation of a block response before placing it into either the -// pendingBlocks or readyBlocks set. -// It checks the following: -// - the response is not empty -// - the response contains all the expected fields -// - the block is not contained in the bad block list -// - each block has the correct parent, ie. 
the response constitutes a valid chain -func (cs *chainSync) validateResponse(req *network.BlockRequestMessage, - resp *network.BlockResponseMessage, p peer.ID) error { - if resp == nil || len(resp.BlockData) == 0 { - return errEmptyBlockData +// validateResponseFields checks that the expected fields are in the block data +func validateResponseFields(requestedData byte, bd *types.BlockData) error { + if bd == nil { + return errNilBlockData } - logger.Tracef("validating block response starting at block hash %s", resp.BlockData[0].Hash) - - headerRequested := (req.RequestedData & network.RequestedDataHeader) == 1 - firstItem := resp.BlockData[0] - - has, err := cs.blockState.HasHeader(firstItem.Header.ParentHash) - if err != nil { - return fmt.Errorf("while checking ancestry: %w", err) + if (requestedData&network.RequestedDataHeader) == network.RequestedDataHeader && bd.Header == nil { + return fmt.Errorf("%w: %s", errNilHeaderInResponse, bd.Hash) } - if !has { - return fmt.Errorf("%w: %s", errUnknownParent, firstItem.Header.ParentHash) + if (requestedData&network.RequestedDataBody) == network.RequestedDataBody && bd.Body == nil { + return fmt.Errorf("%w: %s", errNilBodyInResponse, bd.Hash) } - previousBlockData := firstItem - for _, currBlockData := range resp.BlockData[1:] { - if err := cs.validateBlockData(req, currBlockData, p); err != nil { - return err - } - - if headerRequested { - previousHash := previousBlockData.Header.Hash() - if previousHash != currBlockData.Header.ParentHash || - currBlockData.Header.Number != (previousBlockData.Header.Number+1) { - return errResponseIsNotChain - } - } else if currBlockData.Justification != nil { - // if this is a justification-only request, make sure we have the block for the justification - has, _ := cs.blockState.HasHeader(currBlockData.Hash) - if !has { - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.BadJustificationValue, - Reason: peerset.BadJustificationReason, - }, p) - return errUnknownBlockForJustification - } - } - - previousBlockData = currBlockData + // if we requested strictly justification + if (requestedData|network.RequestedDataJustification) == network.RequestedDataJustification && + bd.Justification == nil { + return fmt.Errorf("%w: %s", errNilJustificationInResponse, bd.Hash) } return nil } -// validateBlockData checks that the expected fields are in the block data -func (cs *chainSync) validateBlockData(req *network.BlockRequestMessage, bd *types.BlockData, p peer.ID) error { - if bd == nil { - return errNilBlockData +func isResponseAChain(responseBlockData []*types.BlockData) bool { + if len(responseBlockData) < 2 { + return true } - requestedData := req.RequestedData - if (requestedData&network.RequestedDataHeader) == 1 && bd.Header == nil { - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.IncompleteHeaderValue, - Reason: peerset.IncompleteHeaderReason, - }, p) - return fmt.Errorf("%w: %s", errNilHeaderInResponse, bd.Hash) - } - - if (requestedData&network.RequestedDataBody>>1) == 1 && bd.Body == nil { - // TODO: report peer - return fmt.Errorf("%w: %s", errNilBodyInResponse, bd.Hash) - } + previousBlockData := responseBlockData[0] + for _, currBlockData := range responseBlockData[1:] { + previousHash := previousBlockData.Header.Hash() + isParent := previousHash == currBlockData.Header.ParentHash + if !isParent { + return false + } - if slices.Contains(cs.badBlocks, bd.Hash.String()) { - logger.Errorf("Rejecting known bad block Number: %d Hash: %s", bd.Number(), bd.Hash.String()) - 
return fmt.Errorf("%w: %s", errBadBlock, bd.Hash.String()) + previousBlockData = currBlockData } - return nil + return true } func (cs *chainSync) getHighestBlock() (highestBlock uint, err error) { diff --git a/dot/sync/chain_sync_integration_test.go b/dot/sync/chain_sync_integration_test.go deleted file mode 100644 index 803a04ba9b..0000000000 --- a/dot/sync/chain_sync_integration_test.go +++ /dev/null @@ -1,49 +0,0 @@ -//go:build integration - -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "testing" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/dot/peerset" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/golang/mock/gomock" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/stretchr/testify/require" -) - -func TestValidateBlockData(t *testing.T) { - ctrl := gomock.NewController(t) - cs := newTestChainSync(ctrl) - mockNetwork := NewMockNetwork(ctrl) - mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{ - Value: -1048576, - Reason: "Incomplete header", - }, peer.ID("")) - cs.network = mockNetwork - - req := &network.BlockRequestMessage{ - RequestedData: network.BootstrapRequestData, - } - - err := cs.validateBlockData(req, nil, "") - require.Equal(t, errNilBlockData, err) - - err = cs.validateBlockData(req, &types.BlockData{}, "") - require.Equal(t, errNilHeaderInResponse, err) - - err = cs.validateBlockData(req, &types.BlockData{ - Header: &types.Header{}, - }, "") - require.ErrorIs(t, err, errNilBodyInResponse) - - err = cs.validateBlockData(req, &types.BlockData{ - Header: &types.Header{}, - Body: &types.Body{}, - }, "") - require.NoError(t, err) -} diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index df52bcfda3..f586d8ccd0 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -25,27 +25,6 @@ import ( "github.com/stretchr/testify/require" ) -const defaultSlotDuration = 6 * time.Second - -func newTestChainSyncWithReadyBlocks(ctrl *gomock.Controller) *chainSync { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - - cfg := chainSyncConfig{ - bs: mockBlockState, - pendingBlocks: newDisjointBlockSet(pendingBlocksLimit), - minPeers: 1, - maxPeers: 5, - slotDuration: defaultSlotDuration, - } - - return newChainSync(cfg) -} - -func newTestChainSync(ctrl *gomock.Controller) *chainSync { - return newTestChainSyncWithReadyBlocks(ctrl) -} - func Test_chainSyncState_String(t *testing.T) { t.Parallel() @@ -767,7 +746,7 @@ func ensureSuccessfulBlockImportFlow(t *testing.T, parentHeader *types.Header, } } -func TestChainSync_validateResponse(t *testing.T) { +func TestChainSync_validateResponseFields(t *testing.T) { t.Parallel() block1Header := &types.Header{ @@ -784,92 +763,19 @@ func TestChainSync_validateResponse(t *testing.T) { wantErr error errString string setupChainSync func(t *testing.T) *chainSync - blockRequest *network.BlockRequestMessage - blockResponse *network.BlockResponseMessage + requestedData byte + blockData *types.BlockData }{ - "first_item_unkown_parent": { - wantErr: errUnknownParent, - errString: "parent of first block in block response is unknown: " + - block1Header.ParentHash.String(), - blockRequest: &network.BlockRequestMessage{ - RequestedData: network.BootstrapRequestData, - }, - blockResponse: &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Hash: block1Header.Hash(), - Header: block1Header, 
- Body: &types.Body{}, - Justification: &[]byte{0}, - }, - }, - }, - setupChainSync: func(t *testing.T) *chainSync { - ctrl := gomock.NewController(t) - blockStateMock := NewMockBlockState(ctrl) - blockStateMock.EXPECT().HasHeader(block1Header.ParentHash).Return(false, nil) - - return &chainSync{ - blockState: blockStateMock, - } - }, - }, - "bad_block": { - wantErr: errBadBlock, - errString: "known bad block: " + - block2Header.Hash().String(), - blockRequest: &network.BlockRequestMessage{ - RequestedData: network.BootstrapRequestData, - }, - blockResponse: &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Hash: block1Header.Hash(), - Header: block1Header, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - { - Hash: block2Header.Hash(), - Header: block2Header, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - }, - }, - setupChainSync: func(t *testing.T) *chainSync { - ctrl := gomock.NewController(t) - blockStateMock := NewMockBlockState(ctrl) - blockStateMock.EXPECT().HasHeader(block1Header.ParentHash).Return(true, nil) - - return &chainSync{ - blockState: blockStateMock, - badBlocks: []string{block2Header.Hash().String()}, - } - }, - }, "requested_bootstrap_data_but_got_nil_header": { wantErr: errNilHeaderInResponse, errString: "expected header, received none: " + block2Header.Hash().String(), - blockRequest: &network.BlockRequestMessage{ - RequestedData: network.BootstrapRequestData, - }, - blockResponse: &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Hash: block1Header.Hash(), - Header: block1Header, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - { - Hash: block2Header.Hash(), - Header: nil, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - }, + requestedData: network.BootstrapRequestData, + blockData: &types.BlockData{ + Hash: block2Header.Hash(), + Header: nil, + Body: &types.Body{}, + Justification: &[]byte{0}, }, setupChainSync: func(t *testing.T) *chainSync { ctrl := gomock.NewController(t) @@ -884,7 +790,6 @@ func TestChainSync_validateResponse(t *testing.T) { return &chainSync{ blockState: blockStateMock, - badBlocks: []string{block2Header.Hash().String()}, network: networkMock, } }, @@ -893,24 +798,35 @@ func TestChainSync_validateResponse(t *testing.T) { wantErr: errNilBodyInResponse, errString: "expected body, received none: " + block2Header.Hash().String(), - blockRequest: &network.BlockRequestMessage{ - RequestedData: network.BootstrapRequestData, + requestedData: network.BootstrapRequestData, + blockData: &types.BlockData{ + Hash: block2Header.Hash(), + Header: block2Header, + Body: nil, + Justification: &[]byte{0}, }, - blockResponse: &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Hash: block1Header.Hash(), - Header: block1Header, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - { - Hash: block2Header.Hash(), - Header: block2Header, - Body: nil, - Justification: &[]byte{0}, - }, - }, + setupChainSync: func(t *testing.T) *chainSync { + ctrl := gomock.NewController(t) + blockStateMock := NewMockBlockState(ctrl) + blockStateMock.EXPECT().HasHeader(block1Header.ParentHash).Return(true, nil) + networkMock := NewMockNetwork(ctrl) + + return &chainSync{ + blockState: blockStateMock, + network: networkMock, + } + }, + }, + "requested_only_justification_but_got_nil": { + wantErr: errNilJustificationInResponse, + errString: "expected justification, received none: " + + block2Header.Hash().String(), + requestedData: network.RequestedDataJustification, + blockData: 
&types.BlockData{ + Hash: block2Header.Hash(), + Header: block2Header, + Body: nil, + Justification: nil, }, setupChainSync: func(t *testing.T) *chainSync { ctrl := gomock.NewController(t) @@ -920,7 +836,6 @@ func TestChainSync_validateResponse(t *testing.T) { return &chainSync{ blockState: blockStateMock, - badBlocks: []string{block2Header.Hash().String()}, network: networkMock, } }, @@ -930,8 +845,9 @@ func TestChainSync_validateResponse(t *testing.T) { for tname, tt := range cases { tt := tt t.Run(tname, func(t *testing.T) { - chainSync := tt.setupChainSync(t) - err := chainSync.validateResponse(tt.blockRequest, tt.blockResponse, peer.ID("peer")) + t.Parallel() + + err := validateResponseFields(tt.requestedData, tt.blockData) require.ErrorIs(t, err, tt.wantErr) if tt.errString != "" { require.EqualError(t, err, tt.errString) @@ -939,3 +855,78 @@ func TestChainSync_validateResponse(t *testing.T) { }) } } + +func TestChainSync_isResponseAChain(t *testing.T) { + t.Parallel() + + block1Header := &types.Header{ + ParentHash: common.MustHexToHash("0x00597cb4bb4cc13bf119f6613aec7642d4c06a2e453de53d34aea6f3f1eeb504"), + Number: 2, + } + + block2Header := &types.Header{ + ParentHash: block1Header.Hash(), + Number: 3, + } + + block4Header := &types.Header{ + ParentHash: common.MustHexToHash("0x198616547187613bf119f6613aec7642d4c06a2e453de53d34aea6f390788677"), + Number: 4, + } + + cases := map[string]struct { + expected bool + blockData []*types.BlockData + }{ + "not_a_chain": { + expected: false, + blockData: []*types.BlockData{ + { + Hash: block1Header.Hash(), + Header: block1Header, + Body: &types.Body{}, + Justification: &[]byte{0}, + }, + { + Hash: block2Header.Hash(), + Header: block2Header, + Body: &types.Body{}, + Justification: &[]byte{0}, + }, + { + Hash: block4Header.Hash(), + Header: block4Header, + Body: &types.Body{}, + Justification: &[]byte{0}, + }, + }, + }, + "is_a_chain": { + expected: true, + blockData: []*types.BlockData{ + { + Hash: block1Header.Hash(), + Header: block1Header, + Body: &types.Body{}, + Justification: &[]byte{0}, + }, + { + Hash: block2Header.Hash(), + Header: block2Header, + Body: &types.Body{}, + Justification: &[]byte{0}, + }, + }, + }, + } + + for tname, tt := range cases { + tt := tt + t.Run(tname, func(t *testing.T) { + t.Parallel() + output := isResponseAChain(tt.blockData) + require.Equal(t, tt.expected, output) + }) + } + +} diff --git a/dot/sync/errors.go b/dot/sync/errors.go index d44fc045dc..4d9862f3ca 100644 --- a/dot/sync/errors.go +++ b/dot/sync/errors.go @@ -20,19 +20,15 @@ var ( errRequestStartTooHigh = errors.New("request start number is higher than our best block") // chainSync errors - errUnableToGetTarget = errors.New("unable to get target") - errEmptyBlockData = errors.New("empty block data") - errNilBlockData = errors.New("block data is nil") - errNilHeaderInResponse = errors.New("expected header, received none") - errNilBodyInResponse = errors.New("expected body, received none") - errNoPeers = errors.New("no peers to sync with") - errResponseIsNotChain = errors.New("block response does not form a chain") - errPeerOnInvalidFork = errors.New("peer is on an invalid fork") - errUnknownParent = errors.New("parent of first block in block response is unknown") - errUnknownBlockForJustification = errors.New("received justification for unknown block") - errFailedToGetParent = errors.New("failed to get parent header") - errStartAndEndMismatch = errors.New("request start and end hash are not on the same chain") - errFailedToGetDescendant = 
errors.New("failed to find descendant block") - errBadBlock = errors.New("known bad block") - errAlreadyInDisjointSet = errors.New("already in disjoint set") + errUnableToGetTarget = errors.New("unable to get target") + errNilBlockData = errors.New("block data is nil") + errNilHeaderInResponse = errors.New("expected header, received none") + errNilBodyInResponse = errors.New("expected body, received none") + errNilJustificationInResponse = errors.New("expected justification, received none") + errNoPeers = errors.New("no peers to sync with") + errPeerOnInvalidFork = errors.New("peer is on an invalid fork") + errFailedToGetParent = errors.New("failed to get parent header") + errStartAndEndMismatch = errors.New("request start and end hash are not on the same chain") + errFailedToGetDescendant = errors.New("failed to find descendant block") + errAlreadyInDisjointSet = errors.New("already in disjoint set") ) From 1a56e6da8a4d31bfbd0e96878925102f55aa5ba2 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 22 Jun 2023 09:18:35 -0400 Subject: [PATCH 083/140] chore: solve all tests failures --- dot/sync/chain_sync_test.go | 22 ++++-------- dot/sync/syncer_test.go | 67 +++++++------------------------------ 2 files changed, 18 insertions(+), 71 deletions(-) diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index f586d8ccd0..980491bb80 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -252,7 +252,7 @@ func TestChainSync_setPeerHead(t *testing.T) { shouldBeAWorker: true, workerStatus: available, }, - "set_peer_head_with_a_to_ignore_peer_should_be_included_in_the_workerpoll": { + "set_peer_head_with_a_to_ignore_peer_should_not_be_included_in_the_workerpoll": { newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { networkMock := NewMockNetwork(ctrl) workerPool := newSyncWorkerPool(networkMock) @@ -267,7 +267,7 @@ func TestChainSync_setPeerHead(t *testing.T) { peerID: peer.ID("peer-test"), bestHash: randomHash, bestNumber: uint(20), - shouldBeAWorker: true, + shouldBeAWorker: false, }, "set_peer_head_that_stills_punished_in_the_worker_poll": { newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { @@ -419,8 +419,6 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker(t *testing.T) { mockedBlockState := NewMockBlockState(ctrl) mockedBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockedBlockState.EXPECT().BestBlockHeader().Return(mockedGenesisHeader, nil) - mockBabeVerifier := NewMockBabeVerifier(ctrl) mockStorageState := NewMockStorageState(ctrl) mockImportHandler := NewMockBlockImportHandler(ctrl) @@ -467,7 +465,6 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) { mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, trie.EmptyHash, 0, types.NewDigest()) - mockBlockState.EXPECT().BestBlockHeader().Return(mockedGenesisHeader, nil) mockNetwork := NewMockNetwork(ctrl) @@ -539,7 +536,6 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) { } func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker_Failing(t *testing.T) { - t.Parallel() ctrl := gomock.NewController(t) @@ -547,7 +543,6 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker_Failing(t *testing mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) 
mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, trie.EmptyHash, 0, types.NewDigest()) - mockBlockState.EXPECT().BestBlockHeader().Return(mockedGenesisHeader, nil) mockNetwork := NewMockNetwork(ctrl) @@ -586,11 +581,10 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker_Failing(t *testing doBlockRequestCount := 0 mockNetwork.EXPECT().DoBlockRequest(gomock.Any(), gomock.Any()). DoAndReturn(func(peerID, _ any) (any, any) { - // this simple logic does: ensure that the DoBlockRequest is called by + // lets ensure that the DoBlockRequest is called by // peer.ID(alice) and peer.ID(bob). When bob calls, this method will fail // then alice should pick the failed request and re-execute it which will // be the third call - defer func() { doBlockRequestCount++ }() pID := peerID.(peer.ID) // cast to peer ID @@ -645,12 +639,10 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker_Failing(t *testing close(stopCh) <-cs.workerPool.doneCh - // peer should be in the ignore set - _, ok := cs.workerPool.ignorePeers[peer.ID("bob")] + // peer should be punished + syncWorker, ok := cs.workerPool.workers[peer.ID("bob")] require.True(t, ok) - - _, ok = cs.workerPool.workers[peer.ID("bob")] - require.False(t, ok) + require.Equal(t, punished, syncWorker.status) } func createSuccesfullBlockResponse(_ *testing.T, genesisHash common.Hash, @@ -695,8 +687,6 @@ func ensureSuccessfulBlockImportFlow(t *testing.T, parentHeader *types.Header, mockImportHandler *MockBlockImportHandler, mockTelemetry *MockTelemetry, announceBlock bool) { t.Helper() - mockBlockState.EXPECT().HasHeader(parentHeader.Hash()).Return(true, nil) - for idx, blockData := range blocksReceived { mockBlockState.EXPECT().HasHeader(blockData.Header.Hash()).Return(false, nil) mockBlockState.EXPECT().HasBlockBody(blockData.Header.Hash()).Return(false, nil) diff --git a/dot/sync/syncer_test.go b/dot/sync/syncer_test.go index b634726320..ae14dbc52b 100644 --- a/dot/sync/syncer_test.go +++ b/dot/sync/syncer_test.go @@ -16,6 +16,7 @@ import ( "github.com/golang/mock/gomock" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNewService(t *testing.T) { @@ -279,65 +280,21 @@ func TestService_HandleBlockAnnounce(t *testing.T) { func Test_Service_HandleBlockAnnounceHandshake(t *testing.T) { t.Parallel() - errTest := errors.New("test error") + ctrl := gomock.NewController(t) + chainSync := NewMockChainSync(ctrl) + chainSync.EXPECT().setPeerHead(peer.ID("peer"), common.Hash{1}, uint(2)) - testCases := map[string]struct { - serviceBuilder func(ctrl *gomock.Controller) Service - from peer.ID - message *network.BlockAnnounceHandshake - errWrapped error - errMessage string - }{ - "success": { - serviceBuilder: func(ctrl *gomock.Controller) Service { - chainSync := NewMockChainSync(ctrl) - chainSync.EXPECT().setPeerHead(peer.ID("abc"), common.Hash{1}, uint(2)). - Return(nil) - return Service{ - chainSync: chainSync, - } - }, - from: peer.ID("abc"), - message: &network.BlockAnnounceHandshake{ - BestBlockHash: common.Hash{1}, - BestBlockNumber: 2, - }, - }, - "failure": { - serviceBuilder: func(ctrl *gomock.Controller) Service { - chainSync := NewMockChainSync(ctrl) - chainSync.EXPECT().setPeerHead(peer.ID("abc"), common.Hash{1}, uint(2)). 
- Return(errTest) - return Service{ - chainSync: chainSync, - } - }, - from: peer.ID("abc"), - message: &network.BlockAnnounceHandshake{ - BestBlockHash: common.Hash{1}, - BestBlockNumber: 2, - }, - errWrapped: errTest, - errMessage: "test error", - }, + service := Service{ + chainSync: chainSync, } - for name, testCase := range testCases { - testCase := testCase - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - - service := testCase.serviceBuilder(ctrl) - - err := service.HandleBlockAnnounceHandshake(testCase.from, testCase.message) - - assert.ErrorIs(t, err, testCase.errWrapped) - if testCase.errWrapped != nil { - assert.EqualError(t, err, testCase.errMessage) - } - }) + message := &network.BlockAnnounceHandshake{ + BestBlockHash: common.Hash{1}, + BestBlockNumber: 2, } + + err := service.HandleBlockAnnounceHandshake(peer.ID("peer"), message) + require.Nil(t, err) } func TestService_IsSynced(t *testing.T) { From 28367774b8d1a81b8437fea57e49b8484af004e3 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 22 Jun 2023 09:33:04 -0400 Subject: [PATCH 084/140] chore: move worker status update from `defer` --- dot/sync/worker_pool.go | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 1b1bd69f00..da49f04a07 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -259,18 +259,7 @@ func (s *syncWorkerPool) listenForRequests(stopCh chan struct{}) { } func (s *syncWorkerPool) executeRequest(who peer.ID, task *syncTask) { - defer func() { - s.mtx.Lock() - peerSync, has := s.workers[who] - if has { - peerSync.status = available - s.workers[who] = peerSync - } - s.mtx.Unlock() - - s.availableCond.Signal() - s.wg.Done() - }() + defer s.wg.Done() request := task.request logger.Debugf("[EXECUTING] worker %s, block request: %s", who, request) @@ -281,6 +270,15 @@ func (s *syncWorkerPool) executeRequest(who peer.ID, task *syncTask) { logger.Debugf("[FINISHED] worker %s, block data amount: %d", who, len(response.BlockData)) } + s.mtx.Lock() + peerSync, has := s.workers[who] + if has { + peerSync.status = available + s.workers[who] = peerSync + } + s.mtx.Unlock() + s.availableCond.Signal() + task.resultCh <- &syncTaskResult{ who: who, request: request, From 55ff3f8bf0e2f4f497eeb6cfec3bc2fa67030b5c Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 22 Jun 2023 09:41:11 -0400 Subject: [PATCH 085/140] chore: solve `lll` lint wanr --- dot/sync/chain_sync.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index d6ba858712..a98feeb8d2 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -696,7 +696,8 @@ func (cs *chainSync) handleWorkersResults( } retreiveBlocksSeconds := time.Since(startTime).Seconds() - logger.Debugf("🔽 retrieved %d blocks, took: %.2f seconds, starting process...", expectedSyncedBlocks, retreiveBlocksSeconds) + logger.Debugf("🔽 retrieved %d blocks, took: %.2f seconds, starting process...", + expectedSyncedBlocks, retreiveBlocksSeconds) // response was validated! 
place into ready block queue for _, bd := range syncingChain { From 9ef76cfb5be4fe5c53783971662d306b7eb55238 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 22 Jun 2023 11:16:54 -0400 Subject: [PATCH 086/140] chore: improve code coverage by testing failure scenarios --- dot/sync/chain_sync.go | 80 +++--- dot/sync/chain_sync_test.go | 518 +++++++++++++++++++++++++++++++++++- 2 files changed, 550 insertions(+), 48 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index a98feeb8d2..6ab1e55977 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -598,16 +598,13 @@ func (cs *chainSync) handleWorkersResults( taskResult.who, taskResult.err) if !errors.Is(taskResult.err, network.ErrReceivedEmptyMessage) { - switch { - case strings.Contains(taskResult.err.Error(), "protocols not supported"): + if strings.Contains(taskResult.err.Error(), "protocols not supported") { cs.network.ReportPeer(peerset.ReputationChange{ Value: peerset.BadProtocolValue, Reason: peerset.BadProtocolReason, }, taskResult.who) - cs.workerPool.ignorePeerAsWorker(taskResult.who) - default: - cs.workerPool.punishPeer(taskResult.who) } + cs.workerPool.punishPeer(taskResult.who) } cs.workerPool.submitRequest(taskResult.request, workersResults) @@ -623,14 +620,21 @@ func (cs *chainSync) handleWorkersResults( reverseBlockData(response.BlockData) } - if len(response.BlockData) > 0 { - firstBlockInResponse := response.BlockData[0] - lastBlockInResponse := response.BlockData[len(response.BlockData)-1] + err := validateResponseFields(request.RequestedData, response.BlockData) + if err != nil { + logger.Criticalf("validating fields: %s", err) + // TODO: check the reputation change for nil body in response + // and nil justification in response + if errors.Is(err, errNilHeaderInResponse) { + cs.network.ReportPeer(peerset.ReputationChange{ + Value: peerset.IncompleteHeaderValue, + Reason: peerset.IncompleteHeaderReason, + }, who) + } - logger.Tracef("processing %d blocks: %s (#%d) to %s (#%d)", - len(response.BlockData), - firstBlockInResponse.Hash.Short(), firstBlockInResponse.Header.Number, - lastBlockInResponse.Hash.Short(), lastBlockInResponse.Header.Number) + cs.workerPool.punishPeer(taskResult.who) + cs.workerPool.submitRequest(taskResult.request, workersResults) + continue taskResultLoop } isChain := isResponseAChain(response.BlockData) @@ -642,23 +646,6 @@ func (cs *chainSync) handleWorkersResults( } for _, blockInResponse := range response.BlockData { - err := validateResponseFields(request.RequestedData, blockInResponse) - if err != nil { - logger.Criticalf("validating fields: %s", err) - // TODO: check the reputation change for nil body in response - // and nil justification in response - if errors.Is(err, errNilHeaderInResponse) { - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.IncompleteHeaderValue, - Reason: peerset.IncompleteHeaderReason, - }, who) - } - - cs.workerPool.punishPeer(taskResult.who) - cs.workerPool.submitRequest(taskResult.request, workersResults) - continue taskResultLoop - } - if slices.Contains(cs.badBlocks, blockInResponse.Hash.String()) { logger.Criticalf("%s sent a known bad block: %s (#%d)", who, blockInResponse.Hash.String(), blockInResponse.Number()) @@ -688,8 +675,9 @@ func (cs *chainSync) handleWorkersResults( parentElement := syncingChain[0] for _, element := range syncingChain[1:] { if parentElement.Header.Hash() != element.Header.ParentHash { - panic(fmt.Sprintf("expected %s be parent of %s", - parentElement.Header.Hash(), 
element.Header.ParentHash)) + panic(fmt.Sprintf("expected %s (#%d) be parent of %s (#%d)", + parentElement.Header.Hash(), parentElement.Header.Number, + element.Header.Hash(), element.Header.Number)) } parentElement = element } @@ -947,23 +935,25 @@ func (cs *chainSync) handleBlock(block *types.Block, announceImportedBlock bool) } // validateResponseFields checks that the expected fields are in the block data -func validateResponseFields(requestedData byte, bd *types.BlockData) error { - if bd == nil { - return errNilBlockData - } +func validateResponseFields(requestedData byte, blocks []*types.BlockData) error { + for _, bd := range blocks { + if bd == nil { + return errNilBlockData + } - if (requestedData&network.RequestedDataHeader) == network.RequestedDataHeader && bd.Header == nil { - return fmt.Errorf("%w: %s", errNilHeaderInResponse, bd.Hash) - } + if (requestedData&network.RequestedDataHeader) == network.RequestedDataHeader && bd.Header == nil { + return fmt.Errorf("%w: %s", errNilHeaderInResponse, bd.Hash) + } - if (requestedData&network.RequestedDataBody) == network.RequestedDataBody && bd.Body == nil { - return fmt.Errorf("%w: %s", errNilBodyInResponse, bd.Hash) - } + if (requestedData&network.RequestedDataBody) == network.RequestedDataBody && bd.Body == nil { + return fmt.Errorf("%w: %s", errNilBodyInResponse, bd.Hash) + } - // if we requested strictly justification - if (requestedData|network.RequestedDataJustification) == network.RequestedDataJustification && - bd.Justification == nil { - return fmt.Errorf("%w: %s", errNilJustificationInResponse, bd.Hash) + // if we requested strictly justification + if (requestedData|network.RequestedDataJustification) == network.RequestedDataJustification && + bd.Justification == nil { + return fmt.Errorf("%w: %s", errNilJustificationInResponse, bd.Hash) + } } return nil diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index 980491bb80..589fe6eb38 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -454,7 +454,6 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker(t *testing.T) { close(stopCh) <-cs.workerPool.doneCh - } func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) { @@ -535,7 +534,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) { <-cs.workerPool.doneCh } -func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker_Failing(t *testing.T) { +func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorkerFailing(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) @@ -645,6 +644,478 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker_Failing(t *testing require.Equal(t, punished, syncWorker.status) } +func TestChainSync_BootstrapSync_SuccessfulSync_WithProtocolNotSupported(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, + trie.EmptyHash, 0, types.NewDigest()) + + mockNetwork := NewMockNetwork(ctrl) + + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockStorageState := NewMockStorageState(ctrl) + mockImportHandler := NewMockBlockImportHandler(ctrl) + mockTelemetry := NewMockTelemetry(ctrl) + + // this test expects two workers responding each request with 128 blocks which means + // we should import 256 blocks in total + blockResponse := 
createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256)
+	const announceBlock = false
+
+	// here we split the whole set in two parts, each one will be the "response" for each peer
+	worker1Response := &network.BlockResponseMessage{
+		BlockData: blockResponse.BlockData[:128],
+	}
+
+	// the first peer will respond with blocks 1 to 128, so the ensureBlockImportFlow
+	// will set up the expectations starting from the genesis header until block 128
+	ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState,
+		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock)
+
+	worker2Response := &network.BlockResponseMessage{
+		BlockData: blockResponse.BlockData[128:],
+	}
+	// worker 2 will respond with blocks 129 to 256, so the ensureBlockImportFlow
+	// will set up the expectations starting from block 128, from the previous worker, until block 256
+	parent := worker1Response.BlockData[127]
+	ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState,
+		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock)
+
+	// we use gomock.Any since I cannot guarantee which peer picks which request
+	// but the first call to DoBlockRequest will return the first set and the second
+	// call will return the second set
+	doBlockRequestCount := 0
+	mockNetwork.EXPECT().DoBlockRequest(gomock.Any(), gomock.Any()).
+		DoAndReturn(func(peerID, _ any) (any, any) {
+			// let's ensure that DoBlockRequest is called by
+			// peer.ID(alice) and peer.ID(bob). When bob calls, this method will fail,
+			// then alice should pick the failed request and re-execute it, which will
+			// be the third call
+			defer func() { doBlockRequestCount++ }()
+
+			pID := peerID.(peer.ID) // cast to peer ID
+			switch doBlockRequestCount {
+			case 0, 1:
+				if pID == peer.ID("alice") {
+					return worker1Response, nil
+				}
+
+				if pID == peer.ID("bob") {
+					return nil, errors.New("protocols not supported")
+				}
+
+				require.FailNow(t, "expected calls by %s and %s, got: %s",
+					peer.ID("alice"), peer.ID("bob"), pID)
+			default:
+				// ensure that the third call will be made by peer.ID("alice")
+				require.Equalf(t, pID, peer.ID("alice"),
+					"expect third call to be made by %s, got: %s", peer.ID("alice"), pID)
+			}
+
+			return worker2Response, nil
+		}).Times(3)
+
+	mockNetwork.EXPECT().AllConnectedPeersID().Return([]peer.ID{})
+
+	// since peer.ID("bob") will fail with "protocols not supported",
+	// its reputation will be reduced
+	mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{
+		Value:  peerset.BadProtocolValue,
+		Reason: peerset.BadProtocolReason,
+	}, peer.ID("bob"))
+	// setup a chain sync which holds in its peer view map
+	// 3 peers, each one announcing block 129 as its best block number
+	// (the sketch below shows how this bad-protocol error is detected).
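The reputation change asserted by this test is produced by handleWorkersResults, which, per the refactor earlier in this patch, detects an unsupported-protocol worker error by substring match before reporting peerset.BadProtocolValue and punishing the worker. A toy, standalone sketch of that detection, where the exact error text is an assumed shape of the libp2p failure rather than a verified message:

    package main

    import (
        "errors"
        "fmt"
        "strings"
    )

    func main() {
        // assumed shape of the error surfaced when a peer lacks the sync protocol
        err := errors.New("protocols not supported: [/dot/sync/2]")

        if strings.Contains(err.Error(), "protocols not supported") {
            // the real code reports peerset.BadProtocolValue/Reason here,
            // punishes the worker, and re-submits the request to another peer
            fmt.Println("report bad protocol and retry elsewhere")
        }
    }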
+ // We start this test with genesis block being our best block, so + // we're far behind by 128 blocks, we should execute a bootstrap + // sync request those blocks + const blocksAhead = 257 + cs := setupChainSyncToBootstrapMode(t, blocksAhead, + mockBlockState, mockNetwork, mockBabeVerifier, + mockStorageState, mockImportHandler, mockTelemetry) + + target, err := cs.getTarget() + require.NoError(t, err) + require.Equal(t, uint(blocksAhead), target) + + // include a new worker in the worker pool set, this worker + // should be an available peer that will receive a block request + // the worker pool executes the workers management + cs.workerPool.fromBlockAnnounce(peer.ID("alice")) + cs.workerPool.fromBlockAnnounce(peer.ID("bob")) + + stopCh := make(chan struct{}) + go cs.workerPool.listenForRequests(stopCh) + + err = cs.executeBootstrapSync(mockedGenesisHeader) + require.NoError(t, err) + + close(stopCh) + <-cs.workerPool.doneCh + + // peer should be punished + syncWorker, ok := cs.workerPool.workers[peer.ID("bob")] + require.True(t, ok) + require.Equal(t, punished, syncWorker.status) +} + +func TestChainSync_BootstrapSync_SuccessfulSync_WithNilHeaderInResponse(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, + trie.EmptyHash, 0, types.NewDigest()) + + mockNetwork := NewMockNetwork(ctrl) + + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockStorageState := NewMockStorageState(ctrl) + mockImportHandler := NewMockBlockImportHandler(ctrl) + mockTelemetry := NewMockTelemetry(ctrl) + + // this test expects two workers responding each request with 128 blocks which means + // we should import 256 blocks in total + blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256) + const announceBlock = false + + // here we split the whole set in two parts each one will be the "response" for each peer + worker1Response := &network.BlockResponseMessage{ + BlockData: blockResponse.BlockData[:128], + } + + // the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow + // will setup the expectations starting from the genesis header until block 128 + ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState, + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) + + worker2Response := &network.BlockResponseMessage{ + BlockData: blockResponse.BlockData[128:], + } + // the worker 2 will respond from block 129 to 256 so the ensureBlockImportFlow + // will setup the expectations starting from block 128, from previous worker, until block 256 + parent := worker1Response.BlockData[127] + ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) + + // we use gomock.Any since I cannot guarantee which peer picks which request + // but the first call to DoBlockRequest will return the first set and the second + // call will return the second set + doBlockRequestCount := 0 + mockNetwork.EXPECT().DoBlockRequest(gomock.Any(), gomock.Any()). + DoAndReturn(func(peerID, _ any) (any, any) { + // lets ensure that the DoBlockRequest is called by + // peer.ID(alice) and peer.ID(bob). 
When bob calls, this method returns a
+			// response item without the header that was requested
+			defer func() { doBlockRequestCount++ }()
+
+			pID := peerID.(peer.ID) // cast to peer ID
+			switch doBlockRequestCount {
+			case 0, 1:
+				if pID == peer.ID("alice") {
+					return worker1Response, nil
+				}
+
+				if pID == peer.ID("bob") {
+					incompleteBlockData := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 128, 256)
+					incompleteBlockData.BlockData[0].Header = nil
+					return incompleteBlockData, nil
+				}
+
+				require.FailNow(t, "expected calls by %s and %s, got: %s",
+					peer.ID("alice"), peer.ID("bob"), pID)
+			default:
+				// ensure that the third call will be made by peer.ID("alice")
+				require.Equalf(t, pID, peer.ID("alice"),
+					"expect third call to be made by %s, got: %s", peer.ID("alice"), pID)
+			}
+
+			return worker2Response, nil
+		}).Times(3)
+
+	mockNetwork.EXPECT().AllConnectedPeersID().Return([]peer.ID{})
+
+	// since peer.ID("bob") will respond with block data that is missing the
+	// requested header, its reputation will be reduced
+	mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{
+		Value:  peerset.IncompleteHeaderValue,
+		Reason: peerset.IncompleteHeaderReason,
+	}, peer.ID("bob"))
+	// setup a chain sync which holds in its peer view map
+	// 3 peers, each one announcing block 129 as its best block number.
+	// We start this test with the genesis block as our best block, so
+	// we're far behind by 128 blocks and should execute a bootstrap
+	// sync to request those blocks
+	const blocksAhead = 257
+	cs := setupChainSyncToBootstrapMode(t, blocksAhead,
+		mockBlockState, mockNetwork, mockBabeVerifier,
+		mockStorageState, mockImportHandler, mockTelemetry)
+
+	target, err := cs.getTarget()
+	require.NoError(t, err)
+	require.Equal(t, uint(blocksAhead), target)
+
+	// include a new worker in the worker pool set, this worker
+	// should be an available peer that will receive a block request
+	// the worker pool executes the workers management
+	cs.workerPool.fromBlockAnnounce(peer.ID("alice"))
+	cs.workerPool.fromBlockAnnounce(peer.ID("bob"))
+
+	stopCh := make(chan struct{})
+	go cs.workerPool.listenForRequests(stopCh)
+
+	err = cs.executeBootstrapSync(mockedGenesisHeader)
+	require.NoError(t, err)
+
+	close(stopCh)
+	<-cs.workerPool.doneCh
+
+	// peer should be punished
+	syncWorker, ok := cs.workerPool.workers[peer.ID("bob")]
+	require.True(t, ok)
+	require.Equal(t, punished, syncWorker.status)
+}
+
+func TestChainSync_BootstrapSync_SuccessfulSync_WithResponseIsNotAChain(t *testing.T) {
+	t.Parallel()
+
+	ctrl := gomock.NewController(t)
+	mockBlockState := NewMockBlockState(ctrl)
+	mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo))
+	mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash,
+		trie.EmptyHash, 0, types.NewDigest())
+
+	mockNetwork := NewMockNetwork(ctrl)
+
+	mockBabeVerifier := NewMockBabeVerifier(ctrl)
+	mockStorageState := NewMockStorageState(ctrl)
+	mockImportHandler := NewMockBlockImportHandler(ctrl)
+	mockTelemetry := NewMockTelemetry(ctrl)
+
+	// this test expects two workers, each responding to a request with 128 blocks,
+	// which means we should import 256 blocks in total
+	blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256)
+	const announceBlock = false
+
+	// here we split the whole set in two parts, each one will be the "response" for each peer
+	worker1Response := &network.BlockResponseMessage{
+		BlockData: blockResponse.BlockData[:128],
+	}
+
+	// the first peer will respond from 
block 1 to 128 so the ensureBlockImportFlow + // will setup the expectations starting from the genesis header until block 128 + ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState, + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) + + worker2Response := &network.BlockResponseMessage{ + BlockData: blockResponse.BlockData[128:], + } + // the worker 2 will respond from block 129 to 256 so the ensureBlockImportFlow + // will setup the expectations starting from block 128, from previous worker, until block 256 + parent := worker1Response.BlockData[127] + ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) + + // we use gomock.Any since I cannot guarantee which peer picks which request + // but the first call to DoBlockRequest will return the first set and the second + // call will return the second set + doBlockRequestCount := 0 + mockNetwork.EXPECT().DoBlockRequest(gomock.Any(), gomock.Any()). + DoAndReturn(func(peerID, _ any) (any, any) { + // lets ensure that the DoBlockRequest is called by + // peer.ID(alice) and peer.ID(bob). When bob calls, this method return an + // response that does not form an chain + defer func() { doBlockRequestCount++ }() + + pID := peerID.(peer.ID) // cast to peer ID + switch doBlockRequestCount { + case 0, 1: + if pID == peer.ID("alice") { + return worker1Response, nil + } + + if pID == peer.ID("bob") { + notAChainBlockData := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 128, 256) + // swap positions to force the problem + firstItem := notAChainBlockData.BlockData[0] + notAChainBlockData.BlockData[0] = notAChainBlockData.BlockData[130] + notAChainBlockData.BlockData[130] = firstItem + return notAChainBlockData, nil + } + + require.FailNow(t, "expected calls by %s and %s, got: %s", + peer.ID("alice"), peer.ID("bob"), pID) + default: + // ensure the the third call will be made by peer.ID("alice") + require.Equalf(t, pID, peer.ID("alice"), + "expect third call be made by %s, got: %s", peer.ID("alice"), pID) + } + + return worker2Response, nil + }).Times(3) + + mockNetwork.EXPECT().AllConnectedPeersID().Return([]peer.ID{}) + + // setup a chain sync which holds in its peer view map + // 3 peers, each one announce block 129 as its best block number. 
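This test exercises the isResponseAChain check introduced earlier in this patch: a response only counts as a chain when every block's ParentHash equals the hash of the block before it, and fewer than two blocks is trivially a chain. A simplified, self-contained version of that walk, with toy types and a toy hash rather than the Gossamer implementation:

    package main

    import "fmt"

    type header struct {
        parentHash string
        number     uint
    }

    // toy stand-in for header.Hash()
    func hash(h header) string { return fmt.Sprintf("hash-%d", h.number) }

    // every block must point at the hash of its predecessor
    func isResponseAChain(headers []header) bool {
        for i := 1; i < len(headers); i++ {
            if headers[i].parentHash != hash(headers[i-1]) {
                return false
            }
        }
        return true
    }

    func main() {
        ok := isResponseAChain([]header{
            {parentHash: "hash-1", number: 2},
            {parentHash: "hash-2", number: 3},
            {parentHash: "hash-9", number: 4}, // wrong parent breaks the chain
        })
        fmt.Println(ok) // false
    }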
+ // We start this test with genesis block being our best block, so + // we're far behind by 128 blocks, we should execute a bootstrap + // sync request those blocks + const blocksAhead = 257 + cs := setupChainSyncToBootstrapMode(t, blocksAhead, + mockBlockState, mockNetwork, mockBabeVerifier, + mockStorageState, mockImportHandler, mockTelemetry) + + target, err := cs.getTarget() + require.NoError(t, err) + require.Equal(t, uint(blocksAhead), target) + + // include a new worker in the worker pool set, this worker + // should be an available peer that will receive a block request + // the worker pool executes the workers management + cs.workerPool.fromBlockAnnounce(peer.ID("alice")) + cs.workerPool.fromBlockAnnounce(peer.ID("bob")) + + stopCh := make(chan struct{}) + go cs.workerPool.listenForRequests(stopCh) + + err = cs.executeBootstrapSync(mockedGenesisHeader) + require.NoError(t, err) + + close(stopCh) + <-cs.workerPool.doneCh + + // peer should be punished + syncWorker, ok := cs.workerPool.workers[peer.ID("bob")] + require.True(t, ok) + require.Equal(t, punished, syncWorker.status) +} + +func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, + trie.EmptyHash, 0, types.NewDigest()) + + mockNetwork := NewMockNetwork(ctrl) + + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockStorageState := NewMockStorageState(ctrl) + mockImportHandler := NewMockBlockImportHandler(ctrl) + mockTelemetry := NewMockTelemetry(ctrl) + + // this test expects two workers responding each request with 128 blocks which means + // we should import 256 blocks in total + blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256) + const announceBlock = false + + // here we split the whole set in two parts each one will be the "response" for each peer + worker1Response := &network.BlockResponseMessage{ + BlockData: blockResponse.BlockData[:128], + } + + // the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow + // will setup the expectations starting from the genesis header until block 128 + ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState, + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) + + worker2Response := &network.BlockResponseMessage{ + BlockData: blockResponse.BlockData[128:], + } + // the worker 2 will respond from block 129 to 256 so the ensureBlockImportFlow + // will setup the expectations starting from block 128, from previous worker, until block 256 + parent := worker1Response.BlockData[127] + ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) + + fakeBadBlockHash := common.MustHexToHash("0x18767cb4bb4cc13bf119f6613aec5487d4c06a2e453de53d34aea6f3f1ee9855") + + // we use gomock.Any since I cannot guarantee which peer picks which request + // but the first call to DoBlockRequest will return the first set and the second + // call will return the second set + doBlockRequestCount := 0 + mockNetwork.EXPECT().DoBlockRequest(gomock.Any(), gomock.Any()). 
+ DoAndReturn(func(peerID, _ any) (any, any) { + // lets ensure that the DoBlockRequest is called by + // peer.ID(alice) and peer.ID(bob). When bob calls, this method return an + // response that contains a know bad block + defer func() { doBlockRequestCount++ }() + + pID := peerID.(peer.ID) // cast to peer ID + switch doBlockRequestCount { + case 0, 1: + if pID == peer.ID("alice") { + return worker1Response, nil + } + + if pID == peer.ID("bob") { + blockDataWithBadBlock := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 129, 256) + blockDataWithBadBlock.BlockData[4].Hash = fakeBadBlockHash + return blockDataWithBadBlock, nil + } + + require.FailNow(t, "expected calls by %s and %s, got: %s", + peer.ID("alice"), peer.ID("bob"), pID) + default: + // ensure the the third call will be made by peer.ID("alice") + require.Equalf(t, pID, peer.ID("alice"), + "expect third call be made by %s, got: %s", peer.ID("alice"), pID) + } + + return worker2Response, nil + }).Times(3) + + mockNetwork.EXPECT().AllConnectedPeersID().Return([]peer.ID{}) + mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{ + Value: peerset.BadBlockAnnouncementValue, + Reason: peerset.BadBlockAnnouncementReason, + }, peer.ID("bob")) + // setup a chain sync which holds in its peer view map + // 3 peers, each one announce block 129 as its best block number. + // We start this test with genesis block being our best block, so + // we're far behind by 128 blocks, we should execute a bootstrap + // sync request those blocks + const blocksAhead = 257 + cs := setupChainSyncToBootstrapMode(t, blocksAhead, + mockBlockState, mockNetwork, mockBabeVerifier, + mockStorageState, mockImportHandler, mockTelemetry) + + cs.badBlocks = []string{fakeBadBlockHash.String()} + + target, err := cs.getTarget() + require.NoError(t, err) + require.Equal(t, uint(blocksAhead), target) + + // include a new worker in the worker pool set, this worker + // should be an available peer that will receive a block request + // the worker pool executes the workers management + cs.workerPool.fromBlockAnnounce(peer.ID("alice")) + cs.workerPool.fromBlockAnnounce(peer.ID("bob")) + + stopCh := make(chan struct{}) + go cs.workerPool.listenForRequests(stopCh) + + err = cs.executeBootstrapSync(mockedGenesisHeader) + require.NoError(t, err) + + close(stopCh) + <-cs.workerPool.doneCh + + // peer should be not in the worker pool + // peer should be in the ignore list + _, ok := cs.workerPool.workers[peer.ID("bob")] + require.False(t, ok) + + _, ok = cs.workerPool.ignorePeers[peer.ID("bob")] + require.True(t, ok) +} + func createSuccesfullBlockResponse(_ *testing.T, genesisHash common.Hash, startingAt, numBlocks int) *network.BlockResponseMessage { response := new(network.BlockResponseMessage) @@ -837,7 +1308,7 @@ func TestChainSync_validateResponseFields(t *testing.T) { t.Run(tname, func(t *testing.T) { t.Parallel() - err := validateResponseFields(tt.requestedData, tt.blockData) + err := validateResponseFields(tt.requestedData, []*types.BlockData{tt.blockData}) require.ErrorIs(t, err, tt.wantErr) if tt.errString != "" { require.EqualError(t, err, tt.errString) @@ -918,5 +1389,46 @@ func TestChainSync_isResponseAChain(t *testing.T) { require.Equal(t, tt.expected, output) }) } +} + +func TestChainSync_getHighestBlock(t *testing.T) { + t.Parallel() + + cases := map[string]struct { + expectedHighestBlock uint + wantErr error + chainSyncPeerView map[peer.ID]*peerView + }{ + "no_peer_view": { + wantErr: errNoPeers, + expectedHighestBlock: 0, + chainSyncPeerView: 
make(map[peer.ID]*peerView), + }, + "highest_block": { + expectedHighestBlock: 500, + chainSyncPeerView: map[peer.ID]*peerView{ + peer.ID("peer-A"): { + number: 100, + }, + peer.ID("peer-B"): { + number: 500, + }, + }, + }, + } + + for tname, tt := range cases { + tt := tt + t.Run(tname, func(t *testing.T) { + t.Parallel() + + chainSync := &chainSync{ + peerView: tt.chainSyncPeerView, + } + highestBlock, err := chainSync.getHighestBlock() + require.ErrorIs(t, err, tt.wantErr) + require.Equal(t, tt.expectedHighestBlock, highestBlock) + }) + } } From 70379243cbef84fba6a6ba37a71652c790b765b6 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 26 Jun 2023 20:45:19 -0400 Subject: [PATCH 087/140] chore: request missing blocks --- dot/sync/chain_sync.go | 428 ++++++++++++++++++++---------------- dot/sync/chain_sync_test.go | 14 +- dot/sync/errors.go | 2 +- 3 files changed, 241 insertions(+), 203 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 6ab1e55977..f260280001 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -25,6 +25,7 @@ import ( "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/blocktree" "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/common/variadic" ) var _ ChainSync = &chainSync{} @@ -141,9 +142,8 @@ type chainSyncConfig struct { } func newChainSync(cfg chainSyncConfig) *chainSync { - atomicState := atomic.Value{} - atomicState.Store(bootstrap) + atomicState.Store(tip) return &chainSync{ stopCh: make(chan struct{}), storageState: cfg.storageState, @@ -182,7 +182,16 @@ func (cs *chainSync) start() { isSyncedGauge.Set(0) go cs.pendingBlocks.run(cs.finalisedCh, cs.stopCh) go cs.workerPool.listenForRequests(cs.stopCh) - go cs.sync() + + isFarFromTarget, err := cs.isFarFromTarget() + if err != nil && !errors.Is(err, errNoPeerViews) { + panic("failing while checking target distance: " + err.Error()) + } + + if isFarFromTarget { + cs.state.Store(bootstrap) + go cs.bootstrapSync() + } } func (cs *chainSync) stop() { @@ -190,64 +199,55 @@ func (cs *chainSync) stop() { <-cs.workerPool.doneCh } -func (cs *chainSync) sync() { - for { - syncTarget, err := cs.getTarget() - if err != nil { - logger.Criticalf("getting target: %w", err) - return - } +func (cs *chainSync) isFarFromTarget() (bool, error) { + syncTarget, err := cs.getTarget() + if err != nil { + return false, fmt.Errorf("getting target: %w", err) + } - finalisedHeader, err := cs.blockState.GetHighestFinalisedHeader() - if err != nil { - logger.Criticalf("getting finalised block header: %s", err) - return - } + bestBlockHeader, err := cs.blockState.BestBlockHeader() + if err != nil { + return false, fmt.Errorf("getting best block header: %w", err) + } - logger.Infof( - "🚣 currently syncing, %d peers connected, "+ - "%d available workers, "+ - "target block number %d, "+ - "finalised block number %d with hash %s", - len(cs.network.Peers()), - cs.workerPool.totalWorkers(), - syncTarget, finalisedHeader.Number, finalisedHeader.Hash()) + bestBlockNumber := bestBlockHeader.Number + isFarFromTarget := bestBlockNumber+network.MaxBlockResponseSize < syncTarget + return isFarFromTarget, nil +} - bestBlockHeader, err := cs.blockState.BestBlockHeader() - if err != nil { - logger.Criticalf("getting best block header: %s", err) +func (cs *chainSync) bootstrapSync() { + for { + select { + case <-cs.stopCh: + logger.Warn("ending bootstrap sync, chain sync stop channel triggered") return + default: } - bestBlockNumber := 
bestBlockHeader.Number - isFarFromTarget := bestBlockNumber+network.MaxBlockResponseSize < syncTarget + isFarFromTarget, err := cs.isFarFromTarget() + if err != nil && !errors.Is(err, errNoPeerViews) { + logger.Criticalf("ending bootstrap sync, checking target distance: %s", err) + return + } if isFarFromTarget { - // we are more than 128 blocks behind the head, switch to bootstrap - swapped := cs.state.CompareAndSwap(tip, bootstrap) - isSyncedGauge.Set(0) - - if swapped { - logger.Debugf("switched sync mode to %d", bootstrap) + bestBlockHeader, err := cs.blockState.BestBlockHeader() + if err != nil { + logger.Criticalf("getting best block header: %s", err) + return } - err := cs.executeBootstrapSync(bestBlockHeader) + cs.workerPool.useConnectedPeers() + err = cs.requestMaxBlocksFrom(bestBlockHeader) if err != nil { logger.Errorf("while executing bootsrap sync: %s", err) } } else { // we are less than 128 blocks behind the target we can use tip sync - swapped := cs.state.CompareAndSwap(bootstrap, tip) + cs.state.Store(tip) isSyncedGauge.Set(1) - - if swapped { - logger.Debugf("switched sync mode to %d", tip) - } - - err := cs.requestPendingBlocks(finalisedHeader) - if err != nil { - logger.Errorf("while requesting pending blocks: %w", err) - } + logger.Debugf("switched sync mode to %d", tip) + return } } } @@ -286,7 +286,21 @@ func (cs *chainSync) onBlockAnnounce(announced announcedBlock) error { return nil } - return cs.requestAnnouncedBlock(announced) + isFarFromTarget, err := cs.isFarFromTarget() + if err != nil && !errors.Is(err, errNoPeerViews) { + return fmt.Errorf("checking target distance: %w", err) + } + + if !isFarFromTarget { + return cs.requestAnnouncedBlock(announced) + } + + // we are more than 128 blocks behind the head, switch to bootstrap + cs.state.Store(bootstrap) + isSyncedGauge.Set(0) + logger.Debugf("switched sync mode to %d", bootstrap) + go cs.bootstrapSync() + return nil } func (cs *chainSync) requestAnnouncedBlock(announce announcedBlock) error { @@ -341,7 +355,7 @@ func (cs *chainSync) requestAnnouncedBlock(announce announcedBlock) error { return nil } -func (cs *chainSync) requestChainBlocks(announcedHeader, bestBlockHeader *types.Header, peerWhoAnnounced peer.ID) { +func (cs *chainSync) requestChainBlocks(announcedHeader, bestBlockHeader *types.Header, peerWhoAnnounced peer.ID) error { gapLength := uint32(announcedHeader.Number - bestBlockHeader.Number) startAtBlock := announcedHeader.Number totalBlocks := uint32(1) @@ -361,12 +375,13 @@ func (cs *chainSync) requestChainBlocks(announcedHeader, bestBlockHeader *types. 
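
// Aside: a minimal sketch (illustrative only; the helper name
// descendingWindow does not exist in this series) of the window arithmetic
// used just above, assuming announced >= maxBlocks: a descending request
// capped at maxBlocks covers the inclusive range
// [announced-maxBlocks+1, announced].
func descendingWindow(announced uint, maxBlocks uint32) (startAtBlock uint, totalBlocks uint32) {
	// e.g. an announce for block #1000 with maxBlocks 128 gives
	// startAtBlock 873 and totalBlocks 128, i.e. blocks 873..1000.
	return announced - uint(maxBlocks) + 1, maxBlocks
}
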
} resultsQueue := make(chan *syncTaskResult) - wg := sync.WaitGroup{} - - wg.Add(1) - go cs.handleWorkersResults(resultsQueue, startAtBlock, totalBlocks, &wg) cs.workerPool.submitBoundedRequest(request, peerWhoAnnounced, resultsQueue) - wg.Wait() + err := cs.handleWorkersResults(resultsQueue, startAtBlock, totalBlocks) + if err != nil { + return fmt.Errorf("while handling workers results: %w", err) + } + + return nil } func (cs *chainSync) requestForkBlocks(bestBlockHeader, highestFinalizedHeader, announcedHeader *types.Header, @@ -396,12 +411,12 @@ func (cs *chainSync) requestForkBlocks(bestBlockHeader, highestFinalizedHeader, peerWhoAnnounced, gapLength, announcedHash, announcedHeader.Number) resultsQueue := make(chan *syncTaskResult) - wg := sync.WaitGroup{} - - wg.Add(1) - go cs.handleWorkersResults(resultsQueue, startAtBlock, gapLength, &wg) cs.workerPool.submitBoundedRequest(request, peerWhoAnnounced, resultsQueue) - wg.Wait() + + err = cs.handleWorkersResults(resultsQueue, startAtBlock, gapLength) + if err != nil { + return fmt.Errorf("while handling workers results: %w", err) + } return nil } @@ -435,7 +450,7 @@ func (cs *chainSync) requestPendingBlocks(highestFinalizedHeader *types.Header) gapLength := pendingBlock.number - highestFinalizedHeader.Number if gapLength > 128 { - logger.Criticalf("gap of %d blocks, max expected: 128 block", gapLength) + logger.Warnf("gap of %d blocks, max expected: 128 block", gapLength) gapLength = 128 } @@ -445,24 +460,22 @@ func (cs *chainSync) requestPendingBlocks(highestFinalizedHeader *types.Header) // the `requests` in the tip sync are not related necessarily // this is why we need to treat them separately - wg := sync.WaitGroup{} - wg.Add(1) resultsQueue := make(chan *syncTaskResult) + cs.workerPool.submitRequest(descendingGapRequest, resultsQueue) // TODO: we should handle the requests concurrently // a way of achieve that is by constructing a new `handleWorkersResults` for // handling only tip sync requests - go cs.handleWorkersResults(resultsQueue, startAtBlock, *descendingGapRequest.Max, &wg) - cs.workerPool.submitRequest(descendingGapRequest, resultsQueue) - wg.Wait() + err = cs.handleWorkersResults(resultsQueue, startAtBlock, *descendingGapRequest.Max) + if err != nil { + return fmt.Errorf("while handling workers results: %w", err) + } } return nil } -func (cs *chainSync) executeBootstrapSync(bestBlockHeader *types.Header) error { - cs.workerPool.useConnectedPeers() - +func (cs *chainSync) requestMaxBlocksFrom(bestBlockHeader *types.Header) error { startRequestAt := bestBlockHeader.Number + 1 // we build the set of requests based on the amount of available peers @@ -503,15 +516,11 @@ func (cs *chainSync) executeBootstrapSync(bestBlockHeader *types.Header) error { } } - wg := sync.WaitGroup{} resultsQueue := make(chan *syncTaskResult) - - wg.Add(1) - resultErrCh := cs.handleWorkersResults(resultsQueue, startRequestAt, expectedAmountOfBlocks, &wg) cs.workerPool.submitRequests(requests, resultsQueue) - wg.Wait() - if err := <-resultErrCh; err != nil { + err = cs.handleWorkersResults(resultsQueue, startRequestAt, expectedAmountOfBlocks) + if err != nil { return fmt.Errorf("while handling workers results: %w", err) } @@ -528,8 +537,7 @@ func (cs *chainSync) getTarget() (uint, error) { // in practice, this shouldn't happen, as we only start the module once we have some peer states if len(cs.peerView) == 0 { - // return max uint32 instead of 0, as returning 0 would switch us to tip mode unexpectedly - return 0, errUnableToGetTarget + return 0, 
errNoPeerViews } // we are going to sort the data and remove the outliers then we will return the avg of all the valid elements @@ -549,156 +557,186 @@ func (cs *chainSync) getTarget() (uint, error) { // in the queue and wait for it to completes // TODO: handle only justification requests func (cs *chainSync) handleWorkersResults( - workersResults chan *syncTaskResult, startAtBlock uint, expectedSyncedBlocks uint32, wg *sync.WaitGroup) chan error { - errCh := make(chan error, 1) - - go func() { - startTime := time.Now() - defer func() { - totalSyncAndImportSeconds := time.Since(startTime).Seconds() - bps := float64(expectedSyncedBlocks) / totalSyncAndImportSeconds - logger.Debugf("⛓️ synced %d blocks, "+ - "took: %.2f seconds, bps: %.2f blocks/second", - expectedSyncedBlocks, totalSyncAndImportSeconds, bps) - - close(errCh) - wg.Done() - }() - - logger.Debugf("💤 waiting for %d blocks", expectedSyncedBlocks) - syncingChain := make([]*types.BlockData, expectedSyncedBlocks) - // the total numbers of blocks is missing in the syncing chain - waitingBlocks := expectedSyncedBlocks - - taskResultLoop: - for waitingBlocks > 0 { - // in a case where we don't handle workers results we should check the pool - idleDuration := time.Minute - idleTimer := time.NewTimer(idleDuration) - - select { - case <-cs.stopCh: - return + workersResults chan *syncTaskResult, startAtBlock uint, expectedSyncedBlocks uint32) error { + syncTarget, err := cs.getTarget() + if err != nil { + logger.Warnf("getting target: %w", err) + } - case <-idleTimer.C: - logger.Warnf("idle ticker triggered! checking pool") - cs.workerPool.useConnectedPeers() - continue + finalisedHeader, err := cs.blockState.GetHighestFinalisedHeader() + if err != nil { + return fmt.Errorf("getting finalised block header: %w", err) + } - case taskResult := <-workersResults: - if !idleTimer.Stop() { - <-idleTimer.C - } + logger.Infof( + "🚣 currently syncing, %d peers connected, "+ + "%d available workers, "+ + "target block number %d, "+ + "finalised block number %d with hash %s", + len(cs.network.Peers()), + cs.workerPool.totalWorkers(), + syncTarget, finalisedHeader.Number, finalisedHeader.Hash()) + + startTime := time.Now() + defer func() { + totalSyncAndImportSeconds := time.Since(startTime).Seconds() + bps := float64(expectedSyncedBlocks) / totalSyncAndImportSeconds + logger.Debugf("⛓️ synced %d blocks, "+ + "took: %.2f seconds, bps: %.2f blocks/second", + expectedSyncedBlocks, totalSyncAndImportSeconds, bps) + }() - logger.Debugf("task result: peer(%s), with error: %v, with response: %v", - taskResult.who, taskResult.err != nil, taskResult.response != nil) - - if taskResult.err != nil { - logger.Errorf("task result: peer(%s) error: %s", - taskResult.who, taskResult.err) - - if !errors.Is(taskResult.err, network.ErrReceivedEmptyMessage) { - if strings.Contains(taskResult.err.Error(), "protocols not supported") { - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.BadProtocolValue, - Reason: peerset.BadProtocolReason, - }, taskResult.who) - } - cs.workerPool.punishPeer(taskResult.who) - } + syncingChain := make([]*types.BlockData, expectedSyncedBlocks) + // the total numbers of blocks is missing in the syncing chain + waitingBlocks := expectedSyncedBlocks - cs.workerPool.submitRequest(taskResult.request, workersResults) - continue - } +taskResultLoop: + for waitingBlocks > 0 { + // in a case where we don't handle workers results we should check the pool + idleDuration := time.Minute + idleTimer := time.NewTimer(idleDuration) - who := 
taskResult.who - request := taskResult.request - response := taskResult.response + select { + case <-cs.stopCh: + return nil - if request.Direction == network.Descending { - // reverse blocks before pre-validating and placing in ready queue - reverseBlockData(response.BlockData) - } + case <-idleTimer.C: + logger.Warnf("idle ticker triggered! checking pool") + cs.workerPool.useConnectedPeers() + continue - err := validateResponseFields(request.RequestedData, response.BlockData) - if err != nil { - logger.Criticalf("validating fields: %s", err) - // TODO: check the reputation change for nil body in response - // and nil justification in response - if errors.Is(err, errNilHeaderInResponse) { + case taskResult := <-workersResults: + if !idleTimer.Stop() { + <-idleTimer.C + } + + logger.Debugf("task result: peer(%s), with error: %v, with response: %v", + taskResult.who, taskResult.err != nil, taskResult.response != nil) + + if taskResult.err != nil { + logger.Errorf("task result: peer(%s) error: %s", + taskResult.who, taskResult.err) + + if !errors.Is(taskResult.err, network.ErrReceivedEmptyMessage) { + if strings.Contains(taskResult.err.Error(), "protocols not supported") { cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.IncompleteHeaderValue, - Reason: peerset.IncompleteHeaderReason, - }, who) + Value: peerset.BadProtocolValue, + Reason: peerset.BadProtocolReason, + }, taskResult.who) } - cs.workerPool.punishPeer(taskResult.who) - cs.workerPool.submitRequest(taskResult.request, workersResults) - continue taskResultLoop } - isChain := isResponseAChain(response.BlockData) - if !isChain { - logger.Criticalf("response from %s is not a chain", who) - cs.workerPool.punishPeer(taskResult.who) + cs.workerPool.submitRequest(taskResult.request, workersResults) + continue + } + + who := taskResult.who + request := taskResult.request + response := taskResult.response + + if request.Direction == network.Descending { + // reverse blocks before pre-validating and placing in ready queue + reverseBlockData(response.BlockData) + } + + err := validateResponseFields(request.RequestedData, response.BlockData) + if err != nil { + logger.Criticalf("validating fields: %s", err) + // TODO: check the reputation change for nil body in response + // and nil justification in response + if errors.Is(err, errNilHeaderInResponse) { + cs.network.ReportPeer(peerset.ReputationChange{ + Value: peerset.IncompleteHeaderValue, + Reason: peerset.IncompleteHeaderReason, + }, who) + } + + cs.workerPool.punishPeer(taskResult.who) + cs.workerPool.submitRequest(taskResult.request, workersResults) + continue taskResultLoop + } + + isChain := isResponseAChain(response.BlockData) + if !isChain { + logger.Criticalf("response from %s is not a chain", who) + cs.workerPool.punishPeer(taskResult.who) + cs.workerPool.submitRequest(taskResult.request, workersResults) + continue taskResultLoop + } + + for _, blockInResponse := range response.BlockData { + if slices.Contains(cs.badBlocks, blockInResponse.Hash.String()) { + logger.Criticalf("%s sent a known bad block: %s (#%d)", + who, blockInResponse.Hash.String(), blockInResponse.Number()) + + cs.network.ReportPeer(peerset.ReputationChange{ + Value: peerset.BadBlockAnnouncementValue, + Reason: peerset.BadBlockAnnouncementReason, + }, who) + + cs.workerPool.ignorePeerAsWorker(taskResult.who) cs.workerPool.submitRequest(taskResult.request, workersResults) continue taskResultLoop } - for _, blockInResponse := range response.BlockData { - if slices.Contains(cs.badBlocks, 
blockInResponse.Hash.String()) { - logger.Criticalf("%s sent a known bad block: %s (#%d)", - who, blockInResponse.Hash.String(), blockInResponse.Number()) + blockExactIndex := blockInResponse.Header.Number - startAtBlock + syncingChain[blockExactIndex] = blockInResponse + } - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.BadBlockAnnouncementValue, - Reason: peerset.BadBlockAnnouncementReason, - }, who) + // we need to check if we've filled all positions + // otherwise we should wait for more responses + waitingBlocks -= uint32(len(response.BlockData)) - cs.workerPool.ignorePeerAsWorker(taskResult.who) - cs.workerPool.submitRequest(taskResult.request, workersResults) - continue taskResultLoop - } + // we received a response without the desired amount of blocks + // we should include a new request to retrieve the missing blocks + if len(response.BlockData) < int(*request.Max) { + difference := uint32(int(*request.Max) - len(response.BlockData)) + lastItem := response.BlockData[len(response.BlockData)-1] - blockExactIndex := blockInResponse.Header.Number - startAtBlock - syncingChain[blockExactIndex] = blockInResponse + startRequestNumber := uint32(lastItem.Header.Number + 1) + startAt, err := variadic.NewUint32OrHash(startRequestNumber) + if err != nil { + panic(err) } - // we need to check if we've filled all positions - // otherwise we should wait for more responses - waitingBlocks -= uint32(len(response.BlockData)) + taskResult.request = &network.BlockRequestMessage{ + RequestedData: network.BootstrapRequestData, + StartingBlock: *startAt, + Direction: network.Ascending, + Max: &difference, + } + cs.workerPool.submitRequest(taskResult.request, workersResults) + continue taskResultLoop } } + } - if len(syncingChain) >= 2 { - // ensure the acquired block set forms an actual chain - parentElement := syncingChain[0] - for _, element := range syncingChain[1:] { - if parentElement.Header.Hash() != element.Header.ParentHash { - panic(fmt.Sprintf("expected %s (#%d) be parent of %s (#%d)", - parentElement.Header.Hash(), parentElement.Header.Number, - element.Header.Hash(), element.Header.Number)) - } - parentElement = element + if len(syncingChain) >= 2 { + // ensure the acquired block set forms an actual chain + parentElement := syncingChain[0] + for _, element := range syncingChain[1:] { + if parentElement.Header.Hash() != element.Header.ParentHash { + panic(fmt.Sprintf("expected %s (#%d) be parent of %s (#%d)", + parentElement.Header.Hash(), parentElement.Header.Number, + element.Header.Hash(), element.Header.Number)) } + parentElement = element } + } - retreiveBlocksSeconds := time.Since(startTime).Seconds() - logger.Debugf("🔽 retrieved %d blocks, took: %.2f seconds, starting process...", - expectedSyncedBlocks, retreiveBlocksSeconds) + retreiveBlocksSeconds := time.Since(startTime).Seconds() + logger.Debugf("🔽 retrieved %d blocks, took: %.2f seconds, starting process...", + expectedSyncedBlocks, retreiveBlocksSeconds) - // response was validated! place into ready block queue - for _, bd := range syncingChain { - // block is ready to be processed! - if err := cs.handleReadyBlock(bd); err != nil { - logger.Criticalf("error while handling a ready block: %s", err) - errCh <- err - return - } + // response was validated! place into ready block queue + for _, bd := range syncingChain { + // block is ready to be processed! 
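
// Aside: a standalone sketch of the follow-up-request arithmetic handled in
// the task-result loop above, assuming (as the loop does) that a response
// fills a contiguous prefix of the requested range. The helper name
// nextRequestStart is hypothetical; the numbers in the comment mirror the
// partial-response test added later in this series (97 of 128 blocks
// received, ending at #97, yields a follow-up ascending request of
// 31 blocks starting at #98).
func nextRequestStart(requestMax uint32, received []*types.BlockData) (startAt uint, remaining uint32, needMore bool) {
	if uint32(len(received)) >= requestMax {
		return 0, 0, false // the request was fully answered
	}
	lastReceived := received[len(received)-1]
	return lastReceived.Header.Number + 1, requestMax - uint32(len(received)), true
}
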
+ if err := cs.handleReadyBlock(bd); err != nil { + return fmt.Errorf("while handling ready block: %w", err) } - }() - - return errCh + } + return nil } func (cs *chainSync) handleReadyBlock(bd *types.BlockData) error { diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index 589fe6eb38..1c109b407c 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -449,7 +449,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker(t *testing.T) { stopCh := make(chan struct{}) go cs.workerPool.listenForRequests(stopCh) - err = cs.executeBootstrapSync(mockedGenesisHeader) + err = cs.requestMaxBlocksFrom(mockedGenesisHeader) require.NoError(t, err) close(stopCh) @@ -527,7 +527,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) { stopCh := make(chan struct{}) go cs.workerPool.listenForRequests(stopCh) - err = cs.executeBootstrapSync(mockedGenesisHeader) + err = cs.requestMaxBlocksFrom(mockedGenesisHeader) require.NoError(t, err) close(stopCh) @@ -632,7 +632,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorkerFailing(t *testing. stopCh := make(chan struct{}) go cs.workerPool.listenForRequests(stopCh) - err = cs.executeBootstrapSync(mockedGenesisHeader) + err = cs.requestMaxBlocksFrom(mockedGenesisHeader) require.NoError(t, err) close(stopCh) @@ -749,7 +749,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithProtocolNotSupported(t *test stopCh := make(chan struct{}) go cs.workerPool.listenForRequests(stopCh) - err = cs.executeBootstrapSync(mockedGenesisHeader) + err = cs.requestMaxBlocksFrom(mockedGenesisHeader) require.NoError(t, err) close(stopCh) @@ -867,7 +867,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithNilHeaderInResponse(t *testi stopCh := make(chan struct{}) go cs.workerPool.listenForRequests(stopCh) - err = cs.executeBootstrapSync(mockedGenesisHeader) + err = cs.requestMaxBlocksFrom(mockedGenesisHeader) require.NoError(t, err) close(stopCh) @@ -982,7 +982,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithResponseIsNotAChain(t *testi stopCh := make(chan struct{}) go cs.workerPool.listenForRequests(stopCh) - err = cs.executeBootstrapSync(mockedGenesisHeader) + err = cs.requestMaxBlocksFrom(mockedGenesisHeader) require.NoError(t, err) close(stopCh) @@ -1101,7 +1101,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing. 
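
// Aside: a compact sketch of the stop/done channel idiom these tests drive
// with close(stopCh) followed by <-cs.workerPool.doneCh. Simplified
// assumption: the real pool also multiplexes per-peer workers; here a single
// loop serves queued tasks until told to stop, then signals completion.
func listenUntilStopped(stopCh <-chan struct{}, taskCh <-chan func(), doneCh chan<- struct{}) {
	defer close(doneCh) // lets callers block until shutdown completes
	for {
		select {
		case <-stopCh:
			return
		case task := <-taskCh:
			task() // e.g. dispatch one block request to an available peer
		}
	}
}
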
 	stopCh := make(chan struct{})
 	go cs.workerPool.listenForRequests(stopCh)
 
-	err = cs.executeBootstrapSync(mockedGenesisHeader)
+	err = cs.requestMaxBlocksFrom(mockedGenesisHeader)
 	require.NoError(t, err)
 
 	close(stopCh)
diff --git a/dot/sync/errors.go b/dot/sync/errors.go
index 4d9862f3ca..564c878422 100644
--- a/dot/sync/errors.go
+++ b/dot/sync/errors.go
@@ -20,7 +20,7 @@ var (
 	errRequestStartTooHigh = errors.New("request start number is higher than our best block")
 
 	// chainSync errors
-	errUnableToGetTarget     = errors.New("unable to get target")
+	errNoPeerViews           = errors.New("unable to get target")
 	errNilBlockData          = errors.New("block data is nil")
 	errNilHeaderInResponse   = errors.New("expected header, received none")
 	errNilBodyInResponse     = errors.New("expected body, received none")

From 7bbebfdf72584b8cb12c3f80ebdf278068576f58 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Tue, 4 Jul 2023 15:41:25 -0400
Subject: [PATCH 088/140] chore: punish only if it is a best block, ignore key
 not found

---
 dot/sync/syncer.go | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/dot/sync/syncer.go b/dot/sync/syncer.go
index e4095a36f7..39fc870f33 100644
--- a/dot/sync/syncer.go
+++ b/dot/sync/syncer.go
@@ -4,12 +4,15 @@
 package sync
 
 import (
+	"errors"
 	"fmt"
 	"time"
 
+	"github.com/ChainSafe/chaindb"
 	"github.com/ChainSafe/gossamer/dot/network"
 	"github.com/ChainSafe/gossamer/dot/peerset"
 	"github.com/ChainSafe/gossamer/dot/types"
+	"github.com/ChainSafe/gossamer/lib/common"
 	"github.com/ChainSafe/gossamer/internal/log"
 	"github.com/libp2p/go-libp2p/core/peer"
@@ -108,7 +111,11 @@ func (s *Service) HandleBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMe
 	// TODO: check what happens when get hash by number returns nothing or ErrNotExists
 	ourHash, err := s.blockState.GetHashByNumber(blockAnnounceHeader.Number)
 	if err != nil {
-		return fmt.Errorf("get block hash by number: %w", err)
+		if errors.Is(err, chaindb.ErrKeyNotFound) {
+			ourHash = common.Hash{}
+		} else {
+			return fmt.Errorf("get block hash by number: %w", err)
+		}
 	}
 
 	if ourHash == blockAnnounceHeaderHash {
@@ -126,7 +133,7 @@ func (s *Service) HandleBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMe
 	// their block hash doesn't match ours for that number (ie. they are on a different
 	// chain), and also the highest finalised block is higher than that number.
 	// thus the peer is on an invalid chain
-	if fin.Number >= blockAnnounceHeader.Number {
+	if fin.Number >= blockAnnounceHeader.Number && msg.BestBlock {
 		// TODO: downscore this peer, or temporarily don't sync from them? 
(#1399) // perhaps we need another field in `peerState` to mark whether the state is valid or not s.network.ReportPeer(peerset.ReputationChange{ From 990b9c9480a130ba9acfca3f057177c9be7c9c53 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 5 Jul 2023 10:08:56 -0400 Subject: [PATCH 089/140] chore: add license --- dot/rpc/modules/gomock_reflect_2744729207/prog.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/dot/rpc/modules/gomock_reflect_2744729207/prog.go b/dot/rpc/modules/gomock_reflect_2744729207/prog.go index 1da01d477b..7e97fcd1cc 100644 --- a/dot/rpc/modules/gomock_reflect_2744729207/prog.go +++ b/dot/rpc/modules/gomock_reflect_2744729207/prog.go @@ -1,3 +1,6 @@ +// Copyright 2023 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + package main From 3c96c08b9ccc4379fe0a775a1ccb3092817b5d82 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 5 Jul 2023 10:14:52 -0400 Subject: [PATCH 090/140] chore: remove unneeded nil check and fix `lll` lint warn --- dot/sync/chain_sync.go | 11 ++++++++--- dot/sync/worker_pool.go | 6 +----- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 9a840a9697..2987f7e7ba 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -343,7 +343,10 @@ func (cs *chainSync) requestAnnouncedBlock(announce announcedBlock) error { return cs.requestForkBlocks(bestBlockHeader, highestFinalizedHeader, announce.header, announce.who) } - cs.requestChainBlocks(announce.header, bestBlockHeader, peerWhoAnnounced) + err = cs.requestChainBlocks(announce.header, bestBlockHeader, peerWhoAnnounced) + if err != nil { + return fmt.Errorf("requesting chain blocks: %w", err) + } highestFinalizedHeader, err := cs.blockState.GetHighestFinalisedHeader() if err != nil { @@ -358,14 +361,16 @@ func (cs *chainSync) requestAnnouncedBlock(announce announcedBlock) error { return nil } -func (cs *chainSync) requestChainBlocks(announcedHeader, bestBlockHeader *types.Header, peerWhoAnnounced peer.ID) error { +func (cs *chainSync) requestChainBlocks(announcedHeader, bestBlockHeader *types.Header, + peerWhoAnnounced peer.ID) error { gapLength := uint32(announcedHeader.Number - bestBlockHeader.Number) startAtBlock := announcedHeader.Number totalBlocks := uint32(1) var request *network.BlockRequestMessage if gapLength > 1 { - request = network.NewDescendingBlockRequest(announcedHeader.Hash(), gapLength, network.BootstrapRequestData) + request = network.NewDescendingBlockRequest(announcedHeader.Hash(), gapLength, + network.BootstrapRequestData) startAtBlock = announcedHeader.Number - uint(*request.Max) + 1 totalBlocks = *request.Max diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 02c358bebc..3ce7c576dd 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -267,11 +267,7 @@ func (s *syncWorkerPool) executeRequest(who peer.ID, task *syncTask) { logger.Debugf("[EXECUTING] worker %s, block request: %s", who, request) response := new(network.BlockResponseMessage) err := s.requestMaker.Do(who, request, response) - if err != nil { - logger.Debugf("[FINISHED] worker %s, err: %s", who, err) - } else if response != nil { - logger.Debugf("[FINISHED] worker %s, block data amount: %d", who, len(response.BlockData)) - } + logger.Debugf("[FINISHED] worker %s, err: %s, block data amount: %d", who, err, len(response.BlockData)) s.mtx.Lock() peerSync, has := s.workers[who] From 8d712ca9116f76a8b9e6fc2af4112903aab3cf63 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior 
Date: Wed, 5 Jul 2023 14:13:12 -0400 Subject: [PATCH 091/140] chore: fix `dot/sync` integration tests --- dot/network/message.go | 2 +- dot/sync/chain_sync.go | 38 +++++++++++++++++++------------------ dot/sync/chain_sync_test.go | 12 +----------- 3 files changed, 22 insertions(+), 30 deletions(-) diff --git a/dot/network/message.go b/dot/network/message.go index 21a7b4ab15..1006438a80 100644 --- a/dot/network/message.go +++ b/dot/network/message.go @@ -416,7 +416,7 @@ func NewAscedingBlockRequests(startNumber, targetNumber uint, requestedData byte reqs := make([]*BlockRequestMessage, numRequests) for i := uint(0); i < numRequests; i++ { - max := uint32(MaxBlockResponseSize) + max := uint32(MaxBlocksInResponse) lastIteration := numRequests - 1 if i == lastIteration && missingBlocks != 0 { diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 2987f7e7ba..d0a1e0e229 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -227,6 +227,26 @@ func (cs *chainSync) bootstrapSync() { default: } + // TODO: move the syncing logs to a better place + finalisedHeader, err := cs.blockState.GetHighestFinalisedHeader() + if err != nil { + logger.Warnf("getting highest finalized header: %w", err) + } else { + syncTarget, err := cs.getTarget() + if err != nil { + logger.Warnf("getting target: %w", err) + } else { + logger.Infof( + "🚣 currently syncing, %d peers connected, "+ + "%d available workers, "+ + "target block number %d, "+ + "finalised block number %d with hash %s", + len(cs.network.Peers()), + cs.workerPool.totalWorkers(), + syncTarget, finalisedHeader.Number, finalisedHeader.Hash()) + } + } + isFarFromTarget, err := cs.isFarFromTarget() if err != nil && !errors.Is(err, errNoPeerViews) { logger.Criticalf("ending bootstrap sync, checking target distance: %s", err) @@ -566,24 +586,6 @@ func (cs *chainSync) getTarget() (uint, error) { // TODO: handle only justification requests func (cs *chainSync) handleWorkersResults( workersResults chan *syncTaskResult, startAtBlock uint, expectedSyncedBlocks uint32) error { - syncTarget, err := cs.getTarget() - if err != nil { - logger.Warnf("getting target: %w", err) - } - - finalisedHeader, err := cs.blockState.GetHighestFinalisedHeader() - if err != nil { - return fmt.Errorf("getting finalised block header: %w", err) - } - - logger.Infof( - "🚣 currently syncing, %d peers connected, "+ - "%d available workers, "+ - "target block number %d, "+ - "finalised block number %d with hash %s", - len(cs.network.Peers()), - cs.workerPool.totalWorkers(), - syncTarget, finalisedHeader.Number, finalisedHeader.Hash()) startTime := time.Now() defer func() { diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index 5c3f635986..153349e40e 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -395,6 +395,7 @@ func setupChainSyncToBootstrapMode(t *testing.T, blocksAhead uint, chainSync := newChainSync(cfg) chainSync.peerView = peerViewMap + chainSync.state.Store(bootstrap) return chainSync } @@ -431,8 +432,6 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker(t *testing.T) { return nil }) - mockedNetwork.EXPECT().AllConnectedPeersID().Return([]peer.ID{}) - mockedBlockState := NewMockBlockState(ctrl) mockedBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) @@ -532,7 +531,6 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) { return nil }) - mockNetwork.EXPECT().AllConnectedPeersID().Return([]peer.ID{}) // setup a chain sync which 
holds in its peer view map // 3 peers, each one announce block 129 as its best block number. // We start this test with genesis block being our best block, so @@ -642,7 +640,6 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorkerFailing(t *testing. return nil }).Times(3) - mockNetwork.EXPECT().AllConnectedPeersID().Return([]peer.ID{}) // setup a chain sync which holds in its peer view map // 3 peers, each one announce block 129 as its best block number. // We start this test with genesis block being our best block, so @@ -757,8 +754,6 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithProtocolNotSupported(t *test return nil }).Times(3) - mockNetwork.EXPECT().AllConnectedPeersID().Return([]peer.ID{}) - // since peer.ID("bob") will fail with protocols not supported his // reputation will be affected and mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{ @@ -882,8 +877,6 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithNilHeaderInResponse(t *testi return nil }).Times(3) - mockNetwork.EXPECT().AllConnectedPeersID().Return([]peer.ID{}) - // since peer.ID("bob") will fail with protocols not supported his // reputation will be affected and mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{ @@ -1010,8 +1003,6 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithResponseIsNotAChain(t *testi return nil }).Times(3) - mockNetwork.EXPECT().AllConnectedPeersID().Return([]peer.ID{}) - // setup a chain sync which holds in its peer view map // 3 peers, each one announce block 129 as its best block number. // We start this test with genesis block being our best block, so @@ -1130,7 +1121,6 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing. return nil }).Times(3) - mockNetwork.EXPECT().AllConnectedPeersID().Return([]peer.ID{}) mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{ Value: peerset.BadBlockAnnouncementValue, Reason: peerset.BadBlockAnnouncementReason, From 4a1905e4a24817bb79c1c955aa070434a707ca7c Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 5 Jul 2023 14:42:53 -0400 Subject: [PATCH 092/140] chore: fix `TestAscendingBlockRequest` test --- dot/network/message_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dot/network/message_test.go b/dot/network/message_test.go index 53f6c25d14..095f5a1a82 100644 --- a/dot/network/message_test.go +++ b/dot/network/message_test.go @@ -426,7 +426,7 @@ func TestDecodeConsensusMessage(t *testing.T) { func TestAscendingBlockRequest(t *testing.T) { one := uint32(1) three := uint32(3) - maxResponseSize := uint32(MaxBlockResponseSize) + maxResponseSize := uint32(MaxBlocksInResponse) cases := map[string]struct { startNumber, targetNumber uint expectedBlockRequestMessage []*BlockRequestMessage @@ -537,7 +537,7 @@ func TestAscendingBlockRequest(t *testing.T) { t.Run(tname, func(t *testing.T) { requests := NewAscedingBlockRequests(tt.startNumber, tt.targetNumber, BootstrapRequestData) - require.Equal(t, requests, tt.expectedBlockRequestMessage) + require.Equal(t, tt.expectedBlockRequestMessage, requests) }) } } From 9e2acb39704ed849f6bb29d196c3ef8947aa64df Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 5 Jul 2023 20:02:06 -0400 Subject: [PATCH 093/140] chore: fix `TestNewNode` test --- dot/node_integration_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/dot/node_integration_test.go b/dot/node_integration_test.go index 691f281fda..be7352a1f7 100644 --- a/dot/node_integration_test.go +++ b/dot/node_integration_test.go 
@@ -57,7 +57,6 @@ func TestNewNode(t *testing.T) { initConfig.Account.Key = "alice" initConfig.Core.Role = common.FullNodeRole initConfig.Core.WasmInterpreter = wasmer.Name - initConfig.Log.Digest = "critical" networkConfig := &network.Config{ @@ -86,7 +85,7 @@ func TestNewNode(t *testing.T) { assert.NoError(t, err) mockServiceRegistry := NewMockServiceRegisterer(ctrl) - mockServiceRegistry.EXPECT().RegisterService(gomock.Any()).Times(8) + mockServiceRegistry.EXPECT().RegisterService(gomock.Any()).Times(9) m := NewMocknodeBuilderIface(ctrl) m.EXPECT().isNodeInitialised(initConfig.BasePath).Return(nil) From 1610978023506b1536361d922387c07f9ac600f2 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 5 Jul 2023 20:25:37 -0400 Subject: [PATCH 094/140] chore: fix `TestStartStopNode` test --- dot/node_integration_test.go | 1 + dot/sync/chain_sync.go | 7 ++----- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/dot/node_integration_test.go b/dot/node_integration_test.go index be7352a1f7..55e489c21f 100644 --- a/dot/node_integration_test.go +++ b/dot/node_integration_test.go @@ -324,6 +324,7 @@ func TestStartStopNode(t *testing.T) { config.ChainSpec = genFile config.Core.GrandpaAuthority = false config.Core.BabeAuthority = false + config.Network.MinPeers = 0 err := InitNode(config) require.NoError(t, err) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index d0a1e0e229..0d0975ef5f 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -170,16 +170,13 @@ func newChainSync(cfg chainSyncConfig) *chainSync { func (cs *chainSync) start() { // wait until we have a minimal workers in the sync worker pool - // and we have a clear target otherwise just wait for { - _, err := cs.getTarget() totalAvailable := cs.workerPool.totalWorkers() - - if err == nil && totalAvailable >= uint(cs.minPeers) { + if totalAvailable >= uint(cs.minPeers) { break } - time.Sleep(time.Millisecond * 100) + time.Sleep(time.Second) } isSyncedGauge.Set(0) From bb6e4d758ab9f568de6eedff1da8d1be89cb1677 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 6 Jul 2023 10:57:01 -0400 Subject: [PATCH 095/140] chore: include `TestChainSync_BootstrapSync_SucessfulSync_ReceivedPartialBlockData` test --- dot/network/message.go | 1 + dot/sync/chain_sync.go | 5 +++ dot/sync/chain_sync_test.go | 89 +++++++++++++++++++++++++++++++++++++ 3 files changed, 95 insertions(+) diff --git a/dot/network/message.go b/dot/network/message.go index 1006438a80..965748db4d 100644 --- a/dot/network/message.go +++ b/dot/network/message.go @@ -388,6 +388,7 @@ func NewAscedingBlockRequests(startNumber, targetNumber uint, requestedData byte } diff := targetNumber - startNumber + fmt.Printf("target number: %d, start number: %d = %d\n", targetNumber, startNumber, diff) // start and end block are the same, just request 1 block if diff == 0 { diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 0d0975ef5f..80209647d5 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -534,13 +534,18 @@ func (cs *chainSync) requestMaxBlocksFrom(bestBlockHeader *types.Header) error { requests := network.NewAscedingBlockRequests(startRequestAt, targetBlockNumber, network.BootstrapRequestData) + fmt.Printf("===> amount of requests: %d\n", len(requests)) + var expectedAmountOfBlocks uint32 for _, request := range requests { if request.Max != nil { + fmt.Printf("===> request max: %d\n", *request.Max) expectedAmountOfBlocks += *request.Max } } + fmt.Printf("===> expected amount of blocks: %d\n", 
expectedAmountOfBlocks)
 
 	resultsQueue := make(chan *syncTaskResult)
 	cs.workerPool.submitRequests(requests, resultsQueue)
 
diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go
index 153349e40e..9d974f744d 100644
--- a/dot/sync/chain_sync_test.go
+++ b/dot/sync/chain_sync_test.go
@@ -1165,6 +1165,95 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing.
 	require.True(t, ok)
 }
 
+func TestChainSync_BootstrapSync_SucessfulSync_ReceivedPartialBlockData(t *testing.T) {
+	t.Parallel()
+
+	ctrl := gomock.NewController(t)
+	mockBlockState := NewMockBlockState(ctrl)
+	mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo))
+	mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash,
+		trie.EmptyHash, 0, types.NewDigest())
+
+	mockNetwork := NewMockNetwork(ctrl)
+	mockRequestMaker := NewMockRequestMaker(ctrl)
+
+	mockBabeVerifier := NewMockBabeVerifier(ctrl)
+	mockStorageState := NewMockStorageState(ctrl)
+	mockImportHandler := NewMockBlockImportHandler(ctrl)
+	mockTelemetry := NewMockTelemetry(ctrl)
+
+	// create a set of 128 blocks
+	blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 128)
+	const announceBlock = false
+
+	// the worker will return only part of the set
+	worker1Response := &network.BlockResponseMessage{
+		BlockData: blockResponse.BlockData[:97],
+	}
+
+	// the first peer will respond with blocks 1 through 97, so ensureSuccessfulBlockImportFlow
+	// will set up the expectations starting from the genesis header up to block 97
+	ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState,
+		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock)
+
+	worker1MissingBlocksResponse := &network.BlockResponseMessage{
+		BlockData: blockResponse.BlockData[97:],
+	}
+
+	// last item from the previous response
+	parent := worker1Response.BlockData[96]
+	ensureSuccessfulBlockImportFlow(t, parent.Header, worker1MissingBlocksResponse.BlockData, mockBlockState,
+		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock)
+
+	doBlockRequestCount := 0
+	mockRequestMaker.EXPECT().
+		Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}).
+		DoAndReturn(func(peerID, _, response any) any {
+			// let's ensure that the DoBlockRequest is called by
+			// peer.ID(alice). 
The first call will return only 97 blocks
+			// the handler should issue another call to retrieve the missing blocks
+			pID := peerID.(peer.ID) // cast to peer ID
+			require.Equalf(t, pID, peer.ID("alice"),
+				"expected the call to be made by %s, got: %s", peer.ID("alice"), pID)
+
+			responsePtr := response.(*network.BlockResponseMessage)
+			defer func() { doBlockRequestCount++ }()
+
+			if doBlockRequestCount == 0 {
+				*responsePtr = *worker1Response
+				return nil
+			}
+
+			*responsePtr = *worker1MissingBlocksResponse
+			return nil
+		}).Times(2)
+
+	const blocksAhead = 256
+	cs := setupChainSyncToBootstrapMode(t, blocksAhead,
+		mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier,
+		mockStorageState, mockImportHandler, mockTelemetry)
+
+	target, err := cs.getTarget()
+	require.NoError(t, err)
+	require.Equal(t, uint(blocksAhead), target)
+
+	cs.workerPool.fromBlockAnnounce(peer.ID("alice"))
+
+	stopCh := make(chan struct{})
+	go cs.workerPool.listenForRequests(stopCh)
+
+	err = cs.requestMaxBlocksFrom(mockedGenesisHeader)
+	require.NoError(t, err)
+
+	close(stopCh)
+	<-cs.workerPool.doneCh
+
+	require.Len(t, cs.workerPool.workers, 1)
+
+	_, ok := cs.workerPool.workers[peer.ID("alice")]
+	require.True(t, ok)
+}
+
 func createSuccesfullBlockResponse(_ *testing.T, genesisHash common.Hash,
 	startingAt, numBlocks int) *network.BlockResponseMessage {
 	response := new(network.BlockResponseMessage)

From f1f00786203c529bb909f2fcb5fe97790d0de73b Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Tue, 11 Jul 2023 08:34:48 -0400
Subject: [PATCH 096/140] chore: address comments

---
 chain/westend/defaults.go                          |   8 --
 dot/network/message.go                             |  37 +-----
 .../modules/gomock_reflect_2744729207/prog.go      |  87 -------------
 dot/sync/chain_sync.go                             | 121 ++++++++----------
 dot/sync/chain_sync_test.go                        |  10 +-
 dot/sync/mock_chain_sync_test.go                   |  28 ++--
 dot/sync/syncer.go                                 |   3 +-
 dot/sync/syncer_test.go                            |   4 +-
 dot/sync/worker_pool_test.go                       |  10 +-
 lib/trie/database.go                               |   4 -
 10 files changed, 86 insertions(+), 226 deletions(-)
 delete mode 100644 dot/rpc/modules/gomock_reflect_2744729207/prog.go

diff --git a/chain/westend/defaults.go b/chain/westend/defaults.go
index 6603e25d54..f6dcb2bfe5 100644
--- a/chain/westend/defaults.go
+++ b/chain/westend/defaults.go
@@ -29,14 +29,6 @@ func DefaultConfig() *cfg.Config {
 	config.Core.GrandpaAuthority = false
 	config.Core.Role = 1
 	config.Network.NoMDNS = false
-	config.Network.MaxPeers = 128
-	config.PrometheusExternal = true
-	config.PrometheusPort = 9876
-	config.Log.Sync = "trace"
-	config.Log.Digest = "trace"
-
-	config.Pprof.Enabled = true
-	config.Pprof.ListeningAddress = "0.0.0.0:6060"
 
 	return config
 }
diff --git a/dot/network/message.go b/dot/network/message.go
index 965748db4d..a0d52da91f 100644
--- a/dot/network/message.go
+++ b/dot/network/message.go
@@ -17,7 +17,7 @@ import (
 	"github.com/ChainSafe/gossamer/pkg/scale"
 )
 
-// maxResponseSize is maximum number of block data a BlockResponse message can contain
+// MaxBlocksInResponse is the maximum number of block data a BlockResponse message can contain
 const MaxBlocksInResponse = 128
 
 type MessageType byte
@@ -361,23 +361,11 @@ func (cm *ConsensusMessage) Hash() (common.Hash, error) {
 	return common.Blake2bHash(encMsg)
 }
 
-// NewSingleBlockRequestMessage returns a request to retrieve a single block
-func NewSingleBlockRequestMessage(blockHash common.Hash, requestedData byte) *BlockRequestMessage {
-	one := uint32(1)
+func NewBlockRequest(startingBlock variadic.Uint32OrHash, amount uint32, requestedData byte, direction SyncDirection) 
*BlockRequestMessage { return &BlockRequestMessage{ RequestedData: requestedData, - StartingBlock: *variadic.MustNewUint32OrHash(blockHash), - Direction: Descending, - Max: &one, - } -} - -// NewDescendingBlockRequest returns a descending block request message -func NewDescendingBlockRequest(blockHash common.Hash, amount uint32, requestedData byte) *BlockRequestMessage { - return &BlockRequestMessage{ - RequestedData: requestedData, - StartingBlock: *variadic.MustNewUint32OrHash(blockHash), - Direction: Descending, + StartingBlock: startingBlock, + Direction: direction, Max: &amount, } } @@ -388,18 +376,11 @@ func NewAscedingBlockRequests(startNumber, targetNumber uint, requestedData byte } diff := targetNumber - startNumber - fmt.Printf("target number: %d, start number: %d = %d\n", targetNumber, startNumber, diff) // start and end block are the same, just request 1 block if diff == 0 { - one := uint32(1) return []*BlockRequestMessage{ - { - RequestedData: requestedData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(startNumber)), - Direction: Ascending, - Max: &one, - }, + NewBlockRequest(*variadic.MustNewUint32OrHash(uint32(startNumber)), 1, requestedData, Ascending), } } @@ -425,13 +406,7 @@ func NewAscedingBlockRequests(startNumber, targetNumber uint, requestedData byte } start := variadic.MustNewUint32OrHash(startNumber) - - reqs[i] = &BlockRequestMessage{ - RequestedData: requestedData, - StartingBlock: *start, - Direction: Ascending, - Max: &max, - } + reqs[i] = NewBlockRequest(*start, max, requestedData, Ascending) startNumber += uint(max) } diff --git a/dot/rpc/modules/gomock_reflect_2744729207/prog.go b/dot/rpc/modules/gomock_reflect_2744729207/prog.go deleted file mode 100644 index 7e97fcd1cc..0000000000 --- a/dot/rpc/modules/gomock_reflect_2744729207/prog.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2023 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - - -package main - -import ( - "encoding/gob" - "flag" - "fmt" - "os" - "path" - "reflect" - - "github.com/golang/mock/mockgen/model" - - pkg_ "github.com/ChainSafe/gossamer/dot/rpc/modules" -) - -var output = flag.String("output", "", "The output file name, or empty to use stdout.") - -func main() { - flag.Parse() - - its := []struct{ - sym string - typ reflect.Type - }{ - - { "StorageAPI", reflect.TypeOf((*pkg_.StorageAPI)(nil)).Elem()}, - - { "BlockAPI", reflect.TypeOf((*pkg_.BlockAPI)(nil)).Elem()}, - - { "NetworkAPI", reflect.TypeOf((*pkg_.NetworkAPI)(nil)).Elem()}, - - { "BlockProducerAPI", reflect.TypeOf((*pkg_.BlockProducerAPI)(nil)).Elem()}, - - { "TransactionStateAPI", reflect.TypeOf((*pkg_.TransactionStateAPI)(nil)).Elem()}, - - { "CoreAPI", reflect.TypeOf((*pkg_.CoreAPI)(nil)).Elem()}, - - { "SystemAPI", reflect.TypeOf((*pkg_.SystemAPI)(nil)).Elem()}, - - { "BlockFinalityAPI", reflect.TypeOf((*pkg_.BlockFinalityAPI)(nil)).Elem()}, - - { "RuntimeStorageAPI", reflect.TypeOf((*pkg_.RuntimeStorageAPI)(nil)).Elem()}, - - { "SyncStateAPI", reflect.TypeOf((*pkg_.SyncStateAPI)(nil)).Elem()}, - - } - pkg := &model.Package{ - // NOTE: This behaves contrary to documented behaviour if the - // package name is not the final component of the import path. - // The reflect package doesn't expose the package name, though. 
- Name: path.Base("github.com/ChainSafe/gossamer/dot/rpc/modules"), - } - - for _, it := range its { - intf, err := model.InterfaceFromInterfaceType(it.typ) - if err != nil { - fmt.Fprintf(os.Stderr, "Reflection: %v\n", err) - os.Exit(1) - } - intf.Name = it.sym - pkg.Interfaces = append(pkg.Interfaces, intf) - } - - outfile := os.Stdout - if len(*output) != 0 { - var err error - outfile, err = os.Create(*output) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to open output file %q", *output) - } - defer func() { - if err := outfile.Close(); err != nil { - fmt.Fprintf(os.Stderr, "failed to close output file %q", *output) - os.Exit(1) - } - }() - } - - if err := gob.NewEncoder(outfile).Encode(pkg); err != nil { - fmt.Fprintf(os.Stderr, "gob encode: %v\n", err) - os.Exit(1) - } -} diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 80209647d5..bdba82f987 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -28,7 +28,7 @@ import ( "github.com/ChainSafe/gossamer/lib/common/variadic" ) -var _ ChainSync = &chainSync{} +var _ ChainSync = (*chainSync)(nil) type chainSyncState byte @@ -78,8 +78,8 @@ type ChainSync interface { // called upon receiving a BlockAnnounceHandshake setPeerHead(p peer.ID, hash common.Hash, number uint) - // syncState returns the current syncing state - syncState() chainSyncState + // getSyncMode returns the current syncing state + getSyncMode() chainSyncState // getHighestBlock returns the highest block or an error getHighestBlock() (highestBlock uint, err error) @@ -110,7 +110,7 @@ type chainSync struct { // note: the block may have empty fields, as some data about it may be unknown pendingBlocks DisjointBlockSet - state atomic.Value + syncMode atomic.Value finalisedCh <-chan *types.FinalisationInfo @@ -158,7 +158,7 @@ func newChainSync(cfg chainSyncConfig) *chainSync { network: cfg.net, peerView: make(map[peer.ID]*peerView), pendingBlocks: cfg.pendingBlocks, - state: atomicState, + syncMode: atomicState, finalisedCh: cfg.bs.GetFinalisedNotifierChannel(), minPeers: cfg.minPeers, slotDuration: cfg.slotDuration, @@ -183,15 +183,8 @@ func (cs *chainSync) start() { go cs.pendingBlocks.run(cs.finalisedCh, cs.stopCh) go cs.workerPool.listenForRequests(cs.stopCh) - isFarFromTarget, err := cs.isFarFromTarget() - if err != nil && !errors.Is(err, errNoPeerViews) { - panic("failing while checking target distance: " + err.Error()) - } - - if isFarFromTarget { - cs.state.Store(bootstrap) - go cs.bootstrapSync() - } + cs.syncMode.Store(bootstrap) + go cs.bootstrapSync() } func (cs *chainSync) stop() { @@ -199,20 +192,20 @@ func (cs *chainSync) stop() { <-cs.workerPool.doneCh } -func (cs *chainSync) isFarFromTarget() (bool, error) { - syncTarget, err := cs.getTarget() +func (cs *chainSync) isFarFromTarget() (bestBlockHeader *types.Header, syncTarget uint, isFarFromTarget bool, err error) { + syncTarget, err = cs.getTarget() if err != nil { - return false, fmt.Errorf("getting target: %w", err) + return nil, syncTarget, false, fmt.Errorf("getting target: %w", err) } - bestBlockHeader, err := cs.blockState.BestBlockHeader() + bestBlockHeader, err = cs.blockState.BestBlockHeader() if err != nil { - return false, fmt.Errorf("getting best block header: %w", err) + return nil, syncTarget, false, fmt.Errorf("getting best block header: %w", err) } bestBlockNumber := bestBlockHeader.Number - isFarFromTarget := bestBlockNumber+network.MaxBlocksInResponse < syncTarget - return isFarFromTarget, nil + isFarFromTarget = bestBlockNumber+network.MaxBlocksInResponse < 
syncTarget + return bestBlockHeader, syncTarget, isFarFromTarget, nil } func (cs *chainSync) bootstrapSync() { @@ -224,56 +217,45 @@ func (cs *chainSync) bootstrapSync() { default: } - // TODO: move the syncing logs to a better place - finalisedHeader, err := cs.blockState.GetHighestFinalisedHeader() - if err != nil { - logger.Warnf("getting highest finalized header: %w", err) - } else { - syncTarget, err := cs.getTarget() - if err != nil { - logger.Warnf("getting target: %w", err) - } else { - logger.Infof( - "🚣 currently syncing, %d peers connected, "+ - "%d available workers, "+ - "target block number %d, "+ - "finalised block number %d with hash %s", - len(cs.network.Peers()), - cs.workerPool.totalWorkers(), - syncTarget, finalisedHeader.Number, finalisedHeader.Hash()) - } - } - - isFarFromTarget, err := cs.isFarFromTarget() + bestBlockHeader, syncTarget, isFarFromTarget, err := cs.isFarFromTarget() if err != nil && !errors.Is(err, errNoPeerViews) { logger.Criticalf("ending bootstrap sync, checking target distance: %s", err) return } - if isFarFromTarget { - bestBlockHeader, err := cs.blockState.BestBlockHeader() - if err != nil { - logger.Criticalf("getting best block header: %s", err) - return - } + finalisedHeader, err := cs.blockState.GetHighestFinalisedHeader() + if err != nil { + logger.Criticalf("getting highest finalized header: %w", err) + return + } + logger.Infof( + "🚣 currently syncing, %d peers connected, "+ + "%d available workers, "+ + "target block number %d, "+ + "finalised block number %d with hash %s", + len(cs.network.Peers()), + cs.workerPool.totalWorkers(), + syncTarget, finalisedHeader.Number, finalisedHeader.Hash()) + + if isFarFromTarget { cs.workerPool.useConnectedPeers() err = cs.requestMaxBlocksFrom(bestBlockHeader) if err != nil { - logger.Errorf("while executing bootsrap sync: %s", err) + logger.Errorf("requesting max blocks from best block header: %s", err) } } else { // we are less than 128 blocks behind the target we can use tip sync - cs.state.Store(tip) + cs.syncMode.Store(tip) isSyncedGauge.Set(1) - logger.Debugf("switched sync mode to %d", tip) + logger.Debugf("switched sync mode to %d", tip.String()) return } } } -func (cs *chainSync) syncState() chainSyncState { - return cs.state.Load().(chainSyncState) +func (cs *chainSync) getSyncMode() chainSyncState { + return cs.syncMode.Load().(chainSyncState) } // setPeerHead sets a peer's best known block @@ -301,12 +283,11 @@ func (cs *chainSync) onBlockAnnounce(announced announcedBlock) error { return fmt.Errorf("while adding pending block header: %w", err) } - syncState := cs.state.Load().(chainSyncState) - if syncState != tip { + if cs.getSyncMode() != tip { return nil } - isFarFromTarget, err := cs.isFarFromTarget() + _, _, isFarFromTarget, err := cs.isFarFromTarget() if err != nil && !errors.Is(err, errNoPeerViews) { return fmt.Errorf("checking target distance: %w", err) } @@ -316,9 +297,9 @@ func (cs *chainSync) onBlockAnnounce(announced announcedBlock) error { } // we are more than 128 blocks behind the head, switch to bootstrap - cs.state.Store(bootstrap) + cs.syncMode.Store(bootstrap) isSyncedGauge.Set(0) - logger.Debugf("switched sync mode to %d", bootstrap) + logger.Debugf("switched sync mode to %d", bootstrap.String()) go cs.bootstrapSync() return nil } @@ -385,16 +366,19 @@ func (cs *chainSync) requestChainBlocks(announcedHeader, bestBlockHeader *types. 
totalBlocks := uint32(1) var request *network.BlockRequestMessage + startingBlock := *variadic.MustNewUint32OrHash(announcedHeader.Hash()) + if gapLength > 1 { - request = network.NewDescendingBlockRequest(announcedHeader.Hash(), gapLength, - network.BootstrapRequestData) + request = network.NewBlockRequest(startingBlock, gapLength, + network.BootstrapRequestData, network.Descending) + startAtBlock = announcedHeader.Number - uint(*request.Max) + 1 totalBlocks = *request.Max logger.Debugf("received a block announce from %s, requesting %d blocks, descending request from %s (#%d)", peerWhoAnnounced, gapLength, announcedHeader.Hash(), announcedHeader.Number) } else { - request = network.NewSingleBlockRequestMessage(announcedHeader.Hash(), network.BootstrapRequestData) + request = network.NewBlockRequest(startingBlock, 1, network.BootstrapRequestData, network.Descending) logger.Debugf("received a block announce from %s, requesting a single block %s (#%d)", peerWhoAnnounced, announcedHeader.Hash(), announcedHeader.Number) } @@ -423,13 +407,14 @@ func (cs *chainSync) requestForkBlocks(bestBlockHeader, highestFinalizedHeader, startAtBlock := announcedHeader.Number announcedHash := announcedHeader.Hash() var request *network.BlockRequestMessage + startingBlock := *variadic.MustNewUint32OrHash(announcedHash) if parentExists { - request = network.NewSingleBlockRequestMessage(announcedHash, network.BootstrapRequestData) + request = network.NewBlockRequest(startingBlock, 1, network.BootstrapRequestData, network.Descending) } else { gapLength = uint32(announcedHeader.Number - highestFinalizedHeader.Number) startAtBlock = highestFinalizedHeader.Number + 1 - request = network.NewDescendingBlockRequest(announcedHash, gapLength, network.BootstrapRequestData) + request = network.NewBlockRequest(startingBlock, gapLength, network.BootstrapRequestData, network.Descending) } logger.Debugf("requesting %d fork blocks, starting at %s (#%d)", @@ -479,8 +464,8 @@ func (cs *chainSync) requestPendingBlocks(highestFinalizedHeader *types.Header) gapLength = 128 } - descendingGapRequest := network.NewDescendingBlockRequest(pendingBlock.hash, - uint32(gapLength), network.BootstrapRequestData) + descendingGapRequest := network.NewBlockRequest(*variadic.MustNewUint32OrHash(pendingBlock.hash), + uint32(gapLength), network.BootstrapRequestData, network.Descending) startAtBlock := pendingBlock.number - uint(*descendingGapRequest.Max) + 1 // the `requests` in the tip sync are not related necessarily @@ -534,8 +519,6 @@ func (cs *chainSync) requestMaxBlocksFrom(bestBlockHeader *types.Header) error { requests := network.NewAscedingBlockRequests(startRequestAt, targetBlockNumber, network.BootstrapRequestData) - fmt.Printf("===> amount of requests: %d\n", len(requests)) - var expectedAmountOfBlocks uint32 for _, request := range requests { if request.Max != nil { @@ -544,8 +527,6 @@ func (cs *chainSync) requestMaxBlocksFrom(bestBlockHeader *types.Header) error { } } - fmt.Printf("===> expected amount of blocks: %d\n", expectedAmountOfBlocks) - resultsQueue := make(chan *syncTaskResult) cs.workerPool.submitRequests(requests, resultsQueue) @@ -809,7 +790,7 @@ func (cs *chainSync) processBlockData(blockData types.BlockData) error { } // while in bootstrap mode we don't need to broadcast block announcements - announceImportedBlock := cs.state.Load().(chainSyncState) == tip + announceImportedBlock := cs.getSyncMode() == tip if headerInState && bodyInState { err = cs.processBlockDataWithStateHeaderAndBody(blockData, announceImportedBlock) 
if err != nil { diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index 9d974f744d..7943d26133 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -118,7 +118,7 @@ func Test_chainSync_onImportBlock(t *testing.T) { return &chainSync{ pendingBlocks: pendingBlocks, - state: state, + syncMode: state, } }, peerID: somePeer, @@ -145,8 +145,8 @@ func Test_chainSync_onImportBlock(t *testing.T) { GetHighestFinalisedHeader(). Return(block2AnnounceHeader, nil) - expectedRequest := network.NewSingleBlockRequestMessage(block2AnnounceHeader.Hash(), - network.BootstrapRequestData) + expectedRequest := network.NewBlockRequest(*variadic.MustNewUint32OrHash(block2AnnounceHeader.Hash()), + 1, network.BootstrapRequestData, network.Descending) fakeBlockBody := types.Body([]types.Extrinsic{}) mockedBlockResponse := &network.BlockResponseMessage{ @@ -188,7 +188,7 @@ func Test_chainSync_onImportBlock(t *testing.T) { return &chainSync{ pendingBlocks: pendingBlocksMock, - state: state, + syncMode: state, workerPool: workerPool, network: networkMock, blockState: blockStateMock, @@ -395,7 +395,7 @@ func setupChainSyncToBootstrapMode(t *testing.T, blocksAhead uint, chainSync := newChainSync(cfg) chainSync.peerView = peerViewMap - chainSync.state.Store(bootstrap) + chainSync.syncMode.Store(bootstrap) return chainSync } diff --git a/dot/sync/mock_chain_sync_test.go b/dot/sync/mock_chain_sync_test.go index ed52017a39..d7ce880044 100644 --- a/dot/sync/mock_chain_sync_test.go +++ b/dot/sync/mock_chain_sync_test.go @@ -50,6 +50,20 @@ func (mr *MockChainSyncMockRecorder) getHighestBlock() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getHighestBlock", reflect.TypeOf((*MockChainSync)(nil).getHighestBlock)) } +// getSyncMode mocks base method. +func (m *MockChainSync) getSyncMode() chainSyncState { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getSyncMode") + ret0, _ := ret[0].(chainSyncState) + return ret0 +} + +// getSyncMode indicates an expected call of getSyncMode. +func (mr *MockChainSyncMockRecorder) getSyncMode() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getSyncMode", reflect.TypeOf((*MockChainSync)(nil).getSyncMode)) +} + // onBlockAnnounce mocks base method. func (m *MockChainSync) onBlockAnnounce(arg0 announcedBlock) error { m.ctrl.T.Helper() @@ -99,17 +113,3 @@ func (mr *MockChainSyncMockRecorder) stop() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "stop", reflect.TypeOf((*MockChainSync)(nil).stop)) } - -// syncState mocks base method. -func (m *MockChainSync) syncState() chainSyncState { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "syncState") - ret0, _ := ret[0].(chainSyncState) - return ret0 -} - -// syncState indicates an expected call of syncState. 
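
// Aside: a hedged usage sketch of the regenerated mock, mirroring the
// syncer_test.go changes below; it assumes the usual gomock wiring from
// those tests, and the function name exampleIsSyncedWithMock is hypothetical.
func exampleIsSyncedWithMock(ctrl *gomock.Controller) bool {
	chainSync := NewMockChainSync(ctrl)
	chainSync.EXPECT().getSyncMode().Return(tip)
	service := Service{chainSync: chainSync}
	return service.IsSynced() // true, since the mocked mode is tip
}
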
-func (mr *MockChainSyncMockRecorder) syncState() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "syncState", reflect.TypeOf((*MockChainSync)(nil).syncState)) -} diff --git a/dot/sync/syncer.go b/dot/sync/syncer.go index d106bb1e2b..3afd28659a 100644 --- a/dot/sync/syncer.go +++ b/dot/sync/syncer.go @@ -109,7 +109,6 @@ func (s *Service) HandleBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMe if blockAnnounceHeader.Number <= bestBlockHeader.Number { // check if our block hash for that number is the same, if so, do nothing // as we already have that block - // TODO: check what happens when get hash by number returns nothing or ErrNotExists ourHash, err := s.blockState.GetHashByNumber(blockAnnounceHeader.Number) if err != nil && !errors.Is(err, chaindb.ErrKeyNotFound) { return fmt.Errorf("get block hash by number: %w", err) @@ -166,7 +165,7 @@ func (s *Service) HandleBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMe // IsSynced exposes the synced state func (s *Service) IsSynced() bool { - return s.chainSync.syncState() == tip + return s.chainSync.getSyncMode() == tip } // HighestBlock gets the highest known block number diff --git a/dot/sync/syncer_test.go b/dot/sync/syncer_test.go index ae14dbc52b..5e0573f805 100644 --- a/dot/sync/syncer_test.go +++ b/dot/sync/syncer_test.go @@ -307,7 +307,7 @@ func TestService_IsSynced(t *testing.T) { "tip": { serviceBuilder: func(ctrl *gomock.Controller) Service { chainSync := NewMockChainSync(ctrl) - chainSync.EXPECT().syncState().Return(tip) + chainSync.EXPECT().getSyncMode().Return(tip) return Service{ chainSync: chainSync, } @@ -317,7 +317,7 @@ func TestService_IsSynced(t *testing.T) { "not_tip": { serviceBuilder: func(ctrl *gomock.Controller) Service { chainSync := NewMockChainSync(ctrl) - chainSync.EXPECT().syncState().Return(bootstrap) + chainSync.EXPECT().getSyncMode().Return(bootstrap) return Service{ chainSync: chainSync, } diff --git a/dot/sync/worker_pool_test.go b/dot/sync/worker_pool_test.go index fecf69edd6..98028aa6c1 100644 --- a/dot/sync/worker_pool_test.go +++ b/dot/sync/worker_pool_test.go @@ -10,6 +10,7 @@ import ( "github.com/ChainSafe/gossamer/dot/network" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/common/variadic" "github.com/golang/mock/gomock" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" @@ -238,7 +239,8 @@ func TestSyncWorkerPool_listenForRequests_submitRequest(t *testing.T) { workerPool.newPeer(availablePeer) blockHash := common.MustHexToHash("0x750646b852a29e5f3668959916a03d6243a3137e91d0cd36870364931030f707") - blockRequest := network.NewSingleBlockRequestMessage(blockHash, network.BootstrapRequestData) + blockRequest := network.NewBlockRequest(*variadic.MustNewUint32OrHash(blockHash), + 1, network.BootstrapRequestData, network.Descending) mockedBlockResponse := &network.BlockResponseMessage{ BlockData: []*types.BlockData{ { @@ -298,10 +300,12 @@ func TestSyncWorkerPool_listenForRequests_busyWorkers(t *testing.T) { workerPool.newPeer(availablePeer) firstRequestBlockHash := common.MustHexToHash("0x750646b852a29e5f3668959916a03d6243a3137e91d0cd36870364931030f707") - firstBlockRequest := network.NewSingleBlockRequestMessage(firstRequestBlockHash, network.BootstrapRequestData) + firstBlockRequest := network.NewBlockRequest(*variadic.MustNewUint32OrHash(firstRequestBlockHash), + 1, network.BootstrapRequestData, network.Descending) secondRequestBlockHash := 
common.MustHexToHash("0x897646b852a29e5f3668959916a03d6243a3137e91d0cd36870364931030f707") - secondBlockRequest := network.NewSingleBlockRequestMessage(firstRequestBlockHash, network.BootstrapRequestData) + secondBlockRequest := network.NewBlockRequest(*variadic.MustNewUint32OrHash(firstRequestBlockHash), + 1, network.BootstrapRequestData, network.Descending) firstMockedBlockResponse := &network.BlockResponseMessage{ BlockData: []*types.BlockData{ diff --git a/lib/trie/database.go b/lib/trie/database.go index e075fad78e..2c1f5b9300 100644 --- a/lib/trie/database.go +++ b/lib/trie/database.go @@ -79,10 +79,6 @@ func (t *Trie) loadNode(db Getter, n *Node) error { } nodeHash := merkleValue - if len(nodeHash) < 1 { - fmt.Printf(">>>>>>>>>> trie loadNode, empty node hash\n") - } - encodedNode, err := db.Get(nodeHash) if err != nil { return fmt.Errorf("cannot find child node key 0x%x in database: %w", nodeHash, err) From 6ba9c73aa14b59f345cc5896eefcf264c5c1419c Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 11 Jul 2023 08:41:45 -0400 Subject: [PATCH 097/140] chore: solve lint warns --- dot/network/message.go | 3 ++- dot/sync/chain_sync.go | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/dot/network/message.go b/dot/network/message.go index a0d52da91f..c712030231 100644 --- a/dot/network/message.go +++ b/dot/network/message.go @@ -361,7 +361,8 @@ func (cm *ConsensusMessage) Hash() (common.Hash, error) { return common.Blake2bHash(encMsg) } -func NewBlockRequest(startingBlock variadic.Uint32OrHash, amount uint32, requestedData byte, direction SyncDirection) *BlockRequestMessage { +func NewBlockRequest(startingBlock variadic.Uint32OrHash, amount uint32, + requestedData byte, direction SyncDirection) *BlockRequestMessage { return &BlockRequestMessage{ RequestedData: requestedData, StartingBlock: startingBlock, diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index bdba82f987..ab735d2b1e 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -192,7 +192,8 @@ func (cs *chainSync) stop() { <-cs.workerPool.doneCh } -func (cs *chainSync) isFarFromTarget() (bestBlockHeader *types.Header, syncTarget uint, isFarFromTarget bool, err error) { +func (cs *chainSync) isFarFromTarget() (bestBlockHeader *types.Header, syncTarget uint, + isFarFromTarget bool, err error) { syncTarget, err = cs.getTarget() if err != nil { return nil, syncTarget, false, fmt.Errorf("getting target: %w", err) From 7dc13c0ff6e51dd947e585b34c427443a3a580d3 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Sun, 23 Jul 2023 19:26:50 -0400 Subject: [PATCH 098/140] chore: address comments --- dot/network/message.go | 2 +- dot/network/message_test.go | 2 +- dot/sync/chain_sync.go | 7 ++++--- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/dot/network/message.go b/dot/network/message.go index c712030231..086d489d7a 100644 --- a/dot/network/message.go +++ b/dot/network/message.go @@ -371,7 +371,7 @@ func NewBlockRequest(startingBlock variadic.Uint32OrHash, amount uint32, } } -func NewAscedingBlockRequests(startNumber, targetNumber uint, requestedData byte) []*BlockRequestMessage { +func NewAscendingBlockRequests(startNumber, targetNumber uint, requestedData byte) []*BlockRequestMessage { if startNumber > targetNumber { return []*BlockRequestMessage{} } diff --git a/dot/network/message_test.go b/dot/network/message_test.go index 095f5a1a82..839cf0e228 100644 --- a/dot/network/message_test.go +++ b/dot/network/message_test.go @@ -536,7 +536,7 @@ func 
TestAscendingBlockRequest(t *testing.T) { tt := tt t.Run(tname, func(t *testing.T) { - requests := NewAscedingBlockRequests(tt.startNumber, tt.targetNumber, BootstrapRequestData) + requests := NewAscendingBlockRequests(tt.startNumber, tt.targetNumber, BootstrapRequestData) require.Equal(t, tt.expectedBlockRequestMessage, requests) }) } diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index ab735d2b1e..2cb0324aeb 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -169,6 +169,8 @@ func newChainSync(cfg chainSyncConfig) *chainSync { } func (cs *chainSync) start() { + isSyncedGauge.Set(0) + // wait until we have a minimal workers in the sync worker pool for { totalAvailable := cs.workerPool.totalWorkers() @@ -179,7 +181,6 @@ func (cs *chainSync) start() { time.Sleep(time.Second) } - isSyncedGauge.Set(0) go cs.pendingBlocks.run(cs.finalisedCh, cs.stopCh) go cs.workerPool.listenForRequests(cs.stopCh) @@ -284,7 +285,7 @@ func (cs *chainSync) onBlockAnnounce(announced announcedBlock) error { return fmt.Errorf("while adding pending block header: %w", err) } - if cs.getSyncMode() != tip { + if cs.getSyncMode() == bootstrap { return nil } @@ -517,7 +518,7 @@ func (cs *chainSync) requestMaxBlocksFrom(bestBlockHeader *types.Header) error { targetBlockNumber = targetBlockNumber - (numOfRequestsToDrop * 128) } - requests := network.NewAscedingBlockRequests(startRequestAt, targetBlockNumber, + requests := network.NewAscendingBlockRequests(startRequestAt, targetBlockNumber, network.BootstrapRequestData) var expectedAmountOfBlocks uint32 From d677dc10e2d8d8cd11eae655ffb8b281416ffa44 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 25 Jul 2023 09:56:20 -0400 Subject: [PATCH 099/140] chore: change to plural --- dot/network/service.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dot/network/service.go b/dot/network/service.go index 4cce2e486e..fa139d3616 100644 --- a/dot/network/service.go +++ b/dot/network/service.go @@ -605,8 +605,8 @@ func (s *Service) NetworkState() common.NetworkState { } } -// AllConnectedPeersID returns all the connected to the node instance -func (s *Service) AllConnectedPeersID() []peer.ID { +// AllConnectedPeersIDs returns all the connected to the node instance +func (s *Service) AllConnectedPeersIDs() []peer.ID { return s.host.p2pHost.Network().Peers() } From 39a52d05cbd0948eaa6332374afeb1c59dd5e13c Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 25 Jul 2023 10:56:46 -0400 Subject: [PATCH 100/140] chore: use `WaitGroup` and implement stop timeout --- dot/sync/chain_sync.go | 33 +++++++++++-- dot/sync/chain_sync_test.go | 61 ++++++++++++++++-------- dot/sync/disjoint_block_set.go | 9 ++-- dot/sync/interfaces.go | 2 +- dot/sync/mock_disjoint_block_set_test.go | 9 ++-- dot/sync/mocks_test.go | 12 ++--- dot/sync/worker_pool.go | 8 ++-- dot/sync/worker_pool_test.go | 28 +++++++---- 8 files changed, 111 insertions(+), 51 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 2cb0324aeb..6729417209 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -93,6 +93,7 @@ type announcedBlock struct { } type chainSync struct { + wg sync.WaitGroup stopCh chan struct{} blockState BlockState @@ -181,16 +182,40 @@ func (cs *chainSync) start() { time.Sleep(time.Second) } - go cs.pendingBlocks.run(cs.finalisedCh, cs.stopCh) - go cs.workerPool.listenForRequests(cs.stopCh) + cs.wg.Add(1) + go cs.pendingBlocks.run(cs.finalisedCh, cs.stopCh, &cs.wg) + + cs.wg.Add(1) + go 
cs.workerPool.listenForRequests(cs.stopCh, &cs.wg) cs.syncMode.Store(bootstrap) - go cs.bootstrapSync() + + cs.wg.Add(1) + go func() { + cs.bootstrapSync() + }() } func (cs *chainSync) stop() { close(cs.stopCh) - <-cs.workerPool.doneCh + + allStopCh := make(chan struct{}) + go func() { + defer close(allStopCh) + cs.wg.Wait() + }() + + timeoutTimer := time.NewTimer(30 * time.Second) + + select { + case <-allStopCh: + if !timeoutTimer.Stop() { + <-timeoutTimer.C + } + return + case <-timeoutTimer.C: + logger.Critical("not all chainsync goroutines stopped successfully ") + } } func (cs *chainSync) isFarFromTarget() (bestBlockHeader *types.Header, syncTarget uint, diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index 7943d26133..0facff942e 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -6,6 +6,7 @@ package sync import ( "errors" "fmt" + "sync" "sync/atomic" "testing" "time" @@ -211,10 +212,11 @@ func Test_chainSync_onImportBlock(t *testing.T) { ctrl := gomock.NewController(t) chainSync := tt.chainSyncBuilder(ctrl) + stopCh := make(chan struct{}) + wg := sync.WaitGroup{} if tt.listenForRequests { - stopCh := make(chan struct{}) - defer close(stopCh) - go chainSync.workerPool.listenForRequests(stopCh) + wg.Add(1) + go chainSync.workerPool.listenForRequests(stopCh, &wg) } err := chainSync.onBlockAnnounce(announcedBlock{ @@ -226,6 +228,11 @@ func Test_chainSync_onImportBlock(t *testing.T) { if tt.errWrapped != nil { assert.EqualError(t, err, tt.errMessage) } + + if tt.listenForRequests { + close(stopCh) + wg.Wait() + } }) } } @@ -463,13 +470,15 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker(t *testing.T) { cs.workerPool.fromBlockAnnounce(peer.ID("noot")) stopCh := make(chan struct{}) - go cs.workerPool.listenForRequests(stopCh) + wg := sync.WaitGroup{} + wg.Add(1) + go cs.workerPool.listenForRequests(stopCh, &wg) err = cs.requestMaxBlocksFrom(mockedGenesisHeader) require.NoError(t, err) close(stopCh) - <-cs.workerPool.doneCh + wg.Wait() } func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) { @@ -552,13 +561,15 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) { cs.workerPool.fromBlockAnnounce(peer.ID("noot2")) stopCh := make(chan struct{}) - go cs.workerPool.listenForRequests(stopCh) + wg := sync.WaitGroup{} + wg.Add(1) + go cs.workerPool.listenForRequests(stopCh, &wg) err = cs.requestMaxBlocksFrom(mockedGenesisHeader) require.NoError(t, err) close(stopCh) - <-cs.workerPool.doneCh + wg.Wait() } func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorkerFailing(t *testing.T) { @@ -661,13 +672,15 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorkerFailing(t *testing. 
cs.workerPool.fromBlockAnnounce(peer.ID("bob")) stopCh := make(chan struct{}) - go cs.workerPool.listenForRequests(stopCh) + wg := sync.WaitGroup{} + wg.Add(1) + go cs.workerPool.listenForRequests(stopCh, &wg) err = cs.requestMaxBlocksFrom(mockedGenesisHeader) require.NoError(t, err) close(stopCh) - <-cs.workerPool.doneCh + wg.Wait() // peer should be punished syncWorker, ok := cs.workerPool.workers[peer.ID("bob")] @@ -781,13 +794,15 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithProtocolNotSupported(t *test cs.workerPool.fromBlockAnnounce(peer.ID("bob")) stopCh := make(chan struct{}) - go cs.workerPool.listenForRequests(stopCh) + wg := sync.WaitGroup{} + wg.Add(1) + go cs.workerPool.listenForRequests(stopCh, &wg) err = cs.requestMaxBlocksFrom(mockedGenesisHeader) require.NoError(t, err) close(stopCh) - <-cs.workerPool.doneCh + wg.Wait() // peer should be punished syncWorker, ok := cs.workerPool.workers[peer.ID("bob")] @@ -904,13 +919,15 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithNilHeaderInResponse(t *testi cs.workerPool.fromBlockAnnounce(peer.ID("bob")) stopCh := make(chan struct{}) - go cs.workerPool.listenForRequests(stopCh) + wg := sync.WaitGroup{} + wg.Add(1) + go cs.workerPool.listenForRequests(stopCh, &wg) err = cs.requestMaxBlocksFrom(mockedGenesisHeader) require.NoError(t, err) close(stopCh) - <-cs.workerPool.doneCh + wg.Wait() // peer should be punished syncWorker, ok := cs.workerPool.workers[peer.ID("bob")] @@ -1024,13 +1041,15 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithResponseIsNotAChain(t *testi cs.workerPool.fromBlockAnnounce(peer.ID("bob")) stopCh := make(chan struct{}) - go cs.workerPool.listenForRequests(stopCh) + wg := sync.WaitGroup{} + wg.Add(1) + go cs.workerPool.listenForRequests(stopCh, &wg) err = cs.requestMaxBlocksFrom(mockedGenesisHeader) require.NoError(t, err) close(stopCh) - <-cs.workerPool.doneCh + wg.Wait() // peer should be punished syncWorker, ok := cs.workerPool.workers[peer.ID("bob")] @@ -1148,13 +1167,15 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing. cs.workerPool.fromBlockAnnounce(peer.ID("bob")) stopCh := make(chan struct{}) - go cs.workerPool.listenForRequests(stopCh) + wg := sync.WaitGroup{} + wg.Add(1) + go cs.workerPool.listenForRequests(stopCh, &wg) err = cs.requestMaxBlocksFrom(mockedGenesisHeader) require.NoError(t, err) close(stopCh) - <-cs.workerPool.doneCh + wg.Wait() // peer should be not in the worker pool // peer should be in the ignore list @@ -1240,13 +1261,15 @@ func TestChainSync_BootstrapSync_SucessfulSync_ReceivedPartialBlockData(t *testi cs.workerPool.fromBlockAnnounce(peer.ID("alice")) stopCh := make(chan struct{}) - go cs.workerPool.listenForRequests(stopCh) + wg := sync.WaitGroup{} + wg.Add(1) + go cs.workerPool.listenForRequests(stopCh, &wg) err = cs.requestMaxBlocksFrom(mockedGenesisHeader) require.NoError(t, err) close(stopCh) - <-cs.workerPool.doneCh + wg.Wait() require.Len(t, cs.workerPool.workers, 1) diff --git a/dot/sync/disjoint_block_set.go b/dot/sync/disjoint_block_set.go index 400b3877e9..95b9f7407b 100644 --- a/dot/sync/disjoint_block_set.go +++ b/dot/sync/disjoint_block_set.go @@ -27,7 +27,7 @@ var ( // DisjointBlockSet represents a set of incomplete blocks, or blocks // with an unknown parent. 
it is implemented by *disjointBlockSet type DisjointBlockSet interface { - run(finalisedCh <-chan *types.FinalisationInfo, stop <-chan struct{}) + run(finalisedCh <-chan *types.FinalisationInfo, stop <-chan struct{}, wg *sync.WaitGroup) addHashAndNumber(hash common.Hash, number uint) error addHeader(*types.Header) error addBlock(*types.Block) error @@ -114,9 +114,12 @@ func newDisjointBlockSet(limit int) *disjointBlockSet { } } -func (s *disjointBlockSet) run(finalisedCh <-chan *types.FinalisationInfo, stop <-chan struct{}) { +func (s *disjointBlockSet) run(finalisedCh <-chan *types.FinalisationInfo, stop <-chan struct{}, wg *sync.WaitGroup) { ticker := time.NewTicker(clearBlocksInterval) - defer ticker.Stop() + defer func() { + ticker.Stop() + wg.Done() + }() for { select { diff --git a/dot/sync/interfaces.go b/dot/sync/interfaces.go index 311e50390e..806eedb659 100644 --- a/dot/sync/interfaces.go +++ b/dot/sync/interfaces.go @@ -76,7 +76,7 @@ type Network interface { // ReportPeer reports peer based on the peer behaviour. ReportPeer(change peerset.ReputationChange, p peer.ID) - AllConnectedPeersID() []peer.ID + AllConnectedPeersIDs() []peer.ID } // Telemetry is the telemetry client to send telemetry messages. diff --git a/dot/sync/mock_disjoint_block_set_test.go b/dot/sync/mock_disjoint_block_set_test.go index d26ef0644a..98c93f577a 100644 --- a/dot/sync/mock_disjoint_block_set_test.go +++ b/dot/sync/mock_disjoint_block_set_test.go @@ -6,6 +6,7 @@ package sync import ( reflect "reflect" + sync0 "sync" types "github.com/ChainSafe/gossamer/dot/types" common "github.com/ChainSafe/gossamer/lib/common" @@ -158,15 +159,15 @@ func (mr *MockDisjointBlockSetMockRecorder) removeLowerBlocks(arg0 interface{}) } // run mocks base method. -func (m *MockDisjointBlockSet) run(arg0 <-chan *types.FinalisationInfo, arg1 <-chan struct{}) { +func (m *MockDisjointBlockSet) run(arg0 <-chan *types.FinalisationInfo, arg1 <-chan struct{}, arg2 *sync0.WaitGroup) { m.ctrl.T.Helper() - m.ctrl.Call(m, "run", arg0, arg1) + m.ctrl.Call(m, "run", arg0, arg1, arg2) } // run indicates an expected call of run. -func (mr *MockDisjointBlockSetMockRecorder) run(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockDisjointBlockSetMockRecorder) run(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "run", reflect.TypeOf((*MockDisjointBlockSet)(nil).run), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "run", reflect.TypeOf((*MockDisjointBlockSet)(nil).run), arg0, arg1, arg2) } // size mocks base method. diff --git a/dot/sync/mocks_test.go b/dot/sync/mocks_test.go index 971e559359..53350f11bc 100644 --- a/dot/sync/mocks_test.go +++ b/dot/sync/mocks_test.go @@ -608,18 +608,18 @@ func (m *MockNetwork) EXPECT() *MockNetworkMockRecorder { return m.recorder } -// AllConnectedPeersID mocks base method. -func (m *MockNetwork) AllConnectedPeersID() []peer.ID { +// AllConnectedPeersIDs mocks base method. +func (m *MockNetwork) AllConnectedPeersIDs() []peer.ID { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AllConnectedPeersID") + ret := m.ctrl.Call(m, "AllConnectedPeersIDs") ret0, _ := ret[0].([]peer.ID) return ret0 } -// AllConnectedPeersID indicates an expected call of AllConnectedPeersID. -func (mr *MockNetworkMockRecorder) AllConnectedPeersID() *gomock.Call { +// AllConnectedPeersIDs indicates an expected call of AllConnectedPeersIDs. 
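The run signature change above is one instance of the lifecycle pattern this patch applies to every long-lived goroutine: the caller does wg.Add(1) immediately before the go statement, and the goroutine owns the matching wg.Done through a defer that also releases its ticker. A self-contained sketch of the shape, assuming the standard sync and time imports (the interval and the work are placeholders):

// runLoop sketches the goroutine-lifecycle pattern used by
// disjointBlockSet.run and syncWorkerPool.listenForRequests: cleanup and
// wg.Done fire on every exit path, so stop() can Wait reliably.
func runLoop(stop <-chan struct{}, wg *sync.WaitGroup) {
    defer wg.Done()

    ticker := time.NewTicker(time.Minute) // placeholder interval
    defer ticker.Stop()

    for {
        select {
        case <-stop:
            return
        case <-ticker.C:
            // periodic work goes here
        }
    }
}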
+func (mr *MockNetworkMockRecorder) AllConnectedPeersIDs() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllConnectedPeersID", reflect.TypeOf((*MockNetwork)(nil).AllConnectedPeersID)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllConnectedPeersIDs", reflect.TypeOf((*MockNetwork)(nil).AllConnectedPeersIDs)) } // Peers mocks base method. diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 3ce7c576dd..f59325a2c6 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -44,7 +44,6 @@ type peerSyncWorker struct { type syncWorkerPool struct { wg sync.WaitGroup mtx sync.RWMutex - doneCh chan struct{} availableCond *sync.Cond network Network @@ -58,7 +57,6 @@ func newSyncWorkerPool(net Network, requestMaker network.RequestMaker) *syncWork swp := &syncWorkerPool{ network: net, requestMaker: requestMaker, - doneCh: make(chan struct{}), workers: make(map[peer.ID]*peerSyncWorker), taskQueue: make(chan *syncTask, maxRequestsAllowed+1), ignorePeers: make(map[peer.ID]struct{}), @@ -71,7 +69,7 @@ func newSyncWorkerPool(net Network, requestMaker network.RequestMaker) *syncWork // useConnectedPeers will retrieve all connected peers // through the network layer and use them as sources of blocks func (s *syncWorkerPool) useConnectedPeers() { - connectedPeers := s.network.AllConnectedPeersID() + connectedPeers := s.network.AllConnectedPeersIDs() if len(connectedPeers) < 1 { return } @@ -214,8 +212,8 @@ func (s *syncWorkerPool) getPeerByID(peerID peer.ID) *peerSyncWorker { return peerSync } -func (s *syncWorkerPool) listenForRequests(stopCh chan struct{}) { - defer close(s.doneCh) +func (s *syncWorkerPool) listenForRequests(stopCh chan struct{}, wg *sync.WaitGroup) { + defer wg.Done() for { select { case <-stopCh: diff --git a/dot/sync/worker_pool_test.go b/dot/sync/worker_pool_test.go index 98028aa6c1..2f5cbd232a 100644 --- a/dot/sync/worker_pool_test.go +++ b/dot/sync/worker_pool_test.go @@ -4,6 +4,7 @@ package sync import ( + "sync" "testing" "time" @@ -29,7 +30,7 @@ func TestSyncWorkerPool_useConnectedPeers(t *testing.T) { ctrl := gomock.NewController(t) networkMock := NewMockNetwork(ctrl) networkMock.EXPECT(). - AllConnectedPeersID(). + AllConnectedPeersIDs(). Return([]peer.ID{}) return newSyncWorkerPool(networkMock, nil) @@ -41,7 +42,7 @@ func TestSyncWorkerPool_useConnectedPeers(t *testing.T) { ctrl := gomock.NewController(t) networkMock := NewMockNetwork(ctrl) networkMock.EXPECT(). - AllConnectedPeersID(). + AllConnectedPeersIDs(). Return([]peer.ID{ peer.ID("available-1"), peer.ID("available-2"), @@ -60,7 +61,7 @@ func TestSyncWorkerPool_useConnectedPeers(t *testing.T) { ctrl := gomock.NewController(t) networkMock := NewMockNetwork(ctrl) networkMock.EXPECT(). - AllConnectedPeersID(). + AllConnectedPeersIDs(). Return([]peer.ID{ peer.ID("available-1"), peer.ID("available-2"), @@ -80,7 +81,7 @@ func TestSyncWorkerPool_useConnectedPeers(t *testing.T) { ctrl := gomock.NewController(t) networkMock := NewMockNetwork(ctrl) networkMock.EXPECT(). - AllConnectedPeersID(). + AllConnectedPeersIDs(). Return([]peer.ID{ peer.ID("available-1"), peer.ID("available-2"), @@ -108,7 +109,7 @@ func TestSyncWorkerPool_useConnectedPeers(t *testing.T) { ctrl := gomock.NewController(t) networkMock := NewMockNetwork(ctrl) networkMock.EXPECT(). - AllConnectedPeersID(). + AllConnectedPeersIDs(). 
Return([]peer.ID{ peer.ID("available-1"), peer.ID("available-2"), @@ -232,8 +233,10 @@ func TestSyncWorkerPool_listenForRequests_submitRequest(t *testing.T) { workerPool := newSyncWorkerPool(networkMock, requestMakerMock) stopCh := make(chan struct{}) - defer close(stopCh) - go workerPool.listenForRequests(stopCh) + + wg := sync.WaitGroup{} + wg.Add(1) + go workerPool.listenForRequests(stopCh, &wg) availablePeer := peer.ID("available-peer") workerPool.newPeer(availablePeer) @@ -282,6 +285,9 @@ func TestSyncWorkerPool_listenForRequests_submitRequest(t *testing.T) { require.Equal(t, syncTaskResult.who, availablePeer) require.Equal(t, syncTaskResult.request, blockRequest) require.Equal(t, syncTaskResult.response, mockedBlockResponse) + + close(stopCh) + wg.Wait() } func TestSyncWorkerPool_listenForRequests_busyWorkers(t *testing.T) { @@ -293,8 +299,9 @@ func TestSyncWorkerPool_listenForRequests_busyWorkers(t *testing.T) { workerPool := newSyncWorkerPool(networkMock, requestMakerMock) stopCh := make(chan struct{}) - defer close(stopCh) - go workerPool.listenForRequests(stopCh) + wg := sync.WaitGroup{} + wg.Add(1) + go workerPool.listenForRequests(stopCh, &wg) availablePeer := peer.ID("available-peer") workerPool.newPeer(availablePeer) @@ -373,4 +380,7 @@ func TestSyncWorkerPool_listenForRequests_busyWorkers(t *testing.T) { require.Equal(t, syncTaskResult.response, secondMockedBlockResponse) require.Equal(t, uint(1), workerPool.totalWorkers()) + + close(stopCh) + wg.Wait() } From e20b5d520e93e3f362220d3591e928958cef7851 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 25 Jul 2023 11:03:08 -0400 Subject: [PATCH 101/140] chore: change `peerView` to be a value --- dot/sync/chain_sync.go | 18 +++++++++--------- dot/sync/chain_sync_test.go | 10 +++++----- dot/sync/worker_pool.go | 18 +++++++++--------- 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 6729417209..0bb9ed9460 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -104,7 +104,7 @@ type chainSync struct { // tracks the latest state we know of from our peers, // ie. their best block hash and number peerViewLock sync.RWMutex - peerView map[peer.ID]*peerView + peerView map[peer.ID]peerView // disjoint set of blocks which are known but not ready to be processed // ie. 
we only know the hash, number, or the parent block is unknown, or the body is unknown @@ -157,7 +157,7 @@ func newChainSync(cfg chainSyncConfig) *chainSync { telemetry: cfg.telemetry, blockState: cfg.bs, network: cfg.net, - peerView: make(map[peer.ID]*peerView), + peerView: make(map[peer.ID]peerView), pendingBlocks: cfg.pendingBlocks, syncMode: atomicState, finalisedCh: cfg.bs.GetFinalisedNotifierChannel(), @@ -218,8 +218,8 @@ func (cs *chainSync) stop() { } } -func (cs *chainSync) isFarFromTarget() (bestBlockHeader *types.Header, syncTarget uint, - isFarFromTarget bool, err error) { +func (cs *chainSync) isBootstrap() (bestBlockHeader *types.Header, syncTarget uint, + isBootstrap bool, err error) { syncTarget, err = cs.getTarget() if err != nil { return nil, syncTarget, false, fmt.Errorf("getting target: %w", err) @@ -231,8 +231,8 @@ func (cs *chainSync) isFarFromTarget() (bestBlockHeader *types.Header, syncTarge } bestBlockNumber := bestBlockHeader.Number - isFarFromTarget = bestBlockNumber+network.MaxBlocksInResponse < syncTarget - return bestBlockHeader, syncTarget, isFarFromTarget, nil + isBootstrap = bestBlockNumber+network.MaxBlocksInResponse < syncTarget + return bestBlockHeader, syncTarget, isBootstrap, nil } func (cs *chainSync) bootstrapSync() { @@ -244,7 +244,7 @@ func (cs *chainSync) bootstrapSync() { default: } - bestBlockHeader, syncTarget, isFarFromTarget, err := cs.isFarFromTarget() + bestBlockHeader, syncTarget, isFarFromTarget, err := cs.isBootstrap() if err != nil && !errors.Is(err, errNoPeerViews) { logger.Criticalf("ending bootstrap sync, checking target distance: %s", err) return @@ -292,7 +292,7 @@ func (cs *chainSync) setPeerHead(who peer.ID, bestHash common.Hash, bestNumber u cs.peerViewLock.Lock() defer cs.peerViewLock.Unlock() - cs.peerView[who] = &peerView{ + cs.peerView[who] = peerView{ who: who, hash: bestHash, number: bestNumber, @@ -314,7 +314,7 @@ func (cs *chainSync) onBlockAnnounce(announced announcedBlock) error { return nil } - _, _, isFarFromTarget, err := cs.isFarFromTarget() + _, _, isFarFromTarget, err := cs.isBootstrap() if err != nil && !errors.Is(err, errNoPeerViews) { return fmt.Errorf("checking target distance: %w", err) } diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index 0facff942e..6ec6d6e704 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -377,9 +377,9 @@ func setupChainSyncToBootstrapMode(t *testing.T, blocksAhead uint, peer.ID("some_peer_3"), } - peerViewMap := map[peer.ID]*peerView{} + peerViewMap := map[peer.ID]peerView{} for _, p := range mockedPeerID { - peerViewMap[p] = &peerView{ + peerViewMap[p] = peerView{ who: p, hash: common.Hash{1, 2, 3}, number: blocksAhead, @@ -1558,16 +1558,16 @@ func TestChainSync_getHighestBlock(t *testing.T) { cases := map[string]struct { expectedHighestBlock uint wantErr error - chainSyncPeerView map[peer.ID]*peerView + chainSyncPeerView map[peer.ID]peerView }{ "no_peer_view": { wantErr: errNoPeers, expectedHighestBlock: 0, - chainSyncPeerView: make(map[peer.ID]*peerView), + chainSyncPeerView: make(map[peer.ID]peerView), }, "highest_block": { expectedHighestBlock: 500, - chainSyncPeerView: map[peer.ID]*peerView{ + chainSyncPeerView: map[peer.ID]peerView{ peer.ID("peer-A"): { number: 100, }, diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index f59325a2c6..e605da7e95 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -94,18 +94,18 @@ func (s *syncWorkerPool) newPeer(who peer.ID) { return } - peerSync, has := 
s.workers[who] + worker, has := s.workers[who] if !has { - peerSync = &peerSyncWorker{status: available} - s.workers[who] = peerSync + worker = &peerSyncWorker{status: available} + s.workers[who] = worker logger.Tracef("potential worker added, total in the pool %d", len(s.workers)) } // check if the punishment is not valid - if peerSync.status == punished && peerSync.punishmentTime.Before(time.Now()) { - peerSync.status = available - s.workers[who] = peerSync + if worker.status == punished && worker.punishmentTime.Before(time.Now()) { + worker.status = available + s.workers[who] = worker } } @@ -268,10 +268,10 @@ func (s *syncWorkerPool) executeRequest(who peer.ID, task *syncTask) { logger.Debugf("[FINISHED] worker %s, err: %s, block data amount: %d", who, err, len(response.BlockData)) s.mtx.Lock() - peerSync, has := s.workers[who] + worker, has := s.workers[who] if has { - peerSync.status = available - s.workers[who] = peerSync + worker.status = available + s.workers[who] = worker } s.mtx.Unlock() s.availableCond.Signal() From 7da5edf3f3e5a728f6abe4362904bfefff9dbdb6 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 25 Jul 2023 14:36:21 -0400 Subject: [PATCH 102/140] addressing comments --- dot/sync/chain_sync.go | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 0bb9ed9460..c797e16100 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -179,6 +179,7 @@ func (cs *chainSync) start() { break } + // TODO: https://github.com/ChainSafe/gossamer/issues/3402 time.Sleep(time.Second) } @@ -350,15 +351,15 @@ func (cs *chainSync) requestAnnouncedBlock(announce announcedBlock) error { return fmt.Errorf("getting best block header: %w", err) } + highestFinalizedHeader, err := cs.blockState.GetHighestFinalisedHeader() + if err != nil { + return fmt.Errorf("getting highest finalized header") + } + // if the announced block contains a lower number than our best // block header, let's check if it is greater than our latests // finalized header, if so this block belongs to a fork chain if announcedNumber < bestBlockHeader.Number { - highestFinalizedHeader, err := cs.blockState.GetHighestFinalisedHeader() - if err != nil { - return fmt.Errorf("getting highest finalized header") - } - // ignore the block if it has the same or lower number // TODO: is it following the protocol to send a blockAnnounce with number < highestFinalized number? 
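The comparisons that follow encode a three-way decision for every announced block. A compact sketch of the same rule over plain numbers (classifyAnnounce is a hypothetical helper, not part of this patch), mirroring the control flow exactly:

// classifyAnnounce mirrors requestAnnouncedBlock: a block at or above our
// best header extends the best chain; below best but at or under the last
// finalised number it is ignored; anything in between belongs to a fork.
func classifyAnnounce(announced, best, finalised uint) string {
    switch {
    case announced >= best:
        return "chain" // request the gap up to the announced block
    case announced <= finalised:
        return "ignore" // we already finalised this height
    default:
        return "fork" // request the fork blocks from the announcer
    }
}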
if announcedNumber <= highestFinalizedHeader.Number { @@ -373,11 +374,6 @@ func (cs *chainSync) requestAnnouncedBlock(announce announcedBlock) error { return fmt.Errorf("requesting chain blocks: %w", err) } - highestFinalizedHeader, err := cs.blockState.GetHighestFinalisedHeader() - if err != nil { - return fmt.Errorf("while getting highest finalized header: %w", err) - } - err = cs.requestPendingBlocks(highestFinalizedHeader) if err != nil { return fmt.Errorf("while requesting pending blocks") From e6f47928a07753e17270555920eb7b6a6b9bf0aa Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 25 Jul 2023 15:07:48 -0400 Subject: [PATCH 103/140] chore: bounded request retry with the same peer id --- dot/sync/chain_sync.go | 34 ++++++++++++++++-------------- dot/sync/worker_pool.go | 40 +++++++++++++++--------------------- dot/sync/worker_pool_test.go | 2 +- 3 files changed, 36 insertions(+), 40 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index c797e16100..1d5b62ca47 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -407,7 +407,7 @@ func (cs *chainSync) requestChainBlocks(announcedHeader, bestBlockHeader *types. } resultsQueue := make(chan *syncTaskResult) - cs.workerPool.submitBoundedRequest(request, peerWhoAnnounced, resultsQueue) + cs.workerPool.submitRequest(request, &peerWhoAnnounced, resultsQueue) err := cs.handleWorkersResults(resultsQueue, startAtBlock, totalBlocks) if err != nil { return fmt.Errorf("while handling workers results: %w", err) @@ -444,7 +444,7 @@ func (cs *chainSync) requestForkBlocks(bestBlockHeader, highestFinalizedHeader, peerWhoAnnounced, gapLength, announcedHash, announcedHeader.Number) resultsQueue := make(chan *syncTaskResult) - cs.workerPool.submitBoundedRequest(request, peerWhoAnnounced, resultsQueue) + cs.workerPool.submitRequest(request, &peerWhoAnnounced, resultsQueue) err = cs.handleWorkersResults(resultsQueue, startAtBlock, gapLength) if err != nil { @@ -494,7 +494,7 @@ func (cs *chainSync) requestPendingBlocks(highestFinalizedHeader *types.Header) // the `requests` in the tip sync are not related necessarily // this is why we need to treat them separately resultsQueue := make(chan *syncTaskResult) - cs.workerPool.submitRequest(descendingGapRequest, resultsQueue) + cs.workerPool.submitRequest(descendingGapRequest, nil, resultsQueue) // TODO: we should handle the requests concurrently // a way of achieve that is by constructing a new `handleWorkersResults` for @@ -545,7 +545,6 @@ func (cs *chainSync) requestMaxBlocksFrom(bestBlockHeader *types.Header) error { var expectedAmountOfBlocks uint32 for _, request := range requests { if request.Max != nil { - fmt.Printf("===> request max: %d\n", *request.Max) expectedAmountOfBlocks += *request.Max } } @@ -626,6 +625,15 @@ taskResultLoop: <-idleTimer.C } + who := taskResult.who + request := taskResult.request + response := taskResult.response + + var boundedTo *peer.ID + if taskResult.isBounded { + boundedTo = &taskResult.who + } + logger.Debugf("task result: peer(%s), with error: %v, with response: %v", taskResult.who, taskResult.err != nil, taskResult.response != nil) @@ -638,19 +646,15 @@ taskResultLoop: cs.network.ReportPeer(peerset.ReputationChange{ Value: peerset.BadProtocolValue, Reason: peerset.BadProtocolReason, - }, taskResult.who) + }, who) } - cs.workerPool.punishPeer(taskResult.who) + cs.workerPool.punishPeer(who) } - cs.workerPool.submitRequest(taskResult.request, workersResults) + cs.workerPool.submitRequest(request, boundedTo, workersResults) 
continue } - who := taskResult.who - request := taskResult.request - response := taskResult.response - if request.Direction == network.Descending { // reverse blocks before pre-validating and placing in ready queue reverseBlockData(response.BlockData) @@ -669,7 +673,7 @@ taskResultLoop: } cs.workerPool.punishPeer(taskResult.who) - cs.workerPool.submitRequest(taskResult.request, workersResults) + cs.workerPool.submitRequest(taskResult.request, boundedTo, workersResults) continue taskResultLoop } @@ -677,7 +681,7 @@ taskResultLoop: if !isChain { logger.Criticalf("response from %s is not a chain", who) cs.workerPool.punishPeer(taskResult.who) - cs.workerPool.submitRequest(taskResult.request, workersResults) + cs.workerPool.submitRequest(taskResult.request, boundedTo, workersResults) continue taskResultLoop } @@ -692,7 +696,7 @@ taskResultLoop: }, who) cs.workerPool.ignorePeerAsWorker(taskResult.who) - cs.workerPool.submitRequest(taskResult.request, workersResults) + cs.workerPool.submitRequest(taskResult.request, boundedTo, workersResults) continue taskResultLoop } @@ -722,7 +726,7 @@ taskResultLoop: Direction: network.Ascending, Max: &difference, } - cs.workerPool.submitRequest(taskResult.request, workersResults) + cs.workerPool.submitRequest(taskResult.request, boundedTo, workersResults) continue taskResultLoop } } diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index e605da7e95..4ec6d39796 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -29,10 +29,11 @@ type syncTask struct { } type syncTaskResult struct { - who peer.ID - request *network.BlockRequestMessage - response *network.BlockResponseMessage - err error + isBounded bool + who peer.ID + request *network.BlockRequestMessage + response *network.BlockResponseMessage + err error } type peerSyncWorker struct { @@ -109,22 +110,12 @@ func (s *syncWorkerPool) newPeer(who peer.ID) { } } -// submitBoundedRequest given a request the worker pool will driven it -// to the given peer.ID, used for tip sync when we receive a block announce -// from a peer and we want to use the exact same peer to request blocks -func (s *syncWorkerPool) submitBoundedRequest(request *network.BlockRequestMessage, - who peer.ID, resultCh chan<- *syncTaskResult) { - s.taskQueue <- &syncTask{ - boundTo: &who, - request: request, - resultCh: resultCh, - } -} - -// submitRequest given a request the worker pool will get the very first available worker -// to perform the request, the response will be dispatch in the resultCh -func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, resultCh chan<- *syncTaskResult) { +// submitRequest given a request, the worker pool will get the peer given the peer.ID +// parameter or if nil the very first available worker or +// to perform the request, the response will be dispatch in the resultCh. 
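In practice the unified entry point replaces the old submitBoundedRequest/submitRequest pair, with a nil peer meaning "any available worker". A usage sketch (pool, request, announcer and resultCh are assumed to be in scope; announcer stands for the peer that sent the block announce):

resultCh := make(chan *syncTaskResult)

// tip sync: bind the request to the peer that announced the block
pool.submitRequest(request, &announcer, resultCh)

// bootstrap and gap fill: nil lets the pool pick any available worker
pool.submitRequest(request, nil, resultCh)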
+func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, who *peer.ID, resultCh chan<- *syncTaskResult) { s.taskQueue <- &syncTask{ + boundTo: who, request: request, resultCh: resultCh, } @@ -134,7 +125,7 @@ func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, res // the response will be dispatch in the resultCh func (s *syncWorkerPool) submitRequests(requests []*network.BlockRequestMessage, resultCh chan<- *syncTaskResult) { for _, request := range requests { - s.submitRequest(request, resultCh) + s.submitRequest(request, nil, resultCh) } } @@ -277,9 +268,10 @@ func (s *syncWorkerPool) executeRequest(who peer.ID, task *syncTask) { s.availableCond.Signal() task.resultCh <- &syncTaskResult{ - who: who, - request: request, - response: response, - err: err, + isBounded: task.boundTo != nil, + who: who, + request: request, + response: response, + err: err, } } diff --git a/dot/sync/worker_pool_test.go b/dot/sync/worker_pool_test.go index 2f5cbd232a..4e233bba27 100644 --- a/dot/sync/worker_pool_test.go +++ b/dot/sync/worker_pool_test.go @@ -268,7 +268,7 @@ func TestSyncWorkerPool_listenForRequests_submitRequest(t *testing.T) { }) resultCh := make(chan *syncTaskResult) - workerPool.submitRequest(blockRequest, resultCh) + workerPool.submitRequest(blockRequest, nil, resultCh) // ensure the task is in the pool and was already // assigned to the peer From 703bb43c09c5540790366e45dbc20ee87ec907de Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 25 Jul 2023 15:13:52 -0400 Subject: [PATCH 104/140] chore: address lint and deepsource comments --- dot/network/message.go | 2 +- dot/sync/chain_sync.go | 3 +-- dot/sync/chain_sync_test.go | 5 ++--- dot/sync/worker_pool.go | 8 ++++++-- dot/sync/worker_pool_test.go | 10 ++++------ 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/dot/network/message.go b/dot/network/message.go index 086d489d7a..ccd97ef29f 100644 --- a/dot/network/message.go +++ b/dot/network/message.go @@ -331,7 +331,7 @@ type ConsensusMessage struct { } // Type returns ConsensusMsgType -func (cm *ConsensusMessage) Type() MessageType { +func (*ConsensusMessage) Type() MessageType { return ConsensusMsgType } diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 1d5b62ca47..ac85e05d6b 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -549,8 +549,7 @@ func (cs *chainSync) requestMaxBlocksFrom(bestBlockHeader *types.Header) error { } } - resultsQueue := make(chan *syncTaskResult) - cs.workerPool.submitRequests(requests, resultsQueue) + resultsQueue := cs.workerPool.submitRequests(requests) err = cs.handleWorkersResults(resultsQueue, startRequestAt, expectedAmountOfBlocks) if err != nil { diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index 6ec6d6e704..57d82ed184 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -1000,9 +1000,8 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithResponseIsNotAChain(t *testi if pID == peer.ID("bob") { notAChainBlockData := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 128, 256) // swap positions to force the problem - firstItem := notAChainBlockData.BlockData[0] - notAChainBlockData.BlockData[0] = notAChainBlockData.BlockData[130] - notAChainBlockData.BlockData[130] = firstItem + notAChainBlockData.BlockData[0], notAChainBlockData.BlockData[130] = + notAChainBlockData.BlockData[130], notAChainBlockData.BlockData[0] *responsePtr = *notAChainBlockData return nil diff --git a/dot/sync/worker_pool.go 
b/dot/sync/worker_pool.go index 4ec6d39796..3ed080cd98 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -113,7 +113,8 @@ func (s *syncWorkerPool) newPeer(who peer.ID) { // submitRequest given a request, the worker pool will get the peer given the peer.ID // parameter or if nil the very first available worker or // to perform the request, the response will be dispatch in the resultCh. -func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, who *peer.ID, resultCh chan<- *syncTaskResult) { +func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, + who *peer.ID, resultCh chan<- *syncTaskResult) { s.taskQueue <- &syncTask{ boundTo: who, request: request, @@ -123,10 +124,13 @@ func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, who // submitRequests takes an set of requests and will submit to the pool through submitRequest // the response will be dispatch in the resultCh -func (s *syncWorkerPool) submitRequests(requests []*network.BlockRequestMessage, resultCh chan<- *syncTaskResult) { +func (s *syncWorkerPool) submitRequests(requests []*network.BlockRequestMessage) (resultCh chan *syncTaskResult) { + resultCh = make(chan *syncTaskResult) for _, request := range requests { s.submitRequest(request, nil, resultCh) } + + return resultCh } // punishPeer given a peer.ID we check increase its times punished diff --git a/dot/sync/worker_pool_test.go b/dot/sync/worker_pool_test.go index 4e233bba27..e213833ab7 100644 --- a/dot/sync/worker_pool_test.go +++ b/dot/sync/worker_pool_test.go @@ -90,7 +90,7 @@ func TestSyncWorkerPool_useConnectedPeers(t *testing.T) { workerPool := newSyncWorkerPool(networkMock, nil) workerPool.workers[peer.ID("available-3")] = &peerSyncWorker{ status: punished, - //arbitrary unix value + // arbitrary unix value punishmentTime: time.Unix(1000, 0), } return workerPool @@ -179,7 +179,7 @@ func TestSyncWorkerPool_newPeer(t *testing.T) { workerPool := newSyncWorkerPool(nil, nil) workerPool.workers[peer.ID("free-again")] = &peerSyncWorker{ status: punished, - //arbitrary unix value + // arbitrary unix value punishmentTime: time.Unix(1000, 0), } return workerPool @@ -357,10 +357,8 @@ func TestSyncWorkerPool_listenForRequests_busyWorkers(t *testing.T) { return nil }) - resultCh := make(chan *syncTaskResult) - - workerPool.submitRequests( - []*network.BlockRequestMessage{firstBlockRequest, secondBlockRequest}, resultCh) + resultCh := workerPool.submitRequests( + []*network.BlockRequestMessage{firstBlockRequest, secondBlockRequest}) // ensure the task is in the pool and was already // assigned to the peer From 15e54545055cb51387a980bc24a500785ceb9491 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 25 Jul 2023 15:59:06 -0400 Subject: [PATCH 105/140] chore: fix race conditions --- dot/sync/chain_sync_test.go | 30 +++++++++++++++--------------- dot/sync/worker_pool_test.go | 6 +++++- 2 files changed, 20 insertions(+), 16 deletions(-) diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index 57d82ed184..4d1d532ee2 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -616,7 +616,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorkerFailing(t *testing. // we use gomock.Any since I cannot guarantee which peer picks which request // but the first call to DoBlockRequest will return the first set and the second // call will return the second set - doBlockRequestCount := 0 + doBlockRequestCount := atomic.Int32{} mockRequestMaker.EXPECT(). 
Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}). DoAndReturn(func(peerID, _, response any) any { @@ -625,10 +625,10 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorkerFailing(t *testing. // then alice should pick the failed request and re-execute it which will // be the third call responsePtr := response.(*network.BlockResponseMessage) - defer func() { doBlockRequestCount++ }() + defer func() { doBlockRequestCount.Add(1) }() pID := peerID.(peer.ID) // cast to peer ID - switch doBlockRequestCount { + switch doBlockRequestCount.Load() { case 0, 1: if pID == peer.ID("alice") { *responsePtr = *worker1Response @@ -732,7 +732,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithProtocolNotSupported(t *test // we use gomock.Any since I cannot guarantee which peer picks which request // but the first call to DoBlockRequest will return the first set and the second // call will return the second set - doBlockRequestCount := 0 + doBlockRequestCount := atomic.Int32{} mockRequestMaker.EXPECT(). Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}). DoAndReturn(func(peerID, _, response any) any { @@ -741,10 +741,10 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithProtocolNotSupported(t *test // then alice should pick the failed request and re-execute it which will // be the third call responsePtr := response.(*network.BlockResponseMessage) - defer func() { doBlockRequestCount++ }() + defer func() { doBlockRequestCount.Add(1) }() pID := peerID.(peer.ID) // cast to peer ID - switch doBlockRequestCount { + switch doBlockRequestCount.Load() { case 0, 1: if pID == peer.ID("alice") { *responsePtr = *worker1Response @@ -854,7 +854,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithNilHeaderInResponse(t *testi // we use gomock.Any since I cannot guarantee which peer picks which request // but the first call to DoBlockRequest will return the first set and the second // call will return the second set - doBlockRequestCount := 0 + doBlockRequestCount := atomic.Int32{} mockRequestMaker.EXPECT(). Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}). DoAndReturn(func(peerID, _, response any) any { @@ -862,10 +862,10 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithNilHeaderInResponse(t *testi // peer.ID(alice) and peer.ID(bob). When bob calls, this method return an // response item but without header as was requested responsePtr := response.(*network.BlockResponseMessage) - defer func() { doBlockRequestCount++ }() + defer func() { doBlockRequestCount.Add(1) }() pID := peerID.(peer.ID) // cast to peer ID - switch doBlockRequestCount { + switch doBlockRequestCount.Load() { case 0, 1: if pID == peer.ID("alice") { *responsePtr = *worker1Response @@ -979,7 +979,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithResponseIsNotAChain(t *testi // we use gomock.Any since I cannot guarantee which peer picks which request // but the first call to DoBlockRequest will return the first set and the second // call will return the second set - doBlockRequestCount := 0 + doBlockRequestCount := atomic.Int32{} mockRequestMaker.EXPECT(). Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}). DoAndReturn(func(peerID, _, response any) any { @@ -987,10 +987,10 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithResponseIsNotAChain(t *testi // peer.ID(alice) and peer.ID(bob). 
When bob calls, this method return an // response that does not form an chain responsePtr := response.(*network.BlockResponseMessage) - defer func() { doBlockRequestCount++ }() + defer func() { doBlockRequestCount.Add(1) }() pID := peerID.(peer.ID) // cast to peer ID - switch doBlockRequestCount { + switch doBlockRequestCount.Load() { case 0, 1: if pID == peer.ID("alice") { *responsePtr = *worker1Response @@ -1102,7 +1102,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing. // we use gomock.Any since I cannot guarantee which peer picks which request // but the first call to DoBlockRequest will return the first set and the second // call will return the second set - doBlockRequestCount := 0 + doBlockRequestCount := atomic.Int32{} mockRequestMaker.EXPECT(). Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}). DoAndReturn(func(peerID, _, response any) any { @@ -1110,10 +1110,10 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing. // peer.ID(alice) and peer.ID(bob). When bob calls, this method return an // response that contains a know bad block responsePtr := response.(*network.BlockResponseMessage) - defer func() { doBlockRequestCount++ }() + defer func() { doBlockRequestCount.Add(1) }() pID := peerID.(peer.ID) // cast to peer ID - switch doBlockRequestCount { + switch doBlockRequestCount.Load() { case 0, 1: if pID == peer.ID("alice") { *responsePtr = *worker1Response diff --git a/dot/sync/worker_pool_test.go b/dot/sync/worker_pool_test.go index e213833ab7..b6c8836995 100644 --- a/dot/sync/worker_pool_test.go +++ b/dot/sync/worker_pool_test.go @@ -277,9 +277,13 @@ func TestSyncWorkerPool_listenForRequests_submitRequest(t *testing.T) { totalWorkers := workerPool.totalWorkers() require.Zero(t, totalWorkers) - peerSync := workerPool.getPeerByID(availablePeer) + workerPool.mtx.RLock() + peerSync, has := workerPool.workers[availablePeer] + require.True(t, has) require.Equal(t, peerSync.status, busy) + workerPool.mtx.RUnlock() + syncTaskResult := <-resultCh require.NoError(t, syncTaskResult.err) require.Equal(t, syncTaskResult.who, availablePeer) From 0263c4d215f6586ca79d5760abfc8da1b9c5c875 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 25 Jul 2023 16:53:35 -0400 Subject: [PATCH 106/140] chore: fix integration test --- dot/node_integration_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dot/node_integration_test.go b/dot/node_integration_test.go index d49e0c1421..345b87a251 100644 --- a/dot/node_integration_test.go +++ b/dot/node_integration_test.go @@ -85,7 +85,7 @@ func TestNewNode(t *testing.T) { assert.NoError(t, err) mockServiceRegistry := NewMockServiceRegisterer(ctrl) - mockServiceRegistry.EXPECT().RegisterService(gomock.Any()).Times(9) + mockServiceRegistry.EXPECT().RegisterService(gomock.Any()).Times(8) m := NewMocknodeBuilderIface(ctrl) m.EXPECT().isNodeInitialised(initConfig.BasePath).Return(nil) From 5f73d0a9db881366c1c7813327f28c16fcb5ca63 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 26 Jul 2023 08:40:47 -0400 Subject: [PATCH 107/140] wip: increase test coverage --- dot/sync/chain_sync.go | 2 + dot/sync/chain_sync_test.go | 297 ++++++++++++++++++++++++++++++------ 2 files changed, 249 insertions(+), 50 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index ac85e05d6b..d194ea0598 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -320,6 +320,8 @@ func (cs *chainSync) onBlockAnnounce(announced announcedBlock) error { 
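The isBootstrap check consulted above reduces to a single comparison against the maximum block-response size. A sketch with the constant written out (the real code uses network.MaxBlocksInResponse, which this series elsewhere treats as 128):

// isBootstrap reports whether the best block is more than one full block
// response behind the sync target; within that window, tip sync suffices.
func isBootstrap(bestNumber, syncTarget uint) bool {
    return bestNumber+128 < syncTarget
}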
return fmt.Errorf("checking target distance: %w", err) } + fmt.Printf("ON BLOCK ANNOUNCE FAR FROM TARGET: %v\n", isFarFromTarget) + if !isFarFromTarget { return cs.requestAnnouncedBlock(announced) } diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index 4d1d532ee2..52d77bd705 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -5,7 +5,6 @@ package sync import ( "errors" - "fmt" "sync" "sync/atomic" "testing" @@ -60,11 +59,11 @@ func Test_chainSyncState_String(t *testing.T) { } } -func Test_chainSync_onImportBlock(t *testing.T) { +func Test_chainSync_onBlockAnnounce(t *testing.T) { t.Parallel() const somePeer = peer.ID("abc") - errTest := errors.New("test error") + //errTest := errors.New("test error") emptyTrieState := storage.NewTrieState(nil) block1AnnounceHeader := types.NewHeader(common.Hash{}, emptyTrieState.MustRoot(), common.Hash{}, 1, scale.VaryingDataTypeSlice{}) @@ -78,54 +77,133 @@ func Test_chainSync_onImportBlock(t *testing.T) { blockAnnounceHeader *types.Header errWrapped error errMessage string + expectedSyncMode chainSyncState }{ - "announced_block_already_exists_in_disjoint_set": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - pendingBlocks := NewMockDisjointBlockSet(ctrl) - pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(true) - return &chainSync{ - pendingBlocks: pendingBlocks, - } - }, - peerID: somePeer, - blockAnnounceHeader: block2AnnounceHeader, - errWrapped: errAlreadyInDisjointSet, - errMessage: fmt.Sprintf("already in disjoint set: block %s (#%d)", - block2AnnounceHeader.Hash(), block2AnnounceHeader.Number), - }, - "failed_to_add_announced_block_in_disjoint_set": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - pendingBlocks := NewMockDisjointBlockSet(ctrl) - pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) - pendingBlocks.EXPECT().addHeader(block2AnnounceHeader).Return(errTest) - - return &chainSync{ - pendingBlocks: pendingBlocks, - } - }, - peerID: somePeer, - blockAnnounceHeader: block2AnnounceHeader, - errWrapped: errTest, - errMessage: "while adding pending block header: test error", - }, - "announced_block_while_in_bootstrap_mode": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - pendingBlocks := NewMockDisjointBlockSet(ctrl) - pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) - pendingBlocks.EXPECT().addHeader(block2AnnounceHeader).Return(nil) - - state := atomic.Value{} - state.Store(bootstrap) - - return &chainSync{ - pendingBlocks: pendingBlocks, - syncMode: state, - } - }, - peerID: somePeer, - blockAnnounceHeader: block2AnnounceHeader, - }, - "announced_block_while_in_tip_mode": { + // "announced_block_already_exists_in_disjoint_set": { + // chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { + // pendingBlocks := NewMockDisjointBlockSet(ctrl) + // pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(true) + // return &chainSync{ + // pendingBlocks: pendingBlocks, + // } + // }, + // peerID: somePeer, + // blockAnnounceHeader: block2AnnounceHeader, + // errWrapped: errAlreadyInDisjointSet, + // errMessage: fmt.Sprintf("already in disjoint set: block %s (#%d)", + // block2AnnounceHeader.Hash(), block2AnnounceHeader.Number), + // }, + // "failed_to_add_announced_block_in_disjoint_set": { + // chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { + // pendingBlocks := NewMockDisjointBlockSet(ctrl) + // 
pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) + // pendingBlocks.EXPECT().addHeader(block2AnnounceHeader).Return(errTest) + + // return &chainSync{ + // pendingBlocks: pendingBlocks, + // } + // }, + // peerID: somePeer, + // blockAnnounceHeader: block2AnnounceHeader, + // errWrapped: errTest, + // errMessage: "while adding pending block header: test error", + // }, + // "announced_block_while_in_bootstrap_mode": { + // chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { + // pendingBlocks := NewMockDisjointBlockSet(ctrl) + // pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) + // pendingBlocks.EXPECT().addHeader(block2AnnounceHeader).Return(nil) + + // state := atomic.Value{} + // state.Store(bootstrap) + + // return &chainSync{ + // pendingBlocks: pendingBlocks, + // syncMode: state, + // } + // }, + // peerID: somePeer, + // blockAnnounceHeader: block2AnnounceHeader, + // }, + // "announced_block_while_in_tip_mode": { + // chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { + // pendingBlocksMock := NewMockDisjointBlockSet(ctrl) + // pendingBlocksMock.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) + // pendingBlocksMock.EXPECT().addHeader(block2AnnounceHeader).Return(nil) + // pendingBlocksMock.EXPECT().removeBlock(block2AnnounceHeader.Hash()) + // pendingBlocksMock.EXPECT().size().Return(int(0)) + + // blockStateMock := NewMockBlockState(ctrl) + // blockStateMock.EXPECT(). + // HasHeader(block2AnnounceHeader.Hash()). + // Return(false, nil) + + // blockStateMock.EXPECT(). + // BestBlockHeader(). + // Return(block1AnnounceHeader, nil) + + // blockStateMock.EXPECT(). + // GetHighestFinalisedHeader(). + // Return(block2AnnounceHeader, nil) + + // expectedRequest := network.NewBlockRequest(*variadic.MustNewUint32OrHash(block2AnnounceHeader.Hash()), + // 1, network.BootstrapRequestData, network.Descending) + + // fakeBlockBody := types.Body([]types.Extrinsic{}) + // mockedBlockResponse := &network.BlockResponseMessage{ + // BlockData: []*types.BlockData{ + // { + // Hash: block2AnnounceHeader.Hash(), + // Header: block2AnnounceHeader, + // Body: &fakeBlockBody, + // }, + // }, + // } + + // networkMock := NewMockNetwork(ctrl) + // requestMaker := NewMockRequestMaker(ctrl) + // requestMaker.EXPECT(). + // Do(somePeer, expectedRequest, &network.BlockResponseMessage{}). 
+ // DoAndReturn(func(_, _, response any) any { + // responsePtr := response.(*network.BlockResponseMessage) + // *responsePtr = *mockedBlockResponse + // return nil + // }) + + // babeVerifierMock := NewMockBabeVerifier(ctrl) + // storageStateMock := NewMockStorageState(ctrl) + // importHandlerMock := NewMockBlockImportHandler(ctrl) + // telemetryMock := NewMockTelemetry(ctrl) + + // const announceBlock = true + // ensureSuccessfulBlockImportFlow(t, block1AnnounceHeader, mockedBlockResponse.BlockData, + // blockStateMock, babeVerifierMock, storageStateMock, importHandlerMock, telemetryMock, + // announceBlock) + + // workerPool := newSyncWorkerPool(networkMock, requestMaker) + // // include the peer who announced the block in the pool + // workerPool.newPeer(somePeer) + + // state := atomic.Value{} + // state.Store(tip) + + // return &chainSync{ + // pendingBlocks: pendingBlocksMock, + // syncMode: state, + // workerPool: workerPool, + // network: networkMock, + // blockState: blockStateMock, + // babeVerifier: babeVerifierMock, + // telemetry: telemetryMock, + // storageState: storageStateMock, + // blockImportHandler: importHandlerMock, + // } + // }, + // listenForRequests: true, + // peerID: somePeer, + // blockAnnounceHeader: block2AnnounceHeader, + // }, + "announced_block_while_in_tip_mode_but_far_behind_tip": { chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { pendingBlocksMock := NewMockDisjointBlockSet(ctrl) pendingBlocksMock.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) @@ -187,7 +265,17 @@ func Test_chainSync_onImportBlock(t *testing.T) { state := atomic.Value{} state.Store(tip) + fakeBlock := types.NewHeader(block1AnnounceHeader.Hash(), emptyTrieState.MustRoot(), + common.Hash{}, 130, scale.VaryingDataTypeSlice{}) + return &chainSync{ + peerView: map[peer.ID]peerView{ + peer.ID("peerA"): { + who: peer.ID("peerA"), + hash: fakeBlock.Hash(), + number: 130, + }, + }, pendingBlocks: pendingBlocksMock, syncMode: state, workerPool: workerPool, @@ -237,6 +325,115 @@ func Test_chainSync_onImportBlock(t *testing.T) { } } +func Test_chainSync_onBlockAnnounce_tipModeNeedToCatchup(t *testing.T) { + ctrl := gomock.NewController(t) + const somePeer = peer.ID("abc") + + emptyTrieState := storage.NewTrieState(nil) + block1AnnounceHeader := types.NewHeader(common.Hash{}, emptyTrieState.MustRoot(), + common.Hash{}, 1, scale.VaryingDataTypeSlice{}) + block2AnnounceHeader := types.NewHeader(block1AnnounceHeader.Hash(), emptyTrieState.MustRoot(), + common.Hash{}, 2, scale.VaryingDataTypeSlice{}) + + pendingBlocksMock := NewMockDisjointBlockSet(ctrl) + pendingBlocksMock.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) + pendingBlocksMock.EXPECT().addHeader(block2AnnounceHeader).Return(nil) + pendingBlocksMock.EXPECT().removeBlock(block2AnnounceHeader.Hash()) + pendingBlocksMock.EXPECT().size().Return(int(0)) + + blockStateMock := NewMockBlockState(ctrl) + blockStateMock.EXPECT(). + HasHeader(block2AnnounceHeader.Hash()). + Return(false, nil) + + blockStateMock.EXPECT(). + BestBlockHeader(). + Return(block1AnnounceHeader, nil) + + blockStateMock.EXPECT(). + GetHighestFinalisedHeader(). 
+ Return(block2AnnounceHeader, nil) + + expectedRequest := network.NewBlockRequest(*variadic.MustNewUint32OrHash(block2AnnounceHeader.Hash()), + 1, network.BootstrapRequestData, network.Descending) + + fakeBlockBody := types.Body([]types.Extrinsic{}) + mockedBlockResponse := &network.BlockResponseMessage{ + BlockData: []*types.BlockData{ + { + Hash: block2AnnounceHeader.Hash(), + Header: block2AnnounceHeader, + Body: &fakeBlockBody, + }, + }, + } + + networkMock := NewMockNetwork(ctrl) + requestMaker := NewMockRequestMaker(ctrl) + requestMaker.EXPECT(). + Do(somePeer, expectedRequest, &network.BlockResponseMessage{}). + DoAndReturn(func(_, _, response any) any { + responsePtr := response.(*network.BlockResponseMessage) + *responsePtr = *mockedBlockResponse + return nil + }) + + babeVerifierMock := NewMockBabeVerifier(ctrl) + storageStateMock := NewMockStorageState(ctrl) + importHandlerMock := NewMockBlockImportHandler(ctrl) + telemetryMock := NewMockTelemetry(ctrl) + + const announceBlock = true + ensureSuccessfulBlockImportFlow(t, block1AnnounceHeader, mockedBlockResponse.BlockData, + blockStateMock, babeVerifierMock, storageStateMock, importHandlerMock, telemetryMock, + announceBlock) + + workerPool := newSyncWorkerPool(networkMock, requestMaker) + // include the peer who announced the block in the pool + workerPool.newPeer(somePeer) + + state := atomic.Value{} + state.Store(tip) + + fakeBlock := types.NewHeader(block1AnnounceHeader.Hash(), emptyTrieState.MustRoot(), + common.Hash{}, 130, scale.VaryingDataTypeSlice{}) + + stopCh := make(chan struct{}) + chainSync := &chainSync{ + stopCh: stopCh, + peerView: map[peer.ID]peerView{ + peer.ID("peerA"): { + who: peer.ID("peerA"), + hash: fakeBlock.Hash(), + number: 130, // the target is much higher, we should catch up + }, + }, + pendingBlocks: pendingBlocksMock, + syncMode: state, + workerPool: workerPool, + network: networkMock, + blockState: blockStateMock, + babeVerifier: babeVerifierMock, + telemetry: telemetryMock, + storageState: storageStateMock, + blockImportHandler: importHandlerMock, + } + + wg := sync.WaitGroup{} + + wg.Add(1) + go chainSync.workerPool.listenForRequests(stopCh, &wg) + + err := chainSync.onBlockAnnounce(announcedBlock{ + who: somePeer, + header: block2AnnounceHeader, + }) + require.NoError(t, err) + + close(stopCh) + wg.Wait() +} + func TestChainSync_setPeerHead(t *testing.T) { const randomHashString = "0x580d77a9136035a0bc3c3cd86286172f7f81291164c5914266073a30466fba21" randomHash := common.MustHexToHash(randomHashString) From 684e34f933fff803d2cc305a1e5fd33a56f860ba Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 26 Jul 2023 16:11:18 -0400 Subject: [PATCH 108/140] chore: introduce a simple check to ensure chain continuity --- dot/sync/chain_sync.go | 84 ++++++++++++++++++++++++++++++++++-------- 1 file changed, 69 insertions(+), 15 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index d194ea0598..c4a506b135 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -276,7 +276,7 @@ func (cs *chainSync) bootstrapSync() { // we are less than 128 blocks behind the target we can use tip sync cs.syncMode.Store(tip) isSyncedGauge.Set(1) - logger.Debugf("switched sync mode to %d", tip.String()) + logger.Debugf("switched sync mode to %s", tip.String()) return } } @@ -329,7 +329,7 @@ func (cs *chainSync) onBlockAnnounce(announced announcedBlock) error { // we are more than 128 blocks behind the head, switch to bootstrap cs.syncMode.Store(bootstrap) isSyncedGauge.Set(0) - 
logger.Debugf("switched sync mode to %d", bootstrap.String()) + logger.Debugf("switched sync mode to %s", bootstrap.String()) go cs.bootstrapSync() return nil } @@ -686,6 +686,15 @@ taskResultLoop: continue taskResultLoop } + grows := doResponseGrowsTheChain(response.BlockData, syncingChain, + startAtBlock, expectedSyncedBlocks) + if !grows { + logger.Criticalf("response from %s does not grows the ongoing chain", who) + cs.workerPool.punishPeer(taskResult.who) + cs.workerPool.submitRequest(taskResult.request, boundedTo, workersResults) + continue taskResultLoop + } + for _, blockInResponse := range response.BlockData { if slices.Contains(cs.badBlocks, blockInResponse.Hash.String()) { logger.Criticalf("%s sent a known bad block: %s (#%d)", @@ -733,19 +742,6 @@ taskResultLoop: } } - if len(syncingChain) >= 2 { - // ensure the acquired block set forms an actual chain - parentElement := syncingChain[0] - for _, element := range syncingChain[1:] { - if parentElement.Header.Hash() != element.Header.ParentHash { - panic(fmt.Sprintf("expected %s (#%d) be parent of %s (#%d)", - parentElement.Header.Hash(), parentElement.Header.Number, - element.Header.Hash(), element.Header.Number)) - } - parentElement = element - } - } - retreiveBlocksSeconds := time.Since(startTime).Seconds() logger.Debugf("🔽 retrieved %d blocks, took: %.2f seconds, starting process...", expectedSyncedBlocks, retreiveBlocksSeconds) @@ -1037,6 +1033,64 @@ func isResponseAChain(responseBlockData []*types.BlockData) bool { return true } +// doResponseGrowsTheChain will check if the acquired blocks grows the current chain +// matching their parent hashes +func doResponseGrowsTheChain(response, ongoingChain []*types.BlockData, startAtBlock uint, expectedTotal uint32) bool { + // the ongoing chain does not have any element, we can safely insert an item in it + if len(ongoingChain) < 1 { + return true + } + + compareParentHash := func(parent, child *types.BlockData) bool { + return parent.Header.Hash() == child.Header.ParentHash + } + + firstBlockInResponse := response[0] + firstBlockExactIndex := firstBlockInResponse.Header.Number - startAtBlock + if firstBlockExactIndex != 0 { + leftElement := ongoingChain[firstBlockExactIndex-1] + if leftElement != nil && !compareParentHash(leftElement, firstBlockInResponse) { + return false + } + + } + + switch { + // if the reponse contains only one block then we should check both sides + // for example, if the response contains only one block called X we should + // check if its parent hash matches with the left element as well as we should + // check if the right element contains X hash as its parent hash + // ... W <- X -> Y ... + // we can skip left side comparision if X is in the 0 index and we can skip + // right side comparision if X is in the last index + case len(response) == 1: + if uint32(firstBlockExactIndex+1) < expectedTotal { + rightElement := ongoingChain[firstBlockExactIndex+1] + if rightElement != nil && !compareParentHash(firstBlockInResponse, rightElement) { + return false + } + } + // if the reponse contains more than 1 block then we need to compare + // only the start and the end of the acquired response, for example + // let's say we receive a response [C, D, E] and we need to check + // if those values fits correctly: + // ... 
B <- C D E -> F
+	// we skip the left check if its index is equals to 0 and we skip the right
+	// check if it ends in the latest position of the ongoing array
+	case len(response) > 1:
+		lastBlockInResponse := response[len(response)-1]
+		lastBlockExactIndex := lastBlockInResponse.Header.Number - startAtBlock
+		if uint32(lastBlockExactIndex+1) < expectedTotal {
+			rightElement := ongoingChain[lastBlockExactIndex+1]
+			if rightElement != nil && !compareParentHash(lastBlockInResponse, rightElement) {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
 func (cs *chainSync) getHighestBlock() (highestBlock uint, err error) {
 	cs.peerViewLock.RLock()
 	defer cs.peerViewLock.RUnlock()

From 0df449aed3b8ac5b4c20568f54c4ce1992218768 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Thu, 27 Jul 2023 13:46:06 -0400
Subject: [PATCH 109/140] chore: parallel request stream for each available
 peer

---
 chain/westend/defaults.go  |  8 ++++
 chain/westend/genesis.json | 18 +++++++-
 dot/sync/chain_sync.go     | 46 ++++++++++++------
 dot/sync/syncer.go         |  3 +-
 dot/sync/worker.go         | 95 ++++++++++++++++++++++++++++++++++++++
 dot/sync/worker_pool.go    | 58 +++++++++++++++++------
 6 files changed, 196 insertions(+), 32 deletions(-)
 create mode 100644 dot/sync/worker.go

diff --git a/chain/westend/defaults.go b/chain/westend/defaults.go
index f6dcb2bfe5..6603e25d54 100644
--- a/chain/westend/defaults.go
+++ b/chain/westend/defaults.go
@@ -29,6 +29,14 @@ func DefaultConfig() *cfg.Config {
 	config.Core.GrandpaAuthority = false
 	config.Core.Role = 1
 	config.Network.NoMDNS = false
+	config.Network.MaxPeers = 128
+	config.PrometheusExternal = true
+	config.PrometheusPort = 9876
+	config.Log.Sync = "trace"
+	config.Log.Digest = "trace"
+
+	config.Pprof.Enabled = true
+	config.Pprof.ListeningAddress = "0.0.0.0:6060"

 	return config
 }
diff --git a/chain/westend/genesis.json b/chain/westend/genesis.json
index 6527d81001..a6045e6975 100644
--- a/chain/westend/genesis.json
+++ b/chain/westend/genesis.json
@@ -17,7 +17,23 @@
 "/dns/boot-node.helikon.io/tcp/7080/p2p/12D3KooWRFDPyT8vA8mLzh6dJoyujn4QNjeqi6Ch79eSMz9beKXC",
 "/dns/boot-node.helikon.io/tcp/7082/wss/p2p/12D3KooWRFDPyT8vA8mLzh6dJoyujn4QNjeqi6Ch79eSMz9beKXC",
 "/dns/westend.bootnode.amforc.com/tcp/30333/p2p/12D3KooWJ5y9ZgVepBQNW4aabrxgmnrApdVnscqgKWiUu4BNJbC8",
-"/dns/westend.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWJ5y9ZgVepBQNW4aabrxgmnrApdVnscqgKWiUu4BNJbC8"
+"/dns/westend.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWJ5y9ZgVepBQNW4aabrxgmnrApdVnscqgKWiUu4BNJbC8",
+"/dns/westend-bootnode.polkadotters.com/tcp/30333/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5",
+"/dns/westend-bootnode.polkadotters.com/tcp/30334/wss/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5",
+"/dns/boot-cr.gatotech.network/tcp/33300/p2p/12D3KooWQGR1vUhoy6mvQorFp3bZFn6NNezhQZ6NWnVV7tpFgoPd",
+"/dns/boot-cr.gatotech.network/tcp/35300/wss/p2p/12D3KooWQGR1vUhoy6mvQorFp3bZFn6NNezhQZ6NWnVV7tpFgoPd",
+"/dns/boot-westend.metaspan.io/tcp/33012/p2p/12D3KooWNTau7iG4G9cUJSwwt2QJP1W88pUf2SgqsHjRU2RL8pfa",
+"/dns/boot-westend.metaspan.io/tcp/33015/ws/p2p/12D3KooWNTau7iG4G9cUJSwwt2QJP1W88pUf2SgqsHjRU2RL8pfa",
+"/dns/boot-westend.metaspan.io/tcp/33016/wss/p2p/12D3KooWNTau7iG4G9cUJSwwt2QJP1W88pUf2SgqsHjRU2RL8pfa",
+"/dns/westend-bootnode.turboflakes.io/tcp/30310/p2p/12D3KooWJvPDCZmReU46ghpCMJCPVUvUCav4WQdKtXQhZgJdH6tZ",
+"/dns/westend-bootnode.turboflakes.io/tcp/30410/wss/p2p/12D3KooWJvPDCZmReU46ghpCMJCPVUvUCav4WQdKtXQhZgJdH6tZ",
+
"/dns/westend-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWJifoDhCL3swAKt7MWhFb7wLRFD9oG33AL3nAathmU24x", + "/dns/westend-boot-ng.dwellir.com/tcp/30335/p2p/12D3KooWJifoDhCL3swAKt7MWhFb7wLRFD9oG33AL3nAathmU24x", + "/dns/westend-bootnode.radiumblock.com/tcp/30335/wss/p2p/12D3KooWJBowJuX1TaWNWHt8Dz8z44BoCZunLCfFqxA2rLTn6TBD", + "/dns/westend-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWJBowJuX1TaWNWHt8Dz8z44BoCZunLCfFqxA2rLTn6TBD", + "/dns/wnd-bootnode.stakeworld.io/tcp/30320/p2p/12D3KooWBYdKipcNbrV5rCbgT5hco8HMLME7cE9hHC3ckqCKDuzP", + "/dns/wnd-bootnode.stakeworld.io/tcp/30321/ws/p2p/12D3KooWBYdKipcNbrV5rCbgT5hco8HMLME7cE9hHC3ckqCKDuzP", + "/dns/wnd-bootnode.stakeworld.io/tcp/30322/wss/p2p/12D3KooWBYdKipcNbrV5rCbgT5hco8HMLME7cE9hHC3ckqCKDuzP" ], "telemetryEndpoints": [ [ diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index c4a506b135..6f434bff7d 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -76,7 +76,7 @@ type ChainSync interface { stop() // called upon receiving a BlockAnnounceHandshake - setPeerHead(p peer.ID, hash common.Hash, number uint) + onBlockAnnounceHandshake(p peer.ID, hash common.Hash, number uint) error // getSyncMode returns the current syncing state getSyncMode() chainSyncState @@ -186,15 +186,14 @@ func (cs *chainSync) start() { cs.wg.Add(1) go cs.pendingBlocks.run(cs.finalisedCh, cs.stopCh, &cs.wg) - cs.wg.Add(1) - go cs.workerPool.listenForRequests(cs.stopCh, &cs.wg) - - cs.syncMode.Store(bootstrap) + // cs.wg.Add(1) + // go cs.workerPool.listenForRequests(cs.stopCh, &cs.wg) - cs.wg.Add(1) - go func() { - cs.bootstrapSync() - }() + // cs.syncMode.Store(bootstrap) + // cs.wg.Add(1) + // go func() { + // cs.bootstrapSync() + // }() } func (cs *chainSync) stop() { @@ -286,18 +285,37 @@ func (cs *chainSync) getSyncMode() chainSyncState { return cs.syncMode.Load().(chainSyncState) } -// setPeerHead sets a peer's best known block -func (cs *chainSync) setPeerHead(who peer.ID, bestHash common.Hash, bestNumber uint) { +// onBlockAnnounceHandshake sets a peer's best known block +func (cs *chainSync) onBlockAnnounceHandshake(who peer.ID, bestHash common.Hash, bestNumber uint) error { cs.workerPool.fromBlockAnnounce(who) cs.peerViewLock.Lock() - defer cs.peerViewLock.Unlock() - cs.peerView[who] = peerView{ who: who, hash: bestHash, number: bestNumber, } + cs.peerViewLock.Unlock() + + if cs.getSyncMode() == bootstrap { + return nil + } + + _, _, isFarFromTarget, err := cs.isBootstrap() + if err != nil && !errors.Is(err, errNoPeerViews) { + return fmt.Errorf("checking target distance: %w", err) + } + + if !isFarFromTarget { + return nil + } + + // we are more than 128 blocks behind the head, switch to bootstrap + cs.syncMode.Store(bootstrap) + isSyncedGauge.Set(0) + logger.Debugf("switched sync mode to %s", bootstrap.String()) + go cs.bootstrapSync() + return nil } func (cs *chainSync) onBlockAnnounce(announced announcedBlock) error { @@ -320,8 +338,6 @@ func (cs *chainSync) onBlockAnnounce(announced announcedBlock) error { return fmt.Errorf("checking target distance: %w", err) } - fmt.Printf("ON BLOCK ANNOUNCE FAR FROM TARGET: %v\n", isFarFromTarget) - if !isFarFromTarget { return cs.requestAnnouncedBlock(announced) } diff --git a/dot/sync/syncer.go b/dot/sync/syncer.go index 3afd28659a..36221504d6 100644 --- a/dot/sync/syncer.go +++ b/dot/sync/syncer.go @@ -89,8 +89,7 @@ func (s *Service) Stop() error { // HandleBlockAnnounceHandshake notifies the `chainSync` module that // we have received a BlockAnnounceHandshake from the given peer. 
func (s *Service) HandleBlockAnnounceHandshake(from peer.ID, msg *network.BlockAnnounceHandshake) error { - s.chainSync.setPeerHead(from, msg.BestBlockHash, uint(msg.BestBlockNumber)) - return nil + return s.chainSync.onBlockAnnounceHandshake(from, msg.BestBlockHash, uint(msg.BestBlockNumber)) } // HandleBlockAnnounce notifies the `chainSync` module that we have received a block announcement from the given peer. diff --git a/dot/sync/worker.go b/dot/sync/worker.go new file mode 100644 index 0000000000..6445844228 --- /dev/null +++ b/dot/sync/worker.go @@ -0,0 +1,95 @@ +package sync + +import ( + "time" + + "github.com/ChainSafe/gossamer/dot/network" + "github.com/libp2p/go-libp2p/core/peer" +) + +type worker struct { + peerID peer.ID + sharedGuard chan struct{} + + stopCh chan struct{} + doneCh chan struct{} + + queue <-chan *syncTask + exclusiveQueue chan *syncTask + + requestMaker network.RequestMaker +} + +func newWorker(pID peer.ID, sharedGuard chan struct{}, queue <-chan *syncTask, network network.RequestMaker) *worker { + return &worker{ + peerID: pID, + sharedGuard: sharedGuard, + stopCh: make(chan struct{}), + doneCh: make(chan struct{}), + queue: queue, + requestMaker: network, + } +} + +func (w *worker) processTask(task *syncTask) { + w.exclusiveQueue <- task +} + +func (w *worker) start() { + go func() { + defer func() { + w.doneCh <- struct{}{} + }() + + logger.Debugf("[STARTED] worker %s", w.peerID) + for { + select { + case <-w.stopCh: + logger.Debugf("[STOPPED] worker %s", w.peerID) + return + case task := <-w.queue: + executeRequest(w.peerID, w.requestMaker, task, w.sharedGuard) + case task := <-w.exclusiveQueue: + executeRequest(w.peerID, w.requestMaker, task, w.sharedGuard) + } + } + }() +} + +func (w *worker) stop() { + w.stopCh <- struct{}{} + + timeoutTimer := time.NewTimer(30 * time.Second) + select { + case <-w.doneCh: + if !timeoutTimer.Stop() { + <-timeoutTimer.C + } + + return + case <-timeoutTimer.C: + logger.Criticalf("timeout while stopping worker %s", w.peerID) + } +} + +func executeRequest(who peer.ID, requestMaker network.RequestMaker, + task *syncTask, sharedGuard chan struct{}) { + defer func() { + <-sharedGuard + }() + + sharedGuard <- struct{}{} + + request := task.request + logger.Debugf("[EXECUTING] worker %s, block request: %s", who, request) + response := new(network.BlockResponseMessage) + err := requestMaker.Do(who, request, response) + logger.Debugf("[FINISHED] worker %s, err: %s, block data amount: %d", who, err, len(response.BlockData)) + + task.resultCh <- &syncTaskResult{ + who: who, + request: request, + response: response, + err: err, + } +} diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 3ed080cd98..c9c35d575d 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -40,6 +40,7 @@ type peerSyncWorker struct { status byte timesPunished int punishmentTime time.Time + worker *worker } type syncWorkerPool struct { @@ -52,6 +53,8 @@ type syncWorkerPool struct { taskQueue chan *syncTask workers map[peer.ID]*peerSyncWorker ignorePeers map[peer.ID]struct{} + + sharedGuard chan struct{} } func newSyncWorkerPool(net Network, requestMaker network.RequestMaker) *syncWorkerPool { @@ -61,6 +64,7 @@ func newSyncWorkerPool(net Network, requestMaker network.RequestMaker) *syncWork workers: make(map[peer.ID]*peerSyncWorker), taskQueue: make(chan *syncTask, maxRequestsAllowed+1), ignorePeers: make(map[peer.ID]struct{}), + sharedGuard: make(chan struct{}, maxRequestsAllowed), } swp.availableCond = sync.NewCond(&swp.mtx) @@ 
-95,18 +99,22 @@ func (s *syncWorkerPool) newPeer(who peer.ID) { return } - worker, has := s.workers[who] + syncWorker, has := s.workers[who] if !has { - worker = &peerSyncWorker{status: available} - s.workers[who] = worker + syncWorker = &peerSyncWorker{status: available} + syncWorker.worker = newWorker(who, s.sharedGuard, s.taskQueue, s.requestMaker) + syncWorker.worker.start() + s.workers[who] = syncWorker logger.Tracef("potential worker added, total in the pool %d", len(s.workers)) } // check if the punishment is not valid - if worker.status == punished && worker.punishmentTime.Before(time.Now()) { - worker.status = available - s.workers[who] = worker + if syncWorker.status == punished && syncWorker.punishmentTime.Before(time.Now()) { + syncWorker.status = available + syncWorker.worker.start() + + s.workers[who] = syncWorker } } @@ -115,11 +123,26 @@ func (s *syncWorkerPool) newPeer(who peer.ID) { // to perform the request, the response will be dispatch in the resultCh. func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, who *peer.ID, resultCh chan<- *syncTaskResult) { - s.taskQueue <- &syncTask{ + task := &syncTask{ boundTo: who, request: request, resultCh: resultCh, } + + // if the request is bounded to a specific peer then just + // request it and sent through its queue otherwise send + // the request in the general queue where all worker are + // listening on + if who != nil { + s.mtx.RLock() + defer s.mtx.RUnlock() + + syncWorker := s.workers[*who] + syncWorker.worker.processTask(task) + } else { + s.taskQueue <- task + } + } // submitRequests takes an set of requests and will submit to the pool through submitRequest @@ -140,26 +163,33 @@ func (s *syncWorkerPool) punishPeer(who peer.ID) { s.mtx.Lock() defer s.mtx.Unlock() - worker, has := s.workers[who] + syncWorker, has := s.workers[who] if !has { return } - timesPunished := worker.timesPunished + 1 + timesPunished := syncWorker.timesPunished + 1 punishmentTime := time.Duration(timesPunished) * punishmentBaseTimeout logger.Debugf("⏱️ punishement time for peer %s: %.2fs", who, punishmentTime.Seconds()) - s.workers[who] = &peerSyncWorker{ - status: punished, - timesPunished: timesPunished, - punishmentTime: time.Now().Add(punishmentTime), - } + syncWorker.status = punished + syncWorker.timesPunished = timesPunished + syncWorker.punishmentTime = time.Now().Add(punishmentTime) + syncWorker.worker.stop() + + s.workers[who] = syncWorker } func (s *syncWorkerPool) ignorePeerAsWorker(who peer.ID) { s.mtx.Lock() defer s.mtx.Unlock() + syncWorker, has := s.workers[who] + if !has { + return + } + + syncWorker.worker.stop() delete(s.workers, who) s.ignorePeers[who] = struct{}{} } From 1bca2c65819a38077996a7121b5a636bdd32edb0 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 27 Jul 2023 17:25:13 -0400 Subject: [PATCH 110/140] chore: remove `sync.Cond` --- dot/network/request_response.go | 2 +- dot/sync/chain_sync.go | 12 +----------- dot/sync/worker.go | 3 ++- dot/sync/worker_pool.go | 2 +- 4 files changed, 5 insertions(+), 14 deletions(-) diff --git a/dot/network/request_response.go b/dot/network/request_response.go index 1671bca2f2..09956a6d27 100644 --- a/dot/network/request_response.go +++ b/dot/network/request_response.go @@ -70,7 +70,7 @@ func (rrp *RequestResponseProtocol) receiveResponse(stream libp2pnetwork.Stream, } if n == 0 { - return fmt.Errorf("received empty message") + return ErrReceivedEmptyMessage } err = msg.Decode(buf[:n]) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 
6f434bff7d..2f2ae2c1f3 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -529,19 +529,10 @@ func (cs *chainSync) requestPendingBlocks(highestFinalizedHeader *types.Header) func (cs *chainSync) requestMaxBlocksFrom(bestBlockHeader *types.Header) error { startRequestAt := bestBlockHeader.Number + 1 - // we build the set of requests based on the amount of available peers - // in the worker pool, if we have more peers than `maxRequestAllowed` - // so we limit to `maxRequestAllowed` to avoid the error: - // cannot reserve outbound connection: resource limit exceeded - availableWorkers := cs.workerPool.totalWorkers() - if availableWorkers > maxRequestsAllowed { - availableWorkers = maxRequestsAllowed - } - // targetBlockNumber is the virtual target we will request, however // we should bound it to the real target which is collected through // block announces received from other peers - targetBlockNumber := startRequestAt + availableWorkers*128 + targetBlockNumber := startRequestAt + maxRequestsAllowed*128 realTarget, err := cs.getTarget() if err != nil { return fmt.Errorf("while getting target: %w", err) @@ -568,7 +559,6 @@ func (cs *chainSync) requestMaxBlocksFrom(bestBlockHeader *types.Header) error { } resultsQueue := cs.workerPool.submitRequests(requests) - err = cs.handleWorkersResults(resultsQueue, startRequestAt, expectedAmountOfBlocks) if err != nil { return fmt.Errorf("while handling workers results: %w", err) diff --git a/dot/sync/worker.go b/dot/sync/worker.go index 6445844228..feb6c72b44 100644 --- a/dot/sync/worker.go +++ b/dot/sync/worker.go @@ -84,7 +84,6 @@ func executeRequest(who peer.ID, requestMaker network.RequestMaker, logger.Debugf("[EXECUTING] worker %s, block request: %s", who, request) response := new(network.BlockResponseMessage) err := requestMaker.Do(who, request, response) - logger.Debugf("[FINISHED] worker %s, err: %s, block data amount: %d", who, err, len(response.BlockData)) task.resultCh <- &syncTaskResult{ who: who, @@ -92,4 +91,6 @@ func executeRequest(who peer.ID, requestMaker network.RequestMaker, response: response, err: err, } + + logger.Debugf("[FINISHED] worker %s, err: %s, block data amount: %d", who, err, len(response.BlockData)) } diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index c9c35d575d..514587e91f 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -148,7 +148,7 @@ func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, // submitRequests takes an set of requests and will submit to the pool through submitRequest // the response will be dispatch in the resultCh func (s *syncWorkerPool) submitRequests(requests []*network.BlockRequestMessage) (resultCh chan *syncTaskResult) { - resultCh = make(chan *syncTaskResult) + resultCh = make(chan *syncTaskResult, maxRequestsAllowed+1) for _, request := range requests { s.submitRequest(request, nil, resultCh) } From 0d565d1208032ac6ae725ed94c0186292c28b0cf Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 28 Jul 2023 10:06:18 -0400 Subject: [PATCH 111/140] chore: recap old implementation with concurrency guard --- dot/sync/chain_sync_test.go | 4 +- dot/sync/mock_chain_sync_test.go | 14 ++--- dot/sync/syncer_test.go | 2 +- dot/sync/worker_test.go | 88 ++++++++++++++++++++++++++++++++ 4 files changed, 99 insertions(+), 9 deletions(-) create mode 100644 dot/sync/worker_test.go diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index 52d77bd705..070d526c59 100644 --- a/dot/sync/chain_sync_test.go +++ 
b/dot/sync/chain_sync_test.go @@ -434,7 +434,7 @@ func Test_chainSync_onBlockAnnounce_tipModeNeedToCatchup(t *testing.T) { wg.Wait() } -func TestChainSync_setPeerHead(t *testing.T) { +func TestChainSync_onBlockAnnounceHandshake(t *testing.T) { const randomHashString = "0x580d77a9136035a0bc3c3cd86286172f7f81291164c5914266073a30466fba21" randomHash := common.MustHexToHash(randomHashString) @@ -527,7 +527,7 @@ func TestChainSync_setPeerHead(t *testing.T) { t.Run(tname, func(t *testing.T) { ctrl := gomock.NewController(t) cs := tt.newChainSync(t, ctrl) - cs.setPeerHead(tt.peerID, tt.bestHash, tt.bestNumber) + cs.onBlockAnnounceHandshake(tt.peerID, tt.bestHash, tt.bestNumber) view, exists := cs.peerView[tt.peerID] require.True(t, exists) diff --git a/dot/sync/mock_chain_sync_test.go b/dot/sync/mock_chain_sync_test.go index d7ce880044..74cc93cdca 100644 --- a/dot/sync/mock_chain_sync_test.go +++ b/dot/sync/mock_chain_sync_test.go @@ -78,16 +78,18 @@ func (mr *MockChainSyncMockRecorder) onBlockAnnounce(arg0 interface{}) *gomock.C return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "onBlockAnnounce", reflect.TypeOf((*MockChainSync)(nil).onBlockAnnounce), arg0) } -// setPeerHead mocks base method. -func (m *MockChainSync) setPeerHead(p peer.ID, hash common.Hash, number uint) { +// onBlockAnnounceHandshake mocks base method. +func (m *MockChainSync) onBlockAnnounceHandshake(p peer.ID, hash common.Hash, number uint) error { m.ctrl.T.Helper() - m.ctrl.Call(m, "setPeerHead", p, hash, number) + ret := m.ctrl.Call(m, "onBlockAnnounceHandshake", p, hash, number) + ret0, _ := ret[0].(error) + return ret0 } -// setPeerHead indicates an expected call of setPeerHead. -func (mr *MockChainSyncMockRecorder) setPeerHead(p, hash, number interface{}) *gomock.Call { +// onBlockAnnounceHandshake indicates an expected call of onBlockAnnounceHandshake. +func (mr *MockChainSyncMockRecorder) onBlockAnnounceHandshake(p, hash, number interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "setPeerHead", reflect.TypeOf((*MockChainSync)(nil).setPeerHead), p, hash, number) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "onBlockAnnounceHandshake", reflect.TypeOf((*MockChainSync)(nil).onBlockAnnounceHandshake), p, hash, number) } // start mocks base method. diff --git a/dot/sync/syncer_test.go b/dot/sync/syncer_test.go index 5e0573f805..64221216f5 100644 --- a/dot/sync/syncer_test.go +++ b/dot/sync/syncer_test.go @@ -282,7 +282,7 @@ func Test_Service_HandleBlockAnnounceHandshake(t *testing.T) { ctrl := gomock.NewController(t) chainSync := NewMockChainSync(ctrl) - chainSync.EXPECT().setPeerHead(peer.ID("peer"), common.Hash{1}, uint(2)) + chainSync.EXPECT().onBlockAnnounceHandshake(peer.ID("peer"), common.Hash{1}, uint(2)) service := Service{ chainSync: chainSync, diff --git a/dot/sync/worker_test.go b/dot/sync/worker_test.go new file mode 100644 index 0000000000..eeb9b1672f --- /dev/null +++ b/dot/sync/worker_test.go @@ -0,0 +1,88 @@ +package sync + +import ( + "errors" + "fmt" + "testing" + + "github.com/ChainSafe/gossamer/dot/network" + "github.com/golang/mock/gomock" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" +) + +func TestWorkerStop(t *testing.T) { + peerA := peer.ID("peerA") + ctrl := gomock.NewController(t) + + reqMaker := NewMockRequestMaker(ctrl) + reqMaker.EXPECT(). + Do(peerA, nil, gomock.AssignableToTypeOf((*network.BlockResponseMessage)(nil))). 
+		Return(nil)
+
+	sharedGuard := make(chan struct{}, 1)
+	generalQueue := make(chan *syncTask)
+
+	w := newWorker(peerA, sharedGuard, generalQueue, reqMaker)
+	w.start()
+
+	resultCh := make(chan *syncTaskResult)
+	defer close(resultCh)
+
+	generalQueue <- &syncTask{
+		resultCh: resultCh,
+	}
+
+	require.Equal(t, 1, len(sharedGuard))
+	<-resultCh
+
+	w.stop()
+}
+
+func TestWorkerAsyncStop(t *testing.T) {
+	peerA := peer.ID("peerA")
+	ctrl := gomock.NewController(t)
+
+	reqMaker := NewMockRequestMaker(ctrl)
+	reqMaker.EXPECT().
+		Do(peerA, nil, gomock.AssignableToTypeOf((*network.BlockResponseMessage)(nil))).
+		Return(errors.New("mocked error"))
+
+	reqMaker.EXPECT().
+		Do(peerA, nil, gomock.AssignableToTypeOf((*network.BlockResponseMessage)(nil))).
+		Return(nil)
+
+	sharedGuard := make(chan struct{}, 2)
+	generalQueue := make(chan *syncTask)
+
+	w := newWorker(peerA, sharedGuard, generalQueue, reqMaker)
+	w.start()
+
+	doneCh := make(chan struct{})
+	resultCh := make(chan *syncTaskResult, 2)
+	go handleResultsHelper(t, w, resultCh, doneCh)
+
+	// issue two requests in the general channel
+	generalQueue <- &syncTask{
+		resultCh: resultCh,
+	}
+
+	generalQueue <- &syncTask{
+		resultCh: resultCh,
+	}
+
+	close(resultCh)
+	<-doneCh
+}
+
+func handleResultsHelper(t *testing.T, w *worker, resultCh chan *syncTaskResult, doneCh chan<- struct{}) {
+	t.Helper()
+	defer close(doneCh)
+
+	for r := range resultCh {
+		if r.err != nil {
+			fmt.Printf("==> %s\n", r.err)
+			w.stop()
+		}
+	}
+}

From b7ec4bb9839217964ff5e995155f32c950ee84d2 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Mon, 31 Jul 2023 09:33:06 -0400
Subject: [PATCH 112/140] chore: don't punish an already punished peer

---
 dot/sync/worker_pool.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go
index 514587e91f..34273edd08 100644
--- a/dot/sync/worker_pool.go
+++ b/dot/sync/worker_pool.go
@@ -164,7 +164,7 @@ func (s *syncWorkerPool) punishPeer(who peer.ID) {
 	defer s.mtx.Unlock()

 	syncWorker, has := s.workers[who]
-	if !has {
+	if !has || syncWorker.status == punished {
 		return
 	}

From 2c70646f97ec21b5699a515aa0c254b883a65069 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Tue, 1 Aug 2023 18:57:19 -0400
Subject: [PATCH 113/140] chore: implement round-robin worker selection and
 remove sync.Cond completely

---
 dot/sync/chain_sync.go  |  17 ++--
 dot/sync/worker.go      |  21 +++--
 dot/sync/worker_pool.go | 178 +++++++++++++---------------------------
 3 files changed, 75 insertions(+), 141 deletions(-)

diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go
index 2f2ae2c1f3..38b0eeec7b 100644
--- a/dot/sync/chain_sync.go
+++ b/dot/sync/chain_sync.go
@@ -636,11 +636,6 @@ taskResultLoop:
 			request := taskResult.request
 			response := taskResult.response

-			var boundedTo *peer.ID
-			if taskResult.isBounded {
-				boundedTo = &taskResult.who
-			}
-
 			logger.Debugf("task result: peer(%s), with error: %v, with response: %v",
 				taskResult.who, taskResult.err != nil, taskResult.response != nil)

@@ -658,7 +653,7 @@ taskResultLoop:
 					cs.workerPool.punishPeer(who)
 				}

-				cs.workerPool.submitRequest(request, boundedTo, workersResults)
+				cs.workerPool.submitRequest(request, nil, workersResults)
 				continue
 			}

@@ -680,7 +675,7 @@
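Patch 113's subject promises round-robin selection: submitRequests now walks the request list and hands request i to worker i mod N, skipping punished workers, instead of parking callers on a sync.Cond until a peer frees up. A rough, self-contained sketch of that distribution strategy; the request type and worker names here are hypothetical:

    package main

    import "fmt"

    type request string

    // distribute assigns request i to worker i % len(workers), which is the
    // round-robin idea behind the reworked submitRequests.
    func distribute(requests []request, workers []string) map[string][]request {
        assigned := make(map[string][]request, len(workers))
        for i, req := range requests {
            worker := workers[i%len(workers)] // cycle through the workers
            assigned[worker] = append(assigned[worker], req)
        }
        return assigned
    }

    func main() {
        assigned := distribute(
            []request{"blocks 1..128", "blocks 129..256", "blocks 257..384"},
            []string{"alice", "bob"},
        )
        for _, w := range []string{"alice", "bob"} {
            fmt.Println(w, "->", assigned[w])
        }
    }

Dropping the condition variable also removes the busy/available bookkeeping: each worker owns a queue, so backpressure comes from the queue itself rather than from a shared wait loop.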
logger.Criticalf("response from %s is not a chain", who) cs.workerPool.punishPeer(taskResult.who) - cs.workerPool.submitRequest(taskResult.request, boundedTo, workersResults) + cs.workerPool.submitRequest(taskResult.request, nil, workersResults) continue taskResultLoop } @@ -697,7 +692,7 @@ taskResultLoop: if !grows { logger.Criticalf("response from %s does not grows the ongoing chain", who) cs.workerPool.punishPeer(taskResult.who) - cs.workerPool.submitRequest(taskResult.request, boundedTo, workersResults) + cs.workerPool.submitRequest(taskResult.request, nil, workersResults) continue taskResultLoop } @@ -712,7 +707,7 @@ taskResultLoop: }, who) cs.workerPool.ignorePeerAsWorker(taskResult.who) - cs.workerPool.submitRequest(taskResult.request, boundedTo, workersResults) + cs.workerPool.submitRequest(taskResult.request, nil, workersResults) continue taskResultLoop } @@ -742,7 +737,7 @@ taskResultLoop: Direction: network.Ascending, Max: &difference, } - cs.workerPool.submitRequest(taskResult.request, boundedTo, workersResults) + cs.workerPool.submitRequest(taskResult.request, nil, workersResults) continue taskResultLoop } } diff --git a/dot/sync/worker.go b/dot/sync/worker.go index feb6c72b44..68b8b0b323 100644 --- a/dot/sync/worker.go +++ b/dot/sync/worker.go @@ -14,25 +14,30 @@ type worker struct { stopCh chan struct{} doneCh chan struct{} - queue <-chan *syncTask - exclusiveQueue chan *syncTask - + queue chan *syncTask requestMaker network.RequestMaker } -func newWorker(pID peer.ID, sharedGuard chan struct{}, queue <-chan *syncTask, network network.RequestMaker) *worker { +func newWorker(pID peer.ID, sharedGuard chan struct{}, network network.RequestMaker) *worker { return &worker{ peerID: pID, sharedGuard: sharedGuard, stopCh: make(chan struct{}), doneCh: make(chan struct{}), - queue: queue, + queue: make(chan *syncTask, maxRequestsAllowed+1), requestMaker: network, } } -func (w *worker) processTask(task *syncTask) { - w.exclusiveQueue <- task +func (w *worker) processTask(task *syncTask) (enqueued bool) { + select { + case w.queue <- task: + logger.Debugf("[ENQUEUED] worker %s, block request: %s", w.peerID, task.request) + return true + default: + logger.Debugf("[NOT ENQUEUED] worker %s, block request: %s", w.peerID, task.request) + return false + } } func (w *worker) start() { @@ -49,8 +54,6 @@ func (w *worker) start() { return case task := <-w.queue: executeRequest(w.peerID, w.requestMaker, task, w.sharedGuard) - case task := <-w.exclusiveQueue: - executeRequest(w.peerID, w.requestMaker, task, w.sharedGuard) } } }() diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 34273edd08..3c42ffd653 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -9,6 +9,7 @@ import ( "github.com/ChainSafe/gossamer/dot/network" "github.com/libp2p/go-libp2p/core/peer" + "golang.org/x/exp/maps" ) const ( @@ -23,17 +24,15 @@ const ( ) type syncTask struct { - boundTo *peer.ID request *network.BlockRequestMessage resultCh chan<- *syncTaskResult } type syncTaskResult struct { - isBounded bool - who peer.ID - request *network.BlockRequestMessage - response *network.BlockResponseMessage - err error + who peer.ID + request *network.BlockRequestMessage + response *network.BlockResponseMessage + err error } type peerSyncWorker struct { @@ -43,14 +42,16 @@ type peerSyncWorker struct { worker *worker } +func (p *peerSyncWorker) isPunished() bool { + return p.punishmentTime.After(time.Now()) +} + type syncWorkerPool struct { - wg sync.WaitGroup mtx sync.RWMutex availableCond *sync.Cond 
network Network requestMaker network.RequestMaker - taskQueue chan *syncTask workers map[peer.ID]*peerSyncWorker ignorePeers map[peer.ID]struct{} @@ -62,7 +63,6 @@ func newSyncWorkerPool(net Network, requestMaker network.RequestMaker) *syncWork network: net, requestMaker: requestMaker, workers: make(map[peer.ID]*peerSyncWorker), - taskQueue: make(chan *syncTask, maxRequestsAllowed+1), ignorePeers: make(map[peer.ID]struct{}), sharedGuard: make(chan struct{}, maxRequestsAllowed), } @@ -102,7 +102,7 @@ func (s *syncWorkerPool) newPeer(who peer.ID) { syncWorker, has := s.workers[who] if !has { syncWorker = &peerSyncWorker{status: available} - syncWorker.worker = newWorker(who, s.sharedGuard, s.taskQueue, s.requestMaker) + syncWorker.worker = newWorker(who, s.sharedGuard, s.requestMaker) syncWorker.worker.start() s.workers[who] = syncWorker @@ -123,8 +123,8 @@ func (s *syncWorkerPool) newPeer(who peer.ID) { // to perform the request, the response will be dispatch in the resultCh. func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, who *peer.ID, resultCh chan<- *syncTaskResult) { + task := &syncTask{ - boundTo: who, request: request, resultCh: resultCh, } @@ -133,24 +133,62 @@ func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, // request it and sent through its queue otherwise send // the request in the general queue where all worker are // listening on - if who != nil { - s.mtx.RLock() - defer s.mtx.RUnlock() + s.mtx.RLock() + defer s.mtx.RUnlock() + if who != nil { syncWorker := s.workers[*who] - syncWorker.worker.processTask(task) - } else { - s.taskQueue <- task + if !syncWorker.isPunished() { + syncWorker.worker.processTask(task) + return + } } + for syncWorkerPeerID, syncWorker := range s.workers { + if who != nil && *who == syncWorkerPeerID { + continue + } + + if syncWorker.isPunished() { + continue + } + + enqueued := syncWorker.worker.processTask(task) + if enqueued { + break + } + } } // submitRequests takes an set of requests and will submit to the pool through submitRequest // the response will be dispatch in the resultCh func (s *syncWorkerPool) submitRequests(requests []*network.BlockRequestMessage) (resultCh chan *syncTaskResult) { + logger.Debugf("[SENDING] %d requests", len(requests)) resultCh = make(chan *syncTaskResult, maxRequestsAllowed+1) - for _, request := range requests { - s.submitRequest(request, nil, resultCh) + + s.mtx.RLock() + defer s.mtx.RUnlock() + + idx := 0 + allWorkers := maps.Values(s.workers) + for idx < len(requests) { + workerID := idx % len(allWorkers) + syncWorker := allWorkers[workerID] + + if syncWorker.isPunished() { + continue + } + + enqueued := syncWorker.worker.processTask(&syncTask{ + request: requests[idx], + resultCh: resultCh, + }) + + if !enqueued { + continue + } + + idx++ } return resultCh @@ -207,105 +245,3 @@ func (s *syncWorkerPool) totalWorkers() (total uint) { return total } - -// getAvailablePeer returns the very first peer available, if there -// is no peer avaible then the caller should wait for availablePeerCh -func (s *syncWorkerPool) getAvailablePeer() peer.ID { - for peerID, peerSync := range s.workers { - switch peerSync.status { - case punished: - // if the punishedTime has passed then we can - // use it as an available peer - if peerSync.punishmentTime.Before(time.Now()) { - return peerID - } - case available: - return peerID - default: - } - } - - return peer.ID("") -} - -func (s *syncWorkerPool) getPeerByID(peerID peer.ID) *peerSyncWorker { - peerSync, has := 
s.workers[peerID]
-	if !has {
-		return nil
-	}
-
-	return peerSync
-}
-
-func (s *syncWorkerPool) listenForRequests(stopCh chan struct{}, wg *sync.WaitGroup) {
-	defer wg.Done()
-	for {
-		select {
-		case <-stopCh:
-			// wait for ongoing requests to be finished before returning
-			s.wg.Wait()
-			return
-
-		case task := <-s.taskQueue:
-			// whenever a task arrives we try to find an available peer
-			// if the task is directed at some peer then we will wait for
-			// that peer to become available, same happens a normal task
-			// arrives and there is no available peer, then we should wait
-			// for someone to become free and then use it.
-
-			s.mtx.Lock()
-			for {
-				var peerID peer.ID
-				if task.boundTo != nil {
-					peerSync := s.getPeerByID(*task.boundTo)
-					if peerSync != nil && peerSync.status == available {
-						peerID = *task.boundTo
-					}
-				} else {
-					peerID = s.getAvailablePeer()
-				}
-
-				if peerID != peer.ID("") {
-					peerSync := s.workers[peerID]
-					peerSync.status = busy
-					s.workers[peerID] = peerSync
-
-					s.mtx.Unlock()
-
-					s.wg.Add(1)
-					go s.executeRequest(peerID, task)
-					break
-				}
-
-				s.availableCond.Wait()
-			}
-		}
-	}
-}
-
-func (s *syncWorkerPool) executeRequest(who peer.ID, task *syncTask) {
-	defer s.wg.Done()
-	request := task.request
-
-	logger.Debugf("[EXECUTING] worker %s, block request: %s", who, request)
-	response := new(network.BlockResponseMessage)
-	err := s.requestMaker.Do(who, request, response)
-	logger.Debugf("[FINISHED] worker %s, err: %s, block data amount: %d", who, err, len(response.BlockData))
-
-	s.mtx.Lock()
-	worker, has := s.workers[who]
-	if has {
-		worker.status = available
-		s.workers[who] = worker
-	}
-	s.mtx.Unlock()
-	s.availableCond.Signal()
-
-	task.resultCh <- &syncTaskResult{
-		isBounded: task.boundTo != nil,
-		who:       who,
-		request:   request,
-		response:  response,
-		err:       err,
-	}
-}

From fae2f8d356f29ca06628d63c78d4d5cc3d16117f Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Wed, 2 Aug 2023 09:26:18 -0400
Subject: [PATCH 114/140] wip: working on tests

---
 dot/sync/chain_sync.go       | 15 ++----
 dot/sync/chain_sync_test.go  | 90 +++++++++---------------------------
 dot/sync/worker.go           | 10 ++--
 dot/sync/worker_pool.go      | 55 +++++++++++++++++++---
 dot/sync/worker_pool_test.go | 19 +-------
 dot/sync/worker_test.go      |  4 +-
 6 files changed, 88 insertions(+), 105 deletions(-)

diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go
index 38b0eeec7b..9f7064abb4 100644
--- a/dot/sync/chain_sync.go
+++ b/dot/sync/chain_sync.go
@@ -185,20 +185,15 @@ func (cs *chainSync) start() {

 	cs.wg.Add(1)
 	go cs.pendingBlocks.run(cs.finalisedCh, cs.stopCh, &cs.wg)
-
-	// cs.wg.Add(1)
-	// go cs.workerPool.listenForRequests(cs.stopCh, &cs.wg)
-
-	// cs.syncMode.Store(bootstrap)
-	// cs.wg.Add(1)
-	// go func() {
-	// 	cs.bootstrapSync()
-	// }()
 }

 func (cs *chainSync) stop() {
-	close(cs.stopCh)
+	err := cs.workerPool.stop()
+	if err != nil {
+		logger.Criticalf("while stopping worker pool: %w", err)
+	}

+	close(cs.stopCh)
 	allStopCh := make(chan struct{})
 	go func() {
 		defer close(allStopCh)
diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go
index 070d526c59..6543fc55b6 100644
--- a/dot/sync/chain_sync_test.go
+++ b/dot/sync/chain_sync_test.go
@@ -302,10 +302,10 @@ func Test_chainSync_onBlockAnnounce(t *testing.T) {
 			chainSync := tt.chainSyncBuilder(ctrl)
 			stopCh := make(chan struct{})
 			wg := sync.WaitGroup{}
-			if tt.listenForRequests {
-				wg.Add(1)
-				go
chainSync.workerPool.listenForRequests(stopCh, &wg) + // } err := chainSync.onBlockAnnounce(announcedBlock{ who: tt.peerID, @@ -399,6 +399,8 @@ func Test_chainSync_onBlockAnnounce_tipModeNeedToCatchup(t *testing.T) { common.Hash{}, 130, scale.VaryingDataTypeSlice{}) stopCh := make(chan struct{}) + defer close(stopCh) + chainSync := &chainSync{ stopCh: stopCh, peerView: map[peer.ID]peerView{ @@ -419,19 +421,13 @@ func Test_chainSync_onBlockAnnounce_tipModeNeedToCatchup(t *testing.T) { blockImportHandler: importHandlerMock, } - wg := sync.WaitGroup{} - - wg.Add(1) - go chainSync.workerPool.listenForRequests(stopCh, &wg) - + defer chainSync.workerPool.stop() err := chainSync.onBlockAnnounce(announcedBlock{ who: somePeer, header: block2AnnounceHeader, }) require.NoError(t, err) - close(stopCh) - wg.Wait() } func TestChainSync_onBlockAnnounceHandshake(t *testing.T) { @@ -666,16 +662,11 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker(t *testing.T) { // the worker pool executes the workers management cs.workerPool.fromBlockAnnounce(peer.ID("noot")) - stopCh := make(chan struct{}) - wg := sync.WaitGroup{} - wg.Add(1) - go cs.workerPool.listenForRequests(stopCh, &wg) - err = cs.requestMaxBlocksFrom(mockedGenesisHeader) require.NoError(t, err) - close(stopCh) - wg.Wait() + err = cs.workerPool.stop() + require.NoError(t, err) } func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) { @@ -757,16 +748,11 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) { cs.workerPool.fromBlockAnnounce(peer.ID("noot")) cs.workerPool.fromBlockAnnounce(peer.ID("noot2")) - stopCh := make(chan struct{}) - wg := sync.WaitGroup{} - wg.Add(1) - go cs.workerPool.listenForRequests(stopCh, &wg) - err = cs.requestMaxBlocksFrom(mockedGenesisHeader) require.NoError(t, err) - close(stopCh) - wg.Wait() + err = cs.workerPool.stop() + require.NoError(t, err) } func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorkerFailing(t *testing.T) { @@ -868,16 +854,11 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorkerFailing(t *testing. 
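These test hunks all make the same substitution: instead of spawning listenForRequests with a stop channel and WaitGroup, the test tears the pool down through workerPool.stop(), which waits for each worker goroutine to exit within a bounded window. A compact sketch of the underlying stop-with-timeout pattern; the looper type and its fields are illustrative stand-ins for the real worker:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    var errStopTimeout = errors.New("stop timeout")

    type looper struct {
        stopCh chan struct{}
        doneCh chan struct{}
    }

    func (l *looper) run() {
        defer close(l.doneCh) // signals stop() that the loop has fully exited
        for {
            select {
            case <-l.stopCh:
                return
            default:
                time.Sleep(10 * time.Millisecond) // placeholder for real work
            }
        }
    }

    // stop signals the loop and waits for it, but never longer than timeout.
    func (l *looper) stop(timeout time.Duration) error {
        close(l.stopCh)
        timer := time.NewTimer(timeout)
        defer timer.Stop()
        select {
        case <-l.doneCh:
            return nil
        case <-timer.C:
            return fmt.Errorf("%w after %s", errStopTimeout, timeout)
        }
    }

    func main() {
        l := &looper{stopCh: make(chan struct{}), doneCh: make(chan struct{})}
        go l.run()
        fmt.Println(l.stop(time.Second)) // <nil>
    }

Returning an error from stop, rather than logging inside it, is what lets these tests assert clean shutdown with require.NoError.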
cs.workerPool.fromBlockAnnounce(peer.ID("alice")) cs.workerPool.fromBlockAnnounce(peer.ID("bob")) - stopCh := make(chan struct{}) - wg := sync.WaitGroup{} - wg.Add(1) - go cs.workerPool.listenForRequests(stopCh, &wg) - err = cs.requestMaxBlocksFrom(mockedGenesisHeader) require.NoError(t, err) - close(stopCh) - wg.Wait() + err = cs.workerPool.stop() + require.NoError(t, err) // peer should be punished syncWorker, ok := cs.workerPool.workers[peer.ID("bob")] @@ -990,16 +971,11 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithProtocolNotSupported(t *test cs.workerPool.fromBlockAnnounce(peer.ID("alice")) cs.workerPool.fromBlockAnnounce(peer.ID("bob")) - stopCh := make(chan struct{}) - wg := sync.WaitGroup{} - wg.Add(1) - go cs.workerPool.listenForRequests(stopCh, &wg) - err = cs.requestMaxBlocksFrom(mockedGenesisHeader) require.NoError(t, err) - close(stopCh) - wg.Wait() + err = cs.workerPool.stop() + require.NoError(t, err) // peer should be punished syncWorker, ok := cs.workerPool.workers[peer.ID("bob")] @@ -1115,16 +1091,11 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithNilHeaderInResponse(t *testi cs.workerPool.fromBlockAnnounce(peer.ID("alice")) cs.workerPool.fromBlockAnnounce(peer.ID("bob")) - stopCh := make(chan struct{}) - wg := sync.WaitGroup{} - wg.Add(1) - go cs.workerPool.listenForRequests(stopCh, &wg) - err = cs.requestMaxBlocksFrom(mockedGenesisHeader) require.NoError(t, err) - close(stopCh) - wg.Wait() + err = cs.workerPool.stop() + require.NoError(t, err) // peer should be punished syncWorker, ok := cs.workerPool.workers[peer.ID("bob")] @@ -1236,16 +1207,11 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithResponseIsNotAChain(t *testi cs.workerPool.fromBlockAnnounce(peer.ID("alice")) cs.workerPool.fromBlockAnnounce(peer.ID("bob")) - stopCh := make(chan struct{}) - wg := sync.WaitGroup{} - wg.Add(1) - go cs.workerPool.listenForRequests(stopCh, &wg) - err = cs.requestMaxBlocksFrom(mockedGenesisHeader) require.NoError(t, err) - close(stopCh) - wg.Wait() + err = cs.workerPool.stop() + require.NoError(t, err) // peer should be punished syncWorker, ok := cs.workerPool.workers[peer.ID("bob")] @@ -1362,16 +1328,11 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing. 
cs.workerPool.fromBlockAnnounce(peer.ID("alice")) cs.workerPool.fromBlockAnnounce(peer.ID("bob")) - stopCh := make(chan struct{}) - wg := sync.WaitGroup{} - wg.Add(1) - go cs.workerPool.listenForRequests(stopCh, &wg) - err = cs.requestMaxBlocksFrom(mockedGenesisHeader) require.NoError(t, err) - close(stopCh) - wg.Wait() + err = cs.workerPool.stop() + require.NoError(t, err) // peer should be not in the worker pool // peer should be in the ignore list @@ -1456,16 +1417,11 @@ func TestChainSync_BootstrapSync_SucessfulSync_ReceivedPartialBlockData(t *testi cs.workerPool.fromBlockAnnounce(peer.ID("alice")) - stopCh := make(chan struct{}) - wg := sync.WaitGroup{} - wg.Add(1) - go cs.workerPool.listenForRequests(stopCh, &wg) - err = cs.requestMaxBlocksFrom(mockedGenesisHeader) require.NoError(t, err) - close(stopCh) - wg.Wait() + err = cs.workerPool.stop() + require.NoError(t, err) require.Len(t, cs.workerPool.workers, 1) diff --git a/dot/sync/worker.go b/dot/sync/worker.go index 68b8b0b323..dce97ed8ed 100644 --- a/dot/sync/worker.go +++ b/dot/sync/worker.go @@ -1,12 +1,16 @@ package sync import ( + "errors" + "fmt" "time" "github.com/ChainSafe/gossamer/dot/network" "github.com/libp2p/go-libp2p/core/peer" ) +var ErrStopTimeout = errors.New("stop timeout") + type worker struct { peerID peer.ID sharedGuard chan struct{} @@ -59,7 +63,7 @@ func (w *worker) start() { }() } -func (w *worker) stop() { +func (w *worker) stop() error { w.stopCh <- struct{}{} timeoutTimer := time.NewTimer(30 * time.Second) @@ -69,9 +73,9 @@ func (w *worker) stop() { <-timeoutTimer.C } - return + return nil case <-timeoutTimer.C: - logger.Criticalf("timeout while stopping worker %s", w.peerID) + return fmt.Errorf("%w: worker %s", ErrStopTimeout, w.peerID) } } diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 3c42ffd653..0bbad11975 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -4,6 +4,8 @@ package sync import ( + "errors" + "fmt" "sync" "time" @@ -47,8 +49,7 @@ func (p *peerSyncWorker) isPunished() bool { } type syncWorkerPool struct { - mtx sync.RWMutex - availableCond *sync.Cond + mtx sync.RWMutex network Network requestMaker network.RequestMaker @@ -67,10 +68,45 @@ func newSyncWorkerPool(net Network, requestMaker network.RequestMaker) *syncWork sharedGuard: make(chan struct{}, maxRequestsAllowed), } - swp.availableCond = sync.NewCond(&swp.mtx) return swp } +// stop will shutdown all the available workers goroutines +func (s *syncWorkerPool) stop() error { + s.mtx.RLock() + defer s.mtx.RUnlock() + + wg := sync.WaitGroup{} + // make it buffered so the goroutines can write on it + // without beign blocked + errCh := make(chan error, len(s.workers)) + + for _, syncWorker := range s.workers { + if syncWorker.isPunished() { + continue + } + + wg.Add(1) + go func(syncWorker *peerSyncWorker, wg *sync.WaitGroup) { + defer wg.Done() + errCh <- syncWorker.worker.stop() + }(syncWorker, &wg) + } + + wg.Wait() + // closing the errCh then the following for loop don't + // panic due to "all goroutines are asleep - deadlock" + close(errCh) + + var errs error + for err := range errCh { + if err != nil { + errs = errors.Join(errs, err) + } + } + return errs +} + // useConnectedPeers will retrieve all connected peers // through the network layer and use them as sources of blocks func (s *syncWorkerPool) useConnectedPeers() { @@ -197,13 +233,13 @@ func (s *syncWorkerPool) submitRequests(requests []*network.BlockRequestMessage) // punishPeer given a peer.ID we check increase its times punished // and 
apply the punishment time using the base timeout of 5m, so // each time a peer is punished its timeout will increase by 5m -func (s *syncWorkerPool) punishPeer(who peer.ID) { +func (s *syncWorkerPool) punishPeer(who peer.ID) error { s.mtx.Lock() defer s.mtx.Unlock() syncWorker, has := s.workers[who] if !has || syncWorker.status == punished { - return + return nil } timesPunished := syncWorker.timesPunished + 1 @@ -212,10 +248,17 @@ func (s *syncWorkerPool) punishPeer(who peer.ID) { syncWorker.status = punished syncWorker.timesPunished = timesPunished + + // TODO: create a timer in the worker and disable it to receive new tasks + // once the timer triggers then changes the status back to available syncWorker.punishmentTime = time.Now().Add(punishmentTime) - syncWorker.worker.stop() + err := syncWorker.worker.stop() + if err != nil { + return fmt.Errorf("punishing peer: %w", err) + } s.workers[who] = syncWorker + return nil } func (s *syncWorkerPool) ignorePeerAsWorker(who peer.ID) { diff --git a/dot/sync/worker_pool_test.go b/dot/sync/worker_pool_test.go index b6c8836995..c53b17eb13 100644 --- a/dot/sync/worker_pool_test.go +++ b/dot/sync/worker_pool_test.go @@ -4,7 +4,6 @@ package sync import ( - "sync" "testing" "time" @@ -232,14 +231,9 @@ func TestSyncWorkerPool_listenForRequests_submitRequest(t *testing.T) { requestMakerMock := NewMockRequestMaker(ctrl) workerPool := newSyncWorkerPool(networkMock, requestMakerMock) - stopCh := make(chan struct{}) - - wg := sync.WaitGroup{} - wg.Add(1) - go workerPool.listenForRequests(stopCh, &wg) - availablePeer := peer.ID("available-peer") workerPool.newPeer(availablePeer) + defer workerPool.stop() blockHash := common.MustHexToHash("0x750646b852a29e5f3668959916a03d6243a3137e91d0cd36870364931030f707") blockRequest := network.NewBlockRequest(*variadic.MustNewUint32OrHash(blockHash), @@ -290,8 +284,6 @@ func TestSyncWorkerPool_listenForRequests_submitRequest(t *testing.T) { require.Equal(t, syncTaskResult.request, blockRequest) require.Equal(t, syncTaskResult.response, mockedBlockResponse) - close(stopCh) - wg.Wait() } func TestSyncWorkerPool_listenForRequests_busyWorkers(t *testing.T) { @@ -301,11 +293,7 @@ func TestSyncWorkerPool_listenForRequests_busyWorkers(t *testing.T) { networkMock := NewMockNetwork(ctrl) requestMakerMock := NewMockRequestMaker(ctrl) workerPool := newSyncWorkerPool(networkMock, requestMakerMock) - - stopCh := make(chan struct{}) - wg := sync.WaitGroup{} - wg.Add(1) - go workerPool.listenForRequests(stopCh, &wg) + defer workerPool.stop() availablePeer := peer.ID("available-peer") workerPool.newPeer(availablePeer) @@ -382,7 +370,4 @@ func TestSyncWorkerPool_listenForRequests_busyWorkers(t *testing.T) { require.Equal(t, syncTaskResult.response, secondMockedBlockResponse) require.Equal(t, uint(1), workerPool.totalWorkers()) - - close(stopCh) - wg.Wait() } diff --git a/dot/sync/worker_test.go b/dot/sync/worker_test.go index eeb9b1672f..8e262e1f8c 100644 --- a/dot/sync/worker_test.go +++ b/dot/sync/worker_test.go @@ -23,7 +23,7 @@ func TestWorkerStop(t *testing.T) { sharedGuard := make(chan struct{}, 1) generalQueue := make(chan *syncTask) - w := newWorker(peerA, sharedGuard, generalQueue, reqMaker) + w := newWorker(peerA, sharedGuard, reqMaker) w.start() resultCh := make(chan *syncTaskResult) @@ -55,7 +55,7 @@ func TestWorkerAsyncStop(t *testing.T) { sharedGuard := make(chan struct{}, 2) generalQueue := make(chan *syncTask) - w := newWorker(peerA, sharedGuard, generalQueue, reqMaker) + w := newWorker(peerA, sharedGuard, reqMaker) 
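The sharedGuard passed to newWorker above survives this refactor: it is the pool-wide concurrency limiter, a buffered channel used as a counting semaphore so that at most maxRequestsAllowed block requests are in flight across all workers combined. A self-contained sketch of the idiom; only the maxRequestsAllowed name comes from the series, the rest is illustrative:

    package main

    import (
        "fmt"
        "sync"
    )

    const maxRequestsAllowed = 3

    func executeGuarded(id int, sharedGuard chan struct{}, wg *sync.WaitGroup) {
        defer wg.Done()
        sharedGuard <- struct{}{}        // acquire a slot; blocks when all slots are taken
        defer func() { <-sharedGuard }() // release the slot when the request finishes
        fmt.Printf("request %d running\n", id)
    }

    func main() {
        sharedGuard := make(chan struct{}, maxRequestsAllowed)
        var wg sync.WaitGroup
        for i := 0; i < 10; i++ {
            wg.Add(1)
            go executeGuarded(i, sharedGuard, &wg)
        }
        wg.Wait()
    }

Because the guard is shared rather than per-worker, adding more peers to the pool increases parallelism only up to the cap, which avoids the "cannot reserve outbound connection: resource limit exceeded" failures mentioned earlier in the series.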
w.start()

 	doneCh := make(chan struct{})
 	resultCh := make(chan *syncTaskResult, 2)
 	go handleResultsHelper(t, w, resultCh, doneCh)

From 2ea02f13b6b854a4ed30c1e6b6b82665229c8e08 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Wed, 2 Aug 2023 14:46:06 -0400
Subject: [PATCH 115/140] chore: apply punishment without stopping the worker

---
 dot/sync/chain_sync.go       | 26 +++++++++-----
 dot/sync/chain_sync_test.go  | 10 +++---
 dot/sync/worker.go           | 70 +++++++++++++++++++++++++++---------
 dot/sync/worker_pool.go      | 61 +++++++++++++------------------
 dot/sync/worker_pool_test.go | 11 ++----
 dot/sync/worker_test.go      |  9 +++--
 6 files changed, 108 insertions(+), 79 deletions(-)

diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go
index 9f7064abb4..404dee38ba 100644
--- a/dot/sync/chain_sync.go
+++ b/dot/sync/chain_sync.go
@@ -645,7 +645,10 @@ taskResultLoop:
 						Reason: peerset.BadProtocolReason,
 					}, who)
 				}
-				cs.workerPool.punishPeer(who)
+
+				if err := cs.workerPool.punishPeer(who); err != nil {
+					logger.Errorf("punishing peer: %w", err)
+				}
 			}

 			cs.workerPool.submitRequest(request, nil, workersResults)
@@ -669,7 +672,10 @@ taskResultLoop:
 					}, who)
 				}

-				cs.workerPool.punishPeer(taskResult.who)
+				if err := cs.workerPool.punishPeer(who); err != nil {
+					logger.Errorf("punishing peer: %w", err)
+				}
+
 				cs.workerPool.submitRequest(taskResult.request, nil, workersResults)
 				continue taskResultLoop
 			}

@@ -677,7 +683,9 @@ taskResultLoop:
 			isChain := isResponseAChain(response.BlockData)
 			if !isChain {
 				logger.Criticalf("response from %s is not a chain", who)
-				cs.workerPool.punishPeer(taskResult.who)
+				if err := cs.workerPool.punishPeer(who); err != nil {
+					logger.Errorf("punishing peer: %w", err)
+				}
 				cs.workerPool.submitRequest(taskResult.request, nil, workersResults)
 				continue taskResultLoop
 			}

@@ -686,7 +694,9 @@ taskResultLoop:
 				startAtBlock, expectedSyncedBlocks)
 			if !grows {
 				logger.Criticalf("response from %s does not grows the ongoing chain", who)
-				cs.workerPool.punishPeer(taskResult.who)
+				if err := cs.workerPool.punishPeer(who); err != nil {
+					logger.Errorf("punishing peer: %w", err)
+				}
 				cs.workerPool.submitRequest(taskResult.request, nil, workersResults)
 				continue taskResultLoop
 			}

@@ -1052,13 +1062,13 @@
 	}

 	switch {
-	// if the reponse contains only one block then we should check both sides
+	// if the response contains only one block then we should check both sides
 	// for example, if the response contains only one block called X we should
 	// check if its parent hash matches with the left element as well as we should
 	// check if the right element contains X hash as its parent hash
 	// ... W <- X -> Y ...
- // we can skip left side comparision if X is in the 0 index and we can skip - // right side comparision if X is in the last index + // we can skip left side comparison if X is in the 0 index and we can skip + // right side comparison if X is in the last index case len(response) == 1: if uint32(firstBlockExactIndex+1) < expectedTotal { rightElement := ongoingChain[firstBlockExactIndex+1] @@ -1066,7 +1076,7 @@ func doResponseGrowsTheChain(response, ongoingChain []*types.BlockData, startAtB return false } } - // if the reponse contains more than 1 block then we need to compare + // if the response contains more than 1 block then we need to compare // only the start and the end of the acquired response, for example // let's say we receive a response [C, D, E] and we need to check // if those values fits correctly: diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index 6543fc55b6..f9c85ab86c 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -534,7 +534,7 @@ func TestChainSync_onBlockAnnounceHandshake(t *testing.T) { if tt.shouldBeAWorker { syncWorker, exists := cs.workerPool.workers[tt.peerID] require.True(t, exists) - require.Equal(t, tt.workerStatus, syncWorker.status) + require.Equal(t, tt.workerStatus, syncWorker.worker.status) } else { _, exists := cs.workerPool.workers[tt.peerID] require.False(t, exists) @@ -863,7 +863,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorkerFailing(t *testing. // peer should be punished syncWorker, ok := cs.workerPool.workers[peer.ID("bob")] require.True(t, ok) - require.Equal(t, punished, syncWorker.status) + require.Equal(t, punished, syncWorker.worker.status) } func TestChainSync_BootstrapSync_SuccessfulSync_WithProtocolNotSupported(t *testing.T) { @@ -980,7 +980,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithProtocolNotSupported(t *test // peer should be punished syncWorker, ok := cs.workerPool.workers[peer.ID("bob")] require.True(t, ok) - require.Equal(t, punished, syncWorker.status) + require.Equal(t, punished, syncWorker.worker.status) } func TestChainSync_BootstrapSync_SuccessfulSync_WithNilHeaderInResponse(t *testing.T) { @@ -1100,7 +1100,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithNilHeaderInResponse(t *testi // peer should be punished syncWorker, ok := cs.workerPool.workers[peer.ID("bob")] require.True(t, ok) - require.Equal(t, punished, syncWorker.status) + require.Equal(t, punished, syncWorker.worker.status) } func TestChainSync_BootstrapSync_SuccessfulSync_WithResponseIsNotAChain(t *testing.T) { @@ -1216,7 +1216,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithResponseIsNotAChain(t *testi // peer should be punished syncWorker, ok := cs.workerPool.workers[peer.ID("bob")] require.True(t, ok) - require.Equal(t, punished, syncWorker.status) + require.Equal(t, punished, syncWorker.worker.status) } func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing.T) { diff --git a/dot/sync/worker.go b/dot/sync/worker.go index dce97ed8ed..f2ab771821 100644 --- a/dot/sync/worker.go +++ b/dot/sync/worker.go @@ -3,6 +3,7 @@ package sync import ( "errors" "fmt" + "sync" "time" "github.com/ChainSafe/gossamer/dot/network" @@ -12,11 +13,14 @@ import ( var ErrStopTimeout = errors.New("stop timeout") type worker struct { + mtx sync.Mutex + status byte peerID peer.ID sharedGuard chan struct{} - stopCh chan struct{} - doneCh chan struct{} + punishment chan time.Duration + stopCh chan struct{} + doneCh chan struct{} queue chan *syncTask requestMaker 
network.RequestMaker @@ -26,35 +30,39 @@ func newWorker(pID peer.ID, sharedGuard chan struct{}, network network.RequestMa return &worker{ peerID: pID, sharedGuard: sharedGuard, + punishment: make(chan time.Duration), stopCh: make(chan struct{}), doneCh: make(chan struct{}), - queue: make(chan *syncTask, maxRequestsAllowed+1), + queue: make(chan *syncTask, maxRequestsAllowed), requestMaker: network, - } -} - -func (w *worker) processTask(task *syncTask) (enqueued bool) { - select { - case w.queue <- task: - logger.Debugf("[ENQUEUED] worker %s, block request: %s", w.peerID, task.request) - return true - default: - logger.Debugf("[NOT ENQUEUED] worker %s, block request: %s", w.peerID, task.request) - return false + status: available, } } func (w *worker) start() { go func() { defer func() { - w.doneCh <- struct{}{} + logger.Debugf("[STOPPED] worker %s", w.peerID) + close(w.doneCh) }() logger.Debugf("[STARTED] worker %s", w.peerID) for { select { + case punishmentDuration := <-w.punishment: + logger.Debugf("⏱️ punishement time for peer %s: %.2fs", w.peerID, punishmentDuration.Seconds()) + punishmentTimer := time.NewTimer(punishmentDuration) + select { + case <-punishmentTimer.C: + w.mtx.Lock() + w.status = available + w.mtx.Unlock() + + case <-w.stopCh: + return + } + case <-w.stopCh: - logger.Debugf("[STOPPED] worker %s", w.peerID) return case task := <-w.queue: executeRequest(w.peerID, w.requestMaker, task, w.sharedGuard) @@ -63,8 +71,36 @@ func (w *worker) start() { }() } +func (w *worker) processTask(task *syncTask) (enqueued bool) { + if w.isPunished() { + return false + } + + select { + case w.queue <- task: + logger.Debugf("[ENQUEUED] worker %s, block request: %s", w.peerID, task.request) + return true + default: + return false + } +} + +func (w *worker) punish(duration time.Duration) { + w.punishment <- duration + + w.mtx.Lock() + defer w.mtx.Unlock() + w.status = punished +} + +func (w *worker) isPunished() bool { + w.mtx.Lock() + defer w.mtx.Unlock() + return w.status == punished +} + func (w *worker) stop() error { - w.stopCh <- struct{}{} + close(w.stopCh) timeoutTimer := time.NewTimer(30 * time.Second) select { diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 0bbad11975..7cd59a7beb 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -38,14 +38,12 @@ type syncTaskResult struct { } type peerSyncWorker struct { - status byte - timesPunished int - punishmentTime time.Time - worker *worker + timesPunished int + worker *worker } func (p *peerSyncWorker) isPunished() bool { - return p.punishmentTime.After(time.Now()) + return p.worker.isPunished() } type syncWorkerPool struct { @@ -135,23 +133,18 @@ func (s *syncWorkerPool) newPeer(who peer.ID) { return } - syncWorker, has := s.workers[who] - if !has { - syncWorker = &peerSyncWorker{status: available} - syncWorker.worker = newWorker(who, s.sharedGuard, s.requestMaker) - syncWorker.worker.start() - - s.workers[who] = syncWorker - logger.Tracef("potential worker added, total in the pool %d", len(s.workers)) + _, has := s.workers[who] + if has { + return } - // check if the punishment is not valid - if syncWorker.status == punished && syncWorker.punishmentTime.Before(time.Now()) { - syncWorker.status = available - syncWorker.worker.start() - - s.workers[who] = syncWorker + syncWorker := &peerSyncWorker{ + worker: newWorker(who, s.sharedGuard, s.requestMaker), } + + syncWorker.worker.start() + s.workers[who] = syncWorker + logger.Tracef("potential worker added, total in the pool %d", len(s.workers)) } // 
submitRequest given a request, the worker pool will get the peer given the peer.ID @@ -238,41 +231,37 @@ func (s *syncWorkerPool) punishPeer(who peer.ID) error { defer s.mtx.Unlock() syncWorker, has := s.workers[who] - if !has || syncWorker.status == punished { + if !has || syncWorker.isPunished() { return nil } timesPunished := syncWorker.timesPunished + 1 punishmentTime := time.Duration(timesPunished) * punishmentBaseTimeout - logger.Debugf("⏱️ punishement time for peer %s: %.2fs", who, punishmentTime.Seconds()) - syncWorker.status = punished syncWorker.timesPunished = timesPunished - - // TODO: create a timer in the worker and disable it to receive new tasks - // once the timer triggers then changes the status back to available - syncWorker.punishmentTime = time.Now().Add(punishmentTime) - err := syncWorker.worker.stop() - if err != nil { - return fmt.Errorf("punishing peer: %w", err) - } + syncWorker.worker.punish(punishmentTime) s.workers[who] = syncWorker return nil } -func (s *syncWorkerPool) ignorePeerAsWorker(who peer.ID) { +func (s *syncWorkerPool) ignorePeerAsWorker(who peer.ID) error { s.mtx.Lock() defer s.mtx.Unlock() syncWorker, has := s.workers[who] if !has { - return + return nil + } + + err := syncWorker.worker.stop() + if err != nil { + return fmt.Errorf("stopping worker: %w", err) } - syncWorker.worker.stop() delete(s.workers, who) s.ignorePeers[who] = struct{}{} + return nil } // totalWorkers only returns available or busy workers @@ -280,9 +269,9 @@ func (s *syncWorkerPool) totalWorkers() (total uint) { s.mtx.RLock() defer s.mtx.RUnlock() - for _, worker := range s.workers { - if worker.status == available { - total += 1 + for _, syncWorker := range s.workers { + if !syncWorker.worker.isPunished() { + total++ } } diff --git a/dot/sync/worker_pool_test.go b/dot/sync/worker_pool_test.go index c53b17eb13..8e40e38153 100644 --- a/dot/sync/worker_pool_test.go +++ b/dot/sync/worker_pool_test.go @@ -160,7 +160,9 @@ func TestSyncWorkerPool_newPeer(t *testing.T) { return newSyncWorkerPool(nil, nil) }, expectedPool: map[peer.ID]*peerSyncWorker{ - peer.ID("peer-1"): {status: available}, + peer.ID("peer-1"): { + worker: &worker{status: available}, + }, }, }, "peer_to_ignore": { @@ -271,13 +273,6 @@ func TestSyncWorkerPool_listenForRequests_submitRequest(t *testing.T) { totalWorkers := workerPool.totalWorkers() require.Zero(t, totalWorkers) - workerPool.mtx.RLock() - peerSync, has := workerPool.workers[availablePeer] - require.True(t, has) - require.Equal(t, peerSync.status, busy) - - workerPool.mtx.RUnlock() - syncTaskResult := <-resultCh require.NoError(t, syncTaskResult.err) require.Equal(t, syncTaskResult.who, availablePeer) diff --git a/dot/sync/worker_test.go b/dot/sync/worker_test.go index 8e262e1f8c..91d4d5952f 100644 --- a/dot/sync/worker_test.go +++ b/dot/sync/worker_test.go @@ -53,7 +53,6 @@ func TestWorkerAsyncStop(t *testing.T) { Return(nil) sharedGuard := make(chan struct{}, 2) - generalQueue := make(chan *syncTask) w := newWorker(peerA, sharedGuard, reqMaker) w.start() @@ -63,13 +62,13 @@ func TestWorkerAsyncStop(t *testing.T) { go handleResultsHelper(t, w, resultCh, doneCh) // issue two requests in the general channel - generalQueue <- &syncTask{ + w.processTask(&syncTask{ resultCh: resultCh, - } + }) - generalQueue <- &syncTask{ + w.processTask(&syncTask{ resultCh: resultCh, - } + }) close(resultCh) <-doneCh From 3bf881a815b4d05b1dd69808ee570bbd70fc6cfc Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 2 Aug 2023 14:59:58 -0400 Subject: [PATCH 116/140] 
chore: fix lint --- dot/sync/chain_sync.go | 21 ++++-------- dot/sync/chain_sync_test.go | 16 ++++----- dot/sync/worker_pool.go | 5 ++- dot/sync/worker_pool_test.go | 66 ++++++++++++++++++------------------ 4 files changed, 50 insertions(+), 58 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 404dee38ba..0d0a445536 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -646,9 +646,7 @@ taskResultLoop: }, who) } - if err := cs.workerPool.punishPeer(who); err != nil { - logger.Errorf("punishing peer: %w", err) - } + cs.workerPool.punishPeer(who) } cs.workerPool.submitRequest(request, nil, workersResults) @@ -672,10 +670,7 @@ taskResultLoop: }, who) } - if err := cs.workerPool.punishPeer(who); err != nil { - logger.Errorf("punishing peer: %w", err) - } - + cs.workerPool.punishPeer(who) cs.workerPool.submitRequest(taskResult.request, nil, workersResults) continue taskResultLoop } @@ -683,9 +678,7 @@ taskResultLoop: isChain := isResponseAChain(response.BlockData) if !isChain { logger.Criticalf("response from %s is not a chain", who) - if err := cs.workerPool.punishPeer(who); err != nil { - logger.Errorf("punishing peer: %w", err) - } + cs.workerPool.punishPeer(who) cs.workerPool.submitRequest(taskResult.request, nil, workersResults) continue taskResultLoop } @@ -694,9 +687,7 @@ taskResultLoop: startAtBlock, expectedSyncedBlocks) if !grows { logger.Criticalf("response from %s does not grows the ongoing chain", who) - if err := cs.workerPool.punishPeer(who); err != nil { - logger.Errorf("punishing peer: %w", err) - } + cs.workerPool.punishPeer(who) cs.workerPool.submitRequest(taskResult.request, nil, workersResults) continue taskResultLoop } @@ -711,7 +702,9 @@ taskResultLoop: Reason: peerset.BadBlockAnnouncementReason, }, who) - cs.workerPool.ignorePeerAsWorker(taskResult.who) + if err := cs.workerPool.ignorePeerAsWorker(taskResult.who); err != nil { + logger.Errorf("ignoring peer: %w", err) + } cs.workerPool.submitRequest(taskResult.request, nil, workersResults) continue taskResultLoop } diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index f9c85ab86c..745b6071b2 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -479,10 +479,10 @@ func TestChainSync_onBlockAnnounceHandshake(t *testing.T) { networkMock := NewMockNetwork(ctrl) workerPool := newSyncWorkerPool(networkMock, NewMockRequestMaker(nil)) workerPool.workers = map[peer.ID]*peerSyncWorker{ - peer.ID("peer-test"): { - status: punished, - punishmentTime: time.Now().Add(3 * time.Hour), - }, + // peer.ID("peer-test"): { + // status: punished, + // punishmentTime: time.Now().Add(3 * time.Hour), + // }, } cs := newChainSyncTest(t, ctrl) @@ -500,10 +500,10 @@ func TestChainSync_onBlockAnnounceHandshake(t *testing.T) { networkMock := NewMockNetwork(ctrl) workerPool := newSyncWorkerPool(networkMock, NewMockRequestMaker(nil)) workerPool.workers = map[peer.ID]*peerSyncWorker{ - peer.ID("peer-test"): { - status: punished, - punishmentTime: time.Now().Add(-3 * time.Hour), - }, + // peer.ID("peer-test"): { + // status: punished, + // punishmentTime: time.Now().Add(-3 * time.Hour), + // }, } cs := newChainSyncTest(t, ctrl) diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 7cd59a7beb..fa15a49fb7 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -226,13 +226,13 @@ func (s *syncWorkerPool) submitRequests(requests []*network.BlockRequestMessage) // punishPeer given a peer.ID we check increase its times punished // and apply the 
punishment time using the base timeout of 5m, so // each time a peer is punished its timeout will increase by 5m -func (s *syncWorkerPool) punishPeer(who peer.ID) error { +func (s *syncWorkerPool) punishPeer(who peer.ID) { s.mtx.Lock() defer s.mtx.Unlock() syncWorker, has := s.workers[who] if !has || syncWorker.isPunished() { - return nil + return } timesPunished := syncWorker.timesPunished + 1 @@ -242,7 +242,6 @@ func (s *syncWorkerPool) punishPeer(who peer.ID) error { syncWorker.worker.punish(punishmentTime) s.workers[who] = syncWorker - return nil } func (s *syncWorkerPool) ignorePeerAsWorker(who peer.ID) error { diff --git a/dot/sync/worker_pool_test.go b/dot/sync/worker_pool_test.go index 8e40e38153..8b43ceb204 100644 --- a/dot/sync/worker_pool_test.go +++ b/dot/sync/worker_pool_test.go @@ -18,7 +18,7 @@ import ( func TestSyncWorkerPool_useConnectedPeers(t *testing.T) { t.Parallel() - stablePunishmentTime := time.Now().Add(time.Minute * 2) + //stablePunishmentTime := time.Now().Add(time.Minute * 2) cases := map[string]struct { setupWorkerPool func(t *testing.T) *syncWorkerPool @@ -50,9 +50,9 @@ func TestSyncWorkerPool_useConnectedPeers(t *testing.T) { return newSyncWorkerPool(networkMock, nil) }, expectedPool: map[peer.ID]*peerSyncWorker{ - peer.ID("available-1"): {status: available}, - peer.ID("available-2"): {status: available}, - peer.ID("available-3"): {status: available}, + // peer.ID("available-1"): {status: available}, + // peer.ID("available-2"): {status: available}, + // peer.ID("available-3"): {status: available}, }, }, "2_available_peers_1_to_ignore": { @@ -71,8 +71,8 @@ func TestSyncWorkerPool_useConnectedPeers(t *testing.T) { return workerPool }, expectedPool: map[peer.ID]*peerSyncWorker{ - peer.ID("available-1"): {status: available}, - peer.ID("available-2"): {status: available}, + // peer.ID("available-1"): {status: available}, + // peer.ID("available-2"): {status: available}, }, }, "peer_punishment_not_valid_anymore": { @@ -88,19 +88,19 @@ func TestSyncWorkerPool_useConnectedPeers(t *testing.T) { }) workerPool := newSyncWorkerPool(networkMock, nil) workerPool.workers[peer.ID("available-3")] = &peerSyncWorker{ - status: punished, - // arbitrary unix value - punishmentTime: time.Unix(1000, 0), + // status: punished, + // // arbitrary unix value + // punishmentTime: time.Unix(1000, 0), } return workerPool }, expectedPool: map[peer.ID]*peerSyncWorker{ - peer.ID("available-1"): {status: available}, - peer.ID("available-2"): {status: available}, - peer.ID("available-3"): { - status: available, - punishmentTime: time.Unix(1000, 0), - }, + // peer.ID("available-1"): {status: available}, + // peer.ID("available-2"): {status: available}, + // peer.ID("available-3"): { + // status: available, + // punishmentTime: time.Unix(1000, 0), + // }, }, }, "peer_punishment_still_valid": { @@ -116,18 +116,18 @@ func TestSyncWorkerPool_useConnectedPeers(t *testing.T) { }) workerPool := newSyncWorkerPool(networkMock, nil) workerPool.workers[peer.ID("available-3")] = &peerSyncWorker{ - status: punished, - punishmentTime: stablePunishmentTime, + // status: punished, + // punishmentTime: stablePunishmentTime, } return workerPool }, expectedPool: map[peer.ID]*peerSyncWorker{ - peer.ID("available-1"): {status: available}, - peer.ID("available-2"): {status: available}, - peer.ID("available-3"): { - status: punished, - punishmentTime: stablePunishmentTime, - }, + // peer.ID("available-1"): {status: available}, + // peer.ID("available-2"): {status: available}, + // peer.ID("available-3"): { + // 
status: punished, + // punishmentTime: stablePunishmentTime, + // }, }, }, } @@ -147,7 +147,7 @@ func TestSyncWorkerPool_useConnectedPeers(t *testing.T) { func TestSyncWorkerPool_newPeer(t *testing.T) { t.Parallel() - stablePunishmentTime := time.Now().Add(time.Minute * 2) + //stablePunishmentTime := time.Now().Add(time.Minute * 2) cases := map[string]struct { peerID peer.ID @@ -179,16 +179,16 @@ func TestSyncWorkerPool_newPeer(t *testing.T) { setupWorkerPool: func(*testing.T) *syncWorkerPool { workerPool := newSyncWorkerPool(nil, nil) workerPool.workers[peer.ID("free-again")] = &peerSyncWorker{ - status: punished, - // arbitrary unix value - punishmentTime: time.Unix(1000, 0), + // status: punished, + // // arbitrary unix value + // punishmentTime: time.Unix(1000, 0), } return workerPool }, expectedPool: map[peer.ID]*peerSyncWorker{ peer.ID("free-again"): { - status: available, - punishmentTime: time.Unix(1000, 0), + // status: available, + // punishmentTime: time.Unix(1000, 0), }, }, }, @@ -198,15 +198,15 @@ func TestSyncWorkerPool_newPeer(t *testing.T) { workerPool := newSyncWorkerPool(nil, nil) workerPool.workers[peer.ID("peer_punished")] = &peerSyncWorker{ - status: punished, - punishmentTime: stablePunishmentTime, + // status: punished, + // punishmentTime: stablePunishmentTime, } return workerPool }, expectedPool: map[peer.ID]*peerSyncWorker{ peer.ID("peer_punished"): { - status: punished, - punishmentTime: stablePunishmentTime, + // status: punished, + // punishmentTime: stablePunishmentTime, }, }, }, From 01530e1dab6589e5d2df31aa2fc0ef7233fdb0f2 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 2 Aug 2023 15:05:45 -0400 Subject: [PATCH 117/140] chore: add license --- dot/sync/worker.go | 3 +++ dot/sync/worker_test.go | 3 +++ 2 files changed, 6 insertions(+) diff --git a/dot/sync/worker.go b/dot/sync/worker.go index f2ab771821..4aa55e6050 100644 --- a/dot/sync/worker.go +++ b/dot/sync/worker.go @@ -1,3 +1,6 @@ +// Copyright 2023 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + package sync import ( diff --git a/dot/sync/worker_test.go b/dot/sync/worker_test.go index 91d4d5952f..94c1e6bae0 100644 --- a/dot/sync/worker_test.go +++ b/dot/sync/worker_test.go @@ -1,3 +1,6 @@ +// Copyright 2023 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + package sync import ( From ab39ccc91d73cbc359a01ea3d8f0e41a221324bc Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 2 Aug 2023 15:11:55 -0400 Subject: [PATCH 118/140] chore: change to default values to `westend` --- chain/westend/defaults.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/chain/westend/defaults.go b/chain/westend/defaults.go index 6603e25d54..f6dcb2bfe5 100644 --- a/chain/westend/defaults.go +++ b/chain/westend/defaults.go @@ -29,14 +29,6 @@ func DefaultConfig() *cfg.Config { config.Core.GrandpaAuthority = false config.Core.Role = 1 config.Network.NoMDNS = false - config.Network.MaxPeers = 128 - config.PrometheusExternal = true - config.PrometheusPort = 9876 - config.Log.Sync = "trace" - config.Log.Digest = "trace" - - config.Pprof.Enabled = true - config.Pprof.ListeningAddress = "0.0.0.0:6060" return config } From c6f7c892bb056e76190308003a8553cf96718b3a Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 3 Aug 2023 11:00:45 -0400 Subject: [PATCH 119/140] chore: fix `chain_sync_test.go` --- dot/network/message.go | 2 +- dot/sync/chain_sync.go | 28 +-- dot/sync/chain_sync_test.go | 402 ++++++++++++------------------- dot/sync/mock_chain_sync_test.go | 6 +- 
dot/sync/syncer.go | 3 +- 5 files changed, 171 insertions(+), 270 deletions(-) diff --git a/dot/network/message.go b/dot/network/message.go index ccd97ef29f..14768f6514 100644 --- a/dot/network/message.go +++ b/dot/network/message.go @@ -376,7 +376,7 @@ func NewAscendingBlockRequests(startNumber, targetNumber uint, requestedData byt return []*BlockRequestMessage{} } - diff := targetNumber - startNumber + diff := targetNumber - (startNumber - 1) // start and end block are the same, just request 1 block if diff == 0 { diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 0d0a445536..345e845afc 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -73,7 +73,7 @@ type peerView struct { // ChainSync contains the methods used by the high-level service into the `chainSync` module type ChainSync interface { start() - stop() + stop() error // called upon receiving a BlockAnnounceHandshake onBlockAnnounceHandshake(p peer.ID, hash common.Hash, number uint) error @@ -187,10 +187,10 @@ func (cs *chainSync) start() { go cs.pendingBlocks.run(cs.finalisedCh, cs.stopCh, &cs.wg) } -func (cs *chainSync) stop() { +func (cs *chainSync) stop() error { err := cs.workerPool.stop() if err != nil { - logger.Criticalf("while stopping worker poll: %w", err) + return fmt.Errorf("stopping worker poll: %w", err) } close(cs.stopCh) @@ -207,9 +207,9 @@ func (cs *chainSync) stop() { if !timeoutTimer.Stop() { <-timeoutTimer.C } - return + return nil case <-timeoutTimer.C: - logger.Critical("not all chainsync goroutines stopped successfully ") + return ErrStopTimeout } } @@ -231,6 +231,8 @@ func (cs *chainSync) isBootstrap() (bestBlockHeader *types.Header, syncTarget ui } func (cs *chainSync) bootstrapSync() { + defer cs.wg.Done() + for { select { case <-cs.stopCh: @@ -309,6 +311,8 @@ func (cs *chainSync) onBlockAnnounceHandshake(who peer.ID, bestHash common.Hash, cs.syncMode.Store(bootstrap) isSyncedGauge.Set(0) logger.Debugf("switched sync mode to %s", bootstrap.String()) + + cs.wg.Add(1) go cs.bootstrapSync() return nil } @@ -337,11 +341,6 @@ func (cs *chainSync) onBlockAnnounce(announced announcedBlock) error { return cs.requestAnnouncedBlock(announced) } - // we are more than 128 blocks behind the head, switch to bootstrap - cs.syncMode.Store(bootstrap) - isSyncedGauge.Set(0) - logger.Debugf("switched sync mode to %s", bootstrap.String()) - go cs.bootstrapSync() return nil } @@ -534,13 +533,7 @@ func (cs *chainSync) requestMaxBlocksFrom(bestBlockHeader *types.Header) error { } if targetBlockNumber > realTarget { - // basically if our virtual target is beyond the real target - // that means we are only a few requests away, then we - // calculate the correct amount of missing requests and then - // change to tip sync which should take care of the rest - diff := targetBlockNumber - realTarget - numOfRequestsToDrop := (diff / 128) + 1 - targetBlockNumber = targetBlockNumber - (numOfRequestsToDrop * 128) + targetBlockNumber = realTarget } requests := network.NewAscendingBlockRequests(startRequestAt, targetBlockNumber, @@ -1079,6 +1072,7 @@ func doResponseGrowsTheChain(response, ongoingChain []*types.BlockData, startAtB case len(response) > 1: lastBlockInResponse := response[len(response)-1] lastBlockExactIndex := lastBlockInResponse.Header.Number - startAtBlock + if uint32(lastBlockExactIndex+1) < expectedTotal { rightElement := ongoingChain[lastBlockExactIndex+1] if rightElement != nil && !compareParentHash(lastBlockInResponse, rightElement) { diff --git a/dot/sync/chain_sync_test.go 
b/dot/sync/chain_sync_test.go index 745b6071b2..242959bf85 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -5,7 +5,7 @@ package sync import ( "errors" - "sync" + "fmt" "sync/atomic" "testing" "time" @@ -63,7 +63,7 @@ func Test_chainSync_onBlockAnnounce(t *testing.T) { t.Parallel() const somePeer = peer.ID("abc") - //errTest := errors.New("test error") + errTest := errors.New("test error") emptyTrieState := storage.NewTrieState(nil) block1AnnounceHeader := types.NewHeader(common.Hash{}, emptyTrieState.MustRoot(), common.Hash{}, 1, scale.VaryingDataTypeSlice{}) @@ -71,7 +71,7 @@ func Test_chainSync_onBlockAnnounce(t *testing.T) { common.Hash{}, 2, scale.VaryingDataTypeSlice{}) testCases := map[string]struct { - listenForRequests bool + waitBootstrapSync bool chainSyncBuilder func(ctrl *gomock.Controller) *chainSync peerID peer.ID blockAnnounceHeader *types.Header @@ -79,131 +79,59 @@ func Test_chainSync_onBlockAnnounce(t *testing.T) { errMessage string expectedSyncMode chainSyncState }{ - // "announced_block_already_exists_in_disjoint_set": { - // chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - // pendingBlocks := NewMockDisjointBlockSet(ctrl) - // pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(true) - // return &chainSync{ - // pendingBlocks: pendingBlocks, - // } - // }, - // peerID: somePeer, - // blockAnnounceHeader: block2AnnounceHeader, - // errWrapped: errAlreadyInDisjointSet, - // errMessage: fmt.Sprintf("already in disjoint set: block %s (#%d)", - // block2AnnounceHeader.Hash(), block2AnnounceHeader.Number), - // }, - // "failed_to_add_announced_block_in_disjoint_set": { - // chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - // pendingBlocks := NewMockDisjointBlockSet(ctrl) - // pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) - // pendingBlocks.EXPECT().addHeader(block2AnnounceHeader).Return(errTest) - - // return &chainSync{ - // pendingBlocks: pendingBlocks, - // } - // }, - // peerID: somePeer, - // blockAnnounceHeader: block2AnnounceHeader, - // errWrapped: errTest, - // errMessage: "while adding pending block header: test error", - // }, - // "announced_block_while_in_bootstrap_mode": { - // chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - // pendingBlocks := NewMockDisjointBlockSet(ctrl) - // pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) - // pendingBlocks.EXPECT().addHeader(block2AnnounceHeader).Return(nil) - - // state := atomic.Value{} - // state.Store(bootstrap) - - // return &chainSync{ - // pendingBlocks: pendingBlocks, - // syncMode: state, - // } - // }, - // peerID: somePeer, - // blockAnnounceHeader: block2AnnounceHeader, - // }, - // "announced_block_while_in_tip_mode": { - // chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - // pendingBlocksMock := NewMockDisjointBlockSet(ctrl) - // pendingBlocksMock.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) - // pendingBlocksMock.EXPECT().addHeader(block2AnnounceHeader).Return(nil) - // pendingBlocksMock.EXPECT().removeBlock(block2AnnounceHeader.Hash()) - // pendingBlocksMock.EXPECT().size().Return(int(0)) - - // blockStateMock := NewMockBlockState(ctrl) - // blockStateMock.EXPECT(). - // HasHeader(block2AnnounceHeader.Hash()). - // Return(false, nil) - - // blockStateMock.EXPECT(). - // BestBlockHeader(). - // Return(block1AnnounceHeader, nil) - - // blockStateMock.EXPECT(). - // GetHighestFinalisedHeader(). 
- // Return(block2AnnounceHeader, nil) - - // expectedRequest := network.NewBlockRequest(*variadic.MustNewUint32OrHash(block2AnnounceHeader.Hash()), - // 1, network.BootstrapRequestData, network.Descending) - - // fakeBlockBody := types.Body([]types.Extrinsic{}) - // mockedBlockResponse := &network.BlockResponseMessage{ - // BlockData: []*types.BlockData{ - // { - // Hash: block2AnnounceHeader.Hash(), - // Header: block2AnnounceHeader, - // Body: &fakeBlockBody, - // }, - // }, - // } - - // networkMock := NewMockNetwork(ctrl) - // requestMaker := NewMockRequestMaker(ctrl) - // requestMaker.EXPECT(). - // Do(somePeer, expectedRequest, &network.BlockResponseMessage{}). - // DoAndReturn(func(_, _, response any) any { - // responsePtr := response.(*network.BlockResponseMessage) - // *responsePtr = *mockedBlockResponse - // return nil - // }) - - // babeVerifierMock := NewMockBabeVerifier(ctrl) - // storageStateMock := NewMockStorageState(ctrl) - // importHandlerMock := NewMockBlockImportHandler(ctrl) - // telemetryMock := NewMockTelemetry(ctrl) - - // const announceBlock = true - // ensureSuccessfulBlockImportFlow(t, block1AnnounceHeader, mockedBlockResponse.BlockData, - // blockStateMock, babeVerifierMock, storageStateMock, importHandlerMock, telemetryMock, - // announceBlock) - - // workerPool := newSyncWorkerPool(networkMock, requestMaker) - // // include the peer who announced the block in the pool - // workerPool.newPeer(somePeer) - - // state := atomic.Value{} - // state.Store(tip) - - // return &chainSync{ - // pendingBlocks: pendingBlocksMock, - // syncMode: state, - // workerPool: workerPool, - // network: networkMock, - // blockState: blockStateMock, - // babeVerifier: babeVerifierMock, - // telemetry: telemetryMock, - // storageState: storageStateMock, - // blockImportHandler: importHandlerMock, - // } - // }, - // listenForRequests: true, - // peerID: somePeer, - // blockAnnounceHeader: block2AnnounceHeader, - // }, - "announced_block_while_in_tip_mode_but_far_behind_tip": { + "announced_block_already_exists_in_disjoint_set": { + chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { + pendingBlocks := NewMockDisjointBlockSet(ctrl) + pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(true) + return &chainSync{ + stopCh: make(chan struct{}), + pendingBlocks: pendingBlocks, + workerPool: newSyncWorkerPool(NewMockNetwork(nil), NewMockRequestMaker(nil)), + } + }, + peerID: somePeer, + blockAnnounceHeader: block2AnnounceHeader, + errWrapped: errAlreadyInDisjointSet, + errMessage: fmt.Sprintf("already in disjoint set: block %s (#%d)", + block2AnnounceHeader.Hash(), block2AnnounceHeader.Number), + }, + "failed_to_add_announced_block_in_disjoint_set": { + chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { + pendingBlocks := NewMockDisjointBlockSet(ctrl) + pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) + pendingBlocks.EXPECT().addHeader(block2AnnounceHeader).Return(errTest) + + return &chainSync{ + stopCh: make(chan struct{}), + pendingBlocks: pendingBlocks, + workerPool: newSyncWorkerPool(NewMockNetwork(nil), NewMockRequestMaker(nil)), + } + }, + peerID: somePeer, + blockAnnounceHeader: block2AnnounceHeader, + errWrapped: errTest, + errMessage: "while adding pending block header: test error", + }, + "announced_block_while_in_bootstrap_mode": { + chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { + pendingBlocks := NewMockDisjointBlockSet(ctrl) + pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) 
+ pendingBlocks.EXPECT().addHeader(block2AnnounceHeader).Return(nil) + + state := atomic.Value{} + state.Store(bootstrap) + + return &chainSync{ + stopCh: make(chan struct{}), + pendingBlocks: pendingBlocks, + syncMode: state, + workerPool: newSyncWorkerPool(NewMockNetwork(nil), NewMockRequestMaker(nil)), + } + }, + peerID: somePeer, + blockAnnounceHeader: block2AnnounceHeader, + }, + "announced_block_while_in_tip_mode": { chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { pendingBlocksMock := NewMockDisjointBlockSet(ctrl) pendingBlocksMock.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) @@ -265,17 +193,8 @@ func Test_chainSync_onBlockAnnounce(t *testing.T) { state := atomic.Value{} state.Store(tip) - fakeBlock := types.NewHeader(block1AnnounceHeader.Hash(), emptyTrieState.MustRoot(), - common.Hash{}, 130, scale.VaryingDataTypeSlice{}) - return &chainSync{ - peerView: map[peer.ID]peerView{ - peer.ID("peerA"): { - who: peer.ID("peerA"), - hash: fakeBlock.Hash(), - number: 130, - }, - }, + stopCh: make(chan struct{}), pendingBlocks: pendingBlocksMock, syncMode: state, workerPool: workerPool, @@ -287,7 +206,6 @@ func Test_chainSync_onBlockAnnounce(t *testing.T) { blockImportHandler: importHandlerMock, } }, - listenForRequests: true, peerID: somePeer, blockAnnounceHeader: block2AnnounceHeader, }, @@ -300,13 +218,6 @@ func Test_chainSync_onBlockAnnounce(t *testing.T) { ctrl := gomock.NewController(t) chainSync := tt.chainSyncBuilder(ctrl) - stopCh := make(chan struct{}) - wg := sync.WaitGroup{} - // if tt.listenForRequests { - // wg.Add(1) - // go chainSync.workerPool.listenForRequests(stopCh, &wg) - // } - err := chainSync.onBlockAnnounce(announcedBlock{ who: tt.peerID, header: tt.blockAnnounceHeader, @@ -317,15 +228,16 @@ func Test_chainSync_onBlockAnnounce(t *testing.T) { assert.EqualError(t, err, tt.errMessage) } - if tt.listenForRequests { - close(stopCh) - wg.Wait() + if tt.waitBootstrapSync { + chainSync.wg.Wait() + err = chainSync.workerPool.stop() + require.NoError(t, err) } }) } } -func Test_chainSync_onBlockAnnounce_tipModeNeedToCatchup(t *testing.T) { +func Test_chainSync_onBlockAnnounceHandshake_tipModeNeedToCatchup(t *testing.T) { ctrl := gomock.NewController(t) const somePeer = peer.ID("abc") @@ -333,48 +245,53 @@ func Test_chainSync_onBlockAnnounce_tipModeNeedToCatchup(t *testing.T) { block1AnnounceHeader := types.NewHeader(common.Hash{}, emptyTrieState.MustRoot(), common.Hash{}, 1, scale.VaryingDataTypeSlice{}) block2AnnounceHeader := types.NewHeader(block1AnnounceHeader.Hash(), emptyTrieState.MustRoot(), - common.Hash{}, 2, scale.VaryingDataTypeSlice{}) - - pendingBlocksMock := NewMockDisjointBlockSet(ctrl) - pendingBlocksMock.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) - pendingBlocksMock.EXPECT().addHeader(block2AnnounceHeader).Return(nil) - pendingBlocksMock.EXPECT().removeBlock(block2AnnounceHeader.Hash()) - pendingBlocksMock.EXPECT().size().Return(int(0)) + common.Hash{}, 130, scale.VaryingDataTypeSlice{}) blockStateMock := NewMockBlockState(ctrl) blockStateMock.EXPECT(). - HasHeader(block2AnnounceHeader.Hash()). - Return(false, nil) + BestBlockHeader(). + Return(block1AnnounceHeader, nil). + Times(2) blockStateMock.EXPECT(). BestBlockHeader(). - Return(block1AnnounceHeader, nil) + Return(block2AnnounceHeader, nil). + Times(1) blockStateMock.EXPECT(). GetHighestFinalisedHeader(). 
- Return(block2AnnounceHeader, nil) - - expectedRequest := network.NewBlockRequest(*variadic.MustNewUint32OrHash(block2AnnounceHeader.Hash()), - 1, network.BootstrapRequestData, network.Descending) - - fakeBlockBody := types.Body([]types.Extrinsic{}) - mockedBlockResponse := &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Hash: block2AnnounceHeader.Hash(), - Header: block2AnnounceHeader, - Body: &fakeBlockBody, - }, - }, - } + Return(block1AnnounceHeader, nil). + Times(2) + + expectedRequest := network.NewAscendingBlockRequests( + block1AnnounceHeader.Number+1, + block2AnnounceHeader.Number, network.BootstrapRequestData) networkMock := NewMockNetwork(ctrl) + networkMock.EXPECT().Peers().Return([]common.PeerInfo{}). + Times(2) + networkMock.EXPECT().AllConnectedPeersIDs().Return([]peer.ID{}) + + firstMockedResponse := createSuccesfullBlockResponse(t, block1AnnounceHeader.Hash(), 2, 128) + latestItemFromMockedResponse := firstMockedResponse.BlockData[len(firstMockedResponse.BlockData)-1] + + secondMockedResponse := createSuccesfullBlockResponse(t, latestItemFromMockedResponse.Hash, + int(latestItemFromMockedResponse.Header.Number+1), 1) + requestMaker := NewMockRequestMaker(ctrl) requestMaker.EXPECT(). - Do(somePeer, expectedRequest, &network.BlockResponseMessage{}). + Do(somePeer, expectedRequest[0], &network.BlockResponseMessage{}). + DoAndReturn(func(_, _, response any) any { + responsePtr := response.(*network.BlockResponseMessage) + *responsePtr = *firstMockedResponse + return nil + }) + + requestMaker.EXPECT(). + Do(somePeer, expectedRequest[1], &network.BlockResponseMessage{}). DoAndReturn(func(_, _, response any) any { responsePtr := response.(*network.BlockResponseMessage) - *responsePtr = *mockedBlockResponse + *responsePtr = *secondMockedResponse return nil }) @@ -383,36 +300,27 @@ func Test_chainSync_onBlockAnnounce_tipModeNeedToCatchup(t *testing.T) { importHandlerMock := NewMockBlockImportHandler(ctrl) telemetryMock := NewMockTelemetry(ctrl) - const announceBlock = true - ensureSuccessfulBlockImportFlow(t, block1AnnounceHeader, mockedBlockResponse.BlockData, + const announceBlock = false + + ensureSuccessfulBlockImportFlow(t, block1AnnounceHeader, firstMockedResponse.BlockData, + blockStateMock, babeVerifierMock, storageStateMock, importHandlerMock, telemetryMock, + announceBlock) + ensureSuccessfulBlockImportFlow(t, latestItemFromMockedResponse.Header, secondMockedResponse.BlockData, blockStateMock, babeVerifierMock, storageStateMock, importHandlerMock, telemetryMock, announceBlock) - - workerPool := newSyncWorkerPool(networkMock, requestMaker) - // include the peer who announced the block in the pool - workerPool.newPeer(somePeer) state := atomic.Value{} state.Store(tip) - fakeBlock := types.NewHeader(block1AnnounceHeader.Hash(), emptyTrieState.MustRoot(), - common.Hash{}, 130, scale.VaryingDataTypeSlice{}) - stopCh := make(chan struct{}) defer close(stopCh) chainSync := &chainSync{ - stopCh: stopCh, - peerView: map[peer.ID]peerView{ - peer.ID("peerA"): { - who: peer.ID("peerA"), - hash: fakeBlock.Hash(), - number: 130, // the target is much higher, we should catch up - }, - }, - pendingBlocks: pendingBlocksMock, + stopCh: stopCh, + peerView: make(map[peer.ID]peerView), syncMode: state, - workerPool: workerPool, + pendingBlocks: newDisjointBlockSet(0), + workerPool: newSyncWorkerPool(networkMock, requestMaker), network: networkMock, blockState: blockStateMock, babeVerifier: babeVerifierMock, @@ -421,16 +329,17 @@ func 
Test_chainSync_onBlockAnnounce_tipModeNeedToCatchup(t *testing.T) { blockImportHandler: importHandlerMock, } - defer chainSync.workerPool.stop() - err := chainSync.onBlockAnnounce(announcedBlock{ - who: somePeer, - header: block2AnnounceHeader, - }) + err := chainSync.onBlockAnnounceHandshake(somePeer, block2AnnounceHeader.Hash(), block2AnnounceHeader.Number) require.NoError(t, err) + chainSync.wg.Wait() + err = chainSync.workerPool.stop() + require.NoError(t, err) + + require.Equal(t, chainSync.getSyncMode(), tip) } -func TestChainSync_onBlockAnnounceHandshake(t *testing.T) { +func TestChainSync_onBlockAnnounceHandshake_onBootstrapMode(t *testing.T) { const randomHashString = "0x580d77a9136035a0bc3c3cd86286172f7f81291164c5914266073a30466fba21" randomHash := common.MustHexToHash(randomHashString) @@ -442,12 +351,13 @@ func TestChainSync_onBlockAnnounceHandshake(t *testing.T) { shouldBeAWorker bool workerStatus byte }{ - "set_peer_head_with_new_peer": { + "new_peer": { newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { networkMock := NewMockNetwork(ctrl) workerPool := newSyncWorkerPool(networkMock, NewMockRequestMaker(nil)) cs := newChainSyncTest(t, ctrl) + cs.syncMode.Store(bootstrap) cs.workerPool = workerPool return cs }, @@ -457,7 +367,7 @@ func TestChainSync_onBlockAnnounceHandshake(t *testing.T) { shouldBeAWorker: true, workerStatus: available, }, - "set_peer_head_with_a_to_ignore_peer_should_not_be_included_in_the_workerpoll": { + "ignore_peer_should_not_be_included_in_the_workerpoll": { newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { networkMock := NewMockNetwork(ctrl) workerPool := newSyncWorkerPool(networkMock, NewMockRequestMaker(nil)) @@ -466,6 +376,7 @@ func TestChainSync_onBlockAnnounceHandshake(t *testing.T) { } cs := newChainSyncTest(t, ctrl) + cs.syncMode.Store(bootstrap) cs.workerPool = workerPool return cs }, @@ -474,39 +385,18 @@ func TestChainSync_onBlockAnnounceHandshake(t *testing.T) { bestNumber: uint(20), shouldBeAWorker: false, }, - "set_peer_head_that_stills_punished_in_the_worker_poll": { - newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { - networkMock := NewMockNetwork(ctrl) - workerPool := newSyncWorkerPool(networkMock, NewMockRequestMaker(nil)) - workerPool.workers = map[peer.ID]*peerSyncWorker{ - // peer.ID("peer-test"): { - // status: punished, - // punishmentTime: time.Now().Add(3 * time.Hour), - // }, - } - - cs := newChainSyncTest(t, ctrl) - cs.workerPool = workerPool - return cs - }, - peerID: peer.ID("peer-test"), - bestHash: randomHash, - bestNumber: uint(20), - shouldBeAWorker: true, - workerStatus: punished, - }, - "set_peer_head_that_punishment_isnot_valid_in_the_worker_poll": { + "peer_already_exists_in_the_pool": { newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { networkMock := NewMockNetwork(ctrl) workerPool := newSyncWorkerPool(networkMock, NewMockRequestMaker(nil)) workerPool.workers = map[peer.ID]*peerSyncWorker{ - // peer.ID("peer-test"): { - // status: punished, - // punishmentTime: time.Now().Add(-3 * time.Hour), - // }, + peer.ID("peer-test"): { + worker: &worker{status: available}, + }, } cs := newChainSyncTest(t, ctrl) + cs.syncMode.Store(bootstrap) cs.workerPool = workerPool return cs }, @@ -607,8 +497,8 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker(t *testing.T) { mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, trie.EmptyHash, 0, types.NewDigest()) - const blocksAhead = 129 - totalBlockResponse := 
createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, int(blocksAhead)-1) + const blocksAhead = 128 + totalBlockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, blocksAhead) mockedNetwork := NewMockNetwork(ctrl) workerPeerID := peer.ID("noot") @@ -639,6 +529,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker(t *testing.T) { mockStorageState := NewMockStorageState(ctrl) mockImportHandler := NewMockBlockImportHandler(ctrl) mockTelemetry := NewMockTelemetry(ctrl) + const announceBlock = false // setup mocks for new synced blocks that doesn't exists in our local database ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, totalBlockResponse.BlockData, mockedBlockState, @@ -655,7 +546,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker(t *testing.T) { target, err := cs.getTarget() require.NoError(t, err) - require.Equal(t, uint(129), target) + require.Equal(t, uint(128), target) // include a new worker in the worker pool set, this worker // should be an available peer that will receive a block request @@ -705,7 +596,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) { } // the worker 2 will respond from block 129 to 256 so the ensureBlockImportFlow // will setup the expectations starting from block 128, from previous worker, until block 256 - parent := worker1Response.BlockData[127] + parent := worker1Response.BlockData[len(worker1Response.BlockData)-1] ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) @@ -733,7 +624,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) { // We start this test with genesis block being our best block, so // we're far behind by 128 blocks, we should execute a bootstrap // sync request those blocks - const blocksAhead = 257 + const blocksAhead = 256 cs := setupChainSyncToBootstrapMode(t, blocksAhead, mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry) @@ -792,7 +683,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorkerFailing(t *testing. } // the worker 2 will respond from block 129 to 256 so the ensureBlockImportFlow // will setup the expectations starting from block 128, from previous worker, until block 256 - parent := worker1Response.BlockData[127] + parent := worker1Response.BlockData[len(worker1Response.BlockData)-1] ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) @@ -839,7 +730,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorkerFailing(t *testing. 
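The block counts used by these two workers follow from the message.go fix at the top of this patch: the amount to request is the inclusive distance targetNumber - (startNumber - 1), split into requests of up to 128 blocks, so syncing from block 1 to target 256 yields exactly two full requests (1-128 and 129-256), and worker 2's import expectations chain onto the last item of worker 1's response. A rough sketch of that arithmetic; the cap constant name here is an assumption, not taken from the patch:

const maxBlocksPerRequest = 128 // assumed name for the per-request cap

func requestsToCover(startNumber, targetNumber uint) uint {
	blocks := targetNumber - (startNumber - 1) // inclusive count, the off-by-one fixed above
	requests := blocks / maxBlocksPerRequest
	if blocks%maxBlocksPerRequest != 0 {
		requests++ // one final, partially filled request
	}
	return requests
}

requestsToCover(1, 256) == 2 and requestsToCover(1, 128) == 1, which matches the blocksAhead constants these tests were adjusted to.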
// We start this test with genesis block being our best block, so // we're far behind by 128 blocks, we should execute a bootstrap // sync request those blocks - const blocksAhead = 257 + const blocksAhead = 256 cs := setupChainSyncToBootstrapMode(t, blocksAhead, mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry) @@ -903,7 +794,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithProtocolNotSupported(t *test } // the worker 2 will respond from block 129 to 256 so the ensureBlockImportFlow // will setup the expectations starting from block 128, from previous worker, until block 256 - parent := worker1Response.BlockData[127] + parent := worker1Response.BlockData[len(worker1Response.BlockData)-1] ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) @@ -956,7 +847,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithProtocolNotSupported(t *test // We start this test with genesis block being our best block, so // we're far behind by 128 blocks, we should execute a bootstrap // sync request those blocks - const blocksAhead = 257 + const blocksAhead = 256 cs := setupChainSyncToBootstrapMode(t, blocksAhead, mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry) @@ -1076,7 +967,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithNilHeaderInResponse(t *testi // We start this test with genesis block being our best block, so // we're far behind by 128 blocks, we should execute a bootstrap // sync request those blocks - const blocksAhead = 257 + const blocksAhead = 256 cs := setupChainSyncToBootstrapMode(t, blocksAhead, mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry) @@ -1192,7 +1083,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithResponseIsNotAChain(t *testi // We start this test with genesis block being our best block, so // we're far behind by 128 blocks, we should execute a bootstrap // sync request those blocks - const blocksAhead = 257 + const blocksAhead = 256 cs := setupChainSyncToBootstrapMode(t, blocksAhead, mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry) @@ -1256,7 +1147,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing. } // the worker 2 will respond from block 129 to 256 so the ensureBlockImportFlow // will setup the expectations starting from block 128, from previous worker, until block 256 - parent := worker1Response.BlockData[127] + parent := worker1Response.BlockData[len(worker1Response.BlockData)-1] ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) @@ -1284,8 +1175,19 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing. 
} if pID == peer.ID("bob") { - blockDataWithBadBlock := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 129, 256) - blockDataWithBadBlock.BlockData[4].Hash = fakeBadBlockHash + // use the first response last item hash to produce the second response block data + // so we can guarantee that the second response continues the first response blocks + firstResponseLastItem := worker1Response.BlockData[len(worker1Response.BlockData)-1] + blockDataWithBadBlock := createSuccesfullBlockResponse(t, + firstResponseLastItem.Header.Hash(), + 129, + 128) + + // changes the last item from the second response to be a bad block, so we guarantee that + // this second response is a chain, (changing the hash of a block in the middle of the block + // response breaks the `isAChain` verification) + lastItem := len(blockDataWithBadBlock.BlockData) - 1 + blockDataWithBadBlock.BlockData[lastItem].Hash = fakeBadBlockHash *responsePtr = *blockDataWithBadBlock return nil } @@ -1302,6 +1204,8 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing. return nil }).Times(3) + fmt.Printf("BAD BLOCK HASH: %s\n", fakeBadBlockHash) + mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{ Value: peerset.BadBlockAnnouncementValue, Reason: peerset.BadBlockAnnouncementReason, @@ -1311,7 +1215,7 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing. // We start this test with genesis block being our best block, so // we're far behind by 128 blocks, we should execute a bootstrap // sync request those blocks - const blocksAhead = 257 + const blocksAhead = 256 cs := setupChainSyncToBootstrapMode(t, blocksAhead, mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry) @@ -1406,7 +1310,7 @@ func TestChainSync_BootstrapSync_SucessfulSync_ReceivedPartialBlockData(t *testi return nil }).Times(2) - const blocksAhead = 256 + const blocksAhead = 128 cs := setupChainSyncToBootstrapMode(t, blocksAhead, mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry) @@ -1429,15 +1333,17 @@ func TestChainSync_BootstrapSync_SucessfulSync_ReceivedPartialBlockData(t *testi require.True(t, ok) } -func createSuccesfullBlockResponse(_ *testing.T, genesisHash common.Hash, +func createSuccesfullBlockResponse(t *testing.T, parentHeader common.Hash, startingAt, numBlocks int) *network.BlockResponseMessage { + t.Helper() + response := new(network.BlockResponseMessage) response.BlockData = make([]*types.BlockData, numBlocks) emptyTrieState := storage.NewTrieState(nil) tsRoot := emptyTrieState.MustRoot() - firstHeader := types.NewHeader(genesisHash, tsRoot, common.Hash{}, + firstHeader := types.NewHeader(parentHeader, tsRoot, common.Hash{}, uint(startingAt), scale.VaryingDataTypeSlice{}) response.BlockData[0] = &types.BlockData{ Hash: firstHeader.Hash(), diff --git a/dot/sync/mock_chain_sync_test.go b/dot/sync/mock_chain_sync_test.go index 74cc93cdca..2d38b8e60e 100644 --- a/dot/sync/mock_chain_sync_test.go +++ b/dot/sync/mock_chain_sync_test.go @@ -105,9 +105,11 @@ func (mr *MockChainSyncMockRecorder) start() *gomock.Call { } // stop mocks base method. -func (m *MockChainSync) stop() { +func (m *MockChainSync) stop() error { m.ctrl.T.Helper() - m.ctrl.Call(m, "stop") + ret := m.ctrl.Call(m, "stop") + ret0, _ := ret[0].(error) + return ret0 } // stop indicates an expected call of stop.
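The bad block is moved to the end of the second response because the chain verification only follows parent links between consecutive items: corrupting an entry in the middle breaks the link to its successor, while the final entry has no successor inside the batch to contradict it, so the response still reads as a chain and the known-bad-block path gets exercised instead. A simplified sketch of that kind of linkage check, not the exact gossamer implementation:

func looksLikeAChain(blockData []*types.BlockData) bool {
	for i := 1; i < len(blockData); i++ {
		// every item must reference the hash of the item before it
		if blockData[i].Header.ParentHash != blockData[i-1].Header.Hash() {
			return false
		}
	}
	return true
}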
diff --git a/dot/sync/syncer.go b/dot/sync/syncer.go index 36221504d6..5413b8a002 100644 --- a/dot/sync/syncer.go +++ b/dot/sync/syncer.go @@ -82,8 +82,7 @@ func (s *Service) Start() error { // Stop stops the chainSync and chainProcessor modules func (s *Service) Stop() error { - s.chainSync.stop() - return nil + return s.chainSync.stop() } // HandleBlockAnnounceHandshake notifies the `chainSync` module that From 4a8a5a8e543892bc5f0ecf552b2c4f6c5f0530d0 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 3 Aug 2023 13:34:17 -0400 Subject: [PATCH 120/140] chore: include `doResponseGrowsTheChain` tests --- dot/sync/chain_sync_test.go | 115 ++++++++++++++++++++++++++++++++++++ 1 file changed, 115 insertions(+) diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index 242959bf85..8f8fb08c93 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -1610,6 +1610,121 @@ func TestChainSync_isResponseAChain(t *testing.T) { } } +func TestChainSync_doResponseGrowsTheChain(t *testing.T) { + block1Header := types.NewHeader(common.Hash{}, common.Hash{}, common.Hash{}, 1, types.NewDigest()) + block2Header := types.NewHeader(block1Header.Hash(), common.Hash{}, common.Hash{}, 2, types.NewDigest()) + block3Header := types.NewHeader(block2Header.Hash(), common.Hash{}, common.Hash{}, 3, types.NewDigest()) + block4Header := types.NewHeader(block3Header.Hash(), common.Hash{}, common.Hash{}, 4, types.NewDigest()) + + testcases := map[string]struct { + response []*types.BlockData + ongoingChain []*types.BlockData + startAt uint + exepectedTotal uint32 + expectedOut bool + }{ + // the ongoing chain does not have any data so the response + // can be inserted in the ongoing chain without any problems + "empty_ongoing_chain": { + ongoingChain: []*types.BlockData{}, + expectedOut: true, + }, + + "one_in_response_growing_ongoing_chain_without_check": { + startAt: 1, + exepectedTotal: 3, + // the ongoing chain contains 3 positions, the block number 1 is at position 0 + ongoingChain: []*types.BlockData{ + {Header: types.NewHeader(common.Hash{}, common.Hash{}, common.Hash{}, 1, types.NewDigest())}, + nil, + nil, + }, + + // the response contains the block number 3 which should be placed in position 2 + // in the ongoing chain, which means that no comparision should be done to place + // block number 3 in the ongoing chain + response: []*types.BlockData{ + {Header: types.NewHeader(common.Hash{}, common.Hash{}, common.Hash{}, 3, types.NewDigest())}, + }, + expectedOut: true, + }, + + "one_in_response_growing_ongoing_chain_by_checking_neighbors": { + startAt: 1, + exepectedTotal: 3, + // the ongoing chain contains 3 positions, the block number 1 is at position 0 + ongoingChain: []*types.BlockData{ + {Header: block1Header}, + nil, + {Header: block3Header}, + }, + + // the response contains the block number 2 which should be placed in position 1 + // in the ongoing chain, which means that a comparision should be made to check + // if the parent hash of block 2 is the same hash of block 1 + response: []*types.BlockData{ + {Header: block2Header}, + }, + expectedOut: true, + }, + + "one_in_response_failed_to_grow_ongoing_chain": { + startAt: 1, + exepectedTotal: 3, + ongoingChain: []*types.BlockData{ + {Header: block1Header}, + nil, + nil, + }, + response: []*types.BlockData{ + {Header: types.NewHeader(common.Hash{}, common.Hash{}, common.Hash{}, 2, types.NewDigest())}, + }, + expectedOut: false, + }, + + "many_in_response_grow_ongoing_chain_only_left_check": { + startAt: 1, + 
exepectedTotal: 3, + ongoingChain: []*types.BlockData{ + {Header: block1Header}, + nil, + nil, + nil, + }, + response: []*types.BlockData{ + {Header: block2Header}, + {Header: block3Header}, + }, + expectedOut: true, + }, + + "many_in_response_grow_ongoing_chain_left_right_check": { + startAt: 1, + exepectedTotal: 3, + ongoingChain: []*types.BlockData{ + {Header: block1Header}, + nil, + nil, + {Header: block4Header}, + }, + response: []*types.BlockData{ + {Header: block2Header}, + {Header: block3Header}, + }, + expectedOut: true, + }, + } + + for tname, tt := range testcases { + tt := tt + + t.Run(tname, func(t *testing.T) { + out := doResponseGrowsTheChain(tt.response, tt.ongoingChain, tt.startAt, tt.exepectedTotal) + require.Equal(t, tt.expectedOut, out) + }) + } +} + func TestChainSync_getHighestBlock(t *testing.T) { t.Parallel() From 407a7ed2812d3b32d5037151ea2e2f7b03f27103 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 3 Aug 2023 13:42:14 -0400 Subject: [PATCH 121/140] chore: fix lint warns --- dot/sync/chain_sync_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index 8f8fb08c93..b2e7444e26 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -1641,7 +1641,7 @@ func TestChainSync_doResponseGrowsTheChain(t *testing.T) { }, // the response contains the block number 3 which should be placed in position 2 - // in the ongoing chain, which means that no comparision should be done to place + // in the ongoing chain, which means that no comparison should be done to place // block number 3 in the ongoing chain response: []*types.BlockData{ {Header: types.NewHeader(common.Hash{}, common.Hash{}, common.Hash{}, 3, types.NewDigest())}, @@ -1649,7 +1649,7 @@ func TestChainSync_doResponseGrowsTheChain(t *testing.T) { expectedOut: true, }, - "one_in_response_growing_ongoing_chain_by_checking_neighbors": { + "one_in_response_growing_ongoing_chain_by_checking_neighbours": { startAt: 1, exepectedTotal: 3, // the ongoing chain contains 3 positions, the block number 1 is at position 0 @@ -1660,7 +1660,7 @@ func TestChainSync_doResponseGrowsTheChain(t *testing.T) { }, // the response contains the block number 2 which should be placed in position 1 - // in the ongoing chain, which means that a comparision should be made to check + // in the ongoing chain, which means that a comparison should be made to check // if the parent hash of block 2 is the same hash of block 1 response: []*types.BlockData{ {Header: block2Header}, From 779d393af2b490bb3a443ed9bfd8a4473d7dbca8 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 3 Aug 2023 15:47:24 -0400 Subject: [PATCH 122/140] chore: fix current tests --- dot/sync/chain_sync.go | 5 -- dot/sync/chain_sync_test.go | 28 +----- dot/sync/worker.go | 32 ------- dot/sync/worker_pool.go | 80 ++++------------- dot/sync/worker_pool_test.go | 169 +++++------------------------------ dot/sync/worker_test.go | 38 ++++++-- 6 files changed, 71 insertions(+), 281 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 345e845afc..e8c872db2c 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -638,8 +638,6 @@ taskResultLoop: Reason: peerset.BadProtocolReason, }, who) } - - cs.workerPool.punishPeer(who) } cs.workerPool.submitRequest(request, nil, workersResults) @@ -663,7 +661,6 @@ taskResultLoop: }, who) } - cs.workerPool.punishPeer(who) cs.workerPool.submitRequest(taskResult.request, nil, workersResults) 
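With the punishPeer calls removed from this result loop, every failure path converges on one recovery: the original request is handed back to the pool so any available worker can retry it. A sketch of that single recovery path using names from the surrounding hunks (the chain check stands in for the full set of validations performed in the loop):

func retryIfInvalid(pool *syncWorkerPool, taskResult *syncTaskResult, resultCh chan *syncTaskResult) (retried bool) {
	invalid := taskResult.err != nil ||
		taskResult.response == nil ||
		!isResponseAChain(taskResult.response.BlockData)
	if invalid {
		// resubmit the same request; a different worker may pick it up
		pool.submitRequest(taskResult.request, nil, resultCh)
	}
	return invalid
}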
continue taskResultLoop } @@ -671,7 +668,6 @@ taskResultLoop: isChain := isResponseAChain(response.BlockData) if !isChain { logger.Criticalf("response from %s is not a chain", who) - cs.workerPool.punishPeer(who) cs.workerPool.submitRequest(taskResult.request, nil, workersResults) continue taskResultLoop } @@ -680,7 +676,6 @@ taskResultLoop: startAtBlock, expectedSyncedBlocks) if !grows { logger.Criticalf("response from %s does not grows the ongoing chain", who) - cs.workerPool.punishPeer(who) cs.workerPool.submitRequest(taskResult.request, nil, workersResults) continue taskResultLoop } diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index b2e7444e26..79ff939f59 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -389,10 +389,8 @@ func TestChainSync_onBlockAnnounceHandshake_onBootstrapMode(t *testing.T) { newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { networkMock := NewMockNetwork(ctrl) workerPool := newSyncWorkerPool(networkMock, NewMockRequestMaker(nil)) - workerPool.workers = map[peer.ID]*peerSyncWorker{ - peer.ID("peer-test"): { - worker: &worker{status: available}, - }, + workerPool.workers = map[peer.ID]*worker{ + peer.ID("peer-test"): {status: available}, } cs := newChainSyncTest(t, ctrl) @@ -424,7 +422,7 @@ func TestChainSync_onBlockAnnounceHandshake_onBootstrapMode(t *testing.T) { if tt.shouldBeAWorker { syncWorker, exists := cs.workerPool.workers[tt.peerID] require.True(t, exists) - require.Equal(t, tt.workerStatus, syncWorker.worker.status) + require.Equal(t, tt.workerStatus, syncWorker.status) } else { _, exists := cs.workerPool.workers[tt.peerID] require.False(t, exists) @@ -750,11 +748,6 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorkerFailing(t *testing. 
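 // note on the removals below: these assertions checked that a failing peer
 // ended up with the `punished` worker status; that status no longer exists, so
 // a misbehaving peer is now either kept as a regular worker or dropped into the
 // `ignorePeers` set via `ignorePeerAsWorker`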
err = cs.workerPool.stop() require.NoError(t, err) - - // peer should be punished - syncWorker, ok := cs.workerPool.workers[peer.ID("bob")] - require.True(t, ok) - require.Equal(t, punished, syncWorker.worker.status) } func TestChainSync_BootstrapSync_SuccessfulSync_WithProtocolNotSupported(t *testing.T) { @@ -867,11 +860,6 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithProtocolNotSupported(t *test err = cs.workerPool.stop() require.NoError(t, err) - - // peer should be punished - syncWorker, ok := cs.workerPool.workers[peer.ID("bob")] - require.True(t, ok) - require.Equal(t, punished, syncWorker.worker.status) } func TestChainSync_BootstrapSync_SuccessfulSync_WithNilHeaderInResponse(t *testing.T) { @@ -987,11 +975,6 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithNilHeaderInResponse(t *testi err = cs.workerPool.stop() require.NoError(t, err) - - // peer should be punished - syncWorker, ok := cs.workerPool.workers[peer.ID("bob")] - require.True(t, ok) - require.Equal(t, punished, syncWorker.worker.status) } func TestChainSync_BootstrapSync_SuccessfulSync_WithResponseIsNotAChain(t *testing.T) { @@ -1103,11 +1086,6 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithResponseIsNotAChain(t *testi err = cs.workerPool.stop() require.NoError(t, err) - - // peer should be punished - syncWorker, ok := cs.workerPool.workers[peer.ID("bob")] - require.True(t, ok) - require.Equal(t, punished, syncWorker.worker.status) } func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing.T) { diff --git a/dot/sync/worker.go b/dot/sync/worker.go index 4aa55e6050..fbf43e346e 100644 --- a/dot/sync/worker.go +++ b/dot/sync/worker.go @@ -52,19 +52,6 @@ func (w *worker) start() { logger.Debugf("[STARTED] worker %s", w.peerID) for { select { - case punishmentDuration := <-w.punishment: - logger.Debugf("⏱️ punishement time for peer %s: %.2fs", w.peerID, punishmentDuration.Seconds()) - punishmentTimer := time.NewTimer(punishmentDuration) - select { - case <-punishmentTimer.C: - w.mtx.Lock() - w.status = available - w.mtx.Unlock() - - case <-w.stopCh: - return - } - case <-w.stopCh: return case task := <-w.queue: @@ -75,33 +62,14 @@ func (w *worker) start() { } func (w *worker) processTask(task *syncTask) (enqueued bool) { - if w.isPunished() { - return false - } - select { case w.queue <- task: - logger.Debugf("[ENQUEUED] worker %s, block request: %s", w.peerID, task.request) return true default: return false } } -func (w *worker) punish(duration time.Duration) { - w.punishment <- duration - - w.mtx.Lock() - defer w.mtx.Unlock() - w.status = punished -} - -func (w *worker) isPunished() bool { - w.mtx.Lock() - defer w.mtx.Unlock() - return w.status == punished -} - func (w *worker) stop() error { close(w.stopCh) diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index fa15a49fb7..24f113be2d 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -37,21 +37,12 @@ type syncTaskResult struct { err error } -type peerSyncWorker struct { - timesPunished int - worker *worker -} - -func (p *peerSyncWorker) isPunished() bool { - return p.worker.isPunished() -} - type syncWorkerPool struct { mtx sync.RWMutex network Network requestMaker network.RequestMaker - workers map[peer.ID]*peerSyncWorker + workers map[peer.ID]*worker ignorePeers map[peer.ID]struct{} sharedGuard chan struct{} @@ -61,7 +52,7 @@ func newSyncWorkerPool(net Network, requestMaker network.RequestMaker) *syncWork swp := &syncWorkerPool{ network: net, requestMaker: requestMaker, - workers: 
make(map[peer.ID]*peerSyncWorker), + workers: make(map[peer.ID]*worker), ignorePeers: make(map[peer.ID]struct{}), sharedGuard: make(chan struct{}, maxRequestsAllowed), } @@ -80,14 +71,10 @@ func (s *syncWorkerPool) stop() error { errCh := make(chan error, len(s.workers)) for _, syncWorker := range s.workers { - if syncWorker.isPunished() { - continue - } - wg.Add(1) - go func(syncWorker *peerSyncWorker, wg *sync.WaitGroup) { + go func(syncWorker *worker, wg *sync.WaitGroup) { defer wg.Done() - errCh <- syncWorker.worker.stop() + errCh <- syncWorker.stop() }(syncWorker, &wg) } @@ -138,11 +125,8 @@ func (s *syncWorkerPool) newPeer(who peer.ID) { return } - syncWorker := &peerSyncWorker{ - worker: newWorker(who, s.sharedGuard, s.requestMaker), - } - - syncWorker.worker.start() + syncWorker := newWorker(who, s.sharedGuard, s.requestMaker) + syncWorker.start() s.workers[who] = syncWorker logger.Tracef("potential worker added, total in the pool %d", len(s.workers)) } @@ -167,10 +151,8 @@ func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, if who != nil { syncWorker := s.workers[*who] - if !syncWorker.isPunished() { - syncWorker.worker.processTask(task) - return - } + syncWorker.processTask(task) + return } for syncWorkerPeerID, syncWorker := range s.workers { @@ -178,11 +160,7 @@ func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, continue } - if syncWorker.isPunished() { - continue - } - - enqueued := syncWorker.worker.processTask(task) + enqueued := syncWorker.processTask(task) if enqueued { break } @@ -204,11 +182,7 @@ func (s *syncWorkerPool) submitRequests(requests []*network.BlockRequestMessage) workerID := idx % len(allWorkers) syncWorker := allWorkers[workerID] - if syncWorker.isPunished() { - continue - } - - enqueued := syncWorker.worker.processTask(&syncTask{ + enqueued := syncWorker.processTask(&syncTask{ request: requests[idx], resultCh: resultCh, }) @@ -217,43 +191,25 @@ func (s *syncWorkerPool) submitRequests(requests []*network.BlockRequestMessage) continue } + // only increases the index if a task was successfully equeued + // for some worker, if the task was not equeued for some worker + // jump to the next worker and try to enqueue there idx++ } return resultCh } -// punishPeer given a peer.ID we check increase its times punished -// and apply the punishment time using the base timeout of 5m, so -// each time a peer is punished its timeout will increase by 5m -func (s *syncWorkerPool) punishPeer(who peer.ID) { - s.mtx.Lock() - defer s.mtx.Unlock() - - syncWorker, has := s.workers[who] - if !has || syncWorker.isPunished() { - return - } - - timesPunished := syncWorker.timesPunished + 1 - punishmentTime := time.Duration(timesPunished) * punishmentBaseTimeout - - syncWorker.timesPunished = timesPunished - syncWorker.worker.punish(punishmentTime) - - s.workers[who] = syncWorker -} - func (s *syncWorkerPool) ignorePeerAsWorker(who peer.ID) error { s.mtx.Lock() defer s.mtx.Unlock() - syncWorker, has := s.workers[who] + worker, has := s.workers[who] if !has { return nil } - err := syncWorker.worker.stop() + err := worker.stop() if err != nil { return fmt.Errorf("stopping worker: %w", err) } @@ -268,10 +224,8 @@ func (s *syncWorkerPool) totalWorkers() (total uint) { s.mtx.RLock() defer s.mtx.RUnlock() - for _, syncWorker := range s.workers { - if !syncWorker.worker.isPunished() { - total++ - } + for range s.workers { + total++ } return total diff --git a/dot/sync/worker_pool_test.go b/dot/sync/worker_pool_test.go index 
8b43ceb204..d00ce4c5f1 100644 --- a/dot/sync/worker_pool_test.go +++ b/dot/sync/worker_pool_test.go @@ -14,15 +14,14 @@ import ( "github.com/golang/mock/gomock" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" + "golang.org/x/exp/maps" ) func TestSyncWorkerPool_useConnectedPeers(t *testing.T) { t.Parallel() - //stablePunishmentTime := time.Now().Add(time.Minute * 2) - cases := map[string]struct { - setupWorkerPool func(t *testing.T) *syncWorkerPool - expectedPool map[peer.ID]*peerSyncWorker + setupWorkerPool func(t *testing.T) *syncWorkerPool + exepectedWorkers []peer.ID }{ "no_connected_peers": { setupWorkerPool: func(t *testing.T) *syncWorkerPool { @@ -34,7 +33,7 @@ func TestSyncWorkerPool_useConnectedPeers(t *testing.T) { return newSyncWorkerPool(networkMock, nil) }, - expectedPool: make(map[peer.ID]*peerSyncWorker), + exepectedWorkers: []peer.ID{}, }, "3_available_peers": { setupWorkerPool: func(t *testing.T) *syncWorkerPool { @@ -49,10 +48,10 @@ func TestSyncWorkerPool_useConnectedPeers(t *testing.T) { }) return newSyncWorkerPool(networkMock, nil) }, - expectedPool: map[peer.ID]*peerSyncWorker{ - // peer.ID("available-1"): {status: available}, - // peer.ID("available-2"): {status: available}, - // peer.ID("available-3"): {status: available}, + exepectedWorkers: []peer.ID{ + peer.ID("available-1"), + peer.ID("available-2"), + peer.ID("available-3"), }, }, "2_available_peers_1_to_ignore": { @@ -70,40 +69,12 @@ func TestSyncWorkerPool_useConnectedPeers(t *testing.T) { workerPool.ignorePeers[peer.ID("available-3")] = struct{}{} return workerPool }, - expectedPool: map[peer.ID]*peerSyncWorker{ - // peer.ID("available-1"): {status: available}, - // peer.ID("available-2"): {status: available}, - }, - }, - "peer_punishment_not_valid_anymore": { - setupWorkerPool: func(t *testing.T) *syncWorkerPool { - ctrl := gomock.NewController(t) - networkMock := NewMockNetwork(ctrl) - networkMock.EXPECT(). - AllConnectedPeersIDs(). 
- Return([]peer.ID{ - peer.ID("available-1"), - peer.ID("available-2"), - peer.ID("available-3"), - }) - workerPool := newSyncWorkerPool(networkMock, nil) - workerPool.workers[peer.ID("available-3")] = &peerSyncWorker{ - // status: punished, - // // arbitrary unix value - // punishmentTime: time.Unix(1000, 0), - } - return workerPool - }, - expectedPool: map[peer.ID]*peerSyncWorker{ - // peer.ID("available-1"): {status: available}, - // peer.ID("available-2"): {status: available}, - // peer.ID("available-3"): { - // status: available, - // punishmentTime: time.Unix(1000, 0), - // }, + exepectedWorkers: []peer.ID{ + peer.ID("available-1"), + peer.ID("available-2"), }, }, - "peer_punishment_still_valid": { + "peer_already_in_workers_set": { setupWorkerPool: func(t *testing.T) *syncWorkerPool { ctrl := gomock.NewController(t) networkMock := NewMockNetwork(ctrl) @@ -115,19 +86,13 @@ func TestSyncWorkerPool_useConnectedPeers(t *testing.T) { peer.ID("available-3"), }) workerPool := newSyncWorkerPool(networkMock, nil) - workerPool.workers[peer.ID("available-3")] = &peerSyncWorker{ - // status: punished, - // punishmentTime: stablePunishmentTime, - } + workerPool.workers[peer.ID("available-3")] = &worker{stopCh: make(chan struct{})} return workerPool }, - expectedPool: map[peer.ID]*peerSyncWorker{ - // peer.ID("available-1"): {status: available}, - // peer.ID("available-2"): {status: available}, - // peer.ID("available-3"): { - // status: punished, - // punishmentTime: stablePunishmentTime, - // }, + exepectedWorkers: []peer.ID{ + peer.ID("available-1"), + peer.ID("available-2"), + peer.ID("available-3"), }, }, } @@ -139,88 +104,11 @@ func TestSyncWorkerPool_useConnectedPeers(t *testing.T) { workerPool := tt.setupWorkerPool(t) workerPool.useConnectedPeers() + defer workerPool.stop() - require.Equal(t, workerPool.workers, tt.expectedPool) - }) - } -} - -func TestSyncWorkerPool_newPeer(t *testing.T) { - t.Parallel() - //stablePunishmentTime := time.Now().Add(time.Minute * 2) - - cases := map[string]struct { - peerID peer.ID - setupWorkerPool func(t *testing.T) *syncWorkerPool - expectedPool map[peer.ID]*peerSyncWorker - }{ - "very_fist_entry": { - peerID: peer.ID("peer-1"), - setupWorkerPool: func(*testing.T) *syncWorkerPool { - return newSyncWorkerPool(nil, nil) - }, - expectedPool: map[peer.ID]*peerSyncWorker{ - peer.ID("peer-1"): { - worker: &worker{status: available}, - }, - }, - }, - "peer_to_ignore": { - peerID: peer.ID("to-ignore"), - setupWorkerPool: func(*testing.T) *syncWorkerPool { - workerPool := newSyncWorkerPool(nil, nil) - workerPool.ignorePeers[peer.ID("to-ignore")] = struct{}{} - return workerPool - }, - expectedPool: map[peer.ID]*peerSyncWorker{}, - }, - "peer_punishment_not_valid_anymore": { - peerID: peer.ID("free-again"), - setupWorkerPool: func(*testing.T) *syncWorkerPool { - workerPool := newSyncWorkerPool(nil, nil) - workerPool.workers[peer.ID("free-again")] = &peerSyncWorker{ - // status: punished, - // // arbitrary unix value - // punishmentTime: time.Unix(1000, 0), - } - return workerPool - }, - expectedPool: map[peer.ID]*peerSyncWorker{ - peer.ID("free-again"): { - // status: available, - // punishmentTime: time.Unix(1000, 0), - }, - }, - }, - "peer_punishment_still_valid": { - peerID: peer.ID("peer_punished"), - setupWorkerPool: func(*testing.T) *syncWorkerPool { - - workerPool := newSyncWorkerPool(nil, nil) - workerPool.workers[peer.ID("peer_punished")] = &peerSyncWorker{ - // status: punished, - // punishmentTime: stablePunishmentTime, - } - return workerPool - }, - 
expectedPool: map[peer.ID]*peerSyncWorker{ - peer.ID("peer_punished"): { - // status: punished, - // punishmentTime: stablePunishmentTime, - }, - }, - }, - } - - for tname, tt := range cases { - tt := tt - t.Run(tname, func(t *testing.T) { - t.Parallel() - - workerPool := tt.setupWorkerPool(t) - workerPool.newPeer(tt.peerID) - - require.Equal(t, workerPool.workers, tt.expectedPool) + require.ElementsMatch(t, + maps.Keys(workerPool.workers), + tt.exepectedWorkers) }) } } @@ -257,7 +145,6 @@ func TestSyncWorkerPool_listenForRequests_submitRequest(t *testing.T) { requestMakerMock.EXPECT(). Do(availablePeer, blockRequest, &network.BlockResponseMessage{}). DoAndReturn(func(_, _, response any) any { - time.Sleep(5 * time.Second) responsePtr := response.(*network.BlockResponseMessage) *responsePtr = *mockedBlockResponse return nil @@ -266,13 +153,6 @@ func TestSyncWorkerPool_listenForRequests_submitRequest(t *testing.T) { resultCh := make(chan *syncTaskResult) workerPool.submitRequest(blockRequest, nil, resultCh) - // ensure the task is in the pool and was already - // assigned to the peer - time.Sleep(time.Second) - - totalWorkers := workerPool.totalWorkers() - require.Zero(t, totalWorkers) - syncTaskResult := <-resultCh require.NoError(t, syncTaskResult.err) require.Equal(t, syncTaskResult.who, availablePeer) @@ -281,7 +161,7 @@ func TestSyncWorkerPool_listenForRequests_submitRequest(t *testing.T) { } -func TestSyncWorkerPool_listenForRequests_busyWorkers(t *testing.T) { +func TestSyncWorkerPool_singleWorker_multipleRequests(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) @@ -347,11 +227,6 @@ func TestSyncWorkerPool_listenForRequests_busyWorkers(t *testing.T) { resultCh := workerPool.submitRequests( []*network.BlockRequestMessage{firstBlockRequest, secondBlockRequest}) - // ensure the task is in the pool and was already - // assigned to the peer - time.Sleep(time.Second) - require.Zero(t, workerPool.totalWorkers()) - syncTaskResult := <-resultCh require.NoError(t, syncTaskResult.err) require.Equal(t, syncTaskResult.who, availablePeer) diff --git a/dot/sync/worker_test.go b/dot/sync/worker_test.go index 94c1e6bae0..4f0e734b10 100644 --- a/dot/sync/worker_test.go +++ b/dot/sync/worker_test.go @@ -5,8 +5,8 @@ package sync import ( "errors" - "fmt" "testing" + "time" "github.com/ChainSafe/gossamer/dot/network" "github.com/golang/mock/gomock" @@ -14,28 +14,42 @@ import ( "github.com/stretchr/testify/require" ) -func TestWorkerStop(t *testing.T) { +func TestWorker(t *testing.T) { peerA := peer.ID("peerA") ctrl := gomock.NewController(t) reqMaker := NewMockRequestMaker(ctrl) reqMaker.EXPECT(). Do(peerA, nil, gomock.AssignableToTypeOf((*network.BlockResponseMessage)(nil))). + DoAndReturn(func(_, _, _ any) any { + time.Sleep(2 * time.Second) + return nil + }). + Times(2). Return(nil) sharedGuard := make(chan struct{}, 1) - generalQueue := make(chan *syncTask) - w := newWorker(peerA, sharedGuard, reqMaker) w.start() resultCh := make(chan *syncTaskResult) defer close(resultCh) - generalQueue <- &syncTask{ + enqueued := w.processTask(&syncTask{ resultCh: resultCh, - } + }) + require.True(t, enqueued) + + enqueued = w.processTask(&syncTask{ + resultCh: resultCh, + }) + require.True(t, enqueued) + time.Sleep(500 * time.Millisecond) + require.Equal(t, 1, len(sharedGuard)) + <-resultCh + + time.Sleep(500 * time.Millisecond) require.Equal(t, 1, len(sharedGuard)) <-resultCh @@ -53,6 +67,10 @@ func TestWorkerAsyncStop(t *testing.T) { reqMaker.EXPECT(). 
Do(peerA, nil, gomock.AssignableToTypeOf((*network.BlockResponseMessage)(nil))). + DoAndReturn(func(_, _, _ any) any { + time.Sleep(2 * time.Second) + return nil + }). Return(nil) sharedGuard := make(chan struct{}, 2) @@ -62,6 +80,8 @@ func TestWorkerAsyncStop(t *testing.T) { doneCh := make(chan struct{}) resultCh := make(chan *syncTaskResult, 2) + defer close(resultCh) + go handleResultsHelper(t, w, resultCh, doneCh) // issue two requests in the general channel @@ -73,7 +93,6 @@ func TestWorkerAsyncStop(t *testing.T) { resultCh: resultCh, }) - close(resultCh) <-doneCh } @@ -83,8 +102,9 @@ func handleResultsHelper(t *testing.T, w *worker, resultCh chan *syncTaskResult, for r := range resultCh { if r.err != nil { - fmt.Printf("==> %s\n", r.err) - w.stop() + err := w.stop() + require.NoError(t, err) + return } } } From 827e2a7c652e0fd95328a9a37905de5372af9e7a Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 4 Aug 2023 08:51:33 -0400 Subject: [PATCH 123/140] chore: fix `TestAscendingBlockRequest` to include the start block in the len of requested blocks --- dot/network/message_test.go | 51 ++++++++++++++++++++++--------------- 1 file changed, 31 insertions(+), 20 deletions(-) diff --git a/dot/network/message_test.go b/dot/network/message_test.go index 839cf0e228..98d62b08c7 100644 --- a/dot/network/message_test.go +++ b/dot/network/message_test.go @@ -428,13 +428,15 @@ func TestAscendingBlockRequest(t *testing.T) { three := uint32(3) maxResponseSize := uint32(MaxBlocksInResponse) cases := map[string]struct { - startNumber, targetNumber uint - expectedBlockRequestMessage []*BlockRequestMessage + startNumber, targetNumber uint + expectedBlockRequestMessage []*BlockRequestMessage + expectedTotalOfBlocksRequested uint32 }{ "start_greater_than_target": { - startNumber: 10, - targetNumber: 0, - expectedBlockRequestMessage: []*BlockRequestMessage{}, + startNumber: 10, + targetNumber: 0, + expectedBlockRequestMessage: []*BlockRequestMessage{}, + expectedTotalOfBlocksRequested: 0, }, "no_difference_between_start_and_target": { @@ -448,15 +450,16 @@ func TestAscendingBlockRequest(t *testing.T) { Max: &one, }, }, + expectedTotalOfBlocksRequested: 1, }, "requesting_128_blocks": { - startNumber: 0, + startNumber: 1, targetNumber: 128, expectedBlockRequestMessage: []*BlockRequestMessage{ { RequestedData: BootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(0)), + StartingBlock: *variadic.MustNewUint32OrHash(uint32(1)), Direction: Ascending, Max: &maxResponseSize, }, @@ -464,30 +467,31 @@ func TestAscendingBlockRequest(t *testing.T) { }, "requesting_4_chunks_of_128_blocks": { - startNumber: 0, - targetNumber: 512, // 128 * 4 + startNumber: 1, + targetNumber: 128 * 4, // 512 + expectedTotalOfBlocksRequested: 512, expectedBlockRequestMessage: []*BlockRequestMessage{ { RequestedData: BootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(0)), + StartingBlock: *variadic.MustNewUint32OrHash(uint32(1)), Direction: Ascending, Max: &maxResponseSize, }, { RequestedData: BootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(128)), + StartingBlock: *variadic.MustNewUint32OrHash(uint32(129)), Direction: Ascending, Max: &maxResponseSize, }, { RequestedData: BootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(256)), + StartingBlock: *variadic.MustNewUint32OrHash(uint32(257)), Direction: Ascending, Max: &maxResponseSize, }, { RequestedData: BootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(384)), 
+ StartingBlock: *variadic.MustNewUint32OrHash(uint32(385)), Direction: Ascending, Max: &maxResponseSize, }, @@ -495,36 +499,37 @@ func TestAscendingBlockRequest(t *testing.T) { }, "requesting_4_chunks_of_128_plus_3_blocks": { - startNumber: 0, - targetNumber: (128 * 4) + 3, + startNumber: 1, + targetNumber: (128 * 4) + 3, + expectedTotalOfBlocksRequested: 515, expectedBlockRequestMessage: []*BlockRequestMessage{ { RequestedData: BootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(0)), + StartingBlock: *variadic.MustNewUint32OrHash(uint32(1)), Direction: Ascending, Max: &maxResponseSize, }, { RequestedData: BootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(128)), + StartingBlock: *variadic.MustNewUint32OrHash(uint32(129)), Direction: Ascending, Max: &maxResponseSize, }, { RequestedData: BootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(256)), + StartingBlock: *variadic.MustNewUint32OrHash(uint32(257)), Direction: Ascending, Max: &maxResponseSize, }, { RequestedData: BootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(384)), + StartingBlock: *variadic.MustNewUint32OrHash(uint32(385)), Direction: Ascending, Max: &maxResponseSize, }, { RequestedData: BootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(uint32(512)), + StartingBlock: *variadic.MustNewUint32OrHash(uint32(513)), Direction: Ascending, Max: &three, }, @@ -538,6 +543,12 @@ func TestAscendingBlockRequest(t *testing.T) { t.Run(tname, func(t *testing.T) { requests := NewAscendingBlockRequests(tt.startNumber, tt.targetNumber, BootstrapRequestData) require.Equal(t, tt.expectedBlockRequestMessage, requests) + + acc := uint32(0) + for _, r := range requests { + acc += *r.Max + } + require.Equal(t, tt.expectedTotalOfBlocksRequested, acc) }) } } From 49e0aee20ebf9290f267535873411df8b53ef1bb Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 4 Aug 2023 09:30:03 -0400 Subject: [PATCH 124/140] chore: fix lint warn --- dot/sync/worker.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/dot/sync/worker.go b/dot/sync/worker.go index fbf43e346e..519db4bd66 100644 --- a/dot/sync/worker.go +++ b/dot/sync/worker.go @@ -6,7 +6,6 @@ package sync import ( "errors" "fmt" - "sync" "time" "github.com/ChainSafe/gossamer/dot/network" @@ -16,7 +15,6 @@ import ( var ErrStopTimeout = errors.New("stop timeout") type worker struct { - mtx sync.Mutex status byte peerID peer.ID sharedGuard chan struct{} From d6416a8f766b9d774454c548a93de419bc89645e Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 4 Aug 2023 13:38:25 -0400 Subject: [PATCH 125/140] chore: fix `TestAscendingBlockRequest` test failure --- dot/network/message_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/dot/network/message_test.go b/dot/network/message_test.go index 98d62b08c7..8498ee4426 100644 --- a/dot/network/message_test.go +++ b/dot/network/message_test.go @@ -454,8 +454,9 @@ func TestAscendingBlockRequest(t *testing.T) { }, "requesting_128_blocks": { - startNumber: 1, - targetNumber: 128, + startNumber: 1, + targetNumber: 128, + expectedTotalOfBlocksRequested: 128, expectedBlockRequestMessage: []*BlockRequestMessage{ { RequestedData: BootstrapRequestData, From dfd96b906d7997e6b6da008ad873f195f9877b4a Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 4 Aug 2023 15:05:03 -0400 Subject: [PATCH 126/140] chore: remove test to ensure which peer receives task --- dot/sync/chain_sync_test.go | 15 --------------- 1 file 
changed, 15 deletions(-) diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index 79ff939f59..cbd66612cd 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -714,9 +714,6 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorkerFailing(t *testing. require.FailNow(t, "expected calls by %s and %s, got: %s", peer.ID("alice"), peer.ID("bob"), pID) default: - // ensure the the third call will be made by peer.ID("alice") - require.Equalf(t, pID, peer.ID("alice"), - "expect third call be made by %s, got: %s", peer.ID("alice"), pID) } *responsePtr = *worker2Response @@ -820,9 +817,6 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithProtocolNotSupported(t *test require.FailNow(t, "expected calls by %s and %s, got: %s", peer.ID("alice"), peer.ID("bob"), pID) default: - // ensure the the third call will be made by peer.ID("alice") - require.Equalf(t, pID, peer.ID("alice"), - "expect third call be made by %s, got: %s", peer.ID("alice"), pID) } *responsePtr = *worker2Response @@ -935,9 +929,6 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithNilHeaderInResponse(t *testi require.FailNow(t, "expected calls by %s and %s, got: %s", peer.ID("alice"), peer.ID("bob"), pID) default: - // ensure the the third call will be made by peer.ID("alice") - require.Equalf(t, pID, peer.ID("alice"), - "expect third call be made by %s, got: %s", peer.ID("alice"), pID) } *responsePtr = *worker2Response @@ -1052,9 +1043,6 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithResponseIsNotAChain(t *testi require.FailNow(t, "expected calls by %s and %s, got: %s", peer.ID("alice"), peer.ID("bob"), pID) default: - // ensure the the third call will be made by peer.ID("alice") - require.Equalf(t, pID, peer.ID("alice"), - "expect third call be made by %s, got: %s", peer.ID("alice"), pID) } *responsePtr = *worker2Response @@ -1173,9 +1161,6 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing. 
require.FailNow(t, "expected calls by %s and %s, got: %s", peer.ID("alice"), peer.ID("bob"), pID) default: - // ensure the the third call will be made by peer.ID("alice") - require.Equalf(t, pID, peer.ID("alice"), - "expect third call be made by %s, got: %s", peer.ID("alice"), pID) } *responsePtr = *worker2Response From c738d0186417014b7993d65de78743a5a2f42529 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 4 Aug 2023 15:31:36 -0400 Subject: [PATCH 127/140] chore: remove hidden goroutine `GO-E1007` --- dot/sync/worker.go | 28 +++++++++++++--------------- dot/sync/worker_pool.go | 3 ++- dot/sync/worker_test.go | 4 ++-- 3 files changed, 17 insertions(+), 18 deletions(-) diff --git a/dot/sync/worker.go b/dot/sync/worker.go index 519db4bd66..6c47e7ee50 100644 --- a/dot/sync/worker.go +++ b/dot/sync/worker.go @@ -41,22 +41,20 @@ func newWorker(pID peer.ID, sharedGuard chan struct{}, network network.RequestMa } func (w *worker) start() { - go func() { - defer func() { - logger.Debugf("[STOPPED] worker %s", w.peerID) - close(w.doneCh) - }() - - logger.Debugf("[STARTED] worker %s", w.peerID) - for { - select { - case <-w.stopCh: - return - case task := <-w.queue: - executeRequest(w.peerID, w.requestMaker, task, w.sharedGuard) - } - } + defer func() { + logger.Debugf("[STOPPED] worker %s", w.peerID) + close(w.doneCh) }() + + logger.Debugf("[STARTED] worker %s", w.peerID) + for { + select { + case <-w.stopCh: + return + case task := <-w.queue: + executeRequest(w.peerID, w.requestMaker, task, w.sharedGuard) + } + } } func (w *worker) processTask(task *syncTask) (enqueued bool) { diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 24f113be2d..e7cad0b1eb 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -126,7 +126,8 @@ func (s *syncWorkerPool) newPeer(who peer.ID) { } syncWorker := newWorker(who, s.sharedGuard, s.requestMaker) - syncWorker.start() + go syncWorker.start() + s.workers[who] = syncWorker logger.Tracef("potential worker added, total in the pool %d", len(s.workers)) } diff --git a/dot/sync/worker_test.go b/dot/sync/worker_test.go index 4f0e734b10..aff4c4304e 100644 --- a/dot/sync/worker_test.go +++ b/dot/sync/worker_test.go @@ -30,7 +30,7 @@ func TestWorker(t *testing.T) { sharedGuard := make(chan struct{}, 1) w := newWorker(peerA, sharedGuard, reqMaker) - w.start() + go w.start() resultCh := make(chan *syncTaskResult) defer close(resultCh) @@ -76,7 +76,7 @@ func TestWorkerAsyncStop(t *testing.T) { sharedGuard := make(chan struct{}, 2) w := newWorker(peerA, sharedGuard, reqMaker) - w.start() + go w.start() doneCh := make(chan struct{}) resultCh := make(chan *syncTaskResult, 2) From 90c6658003e6dfea56976b14c972acbef9226019 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 7 Aug 2023 10:33:27 -0400 Subject: [PATCH 128/140] chore: update `worker_test.go` --- dot/sync/chain_sync_test.go | 2 -- dot/sync/worker_test.go | 54 ------------------------------------- 2 files changed, 56 deletions(-) diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index cbd66612cd..5a0b7fc612 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -1167,8 +1167,6 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing. 
return nil }).Times(3) - fmt.Printf("BAD BLOCK HASH: %s\n", fakeBadBlockHash) - mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{ Value: peerset.BadBlockAnnouncementValue, Reason: peerset.BadBlockAnnouncementReason, diff --git a/dot/sync/worker_test.go b/dot/sync/worker_test.go index aff4c4304e..c22891872d 100644 --- a/dot/sync/worker_test.go +++ b/dot/sync/worker_test.go @@ -4,7 +4,6 @@ package sync import ( - "errors" "testing" "time" @@ -55,56 +54,3 @@ func TestWorker(t *testing.T) { w.stop() } - -func TestWorkerAsyncStop(t *testing.T) { - peerA := peer.ID("peerA") - ctrl := gomock.NewController(t) - - reqMaker := NewMockRequestMaker(ctrl) - reqMaker.EXPECT(). - Do(peerA, nil, gomock.AssignableToTypeOf((*network.BlockResponseMessage)(nil))). - Return(errors.New("mocked error")) - - reqMaker.EXPECT(). - Do(peerA, nil, gomock.AssignableToTypeOf((*network.BlockResponseMessage)(nil))). - DoAndReturn(func(_, _, _ any) any { - time.Sleep(2 * time.Second) - return nil - }). - Return(nil) - - sharedGuard := make(chan struct{}, 2) - - w := newWorker(peerA, sharedGuard, reqMaker) - go w.start() - - doneCh := make(chan struct{}) - resultCh := make(chan *syncTaskResult, 2) - defer close(resultCh) - - go handleResultsHelper(t, w, resultCh, doneCh) - - // issue two requests in the general channel - w.processTask(&syncTask{ - resultCh: resultCh, - }) - - w.processTask(&syncTask{ - resultCh: resultCh, - }) - - <-doneCh -} - -func handleResultsHelper(t *testing.T, w *worker, resultCh chan *syncTaskResult, doneCh chan<- struct{}) { - t.Helper() - defer close(doneCh) - - for r := range resultCh { - if r.err != nil { - err := w.stop() - require.NoError(t, err) - return - } - } -} From 1e1f0dce905fea8d5358b07fcfd61931be0db144 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 7 Aug 2023 11:32:09 -0400 Subject: [PATCH 129/140] chore: update `zombienet` github action to go1.20 --- .github/workflows/zombienet.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/zombienet.yml b/.github/workflows/zombienet.yml index 586391c646..559ca8f1d2 100644 --- a/.github/workflows/zombienet.yml +++ b/.github/workflows/zombienet.yml @@ -9,7 +9,7 @@ jobs: steps: - uses: actions/setup-go@v4 with: - go-version: 1.19 + go-version: "1.20" stable: true check-latest: true @@ -19,7 +19,6 @@ jobs: echo "::set-output name=go-build::$(go env GOCACHE)" echo "::set-output name=go-mod::$(go env GOMODCACHE)" - uses: actions/checkout@v3 - - name: Go build cache uses: actions/cache@v3 with: @@ -51,4 +50,4 @@ jobs: chmod +x /usr/local/bin/zombienet - name: Zombienet test run: | - zombienet test -p native zombienet_tests/functional/0001-basic-network.zndsl \ No newline at end of file + zombienet test -p native zombienet_tests/functional/0001-basic-network.zndsl From c4efd0088bf28edffc60efc2679adc1d39d960cb Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 8 Aug 2023 14:21:09 -0400 Subject: [PATCH 130/140] chore: start peer worker from block announce --- dot/sync/chain_sync.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index e8c872db2c..a350c45e5b 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -318,6 +318,9 @@ func (cs *chainSync) onBlockAnnounceHandshake(who peer.ID, bestHash common.Hash, } func (cs *chainSync) onBlockAnnounce(announced announcedBlock) error { + // TODO: create issue to be spec compliat with regard block annoucement + cs.workerPool.fromBlockAnnounce(announced.who) + if 
cs.pendingBlocks.hasBlock(announced.header.Hash()) { return fmt.Errorf("%w: block %s (#%d)", errAlreadyInDisjointSet, announced.header.Hash(), announced.header.Number) From c570e6511331699385d214f4d92ffd0697ad0afb Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 8 Aug 2023 15:43:47 -0400 Subject: [PATCH 131/140] chore: address comments --- dot/node_integration_test.go | 1 - dot/sync/worker.go | 13 +++---------- dot/sync/worker_pool.go | 13 +++++++------ dot/sync/worker_pool_test.go | 2 +- 4 files changed, 11 insertions(+), 18 deletions(-) diff --git a/dot/node_integration_test.go b/dot/node_integration_test.go index 345b87a251..99d3694e61 100644 --- a/dot/node_integration_test.go +++ b/dot/node_integration_test.go @@ -324,7 +324,6 @@ func TestStartStopNode(t *testing.T) { config.ChainSpec = genFile config.Core.GrandpaAuthority = false config.Core.BabeAuthority = false - config.Network.MinPeers = 0 err := InitNode(config) require.NoError(t, err) diff --git a/dot/sync/worker.go b/dot/sync/worker.go index 6c47e7ee50..502a448120 100644 --- a/dot/sync/worker.go +++ b/dot/sync/worker.go @@ -20,7 +20,6 @@ type worker struct { sharedGuard chan struct{} punishment chan time.Duration - stopCh chan struct{} doneCh chan struct{} queue chan *syncTask @@ -32,7 +31,6 @@ func newWorker(pID peer.ID, sharedGuard chan struct{}, network network.RequestMa peerID: pID, sharedGuard: sharedGuard, punishment: make(chan time.Duration), - stopCh: make(chan struct{}), doneCh: make(chan struct{}), queue: make(chan *syncTask, maxRequestsAllowed), requestMaker: network, @@ -47,13 +45,8 @@ func (w *worker) start() { }() logger.Debugf("[STARTED] worker %s", w.peerID) - for { - select { - case <-w.stopCh: - return - case task := <-w.queue: - executeRequest(w.peerID, w.requestMaker, task, w.sharedGuard) - } + for task := range w.queue { + executeRequest(w.peerID, w.requestMaker, task, w.sharedGuard) } } @@ -67,7 +60,7 @@ func (w *worker) processTask(task *syncTask) (enqueued bool) { } func (w *worker) stop() error { - close(w.stopCh) + close(w.queue) timeoutTimer := time.NewTimer(30 * time.Second) select { diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index e7cad0b1eb..5bae0ea5b9 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -152,15 +152,16 @@ func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, if who != nil { syncWorker := s.workers[*who] - syncWorker.processTask(task) - return - } - for syncWorkerPeerID, syncWorker := range s.workers { - if who != nil && *who == syncWorkerPeerID { - continue + // if task enqueued then returns otherwise + // try to submit the task to other available peer + enqueued := syncWorker.processTask(task) + if enqueued { + return } + } + for _, syncWorker := range s.workers { enqueued := syncWorker.processTask(task) if enqueued { break diff --git a/dot/sync/worker_pool_test.go b/dot/sync/worker_pool_test.go index d00ce4c5f1..4106673d2c 100644 --- a/dot/sync/worker_pool_test.go +++ b/dot/sync/worker_pool_test.go @@ -86,7 +86,7 @@ func TestSyncWorkerPool_useConnectedPeers(t *testing.T) { peer.ID("available-3"), }) workerPool := newSyncWorkerPool(networkMock, nil) - workerPool.workers[peer.ID("available-3")] = &worker{stopCh: make(chan struct{})} + workerPool.workers[peer.ID("available-3")] = &worker{queue: make(chan *syncTask)} return workerPool }, exepectedWorkers: []peer.ID{ From aeb4b6e1c095c6e0da16d82926ec320f808b1e1f Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 8 Aug 2023 16:26:39 -0400 Subject: [PATCH 
132/140] chore: make `processTask` panics if `queue` channel is blocked to write --- dot/sync/worker.go | 21 +++++++++++++++------ dot/sync/worker_pool.go | 24 ++++-------------------- dot/sync/worker_test.go | 6 ++---- 3 files changed, 21 insertions(+), 30 deletions(-) diff --git a/dot/sync/worker.go b/dot/sync/worker.go index 502a448120..a46469b3bd 100644 --- a/dot/sync/worker.go +++ b/dot/sync/worker.go @@ -6,6 +6,7 @@ package sync import ( "errors" "fmt" + "sync" "time" "github.com/ChainSafe/gossamer/dot/network" @@ -15,6 +16,7 @@ import ( var ErrStopTimeout = errors.New("stop timeout") type worker struct { + mxt sync.Mutex status byte peerID peer.ID sharedGuard chan struct{} @@ -50,17 +52,24 @@ func (w *worker) start() { } } -func (w *worker) processTask(task *syncTask) (enqueued bool) { - select { - case w.queue <- task: - return true - default: - return false +func (w *worker) processTask(task *syncTask) { + w.mxt.Lock() + defer w.mxt.Unlock() + if w.queue != nil { + select { + case w.queue <- task: + default: + panic(fmt.Sprintf("worker %s queue is blocked, cannot enqueue task: %s", + w.peerID, task.request.String())) + } } } func (w *worker) stop() error { + w.mxt.Lock() close(w.queue) + w.queue = nil + w.mxt.Unlock() timeoutTimer := time.NewTimer(30 * time.Second) select { diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 5bae0ea5b9..1c92a60cc8 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -152,27 +152,18 @@ func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, if who != nil { syncWorker := s.workers[*who] - - // if task enqueued then returns otherwise - // try to submit the task to other available peer - enqueued := syncWorker.processTask(task) - if enqueued { - return - } + syncWorker.processTask(task) + return } for _, syncWorker := range s.workers { - enqueued := syncWorker.processTask(task) - if enqueued { - break - } + syncWorker.processTask(task) } } // submitRequests takes an set of requests and will submit to the pool through submitRequest // the response will be dispatch in the resultCh func (s *syncWorkerPool) submitRequests(requests []*network.BlockRequestMessage) (resultCh chan *syncTaskResult) { - logger.Debugf("[SENDING] %d requests", len(requests)) resultCh = make(chan *syncTaskResult, maxRequestsAllowed+1) s.mtx.RLock() @@ -184,18 +175,11 @@ func (s *syncWorkerPool) submitRequests(requests []*network.BlockRequestMessage) workerID := idx % len(allWorkers) syncWorker := allWorkers[workerID] - enqueued := syncWorker.processTask(&syncTask{ + syncWorker.processTask(&syncTask{ request: requests[idx], resultCh: resultCh, }) - if !enqueued { - continue - } - - // only increases the index if a task was successfully equeued - // for some worker, if the task was not equeued for some worker - // jump to the next worker and try to enqueue there idx++ } diff --git a/dot/sync/worker_test.go b/dot/sync/worker_test.go index c22891872d..718ca3e6e1 100644 --- a/dot/sync/worker_test.go +++ b/dot/sync/worker_test.go @@ -34,15 +34,13 @@ func TestWorker(t *testing.T) { resultCh := make(chan *syncTaskResult) defer close(resultCh) - enqueued := w.processTask(&syncTask{ + w.processTask(&syncTask{ resultCh: resultCh, }) - require.True(t, enqueued) - enqueued = w.processTask(&syncTask{ + w.processTask(&syncTask{ resultCh: resultCh, }) - require.True(t, enqueued) time.Sleep(500 * time.Millisecond) require.Equal(t, 1, len(sharedGuard)) From ec57baf49919d48e884227b8e4298c38d429ae83 Mon Sep 17 00:00:00 2001 From: 
EclesioMeloJunior Date: Wed, 9 Aug 2023 12:26:06 -0400 Subject: [PATCH 133/140] chore: randomly assign a task to a peer if none is passed --- dot/sync/chain_sync.go | 2 ++ dot/sync/worker.go | 1 - dot/sync/worker_pool.go | 18 ++++++++++-------- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index a350c45e5b..f1fdcc1f0b 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -643,6 +643,7 @@ taskResultLoop: } } + // TODO: avoid the same peer to get the same task cs.workerPool.submitRequest(request, nil, workersResults) continue } @@ -743,6 +744,7 @@ taskResultLoop: return fmt.Errorf("while handling ready block: %w", err) } } + return nil } diff --git a/dot/sync/worker.go b/dot/sync/worker.go index a46469b3bd..9d50e924f3 100644 --- a/dot/sync/worker.go +++ b/dot/sync/worker.go @@ -46,7 +46,6 @@ func (w *worker) start() { close(w.doneCh) }() - logger.Debugf("[STARTED] worker %s", w.peerID) for task := range w.queue { executeRequest(w.peerID, w.requestMaker, task, w.sharedGuard) } diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index 1c92a60cc8..e95b2886ae 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -6,6 +6,7 @@ package sync import ( "errors" "fmt" + "math/rand" "sync" "time" @@ -156,9 +157,13 @@ func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, return } - for _, syncWorker := range s.workers { - syncWorker.processTask(task) - } + // if the exact peer is not specified then + // randomly select a worker and assign the + // task to it + workers := maps.Values(s.workers) + selectedWorkerIdx := rand.Intn(len(workers)) + selectedWorker := workers[selectedWorkerIdx] + selectedWorker.processTask(task) } // submitRequests takes an set of requests and will submit to the pool through submitRequest @@ -169,18 +174,15 @@ func (s *syncWorkerPool) submitRequests(requests []*network.BlockRequestMessage) s.mtx.RLock() defer s.mtx.RUnlock() - idx := 0 allWorkers := maps.Values(s.workers) - for idx < len(requests) { + for idx, request := range requests { workerID := idx % len(allWorkers) syncWorker := allWorkers[workerID] syncWorker.processTask(&syncTask{ - request: requests[idx], + request: request, resultCh: resultCh, }) - - idx++ } return resultCh From fa161b04d4fede1b855d20c7956385739ae5b1d5 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 9 Aug 2023 14:33:03 -0400 Subject: [PATCH 134/140] chore: fix lint --- dot/sync/chain_sync.go | 2 +- dot/sync/worker_pool.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index f1fdcc1f0b..cb33ce0140 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -318,7 +318,7 @@ func (cs *chainSync) onBlockAnnounceHandshake(who peer.ID, bestHash common.Hash, } func (cs *chainSync) onBlockAnnounce(announced announcedBlock) error { - // TODO: create issue to be spec compliat with regard block annoucement + // TODO: https://github.com/ChainSafe/gossamer/issues/3432 cs.workerPool.fromBlockAnnounce(announced.who) if cs.pendingBlocks.hasBlock(announced.header.Hash()) { diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index e95b2886ae..be2b4e8e5e 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -161,7 +161,7 @@ func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, // randomly select a worker and assign the // task to it workers := maps.Values(s.workers) - selectedWorkerIdx := 
rand.Intn(len(workers)) + selectedWorkerIdx := rand.Intn(len(workers)) //nolint:all selectedWorker := workers[selectedWorkerIdx] selectedWorker.processTask(task) } From d9dafa572243aa8363e29a825e1426b02f887a89 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 9 Aug 2023 14:47:35 -0400 Subject: [PATCH 135/140] chore: use `crypto/rand` --- dot/sync/worker_pool.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go index be2b4e8e5e..df7e489b5f 100644 --- a/dot/sync/worker_pool.go +++ b/dot/sync/worker_pool.go @@ -6,7 +6,9 @@ package sync import ( "errors" "fmt" - "math/rand" + + "crypto/rand" + "math/big" "sync" "time" @@ -159,9 +161,14 @@ func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, // if the exact peer is not specified then // randomly select a worker and assign the - // task to it + // task to it, if the amount of workers is + var selectedWorkerIdx int workers := maps.Values(s.workers) - selectedWorkerIdx := rand.Intn(len(workers)) //nolint:all + nBig, err := rand.Int(rand.Reader, big.NewInt(int64(len(workers)))) + if err != nil { + panic(fmt.Errorf("fail to get a random number: %w", err)) + } + selectedWorkerIdx = int(nBig.Int64()) selectedWorker := workers[selectedWorkerIdx] selectedWorker.processTask(task) } From 0c97fe0565dd1fd487034f55a8cbcae33981fd53 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 10 Aug 2023 11:30:22 -0400 Subject: [PATCH 136/140] chore: make tests work --- dot/sync/chain_sync_test.go | 163 ++++++++++++------------------------ 1 file changed, 54 insertions(+), 109 deletions(-) diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index 5a0b7fc612..ba16c10809 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -699,25 +699,16 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorkerFailing(t *testing. 
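 // the reworked mocks below switch on how many calls were made so far rather
 // than on the peer ID: tasks are now assigned to a random worker, so the test
 // can only rely on the order of requests, not on which peer serves each one
 // (this is also why the ReportPeer expectations use
 // gomock.AssignableToTypeOf(peer.ID("")) instead of a hard-coded peer)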
responsePtr := response.(*network.BlockResponseMessage) defer func() { doBlockRequestCount.Add(1) }() - pID := peerID.(peer.ID) // cast to peer ID switch doBlockRequestCount.Load() { - case 0, 1: - if pID == peer.ID("alice") { - *responsePtr = *worker1Response - return nil - } - - if pID == peer.ID("bob") { - return errors.New("a bad error while getting a response") - } - - require.FailNow(t, "expected calls by %s and %s, got: %s", - peer.ID("alice"), peer.ID("bob"), pID) + case 0: + *responsePtr = *worker1Response + case 1: + return errors.New("a bad error while getting a response") default: + *responsePtr = *worker2Response } - - *responsePtr = *worker2Response return nil + }).Times(3) // setup a chain sync which holds in its peer view map @@ -802,33 +793,24 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithProtocolNotSupported(t *test responsePtr := response.(*network.BlockResponseMessage) defer func() { doBlockRequestCount.Add(1) }() - pID := peerID.(peer.ID) // cast to peer ID switch doBlockRequestCount.Load() { - case 0, 1: - if pID == peer.ID("alice") { - *responsePtr = *worker1Response - return nil - } - - if pID == peer.ID("bob") { - return errors.New("protocols not supported") - } - - require.FailNow(t, "expected calls by %s and %s, got: %s", - peer.ID("alice"), peer.ID("bob"), pID) + case 0: + *responsePtr = *worker1Response + case 1: + return errors.New("protocols not supported") default: + *responsePtr = *worker2Response } - *responsePtr = *worker2Response return nil }).Times(3) - // since peer.ID("bob") will fail with protocols not supported his + // since some peer will fail with protocols not supported his // reputation will be affected and mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{ Value: peerset.BadProtocolValue, Reason: peerset.BadProtocolReason, - }, peer.ID("bob")) + }, gomock.AssignableToTypeOf(peer.ID(""))) // setup a chain sync which holds in its peer view map // 3 peers, each one announce block 129 as its best block number. 
// We start this test with genesis block being our best block, so @@ -910,37 +892,27 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithNilHeaderInResponse(t *testi responsePtr := response.(*network.BlockResponseMessage) defer func() { doBlockRequestCount.Add(1) }() - pID := peerID.(peer.ID) // cast to peer ID switch doBlockRequestCount.Load() { - case 0, 1: - if pID == peer.ID("alice") { - *responsePtr = *worker1Response - return nil - } - - if pID == peer.ID("bob") { - incompleteBlockData := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 128, 256) - incompleteBlockData.BlockData[0].Header = nil - - *responsePtr = *incompleteBlockData - return nil - } + case 0: + *responsePtr = *worker1Response + case 1: + incompleteBlockData := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 128, 256) + incompleteBlockData.BlockData[0].Header = nil - require.FailNow(t, "expected calls by %s and %s, got: %s", - peer.ID("alice"), peer.ID("bob"), pID) + *responsePtr = *incompleteBlockData default: + *responsePtr = *worker2Response } - *responsePtr = *worker2Response return nil }).Times(3) - // since peer.ID("bob") will fail with protocols not supported his + // since some peer will fail with protocols not supported his // reputation will be affected and mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{ Value: peerset.IncompleteHeaderValue, Reason: peerset.IncompleteHeaderReason, - }, peer.ID("bob")) + }, gomock.AssignableToTypeOf(peer.ID(""))) // setup a chain sync which holds in its peer view map // 3 peers, each one announce block 129 as its best block number. // We start this test with genesis block being our best block, so @@ -1022,30 +994,20 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithResponseIsNotAChain(t *testi responsePtr := response.(*network.BlockResponseMessage) defer func() { doBlockRequestCount.Add(1) }() - pID := peerID.(peer.ID) // cast to peer ID switch doBlockRequestCount.Load() { - case 0, 1: - if pID == peer.ID("alice") { - *responsePtr = *worker1Response - return nil - } - - if pID == peer.ID("bob") { - notAChainBlockData := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 128, 256) - // swap positions to force the problem - notAChainBlockData.BlockData[0], notAChainBlockData.BlockData[130] = - notAChainBlockData.BlockData[130], notAChainBlockData.BlockData[0] - - *responsePtr = *notAChainBlockData - return nil - } + case 0: + *responsePtr = *worker1Response + case 1: + notAChainBlockData := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 128, 256) + // swap positions to force the problem + notAChainBlockData.BlockData[0], notAChainBlockData.BlockData[130] = + notAChainBlockData.BlockData[130], notAChainBlockData.BlockData[0] - require.FailNow(t, "expected calls by %s and %s, got: %s", - peer.ID("alice"), peer.ID("bob"), pID) + *responsePtr = *notAChainBlockData default: + *responsePtr = *worker2Response } - *responsePtr = *worker2Response return nil }).Times(3) @@ -1132,45 +1094,35 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing. 
responsePtr := response.(*network.BlockResponseMessage) defer func() { doBlockRequestCount.Add(1) }() - pID := peerID.(peer.ID) // cast to peer ID switch doBlockRequestCount.Load() { - case 0, 1: - if pID == peer.ID("alice") { - *responsePtr = *worker1Response - return nil - } - - if pID == peer.ID("bob") { - // use the fisrt response last item hash to produce the second response block data - // so we can guarantee that the second response continues the first response blocks - firstResponseLastItem := worker1Response.BlockData[len(worker1Response.BlockData)-1] - blockDataWithBadBlock := createSuccesfullBlockResponse(t, - firstResponseLastItem.Header.Hash(), - 129, - 128) - - // changes the last item from the second response to be a bad block, so we guarantee that - // this second response is a chain, (changing the hash from a block in the middle of the block - // response brokes the `isAChain` verification) - lastItem := len(blockDataWithBadBlock.BlockData) - 1 - blockDataWithBadBlock.BlockData[lastItem].Hash = fakeBadBlockHash - *responsePtr = *blockDataWithBadBlock - return nil - } - - require.FailNow(t, "expected calls by %s and %s, got: %s", - peer.ID("alice"), peer.ID("bob"), pID) + case 0: + *responsePtr = *worker1Response + case 1: + // use the fisrt response last item hash to produce the second response block data + // so we can guarantee that the second response continues the first response blocks + firstResponseLastItem := worker1Response.BlockData[len(worker1Response.BlockData)-1] + blockDataWithBadBlock := createSuccesfullBlockResponse(t, + firstResponseLastItem.Header.Hash(), + 129, + 128) + + // changes the last item from the second response to be a bad block, so we guarantee that + // this second response is a chain, (changing the hash from a block in the middle of the block + // response brokes the `isAChain` verification) + lastItem := len(blockDataWithBadBlock.BlockData) - 1 + blockDataWithBadBlock.BlockData[lastItem].Hash = fakeBadBlockHash + *responsePtr = *blockDataWithBadBlock default: + *responsePtr = *worker2Response } - *responsePtr = *worker2Response return nil }).Times(3) mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{ Value: peerset.BadBlockAnnouncementValue, Reason: peerset.BadBlockAnnouncementReason, - }, peer.ID("bob")) + }, gomock.AssignableToTypeOf(peer.ID(""))) // setup a chain sync which holds in its peer view map // 3 peers, each one announce block 129 as its best block number. // We start this test with genesis block being our best block, so @@ -1201,11 +1153,8 @@ func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing. // peer should be not in the worker pool // peer should be in the ignore list - _, ok := cs.workerPool.workers[peer.ID("bob")] - require.False(t, ok) - - _, ok = cs.workerPool.ignorePeers[peer.ID("bob")] - require.True(t, ok) + require.Len(t, cs.workerPool.workers, 1) + require.Len(t, cs.workerPool.ignorePeers, 1) } func TestChainSync_BootstrapSync_SucessfulSync_ReceivedPartialBlockData(t *testing.T) { @@ -1255,19 +1204,15 @@ func TestChainSync_BootstrapSync_SucessfulSync_ReceivedPartialBlockData(t *testi // lets ensure that the DoBlockRequest is called by // peer.ID(alice). 
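		// the numbers below work out as follows, assuming the full requested
		// range is 1..128: a first response carrying 97 blocks leaves blocks
		// 98..128 (31 blocks) missing, which the second mocked response is
		// expected to cover.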
The first call will return only 97 blocks // the handler should issue another call to retrieve the missing blocks - pID := peerID.(peer.ID) // cast to peer ID - require.Equalf(t, pID, peer.ID("alice"), - "expect third call be made by %s, got: %s", peer.ID("alice"), pID) - responsePtr := response.(*network.BlockResponseMessage) defer func() { doBlockRequestCount++ }() if doBlockRequestCount == 0 { *responsePtr = *worker1Response - return nil + } else { + *responsePtr = *worker1MissingBlocksResponse } - *responsePtr = *worker1MissingBlocksResponse return nil }).Times(2) From 198fb185414ee9da896300e5ddbb5f1462be2505 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 10 Aug 2023 12:19:43 -0400 Subject: [PATCH 137/140] chore: fix race conditions --- dot/sync/chain_sync_test.go | 8 ++-- dot/sync/worker.go | 56 ++++----------------------- dot/sync/worker_pool.go | 75 +++++++++++++++++++----------------- dot/sync/worker_pool_test.go | 6 ++- dot/sync/worker_test.go | 19 ++++++--- 5 files changed, 69 insertions(+), 95 deletions(-) diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index ba16c10809..0344ab43f6 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -389,8 +389,10 @@ func TestChainSync_onBlockAnnounceHandshake_onBootstrapMode(t *testing.T) { newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { networkMock := NewMockNetwork(ctrl) workerPool := newSyncWorkerPool(networkMock, NewMockRequestMaker(nil)) - workerPool.workers = map[peer.ID]*worker{ - peer.ID("peer-test"): {status: available}, + workerPool.workers = map[peer.ID]*syncWorker{ + peer.ID("peer-test"): { + worker: &worker{status: available}, + }, } cs := newChainSyncTest(t, ctrl) @@ -422,7 +424,7 @@ func TestChainSync_onBlockAnnounceHandshake_onBootstrapMode(t *testing.T) { if tt.shouldBeAWorker { syncWorker, exists := cs.workerPool.workers[tt.peerID] require.True(t, exists) - require.Equal(t, tt.workerStatus, syncWorker.status) + require.Equal(t, tt.workerStatus, syncWorker.worker.status) } else { _, exists := cs.workerPool.workers[tt.peerID] require.False(t, exists) diff --git a/dot/sync/worker.go b/dot/sync/worker.go index 9d50e924f3..478608db81 100644 --- a/dot/sync/worker.go +++ b/dot/sync/worker.go @@ -5,9 +5,7 @@ package sync import ( "errors" - "fmt" "sync" - "time" "github.com/ChainSafe/gossamer/dot/network" "github.com/libp2p/go-libp2p/core/peer" @@ -16,15 +14,10 @@ import ( var ErrStopTimeout = errors.New("stop timeout") type worker struct { - mxt sync.Mutex - status byte - peerID peer.ID - sharedGuard chan struct{} - - punishment chan time.Duration - doneCh chan struct{} - - queue chan *syncTask + mxt sync.Mutex + status byte + peerID peer.ID + sharedGuard chan struct{} requestMaker network.RequestMaker } @@ -32,57 +25,22 @@ func newWorker(pID peer.ID, sharedGuard chan struct{}, network network.RequestMa return &worker{ peerID: pID, sharedGuard: sharedGuard, - punishment: make(chan time.Duration), - doneCh: make(chan struct{}), - queue: make(chan *syncTask, maxRequestsAllowed), requestMaker: network, status: available, } } -func (w *worker) start() { +func (w *worker) run(queue chan *syncTask, wg *sync.WaitGroup) { defer func() { logger.Debugf("[STOPPED] worker %s", w.peerID) - close(w.doneCh) + wg.Done() }() - for task := range w.queue { + for task := range queue { executeRequest(w.peerID, w.requestMaker, task, w.sharedGuard) } } -func (w *worker) processTask(task *syncTask) { - w.mxt.Lock() - defer w.mxt.Unlock() - if w.queue != nil { - select { - 
diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go
index df7e489b5f..b3c18a51ea 100644
--- a/dot/sync/worker_pool.go
+++ b/dot/sync/worker_pool.go
@@ -4,7 +4,6 @@ package sync

 import (
-	"errors"
 	"fmt"
 	"crypto/rand"
@@ -40,12 +39,18 @@ type syncTaskResult struct {
 	err      error
 }

+type syncWorker struct {
+	worker *worker
+	queue  chan *syncTask
+}
+
 type syncWorkerPool struct {
 	mtx sync.RWMutex
+	wg  sync.WaitGroup

 	network      Network
 	requestMaker network.RequestMaker
-	workers      map[peer.ID]*worker
+	workers      map[peer.ID]*syncWorker
 	ignorePeers  map[peer.ID]struct{}

 	sharedGuard chan struct{}
@@ -55,7 +60,7 @@ func newSyncWorkerPool(net Network, requestMaker network.RequestMaker) *syncWork
 	swp := &syncWorkerPool{
 		network:      net,
 		requestMaker: requestMaker,
-		workers:      make(map[peer.ID]*worker),
+		workers:      make(map[peer.ID]*syncWorker),
 		ignorePeers:  make(map[peer.ID]struct{}),
 		sharedGuard:  make(chan struct{}, maxRequestsAllowed),
 	}
@@ -68,31 +73,27 @@ func (s *syncWorkerPool) stop() error {
 	s.mtx.RLock()
 	defer s.mtx.RUnlock()

-	wg := sync.WaitGroup{}
-	// make it buffered so the goroutines can write on it
-	// without beign blocked
-	errCh := make(chan error, len(s.workers))
-
-	for _, syncWorker := range s.workers {
-		wg.Add(1)
-		go func(syncWorker *worker, wg *sync.WaitGroup) {
-			defer wg.Done()
-			errCh <- syncWorker.stop()
-		}(syncWorker, &wg)
+	for _, sw := range s.workers {
+		close(sw.queue)
 	}

-	wg.Wait()
-	// closing the errCh then the following for loop don't
-	// panic due to "all goroutines are asleep - deadlock"
-	close(errCh)
-
-	var errs error
-	for err := range errCh {
-		if err != nil {
-			errs = errors.Join(errs, err)
+	allWorkersDoneCh := make(chan struct{})
+	go func() {
+		defer close(allWorkersDoneCh)
+		s.wg.Wait()
+	}()
+
+	timeoutTimer := time.NewTimer(30 * time.Second)
+	select {
+	case <-timeoutTimer.C:
+		return fmt.Errorf("timeout reached while finishing workers")
+	case <-allWorkersDoneCh:
+		if !timeoutTimer.Stop() {
+			<-timeoutTimer.C
 		}
+
+		return nil
 	}
-	return errs
 }

 // useConnectedPeers will retrieve all connected peers
@@ -128,10 +129,16 @@ func (s *syncWorkerPool) newPeer(who peer.ID) {
 		return
 	}

-	syncWorker := newWorker(who, s.sharedGuard, s.requestMaker)
-	go syncWorker.start()
+	worker := newWorker(who, s.sharedGuard, s.requestMaker)
+	workerQueue := make(chan *syncTask, maxRequestsAllowed)
+
+	s.wg.Add(1)
+	go worker.run(workerQueue, &s.wg)

-	s.workers[who] = syncWorker
+	s.workers[who] = &syncWorker{
+		worker: worker,
+		queue:  workerQueue,
+	}

 	logger.Tracef("potential worker added, total in the pool %d", len(s.workers))
 }
@@ -155,7 +162,7 @@ func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage,

 	if who != nil {
 		syncWorker := s.workers[*who]
-		syncWorker.processTask(task)
+		syncWorker.queue <- task
 		return
 	}
@@ -170,7 +177,7 @@
 	}

 	selectedWorkerIdx = int(nBig.Int64())
 	selectedWorker := workers[selectedWorkerIdx]
-	selectedWorker.processTask(task)
+	selectedWorker.queue <- task
 }

 // submitRequests takes a set of requests and will submit to the pool through submitRequest
@@ -186,10 +193,10 @@ func (s *syncWorkerPool) submitRequests(requests []*network.BlockRequestMessage)
 		workerID := idx % len(allWorkers)
 		syncWorker := allWorkers[workerID]

-		syncWorker.processTask(&syncTask{
+		syncWorker.queue <- &syncTask{
 			request:  request,
 			resultCh: resultCh,
-		})
+		}
 	}

 	return resultCh
@@ -204,11 +211,7 @@ func (s *syncWorkerPool) ignorePeerAsWorker(who peer.ID) error {
 		return nil
 	}

-	err := worker.stop()
-	if err != nil {
-		return fmt.Errorf("stopping worker: %w", err)
-	}
-
+	close(worker.queue)
 	delete(s.workers, who)
 	s.ignorePeers[who] = struct{}{}
 	return nil
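Note: the rewritten `stop` above bounds `sync.WaitGroup.Wait`, which has no timeout of its own, by racing it against a timer. The same pattern extracted as a standalone sketch (illustrative names):

    package main

    import (
    	"fmt"
    	"sync"
    	"time"
    )

    func waitWithTimeout(wg *sync.WaitGroup, timeout time.Duration) error {
    	done := make(chan struct{})
    	go func() {
    		defer close(done) // signal completion by closing the channel
    		wg.Wait()
    	}()

    	timer := time.NewTimer(timeout)
    	select {
    	case <-timer.C:
    		return fmt.Errorf("timeout reached while finishing workers")
    	case <-done:
    		if !timer.Stop() {
    			<-timer.C // drain an already-fired timer
    		}
    		return nil
    	}
    }

    func main() {
    	var wg sync.WaitGroup
    	wg.Add(1)
    	go func() {
    		defer wg.Done()
    		time.Sleep(100 * time.Millisecond)
    	}()
    	fmt.Println(waitWithTimeout(&wg, time.Second)) // <nil>
    }

If the timeout fires, the watcher goroutine stays blocked in `wg.Wait()` until the stragglers finish; the sketch accepts that small leak in exchange for a bounded shutdown, as the patch appears to.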
diff --git a/dot/sync/worker_pool_test.go b/dot/sync/worker_pool_test.go
index 4106673d2c..5ea0d4e9d7 100644
--- a/dot/sync/worker_pool_test.go
+++ b/dot/sync/worker_pool_test.go
@@ -86,7 +86,11 @@ func TestSyncWorkerPool_useConnectedPeers(t *testing.T) {
 					peer.ID("available-3"),
 				})
 				workerPool := newSyncWorkerPool(networkMock, nil)
-				workerPool.workers[peer.ID("available-3")] = &worker{queue: make(chan *syncTask)}
+				syncWorker := &syncWorker{
+					worker: &worker{},
+					queue:  make(chan *syncTask),
+				}
+				workerPool.workers[peer.ID("available-3")] = syncWorker
 				return workerPool
 			},
 			exepectedWorkers: []peer.ID{

diff --git a/dot/sync/worker_test.go b/dot/sync/worker_test.go
index 718ca3e6e1..904be38983 100644
--- a/dot/sync/worker_test.go
+++ b/dot/sync/worker_test.go
@@ -4,6 +4,7 @@
 package sync

 import (
+	"sync"
 	"testing"
 	"time"

@@ -29,18 +30,23 @@ func TestWorker(t *testing.T) {
 	sharedGuard := make(chan struct{}, 1)

 	w := newWorker(peerA, sharedGuard, reqMaker)
-	go w.start()
+
+	wg := sync.WaitGroup{}
+	queue := make(chan *syncTask, 2)
+
+	wg.Add(1)
+	go w.run(queue, &wg)

 	resultCh := make(chan *syncTaskResult)
 	defer close(resultCh)

-	w.processTask(&syncTask{
+	queue <- &syncTask{
 		resultCh: resultCh,
-	})
+	}

-	w.processTask(&syncTask{
+	queue <- &syncTask{
 		resultCh: resultCh,
-	})
+	}

 	time.Sleep(500 * time.Millisecond)
 	require.Equal(t, 1, len(sharedGuard))

 	<-resultCh
 	time.Sleep(500 * time.Millisecond)
 	require.Equal(t, 1, len(sharedGuard))

 	<-resultCh
-	w.stop()
+	close(queue)
+	wg.Wait()
 }
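Note: `TestWorker` above asserts `len(sharedGuard) == 1`, relying on a buffered channel acting as a counting semaphore that caps in-flight requests. A sketch of that guard pattern in isolation, with illustrative names:

    package main

    import (
    	"fmt"
    	"sync"
    	"time"
    )

    func executeRequest(id int, sharedGuard chan struct{}) {
    	sharedGuard <- struct{}{}        // acquire one slot (blocks when full)
    	defer func() { <-sharedGuard }() // release the slot on return

    	fmt.Println("request in flight:", id)
    	time.Sleep(50 * time.Millisecond)
    }

    func main() {
    	// capacity 1 means at most one request in flight at a time, which is
    	// why the test can assert that len(sharedGuard) is exactly 1
    	sharedGuard := make(chan struct{}, 1)

    	var wg sync.WaitGroup
    	for i := 1; i <= 3; i++ {
    		wg.Add(1)
    		go func(id int) {
    			defer wg.Done()
    			executeRequest(id, sharedGuard)
    		}(i)
    	}
    	wg.Wait()
    }

Since the guard channel is shared by every worker in the pool, raising its capacity is the single knob that controls pool-wide request concurrency.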
From 75799ff45e8291f10efd6716d0c9fcf5d2b471d4 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Thu, 10 Aug 2023 12:24:35 -0400
Subject: [PATCH 138/140] chore: remove unused fields and remove unneeded error return

---
 dot/sync/chain_sync.go  | 4 +---
 dot/sync/worker.go      | 1 -
 dot/sync/worker_pool.go | 6 +++---
 3 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go
index cb33ce0140..e3371a02d8 100644
--- a/dot/sync/chain_sync.go
+++ b/dot/sync/chain_sync.go
@@ -694,9 +694,7 @@ taskResultLoop:
 					Reason: peerset.BadBlockAnnouncementReason,
 				}, who)

-				if err := cs.workerPool.ignorePeerAsWorker(taskResult.who); err != nil {
-					logger.Errorf("ignoring peer: %w", err)
-				}
+				cs.workerPool.ignorePeerAsWorker(taskResult.who)

 				cs.workerPool.submitRequest(taskResult.request, nil, workersResults)
 				continue taskResultLoop
 			}

diff --git a/dot/sync/worker.go b/dot/sync/worker.go
index 478608db81..e4c6252619 100644
--- a/dot/sync/worker.go
+++ b/dot/sync/worker.go
@@ -14,7 +14,6 @@ import (
 var ErrStopTimeout = errors.New("stop timeout")

 type worker struct {
-	mxt         sync.Mutex
 	status      byte
 	peerID      peer.ID
 	sharedGuard chan struct{}

diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go
index b3c18a51ea..1fcd9d6b6d 100644
--- a/dot/sync/worker_pool.go
+++ b/dot/sync/worker_pool.go
@@ -202,19 +202,19 @@ func (s *syncWorkerPool) submitRequests(requests []*network.BlockRequestMessage)
 	return resultCh
 }

-func (s *syncWorkerPool) ignorePeerAsWorker(who peer.ID) error {
+func (s *syncWorkerPool) ignorePeerAsWorker(who peer.ID) {
 	s.mtx.Lock()
 	defer s.mtx.Unlock()

 	worker, has := s.workers[who]
 	if !has {
-		return nil
+		return
 	}

 	close(worker.queue)
 	delete(s.workers, who)
 	s.ignorePeers[who] = struct{}{}
-	return nil
+	return
 }

From ec34fb528dbba573fcf5d20ae8e53cfe110faff2 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Thu, 10 Aug 2023 13:54:23 -0400
Subject: [PATCH 139/140] chore: remove redundant return

---
 dot/sync/worker_pool.go | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go
index 1fcd9d6b6d..1eb4640bf8 100644
--- a/dot/sync/worker_pool.go
+++ b/dot/sync/worker_pool.go
@@ -207,14 +207,11 @@ func (s *syncWorkerPool) ignorePeerAsWorker(who peer.ID) {
 	defer s.mtx.Unlock()

 	worker, has := s.workers[who]
-	if !has {
-		return
+	if has {
+		close(worker.queue)
+		delete(s.workers, who)
+		s.ignorePeers[who] = struct{}{}
 	}
-
-	close(worker.queue)
-	delete(s.workers, who)
-	s.ignorePeers[who] = struct{}{}
-	return
 }

 // totalWorkers only returns available or busy workers

From 0bac6492735f2d6e850b5a47519f3e8563b34f65 Mon Sep 17 00:00:00 2001
From: EclesioMeloJunior
Date: Fri, 11 Aug 2023 11:38:34 -0400
Subject: [PATCH 140/140] chore: fix zombienet test check

---
 dot/sync/chain_sync.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go
index e3371a02d8..3d64993130 100644
--- a/dot/sync/chain_sync.go
+++ b/dot/sync/chain_sync.go
@@ -170,7 +170,8 @@ func newChainSync(cfg chainSyncConfig) *chainSync {
 }

 func (cs *chainSync) start() {
-	isSyncedGauge.Set(0)
+	// the default sync mode is tip, so the node starts out reporting as synced
+	isSyncedGauge.Set(1)

 	// wait until we have a minimal workers in the sync worker pool
 	for {
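Note: the final patch flips the startup value of `isSyncedGauge`. Its definition is outside this series, so the following is only a sketch of the underlying Prometheus gauge pattern, with a hypothetical metric name:

    package main

    import (
    	"fmt"

    	"github.com/prometheus/client_golang/prometheus"
    )

    var isSyncedGauge = prometheus.NewGauge(prometheus.GaugeOpts{
    	Name: "gossamer_network_syncer_is_synced", // hypothetical name
    	Help: "1 when the node is at the tip of the chain, 0 while syncing",
    })

    func main() {
    	prometheus.MustRegister(isSyncedGauge)

    	// the default sync mode is tip, so report synced from the start
    	isSyncedGauge.Set(1)

    	// a later switch to bootstrap (full) sync would flip it back
    	isSyncedGauge.Set(0)
    	fmt.Println("gauge updated")
    }

Reporting 1 at startup matters for external checks (such as the zombienet test the commit message cites) that poll the gauge before any sync-mode transition has happened.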