diff --git a/.github/workflows/qa-clean-exit.yml b/.github/workflows/qa-clean-exit.yml new file mode 100644 index 00000000000..1e6c9f2767f --- /dev/null +++ b/.github/workflows/qa-clean-exit.yml @@ -0,0 +1,89 @@ +name: QA - Clean exit on Ctrl-C + +on: + push: + branches: + - devel + - alpha + - 'release/**' + pull_request: + branches: + - devel + - alpha + - 'release/**' + types: + - opened + - reopened + - synchronize + - ready_for_review + +jobs: + long-running-test: + #if: ${{ github.event_name == 'push' || !github.event.pull_request.draft }} + #strategy: + # matrix: + # os: [ ubuntu-22.04, macos-13-xlarge ] + #runs-on: ${{ matrix.os }} + runs-on: self-hosted + env: + ERIGON_DATA_DIR: ${{ github.workspace }}/erigon_data + + steps: + - name: Check out repository + uses: actions/checkout@v2 + + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: '1.20' + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.x' + + #- name: Install dependencies + # run: | + # sudo apt-get update + # sudo apt-get install -y build-essential make gcc + + - name: Clean Erigon Build Directory + run: | + make clean + + - name: Build Erigon + run: | + make erigon + working-directory: ${{ github.workspace }} + + #- name: Download Python Script for Logs Checking + # run: | + # curl -o check_erigon_exit.py 'https://gist.githubusercontent.com/mriccobene/8db4030a745de34d527f136f2caa104f/raw/3c1a860cb87d61075e78ce399e17f0ab157cacc6/check_erigon_exit.py' + + - name: Run Erigon, send Ctrl-C and check for clean exit + run: | + # Run Erigon, send ctrl-c and check logs + python3 ${{ github.workspace }}/../../../../erigon-qa/test_system/qa-tests/clean-exit/run_and_check_clean_exit.py ${{ github.workspace }}/build/bin $ERIGON_DATA_DIR + + # Capture monitoring script exit status + monitoring_exit_status=$? + + # Clean up Erigon process if it's still running + if kill -0 $ERIGON_PID 2> /dev/null; then + echo "Terminating Erigon" + kill $ERIGON_PID + wait $ERIGON_PID + else + echo "Erigon has already terminated" + fi + + # Clean up Erigon build and data directories + rm -rf $ERIGON_DATA_DIR + + # Check monitoring script exit status + if [ $monitoring_exit_status -eq 0 ]; then + echo "Monitoring completed successfully" + else + echo "Error detected in Erigon logs or monitoring script exited unexpectedly" + exit 1 + fi diff --git a/.gitignore index 3ce4eeca701..123c1eb2b93 100644 --- a/.gitignore +++ b/.gitignore @@ -98,3 +98,5 @@ node_modules /config.toml /config.yaml /config.yml + +vendor \ No newline at end of file diff --git a/Makefile index 8890d6c1dc7..7ab3fb4248d 100644 --- a/Makefile +++ b/Makefile @@ -134,6 +134,8 @@ COMMANDS += sentinel COMMANDS += caplin COMMANDS += caplin-regression COMMANDS += tooling +COMMANDS += snapshots + +
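For context, the job of the QA script invoked by the workflow above is essentially: start Erigon, deliver a Ctrl-C (SIGINT), and fail unless the process exits cleanly within a timeout. A minimal Go sketch of that control flow; the binary path, run duration, and timeout here are illustrative assumptions, not values taken from the workflow:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"time"
)

func main() {
	// Illustrative path; the real test receives the build dir as an argument.
	cmd := exec.Command("./build/bin/erigon", "--datadir", os.Getenv("ERIGON_DATA_DIR"))
	if err := cmd.Start(); err != nil {
		fmt.Println("start failed:", err)
		os.Exit(1)
	}

	time.Sleep(30 * time.Second)         // let the node run for a while
	_ = cmd.Process.Signal(os.Interrupt) // the Ctrl-C under test

	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case err := <-done:
		if err != nil {
			fmt.Println("dirty exit:", err) // non-zero status or death by signal
			os.Exit(1)
		}
		fmt.Println("clean exit")
	case <-time.After(2 * time.Minute):
		_ = cmd.Process.Kill()
		fmt.Println("timed out waiting for shutdown")
		os.Exit(1)
	}
}
```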
diff --git a/README.md index 7ada8d94f97..8ee63718b93 100644 --- a/README.md +++ b/README.md @@ -49,15 +49,15 @@ in `erigon --help`). We don't allow changing this flag after first start. System Requirements =================== -* For an Archive node of Ethereum Mainnet we recommend >=3TB storage space: 1.8TB state (as of March 2022), - 200GB temp files (can symlink or mount folder `/temp` to another disk). Ethereum Mainnet Full node ( +* For an Archive node of Ethereum Mainnet we recommend >=3.5TB storage space: 2.2TB state (as of December 2023), + 470GB snapshots (can symlink or mount folder `/snapshots` to another disk), 200GB temp files (can symlink or mount folder `/temp` to another disk). Ethereum Mainnet Full node ( see `--prune*` flags): 400Gb (April 2022). * Goerli Full node (see `--prune*` flags): 189GB on Beta, 114GB on Alpha (April 2022). * Gnosis Chain Archive: 600GB (October 2023). -* Polygon Mainnet Archive: 5TB. (April 2022). `--prune.*.older 15768000`: 5.1Tb (Sept 2023). Polygon Mumbai Archive: +* Polygon Mainnet Archive: 8.5TiB (December 2023). `--prune.*.older 15768000`: 5.1TB (September 2023). Polygon Mumbai Archive: 1TB. (April 2022). SSD or NVMe. We do not recommend HDD: on HDD Erigon will always stay N blocks behind the chain tip, but it will not fall further behind. diff --git a/cl/abstract/beacon_state.go b/cl/abstract/beacon_state.go index a4d098e3206..304a414dd12 100644 --- a/cl/abstract/beacon_state.go +++ b/cl/abstract/beacon_state.go @@ -22,7 +22,7 @@ type BeaconStateUpgradable interface { } type BeaconStateExtension interface { - SlashValidator(slashedInd uint64, whistleblowerInd *uint64) error + SlashValidator(slashedInd uint64, whistleblowerInd *uint64) (uint64, error) InitiateValidatorExit(index uint64) error GetActiveValidatorsIndices(epoch uint64) (indicies []uint64) GetTotalActiveBalance() uint64 @@ -162,6 +162,8 @@ type BeaconStateMinimal interface { Eth1Data() *cltypes.Eth1Data Eth1DataVotes() *solid.ListSSZ[*cltypes.Eth1Data] Eth1DepositIndex() uint64 + ValidatorSet() *solid.ValidatorSet + PreviousEpochParticipation() *solid.BitList ForEachValidator(fn func(v solid.Validator, idx int, total int) bool) ValidatorForValidatorIndex(index int) (solid.Validator, error) diff --git a/cl/antiquary/antiquary.go b/cl/antiquary/antiquary.go index 2042d78b5ca..07bdf0f9ad1 100644 --- a/cl/antiquary/antiquary.go +++ b/cl/antiquary/antiquary.go @@ -34,7 +34,7 @@ type Antiquary struct { beaconDB persistence.BlockSource backfilled *atomic.Bool cfg *clparams.BeaconChainConfig - states bool + states, blocks bool fs afero.Fs validatorsTable *state_accessors.StaticValidatorTable genesisState *state.CachingBeaconState @@ -43,7 +43,7 @@ type Antiquary struct { balances32 []byte } -func NewAntiquary(ctx context.Context, genesisState *state.CachingBeaconState, validatorsTable *state_accessors.StaticValidatorTable, cfg *clparams.BeaconChainConfig, dirs datadir.Dirs, downloader proto_downloader.DownloaderClient, mainDB kv.RwDB, sn *freezeblocks.CaplinSnapshots, reader freezeblocks.BeaconSnapshotReader, beaconDB persistence.BlockSource, logger log.Logger, states bool, fs afero.Fs) *Antiquary { +func NewAntiquary(ctx context.Context, genesisState *state.CachingBeaconState, validatorsTable *state_accessors.StaticValidatorTable, cfg *clparams.BeaconChainConfig, dirs datadir.Dirs, downloader proto_downloader.DownloaderClient, mainDB kv.RwDB, sn *freezeblocks.CaplinSnapshots, reader freezeblocks.BeaconSnapshotReader, beaconDB persistence.BlockSource, logger log.Logger, states, blocks bool, fs afero.Fs) *Antiquary { backfilled := &atomic.Bool{} backfilled.Store(false) return &Antiquary{ @@ -61,12 +61,13 @@ func NewAntiquary(ctx context.Context, genesisState *state.CachingBeaconState, v fs: fs, validatorsTable: validatorsTable, genesisState: genesisState, + blocks: blocks, } } // Antiquate is the function that starts transaction seeding; the name, admittedly, could be better.
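The first hunk below adds a second gate to `Loop`: besides requiring a downloader, block antiquation must now be explicitly enabled through the new `blocks` flag. A stripped-down sketch of the resulting control flow, with the struct reduced to the two fields that matter here:

```go
package main

import "fmt"

// Antiquary is cut down to the fields relevant to the gate; the real struct
// above also carries databases, snapshots, a logger, and more.
type Antiquary struct {
	hasDownloader bool // stands in for `a.downloader != nil`
	blocks        bool // the flag added by this patch
}

func (a *Antiquary) Loop() error {
	if !a.hasDownloader || !a.blocks {
		return nil // just skip, exactly like the patched guard
	}
	fmt.Println("seeding and indexing block snapshots...")
	return nil
}

func main() {
	// With blocks disabled, Loop is a no-op even when a downloader exists.
	a := &Antiquary{hasDownloader: true, blocks: false}
	fmt.Println(a.Loop()) // <nil>
}
```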
func (a *Antiquary) Loop() error { - if a.downloader == nil { + if a.downloader == nil || !a.blocks { return nil // Just skip if we don't have a downloader } // Skip if we dont support backfilling for the current network @@ -94,7 +95,6 @@ func (a *Antiquary) Loop() error { return err } // Here we need to start mdbx transaction and lock the thread - log.Info("[Antiquary]: Stopping Caplin to process historical indicies") tx, err := a.mainDB.BeginRw(a.ctx) if err != nil { return err @@ -110,6 +110,7 @@ func (a *Antiquary) Loop() error { return err } defer logInterval.Stop() + log.Info("[Antiquary]: Stopping Caplin to process historical indicies", "from", from, "to", a.sn.BlocksAvailable()) // Now write the snapshots as indicies for i := from; i < a.sn.BlocksAvailable(); i++ { @@ -204,11 +205,11 @@ func (a *Antiquary) Loop() error { continue } to = utils.Min64(to, to-safetyMargin) // We don't want to retire snapshots that are too close to the finalized head - to = (to / snaptype.Erigon2RecentMergeLimit) * snaptype.Erigon2RecentMergeLimit - if to-from < snaptype.Erigon2RecentMergeLimit { + to = (to / snaptype.Erigon2MergeLimit) * snaptype.Erigon2MergeLimit + if to-from < snaptype.Erigon2MergeLimit { continue } - if err := a.antiquate(from, to); err != nil { + if err := a.antiquate(a.sn.Version(), from, to); err != nil { return err } case <-a.ctx.Done(): @@ -217,12 +218,12 @@ func (a *Antiquary) Loop() error { } // Antiquate will antiquate a specific block range (aka. retire snapshots), this should be ran in the background. -func (a *Antiquary) antiquate(from, to uint64) error { +func (a *Antiquary) antiquate(version uint8, from, to uint64) error { if a.downloader == nil { return nil // Just skip if we don't have a downloader } log.Info("[Antiquary]: Antiquating", "from", from, "to", to) - if err := freezeblocks.DumpBeaconBlocks(a.ctx, a.mainDB, a.beaconDB, from, to, snaptype.Erigon2RecentMergeLimit, a.dirs.Tmp, a.dirs.Snap, 1, log.LvlDebug, a.logger); err != nil { + if err := freezeblocks.DumpBeaconBlocks(a.ctx, a.mainDB, a.beaconDB, version, from, to, snaptype.Erigon2MergeLimit, a.dirs.Tmp, a.dirs.Snap, 1, log.LvlDebug, a.logger); err != nil { return err } diff --git a/cl/antiquary/state_antiquary.go b/cl/antiquary/state_antiquary.go index 3ae2a1dd3dc..d4d3da32e5a 100644 --- a/cl/antiquary/state_antiquary.go +++ b/cl/antiquary/state_antiquary.go @@ -28,6 +28,7 @@ import ( "github.com/ledgerwatch/erigon/cl/phase1/core/state/raw" "github.com/ledgerwatch/erigon/cl/phase1/core/state/shuffling" "github.com/ledgerwatch/erigon/cl/transition" + "github.com/ledgerwatch/erigon/cl/transition/impl/eth2" "github.com/ledgerwatch/log/v3" ) @@ -165,12 +166,12 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { defer blockRoots.Close() stateRoots := etl.NewCollector(kv.StateRoot, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) defer stateRoots.Close() - minimalBeaconStates := etl.NewCollector(kv.MinimalBeaconState, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) - defer minimalBeaconStates.Close() + slotData := etl.NewCollector(kv.SlotData, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) + defer slotData.Close() + epochData := etl.NewCollector(kv.EpochData, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) + defer epochData.Close() inactivityScoresC := etl.NewCollector(kv.InactivityScores, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) defer inactivityScoresC.Close() - checkpoints 
:= etl.NewCollector(kv.Checkpoints, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) - defer checkpoints.Close() nextSyncCommittee := etl.NewCollector(kv.NextSyncCommittee, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) defer nextSyncCommittee.Close() currentSyncCommittee := etl.NewCollector(kv.CurrentSyncCommittee, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) @@ -183,6 +184,8 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { defer eth1DataVotes.Close() stateEvents := etl.NewCollector(kv.StateEvents, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) defer stateEvents.Close() + activeValidatorIndicies := etl.NewCollector(kv.ActiveValidatorIndicies, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) + defer activeValidatorIndicies.Close() progress, err := state_accessors.GetStateProcessingProgress(tx) if err != nil { @@ -214,10 +217,9 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { return err } // Collect genesis state if we are at genesis - if err := s.collectGenesisState(ctx, compressedWriter, s.currentState, slashings, inactivityScoresC, proposers, minimalBeaconStates, stateEvents, changedValidators); err != nil { + if err := s.collectGenesisState(ctx, compressedWriter, s.currentState, currentSyncCommittee, nextSyncCommittee, slashings, epochData, inactivityScoresC, proposers, slotData, stateEvents, changedValidators); err != nil { return err } - s.balances32 = append(s.balances32, s.currentState.RawBalances()...) } else { start := time.Now() // progress not 0? we need to load the state from the DB @@ -232,8 +234,12 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { return err } log.Info("Recovered Beacon State", "slot", s.currentState.Slot(), "elapsed", end, "root", libcommon.Hash(hashRoot).String()) - s.balances32 = append(s.balances32, s.currentState.RawBalances()...) + if err := s.currentState.InitBeaconState(); err != nil { + return err + } } + s.balances32 = s.balances32[:0] + s.balances32 = append(s.balances32, s.currentState.RawBalances()...) 
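The `s.balances32 = s.balances32[:0]` followed by `append` just above refreshes the cached balances snapshot while reusing the slice's backing array, so no reallocation happens once the slice has grown to size. A self-contained sketch of the idiom:

```go
package main

import "fmt"

// refresh overwrites buf with src, keeping buf's capacity when possible.
func refresh(buf, src []byte) []byte {
	buf = buf[:0]              // drop old contents, keep the backing array
	return append(buf, src...) // copy in the new snapshot
}

func main() {
	buf := make([]byte, 0, 64)
	buf = refresh(buf, []byte{1, 2, 3})
	fmt.Println(buf, cap(buf)) // [1 2 3] 64 (no reallocation)
}
```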
} logLvl := log.LvlInfo @@ -295,19 +301,34 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { return s.validatorsTable.AddWithdrawalCredentials(uint64(index), slot, libcommon.BytesToHash(wc)) }, OnEpochBoundary: func(epoch uint64) error { - k := base_encoding.Encode64ToBytes4(s.cfg.RoundSlotToEpoch(slot)) - v := make([]byte, solid.CheckpointSize*3) - copy(v, s.currentState.CurrentJustifiedCheckpoint()) - copy(v[solid.CheckpointSize:], s.currentState.PreviousJustifiedCheckpoint()) - copy(v[solid.CheckpointSize*2:], s.currentState.FinalizedCheckpoint()) - if err := checkpoints.Collect(k, v); err != nil { + if err := s.storeEpochData(commonBuffer, s.currentState, epochData); err != nil { return err } - prevEpoch := epoch - 1 + var prevEpoch uint64 + if epoch > 0 { + prevEpoch = epoch - 1 + } mix := s.currentState.GetRandaoMixes(prevEpoch) if err := randaoMixes.Collect(base_encoding.Encode64ToBytes4(prevEpoch*s.cfg.SlotsPerEpoch), mix[:]); err != nil { return err } + // Write active validator indicies + actives := s.currentState.GetActiveValidatorsIndices(prevEpoch) + commonBuffer.Reset() + if err := base_encoding.WriteRabbits(actives, commonBuffer); err != nil { + return err + } + if err := activeValidatorIndicies.Collect(base_encoding.Encode64ToBytes4(prevEpoch*s.cfg.SlotsPerEpoch), libcommon.Copy(commonBuffer.Bytes())); err != nil { + return err + } + actives = s.currentState.GetActiveValidatorsIndices(epoch) + commonBuffer.Reset() + if err := base_encoding.WriteRabbits(actives, commonBuffer); err != nil { + return err + } + if err := activeValidatorIndicies.Collect(base_encoding.Encode64ToBytes4(epoch*s.cfg.SlotsPerEpoch), libcommon.Copy(commonBuffer.Bytes())); err != nil { + return err + } // truncate the file return proposers.Collect(base_encoding.Encode64ToBytes4(epoch), getProposerDutiesValue(s.currentState)) }, @@ -379,6 +400,8 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { if err := s.antiquateEffectiveBalances(ctx, slot, s.currentState.RawValidatorSet(), compressedWriter); err != nil { return err } + s.balances32 = s.balances32[:0] + s.balances32 = append(s.balances32, s.currentState.RawBalances()...) } else if slot%s.cfg.SlotsPerEpoch == 0 { if err := s.antiquateBytesListDiff(ctx, key, s.balances32, s.currentState.RawBalances(), balances, base_encoding.ComputeCompressedSerializedUint64ListDiff); err != nil { return err @@ -396,20 +419,22 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { prevValSet = append(prevValSet, s.currentState.RawValidatorSet()...) fullValidation := slot%100_000 == 0 || first + blockRewardsCollector := ð2.BlockRewardsCollector{} // We sanity check the state every 100k slots or when we start. - if err := transition.TransitionState(s.currentState, block, fullValidation); err != nil { + if err := transition.TransitionState(s.currentState, block, blockRewardsCollector, fullValidation); err != nil { return err } + first = false - // dump the whole sla + // dump the whole slashings vector. 
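Note the `libcommon.Copy(commonBuffer.Bytes())` calls above: `commonBuffer` is reset and reused on every epoch boundary, so any slice handed to a collector must be copied first or it will alias memory that the next iteration overwrites. A small demonstration of the hazard, using a plain append-copy in place of `libcommon.Copy`:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	var kept [][]byte

	for _, s := range []string{"epoch-1", "epoch-2"} {
		buf.Reset()
		buf.WriteString(s)
		kept = append(kept, buf.Bytes()) // BUG: retains a view into buf
	}
	fmt.Println(string(kept[0]), string(kept[1])) // epoch-2 epoch-2

	kept = kept[:0]
	for _, s := range []string{"epoch-1", "epoch-2"} {
		buf.Reset()
		buf.WriteString(s)
		kept = append(kept, append([]byte(nil), buf.Bytes()...)) // copy before retaining
	}
	fmt.Println(string(kept[0]), string(kept[1])) // epoch-1 epoch-2
}
```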
if slashingOccured { if err := s.antiquateFullUint64List(slashings, slot, s.currentState.RawSlashings(), commonBuffer, compressedWriter); err != nil { return err } } - if err := s.storeMinimalState(commonBuffer, s.currentState, minimalBeaconStates); err != nil { + if err := s.storeSlotData(commonBuffer, s.currentState, blockRewardsCollector, slotData); err != nil { return err } if err := stateEvents.Collect(base_encoding.Encode64ToBytes4(slot), events.CopyBytes()); err != nil { @@ -424,6 +449,9 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { if err := s.antiquateEffectiveBalances(ctx, slot, s.currentState.RawValidatorSet(), compressedWriter); err != nil { return err } + // Reset it as we antiquated it. + s.balances32 = s.balances32[:0] + s.balances32 = append(s.balances32, s.currentState.RawBalances()...) continue } @@ -495,8 +523,11 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { if err := stateRoots.Load(rwTx, kv.StateRoot, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil { return err } + if err := activeValidatorIndicies.Load(rwTx, kv.ActiveValidatorIndicies, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil { + return err + } - if err := minimalBeaconStates.Load(rwTx, kv.MinimalBeaconState, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil { + if err := slotData.Load(rwTx, kv.SlotData, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil { return err } @@ -508,7 +539,7 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { return err } - if err := checkpoints.Load(rwTx, kv.Checkpoints, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil { + if err := epochData.Load(rwTx, kv.EpochData, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil { return err } @@ -679,7 +710,7 @@ func getProposerDutiesValue(s *state.CachingBeaconState) []byte { return list } -func (s *Antiquary) collectGenesisState(ctx context.Context, compressor *zstd.Encoder, state *state.CachingBeaconState, slashings, inactivities, proposersCollector, minimalBeaconStateCollector, stateEvents *etl.Collector, changedValidators map[uint64]struct{}) error { +func (s *Antiquary) collectGenesisState(ctx context.Context, compressor *zstd.Encoder, state *state.CachingBeaconState, currentSyncCommittee, nextSyncCommittee, slashings, epochData, inactivities, proposersCollector, slotDataCollector, stateEvents *etl.Collector, changedValidators map[uint64]struct{}) error { var err error slot := state.Slot() epoch := slot / s.cfg.SlotsPerEpoch @@ -713,29 +744,59 @@ func (s *Antiquary) collectGenesisState(ctx context.Context, compressor *zstd.En return err } + if err := s.storeEpochData(&commonBuffer, state, epochData); err != nil { + return err + } + if state.Version() >= clparams.AltairVersion { // dump inactivity scores if err := s.antiquateFullUint64List(inactivities, slot, state.RawInactivityScores(), &commonBuffer, compressor); err != nil { return err } + committeeSlot := s.cfg.RoundSlotToSyncCommitteePeriod(slot) + committee := *state.CurrentSyncCommittee() + if err := currentSyncCommittee.Collect(base_encoding.Encode64ToBytes4(committeeSlot), libcommon.Copy(committee[:])); err != nil { + return err + } + + committee = *state.NextSyncCommittee() + if err := nextSyncCommittee.Collect(base_encoding.Encode64ToBytes4(committeeSlot), libcommon.Copy(committee[:])); err != nil { + return err + } } var b bytes.Buffer - if err := s.storeMinimalState(&b, state, minimalBeaconStateCollector); err != 
nil { + if err := s.storeSlotData(&b, state, nil, slotDataCollector); err != nil { return err } return stateEvents.Collect(base_encoding.Encode64ToBytes4(slot), events.CopyBytes()) } -func (s *Antiquary) storeMinimalState(buffer *bytes.Buffer, st *state.CachingBeaconState, collector *etl.Collector) error { +func (s *Antiquary) storeSlotData(buffer *bytes.Buffer, st *state.CachingBeaconState, rewardsCollector *eth2.BlockRewardsCollector, collector *etl.Collector) error { + buffer.Reset() + slotData := state_accessors.SlotDataFromBeaconState(st) + if rewardsCollector != nil { + slotData.AttestationsRewards = rewardsCollector.Attestations + slotData.SyncAggregateRewards = rewardsCollector.SyncAggregate + slotData.AttesterSlashings = rewardsCollector.AttesterSlashings + slotData.ProposerSlashings = rewardsCollector.ProposerSlashings + } + if err := slotData.WriteTo(buffer); err != nil { + return err + } + return collector.Collect(base_encoding.Encode64ToBytes4(st.Slot()), libcommon.Copy(buffer.Bytes())) +} + +func (s *Antiquary) storeEpochData(buffer *bytes.Buffer, st *state.CachingBeaconState, collector *etl.Collector) error { buffer.Reset() - minimalBeaconState := state_accessors.MinimalBeaconStateFromBeaconState(st.BeaconState) + epochData := state_accessors.EpochDataFromBeaconState(st) - if err := minimalBeaconState.WriteTo(buffer); err != nil { + if err := epochData.WriteTo(buffer); err != nil { return err } - return collector.Collect(base_encoding.Encode64ToBytes4(st.Slot()), buffer.Bytes()) + roundedSlot := s.cfg.RoundSlotToEpoch(st.Slot()) + return collector.Collect(base_encoding.Encode64ToBytes4(roundedSlot), libcommon.Copy(buffer.Bytes())) } func (s *Antiquary) dumpPayload(k []byte, v []byte, c *etl.Collector, b *bytes.Buffer, compressor *zstd.Encoder) error { @@ -764,7 +825,6 @@ func (s *Antiquary) dumpPayload(k []byte, v []byte, c *etl.Collector, b *bytes.B // if err := os.WriteFile("b.txt", b, 0644); err != nil { // s.logger.Error("Failed to write full beacon state", "err", err) // } - // } func flattenRandaoMixes(hashes []libcommon.Hash) []byte { diff --git a/cl/antiquary/state_antiquary_test.go b/cl/antiquary/state_antiquary_test.go index 52805c12fe2..3f198e7fe44 100644 --- a/cl/antiquary/state_antiquary_test.go +++ b/cl/antiquary/state_antiquary_test.go @@ -3,7 +3,6 @@ package antiquary import ( "context" _ "embed" - "fmt" "testing" "github.com/ledgerwatch/erigon-lib/common/datadir" @@ -20,31 +19,27 @@ import ( func runTest(t *testing.T, blocks []*cltypes.SignedBeaconBlock, preState, postState *state.CachingBeaconState) { db := memdb.NewTestDB(t) - reader := tests.LoadChain(blocks, db) + reader, _ := tests.LoadChain(blocks, postState, db, t) ctx := context.Background() vt := state_accessors.NewStaticValidatorTable() f := afero.NewMemMapFs() - a := NewAntiquary(ctx, preState, vt, &clparams.MainnetBeaconConfig, datadir.New("/tmp"), nil, db, nil, reader, nil, log.New(), true, f) + a := NewAntiquary(ctx, preState, vt, &clparams.MainnetBeaconConfig, datadir.New("/tmp"), nil, db, nil, reader, nil, log.New(), true, true, f) require.NoError(t, a.IncrementBeaconState(ctx, blocks[len(blocks)-1].Block.Slot+33)) // TODO: add more meaning here, like checking db values, will do so once i see some bugs } func TestStateAntiquaryCapella(t *testing.T) { - t.Skip() blocks, preState, postState := tests.GetCapellaRandom() runTest(t, blocks, preState, postState) } func TestStateAntiquaryBellatrix(t *testing.T) { - t.Skip() blocks, preState, postState := tests.GetBellatrixRandom() - 
fmt.Println(len(blocks)) runTest(t, blocks, preState, postState) } func TestStateAntiquaryPhase0(t *testing.T) { - t.Skip() blocks, preState, postState := tests.GetPhase0Random() runTest(t, blocks, preState, postState) } diff --git a/cl/antiquary/tests/tests.go b/cl/antiquary/tests/tests.go index 1596e16d05b..ddfb042405d 100644 --- a/cl/antiquary/tests/tests.go +++ b/cl/antiquary/tests/tests.go @@ -5,14 +5,19 @@ import ( "embed" _ "embed" "strconv" + "testing" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/cl/clparams" "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/persistence" "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies" + state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state" "github.com/ledgerwatch/erigon/cl/phase1/core/state" "github.com/ledgerwatch/erigon/cl/utils" + "github.com/spf13/afero" + "github.com/stretchr/testify/require" ) //go:embed test_data/capella/blocks_0.ssz_snappy @@ -57,38 +62,53 @@ func (m *MockBlockReader) ReadBlockBySlot(ctx context.Context, tx kv.Tx, slot ui } func (m *MockBlockReader) ReadBlockByRoot(ctx context.Context, tx kv.Tx, blockRoot libcommon.Hash) (*cltypes.SignedBeaconBlock, error) { - panic("implement me") + // do a linear search + for _, v := range m.u { + r, err := v.Block.HashSSZ() + if err != nil { + return nil, err + } + + if r == blockRoot { + return v, nil + } + } + return nil, nil } func (m *MockBlockReader) ReadHeaderByRoot(ctx context.Context, tx kv.Tx, blockRoot libcommon.Hash) (*cltypes.SignedBeaconBlockHeader, error) { - panic("implement me") + block, err := m.ReadBlockByRoot(ctx, tx, blockRoot) + if err != nil { + return nil, err + } + if block == nil { + return nil, nil + } + return block.SignedBeaconBlockHeader(), nil } func (m *MockBlockReader) FrozenSlots() uint64 { panic("implement me") } -func LoadChain(blocks []*cltypes.SignedBeaconBlock, db kv.RwDB) *MockBlockReader { +func LoadChain(blocks []*cltypes.SignedBeaconBlock, s *state.CachingBeaconState, db kv.RwDB, t *testing.T) (*MockBlockReader, afero.Fs) { tx, err := db.BeginRw(context.Background()) - if err != nil { - panic(err) - } + require.NoError(t, err) defer tx.Rollback() + fs := afero.NewMemMapFs() + bs := persistence.NewAferoRawBlockSaver(fs, &clparams.MainnetBeaconConfig) + source := persistence.NewBeaconChainDatabaseFilesystem(bs, nil, &clparams.MainnetBeaconConfig) m := NewMockBlockReader() for _, block := range blocks { m.u[block.Block.Slot] = block - h := block.SignedBeaconBlockHeader() - if err := beacon_indicies.WriteBeaconBlockHeaderAndIndicies(context.Background(), tx, h, true); err != nil { - panic(err) - } - if err := beacon_indicies.WriteHighestFinalized(tx, block.Block.Slot+64); err != nil { - panic(err) - } - } - if err := tx.Commit(); err != nil { - panic(err) + + require.NoError(t, source.WriteBlock(context.Background(), tx, block, true)) + require.NoError(t, beacon_indicies.WriteHighestFinalized(tx, block.Block.Slot+64)) } - return m + require.NoError(t, state_accessors.InitializeStaticTables(tx, s)) + + require.NoError(t, tx.Commit()) + return m, fs } func GetCapellaRandom() ([]*cltypes.SignedBeaconBlock, *state.CachingBeaconState, *state.CachingBeaconState) { diff --git a/cl/beacon/beaconhttp/api.go b/cl/beacon/beaconhttp/api.go index b0c3d94c385..5beffa09f6d 100644 --- a/cl/beacon/beaconhttp/api.go +++ b/cl/beacon/beaconhttp/api.go @@ -6,10 +6,13 @@ import ( "fmt" "net/http" "reflect" + "strings" + "time" 
"github.com/ledgerwatch/erigon-lib/types/ssz" "github.com/ledgerwatch/erigon/cl/phase1/forkchoice/fork_graph" "github.com/ledgerwatch/log/v3" + "golang.org/x/exp/slices" ) var _ error = EndpointError{} @@ -51,13 +54,13 @@ func (e *EndpointError) WriteTo(w http.ResponseWriter) { } type EndpointHandler[T any] interface { - Handle(r *http.Request) (T, error) + Handle(w http.ResponseWriter, r *http.Request) (T, error) } -type EndpointHandlerFunc[T any] func(r *http.Request) (T, error) +type EndpointHandlerFunc[T any] func(w http.ResponseWriter, r *http.Request) (T, error) -func (e EndpointHandlerFunc[T]) Handle(r *http.Request) (T, error) { - return e(r) +func (e EndpointHandlerFunc[T]) Handle(w http.ResponseWriter, r *http.Request) (T, error) { + return e(w, r) } func HandleEndpointFunc[T any](h EndpointHandlerFunc[T]) http.HandlerFunc { @@ -66,18 +69,25 @@ func HandleEndpointFunc[T any](h EndpointHandlerFunc[T]) http.HandlerFunc { func HandleEndpoint[T any](h EndpointHandler[T]) http.HandlerFunc { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ans, err := h.Handle(r) + start := time.Now() + ans, err := h.Handle(w, r) + log.Debug("beacon api request", "endpoint", r.URL.Path, "duration", time.Since(start)) if err != nil { log.Error("beacon api request error", "err", err) - endpointError := WrapEndpointError(err) + var endpointError *EndpointError + if e, ok := err.(*EndpointError); ok { + endpointError = e + } else { + endpointError = WrapEndpointError(err) + } endpointError.WriteTo(w) return } - // TODO: ssz handler // TODO: potentially add a context option to buffer these contentType := r.Header.Get("Accept") - switch contentType { - case "application/octet-stream": + contentTypes := strings.Split(contentType, ",") + switch { + case slices.Contains(contentTypes, "application/octet-stream"): sszMarshaler, ok := any(ans).(ssz.Marshaler) if !ok { NewEndpointError(http.StatusBadRequest, "This endpoint does not support SSZ response").WriteTo(w) @@ -90,16 +100,32 @@ func HandleEndpoint[T any](h EndpointHandler[T]) http.HandlerFunc { return } w.Write(encoded) - case "application/json", "": - w.Header().Add("content-type", "application/json") - err := json.NewEncoder(w).Encode(ans) - if err != nil { - // this error is fatal, log to console - log.Error("beaconapi failed to encode json", "type", reflect.TypeOf(ans), "err", err) + case contentType == "*/*", contentType == "", slices.Contains(contentTypes, "text/html"), slices.Contains(contentTypes, "application/json"): + if !isNil(ans) { + w.Header().Add("content-type", "application/json") + err := json.NewEncoder(w).Encode(ans) + if err != nil { + // this error is fatal, log to console + log.Error("beaconapi failed to encode json", "type", reflect.TypeOf(ans), "err", err) + } + } else { + w.WriteHeader(200) } default: http.Error(w, "content type must be application/json or application/octet-stream", http.StatusBadRequest) - } }) } + +func isNil[T any](t T) bool { + v := reflect.ValueOf(t) + kind := v.Kind() + // Must be one of these types to be nillable + return (kind == reflect.Ptr || + kind == reflect.Interface || + kind == reflect.Slice || + kind == reflect.Map || + kind == reflect.Chan || + kind == reflect.Func) && + v.IsNil() +} diff --git a/cl/beacon/beaconhttp/types.go b/cl/beacon/beaconhttp/types.go new file mode 100644 index 00000000000..fc5dfaa3e7e --- /dev/null +++ b/cl/beacon/beaconhttp/types.go @@ -0,0 +1,28 @@ +package beaconhttp + +import ( + "encoding/json" + "strconv" +) + +type IntStr int + +func (i IntStr) 
MarshalJSON() ([]byte, error) { + return json.Marshal(strconv.FormatInt(int64(i), 10)) +} + +func (i *IntStr) UnmarshalJSON(b []byte) error { + // Try string first + var s string + if err := json.Unmarshal(b, &s); err == nil { + value, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return err + } + *i = IntStr(value) + return nil + } + + // Fallback to number + return json.Unmarshal(b, (*int)(i)) +} diff --git a/cl/beacon/building/endpoints.go b/cl/beacon/building/endpoints.go new file mode 100644 index 00000000000..cdf3c133d8c --- /dev/null +++ b/cl/beacon/building/endpoints.go @@ -0,0 +1,25 @@ +package building + +import ( + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" +) + +type BeaconCommitteeSubscription struct { + ValidatorIndex int `json:"validator_index,string"` + CommitteeIndex int `json:"committee_index,string"` + CommitteesAtSlot int `json:"committees_at_slot,string"` + Slot int `json:"slot,string"` + IsAggregator bool `json:"is_aggregator"` +} + +type SyncCommitteeSubscription struct { + ValidatorIndex int `json:"validator_index,string"` + SyncCommitteeIndices []beaconhttp.IntStr `json:"sync_committee_indices"` + UntilEpoch int `json:"until_epoch,string"` +} + +type PrepareBeaconProposer struct { + ValidatorIndex int `json:"validator_index,string"` + FeeRecipient common.Address `json:"fee_recipient"` +} diff --git a/cl/beacon/building/state.go b/cl/beacon/building/state.go new file mode 100644 index 00000000000..e7baf787b8d --- /dev/null +++ b/cl/beacon/building/state.go @@ -0,0 +1,25 @@ +package building + +import ( + "sync" + + "github.com/ledgerwatch/erigon-lib/common" +) + +type State struct { + feeRecipients map[int]common.Address + + mu sync.RWMutex +} + +func NewState() *State { + return &State{ + feeRecipients: map[int]common.Address{}, + } +} + +func (s *State) SetFeeRecipient(idx int, address common.Address) { + s.mu.Lock() + defer s.mu.Unlock() + s.feeRecipients[idx] = address +} diff --git a/cl/beacon/handler/attestation_rewards.go b/cl/beacon/handler/attestation_rewards.go new file mode 100644 index 00000000000..51b5e7dadcd --- /dev/null +++ b/cl/beacon/handler/attestation_rewards.go @@ -0,0 +1,450 @@ +package handler + +import ( + "encoding/json" + "io" + "net/http" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies" + state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state" + "github.com/ledgerwatch/erigon/cl/phase1/core/state" + "github.com/ledgerwatch/erigon/cl/transition/impl/eth2/statechange" + "github.com/ledgerwatch/erigon/cl/utils" +) + +type IdealReward struct { + EffectiveBalance int64 `json:"effective_balance,string"` + Head int64 `json:"head,string"` + Target int64 `json:"target,string"` + Source int64 `json:"source,string"` + InclusionDelay int64 `json:"inclusion_delay,string"` + Inactivity int64 `json:"inactivity,string"` +} + +type TotalReward struct { + ValidatorIndex int64 `json:"validator_index,string"` + Head int64 `json:"head,string"` + Target int64 `json:"target,string"` + Source int64 `json:"source,string"` + InclusionDelay int64 `json:"inclusion_delay,string"` + Inactivity int64 `json:"inactivity,string"` +} + +type attestationsRewardsResponse struct { + IdealRewards []IdealReward `json:"ideal_rewards"` + 
TotalRewards []TotalReward `json:"total_rewards"` +} + +func (a *ApiHandler) getAttestationsRewards(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { + ctx := r.Context() + + tx, err := a.indiciesDB.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + epoch, err := epochFromRequest(r) + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + + req := []string{} + // read the entire body + jsonBytes, err := io.ReadAll(r.Body) + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + // parse json body request + if len(jsonBytes) > 0 { + if err := json.Unmarshal(jsonBytes, &req); err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + } + + filterIndicies, err := parseQueryValidatorIndicies(tx, req) + if err != nil { + return nil, err + } + _, headSlot, err := a.forkchoiceStore.GetHead() + if err != nil { + return nil, err + } + headEpoch := headSlot / a.beaconChainCfg.SlotsPerEpoch + if epoch > headEpoch { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "epoch is in the future") + } + // Few cases to handle: + // 1) finalized data + // 2) not finalized data + version := a.beaconChainCfg.GetCurrentStateVersion(epoch) + + // finalized data + if epoch > a.forkchoiceStore.FinalizedCheckpoint().Epoch() { + minRange := epoch * a.beaconChainCfg.SlotsPerEpoch + maxRange := (epoch + 1) * a.beaconChainCfg.SlotsPerEpoch + var blockRoot libcommon.Hash + for i := maxRange - 1; i >= minRange; i-- { + blockRoot, err = beacon_indicies.ReadCanonicalBlockRoot(tx, i) + if err != nil { + return nil, err + } + if blockRoot == (libcommon.Hash{}) { + continue + } + s, err := a.forkchoiceStore.GetStateAtBlockRoot(blockRoot, true) + if err != nil { + return nil, err + } + if s == nil { + continue + } + if s.Version() == clparams.Phase0Version { + return a.computeAttestationsRewardsForPhase0(s, filterIndicies, epoch) + } + return a.computeAttestationsRewardsForAltair(s.ValidatorSet(), s.InactivityScores(), s.PreviousEpochParticipation(), state.InactivityLeaking(s), filterIndicies, epoch) + } + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "no block found for this epoch") + } + + if version == clparams.Phase0Version { + minRange := epoch * a.beaconChainCfg.SlotsPerEpoch + maxRange := (epoch + 1) * a.beaconChainCfg.SlotsPerEpoch + for i := maxRange - 1; i >= minRange; i-- { + s, err := a.stateReader.ReadHistoricalState(ctx, tx, i) + if err != nil { + return nil, err + } + if s == nil { + continue + } + if err := s.InitBeaconState(); err != nil { + return nil, err + } + return a.computeAttestationsRewardsForPhase0(s, filterIndicies, epoch) + } + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "no block found for this epoch") + } + lastSlot := epoch*a.beaconChainCfg.SlotsPerEpoch + a.beaconChainCfg.SlotsPerEpoch - 1 + stateProgress, err := state_accessors.GetStateProcessingProgress(tx) + if err != nil { + return nil, err + } + if lastSlot > stateProgress { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "requested range is not yet processed or the node is not archivial") + } + validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, lastSlot) + if err != nil { + return nil, err + } + + _, previousIdx, err := a.stateReader.ReadPartecipations(tx, lastSlot) + if err != nil { + return nil, err + } + _, _, finalizedCheckpoint, err := state_accessors.ReadCheckpoints(tx, 
epoch*a.beaconChainCfg.SlotsPerEpoch) + if err != nil { + return nil, err + } + inactivityScores := solid.NewUint64ListSSZ(int(a.beaconChainCfg.ValidatorRegistryLimit)) + if err := a.stateReader.ReconstructUint64ListDump(tx, lastSlot, kv.InactivityScores, validatorSet.Length(), inactivityScores); err != nil { + return nil, err + } + return a.computeAttestationsRewardsForAltair( + validatorSet, + inactivityScores, + previousIdx, + a.isInactivityLeaking(epoch, finalizedCheckpoint), + filterIndicies, + epoch) +} + +func (a *ApiHandler) isInactivityLeaking(epoch uint64, finalityCheckpoint solid.Checkpoint) bool { + prevEpoch := epoch + if epoch > 0 { + prevEpoch = epoch - 1 + } + return prevEpoch-finalityCheckpoint.Epoch() > a.beaconChainCfg.MinEpochsToInactivityPenalty +} + +func (a *ApiHandler) baseReward(version clparams.StateVersion, effectiveBalance, activeBalanceRoot uint64) uint64 { + basePerIncrement := a.beaconChainCfg.EffectiveBalanceIncrement * a.beaconChainCfg.BaseRewardFactor / activeBalanceRoot + if version != clparams.Phase0Version { + return (effectiveBalance / a.beaconChainCfg.EffectiveBalanceIncrement) * basePerIncrement + } + return effectiveBalance * a.beaconChainCfg.BaseRewardFactor / activeBalanceRoot / a.beaconChainCfg.BaseRewardsPerEpoch +} + +func (a *ApiHandler) computeAttestationsRewardsForAltair(validatorSet *solid.ValidatorSet, inactivityScores solid.Uint64ListSSZ, previousParticipation *solid.BitList, inactivityLeak bool, filterIndicies []uint64, epoch uint64) (*beaconResponse, error) { + totalActiveBalance := uint64(0) + flagsUnslashedIndiciesSet := statechange.GetUnslashedIndiciesSet(a.beaconChainCfg, epoch, validatorSet, previousParticipation) + weights := a.beaconChainCfg.ParticipationWeights() + flagsTotalBalances := make([]uint64, len(weights)) + + prevEpoch := uint64(0) + if epoch > 0 { + prevEpoch = epoch - 1 + } + + validatorSet.Range(func(validatorIndex int, v solid.Validator, l int) bool { + if v.Active(epoch) { + totalActiveBalance += v.EffectiveBalance() + } + + for i := range weights { + if flagsUnslashedIndiciesSet[i][validatorIndex] { + flagsTotalBalances[i] += v.EffectiveBalance() + } + } + return true + }) + version := a.beaconChainCfg.GetCurrentStateVersion(epoch) + inactivityPenaltyDenominator := a.beaconChainCfg.InactivityScoreBias * a.beaconChainCfg.GetPenaltyQuotient(version) + rewardMultipliers := make([]uint64, len(weights)) + for i := range weights { + rewardMultipliers[i] = weights[i] * (flagsTotalBalances[i] / a.beaconChainCfg.EffectiveBalanceIncrement) + } + + rewardDenominator := (totalActiveBalance / a.beaconChainCfg.EffectiveBalanceIncrement) * a.beaconChainCfg.WeightDenominator + var response *attestationsRewardsResponse + if len(filterIndicies) > 0 { + response = &attestationsRewardsResponse{ + IdealRewards: make([]IdealReward, 0, len(filterIndicies)), + TotalRewards: make([]TotalReward, 0, len(filterIndicies)), + } + } else { + response = &attestationsRewardsResponse{ + IdealRewards: make([]IdealReward, 0, validatorSet.Length()), + TotalRewards: make([]TotalReward, 0, validatorSet.Length()), + } + } + // make a map with the filter indicies + totalActiveBalanceSqrt := utils.IntegerSquareRoot(totalActiveBalance) + + fn := func(index uint64, v solid.Validator) error { + effectiveBalance := v.EffectiveBalance() + baseReward := a.baseReward(version, effectiveBalance, totalActiveBalanceSqrt) + // not eligible for rewards? 
then all empty + if !(v.Active(prevEpoch) || (v.Slashed() && prevEpoch+1 < v.WithdrawableEpoch())) { + response.IdealRewards = append(response.IdealRewards, IdealReward{EffectiveBalance: int64(effectiveBalance)}) + response.TotalRewards = append(response.TotalRewards, TotalReward{ValidatorIndex: int64(index)}) + return nil + } + idealReward := IdealReward{EffectiveBalance: int64(effectiveBalance)} + totalReward := TotalReward{ValidatorIndex: int64(index)} + if !inactivityLeak { + idealReward.Head = int64(baseReward * rewardMultipliers[a.beaconChainCfg.TimelyHeadFlagIndex] / rewardDenominator) + idealReward.Target = int64(baseReward * rewardMultipliers[a.beaconChainCfg.TimelyTargetFlagIndex] / rewardDenominator) + idealReward.Source = int64(baseReward * rewardMultipliers[a.beaconChainCfg.TimelySourceFlagIndex] / rewardDenominator) + } + // Note: for altair, we dont have the inclusion delay, always 0. + for flagIdx := range weights { + if flagsUnslashedIndiciesSet[flagIdx][index] { + if flagIdx == int(a.beaconChainCfg.TimelyHeadFlagIndex) { + totalReward.Head = idealReward.Head + } else if flagIdx == int(a.beaconChainCfg.TimelyTargetFlagIndex) { + totalReward.Target = idealReward.Target + } else if flagIdx == int(a.beaconChainCfg.TimelySourceFlagIndex) { + totalReward.Source = idealReward.Source + } + } else if flagIdx != int(a.beaconChainCfg.TimelyHeadFlagIndex) { + down := -int64(baseReward * weights[flagIdx] / a.beaconChainCfg.WeightDenominator) + if flagIdx == int(a.beaconChainCfg.TimelyHeadFlagIndex) { + totalReward.Head = down + } else if flagIdx == int(a.beaconChainCfg.TimelyTargetFlagIndex) { + totalReward.Target = down + } else if flagIdx == int(a.beaconChainCfg.TimelySourceFlagIndex) { + totalReward.Source = down + } + } + } + if !flagsUnslashedIndiciesSet[a.beaconChainCfg.TimelyTargetFlagIndex][index] { + inactivityScore := inactivityScores.Get(int(index)) + totalReward.Inactivity = -int64((effectiveBalance * inactivityScore) / inactivityPenaltyDenominator) + } + response.IdealRewards = append(response.IdealRewards, idealReward) + response.TotalRewards = append(response.TotalRewards, totalReward) + return nil + } + + if len(filterIndicies) > 0 { + for _, index := range filterIndicies { + if err := fn(index, validatorSet.Get(int(index))); err != nil { + return nil, err + } + } + } else { + for index := uint64(0); index < uint64(validatorSet.Length()); index++ { + if err := fn(index, validatorSet.Get(int(index))); err != nil { + return nil, err + } + } + } + return newBeaconResponse(response), nil +} + +// processRewardsAndPenaltiesPhase0 process rewards and penalties for phase0 state. 
+func (a *ApiHandler) computeAttestationsRewardsForPhase0(s *state.CachingBeaconState, filterIndicies []uint64, epoch uint64) (*beaconResponse, error) { + response := &attestationsRewardsResponse{} + beaconConfig := s.BeaconConfig() + if epoch == beaconConfig.GenesisEpoch { + return newBeaconResponse(response), nil + } + prevEpoch := uint64(0) + if epoch > 0 { + prevEpoch = epoch - 1 + } + if len(filterIndicies) > 0 { + response = &attestationsRewardsResponse{ + IdealRewards: make([]IdealReward, 0, len(filterIndicies)), + TotalRewards: make([]TotalReward, 0, len(filterIndicies)), + } + } else { + response = &attestationsRewardsResponse{ + IdealRewards: make([]IdealReward, 0, s.ValidatorLength()), + TotalRewards: make([]TotalReward, 0, s.ValidatorLength()), + } + } + + inactivityLeak := state.InactivityLeaking(s) + rewardDenominator := s.GetTotalActiveBalance() / beaconConfig.EffectiveBalanceIncrement + var unslashedMatchingSourceBalanceIncrements, unslashedMatchingTargetBalanceIncrements, unslashedMatchingHeadBalanceIncrements uint64 + var err error + s.ForEachValidator(func(validator solid.Validator, idx, total int) bool { + if validator.Slashed() { + return true + } + var previousMatchingSourceAttester, previousMatchingTargetAttester, previousMatchingHeadAttester bool + + if previousMatchingSourceAttester, err = s.ValidatorIsPreviousMatchingSourceAttester(idx); err != nil { + return false + } + if previousMatchingTargetAttester, err = s.ValidatorIsPreviousMatchingTargetAttester(idx); err != nil { + return false + } + if previousMatchingHeadAttester, err = s.ValidatorIsPreviousMatchingHeadAttester(idx); err != nil { + return false + } + if previousMatchingSourceAttester { + unslashedMatchingSourceBalanceIncrements += validator.EffectiveBalance() + } + if previousMatchingTargetAttester { + unslashedMatchingTargetBalanceIncrements += validator.EffectiveBalance() + } + if previousMatchingHeadAttester { + unslashedMatchingHeadBalanceIncrements += validator.EffectiveBalance() + } + return true + }) + if err != nil { + return nil, err + } + // Then compute their total increment. 
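// Worked example of the divisions below (values assumed for illustration:
// on mainnet, EffectiveBalanceIncrement is 1_000_000_000 gwei, i.e. 1 ETH):
// 96_000_000_000 gwei of unslashed matching-source balance becomes 96
// increments, and the ideal source reward is later computed as
// baseReward * 96 / rewardDenominator, where rewardDenominator is the total
// active balance expressed in those same increments.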
+ unslashedMatchingSourceBalanceIncrements /= beaconConfig.EffectiveBalanceIncrement + unslashedMatchingTargetBalanceIncrements /= beaconConfig.EffectiveBalanceIncrement + unslashedMatchingHeadBalanceIncrements /= beaconConfig.EffectiveBalanceIncrement + fn := func(index uint64, currentValidator solid.Validator) error { + baseReward, err := s.BaseReward(index) + if err != nil { + return err + } + var previousMatchingSourceAttester, previousMatchingTargetAttester, previousMatchingHeadAttester bool + + if previousMatchingSourceAttester, err = s.ValidatorIsPreviousMatchingSourceAttester(int(index)); err != nil { + return err + } + if previousMatchingTargetAttester, err = s.ValidatorIsPreviousMatchingTargetAttester(int(index)); err != nil { + return err + } + if previousMatchingHeadAttester, err = s.ValidatorIsPreviousMatchingHeadAttester(int(index)); err != nil { + return err + } + totalReward := TotalReward{ValidatorIndex: int64(index)} + idealReward := IdealReward{EffectiveBalance: int64(currentValidator.EffectiveBalance())} + + // check inclusion delay + if !currentValidator.Slashed() && previousMatchingSourceAttester { + var attestation *solid.PendingAttestation + if attestation, err = s.ValidatorMinPreviousInclusionDelayAttestation(int(index)); err != nil { + return err + } + proposerReward := (baseReward / beaconConfig.ProposerRewardQuotient) + maxAttesterReward := baseReward - proposerReward + idealReward.InclusionDelay = int64(maxAttesterReward / attestation.InclusionDelay()) + totalReward.InclusionDelay = idealReward.InclusionDelay + } + // if it is not eligible for rewards, then do not continue further + if !(currentValidator.Active(prevEpoch) || (currentValidator.Slashed() && prevEpoch+1 < currentValidator.WithdrawableEpoch())) { + response.IdealRewards = append(response.IdealRewards, idealReward) + response.TotalRewards = append(response.TotalRewards, totalReward) + return nil + } + if inactivityLeak { + idealReward.Source = int64(baseReward) + idealReward.Target = int64(baseReward) + idealReward.Head = int64(baseReward) + } else { + idealReward.Source = int64(baseReward * unslashedMatchingSourceBalanceIncrements / rewardDenominator) + idealReward.Target = int64(baseReward * unslashedMatchingTargetBalanceIncrements / rewardDenominator) + idealReward.Head = int64(baseReward * unslashedMatchingHeadBalanceIncrements / rewardDenominator) + } + // we can use a multiplier to account for all attesting + var attested, missed uint64 + if currentValidator.Slashed() { + missed = 3 + } else { + if previousMatchingSourceAttester { + attested++ + totalReward.Source = idealReward.Source + } + if previousMatchingTargetAttester { + attested++ + totalReward.Target = idealReward.Target + } + if previousMatchingHeadAttester { + attested++ + totalReward.Head = idealReward.Head + } + missed = 3 - attested + } + // process inactivities + if inactivityLeak { + proposerReward := baseReward / beaconConfig.ProposerRewardQuotient + totalReward.Inactivity = -int64(beaconConfig.BaseRewardsPerEpoch*baseReward - proposerReward) + if currentValidator.Slashed() || !previousMatchingTargetAttester { + totalReward.Inactivity -= int64(currentValidator.EffectiveBalance() * state.FinalityDelay(s) / beaconConfig.InactivityPenaltyQuotient) + } + } + totalReward.Inactivity -= int64(baseReward * missed) + response.IdealRewards = append(response.IdealRewards, idealReward) + response.TotalRewards = append(response.TotalRewards, totalReward) + return nil + } + if len(filterIndicies) > 0 { + for _, index := range filterIndicies 
{ + v, err := s.ValidatorForValidatorIndex(int(index)) + if err != nil { + return nil, err + } + if err := fn(index, v); err != nil { + return nil, err + } + } + } else { + for index := uint64(0); index < uint64(s.ValidatorLength()); index++ { + v, err := s.ValidatorForValidatorIndex(int(index)) + if err != nil { + return nil, err + } + if err := fn(index, v); err != nil { + return nil, err + } + } + } + return newBeaconResponse(response), nil +} diff --git a/cl/beacon/handler/attestation_rewards_test.go b/cl/beacon/handler/attestation_rewards_test.go new file mode 100644 index 00000000000..f5823765c14 --- /dev/null +++ b/cl/beacon/handler/attestation_rewards_test.go @@ -0,0 +1,150 @@ +package handler + +import ( + "fmt" + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/stretchr/testify/require" +) + +func TestAttestationRewardsBellatrix(t *testing.T) { + _, blocks, _, _, _, handler, _, _, fcu := setupTestingHandler(t, clparams.BellatrixVersion) + var err error + fcu.HeadVal, err = blocks[len(blocks)-1].Block.HashSSZ() + require.NoError(t, err) + + fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot + + fcu.JustifiedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, fcu.HeadSlotVal/32) + fcu.FinalizedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, 99999999) + + cases := []struct { + name string + epoch uint64 + code int + request string + expected string + }{ + { + name: "all validators", + epoch: (fcu.HeadSlotVal / 32) - 1, + code: http.StatusOK, + expected: `{"data":{"ideal_rewards":[{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balan
ce":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680
","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactiv
ity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000",
"head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","in
clusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effect
ive_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target
":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0"
,"inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"}],"total_rewards":[{"validator_index":"0","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"1","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"2","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"3","he
ad":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"4","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"5","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"6","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"7","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"8","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"9","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"10","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"11","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"12","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"13","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"14","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"15","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"16","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"17","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"18","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"19","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"20","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"21","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"22","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"23","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"24","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"25","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"26","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"27","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"28","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"29","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"30","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"31","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"32","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"33","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"34","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"va
lidator_index":"35","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"36","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"37","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"38","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"39","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"40","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"41","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"42","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"43","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"44","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"45","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"46","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"47","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"48","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"49","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"50","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"51","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"52","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"53","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"54","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"55","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"56","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"57","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"58","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"59","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"60","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"61","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"62","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"63","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"64","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"65","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"66","head":"0","target":"290680","source":"-156520","inclusion_dela
y":"0","inactivity":"0"},{"validator_index":"67","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"68","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"69","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"70","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"71","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"72","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"73","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"74","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"75","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"76","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"77","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"78","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"79","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"80","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"81","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"82","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"83","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"84","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"85","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"86","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"87","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"88","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"89","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"90","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"91","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"92","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"93","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"94","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"95","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"96","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"97","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"98","head":"0","target":"290680","sour
ce":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"99","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"100","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"101","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"102","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"103","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"104","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"105","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"106","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"107","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"108","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"109","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"110","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"111","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"112","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"113","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"114","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"115","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"116","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"117","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"118","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"119","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"120","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"121","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"122","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"123","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"124","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"125","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"126","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"127","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"128","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"129","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"}
,{"validator_index":"130","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"131","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"132","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"133","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"134","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"135","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"136","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"137","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"138","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"139","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"140","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"141","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"142","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"143","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"144","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"145","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"146","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"147","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"148","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"149","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"150","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"151","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"152","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"153","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"154","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"155","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"156","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"157","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"158","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"159","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"160","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"161","head":"0","target":"29068
0","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"162","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"163","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"164","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"165","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"166","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"167","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"168","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"169","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"170","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"171","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"172","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"173","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"174","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"175","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"176","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"177","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"178","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"179","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"180","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"181","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"182","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"183","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"184","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"185","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"186","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"187","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"188","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"189","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"190","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"191","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"192","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactiv
ity":"0"},{"validator_index":"193","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"194","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"195","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"196","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"197","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"198","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"199","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"200","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"201","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"202","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"203","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"204","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"205","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"206","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"207","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"208","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"209","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"210","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"211","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"212","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"213","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"214","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"215","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"216","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"217","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"218","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"219","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"220","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"221","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"222","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"223","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"224","head":"0","targe
t":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"225","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"226","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"227","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"228","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"229","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"230","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"231","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"232","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"233","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"234","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"235","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"236","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"237","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"238","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"239","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"240","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"241","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"242","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"243","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"244","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"245","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"246","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"247","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"248","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"249","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"250","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"251","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"252","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"253","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"254","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"255","head":"0","target":"290680","source":"-156520","inclusion_delay":"0"
,"inactivity":"0"}]}}` + "\n", // Add your expected response + }, + { + epoch: 99999999, + code: http.StatusNotFound, + }, + { + name: "2 validators", + epoch: (fcu.HeadSlotVal / 32) - 1, + request: `["1","4"]`, + code: http.StatusOK, + expected: `{"data":{"ideal_rewards":[{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"}],"total_rewards":[{"validator_index":"1","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"4","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"}]}}` + "\n", // Add your expected response + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + server := httptest.NewServer(handler.mux) + defer server.Close() + url := fmt.Sprintf("%s/eth/v1/beacon/rewards/attestations/%d", server.URL, c.epoch) + + // Create a request + req, err := http.NewRequest("POST", url, strings.NewReader(c.request)) + require.NoError(t, err) + req.Header.Set("Content-Type", "application/json") + + // Perform the request + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + + // Check status code + require.Equal(t, c.code, resp.StatusCode) + + if resp.StatusCode != http.StatusOK { + return + } + + // Read the response body + out, err := io.ReadAll(resp.Body) + require.NoError(t, err) + // Compare the response with the expected result + require.Equal(t, c.expected, string(out)) + }) + } +} + +func TestAttestationRewardsPhase0(t *testing.T) { + _, blocks, _, _, _, handler, _, _, fcu := setupTestingHandler(t, clparams.Phase0Version) + var err error + fcu.HeadVal, err = blocks[len(blocks)-1].Block.HashSSZ() + require.NoError(t, err) + + fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot + + fcu.JustifiedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, fcu.HeadSlotVal/32) + fcu.FinalizedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, 99999999) + + cases := []struct { + name string + epoch uint64 + code int + request string + expected string + }{ + { + name: "all validators", + epoch: (fcu.HeadSlotVal / 32) - 1, + code: http.StatusOK, + expected: 
`{"data":{"ideal_rewards":[{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"20000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"25000000000","head":"84353","target":"84353","source":"84353","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"14217","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"22000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"11151","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4460","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"22302","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"7871","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"66906","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"26000000000","head":"87727","target":"87727","source":"87727","inclusion_delay":"4348","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"6462","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"26762","inactivity":"0"},{"effective_balance":"20000000000","head":"67482","target":"67482","source":"67482","inclusion_delay":"4646","inactivity":"0"},{"eff
ective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"26000000000","head":"87727","target":"87727","source":"87727","inclusion_delay":"4941","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"24000000000","head":"80979","target":"80979","source":"80979","inclusion_delay":"9123","inactivity":"0"},{"effective_balance":"21000000000","head":"70856","target":"70856","source":"70856","inclusion_delay":"17562","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4181","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"25000000000","head":"84353","target":"84353","source":"84353","inclusion_delay":"17423","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"4181","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"4442","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"33453","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"25000000000","head":"84353","target":"84353","source":"84353","inclusion_delay":"4355","inactivity":"0"},{"effective_balance":"22000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5352","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"3231","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"21000000000","head":"70856","target":"70856","source":"7
0856","inclusion_delay":"5854","inactivity":"0"},{"effective_balance":"18000000000","head":"60734","target":"60734","source":"60734","inclusion_delay":"8363","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6372","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"4442","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"11151","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"18000000000","head":"60734","target":"60734","source":"60734","inclusion_delay":"10752","inactivity":"0"},{"effective_balance":"30000000000","head":"101224","target":"101224","source":"101224","inclusion_delay":"3920","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"35543","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"9558","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5575","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"26000000000","head":"87727","target":"87727","source":"87727","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"10293","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","in
clusion_delay":"2221","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"14217","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6690","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"28000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"12164","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"22302","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"14868","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4181","inactivity":"0"},{"effective_balance":"19000000000","head":"64108","target":"64108","source":"64108","inclusion_delay":"5675","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4956","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2632","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"33453","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6372","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"3231","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"23000000000","head":"77605","target":"77605","source":"77605","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"7871","inactivity":"0"},{"effective_balance":"27000000000","head":"91101","target":"91101","source":"91101","inclusion_delay":"3763","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity
":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4460","inactivity":"0"},{"effective_balance":"23000000000","head":"77605","target":"77605","source":"77605","inclusion_delay":"10686","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"10293","inactivity":"0"},{"effective_balance":"24000000000","head":"80979","target":"80979","source":"80979","inclusion_delay":"4561","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"11151","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5575","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"5923","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4460","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"22302","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"33453","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2538","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"4442","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"19116","inactivity":"0"},{"effective_balance":"20000000000","head":"67482","target":"67482","source":"67482","inclusion_delay":"2697","inactivity":"0"},{"effective_balance":"22000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"7871","inactivity":"0"},{"effective_balance":"23000000000","head":"77605","target":"77605","source":"77605","inclusion_delay":"8743","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"66906","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000
000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4460","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"7898","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"17771","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"26762","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"14217","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5352","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"14868","inactivity":"0"},{"effective_balance":"19000000000","head":"64108","target":"64108","source":"64108","inclusion_delay":"5675","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"19000000000","head":"64108","target":"64108","source":"64108","inclusion_delay":"4673","inactivity":"0"},{"effective_balance":"26000000000","head":"87727","target":"87727","source":"87727","inclusion_delay":"4348","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6690","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"4181","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"effective_balance":"27000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"24000000000","head":"80979","target":"80979","source":"80979","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"4442","inactivity":"0"},{"effective_balance":"20000000000","head":"67482","target":"67482","source":"67482","inclusion
_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6690","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"3949","inactivity":"0"},{"effective_balance":"28000000000","head":"94475","target":"94475","source":"94475","inclusion_delay":"7805","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"18000000000","head":"60734","target":"60734","source":"60734","inclusion_delay":"8363","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"6462","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2843","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"26762","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"12164","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"22302","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"5468","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"23000000000","head":"77605","target":"77605","source":"77605","inclusion_delay":"3434","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2369","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"17771","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"31000000000","head":"104598","target":"104598","source":"104598","inclusion_delay":"5401","inactivity":"0"},{"effective_balance":"29000000000","head":"97850","target":"97850","source":"97850","inclusion_delay":"20211","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"11847","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2538","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"19116","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"2230
2","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"14217","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"19116","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2843","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"4181","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"effective_balance":"27000000000","head":"91101","target":"91101","source":"91101","inclusion_delay":"5131","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"9558","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2221","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"effective_balance":"23000000000","head":"77605","target":"77605","source":"77605","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"26000000000","head":"87727","target":"87727","source":"87727","inclusion_delay":"3507","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"5077","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"effective_balance":"18000000000","head":"60734","target":"60734","source":"60734","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"21000000000","head":"70856","target":"70856","source":"70856","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_de
lay":"4316","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6372","inactivity":"0"},{"effective_balance":"29000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"effective_balance":"18000000000","head":"60734","target":"60734","source":"60734","inclusion_delay":"2508","inactivity":"0"},{"effective_balance":"28000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"28000000000","head":"94475","target":"94475","source":"94475","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"28000000000","head":"94475","target":"94475","source":"94475","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"31000000000","head":"104598","target":"104598","source":"104598","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2632","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"effective_balance":"21000000000","head":"70856","target":"70856","source":"70856","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"7871","inactivity":"0"},{"effective_balance":"23000000000","head":"77605","target":"77605","source":"77605","inclusion_delay":"13739","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"19116","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"66906","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"effective_balance":"21000000000","head":"70856","target":"70856","source":"70856","inclusion_delay":"14635","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"effective_balance":"21000000000","head":"70856","target":"70856","source":"70856","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","
source":"107972","inclusion_delay":"4614","inactivity":"0"},{"effective_balance":"29000000000","head":"97850","target":"97850","source":"97850","inclusion_delay":"20211","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"effective_balance":"23000000000","head":"77605","target":"77605","source":"77605","inclusion_delay":"19235","inactivity":"0"},{"effective_balance":"18000000000","head":"60734","target":"60734","source":"60734","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"66906","inactivity":"0"},{"effective_balance":"30000000000","head":"101224","target":"101224","source":"101224","inclusion_delay":"5227","inactivity":"0"},{"effective_balance":"25000000000","head":"84353","target":"84353","source":"84353","inclusion_delay":"3604","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4460","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"27000000000","head":"91101","target":"91101","source":"91101","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"30000000000","head":"101224","target":"101224","source":"101224","inclusion_delay":"6272","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"17771","inactivity":"0"},{"effective_balance":"19000000000","head":"64108","target":"64108","source":"64108","inclusion_delay":"8827","inactivity":"0"},{"effective_balance":"24000000000","head":"80979","target":"80979","source":"80979","inclusion_delay":"20071","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"31000000000","head":"104598","target":"104598","source":"104598","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6690","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"5923","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5352","inactivity":"0"},{"effective_balance":"19000000000","head":"64108","target":"64108","source":"64108","inclusion_delay":"3310","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","tar
get":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"effective_balance":"25000000000","head":"84353","target":"84353","source":"84353","inclusion_delay":"14934","inactivity":"0"},{"effective_balance":"26000000000","head":"87727","target":"87727","source":"87727","inclusion_delay":"54361","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"26762","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"20000000000","head":"67482","target":"67482","source":"67482","inclusion_delay":"3636","inactivity":"0"},{"effective_balance":"27000000000","head":"91101","target":"91101","source":"91101","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"19000000000","head":"64108","target":"64108","source":"64108","inclusion_delay":"6111","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"14868","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"3554","inactivity":"0"},{"effective_balance":"24000000000","head":"80979","target":"80979","source":"80979","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"effective_balance":"25000000000","head":"84353","target":"84353","source":"84353","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"27000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"14868","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"3385","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"5468","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"12164","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_d
elay":"3554","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"9558","inactivity":"0"},{"effective_balance":"20000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6372","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"3385","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2843","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"9558","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"31000000000","head":"104598","target":"104598","source":"104598","inclusion_delay":"4470","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2369","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"effective_balance":"22000000000","head":"74230","target":"74230","source":"74230","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"26762","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"26000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5352","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"3554","inactivity":"0"},{"effective_balance":"23000000000","head":"77605","target":"77605","source":"77605","inclusion_delay":"3316","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"4181","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4181","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_
balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2538","inactivity":"0"},{"effective_balance":"25000000000","head":"84353","target":"84353","source":"84353","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"5468","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2221","inactivity":"0"},{"effective_balance":"27000000000","head":"91101","target":"91101","source":"91101","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"66906","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"29000000000","head":"97850","target":"97850","source":"97850","inclusion_delay":"60633","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"effective_balance":"20000000000","head":"67482","target":"67482","source":"67482","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"7898","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"14217","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"18000000000","head":"60734","target":"60734","source":"60734","inclusion_delay":"4704","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2843","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"25000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"3554","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"10293","inactivity":"0"},{"effective_balance":"28000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"20000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"35543","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5575","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2293","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"10
7972","inclusion_delay":"6690","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"4442","inactivity":"0"},{"effective_balance":"22000000000","head":"74230","target":"74230","source":"74230","inclusion_delay":"5110","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"5923","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2632","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6372","inactivity":"0"},{"effective_balance":"24000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"31000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2369","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"11847","inactivity":"0"},{"effective_balance":"27000000000","head":"91101","target":"91101","source":"91101","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6372","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2369","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"4739","inactivity":"0"},{"effective_balance":"18000000000","head":"60734","target":"60734","source":"60734","inclusion_delay":"4427","inactivity":"0"},{"effective_balance":"28000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5575","inactivity":"0"},{"effective_balance":"28000000000","head":"94475","target":"94475","source":"94475","inclusion_delay":"29271","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5575","inactivity":"0"},{"effective_balance":"30000000000","head":"101224","target":"101224","source":"101224","inclusion_delay":"25089","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"30000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"29000000000","head":"97850","target":"97850","source":"97850","inclusion_delay":"3789","inactivity":"0"},{"effective_b
alance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"21000000000","head":"70856","target":"70856","source":"70856","inclusion_delay":"7317","inactivity":"0"},{"effective_balance":"31000000000","head":"104598","target":"104598","source":"104598","inclusion_delay":"7201","inactivity":"0"},{"effective_balance":"22000000000","head":"74230","target":"74230","source":"74230","inclusion_delay":"5411","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"24000000000","head":"80979","target":"80979","source":"80979","inclusion_delay":"7168","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4460","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"effective_balance":"19000000000","head":"64108","target":"64108","source":"64108","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"5468","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2843","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"10293","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5352","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"3385","inactivity":"0"},{"effective_balance":"26000000000","head":"87727","target":"87727","source":"87727","inclusion_delay":"4530","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"5077","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"effective_balance":"19000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"3554","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"14868","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"22302","inactivity":"0"},{"effective_balance":"32000000000","head":"107972",
"target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"12164","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2632","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"5468","inactivity":"0"},{"effective_balance":"31000000000","head":"104598","target":"104598","source":"104598","inclusion_delay":"4050","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"17771","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2961","inactivity":"0"},{"effective_balance":"24000000000","head":"80979","target":"80979","source":"80979","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"30000000000","head":"101224","target":"101224","source":"101224","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"5077","inactivity":"0"},{"effective_balance":"21000000000","head":"70856","target":"70856","source":"70856","inclusion_delay":"7983","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"5923","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2538","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"14868","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"29000000000","head":"97850","target":"97850","source":"97850","inclusion_delay":"5272","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"
effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"30000000000","head":"101224","target":"101224","source":"101224","inclusion_delay":"4646","inactivity":"0"},{"effective_balance":"19000000000","head":"64108","target":"64108","source":"64108","inclusion_delay":"39725","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"12164","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"3554","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2369","inactivity":"0"},{"effective_balance":"29000000000","head":"97850","target":"97850","source":"97850","inclusion_delay":"4181","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"11847","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"effective_balance":"31000000000","head":"104598","target":"104598","source":"104598","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2632","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head"
:"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2632","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"12164","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"29000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"30000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4956","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"effective_balance":"22000000000","head":"74230","target":"74230","source":"74230","inclusion_delay":"22998","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"11151","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2293","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"3385","inactivity":"0"},{"effective_balance":"22000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2221","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6690","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"11151","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4956","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"35543","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","i
nclusion_delay":"5077","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"6462","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5352","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"}],"total_rewards":[{"validator_index":"0","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"1","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"2","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-358422"},{"validator_index":"3","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"4","head":"57360","target":"57360","source":"57360","inclusion_delay":"14217","inactivity":"0"},{"validator_index":"5","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"validator_index":"6","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"7","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"8","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"9","head":"107972","target":"107972","source":"107972","inclusion_delay":"11151","inactivity":"0"},{"validator_index":"10","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"validator_index":"11","head":"107972","target":"107972","source":"107972","inclusion_delay":"4460","inactivity":"0"},{"validator_index":"12","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"13","
head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"14","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"validator_index":"15","head":"107972","target":"107972","source":"107972","inclusion_delay":"22302","inactivity":"0"},{"validator_index":"16","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"17","head":"107972","target":"107972","source":"107972","inclusion_delay":"7871","inactivity":"0"},{"validator_index":"18","head":"107972","target":"107972","source":"107972","inclusion_delay":"66906","inactivity":"0"},{"validator_index":"19","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"20","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"validator_index":"21","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"validator_index":"22","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"23","head":"87727","target":"87727","source":"87727","inclusion_delay":"4348","inactivity":"0"},{"validator_index":"24","head":"57360","target":"57360","source":"57360","inclusion_delay":"6462","inactivity":"0"},{"validator_index":"25","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"validator_index":"26","head":"107972","target":"107972","source":"107972","inclusion_delay":"26762","inactivity":"0"},{"validator_index":"27","head":"67482","target":"67482","source":"67482","inclusion_delay":"4646","inactivity":"0"},{"validator_index":"28","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"29","head":"87727","target":"87727","source":"87727","inclusion_delay":"4941","inactivity":"0"},{"validator_index":"30","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"31","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"32","head":"80979","target":"80979","source":"80979","inclusion_delay":"9123","inactivity":"0"},{"validator_index":"33","head":"70856","target":"70856","source":"70856","inclusion_delay":"17562","inactivity":"0"},{"validator_index":"34","head":"107972","target":"107972","source":"107972","inclusion_delay":"4181","inactivity":"0"},{"validator_index":"35","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"validator_index":"36","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"37","head":"84353","target":"84353","source":"84353","inclusion_delay":"17423","inactivity":"0"},{"validator_index":"38","head":"57360","target":"57360","source":"57360","inclusion_delay":"4181","inactivity":"0"},{"validator_index":"39","head":"57360","target":"57360","source":"57360","inclusion_delay":"4442","inactivity":"0"},{"validator_index":"40","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"41","head":"107972","target":"107972","source":"107972","inclusion_delay":"33453","inactivity":"0"},{"validator_index":"42","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"43","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"44","head":"84353","target":"84353","source":"8435
3","inclusion_delay":"4355","inactivity":"0"},{"validator_index":"45","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"46","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"47","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"48","head":"107972","target":"107972","source":"107972","inclusion_delay":"5352","inactivity":"0"},{"validator_index":"49","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"validator_index":"50","head":"57360","target":"57360","source":"57360","inclusion_delay":"3231","inactivity":"0"},{"validator_index":"51","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"52","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"53","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"validator_index":"54","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"validator_index":"55","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"56","head":"70856","target":"70856","source":"70856","inclusion_delay":"5854","inactivity":"0"},{"validator_index":"57","head":"60734","target":"60734","source":"60734","inclusion_delay":"8363","inactivity":"0"},{"validator_index":"58","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"validator_index":"59","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"validator_index":"60","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"validator_index":"61","head":"107972","target":"107972","source":"107972","inclusion_delay":"6372","inactivity":"0"},{"validator_index":"62","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"63","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"64","head":"57360","target":"57360","source":"57360","inclusion_delay":"4442","inactivity":"0"},{"validator_index":"65","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"validator_index":"66","head":"107972","target":"107972","source":"107972","inclusion_delay":"11151","inactivity":"0"},{"validator_index":"67","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"68","head":"60734","target":"60734","source":"60734","inclusion_delay":"10752","inactivity":"0"},{"validator_index":"69","head":"101224","target":"101224","source":"101224","inclusion_delay":"3920","inactivity":"0"},{"validator_index":"70","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"validator_index":"71","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"validator_index":"72","head":"57360","target":"57360","source":"57360","inclusion_delay":"35543","inactivity":"0"},{"validator_index":"73","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"74","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"75","head":"107972","target":"107972","source":"107972","inclusion_delay":"9558","inactivity":
"0"},{"validator_index":"76","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"validator_index":"77","head":"107972","target":"107972","source":"107972","inclusion_delay":"5575","inactivity":"0"},{"validator_index":"78","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"79","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"80","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"81","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-372759"},{"validator_index":"82","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"83","head":"107972","target":"107972","source":"107972","inclusion_delay":"10293","inactivity":"0"},{"validator_index":"84","head":"57360","target":"57360","source":"57360","inclusion_delay":"2221","inactivity":"0"},{"validator_index":"85","head":"57360","target":"57360","source":"57360","inclusion_delay":"14217","inactivity":"0"},{"validator_index":"86","head":"107972","target":"107972","source":"107972","inclusion_delay":"6690","inactivity":"0"},{"validator_index":"87","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"88","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"89","head":"107972","target":"107972","source":"107972","inclusion_delay":"12164","inactivity":"0"},{"validator_index":"90","head":"107972","target":"107972","source":"107972","inclusion_delay":"22302","inactivity":"0"},{"validator_index":"91","head":"107972","target":"107972","source":"107972","inclusion_delay":"14868","inactivity":"0"},{"validator_index":"92","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"93","head":"107972","target":"107972","source":"107972","inclusion_delay":"4181","inactivity":"0"},{"validator_index":"94","head":"64108","target":"64108","source":"64108","inclusion_delay":"5675","inactivity":"0"},{"validator_index":"95","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"validator_index":"96","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"validator_index":"97","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"98","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"validator_index":"99","head":"107972","target":"107972","source":"107972","inclusion_delay":"4956","inactivity":"0"},{"validator_index":"100","head":"57360","target":"57360","source":"57360","inclusion_delay":"2632","inactivity":"0"},{"validator_index":"101","head":"107972","target":"107972","source":"107972","inclusion_delay":"33453","inactivity":"0"},{"validator_index":"102","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"103","head":"107972","target":"107972","source":"107972","inclusion_delay":"6372","inactivity":"0"},{"validator_index":"104","head":"57360","target":"57360","source":"57360","inclusion_delay":"3231","inactivity":"0"},{"validator_index":"105","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"106","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"107","head":"0","target":"0",
"source":"0","inclusion_delay":"0","inactivity":"-329748"},{"validator_index":"108","head":"107972","target":"107972","source":"107972","inclusion_delay":"7871","inactivity":"0"},{"validator_index":"109","head":"91101","target":"91101","source":"91101","inclusion_delay":"3763","inactivity":"0"},{"validator_index":"110","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"validator_index":"111","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"112","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"113","head":"107972","target":"107972","source":"107972","inclusion_delay":"4460","inactivity":"0"},{"validator_index":"114","head":"77605","target":"77605","source":"77605","inclusion_delay":"10686","inactivity":"0"},{"validator_index":"115","head":"107972","target":"107972","source":"107972","inclusion_delay":"10293","inactivity":"0"},{"validator_index":"116","head":"80979","target":"80979","source":"80979","inclusion_delay":"4561","inactivity":"0"},{"validator_index":"117","head":"107972","target":"107972","source":"107972","inclusion_delay":"11151","inactivity":"0"},{"validator_index":"118","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"119","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"120","head":"107972","target":"107972","source":"107972","inclusion_delay":"5575","inactivity":"0"},{"validator_index":"121","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"122","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"123","head":"57360","target":"57360","source":"57360","inclusion_delay":"5923","inactivity":"0"},{"validator_index":"124","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"125","head":"107972","target":"107972","source":"107972","inclusion_delay":"4460","inactivity":"0"},{"validator_index":"126","head":"107972","target":"107972","source":"107972","inclusion_delay":"22302","inactivity":"0"},{"validator_index":"127","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"128","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"129","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"130","head":"107972","target":"107972","source":"107972","inclusion_delay":"33453","inactivity":"0"},{"validator_index":"131","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"132","head":"57360","target":"57360","source":"57360","inclusion_delay":"2538","inactivity":"0"},{"validator_index":"133","head":"57360","target":"57360","source":"57360","inclusion_delay":"4442","inactivity":"0"},{"validator_index":"134","head":"107972","target":"107972","source":"107972","inclusion_delay":"19116","inactivity":"0"},{"validator_index":"135","head":"67482","target":"67482","source":"67482","inclusion_delay":"2697","inactivity":"0"},{"validator_index":"136","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"137","head":"107972","target":"107972","source":"107972","inclusion_delay":"7871","inactivity":"0"},{"validator_index":"138","head":"77605","target":"77605","source":"77605","incl
usion_delay":"8743","inactivity":"0"},{"validator_index":"139","head":"107972","target":"107972","source":"107972","inclusion_delay":"66906","inactivity":"0"},{"validator_index":"140","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"141","head":"107972","target":"107972","source":"107972","inclusion_delay":"4460","inactivity":"0"},{"validator_index":"142","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"143","head":"57360","target":"57360","source":"57360","inclusion_delay":"7898","inactivity":"0"},{"validator_index":"144","head":"57360","target":"57360","source":"57360","inclusion_delay":"17771","inactivity":"0"},{"validator_index":"145","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"146","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"validator_index":"147","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"148","head":"107972","target":"107972","source":"107972","inclusion_delay":"26762","inactivity":"0"},{"validator_index":"149","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"150","head":"57360","target":"57360","source":"57360","inclusion_delay":"14217","inactivity":"0"},{"validator_index":"151","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"152","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"validator_index":"153","head":"107972","target":"107972","source":"107972","inclusion_delay":"5352","inactivity":"0"},{"validator_index":"154","head":"107972","target":"107972","source":"107972","inclusion_delay":"14868","inactivity":"0"},{"validator_index":"155","head":"64108","target":"64108","source":"64108","inclusion_delay":"5675","inactivity":"0"},{"validator_index":"156","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"157","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"158","head":"64108","target":"64108","source":"64108","inclusion_delay":"4673","inactivity":"0"},{"validator_index":"159","head":"87727","target":"87727","source":"87727","inclusion_delay":"4348","inactivity":"0"},{"validator_index":"160","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"161","head":"107972","target":"107972","source":"107972","inclusion_delay":"6690","inactivity":"0"},{"validator_index":"162","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"163","head":"57360","target":"57360","source":"57360","inclusion_delay":"4181","inactivity":"0"},{"validator_index":"164","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"165","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"validator_index":"166","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"167","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-344085"},{"validator_index":"168","head":"57360","target":"57360","source":"57360","inclusion_delay":"4442","inactivity":"0"},{"validator_index":"169","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-286737"},{"validator_index":"
170","head":"107972","target":"107972","source":"107972","inclusion_delay":"6690","inactivity":"0"},{"validator_index":"171","head":"57360","target":"57360","source":"57360","inclusion_delay":"3949","inactivity":"0"},{"validator_index":"172","head":"94475","target":"94475","source":"94475","inclusion_delay":"7805","inactivity":"0"},{"validator_index":"173","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"174","head":"60734","target":"60734","source":"60734","inclusion_delay":"8363","inactivity":"0"},{"validator_index":"175","head":"57360","target":"57360","source":"57360","inclusion_delay":"6462","inactivity":"0"},{"validator_index":"176","head":"57360","target":"57360","source":"57360","inclusion_delay":"2843","inactivity":"0"},{"validator_index":"177","head":"107972","target":"107972","source":"107972","inclusion_delay":"26762","inactivity":"0"},{"validator_index":"178","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"179","head":"107972","target":"107972","source":"107972","inclusion_delay":"12164","inactivity":"0"},{"validator_index":"180","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"181","head":"107972","target":"107972","source":"107972","inclusion_delay":"22302","inactivity":"0"},{"validator_index":"182","head":"57360","target":"57360","source":"57360","inclusion_delay":"5468","inactivity":"0"},{"validator_index":"183","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"validator_index":"184","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"185","head":"77605","target":"77605","source":"77605","inclusion_delay":"3434","inactivity":"0"},{"validator_index":"186","head":"57360","target":"57360","source":"57360","inclusion_delay":"2369","inactivity":"0"},{"validator_index":"187","head":"57360","target":"57360","source":"57360","inclusion_delay":"17771","inactivity":"0"},{"validator_index":"188","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"validator_index":"189","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"190","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"191","head":"104598","target":"104598","source":"104598","inclusion_delay":"5401","inactivity":"0"},{"validator_index":"192","head":"97850","target":"97850","source":"97850","inclusion_delay":"20211","inactivity":"0"},{"validator_index":"193","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"validator_index":"194","head":"57360","target":"57360","source":"57360","inclusion_delay":"11847","inactivity":"0"},{"validator_index":"195","head":"57360","target":"57360","source":"57360","inclusion_delay":"2538","inactivity":"0"},{"validator_index":"196","head":"107972","target":"107972","source":"107972","inclusion_delay":"19116","inactivity":"0"},{"validator_index":"197","head":"107972","target":"107972","source":"107972","inclusion_delay":"22302","inactivity":"0"},{"validator_index":"198","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"validator_index":"199","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"200","head":"57360","target":"57360","source":"57360","inclusion_delay":"14217","inac
tivity":"0"},{"validator_index":"201","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"validator_index":"202","head":"107972","target":"107972","source":"107972","inclusion_delay":"19116","inactivity":"0"},{"validator_index":"203","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"204","head":"57360","target":"57360","source":"57360","inclusion_delay":"2843","inactivity":"0"},{"validator_index":"205","head":"57360","target":"57360","source":"57360","inclusion_delay":"4181","inactivity":"0"},{"validator_index":"206","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"validator_index":"207","head":"91101","target":"91101","source":"91101","inclusion_delay":"5131","inactivity":"0"},{"validator_index":"208","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"209","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"210","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"211","head":"107972","target":"107972","source":"107972","inclusion_delay":"9558","inactivity":"0"},{"validator_index":"212","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"213","head":"57360","target":"57360","source":"57360","inclusion_delay":"2221","inactivity":"0"},{"validator_index":"214","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"validator_index":"215","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-329748"},{"validator_index":"216","head":"87727","target":"87727","source":"87727","inclusion_delay":"3507","inactivity":"0"},{"validator_index":"217","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"validator_index":"218","head":"57360","target":"57360","source":"57360","inclusion_delay":"5077","inactivity":"0"},{"validator_index":"219","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"validator_index":"220","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"validator_index":"221","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"222","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"validator_index":"223","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-258063"},{"validator_index":"224","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-301074"},{"validator_index":"225","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"validator_index":"226","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"227","head":"107972","target":"107972","source":"107972","inclusion_delay":"6372","inactivity":"0"},{"validator_index":"228","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"229","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"validator_index":"230","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"231","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"valid
ator_index":"232","head":"60734","target":"60734","source":"60734","inclusion_delay":"2508","inactivity":"0"},{"validator_index":"233","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"234","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-401433"},{"validator_index":"235","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-401433"},{"validator_index":"236","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"237","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"238","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"239","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-444444"},{"validator_index":"240","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"241","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"242","head":"57360","target":"57360","source":"57360","inclusion_delay":"2632","inactivity":"0"},{"validator_index":"243","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"244","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"validator_index":"245","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-301074"},{"validator_index":"246","head":"107972","target":"107972","source":"107972","inclusion_delay":"7871","inactivity":"0"},{"validator_index":"247","head":"77605","target":"77605","source":"77605","inclusion_delay":"13739","inactivity":"0"},{"validator_index":"248","head":"107972","target":"107972","source":"107972","inclusion_delay":"19116","inactivity":"0"},{"validator_index":"249","head":"107972","target":"107972","source":"107972","inclusion_delay":"66906","inactivity":"0"},{"validator_index":"250","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"validator_index":"251","head":"70856","target":"70856","source":"70856","inclusion_delay":"14635","inactivity":"0"},{"validator_index":"252","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"validator_index":"253","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-301074"},{"validator_index":"254","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"validator_index":"255","head":"97850","target":"97850","source":"97850","inclusion_delay":"20211","inactivity":"0"},{"validator_index":"256","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"257","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"validator_index":"258","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"validator_index":"259","head":"77605","target":"77605","source":"77605","inclusion_delay":"19235","inactivity":"0"},{"validator_index":"260","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-258063"},{"validator_index":"261","head":"107972","target":"107972","source":"107972","inclusion_delay":"66906","inactivity":"0"},{"validator_index":"262","head":"101224","target":"101224","source":"101224","inclusion_delay":"5227","inactivity":"0"},{"validator_index":"263","head":"84353","target":"84353",
"source":"84353","inclusion_delay":"3604","inactivity":"0"},{"validator_index":"264","head":"107972","target":"107972","source":"107972","inclusion_delay":"4460","inactivity":"0"},{"validator_index":"265","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"266","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-387096"},{"validator_index":"267","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"268","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"269","head":"101224","target":"101224","source":"101224","inclusion_delay":"6272","inactivity":"0"},{"validator_index":"270","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"validator_index":"271","head":"57360","target":"57360","source":"57360","inclusion_delay":"17771","inactivity":"0"},{"validator_index":"272","head":"64108","target":"64108","source":"64108","inclusion_delay":"8827","inactivity":"0"},{"validator_index":"273","head":"80979","target":"80979","source":"80979","inclusion_delay":"20071","inactivity":"0"},{"validator_index":"274","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"275","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-444444"},{"validator_index":"276","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"277","head":"107972","target":"107972","source":"107972","inclusion_delay":"6690","inactivity":"0"},{"validator_index":"278","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"validator_index":"279","head":"57360","target":"57360","source":"57360","inclusion_delay":"5923","inactivity":"0"},{"validator_index":"280","head":"107972","target":"107972","source":"107972","inclusion_delay":"5352","inactivity":"0"},{"validator_index":"281","head":"64108","target":"64108","source":"64108","inclusion_delay":"3310","inactivity":"0"},{"validator_index":"282","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"283","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"284","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"validator_index":"285","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"286","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"287","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"validator_index":"288","head":"84353","target":"84353","source":"84353","inclusion_delay":"14934","inactivity":"0"},{"validator_index":"289","head":"87727","target":"87727","source":"87727","inclusion_delay":"54361","inactivity":"0"},{"validator_index":"290","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"291","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"292","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"293","head":"107972","target":"107972","source":"107972","inclusion_delay":"26762","inactivity":"0"},{"validator_index":"294","head":"0","target":"0","source":"0","inclusion_delay":"0","inacti
vity":"-243726"},{"validator_index":"295","head":"67482","target":"67482","source":"67482","inclusion_delay":"3636","inactivity":"0"},{"validator_index":"296","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-387096"},{"validator_index":"297","head":"64108","target":"64108","source":"64108","inclusion_delay":"6111","inactivity":"0"},{"validator_index":"298","head":"107972","target":"107972","source":"107972","inclusion_delay":"14868","inactivity":"0"},{"validator_index":"299","head":"57360","target":"57360","source":"57360","inclusion_delay":"3554","inactivity":"0"},{"validator_index":"300","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-344085"},{"validator_index":"301","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"302","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"validator_index":"303","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-358422"},{"validator_index":"304","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"305","head":"107972","target":"107972","source":"107972","inclusion_delay":"14868","inactivity":"0"},{"validator_index":"306","head":"57360","target":"57360","source":"57360","inclusion_delay":"3385","inactivity":"0"},{"validator_index":"307","head":"57360","target":"57360","source":"57360","inclusion_delay":"5468","inactivity":"0"},{"validator_index":"308","head":"107972","target":"107972","source":"107972","inclusion_delay":"12164","inactivity":"0"},{"validator_index":"309","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"310","head":"57360","target":"57360","source":"57360","inclusion_delay":"3554","inactivity":"0"},{"validator_index":"311","head":"107972","target":"107972","source":"107972","inclusion_delay":"9558","inactivity":"0"},{"validator_index":"312","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"313","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"314","head":"107972","target":"107972","source":"107972","inclusion_delay":"6372","inactivity":"0"},{"validator_index":"315","head":"57360","target":"57360","source":"57360","inclusion_delay":"3385","inactivity":"0"},{"validator_index":"316","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"317","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"318","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"319","head":"57360","target":"57360","source":"57360","inclusion_delay":"2843","inactivity":"0"},{"validator_index":"320","head":"107972","target":"107972","source":"107972","inclusion_delay":"9558","inactivity":"0"},{"validator_index":"321","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"322","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"323","head":"104598","target":"104598","source":"104598","inclusion_delay":"4470","inactivity":"0"},{"validator_index":"324","head":"57360","target":"57360","source":"57360","inclusion_delay":"2369","inactivity":"0"},{"validator_index":"325","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"validator_inde
x":"326","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-315411"},{"validator_index":"327","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"328","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"validator_index":"329","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"330","head":"107972","target":"107972","source":"107972","inclusion_delay":"26762","inactivity":"0"},{"validator_index":"331","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"332","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"333","head":"107972","target":"107972","source":"107972","inclusion_delay":"5352","inactivity":"0"},{"validator_index":"334","head":"57360","target":"57360","source":"57360","inclusion_delay":"3554","inactivity":"0"},{"validator_index":"335","head":"77605","target":"77605","source":"77605","inclusion_delay":"3316","inactivity":"0"},{"validator_index":"336","head":"57360","target":"57360","source":"57360","inclusion_delay":"4181","inactivity":"0"},{"validator_index":"337","head":"107972","target":"107972","source":"107972","inclusion_delay":"4181","inactivity":"0"},{"validator_index":"338","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"339","head":"57360","target":"57360","source":"57360","inclusion_delay":"2538","inactivity":"0"},{"validator_index":"340","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-358422"},{"validator_index":"341","head":"57360","target":"57360","source":"57360","inclusion_delay":"5468","inactivity":"0"},{"validator_index":"342","head":"57360","target":"57360","source":"57360","inclusion_delay":"2221","inactivity":"0"},{"validator_index":"343","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-387096"},{"validator_index":"344","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"345","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"346","head":"107972","target":"107972","source":"107972","inclusion_delay":"66906","inactivity":"0"},{"validator_index":"347","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"348","head":"97850","target":"97850","source":"97850","inclusion_delay":"60633","inactivity":"0"},{"validator_index":"349","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"validator_index":"350","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-286737"},{"validator_index":"351","head":"57360","target":"57360","source":"57360","inclusion_delay":"7898","inactivity":"0"},{"validator_index":"352","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"353","head":"57360","target":"57360","source":"57360","inclusion_delay":"14217","inactivity":"0"},{"validator_index":"354","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"355","head":"60734","target":"60734","source":"60734","inclusion_delay":"4704","inactivity":"0"},{"validator_index":"356","head":"57360","target":"57360","source":"57360","inclusion_delay":"2843","inactivity":"0"},{"validator_index":"357","head":"0","target":"0","source":"0","inc
lusion_delay":"0","inactivity":"0"},{"validator_index":"358","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"359","head":"57360","target":"57360","source":"57360","inclusion_delay":"3554","inactivity":"0"},{"validator_index":"360","head":"107972","target":"107972","source":"107972","inclusion_delay":"10293","inactivity":"0"},{"validator_index":"361","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"362","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"363","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"validator_index":"364","head":"57360","target":"57360","source":"57360","inclusion_delay":"35543","inactivity":"0"},{"validator_index":"365","head":"107972","target":"107972","source":"107972","inclusion_delay":"5575","inactivity":"0"},{"validator_index":"366","head":"57360","target":"57360","source":"57360","inclusion_delay":"2293","inactivity":"0"},{"validator_index":"367","head":"107972","target":"107972","source":"107972","inclusion_delay":"6690","inactivity":"0"},{"validator_index":"368","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"369","head":"57360","target":"57360","source":"57360","inclusion_delay":"4442","inactivity":"0"},{"validator_index":"370","head":"74230","target":"74230","source":"74230","inclusion_delay":"5110","inactivity":"0"},{"validator_index":"371","head":"57360","target":"57360","source":"57360","inclusion_delay":"5923","inactivity":"0"},{"validator_index":"372","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"373","head":"57360","target":"57360","source":"57360","inclusion_delay":"2632","inactivity":"0"},{"validator_index":"374","head":"107972","target":"107972","source":"107972","inclusion_delay":"6372","inactivity":"0"},{"validator_index":"375","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"376","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"377","head":"57360","target":"57360","source":"57360","inclusion_delay":"2369","inactivity":"0"},{"validator_index":"378","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"379","head":"57360","target":"57360","source":"57360","inclusion_delay":"11847","inactivity":"0"},{"validator_index":"380","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-387096"},{"validator_index":"381","head":"107972","target":"107972","source":"107972","inclusion_delay":"6372","inactivity":"0"},{"validator_index":"382","head":"57360","target":"57360","source":"57360","inclusion_delay":"2369","inactivity":"0"},{"validator_index":"383","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"384","head":"57360","target":"57360","source":"57360","inclusion_delay":"4739","inactivity":"0"},{"validator_index":"385","head":"60734","target":"60734","source":"60734","inclusion_delay":"4427","inactivity":"0"},{"validator_index":"386","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"387","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"validator_index":"388","head":"107972","target":"107972","source":"107972","inclusion_delay":"5575","inactivity":"0"},
{"validator_index":"389","head":"94475","target":"94475","source":"94475","inclusion_delay":"29271","inactivity":"0"},{"validator_index":"390","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"391","head":"107972","target":"107972","source":"107972","inclusion_delay":"5575","inactivity":"0"},{"validator_index":"392","head":"101224","target":"101224","source":"101224","inclusion_delay":"25089","inactivity":"0"},{"validator_index":"393","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"394","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"395","head":"97850","target":"97850","source":"97850","inclusion_delay":"3789","inactivity":"0"},{"validator_index":"396","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"397","head":"70856","target":"70856","source":"70856","inclusion_delay":"7317","inactivity":"0"},{"validator_index":"398","head":"104598","target":"104598","source":"104598","inclusion_delay":"7201","inactivity":"0"},{"validator_index":"399","head":"74230","target":"74230","source":"74230","inclusion_delay":"5411","inactivity":"0"},{"validator_index":"400","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"401","head":"80979","target":"80979","source":"80979","inclusion_delay":"7168","inactivity":"0"},{"validator_index":"402","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"403","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"404","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"405","head":"107972","target":"107972","source":"107972","inclusion_delay":"4460","inactivity":"0"},{"validator_index":"406","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"validator_index":"407","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-272400"},{"validator_index":"408","head":"57360","target":"57360","source":"57360","inclusion_delay":"5468","inactivity":"0"},{"validator_index":"409","head":"57360","target":"57360","source":"57360","inclusion_delay":"2843","inactivity":"0"},{"validator_index":"410","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"411","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"validator_index":"412","head":"107972","target":"107972","source":"107972","inclusion_delay":"10293","inactivity":"0"},{"validator_index":"413","head":"107972","target":"107972","source":"107972","inclusion_delay":"5352","inactivity":"0"},{"validator_index":"414","head":"57360","target":"57360","source":"57360","inclusion_delay":"3385","inactivity":"0"},{"validator_index":"415","head":"87727","target":"87727","source":"87727","inclusion_delay":"4530","inactivity":"0"},{"validator_index":"416","head":"57360","target":"57360","source":"57360","inclusion_delay":"5077","inactivity":"0"},{"validator_index":"417","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"validator_index":"418","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"419","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"420","head":"573
60","target":"57360","source":"57360","inclusion_delay":"3554","inactivity":"0"},{"validator_index":"421","head":"107972","target":"107972","source":"107972","inclusion_delay":"14868","inactivity":"0"},{"validator_index":"422","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"423","head":"107972","target":"107972","source":"107972","inclusion_delay":"22302","inactivity":"0"},{"validator_index":"424","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"425","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"validator_index":"426","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"427","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"validator_index":"428","head":"107972","target":"107972","source":"107972","inclusion_delay":"12164","inactivity":"0"},{"validator_index":"429","head":"57360","target":"57360","source":"57360","inclusion_delay":"2632","inactivity":"0"},{"validator_index":"430","head":"57360","target":"57360","source":"57360","inclusion_delay":"5468","inactivity":"0"},{"validator_index":"431","head":"104598","target":"104598","source":"104598","inclusion_delay":"4050","inactivity":"0"},{"validator_index":"432","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"433","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"434","head":"57360","target":"57360","source":"57360","inclusion_delay":"17771","inactivity":"0"},{"validator_index":"435","head":"57360","target":"57360","source":"57360","inclusion_delay":"2961","inactivity":"0"},{"validator_index":"436","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-344085"},{"validator_index":"437","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"438","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-430107"},{"validator_index":"439","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"440","head":"57360","target":"57360","source":"57360","inclusion_delay":"5077","inactivity":"0"},{"validator_index":"441","head":"70856","target":"70856","source":"70856","inclusion_delay":"7983","inactivity":"0"},{"validator_index":"442","head":"57360","target":"57360","source":"57360","inclusion_delay":"5923","inactivity":"0"},{"validator_index":"443","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"444","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"445","head":"57360","target":"57360","source":"57360","inclusion_delay":"2538","inactivity":"0"},{"validator_index":"446","head":"107972","target":"107972","source":"107972","inclusion_delay":"14868","inactivity":"0"},{"validator_index":"447","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"448","head":"97850","target":"97850","source":"97850","inclusion_delay":"5272","inactivity":"0"},{"validator_index":"449","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"450","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"451","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","i
nactivity":"0"},{"validator_index":"452","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"453","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"454","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"455","head":"101224","target":"101224","source":"101224","inclusion_delay":"4646","inactivity":"0"},{"validator_index":"456","head":"64108","target":"64108","source":"64108","inclusion_delay":"39725","inactivity":"0"},{"validator_index":"457","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"validator_index":"458","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"459","head":"107972","target":"107972","source":"107972","inclusion_delay":"12164","inactivity":"0"},{"validator_index":"460","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"461","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"462","head":"57360","target":"57360","source":"57360","inclusion_delay":"3554","inactivity":"0"},{"validator_index":"463","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"464","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"validator_index":"465","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"validator_index":"466","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"467","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"468","head":"57360","target":"57360","source":"57360","inclusion_delay":"2369","inactivity":"0"},{"validator_index":"469","head":"97850","target":"97850","source":"97850","inclusion_delay":"4181","inactivity":"0"},{"validator_index":"470","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"471","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"472","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"validator_index":"473","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"validator_index":"474","head":"57360","target":"57360","source":"57360","inclusion_delay":"11847","inactivity":"0"},{"validator_index":"475","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"validator_index":"476","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-444444"},{"validator_index":"477","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"validator_index":"478","head":"57360","target":"57360","source":"57360","inclusion_delay":"2632","inactivity":"0"},{"validator_index":"479","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"480","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"481","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"validator_index":"482","head":"57360","target":"57360","source":"57360","inclusion_delay":"2632","inactivity":"0"},{"validator_index":"483","head":"107972
","target":"107972","source":"107972","inclusion_delay":"12164","inactivity":"0"},{"validator_index":"484","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"485","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"486","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"487","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"488","head":"107972","target":"107972","source":"107972","inclusion_delay":"4956","inactivity":"0"},{"validator_index":"489","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"validator_index":"490","head":"74230","target":"74230","source":"74230","inclusion_delay":"22998","inactivity":"0"},{"validator_index":"491","head":"107972","target":"107972","source":"107972","inclusion_delay":"11151","inactivity":"0"},{"validator_index":"492","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"validator_index":"493","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"494","head":"57360","target":"57360","source":"57360","inclusion_delay":"2293","inactivity":"0"},{"validator_index":"495","head":"57360","target":"57360","source":"57360","inclusion_delay":"3385","inactivity":"0"},{"validator_index":"496","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"497","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"498","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"499","head":"57360","target":"57360","source":"57360","inclusion_delay":"2221","inactivity":"0"},{"validator_index":"500","head":"107972","target":"107972","source":"107972","inclusion_delay":"6690","inactivity":"0"},{"validator_index":"501","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"validator_index":"502","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"503","head":"107972","target":"107972","source":"107972","inclusion_delay":"11151","inactivity":"0"},{"validator_index":"504","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"505","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"506","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"validator_index":"507","head":"107972","target":"107972","source":"107972","inclusion_delay":"4956","inactivity":"0"},{"validator_index":"508","head":"57360","target":"57360","source":"57360","inclusion_delay":"35543","inactivity":"0"},{"validator_index":"509","head":"57360","target":"57360","source":"57360","inclusion_delay":"5077","inactivity":"0"},{"validator_index":"510","head":"57360","target":"57360","source":"57360","inclusion_delay":"6462","inactivity":"0"},{"validator_index":"511","head":"107972","target":"107972","source":"107972","inclusion_delay":"5352","inactivity":"0"},{"validator_index":"512","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"513","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"514","head":"0","target":"0","source":"0","inclusion_delay":"0","ina
ctivity":"0"},{"validator_index":"515","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"516","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"517","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"518","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"519","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"520","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"521","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"522","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"523","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"524","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"525","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"526","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"527","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"}]}}` + "\n", // Add your expected response + }, + { + epoch: 99999999, + code: http.StatusNotFound, + }, + { + name: "2 validators", + epoch: (fcu.HeadSlotVal / 32) - 1, + request: `["1","4"]`, + code: http.StatusOK, + expected: `{"data":{"ideal_rewards":[{"effective_balance":"20000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"14217","inactivity":"0"}],"total_rewards":[{"validator_index":"1","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"4","head":"57360","target":"57360","source":"57360","inclusion_delay":"14217","inactivity":"0"}]}}` + "\n", // Add your expected response + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + server := httptest.NewServer(handler.mux) + defer server.Close() + url := fmt.Sprintf("%s/eth/v1/beacon/rewards/attestations/%d", server.URL, c.epoch) + + // Create a request + req, err := http.NewRequest("POST", url, strings.NewReader(c.request)) + require.NoError(t, err) + req.Header.Set("Content-Type", "application/json") + + // Perform the request + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + + // Check status code + require.Equal(t, c.code, resp.StatusCode) + + if resp.StatusCode != http.StatusOK { + return + } + + // Read the response body + out, err := io.ReadAll(resp.Body) + require.NoError(t, err) + // Compare the response with the expected result + require.Equal(t, c.expected, string(out)) + }) + } +} diff --git a/cl/beacon/handler/blocks.go b/cl/beacon/handler/blocks.go index cabe88addca..2035b7663b5 100644 --- a/cl/beacon/handler/blocks.go +++ b/cl/beacon/handler/blocks.go @@ -19,7 +19,7 @@ type headerResponse struct { } type getHeadersRequest struct { - Slot *uint64 `json:"slot,omitempty"` + Slot *uint64 `json:"slot,omitempty,string"` ParentRoot *libcommon.Hash `json:"root,omitempty"` } @@ -59,7 +59,7 @@ func (a *ApiHandler) rootFromBlockId(ctx context.Context, tx kv.Tx, blockId *seg return } -func (a *ApiHandler) getBlock(r *http.Request) (*beaconResponse, error) { +func (a 
*ApiHandler) getBlock(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) {
 	ctx := r.Context()
 	tx, err := a.indiciesDB.BeginRo(ctx)
 	if err != nil {
@@ -87,14 +87,53 @@ func (a *ApiHandler) getBlock(r *http.Request) (*beaconResponse, error) {
 	var canonicalRoot libcommon.Hash
 	canonicalRoot, err = beacon_indicies.ReadCanonicalBlockRoot(tx, blk.Block.Slot)
 	if err != nil {
-		return nil, beaconhttp.WrapEndpointError(err)
+		return nil, err
 	}
 	return newBeaconResponse(blk).
 		withFinalized(root == canonicalRoot && blk.Block.Slot <= a.forkchoiceStore.FinalizedSlot()).
 		withVersion(blk.Version()), nil
 }
 
-func (a *ApiHandler) getBlockAttestations(r *http.Request) (*beaconResponse, error) {
+func (a *ApiHandler) getBlindedBlock(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) {
+	ctx := r.Context()
+	tx, err := a.indiciesDB.BeginRo(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer tx.Rollback()
+
+	blockId, err := blockIdFromRequest(r)
+	if err != nil {
+		return nil, err
+	}
+	root, err := a.rootFromBlockId(ctx, tx, blockId)
+	if err != nil {
+		return nil, err
+	}
+
+	blk, err := a.blockReader.ReadBlockByRoot(ctx, tx, root)
+	if err != nil {
+		return nil, err
+	}
+	if blk == nil {
+		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("block not found %x", root))
+	}
+	// Check if the block is canonical
+	var canonicalRoot libcommon.Hash
+	canonicalRoot, err = beacon_indicies.ReadCanonicalBlockRoot(tx, blk.Block.Slot)
+	if err != nil {
+		return nil, err
+	}
+	blinded, err := blk.Blinded()
+	if err != nil {
+		return nil, err
+	}
+	return newBeaconResponse(blinded).
+		withFinalized(root == canonicalRoot && blk.Block.Slot <= a.forkchoiceStore.FinalizedSlot()).
+		withVersion(blk.Version()), nil
+}
+
+func (a *ApiHandler) getBlockAttestations(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) {
 	ctx := r.Context()
 	tx, err := a.indiciesDB.BeginRo(ctx)
 	if err != nil {
@@ -125,7 +164,7 @@ func (a *ApiHandler) getBlockAttestations(r *http.Request) (*beaconResponse, err
 		withVersion(blk.Version()), nil
 }
 
-func (a *ApiHandler) getBlockRoot(r *http.Request) (*beaconResponse, error) {
+func (a *ApiHandler) getBlockRoot(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) {
 	ctx := r.Context()
 	tx, err := a.indiciesDB.BeginRo(ctx)
 	if err != nil {
@@ -154,5 +193,7 @@ func (a *ApiHandler) getBlockRoot(r *http.Request) (*beaconResponse, error) {
 	if err != nil {
 		return nil, err
 	}
-	return newBeaconResponse(struct{ Root libcommon.Hash }{Root: root}).withFinalized(canonicalRoot == root && *slot <= a.forkchoiceStore.FinalizedSlot()), nil
+	return newBeaconResponse(struct {
+		Root libcommon.Hash `json:"root"`
+	}{Root: root}).withFinalized(canonicalRoot == root && *slot <= a.forkchoiceStore.FinalizedSlot()), nil
 }
diff --git a/cl/beacon/handler/blocks_test.go b/cl/beacon/handler/blocks_test.go
new file mode 100644
index 00000000000..2889e570197
--- /dev/null
+++ b/cl/beacon/handler/blocks_test.go
@@ -0,0 +1,257 @@
+package handler
+
+import (
+	"encoding/json"
+	"net/http"
+	"net/http/httptest"
+	"strconv"
+	"testing"
+
+	"github.com/ledgerwatch/erigon/cl/clparams"
+	"github.com/ledgerwatch/erigon/common"
+	"github.com/stretchr/testify/require"
+)
+
+func TestGetBlindedBlock(t *testing.T) {
+	_, blocks, _, _, _, handler, _, _, fcu := setupTestingHandler(t, clparams.Phase0Version)
+
+	// compute the root of the first block
+	rootBlock1, err := blocks[0].Block.HashSSZ()
+	require.NoError(t, err)
+
+	fcu.HeadVal, err = blocks[len(blocks)-1].Block.HashSSZ()
+	require.NoError(t, err)
+
+	fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot
+
+	cases := []struct {
+		blockID string
+		code    int
+		slot    uint64
+	}{
+		{
+			blockID: "0x" + common.Bytes2Hex(rootBlock1[:]),
+			code:    http.StatusOK,
+			slot:    blocks[0].Block.Slot,
+		},
+		{
+			blockID: "head",
+			code:    http.StatusOK,
+			slot:    blocks[len(blocks)-1].Block.Slot,
+		},
+		{
+			blockID: "0x" + common.Bytes2Hex(make([]byte, 32)),
+			code:    http.StatusNotFound,
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.blockID, func(t *testing.T) {
+			server := httptest.NewServer(handler.mux)
+			defer server.Close()
+			// Query the blinded block via /eth/v1/beacon/blinded_blocks/{block_id}
+			resp, err := http.Get(server.URL + "/eth/v1/beacon/blinded_blocks/" + c.blockID)
+			require.NoError(t, err)
+			defer resp.Body.Close()
+			require.Equal(t, c.code, resp.StatusCode)
+			if resp.StatusCode != http.StatusOK {
+				return
+			}
+			jsonVal := make(map[string]interface{})
+			// unmarshal the JSON body
+			require.NoError(t, json.NewDecoder(resp.Body).Decode(&jsonVal))
+			data := jsonVal["data"].(map[string]interface{})
+			message := data["message"].(map[string]interface{})
+
+			// compare the slot of the returned block
+			require.Equal(t, strconv.FormatInt(int64(c.slot), 10), message["slot"])
+		})
+	}
+}
+
+func TestGetBlockBlinded(t *testing.T) {
+	_, blocks, _, _, _, handler, _, _, fcu := setupTestingHandler(t, clparams.Phase0Version)
+
+	// compute the root of the first block
+	rootBlock1, err := blocks[0].Block.HashSSZ()
+	require.NoError(t, err)
+
+	fcu.HeadVal, err = blocks[len(blocks)-1].Block.HashSSZ()
+	require.NoError(t, err)
+
+	fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot
+
+	cases := []struct {
+		blockID string
+		code    int
+		slot    uint64
+	}{
+		{
+			blockID: "0x" + common.Bytes2Hex(rootBlock1[:]),
+			code:    http.StatusOK,
+			slot:    blocks[0].Block.Slot,
+		},
+		{
+			blockID: "head",
+			code:    http.StatusOK,
+			slot:    blocks[len(blocks)-1].Block.Slot,
+		},
+		{
+			blockID: "0x" + common.Bytes2Hex(make([]byte, 32)),
+			code:    http.StatusNotFound,
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.blockID, func(t *testing.T) {
+			server := httptest.NewServer(handler.mux)
+			defer server.Close()
+			// Query the block via /eth/v2/beacon/blocks/{block_id}
+			resp, err := http.Get(server.URL + "/eth/v2/beacon/blocks/" + c.blockID)
+			require.NoError(t, err)
+			defer resp.Body.Close()
+
+			require.Equal(t, c.code, resp.StatusCode)
+			if resp.StatusCode != http.StatusOK {
+				return
+			}
+			jsonVal := make(map[string]interface{})
+			// unmarshal the JSON body
+			require.NoError(t, json.NewDecoder(resp.Body).Decode(&jsonVal))
+			data := jsonVal["data"].(map[string]interface{})
+			message := data["message"].(map[string]interface{})
+
+			// compare the slot of the returned block
+			require.Equal(t, strconv.FormatInt(int64(c.slot), 10), message["slot"])
+		})
+	}
+}
+
+func TestGetBlockAttestations(t *testing.T) {
+	_, blocks, _, _, _, handler, _, _, fcu := setupTestingHandler(t, clparams.Phase0Version)
+
+	// compute the root of the first block
+	rootBlock1, err := blocks[0].Block.HashSSZ()
+	require.NoError(t, err)
+
+	fcu.HeadVal, err = blocks[len(blocks)-1].Block.HashSSZ()
+	require.NoError(t, err)
+
+	fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot
+
+	cases := []struct {
+		blockID string
+		code    int
+		attLen  int
+	}{
+		{
+			blockID: "0x" + common.Bytes2Hex(rootBlock1[:]),
+			code:    http.StatusOK,
+			attLen:  blocks[0].Block.Body.Attestations.Len(),
+		},
+		{
+			blockID: "head",
+			code:    http.StatusOK,
+			attLen:  blocks[len(blocks)-1].Block.Body.Attestations.Len(),
+		},
+		{
+			blockID: "0x" + common.Bytes2Hex(make([]byte, 32)),
+			code:    http.StatusNotFound,
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.blockID, func(t *testing.T) {
+			server := httptest.NewServer(handler.mux)
+			defer server.Close()
+			// Query the attestations via /eth/v1/beacon/blocks/{block_id}/attestations
+			resp, err := http.Get(server.URL + "/eth/v1/beacon/blocks/" + c.blockID + "/attestations")
+			require.NoError(t, err)
+			defer resp.Body.Close()
+
+			require.Equal(t, c.code, resp.StatusCode)
+			if resp.StatusCode != http.StatusOK {
+				return
+			}
+			jsonVal := make(map[string]interface{})
+			// unmarshal the JSON body
+			require.NoError(t, json.NewDecoder(resp.Body).Decode(&jsonVal))
+			data := jsonVal["data"].([]interface{})
+			require.Equal(t, c.attLen, len(data))
+		})
+	}
+}
+
+func TestGetBlockRoot(t *testing.T) {
+	_, blocks, _, _, _, handler, _, _, fcu := setupTestingHandler(t, clparams.Phase0Version)
+
+	var err error
+
+	fcu.HeadVal, err = blocks[len(blocks)-1].Block.HashSSZ()
+	require.NoError(t, err)
+
+	fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot
+	// compute the roots of the first and the last block
+	blk0Root, err := blocks[0].Block.HashSSZ()
+	require.NoError(t, err)
+
+	blkLastRoot, err := blocks[len(blocks)-1].Block.HashSSZ()
+	require.NoError(t, err)
+
+	cases := []struct {
+		blockID string
+		code    int
+		root    string
+	}{
+		{
+			blockID: strconv.FormatInt(int64(blocks[0].Block.Slot), 10),
+			code:    http.StatusOK,
+			root:    "0x" + common.Bytes2Hex(blk0Root[:]),
+		},
+		{
+			blockID: "head",
+			code:    http.StatusOK,
+			root:    "0x" + common.Bytes2Hex(blkLastRoot[:]),
+		},
+		{
+			blockID: "19912929",
+			code:    http.StatusNotFound,
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.blockID, func(t *testing.T) {
+			server := httptest.NewServer(handler.mux)
+			defer server.Close()
+			// Query the canonical root via /eth/v1/beacon/blocks/{block_id}/root
+			resp, err := http.Get(server.URL + "/eth/v1/beacon/blocks/" + c.blockID + "/root")
+			require.NoError(t, err)
+			defer resp.Body.Close()
+
+			require.Equal(t, c.code, resp.StatusCode)
+			if resp.StatusCode != http.StatusOK {
+				return
+			}
+			jsonVal := make(map[string]interface{})
+			// unmarshal the JSON body
+			require.NoError(t, json.NewDecoder(resp.Body).Decode(&jsonVal))
+			data := jsonVal["data"].(map[string]interface{})
+			root := data["root"].(string)
+			require.Equal(t, c.root, root)
+		})
+	}
+}
diff --git a/cl/beacon/handler/commitees_test.go b/cl/beacon/handler/commitees_test.go
new file mode 100644
index 00000000000..f3cff88891f
--- /dev/null
+++ b/cl/beacon/handler/commitees_test.go
@@ -0,0 +1,167 @@
+package handler
+
+import (
+	"io"
+	"math"
+	"net/http"
+	"net/http/httptest"
+	"strconv"
+	"testing"
+
+	"github.com/ledgerwatch/erigon/cl/clparams"
+	"github.com/ledgerwatch/erigon/cl/cltypes/solid"
+	"github.com/ledgerwatch/erigon/common"
+	"github.com/stretchr/testify/require"
+)
+
+func TestGetCommitteesAntiquated(t *testing.T) {
+	_, blocks, _, _, postState, handler, _, _, fcu := setupTestingHandler(t, clparams.Phase0Version)
+
+	postRoot, err := postState.HashSSZ()
+	require.NoError(t, err)
+
+	fcu.HeadVal, err = blocks[len(blocks)-1].Block.HashSSZ()
+	require.NoError(t, err)
+
+	fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot
+
+	fcu.FinalizedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, fcu.HeadSlotVal/32)
+	fcu.FinalizedSlotVal = math.MaxUint64
+
+	fcu.StateAtBlockRootVal[fcu.HeadVal] = postState
+
+	cases := []struct {
+		name     string
+		blockID  string
+		code     int
+		query    string
+		expected string
+	}{
+		{
+			name:     "slot",
+			blockID:  "0x" + common.Bytes2Hex(postRoot[:]),
+			code:     http.StatusOK,
+			query:    "?slot=" + strconv.FormatUint(fcu.HeadSlotVal, 10),
+			expected: `{"data":[{"index":"0","slot":"8322","validators":["0","104","491","501","379","318","275","504","75","280","105","399","35","401"]}],"finalized":true,"execution_optimistic":false}` + "\n",
+		},
+		{
+			name:     "empty-index",
+			blockID:  "0x" + common.Bytes2Hex(postRoot[:]),
+			code:     http.StatusOK,
+			query:    "?index=1",
+			expected: `{"data":[],"finalized":true,"execution_optimistic":false}` + "\n",
+		},
+		{
+			name:     "all-queries",
+			blockID:  "0x" + common.Bytes2Hex(postRoot[:]),
+			code:     http.StatusOK,
+			query:    "?index=0&slot=" + strconv.FormatUint(fcu.HeadSlotVal-32, 10) + "&epoch=" + strconv.FormatUint((fcu.HeadSlotVal/32)-1, 10),
+			expected: `{"data":[{"index":"0","slot":"8290","validators":["127","377","274","85","309","420","423","398","153","480","273","429","374","260"]}],"finalized":true,"execution_optimistic":false}` + "\n",
+		},
+		{
+			blockID: "0x" + common.Bytes2Hex(make([]byte, 32)),
+			code:    http.StatusNotFound,
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			server := httptest.NewServer(handler.mux)
+			defer server.Close()
+			// GET /eth/v1/beacon/states/{state_id}/committees with the given query
+			req, err := http.NewRequest("GET", server.URL+"/eth/v1/beacon/states/"+c.blockID+"/committees"+c.query, nil)
+			require.NoError(t, err)
+
+			resp, err := http.DefaultClient.Do(req)
+			require.NoError(t, err)
+
+			defer resp.Body.Close()
+			require.Equal(t, c.code, resp.StatusCode)
+			if resp.StatusCode != http.StatusOK {
+				return
+			}
+			// read the full response body and compare it with the expectation
+			out, err := io.ReadAll(resp.Body)
+			require.NoError(t, err)
+			require.Equal(t, c.expected, string(out))
+		})
+	}
+}
+
+func TestGetCommitteesNonAntiquated(t *testing.T) {
+	_, blocks, _, _, postState, handler, _, sm, fcu := setupTestingHandler(t, clparams.Phase0Version)
+
+	postRoot, err := postState.HashSSZ()
+	require.NoError(t, err)
+
+	fcu.HeadVal, err = blocks[len(blocks)-1].Block.HashSSZ()
+	require.NoError(t, err)
+
+	fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot
+
+	fcu.FinalizedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, fcu.HeadSlotVal/32)
+	fcu.FinalizedSlotVal = 0
+
+	fcu.StateAtBlockRootVal[fcu.HeadVal] = postState
+	require.NoError(t, sm.OnHeadState(postState))
+	cases := []struct {
+		name     string
+		blockID  string
+		code     int
+		query    string
+		expected string
+	}{
+		{
+			name:     "slot",
+			blockID:  "0x" + common.Bytes2Hex(postRoot[:]),
+			code:     http.StatusOK,
+			query:    "?slot=" + strconv.FormatUint(fcu.HeadSlotVal, 10),
+			expected: `{"data":[{"index":"0","slot":"8322","validators":["0","104","491","501","379","318","275","504","75","280","105","399","35","401"]}],"finalized":false,"execution_optimistic":false}` + "\n",
+		},
+		{
+			name:     "empty-index",
+			blockID:  "0x" + common.Bytes2Hex(postRoot[:]),
+			code:     http.StatusOK,
+			query:    "?index=1",
+			expected: `{"data":[],"finalized":false,"execution_optimistic":false}` + "\n",
+		},
+		{
+			name:     "all-queries",
+			blockID:  "0x" + common.Bytes2Hex(postRoot[:]),
+			code:     http.StatusOK,
+			query:    "?index=0&slot=" + strconv.FormatUint(fcu.HeadSlotVal-32, 10) + "&epoch=" + strconv.FormatUint((fcu.HeadSlotVal/32)-1, 10),
+			expected: `{"data":[{"index":"0","slot":"8290","validators":["127","377","274","85","309","420","423","398","153","480","273","429","374","260"]}],"finalized":false,"execution_optimistic":false}` + "\n",
+		},
+		{
+			blockID: "0x" + common.Bytes2Hex(make([]byte, 32)),
+			code:    http.StatusNotFound,
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			server := httptest.NewServer(handler.mux)
+			defer server.Close()
+			// GET /eth/v1/beacon/states/{state_id}/committees with the given query
+			req, err := http.NewRequest("GET", server.URL+"/eth/v1/beacon/states/"+c.blockID+"/committees"+c.query, nil)
+			require.NoError(t, err)
+
+			resp, err := http.DefaultClient.Do(req)
+			require.NoError(t, err)
+
+			defer resp.Body.Close()
+			require.Equal(t, c.code, resp.StatusCode)
+			if resp.StatusCode != http.StatusOK {
+				return
+			}
+			// read the full response body and compare it with the expectation
+			out, err := io.ReadAll(resp.Body)
+			require.NoError(t, err)
+			require.Equal(t, c.expected, string(out))
+		})
+	}
+}
diff --git a/cl/beacon/handler/committees.go b/cl/beacon/handler/committees.go
new file mode 100644
index 00000000000..0ab70fc29cd
--- /dev/null
+++ b/cl/beacon/handler/committees.go
@@ -0,0 +1,147 @@
+package handler
+
+import (
+	"fmt"
+	"net/http"
+	"strconv"
+
+	"github.com/ledgerwatch/erigon/cl/beacon/beaconhttp"
+	"github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies"
+	state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state"
+	"github.com/ledgerwatch/erigon/cl/phase1/core/state"
+)
+
+type committeeResponse struct {
+	Index      uint64   `json:"index,string"`
+	Slot       uint64   `json:"slot,string"`
+	Validators []string `json:"validators"` // base-10 validator indices, encoded directly as strings
+}
+
+func (a *ApiHandler) getCommittees(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) {
+	ctx := r.Context()
+
+	epochReq, err := uint64FromQueryParams(r, "epoch")
+	if err != nil {
+		return nil, err
+	}
+
+	index, err := uint64FromQueryParams(r, "index")
+	if err != nil {
+		return nil, err
+	}
+
+	slotFilter, err := uint64FromQueryParams(r, "slot")
+	if err != nil {
+		return nil, err
+	}
+
+	tx, err := a.indiciesDB.BeginRo(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer tx.Rollback()
+	blockId, err := stateIdFromRequest(r)
+	if err != nil {
+		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+	}
+
+	blockRoot, httpStatus, err := a.blockRootFromStateId(ctx, tx, blockId)
+	if err != nil {
+		return nil, beaconhttp.NewEndpointError(httpStatus, err.Error())
+	}
+
+	slotPtr, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, blockRoot)
+	if err != nil {
+		return nil, err
+	}
+	if slotPtr == nil {
+		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not read block slot: %x", blockRoot))
+	}
+	slot := *slotPtr
+	epoch := slot / a.beaconChainCfg.SlotsPerEpoch
+	if epochReq != nil {
+		epoch = *epochReq
+	}
+	// reject a slot filter that falls outside the requested epoch
+	if slotFilter != nil && !(epoch*a.beaconChainCfg.SlotsPerEpoch <= *slotFilter && *slotFilter < (epoch+1)*a.beaconChainCfg.SlotsPerEpoch) {
+		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Sprintf("slot %d is not in epoch %d", *slotFilter, epoch))
+	}
+	resp := make([]*committeeResponse, 0, a.beaconChainCfg.SlotsPerEpoch*a.beaconChainCfg.MaxCommitteesPerSlot)
+	isFinalized := slot <= a.forkchoiceStore.FinalizedSlot()
+	if a.forkchoiceStore.LowestAvaiableSlot() <= slot {
+		// non-finality case: the slot is still tracked by fork choice, so compute from the head state
+		s, cn := a.syncedData.HeadState()
+		defer 
cn() + if s == nil { + return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, "node is syncing") + } + if epoch > state.Epoch(s)+1 { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Sprintf("epoch %d is too far in the future", epoch)) + } + // get active validator indicies + committeeCount := s.CommitteeCount(epoch) + // now start obtaining the committees from the head state + for currSlot := epoch * a.beaconChainCfg.SlotsPerEpoch; currSlot < (epoch+1)*a.beaconChainCfg.SlotsPerEpoch; currSlot++ { + if slotFilter != nil && currSlot != *slotFilter { + continue + } + for committeeIndex := uint64(0); committeeIndex < committeeCount; committeeIndex++ { + if index != nil && committeeIndex != *index { + continue + } + data := &committeeResponse{Index: committeeIndex, Slot: currSlot} + idxs, err := s.GetBeaconCommitee(currSlot, committeeIndex) + if err != nil { + return nil, err + } + for _, idx := range idxs { + data.Validators = append(data.Validators, strconv.FormatUint(idx, 10)) + } + resp = append(resp, data) + } + } + return newBeaconResponse(resp).withFinalized(isFinalized), nil + } + // finality case + activeIdxs, err := state_accessors.ReadActiveIndicies(tx, epoch*a.beaconChainCfg.SlotsPerEpoch) + if err != nil { + return nil, err + } + + committeesPerSlot := uint64(len(activeIdxs)) / a.beaconChainCfg.SlotsPerEpoch / a.beaconChainCfg.TargetCommitteeSize + if a.beaconChainCfg.MaxCommitteesPerSlot < committeesPerSlot { + committeesPerSlot = a.beaconChainCfg.MaxCommitteesPerSlot + } + if committeesPerSlot < 1 { + committeesPerSlot = 1 + } + + mixPosition := (epoch + a.beaconChainCfg.EpochsPerHistoricalVector - a.beaconChainCfg.MinSeedLookahead - 1) % a.beaconChainCfg.EpochsPerHistoricalVector + mix, err := a.stateReader.ReadRandaoMixBySlotAndIndex(tx, epoch*a.beaconChainCfg.SlotsPerEpoch, mixPosition) + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not read randao mix: %v", err)) + } + + for currSlot := epoch * a.beaconChainCfg.SlotsPerEpoch; currSlot < (epoch+1)*a.beaconChainCfg.SlotsPerEpoch; currSlot++ { + if slotFilter != nil && currSlot != *slotFilter { + continue + } + for committeeIndex := uint64(0); committeeIndex < committeesPerSlot; committeeIndex++ { + if index != nil && committeeIndex != *index { + continue + } + data := &committeeResponse{Index: committeeIndex, Slot: currSlot} + index := (currSlot%a.beaconChainCfg.SlotsPerEpoch)*committeesPerSlot + committeeIndex + committeeCount := committeesPerSlot * a.beaconChainCfg.SlotsPerEpoch + idxs, err := a.stateReader.ComputeCommittee(mix, activeIdxs, currSlot, committeeCount, index) + if err != nil { + return nil, err + } + for _, idx := range idxs { + data.Validators = append(data.Validators, strconv.FormatUint(idx, 10)) + } + resp = append(resp, data) + } + } + return newBeaconResponse(resp).withFinalized(isFinalized), nil +} diff --git a/cl/beacon/handler/config.go b/cl/beacon/handler/config.go index b0e8972c2d8..cc37c8a0150 100644 --- a/cl/beacon/handler/config.go +++ b/cl/beacon/handler/config.go @@ -9,20 +9,19 @@ import ( "github.com/ledgerwatch/erigon/cl/cltypes" ) -func (a *ApiHandler) getSpec(r *http.Request) (*beaconResponse, error) { +func (a *ApiHandler) getSpec(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { return newBeaconResponse(a.beaconChainCfg), nil } -func (a *ApiHandler) getDepositContract(r *http.Request) (*beaconResponse, error) { - +func (a *ApiHandler) getDepositContract(w http.ResponseWriter, r 
*http.Request) (*beaconResponse, error) { return newBeaconResponse(struct { - ChainId uint64 `json:"chain_id"` + ChainId uint64 `json:"chain_id,string"` DepositContract string `json:"address"` }{ChainId: a.beaconChainCfg.DepositChainID, DepositContract: a.beaconChainCfg.DepositContractAddress}), nil } -func (a *ApiHandler) getForkSchedule(r *http.Request) (*beaconResponse, error) { +func (a *ApiHandler) getForkSchedule(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { response := []cltypes.Fork{} // create first response (unordered and incomplete) for currentVersion, epoch := range a.beaconChainCfg.ForkVersionSchedule { diff --git a/cl/beacon/handler/config_test.go b/cl/beacon/handler/config_test.go new file mode 100644 index 00000000000..d97349a94e0 --- /dev/null +++ b/cl/beacon/handler/config_test.go @@ -0,0 +1,81 @@ +package handler + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/stretchr/testify/require" +) + +func TestGetSpec(t *testing.T) { + + // setupTestingHandler(t, clparams.Phase0Version) + _, _, _, _, _, handler, _, _, _ := setupTestingHandler(t, clparams.Phase0Version) + + server := httptest.NewServer(handler.mux) + defer server.Close() + + resp, err := http.Get(server.URL + "/eth/v1/config/spec") + require.NoError(t, err) + defer resp.Body.Close() + + require.Equal(t, http.StatusOK, resp.StatusCode) + out := make(map[string]interface{}) + err = json.NewDecoder(resp.Body).Decode(&out) + require.NoError(t, err) + + data := out["data"].(map[string]interface{}) + require.Equal(t, data["SlotsPerEpoch"], float64(32)) + require.Equal(t, data["SlotsPerHistoricalRoot"], float64(8192)) +} + +func TestGetForkSchedule(t *testing.T) { + + // setupTestingHandler(t, clparams.Phase0Version) + _, _, _, _, _, handler, _, _, _ := setupTestingHandler(t, clparams.Phase0Version) + + server := httptest.NewServer(handler.mux) + defer server.Close() + + resp, err := http.Get(server.URL + "/eth/v1/config/fork_schedule") + require.NoError(t, err) + defer resp.Body.Close() + + require.Equal(t, http.StatusOK, resp.StatusCode) + out := make(map[string]interface{}) + err = json.NewDecoder(resp.Body).Decode(&out) + require.NoError(t, err) + + require.Greater(t, len(out["data"].([]interface{})), 2) + for _, v := range out["data"].([]interface{}) { + data := v.(map[string]interface{}) + require.NotNil(t, data["current_version"]) + require.NotNil(t, data["epoch"]) + require.NotNil(t, data["previous_version"]) + } +} + +func TestGetDepositContract(t *testing.T) { + + // setupTestingHandler(t, clparams.Phase0Version) + _, _, _, _, _, handler, _, _, _ := setupTestingHandler(t, clparams.Phase0Version) + + server := httptest.NewServer(handler.mux) + defer server.Close() + + resp, err := http.Get(server.URL + "/eth/v1/config/deposit_contract") + require.NoError(t, err) + defer resp.Body.Close() + + require.Equal(t, http.StatusOK, resp.StatusCode) + out := make(map[string]interface{}) + err = json.NewDecoder(resp.Body).Decode(&out) + require.NoError(t, err) + + data := out["data"].(map[string]interface{}) + require.Equal(t, data["address"], "0x00000000219ab540356cBB839Cbe05303d7705Fa") + require.Equal(t, data["chain_id"], "1") +} diff --git a/cl/beacon/handler/duties_attester.go b/cl/beacon/handler/duties_attester.go new file mode 100644 index 00000000000..a9f1045d0f9 --- /dev/null +++ b/cl/beacon/handler/duties_attester.go @@ -0,0 +1,163 @@ +package handler + +import ( + "encoding/json" + "fmt" + "net/http" + 
"strconv" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" + state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state" + "github.com/ledgerwatch/erigon/cl/phase1/core/state" +) + +type attesterDutyResponse struct { + Pubkey libcommon.Bytes48 `json:"pubkey"` + ValidatorIndex uint64 `json:"validator_index,string"` + CommitteeIndex uint64 `json:"committee_index,string"` + CommitteeLength uint64 `json:"committee_length,string"` + ValidatorCommitteeIndex uint64 `json:"validator_committee_index,string"` + CommitteesAtSlot uint64 `json:"committees_at_slot,string"` + Slot uint64 `json:"slot,string"` +} + +func (a *ApiHandler) getAttesterDuties(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { + epoch, err := epochFromRequest(r) + if err != nil { + return nil, err + } + + var idxsStr []string + if err := json.NewDecoder(r.Body).Decode(&idxsStr); err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("could not decode request body: %w. request body is required", err).Error()) + } + if len(idxsStr) == 0 { + return newBeaconResponse([]string{}).withOptimistic(false), nil + } + idxSet := map[int]struct{}{} + // convert the request to uint64 + for _, idxStr := range idxsStr { + + idx, err := strconv.ParseUint(idxStr, 10, 64) + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("could not parse validator index: %w", err).Error()) + } + if _, ok := idxSet[int(idx)]; ok { + continue + } + idxSet[int(idx)] = struct{}{} + } + + tx, err := a.indiciesDB.BeginRo(r.Context()) + if err != nil { + return nil, err + } + defer tx.Rollback() + + resp := []attesterDutyResponse{} + + // get the duties + if a.forkchoiceStore.LowestAvaiableSlot() <= epoch*a.beaconChainCfg.SlotsPerEpoch { + // non-finality case + s, cn := a.syncedData.HeadState() + defer cn() + if s == nil { + return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, "node is syncing") + } + + if epoch > state.Epoch(s)+1 { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Sprintf("epoch %d is too far in the future", epoch)) + } + + // get active validator indicies + committeeCount := s.CommitteeCount(epoch) + // now start obtaining the committees from the head state + for currSlot := epoch * a.beaconChainCfg.SlotsPerEpoch; currSlot < (epoch+1)*a.beaconChainCfg.SlotsPerEpoch; currSlot++ { + for committeeIndex := uint64(0); committeeIndex < committeeCount; committeeIndex++ { + idxs, err := s.GetBeaconCommitee(currSlot, committeeIndex) + if err != nil { + return nil, err + } + for vIdx, idx := range idxs { + if _, ok := idxSet[int(idx)]; !ok { + continue + } + publicKey, err := s.ValidatorPublicKey(int(idx)) + if err != nil { + return nil, err + } + duty := attesterDutyResponse{ + Pubkey: publicKey, + ValidatorIndex: idx, + CommitteeIndex: committeeIndex, + CommitteeLength: uint64(len(idxs)), + ValidatorCommitteeIndex: uint64(vIdx), + CommitteesAtSlot: committeeCount, + Slot: currSlot, + } + resp = append(resp, duty) + } + } + } + return newBeaconResponse(resp).withOptimistic(false), nil + } + + stageStateProgress, err := state_accessors.GetStateProcessingProgress(tx) + if err != nil { + return nil, err + } + if (epoch)*a.beaconChainCfg.SlotsPerEpoch >= stageStateProgress { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Sprintf("epoch %d is too far in the future", epoch)) + } + // finality case + activeIdxs, err := 
state_accessors.ReadActiveIndicies(tx, epoch*a.beaconChainCfg.SlotsPerEpoch) + if err != nil { + return nil, err + } + + committeesPerSlot := uint64(len(activeIdxs)) / a.beaconChainCfg.SlotsPerEpoch / a.beaconChainCfg.TargetCommitteeSize + if a.beaconChainCfg.MaxCommitteesPerSlot < committeesPerSlot { + committeesPerSlot = a.beaconChainCfg.MaxCommitteesPerSlot + } + if committeesPerSlot < 1 { + committeesPerSlot = 1 + } + + mixPosition := (epoch + a.beaconChainCfg.EpochsPerHistoricalVector - a.beaconChainCfg.MinSeedLookahead - 1) % a.beaconChainCfg.EpochsPerHistoricalVector + mix, err := a.stateReader.ReadRandaoMixBySlotAndIndex(tx, epoch*a.beaconChainCfg.SlotsPerEpoch, mixPosition) + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not read randao mix: %v", err)) + } + + for currSlot := epoch * a.beaconChainCfg.SlotsPerEpoch; currSlot < (epoch+1)*a.beaconChainCfg.SlotsPerEpoch; currSlot++ { + for committeeIndex := uint64(0); committeeIndex < committeesPerSlot; committeeIndex++ { + index := (currSlot%a.beaconChainCfg.SlotsPerEpoch)*committeesPerSlot + committeeIndex + committeeCount := committeesPerSlot * a.beaconChainCfg.SlotsPerEpoch + idxs, err := a.stateReader.ComputeCommittee(mix, activeIdxs, currSlot, committeeCount, index) + if err != nil { + return nil, err + } + for vIdx, idx := range idxs { + if _, ok := idxSet[int(idx)]; !ok { + continue + } + publicKey, err := state_accessors.ReadPublicKeyByIndex(tx, idx) + if err != nil { + return nil, err + } + duty := attesterDutyResponse{ + Pubkey: publicKey, + ValidatorIndex: idx, + CommitteeIndex: committeeIndex, + CommitteeLength: uint64(len(idxs)), + ValidatorCommitteeIndex: uint64(vIdx), + CommitteesAtSlot: committeesPerSlot, + Slot: currSlot, + } + resp = append(resp, duty) + } + } + } + return newBeaconResponse(resp).withOptimistic(false), nil +} diff --git a/cl/beacon/handler/duties_attester_test.go b/cl/beacon/handler/duties_attester_test.go new file mode 100644 index 00000000000..6014096cc2a --- /dev/null +++ b/cl/beacon/handler/duties_attester_test.go @@ -0,0 +1,151 @@ +package handler + +import ( + "bytes" + "io" + "math" + "net/http" + "net/http/httptest" + "strconv" + "testing" + + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/stretchr/testify/require" +) + +func TestDutiesAttesterAntiquated(t *testing.T) { + + // setupTestingHandler(t, clparams.Phase0Version) + _, blocks, _, _, postState, handler, _, _, fcu := setupTestingHandler(t, clparams.Phase0Version) + + fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot + + fcu.FinalizedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, fcu.HeadSlotVal/32) + fcu.FinalizedSlotVal = math.MaxUint64 + + fcu.StateAtBlockRootVal[fcu.HeadVal] = postState + + cases := []struct { + name string + epoch string + code int + reqBody string + expected string + }{ + { + name: "non-empty-indicies", + epoch: strconv.FormatUint(fcu.HeadSlotVal/32, 10), + code: http.StatusOK, + reqBody: `["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]`, + expected: 
`{"data":[{"pubkey":"0x97f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb","validator_index":"0","committee_index":"0","committee_length":"14","validator_committee_index":"0","committees_at_slot":"1","slot":"8322"},{"pubkey":"0xb0e7791fb972fe014159aa33a98622da3cdc98ff707965e536d8636b5fcc5ac7a91a8c46e59a00dca575af0f18fb13dc","validator_index":"4","committee_index":"0","committee_length":"13","validator_committee_index":"5","committees_at_slot":"1","slot":"8327"},{"pubkey":"0xb928f3beb93519eecf0145da903b40a4c97dca00b21f12ac0df3be9116ef2ef27b2ae6bcd4c5bc2d54ef5a70627efcb7","validator_index":"6","committee_index":"0","committee_length":"13","validator_committee_index":"10","committees_at_slot":"1","slot":"8327"},{"pubkey":"0xa6e82f6da4520f85c5d27d8f329eccfa05944fd1096b20734c894966d12a9e2a9a9744529d7212d33883113a0cadb909","validator_index":"5","committee_index":"0","committee_length":"14","validator_committee_index":"10","committees_at_slot":"1","slot":"8329"},{"pubkey":"0x89ece308f9d1f0131765212deca99697b112d61f9be9a5f1f3780a51335b3ff981747a0b2ca2179b96d2c0c9024e5224","validator_index":"2","committee_index":"0","committee_length":"14","validator_committee_index":"11","committees_at_slot":"1","slot":"8331"},{"pubkey":"0xaf81da25ecf1c84b577fefbedd61077a81dc43b00304015b2b596ab67f00e41c86bb00ebd0f90d4b125eb0539891aeed","validator_index":"9","committee_index":"0","committee_length":"14","validator_committee_index":"8","committees_at_slot":"1","slot":"8342"},{"pubkey":"0xac9b60d5afcbd5663a8a44b7c5a02f19e9a77ab0a35bd65809bb5c67ec582c897feb04decc694b13e08587f3ff9b5b60","validator_index":"3","committee_index":"0","committee_length":"13","validator_committee_index":"6","committees_at_slot":"1","slot":"8348"}],"execution_optimistic":false}` + "\n", + }, + { + name: "empty-index", + epoch: strconv.FormatUint(fcu.HeadSlotVal/32, 10), + code: http.StatusOK, + reqBody: `[]`, + expected: `{"data":[],"execution_optimistic":false}` + "\n", + }, + { + name: "404", + reqBody: `["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]`, + epoch: `999999999`, + code: http.StatusBadRequest, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + server := httptest.NewServer(handler.mux) + defer server.Close() + // + body := bytes.Buffer{} + body.WriteString(c.reqBody) + // Query the block in the handler with /eth/v2/beacon/states/{block_id} with content-type octet-stream + req, err := http.NewRequest("POST", server.URL+"/eth/v1/validator/duties/attester/"+c.epoch, &body) + require.NoError(t, err) + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + + defer resp.Body.Close() + require.Equal(t, c.code, resp.StatusCode) + if resp.StatusCode != http.StatusOK { + return + } + // read the all of the octect + out, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, c.expected, string(out)) + }) + } +} + +func TestDutiesAttesterNonAntiquated(t *testing.T) { + + // setupTestingHandler(t, clparams.Phase0Version) + _, blocks, _, _, postState, handler, _, sm, fcu := setupTestingHandler(t, clparams.Phase0Version) + + fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot + + fcu.FinalizedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, fcu.HeadSlotVal/32) + fcu.FinalizedSlotVal = 0 + + fcu.StateAtBlockRootVal[fcu.HeadVal] = postState + require.NoError(t, sm.OnHeadState(postState)) + cases := []struct { + name string + epoch string + code int + reqBody string + expected string + }{ + { + name: "non-empty-indicies", + 
epoch: strconv.FormatUint(fcu.HeadSlotVal/32, 10), + code: http.StatusOK, + reqBody: `["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]`, + expected: `{"data":[{"pubkey":"0x97f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb","validator_index":"0","committee_index":"0","committee_length":"14","validator_committee_index":"0","committees_at_slot":"1","slot":"8322"},{"pubkey":"0xb0e7791fb972fe014159aa33a98622da3cdc98ff707965e536d8636b5fcc5ac7a91a8c46e59a00dca575af0f18fb13dc","validator_index":"4","committee_index":"0","committee_length":"13","validator_committee_index":"5","committees_at_slot":"1","slot":"8327"},{"pubkey":"0xb928f3beb93519eecf0145da903b40a4c97dca00b21f12ac0df3be9116ef2ef27b2ae6bcd4c5bc2d54ef5a70627efcb7","validator_index":"6","committee_index":"0","committee_length":"13","validator_committee_index":"10","committees_at_slot":"1","slot":"8327"},{"pubkey":"0xa6e82f6da4520f85c5d27d8f329eccfa05944fd1096b20734c894966d12a9e2a9a9744529d7212d33883113a0cadb909","validator_index":"5","committee_index":"0","committee_length":"14","validator_committee_index":"10","committees_at_slot":"1","slot":"8329"},{"pubkey":"0x89ece308f9d1f0131765212deca99697b112d61f9be9a5f1f3780a51335b3ff981747a0b2ca2179b96d2c0c9024e5224","validator_index":"2","committee_index":"0","committee_length":"14","validator_committee_index":"11","committees_at_slot":"1","slot":"8331"},{"pubkey":"0xaf81da25ecf1c84b577fefbedd61077a81dc43b00304015b2b596ab67f00e41c86bb00ebd0f90d4b125eb0539891aeed","validator_index":"9","committee_index":"0","committee_length":"14","validator_committee_index":"8","committees_at_slot":"1","slot":"8342"},{"pubkey":"0xac9b60d5afcbd5663a8a44b7c5a02f19e9a77ab0a35bd65809bb5c67ec582c897feb04decc694b13e08587f3ff9b5b60","validator_index":"3","committee_index":"0","committee_length":"13","validator_committee_index":"6","committees_at_slot":"1","slot":"8348"}],"execution_optimistic":false}` + "\n", + }, + { + name: "empty-index", + epoch: strconv.FormatUint(fcu.HeadSlotVal/32, 10), + code: http.StatusOK, + reqBody: `[]`, + expected: `{"data":[],"execution_optimistic":false}` + "\n", + }, + { + name: "404", + reqBody: `["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]`, + epoch: `999999999`, + code: http.StatusBadRequest, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + server := httptest.NewServer(handler.mux) + defer server.Close() + // + body := bytes.Buffer{} + body.WriteString(c.reqBody) + // Query the block in the handler with /eth/v2/beacon/states/{block_id} with content-type octet-stream + req, err := http.NewRequest("POST", server.URL+"/eth/v1/validator/duties/attester/"+c.epoch, &body) + require.NoError(t, err) + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + + defer resp.Body.Close() + require.Equal(t, c.code, resp.StatusCode) + if resp.StatusCode != http.StatusOK { + return + } + // read the all of the octect + out, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, c.expected, string(out)) + }) + } +} diff --git a/cl/beacon/handler/duties_proposer.go b/cl/beacon/handler/duties_proposer.go index 609a8292c41..fb8ae2a09ed 100644 --- a/cl/beacon/handler/duties_proposer.go +++ b/cl/beacon/handler/duties_proposer.go @@ -7,33 +7,62 @@ import ( "sync" "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" + "github.com/ledgerwatch/erigon/cl/persistence/base_encoding" + state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state" shuffling2 
"github.com/ledgerwatch/erigon/cl/phase1/core/state/shuffling" libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv" ) type proposerDuties struct { Pubkey libcommon.Bytes48 `json:"pubkey"` - ValidatorIndex uint64 `json:"validator_index"` - Slot uint64 `json:"slot"` + ValidatorIndex uint64 `json:"validator_index,string"` + Slot uint64 `json:"slot,string"` } -func (a *ApiHandler) getDutiesProposer(r *http.Request) (*beaconResponse, error) { - +func (a *ApiHandler) getDutiesProposer(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { epoch, err := epochFromRequest(r) if err != nil { return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) } if epoch < a.forkchoiceStore.FinalizedCheckpoint().Epoch() { - return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, "invalid epoch") + tx, err := a.indiciesDB.BeginRo(r.Context()) + if err != nil { + return nil, err + } + defer tx.Rollback() + key := base_encoding.Encode64ToBytes4(epoch) + indiciesBytes, err := tx.GetOne(kv.Proposers, key) + if err != nil { + return nil, err + } + if len(indiciesBytes) != int(a.beaconChainCfg.SlotsPerEpoch*4) { + return nil, beaconhttp.NewEndpointError(http.StatusInternalServerError, "proposer duties is corrupted") + } + duties := make([]proposerDuties, a.beaconChainCfg.SlotsPerEpoch) + for i := uint64(0); i < a.beaconChainCfg.SlotsPerEpoch; i++ { + validatorIndex := binary.BigEndian.Uint32(indiciesBytes[i*4 : i*4+4]) + var pk libcommon.Bytes48 + pk, err := state_accessors.ReadPublicKeyByIndex(tx, uint64(validatorIndex)) + if err != nil { + return nil, err + } + duties[i] = proposerDuties{ + Pubkey: pk, + ValidatorIndex: uint64(validatorIndex), + Slot: epoch*a.beaconChainCfg.SlotsPerEpoch + i, + } + } + return newBeaconResponse(duties).withFinalized(true).withVersion(a.beaconChainCfg.GetCurrentStateVersion(epoch)), nil } // We need to compute our duties state, cancel := a.syncedData.HeadState() defer cancel() if state == nil { - return nil, beaconhttp.NewEndpointError(http.StatusInternalServerError, "beacon node is syncing") + return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, "beacon node is syncing") } @@ -90,5 +119,4 @@ func (a *ApiHandler) getDutiesProposer(r *http.Request) (*beaconResponse, error) wg.Wait() return newBeaconResponse(duties).withFinalized(false).withVersion(a.beaconChainCfg.GetCurrentStateVersion(epoch)), nil - } diff --git a/cl/beacon/handler/duties_proposer_test.go b/cl/beacon/handler/duties_proposer_test.go new file mode 100644 index 00000000000..bba6c93773e --- /dev/null +++ b/cl/beacon/handler/duties_proposer_test.go @@ -0,0 +1,112 @@ +package handler + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strconv" + "testing" + + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/stretchr/testify/require" +) + +func TestProposerDutiesProposerFcu(t *testing.T) { + + // setupTestingHandler(t, clparams.Phase0Version) + _, blocks, _, postState, _, handler, _, syncedDataManager, fcu := setupTestingHandler(t, clparams.Phase0Version) + epoch := blocks[len(blocks)-1].Block.Slot / 32 + + require.NoError(t, syncedDataManager.OnHeadState(postState)) + + fcu.FinalizedCheckpointVal = solid.NewCheckpointFromParameters(common.Hash{}, epoch) + + server := httptest.NewServer(handler.mux) + defer server.Close() + + resp, err := http.Get(server.URL + "/eth/v1/validator/duties/proposer/" + 
strconv.FormatUint(epoch, 10)) + require.NoError(t, err) + defer resp.Body.Close() + + require.Equal(t, http.StatusOK, resp.StatusCode) + out := make(map[string]interface{}) + err = json.NewDecoder(resp.Body).Decode(&out) + require.NoError(t, err) + + data := out["data"].([]interface{}) + require.Equal(t, len(data), 32) + for _, v := range data { + d := v.(map[string]interface{}) + require.NotNil(t, d["pubkey"]) + require.NotNil(t, d["validator_index"]) + require.NotNil(t, d["slot"]) + } +} + +func TestProposerDutiesProposerBadEpoch(t *testing.T) { + + // setupTestingHandler(t, clparams.Phase0Version) + _, _, _, postState, _, handler, _, syncedDataManager, fcu := setupTestingHandler(t, clparams.Phase0Version) + + require.NoError(t, syncedDataManager.OnHeadState(postState)) + + fcu.FinalizedCheckpointVal = solid.NewCheckpointFromParameters(common.Hash{}, 1) + + server := httptest.NewServer(handler.mux) + defer server.Close() + + resp, err := http.Get(server.URL + "/eth/v1/validator/duties/proposer/abc") + require.NoError(t, err) + defer resp.Body.Close() + + require.Equal(t, http.StatusBadRequest, resp.StatusCode) +} + +func TestProposerDutiesNotSynced(t *testing.T) { + _, _, _, _, _, handler, _, _, fcu := setupTestingHandler(t, clparams.Phase0Version) + + fcu.FinalizedCheckpointVal = solid.NewCheckpointFromParameters(common.Hash{}, 1) + + server := httptest.NewServer(handler.mux) + defer server.Close() + + resp, err := http.Get(server.URL + "/eth/v1/validator/duties/proposer/1") + require.NoError(t, err) + defer resp.Body.Close() + + require.Equal(t, http.StatusServiceUnavailable, resp.StatusCode) +} + +func TestProposerDutiesProposerFcuHistorical(t *testing.T) { + + // setupTestingHandler(t, clparams.Phase0Version) + _, blocks, _, postState, _, handler, _, syncedDataManager, fcu := setupTestingHandler(t, clparams.Phase0Version) + epoch := blocks[len(blocks)-1].Block.Slot / 32 + + require.NoError(t, syncedDataManager.OnHeadState(postState)) + + fcu.FinalizedCheckpointVal = solid.NewCheckpointFromParameters(common.Hash{}, epoch) + + server := httptest.NewServer(handler.mux) + defer server.Close() + + resp, err := http.Get(server.URL + "/eth/v1/validator/duties/proposer/" + strconv.FormatUint(epoch-1, 10)) + require.NoError(t, err) + defer resp.Body.Close() + + require.Equal(t, http.StatusOK, resp.StatusCode) + out := make(map[string]interface{}) + err = json.NewDecoder(resp.Body).Decode(&out) + require.NoError(t, err) + + data := out["data"].([]interface{}) + require.Equal(t, len(data), 32) + for _, v := range data { + d := v.(map[string]interface{}) + require.NotNil(t, d["pubkey"]) + require.NotNil(t, d["validator_index"]) + require.NotNil(t, d["slot"]) + } +} diff --git a/cl/beacon/handler/duties_sync.go b/cl/beacon/handler/duties_sync.go new file mode 100644 index 00000000000..e52a8a23246 --- /dev/null +++ b/cl/beacon/handler/duties_sync.go @@ -0,0 +1,146 @@ +package handler + +import ( + "encoding/json" + "fmt" + "net/http" + "sort" + "strconv" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies" + state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state" +) + +type syncDutyResponse struct { + Pubkey libcommon.Bytes48 `json:"pubkey"` + ValidatorIndex uint64 `json:"validator_index,string"` + ValidatorSyncCommitteeIndicies []string `json:"validator_sync_committee_indicies"` +} + +func (a *ApiHandler) 
getSyncDuties(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) {
+	epoch, err := epochFromRequest(r)
+	if err != nil {
+		return nil, err
+	}
+
+	// compute the sync committee period
+	period := epoch / a.beaconChainCfg.EpochsPerSyncCommitteePeriod
+
+	var idxsStr []string
+	if err := json.NewDecoder(r.Body).Decode(&idxsStr); err != nil {
+		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("could not decode request body: %w. request body is required", err).Error())
+	}
+	if len(idxsStr) == 0 {
+		return newBeaconResponse([]string{}).withOptimistic(false), nil
+	}
+	duplicates := map[int]struct{}{}
+	// convert the request to uint64, skipping duplicates
+	idxs := make([]uint64, 0, len(idxsStr))
+	for _, idxStr := range idxsStr {
+		idx, err := strconv.ParseUint(idxStr, 10, 64)
+		if err != nil {
+			return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("could not parse validator index: %w", err).Error())
+		}
+		if _, ok := duplicates[int(idx)]; ok {
+			continue
+		}
+		idxs = append(idxs, idx)
+		duplicates[int(idx)] = struct{}{}
+	}
+
+	tx, err := a.indiciesDB.BeginRo(r.Context())
+	if err != nil {
+		return nil, err
+	}
+	defer tx.Rollback()
+
+	// Try to find a slot in the epoch or close to it
+	referenceSlot := ((epoch + 1) * a.beaconChainCfg.SlotsPerEpoch) - 1
+
+	// Walk backwards until we hit a slot that has a canonical block root
+	// (the loop keeps going while the root is still the zero hash)
+	var referenceRoot libcommon.Hash
+	for ; referenceRoot == (libcommon.Hash{}); referenceSlot-- {
+		referenceRoot, err = beacon_indicies.ReadCanonicalBlockRoot(tx, referenceSlot)
+		if err != nil {
+			return nil, err
+		}
+	}
+	referencePeriod := (referenceSlot / a.beaconChainCfg.SlotsPerEpoch) / a.beaconChainCfg.EpochsPerSyncCommitteePeriod
+	// Now try reading the sync committee
+	currentSyncCommittee, nextSyncCommittee, ok := a.forkchoiceStore.GetSyncCommittees(referenceRoot)
+	if !ok {
+		roundedSlotToPeriod := a.beaconChainCfg.RoundSlotToSyncCommitteePeriod(referenceSlot)
+		switch {
+		case referencePeriod == period:
+			currentSyncCommittee, err = state_accessors.ReadCurrentSyncCommittee(tx, roundedSlotToPeriod)
+		case referencePeriod+1 == period:
+			nextSyncCommittee, err = state_accessors.ReadNextSyncCommittee(tx, roundedSlotToPeriod)
+		default:
+			return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not find sync committee for epoch %d", epoch))
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+	var syncCommittee *solid.SyncCommittee
+	// Determine which one to use. TODO(Giulio2002): Make this less redundant.
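+	// The state at referenceSlot only knows the committee for its own period
+	// ("current") and for the period right after it ("next"); any other period
+	// cannot be derived from it, hence the 404 in the default branch below.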
+	switch {
+	case referencePeriod == period:
+		syncCommittee = currentSyncCommittee
+	case referencePeriod+1 == period:
+		syncCommittee = nextSyncCommittee
+	default:
+		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not find sync committee for epoch %d", epoch))
+	}
+	if syncCommittee == nil {
+		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not find sync committee for epoch %d", epoch))
+	}
+	// Now we have the sync committee, we can initialize our response set
+	dutiesSet := map[uint64]*syncDutyResponse{}
+	for _, idx := range idxs {
+		publicKey, err := state_accessors.ReadPublicKeyByIndex(tx, idx)
+		if err != nil {
+			return nil, err
+		}
+		if publicKey == (libcommon.Bytes48{}) {
+			return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not find validator with index %d", idx))
+		}
+		dutiesSet[idx] = &syncDutyResponse{
+			Pubkey:         publicKey,
+			ValidatorIndex: idx,
+		}
+	}
+	// Now we can iterate over the sync committee and fill the response
+	for idx, committeePartecipantPublicKey := range syncCommittee.GetCommittee() {
+		committeePartecipantIndex, ok, err := state_accessors.ReadValidatorIndexByPublicKey(tx, committeePartecipantPublicKey)
+		if err != nil {
+			return nil, err
+		}
+		if !ok {
+			return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not find validator with public key %x", committeePartecipantPublicKey))
+		}
+		if _, ok := dutiesSet[committeePartecipantIndex]; !ok {
+			continue
+		}
+		dutiesSet[committeePartecipantIndex].ValidatorSyncCommitteeIndicies = append(
+			dutiesSet[committeePartecipantIndex].ValidatorSyncCommitteeIndicies,
+			strconv.FormatUint(uint64(idx), 10))
+	}
+	// Now we can convert the map to a slice
+	duties := make([]*syncDutyResponse, 0, len(dutiesSet))
+	for _, duty := range dutiesSet {
+		if len(duty.ValidatorSyncCommitteeIndicies) == 0 {
+			continue
+		}
+		duties = append(duties, duty)
+	}
+	sort.Slice(duties, func(i, j int) bool {
+		return duties[i].ValidatorIndex < duties[j].ValidatorIndex
+	})
+
+	return newBeaconResponse(duties).withOptimistic(false), nil
+}
diff --git a/cl/beacon/handler/duties_sync_test.go b/cl/beacon/handler/duties_sync_test.go
new file mode 100644
index 00000000000..eca554c668d
--- /dev/null
+++ b/cl/beacon/handler/duties_sync_test.go
@@ -0,0 +1,85 @@
+package handler
+
+import (
+	"bytes"
+	"io"
+	"math"
+	"net/http"
+	"net/http/httptest"
+	"strconv"
+	"testing"
+
+	"github.com/ledgerwatch/erigon/cl/clparams"
+	"github.com/ledgerwatch/erigon/cl/cltypes/solid"
+	"github.com/stretchr/testify/require"
+)
+
+func TestDutiesSync(t *testing.T) {
+	_, blocks, _, _, postState, handler, _, _, fcu := setupTestingHandler(t, clparams.BellatrixVersion)
+
+	fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot
+
+	fcu.FinalizedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, fcu.HeadSlotVal/32)
+	fcu.FinalizedSlotVal = math.MaxUint64
+
+	fcu.StateAtBlockRootVal[fcu.HeadVal] = postState
+
+	cases := []struct {
+		name     string
+		epoch    string
+		code     int
+		reqBody  string
+		expected string
+	}{
+		{
+			name:    "non-empty-indicies",
+			epoch:   strconv.FormatUint(fcu.HeadSlotVal/32, 10),
+			code:    http.StatusOK,
+			reqBody: `["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]`,
`{"data":[{"pubkey":"0x97f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb","validator_index":"0","validator_sync_committee_indicies":["30","286"]},{"pubkey":"0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e","validator_index":"1","validator_sync_committee_indicies":["120","376"]},{"pubkey":"0x89ece308f9d1f0131765212deca99697b112d61f9be9a5f1f3780a51335b3ff981747a0b2ca2179b96d2c0c9024e5224","validator_index":"2","validator_sync_committee_indicies":["138","394"]},{"pubkey":"0xac9b60d5afcbd5663a8a44b7c5a02f19e9a77ab0a35bd65809bb5c67ec582c897feb04decc694b13e08587f3ff9b5b60","validator_index":"3","validator_sync_committee_indicies":["10","266"]},{"pubkey":"0xb0e7791fb972fe014159aa33a98622da3cdc98ff707965e536d8636b5fcc5ac7a91a8c46e59a00dca575af0f18fb13dc","validator_index":"4","validator_sync_committee_indicies":["114","370"]},{"pubkey":"0xa6e82f6da4520f85c5d27d8f329eccfa05944fd1096b20734c894966d12a9e2a9a9744529d7212d33883113a0cadb909","validator_index":"5","validator_sync_committee_indicies":["103","359"]},{"pubkey":"0xb928f3beb93519eecf0145da903b40a4c97dca00b21f12ac0df3be9116ef2ef27b2ae6bcd4c5bc2d54ef5a70627efcb7","validator_index":"6","validator_sync_committee_indicies":["163","419"]},{"pubkey":"0xa85ae765588126f5e860d019c0e26235f567a9c0c0b2d8ff30f3e8d436b1082596e5e7462d20f5be3764fd473e57f9cf","validator_index":"7","validator_sync_committee_indicies":["197","453"]},{"pubkey":"0x99cdf3807146e68e041314ca93e1fee0991224ec2a74beb2866816fd0826ce7b6263ee31e953a86d1b72cc2215a57793","validator_index":"8","validator_sync_committee_indicies":["175","431"]},{"pubkey":"0xaf81da25ecf1c84b577fefbedd61077a81dc43b00304015b2b596ab67f00e41c86bb00ebd0f90d4b125eb0539891aeed","validator_index":"9","validator_sync_committee_indicies":["53","309"]}],"execution_optimistic":false}` + "\n", + }, + { + name: "empty-index", + epoch: strconv.FormatUint(fcu.HeadSlotVal/32, 10), + code: http.StatusOK, + reqBody: `[]`, + expected: `{"data":[],"execution_optimistic":false}` + "\n", + }, + { + name: "404", + reqBody: `["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]`, + epoch: `999999999`, + code: http.StatusNotFound, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + server := httptest.NewServer(handler.mux) + defer server.Close() + // + body := bytes.Buffer{} + body.WriteString(c.reqBody) + // Query the block in the handler with /eth/v2/beacon/states/{block_id} with content-type octet-stream + req, err := http.NewRequest("POST", server.URL+"/eth/v1/validator/duties/sync/"+c.epoch, &body) + require.NoError(t, err) + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + + defer resp.Body.Close() + require.Equal(t, c.code, resp.StatusCode) + if resp.StatusCode != http.StatusOK { + return + } + // read the all of the octect + out, err := io.ReadAll(resp.Body) + require.NoError(t, err) + if string(out) != c.expected { + panic(string(out)) + } + require.Equal(t, c.expected, string(out)) + }) + } +} diff --git a/cl/beacon/handler/format.go b/cl/beacon/handler/format.go index f2ea28495cb..7baa88b42f2 100644 --- a/cl/beacon/handler/format.go +++ b/cl/beacon/handler/format.go @@ -57,7 +57,15 @@ func (r *beaconResponse) withFinalized(finalized bool) (out *beaconResponse) { out.Finalized = new(bool) out.ExecutionOptimistic = new(bool) out.Finalized = &finalized - return r + return out +} + +func (r *beaconResponse) withOptimistic(optimistic bool) (out *beaconResponse) { + out = new(beaconResponse) + 
*out = *r + out.ExecutionOptimistic = new(bool) + out.ExecutionOptimistic = &optimistic + return out } func (r *beaconResponse) withVersion(version clparams.StateVersion) (out *beaconResponse) { @@ -65,54 +73,9 @@ func (r *beaconResponse) withVersion(version clparams.StateVersion) (out *beacon *out = *r out.Version = new(clparams.StateVersion) out.Version = &version - return r + return out } -//// In case of it being a json we need to also expose finalization, version, etc... -//type beaconHandlerFn func(r *http.Request) *beaconResponse -// -//func beaconHandlerWrapper(fn beaconHandlerFn, supportSSZ bool) func(w http.ResponseWriter, r *http.Request) { -// return func(w http.ResponseWriter, r *http.Request) { -// accept := r.Header.Get("Accept") -// isSSZ := !strings.Contains(accept, "application/json") && strings.Contains(accept, "application/stream-octect") -// start := time.Now() -// defer func() { -// log.Debug("[Beacon API] finished", "method", r.Method, "path", r.URL.Path, "duration", time.Since(start)) -// }() -// -// resp := fn(r) -// if resp.internalError != nil { -// http.Error(w, resp.internalError.Error(), http.StatusInternalServerError) -// log.Debug("[Beacon API] failed", "method", r.Method, "err", resp.internalError.Error(), "ssz", isSSZ) -// return -// } -// -// if resp.apiError != nil { -// http.Error(w, resp.apiError.err.Error(), resp.apiError.code) -// log.Debug("[Beacon API] failed", "method", r.Method, "err", resp.apiError.err.Error(), "ssz", isSSZ) -// return -// } -// -// if isSSZ && supportSSZ { -// data := resp.Data -// // SSZ encoding -// encoded, err := data.(ssz.Marshaler).EncodeSSZ(nil) -// if err != nil { -// http.Error(w, err.Error(), http.StatusInternalServerError) -// log.Debug("[Beacon API] failed", "method", r.Method, "err", err, "accepted", accept) -// return -// } -// w.Header().Set("Content-Type", "application/octet-stream") -// w.Write(encoded) -// return -// } -// w.Header().Set("Content-Type", "application/json") -// if err := json.NewEncoder(w).Encode(resp); err != nil { -// log.Warn("[Beacon API] failed", "method", r.Method, "err", err, "ssz", isSSZ) -// } -// } -//} - type chainTag int var ( @@ -167,6 +130,14 @@ func epochFromRequest(r *http.Request) (uint64, error) { return epochMaybe, nil } +func stringFromRequest(r *http.Request, name string) (string, error) { + str := chi.URLParam(r, name) + if str == "" { + return "", nil + } + return str, nil +} + func blockIdFromRequest(r *http.Request) (*segmentID, error) { regex := regexp.MustCompile(`^(?:0x[0-9a-fA-F]{64}|head|finalized|genesis|\d+)$`) @@ -260,3 +231,12 @@ func uint64FromQueryParams(r *http.Request, name string) (*uint64, error) { } return &num, nil } + +// decode a list of strings from the query params +func stringListFromQueryParams(r *http.Request, name string) ([]string, error) { + str := r.URL.Query().Get(name) + if str == "" { + return nil, nil + } + return regexp.MustCompile(`\s*,\s*`).Split(str, -1), nil +} diff --git a/cl/beacon/handler/genesis.go b/cl/beacon/handler/genesis.go index 05af01dd8b5..da0ca41d847 100644 --- a/cl/beacon/handler/genesis.go +++ b/cl/beacon/handler/genesis.go @@ -10,12 +10,12 @@ import ( ) type genesisResponse struct { - GenesisTime uint64 `json:"genesis_time,omitempty"` - GenesisValidatorRoot common.Hash `json:"genesis_validator_root,omitempty"` + GenesisTime uint64 `json:"genesis_time,omitempty,string"` + GenesisValidatorRoot common.Hash `json:"genesis_validators_root,omitempty"` GenesisForkVersion libcommon.Bytes4 
`json:"genesis_fork_version,omitempty"` } -func (a *ApiHandler) getGenesis(r *http.Request) (*beaconResponse, error) { +func (a *ApiHandler) getGenesis(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { if a.genesisCfg == nil { return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "Genesis Config is missing") } diff --git a/cl/beacon/handler/genesis_test.go b/cl/beacon/handler/genesis_test.go new file mode 100644 index 00000000000..544830c86fd --- /dev/null +++ b/cl/beacon/handler/genesis_test.go @@ -0,0 +1,35 @@ +package handler + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/stretchr/testify/require" +) + +func TestGetGenesis(t *testing.T) { + + // setupTestingHandler(t, clparams.Phase0Version) + _, _, _, _, _, handler, _, _, _ := setupTestingHandler(t, clparams.Phase0Version) + + server := httptest.NewServer(handler.mux) + defer server.Close() + + resp, err := http.Get(server.URL + "/eth/v1/beacon/genesis") + require.NoError(t, err) + defer resp.Body.Close() + + require.Equal(t, http.StatusOK, resp.StatusCode) + out := make(map[string]interface{}) + err = json.NewDecoder(resp.Body).Decode(&out) + require.NoError(t, err) + + data := out["data"].(map[string]interface{}) + genesisTime := data["genesis_time"].(string) + require.Equal(t, genesisTime, "1606824023") + require.Equal(t, data["genesis_fork_version"], "0xbba4da96") + require.Equal(t, data["genesis_validators_root"], "0x4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95") +} diff --git a/cl/beacon/handler/handler.go b/cl/beacon/handler/handler.go index b6703bb7b88..72a926c9fa0 100644 --- a/cl/beacon/handler/handler.go +++ b/cl/beacon/handler/handler.go @@ -9,7 +9,9 @@ import ( "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" "github.com/ledgerwatch/erigon/cl/beacon/synced_data" "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" "github.com/ledgerwatch/erigon/cl/persistence" + "github.com/ledgerwatch/erigon/cl/persistence/state/historical_states_reader" "github.com/ledgerwatch/erigon/cl/phase1/forkchoice" "github.com/ledgerwatch/erigon/cl/pool" "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" @@ -17,7 +19,7 @@ import ( type ApiHandler struct { o sync.Once - mux chi.Router + mux *chi.Mux blockReader freezeblocks.BeaconSnapshotReader indiciesDB kv.RoDB @@ -26,10 +28,16 @@ type ApiHandler struct { forkchoiceStore forkchoice.ForkChoiceStorage operationsPool pool.OperationsPool syncedData *synced_data.SyncedDataManager + stateReader *historical_states_reader.HistoricalStatesReader + + // pools + randaoMixesPool sync.Pool } -func NewApiHandler(genesisConfig *clparams.GenesisConfig, beaconChainConfig *clparams.BeaconChainConfig, source persistence.RawBeaconBlockChain, indiciesDB kv.RoDB, forkchoiceStore forkchoice.ForkChoiceStorage, operationsPool pool.OperationsPool, rcsn freezeblocks.BeaconSnapshotReader, syncedData *synced_data.SyncedDataManager) *ApiHandler { - return &ApiHandler{o: sync.Once{}, genesisCfg: genesisConfig, beaconChainCfg: beaconChainConfig, indiciesDB: indiciesDB, forkchoiceStore: forkchoiceStore, operationsPool: operationsPool, blockReader: rcsn, syncedData: syncedData} +func NewApiHandler(genesisConfig *clparams.GenesisConfig, beaconChainConfig *clparams.BeaconChainConfig, source persistence.RawBeaconBlockChain, indiciesDB kv.RoDB, forkchoiceStore forkchoice.ForkChoiceStorage, operationsPool pool.OperationsPool, rcsn 
freezeblocks.BeaconSnapshotReader, syncedData *synced_data.SyncedDataManager, stateReader *historical_states_reader.HistoricalStatesReader) *ApiHandler { + return &ApiHandler{o: sync.Once{}, genesisCfg: genesisConfig, beaconChainCfg: beaconChainConfig, indiciesDB: indiciesDB, forkchoiceStore: forkchoiceStore, operationsPool: operationsPool, blockReader: rcsn, syncedData: syncedData, stateReader: stateReader, randaoMixesPool: sync.Pool{New: func() interface{} { + return solid.NewHashVector(int(beaconChainConfig.EpochsPerHistoricalVector)) + }}} } func (a *ApiHandler) init() { @@ -39,6 +47,7 @@ func (a *ApiHandler) init() { // otterscan specific ones are commented as such r.Route("/eth", func(r chi.Router) { r.Route("/v1", func(r chi.Router) { + r.Get("/events", http.NotFound) r.Route("/config", func(r chi.Router) { r.Get("/spec", beaconhttp.HandleEndpointFunc(a.getSpec)) @@ -46,6 +55,11 @@ func (a *ApiHandler) init() { r.Get("/fork_schedule", beaconhttp.HandleEndpointFunc(a.getForkSchedule)) }) r.Route("/beacon", func(r chi.Router) { + r.Route("/rewards", func(r chi.Router) { + r.Post("/sync_committee/{block_id}", beaconhttp.HandleEndpointFunc(a.getSyncCommitteesRewards)) + r.Get("/blocks/{block_id}", beaconhttp.HandleEndpointFunc(a.getBlockRewards)) + r.Post("/attestations/{epoch}", beaconhttp.HandleEndpointFunc(a.getAttestationsRewards)) + }) r.Route("/headers", func(r chi.Router) { r.Get("/", beaconhttp.HandleEndpointFunc(a.getHeaders)) r.Get("/{block_id}", beaconhttp.HandleEndpointFunc(a.getHeader)) @@ -57,7 +71,7 @@ func (a *ApiHandler) init() { r.Get("/{block_id}/root", beaconhttp.HandleEndpointFunc(a.getBlockRoot)) }) r.Get("/genesis", beaconhttp.HandleEndpointFunc(a.getGenesis)) - r.Post("/binded_blocks", http.NotFound) + r.Get("/blinded_blocks/{block_id}", beaconhttp.HandleEndpointFunc(a.getBlindedBlock)) r.Route("/pool", func(r chi.Router) { r.Post("/attestations", http.NotFound) r.Get("/voluntary_exits", beaconhttp.HandleEndpointFunc(a.poolVoluntaryExits)) @@ -70,20 +84,24 @@ func (a *ApiHandler) init() { r.Get("/node/syncing", http.NotFound) r.Route("/states", func(r chi.Router) { r.Get("/head/validators/{index}", http.NotFound) // otterscan - r.Get("/head/committees", http.NotFound) // otterscan r.Route("/{state_id}", func(r chi.Router) { + r.Get("/randao", beaconhttp.HandleEndpointFunc(a.getRandao)) + r.Get("/committees", beaconhttp.HandleEndpointFunc(a.getCommittees)) + r.Get("/sync_committees", beaconhttp.HandleEndpointFunc(a.getSyncCommittees)) // otterscan + r.Get("/finality_checkpoints", beaconhttp.HandleEndpointFunc(a.getFinalityCheckpoints)) - r.Get("/validators", http.NotFound) r.Get("/root", beaconhttp.HandleEndpointFunc(a.getStateRoot)) r.Get("/fork", beaconhttp.HandleEndpointFunc(a.getStateFork)) - r.Get("/validators/{id}", http.NotFound) + r.Get("/validators", beaconhttp.HandleEndpointFunc(a.getAllValidators)) + r.Get("/validator_balances", beaconhttp.HandleEndpointFunc(a.getAllValidatorsBalances)) + r.Get("/validators/{validator_id}", beaconhttp.HandleEndpointFunc(a.getSingleValidator)) }) }) }) r.Route("/validator", func(r chi.Router) { r.Route("/duties", func(r chi.Router) { - r.Post("/attester/{epoch}", http.NotFound) + r.Post("/attester/{epoch}", beaconhttp.HandleEndpointFunc(a.getAttesterDuties)) r.Get("/proposer/{epoch}", beaconhttp.HandleEndpointFunc(a.getDutiesProposer)) - r.Post("/sync/{epoch}", http.NotFound) + r.Post("/sync/{epoch}", beaconhttp.HandleEndpointFunc(a.getSyncDuties)) }) r.Get("/blinded_blocks/{slot}", http.NotFound) r.Get("/attestation_data", 
http.NotFound) @@ -94,6 +113,7 @@ func (a *ApiHandler) init() { r.Get("/sync_committee_contribution", http.NotFound) r.Post("/contribution_and_proofs", http.NotFound) r.Post("/prepare_beacon_proposer", http.NotFound) + r.Post("/liveness/{epoch}", beaconhttp.HandleEndpointFunc(a.liveness)) }) }) r.Route("/v2", func(r chi.Router) { @@ -103,7 +123,7 @@ func (a *ApiHandler) init() { }) }) r.Route("/beacon", func(r chi.Router) { - r.Get("/blocks/{block_id}", beaconhttp.HandleEndpointFunc(a.getBlock)) //otterscan + r.Get("/blocks/{block_id}", beaconhttp.HandleEndpointFunc(a.getBlock)) }) r.Route("/validator", func(r chi.Router) { r.Post("/blocks/{slot}", http.NotFound) diff --git a/cl/beacon/handler/headers.go b/cl/beacon/handler/headers.go index e6b18607115..462cdf5cf6f 100644 --- a/cl/beacon/handler/headers.go +++ b/cl/beacon/handler/headers.go @@ -9,7 +9,7 @@ import ( "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies" ) -func (a *ApiHandler) getHeaders(r *http.Request) (*beaconResponse, error) { +func (a *ApiHandler) getHeaders(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { ctx := r.Context() querySlot, err := uint64FromQueryParams(r, "slot") @@ -89,7 +89,7 @@ func (a *ApiHandler) getHeaders(r *http.Request) (*beaconResponse, error) { return newBeaconResponse(headers), nil } -func (a *ApiHandler) getHeader(r *http.Request) (*beaconResponse, error) { +func (a *ApiHandler) getHeader(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { ctx := r.Context() tx, err := a.indiciesDB.BeginRo(ctx) if err != nil { diff --git a/cl/beacon/handler/headers_test.go b/cl/beacon/handler/headers_test.go new file mode 100644 index 00000000000..b04df1e6f7f --- /dev/null +++ b/cl/beacon/handler/headers_test.go @@ -0,0 +1,178 @@ +package handler + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strconv" + "testing" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/common" + "github.com/stretchr/testify/require" +) + +func TestGetHeader(t *testing.T) { + + _, blocks, _, _, _, handler, _, _, fcu := setupTestingHandler(t, clparams.Phase0Version) + + // Compute the roots the test cases below refer to + rootBlock1, err := blocks[0].Block.HashSSZ() + require.NoError(t, err) + + fcu.HeadVal, err = blocks[len(blocks)-1].Block.HashSSZ() + require.NoError(t, err) + + fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot + + bodyRoot1, err := blocks[0].Block.Body.HashSSZ() + require.NoError(t, err) + + bodyRoot2, err := blocks[len(blocks)-1].Block.Body.HashSSZ() + require.NoError(t, err) + + cases := []struct { + blockID string + code int + slot uint64 + bodyRoot string + }{ + { + blockID: "0x" + common.Bytes2Hex(rootBlock1[:]), + code: http.StatusOK, + slot: blocks[0].Block.Slot, + bodyRoot: "0x" + common.Bytes2Hex(bodyRoot1[:]), + }, + { + blockID: "head", + code: http.StatusOK, + slot: blocks[len(blocks)-1].Block.Slot, + bodyRoot: "0x" + common.Bytes2Hex(bodyRoot2[:]), + }, + { + blockID: "0x" + common.Bytes2Hex(make([]byte, 32)), + code: http.StatusNotFound, + }, + } + + for _, c := range cases { + t.Run(c.blockID, func(t *testing.T) { + server := httptest.NewServer(handler.mux) + defer server.Close() + // Query the header via /eth/v1/beacon/headers/{block_id} + resp, err := http.Get(server.URL + "/eth/v1/beacon/headers/" + c.blockID) + require.NoError(t, err) + defer resp.Body.Close() + require.Equal(t, c.code, resp.StatusCode)
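+ // For reference, a 200 response is expected to follow the standard Beacon API header + // schema (shape shown with placeholder values, not an exact fixture): + // {"data":{"root":"0x..","canonical":true,"header":{"message":{"slot":"..","proposer_index":"..","parent_root":"0x..","state_root":"0x..","body_root":"0x.."},"signature":"0x.."}}} + // The assertions below only check slot, body_root and canonical.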
if resp.StatusCode != http.StatusOK { + return + } + jsonVal := make(map[string]interface{}) + // unmarshal the json + require.NoError(t, json.NewDecoder(resp.Body).Decode(&jsonVal)) + data := jsonVal["data"].(map[string]interface{}) + header := data["header"].(map[string]interface{}) + message := header["message"].(map[string]interface{}) + + // compare the header fields + require.Equal(t, message["slot"], strconv.FormatInt(int64(c.slot), 10)) + require.Equal(t, message["body_root"], c.bodyRoot) + require.Equal(t, data["canonical"], true) + }) + } +} + +func TestGetHeaders(t *testing.T) { + + _, blocks, _, _, _, handler, _, _, fcu := setupTestingHandler(t, clparams.Phase0Version) + + var err error + + fcu.HeadVal, err = blocks[len(blocks)-1].Block.HashSSZ() + require.NoError(t, err) + + fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot + + bodyRoot1, err := blocks[0].Block.Body.HashSSZ() + require.NoError(t, err) + + bodyRoot2, err := blocks[len(blocks)-1].Block.Body.HashSSZ() + require.NoError(t, err) + + cases := []struct { + name string + code int + slotReq *uint64 + parentRoot *libcommon.Hash + slot uint64 + bodyRoot string + count int + }{ + { + count: 1, + name: "slot", + code: http.StatusOK, + slotReq: &blocks[0].Block.Slot, + slot: blocks[0].Block.Slot, + bodyRoot: "0x" + common.Bytes2Hex(bodyRoot1[:]), + }, + { + count: 0, + name: "none", + code: http.StatusOK, + slot: blocks[len(blocks)-1].Block.Slot, + bodyRoot: "0x" + common.Bytes2Hex(bodyRoot2[:]), + }, + { + count: 0, + name: "parent", + code: http.StatusOK, + slotReq: &blocks[0].Block.Slot, + slot: blocks[0].Block.Slot, + parentRoot: &blocks[0].Block.ParentRoot, + bodyRoot: "0x" + common.Bytes2Hex(bodyRoot1[:]), + }, + { + count: 0, + name: "wtf", + code: http.StatusOK, + slotReq: new(uint64), + slot: blocks[0].Block.Slot, + parentRoot: &blocks[0].Block.ParentRoot, + bodyRoot: "0x" + common.Bytes2Hex(bodyRoot1[:]), + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + server := httptest.NewServer(handler.mux) + defer server.Close() + url := server.URL + "/eth/v1/beacon/headers?lol=0" // lol is a random query param + + if c.slotReq != nil { + url += "&slot=" + strconv.FormatInt(int64(*c.slotReq), 10) + } + if c.parentRoot != nil { + url += "&parent_root=" + "0x" + common.Bytes2Hex(c.parentRoot[:]) + } + // Query the headers list via /eth/v1/beacon/headers + resp, err := http.Get(url) + require.NoError(t, err) + defer resp.Body.Close() + require.Equal(t, c.code, resp.StatusCode) + if resp.StatusCode != http.StatusOK { + return + } + jsonVal := make(map[string]interface{}) + // unmarshal the json + require.NoError(t, json.NewDecoder(resp.Body).Decode(&jsonVal)) + data := jsonVal["data"].([]interface{}) + require.Equal(t, len(data), c.count) + }) + } +} diff --git a/cl/beacon/handler/liveness.go b/cl/beacon/handler/liveness.go new file mode 100644 index 00000000000..f6b8bd4b519 --- /dev/null +++ b/cl/beacon/handler/liveness.go @@ -0,0 +1,153 @@ +package handler + +import ( + "encoding/json" + "fmt" + "net/http" + "sort" + "strconv" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" + "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/cl/utils" +) + +type live struct { + Index int `json:"index,string"` + IsLive bool `json:"is_live"` +} +
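+// liveness handles POST /eth/v1/validator/liveness/{epoch}. The request body is a JSON array of + // validator indices and, per the standard Beacon API shape, the reply looks like + // {"data":[{"index":"0","is_live":true},{"index":"1","is_live":false}]} + // (values here are illustrative only; see TestLiveness for a real fixture).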
+func (a *ApiHandler) liveness(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { + epoch, err := epochFromRequest(r) + if err != nil { + return nil, err + } + maxEpoch := utils.GetCurrentEpoch(a.genesisCfg.GenesisTime, a.beaconChainCfg.SecondsPerSlot, a.beaconChainCfg.SlotsPerEpoch) + if epoch > maxEpoch { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("epoch %d is in the future, max epoch is %d", epoch, maxEpoch).Error()) + } + + var idxsStr []string + if err := json.NewDecoder(r.Body).Decode(&idxsStr); err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("request body is required: could not decode request body: %w", err).Error()) + } + if len(idxsStr) == 0 { + return newBeaconResponse([]string{}), nil + } + idxSet := map[int]struct{}{} + // convert the requested indices to uint64, skipping duplicates + idxs := make([]uint64, 0, len(idxsStr)) + for _, idxStr := range idxsStr { + idx, err := strconv.ParseUint(idxStr, 10, 64) + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("could not parse validator index: %w", err).Error()) + } + if _, ok := idxSet[int(idx)]; ok { + continue + } + idxs = append(idxs, idx) + idxSet[int(idx)] = struct{}{} + } + + tx, err := a.indiciesDB.BeginRo(r.Context()) + if err != nil { + return nil, err + } + defer tx.Rollback() + ctx := r.Context() + liveSet := map[uint64]*live{} + // initialize resp. + for _, idx := range idxs { + liveSet[idx] = &live{Index: int(idx), IsLive: false} + } + var lastBlockRootProcess libcommon.Hash + var lastSlotProcess uint64 + // we need to obtain the relevant data: + // Use the blocks in the epoch as heuristic + for i := epoch * a.beaconChainCfg.SlotsPerEpoch; i < (epoch+1)*a.beaconChainCfg.SlotsPerEpoch; i++ { + block, err := a.blockReader.ReadBlockBySlot(ctx, tx, i) + if err != nil { + return nil, err + } + if block == nil { + continue + } + updateLivenessWithBlock(block, liveSet) + lastBlockRootProcess, err = block.Block.HashSSZ() + if err != nil { + return nil, err + } + lastSlotProcess = block.Block.Slot + } + // use the epoch participation as an additional heuristic + currentEpochPartecipation, previousEpochPartecipation, err := a.obtainCurrentEpochPartecipationFromEpoch(tx, epoch, lastBlockRootProcess, lastSlotProcess) + if err != nil { + return nil, err + } + if currentEpochPartecipation == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not find participations for epoch %d; if this was a historical query, turn on --caplin.archive", epoch)) + } + for idx, live := range liveSet { + if live.IsLive { + continue + } + if idx >= uint64(currentEpochPartecipation.Length()) { + continue + } + if currentEpochPartecipation.Get(int(idx)) != 0 { + live.IsLive = true + continue + } + if idx >= uint64(previousEpochPartecipation.Length()) { + continue + } + live.IsLive = previousEpochPartecipation.Get(int(idx)) != 0 + } + + resp := []*live{} + for _, v := range liveSet { + resp = append(resp, v) + } + sort.Slice(resp, func(i, j int) bool { + return resp[i].Index < resp[j].Index + }) + + return newBeaconResponse(resp), nil +} + +func (a *ApiHandler) obtainCurrentEpochPartecipationFromEpoch(tx kv.Tx, epoch uint64, blockRoot libcommon.Hash, blockSlot uint64) (*solid.BitList, *solid.BitList, error) { + prevEpoch := epoch + if epoch > 0 { + prevEpoch-- + } + + currPartecipation, ok1 := a.forkchoiceStore.Partecipation(epoch) + prevPartecipation, ok2 := a.forkchoiceStore.Partecipation(prevEpoch) + if !ok1 || !ok2 { + return a.stateReader.ReadPartecipations(tx, blockSlot)
+ } + return currPartecipation, prevPartecipation, nil +} + +func updateLivenessWithBlock(block *cltypes.SignedBeaconBlock, liveSet map[uint64]*live) { + body := block.Block.Body + if _, ok := liveSet[block.Block.ProposerIndex]; ok { + liveSet[block.Block.ProposerIndex].IsLive = true + } + body.VoluntaryExits.Range(func(index int, value *cltypes.SignedVoluntaryExit, length int) bool { + if _, ok := liveSet[value.VoluntaryExit.ValidatorIndex]; ok { + liveSet[value.VoluntaryExit.ValidatorIndex].IsLive = true + } + return true + }) + body.ExecutionChanges.Range(func(index int, value *cltypes.SignedBLSToExecutionChange, length int) bool { + if _, ok := liveSet[value.Message.ValidatorIndex]; ok { + liveSet[value.Message.ValidatorIndex].IsLive = true + } + return true + }) +} diff --git a/cl/beacon/handler/liveness_test.go b/cl/beacon/handler/liveness_test.go new file mode 100644 index 00000000000..73bb5deec91 --- /dev/null +++ b/cl/beacon/handler/liveness_test.go @@ -0,0 +1,62 @@ +package handler + +import ( + "bytes" + "encoding/json" + "math" + "net/http" + "net/http/httptest" + "strconv" + "testing" + + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/stretchr/testify/require" +) + +func TestLiveness(t *testing.T) { + // i just want the correct schema to be generated + _, blocks, _, _, _, handler, _, _, fcu := setupTestingHandler(t, clparams.Phase0Version) + + fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot + + fcu.FinalizedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, fcu.HeadSlotVal/32) + fcu.FinalizedSlotVal = math.MaxUint64 + reqBody := `["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]` + server := httptest.NewServer(handler.mux) + defer server.Close() + body := bytes.Buffer{} + body.WriteString(reqBody) + // POST the validator indices to /eth/v1/validator/liveness/{epoch} + req, err := http.NewRequest("POST", server.URL+"/eth/v1/validator/liveness/"+strconv.FormatUint(fcu.HeadSlotVal/32, 10), &body) + require.NoError(t, err) + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + + defer resp.Body.Close() + require.Equal(t, http.StatusOK, resp.StatusCode) + + out := map[string]interface{}{} + require.NoError(t, json.NewDecoder(resp.Body).Decode(&out)) + data := out["data"].([]interface{}) + require.Equal(t, 11, len(data)) + // check that each entry has is_live (bool) and index (stringified int) + for _, d := range data { + d := d.(map[string]interface{}) + require.Equal(t, 2, len(d)) + isLive, ok := d["is_live"] + require.True(t, ok) + _, ok = isLive.(bool) + require.True(t, ok) + i1, ok := d["index"] + require.True(t, ok) + strIndex, ok := i1.(string) + require.True(t, ok) + _, err := strconv.ParseUint(strIndex, 10, 64) + require.NoError(t, err) + } +} diff --git a/cl/beacon/handler/pool.go b/cl/beacon/handler/pool.go index 66614f904f2..8289d2b22a2 100644 --- a/cl/beacon/handler/pool.go +++ b/cl/beacon/handler/pool.go @@ -4,22 +4,22 @@ import ( "net/http" ) -func (a *ApiHandler) poolVoluntaryExits(r *http.Request) (*beaconResponse, error) { +func (a *ApiHandler) poolVoluntaryExits(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { return newBeaconResponse(a.operationsPool.VoluntaryExistsPool.Raw()), nil } -func (a *ApiHandler) poolAttesterSlashings(r *http.Request) (*beaconResponse, error) { +func (a *ApiHandler) poolAttesterSlashings(w http.ResponseWriter, r *http.Request) 
(*beaconResponse, error) { return newBeaconResponse(a.operationsPool.AttesterSlashingsPool.Raw()), nil } -func (a *ApiHandler) poolProposerSlashings(r *http.Request) (*beaconResponse, error) { +func (a *ApiHandler) poolProposerSlashings(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { return newBeaconResponse(a.operationsPool.ProposerSlashingsPool.Raw()), nil } -func (a *ApiHandler) poolBlsToExecutionChanges(r *http.Request) (*beaconResponse, error) { +func (a *ApiHandler) poolBlsToExecutionChanges(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { return newBeaconResponse(a.operationsPool.BLSToExecutionChangesPool.Raw()), nil } -func (a *ApiHandler) poolAttestations(r *http.Request) (*beaconResponse, error) { +func (a *ApiHandler) poolAttestations(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { return newBeaconResponse(a.operationsPool.AttestationsPool.Raw()), nil } diff --git a/cl/beacon/handler/rewards.go b/cl/beacon/handler/rewards.go new file mode 100644 index 00000000000..dc8ca4f8119 --- /dev/null +++ b/cl/beacon/handler/rewards.go @@ -0,0 +1,233 @@ +package handler + +import ( + "encoding/json" + "io" + "net/http" + "sort" + + "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies" + state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state" + "github.com/ledgerwatch/erigon/cl/utils" +) + +type blockRewardsResponse struct { + ProposerIndex uint64 `json:"proposer_index,string"` + Attestations uint64 `json:"attestations,string"` + ProposerSlashings uint64 `json:"proposer_slashings,string"` + AttesterSlashings uint64 `json:"attester_slashings,string"` + SyncAggregate uint64 `json:"sync_aggregate,string"` + Total uint64 `json:"total,string"` +} + +func (a *ApiHandler) getBlockRewards(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { + ctx := r.Context() + tx, err := a.indiciesDB.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + blockId, err := blockIdFromRequest(r) + if err != nil { + return nil, err + } + root, err := a.rootFromBlockId(ctx, tx, blockId) + if err != nil { + return nil, err + } + blk, err := a.blockReader.ReadHeaderByRoot(ctx, tx, root) + if err != nil { + return nil, err + } + if blk == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "block not found") + } + slot := blk.Header.Slot + isFinalized := slot <= a.forkchoiceStore.FinalizedSlot() + if slot >= a.forkchoiceStore.LowestAvaiableSlot() { + // recent case: the block is still tracked by the forkchoice store, so its rewards are in memory + blkRewards, ok := a.forkchoiceStore.BlockRewards(root) + if !ok { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "block not found") + } + return newBeaconResponse(blockRewardsResponse{ + ProposerIndex: blk.Header.ProposerIndex, + Attestations: blkRewards.Attestations, + ProposerSlashings: blkRewards.ProposerSlashings, + AttesterSlashings: blkRewards.AttesterSlashings, + SyncAggregate: blkRewards.SyncAggregate, + Total: blkRewards.Attestations + blkRewards.ProposerSlashings + blkRewards.AttesterSlashings + blkRewards.SyncAggregate, + }).withFinalized(isFinalized), nil + }
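+ // Historical fallback: blocks older than the forkchoice window are served from the + // reconstructed per-slot data persisted in the historical states tables + // (state_accessors.ReadSlotData below); the total is the same sum of the four reward components.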
+ slotData, err := state_accessors.ReadSlotData(tx, slot) + if err != nil { + return nil, err + } + if slotData == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "could not read historical block rewards; the node may not be an archive node or is still processing historical states") + } + return newBeaconResponse(blockRewardsResponse{ + ProposerIndex: blk.Header.ProposerIndex, + Attestations: slotData.AttestationsRewards, + ProposerSlashings: slotData.ProposerSlashings, + AttesterSlashings: slotData.AttesterSlashings, + SyncAggregate: slotData.SyncAggregateRewards, + Total: slotData.AttestationsRewards + slotData.ProposerSlashings + slotData.AttesterSlashings + slotData.SyncAggregateRewards, + }).withFinalized(isFinalized), nil +} + +type syncCommitteeReward struct { + ValidatorIndex uint64 `json:"validator_index,string"` + Reward int64 `json:"reward,string"` +} + +func (a *ApiHandler) getSyncCommitteesRewards(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { + ctx := r.Context() + + tx, err := a.indiciesDB.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + // Retrieve all the request data ------------------------------------------- + req := []string{} + // read the entire body + jsonBytes, err := io.ReadAll(r.Body) + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + // parse json body request + if len(jsonBytes) > 0 { + if err := json.Unmarshal(jsonBytes, &req); err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + } + filterIndicies, err := parseQueryValidatorIndicies(tx, req) + if err != nil { + return nil, err + } + + blockId, err := blockIdFromRequest(r) + if err != nil { + return nil, err + } + root, err := a.rootFromBlockId(ctx, tx, blockId) + if err != nil { + return nil, err + } + blk, err := a.blockReader.ReadBlockByRoot(ctx, tx, root) + if err != nil { + return nil, err + } + if blk == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "block not found") + } + version := a.beaconChainCfg.GetCurrentStateVersion(blk.Block.Slot / a.beaconChainCfg.SlotsPerEpoch) + if version < clparams.AltairVersion { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "sync committee rewards not available before Altair fork") + } + // retrieve the state we need ----------------------------------------------- + // We need: + // - sync committee of the block + // - total active balance of the block + canonicalBlockRoot, err := beacon_indicies.ReadCanonicalBlockRoot(tx, blk.Block.Slot) + if err != nil { + return nil, err + } + + isCanonical := canonicalBlockRoot == root + + isFinalized := blk.Block.Slot <= a.forkchoiceStore.FinalizedSlot() + var ( + syncCommittee *solid.SyncCommittee + totalActiveBalance uint64 + ) + if isFinalized { + if !isCanonical { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "non-canonical finalized block not found") + } + epochData, err := state_accessors.ReadEpochData(tx, blk.Block.Slot) + if err != nil { + return nil, err + } + if epochData == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "could not read historical sync committee rewards; the node may not be an archive node or is still processing historical states") + } + totalActiveBalance = epochData.TotalActiveBalance + syncCommittee, err = state_accessors.ReadCurrentSyncCommittee(tx, a.beaconChainCfg.RoundSlotToSyncCommitteePeriod(blk.Block.Slot)) + if err != nil { + return nil, err + } + if syncCommittee == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "could not read historical sync committee; the node may not be an archive node or is still processing historical states") + } + } else { + var ok bool + syncCommittee, _, ok = a.forkchoiceStore.GetSyncCommittees(root) + if !ok { + return nil,
beaconhttp.NewEndpointError(http.StatusNotFound, "non-finalized sync committee not found") + } + totalActiveBalance, ok = a.forkchoiceStore.TotalActiveBalance(root) + if !ok { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "non-finalized total active balance not found") + } + } + committee := syncCommittee.GetCommittee() + rewards := make([]syncCommitteeReward, 0, len(committee)) + + syncAggregate := blk.Block.Body.SyncAggregate + + filterIndiciesSet := make(map[uint64]struct{}) + for _, v := range filterIndicies { + filterIndiciesSet[v] = struct{}{} + } + // validator index -> accumulated rewards + accumulatedRewards := map[uint64]int64{} + for _, idx := range filterIndicies { + accumulatedRewards[idx] = 0 + } + partecipantReward := int64(a.syncPartecipantReward(totalActiveBalance)) + + for committeeIdx, v := range committee { + idx, ok, err := state_accessors.ReadValidatorIndexByPublicKey(tx, v) + if err != nil { + return nil, err + } + if !ok { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "sync committee public key not found") + } + if len(filterIndiciesSet) > 0 { + if _, ok := filterIndiciesSet[idx]; !ok { + continue + } + } + if syncAggregate.IsSet(uint64(committeeIdx)) { + accumulatedRewards[idx] += partecipantReward + continue + } + accumulatedRewards[idx] -= partecipantReward + } + for idx, reward := range accumulatedRewards { + rewards = append(rewards, syncCommitteeReward{ + ValidatorIndex: idx, + Reward: reward, + }) + } + sort.Slice(rewards, func(i, j int) bool { + return rewards[i].ValidatorIndex < rewards[j].ValidatorIndex + }) + return newBeaconResponse(rewards).withFinalized(isFinalized), nil +} + +func (a *ApiHandler) syncPartecipantReward(activeBalance uint64) uint64 { + activeBalanceSqrt := utils.IntegerSquareRoot(activeBalance) + totalActiveIncrements := activeBalance / a.beaconChainCfg.EffectiveBalanceIncrement + baseRewardPerInc := a.beaconChainCfg.EffectiveBalanceIncrement * a.beaconChainCfg.BaseRewardFactor / activeBalanceSqrt + totalBaseRewards := baseRewardPerInc * totalActiveIncrements + maxParticipantRewards := totalBaseRewards * a.beaconChainCfg.SyncRewardWeight / a.beaconChainCfg.WeightDenominator / a.beaconChainCfg.SlotsPerEpoch + return maxParticipantRewards / a.beaconChainCfg.SyncCommitteeSize +}
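+ +// Worked example for syncPartecipantReward, assuming mainnet constants (not set by this change: +// EffectiveBalanceIncrement = 1e9 Gwei, BaseRewardFactor = 64, SyncRewardWeight = 2, +// WeightDenominator = 64, SlotsPerEpoch = 32, SyncCommitteeSize = 512) and 256 validators at +// 32 ETH each, i.e. activeBalance = 8_192_000_000_000 Gwei: +// sqrt(activeBalance) ~ 2_862_167, so baseRewardPerInc = 64e9 / 2_862_167 ~ 22_360; +// totalBaseRewards = 22_360 * 8_192 = 183_173_120; +// maxParticipantRewards = 183_173_120 * 2 / 64 / 32 = 178_880; +// per-member reward = 178_880 / 512 ~ 349 Gwei per slot.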
`{"data":{"proposer_index":"203","attestations":"332205","proposer_slashings":"0","attester_slashings":"0","sync_aggregate":"0","total":"332205"},"finalized":true,"execution_optimistic":false}` + "\n", + }, + { + blockID: "0x" + common.Bytes2Hex(genesisVal[:]), + code: http.StatusOK, + expectedResp: `{"data":{"proposer_index":"98","attestations":"332205","proposer_slashings":"0","attester_slashings":"0","sync_aggregate":"0","total":"332205"},"finalized":true,"execution_optimistic":false}` + "\n", + }, + { + blockID: "0x" + common.Bytes2Hex(make([]byte, 32)), + code: http.StatusNotFound, + }, + } + + for _, c := range cases { + t.Run(c.blockID, func(t *testing.T) { + server := httptest.NewServer(handler.mux) + defer server.Close() + // Query the block in the handler with /eth/v2/beacon/blocks/{block_id} + resp, err := http.Get(server.URL + "/eth/v1/beacon/rewards/blocks/" + c.blockID) + require.NoError(t, err) + defer resp.Body.Close() + require.Equal(t, c.code, resp.StatusCode) + if resp.StatusCode != http.StatusOK { + return + } + + // unmarshal the json + out, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, c.expectedResp, string(out)) + }) + } +} + +func TestPostSyncCommitteeRewards(t *testing.T) { + _, blocks, _, _, _, handler, _, _, fcu := setupTestingHandler(t, clparams.BellatrixVersion) + var err error + fcu.HeadVal, err = blocks[len(blocks)-1].Block.HashSSZ() + require.NoError(t, err) + + fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot + fcu.FinalizedSlotVal = math.MaxInt64 + + fcu.JustifiedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, fcu.HeadSlotVal/32) + fcu.FinalizedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, 99999999) + + cases := []struct { + name string + blockId string + code int + request string + expected string + }{ + { + name: "all validators", + blockId: "0x" + common.Bytes2Hex(fcu.HeadVal[:]), + code: http.StatusOK, + expected: 
`{"data":[{"validator_index":"0","reward":"-698"},{"validator_index":"1","reward":"-698"},{"validator_index":"2","reward":"-698"},{"validator_index":"3","reward":"-698"},{"validator_index":"4","reward":"-698"},{"validator_index":"5","reward":"-698"},{"validator_index":"6","reward":"-698"},{"validator_index":"7","reward":"-698"},{"validator_index":"8","reward":"-698"},{"validator_index":"9","reward":"-698"},{"validator_index":"10","reward":"-698"},{"validator_index":"11","reward":"-698"},{"validator_index":"12","reward":"-698"},{"validator_index":"13","reward":"-698"},{"validator_index":"14","reward":"-698"},{"validator_index":"15","reward":"-698"},{"validator_index":"16","reward":"-698"},{"validator_index":"17","reward":"-698"},{"validator_index":"18","reward":"-698"},{"validator_index":"19","reward":"-698"},{"validator_index":"20","reward":"-698"},{"validator_index":"21","reward":"-698"},{"validator_index":"22","reward":"-698"},{"validator_index":"23","reward":"-698"},{"validator_index":"24","reward":"-698"},{"validator_index":"25","reward":"-698"},{"validator_index":"26","reward":"-698"},{"validator_index":"27","reward":"-698"},{"validator_index":"28","reward":"-698"},{"validator_index":"29","reward":"-698"},{"validator_index":"30","reward":"-698"},{"validator_index":"31","reward":"-698"},{"validator_index":"32","reward":"-698"},{"validator_index":"33","reward":"-698"},{"validator_index":"34","reward":"-698"},{"validator_index":"35","reward":"-698"},{"validator_index":"36","reward":"-698"},{"validator_index":"37","reward":"-698"},{"validator_index":"38","reward":"-698"},{"validator_index":"39","reward":"-698"},{"validator_index":"40","reward":"-698"},{"validator_index":"41","reward":"-698"},{"validator_index":"42","reward":"-698"},{"validator_index":"43","reward":"-698"},{"validator_index":"44","reward":"-698"},{"validator_index":"45","reward":"-698"},{"validator_index":"46","reward":"-698"},{"validator_index":"47","reward":"-698"},{"validator_index":"48","reward":"-698"},{"validator_index":"49","reward":"-698"},{"validator_index":"50","reward":"-698"},{"validator_index":"51","reward":"-698"},{"validator_index":"52","reward":"-698"},{"validator_index":"53","reward":"-698"},{"validator_index":"54","reward":"-698"},{"validator_index":"55","reward":"-698"},{"validator_index":"56","reward":"-698"},{"validator_index":"57","reward":"-698"},{"validator_index":"58","reward":"-698"},{"validator_index":"59","reward":"-698"},{"validator_index":"60","reward":"-698"},{"validator_index":"61","reward":"-698"},{"validator_index":"62","reward":"-698"},{"validator_index":"63","reward":"-698"},{"validator_index":"64","reward":"-698"},{"validator_index":"65","reward":"-698"},{"validator_index":"66","reward":"-698"},{"validator_index":"67","reward":"-698"},{"validator_index":"68","reward":"-698"},{"validator_index":"69","reward":"-698"},{"validator_index":"70","reward":"-698"},{"validator_index":"71","reward":"-698"},{"validator_index":"72","reward":"-698"},{"validator_index":"73","reward":"-698"},{"validator_index":"74","reward":"-698"},{"validator_index":"75","reward":"-698"},{"validator_index":"76","reward":"-698"},{"validator_index":"77","reward":"-698"},{"validator_index":"78","reward":"-698"},{"validator_index":"79","reward":"-698"},{"validator_index":"80","reward":"-698"},{"validator_index":"81","reward":"-698"},{"validator_index":"82","reward":"-698"},{"validator_index":"83","reward":"-698"},{"validator_index":"84","reward":"-698"},{"validator_index":"85","reward":"-698"},{"validator_index":"86","rewa
rd":"-698"},{"validator_index":"87","reward":"-698"},{"validator_index":"88","reward":"-698"},{"validator_index":"89","reward":"-698"},{"validator_index":"90","reward":"-698"},{"validator_index":"91","reward":"-698"},{"validator_index":"92","reward":"-698"},{"validator_index":"93","reward":"-698"},{"validator_index":"94","reward":"-698"},{"validator_index":"95","reward":"-698"},{"validator_index":"96","reward":"-698"},{"validator_index":"97","reward":"-698"},{"validator_index":"98","reward":"-698"},{"validator_index":"99","reward":"-698"},{"validator_index":"100","reward":"-698"},{"validator_index":"101","reward":"-698"},{"validator_index":"102","reward":"-698"},{"validator_index":"103","reward":"-698"},{"validator_index":"104","reward":"-698"},{"validator_index":"105","reward":"-698"},{"validator_index":"106","reward":"-698"},{"validator_index":"107","reward":"-698"},{"validator_index":"108","reward":"-698"},{"validator_index":"109","reward":"-698"},{"validator_index":"110","reward":"-698"},{"validator_index":"111","reward":"-698"},{"validator_index":"112","reward":"-698"},{"validator_index":"113","reward":"-698"},{"validator_index":"114","reward":"-698"},{"validator_index":"115","reward":"-698"},{"validator_index":"116","reward":"-698"},{"validator_index":"117","reward":"-698"},{"validator_index":"118","reward":"-698"},{"validator_index":"119","reward":"-698"},{"validator_index":"120","reward":"-698"},{"validator_index":"121","reward":"-698"},{"validator_index":"122","reward":"-698"},{"validator_index":"123","reward":"-698"},{"validator_index":"124","reward":"-698"},{"validator_index":"125","reward":"-698"},{"validator_index":"126","reward":"-698"},{"validator_index":"127","reward":"-698"},{"validator_index":"128","reward":"-698"},{"validator_index":"129","reward":"-698"},{"validator_index":"130","reward":"-698"},{"validator_index":"131","reward":"-698"},{"validator_index":"132","reward":"-698"},{"validator_index":"133","reward":"-698"},{"validator_index":"134","reward":"-698"},{"validator_index":"135","reward":"-698"},{"validator_index":"136","reward":"-698"},{"validator_index":"137","reward":"-698"},{"validator_index":"138","reward":"-698"},{"validator_index":"139","reward":"-698"},{"validator_index":"140","reward":"-698"},{"validator_index":"141","reward":"-698"},{"validator_index":"142","reward":"-698"},{"validator_index":"143","reward":"-698"},{"validator_index":"144","reward":"-698"},{"validator_index":"145","reward":"-698"},{"validator_index":"146","reward":"-698"},{"validator_index":"147","reward":"-698"},{"validator_index":"148","reward":"-698"},{"validator_index":"149","reward":"-698"},{"validator_index":"150","reward":"-698"},{"validator_index":"151","reward":"-698"},{"validator_index":"152","reward":"-698"},{"validator_index":"153","reward":"-698"},{"validator_index":"154","reward":"-698"},{"validator_index":"155","reward":"-698"},{"validator_index":"156","reward":"-698"},{"validator_index":"157","reward":"-698"},{"validator_index":"158","reward":"-698"},{"validator_index":"159","reward":"-698"},{"validator_index":"160","reward":"-698"},{"validator_index":"161","reward":"-698"},{"validator_index":"162","reward":"-698"},{"validator_index":"163","reward":"-698"},{"validator_index":"164","reward":"-698"},{"validator_index":"165","reward":"-698"},{"validator_index":"166","reward":"-698"},{"validator_index":"167","reward":"-698"},{"validator_index":"168","reward":"-698"},{"validator_index":"169","reward":"-698"},{"validator_index":"170","reward":"-698"},{"validator_index":"171","re
ward":"-698"},{"validator_index":"172","reward":"-698"},{"validator_index":"173","reward":"-698"},{"validator_index":"174","reward":"-698"},{"validator_index":"175","reward":"-698"},{"validator_index":"176","reward":"-698"},{"validator_index":"177","reward":"-698"},{"validator_index":"178","reward":"-698"},{"validator_index":"179","reward":"-698"},{"validator_index":"180","reward":"-698"},{"validator_index":"181","reward":"-698"},{"validator_index":"182","reward":"-698"},{"validator_index":"183","reward":"-698"},{"validator_index":"184","reward":"-698"},{"validator_index":"185","reward":"-698"},{"validator_index":"186","reward":"-698"},{"validator_index":"187","reward":"-698"},{"validator_index":"188","reward":"-698"},{"validator_index":"189","reward":"-698"},{"validator_index":"190","reward":"-698"},{"validator_index":"191","reward":"-698"},{"validator_index":"192","reward":"-698"},{"validator_index":"193","reward":"-698"},{"validator_index":"194","reward":"-698"},{"validator_index":"195","reward":"-698"},{"validator_index":"196","reward":"-698"},{"validator_index":"197","reward":"-698"},{"validator_index":"198","reward":"-698"},{"validator_index":"199","reward":"-698"},{"validator_index":"200","reward":"-698"},{"validator_index":"201","reward":"-698"},{"validator_index":"202","reward":"-698"},{"validator_index":"203","reward":"-698"},{"validator_index":"204","reward":"-698"},{"validator_index":"205","reward":"-698"},{"validator_index":"206","reward":"-698"},{"validator_index":"207","reward":"-698"},{"validator_index":"208","reward":"-698"},{"validator_index":"209","reward":"-698"},{"validator_index":"210","reward":"-698"},{"validator_index":"211","reward":"-698"},{"validator_index":"212","reward":"-698"},{"validator_index":"213","reward":"-698"},{"validator_index":"214","reward":"-698"},{"validator_index":"215","reward":"-698"},{"validator_index":"216","reward":"-698"},{"validator_index":"217","reward":"-698"},{"validator_index":"218","reward":"-698"},{"validator_index":"219","reward":"-698"},{"validator_index":"220","reward":"-698"},{"validator_index":"221","reward":"-698"},{"validator_index":"222","reward":"-698"},{"validator_index":"223","reward":"-698"},{"validator_index":"224","reward":"-698"},{"validator_index":"225","reward":"-698"},{"validator_index":"226","reward":"-698"},{"validator_index":"227","reward":"-698"},{"validator_index":"228","reward":"-698"},{"validator_index":"229","reward":"-698"},{"validator_index":"230","reward":"-698"},{"validator_index":"231","reward":"-698"},{"validator_index":"232","reward":"-698"},{"validator_index":"233","reward":"-698"},{"validator_index":"234","reward":"-698"},{"validator_index":"235","reward":"-698"},{"validator_index":"236","reward":"-698"},{"validator_index":"237","reward":"-698"},{"validator_index":"238","reward":"-698"},{"validator_index":"239","reward":"-698"},{"validator_index":"240","reward":"-698"},{"validator_index":"241","reward":"-698"},{"validator_index":"242","reward":"-698"},{"validator_index":"243","reward":"-698"},{"validator_index":"244","reward":"-698"},{"validator_index":"245","reward":"-698"},{"validator_index":"246","reward":"-698"},{"validator_index":"247","reward":"-698"},{"validator_index":"248","reward":"-698"},{"validator_index":"249","reward":"-698"},{"validator_index":"250","reward":"-698"},{"validator_index":"251","reward":"-698"},{"validator_index":"252","reward":"-698"},{"validator_index":"253","reward":"-698"},{"validator_index":"254","reward":"-698"},{"validator_index":"255","reward":"-698"}],"finalized":
true,"execution_optimistic":false}` + "\n", + }, + { + blockId: "0x" + common.Bytes2Hex(make([]byte, 32)), + code: http.StatusNotFound, + }, + { + name: "2 validators", + blockId: "0x" + common.Bytes2Hex(fcu.HeadVal[:]), + request: `["1","4"]`, + code: http.StatusOK, + expected: `{"data":[{"validator_index":"1","reward":"-698"},{"validator_index":"4","reward":"-698"}],"finalized":true,"execution_optimistic":false}` + "\n", // Add your expected response + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + server := httptest.NewServer(handler.mux) + defer server.Close() + url := fmt.Sprintf("%s/eth/v1/beacon/rewards/sync_committee/%s", server.URL, c.blockId) + + // Create a request + req, err := http.NewRequest("POST", url, strings.NewReader(c.request)) + require.NoError(t, err) + req.Header.Set("Content-Type", "application/json") + + // Perform the request + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + + // Check status code + require.Equal(t, c.code, resp.StatusCode) + + if resp.StatusCode != http.StatusOK { + return + } + + // Read the response body + out, err := io.ReadAll(resp.Body) + require.NoError(t, err) + if string(out) != c.expected { + panic(string(out)) + } + // Compare the response with the expected result + require.Equal(t, c.expected, string(out)) + }) + } +} diff --git a/cl/beacon/handler/states.go b/cl/beacon/handler/states.go index 0d0c75d9573..335296f349d 100644 --- a/cl/beacon/handler/states.go +++ b/cl/beacon/handler/states.go @@ -4,58 +4,60 @@ import ( "context" "fmt" "net/http" + "strconv" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" "github.com/ledgerwatch/erigon/cl/clparams" "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies" + state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state" "github.com/ledgerwatch/erigon/cl/utils" ) -func (a *ApiHandler) rootFromStateId(ctx context.Context, tx kv.Tx, stateId *segmentID) (root libcommon.Hash, httpStatusErr int, err error) { - var blockRoot libcommon.Hash +func (a *ApiHandler) blockRootFromStateId(ctx context.Context, tx kv.Tx, stateId *segmentID) (root libcommon.Hash, httpStatusErr int, err error) { switch { case stateId.head(): - blockRoot, _, err = a.forkchoiceStore.GetHead() + root, _, err = a.forkchoiceStore.GetHead() if err != nil { return libcommon.Hash{}, http.StatusInternalServerError, err } + return case stateId.finalized(): - blockRoot = a.forkchoiceStore.FinalizedCheckpoint().BlockRoot() + root = a.forkchoiceStore.FinalizedCheckpoint().BlockRoot() + return case stateId.justified(): - blockRoot = a.forkchoiceStore.JustifiedCheckpoint().BlockRoot() + root = a.forkchoiceStore.JustifiedCheckpoint().BlockRoot() + return case stateId.genesis(): - blockRoot, err = beacon_indicies.ReadCanonicalBlockRoot(tx, 0) + root, err = beacon_indicies.ReadCanonicalBlockRoot(tx, 0) if err != nil { return libcommon.Hash{}, http.StatusInternalServerError, err } - if blockRoot == (libcommon.Hash{}) { + if root == (libcommon.Hash{}) { return libcommon.Hash{}, http.StatusNotFound, fmt.Errorf("genesis block not found") } + return case stateId.getSlot() != nil: - blockRoot, err = beacon_indicies.ReadCanonicalBlockRoot(tx, *stateId.getSlot()) + root, err = beacon_indicies.ReadCanonicalBlockRoot(tx, *stateId.getSlot()) if err != nil { return libcommon.Hash{}, 
http.StatusInternalServerError, err } - if blockRoot == (libcommon.Hash{}) { + if root == (libcommon.Hash{}) { return libcommon.Hash{}, http.StatusNotFound, fmt.Errorf("block not found %d", *stateId.getSlot()) } + return case stateId.getRoot() != nil: - root = *stateId.getRoot() + root, err = beacon_indicies.ReadBlockRootByStateRoot(tx, *stateId.getRoot()) + if err != nil { + return libcommon.Hash{}, http.StatusInternalServerError, err + } return default: return libcommon.Hash{}, http.StatusInternalServerError, fmt.Errorf("cannot parse state id") } - root, err = beacon_indicies.ReadStateRootByBlockRoot(ctx, tx, blockRoot) - if err != nil { - return libcommon.Hash{}, http.StatusInternalServerError, err - } - if root == (libcommon.Hash{}) { - return libcommon.Hash{}, http.StatusNotFound, fmt.Errorf("block not found") - } - return } type rootResponse struct { @@ -69,7 +71,7 @@ func previousVersion(v clparams.StateVersion) clparams.StateVersion { return v - 1 } -func (a *ApiHandler) getStateFork(r *http.Request) (*beaconResponse, error) { +func (a *ApiHandler) getStateFork(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { ctx := r.Context() tx, err := a.indiciesDB.BeginRo(ctx) @@ -82,7 +84,7 @@ func (a *ApiHandler) getStateFork(r *http.Request) (*beaconResponse, error) { if err != nil { return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) } - root, httpStatus, err := a.rootFromStateId(ctx, tx, blockId) + root, httpStatus, err := a.blockRootFromStateId(ctx, tx, blockId) if err != nil { return nil, beaconhttp.NewEndpointError(httpStatus, err.Error()) } @@ -92,7 +94,7 @@ func (a *ApiHandler) getStateFork(r *http.Request) (*beaconResponse, error) { return nil, err } if slot == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, err.Error()) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not read block slot: %x", root)) } epoch := *slot / a.beaconChainCfg.SlotsPerEpoch @@ -108,7 +110,7 @@ func (a *ApiHandler) getStateFork(r *http.Request) (*beaconResponse, error) { }), nil } -func (a *ApiHandler) getStateRoot(r *http.Request) (*beaconResponse, error) { +func (a *ApiHandler) getStateRoot(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { ctx := r.Context() tx, err := a.indiciesDB.BeginRo(ctx) @@ -121,7 +123,7 @@ func (a *ApiHandler) getStateRoot(r *http.Request) (*beaconResponse, error) { if err != nil { return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) } - root, httpStatus, err := a.rootFromStateId(ctx, tx, blockId) + root, httpStatus, err := a.blockRootFromStateId(ctx, tx, blockId) if err != nil { return nil, beaconhttp.NewEndpointError(httpStatus, err.Error()) } @@ -146,10 +148,11 @@ func (a *ApiHandler) getStateRoot(r *http.Request) (*beaconResponse, error) { return nil, err } - return newBeaconResponse(&rootResponse{Root: stateRoot}).withFinalized(canonicalRoot == root && *slot <= a.forkchoiceStore.FinalizedSlot()), nil + return newBeaconResponse(&rootResponse{Root: stateRoot}). 
+ withFinalized(canonicalRoot == root && *slot <= a.forkchoiceStore.FinalizedSlot()), nil } -func (a *ApiHandler) getFullState(r *http.Request) (*beaconResponse, error) { +func (a *ApiHandler) getFullState(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { ctx := r.Context() tx, err := a.indiciesDB.BeginRo(ctx) @@ -163,20 +166,254 @@ func (a *ApiHandler) getFullState(r *http.Request) (*beaconResponse, error) { return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) } - root, httpStatus, err := a.rootFromStateId(ctx, tx, blockId) + blockRoot, httpStatus, err := a.blockRootFromStateId(ctx, tx, blockId) if err != nil { return nil, beaconhttp.NewEndpointError(httpStatus, err.Error()) } - blockRoot, err := beacon_indicies.ReadBlockRootByStateRoot(tx, root) + state, err := a.forkchoiceStore.GetStateAtBlockRoot(blockRoot, true) + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + if state == nil { + slot, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, blockRoot) + if err != nil { + return nil, err + } + // Sanity checks slot and canonical data. + if slot == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not read block slot: %x", blockRoot)) + } + canonicalRoot, err := beacon_indicies.ReadCanonicalBlockRoot(tx, *slot) + if err != nil { + return nil, err + } + if canonicalRoot != blockRoot { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not read state: %x", blockRoot)) + } + state, err := a.stateReader.ReadHistoricalState(ctx, tx, *slot) + if err != nil { + return nil, err + } + if state == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not read state: %x", blockRoot)) + } + return newBeaconResponse(state).withFinalized(true).withVersion(state.Version()), nil + } + + return newBeaconResponse(state).withFinalized(false).withVersion(state.Version()), nil +} + +type finalityCheckpointsResponse struct { + FinalizedCheckpoint solid.Checkpoint `json:"finalized_checkpoint"` + CurrentJustifiedCheckpoint solid.Checkpoint `json:"current_justified_checkpoint"` + PreviousJustifiedCheckpoint solid.Checkpoint `json:"previous_justified_checkpoint"` +} + +func (a *ApiHandler) getFinalityCheckpoints(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { + ctx := r.Context() + + tx, err := a.indiciesDB.BeginRo(ctx) if err != nil { return nil, err } + defer tx.Rollback() + blockId, err := stateIdFromRequest(r) + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } - state, err := a.forkchoiceStore.GetStateAtBlockRoot(blockRoot, true) + blockRoot, httpStatus, err := a.blockRootFromStateId(ctx, tx, blockId) + if err != nil { + return nil, beaconhttp.NewEndpointError(httpStatus, err.Error()) + } + + slot, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, blockRoot) + if err != nil { + return nil, err + } + if slot == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not read block slot: %x", blockRoot)) + } + + ok, finalizedCheckpoint, currentJustifiedCheckpoint, previousJustifiedCheckpoint := a.forkchoiceStore.GetFinalityCheckpoints(blockRoot) if err != nil { return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) } + if !ok { + currentJustifiedCheckpoint, previousJustifiedCheckpoint, finalizedCheckpoint, err = state_accessors.ReadCheckpoints(tx, a.beaconChainCfg.RoundSlotToEpoch(*slot)) + if err != 
nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + if currentJustifiedCheckpoint == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not read checkpoints: %x, %d", blockRoot, a.beaconChainCfg.RoundSlotToEpoch(*slot))) + } + } + version := a.beaconChainCfg.GetCurrentStateVersion(*slot / a.beaconChainCfg.SlotsPerEpoch) + canonicalRoot, err := beacon_indicies.ReadCanonicalBlockRoot(tx, *slot) + if err != nil { + return nil, err + } - return newBeaconResponse(state).withFinalized(false).withVersion(state.Version()), nil + return newBeaconResponse(finalityCheckpointsResponse{ + FinalizedCheckpoint: finalizedCheckpoint, + CurrentJustifiedCheckpoint: currentJustifiedCheckpoint, + PreviousJustifiedCheckpoint: previousJustifiedCheckpoint, + }).withFinalized(canonicalRoot == blockRoot && *slot <= a.forkchoiceStore.FinalizedSlot()).withVersion(version), nil +} + +type syncCommitteesResponse struct { + Validators []string `json:"validators"` + ValidatorAggregates [][]string `json:"validator_aggregates"` +} + +func (a *ApiHandler) getSyncCommittees(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { + ctx := r.Context() + + tx, err := a.indiciesDB.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + blockId, err := stateIdFromRequest(r) + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + + blockRoot, httpStatus, err := a.blockRootFromStateId(ctx, tx, blockId) + if err != nil { + return nil, beaconhttp.NewEndpointError(httpStatus, err.Error()) + } + + slot, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, blockRoot) + if err != nil { + return nil, err + } + if slot == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not read block slot: %x", blockRoot)) + } + + currentSyncCommittee, nextSyncCommittee, ok := a.forkchoiceStore.GetSyncCommittees(blockRoot) + if !ok { + syncCommitteeSlot := a.beaconChainCfg.RoundSlotToSyncCommitteePeriod(*slot) + // Check the main database if it cannot be found in the forkchoice store + currentSyncCommittee, err = state_accessors.ReadCurrentSyncCommittee(tx, syncCommitteeSlot) + if err != nil { + return nil, err + } + nextSyncCommittee, err = state_accessors.ReadNextSyncCommittee(tx, syncCommitteeSlot) + if err != nil { + return nil, err + } + if currentSyncCommittee == nil || nextSyncCommittee == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not read sync committees: %x, %d", blockRoot, *slot)) + } + } + // Now fetch the data we need + statePeriod := a.beaconChainCfg.SyncCommitteePeriod(*slot) + queryEpoch, err := uint64FromQueryParams(r, "epoch") + if err != nil { + return nil, err + } + + committee := currentSyncCommittee.GetCommittee() + if queryEpoch != nil { + requestPeriod := a.beaconChainCfg.SyncCommitteePeriod(*queryEpoch * a.beaconChainCfg.SlotsPerEpoch) + if requestPeriod == statePeriod+1 { + committee = nextSyncCommittee.GetCommittee() + } else if requestPeriod != statePeriod { + return nil, fmt.Errorf("epoch is outside the sync committee period of the state") + } + } + // Lastly construct the response + validatorsPerSubcommittee := a.beaconChainCfg.SyncCommitteeSize / a.beaconChainCfg.SyncCommitteeSubnetCount + response := syncCommitteesResponse{ + Validators: make([]string, a.beaconChainCfg.SyncCommitteeSize), + ValidatorAggregates: make([][]string, a.beaconChainCfg.SyncCommitteeSubnetCount), + } + 
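+ // Illustrative note: the flat committee below is split into
+ // SyncCommitteeSubnetCount contiguous subcommittees of
+ // validatorsPerSubcommittee members each. With the mainnet presets
+ // (SyncCommitteeSize = 512, SyncCommitteeSubnetCount = 4), committee
+ // member i lands in ValidatorAggregates[i/128] at offset i%128.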
for i, publicKey := range committee { + // get the validator index of the committee + validatorIndex, ok, err := state_accessors.ReadValidatorIndexByPublicKey(tx, publicKey) + if err != nil { + return nil, err + } + if !ok { + return nil, fmt.Errorf("could not read validator index: %x", publicKey) + } + idx := strconv.FormatInt(int64(validatorIndex), 10) + response.Validators[i] = idx + // add the index to the subcommittee + subCommitteeIndex := uint64(i) / validatorsPerSubcommittee + if len(response.ValidatorAggregates[subCommitteeIndex]) == 0 { + response.ValidatorAggregates[subCommitteeIndex] = make([]string, validatorsPerSubcommittee) + } + response.ValidatorAggregates[subCommitteeIndex][uint64(i)%validatorsPerSubcommittee] = idx + } + canonicalRoot, err := beacon_indicies.ReadCanonicalBlockRoot(tx, *slot) + if err != nil { + return nil, err + } + + return newBeaconResponse(response).withFinalized(canonicalRoot == blockRoot && *slot <= a.forkchoiceStore.FinalizedSlot()), nil +} + +type randaoResponse struct { + Randao libcommon.Hash `json:"randao"` +} + +func (a *ApiHandler) getRandao(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { + ctx := r.Context() + + tx, err := a.indiciesDB.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + blockId, err := stateIdFromRequest(r) + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + + blockRoot, httpStatus, err := a.blockRootFromStateId(ctx, tx, blockId) + if err != nil { + return nil, beaconhttp.NewEndpointError(httpStatus, err.Error()) + } + + epochReq, err := uint64FromQueryParams(r, "epoch") + if err != nil { + return nil, err + } + slotPtr, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, blockRoot) + if err != nil { + return nil, err + } + if slotPtr == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not read block slot: %x", blockRoot)) + } + slot := *slotPtr + epoch := slot / a.beaconChainCfg.SlotsPerEpoch + if epochReq != nil { + epoch = *epochReq + } + randaoMixes := a.randaoMixesPool.Get().(solid.HashListSSZ) + defer a.randaoMixesPool.Put(randaoMixes) + + if a.forkchoiceStore.RandaoMixes(blockRoot, randaoMixes) { + mix := randaoMixes.Get(int(epoch % a.beaconChainCfg.EpochsPerHistoricalVector)) + return newBeaconResponse(randaoResponse{Randao: mix}).withFinalized(slot <= a.forkchoiceStore.FinalizedSlot()), nil + } + // check if the block is canonical + canonicalRoot, err := beacon_indicies.ReadCanonicalBlockRoot(tx, slot) + if err != nil { + return nil, err + } + if canonicalRoot != blockRoot { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not read randao: %x", blockRoot)) + } + mix, err := a.stateReader.ReadRandaoMixBySlotAndIndex(tx, slot, epoch%a.beaconChainCfg.EpochsPerHistoricalVector) + if err != nil { + return nil, err + } + return newBeaconResponse(randaoResponse{Randao: mix}).withFinalized(slot <= a.forkchoiceStore.FinalizedSlot()), nil } diff --git a/cl/beacon/handler/states_test.go b/cl/beacon/handler/states_test.go new file mode 100644 index 00000000000..eaa6d633a9e --- /dev/null +++ b/cl/beacon/handler/states_test.go @@ -0,0 +1,504 @@ +package handler + +import ( + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "strconv" + "testing" + + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/cl/phase1/core/state" + "github.com/ledgerwatch/erigon/common" + 
"github.com/stretchr/testify/require" +) + +func TestGetStateFork(t *testing.T) { + + // setupTestingHandler(t, clparams.Phase0Version) + _, blocks, _, _, postState, handler, _, _, fcu := setupTestingHandler(t, clparams.Phase0Version) + + postRoot, err := postState.HashSSZ() + require.NoError(t, err) + + fcu.HeadVal, err = blocks[len(blocks)-1].Block.HashSSZ() + require.NoError(t, err) + + fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot + + cases := []struct { + blockID string + code int + }{ + { + blockID: "0x" + common.Bytes2Hex(postRoot[:]), + code: http.StatusOK, + }, + { + blockID: "head", + code: http.StatusOK, + }, + { + blockID: "0x" + common.Bytes2Hex(make([]byte, 32)), + code: http.StatusNotFound, + }, + { + blockID: strconv.FormatInt(int64(postState.Slot()), 10), + code: http.StatusOK, + }, + } + + for _, c := range cases { + t.Run(c.blockID, func(t *testing.T) { + server := httptest.NewServer(handler.mux) + defer server.Close() + // Query the block in the handler with /eth/v2/beacon/blocks/{block_id} + resp, err := http.Get(server.URL + "/eth/v1/beacon/states/" + c.blockID + "/fork") + require.NoError(t, err) + defer resp.Body.Close() + require.Equal(t, c.code, resp.StatusCode) + if resp.StatusCode != http.StatusOK { + return + } + jsonVal := make(map[string]interface{}) + // unmarshal the json + require.NoError(t, json.NewDecoder(resp.Body).Decode(&jsonVal)) + data := jsonVal["data"].(map[string]interface{}) + require.Equal(t, data["current_version"], "0x00000000") + require.Equal(t, data["previous_version"], "0x00000000") + require.Equal(t, data["epoch"], "0") + }) + } +} + +func TestGetStateRoot(t *testing.T) { + + // setupTestingHandler(t, clparams.Phase0Version) + _, blocks, _, _, postState, handler, _, _, fcu := setupTestingHandler(t, clparams.Phase0Version) + + postRoot, err := postState.HashSSZ() + require.NoError(t, err) + + fcu.HeadVal, err = blocks[len(blocks)-1].Block.HashSSZ() + require.NoError(t, err) + + fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot + + fcu.FinalizedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, fcu.HeadSlotVal/32) + + cases := []struct { + blockID string + code int + }{ + { + blockID: "0x" + common.Bytes2Hex(postRoot[:]), + code: http.StatusOK, + }, + { + blockID: "finalized", + code: http.StatusOK, + }, + { + blockID: "0x" + common.Bytes2Hex(make([]byte, 32)), + code: http.StatusNotFound, + }, + { + blockID: strconv.FormatInt(int64(postState.Slot()), 10), + code: http.StatusOK, + }, + } + + for _, c := range cases { + t.Run(c.blockID, func(t *testing.T) { + server := httptest.NewServer(handler.mux) + defer server.Close() + // Query the block in the handler with /eth/v2/beacon/blocks/{block_id} + resp, err := http.Get(server.URL + "/eth/v1/beacon/states/" + c.blockID + "/root") + require.NoError(t, err) + defer resp.Body.Close() + require.Equal(t, c.code, resp.StatusCode) + if resp.StatusCode != http.StatusOK { + return + } + jsonVal := make(map[string]interface{}) + // unmarshal the json + require.NoError(t, json.NewDecoder(resp.Body).Decode(&jsonVal)) + data := jsonVal["data"].(map[string]interface{}) + require.Equal(t, data["root"], "0x"+common.Bytes2Hex(postRoot[:])) + }) + } +} + +func TestGetStateFullHistorical(t *testing.T) { + + // setupTestingHandler(t, clparams.Phase0Version) + _, blocks, _, _, postState, handler, _, _, fcu := setupTestingHandler(t, clparams.Phase0Version) + + postRoot, err := postState.HashSSZ() + require.NoError(t, err) + + fcu.HeadVal, err = blocks[len(blocks)-1].Block.HashSSZ() + 
require.NoError(t, err) + + fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot + + fcu.FinalizedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, fcu.HeadSlotVal/32) + + cases := []struct { + blockID string + code int + }{ + { + blockID: "0x" + common.Bytes2Hex(postRoot[:]), + code: http.StatusOK, + }, + { + blockID: "finalized", + code: http.StatusOK, + }, + { + blockID: "0x" + common.Bytes2Hex(make([]byte, 32)), + code: http.StatusNotFound, + }, + { + blockID: strconv.FormatInt(int64(postState.Slot()), 10), + code: http.StatusOK, + }, + } + + for _, c := range cases { + t.Run(c.blockID, func(t *testing.T) { + server := httptest.NewServer(handler.mux) + defer server.Close() + // Query the block in the handler with /eth/v2/beacon/states/{block_id} with content-type octet-stream + req, err := http.NewRequest("GET", server.URL+"/eth/v2/debug/beacon/states/"+c.blockID, nil) + require.NoError(t, err) + req.Header.Set("Accept", "application/octet-stream") + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + + defer resp.Body.Close() + require.Equal(t, c.code, resp.StatusCode) + if resp.StatusCode != http.StatusOK { + return + } + // read the all of the octect + out, err := io.ReadAll(resp.Body) + require.NoError(t, err) + other := state.New(&clparams.MainnetBeaconConfig) + require.NoError(t, other.DecodeSSZ(out, int(clparams.Phase0Version))) + + otherRoot, err := other.HashSSZ() + require.NoError(t, err) + require.Equal(t, postRoot, otherRoot) + }) + } +} + +func TestGetStateFullForkchoice(t *testing.T) { + + // setupTestingHandler(t, clparams.Phase0Version) + _, blocks, _, _, postState, handler, _, _, fcu := setupTestingHandler(t, clparams.Phase0Version) + + postRoot, err := postState.HashSSZ() + require.NoError(t, err) + + fcu.HeadVal, err = blocks[len(blocks)-1].Block.HashSSZ() + require.NoError(t, err) + + fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot + + fcu.FinalizedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, fcu.HeadSlotVal/32) + + fcu.StateAtBlockRootVal[fcu.HeadVal] = postState + + cases := []struct { + blockID string + code int + }{ + { + blockID: "0x" + common.Bytes2Hex(postRoot[:]), + code: http.StatusOK, + }, + { + blockID: "finalized", + code: http.StatusOK, + }, + { + blockID: "0x" + common.Bytes2Hex(make([]byte, 32)), + code: http.StatusNotFound, + }, + { + blockID: strconv.FormatInt(int64(postState.Slot()), 10), + code: http.StatusOK, + }, + } + + for _, c := range cases { + t.Run(c.blockID, func(t *testing.T) { + server := httptest.NewServer(handler.mux) + defer server.Close() + // Query the block in the handler with /eth/v2/beacon/states/{block_id} with content-type octet-stream + req, err := http.NewRequest("GET", server.URL+"/eth/v2/debug/beacon/states/"+c.blockID, nil) + require.NoError(t, err) + req.Header.Set("Accept", "application/octet-stream") + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + + defer resp.Body.Close() + require.Equal(t, c.code, resp.StatusCode) + if resp.StatusCode != http.StatusOK { + return + } + // read the all of the octect + out, err := io.ReadAll(resp.Body) + require.NoError(t, err) + other := state.New(&clparams.MainnetBeaconConfig) + require.NoError(t, other.DecodeSSZ(out, int(clparams.Phase0Version))) + + otherRoot, err := other.HashSSZ() + require.NoError(t, err) + require.Equal(t, postRoot, otherRoot) + }) + } +} + +func TestGetStateSyncCommittees(t *testing.T) { + + // setupTestingHandler(t, clparams.Phase0Version) + _, blocks, _, _, postState, handler, _, _, fcu 
:= setupTestingHandler(t, clparams.BellatrixVersion) + + postRoot, err := postState.HashSSZ() + require.NoError(t, err) + + fcu.HeadVal, err = blocks[len(blocks)-1].Block.HashSSZ() + require.NoError(t, err) + + fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot + + cSyncCommittee := postState.CurrentSyncCommittee().Copy() + nSyncCommittee := postState.NextSyncCommittee().Copy() + + fcu.GetSyncCommitteesVal[fcu.HeadVal] = [2]*solid.SyncCommittee{ + cSyncCommittee, + nSyncCommittee, + } + + fcu.JustifiedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, fcu.HeadSlotVal/32) + + cases := []struct { + blockID string + code int + }{ + { + blockID: "0x" + common.Bytes2Hex(postRoot[:]), + code: http.StatusOK, + }, + { + blockID: "justified", + code: http.StatusOK, + }, + { + blockID: "0x" + common.Bytes2Hex(make([]byte, 32)), + code: http.StatusNotFound, + }, + { + blockID: strconv.FormatInt(int64(postState.Slot()), 10), + code: http.StatusOK, + }, + } + expected := `{"data":{"validators":["109","134","145","89","181","81","159","168","34","251","3","205","213","202","99","121","80","149","18","65","201","227","116","69","100","74","160","198","16","131","0","73","210","122","209","217","97","237","136","98","229","248","176","95","150","171","238","191","200","220","33","219","126","9","214","124","56","86","169","208","125","85","25","88","13","190","153","183","96","165","180","90","164","104","240","123","118","196","163","222","231","127","241","77","68","32","62","79","44","58","14","187","151","243","139","142","174","106","228","102","223","31","120","5","43","255","179","66","119","170","60","152","167","194","4","112","156","233","254","203","1","55","53","19","92","21","28","42","141","162","146","57","23","45","158","93","212","38","2","206","246","225","195","189","47","193","224","242","76","138","84","140","111","51","135","113","41","133","207","30","82","175","161","6","249","83","234","155","244","177","108","252","94","143","173","8","154","75","50","49","39","36","182","101","48","12","172","87","250","59","24","157","215","218","72","185","71","7","253","114","230","226","110","46","166","91","130","20","137","117","132","204","221","52","197","188","11","232","67","115","245","26","35","103","186","37","27","235","64","40","70","239","236","211","61","29","216","199","63","54","78","105","184","15","10","147","247","22","144","107","128","17","178","148","129","192","109","134","145","89","181","81","159","168","34","251","3","205","213","202","99","121","80","149","18","65","201","227","116","69","100","74","160","198","16","131","0","73","210","122","209","217","97","237","136","98","229","248","176","95","150","171","238","191","200","220","33","219","126","9","214","124","56","86","169","208","125","85","25","88","13","190","153","183","96","165","180","90","164","104","240","123","118","196","163","222","231","127","241","77","68","32","62","79","44","58","14","187","151","243","139","142","174","106","228","102","223","31","120","5","43","255","179","66","119","170","60","152","167","194","4","112","156","233","254","203","1","55","53","19","92","21","28","42","141","162","146","57","23","45","158","93","212","38","2","206","246","225","195","189","47","193","224","242","76","138","84","140","111","51","135","113","41","133","207","30","82","175","161","6","249","83","234","155","244","177","108","252","94","143","173","8","154","75","50","49","39","36","182","101","48","12","172","87","250","59","24","157","215","218","72","185","71","7","253","114","230","2
26","110","46","166","91","130","20","137","117","132","204","221","52","197","188","11","232","67","115","245","26","35","103","186","37","27","235","64","40","70","239","236","211","61","29","216","199","63","54","78","105","184","15","10","147","247","22","144","107","128","17","178","148","129","192"],"validator_aggregates":[["109","134","145","89","181","81","159","168","34","251","3","205","213","202","99","121","80","149","18","65","201","227","116","69","100","74","160","198","16","131","0","73","210","122","209","217","97","237","136","98","229","248","176","95","150","171","238","191","200","220","33","219","126","9","214","124","56","86","169","208","125","85","25","88","13","190","153","183","96","165","180","90","164","104","240","123","118","196","163","222","231","127","241","77","68","32","62","79","44","58","14","187","151","243","139","142","174","106","228","102","223","31","120","5","43","255","179","66","119","170","60","152","167","194","4","112","156","233","254","203","1","55","53","19","92","21","28","42"],["141","162","146","57","23","45","158","93","212","38","2","206","246","225","195","189","47","193","224","242","76","138","84","140","111","51","135","113","41","133","207","30","82","175","161","6","249","83","234","155","244","177","108","252","94","143","173","8","154","75","50","49","39","36","182","101","48","12","172","87","250","59","24","157","215","218","72","185","71","7","253","114","230","226","110","46","166","91","130","20","137","117","132","204","221","52","197","188","11","232","67","115","245","26","35","103","186","37","27","235","64","40","70","239","236","211","61","29","216","199","63","54","78","105","184","15","10","147","247","22","144","107","128","17","178","148","129","192"],["109","134","145","89","181","81","159","168","34","251","3","205","213","202","99","121","80","149","18","65","201","227","116","69","100","74","160","198","16","131","0","73","210","122","209","217","97","237","136","98","229","248","176","95","150","171","238","191","200","220","33","219","126","9","214","124","56","86","169","208","125","85","25","88","13","190","153","183","96","165","180","90","164","104","240","123","118","196","163","222","231","127","241","77","68","32","62","79","44","58","14","187","151","243","139","142","174","106","228","102","223","31","120","5","43","255","179","66","119","170","60","152","167","194","4","112","156","233","254","203","1","55","53","19","92","21","28","42"],["141","162","146","57","23","45","158","93","212","38","2","206","246","225","195","189","47","193","224","242","76","138","84","140","111","51","135","113","41","133","207","30","82","175","161","6","249","83","234","155","244","177","108","252","94","143","173","8","154","75","50","49","39","36","182","101","48","12","172","87","250","59","24","157","215","218","72","185","71","7","253","114","230","226","110","46","166","91","130","20","137","117","132","204","221","52","197","188","11","232","67","115","245","26","35","103","186","37","27","235","64","40","70","239","236","211","61","29","216","199","63","54","78","105","184","15","10","147","247","22","144","107","128","17","178","148","129","192"]]},"finalized":false,"execution_optimistic":false}` + "\n" + for _, c := range cases { + t.Run(c.blockID, func(t *testing.T) { + server := httptest.NewServer(handler.mux) + defer server.Close() + resp, err := http.Get(server.URL + "/eth/v1/beacon/states/" + c.blockID + "/sync_committees") + require.NoError(t, err) + + defer resp.Body.Close() + require.Equal(t, 
c.code, resp.StatusCode) + if resp.StatusCode != http.StatusOK { + return + } + // read the all of the octect + out, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, string(out), expected) + }) + } +} + +func TestGetStateSyncCommitteesHistorical(t *testing.T) { + + // setupTestingHandler(t, clparams.Phase0Version) + _, blocks, _, _, postState, handler, _, _, fcu := setupTestingHandler(t, clparams.BellatrixVersion) + + postRoot, err := postState.HashSSZ() + require.NoError(t, err) + + fcu.HeadVal, err = blocks[len(blocks)-1].Block.HashSSZ() + require.NoError(t, err) + + fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot + + fcu.JustifiedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, fcu.HeadSlotVal/32) + + cases := []struct { + blockID string + code int + }{ + { + blockID: "0x" + common.Bytes2Hex(postRoot[:]), + code: http.StatusOK, + }, + { + blockID: "justified", + code: http.StatusOK, + }, + { + blockID: "0x" + common.Bytes2Hex(make([]byte, 32)), + code: http.StatusNotFound, + }, + { + blockID: strconv.FormatInt(int64(postState.Slot()), 10), + code: http.StatusOK, + }, + } + expected := `{"data":{"validators":["109","134","145","89","181","81","159","168","34","251","3","205","213","202","99","121","80","149","18","65","201","227","116","69","100","74","160","198","16","131","0","73","210","122","209","217","97","237","136","98","229","248","176","95","150","171","238","191","200","220","33","219","126","9","214","124","56","86","169","208","125","85","25","88","13","190","153","183","96","165","180","90","164","104","240","123","118","196","163","222","231","127","241","77","68","32","62","79","44","58","14","187","151","243","139","142","174","106","228","102","223","31","120","5","43","255","179","66","119","170","60","152","167","194","4","112","156","233","254","203","1","55","53","19","92","21","28","42","141","162","146","57","23","45","158","93","212","38","2","206","246","225","195","189","47","193","224","242","76","138","84","140","111","51","135","113","41","133","207","30","82","175","161","6","249","83","234","155","244","177","108","252","94","143","173","8","154","75","50","49","39","36","182","101","48","12","172","87","250","59","24","157","215","218","72","185","71","7","253","114","230","226","110","46","166","91","130","20","137","117","132","204","221","52","197","188","11","232","67","115","245","26","35","103","186","37","27","235","64","40","70","239","236","211","61","29","216","199","63","54","78","105","184","15","10","147","247","22","144","107","128","17","178","148","129","192","109","134","145","89","181","81","159","168","34","251","3","205","213","202","99","121","80","149","18","65","201","227","116","69","100","74","160","198","16","131","0","73","210","122","209","217","97","237","136","98","229","248","176","95","150","171","238","191","200","220","33","219","126","9","214","124","56","86","169","208","125","85","25","88","13","190","153","183","96","165","180","90","164","104","240","123","118","196","163","222","231","127","241","77","68","32","62","79","44","58","14","187","151","243","139","142","174","106","228","102","223","31","120","5","43","255","179","66","119","170","60","152","167","194","4","112","156","233","254","203","1","55","53","19","92","21","28","42","141","162","146","57","23","45","158","93","212","38","2","206","246","225","195","189","47","193","224","242","76","138","84","140","111","51","135","113","41","133","207","30","82","175","161","6","249","83","234","155","244","177","108","25
2","94","143","173","8","154","75","50","49","39","36","182","101","48","12","172","87","250","59","24","157","215","218","72","185","71","7","253","114","230","226","110","46","166","91","130","20","137","117","132","204","221","52","197","188","11","232","67","115","245","26","35","103","186","37","27","235","64","40","70","239","236","211","61","29","216","199","63","54","78","105","184","15","10","147","247","22","144","107","128","17","178","148","129","192"],"validator_aggregates":[["109","134","145","89","181","81","159","168","34","251","3","205","213","202","99","121","80","149","18","65","201","227","116","69","100","74","160","198","16","131","0","73","210","122","209","217","97","237","136","98","229","248","176","95","150","171","238","191","200","220","33","219","126","9","214","124","56","86","169","208","125","85","25","88","13","190","153","183","96","165","180","90","164","104","240","123","118","196","163","222","231","127","241","77","68","32","62","79","44","58","14","187","151","243","139","142","174","106","228","102","223","31","120","5","43","255","179","66","119","170","60","152","167","194","4","112","156","233","254","203","1","55","53","19","92","21","28","42"],["141","162","146","57","23","45","158","93","212","38","2","206","246","225","195","189","47","193","224","242","76","138","84","140","111","51","135","113","41","133","207","30","82","175","161","6","249","83","234","155","244","177","108","252","94","143","173","8","154","75","50","49","39","36","182","101","48","12","172","87","250","59","24","157","215","218","72","185","71","7","253","114","230","226","110","46","166","91","130","20","137","117","132","204","221","52","197","188","11","232","67","115","245","26","35","103","186","37","27","235","64","40","70","239","236","211","61","29","216","199","63","54","78","105","184","15","10","147","247","22","144","107","128","17","178","148","129","192"],["109","134","145","89","181","81","159","168","34","251","3","205","213","202","99","121","80","149","18","65","201","227","116","69","100","74","160","198","16","131","0","73","210","122","209","217","97","237","136","98","229","248","176","95","150","171","238","191","200","220","33","219","126","9","214","124","56","86","169","208","125","85","25","88","13","190","153","183","96","165","180","90","164","104","240","123","118","196","163","222","231","127","241","77","68","32","62","79","44","58","14","187","151","243","139","142","174","106","228","102","223","31","120","5","43","255","179","66","119","170","60","152","167","194","4","112","156","233","254","203","1","55","53","19","92","21","28","42"],["141","162","146","57","23","45","158","93","212","38","2","206","246","225","195","189","47","193","224","242","76","138","84","140","111","51","135","113","41","133","207","30","82","175","161","6","249","83","234","155","244","177","108","252","94","143","173","8","154","75","50","49","39","36","182","101","48","12","172","87","250","59","24","157","215","218","72","185","71","7","253","114","230","226","110","46","166","91","130","20","137","117","132","204","221","52","197","188","11","232","67","115","245","26","35","103","186","37","27","235","64","40","70","239","236","211","61","29","216","199","63","54","78","105","184","15","10","147","247","22","144","107","128","17","178","148","129","192"]]},"finalized":false,"execution_optimistic":false}` + "\n" + for _, c := range cases { + t.Run(c.blockID, func(t *testing.T) { + server := httptest.NewServer(handler.mux) + defer server.Close() + resp, 
err := http.Get(server.URL + "/eth/v1/beacon/states/" + c.blockID + "/sync_committees") + require.NoError(t, err) + + defer resp.Body.Close() + require.Equal(t, c.code, resp.StatusCode) + if resp.StatusCode != http.StatusOK { + return + } + // read the all of the octect + out, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, string(out), expected) + }) + } +} + +func TestGetStateFinalityCheckpoints(t *testing.T) { + + // setupTestingHandler(t, clparams.Phase0Version) + _, blocks, _, _, postState, handler, _, _, fcu := setupTestingHandler(t, clparams.BellatrixVersion) + + postRoot, err := postState.HashSSZ() + require.NoError(t, err) + + fcu.HeadVal, err = blocks[len(blocks)-1].Block.HashSSZ() + require.NoError(t, err) + + fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot + + fcu.JustifiedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, fcu.HeadSlotVal/32) + + cases := []struct { + blockID string + code int + }{ + { + blockID: "0x" + common.Bytes2Hex(postRoot[:]), + code: http.StatusOK, + }, + { + blockID: "justified", + code: http.StatusOK, + }, + { + blockID: "0x" + common.Bytes2Hex(make([]byte, 32)), + code: http.StatusNotFound, + }, + { + blockID: strconv.FormatInt(int64(postState.Slot()), 10), + code: http.StatusOK, + }, + } + expected := `{"data":{"finalized_checkpoint":{"epoch":"1","root":"0xde46b0f2ed5e72f0cec20246403b14c963ec995d7c2825f3532b0460c09d5693"},"current_justified_checkpoint":{"epoch":"3","root":"0xa6e47f164b1a3ca30ea3b2144bd14711de442f51e5b634750a12a1734e24c987"},"previous_justified_checkpoint":{"epoch":"2","root":"0x4c3ee7969e485696669498a88c17f70e6999c40603e2f4338869004392069063"}},"finalized":false,"version":2,"execution_optimistic":false}` + "\n" + for _, c := range cases { + t.Run(c.blockID, func(t *testing.T) { + server := httptest.NewServer(handler.mux) + defer server.Close() + resp, err := http.Get(server.URL + "/eth/v1/beacon/states/" + c.blockID + "/finality_checkpoints") + require.NoError(t, err) + + defer resp.Body.Close() + require.Equal(t, c.code, resp.StatusCode) + if resp.StatusCode != http.StatusOK { + return + } + // read the all of the octect + out, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, string(out), expected) + }) + } +} + +func TestGetRandao(t *testing.T) { + + // setupTestingHandler(t, clparams.Phase0Version) + _, blocks, _, _, postState, handler, _, _, fcu := setupTestingHandler(t, clparams.BellatrixVersion) + + postRoot, err := postState.HashSSZ() + require.NoError(t, err) + + fcu.HeadVal, err = blocks[len(blocks)-1].Block.HashSSZ() + require.NoError(t, err) + + fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot + + fcu.JustifiedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, fcu.HeadSlotVal/32) + + cases := []struct { + blockID string + code int + }{ + { + blockID: "0x" + common.Bytes2Hex(postRoot[:]), + code: http.StatusOK, + }, + { + blockID: "justified", + code: http.StatusOK, + }, + { + blockID: "0x" + common.Bytes2Hex(make([]byte, 32)), + code: http.StatusNotFound, + }, + { + blockID: strconv.FormatInt(int64(postState.Slot()), 10), + code: http.StatusOK, + }, + } + expected := `{"data":{"randao":"0xdeec617717272914bfd73e02ca1da113a83cf4cf33cd4939486509e2da4ccf4e"},"finalized":false,"execution_optimistic":false}` + "\n" + for _, c := range cases { + t.Run(c.blockID, func(t *testing.T) { + server := httptest.NewServer(handler.mux) + defer server.Close() + resp, err := http.Get(server.URL + "/eth/v1/beacon/states/" + c.blockID + "/randao") + 
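+ // Aside, not exercised by this test: getRandao also honours an optional
+ // epoch override, e.g. /eth/v1/beacon/states/head/randao?epoch=2, in
+ // which case the mix for that epoch is returned instead of the one for
+ // the resolved block's own epoch.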
require.NoError(t, err) + + defer resp.Body.Close() + require.Equal(t, c.code, resp.StatusCode) + if resp.StatusCode != http.StatusOK { + return + } + // read the all of the octect + out, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, string(out), expected) + }) + } +} diff --git a/cl/beacon/handler/utils_test.go b/cl/beacon/handler/utils_test.go new file mode 100644 index 00000000000..9dd231bd83a --- /dev/null +++ b/cl/beacon/handler/utils_test.go @@ -0,0 +1,66 @@ +package handler + +import ( + "context" + "testing" + + "github.com/ledgerwatch/erigon-lib/common/datadir" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/ledgerwatch/erigon/cl/antiquary" + "github.com/ledgerwatch/erigon/cl/antiquary/tests" + "github.com/ledgerwatch/erigon/cl/beacon/synced_data" + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/persistence" + state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state" + "github.com/ledgerwatch/erigon/cl/persistence/state/historical_states_reader" + "github.com/ledgerwatch/erigon/cl/phase1/core/state" + "github.com/ledgerwatch/erigon/cl/phase1/forkchoice" + "github.com/ledgerwatch/erigon/cl/pool" + "github.com/ledgerwatch/log/v3" + "github.com/spf13/afero" + "github.com/stretchr/testify/require" +) + +func setupTestingHandler(t *testing.T, v clparams.StateVersion) (db kv.RwDB, blocks []*cltypes.SignedBeaconBlock, f afero.Fs, preState, postState *state.CachingBeaconState, handler *ApiHandler, opPool pool.OperationsPool, syncedData *synced_data.SyncedDataManager, fcu *forkchoice.ForkChoiceStorageMock) { + bcfg := clparams.MainnetBeaconConfig + if v == clparams.Phase0Version { + blocks, preState, postState = tests.GetPhase0Random() + } else if v == clparams.BellatrixVersion { + bcfg.AltairForkEpoch = 1 + bcfg.BellatrixForkEpoch = 1 + blocks, preState, postState = tests.GetBellatrixRandom() + } else { + require.FailNow(t, "unknown state version") + } + fcu = forkchoice.NewForkChoiceStorageMock() + db = memdb.NewTestDB(t) + var reader *tests.MockBlockReader + reader, f = tests.LoadChain(blocks, postState, db, t) + + rawDB := persistence.NewAferoRawBlockSaver(f, &clparams.MainnetBeaconConfig) + bcfg.InitializeForkSchedule() + + ctx := context.Background() + vt := state_accessors.NewStaticValidatorTable() + a := antiquary.NewAntiquary(ctx, preState, vt, &bcfg, datadir.New("/tmp"), nil, db, nil, reader, nil, log.New(), true, true, f) + require.NoError(t, a.IncrementBeaconState(ctx, blocks[len(blocks)-1].Block.Slot+33)) + // historical states reader below + statesReader := historical_states_reader.NewHistoricalStatesReader(&bcfg, reader, vt, f, preState) + opPool = pool.NewOperationsPool(&bcfg) + syncedData = synced_data.NewSyncedDataManager(true, &bcfg) + gC := clparams.GenesisConfigs[clparams.MainnetNetwork] + handler = NewApiHandler( + &gC, + &bcfg, + rawDB, + db, + fcu, + opPool, + reader, + syncedData, + statesReader) + handler.init() + return +} diff --git a/cl/beacon/handler/validators.go b/cl/beacon/handler/validators.go new file mode 100644 index 00000000000..33bffbb40c0 --- /dev/null +++ b/cl/beacon/handler/validators.go @@ -0,0 +1,543 @@ +package handler + +import ( + "encoding/hex" + "fmt" + "math" + "net/http" + "strconv" + "strings" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" + 
"github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies" + state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state" + "github.com/ledgerwatch/erigon/cl/phase1/core/state" + "golang.org/x/exp/slices" +) + +type validatorStatus int + +var validatorJsonTemplate = "{\"index\":\"%d\",\"status\":\"%s\",\"balance\":\"%d\",\"validator\":{\"pubkey\":\"0x%x\",\"withdrawal_credentials\":\"0x%x\",\"effective_balance\":\"%d\",\"slashed\":%t,\"activation_eligibility_epoch\":\"%d\",\"activation_epoch\":\"%d\",\"exit_epoch\":\"%d\",\"withdrawable_epoch\":\"%d\"}}" + +const ( + validatorPendingInitialized validatorStatus = 1 //"pending_initialized" + validatorPendingQueued validatorStatus = 2 //"pending_queued" + validatorActiveOngoing validatorStatus = 3 //"active_ongoing" + validatorActiveExiting validatorStatus = 4 //"active_exiting" + validatorActiveSlashed validatorStatus = 5 //"active_slashed" + validatorExitedUnslashed validatorStatus = 6 //"exited_unslashed" + validatorExitedSlashed validatorStatus = 7 //"exited_slashed" + validatorWithdrawalPossible validatorStatus = 8 //"withdrawal_possible" + validatorWithdrawalDone validatorStatus = 9 //"withdrawal_done" + validatorActive validatorStatus = 10 //"active" + validatorPending validatorStatus = 11 //"pending" + validatorExited validatorStatus = 12 //"exited" + validatorWithdrawal validatorStatus = 13 //"withdrawal" +) + +func validatorStatusFromString(s string) (validatorStatus, error) { + switch s { + case "pending_initialized": + return validatorPendingInitialized, nil + case "pending_queued": + return validatorPendingQueued, nil + case "active_ongoing": + return validatorActiveOngoing, nil + case "active_exiting": + return validatorActiveExiting, nil + case "active_slashed": + return validatorActiveSlashed, nil + case "exited_unslashed": + return validatorExitedUnslashed, nil + case "exited_slashed": + return validatorExitedSlashed, nil + case "withdrawal_possible": + return validatorWithdrawalPossible, nil + case "withdrawal_done": + return validatorWithdrawalDone, nil + case "active": + return validatorActive, nil + case "pending": + return validatorPending, nil + case "exited": + return validatorExited, nil + case "withdrawal": + return validatorWithdrawal, nil + default: + return 0, fmt.Errorf("invalid validator status %s", s) + } +} + +func validatorStatusFromValidator(v solid.Validator, currentEpoch uint64, balance uint64) validatorStatus { + activationEpoch := v.ActivationEpoch() + // pending section + if activationEpoch > currentEpoch { + activationEligibilityEpoch := v.ActivationEligibilityEpoch() + if activationEligibilityEpoch == math.MaxUint64 { + return validatorPendingInitialized + } + return validatorPendingQueued + } + + exitEpoch := v.ExitEpoch() + // active section + if activationEpoch <= currentEpoch && currentEpoch < exitEpoch { + if exitEpoch == math.MaxUint64 { + return validatorActiveOngoing + } + slashed := v.Slashed() + if slashed { + return validatorActiveSlashed + } + return validatorActiveExiting + } + + withdrawableEpoch := v.WithdrawableEpoch() + // exited section + if exitEpoch <= currentEpoch && currentEpoch < withdrawableEpoch { + if v.Slashed() { + return validatorExitedSlashed + } + return validatorExitedUnslashed + } + + if balance == 0 { + return validatorWithdrawalDone + } + return validatorWithdrawalPossible + +} + +func (s validatorStatus) String() string { + switch s { + case validatorPendingInitialized: + return "pending_initialized" + 
case validatorPendingQueued: + return "pending_queued" + case validatorActiveOngoing: + return "active_ongoing" + case validatorActiveExiting: + return "active_exiting" + case validatorActiveSlashed: + return "active_slashed" + case validatorExitedUnslashed: + return "exited_unslashed" + case validatorExitedSlashed: + return "exited_slashed" + case validatorWithdrawalPossible: + return "withdrawal_possible" + case validatorWithdrawalDone: + return "withdrawal_done" + case validatorActive: + return "active" + case validatorPending: + return "pending" + case validatorExited: + return "exited" + case validatorWithdrawal: + return "withdrawal" + default: + panic("invalid validator status") + } +} + +const maxValidatorsLookupFilter = 32 + +func parseStatuses(s []string) ([]validatorStatus, error) { + seenAlready := make(map[validatorStatus]struct{}) + statuses := make([]validatorStatus, 0, len(s)) + + if len(s) > maxValidatorsLookupFilter { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, "too many statuses requested") + } + + for _, status := range s { + s, err := validatorStatusFromString(status) + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + if _, ok := seenAlready[s]; ok { + continue + } + seenAlready[s] = struct{}{} + statuses = append(statuses, s) + } + return statuses, nil +} + +func checkValidValidatorId(s string) (bool, error) { + // If it starts with 0x, then it must be a 48-byte, 0x-prefixed hex string + if len(s) == 98 && s[:2] == "0x" { + // check if it is a valid hex string + if _, err := hex.DecodeString(s[2:]); err != nil { + return false, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + return true, nil + } + // If it is not 0x-prefixed, then it must be a validator index; check that it is a valid base-10 number + if _, err := strconv.ParseUint(s, 10, 64); err != nil { + return false, beaconhttp.NewEndpointError(http.StatusBadRequest, "invalid validator id") + } + return false, nil +} + +func (a *ApiHandler) getAllValidators(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { + ctx := r.Context() + + tx, err := a.indiciesDB.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + blockId, err := stateIdFromRequest(r) + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + + blockRoot, httpStatus, err := a.blockRootFromStateId(ctx, tx, blockId) + if err != nil { + return nil, beaconhttp.NewEndpointError(httpStatus, err.Error()) + } + + queryFilters, err := stringListFromQueryParams(r, "status") + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + + validatorIds, err := stringListFromQueryParams(r, "id") + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + + if len(validatorIds) > maxValidatorsLookupFilter { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, "too many validators requested") + } + filterIndicies, err := parseQueryValidatorIndicies(tx, validatorIds) + if err != nil { + return nil, err + } + // Check the filters' validity + statusFilters, err := parseStatuses(queryFilters) + if err != nil { + return nil, err + } + + if blockId.head() { // If the request points at head, serve it from the head state we always keep. 
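+ // Resolution order, as a sketch: "head" is answered from the
+ // always-retained head state; any other root is first tried against the
+ // forkchoice store, and only on a miss is the validator set rebuilt via
+ // the historical states reader further down.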
+ s, cn := a.syncedData.HeadState() + defer cn() + if s == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "node is not synced") + } + return responseValidators(filterIndicies, statusFilters, state.Epoch(s), s.Balances(), s.Validators(), false) + } + slot, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, blockRoot) + if err != nil { + return nil, err + } + + if slot == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "state not found") + } + stateEpoch := *slot / a.beaconChainCfg.SlotsPerEpoch + state, err := a.forkchoiceStore.GetStateAtBlockRoot(blockRoot, true) + if err != nil { + return nil, err + } + if state == nil { + validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, *slot) + if err != nil { + return nil, err + } + balances, err := a.stateReader.ReadValidatorsBalances(tx, *slot) + if err != nil { + return nil, err + } + return responseValidators(filterIndicies, statusFilters, stateEpoch, balances, validatorSet, true) + } + return responseValidators(filterIndicies, statusFilters, stateEpoch, state.Balances(), state.Validators(), *slot <= a.forkchoiceStore.FinalizedSlot()) +} + +func parseQueryValidatorIndex(tx kv.Tx, id string) (uint64, error) { + isPublicKey, err := checkValidValidatorId(id) + if err != nil { + return 0, err + } + if isPublicKey { + var b48 libcommon.Bytes48 + if err := b48.UnmarshalText([]byte(id)); err != nil { + return 0, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + has, err := tx.Has(kv.InvertedValidatorPublicKeys, b48[:]) + if err != nil { + return 0, err + } + if !has { + return math.MaxUint64, nil + } + idx, ok, err := state_accessors.ReadValidatorIndexByPublicKey(tx, b48) + if err != nil { + return 0, err + } + if !ok { + return 0, beaconhttp.NewEndpointError(http.StatusNotFound, "validator not found") + } + return idx, nil + } + idx, err := strconv.ParseUint(id, 10, 64) + if err != nil { + return 0, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + return idx, nil +} + +func parseQueryValidatorIndicies(tx kv.Tx, ids []string) ([]uint64, error) { + filterIndicies := make([]uint64, 0, len(ids)) + + for _, id := range ids { + idx, err := parseQueryValidatorIndex(tx, id) + if err != nil { + return nil, err + } + filterIndicies = append(filterIndicies, idx) + } + return filterIndicies, nil +} + +func (a *ApiHandler) getSingleValidator(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { + ctx := r.Context() + + tx, err := a.indiciesDB.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + blockId, err := stateIdFromRequest(r) + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + + blockRoot, httpStatus, err := a.blockRootFromStateId(ctx, tx, blockId) + if err != nil { + return nil, beaconhttp.NewEndpointError(httpStatus, err.Error()) + } + + validatorId, err := stringFromRequest(r, "validator_id") + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + + validatorIndex, err := parseQueryValidatorIndex(tx, validatorId) + if err != nil { + return nil, err + } + + if blockId.head() { // If the request points at head, serve it from the head state we always keep. 
+ s, cn := a.syncedData.HeadState() + defer cn() + if s == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "node is not synced") + } + if s.ValidatorLength() <= int(validatorIndex) { + return newBeaconResponse([]int{}).withFinalized(false), nil + } + return responseValidator(validatorIndex, state.Epoch(s), s.Balances(), s.Validators(), false) + } + slot, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, blockRoot) + if err != nil { + return nil, err + } + + if slot == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "state not found") + } + stateEpoch := *slot / a.beaconChainCfg.SlotsPerEpoch + state, err := a.forkchoiceStore.GetStateAtBlockRoot(blockRoot, true) + if err != nil { + return nil, err + } + if state == nil { + validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, *slot) + if err != nil { + return nil, err + } + balances, err := a.stateReader.ReadValidatorsBalances(tx, *slot) + if err != nil { + return nil, err + } + return responseValidator(validatorIndex, stateEpoch, balances, validatorSet, true) + } + return responseValidator(validatorIndex, stateEpoch, state.Balances(), state.Validators(), *slot <= a.forkchoiceStore.FinalizedSlot()) +} + +func (a *ApiHandler) getAllValidatorsBalances(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { + ctx := r.Context() + + tx, err := a.indiciesDB.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + blockId, err := stateIdFromRequest(r) + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + + blockRoot, httpStatus, err := a.blockRootFromStateId(ctx, tx, blockId) + if err != nil { + return nil, beaconhttp.NewEndpointError(httpStatus, err.Error()) + } + + validatorIds, err := stringListFromQueryParams(r, "id") + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + + if len(validatorIds) > maxValidatorsLookupFilter { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, "too many validators requested") + } + filterIndicies, err := parseQueryValidatorIndicies(tx, validatorIds) + if err != nil { + return nil, err + } + + if blockId.head() { // If the request points at head, serve it from the head state we always keep. 
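+ // For orientation: the body built below is a flat JSON array of
+ // index/balance pairs, e.g. [{"index":"1","balance":"32000000000"}]
+ // (values illustrative), wrapped in the usual
+ // data/finalized/execution_optimistic envelope.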
+ s, cn := a.syncedData.HeadState() + defer cn() + if s == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "node is not synced") + } + return responseValidatorsBalances(filterIndicies, state.Epoch(s), s.Balances(), false) + } + slot, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, blockRoot) + if err != nil { + return nil, err + } + + if slot == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "state not found") + } + stateEpoch := *slot / a.beaconChainCfg.SlotsPerEpoch + state, err := a.forkchoiceStore.GetStateAtBlockRoot(blockRoot, true) + if err != nil { + return nil, err + } + if state == nil { + balances, err := a.stateReader.ReadValidatorsBalances(tx, *slot) + if err != nil { + return nil, err + } + return responseValidatorsBalances(filterIndicies, stateEpoch, balances, true) + } + return responseValidatorsBalances(filterIndicies, stateEpoch, state.Balances(), *slot <= a.forkchoiceStore.FinalizedSlot()) +} + +type directString string + +func (d directString) MarshalJSON() ([]byte, error) { + return []byte(d), nil +} + +func responseValidators(filterIndicies []uint64, filterStatuses []validatorStatus, stateEpoch uint64, balances solid.Uint64ListSSZ, validators *solid.ValidatorSet, finalized bool) (*beaconResponse, error) { + var b strings.Builder + b.WriteString("[") + first := true + var err error + validators.Range(func(i int, v solid.Validator, l int) bool { + if len(filterIndicies) > 0 && !slices.Contains(filterIndicies, uint64(i)) { + return true + } + status := validatorStatusFromValidator(v, stateEpoch, balances.Get(i)) + if shouldStatusBeFiltered(status, filterStatuses) { + return true + } + if !first { + if _, err = b.WriteString(","); err != nil { + return false + } + } + first = false + if _, err = b.WriteString(fmt.Sprintf(validatorJsonTemplate, i, status.String(), balances.Get(i), v.PublicKey(), v.WithdrawalCredentials().String(), v.EffectiveBalance(), v.Slashed(), v.ActivationEligibilityEpoch(), v.ActivationEpoch(), v.ExitEpoch(), v.WithdrawableEpoch())); err != nil { + return false + } + return true + }) + if err != nil { + return nil, err + } + + _, err = b.WriteString("]\n") + + return newBeaconResponse(directString(b.String())).withFinalized(finalized), err +} + +func responseValidator(idx uint64, stateEpoch uint64, balances solid.Uint64ListSSZ, validators *solid.ValidatorSet, finalized bool) (*beaconResponse, error) { + var b strings.Builder + var err error + if validators.Length() <= int(idx) { + return newBeaconResponse([]int{}).withFinalized(finalized), nil + } + + v := validators.Get(int(idx)) + status := validatorStatusFromValidator(v, stateEpoch, balances.Get(int(idx))) + + if _, err = b.WriteString(fmt.Sprintf(validatorJsonTemplate, idx, status.String(), balances.Get(int(idx)), v.PublicKey(), v.WithdrawalCredentials().String(), v.EffectiveBalance(), v.Slashed(), v.ActivationEligibilityEpoch(), v.ActivationEpoch(), v.ExitEpoch(), v.WithdrawableEpoch())); err != nil { + return nil, err + } + + _, err = b.WriteString("\n") + + return newBeaconResponse(directString(b.String())).withFinalized(finalized), err +} + +func responseValidatorsBalances(filterIndicies []uint64, stateEpoch uint64, balances solid.Uint64ListSSZ, finalized bool) (*beaconResponse, error) { + var b strings.Builder + b.WriteString("[") + jsonTemplate := "{\"index\":\"%d\",\"balance\":\"%d\"}" + first := true + var err error + balances.Range(func(i int, v uint64, l int) bool { + if len(filterIndicies) > 0 && !slices.Contains(filterIndicies, uint64(i)) 
{ + return true + } + + if !first { + if _, err = b.WriteString(","); err != nil { + return false + } + } + first = false + if _, err = b.WriteString(fmt.Sprintf(jsonTemplate, i, v)); err != nil { + return false + } + return true + }) + if err != nil { + return nil, err + } + + _, err = b.WriteString("]\n") + + return newBeaconResponse(directString(b.String())).withFinalized(finalized), err +} + +func shouldStatusBeFiltered(status validatorStatus, statuses []validatorStatus) bool { + if len(statuses) == 0 { + return false + } + for _, s := range statuses { + if (s == status) || (s == validatorActive && (status == validatorActiveOngoing || status == validatorActiveExiting || status == validatorActiveSlashed)) || + (s == validatorPending && (status == validatorPendingInitialized || status == validatorPendingQueued)) || + (s == validatorExited && (status == validatorExitedUnslashed || status == validatorExitedSlashed)) || + (s == validatorWithdrawal && (status == validatorWithdrawalPossible || status == validatorWithdrawalDone)) { + return false + } + } + return true // filter if no filter condition is met +} diff --git a/cl/beacon/handler/validators_test.go b/cl/beacon/handler/validators_test.go new file mode 100644 index 00000000000..698577aeb21 --- /dev/null +++ b/cl/beacon/handler/validators_test.go @@ -0,0 +1,197 @@ +package handler + +import ( + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/common" + "github.com/stretchr/testify/require" +) + +func TestGetAllValidators(t *testing.T) { + + // setupTestingHandler(t, clparams.Phase0Version) + _, blocks, _, _, postState, handler, _, _, fcu := setupTestingHandler(t, clparams.Phase0Version) + + postRoot, err := postState.HashSSZ() + require.NoError(t, err) + + fcu.HeadVal, err = blocks[len(blocks)-1].Block.HashSSZ() + require.NoError(t, err) + + fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot + fcu.FinalizedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, fcu.HeadSlotVal/32) + + cases := []struct { + blockID string + code int + queryParams string + expectedResp string + }{ + { + blockID: "0x" + common.Bytes2Hex(postRoot[:]), + code: http.StatusOK, + queryParams: "?id=1,2,3", + expectedResp: 
`{"data":[{"index":"1","status":"withdrawal_possible","balance":"20125000000","validator":{"pubkey":"0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e","withdrawal_credentials":"0x307830303166303965643330356330373637643536663162336264623235663330313239383032376638653938613865306364326463626363363630373233643762","effective_balance":"20000000000","slashed":false,"activation_eligibility_epoch":"0","activation_epoch":"0","exit_epoch":"253","withdrawable_epoch":"257"}},{"index":"2","status":"active_slashed","balance":"25678253779","validator":{"pubkey":"0x89ece308f9d1f0131765212deca99697b112d61f9be9a5f1f3780a51335b3ff981747a0b2ca2179b96d2c0c9024e5224","withdrawal_credentials":"0x307830303661646334613165346361626133376335346435366432343131666430646633613130326638343839613463316265353335663466643566383831306339","effective_balance":"25000000000","slashed":true,"activation_eligibility_epoch":"0","activation_epoch":"0","exit_epoch":"261","withdrawable_epoch":"8448"}},{"index":"3","status":"active_slashed","balance":"35998164834","validator":{"pubkey":"0xac9b60d5afcbd5663a8a44b7c5a02f19e9a77ab0a35bd65809bb5c67ec582c897feb04decc694b13e08587f3ff9b5b60","withdrawal_credentials":"0x307830303831633835323037386132616434333064343338643765616566633339363436663533383935323932353936626265313939653264376431383834616238","effective_balance":"32000000000","slashed":true,"activation_eligibility_epoch":"0","activation_epoch":"0","exit_epoch":"261","withdrawable_epoch":"8448"}}],"finalized":true,"execution_optimistic":false}` + "\n", + }, + { + blockID: "finalized", + code: http.StatusOK, + queryParams: "?status=active&id=1,2,3", + expectedResp: `{"data":[{"index":"2","status":"active_slashed","balance":"25678253779","validator":{"pubkey":"0x89ece308f9d1f0131765212deca99697b112d61f9be9a5f1f3780a51335b3ff981747a0b2ca2179b96d2c0c9024e5224","withdrawal_credentials":"0x307830303661646334613165346361626133376335346435366432343131666430646633613130326638343839613463316265353335663466643566383831306339","effective_balance":"25000000000","slashed":true,"activation_eligibility_epoch":"0","activation_epoch":"0","exit_epoch":"261","withdrawable_epoch":"8448"}},{"index":"3","status":"active_slashed","balance":"35998164834","validator":{"pubkey":"0xac9b60d5afcbd5663a8a44b7c5a02f19e9a77ab0a35bd65809bb5c67ec582c897feb04decc694b13e08587f3ff9b5b60","withdrawal_credentials":"0x307830303831633835323037386132616434333064343338643765616566633339363436663533383935323932353936626265313939653264376431383834616238","effective_balance":"32000000000","slashed":true,"activation_eligibility_epoch":"0","activation_epoch":"0","exit_epoch":"261","withdrawable_epoch":"8448"}}],"finalized":true,"execution_optimistic":false}` + "\n", + }, + { + blockID: "finalized", + code: http.StatusOK, + queryParams: "?id=0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e", + expectedResp: `{"data":[{"index":"1","status":"withdrawal_possible","balance":"20125000000","validator":{"pubkey":"0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e","withdrawal_credentials":"0x307830303166303965643330356330373637643536663162336264623235663330313239383032376638653938613865306364326463626363363630373233643762","effective_balance":"20000000000","slashed":false,"activation_eligibility_epoch":"0","activation_epoch":"0","exit_epoch":"253","withdrawable_epoch":"257"}}],"finalized":true,"execution_optimistic":false}` + "\n", + }, + { + 
blockID: "alabama", + code: http.StatusBadRequest, + }, + } + + for _, c := range cases { + t.Run(c.blockID, func(t *testing.T) { + server := httptest.NewServer(handler.mux) + defer server.Close() + // Query the block in the handler with /eth/v2/beacon/blocks/{block_id} + resp, err := http.Get(server.URL + "/eth/v1/beacon/states/" + c.blockID + "/validators" + c.queryParams) + require.NoError(t, err) + defer resp.Body.Close() + require.Equal(t, c.code, resp.StatusCode) + if resp.StatusCode != http.StatusOK { + return + } + // unmarshal the json + out, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, c.expectedResp, string(out)) + + }) + } +} + +func TestGetValidatorsBalances(t *testing.T) { + + // setupTestingHandler(t, clparams.Phase0Version) + _, blocks, _, _, postState, handler, _, _, fcu := setupTestingHandler(t, clparams.Phase0Version) + + postRoot, err := postState.HashSSZ() + require.NoError(t, err) + + fcu.HeadVal, err = blocks[len(blocks)-1].Block.HashSSZ() + require.NoError(t, err) + + fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot + fcu.FinalizedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, fcu.HeadSlotVal/32) + + cases := []struct { + blockID string + code int + queryParams string + expectedResp string + }{ + { + blockID: "0x" + common.Bytes2Hex(postRoot[:]), + code: http.StatusOK, + queryParams: "?id=1,2,3", + expectedResp: `{"data":[{"index":"1","balance":"20125000000"},{"index":"2","balance":"25678253779"},{"index":"3","balance":"35998164834"}],"finalized":true,"execution_optimistic":false}` + "\n", + }, + { + blockID: "finalized", + code: http.StatusOK, + queryParams: "?id=0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e", + expectedResp: `{"data":[{"index":"1","balance":"20125000000"}],"finalized":true,"execution_optimistic":false}` + "\n", + }, + { + blockID: "alabama", + code: http.StatusBadRequest, + }, + } + + for _, c := range cases { + t.Run(c.blockID, func(t *testing.T) { + server := httptest.NewServer(handler.mux) + defer server.Close() + // Query the block in the handler with /eth/v2/beacon/blocks/{block_id} + resp, err := http.Get(server.URL + "/eth/v1/beacon/states/" + c.blockID + "/validator_balances" + c.queryParams) + require.NoError(t, err) + defer resp.Body.Close() + require.Equal(t, c.code, resp.StatusCode) + if resp.StatusCode != http.StatusOK { + return + } + // unmarshal the json + out, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, c.expectedResp, string(out)) + + }) + } +} + +func TestGetSingleValidator(t *testing.T) { + + // setupTestingHandler(t, clparams.Phase0Version) + _, blocks, _, _, postState, handler, _, _, fcu := setupTestingHandler(t, clparams.Phase0Version) + + postRoot, err := postState.HashSSZ() + require.NoError(t, err) + + fcu.HeadVal, err = blocks[len(blocks)-1].Block.HashSSZ() + require.NoError(t, err) + + fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot + fcu.FinalizedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, fcu.HeadSlotVal/32) + + cases := []struct { + blockID string + code int + validatorIdx string + expectedResp string + }{ + { + blockID: "0x" + common.Bytes2Hex(postRoot[:]), + code: http.StatusOK, + validatorIdx: "1", + expectedResp: 
`{"data":{"index":"1","status":"withdrawal_possible","balance":"20125000000","validator":{"pubkey":"0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e","withdrawal_credentials":"0x307830303166303965643330356330373637643536663162336264623235663330313239383032376638653938613865306364326463626363363630373233643762","effective_balance":"20000000000","slashed":false,"activation_eligibility_epoch":"0","activation_epoch":"0","exit_epoch":"253","withdrawable_epoch":"257"}},"finalized":true,"execution_optimistic":false}` + "\n", + }, + { + blockID: "finalized", + code: http.StatusOK, + validatorIdx: "0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e", + expectedResp: `{"data":{"index":"1","status":"withdrawal_possible","balance":"20125000000","validator":{"pubkey":"0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e","withdrawal_credentials":"0x307830303166303965643330356330373637643536663162336264623235663330313239383032376638653938613865306364326463626363363630373233643762","effective_balance":"20000000000","slashed":false,"activation_eligibility_epoch":"0","activation_epoch":"0","exit_epoch":"253","withdrawable_epoch":"257"}},"finalized":true,"execution_optimistic":false}` + "\n", + }, + { + blockID: "alabama", + code: http.StatusBadRequest, + validatorIdx: "3", + }, + } + + for _, c := range cases { + t.Run(c.blockID, func(t *testing.T) { + server := httptest.NewServer(handler.mux) + defer server.Close() + // Query the block in the handler with /eth/v2/beacon/blocks/{block_id} + resp, err := http.Get(server.URL + "/eth/v1/beacon/states/" + c.blockID + "/validators/" + c.validatorIdx) + require.NoError(t, err) + defer resp.Body.Close() + require.Equal(t, c.code, resp.StatusCode) + if resp.StatusCode != http.StatusOK { + return + } + // unmarshal the json + out, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, c.expectedResp, string(out)) + + }) + } +} diff --git a/cl/beacon/router.go b/cl/beacon/router.go index 3fb927f0d33..018e138342c 100644 --- a/cl/beacon/router.go +++ b/cl/beacon/router.go @@ -1,6 +1,7 @@ package beacon import ( + "context" "net" "net/http" "strings" @@ -39,7 +40,8 @@ func ListenAndServe(beaconHandler *LayeredBeaconHandler, routerCfg beacon_router mux.HandleFunc("/eth/*", func(w http.ResponseWriter, r *http.Request) { nfw := ¬FoundNoWriter{rw: w} beaconHandler.ValidatorApi.ServeHTTP(nfw, r) - if nfw.code == 404 || nfw.code == 0 { + r = r.WithContext(context.WithValue(r.Context(), chi.RouteCtxKey, chi.NewRouteContext())) + if isNotFound(nfw.code) || nfw.code == 0 { beaconHandler.ArchiveApi.ServeHTTP(w, r) } }) @@ -65,14 +67,3 @@ func ListenAndServe(beaconHandler *LayeredBeaconHandler, routerCfg beacon_router } return nil } - -func newBeaconMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - contentType := r.Header.Get("Content-Type") - if contentType != "application/json" && contentType != "" { - http.Error(w, "Content-Type header must be application/json", http.StatusUnsupportedMediaType) - return - } - next.ServeHTTP(w, r) - }) -} diff --git a/cl/beacon/rw.go b/cl/beacon/rw.go index 33a74b2fb7e..bab259297f8 100644 --- a/cl/beacon/rw.go +++ b/cl/beacon/rw.go @@ -11,11 +11,18 @@ type notFoundNoWriter struct { headers http.Header } +func isNotFound(code int) bool { + return code == 404 || code == 405 +} + func (f *notFoundNoWriter) Header() 
http.Header {
-	if f.code == 404 {
+	if isNotFound(f.code) {
 		return make(http.Header)
 	}
-	return f.rw.Header()
+	if f.headers == nil {
+		f.headers = make(http.Header)
+	}
+	return f.headers
 }
 
 func (f *notFoundNoWriter) Write(xs []byte) (int, error) {
@@ -23,7 +30,7 @@ func (f *notFoundNoWriter) Write(xs []byte) (int, error) {
 	if f.code == 0 {
 		f.WriteHeader(200)
 	}
-	if f.code == 404 {
+	if isNotFound(f.code) {
 		return 0, nil
 	}
 	// pass on the write
@@ -34,9 +41,19 @@ func (f *notFoundNoWriter) WriteHeader(statusCode int) {
 	if f.code != 0 {
 		return
 	}
-	if f.code != 404 {
-		f.rw.WriteHeader(statusCode)
-	}
-	// if it's a 404 and we are not at our last handler, set the target to an io.Discard
 	f.code = statusCode
+	if isNotFound(statusCode) {
+		f.headers = nil
+		return
+	}
+	// if we get here, it means it is a successful write: flush any headers
+	// buffered before the status was known, then commit the status code.
+	// (headers added to the underlying writer after WriteHeader are silently
+	// dropped by net/http, so the copy must happen first.)
+	if f.headers != nil {
+		for k, v := range f.headers {
+			for _, x := range v {
+				f.rw.Header().Add(k, x)
+			}
+		}
+	}
+	f.headers = f.rw.Header()
+	f.rw.WriteHeader(statusCode)
 }
diff --git a/cl/beacon/synced_data/synced_data.go b/cl/beacon/synced_data/synced_data.go
index abc04251670..c8de023f888 100644
--- a/cl/beacon/synced_data/synced_data.go
+++ b/cl/beacon/synced_data/synced_data.go
@@ -28,18 +28,15 @@ func (s *SyncedDataManager) OnHeadState(newState *state.CachingBeaconState) (err
 	if !s.enabled {
 		return
 	}
-	// Schedule update.
-	go func() {
-		s.mu.Lock()
-		defer s.mu.Unlock()
-		if s.headState == nil {
-			s.headState, err = newState.Copy()
-		}
-		err = newState.CopyInto(s.headState)
-		if err != nil {
-			log.Error("failed to copy head state", "err", err)
-		}
-	}()
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.headState == nil {
+		if s.headState, err = newState.Copy(); err != nil {
+			log.Error("failed to copy head state", "err", err)
+		}
+		return
+	}
+	if err = newState.CopyInto(s.headState); err != nil {
+		log.Error("failed to copy head state", "err", err)
+	}
 	return
 }
diff --git a/cl/beacon/validatorapi/endpoints.go b/cl/beacon/validatorapi/get.go
similarity index 77%
rename from cl/beacon/validatorapi/endpoints.go
rename to cl/beacon/validatorapi/get.go
index ed06d471a2f..2f38e638eca 100644
--- a/cl/beacon/validatorapi/endpoints.go
+++ b/cl/beacon/validatorapi/get.go
@@ -5,10 +5,11 @@ import (
 	"net/http"
 	"strconv"
 	"strings"
-	"unicode"
 
+	"github.com/gfx-labs/sse"
 	"github.com/go-chi/chi/v5"
 	"github.com/ledgerwatch/erigon-lib/common"
+	"github.com/ledgerwatch/erigon-lib/common/hexutil"
 	"github.com/ledgerwatch/erigon-lib/common/hexutility"
 	"github.com/ledgerwatch/erigon/cl/beacon/beaconhttp"
 	"github.com/ledgerwatch/erigon/cl/clparams"
@@ -17,7 +18,7 @@ import (
 	"github.com/ledgerwatch/erigon/cl/utils"
 )
 
-func (v *ValidatorApiHandler) GetEthV1NodeSyncing(r *http.Request) (any, error) {
+func (v *ValidatorApiHandler) GetEthV1NodeSyncing(w http.ResponseWriter, r *http.Request) (any, error) {
 	_, slot, err := v.FC.GetHead()
 	if err != nil {
 		return nil, err
@@ -50,17 +51,14 @@ func (v *ValidatorApiHandler) GetEthV1NodeSyncing(r *http.Request) (any, error)
 	}, nil
 }
 
-func (v *ValidatorApiHandler) EventSourceGetV1Events(w http.ResponseWriter, r *http.Request) {
-}
-
-func (v *ValidatorApiHandler) GetEthV1ConfigSpec(r *http.Request) (*clparams.BeaconChainConfig, error) {
+func (v *ValidatorApiHandler) GetEthV1ConfigSpec(w http.ResponseWriter, r *http.Request) (*clparams.BeaconChainConfig, error) {
 	if v.BeaconChainCfg == nil {
 		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "beacon config not found")
 	}
 	return v.BeaconChainCfg, nil
 }
 
-func (v *ValidatorApiHandler) GetEthV1BeaconGenesis(r *http.Request) (any, error) {
+func (v *ValidatorApiHandler) 
GetEthV1BeaconGenesis(w http.ResponseWriter, r *http.Request) (any, error) { if v.GenesisCfg == nil { return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "genesis config not found") } @@ -75,7 +73,7 @@ func (v *ValidatorApiHandler) GetEthV1BeaconGenesis(r *http.Request) (any, error }, nil } -func (v *ValidatorApiHandler) GetEthV1BeaconStatesStateIdFork(r *http.Request) (any, error) { +func (v *ValidatorApiHandler) GetEthV1BeaconStatesStateIdFork(w http.ResponseWriter, r *http.Request) (any, error) { stateId := chi.URLParam(r, "state_id") state, err := v.privateGetStateFromStateId(stateId) if err != nil { @@ -95,7 +93,7 @@ func (v *ValidatorApiHandler) GetEthV1BeaconStatesStateIdFork(r *http.Request) ( }, }, nil } -func (v *ValidatorApiHandler) GetEthV1BeaconStatesStateIdValidatorsValidatorId(r *http.Request) (any, error) { +func (v *ValidatorApiHandler) GetEthV1BeaconStatesStateIdValidatorsValidatorId(w http.ResponseWriter, r *http.Request) (any, error) { stateId := chi.URLParam(r, "state_id") // grab the correct state for the given state id beaconState, err := v.privateGetStateFromStateId(stateId) @@ -206,44 +204,59 @@ func (v *ValidatorApiHandler) GetEthV1BeaconStatesStateIdValidatorsValidatorId(r }, nil } -func (v *ValidatorApiHandler) privateGetStateFromStateId(stateId string) (*state.CachingBeaconState, error) { - switch { - case stateId == "head": - // Now check the head - headRoot, _, err := v.FC.GetHead() - if err != nil { - return nil, err - } - return v.FC.GetStateAtBlockRoot(headRoot, true) - case stateId == "genesis": - // not supported - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "genesis block not found") - case stateId == "finalized": - return v.FC.GetStateAtBlockRoot(v.FC.FinalizedCheckpoint().BlockRoot(), true) - case stateId == "justified": - return v.FC.GetStateAtBlockRoot(v.FC.JustifiedCheckpoint().BlockRoot(), true) - case strings.HasPrefix(stateId, "0x"): - // assume is hex has, so try to parse - hsh := common.Hash{} - err := hsh.UnmarshalText([]byte(stateId)) - if err != nil { - return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Sprintf("Invalid state ID: %s", stateId)) - } - return v.FC.GetStateAtStateRoot(hsh, true) - case isInt(stateId): - // ignore the error bc isInt check succeeded. yes this doesn't protect for overflow, they will request slot 0 and it will fail. 
good
-		val, _ := strconv.ParseUint(stateId, 10, 64)
-		return v.FC.GetStateAtSlot(val, true)
-	default:
-		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Sprintf("Invalid state ID: %s", stateId))
+func (v *ValidatorApiHandler) GetEthV1EthNodeSyncing(w http.ResponseWriter, r *http.Request) (any, error) {
+	// TODO: populate this map
+	o := map[string]any{
+		"data": map[string]any{},
 	}
+	return o, nil
 }
 
+func (v *ValidatorApiHandler) GetEthV3ValidatorBlocksSlot(w http.ResponseWriter, r *http.Request) (any, error) {
+	// TODO: populate this map
+	o := map[string]any{
+		"data": map[string]any{},
+	}
 
-func isInt(s string) bool {
-	for _, c := range s {
-		if !unicode.IsDigit(c) {
-			return false
-		}
+	slotString := chi.URLParam(r, "slot")
+	slot, err := strconv.ParseUint(slotString, 10, 64)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse slot: %w", err)
+	}
+	randaoRevealString := r.URL.Query().Get("randao_reveal")
+	randaoReveal, err := hexutil.Decode(randaoRevealString)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse randao_reveal: %w", err)
+	}
+	graffitiString := r.URL.Query().Get("graffiti")
+	if graffitiString == "" {
+		graffitiString = "0x"
+	}
+	graffiti, err := hexutil.Decode(graffitiString)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse graffiti: %w", err)
+	}
+	skipRandaoVerification := r.URL.Query().Has("skip_randao_verification")
+	//if skipRandaoVerification {
+	//	if isInfinity(randaoReveal) {
+	//		return nil, beaconhttp.NewEndpointError(400, "randao reveal must be set to infinity if skip randao verification is set")
+	//	}
+	//}
+	_, _, _, _ = slot, graffiti, randaoReveal, skipRandaoVerification
+	return o, nil
+}
+
+func (v *ValidatorApiHandler) EventSourceGetV1Events(w http.ResponseWriter, r *http.Request) {
+	sink, err := sse.DefaultUpgrader.Upgrade(w, r)
+	if err != nil {
+		// OK to ignore this error.
+		return
+	}
+	topics := r.URL.Query()["topics"]
+	for _, topic := range topics {
+		sink.Encode(&sse.Event{
+			Event: []byte(topic),
+			Data:  nil,
+		})
+		// OK to ignore this error; maybe log it later.
 	}
-	return true
 }
diff --git a/cl/beacon/validatorapi/handler.go b/cl/beacon/validatorapi/handler.go
index 838ef398240..95a1302a3f7 100644
--- a/cl/beacon/validatorapi/handler.go
+++ b/cl/beacon/validatorapi/handler.go
@@ -6,6 +6,7 @@ import (
 
 	"github.com/go-chi/chi/v5"
 	"github.com/ledgerwatch/erigon/cl/beacon/beaconhttp"
+	"github.com/ledgerwatch/erigon/cl/beacon/building"
 	"github.com/ledgerwatch/erigon/cl/clparams"
 	"github.com/ledgerwatch/erigon/cl/phase1/forkchoice"
 )
@@ -16,19 +17,22 @@ type ValidatorApiHandler struct {
 	BeaconChainCfg *clparams.BeaconChainConfig
 	GenesisCfg     *clparams.GenesisConfig
 
+	state *building.State
+
 	o   sync.Once
-	mux chi.Router
+	mux *chi.Mux
 }
 
 func (v *ValidatorApiHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	v.o.Do(func() {
 		v.mux = chi.NewRouter()
-		v.init(v.mux)
+		v.state = building.NewState()
+		v.Route(v.mux)
 	})
 	v.mux.ServeHTTP(w, r)
 }
 
-func (v *ValidatorApiHandler) init(r chi.Router) {
+func (v *ValidatorApiHandler) Route(r chi.Router) {
 	r.Route("/eth", func(r chi.Router) {
 		r.Route("/v1", func(r chi.Router) {
 			r.Route("/beacon", func(r chi.Router) {
@@ -39,31 +43,32 @@ func (v *ValidatorApiHandler) init(r chi.Router) {
 					r.Get("/validators/{validator_id}", beaconhttp.HandleEndpointFunc(v.GetEthV1BeaconStatesStateIdValidatorsValidatorId))
 				})
 			})
-			r.Post("/binded_blocks", http.NotFound)
-			r.Post("/blocks", http.NotFound)
+			r.Post("/blocks", beaconhttp.HandleEndpointFunc(v.PostEthV1BeaconBlocks))
+			r.Post("/blinded_blocks", beaconhttp.HandleEndpointFunc(v.PostEthV1BeaconBlindedBlocks))
 			r.Route("/pool", func(r chi.Router) {
-				r.Post("/attestations", http.NotFound)
-				r.Post("/sync_committees", http.NotFound)
+				r.Post("/attestations", beaconhttp.HandleEndpointFunc(v.PostEthV1BeaconPoolAttestations))
+				r.Post("/sync_committees", beaconhttp.HandleEndpointFunc(v.PostEthV1BeaconPoolSyncCommittees))
 			})
 			r.Get("/node/syncing", beaconhttp.HandleEndpointFunc(v.GetEthV1NodeSyncing))
 		})
 		r.Get("/config/spec", beaconhttp.HandleEndpointFunc(v.GetEthV1ConfigSpec))
-		r.Get("/events", http.NotFound)
+		r.Get("/events", v.EventSourceGetV1Events)
 		r.Route("/validator", func(r chi.Router) {
-			r.Route("/duties", func(r chi.Router) {
-				r.Post("/attester/{epoch}", http.NotFound)
-				r.Get("/proposer/{epoch}", http.NotFound)
-				r.Post("/sync/{epoch}", http.NotFound)
-			})
+			// implemented by archive api (for now)
+			// r.Route("/duties", func(r chi.Router) {
+			// 	r.Post("/attester/{epoch}", http.NotFound)
+			// 	r.Post("/sync/{epoch}", http.NotFound)
+			// 	r.Get("/proposer/{epoch}", http.NotFound)
+			// })
 			// r.Get("/blinded_blocks/{slot}", http.NotFound) - deprecated
 			r.Get("/attestation_data", http.NotFound)
 			r.Get("/aggregate_attestation", http.NotFound)
-			r.Post("/aggregate_and_proofs", http.NotFound)
-			r.Post("/beacon_committee_subscriptions", http.NotFound)
-			r.Post("/sync_committee_subscriptions", http.NotFound)
+			r.Post("/aggregate_and_proofs", beaconhttp.HandleEndpointFunc(v.PostEthV1ValidatorAggregateAndProofs))
+			r.Post("/beacon_committee_subscriptions", beaconhttp.HandleEndpointFunc(v.PostEthV1ValidatorBeaconCommitteeSubscriptions))
+			r.Post("/sync_committee_subscriptions", beaconhttp.HandleEndpointFunc(v.PostEthV1ValidatorSyncCommitteeSubscriptions))
 			r.Get("/sync_committee_contribution", http.NotFound)
-			r.Post("/contribution_and_proofs", http.NotFound)
-			r.Post("/prepare_beacon_proposer", http.NotFound)
+			r.Post("/contribution_and_proofs", beaconhttp.HandleEndpointFunc(v.PostEthV1ValidatorContributionAndProofs))
+			r.Post("/prepare_beacon_proposer", beaconhttp.HandleEndpointFunc(v.PostEthV1ValidatorPrepareBeaconProposer))
 		})
 	})
 	r.Route("/v2", func(r chi.Router) {
@@ -73,14 +78,15 @@ func (v *ValidatorApiHandler) init(r chi.Router) {
 			})
 		})
 		r.Route("/beacon", func(r chi.Router) {
-			r.Post("/blocks/{block_id}", http.NotFound)
+			r.Post("/blocks", beaconhttp.HandleEndpointFunc(v.PostEthV2BeaconBlocks))
+			r.Post("/blinded_blocks", beaconhttp.HandleEndpointFunc(v.PostEthV2BeaconBlindedBlocks))
 		})
 		r.Route("/validator", func(r chi.Router) {
-			r.Post("/blocks/{slot}", http.NotFound)
+			// block production is a GET endpoint in the beacon API, hence r.Get here
+			r.Get("/blocks/{slot}", beaconhttp.HandleEndpointFunc(v.GetEthV3ValidatorBlocksSlot))
 		})
 	})
 	r.Route("/v3", func(r chi.Router) {
-		r.Route("/beacon", func(r chi.Router) {
+		r.Route("/validator", func(r chi.Router) {
 			r.Get("/blocks/{block_id}", http.NotFound)
 		})
 	})
diff --git a/cl/beacon/validatorapi/helpers.go b/cl/beacon/validatorapi/helpers.go
new file mode 100644
index 00000000000..af0319e0ce5
--- /dev/null
+++ b/cl/beacon/validatorapi/helpers.go
@@ -0,0 +1,55 @@
+package validatorapi
+
+import (
+	"fmt"
+	"net/http"
+	"strconv"
+	"strings"
+	"unicode"
+
+	"github.com/ledgerwatch/erigon-lib/common"
+	"github.com/ledgerwatch/erigon/cl/beacon/beaconhttp"
+	"github.com/ledgerwatch/erigon/cl/phase1/core/state"
+)
+
+func (v *ValidatorApiHandler) privateGetStateFromStateId(stateId string) (*state.CachingBeaconState, error) {
+	switch {
+	case stateId == "head":
+		// Now check the head
+		headRoot, _, err := v.FC.GetHead()
+		if err != nil {
+			return nil, err
+		}
+		return v.FC.GetStateAtBlockRoot(headRoot, true)
+	case stateId == "genesis":
+		// not supported
+		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "genesis block not found")
+	case stateId == "finalized":
+		return v.FC.GetStateAtBlockRoot(v.FC.FinalizedCheckpoint().BlockRoot(), true)
+	case stateId == "justified":
+		return v.FC.GetStateAtBlockRoot(v.FC.JustifiedCheckpoint().BlockRoot(), true)
+	case strings.HasPrefix(stateId, "0x"):
+		// assume it is a hex hash, so try to parse it
+		hsh := common.Hash{}
+		err := hsh.UnmarshalText([]byte(stateId))
+		if err != nil {
+			return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Sprintf("Invalid state ID: %s", stateId))
+		}
+		return v.FC.GetStateAtStateRoot(hsh, true)
+	case isInt(stateId):
+		// ignore the error bc isInt check succeeded. yes this doesn't protect for overflow, they will request slot 0 and it will fail. 
good + val, _ := strconv.ParseUint(stateId, 10, 64) + return v.FC.GetStateAtSlot(val, true) + default: + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Sprintf("Invalid state ID: %s", stateId)) + } +} + +func isInt(s string) bool { + for _, c := range s { + if !unicode.IsDigit(c) { + return false + } + } + return true +} diff --git a/cl/beacon/validatorapi/post.go b/cl/beacon/validatorapi/post.go new file mode 100644 index 00000000000..207eec480e8 --- /dev/null +++ b/cl/beacon/validatorapi/post.go @@ -0,0 +1,145 @@ +package validatorapi + +import ( + "encoding/json" + "net/http" + + "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" + "github.com/ledgerwatch/erigon/cl/beacon/building" + "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" +) + +func (v *ValidatorApiHandler) PostEthV1ValidatorPrepareBeaconProposer(w http.ResponseWriter, r *http.Request) (*int, error) { + var req []building.PrepareBeaconProposer + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return nil, beaconhttp.NewEndpointError(400, "invalid request: "+err.Error()) + } + for _, x := range req { + v.state.SetFeeRecipient(x.ValidatorIndex, x.FeeRecipient) + } + return nil, nil +} + +func (v *ValidatorApiHandler) PostEthV1ValidatorContributionAndProofs(w http.ResponseWriter, r *http.Request) (*int, error) { + var req []*cltypes.ContributionAndProof + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return nil, beaconhttp.NewEndpointError(400, "invalid request: "+err.Error()) + } + // TODO: this endpoint + return nil, beaconhttp.NewEndpointError(404, "not implemented") +} + +func (v *ValidatorApiHandler) PostEthV1ValidatorSyncCommitteeSubscriptions(w http.ResponseWriter, r *http.Request) (*int, error) { + var req []building.SyncCommitteeSubscription + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return nil, beaconhttp.NewEndpointError(400, "invalid request: "+err.Error()) + } + // TODO: this endpoint + return nil, beaconhttp.NewEndpointError(404, "not implemented") +} + +func (v *ValidatorApiHandler) PostEthV1ValidatorBeaconCommitteeSubscriptions(w http.ResponseWriter, r *http.Request) (*int, error) { + var req []building.BeaconCommitteeSubscription + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return nil, beaconhttp.NewEndpointError(400, "invalid request: "+err.Error()) + } + // TODO: this endpoint + return nil, beaconhttp.NewEndpointError(404, "not implemented") +} + +func (v *ValidatorApiHandler) PostEthV1ValidatorAggregateAndProofs(w http.ResponseWriter, r *http.Request) (*int, error) { + var req []cltypes.SignedAggregateAndProof + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return nil, beaconhttp.NewEndpointError(400, "invalid request: "+err.Error()) + } + // TODO: this endpoint + return nil, beaconhttp.NewEndpointError(404, "not implemented") +} + +func (v *ValidatorApiHandler) PostEthV1BeaconPoolSyncCommittees(w http.ResponseWriter, r *http.Request) (*int, error) { + var req []*solid.SyncCommittee + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return nil, beaconhttp.NewEndpointError(400, "invalid request: "+err.Error()) + } + // TODO: this endpoint + return nil, beaconhttp.NewEndpointError(404, "not implemented") +} + +func (v *ValidatorApiHandler) PostEthV1BeaconPoolAttestations(w http.ResponseWriter, r *http.Request) (*int, error) { + var req []*solid.Attestation + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return nil, 
beaconhttp.NewEndpointError(400, "invalid request: "+err.Error()) + } + // TODO: this endpoint + return nil, beaconhttp.NewEndpointError(404, "not implemented") +} + +func (v *ValidatorApiHandler) PostEthV1BeaconBlocks(w http.ResponseWriter, r *http.Request) (*int, error) { + ethConsensusVersion := r.Header.Get("Eth-Consensus-Version") + var req cltypes.SignedBeaconBlock + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return nil, beaconhttp.NewEndpointError(400, "invalid request: "+err.Error()) + } + // TODO: this endpoint + _ = ethConsensusVersion + return nil, beaconhttp.NewEndpointError(404, "not implemented") +} + +func (v *ValidatorApiHandler) PostEthV2BeaconBlocks(w http.ResponseWriter, r *http.Request) (*int, error) { + broadcastValidation := r.URL.Query().Get("broadcast_validation") + if broadcastValidation == "" { + broadcastValidation = "gossip" + } + ethConsensusVersion := r.Header.Get("Eth-Consensus-Version") + if ethConsensusVersion == "" { + return nil, beaconhttp.NewEndpointError(400, "no eth consensus version set") + } + var req cltypes.SignedBeaconBlock + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return nil, beaconhttp.NewEndpointError(400, "invalid request: "+err.Error()) + } + // TODO: this endpoint + _, _ = broadcastValidation, ethConsensusVersion + return nil, beaconhttp.NewEndpointError(404, "not implemented") +} + +func (v *ValidatorApiHandler) PostEthV1BeaconBlindedBlocks(w http.ResponseWriter, r *http.Request) (*int, error) { + ethConsensusVersion := r.Header.Get("Eth-Consensus-Version") + var req cltypes.SignedBlindedBeaconBlock + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return nil, beaconhttp.NewEndpointError(400, "invalid request: "+err.Error()) + } + // TODO: this endpoint + _ = ethConsensusVersion + return nil, beaconhttp.NewEndpointError(404, "not implemented") +} + +func (v *ValidatorApiHandler) PostEthV2BeaconBlindedBlocks(w http.ResponseWriter, r *http.Request) (*int, error) { + broadcastValidation := r.URL.Query().Get("broadcast_validation") + if broadcastValidation == "" { + broadcastValidation = "gossip" + } + ethConsensusVersion := r.Header.Get("Eth-Consensus-Version") + if ethConsensusVersion == "" { + return nil, beaconhttp.NewEndpointError(400, "no eth consensus version set") + } + var req cltypes.SignedBlindedBeaconBlock + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return nil, beaconhttp.NewEndpointError(400, "invalid request: "+err.Error()) + } + // TODO: this endpoint + _, _ = broadcastValidation, ethConsensusVersion + return nil, beaconhttp.NewEndpointError(404, "not implemented") +} diff --git a/cl/clparams/config.go b/cl/clparams/config.go index 8b58db47b3d..a0bb995e3fe 100644 --- a/cl/clparams/config.go +++ b/cl/clparams/config.go @@ -504,6 +504,10 @@ func (b *BeaconChainConfig) RoundSlotToSyncCommitteePeriod(slot uint64) uint64 { return slot - (slot % slotsPerSyncCommitteePeriod) } +func (b *BeaconChainConfig) SyncCommitteePeriod(slot uint64) uint64 { + return slot / (b.SlotsPerEpoch * b.EpochsPerSyncCommitteePeriod) +} + func (b *BeaconChainConfig) RoundSlotToVotePeriod(slot uint64) uint64 { p := b.SlotsPerEpoch * b.EpochsPerEth1VotingPeriod return slot - (slot % p) diff --git a/cl/cltypes/aggregate.go b/cl/cltypes/aggregate.go index a21ade55f6a..b829c41f8c4 100644 --- a/cl/cltypes/aggregate.go +++ b/cl/cltypes/aggregate.go @@ -12,9 +12,9 @@ import ( * to be aggregated and the BLS signature of the attestation. 
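 * AggregatorIndex identifies the aggregating validator, and SelectionProof is
 * the aggregator's BLS signature over the slot, proving it was selected to
 * aggregate for its committee.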
 */
type AggregateAndProof struct {
-	AggregatorIndex uint64
-	Aggregate       *solid.Attestation
-	SelectionProof  libcommon.Bytes96
+	AggregatorIndex uint64             `json:"aggregator_index,string"`
+	Aggregate       *solid.Attestation `json:"aggregate"`
+	SelectionProof  libcommon.Bytes96  `json:"selection_proof"`
 }
 
 func (a *AggregateAndProof) EncodeSSZ(dst []byte) ([]byte, error) {
@@ -39,8 +39,8 @@ func (a *AggregateAndProof) HashSSZ() ([32]byte, error) {
 }
 
 type SignedAggregateAndProof struct {
-	Message   *AggregateAndProof
-	Signature libcommon.Bytes96
+	Message   *AggregateAndProof `json:"message"`
+	Signature libcommon.Bytes96  `json:"signature"`
 }
 
 func (a *SignedAggregateAndProof) EncodeSSZ(dst []byte) ([]byte, error) {
@@ -82,6 +82,13 @@ func (agg *SyncAggregate) Sum() int {
 	return ret
 }
 
+// IsSet reports whether the participation bit at idx is set.
+// SyncCommiteeBits holds 64 bytes, i.e. 512 bits, so anything past 511 is out of range.
+func (agg *SyncAggregate) IsSet(idx uint64) bool {
+	if idx >= 512 {
+		return false
+	}
+	return agg.SyncCommiteeBits[idx/8]&(1<<(idx%8)) > 0
+}
+
 func (agg *SyncAggregate) EncodeSSZ(buf []byte) ([]byte, error) {
 	return append(buf, append(agg.SyncCommiteeBits[:], agg.SyncCommiteeSignature[:]...)...), nil
 }
diff --git a/cl/cltypes/beacon_block.go b/cl/cltypes/beacon_block.go
index 8125342241c..bf3d4766812 100644
--- a/cl/cltypes/beacon_block.go
+++ b/cl/cltypes/beacon_block.go
@@ -28,8 +28,8 @@ type SignedBeaconBlock struct {
 }
 
 type BeaconBlock struct {
-	Slot          uint64         `json:"slot"`
-	ProposerIndex uint64         `json:"proposer_index"`
+	Slot          uint64         `json:"slot,string"`
+	ProposerIndex uint64         `json:"proposer_index,string"`
 	ParentRoot    libcommon.Hash `json:"parent_root"`
 	StateRoot     libcommon.Hash `json:"state_root"`
 	Body          *BeaconBody    `json:"body"`
@@ -72,6 +72,17 @@ func NewSignedBeaconBlock(beaconCfg *clparams.BeaconChainConfig) *SignedBeaconBl
 	return &SignedBeaconBlock{Block: NewBeaconBlock(beaconCfg)}
 }
 
+func (b *SignedBeaconBlock) Blinded() (*SignedBlindedBeaconBlock, error) {
+	blindedBlock, err := b.Block.Blinded()
+	if err != nil {
+		return nil, err
+	}
+	return &SignedBlindedBeaconBlock{
+		Signature: b.Signature,
+		Block:     blindedBlock,
+	}, nil
+}
+
 func (s *SignedBeaconBlock) SignedBeaconBlockHeader() *SignedBeaconBlockHeader {
 	bodyRoot, err := s.Block.Body.HashSSZ()
 	if err != nil {
@@ -93,6 +104,20 @@ func NewBeaconBlock(beaconCfg *clparams.BeaconChainConfig) *BeaconBlock {
 	return &BeaconBlock{Body: NewBeaconBody(beaconCfg)}
 }
 
+func (b *BeaconBlock) Blinded() (*BlindedBeaconBlock, error) {
+	body, err := b.Body.Blinded()
+	if err != nil {
+		return nil, err
+	}
+	return &BlindedBeaconBlock{
+		Slot:          b.Slot,
+		ProposerIndex: b.ProposerIndex,
+		ParentRoot:    b.ParentRoot,
+		StateRoot:     b.StateRoot,
+		Body:          body,
+	}, nil
+}
+
 func NewBeaconBody(beaconCfg *clparams.BeaconChainConfig) *BeaconBody {
 	return &BeaconBody{
 		beaconCfg: beaconCfg,
@@ -180,6 +205,28 @@ func (b *BeaconBody) DecodeSSZ(buf []byte, version int) error {
 	return err
 }
 
+func (b *BeaconBody) Blinded() (*BlindedBeaconBody, error) {
+	header, err := b.ExecutionPayload.PayloadHeader()
+	if err != nil {
+		return nil, err
+	}
+	return &BlindedBeaconBody{
+		RandaoReveal:       b.RandaoReveal,
+		Eth1Data:           b.Eth1Data,
+		Graffiti:           b.Graffiti,
+		ProposerSlashings:  b.ProposerSlashings,
+		AttesterSlashings:  b.AttesterSlashings,
+		Attestations:       b.Attestations,
+		Deposits:           b.Deposits,
+		VoluntaryExits:     b.VoluntaryExits,
+		SyncAggregate:      b.SyncAggregate,
+		ExecutionPayload:   header,
+		ExecutionChanges:   b.ExecutionChanges,
+		BlobKzgCommitments: b.BlobKzgCommitments,
+		Version:            b.Version,
+		beaconCfg:          b.beaconCfg,
+	}, nil
+}
+
 func (b *BeaconBody) HashSSZ() ([32]byte, error) {
 	return merkle_tree.HashTreeRoot(b.getSchema(false)...)
 }
diff --git a/cl/cltypes/beacon_block_test.go b/cl/cltypes/beacon_block_test.go
index e032454800e..6a20827e3fe 100644
--- a/cl/cltypes/beacon_block_test.go
+++ b/cl/cltypes/beacon_block_test.go
@@ -60,7 +60,21 @@ func TestBeaconBody(t *testing.T) {
 	assert.NoError(t, err)
 	assert.Equal(t, libcommon.HexToHash("918d1ee08d700e422fcce6319cd7509b951d3ebfb1a05291aab9466b7e9826fc"), libcommon.Hash(root))
 
-	_, err = body.ExecutionPayload.RlpHeader()
+	// Test the blinded round-trip: body -> blinded -> full must preserve the SSZ root.
+	blinded, err := body.Blinded()
+	assert.NoError(t, err)
+
+	root2, err := blinded.HashSSZ()
+	assert.NoError(t, err)
+	assert.Equal(t, libcommon.HexToHash("918d1ee08d700e422fcce6319cd7509b951d3ebfb1a05291aab9466b7e9826fc"), libcommon.Hash(root2))
+
+	block2 := blinded.Full(body.ExecutionPayload.Transactions, body.ExecutionPayload.Withdrawals)
+	assert.Equal(t, block2.ExecutionPayload.version, body.ExecutionPayload.version)
+	root3, err := block2.HashSSZ()
+	assert.NoError(t, err)
+	assert.Equal(t, libcommon.HexToHash("918d1ee08d700e422fcce6319cd7509b951d3ebfb1a05291aab9466b7e9826fc"), libcommon.Hash(root3))
+
+	_, err = body.ExecutionPayload.RlpHeader(&libcommon.Hash{})
 	assert.NoError(t, err)
 
 	p, err := body.ExecutionPayload.PayloadHeader()
diff --git a/cl/cltypes/beacon_header.go b/cl/cltypes/beacon_header.go
index 94064339d23..4c67066f3df 100644
--- a/cl/cltypes/beacon_header.go
+++ b/cl/cltypes/beacon_header.go
@@ -13,8 +13,8 @@ import (
  * It contains the hash of the block body, and state root data.
 */
 type BeaconBlockHeader struct {
-	Slot          uint64         `json:"slot"`
-	ProposerIndex uint64         `json:"proposer_index"`
+	Slot          uint64         `json:"slot,string"`
+	ProposerIndex uint64         `json:"proposer_index,string"`
 	ParentRoot    libcommon.Hash `json:"parent_root"`
 	Root          libcommon.Hash `json:"state_root"`
 	BodyRoot      libcommon.Hash `json:"body_root"`
diff --git a/cl/cltypes/blinded_beacon_block.go b/cl/cltypes/blinded_beacon_block.go
new file mode 100644
index 00000000000..b7111ecde0e
--- /dev/null
+++ b/cl/cltypes/blinded_beacon_block.go
@@ -0,0 +1,315 @@
+package cltypes
+
+import (
+	"fmt"
+
+	libcommon "github.com/ledgerwatch/erigon-lib/common"
+	"github.com/ledgerwatch/erigon-lib/types/clonable"
+	"github.com/ledgerwatch/erigon-lib/types/ssz"
+
+	"github.com/ledgerwatch/erigon/cl/clparams"
+	"github.com/ledgerwatch/erigon/cl/cltypes/solid"
+	"github.com/ledgerwatch/erigon/cl/merkle_tree"
+	ssz2 "github.com/ledgerwatch/erigon/cl/ssz"
+)
+
+type SignedBlindedBeaconBlock struct {
+	Signature libcommon.Bytes96   `json:"signature"`
+	Block     *BlindedBeaconBlock `json:"message"`
+}
+
+type BlindedBeaconBlock struct {
+	Slot          uint64             `json:"slot,string"`
+	ProposerIndex uint64             `json:"proposer_index,string"`
+	ParentRoot    libcommon.Hash     `json:"parent_root"`
+	StateRoot     libcommon.Hash     `json:"state_root"`
+	Body          *BlindedBeaconBody `json:"body"`
+}
+
+type BlindedBeaconBody struct {
+	// A byte array used for randomness in the beacon chain
+	RandaoReveal libcommon.Bytes96 `json:"randao_reveal"`
+	// Data related to the Ethereum 1.0 chain
+	Eth1Data *Eth1Data `json:"eth1_data"`
+	// A byte array used to customize validators' behavior
+	Graffiti libcommon.Hash `json:"graffiti"`
+	// A list of slashing events for validators who included invalid blocks in the chain
+	ProposerSlashings *solid.ListSSZ[*ProposerSlashing] `json:"proposer_slashings"`
+	// A list of slashing events for validators who included invalid attestations in the chain
+	AttesterSlashings *solid.ListSSZ[*AttesterSlashing] `json:"attester_slashings"`
+	// A 
list of attestations included in the block + Attestations *solid.ListSSZ[*solid.Attestation] `json:"attestations"` + // A list of deposits made to the Ethereum 1.0 chain + Deposits *solid.ListSSZ[*Deposit] `json:"deposits"` + // A list of validators who have voluntarily exited the beacon chain + VoluntaryExits *solid.ListSSZ[*SignedVoluntaryExit] `json:"voluntary_exits"` + // A summary of the current state of the beacon chain + SyncAggregate *SyncAggregate `json:"sync_aggregate,omitempty"` + // Data related to crosslink records and executing operations on the Ethereum 2.0 chain + ExecutionPayload *Eth1Header `json:"execution_payload_header,omitempty"` + // Withdrawals Diffs for Execution Layer + ExecutionChanges *solid.ListSSZ[*SignedBLSToExecutionChange] `json:"execution_changes,omitempty"` + // The commitments for beacon chain blobs + // With a max of 4 per block + BlobKzgCommitments *solid.ListSSZ[*KZGCommitment] `json:"blob_kzg_commitments,omitempty"` + // The version of the beacon chain + Version clparams.StateVersion `json:"-"` + beaconCfg *clparams.BeaconChainConfig +} + +// Getters + +func NewSignedBlindedBeaconBlock(beaconCfg *clparams.BeaconChainConfig) *SignedBlindedBeaconBlock { + return &SignedBlindedBeaconBlock{Block: NewBlindedBeaconBlock(beaconCfg)} +} + +func (s *SignedBlindedBeaconBlock) SignedBeaconBlockHeader() *SignedBeaconBlockHeader { + bodyRoot, err := s.Block.Body.HashSSZ() + if err != nil { + panic(err) + } + return &SignedBeaconBlockHeader{ + Signature: s.Signature, + Header: &BeaconBlockHeader{ + Slot: s.Block.Slot, + ProposerIndex: s.Block.ProposerIndex, + ParentRoot: s.Block.ParentRoot, + Root: s.Block.StateRoot, + BodyRoot: bodyRoot, + }, + } +} + +func NewBlindedBeaconBlock(beaconCfg *clparams.BeaconChainConfig) *BlindedBeaconBlock { + return &BlindedBeaconBlock{Body: NewBlindedBeaconBody(beaconCfg)} +} + +func NewBlindedBeaconBody(beaconCfg *clparams.BeaconChainConfig) *BlindedBeaconBody { + return &BlindedBeaconBody{ + beaconCfg: beaconCfg, + } +} + +// Version returns beacon block version. +func (b *SignedBlindedBeaconBlock) Version() clparams.StateVersion { + return b.Block.Body.Version +} + +func (b *SignedBlindedBeaconBlock) Full(txs *solid.TransactionsSSZ, withdrawals *solid.ListSSZ[*Withdrawal]) *SignedBeaconBlock { + return &SignedBeaconBlock{ + Signature: b.Signature, + Block: b.Block.Full(txs, withdrawals), + } +} + +// Version returns beacon block version. +func (b *BlindedBeaconBlock) Version() clparams.StateVersion { + return b.Body.Version +} + +func (b *BlindedBeaconBlock) Full(txs *solid.TransactionsSSZ, withdrawals *solid.ListSSZ[*Withdrawal]) *BeaconBlock { + return &BeaconBlock{ + Slot: b.Slot, + ProposerIndex: b.ProposerIndex, + ParentRoot: b.ParentRoot, + StateRoot: b.StateRoot, + Body: b.Body.Full(txs, withdrawals), + } +} + +func (b *BlindedBeaconBody) EncodeSSZ(dst []byte) ([]byte, error) { + return ssz2.MarshalSSZ(dst, b.getSchema(false)...) 
+}
+
+func (b *BlindedBeaconBody) EncodingSizeSSZ() (size int) {
+	if b.Eth1Data == nil {
+		b.Eth1Data = &Eth1Data{}
+	}
+	if b.SyncAggregate == nil {
+		b.SyncAggregate = &SyncAggregate{}
+	}
+	if b.ExecutionPayload == nil {
+		b.ExecutionPayload = NewEth1Header(b.Version)
+	}
+	if b.ProposerSlashings == nil {
+		b.ProposerSlashings = solid.NewStaticListSSZ[*ProposerSlashing](MaxProposerSlashings, 416)
+	}
+	if b.AttesterSlashings == nil {
+		b.AttesterSlashings = solid.NewDynamicListSSZ[*AttesterSlashing](MaxAttesterSlashings)
+	}
+	if b.Attestations == nil {
+		b.Attestations = solid.NewDynamicListSSZ[*solid.Attestation](MaxAttestations)
+	}
+	if b.Deposits == nil {
+		b.Deposits = solid.NewStaticListSSZ[*Deposit](MaxDeposits, 1240)
+	}
+	if b.VoluntaryExits == nil {
+		b.VoluntaryExits = solid.NewStaticListSSZ[*SignedVoluntaryExit](MaxVoluntaryExits, 112)
+	}
+	if b.ExecutionChanges == nil {
+		b.ExecutionChanges = solid.NewStaticListSSZ[*SignedBLSToExecutionChange](MaxExecutionChanges, 172)
+	}
+	if b.BlobKzgCommitments == nil {
+		b.BlobKzgCommitments = solid.NewStaticListSSZ[*KZGCommitment](MaxBlobsCommittmentsPerBlock, 48)
+	}
+
+	size += b.ProposerSlashings.EncodingSizeSSZ()
+	size += b.AttesterSlashings.EncodingSizeSSZ()
+	size += b.Attestations.EncodingSizeSSZ()
+	size += b.Deposits.EncodingSizeSSZ()
+	size += b.VoluntaryExits.EncodingSizeSSZ()
+	if b.Version >= clparams.BellatrixVersion {
+		size += b.ExecutionPayload.EncodingSizeSSZ()
+	}
+	if b.Version >= clparams.CapellaVersion {
+		size += b.ExecutionChanges.EncodingSizeSSZ()
+	}
+	if b.Version >= clparams.DenebVersion {
+		// Deneb adds the blob KZG commitments, not another copy of the execution changes.
+		size += b.BlobKzgCommitments.EncodingSizeSSZ()
+	}
+
+	return
+}
+
+func (b *BlindedBeaconBody) DecodeSSZ(buf []byte, version int) error {
+	b.Version = clparams.StateVersion(version)
+
+	if len(buf) < b.EncodingSizeSSZ() {
+		return fmt.Errorf("[BlindedBeaconBody] err: %s", ssz.ErrLowBufferSize)
+	}
+
+	b.ExecutionPayload = NewEth1Header(b.Version)
+
+	err := ssz2.UnmarshalSSZ(buf, version, b.getSchema(false)...)
+	return err
+}
+
+func (b *BlindedBeaconBody) HashSSZ() ([32]byte, error) {
+	return merkle_tree.HashTreeRoot(b.getSchema(false)...)
+} + +func (b *BlindedBeaconBody) getSchema(storage bool) []interface{} { + s := []interface{}{b.RandaoReveal[:], b.Eth1Data, b.Graffiti[:], b.ProposerSlashings, b.AttesterSlashings, b.Attestations, b.Deposits, b.VoluntaryExits} + if b.Version >= clparams.AltairVersion { + s = append(s, b.SyncAggregate) + } + if b.Version >= clparams.BellatrixVersion && !storage { + s = append(s, b.ExecutionPayload) + } + if b.Version >= clparams.CapellaVersion { + s = append(s, b.ExecutionChanges) + } + if b.Version >= clparams.DenebVersion { + s = append(s, b.BlobKzgCommitments) + } + return s +} + +func (b *BlindedBeaconBody) Full(txs *solid.TransactionsSSZ, withdrawals *solid.ListSSZ[*Withdrawal]) *BeaconBody { + // Recover the execution payload + executionPayload := &Eth1Block{ + ParentHash: b.ExecutionPayload.ParentHash, + BlockNumber: b.ExecutionPayload.BlockNumber, + StateRoot: b.ExecutionPayload.StateRoot, + Time: b.ExecutionPayload.Time, + GasLimit: b.ExecutionPayload.GasLimit, + GasUsed: b.ExecutionPayload.GasUsed, + Extra: b.ExecutionPayload.Extra, + ReceiptsRoot: b.ExecutionPayload.ReceiptsRoot, + LogsBloom: b.ExecutionPayload.LogsBloom, + BaseFeePerGas: b.ExecutionPayload.BaseFeePerGas, + BlockHash: b.ExecutionPayload.BlockHash, + BlobGasUsed: b.ExecutionPayload.BlobGasUsed, + ExcessBlobGas: b.ExecutionPayload.ExcessBlobGas, + FeeRecipient: b.ExecutionPayload.FeeRecipient, + PrevRandao: b.ExecutionPayload.PrevRandao, + Transactions: txs, + Withdrawals: withdrawals, + version: b.ExecutionPayload.version, + beaconCfg: b.beaconCfg, + } + + return &BeaconBody{ + RandaoReveal: b.RandaoReveal, + Eth1Data: b.Eth1Data, + Graffiti: b.Graffiti, + ProposerSlashings: b.ProposerSlashings, + AttesterSlashings: b.AttesterSlashings, + Attestations: b.Attestations, + Deposits: b.Deposits, + VoluntaryExits: b.VoluntaryExits, + SyncAggregate: b.SyncAggregate, + ExecutionPayload: executionPayload, + ExecutionChanges: b.ExecutionChanges, + BlobKzgCommitments: b.BlobKzgCommitments, + Version: b.Version, + beaconCfg: b.beaconCfg, + } +} + +func (b *BlindedBeaconBlock) EncodeSSZ(buf []byte) (dst []byte, err error) { + return ssz2.MarshalSSZ(buf, b.Slot, b.ProposerIndex, b.ParentRoot[:], b.StateRoot[:], b.Body) +} + +func (b *BlindedBeaconBlock) EncodingSizeSSZ() int { + if b.Body == nil { + return 80 + } + return 80 + b.Body.EncodingSizeSSZ() +} + +func (b *BlindedBeaconBlock) DecodeSSZ(buf []byte, version int) error { + return ssz2.UnmarshalSSZ(buf, version, &b.Slot, &b.ProposerIndex, b.ParentRoot[:], b.StateRoot[:], b.Body) +} + +func (b *BlindedBeaconBlock) HashSSZ() ([32]byte, error) { + return merkle_tree.HashTreeRoot(b.Slot, b.ProposerIndex, b.ParentRoot[:], b.StateRoot[:], b.Body) +} + +func (b *SignedBlindedBeaconBlock) EncodeSSZ(buf []byte) ([]byte, error) { + return ssz2.MarshalSSZ(buf, b.Block, b.Signature[:]) +} + +func (b *SignedBlindedBeaconBlock) EncodingSizeSSZ() int { + if b.Block == nil { + return 100 + } + return 100 + b.Block.EncodingSizeSSZ() +} + +func (b *SignedBlindedBeaconBlock) DecodeSSZ(buf []byte, s int) error { + return ssz2.UnmarshalSSZ(buf, s, b.Block, b.Signature[:]) +} + +func (b *SignedBlindedBeaconBlock) HashSSZ() ([32]byte, error) { + return merkle_tree.HashTreeRoot(b.Block, b.Signature[:]) +} + +func (*BlindedBeaconBody) Static() bool { + return false +} + +func (*BlindedBeaconBlock) Static() bool { + return false +} + +func (b *BlindedBeaconBody) Clone() clonable.Clonable { + return NewBlindedBeaconBody(b.beaconCfg) +} + +func (b *BlindedBeaconBlock) Clone() clonable.Clonable { 
+	return NewBlindedBeaconBlock(b.Body.beaconCfg)
+}
+
+func (b *SignedBlindedBeaconBlock) Clone() clonable.Clonable {
+	return NewSignedBlindedBeaconBlock(b.Block.Body.beaconCfg)
+}
+
+// make sure that the type implements the interface ssz2.ObjectSSZ
+var _ ssz2.ObjectSSZ = (*BlindedBeaconBody)(nil)
+var _ ssz2.ObjectSSZ = (*BlindedBeaconBlock)(nil)
+var _ ssz2.ObjectSSZ = (*SignedBlindedBeaconBlock)(nil)
diff --git a/cl/cltypes/bls_to_execution_change.go b/cl/cltypes/bls_to_execution_change.go
index 676154f15fc..8fa1a9000a9 100644
--- a/cl/cltypes/bls_to_execution_change.go
+++ b/cl/cltypes/bls_to_execution_change.go
@@ -11,7 +11,7 @@ import (
 
 // Change to EL engine
 type BLSToExecutionChange struct {
-	ValidatorIndex uint64            `json:"validator_index"`
+	ValidatorIndex uint64            `json:"validator_index,string"`
 	From           libcommon.Bytes48 `json:"from"`
 	To             libcommon.Address `json:"to"`
 }
diff --git a/cl/cltypes/contribution.go b/cl/cltypes/contribution.go
new file mode 100644
index 00000000000..f748b8650d9
--- /dev/null
+++ b/cl/cltypes/contribution.go
@@ -0,0 +1,111 @@
+package cltypes
+
+import (
+	libcommon "github.com/ledgerwatch/erigon-lib/common"
+	"github.com/ledgerwatch/erigon/cl/cltypes/solid"
+	"github.com/ledgerwatch/erigon/cl/merkle_tree"
+	ssz2 "github.com/ledgerwatch/erigon/cl/ssz"
+)
+
+/*
+ * ContributionAndProof contains the index of the aggregator, the sync committee
+ * contribution to be proven and the aggregator's BLS selection proof.
+ */
+type ContributionAndProof struct {
+	AggregatorIndex uint64              `json:"aggregator_index,string"`
+	SelectionProof  libcommon.Bytes96   `json:"selection_proof"`
+	Contribution    *solid.Contribution `json:"contribution"`
+}
+
+func (a *ContributionAndProof) EncodeSSZ(dst []byte) ([]byte, error) {
+	return ssz2.MarshalSSZ(dst, a.AggregatorIndex, a.Contribution, a.SelectionProof[:])
+}
+
+func (a *ContributionAndProof) Static() bool {
+	return false
+}
+
+func (a *ContributionAndProof) DecodeSSZ(buf []byte, version int) error {
+	a.Contribution = new(solid.Contribution)
+	return ssz2.UnmarshalSSZ(buf, version, &a.AggregatorIndex, a.Contribution, a.SelectionProof[:])
+}
+
+func (a *ContributionAndProof) EncodingSizeSSZ() int {
+	return 108 + a.Contribution.EncodingSizeSSZ()
+}
+
+func (a *ContributionAndProof) HashSSZ() ([32]byte, error) {
+	return merkle_tree.HashTreeRoot(a.AggregatorIndex, a.Contribution, a.SelectionProof[:])
+}
+
+type SignedContributionAndProof struct {
+	Message   *ContributionAndProof `json:"message"`
+	Signature libcommon.Bytes96     `json:"signature"`
+}
+
+func (a *SignedContributionAndProof) EncodeSSZ(dst []byte) ([]byte, error) {
+	return ssz2.MarshalSSZ(dst, a.Message, a.Signature[:])
+}
+
+func (a *SignedContributionAndProof) DecodeSSZ(buf []byte, version int) error {
+	a.Message = new(ContributionAndProof)
+	return ssz2.UnmarshalSSZ(buf, version, a.Message, a.Signature[:])
+}
+
+func (a *SignedContributionAndProof) EncodingSizeSSZ() int {
+	return 100 + a.Message.EncodingSizeSSZ()
+}
+
+func (a *SignedContributionAndProof) HashSSZ() ([32]byte, error) {
+	return merkle_tree.HashTreeRoot(a.Message, a.Signature[:])
+}
+
+/*
+ * SyncContribution records a successful committee vote: the bits show the
+ * active participants, and the signature is the aggregate BLS signature of the committee.
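+ * The layout mirrors SyncAggregate: 64 bytes of participation bits followed by
+ * a 96-byte BLS signature, 160 bytes in total.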
+ */
+type SyncContribution struct {
+	SyncCommiteeBits      libcommon.Bytes64 `json:"sync_commitee_bits"`
+	SyncCommiteeSignature libcommon.Bytes96 `json:"signature"`
+}
+
+// Sum returns the number of participation bits set in the committee bitvector.
+func (agg *SyncContribution) Sum() int {
+	ret := 0
+	for i := range agg.SyncCommiteeBits {
+		for bit := 1; bit <= 128; bit *= 2 {
+			if agg.SyncCommiteeBits[i]&byte(bit) > 0 {
+				ret++
+			}
+		}
+	}
+	return ret
+}
+
+// IsSet reports whether the participation bit at idx is set.
+// SyncCommiteeBits holds 64 bytes, i.e. 512 bits, so anything past 511 is out of range.
+func (agg *SyncContribution) IsSet(idx uint64) bool {
+	if idx >= 512 {
+		return false
+	}
+	return agg.SyncCommiteeBits[idx/8]&(1<<(idx%8)) > 0
+}
+
+func (agg *SyncContribution) EncodeSSZ(buf []byte) ([]byte, error) {
+	return append(buf, append(agg.SyncCommiteeBits[:], agg.SyncCommiteeSignature[:]...)...), nil
+}
+
+func (*SyncContribution) Static() bool {
+	return true
+}
+
+func (agg *SyncContribution) DecodeSSZ(buf []byte, version int) error {
+	return ssz2.UnmarshalSSZ(buf, version, agg.SyncCommiteeBits[:], agg.SyncCommiteeSignature[:])
+}
+
+func (agg *SyncContribution) EncodingSizeSSZ() int {
+	return 160
+}
+
+func (agg *SyncContribution) HashSSZ() ([32]byte, error) {
+	return merkle_tree.HashTreeRoot(agg.SyncCommiteeBits[:], agg.SyncCommiteeSignature[:])
+}
diff --git a/cl/cltypes/eth1_block.go b/cl/cltypes/eth1_block.go
index 3d92938f7ee..67f54233f42 100644
--- a/cl/cltypes/eth1_block.go
+++ b/cl/cltypes/eth1_block.go
@@ -22,18 +22,18 @@ type Eth1Block struct {
 	ReceiptsRoot libcommon.Hash `json:"receipts_root"`
 	LogsBloom    types.Bloom    `json:"logs_bloom"`
 	PrevRandao   libcommon.Hash `json:"prev_randao"`
-	BlockNumber  uint64         `json:"block_number"`
-	GasLimit     uint64         `json:"gas_limit"`
-	GasUsed      uint64         `json:"gas_used"`
-	Time         uint64         `json:"timestamp"`
+	BlockNumber  uint64         `json:"block_number,string"`
+	GasLimit     uint64         `json:"gas_limit,string"`
+	GasUsed      uint64         `json:"gas_used,string"`
+	Time         uint64         `json:"timestamp,string"`
 	Extra         *solid.ExtraData `json:"extra_data"`
 	BaseFeePerGas libcommon.Hash   `json:"base_fee_per_gas"`
 	// Extra fields
 	BlockHash    libcommon.Hash              `json:"block_hash"`
 	Transactions *solid.TransactionsSSZ      `json:"transactions"`
 	Withdrawals  *solid.ListSSZ[*Withdrawal] `json:"withdrawals,omitempty"`
-	BlobGasUsed   uint64 `json:"blob_gas_used,omitempty"`
-	ExcessBlobGas uint64 `json:"excess_blob_gas,omitempty"`
+	BlobGasUsed   uint64 `json:"blob_gas_used,omitempty,string"`
+	ExcessBlobGas uint64 `json:"excess_blob_gas,omitempty,string"`
 	// internals
 	version   clparams.StateVersion
 	beaconCfg *clparams.BeaconChainConfig
@@ -192,7 +192,7 @@ func (b *Eth1Block) getSchema() []interface{} {
 }
 
 // RlpHeader returns the equivalent types.Header struct with RLP-based fields.
-func (b *Eth1Block) RlpHeader() (*types.Header, error) {
+func (b *Eth1Block) RlpHeader(parentRoot *libcommon.Hash) (*types.Header, error) {
 	// Reverse the order of the bytes in the BaseFeePerGas array and convert it to a big integer.
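 	// (SSZ stores the base fee as a little-endian uint256, while big.Int expects big-endian bytes.)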
reversedBaseFeePerGas := libcommon.Copy(b.BaseFeePerGas[:]) for i, j := 0, len(reversedBaseFeePerGas)-1; i < j; i, j = i+1, j-1 { @@ -211,25 +211,29 @@ func (b *Eth1Block) RlpHeader() (*types.Header, error) { }) *withdrawalsHash = types.DeriveSha(types.Withdrawals(withdrawals)) } + if b.version < clparams.DenebVersion { + parentRoot = nil + } header := &types.Header{ - ParentHash: b.ParentHash, - UncleHash: types.EmptyUncleHash, - Coinbase: b.FeeRecipient, - Root: b.StateRoot, - TxHash: types.DeriveSha(types.BinaryTransactions(b.Transactions.UnderlyngReference())), - ReceiptHash: b.ReceiptsRoot, - Bloom: b.LogsBloom, - Difficulty: merge.ProofOfStakeDifficulty, - Number: big.NewInt(int64(b.BlockNumber)), - GasLimit: b.GasLimit, - GasUsed: b.GasUsed, - Time: b.Time, - Extra: b.Extra.Bytes(), - MixDigest: b.PrevRandao, - Nonce: merge.ProofOfStakeNonce, - BaseFee: baseFee, - WithdrawalsHash: withdrawalsHash, + ParentHash: b.ParentHash, + UncleHash: types.EmptyUncleHash, + Coinbase: b.FeeRecipient, + Root: b.StateRoot, + TxHash: types.DeriveSha(types.BinaryTransactions(b.Transactions.UnderlyngReference())), + ReceiptHash: b.ReceiptsRoot, + Bloom: b.LogsBloom, + Difficulty: merge.ProofOfStakeDifficulty, + Number: big.NewInt(int64(b.BlockNumber)), + GasLimit: b.GasLimit, + GasUsed: b.GasUsed, + Time: b.Time, + Extra: b.Extra.Bytes(), + MixDigest: b.PrevRandao, + Nonce: merge.ProofOfStakeNonce, + BaseFee: baseFee, + WithdrawalsHash: withdrawalsHash, + ParentBeaconBlockRoot: parentRoot, } if b.version >= clparams.DenebVersion { diff --git a/cl/cltypes/eth1_data.go b/cl/cltypes/eth1_data.go index 716105dee89..ee9695d67ca 100644 --- a/cl/cltypes/eth1_data.go +++ b/cl/cltypes/eth1_data.go @@ -10,7 +10,7 @@ import ( type Eth1Data struct { Root libcommon.Hash `json:"deposit_root"` - DepositCount uint64 `json:"deposit_count"` + DepositCount uint64 `json:"deposit_count,string"` BlockHash libcommon.Hash `json:"block_hash"` } diff --git a/cl/cltypes/eth1_header.go b/cl/cltypes/eth1_header.go index 32794c2ebdd..9eabdfc1a6b 100644 --- a/cl/cltypes/eth1_header.go +++ b/cl/cltypes/eth1_header.go @@ -14,24 +14,24 @@ import ( // ETH1Header represents the ethereum 1 header structure CL-side. 
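// The ",string" JSON options below follow the beacon API convention of
// encoding uint64 fields as decimal strings.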
type Eth1Header struct { - ParentHash libcommon.Hash - FeeRecipient libcommon.Address - StateRoot libcommon.Hash - ReceiptsRoot libcommon.Hash - LogsBloom types.Bloom - PrevRandao libcommon.Hash - BlockNumber uint64 - GasLimit uint64 - GasUsed uint64 - Time uint64 - Extra *solid.ExtraData - BaseFeePerGas libcommon.Hash + ParentHash libcommon.Hash `json:"parent_hash"` + FeeRecipient libcommon.Address `json:"fee_recipient"` + StateRoot libcommon.Hash `json:"state_root"` + ReceiptsRoot libcommon.Hash `json:"receipts_root"` + LogsBloom types.Bloom `json:"logs_bloom"` + PrevRandao libcommon.Hash `json:"prev_randao"` + BlockNumber uint64 `json:"block_number,string"` + GasLimit uint64 `json:"gas_limit,string"` + GasUsed uint64 `json:"gas_used,string"` + Time uint64 `json:"time,string"` + Extra *solid.ExtraData `json:"extra_data"` + BaseFeePerGas libcommon.Hash `json:"base_fee_per_gas"` // Extra fields - BlockHash libcommon.Hash - TransactionsRoot libcommon.Hash - WithdrawalsRoot libcommon.Hash - BlobGasUsed uint64 - ExcessBlobGas uint64 + BlockHash libcommon.Hash `json:"block_hash"` + TransactionsRoot libcommon.Hash `json:"transactions_root"` + WithdrawalsRoot libcommon.Hash `json:"withdrawals_root,omitempty"` + BlobGasUsed uint64 `json:"blob_gas_used,omitempty,string"` + ExcessBlobGas uint64 `json:"excess_blob_gas,omitempty,string"` // internals version clparams.StateVersion } diff --git a/cl/cltypes/fork.go b/cl/cltypes/fork.go index 9059a927112..eff0f292047 100644 --- a/cl/cltypes/fork.go +++ b/cl/cltypes/fork.go @@ -10,7 +10,7 @@ import ( type Fork struct { PreviousVersion libcommon.Bytes4 `json:"previous_version"` CurrentVersion libcommon.Bytes4 `json:"current_version"` - Epoch uint64 `json:"epoch"` + Epoch uint64 `json:"epoch,string"` } func (*Fork) Static() bool { diff --git a/cl/cltypes/solid/attestation_data.go b/cl/cltypes/solid/attestation_data.go index 6c505f58105..4c922f5ebb0 100644 --- a/cl/cltypes/solid/attestation_data.go +++ b/cl/cltypes/solid/attestation_data.go @@ -40,8 +40,8 @@ func NewAttestionDataFromParameters( func (a AttestationData) MarshalJSON() ([]byte, error) { return json.Marshal(struct { - Slot uint64 `json:"slot"` - Index uint64 `json:"index"` + Slot uint64 `json:"slot,string"` + Index uint64 `json:"index,string"` BeaconBlockRoot libcommon.Hash `json:"beacon_block_root"` Source Checkpoint `json:"source"` Target Checkpoint `json:"target"` @@ -56,8 +56,8 @@ func (a AttestationData) MarshalJSON() ([]byte, error) { func (a AttestationData) UnmarshalJSON(buf []byte) error { var tmp struct { - Slot uint64 `json:"slot"` - Index uint64 `json:"index"` + Slot uint64 `json:"slot,string"` + Index uint64 `json:"index,string"` BeaconBlockRoot libcommon.Hash `json:"beacon_block_root"` Source Checkpoint `json:"source"` Target Checkpoint `json:"target"` diff --git a/cl/cltypes/solid/bitlist.go b/cl/cltypes/solid/bitlist.go index cf14cf0644c..f54d55af93a 100644 --- a/cl/cltypes/solid/bitlist.go +++ b/cl/cltypes/solid/bitlist.go @@ -65,6 +65,13 @@ func (u *BitList) CopyTo(target IterableSSZ[byte]) { } } +func (u *BitList) Copy() *BitList { + n := NewBitList(u.l, u.c) + n.u = make([]byte, len(u.u), cap(u.u)) + copy(n.u, u.u) + return n +} + // Range allows us to do something to each bit in the list, just like a Power Rangers roll call. 
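// The callback gets the index, the byte at that index and a length parameter;
// returning false stops the iteration early, mirroring the other Range helpers.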
 func (u *BitList) Range(fn func(index int, value byte, length int) bool) {
 	for i, v := range u.u {
diff --git a/cl/cltypes/solid/checkpoint.go b/cl/cltypes/solid/checkpoint.go
index 87ce50436aa..948bd8344eb 100644
--- a/cl/cltypes/solid/checkpoint.go
+++ b/cl/cltypes/solid/checkpoint.go
@@ -35,14 +35,14 @@ func NewCheckpoint() Checkpoint {
 
 func (c Checkpoint) MarshalJSON() ([]byte, error) {
 	return json.Marshal(struct {
-		Epoch uint64         `json:"epoch"`
+		Epoch uint64         `json:"epoch,string"`
 		Root  libcommon.Hash `json:"root"`
 	}{Epoch: c.Epoch(), Root: c.BlockRoot()})
 }
 
 func (c Checkpoint) UnmarshalJSON(buf []byte) error {
 	var tmp struct {
-		Epoch uint64         `json:"epoch"`
+		Epoch uint64         `json:"epoch,string"`
 		Root  libcommon.Hash `json:"root"`
 	}
 	if err := json.Unmarshal(buf, &tmp); err != nil {
diff --git a/cl/cltypes/solid/contribution.go b/cl/cltypes/solid/contribution.go
new file mode 100644
index 00000000000..36e0806e897
--- /dev/null
+++ b/cl/cltypes/solid/contribution.go
@@ -0,0 +1,231 @@
+package solid
+
+import (
+	"encoding/binary"
+	"encoding/json"
+
+	"github.com/ledgerwatch/erigon-lib/common"
+	libcommon "github.com/ledgerwatch/erigon-lib/common"
+	"github.com/ledgerwatch/erigon-lib/common/hexutility"
+	"github.com/ledgerwatch/erigon-lib/common/length"
+	"github.com/ledgerwatch/erigon-lib/types/clonable"
+	"github.com/ledgerwatch/erigon-lib/types/ssz"
+	"github.com/ledgerwatch/erigon/cl/merkle_tree"
+	ssz2 "github.com/ledgerwatch/erigon/cl/ssz"
+)
+
+const (
+	// slot: 8 bytes // 0
+	// beaconBlockHash: 32 bytes // 8
+	// subcommitteeIndex: 8 bytes // 40
+	// aggregationBits: 16 bytes // 48
+	// signature: 96 bytes // 64
+	// total = 160
+	contributionStaticBufferSize = 8 + 32 + 8 + 16 + 96
+)
+
+// Contribution represents a sync committee contribution: the aggregate signature of a
+// sync subcommittee over a beacon block root, packed into a fixed 160-byte buffer.
+type Contribution [160]byte
+
+// Static returns whether the contribution is static or not. For Contribution, it's always false.
+func (*Contribution) Static() bool {
+	return false
+}
+
+// NewContributionFromParameters creates a new Contribution instance using the provided parameters
+func NewContributionFromParameters(
+	slot uint64,
+	beaconBlockRoot libcommon.Hash,
+	subcommitteeIndex uint64,
+	aggregationBits [16]byte,
+	signature libcommon.Bytes96,
+) *Contribution {
+	a := &Contribution{}
+	a.SetSlot(slot)
+	a.SetBeaconBlockRoot(beaconBlockRoot)
+	a.SetSubcommitteeIndex(subcommitteeIndex)
+	a.SetAggregationBits(aggregationBits)
+	a.SetSignature(signature)
+	return a
+}
+
+func (a Contribution) MarshalJSON() ([]byte, error) {
+	ab := a.AggregationBits()
+	return json.Marshal(struct {
+		Slot              uint64            `json:"slot,string"`
+		BeaconBlockRoot   libcommon.Hash    `json:"beacon_block_root"`
+		SubcommitteeIndex uint64            `json:"subcommittee_index,string"`
+		AggregationBits   hexutility.Bytes  `json:"aggregation_bits"`
+		Signature         libcommon.Bytes96 `json:"signature"`
+	}{
+		Slot:              a.Slot(),
+		BeaconBlockRoot:   a.BeaconBlockRoot(),
+		SubcommitteeIndex: a.SubcommitteeIndex(),
+		AggregationBits:   hexutility.Bytes(ab[:]),
+		Signature:         a.Signature(),
+	})
+}
+
+func (a *Contribution) UnmarshalJSON(buf []byte) error {
+	var tmp struct {
+		Slot              uint64            `json:"slot,string"`
+		BeaconBlockRoot   libcommon.Hash    `json:"beacon_block_root"`
+		SubcommitteeIndex uint64            `json:"subcommittee_index,string"`
+		AggregationBits   hexutility.Bytes  `json:"aggregation_bits"`
+		Signature         libcommon.Bytes96 `json:"signature"`
+	}
+	if err := json.Unmarshal(buf, &tmp); err != nil {
+		return err
+	}
+	a.SetSlot(tmp.Slot)
+	a.SetBeaconBlockRoot(tmp.BeaconBlockRoot)
+	a.SetSubcommitteeIndex(tmp.SubcommitteeIndex)
+	o := [16]byte{}
+	copy(o[:], tmp.AggregationBits)
+	a.SetAggregationBits(o)
+	a.SetSignature(tmp.Signature)
+	return nil
+}
+func (a Contribution) Slot() uint64 {
+	return binary.LittleEndian.Uint64(a[:8])
+}
+func (a Contribution) BeaconBlockRoot() (o libcommon.Hash) {
+	copy(o[:], a[8:40]) // the root lives at offset 8, per the layout above
+	return
+}
+func (a Contribution) SubcommitteeIndex() uint64 {
+	return binary.LittleEndian.Uint64(a[40:48])
+}
+func (a Contribution) AggregationBits() (o [16]byte) {
+	copy(o[:], a[48:64])
+	return
+}
+func (a Contribution) Signature() (o libcommon.Bytes96) {
+	copy(o[:], a[64:160]) // the signature occupies the last 96 bytes
+	return
+}
+
+// Setters use pointer receivers: Contribution is an array type, so a value
+// receiver would mutate a copy and the write would be lost.
+func (a *Contribution) SetSlot(slot uint64) {
+	binary.LittleEndian.PutUint64(a[:8], slot)
+}
+
+func (a *Contribution) SetBeaconBlockRoot(hsh common.Hash) {
+	copy(a[8:40], hsh[:])
+}
+
+func (a *Contribution) SetSubcommitteeIndex(subcommitteeIndex uint64) {
+	binary.LittleEndian.PutUint64(a[40:48], subcommitteeIndex)
+}
+
+func (a *Contribution) SetAggregationBits(xs [16]byte) {
+	copy(a[48:64], xs[:])
+}
+
+// SetSignature sets the signature of the Contribution instance.
+func (a *Contribution) SetSignature(signature [96]byte) {
+	copy(a[64:], signature[:])
+}
+
+// EncodingSizeSSZ returns the size of the Contribution instance when encoded in SSZ format.
+func (a *Contribution) EncodingSizeSSZ() (size int) {
+	return 160
+}
+
+// DecodeSSZ decodes the provided buffer into the Contribution instance.
+func (a *Contribution) DecodeSSZ(buf []byte, _ int) error {
+	if len(buf) < contributionStaticBufferSize {
+		return ssz.ErrLowBufferSize
+	}
+	copy((*a)[:], buf)
+	return nil
+}
+
+// EncodeSSZ encodes the Contribution instance into the provided buffer.
+func (a *Contribution) EncodeSSZ(dst []byte) ([]byte, error) {
+	buf := dst
+	buf = append(buf, (*a)[:]...)
+	return buf, nil
+}
+
+// CopyHashBufferTo copies the hash buffer of the Contribution instance to the provided byte slice.
+func (a *Contribution) CopyHashBufferTo(o []byte) error {
+	for i := 0; i < 160; i++ {
+		o[i] = 0
+	}
+
+	// hash the signature first: pad it into the scratch area, reduce it to a
+	// 32-byte root, and park that root in the fifth leaf
+	copy(o[:128], a[64:160])
+	if err := merkle_tree.InPlaceRoot(o); err != nil {
+		return err
+	}
+	copy(o[128:160], o[:32])
+
+	// leaves 0..3: slot, beacon block root, subcommittee index, aggregation bits
+	copy(o[:32], a[:8])
+	copy(o[32:64], a[8:40])
+	copy(o[64:96], a[40:48])
+	copy(o[96:128], a[48:64])
+	return nil
+}
+
+// HashSSZ hashes the Contribution instance using SSZ.
+// It creates a byte slice `leaves` with a size based on length.Hash,
+// then fills this slice with the values from the Contribution's hash buffer.
+func (a *Contribution) HashSSZ() (o [32]byte, err error) {
+	leaves := make([]byte, length.Hash*5)
+	if err = a.CopyHashBufferTo(leaves); err != nil {
+		return
+	}
+	err = merkle_tree.MerkleRootFromFlatLeaves(leaves, o[:])
+	return
+}
+
+// Clone creates a new clone of the Contribution instance.
+// This can be useful for creating copies without changing the original object.
+func (*Contribution) Clone() clonable.Clonable {
+	return &Contribution{}
+}
+
+type ContributionAndProof struct {
+	AggregatorIndex uint64            `json:"aggregator_index,string"`
+	Message         *Contribution     `json:"message"`
+	Signature       libcommon.Bytes96 `json:"selection_proof"`
+}
+
+func (a *ContributionAndProof) EncodeSSZ(dst []byte) ([]byte, error) {
+	// AggregatorIndex is part of the container and must be in the schema
+	return ssz2.MarshalSSZ(dst, &a.AggregatorIndex, a.Message, a.Signature[:])
+}
+
+func (a *ContributionAndProof) DecodeSSZ(buf []byte, version int) error {
+	a.Message = new(Contribution)
+	return ssz2.UnmarshalSSZ(buf, version, &a.AggregatorIndex, a.Message, a.Signature[:])
+}
+
+func (a *ContributionAndProof) EncodingSizeSSZ() int {
+	// 8 bytes aggregator index + 96 bytes selection proof + the fixed-size message
+	return 8 + 96 + a.Message.EncodingSizeSSZ()
+}
+
+func (a *ContributionAndProof) HashSSZ() ([32]byte, error) {
+	return merkle_tree.HashTreeRoot(&a.AggregatorIndex, a.Message, a.Signature[:])
+}
+
+type SignedContributionAndProof struct {
+	Message   *ContributionAndProof `json:"message"`
+	Signature libcommon.Bytes96     `json:"signature"`
+}
+
+func (a *SignedContributionAndProof) EncodeSSZ(dst []byte) ([]byte, error) {
+	return ssz2.MarshalSSZ(dst, a.Message, a.Signature[:])
+}
+
+func (a *SignedContributionAndProof) DecodeSSZ(buf []byte, version int) error {
+	a.Message = new(ContributionAndProof)
+	return ssz2.UnmarshalSSZ(buf, version, a.Message, a.Signature[:])
+}
+
+func (a *SignedContributionAndProof) EncodingSizeSSZ() int {
+	// 96 bytes signature + the fixed-size message
+	return 96 + a.Message.EncodingSizeSSZ()
+}
+
+func (a *SignedContributionAndProof) HashSSZ() ([32]byte, error) {
+	return merkle_tree.HashTreeRoot(a.Message, a.Signature[:])
+}
diff --git a/cl/cltypes/solid/pending_attestation.go b/cl/cltypes/solid/pending_attestation.go
index e17b48b07ad..02788f33089 100644
--- a/cl/cltypes/solid/pending_attestation.go
+++ b/cl/cltypes/solid/pending_attestation.go
@@ -115,8 +115,8 @@ func (a *PendingAttestation) MarshalJSON() ([]byte, error) {
 	return json.Marshal(struct {
 		AggregationBits hexutility.Bytes `json:"aggregation_bits"`
 		AttestationData AttestationData  `json:"attestation_data"`
-		InclusionDelay  uint64           `json:"inclusion_delay"`
-		ProposerIndex   uint64           `json:"proposer_index"`
+		InclusionDelay  uint64           `json:"inclusion_delay,string"`
+		ProposerIndex   uint64           `json:"proposer_index,string"`
 	}{
 		AggregationBits: a.AggregationBits(),
 		AttestationData: a.AttestantionData(),
@@ -130,8 +130,8 @@ func (a *PendingAttestation) UnmarshalJSON(input []byte) error {
 	var tmp struct {
 		AggregationBits hexutility.Bytes `json:"aggregation_bits"`
 		AttestationData AttestationData  `json:"attestation_data"`
-		InclusionDelay  uint64           `json:"inclusion_delay"`
-		ProposerIndex   uint64
`json:"proposer_index"` + InclusionDelay uint64 `json:"inclusion_delay,string"` + ProposerIndex uint64 `json:"proposer_index,string"` } if err = json.Unmarshal(input, &tmp); err != nil { return err diff --git a/cl/cltypes/solid/uint64_raw_list.go b/cl/cltypes/solid/uint64_raw_list.go index b520700bfd8..6461f435b97 100644 --- a/cl/cltypes/solid/uint64_raw_list.go +++ b/cl/cltypes/solid/uint64_raw_list.go @@ -3,6 +3,7 @@ package solid import ( "encoding/binary" "encoding/json" + "strconv" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/length" @@ -154,14 +155,10 @@ func (arr *RawUint64List) Pop() uint64 { } func (arr *RawUint64List) MarshalJSON() ([]byte, error) { - return json.Marshal(arr.u) -} - -func (arr *RawUint64List) UnmarshalJSON(data []byte) error { - arr.cachedHash = libcommon.Hash{} - if err := json.Unmarshal(data, &arr.u); err != nil { - return err + // convert it to a list of strings + strs := make([]string, len(arr.u)) + for i, v := range arr.u { + strs[i] = strconv.FormatInt(int64(v), 10) } - arr.c = len(arr.u) - return nil + return json.Marshal(strs) } diff --git a/cl/cltypes/solid/uint64slice_byte.go b/cl/cltypes/solid/uint64slice_byte.go index a642c6278c1..8cdaa7fa7fe 100644 --- a/cl/cltypes/solid/uint64slice_byte.go +++ b/cl/cltypes/solid/uint64slice_byte.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/binary" "encoding/json" + "strconv" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/length" @@ -79,9 +80,9 @@ func (arr *byteBasedUint64Slice) CopyTo(target *byteBasedUint64Slice) { } func (arr *byteBasedUint64Slice) MarshalJSON() ([]byte, error) { - list := make([]uint64, arr.l) + list := make([]string, arr.l) for i := 0; i < arr.l; i++ { - list[i] = arr.Get(i) + list[i] = strconv.FormatInt(int64(arr.Get(i)), 10) } return json.Marshal(list) } diff --git a/cl/cltypes/solid/validator.go b/cl/cltypes/solid/validator.go index 63353e1ee66..ce8e7245676 100644 --- a/cl/cltypes/solid/validator.go +++ b/cl/cltypes/solid/validator.go @@ -195,12 +195,12 @@ func (v Validator) MarshalJSON() ([]byte, error) { return json.Marshal(struct { PublicKey common.Bytes48 `json:"public_key"` WithdrawalCredentials common.Hash `json:"withdrawal_credentials"` - EffectiveBalance uint64 `json:"effective_balance"` + EffectiveBalance uint64 `json:"effective_balance,string"` Slashed bool `json:"slashed"` - ActivationEligibilityEpoch uint64 `json:"activation_eligibility_epoch"` - ActivationEpoch uint64 `json:"activation_epoch"` - ExitEpoch uint64 `json:"exit_epoch"` - WithdrawableEpoch uint64 `json:"withdrawable_epoch"` + ActivationEligibilityEpoch uint64 `json:"activation_eligibility_epoch,string"` + ActivationEpoch uint64 `json:"activation_epoch,string"` + ExitEpoch uint64 `json:"exit_epoch,string"` + WithdrawableEpoch uint64 `json:"withdrawable_epoch,string"` }{ PublicKey: v.PublicKey(), WithdrawalCredentials: v.WithdrawalCredentials(), @@ -218,12 +218,12 @@ func (v *Validator) UnmarshalJSON(input []byte) error { var tmp struct { PublicKey common.Bytes48 `json:"public_key"` WithdrawalCredentials common.Hash `json:"withdrawal_credentials"` - EffectiveBalance uint64 `json:"effective_balance"` + EffectiveBalance uint64 `json:"effective_balance,string"` Slashed bool `json:"slashed"` - ActivationEligibilityEpoch uint64 `json:"activation_eligibility_epoch"` - ActivationEpoch uint64 `json:"activation_epoch"` - ExitEpoch uint64 `json:"exit_epoch"` - WithdrawableEpoch uint64 `json:"withdrawable_epoch"` + ActivationEligibilityEpoch 
uint64 `json:"activation_eligibility_epoch,string"` + ActivationEpoch uint64 `json:"activation_epoch,string"` + ExitEpoch uint64 `json:"exit_epoch,string"` + WithdrawableEpoch uint64 `json:"withdrawable_epoch,string"` } if err = json.Unmarshal(input, &tmp); err != nil { return err diff --git a/cl/cltypes/validator.go b/cl/cltypes/validator.go index 7d328bf21cf..645b4fec5a0 100644 --- a/cl/cltypes/validator.go +++ b/cl/cltypes/validator.go @@ -18,7 +18,7 @@ const ( type DepositData struct { PubKey libcommon.Bytes48 `json:"pubkey"` WithdrawalCredentials libcommon.Hash `json:"withdrawal_credentials"` - Amount uint64 `json:"amount"` + Amount uint64 `json:"amount,string"` Signature libcommon.Bytes96 `json:"signature"` } @@ -72,8 +72,8 @@ func (d *Deposit) HashSSZ() ([32]byte, error) { } type VoluntaryExit struct { - Epoch uint64 `json:"epoch"` - ValidatorIndex uint64 `json:"validator_index"` + Epoch uint64 `json:"epoch,string"` + ValidatorIndex uint64 `json:"validator_index,string"` } func (e *VoluntaryExit) EncodeSSZ(buf []byte) ([]byte, error) { diff --git a/cl/cltypes/withdrawal.go b/cl/cltypes/withdrawal.go index 8923820b243..ffaae3234d9 100644 --- a/cl/cltypes/withdrawal.go +++ b/cl/cltypes/withdrawal.go @@ -11,10 +11,10 @@ import ( ) type Withdrawal struct { - Index uint64 `json:"index"` // monotonically increasing identifier issued by consensus layer - Validator uint64 `json:"validatorIndex"` // index of validator associated with withdrawal - Address libcommon.Address `json:"address"` // target address for withdrawn ether - Amount uint64 `json:"amount"` // value of withdrawal in GWei + Index uint64 `json:"index,string"` // monotonically increasing identifier issued by consensus layer + Validator uint64 `json:"validatorIndex,string"` // index of validator associated with withdrawal + Address libcommon.Address `json:"address"` // target address for withdrawn ether + Amount uint64 `json:"amount,string"` // value of withdrawal in GWei } func (obj *Withdrawal) EncodeSSZ(buf []byte) ([]byte, error) { diff --git a/cl/gossip/gossip.go b/cl/gossip/gossip.go new file mode 100644 index 00000000000..05d335273ed --- /dev/null +++ b/cl/gossip/gossip.go @@ -0,0 +1,25 @@ +package gossip + +import ( + "strconv" + "strings" +) + +const ( + TopicNameBeaconBlock = "beacon_block" + TopicNameBeaconAggregateAndProof = "beacon_aggregate_and_proof" + TopicNameVoluntaryExit = "voluntary_exit" + TopicNameProposerSlashing = "proposer_slashing" + TopicNameAttesterSlashing = "attester_slashing" + TopicNameBlsToExecutionChange = "bls_to_execution_change" + + TopicNamePrefixBlobSidecar = "blob_sidecar_" +) + +func TopicNameBlobSidecar(d int) string { + return TopicNamePrefixBlobSidecar + strconv.Itoa(d) +} + +func IsTopicBlobSidecar(d string) bool { + return strings.Contains(d, TopicNamePrefixBlobSidecar) +} diff --git a/cl/persistence/base_encoding/primitives_test.go b/cl/persistence/base_encoding/primitives_test.go index 11c80684ecf..a2b18d0c0fb 100644 --- a/cl/persistence/base_encoding/primitives_test.go +++ b/cl/persistence/base_encoding/primitives_test.go @@ -3,7 +3,6 @@ package base_encoding import ( "bytes" "encoding/binary" - "fmt" "testing" "github.com/stretchr/testify/require" @@ -70,7 +69,6 @@ func TestDiff64Effective(t *testing.T) { out := b.Bytes() new2, err := ApplyCompressedSerializedUint64ListDiff(previous, nil, out) require.NoError(t, err) - fmt.Println(previous) require.Equal(t, new2, expected) } diff --git a/cl/persistence/base_encoding/rabbit.go b/cl/persistence/base_encoding/rabbit.go new file mode 
100644
index 00000000000..7478d17fd5b
--- /dev/null
+++ b/cl/persistence/base_encoding/rabbit.go
@@ -0,0 +1,86 @@
+package base_encoding
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+// WriteRabbits encodes a sorted list of uint64s as an alternating stream of
+// run lengths and gaps (run, gap, run, gap, ...), zstd-compressed.
+func WriteRabbits(in []uint64, w io.Writer) error {
+	// Retrieve a compressor from the pool first
+	compressor := compressorPool.Get().(*zstd.Encoder)
+	defer compressorPool.Put(compressor)
+	compressor.Reset(w)
+
+	expectedNum := uint64(0)
+	count := 0
+	// write length
+	if err := binary.Write(compressor, binary.LittleEndian, uint64(len(in))); err != nil {
+		return err
+	}
+	for _, element := range in {
+		if expectedNum != element {
+			// close the current contiguous run (possibly of length zero)
+			if err := binary.Write(compressor, binary.LittleEndian, uint64(count)); err != nil {
+				return err
+			}
+			// then write the gap to the next element
+			if err := binary.Write(compressor, binary.LittleEndian, element-expectedNum); err != nil {
+				return err
+			}
+			count = 0
+		}
+		count++
+		expectedNum = element + 1
+
+	}
+	// write last contiguous sequence
+	if err := binary.Write(compressor, binary.LittleEndian, uint64(count)); err != nil {
+		return err
+	}
+	return compressor.Close()
+}
+
+func ReadRabbits(out []uint64, r io.Reader) ([]uint64, error) {
+	// Create a decompressor over the input
+	decompressor, err := zstd.NewReader(r)
+	if err != nil {
+		return nil, err
+	}
+	defer decompressor.Close()
+
+	var length uint64
+	if err := binary.Read(decompressor, binary.LittleEndian, &length); err != nil {
+		return nil, err
+	}
+
+	if cap(out) < int(length) {
+		out = make([]uint64, 0, length)
+	}
+	out = out[:0]
+	var count uint64
+	var current uint64
+	active := true
+	for err != io.EOF {
+		err = binary.Read(decompressor, binary.LittleEndian, &count)
+		if errors.Is(err, io.EOF) {
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+		if active {
+			// a run: emit `count` consecutive values starting at `current`
+			for i := current; i < current+count; i++ {
+				out = append(out, i)
+			}
+			current += count
+		} else {
+			// a gap: skip `count` values
+			current += count
+		}
+		active = !active
+	}
+	return out, nil
+}
diff --git a/cl/persistence/base_encoding/rabbit_test.go b/cl/persistence/base_encoding/rabbit_test.go
new file mode 100644
index 00000000000..fe98fe3bc34
--- /dev/null
+++ b/cl/persistence/base_encoding/rabbit_test.go
@@ -0,0 +1,22 @@
+package base_encoding
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestRabbit(t *testing.T) {
+	list := []uint64{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 17, 23, 90}
+	var w bytes.Buffer
+	if err := WriteRabbits(list, &w); err != nil {
+		t.Fatal(err)
+	}
+	var out []uint64
+	out, err := ReadRabbits(out, &w)
+	if err != nil {
+		t.Fatal(err)
+	}
+	require.Equal(t, list, out)
+}
diff --git a/cl/persistence/beacon_indicies/indicies.go b/cl/persistence/beacon_indicies/indicies.go
index a58b0de8c96..d485b121b70 100644
--- a/cl/persistence/beacon_indicies/indicies.go
+++ b/cl/persistence/beacon_indicies/indicies.go
@@ -247,11 +247,16 @@ func PruneBlockRoots(ctx context.Context, tx kv.RwTx, fromSlot, toSlot uint64) e
 func ReadBeaconBlockRootsInSlotRange(ctx context.Context, tx kv.Tx, fromSlot, count uint64) ([]libcommon.Hash, []uint64, error) {
 	blockRoots := make([]libcommon.Hash, 0, count)
 	slots := make([]uint64, 0, count)
-	err := RangeBlockRoots(ctx, tx, fromSlot, fromSlot+count, func(slot uint64, beaconBlockRoot libcommon.Hash) bool {
-		blockRoots = append(blockRoots, beaconBlockRoot)
-		slots = append(slots, slot)
-		return true
-	})
+	cursor, err := tx.Cursor(kv.CanonicalBlockRoots)
+	if err != nil {
+		return nil, nil, err
+	}
+	currentCount =
uint64(0) + for k, v, err := cursor.Seek(base_encoding.Encode64ToBytes4(fromSlot)); err == nil && k != nil && currentCount != count; k, v, err = cursor.Next() { + currentCount++ + blockRoots = append(blockRoots, libcommon.BytesToHash(v)) + slots = append(slots, base_encoding.Decode64FromBytes4(k)) + } return blockRoots, slots, err } diff --git a/cl/persistence/format/chunk_encoding/chunks.go b/cl/persistence/format/chunk_encoding/chunks.go deleted file mode 100644 index 28afb2008d9..00000000000 --- a/cl/persistence/format/chunk_encoding/chunks.go +++ /dev/null @@ -1,68 +0,0 @@ -package chunk_encoding - -import ( - "encoding/binary" - "io" -) - -type DataType int - -const ( - ChunkDataType DataType = 0 - PointerDataType DataType = 1 -) - -// writeChunk writes a chunk to the writer. -func WriteChunk(w io.Writer, buf []byte, t DataType) error { - - // prefix is type of chunk + length of chunk - prefix := make([]byte, 8) - binary.BigEndian.PutUint64(prefix, uint64(len(buf))) - prefix[0] = byte(t) - if _, err := w.Write(prefix); err != nil { - return err - } - if _, err := w.Write(buf); err != nil { - return err - } - return nil -} - -func ReadChunk(r io.Reader, out io.Writer) (t DataType, err error) { - prefix := make([]byte, 8) - if _, err := r.Read(prefix); err != nil { - return DataType(0), err - } - t = DataType(prefix[0]) - prefix[0] = 0 - - bufLen := binary.BigEndian.Uint64(prefix) - if bufLen == 0 { - return - } - - if _, err = io.CopyN(out, r, int64(bufLen)); err != nil { - return - } - return -} - -func ReadChunkToBytes(r io.Reader) (b []byte, t DataType, err error) { - prefix := make([]byte, 8) - if _, err := r.Read(prefix); err != nil { - return nil, DataType(0), err - } - t = DataType(prefix[0]) - prefix[0] = 0 - - bufLen := binary.BigEndian.Uint64(prefix) - if bufLen == 0 { - return - } - b = make([]byte, bufLen) - - if _, err = r.Read(b); err != nil { - return - } - return -} diff --git a/cl/persistence/format/snapshot_format/blocks.go b/cl/persistence/format/snapshot_format/blocks.go index f392bee27c5..3692a6de30b 100644 --- a/cl/persistence/format/snapshot_format/blocks.go +++ b/cl/persistence/format/snapshot_format/blocks.go @@ -3,7 +3,6 @@ package snapshot_format import ( "bytes" "encoding/binary" - "fmt" "io" "sync" @@ -11,66 +10,18 @@ import ( "github.com/ledgerwatch/erigon/cl/clparams" "github.com/ledgerwatch/erigon/cl/cltypes" - "github.com/ledgerwatch/erigon/cl/persistence/format/chunk_encoding" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" ) type ExecutionBlockReaderByNumber interface { - TransactionsSSZ(w io.Writer, number uint64, hash libcommon.Hash) error - WithdrawalsSZZ(w io.Writer, number uint64, hash libcommon.Hash) error + Transactions(number uint64, hash libcommon.Hash) (*solid.TransactionsSSZ, error) + Withdrawals(number uint64, hash libcommon.Hash) (*solid.ListSSZ[*cltypes.Withdrawal], error) } var buffersPool = sync.Pool{ New: func() interface{} { return &bytes.Buffer{} }, } -const ( - blockBaseOffset = 100 /* Signature + Block Offset */ + - 84 /* Slot + ProposerIndex + ParentRoot + StateRoot + Body Offset */ + - 96 /*Signature*/ + 72 /*Eth1Data*/ + 32 /*Graffiti*/ + 4 /*ProposerSlashings Offset*/ + 4 /*AttesterSlashings Offset*/ + 4 /*Attestations*/ + - 4 /*Deposits Offset*/ + 4 /*VoluntaryExits Offset*/ - - altairBlockAdditionalBaseOffset = 160 /*SyncAggregate*/ - bellatrixBlockAdditionalBaseOffset = 4 /*ExecutionPayload Offset*/ - capellaBlockAdditionalBaseOffset = 4 /*ExecutionChanges Offset*/ - denebBlockAdditionalBaseOffset = 4 
/*BlobKzgCommitments Offset*/ -) - -func writeExecutionBlockPtr(w io.Writer, p *cltypes.Eth1Block) error { - temp := make([]byte, 40) - binary.BigEndian.PutUint64(temp, p.BlockNumber) - copy(temp[8:], p.BlockHash[:]) - - return chunk_encoding.WriteChunk(w, temp, chunk_encoding.PointerDataType) -} - -func readExecutionBlockPtr(r io.Reader) (uint64, libcommon.Hash, error) { - b, dT, err := chunk_encoding.ReadChunkToBytes(r) - if err != nil { - return 0, libcommon.Hash{}, err - } - if dT != chunk_encoding.PointerDataType { - return 0, libcommon.Hash{}, fmt.Errorf("malformed beacon block, invalid block pointer type %d, expected: %d", dT, chunk_encoding.ChunkDataType) - } - return binary.BigEndian.Uint64(b[:8]), libcommon.BytesToHash(b[8:]), nil -} - -func computeInitialOffset(version clparams.StateVersion) uint64 { - ret := uint64(blockBaseOffset) - if version >= clparams.AltairVersion { - ret += altairBlockAdditionalBaseOffset - } - if version >= clparams.BellatrixVersion { - ret += bellatrixBlockAdditionalBaseOffset - } - if version >= clparams.CapellaVersion { - ret += capellaBlockAdditionalBaseOffset - } - if version >= clparams.DenebVersion { - ret += denebBlockAdditionalBaseOffset - } - return ret -} - // WriteBlockForSnapshot writes a block to the given writer in the format expected by the snapshot. // buf is just a reusable buffer. if it had to grow it will be returned back as grown. func WriteBlockForSnapshot(w io.Writer, block *cltypes.SignedBeaconBlock, reusable []byte) ([]byte, error) { @@ -79,11 +30,17 @@ func WriteBlockForSnapshot(w io.Writer, block *cltypes.SignedBeaconBlock, reusab return reusable, err } reusable = reusable[:0] + // Find the blinded block + blinded, err := block.Blinded() + if err != nil { + return reusable, err + } // Maybe reuse the buffer? 
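The swap just below, from encoding the full block to encoding its blinded form, is the core of the new format: transactions and withdrawals are no longer duplicated in CL snapshots but re-fetched from the execution store on read. Distilled from `ReadBlockFromSnapshot` further down (the `SignedBlindedBeaconBlock` type name is inferred from the `NewSignedBlindedBeaconBlock` constructor used there), the reconstruction is roughly:

```go
package example

import (
	"github.com/ledgerwatch/erigon/cl/cltypes"
	"github.com/ledgerwatch/erigon/cl/persistence/format/snapshot_format"
)

// rebuildFull sketches the read path: fetch the execution payload's
// transactions and withdrawals, then re-inflate the blinded block via Full,
// whose (txs, ws) signature is taken from this diff.
func rebuildFull(blinded *cltypes.SignedBlindedBeaconBlock, r snapshot_format.ExecutionBlockReaderByNumber) (*cltypes.SignedBeaconBlock, error) {
	payload := blinded.Block.Body.ExecutionPayload
	txs, err := r.Transactions(payload.BlockNumber, payload.BlockHash)
	if err != nil {
		return nil, err
	}
	ws, err := r.Withdrawals(payload.BlockNumber, payload.BlockHash)
	if err != nil {
		return nil, err
	}
	return blinded.Full(txs, ws), nil
}
```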
- encoded, err := block.EncodeSSZ(reusable) + encoded, err := blinded.EncodeSSZ(reusable) if err != nil { return reusable, err } + reusable = encoded version := block.Version() if _, err := w.Write([]byte{byte(version)}); err != nil { @@ -92,32 +49,17 @@ func WriteBlockForSnapshot(w io.Writer, block *cltypes.SignedBeaconBlock, reusab if _, err := w.Write(bodyRoot[:]); err != nil { return reusable, err } - currentChunkLength := computeInitialOffset(version) - - body := block.Block.Body - // count in body for phase0 fields - currentChunkLength += uint64(body.ProposerSlashings.EncodingSizeSSZ()) - currentChunkLength += uint64(body.AttesterSlashings.EncodingSizeSSZ()) - currentChunkLength += uint64(body.Attestations.EncodingSizeSSZ()) - currentChunkLength += uint64(body.Deposits.EncodingSizeSSZ()) - currentChunkLength += uint64(body.VoluntaryExits.EncodingSizeSSZ()) - // Write the chunk and chunk attestations - if err := chunk_encoding.WriteChunk(w, encoded[:currentChunkLength], chunk_encoding.ChunkDataType); err != nil { + // Write the length of the buffer + length := make([]byte, 8) + binary.BigEndian.PutUint64(length, uint64(len(reusable))) + if _, err := w.Write(length); err != nil { return reusable, err } - // we are done if we are before altair - if version <= clparams.AltairVersion { - return reusable, nil - } - encoded = encoded[currentChunkLength:] - if err := writeEth1BlockForSnapshot(w, encoded[:body.ExecutionPayload.EncodingSizeSSZ()], body.ExecutionPayload); err != nil { + // Write the buffer + if _, err := w.Write(reusable); err != nil { return reusable, err } - encoded = encoded[body.ExecutionPayload.EncodingSizeSSZ():] - if version <= clparams.BellatrixVersion { - return reusable, nil - } - return reusable, chunk_encoding.WriteChunk(w, encoded, chunk_encoding.ChunkDataType) + return reusable, nil } func readMetadataForBlock(r io.Reader, b []byte) (clparams.StateVersion, libcommon.Hash, error) { @@ -128,102 +70,89 @@ func readMetadataForBlock(r io.Reader, b []byte) (clparams.StateVersion, libcomm } func ReadBlockFromSnapshot(r io.Reader, executionReader ExecutionBlockReaderByNumber, cfg *clparams.BeaconChainConfig) (*cltypes.SignedBeaconBlock, error) { - block := cltypes.NewSignedBeaconBlock(cfg) + blindedBlock := cltypes.NewSignedBlindedBeaconBlock(cfg) buffer := buffersPool.Get().(*bytes.Buffer) defer buffersPool.Put(buffer) buffer.Reset() - v, err := ReadRawBlockFromSnapshot(r, buffer, executionReader, cfg) + // Read the metadata + metadataSlab := make([]byte, 33) + v, _, err := readMetadataForBlock(r, metadataSlab) + if err != nil { + return nil, err + } + // Read the length + length := make([]byte, 8) + if _, err := io.ReadFull(r, length); err != nil { + return nil, err + } + // Read the block + if _, err := io.CopyN(buffer, r, int64(binary.BigEndian.Uint64(length))); err != nil { + return nil, err + } + // Decode the block in blinded + if err := blindedBlock.DecodeSSZ(buffer.Bytes(), int(v)); err != nil { + return nil, err + } + // No execution data for pre-altair blocks + if v <= clparams.AltairVersion { + return blindedBlock.Full(nil, nil), nil + } + blockNumber := blindedBlock.Block.Body.ExecutionPayload.BlockNumber + blockHash := blindedBlock.Block.Body.ExecutionPayload.BlockHash + txs, err := executionReader.Transactions(blockNumber, blockHash) if err != nil { return nil, err } - return block, block.DecodeSSZ(buffer.Bytes(), int(v)) + ws, err := executionReader.Withdrawals(blockNumber, blockHash) + if err != nil { + return nil, err + } + return blindedBlock.Full(txs, 
ws), nil } // ReadBlockHeaderFromSnapshotWithExecutionData reads the beacon block header and the EL block number and block hash. -func ReadBlockHeaderFromSnapshotWithExecutionData(r io.Reader) (*cltypes.SignedBeaconBlockHeader, uint64, libcommon.Hash, error) { +func ReadBlockHeaderFromSnapshotWithExecutionData(r io.Reader, cfg *clparams.BeaconChainConfig) (*cltypes.SignedBeaconBlockHeader, uint64, libcommon.Hash, error) { buffer := buffersPool.Get().(*bytes.Buffer) defer buffersPool.Put(buffer) buffer.Reset() + blindedBlock := cltypes.NewSignedBlindedBeaconBlock(cfg) + + // Read the metadata metadataSlab := make([]byte, 33) v, bodyRoot, err := readMetadataForBlock(r, metadataSlab) if err != nil { return nil, 0, libcommon.Hash{}, err } - chunk1, dT1, err := chunk_encoding.ReadChunkToBytes(r) - if err != nil { + // Read the length + length := make([]byte, 8) + if _, err := io.ReadFull(r, length); err != nil { return nil, 0, libcommon.Hash{}, err } - if dT1 != chunk_encoding.ChunkDataType { - return nil, 0, libcommon.Hash{}, fmt.Errorf("malformed beacon block, invalid chunk 1 type %d, expected: %d", dT1, chunk_encoding.ChunkDataType) - } - - var signature libcommon.Bytes96 - copy(signature[:], chunk1[4:100]) - header := &cltypes.SignedBeaconBlockHeader{ - Signature: signature, - Header: &cltypes.BeaconBlockHeader{ - Slot: binary.LittleEndian.Uint64(chunk1[100:108]), - ProposerIndex: binary.LittleEndian.Uint64(chunk1[108:116]), - ParentRoot: libcommon.BytesToHash(chunk1[116:148]), - Root: libcommon.BytesToHash(chunk1[148:180]), - BodyRoot: bodyRoot, - }} - if v <= clparams.AltairVersion { - return header, 0, libcommon.Hash{}, nil - } - if _, err := r.Read(make([]byte, 1)); err != nil { - return header, 0, libcommon.Hash{}, nil - } - // Read the first eth 1 block chunk - _, err = chunk_encoding.ReadChunk(r, io.Discard) - if err != nil { + // Read the block + if _, err := io.CopyN(buffer, r, int64(binary.BigEndian.Uint64(length))); err != nil { return nil, 0, libcommon.Hash{}, err } - // lastly read the executionBlock ptr - blockNumber, blockHash, err := readExecutionBlockPtr(r) - if err != nil { + // Decode the block in blinded + if err := blindedBlock.DecodeSSZ(buffer.Bytes(), int(v)); err != nil { return nil, 0, libcommon.Hash{}, err } - return header, blockNumber, blockHash, nil -} - -func ReadRawBlockFromSnapshot(r io.Reader, out io.Writer, executionReader ExecutionBlockReaderByNumber, cfg *clparams.BeaconChainConfig) (clparams.StateVersion, error) { - metadataSlab := make([]byte, 33) - // Metadata section is just the current hardfork of the block. 
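Both `ReadBlockFromSnapshot` above and the header reader below parse the same on-disk record: one version byte, the 32-byte body root, an 8-byte big-endian length, then the blinded-block SSZ. A self-contained sketch of that framing (helper names are illustrative, not the snapshot_format API):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// writeFrame/readFrame sketch the snapshot record layout used above:
// [1-byte version][32-byte body root][8-byte big-endian length][blinded SSZ].
func writeFrame(w io.Writer, version byte, bodyRoot [32]byte, ssz []byte) error {
	if _, err := w.Write([]byte{version}); err != nil {
		return err
	}
	if _, err := w.Write(bodyRoot[:]); err != nil {
		return err
	}
	length := make([]byte, 8)
	binary.BigEndian.PutUint64(length, uint64(len(ssz)))
	if _, err := w.Write(length); err != nil {
		return err
	}
	_, err := w.Write(ssz)
	return err
}

func readFrame(r io.Reader) (version byte, bodyRoot [32]byte, ssz []byte, err error) {
	meta := make([]byte, 33) // version + body root, like readMetadataForBlock's 33-byte slab
	if _, err = io.ReadFull(r, meta); err != nil {
		return
	}
	version = meta[0]
	copy(bodyRoot[:], meta[1:])
	lenBuf := make([]byte, 8)
	if _, err = io.ReadFull(r, lenBuf); err != nil {
		return
	}
	ssz = make([]byte, binary.BigEndian.Uint64(lenBuf))
	_, err = io.ReadFull(r, ssz)
	return
}

func main() {
	var b bytes.Buffer
	_ = writeFrame(&b, 4, [32]byte{1}, []byte("ssz-bytes"))
	v, root, payload, _ := readFrame(&b)
	fmt.Println(v, root[0], string(payload)) // 4 1 ssz-bytes
}
```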
- v, _, err := readMetadataForBlock(r, metadataSlab) - if err != nil { - return v, err - } - - // Read the first chunk - dT1, err := chunk_encoding.ReadChunk(r, out) - if err != nil { - return v, err - } - if dT1 != chunk_encoding.ChunkDataType { - return v, fmt.Errorf("malformed beacon block, invalid chunk 1 type %d, expected: %d", dT1, chunk_encoding.ChunkDataType) + blockHeader := &cltypes.SignedBeaconBlockHeader{ + Signature: blindedBlock.Signature, + Header: &cltypes.BeaconBlockHeader{ + Slot: blindedBlock.Block.Slot, + ProposerIndex: blindedBlock.Block.ProposerIndex, + ParentRoot: blindedBlock.Block.ParentRoot, + Root: blindedBlock.Block.StateRoot, + BodyRoot: bodyRoot, + }, } - + // No execution data for pre-altair blocks if v <= clparams.AltairVersion { - return v, nil - } - // Read the block pointer and retrieve chunk4 from the execution reader - if _, err := readEth1BlockFromSnapshot(r, out, executionReader, cfg); err != nil { - return v, err - } - if v <= clparams.BellatrixVersion { - return v, nil - } - - // Read the 5h chunk - dT2, err := chunk_encoding.ReadChunk(r, out) - if err != nil { - return v, err - } - if dT2 != chunk_encoding.ChunkDataType { - return v, fmt.Errorf("malformed beacon block, invalid chunk 5 type %d, expected: %d", dT2, chunk_encoding.ChunkDataType) + return blockHeader, 0, libcommon.Hash{}, nil } - return v, nil + blockNumber := blindedBlock.Block.Body.ExecutionPayload.BlockNumber + blockHash := blindedBlock.Block.Body.ExecutionPayload.BlockHash + return blockHeader, blockNumber, blockHash, nil } diff --git a/cl/persistence/format/snapshot_format/blocks_test.go b/cl/persistence/format/snapshot_format/blocks_test.go index b5d0815fc98..8c357fd4b01 100644 --- a/cl/persistence/format/snapshot_format/blocks_test.go +++ b/cl/persistence/format/snapshot_format/blocks_test.go @@ -70,7 +70,7 @@ func TestBlockSnapshotEncoding(t *testing.T) { b.Reset() _, err = snapshot_format.WriteBlockForSnapshot(&b, blk, nil) require.NoError(t, err) - header, bn, bHash, err := snapshot_format.ReadBlockHeaderFromSnapshotWithExecutionData(&b) + header, bn, bHash, err := snapshot_format.ReadBlockHeaderFromSnapshotWithExecutionData(&b, &clparams.MainnetBeaconConfig) require.NoError(t, err) hash3, err := header.HashSSZ() require.NoError(t, err) diff --git a/cl/persistence/format/snapshot_format/eth1_blocks.go b/cl/persistence/format/snapshot_format/eth1_blocks.go deleted file mode 100644 index 053c075aa22..00000000000 --- a/cl/persistence/format/snapshot_format/eth1_blocks.go +++ /dev/null @@ -1,92 +0,0 @@ -package snapshot_format - -import ( - "fmt" - "io" - - "github.com/ledgerwatch/erigon-lib/common/length" - "github.com/ledgerwatch/erigon/cl/clparams" - "github.com/ledgerwatch/erigon/cl/cltypes" - "github.com/ledgerwatch/erigon/cl/persistence/format/chunk_encoding" - "github.com/ledgerwatch/erigon/core/types" -) - -// WriteEth1BlockForSnapshot writes an execution block to the given writer in the format expected by the snapshot. 
-func writeEth1BlockForSnapshot(w io.Writer, encoded []byte, block *cltypes.Eth1Block) error { - pos := (length.Hash /*ParentHash*/ + length.Addr /*Miner*/ + length.Hash /*StateRoot*/ + length.Hash /*ReceiptsRoot*/ + types.BloomByteLength /*Bloom*/ + - length.Hash /*PrevRandao*/ + 32 /*BlockNumber + Timestamp + GasLimit + GasUsed */ + 4 /*ExtraDataOffset*/ + length.Hash /*BaseFee*/ + - length.Hash /*BlockHash*/ + 4 /*TransactionOffset*/) - - if block.Version() >= clparams.CapellaVersion { - pos += 4 /*WithdrawalsOffset*/ - } - if block.Version() >= clparams.DenebVersion { - pos += 16 /*BlobGasUsed + ExcessBlobGas*/ - } - // Add metadata first for Eth1Block, aka. version - if _, err := w.Write([]byte{byte(block.Version())}); err != nil { - return err - } - - // Maybe reuse the buffer? - pos += block.Extra.EncodingSizeSSZ() - if err := chunk_encoding.WriteChunk(w, encoded[:pos], chunk_encoding.ChunkDataType); err != nil { - return err - } - pos += block.Withdrawals.EncodingSizeSSZ() - pos += block.Transactions.EncodingSizeSSZ() - encoded = encoded[pos:] - //pos = 0 - // write the block pointer - if err := writeExecutionBlockPtr(w, block); err != nil { - return err - } - // From now on here, just finish up - return chunk_encoding.WriteChunk(w, encoded, chunk_encoding.ChunkDataType) -} - -func readEth1BlockFromSnapshot(r io.Reader, out io.Writer, executionReader ExecutionBlockReaderByNumber, cfg *clparams.BeaconChainConfig) (clparams.StateVersion, error) { - // Metadata section is just the current hardfork of the block. - vArr := make([]byte, 1) - if _, err := r.Read(vArr); err != nil { - return 0, err - } - v := clparams.StateVersion(vArr[0]) - - // Read the first chunk - dT1, err := chunk_encoding.ReadChunk(r, out) - if err != nil { - return v, err - } - if dT1 != chunk_encoding.ChunkDataType { - return v, fmt.Errorf("malformed beacon block, invalid chunk 1 type %d, expected: %d", dT1, chunk_encoding.ChunkDataType) - } - // Read the block pointer and retrieve chunk4 from the execution reader - blockNumber, blockHash, err := readExecutionBlockPtr(r) - if err != nil { - return v, err - } - err = executionReader.TransactionsSSZ(out, blockNumber, blockHash) - if err != nil { - return v, err - } - - if v < clparams.CapellaVersion { - return v, nil - } - err = executionReader.WithdrawalsSZZ(out, blockNumber, blockHash) - if err != nil { - return v, err - } - - // Read the 5h chunk - dT2, err := chunk_encoding.ReadChunk(r, out) - if err != nil { - return v, err - } - if dT2 != chunk_encoding.ChunkDataType { - return v, fmt.Errorf("malformed beacon block, invalid chunk 5 type %d, expected: %d", dT2, chunk_encoding.ChunkDataType) - } - - return v, nil -} diff --git a/cl/persistence/format/snapshot_format/getters/execution_snapshot.go b/cl/persistence/format/snapshot_format/getters/execution_snapshot.go index d201a63ea86..60d6e92179d 100644 --- a/cl/persistence/format/snapshot_format/getters/execution_snapshot.go +++ b/cl/persistence/format/snapshot_format/getters/execution_snapshot.go @@ -4,78 +4,51 @@ import ( "context" "encoding/binary" "fmt" - "io" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/types/ssz" - "github.com/ledgerwatch/erigon/cl/phase1/core/state/lru" + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/turbo/services" ) -type cacheEntry struct { 
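The byte-level caches keyed by the (deleted) `cacheEntry` are dropped in favor of going straight to the database. Both rewritten getters below share the standard erigon-lib read pattern, sketched here (the `*types.Body` return type is assumed from how the body is used below):

```go
package example

import (
	"context"

	libcommon "github.com/ledgerwatch/erigon-lib/common"
	"github.com/ledgerwatch/erigon-lib/kv"
	"github.com/ledgerwatch/erigon/core/types"
	"github.com/ledgerwatch/erigon/turbo/services"
)

// readBody shows the access pattern: open a read-only transaction,
// defer its rollback, and do all reads inside it.
func readBody(ctx context.Context, db kv.RoDB, blockReader services.FullBlockReader,
	hash libcommon.Hash, number uint64) (*types.Body, error) {
	tx, err := db.BeginRo(ctx)
	if err != nil {
		return nil, err
	}
	defer tx.Rollback() // read-only txs are released by rolling back
	return blockReader.BodyWithTransactions(ctx, tx, hash, number)
}
```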
- number uint64 - hash libcommon.Hash -} type ExecutionSnapshotReader struct { ctx context.Context blockReader services.FullBlockReader + beaconCfg *clparams.BeaconChainConfig - db kv.RoDB - txsCache *lru.Cache[cacheEntry, []byte] - withdrawalsCache *lru.Cache[cacheEntry, []byte] + db kv.RoDB } -func NewExecutionSnapshotReader(ctx context.Context, blockReader services.FullBlockReader, db kv.RoDB) *ExecutionSnapshotReader { - txsCache, err := lru.New[cacheEntry, []byte]("txsCache", 96) - if err != nil { - panic(err) - } - withdrawalsCache, err := lru.New[cacheEntry, []byte]("wsCache", 96) - if err != nil { - panic(err) - } - return &ExecutionSnapshotReader{ctx: ctx, blockReader: blockReader, withdrawalsCache: withdrawalsCache, txsCache: txsCache, db: db} +func NewExecutionSnapshotReader(ctx context.Context, beaconCfg *clparams.BeaconChainConfig, blockReader services.FullBlockReader, db kv.RoDB) *ExecutionSnapshotReader { + return &ExecutionSnapshotReader{ctx: ctx, beaconCfg: beaconCfg, blockReader: blockReader, db: db} } -func (r *ExecutionSnapshotReader) TransactionsSSZ(w io.Writer, number uint64, hash libcommon.Hash) error { - ok, err := r.lookupTransactionsInCache(w, number, hash) - if err != nil { - return err - } - if ok { - return nil - } - +func (r *ExecutionSnapshotReader) Transactions(number uint64, hash libcommon.Hash) (*solid.TransactionsSSZ, error) { tx, err := r.db.BeginRo(r.ctx) if err != nil { - return err + return nil, err } defer tx.Rollback() // Get the body and fill both caches body, err := r.blockReader.BodyWithTransactions(r.ctx, tx, hash, number) if err != nil { - return err + return nil, err } if body == nil { - return fmt.Errorf("transactions not found for block %d", number) + return nil, fmt.Errorf("transactions not found for block %d", number) } // compute txs flats txs, err := types.MarshalTransactionsBinary(body.Transactions) if err != nil { - return err + return nil, err } - flattenedTxs := convertTxsToBytesSSZ(txs) - r.txsCache.Add(cacheEntry{number: number, hash: hash}, flattenedTxs) - // compute withdrawals flat - ws := body.Withdrawals - flattenedWs := convertWithdrawalsToBytesSSZ(ws) - r.withdrawalsCache.Add(cacheEntry{number: number, hash: hash}, flattenedWs) - _, err = w.Write(flattenedTxs) - return err + return solid.NewTransactionsSSZFromTransactions(txs), nil } func convertTxsToBytesSSZ(txs [][]byte) []byte { @@ -107,60 +80,28 @@ func convertWithdrawalsToBytesSSZ(ws []*types.Withdrawal) []byte { return ret } -func (r *ExecutionSnapshotReader) WithdrawalsSZZ(w io.Writer, number uint64, hash libcommon.Hash) error { - ok, err := r.lookupWithdrawalsInCache(w, number, hash) - if err != nil { - return err - } - if ok { - return nil - } +func (r *ExecutionSnapshotReader) Withdrawals(number uint64, hash libcommon.Hash) (*solid.ListSSZ[*cltypes.Withdrawal], error) { tx, err := r.db.BeginRo(r.ctx) if err != nil { - return err + return nil, err } defer tx.Rollback() // Get the body and fill both caches - body, err := r.blockReader.BodyWithTransactions(r.ctx, tx, hash, number) + body, _, err := r.blockReader.Body(r.ctx, tx, hash, number) if err != nil { - return err + return nil, err } if body == nil { - return fmt.Errorf("transactions not found for block %d", number) - } - // compute txs flats - txs, err := types.MarshalTransactionsBinary(body.Transactions) - if err != nil { - return err - } - flattenedTxs := convertTxsToBytesSSZ(txs) - r.txsCache.Add(cacheEntry{number: number, hash: hash}, flattenedTxs) - // compute withdrawals flat - ws := body.Withdrawals - 
flattenedWs := convertWithdrawalsToBytesSSZ(ws) - - r.withdrawalsCache.Add(cacheEntry{number: number, hash: hash}, flattenedWs) - _, err = w.Write(flattenedWs) - - return err -} - -func (r *ExecutionSnapshotReader) lookupWithdrawalsInCache(w io.Writer, number uint64, hash libcommon.Hash) (bool, error) { - var wsBytes []byte - var ok bool - if wsBytes, ok = r.withdrawalsCache.Get(cacheEntry{number: number, hash: hash}); !ok { - return false, nil - } - _, err := w.Write(wsBytes) - return true, err -} - -func (r *ExecutionSnapshotReader) lookupTransactionsInCache(w io.Writer, number uint64, hash libcommon.Hash) (bool, error) { - var wsBytes []byte - var ok bool - if wsBytes, ok = r.txsCache.Get(cacheEntry{number: number, hash: hash}); !ok { - return false, nil - } - _, err := w.Write(wsBytes) - return true, err + return nil, fmt.Errorf("transactions not found for block %d", number) + } + ret := solid.NewStaticListSSZ[*cltypes.Withdrawal](int(r.beaconCfg.MaxWithdrawalsPerPayload), 44) + for _, w := range body.Withdrawals { + ret.Append(&cltypes.Withdrawal{ + Index: w.Index, + Validator: w.Validator, + Address: w.Address, + Amount: w.Amount, + }) + } + return ret, nil } diff --git a/cl/persistence/format/snapshot_format/test_util.go b/cl/persistence/format/snapshot_format/test_util.go index 1bf45999522..cd806ecb3c6 100644 --- a/cl/persistence/format/snapshot_format/test_util.go +++ b/cl/persistence/format/snapshot_format/test_util.go @@ -1,30 +1,19 @@ package snapshot_format import ( - "io" - libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" ) type MockBlockReader struct { Block *cltypes.Eth1Block } -func (t *MockBlockReader) WithdrawalsSZZ(out io.Writer, number uint64, hash libcommon.Hash) error { - l, err := t.Block.Withdrawals.EncodeSSZ(nil) - if err != nil { - return err - } - _, err = out.Write(l) - return err +func (t *MockBlockReader) Withdrawals(number uint64, hash libcommon.Hash) (*solid.ListSSZ[*cltypes.Withdrawal], error) { + return t.Block.Withdrawals, nil } -func (t *MockBlockReader) TransactionsSSZ(out io.Writer, number uint64, hash libcommon.Hash) error { - l, err := t.Block.Transactions.EncodeSSZ(nil) - if err != nil { - return err - } - _, err = out.Write(l) - return err +func (t *MockBlockReader) Transactions(number uint64, hash libcommon.Hash) (*solid.TransactionsSSZ, error) { + return t.Block.Transactions, nil } diff --git a/cl/persistence/state/epoch_data.go b/cl/persistence/state/epoch_data.go new file mode 100644 index 00000000000..a269360abc0 --- /dev/null +++ b/cl/persistence/state/epoch_data.go @@ -0,0 +1,75 @@ +package state_accessors + +import ( + "encoding/binary" + "io" + + "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/cl/phase1/core/state" + ssz2 "github.com/ledgerwatch/erigon/cl/ssz" +) + +// EpochData stores the data for the epoch (valid throughout the epoch) +type EpochData struct { + TotalActiveBalance uint64 + JustificationBits *cltypes.JustificationBits + Fork *cltypes.Fork + CurrentJustifiedCheckpoint solid.Checkpoint + PreviousJustifiedCheckpoint solid.Checkpoint + FinalizedCheckpoint solid.Checkpoint + HistoricalSummariesLength uint64 + HistoricalRootsLength uint64 +} + +func EpochDataFromBeaconState(s *state.CachingBeaconState) *EpochData { + justificationCopy := &cltypes.JustificationBits{} + jj := s.JustificationBits() + copy(justificationCopy[:], jj[:]) + return &EpochData{ + 
Fork:                        s.Fork(),
+		JustificationBits:           justificationCopy,
+		TotalActiveBalance:          s.GetTotalActiveBalance(),
+		CurrentJustifiedCheckpoint:  s.CurrentJustifiedCheckpoint(),
+		PreviousJustifiedCheckpoint: s.PreviousJustifiedCheckpoint(),
+		FinalizedCheckpoint:         s.FinalizedCheckpoint(),
+	}
+}
+
+// WriteTo serializes the epoch data as an 8-byte little-endian length prefix followed by the SSZ encoding of its fields.
+func (m *EpochData) WriteTo(w io.Writer) error {
+	buf, err := ssz2.MarshalSSZ(nil, m.getSchema()...)
+	if err != nil {
+		return err
+	}
+	lenB := make([]byte, 8)
+	binary.LittleEndian.PutUint64(lenB, uint64(len(buf)))
+	if _, err := w.Write(lenB); err != nil {
+		return err
+	}
+	_, err = w.Write(buf)
+	return err
+}
+
+// ReadFrom deserializes the epoch data from its length-prefixed SSZ encoding.
+func (m *EpochData) ReadFrom(r io.Reader) error {
+	m.JustificationBits = &cltypes.JustificationBits{}
+	m.Fork = &cltypes.Fork{}
+	m.FinalizedCheckpoint = solid.NewCheckpoint()
+	m.CurrentJustifiedCheckpoint = solid.NewCheckpoint()
+	m.PreviousJustifiedCheckpoint = solid.NewCheckpoint()
+	lenB := make([]byte, 8)
+	if _, err := io.ReadFull(r, lenB); err != nil {
+		return err
+	}
+	bufLen := binary.LittleEndian.Uint64(lenB) // renamed from `len`, which shadowed the builtin
+	buf := make([]byte, bufLen)
+	if _, err := io.ReadFull(r, buf); err != nil {
+		return err
+	}
+	return ssz2.UnmarshalSSZ(buf, 0, m.getSchema()...)
+}
+
+func (m *EpochData) getSchema() []interface{} {
+	return []interface{}{&m.TotalActiveBalance, m.JustificationBits, m.Fork, m.CurrentJustifiedCheckpoint, m.PreviousJustifiedCheckpoint, m.FinalizedCheckpoint, &m.HistoricalSummariesLength, &m.HistoricalRootsLength}
+}
diff --git a/cl/persistence/state/epoch_data_test.go b/cl/persistence/state/epoch_data_test.go
new file mode 100644
index 00000000000..95079cc58bb
--- /dev/null
+++ b/cl/persistence/state/epoch_data_test.go
@@ -0,0 +1,35 @@
+package state_accessors
+
+import (
+	"bytes"
+	"testing"
+
+	libcommon "github.com/ledgerwatch/erigon-lib/common"
+	"github.com/ledgerwatch/erigon/cl/cltypes"
+	"github.com/ledgerwatch/erigon/cl/cltypes/solid"
+	"github.com/stretchr/testify/require"
+)
+
+func TestEpochData(t *testing.T) {
+	e := &EpochData{
+		TotalActiveBalance:          123,
+		JustificationBits:           &cltypes.JustificationBits{true},
+		Fork:                        &cltypes.Fork{},
+		CurrentJustifiedCheckpoint:  solid.NewCheckpointFromParameters(libcommon.Hash{}, 123),
+		PreviousJustifiedCheckpoint: solid.NewCheckpointFromParameters(libcommon.Hash{}, 123),
+		FinalizedCheckpoint:         solid.NewCheckpointFromParameters(libcommon.Hash{}, 123),
+		HistoricalSummariesLength:   235,
+		HistoricalRootsLength:       345,
+	}
+	var b bytes.Buffer
+	if err := e.WriteTo(&b); err != nil {
+		t.Fatal(err)
+	}
+
+	e2 := &EpochData{}
+	if err := e2.ReadFrom(&b); err != nil {
+		t.Fatal(err)
+	}
+
+	require.Equal(t, e, e2)
+}
diff --git a/cl/persistence/state/historical_states_reader/attesting_indicies.go b/cl/persistence/state/historical_states_reader/attesting_indicies.go
index e3d5f717b59..22868b02248 100644
--- a/cl/persistence/state/historical_states_reader/attesting_indicies.go
+++ b/cl/persistence/state/historical_states_reader/attesting_indicies.go
@@ -3,20 +3,24 @@ package historical_states_reader
 import (
 	"fmt"
 
+	libcommon "github.com/ledgerwatch/erigon-lib/common"
+	"github.com/ledgerwatch/erigon-lib/kv"
 	"github.com/ledgerwatch/erigon/cl/clparams"
 	"github.com/ledgerwatch/erigon/cl/cltypes/solid"
+	"github.com/ledgerwatch/erigon/cl/persistence/base_encoding"
+	state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state"
"github.com/ledgerwatch/erigon/cl/phase1/core/state/shuffling" "github.com/ledgerwatch/erigon/cl/utils" ) -func (r *HistoricalStatesReader) attestingIndicies(attestation solid.AttestationData, aggregationBits []byte, checkBitsLength bool, randaoMixes solid.HashVectorSSZ, idxs []uint64) ([]uint64, error) { +func (r *HistoricalStatesReader) attestingIndicies(attestation solid.AttestationData, aggregationBits []byte, checkBitsLength bool, mix libcommon.Hash, idxs []uint64) ([]uint64, error) { slot := attestation.Slot() committeesPerSlot := committeeCount(r.cfg, slot/r.cfg.SlotsPerEpoch, idxs) committeeIndex := attestation.ValidatorIndex() index := (slot%r.cfg.SlotsPerEpoch)*committeesPerSlot + committeeIndex count := committeesPerSlot * r.cfg.SlotsPerEpoch - committee, err := r.computeCommittee(randaoMixes, idxs, attestation.Slot(), count, index) + committee, err := r.ComputeCommittee(mix, idxs, attestation.Slot(), count, index) if err != nil { return nil, err } @@ -40,7 +44,7 @@ func (r *HistoricalStatesReader) attestingIndicies(attestation solid.Attestation } // computeCommittee uses cache to compute compittee -func (r *HistoricalStatesReader) computeCommittee(randaoMixes solid.HashVectorSSZ, indicies []uint64, slot uint64, count, index uint64) ([]uint64, error) { +func (r *HistoricalStatesReader) ComputeCommittee(mix libcommon.Hash, indicies []uint64, slot uint64, count, index uint64) ([]uint64, error) { cfg := r.cfg lenIndicies := uint64(len(indicies)) @@ -48,12 +52,9 @@ func (r *HistoricalStatesReader) computeCommittee(randaoMixes solid.HashVectorSS end := (lenIndicies * (index + 1)) / count var shuffledIndicies []uint64 epoch := slot / cfg.SlotsPerEpoch - - mixPosition := (epoch + cfg.EpochsPerHistoricalVector - cfg.MinSeedLookahead - 1) % - cfg.EpochsPerHistoricalVector - // Input for the seed hash. 
- mix := randaoMixes.Get(int(mixPosition)) - + /* + mixPosition := (epoch + cfg.EpochsPerHistoricalVector - cfg.MinSeedLookahead - 1) % cfg.EpochsPerHistoricalVector + */ if shuffledIndicesInterface, ok := r.shuffledSetsCache.Get(epoch); ok { shuffledIndicies = shuffledIndicesInterface } else { @@ -75,3 +76,92 @@ func committeeCount(cfg *clparams.BeaconChainConfig, epoch uint64, idxs []uint64 } return committeCount } + +func (r *HistoricalStatesReader) readHistoricalBlockRoot(tx kv.Tx, slot, index uint64) (libcommon.Hash, error) { + slotSubIndex := slot % r.cfg.SlotsPerHistoricalRoot + needFromGenesis := true + + var slotLookup uint64 + if index <= slotSubIndex { + if slot > (slotSubIndex - index) { + slotLookup = slot - (slotSubIndex - index) + needFromGenesis = false + } + } else { + if slot > (slotSubIndex + (r.cfg.SlotsPerHistoricalRoot - index)) { + slotLookup = slot - (slotSubIndex + (r.cfg.SlotsPerHistoricalRoot - index)) + needFromGenesis = false + } + } + + if needFromGenesis { + return r.genesisState.GetBlockRootAtSlot(slot) + } + br, err := tx.GetOne(kv.BlockRoot, base_encoding.Encode64ToBytes4(slotLookup)) + if err != nil { + return libcommon.Hash{}, err + } + if len(br) != 32 { + return libcommon.Hash{}, fmt.Errorf("invalid block root length %d", len(br)) + } + return libcommon.BytesToHash(br), nil + +} + +func (r *HistoricalStatesReader) getAttestationParticipationFlagIndicies(tx kv.Tx, stateSlot uint64, data solid.AttestationData, inclusionDelay uint64, skipAssert bool) ([]uint8, error) { + currentCheckpoint, previousCheckpoint, _, err := state_accessors.ReadCheckpoints(tx, r.cfg.RoundSlotToEpoch(stateSlot)) + if err != nil { + return nil, err + } + if currentCheckpoint == nil { + currentCheckpoint = r.genesisState.CurrentJustifiedCheckpoint() + } + if previousCheckpoint == nil { + previousCheckpoint = r.genesisState.PreviousJustifiedCheckpoint() + } + + var justifiedCheckpoint solid.Checkpoint + // get checkpoint from epoch + if data.Target().Epoch() == stateSlot/r.cfg.SlotsPerEpoch { + justifiedCheckpoint = currentCheckpoint + } else { + justifiedCheckpoint = previousCheckpoint + } + // Matching roots + if !data.Source().Equal(justifiedCheckpoint) && !skipAssert { + // jsonify the data.Source and justifiedCheckpoint + jsonSource, err := data.Source().MarshalJSON() + if err != nil { + return nil, err + } + jsonJustifiedCheckpoint, err := justifiedCheckpoint.MarshalJSON() + if err != nil { + return nil, err + } + return nil, fmt.Errorf("GetAttestationParticipationFlagIndicies: source does not match. 
source: %s, justifiedCheckpoint: %s", jsonSource, jsonJustifiedCheckpoint) + } + i := (data.Target().Epoch() * r.cfg.SlotsPerEpoch) % r.cfg.SlotsPerHistoricalRoot + targetRoot, err := r.readHistoricalBlockRoot(tx, stateSlot, i) + if err != nil { + return nil, err + } + + i = data.Slot() % r.cfg.SlotsPerHistoricalRoot + headRoot, err := r.readHistoricalBlockRoot(tx, stateSlot, i) + if err != nil { + return nil, err + } + matchingTarget := data.Target().BlockRoot() == targetRoot + matchingHead := matchingTarget && data.BeaconBlockRoot() == headRoot + participationFlagIndicies := []uint8{} + if inclusionDelay <= utils.IntegerSquareRoot(r.cfg.SlotsPerEpoch) { + participationFlagIndicies = append(participationFlagIndicies, r.cfg.TimelySourceFlagIndex) + } + if matchingTarget && inclusionDelay <= r.cfg.SlotsPerEpoch { + participationFlagIndicies = append(participationFlagIndicies, r.cfg.TimelyTargetFlagIndex) + } + if matchingHead && inclusionDelay == r.cfg.MinAttestationInclusionDelay { + participationFlagIndicies = append(participationFlagIndicies, r.cfg.TimelyHeadFlagIndex) + } + return participationFlagIndicies, nil +} diff --git a/cl/persistence/state/historical_states_reader/historical_states_reader.go b/cl/persistence/state/historical_states_reader/historical_states_reader.go index fe804024ee7..5cf69b590b2 100644 --- a/cl/persistence/state/historical_states_reader/historical_states_reader.go +++ b/cl/persistence/state/historical_states_reader/historical_states_reader.go @@ -8,7 +8,6 @@ import ( "fmt" "io" "sync" - "time" "github.com/klauspost/compress/zstd" "github.com/ledgerwatch/erigon-lib/common" @@ -24,6 +23,8 @@ import ( "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" "github.com/spf13/afero" "golang.org/x/exp/slices" + + libcommon "github.com/ledgerwatch/erigon-lib/common" ) type HistoricalStatesReader struct { @@ -66,7 +67,7 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. return nil, fmt.Errorf("slot %d is greater than latest processed state %d", slot, latestProcessedState) } - if slot == 0 { + if slot == r.genesisState.Slot() { return r.genesisState.Copy() } // Read the current block (we need the block header) + other stuff @@ -75,26 +76,34 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. return nil, err } if block == nil { - return nil, fmt.Errorf("block at slot %d not found", slot) + return nil, nil } blockHeader := block.SignedBeaconBlockHeader().Header blockHeader.Root = common.Hash{} - // Read the minimal beacon state which have the small fields. - minimalBeaconState, err := state_accessors.ReadMinimalBeaconState(tx, slot) + // Read the epoch and per-slot data. 
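The old MinimalBeaconState is split into slot-scoped data (eth1 info, attestation lengths, withdrawal cursors), read at the exact slot, and epoch-scoped data (fork, justification bits, historical lengths), read at the epoch boundary, as the two reads below show. A small sketch of the rounding this relies on (assuming `RoundSlotToEpoch` rounds down to the first slot of the epoch, which matches its use here):

```go
package main

import "fmt"

// roundSlotToEpoch shows what cfg.RoundSlotToEpoch is assumed to do:
// round a slot down to the first slot of its epoch.
func roundSlotToEpoch(slot, slotsPerEpoch uint64) uint64 {
	return slot - slot%slotsPerEpoch
}

func main() {
	// With 32 slots per epoch, slots 1000000..1000031 share one epoch-data key,
	// while slot data is keyed by the exact slot.
	fmt.Println(roundSlotToEpoch(1000003, 32)) // 1000000
	fmt.Println(roundSlotToEpoch(1000031, 32)) // 1000000
}
```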
+ slotData, err := state_accessors.ReadSlotData(tx, slot) if err != nil { return nil, err } - // State not found - if minimalBeaconState == nil { + if slotData == nil { + return nil, nil + } + roundedSlot := r.cfg.RoundSlotToEpoch(slot) + + epochData, err := state_accessors.ReadEpochData(tx, roundedSlot) + if err != nil { + return nil, err + } + if epochData == nil { return nil, nil } // Versioning - ret.SetVersion(minimalBeaconState.Version) + ret.SetVersion(slotData.Version) ret.SetGenesisTime(r.genesisState.GenesisTime()) ret.SetGenesisValidatorsRoot(r.genesisState.GenesisValidatorsRoot()) ret.SetSlot(slot) - ret.SetFork(minimalBeaconState.Fork) + ret.SetFork(epochData.Fork) // History stateRoots, blockRoots := solid.NewHashVector(int(r.cfg.SlotsPerHistoricalRoot)), solid.NewHashVector(int(r.cfg.SlotsPerHistoricalRoot)) ret.SetLatestBlockHeader(blockHeader) @@ -110,7 +119,7 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. ret.SetStateRoots(stateRoots) historicalRoots := solid.NewHashList(int(r.cfg.HistoricalRootsLimit)) - if err := state_accessors.ReadHistoricalRoots(tx, minimalBeaconState.HistoricalRootsLength, func(idx int, root common.Hash) error { + if err := state_accessors.ReadHistoricalRoots(tx, epochData.HistoricalRootsLength, func(idx int, root common.Hash) error { historicalRoots.Append(root) return nil }); err != nil { @@ -120,12 +129,12 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. // Eth1 eth1DataVotes := solid.NewStaticListSSZ[*cltypes.Eth1Data](int(r.cfg.Eth1DataVotesLength()), 72) - if err := r.readEth1DataVotes(tx, minimalBeaconState.Eth1DataLength, slot, eth1DataVotes); err != nil { + if err := r.readEth1DataVotes(tx, slotData.Eth1DataLength, slot, eth1DataVotes); err != nil { return nil, err } ret.SetEth1DataVotes(eth1DataVotes) - ret.SetEth1Data(minimalBeaconState.Eth1Data) - ret.SetEth1DepositIndex(minimalBeaconState.Eth1DepositIndex) + ret.SetEth1Data(slotData.Eth1Data) + ret.SetEth1DepositIndex(slotData.Eth1DepositIndex) // Registry (Validators + Balances) balancesBytes, err := r.reconstructBalances(tx, slot, kv.ValidatorBalance) if err != nil { @@ -137,7 +146,7 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. } ret.SetBalances(balances) - validatorSet, currActiveIdxs, prevActiveIdxs, err := r.readValidatorsForHistoricalState(tx, slot, minimalBeaconState.ValidatorLength) + validatorSet, err := r.ReadValidatorsForHistoricalState(tx, slot) if err != nil { return nil, fmt.Errorf("failed to read validators: %w", err) } @@ -150,14 +159,14 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. 
ret.SetRandaoMixes(randaoMixes) slashingsVector := solid.NewUint64VectorSSZ(int(r.cfg.EpochsPerSlashingsVector)) // Slashings - err = r.reconstructUint64ListDump(tx, slot, kv.ValidatorSlashings, int(r.cfg.EpochsPerSlashingsVector), slashingsVector) + err = r.ReconstructUint64ListDump(tx, slot, kv.ValidatorSlashings, int(r.cfg.EpochsPerSlashingsVector), slashingsVector) if err != nil { return nil, fmt.Errorf("failed to read slashings: %w", err) } ret.SetSlashings(slashingsVector) // Finality - currentCheckpoint, previousCheckpoint, finalizedCheckpoint, err := state_accessors.ReadCheckpoints(tx, r.cfg.RoundSlotToEpoch(slot)) + currentCheckpoint, previousCheckpoint, finalizedCheckpoint, err := state_accessors.ReadCheckpoints(tx, roundedSlot) if err != nil { return nil, err } @@ -170,20 +179,20 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. if finalizedCheckpoint == nil { finalizedCheckpoint = r.genesisState.FinalizedCheckpoint() } - ret.SetJustificationBits(*minimalBeaconState.JustificationBits) + ret.SetJustificationBits(*epochData.JustificationBits) ret.SetPreviousJustifiedCheckpoint(previousCheckpoint) ret.SetCurrentJustifiedCheckpoint(currentCheckpoint) ret.SetFinalizedCheckpoint(finalizedCheckpoint) // Participation if ret.Version() == clparams.Phase0Version { - currentAtts, previousAtts, err := r.readPendingEpochs(tx, slot, minimalBeaconState.CurrentEpochAttestationsLength, minimalBeaconState.PreviousEpochAttestationsLength) + currentAtts, previousAtts, err := r.readPendingEpochs(tx, slot, slotData.CurrentEpochAttestationsLength, slotData.PreviousEpochAttestationsLength) if err != nil { return nil, fmt.Errorf("failed to read pending attestations: %w", err) } ret.SetCurrentEpochAttestations(currentAtts) ret.SetPreviousEpochAttestations(previousAtts) } else { - currentIdxs, previousIdxs, err := r.readPartecipations(tx, slot, minimalBeaconState.ValidatorLength, currActiveIdxs, prevActiveIdxs, ret, currentCheckpoint, previousCheckpoint) + currentIdxs, previousIdxs, err := r.ReadPartecipations(tx, slot) if err != nil { return nil, fmt.Errorf("failed to read participations: %w", err) } @@ -192,11 +201,11 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. } if ret.Version() < clparams.AltairVersion { - return ret, ret.InitBeaconState() + return ret, nil } inactivityScores := solid.NewUint64ListSSZ(int(r.cfg.ValidatorRegistryLimit)) // Inactivity - err = r.reconstructUint64ListDump(tx, slot, kv.InactivityScores, int(minimalBeaconState.ValidatorLength), inactivityScores) + err = r.ReconstructUint64ListDump(tx, slot, kv.InactivityScores, int(slotData.ValidatorLength), inactivityScores) if err != nil { return nil, fmt.Errorf("failed to read inactivity scores: %w", err) } @@ -223,7 +232,7 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. ret.SetNextSyncCommittee(nextSyncCommittee) // Execution if ret.Version() < clparams.BellatrixVersion { - return ret, ret.InitBeaconState() + return ret, nil } payloadHeader, err := block.Block.Body.ExecutionPayload.PayloadHeader() if err != nil { @@ -231,22 +240,22 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. 
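For Altair and later, the participation bitlists above are rebuilt from raw attestations using the timeliness rules computed by getAttestationParticipationFlagIndicies at the top of this section: timely source requires inclusion within isqrt(SLOTS_PER_EPOCH) slots, timely target a matching target root within SLOTS_PER_EPOCH slots, and timely head a matching head root at exactly the minimum inclusion delay. A standalone restatement of those rules (flag indices 0, 1, 2 as in the Altair spec; source matching is assumed to have been verified beforehand, as the reader does):

	// Sketch of the Altair timeliness rules; delay is the inclusion delay
	// in slots. Pure function, no Erigon dependencies.
	func timelyFlags(matchingTarget, matchingHead bool, delay, slotsPerEpoch, minInclusionDelay uint64) []uint8 {
		const timelySource, timelyTarget, timelyHead uint8 = 0, 1, 2
		flags := []uint8{}
		if delay*delay <= slotsPerEpoch { // equivalent to delay <= isqrt(slotsPerEpoch)
			flags = append(flags, timelySource)
		}
		if matchingTarget && delay <= slotsPerEpoch {
			flags = append(flags, timelyTarget)
		}
		if matchingHead && delay == minInclusionDelay {
			flags = append(flags, timelyHead)
		}
		return flags
	}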
} ret.SetLatestExecutionPayloadHeader(payloadHeader) if ret.Version() < clparams.CapellaVersion { - return ret, ret.InitBeaconState() + return ret, nil } // Withdrawals - ret.SetNextWithdrawalIndex(minimalBeaconState.NextWithdrawalIndex) - ret.SetNextWithdrawalValidatorIndex(minimalBeaconState.NextWithdrawalValidatorIndex) + ret.SetNextWithdrawalIndex(slotData.NextWithdrawalIndex) + ret.SetNextWithdrawalValidatorIndex(slotData.NextWithdrawalValidatorIndex) // Deep history valid from Capella onwards historicalSummaries := solid.NewStaticListSSZ[*cltypes.HistoricalSummary](int(r.cfg.HistoricalRootsLimit), 64) - if err := state_accessors.ReadHistoricalSummaries(tx, minimalBeaconState.HistoricalSummariesLength, func(idx int, historicalSummary *cltypes.HistoricalSummary) error { + if err := state_accessors.ReadHistoricalSummaries(tx, epochData.HistoricalSummariesLength, func(idx int, historicalSummary *cltypes.HistoricalSummary) error { historicalSummaries.Append(historicalSummary) return nil }); err != nil { return nil, fmt.Errorf("failed to read historical summaries: %w", err) } ret.SetHistoricalSummaries(historicalSummaries) - return ret, ret.InitBeaconState() + return ret, nil } func (r *HistoricalStatesReader) readHistoryHashVector(tx kv.Tx, genesisVector solid.HashVectorSSZ, slot, size uint64, table string, out solid.HashVectorSSZ) (err error) { @@ -390,7 +399,6 @@ func (r *HistoricalStatesReader) reconstructDiffedUint64List(tx kv.Tx, slot uint return nil, err } - // now start diffing diffCursor, err := tx.Cursor(diffBucket) if err != nil { return nil, err @@ -407,12 +415,10 @@ func (r *HistoricalStatesReader) reconstructDiffedUint64List(tx kv.Tx, slot uint if base_encoding.Decode64FromBytes4(k) > slot { return nil, fmt.Errorf("diff not found for slot %d", slot) } - s := time.Now() currentList, err = base_encoding.ApplyCompressedSerializedUint64ListDiff(currentList, currentList, v) if err != nil { return nil, err } - fmt.Println("diffing", time.Since(s)) } return currentList, err @@ -445,7 +451,6 @@ func (r *HistoricalStatesReader) reconstructBalances(tx kv.Tx, slot uint64, diff return nil, err } roundedSlot := r.cfg.RoundSlotToEpoch(slot) - fmt.Println(roundedSlot, freshDumpSlot) for i := freshDumpSlot; i < roundedSlot; i += r.cfg.SlotsPerEpoch { diff, err := tx.GetOne(diffBucket, base_encoding.Encode64ToBytes4(i)) if err != nil { @@ -454,14 +459,12 @@ func (r *HistoricalStatesReader) reconstructBalances(tx kv.Tx, slot uint64, diff if len(diff) == 0 { continue } - fmt.Println(i) currentList, err = base_encoding.ApplyCompressedSerializedUint64ListDiff(currentList, currentList, diff) if err != nil { return nil, err } } - // now start diffing diffCursor, err := tx.Cursor(diffBucket) if err != nil { return nil, err @@ -478,18 +481,16 @@ func (r *HistoricalStatesReader) reconstructBalances(tx kv.Tx, slot uint64, diff if base_encoding.Decode64FromBytes4(k) > slot { return nil, fmt.Errorf("diff not found for slot %d", slot) } - s := time.Now() currentList, err = base_encoding.ApplyCompressedSerializedUint64ListDiff(currentList, currentList, v) if err != nil { return nil, err } - fmt.Println("diffing", time.Since(s)) } return currentList, err } -func (r *HistoricalStatesReader) reconstructUint64ListDump(tx kv.Tx, slot uint64, bkt string, size int, out solid.Uint64ListSSZ) error { +func (r *HistoricalStatesReader) ReconstructUint64ListDump(tx kv.Tx, slot uint64, bkt string, size int, out solid.Uint64ListSSZ) error { diffCursor, err := tx.Cursor(bkt) if err != nil { return err @@ -529,44 +530,39 
@@ func (r *HistoricalStatesReader) reconstructUint64ListDump(tx kv.Tx, slot uint64 return out.DecodeSSZ(currentList, 0) } -func (r *HistoricalStatesReader) readValidatorsForHistoricalState(tx kv.Tx, slot, validatorSetLength uint64) (*solid.ValidatorSet, []uint64, []uint64, error) { +func (r *HistoricalStatesReader) ReadValidatorsForHistoricalState(tx kv.Tx, slot uint64) (*solid.ValidatorSet, error) { + // Read the slot data, which holds the small per-slot fields. + sd, err := state_accessors.ReadSlotData(tx, slot) + if err != nil { + return nil, err + } + // State not found + if sd == nil { + return nil, nil + } + validatorSetLength := sd.ValidatorLength + out := solid.NewValidatorSetWithLength(int(r.cfg.ValidatorRegistryLimit), int(validatorSetLength)) // Read the static validator field which are hot in memory (this is > 70% of the whole beacon state) - activeIds := make([]uint64, 0, validatorSetLength) - epoch := slot / r.cfg.SlotsPerEpoch - - prevActiveIds := make([]uint64, 0, validatorSetLength) - if epoch == 0 { - prevActiveIds = activeIds - } r.validatorTable.ForEach(func(validatorIndex uint64, validator *state_accessors.StaticValidator) bool { if validatorIndex >= validatorSetLength { return false } currValidator := out.Get(int(validatorIndex)) validator.ToValidator(currValidator, slot) - if currValidator.Active(epoch) { - activeIds = append(activeIds, validatorIndex) - } - if epoch == 0 { - return true - } - if currValidator.Active(epoch - 1) { - prevActiveIds = append(prevActiveIds, validatorIndex) - } return true }) // Read the balances bytesEffectiveBalances, err := r.reconstructDiffedUint64List(tx, slot, kv.ValidatorEffectiveBalance, "effective_balances") if err != nil { - return nil, nil, nil, err + return nil, err } for i := 0; i < int(validatorSetLength); i++ { out.Get(i). SetEffectiveBalanceFromBytes(bytesEffectiveBalances[(i * 8) : (i*8)+8]) } - return out, activeIds, prevActiveIds, nil + return out, nil } func (r *HistoricalStatesReader) readPendingEpochs(tx kv.Tx, slot uint64, currentEpochAttestationsLength, previousEpochAttestationsLength uint64) (*solid.ListSSZ[*solid.PendingAttestation], *solid.ListSSZ[*solid.PendingAttestation], error) { @@ -589,20 +585,42 @@ func (r *HistoricalStatesReader) readPendingEpochs(tx kv.Tx, slot uint64, curren } // readParticipations shuffles active indicies and returns the participation flags for the given epoch. -func (r *HistoricalStatesReader) readPartecipations(tx kv.Tx, slot uint64, validatorLength uint64, - currentActiveIndicies, previousActiveIndicies []uint64, ret *state.CachingBeaconState, - currentJustifiedCheckpoint, previousJustifiedCheckpoint solid.Checkpoint) (*solid.BitList, *solid.BitList, error) { - randaoMixes := ret.RandaoMixes() +func (r *HistoricalStatesReader) ReadPartecipations(tx kv.Tx, slot uint64) (*solid.BitList, *solid.BitList, error) { var beginSlot uint64 epoch, prevEpoch := r.computeRelevantEpochs(slot) beginSlot = prevEpoch * r.cfg.SlotsPerEpoch + currentActiveIndicies, err := state_accessors.ReadActiveIndicies(tx, epoch*r.cfg.SlotsPerEpoch) + if err != nil { + return nil, nil, err + } + var previousActiveIndicies []uint64 + if epoch == 0 { + previousActiveIndicies = currentActiveIndicies + } else { + previousActiveIndicies, err = state_accessors.ReadActiveIndicies(tx, (epoch-1)*r.cfg.SlotsPerEpoch) + if err != nil { + return nil, nil, err + } + } + + // Read the slot data, which holds the small per-slot fields.
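The reconstruct* helpers above share one dump-plus-diffs scheme: a full snapshot of the list is written every so many slots, and each epoch in between stores only a compressed diff against the previous version, so reading slot N means loading the nearest dump at or before N and folding the diffs forward in slot order. A minimal sketch of the folding step, with storage abstracted away (apply stands in for base_encoding.ApplyCompressedSerializedUint64ListDiff):

	// Sketch: fold per-epoch diffs onto a base dump, oldest first.
	func foldDiffs(dump []byte, diffs [][]byte, apply func(out, cur, diff []byte) ([]byte, error)) ([]byte, error) {
		cur := dump
		var err error
		for _, d := range diffs {
			if len(d) == 0 {
				continue // epochs with no changes store nothing
			}
			if cur, err = apply(cur, cur, d); err != nil {
				return nil, err
			}
		}
		return cur, nil
	}

The per-slot read resumes below.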
+ sd, err := state_accessors.ReadSlotData(tx, slot) + if err != nil { + return nil, nil, err + } + // State not found + if sd == nil { + return nil, nil, nil + } + validatorLength := sd.ValidatorLength + currentIdxs := solid.NewBitList(int(validatorLength), int(r.cfg.ValidatorRegistryLimit)) previousIdxs := solid.NewBitList(int(validatorLength), int(r.cfg.ValidatorRegistryLimit)) // trigger the cache for shuffled sets in parallel - s := time.Now() - r.tryCachingEpochsInParallell(randaoMixes, [][]uint64{currentActiveIndicies, previousActiveIndicies}, []uint64{epoch, prevEpoch}) - fmt.Println("parallel", time.Since(s)) + if err := r.tryCachingEpochsInParallell(tx, [][]uint64{currentActiveIndicies, previousActiveIndicies}, []uint64{epoch, prevEpoch}); err != nil { + return nil, nil, err + } // Read the previous idxs for i := beginSlot; i <= slot; i++ { // Read the block @@ -613,7 +631,6 @@ func (r *HistoricalStatesReader) readPartecipations(tx kv.Tx, slot uint64, valid if block == nil { continue } - ret.SetSlot(i) currentEpoch := i / r.cfg.SlotsPerEpoch // Read the participation flags @@ -635,13 +652,21 @@ func (r *HistoricalStatesReader) readPartecipations(tx kv.Tx, slot uint64, valid activeIndicies = previousActiveIndicies } + attestationEpoch := data.Slot() / r.cfg.SlotsPerEpoch + + mixPosition := (attestationEpoch + r.cfg.EpochsPerHistoricalVector - r.cfg.MinSeedLookahead - 1) % r.cfg.EpochsPerHistoricalVector + mix, err := r.ReadRandaoMixBySlotAndIndex(tx, data.Slot(), mixPosition) + if err != nil { + return false + } + var attestingIndicies []uint64 - attestingIndicies, err = r.attestingIndicies(attestation.AttestantionData(), attestation.AggregationBits(), true, randaoMixes, activeIndicies) + attestingIndicies, err = r.attestingIndicies(data, attestation.AggregationBits(), true, mix, activeIndicies) if err != nil { return false } var participationFlagsIndicies []uint8 - participationFlagsIndicies, err = ret.GetAttestationParticipationFlagIndicies(data, ret.Slot()-data.Slot(), true) + participationFlagsIndicies, err = r.getAttestationParticipationFlagIndicies(tx, i, data, i-data.Slot(), true) if err != nil { return false } @@ -681,15 +706,84 @@ func (r *HistoricalStatesReader) computeRelevantEpochs(slot uint64) (uint64, uin return epoch, epoch - 1 } -func (r *HistoricalStatesReader) tryCachingEpochsInParallell(randaoMixes solid.HashVectorSSZ, activeIdxs [][]uint64, epochs []uint64) { +func (r *HistoricalStatesReader) tryCachingEpochsInParallell(tx kv.Tx, activeIdxs [][]uint64, epochs []uint64) error { var wg sync.WaitGroup wg.Add(len(epochs)) for i, epoch := range epochs { - go func(epoch uint64, idxs []uint64) { + mixPosition := (epoch + r.cfg.EpochsPerHistoricalVector - r.cfg.MinSeedLookahead - 1) % r.cfg.EpochsPerHistoricalVector + mix, err := r.ReadRandaoMixBySlotAndIndex(tx, epochs[0]*r.cfg.SlotsPerEpoch, mixPosition) + if err != nil { + return err + } + + go func(mix libcommon.Hash, epoch uint64, idxs []uint64) { defer wg.Done() - _, _ = r.computeCommittee(randaoMixes, idxs, epoch*r.cfg.SlotsPerEpoch, r.cfg.TargetCommitteeSize, 0) - }(epoch, activeIdxs[i]) + + _, _ = r.ComputeCommittee(mix, idxs, epoch*r.cfg.SlotsPerEpoch, r.cfg.TargetCommitteeSize, 0) + }(mix, epoch, activeIdxs[i]) } wg.Wait() + return nil +} +func (r *HistoricalStatesReader) ReadValidatorsBalances(tx kv.Tx, slot uint64) (solid.Uint64ListSSZ, error) { + sd, err := state_accessors.ReadSlotData(tx, slot) + if err != nil { + return nil, err + } + // State not found + if sd == nil { + return nil, nil + } + + 
balances, err := r.reconstructBalances(tx, slot, kv.ValidatorBalance) + if err != nil { + return nil, err + } + balancesList := solid.NewUint64ListSSZ(int(r.cfg.ValidatorRegistryLimit)) + + return balancesList, balancesList.DecodeSSZ(balances, 0) +} + +func (r *HistoricalStatesReader) ReadRandaoMixBySlotAndIndex(tx kv.Tx, slot, index uint64) (libcommon.Hash, error) { + epoch := slot / r.cfg.SlotsPerEpoch + epochSubIndex := epoch % r.cfg.EpochsPerHistoricalVector + if index == epochSubIndex { + intraRandaoMix, err := tx.GetOne(kv.IntraRandaoMixes, base_encoding.Encode64ToBytes4(slot)) + if err != nil { + return libcommon.Hash{}, err + } + if len(intraRandaoMix) != 32 { + return libcommon.Hash{}, fmt.Errorf("invalid intra randao mix length %d", len(intraRandaoMix)) + } + return libcommon.BytesToHash(intraRandaoMix), nil + } + needFromGenesis := true + var epochLookup uint64 + if index <= epochSubIndex { + if epoch > (epochSubIndex - index) { + needFromGenesis = false + epochLookup = epoch - (epochSubIndex - index) + } + } else { + if epoch > (epochSubIndex + (r.cfg.EpochsPerHistoricalVector - index)) { + needFromGenesis = false + epochLookup = epoch - (epochSubIndex + (r.cfg.EpochsPerHistoricalVector - index)) + } + } + if epochLookup < r.genesisState.Slot()/r.cfg.SlotsPerEpoch { + needFromGenesis = true + } + + if needFromGenesis { + return r.genesisState.GetRandaoMixes(epoch), nil + } + mixBytes, err := tx.GetOne(kv.RandaoMixes, base_encoding.Encode64ToBytes4(epochLookup*r.cfg.SlotsPerEpoch)) + if err != nil { + return libcommon.Hash{}, err + } + if len(mixBytes) != 32 { + return libcommon.Hash{}, fmt.Errorf("invalid mix length %d", len(mixBytes)) + } + return libcommon.BytesToHash(mixBytes), nil } diff --git a/cl/persistence/state/historical_states_reader/historical_states_reader_test.go b/cl/persistence/state/historical_states_reader/historical_states_reader_test.go index fb641c730b6..cec53451589 100644 --- a/cl/persistence/state/historical_states_reader/historical_states_reader_test.go +++ b/cl/persistence/state/historical_states_reader/historical_states_reader_test.go @@ -21,12 +21,12 @@ import ( func runTest(t *testing.T, blocks []*cltypes.SignedBeaconBlock, preState, postState *state.CachingBeaconState) { db := memdb.NewTestDB(t) - reader := tests.LoadChain(blocks, db) + reader, _ := tests.LoadChain(blocks, postState, db, t) ctx := context.Background() vt := state_accessors.NewStaticValidatorTable() f := afero.NewMemMapFs() - a := antiquary.NewAntiquary(ctx, preState, vt, &clparams.MainnetBeaconConfig, datadir.New("/tmp"), nil, db, nil, reader, nil, log.New(), true, f) + a := antiquary.NewAntiquary(ctx, preState, vt, &clparams.MainnetBeaconConfig, datadir.New("/tmp"), nil, db, nil, reader, nil, log.New(), true, true, f) require.NoError(t, a.IncrementBeaconState(ctx, blocks[len(blocks)-1].Block.Slot+33)) // Now lets test it against the reader tx, err := db.BeginRw(ctx) @@ -47,19 +47,19 @@ func runTest(t *testing.T, blocks []*cltypes.SignedBeaconBlock, preState, postSt } func TestStateAntiquaryCapella(t *testing.T) { - //t.Skip() + t.Skip() blocks, preState, postState := tests.GetCapellaRandom() runTest(t, blocks, preState, postState) } func TestStateAntiquaryPhase0(t *testing.T) { - // t.Skip() + t.Skip() blocks, preState, postState := tests.GetPhase0Random() runTest(t, blocks, preState, postState) } func TestStateAntiquaryBellatrix(t *testing.T) { - // t.Skip() + t.Skip() blocks, preState, postState := tests.GetBellatrixRandom() runTest(t, blocks, preState, postState) } diff --git 
a/cl/persistence/state/minimal_state.go b/cl/persistence/state/slot_data.go similarity index 68% rename from cl/persistence/state/minimal_state.go rename to cl/persistence/state/slot_data.go index b22923767b2..2d0e865bfc2 100644 --- a/cl/persistence/state/minimal_state.go +++ b/cl/persistence/state/slot_data.go @@ -7,11 +7,11 @@ import ( "github.com/ledgerwatch/erigon/cl/clparams" "github.com/ledgerwatch/erigon/cl/cltypes" - "github.com/ledgerwatch/erigon/cl/phase1/core/state/raw" + "github.com/ledgerwatch/erigon/cl/phase1/core/state" ssz2 "github.com/ledgerwatch/erigon/cl/ssz" ) -type MinimalBeaconState struct { +type SlotData struct { // Block Header and Execution Headers can be retrieved from block snapshots Version clparams.StateVersion // Lengths @@ -19,42 +19,39 @@ type MinimalBeaconState struct { Eth1DataLength uint64 PreviousEpochAttestationsLength uint64 CurrentEpochAttestationsLength uint64 - HistoricalSummariesLength uint64 - HistoricalRootsLength uint64 // Phase0 - Eth1Data *cltypes.Eth1Data - Eth1DepositIndex uint64 - JustificationBits *cltypes.JustificationBits - Fork *cltypes.Fork + Eth1Data *cltypes.Eth1Data + Eth1DepositIndex uint64 // Capella NextWithdrawalIndex uint64 NextWithdrawalValidatorIndex uint64 + + // BlockRewards for proposer + AttestationsRewards uint64 + SyncAggregateRewards uint64 + ProposerSlashings uint64 + AttesterSlashings uint64 } -func MinimalBeaconStateFromBeaconState(s *raw.BeaconState) *MinimalBeaconState { +func SlotDataFromBeaconState(s *state.CachingBeaconState) *SlotData { justificationCopy := &cltypes.JustificationBits{} jj := s.JustificationBits() copy(justificationCopy[:], jj[:]) - return &MinimalBeaconState{ - Fork: s.Fork(), + return &SlotData{ ValidatorLength: uint64(s.ValidatorLength()), Eth1DataLength: uint64(s.Eth1DataVotes().Len()), PreviousEpochAttestationsLength: uint64(s.PreviousEpochAttestations().Len()), CurrentEpochAttestationsLength: uint64(s.CurrentEpochAttestations().Len()), - HistoricalSummariesLength: s.HistoricalSummariesLength(), - HistoricalRootsLength: s.HistoricalRootsLength(), Version: s.Version(), Eth1Data: s.Eth1Data(), Eth1DepositIndex: s.Eth1DepositIndex(), - JustificationBits: justificationCopy, NextWithdrawalIndex: s.NextWithdrawalIndex(), NextWithdrawalValidatorIndex: s.NextWithdrawalValidatorIndex(), } - } // Serialize serializes the state into a byte slice with zstd compression. -func (m *MinimalBeaconState) WriteTo(w io.Writer) error { +func (m *SlotData) WriteTo(w io.Writer) error { buf, err := ssz2.MarshalSSZ(nil, m.getSchema()...) if err != nil { return err @@ -75,10 +72,8 @@ func (m *MinimalBeaconState) WriteTo(w io.Writer) error { } // Deserialize deserializes the state from a byte slice with zstd compression. -func (m *MinimalBeaconState) ReadFrom(r io.Reader) error { +func (m *SlotData) ReadFrom(r io.Reader) error { m.Eth1Data = &cltypes.Eth1Data{} - m.JustificationBits = &cltypes.JustificationBits{} - m.Fork = &cltypes.Fork{} var err error versionByte := make([]byte, 1) @@ -105,8 +100,8 @@ func (m *MinimalBeaconState) ReadFrom(r io.Reader) error { return ssz2.UnmarshalSSZ(buf, int(m.Version), m.getSchema()...) 
} -func (m *MinimalBeaconState) getSchema() []interface{} { - schema := []interface{}{m.Eth1Data, m.Fork, &m.Eth1DepositIndex, m.JustificationBits, &m.ValidatorLength, &m.Eth1DataLength, &m.PreviousEpochAttestationsLength, &m.CurrentEpochAttestationsLength, &m.HistoricalSummariesLength, &m.HistoricalRootsLength} +func (m *SlotData) getSchema() []interface{} { + schema := []interface{}{m.Eth1Data, &m.Eth1DepositIndex, &m.ValidatorLength, &m.Eth1DataLength, &m.PreviousEpochAttestationsLength, &m.CurrentEpochAttestationsLength, &m.AttestationsRewards, &m.SyncAggregateRewards, &m.ProposerSlashings, &m.AttesterSlashings} if m.Version >= clparams.CapellaVersion { schema = append(schema, &m.NextWithdrawalIndex, &m.NextWithdrawalValidatorIndex) } diff --git a/cl/persistence/state/minimal_state_test.go b/cl/persistence/state/slot_data_test.go similarity index 73% rename from cl/persistence/state/minimal_state_test.go rename to cl/persistence/state/slot_data_test.go index acfb7c22f07..6378d159a26 100644 --- a/cl/persistence/state/minimal_state_test.go +++ b/cl/persistence/state/slot_data_test.go @@ -9,13 +9,11 @@ import ( "github.com/stretchr/testify/require" ) -func TestMinimalState(t *testing.T) { - m := &MinimalBeaconState{ +func TestSlotData(t *testing.T) { + m := &SlotData{ Version: clparams.CapellaVersion, Eth1Data: &cltypes.Eth1Data{}, - Fork: &cltypes.Fork{}, Eth1DepositIndex: 0, - JustificationBits: &cltypes.JustificationBits{}, NextWithdrawalIndex: 0, NextWithdrawalValidatorIndex: 0, } @@ -23,7 +21,7 @@ func TestMinimalState(t *testing.T) { if err := m.WriteTo(&b); err != nil { t.Fatal(err) } - m2 := &MinimalBeaconState{} + m2 := &SlotData{} if err := m2.ReadFrom(&b); err != nil { t.Fatal(err) } diff --git a/cl/persistence/state/state_accessors.go b/cl/persistence/state/state_accessors.go index 6155bde3371..234641de968 100644 --- a/cl/persistence/state/state_accessors.go +++ b/cl/persistence/state/state_accessors.go @@ -31,6 +31,9 @@ func InitializeStaticTables(tx kv.RwTx, state *state.CachingBeaconState) error { if err = tx.Append(kv.ValidatorPublicKeys, key, v.PublicKeyBytes()); err != nil { return false } + if err = tx.Put(kv.InvertedValidatorPublicKeys, v.PublicKeyBytes(), key); err != nil { + return false + } return true }) if err != nil { @@ -72,6 +75,9 @@ func IncrementPublicKeyTable(tx kv.RwTx, state *state.CachingBeaconState, prever if err := tx.Put(kv.ValidatorPublicKeys, key, pubKey[:]); err != nil { return err } + if err := tx.Put(kv.InvertedValidatorPublicKeys, pubKey[:], key); err != nil { + return err + } } return nil } @@ -117,6 +123,18 @@ func ReadPublicKeyByIndex(tx kv.Tx, index uint64) (libcommon.Bytes48, error) { return ret, err } +func ReadValidatorIndexByPublicKey(tx kv.Tx, key libcommon.Bytes48) (uint64, bool, error) { + var index []byte + var err error + if index, err = tx.GetOne(kv.InvertedValidatorPublicKeys, key[:]); err != nil { + return 0, false, err + } + if len(index) == 0 { + return 0, false, nil + } + return base_encoding.Decode64FromBytes4(index), true, nil +} + func GetStateProcessingProgress(tx kv.Tx) (uint64, error) { progressByytes, err := tx.GetOne(kv.StatesProcessingProgress, kv.StatesProcessingKey) if err != nil { @@ -132,9 +150,23 @@ func SetStateProcessingProgress(tx kv.RwTx, progress uint64) error { return tx.Put(kv.StatesProcessingProgress, kv.StatesProcessingKey, base_encoding.Encode64ToBytes4(progress)) } -func ReadMinimalBeaconState(tx kv.Tx, slot uint64) (*MinimalBeaconState, error) { - minimalState := &MinimalBeaconState{} - v, err := 
tx.GetOne(kv.MinimalBeaconState, base_encoding.Encode64ToBytes4(slot)) +func ReadSlotData(tx kv.Tx, slot uint64) (*SlotData, error) { + sd := &SlotData{} + v, err := tx.GetOne(kv.SlotData, base_encoding.Encode64ToBytes4(slot)) + if err != nil { + return nil, err + } + if len(v) == 0 { + return nil, nil + } + buf := bytes.NewBuffer(v) + + return sd, sd.ReadFrom(buf) +} + +func ReadEpochData(tx kv.Tx, slot uint64) (*EpochData, error) { + ed := &EpochData{} + v, err := tx.GetOne(kv.EpochData, base_encoding.Encode64ToBytes4(slot)) if err != nil { return nil, err } @@ -143,20 +175,26 @@ func ReadMinimalBeaconState(tx kv.Tx, slot uint64) (*MinimalBeaconState, error) } buf := bytes.NewBuffer(v) - return minimalState, minimalState.ReadFrom(buf) + return ed, ed.ReadFrom(buf) } // ReadCheckpoints reads the checkpoints from the database, Current, Previous and Finalized func ReadCheckpoints(tx kv.Tx, slot uint64) (current solid.Checkpoint, previous solid.Checkpoint, finalized solid.Checkpoint, err error) { - v, err := tx.GetOne(kv.Checkpoints, base_encoding.Encode64ToBytes4(slot)) + ed := &EpochData{} + v, err := tx.GetOne(kv.EpochData, base_encoding.Encode64ToBytes4(slot)) if err != nil { return nil, nil, nil, err } if len(v) == 0 { return nil, nil, nil, nil } + buf := bytes.NewBuffer(v) + + if err := ed.ReadFrom(buf); err != nil { + return nil, nil, nil, err + } // Current, Pre - return solid.Checkpoint(v[0:40]), solid.Checkpoint(v[40:80]), solid.Checkpoint(v[80:120]), nil + return ed.CurrentJustifiedCheckpoint, ed.PreviousJustifiedCheckpoint, ed.FinalizedCheckpoint, nil } // ReadCheckpoints reads the checkpoints from the database, Current, Previous and Finalized @@ -225,7 +263,13 @@ func ReadCurrentEpochAttestations(tx kv.Tx, slot uint64, limit int) (*solid.List return nil, err } if len(v) == 0 { - return nil, nil + has, err := tx.Has(kv.CurrentEpochAttestations, base_encoding.Encode64ToBytes4(slot)) + if err != nil { + return nil, err + } + if !has { + return nil, nil + } } attestations := solid.NewDynamicListSSZ[*solid.PendingAttestation](limit) reader, err := zstd.NewReader(bytes.NewReader(v)) @@ -249,7 +293,13 @@ func ReadPreviousEpochAttestations(tx kv.Tx, slot uint64, limit int) (*solid.Lis return nil, err } if len(v) == 0 { - return nil, nil + has, err := tx.Has(kv.PreviousEpochAttestations, base_encoding.Encode64ToBytes4(slot)) + if err != nil { + return nil, err + } + if !has { + return nil, nil + } } attestations := solid.NewDynamicListSSZ[*solid.PendingAttestation](limit) reader, err := zstd.NewReader(bytes.NewReader(v)) @@ -294,5 +344,17 @@ func ReadValidatorsTable(tx kv.Tx, out *StaticValidatorTable) error { } out.slot = slot return err +} +func ReadActiveIndicies(tx kv.Tx, slot uint64) ([]uint64, error) { + key := base_encoding.Encode64ToBytes4(slot) + v, err := tx.GetOne(kv.ActiveValidatorIndicies, key) + if err != nil { + return nil, err + } + if len(v) == 0 { + return nil, nil + } + buf := bytes.NewBuffer(v) + return base_encoding.ReadRabbits(nil, buf) } diff --git a/cl/phase1/core/checkpoint.go b/cl/phase1/core/checkpoint.go index 0acc633c433..59eaf82aec1 100644 --- a/cl/phase1/core/checkpoint.go +++ b/cl/phase1/core/checkpoint.go @@ -2,12 +2,15 @@ package core import ( "context" + "encoding/binary" "fmt" "io" "net/http" + "github.com/ledgerwatch/erigon/cl/cltypes" "github.com/ledgerwatch/erigon/cl/phase1/core/state" + libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/cl/clparams" "github.com/ledgerwatch/erigon/cl/utils" 
"github.com/ledgerwatch/log/v3" @@ -48,3 +51,51 @@ func RetrieveBeaconState(ctx context.Context, beaconConfig *clparams.BeaconChain } return beaconState, nil } + +func RetrieveBlock(ctx context.Context, beaconConfig *clparams.BeaconChainConfig, genesisConfig *clparams.GenesisConfig, uri string, expectedBlockRoot *libcommon.Hash) (*cltypes.SignedBeaconBlock, error) { + log.Debug("[Checkpoint Sync] Requesting beacon block", "uri", uri) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + + req.Header.Set("Accept", "application/octet-stream") + if err != nil { + return nil, fmt.Errorf("checkpoint sync request failed %s", err) + } + r, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer func() { + err = r.Body.Close() + }() + if r.StatusCode != http.StatusOK { + return nil, fmt.Errorf("checkpoint sync failed, bad status code %d", r.StatusCode) + } + marshaled, err := io.ReadAll(r.Body) + if err != nil { + return nil, fmt.Errorf("checkpoint sync read failed %s", err) + } + if len(marshaled) < 108 { + return nil, fmt.Errorf("checkpoint sync read failed, too short") + } + currentSlot := binary.LittleEndian.Uint64(marshaled[100:108]) + v := beaconConfig.GetCurrentStateVersion(currentSlot / beaconConfig.SlotsPerEpoch) + + block := cltypes.NewSignedBeaconBlock(beaconConfig) + err = block.DecodeSSZ(marshaled, int(v)) + if err != nil { + return nil, fmt.Errorf("checkpoint sync decode failed %s", err) + } + if expectedBlockRoot != nil { + has, err := block.Block.HashSSZ() + if err != nil { + return nil, fmt.Errorf("checkpoint sync decode failed %s", err) + } + if has != *expectedBlockRoot { + return nil, fmt.Errorf("checkpoint sync decode failed, unexpected block root %s", has) + } + } + return block, nil +} diff --git a/cl/phase1/core/state/accessors.go b/cl/phase1/core/state/accessors.go index e5faa8e2eb8..14d6d74c03d 100644 --- a/cl/phase1/core/state/accessors.go +++ b/cl/phase1/core/state/accessors.go @@ -82,12 +82,9 @@ func InactivityLeaking(b abstract.BeaconState) bool { } // IsUnslashedParticipatingIndex -func IsUnslashedParticipatingIndex(b abstract.BeaconState, epoch, index uint64, flagIdx int) bool { - validator, err := b.ValidatorForValidatorIndex(int(index)) - if err != nil { - return false - } - return validator.Active(epoch) && cltypes.ParticipationFlags(b.EpochParticipation(false).Get(int(index))).HasFlag(flagIdx) && !validator.Slashed() +func IsUnslashedParticipatingIndex(validatorSet *solid.ValidatorSet, previousEpochParticipation *solid.BitList, epoch, index uint64, flagIdx int) bool { + validator := validatorSet.Get(int(index)) + return validator.Active(epoch) && cltypes.ParticipationFlags(previousEpochParticipation.Get(int(index))).HasFlag(flagIdx) && !validator.Slashed() } // EligibleValidatorsIndicies Implementation of get_eligible_validator_indices as defined in the eth 2.0 specs. 
diff --git a/cl/phase1/core/state/cache_mutators.go b/cl/phase1/core/state/cache_mutators.go index 05f9080c91a..b5ee40db296 100644 --- a/cl/phase1/core/state/cache_mutators.go +++ b/cl/phase1/core/state/cache_mutators.go @@ -16,46 +16,46 @@ func (b *CachingBeaconState) getSlashingProposerReward(whistleBlowerReward uint6 return whistleBlowerReward * b.BeaconConfig().ProposerWeight / b.BeaconConfig().WeightDenominator } -func (b *CachingBeaconState) SlashValidator(slashedInd uint64, whistleblowerInd *uint64) error { +func (b *CachingBeaconState) SlashValidator(slashedInd uint64, whistleblowerInd *uint64) (uint64, error) { epoch := Epoch(b) if err := b.InitiateValidatorExit(slashedInd); err != nil { - return err + return 0, err } // Record changes in changeset slashingsIndex := int(epoch % b.BeaconConfig().EpochsPerSlashingsVector) // Change the validator to be slashed if err := b.SetValidatorSlashed(int(slashedInd), true); err != nil { - return err + return 0, err } currentWithdrawableEpoch, err := b.ValidatorWithdrawableEpoch(int(slashedInd)) if err != nil { - return err + return 0, err } newWithdrawableEpoch := utils.Max64(currentWithdrawableEpoch, epoch+b.BeaconConfig().EpochsPerSlashingsVector) if err := b.SetWithdrawableEpochForValidatorAtIndex(int(slashedInd), newWithdrawableEpoch); err != nil { - return err + return 0, err } // Update slashings vector currentEffectiveBalance, err := b.ValidatorEffectiveBalance(int(slashedInd)) if err != nil { - return err + return 0, err } b.SetSlashingSegmentAt(slashingsIndex, b.SlashingSegmentAt(slashingsIndex)+currentEffectiveBalance) newEffectiveBalance, err := b.ValidatorEffectiveBalance(int(slashedInd)) if err != nil { - return err + return 0, err } if err := DecreaseBalance(b, slashedInd, newEffectiveBalance/b.BeaconConfig().GetMinSlashingPenaltyQuotient(b.Version())); err != nil { - return err + return 0, err } proposerInd, err := b.GetBeaconProposerIndex() if err != nil { - return fmt.Errorf("unable to get beacon proposer index: %v", err) + return 0, fmt.Errorf("unable to get beacon proposer index: %v", err) } if whistleblowerInd == nil { whistleblowerInd = new(uint64) @@ -64,9 +64,13 @@ func (b *CachingBeaconState) SlashValidator(slashedInd uint64, whistleblowerInd whistleBlowerReward := newEffectiveBalance / b.BeaconConfig().WhistleBlowerRewardQuotient proposerReward := b.getSlashingProposerReward(whistleBlowerReward) if err := IncreaseBalance(b, proposerInd, proposerReward); err != nil { - return err + return 0, err + } + rewardWhist := whistleBlowerReward - proposerReward + if whistleblowerInd == nil { + proposerReward += rewardWhist } - return IncreaseBalance(b, *whistleblowerInd, whistleBlowerReward-proposerReward) + return proposerReward, IncreaseBalance(b, *whistleblowerInd, whistleBlowerReward-proposerReward) } func (b *CachingBeaconState) InitiateValidatorExit(index uint64) error { diff --git a/cl/phase1/core/state/raw/getters.go b/cl/phase1/core/state/raw/getters.go index a1676714b28..1d234a647a0 100644 --- a/cl/phase1/core/state/raw/getters.go +++ b/cl/phase1/core/state/raw/getters.go @@ -84,6 +84,18 @@ func (b *BeaconState) Eth1DepositIndex() uint64 { return b.eth1DepositIndex } +func (b *BeaconState) ValidatorSet() *solid.ValidatorSet { + return b.validators +} + +func (b *BeaconState) PreviousEpochParticipation() *solid.BitList { + return b.previousEpochParticipation +} + +func (b *BeaconState) CurrentEpochParticipation() *solid.BitList { + return b.currentEpochParticipation +} + func (b *BeaconState) ValidatorLength() int { 
return b.validators.Length() } diff --git a/cl/phase1/core/state/raw/state.go b/cl/phase1/core/state/raw/state.go index 3f6533e4e92..f84eade855a 100644 --- a/cl/phase1/core/state/raw/state.go +++ b/cl/phase1/core/state/raw/state.go @@ -2,6 +2,7 @@ package raw import ( "encoding/json" + "strconv" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/cl/clparams" @@ -105,9 +106,9 @@ func (b *BeaconState) init() error { func (b *BeaconState) MarshalJSON() ([]byte, error) { obj := map[string]interface{}{ - "genesis_time": b.genesisTime, + "genesis_time": strconv.FormatInt(int64(b.genesisTime), 10), "genesis_validators_root": b.genesisValidatorsRoot, - "slot": b.slot, + "slot": strconv.FormatInt(int64(b.slot), 10), "fork": b.fork, "latest_block_header": b.latestBlockHeader, "block_roots": b.blockRoots, @@ -115,7 +116,7 @@ func (b *BeaconState) MarshalJSON() ([]byte, error) { "historical_roots": b.historicalRoots, "eth1_data": b.eth1Data, "eth1_data_votes": b.eth1DataVotes, - "eth1_deposit_index": b.eth1DepositIndex, + "eth1_deposit_index": strconv.FormatInt(int64(b.eth1DepositIndex), 10), "validators": b.validators, "balances": b.balances, "randao_mixes": b.randaoMixes, @@ -141,8 +142,8 @@ func (b *BeaconState) MarshalJSON() ([]byte, error) { obj["latest_execution_payload_header"] = b.latestExecutionPayloadHeader } if b.version >= clparams.CapellaVersion { - obj["next_withdrawal_index"] = b.nextWithdrawalIndex - obj["next_withdrawal_validator_index"] = b.nextWithdrawalValidatorIndex + obj["next_withdrawal_index"] = strconv.FormatInt(int64(b.nextWithdrawalIndex), 10) + obj["next_withdrawal_validator_index"] = strconv.FormatInt(int64(b.nextWithdrawalValidatorIndex), 10) obj["historical_summaries"] = b.historicalSummaries } return json.Marshal(obj) diff --git a/cl/phase1/core/state/utils_test.go b/cl/phase1/core/state/utils_test.go index 61fca829fed..8a1ac83e6c3 100644 --- a/cl/phase1/core/state/utils_test.go +++ b/cl/phase1/core/state/utils_test.go @@ -14,9 +14,10 @@ import ( func TestValidatorSlashing(t *testing.T) { state := New(&clparams.MainnetBeaconConfig) utils.DecodeSSZSnappy(state, stateEncoded, int(clparams.DenebVersion)) - - require.NoError(t, state.SlashValidator(1, nil)) - require.NoError(t, state.SlashValidator(2, nil)) + _, err := state.SlashValidator(1, nil) + require.NoError(t, err) + _, err = state.SlashValidator(2, nil) + require.NoError(t, err) exit, err := state.BeaconState.ValidatorExitEpoch(1) require.NoError(t, err) diff --git a/cl/phase1/execution_client/execution_client_direct.go b/cl/phase1/execution_client/execution_client_direct.go index 9f8eb1f50a7..cf827e4f8b8 100644 --- a/cl/phase1/execution_client/execution_client_direct.go +++ b/cl/phase1/execution_client/execution_client_direct.go @@ -28,7 +28,7 @@ func (cc *ExecutionClientDirect) NewPayload(payload *cltypes.Eth1Block, beaconPa return } - header, err := payload.RlpHeader() + header, err := payload.RlpHeader(beaconParentRoot) if err != nil { return true, err } diff --git a/cl/phase1/forkchoice/fork_choice_test.go b/cl/phase1/forkchoice/fork_choice_test.go index 4b5c5d81975..f712a447124 100644 --- a/cl/phase1/forkchoice/fork_choice_test.go +++ b/cl/phase1/forkchoice/fork_choice_test.go @@ -3,13 +3,16 @@ package forkchoice_test import ( "context" _ "embed" + "fmt" "testing" + "github.com/ledgerwatch/erigon/cl/antiquary/tests" "github.com/ledgerwatch/erigon/cl/cltypes/solid" "github.com/ledgerwatch/erigon/cl/phase1/core/state" "github.com/ledgerwatch/erigon/cl/phase1/forkchoice" 
"github.com/ledgerwatch/erigon/cl/phase1/forkchoice/fork_graph" "github.com/ledgerwatch/erigon/cl/pool" + "github.com/ledgerwatch/erigon/cl/transition" "github.com/spf13/afero" libcommon "github.com/ledgerwatch/erigon-lib/common" @@ -107,3 +110,42 @@ func TestForkChoiceBasic(t *testing.T) { require.NoError(t, err) require.Equal(t, len(pool.VoluntaryExistsPool.Raw()), 1) } + +func TestForkChoiceChainBellatrix(t *testing.T) { + blocks, anchorState, _ := tests.GetBellatrixRandom() + + intermediaryState, err := anchorState.Copy() + require.NoError(t, err) + + intermediaryBlockRoot := blocks[0].Block.ParentRoot + for i := 0; i < 35; i++ { + require.NoError(t, transition.TransitionState(intermediaryState, blocks[i], nil, false)) + intermediaryBlockRoot, err = blocks[i].Block.HashSSZ() + require.NoError(t, err) + } + // Initialize forkchoice store + pool := pool.NewOperationsPool(&clparams.MainnetBeaconConfig) + store, err := forkchoice.NewForkChoiceStore(context.Background(), anchorState, nil, nil, pool, fork_graph.NewForkGraphDisk(anchorState, afero.NewMemMapFs())) + store.OnTick(2000) + require.NoError(t, err) + for _, block := range blocks { + require.NoError(t, store.OnBlock(block, false, true)) + } + root1, err := blocks[20].Block.HashSSZ() + require.NoError(t, err) + + rewards, ok := store.BlockRewards(libcommon.Hash(root1)) + require.True(t, ok) + require.Equal(t, rewards.Attestations, uint64(0x511ad)) + // test randao mix + mixes := solid.NewHashVector(int(clparams.MainnetBeaconConfig.EpochsPerHistoricalVector)) + require.True(t, store.RandaoMixes(intermediaryBlockRoot, mixes)) + for i := 0; i < mixes.Length(); i++ { + require.Equal(t, mixes.Get(i), intermediaryState.RandaoMixes().Get(i), fmt.Sprintf("mixes mismatch at index %d, have: %x, expected: %x", i, mixes.Get(i), intermediaryState.RandaoMixes().Get(i))) + } + currentIntermediarySyncCommittee, nextIntermediarySyncCommittee, ok := store.GetSyncCommittees(intermediaryBlockRoot) + require.True(t, ok) + + require.Equal(t, intermediaryState.CurrentSyncCommittee(), currentIntermediarySyncCommittee) + require.Equal(t, intermediaryState.NextSyncCommittee(), nextIntermediarySyncCommittee) +} diff --git a/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go b/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go index d22d99905f3..1030cba9014 100644 --- a/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go +++ b/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go @@ -5,27 +5,41 @@ import ( "errors" "sync" + "github.com/klauspost/compress/zstd" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/cl/clparams" "github.com/ledgerwatch/erigon/cl/cltypes" "github.com/ledgerwatch/erigon/cl/cltypes/solid" "github.com/ledgerwatch/erigon/cl/phase1/core/state" "github.com/ledgerwatch/erigon/cl/transition" + "github.com/ledgerwatch/erigon/cl/transition/impl/eth2" "github.com/ledgerwatch/log/v3" - "github.com/pierrec/lz4" "github.com/spf13/afero" "golang.org/x/exp/slices" ) -var lz4PoolWriterPool = sync.Pool{ +type syncCommittees struct { + currentSyncCommittee *solid.SyncCommittee + nextSyncCommittee *solid.SyncCommittee +} + +var compressorPool = sync.Pool{ New: func() interface{} { - return lz4.NewWriter(nil) + w, err := zstd.NewWriter(nil) + if err != nil { + panic(err) + } + return w }, } -var lz4PoolReaderPool = sync.Pool{ +var decompressPool = sync.Pool{ New: func() interface{} { - return lz4.NewReader(nil) + r, err := zstd.NewReader(nil) + if err != nil { + panic(err) + } + return r }, } @@ -69,12 +83,16 @@ type 
forkGraphDisk struct { // for each block root we also keep track of te equivalent current justified and finalized checkpoints for faster head retrieval. currentJustifiedCheckpoints map[libcommon.Hash]solid.Checkpoint finalizedCheckpoints map[libcommon.Hash]solid.Checkpoint + // keep track of rewards too + blockRewards map[libcommon.Hash]*eth2.BlockRewardsCollector + // for each block root we keep track of the sync committees for head retrieval. + syncCommittees map[libcommon.Hash]syncCommittees // configurations beaconCfg *clparams.BeaconChainConfig genesisTime uint64 // highest block seen - highestSeen, anchorSlot uint64 + highestSeen, lowestAvaiableSlot, anchorSlot uint64 // reusable buffers sszBuffer bytes.Buffer @@ -108,13 +126,16 @@ func NewForkGraphDisk(anchorState *state.CachingBeaconState, aferoFs afero.Fs) F currentState: anchorState, currentStateBlockRoot: anchorRoot, saveStates: make(map[libcommon.Hash]savedStateRecord), + syncCommittees: make(map[libcommon.Hash]syncCommittees), // checkpoints trackers currentJustifiedCheckpoints: make(map[libcommon.Hash]solid.Checkpoint), finalizedCheckpoints: make(map[libcommon.Hash]solid.Checkpoint), + blockRewards: make(map[libcommon.Hash]*eth2.BlockRewardsCollector), // configuration - beaconCfg: anchorState.BeaconConfig(), - genesisTime: anchorState.GenesisTime(), - anchorSlot: anchorState.Slot(), + beaconCfg: anchorState.BeaconConfig(), + genesisTime: anchorState.GenesisTime(), + anchorSlot: anchorState.Slot(), + lowestAvaiableSlot: anchorState.Slot(), } f.dumpBeaconStateOnDisk(anchorState, anchorRoot) return f @@ -157,8 +178,9 @@ func (f *forkGraphDisk) AddChainSegment(signedBlock *cltypes.SignedBeaconBlock, return nil, MissingSegment, nil } + blockRewardsCollector := ð2.BlockRewardsCollector{} // Execute the state - if invalidBlockErr := transition.TransitionState(newState, signedBlock, fullValidation); invalidBlockErr != nil { + if invalidBlockErr := transition.TransitionState(newState, signedBlock, blockRewardsCollector, fullValidation); invalidBlockErr != nil { // Add block to list of invalid blocks log.Debug("Invalid beacon block", "reason", invalidBlockErr) f.badBlocks[blockRoot] = struct{}{} @@ -173,6 +195,12 @@ func (f *forkGraphDisk) AddChainSegment(signedBlock *cltypes.SignedBeaconBlock, return nil, InvalidBlock, invalidBlockErr } + f.blockRewards[blockRoot] = blockRewardsCollector + f.syncCommittees[blockRoot] = syncCommittees{ + currentSyncCommittee: newState.CurrentSyncCommittee().Copy(), + nextSyncCommittee: newState.NextSyncCommittee().Copy(), + } + f.blocks[blockRoot] = signedBlock bodyRoot, err := signedBlock.Block.Body.HashSSZ() if err != nil { @@ -296,7 +324,7 @@ func (f *forkGraphDisk) GetStateAtSlot(slot uint64, alwaysCopy bool) (*state.Cac // Traverse the blocks from top to bottom. for _, block := range blocksInTheWay { - if err := transition.TransitionState(copyReferencedState, block, false); err != nil { + if err := transition.TransitionState(copyReferencedState, block, nil, false); err != nil { return nil, err } } @@ -345,7 +373,7 @@ func (f *forkGraphDisk) GetState(blockRoot libcommon.Hash, alwaysCopy bool) (*st // Traverse the blocks from top to bottom. 
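State retrieval in the fork graph is snapshot-plus-replay: find the nearest persisted state, collect the blocks from the target back down to it, then re-apply the transition function block by block, with a nil rewards collector since replay has no need to re-account proposer rewards. A minimal sketch of the replay step, assuming the chain was gathered child-first as in the loop that follows:

	// Sketch: re-execute blocks onto a restored base state, oldest first.
	// TransitionState signature as used elsewhere in this diff.
	func replayChain(base *state.CachingBeaconState, chain []*cltypes.SignedBeaconBlock) error {
		for i := len(chain) - 1; i >= 0; i-- { // chain is ordered child -> parent
			if err := transition.TransitionState(base, chain[i], nil, false); err != nil {
				return err
			}
		}
		return nil
	}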
for i := len(blocksInTheWay) - 1; i >= 0; i-- { - if err := transition.TransitionState(copyReferencedState, blocksInTheWay[i], false); err != nil { + if err := transition.TransitionState(copyReferencedState, blocksInTheWay[i], nil, false); err != nil { return nil, err } } @@ -375,6 +403,7 @@ func (f *forkGraphDisk) Prune(pruneSlot uint64) (err error) { } oldRoots = append(oldRoots, hash) } + f.lowestAvaiableSlot = pruneSlot + 1 for _, root := range oldRoots { delete(f.badBlocks, root) delete(f.blocks, root) @@ -382,9 +411,28 @@ func (f *forkGraphDisk) Prune(pruneSlot uint64) (err error) { delete(f.finalizedCheckpoints, root) delete(f.headers, root) delete(f.saveStates, root) + delete(f.syncCommittees, root) + delete(f.blockRewards, root) f.fs.Remove(getBeaconStateFilename(root)) f.fs.Remove(getBeaconStateCacheFilename(root)) } log.Debug("Pruned old blocks", "pruneSlot", pruneSlot) return } + +func (f *forkGraphDisk) GetSyncCommittees(blockRoot libcommon.Hash) (*solid.SyncCommittee, *solid.SyncCommittee, bool) { + obj, has := f.syncCommittees[blockRoot] + if !has { + return nil, nil, false + } + return obj.currentSyncCommittee, obj.nextSyncCommittee, true +} + +func (f *forkGraphDisk) GetBlockRewards(blockRoot libcommon.Hash) (*eth2.BlockRewardsCollector, bool) { + obj, has := f.blockRewards[blockRoot] + return obj, has +} + +func (f *forkGraphDisk) LowestAvaiableSlot() uint64 { + return f.lowestAvaiableSlot +} diff --git a/cl/phase1/forkchoice/fork_graph/fork_graph_disk_fs.go b/cl/phase1/forkchoice/fork_graph/fork_graph_disk_fs.go index e0ebf2a80f2..274fa7a691b 100644 --- a/cl/phase1/forkchoice/fork_graph/fork_graph_disk_fs.go +++ b/cl/phase1/forkchoice/fork_graph/fork_graph_disk_fs.go @@ -6,9 +6,9 @@ import ( "os" "github.com/golang/snappy" + "github.com/klauspost/compress/zstd" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/cl/phase1/core/state" - "github.com/pierrec/lz4" "github.com/spf13/afero" ) @@ -70,12 +70,12 @@ func (f *forkGraphDisk) readBeaconStateFromDisk(blockRoot libcommon.Hash) (bs *s } defer cacheFile.Close() - lz4Reader := lz4PoolReaderPool.Get().(*lz4.Reader) - defer lz4PoolReaderPool.Put(lz4Reader) + reader := decompressPool.Get().(*zstd.Decoder) + defer decompressPool.Put(reader) - lz4Reader.Reset(cacheFile) + reader.Reset(cacheFile) - if err := bs.DecodeCaches(lz4Reader); err != nil { + if err := bs.DecodeCaches(reader); err != nil { return nil, err } @@ -135,16 +135,16 @@ func (f *forkGraphDisk) dumpBeaconStateOnDisk(bs *state.CachingBeaconState, bloc } defer cacheFile.Close() - lz4Writer := lz4PoolWriterPool.Get().(*lz4.Writer) - defer lz4PoolWriterPool.Put(lz4Writer) + writer := compressorPool.Get().(*zstd.Encoder) + defer compressorPool.Put(writer) - lz4Writer.CompressionLevel = 5 - lz4Writer.Reset(cacheFile) + writer.Reset(cacheFile) + defer writer.Close() - if err := bs.EncodeCaches(lz4Writer); err != nil { + if err := bs.EncodeCaches(writer); err != nil { return err } - if err = lz4Writer.Flush(); err != nil { + if err = writer.Close(); err != nil { return } err = cacheFile.Sync() diff --git a/cl/phase1/forkchoice/fork_graph/interface.go b/cl/phase1/forkchoice/fork_graph/interface.go index 66a2edd0e83..23d9e106040 100644 --- a/cl/phase1/forkchoice/fork_graph/interface.go +++ b/cl/phase1/forkchoice/fork_graph/interface.go @@ -5,6 +5,7 @@ import ( "github.com/ledgerwatch/erigon/cl/cltypes" "github.com/ledgerwatch/erigon/cl/cltypes/solid" "github.com/ledgerwatch/erigon/cl/phase1/core/state" + 
"github.com/ledgerwatch/erigon/cl/transition/impl/eth2" ) /* @@ -23,9 +24,12 @@ type ForkGraph interface { GetState(blockRoot libcommon.Hash, alwaysCopy bool) (*state.CachingBeaconState, error) GetCurrentJustifiedCheckpoint(blockRoot libcommon.Hash) (solid.Checkpoint, bool) GetFinalizedCheckpoint(blockRoot libcommon.Hash) (solid.Checkpoint, bool) + GetSyncCommittees(blockRoot libcommon.Hash) (*solid.SyncCommittee, *solid.SyncCommittee, bool) MarkHeaderAsInvalid(blockRoot libcommon.Hash) AnchorSlot() uint64 Prune(uint64) error + GetBlockRewards(blockRoot libcommon.Hash) (*eth2.BlockRewardsCollector, bool) + LowestAvaiableSlot() uint64 // extra methods for validator api GetStateAtSlot(slot uint64, alwaysCopy bool) (*state.CachingBeaconState, error) diff --git a/cl/phase1/forkchoice/forkchoice.go b/cl/phase1/forkchoice/forkchoice.go index bbe79bc4549..f6533b96ebb 100644 --- a/cl/phase1/forkchoice/forkchoice.go +++ b/cl/phase1/forkchoice/forkchoice.go @@ -12,6 +12,7 @@ import ( "github.com/ledgerwatch/erigon/cl/phase1/execution_client" "github.com/ledgerwatch/erigon/cl/phase1/forkchoice/fork_graph" "github.com/ledgerwatch/erigon/cl/pool" + "github.com/ledgerwatch/erigon/cl/transition/impl/eth2" "golang.org/x/exp/slices" lru "github.com/hashicorp/golang-lru/v2" @@ -26,6 +27,17 @@ const ( allowedCachedStates = 8 ) +type randaoDelta struct { + epoch uint64 + delta libcommon.Hash +} + +type finalityCheckpoints struct { + finalizedCheckpoint solid.Checkpoint + currentJustifiedCheckpoint solid.Checkpoint + previousJustifiedCheckpoint solid.Checkpoint +} + type preverifiedAppendListsSizes struct { validatorLength uint64 historicalRootsLength uint64 @@ -55,8 +67,15 @@ type ForkChoiceStore struct { anchorPublicKeys []byte // We keep track of them so that we can forkchoice with EL. eth2Roots *lru.Cache[libcommon.Hash, libcommon.Hash] // ETH2 root -> ETH1 hash - // preverifid sizes - preverifiedSizes *lru.Cache[libcommon.Hash, preverifiedAppendListsSizes] + // preverifid sizes and other data collection + preverifiedSizes *lru.Cache[libcommon.Hash, preverifiedAppendListsSizes] + finalityCheckpoints *lru.Cache[libcommon.Hash, finalityCheckpoints] + totalActiveBalances *lru.Cache[libcommon.Hash, uint64] + // Randao mixes + randaoMixesLists *lru.Cache[libcommon.Hash, solid.HashListSSZ] // limited randao mixes full list (only 16 elements) + randaoDeltas *lru.Cache[libcommon.Hash, randaoDelta] // small entry can be lots of elements. 
+ // participation tracking + participation *lru.Cache[uint64, *solid.BitList] // epoch -> [partecipation] mu sync.Mutex // EL @@ -93,6 +112,22 @@ func NewForkChoiceStore(ctx context.Context, anchorState *state2.CachingBeaconSt if err != nil { return nil, err } + + randaoMixesLists, err := lru.New[libcommon.Hash, solid.HashListSSZ](allowedCachedStates) + if err != nil { + return nil, err + } + + randaoDeltas, err := lru.New[libcommon.Hash, randaoDelta](checkpointsPerCache) + if err != nil { + return nil, err + } + + finalityCheckpoints, err := lru.New[libcommon.Hash, finalityCheckpoints](checkpointsPerCache) + if err != nil { + return nil, err + } + anchorPublicKeys := make([]byte, anchorState.ValidatorLength()*length.Bytes48) for idx := 0; idx < anchorState.ValidatorLength(); idx++ { pk, err := anchorState.ValidatorPublicKey(idx) @@ -112,6 +147,22 @@ func NewForkChoiceStore(ctx context.Context, anchorState *state2.CachingBeaconSt historicalSummariesLength: anchorState.HistoricalSummariesLength(), }) + totalActiveBalances, err := lru.New[libcommon.Hash, uint64](checkpointsPerCache * 10) + if err != nil { + return nil, err + } + + participation, err := lru.New[uint64, *solid.BitList](16) + if err != nil { + return nil, err + } + + participation.Add(state.Epoch(anchorState.BeaconState), anchorState.CurrentEpochParticipation().Copy()) + + totalActiveBalances.Add(anchorRoot, anchorState.GetTotalActiveBalance()) + r := solid.NewHashVector(int(anchorState.BeaconConfig().EpochsPerHistoricalVector)) + anchorState.RandaoMixes().CopyTo(r) + randaoMixesLists.Add(anchorRoot, r) return &ForkChoiceStore{ ctx: ctx, highestSeen: anchorState.Slot(), @@ -133,6 +184,11 @@ func NewForkChoiceStore(ctx context.Context, anchorState *state2.CachingBeaconSt beaconCfg: anchorState.BeaconConfig(), childrens: make(map[libcommon.Hash]childrens), preverifiedSizes: preverifiedSizes, + finalityCheckpoints: finalityCheckpoints, + totalActiveBalances: totalActiveBalances, + randaoMixesLists: randaoMixesLists, + randaoDeltas: randaoDeltas, + participation: participation, }, nil } @@ -212,7 +268,7 @@ func (f *ForkChoiceStore) FinalizedCheckpoint() solid.Checkpoint { func (f *ForkChoiceStore) FinalizedSlot() uint64 { f.mu.Lock() defer f.mu.Unlock() - return f.computeStartSlotAtEpoch(f.finalizedCheckpoint.Epoch()) + return f.computeStartSlotAtEpoch(f.finalizedCheckpoint.Epoch()) + (f.beaconCfg.SlotsPerEpoch - 1) } // FinalizedCheckpoint returns justified checkpoint @@ -273,3 +329,73 @@ func (f *ForkChoiceStore) PreverifiedHistoricalSummaries(blockRoot libcommon.Has } return 0 } + +func (f *ForkChoiceStore) GetFinalityCheckpoints(blockRoot libcommon.Hash) (bool, solid.Checkpoint, solid.Checkpoint, solid.Checkpoint) { + f.mu.Lock() + defer f.mu.Unlock() + if ret, ok := f.finalityCheckpoints.Get(blockRoot); ok { + return true, ret.finalizedCheckpoint, ret.currentJustifiedCheckpoint, ret.previousJustifiedCheckpoint + } + return false, solid.Checkpoint{}, solid.Checkpoint{}, solid.Checkpoint{} +} + +func (f *ForkChoiceStore) GetSyncCommittees(blockRoot libcommon.Hash) (*solid.SyncCommittee, *solid.SyncCommittee, bool) { + f.mu.Lock() + defer f.mu.Unlock() + return f.forkGraph.GetSyncCommittees(blockRoot) +} + +func (f *ForkChoiceStore) BlockRewards(root libcommon.Hash) (*eth2.BlockRewardsCollector, bool) { + f.mu.Lock() + defer f.mu.Unlock() + return f.forkGraph.GetBlockRewards(root) +} + +func (f *ForkChoiceStore) TotalActiveBalance(root libcommon.Hash) (uint64, bool) { + return f.totalActiveBalances.Get(root) +} + +func (f 
*ForkChoiceStore) LowestAvaiableSlot() uint64 { + f.mu.Lock() + defer f.mu.Unlock() + return f.forkGraph.LowestAvaiableSlot() +} + +func (f *ForkChoiceStore) RandaoMixes(blockRoot libcommon.Hash, out solid.HashListSSZ) bool { + f.mu.Lock() + defer f.mu.Unlock() + relevantDeltas := map[uint64]randaoDelta{} + currentBlockRoot := blockRoot + var currentSlot uint64 + for { + h, ok := f.forkGraph.GetHeader(currentBlockRoot) + if !ok { + return false + } + currentSlot = h.Slot + if f.randaoMixesLists.Contains(currentBlockRoot) { + break + } + randaoDelta, ok := f.randaoDeltas.Get(currentBlockRoot) + if !ok { + return false + } + currentBlockRoot = h.ParentRoot + if _, ok := relevantDeltas[currentSlot/f.beaconCfg.SlotsPerEpoch]; !ok { + relevantDeltas[currentSlot/f.beaconCfg.SlotsPerEpoch] = randaoDelta + } + } + randaoMixes, ok := f.randaoMixesLists.Get(currentBlockRoot) + if !ok { + return false + } + randaoMixes.CopyTo(out) + for epoch, delta := range relevantDeltas { + out.Set(int(epoch%f.beaconCfg.EpochsPerHistoricalVector), delta.delta) + } + return true +} + +func (f *ForkChoiceStore) Partecipation(epoch uint64) (*solid.BitList, bool) { + return f.participation.Get(epoch) +} diff --git a/cl/phase1/forkchoice/forkchoice_mock.go b/cl/phase1/forkchoice/forkchoice_mock.go new file mode 100644 index 00000000000..3b2b35eccc6 --- /dev/null +++ b/cl/phase1/forkchoice/forkchoice_mock.go @@ -0,0 +1,197 @@ +package forkchoice + +import ( + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/cl/phase1/core/state" + "github.com/ledgerwatch/erigon/cl/phase1/execution_client" + "github.com/ledgerwatch/erigon/cl/transition/impl/eth2" +) + +// type ForkChoiceStorage interface { +// ForkChoiceStorageWriter +// ForkChoiceStorageReader +// } + +// type ForkChoiceStorageReader interface { +// Ancestor(root common.Hash, slot uint64) common.Hash +// AnchorSlot() uint64 +// Engine() execution_client.ExecutionEngine +// FinalizedCheckpoint() solid.Checkpoint +// FinalizedSlot() uint64 +// GetEth1Hash(eth2Root common.Hash) common.Hash +// GetHead() (common.Hash, uint64, error) +// HighestSeen() uint64 +// JustifiedCheckpoint() solid.Checkpoint +// JustifiedSlot() uint64 +// ProposerBoostRoot() common.Hash +// GetStateAtBlockRoot(blockRoot libcommon.Hash, alwaysCopy bool) (*state.CachingBeaconState, error) +// GetFinalityCheckpoints(blockRoot libcommon.Hash) (bool, solid.Checkpoint, solid.Checkpoint, solid.Checkpoint) +// GetSyncCommittees(blockRoot libcommon.Hash) (*solid.SyncCommittee, *solid.SyncCommittee, bool) +// Slot() uint64 +// Time() uint64 + +// GetStateAtSlot(slot uint64, alwaysCopy bool) (*state.CachingBeaconState, error) +// GetStateAtStateRoot(root libcommon.Hash, alwaysCopy bool) (*state.CachingBeaconState, error) +// } + +// type ForkChoiceStorageWriter interface { +// OnAttestation(attestation *solid.Attestation, fromBlock bool) error +// OnAttesterSlashing(attesterSlashing *cltypes.AttesterSlashing, test bool) error +// OnBlock(block *cltypes.SignedBeaconBlock, newPayload bool, fullValidation bool) error +// OnTick(time uint64) +// } + +// Make mocks with maps and simple setters and getters, panic on methods from ForkChoiceStorageWriter + +type ForkChoiceStorageMock struct { + Ancestors map[uint64]common.Hash + AnchorSlotVal uint64 + FinalizedCheckpointVal solid.Checkpoint + FinalizedSlotVal uint64 + HeadVal common.Hash + HeadSlotVal uint64 + HighestSeenVal uint64 + 
JustifiedCheckpointVal solid.Checkpoint + JustifiedSlotVal uint64 + ProposerBoostRootVal common.Hash + SlotVal uint64 + TimeVal uint64 + + ParticipationVal *solid.BitList + + StateAtBlockRootVal map[common.Hash]*state.CachingBeaconState + StateAtSlotVal map[uint64]*state.CachingBeaconState + GetSyncCommitteesVal map[common.Hash][2]*solid.SyncCommittee + GetFinalityCheckpointsVal map[common.Hash][3]solid.Checkpoint +} + +func NewForkChoiceStorageMock() *ForkChoiceStorageMock { + return &ForkChoiceStorageMock{ + Ancestors: make(map[uint64]common.Hash), + AnchorSlotVal: 0, + FinalizedCheckpointVal: solid.Checkpoint{}, + FinalizedSlotVal: 0, + HeadVal: common.Hash{}, + HighestSeenVal: 0, + JustifiedCheckpointVal: solid.Checkpoint{}, + JustifiedSlotVal: 0, + ProposerBoostRootVal: common.Hash{}, + SlotVal: 0, + TimeVal: 0, + StateAtBlockRootVal: make(map[common.Hash]*state.CachingBeaconState), + StateAtSlotVal: make(map[uint64]*state.CachingBeaconState), + GetSyncCommitteesVal: make(map[common.Hash][2]*solid.SyncCommittee), + GetFinalityCheckpointsVal: make(map[common.Hash][3]solid.Checkpoint), + } +} + +func (f *ForkChoiceStorageMock) Ancestor(root common.Hash, slot uint64) common.Hash { + return f.Ancestors[slot] +} + +func (f *ForkChoiceStorageMock) AnchorSlot() uint64 { + return f.AnchorSlotVal +} + +func (f *ForkChoiceStorageMock) Engine() execution_client.ExecutionEngine { + panic("implement me") +} + +func (f *ForkChoiceStorageMock) FinalizedCheckpoint() solid.Checkpoint { + return f.FinalizedCheckpointVal +} + +func (f *ForkChoiceStorageMock) FinalizedSlot() uint64 { + return f.FinalizedSlotVal +} + +func (f *ForkChoiceStorageMock) GetEth1Hash(eth2Root common.Hash) common.Hash { + panic("implement me") +} + +func (f *ForkChoiceStorageMock) GetHead() (common.Hash, uint64, error) { + return f.HeadVal, f.HeadSlotVal, nil +} + +func (f *ForkChoiceStorageMock) HighestSeen() uint64 { + return f.HighestSeenVal +} + +func (f *ForkChoiceStorageMock) JustifiedCheckpoint() solid.Checkpoint { + return f.JustifiedCheckpointVal +} + +func (f *ForkChoiceStorageMock) JustifiedSlot() uint64 { + return f.JustifiedSlotVal +} + +func (f *ForkChoiceStorageMock) ProposerBoostRoot() common.Hash { + return f.ProposerBoostRootVal +} + +func (f *ForkChoiceStorageMock) GetStateAtBlockRoot(blockRoot common.Hash, alwaysCopy bool) (*state.CachingBeaconState, error) { + return f.StateAtBlockRootVal[blockRoot], nil +} + +func (f *ForkChoiceStorageMock) GetFinalityCheckpoints(blockRoot common.Hash) (bool, solid.Checkpoint, solid.Checkpoint, solid.Checkpoint) { + oneNil := f.GetFinalityCheckpointsVal[blockRoot][0] != nil && f.GetFinalityCheckpointsVal[blockRoot][1] != nil && f.GetFinalityCheckpointsVal[blockRoot][2] != nil + return oneNil, f.GetFinalityCheckpointsVal[blockRoot][0], f.GetFinalityCheckpointsVal[blockRoot][1], f.GetFinalityCheckpointsVal[blockRoot][2] +} + +func (f *ForkChoiceStorageMock) GetSyncCommittees(blockRoot common.Hash) (*solid.SyncCommittee, *solid.SyncCommittee, bool) { + return f.GetSyncCommitteesVal[blockRoot][0], f.GetSyncCommitteesVal[blockRoot][1], f.GetSyncCommitteesVal[blockRoot][0] != nil && f.GetSyncCommitteesVal[blockRoot][1] != nil +} + +func (f *ForkChoiceStorageMock) GetStateAtSlot(slot uint64, alwaysCopy bool) (*state.CachingBeaconState, error) { + return f.StateAtSlotVal[slot], nil +} + +func (f *ForkChoiceStorageMock) Slot() uint64 { + return f.SlotVal +} + +func (f *ForkChoiceStorageMock) Time() uint64 { + return f.TimeVal +} + +func (f *ForkChoiceStorageMock) 
OnAttestation(attestation *solid.Attestation, fromBlock bool) error { + panic("implement me") +} + +func (f *ForkChoiceStorageMock) OnAttesterSlashing(attesterSlashing *cltypes.AttesterSlashing, test bool) error { + panic("implement me") +} + +func (f *ForkChoiceStorageMock) OnBlock(block *cltypes.SignedBeaconBlock, newPayload bool, fullValidation bool) error { + panic("implement me") +} + +func (f *ForkChoiceStorageMock) OnTick(time uint64) { + panic("implement me") +} + +func (f *ForkChoiceStorageMock) GetStateAtStateRoot(root common.Hash, alwaysCopy bool) (*state.CachingBeaconState, error) { + panic("implement me") +} + +func (f *ForkChoiceStorageMock) BlockRewards(root common.Hash) (*eth2.BlockRewardsCollector, bool) { + panic("implement me") +} + +func (f *ForkChoiceStorageMock) TotalActiveBalance(root common.Hash) (uint64, bool) { + panic("implement me") +} + +func (f *ForkChoiceStorageMock) RandaoMixes(blockRoot common.Hash, out solid.HashListSSZ) bool { + return false +} + +func (f *ForkChoiceStorageMock) LowestAvaiableSlot() uint64 { + return f.FinalizedSlotVal +} + +func (f *ForkChoiceStorageMock) Partecipation(epoch uint64) (*solid.BitList, bool) { + return f.ParticipationVal, f.ParticipationVal != nil +} diff --git a/cl/phase1/forkchoice/interface.go b/cl/phase1/forkchoice/interface.go index 96d34abd561..60320b4c715 100644 --- a/cl/phase1/forkchoice/interface.go +++ b/cl/phase1/forkchoice/interface.go @@ -7,6 +7,7 @@ import ( "github.com/ledgerwatch/erigon/cl/cltypes/solid" "github.com/ledgerwatch/erigon/cl/phase1/core/state" "github.com/ledgerwatch/erigon/cl/phase1/execution_client" + "github.com/ledgerwatch/erigon/cl/transition/impl/eth2" ) type ForkChoiceStorage interface { @@ -20,6 +21,7 @@ type ForkChoiceStorageReader interface { Engine() execution_client.ExecutionEngine FinalizedCheckpoint() solid.Checkpoint FinalizedSlot() uint64 + LowestAvaiableSlot() uint64 GetEth1Hash(eth2Root common.Hash) common.Hash GetHead() (common.Hash, uint64, error) HighestSeen() uint64 @@ -27,8 +29,14 @@ type ForkChoiceStorageReader interface { JustifiedSlot() uint64 ProposerBoostRoot() common.Hash GetStateAtBlockRoot(blockRoot libcommon.Hash, alwaysCopy bool) (*state.CachingBeaconState, error) + GetFinalityCheckpoints(blockRoot libcommon.Hash) (bool, solid.Checkpoint, solid.Checkpoint, solid.Checkpoint) + GetSyncCommittees(blockRoot libcommon.Hash) (*solid.SyncCommittee, *solid.SyncCommittee, bool) Slot() uint64 Time() uint64 + Partecipation(epoch uint64) (*solid.BitList, bool) + RandaoMixes(blockRoot libcommon.Hash, out solid.HashListSSZ) bool + BlockRewards(root libcommon.Hash) (*eth2.BlockRewardsCollector, bool) + TotalActiveBalance(root libcommon.Hash) (uint64, bool) GetStateAtSlot(slot uint64, alwaysCopy bool) (*state.CachingBeaconState, error) GetStateAtStateRoot(root libcommon.Hash, alwaysCopy bool) (*state.CachingBeaconState, error) diff --git a/cl/phase1/forkchoice/on_block.go b/cl/phase1/forkchoice/on_block.go index 2e709f7b0c1..06b28c5e772 100644 --- a/cl/phase1/forkchoice/on_block.go +++ b/cl/phase1/forkchoice/on_block.go @@ -8,7 +8,9 @@ import ( "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" "github.com/ledgerwatch/erigon/cl/freezer" + "github.com/ledgerwatch/erigon/cl/phase1/core/state" "github.com/ledgerwatch/erigon/cl/phase1/forkchoice/fork_graph" "github.com/ledgerwatch/erigon/cl/transition/impl/eth2/statechange" ) @@ -74,12 +76,28 @@ func (f *ForkChoiceStore) OnBlock(block 
*cltypes.SignedBeaconBlock, newPayload, if err := freezer.PutObjectSSZIntoFreezer("beaconState", "caplin_core", lastProcessedState.Slot(), lastProcessedState, f.recorder); err != nil { return err } + // Update randao mixes + r := solid.NewHashVector(int(f.beaconCfg.EpochsPerHistoricalVector)) + lastProcessedState.RandaoMixes().CopyTo(r) + f.randaoMixesLists.Add(blockRoot, r) + } else { + f.randaoDeltas.Add(blockRoot, randaoDelta{ + epoch: state.Epoch(lastProcessedState), + delta: lastProcessedState.GetRandaoMixes(state.Epoch(lastProcessedState)), + }) } + f.participation.Add(state.Epoch(lastProcessedState), lastProcessedState.CurrentEpochParticipation().Copy()) f.preverifiedSizes.Add(blockRoot, preverifiedAppendListsSizes{ validatorLength: uint64(lastProcessedState.ValidatorLength()), historicalRootsLength: lastProcessedState.HistoricalRootsLength(), historicalSummariesLength: lastProcessedState.HistoricalSummariesLength(), }) + f.finalityCheckpoints.Add(blockRoot, finalityCheckpoints{ + finalizedCheckpoint: lastProcessedState.FinalizedCheckpoint().Copy(), + currentJustifiedCheckpoint: lastProcessedState.CurrentJustifiedCheckpoint().Copy(), + previousJustifiedCheckpoint: lastProcessedState.PreviousJustifiedCheckpoint().Copy(), + }) + f.totalActiveBalances.Add(blockRoot, lastProcessedState.GetTotalActiveBalance()) // Update checkpoints f.updateCheckpoints(lastProcessedState.CurrentJustifiedCheckpoint().Copy(), lastProcessedState.FinalizedCheckpoint().Copy()) // First thing save previous values of the checkpoints (avoid memory copy of all states and ensure easy revert) diff --git a/cl/phase1/network/gossip_manager.go b/cl/phase1/network/gossip_manager.go index abc33f3d6c4..d7a0543b3cc 100644 --- a/cl/phase1/network/gossip_manager.go +++ b/cl/phase1/network/gossip_manager.go @@ -3,10 +3,12 @@ package network import ( "context" "fmt" - "github.com/ledgerwatch/erigon-lib/common" "sync" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/cl/freezer" + "github.com/ledgerwatch/erigon/cl/gossip" "github.com/ledgerwatch/erigon/cl/phase1/forkchoice" "github.com/ledgerwatch/erigon/cl/sentinel/peers" @@ -96,8 +98,8 @@ func (g *GossipManager) onRecv(ctx context.Context, data *sentinel.GossipData, l // If the deserialization fails, an error is logged and the loop returns to the next iteration. // If the deserialization is successful, the object is set to the deserialized value and the loop returns to the next iteration. 
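The hunk below swaps the protobuf enum `sentinel.GossipType` for the plain topic-name strings carried on the message (`data.Name`), matching the constants in the new `cl/gossip` package. A minimal sketch of name-keyed dispatch; the topic strings are the ones used elsewhere in this diff, and the handler bodies are placeholders:

```go
package main

import "fmt"

// Topic strings as they appear elsewhere in this diff.
const (
	topicNameBeaconBlock   = "beacon_block"
	topicNameVoluntaryExit = "voluntary_exit"
)

type gossipData struct {
	Name string
	Data []byte
}

func onRecv(data *gossipData) error {
	switch data.Name {
	case topicNameBeaconBlock:
		fmt.Printf("decode %d bytes as a SignedBeaconBlock\n", len(data.Data))
	case topicNameVoluntaryExit:
		fmt.Printf("decode %d bytes as a SignedVoluntaryExit\n", len(data.Data))
	default:
		return fmt.Errorf("unhandled gossip topic %q", data.Name)
	}
	return nil
}

func main() {
	_ = onRecv(&gossipData{Name: topicNameBeaconBlock, Data: []byte{0x00}})
}
```

One visible motivation for strings over an enum: indexed topics such as `blob_sidecar_3` cannot be enumerated ahead of time, but are trivially representable as formatted names.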
var object ssz.Unmarshaler - switch data.Type { - case sentinel.GossipType_BeaconBlockGossipType: + switch data.Name { + case gossip.TopicNameBeaconBlock: object = cltypes.NewSignedBeaconBlock(g.beaconConfig) if err := object.DecodeSSZ(common.CopyBytes(data.Data), int(version)); err != nil { g.sentinel.BanPeer(ctx, data.Peer) @@ -142,19 +144,19 @@ func (g *GossipManager) onRecv(ctx context.Context, data *sentinel.GossipData, l } g.mu.RUnlock() - case sentinel.GossipType_VoluntaryExitGossipType: + case gossip.TopicNameVoluntaryExit: if err := operationsContract[*cltypes.SignedVoluntaryExit](ctx, g, l, data, int(version), "voluntary exit", g.forkChoice.OnVoluntaryExit); err != nil { return err } - case sentinel.GossipType_ProposerSlashingGossipType: + case gossip.TopicNameProposerSlashing: if err := operationsContract[*cltypes.ProposerSlashing](ctx, g, l, data, int(version), "proposer slashing", g.forkChoice.OnProposerSlashing); err != nil { return err } - case sentinel.GossipType_AttesterSlashingGossipType: + case gossip.TopicNameAttesterSlashing: if err := operationsContract[*cltypes.AttesterSlashing](ctx, g, l, data, int(version), "attester slashing", g.forkChoice.OnAttesterSlashing); err != nil { return err } - case sentinel.GossipType_BlsToExecutionChangeGossipType: + case gossip.TopicNameBlsToExecutionChange: if err := operationsContract[*cltypes.SignedBLSToExecutionChange](ctx, g, l, data, int(version), "bls to execution change", g.forkChoice.OnBlsToExecutionChange); err != nil { return err } diff --git a/cl/phase1/stages/clstages.go b/cl/phase1/stages/clstages.go index 4d3f7f188cc..58cd687bd98 100644 --- a/cl/phase1/stages/clstages.go +++ b/cl/phase1/stages/clstages.go @@ -98,7 +98,6 @@ func ClStagesCfg( type StageName = string const ( - WaitForPeers StageName = "WaitForPeers" CatchUpEpochs StageName = "CatchUpEpochs" CatchUpBlocks StageName = "CatchUpBlocks" ForkChoice StageName = "ForkChoice" @@ -113,9 +112,6 @@ const ( ) func MetaCatchingUp(args Args) StageName { - if args.peers < minPeersForDownload { - return WaitForPeers - } if !args.hasDownloaded { return DownloadHistoricalBlocks } @@ -218,39 +214,6 @@ func ConsensusClStages(ctx context.Context, return }, Stages: map[string]clstages.Stage[*Cfg, Args]{ - WaitForPeers: { - Description: `wait for enough peers. 
This is also a safe stage to go to when unsure of what stage to use`, - TransitionFunc: func(cfg *Cfg, args Args, err error) string { - if x := MetaCatchingUp(args); x != "" { - return x - } - return CatchUpBlocks - }, - ActionFunc: func(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error { - peersCount, err := cfg.rpc.Peers() - if err != nil { - return nil - } - waitWhenNotEnoughPeers := 3 * time.Second - for { - if peersCount >= minPeersForDownload { - break - } - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - logger.Info("[Caplin] Waiting For Peers", "have", peersCount, "needed", minPeersForDownload, "retryIn", waitWhenNotEnoughPeers) - time.Sleep(waitWhenNotEnoughPeers) - peersCount, err = cfg.rpc.Peers() - if err != nil { - peersCount = 0 - } - } - return nil - }, - }, DownloadHistoricalBlocks: { Description: "Download historical blocks", TransitionFunc: func(cfg *Cfg, args Args, err error) string { @@ -269,7 +232,7 @@ func ConsensusClStages(ctx context.Context, startingSlot := cfg.state.LatestBlockHeader().Slot downloader := network2.NewBackwardBeaconDownloader(context.Background(), cfg.rpc, cfg.indiciesDB) - if err := SpawnStageHistoryDownload(StageHistoryReconstruction(downloader, cfg.antiquary, cfg.sn, cfg.beaconDB, cfg.indiciesDB, cfg.executionClient, cfg.genesisCfg, cfg.beaconCfg, cfg.backfilling, false, startingRoot, startingSlot, cfg.tmpdir, logger), context.Background(), logger); err != nil { + if err := SpawnStageHistoryDownload(StageHistoryReconstruction(downloader, cfg.antiquary, cfg.sn, cfg.beaconDB, cfg.indiciesDB, cfg.executionClient, cfg.genesisCfg, cfg.beaconCfg, cfg.backfilling, false, startingRoot, startingSlot, cfg.tmpdir, 600*time.Millisecond, logger), context.Background(), logger); err != nil { cfg.hasDownloaded = false return err } @@ -321,7 +284,8 @@ func ConsensusClStages(ctx context.Context, currentEpoch = utils.Max64(args.seenEpoch, currentEpoch-1) continue MainLoop } - header, err := executionPayload.RlpHeader() + parentRoot := &block.Block.ParentRoot + header, err := executionPayload.RlpHeader(parentRoot) if err != nil { log.Warn("bad blocks segment received", "err", err) cfg.rpc.BanPeer(blocks.Peer) @@ -368,7 +332,7 @@ func ConsensusClStages(ctx context.Context, ) respCh := make(chan *peers.PeeredObject[[]*cltypes.SignedBeaconBlock]) errCh := make(chan error) - sources := []persistence.BlockSource{gossipSource} + sources := []persistence.BlockSource{gossipSource, rpcSource} // if we are more than one block behind, we request the rpc source as well if totalRequest > 2 { @@ -378,36 +342,74 @@ func ConsensusClStages(ctx context.Context, ctx, cn := context.WithTimeout(ctx, 15*time.Second) defer cn() - tx, err := cfg.indiciesDB.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() // we go ask all the sources and see who gets back to us first. whoever does is the winner!! 
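The loop below fans one request out to every registered block source (gossip, plus the RPC source) and processes whichever answers first. A self-contained sketch of the first-responder pattern, with a hypothetical `blockSource` standing in for `persistence.BlockSource`:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type blockSource func(ctx context.Context) ([]string, error)

// fetchFirst fans out to every source and returns the first answer (or error).
// Buffered channels let the losing goroutines exit instead of leaking.
func fetchFirst(ctx context.Context, sources []blockSource) ([]string, error) {
	respCh := make(chan []string, len(sources))
	errCh := make(chan error, len(sources))
	for _, s := range sources {
		// Pass s explicitly: before Go 1.22 the loop variable is shared across
		// iterations (the same capture fix the hunk below applies via go func(source ...)).
		go func(s blockSource) {
			blocks, err := s(ctx)
			if err != nil {
				errCh <- err
				return
			}
			respCh <- blocks
		}(s)
	}
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case err := <-errCh:
		return nil, err
	case blocks := <-respCh:
		return blocks, nil
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fast := func(ctx context.Context) ([]string, error) { return []string{"block@42"}, nil }
	slow := func(ctx context.Context) ([]string, error) { <-ctx.Done(); return nil, ctx.Err() }
	fmt.Println(fetchFirst(ctx, []blockSource{slow, fast}))
}
```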
for _, v := range sources { sourceFunc := v.GetRange - go func() { - blocks, err := sourceFunc(ctx, tx, args.seenSlot+1, totalRequest) + go func(source persistence.BlockSource) { + if _, ok := source.(*persistence.BeaconRpcSource); ok { + time.Sleep(2 * time.Second) + var blocks *peers.PeeredObject[[]*cltypes.SignedBeaconBlock] + Loop: + for { + var err error + from := args.seenSlot - 2 + currentSlot := utils.GetCurrentSlot(cfg.genesisCfg.GenesisTime, cfg.beaconCfg.SecondsPerSlot) + count := (currentSlot - from) + 2 + if currentSlot <= cfg.forkChoice.HighestSeen() { + time.Sleep(100 * time.Millisecond) + continue + } + blocks, err = sourceFunc(ctx, nil, from, count) + if err != nil { + errCh <- err + return + } + for _, block := range blocks.Data { + if block.Block.Slot >= currentSlot { + break Loop + } + } + } + respCh <- blocks + return + } + blocks, err := sourceFunc(ctx, nil, args.seenSlot+1, totalRequest) if err != nil { errCh <- err return } respCh <- blocks - }() + }(v) } + tx, err := cfg.indiciesDB.BeginRw(ctx) + if err != nil { + return err + } + defer tx.Rollback() + logTimer := time.NewTicker(30 * time.Second) defer logTimer.Stop() - select { - case err := <-errCh: - return err - case blocks := <-respCh: - for _, block := range blocks.Data { - if err := processBlock(tx, block, true, true); err != nil { - return err + MainLoop: + for { + select { + case <-ctx.Done(): + return errors.New("timeout waiting for blocks") + case err := <-errCh: + return err + case blocks := <-respCh: + for _, block := range blocks.Data { + if err := processBlock(tx, block, true, true); err != nil { + log.Error("bad blocks segment received", "err", err) + cfg.rpc.BanPeer(blocks.Peer) + continue MainLoop + } + if block.Block.Slot >= args.targetSlot { + break MainLoop + } } + case <-logTimer.C: + logger.Info("[Caplin] Progress", "progress", cfg.forkChoice.HighestSeen(), "from", args.seenSlot, "to", args.targetSlot) } - case <-logTimer.C: - logger.Info("[Caplin] Progress", "progress", cfg.forkChoice.HighestSeen(), "from", args.seenEpoch, "to", args.targetSlot) } return tx.Commit() }, @@ -619,7 +621,10 @@ func ConsensusClStages(ctx context.Context, SleepForSlot: { Description: `sleep until the next slot`, TransitionFunc: func(cfg *Cfg, args Args, err error) string { - return WaitForPeers + if x := MetaCatchingUp(args); x != "" { + return x + } + return ListenForForks }, ActionFunc: func(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error { nextSlot := args.seenSlot + 1 diff --git a/cl/phase1/stages/stage_history_download.go b/cl/phase1/stages/stage_history_download.go index c65941b727b..c8fc88baa79 100644 --- a/cl/phase1/stages/stage_history_download.go +++ b/cl/phase1/stages/stage_history_download.go @@ -26,40 +26,42 @@ import ( ) type StageHistoryReconstructionCfg struct { - genesisCfg *clparams.GenesisConfig - beaconCfg *clparams.BeaconChainConfig - downloader *network.BackwardBeaconDownloader - sn *freezeblocks.CaplinSnapshots - startingRoot libcommon.Hash - backfilling bool - waitForAllRoutines bool - startingSlot uint64 - tmpdir string - db persistence.BeaconChainDatabase - indiciesDB kv.RwDB - engine execution_client.ExecutionEngine - antiquary *antiquary.Antiquary - logger log.Logger + genesisCfg *clparams.GenesisConfig + beaconCfg *clparams.BeaconChainConfig + downloader *network.BackwardBeaconDownloader + sn *freezeblocks.CaplinSnapshots + startingRoot libcommon.Hash + backfilling bool + waitForAllRoutines bool + startingSlot uint64 + tmpdir string + db 
persistence.BeaconChainDatabase + indiciesDB kv.RwDB + engine execution_client.ExecutionEngine + antiquary *antiquary.Antiquary + logger log.Logger + backfillingThrottling time.Duration } const logIntervalTime = 30 * time.Second -func StageHistoryReconstruction(downloader *network.BackwardBeaconDownloader, antiquary *antiquary.Antiquary, sn *freezeblocks.CaplinSnapshots, db persistence.BeaconChainDatabase, indiciesDB kv.RwDB, engine execution_client.ExecutionEngine, genesisCfg *clparams.GenesisConfig, beaconCfg *clparams.BeaconChainConfig, backfilling, waitForAllRoutines bool, startingRoot libcommon.Hash, startinSlot uint64, tmpdir string, logger log.Logger) StageHistoryReconstructionCfg { +func StageHistoryReconstruction(downloader *network.BackwardBeaconDownloader, antiquary *antiquary.Antiquary, sn *freezeblocks.CaplinSnapshots, db persistence.BeaconChainDatabase, indiciesDB kv.RwDB, engine execution_client.ExecutionEngine, genesisCfg *clparams.GenesisConfig, beaconCfg *clparams.BeaconChainConfig, backfilling, waitForAllRoutines bool, startingRoot libcommon.Hash, startinSlot uint64, tmpdir string, backfillingThrottling time.Duration, logger log.Logger) StageHistoryReconstructionCfg { return StageHistoryReconstructionCfg{ - genesisCfg: genesisCfg, - beaconCfg: beaconCfg, - downloader: downloader, - startingRoot: startingRoot, - tmpdir: tmpdir, - startingSlot: startinSlot, - waitForAllRoutines: waitForAllRoutines, - logger: logger, - backfilling: backfilling, - indiciesDB: indiciesDB, - antiquary: antiquary, - db: db, - engine: engine, - sn: sn, + genesisCfg: genesisCfg, + beaconCfg: beaconCfg, + downloader: downloader, + startingRoot: startingRoot, + tmpdir: tmpdir, + startingSlot: startinSlot, + waitForAllRoutines: waitForAllRoutines, + logger: logger, + backfilling: backfilling, + indiciesDB: indiciesDB, + antiquary: antiquary, + db: db, + engine: engine, + sn: sn, + backfillingThrottling: backfillingThrottling, } } @@ -116,7 +118,7 @@ func SpawnStageHistoryDownload(cfg StageHistoryReconstructionCfg, ctx context.Co return false, fmt.Errorf("error encoding execution payload during download: %s", err) } // Use snappy compression that the temporary files do not take too much disk. 
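The +/- pair below changes the on-disk framing of collected payloads from `snappy(ssz || version)` to `snappy(ssz || parentRoot[32] || version)`, and the decode hunk further down slices the fields back off from the end. A sketch of that framing; the helpers are hypothetical stand-ins for erigon's `utils.CompressSnappy` and its inverse:

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

// encodeFrame appends the 32-byte parent root and a 1-byte version after the
// SSZ payload, then snappy-compresses the whole buffer.
func encodeFrame(payload []byte, parentRoot [32]byte, version byte) []byte {
	buf := make([]byte, 0, len(payload)+33)
	buf = append(buf, payload...)
	buf = append(buf, parentRoot[:]...)
	buf = append(buf, version)
	return snappy.Encode(nil, buf)
}

// decodeFrame reverses encodeFrame, mirroring the slicing used below:
// version = v[len(v)-1], parentRoot = v[len(v)-33 : len(v)-1], payload = v[:len(v)-33].
func decodeFrame(frame []byte) (payload []byte, parentRoot [32]byte, version byte, err error) {
	v, err := snappy.Decode(nil, frame)
	if err != nil {
		return nil, parentRoot, 0, err
	}
	if len(v) < 33 {
		return nil, parentRoot, 0, fmt.Errorf("frame too short: %d bytes", len(v))
	}
	version = v[len(v)-1]
	copy(parentRoot[:], v[len(v)-1-32:len(v)-1])
	return v[:len(v)-1-32], parentRoot, version, nil
}

func main() {
	root := [32]byte{0xaa}
	frame := encodeFrame([]byte("ssz-bytes"), root, 3)
	p, r, ver, err := decodeFrame(frame)
	fmt.Println(string(p), r[0], ver, err) // ssz-bytes 170 3 <nil>
}
```

The parent root has to travel with the payload because the updated `RlpHeader(parentRoot)` call sites in this diff require it at decode time.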
- encodedPayload = utils.CompressSnappy(append(encodedPayload, byte(blk.Version()))) + encodedPayload = utils.CompressSnappy(append(encodedPayload, append(blk.Block.ParentRoot[:], byte(blk.Version()))...)) if err := executionBlocksCollector.Collect(dbutils.BlockBodyKey(payload.BlockNumber, payload.BlockHash), encodedPayload); err != nil { return false, fmt.Errorf("error collecting execution payload during download: %s", err) } @@ -211,7 +213,7 @@ func SpawnStageHistoryDownload(cfg StageHistoryReconstructionCfg, ctx context.Co case <-time.After(5 * time.Second): } } - cfg.downloader.SetThrottle(600 * time.Millisecond) // throttle to 0.6 second for backfilling + cfg.downloader.SetThrottle(cfg.backfillingThrottling) // throttle backfilling by the configured interval cfg.downloader.SetNeverSkip(false) // If i do not give it a database, erigon lib starts to cry uncontrollably db2 := memdb.New(cfg.tmpdir) @@ -238,12 +240,18 @@ func SpawnStageHistoryDownload(cfg StageHistoryReconstructionCfg, ctx context.Co } version := clparams.StateVersion(v[len(v)-1]) + parentRoot := libcommon.BytesToHash(v[len(v)-1-32 : len(v)-1]) + executionPayload := cltypes.NewEth1Block(version, cfg.beaconCfg) - if err := executionPayload.DecodeSSZ(v[:len(v)-1], int(version)); err != nil { + if err := executionPayload.DecodeSSZ(v[:len(v)-1-32], int(version)); err != nil { return fmt.Errorf("error decoding execution payload during collection: %s", err) } + if executionPayload.BlockNumber%10000 == 0 { + cfg.logger.Info("Inserting execution payload", "blockNumber", executionPayload.BlockNumber) + } body := executionPayload.Body() - header, err := executionPayload.RlpHeader() + + header, err := executionPayload.RlpHeader(&parentRoot) if err != nil { return fmt.Errorf("error parsing rlp header during collection: %s", err) } diff --git a/cl/rpc/rpc.go b/cl/rpc/rpc.go index 0ada88e8115..338edaac382 100644 --- a/cl/rpc/rpc.go +++ b/cl/rpc/rpc.go @@ -186,7 +186,7 @@ func (b *BeaconRpcP2P) PropagateBlock(block *cltypes.SignedBeaconBlock) error { } _, err = b.sentinel.PublishGossip(b.ctx, &sentinel.GossipData{ Data: encoded, - Type: sentinel.GossipType_BeaconBlockGossipType, + Name: "beacon_block", }) return err } diff --git a/cl/sentinel/config.go b/cl/sentinel/config.go index c27e8b4e220..3157e1a96c3 100644 --- a/cl/sentinel/config.go +++ b/cl/sentinel/config.go @@ -44,6 +44,8 @@ type SentinelConfig struct { NoDiscovery bool TmpDir string LocalDiscovery bool + + EnableBlocks bool } func convertToCryptoPrivkey(privkey *ecdsa.PrivateKey) (crypto.PrivKey, error) { diff --git a/cl/sentinel/gossip.go b/cl/sentinel/gossip.go index 38af103b890..7067d79bf96 100644 --- a/cl/sentinel/gossip.go +++ b/cl/sentinel/gossip.go @@ -19,7 +19,9 @@ import ( "sync" "time" + "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/cl/fork" + "github.com/ledgerwatch/erigon/cl/gossip" "github.com/ledgerwatch/log/v3" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/peer" @@ -35,65 +37,55 @@ var ( const SSZSnappyCodec = "ssz_snappy" -type TopicName string - -const ( - BeaconBlockTopic TopicName = "beacon_block" - BeaconAggregateAndProofTopic TopicName = "beacon_aggregate_and_proof" - VoluntaryExitTopic TopicName = "voluntary_exit" - ProposerSlashingTopic TopicName = "proposer_slashing" - AttesterSlashingTopic TopicName = "attester_slashing" - BlsToExecutionChangeTopic TopicName = "bls_to_execution_change" - BlobSidecarTopic TopicName = "blob_sidecar_%d" // This topic needs an index -) -
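The `TopicName` constants deleted above move to a shared `cl/gossip` package so that both sentinel and the gossip manager can refer to them. That file is not part of this diff, so the following is an assumed reconstruction from its call sites (`gossip.TopicNameBeaconBlock`, `gossip.TopicNameBlobSidecar(int(i))`, ...) and the string values removed here:

```go
// Hypothetical reconstruction of cl/gossip; only names and values visible
// in this diff are used.
package gossip

import "fmt"

const (
	TopicNameBeaconBlock             = "beacon_block"
	TopicNameBeaconAggregateAndProof = "beacon_aggregate_and_proof"
	TopicNameVoluntaryExit           = "voluntary_exit"
	TopicNameProposerSlashing        = "proposer_slashing"
	TopicNameAttesterSlashing        = "attester_slashing"
	TopicNameBlsToExecutionChange    = "bls_to_execution_change"
)

// TopicNameBlobSidecar returns the indexed blob sidecar topic, e.g. "blob_sidecar_0".
func TopicNameBlobSidecar(d int) string {
	return fmt.Sprintf("blob_sidecar_%d", d)
}
```

Untyped string constants (rather than the old `TopicName` string type) let call sites compare directly against `data.Name` without conversions.

type GossipTopic struct { - Name 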
TopicName + Name string CodecStr string } var BeaconBlockSsz = GossipTopic{ - Name: BeaconBlockTopic, + Name: gossip.TopicNameBeaconBlock, CodecStr: SSZSnappyCodec, } var BeaconAggregateAndProofSsz = GossipTopic{ - Name: BeaconAggregateAndProofTopic, + Name: gossip.TopicNameBeaconAggregateAndProof, CodecStr: SSZSnappyCodec, } var VoluntaryExitSsz = GossipTopic{ - Name: VoluntaryExitTopic, + Name: gossip.TopicNameVoluntaryExit, CodecStr: SSZSnappyCodec, } var ProposerSlashingSsz = GossipTopic{ - Name: ProposerSlashingTopic, + Name: gossip.TopicNameProposerSlashing, CodecStr: SSZSnappyCodec, } var AttesterSlashingSsz = GossipTopic{ - Name: AttesterSlashingTopic, + Name: gossip.TopicNameAttesterSlashing, CodecStr: SSZSnappyCodec, } var BlsToExecutionChangeSsz = GossipTopic{ - Name: BlsToExecutionChangeTopic, + Name: gossip.TopicNameBlsToExecutionChange, CodecStr: SSZSnappyCodec, } type GossipManager struct { - ch chan *pubsub.Message + ch chan *GossipMessage subscriptions map[string]*GossipSubscription mu sync.RWMutex } +const maxIncomingGossipMessages = 5092 + // construct a new gossip manager that will handle packets with the given handlerfunc func NewGossipManager( ctx context.Context, ) *GossipManager { g := &GossipManager{ - ch: make(chan *pubsub.Message, 1), + ch: make(chan *GossipMessage, maxIncomingGossipMessages), subscriptions: map[string]*GossipSubscription{}, } return g @@ -102,14 +94,14 @@ func NewGossipManager( func GossipSidecarTopics(maxBlobs uint64) (ret []GossipTopic) { for i := uint64(0); i < maxBlobs; i++ { ret = append(ret, GossipTopic{ - Name: TopicName(fmt.Sprintf(string(BlobSidecarTopic), i)), + Name: gossip.TopicNameBlobSidecar(int(i)), CodecStr: SSZSnappyCodec, }) } return } -func (s *GossipManager) Recv() <-chan *pubsub.Message { +func (s *GossipManager) Recv() <-chan *GossipMessage { return s.ch } @@ -137,10 +129,54 @@ func (s *GossipManager) unsubscribe(topic string) { if _, ok := s.subscriptions[topic]; !ok { return } - s.subscriptions[topic].Close() + sub := s.subscriptions[topic] + go func() { + timer := time.NewTimer(time.Hour) + ctx := sub.ctx + select { + case <-ctx.Done(): + sub.Close() + case <-timer.C: + sub.Close() + } + }() delete(s.subscriptions, topic) } +func (s *Sentinel) forkWatcher() { + prevDigest, err := fork.ComputeForkDigest(s.cfg.BeaconConfig, s.cfg.GenesisConfig) + if err != nil { + log.Error("[Gossip] Failed to calculate fork choice", "err", err) + return + } + iterationInterval := time.NewTicker(30 * time.Millisecond) + for { + select { + case <-s.ctx.Done(): + return + case <-iterationInterval.C: + digest, err := fork.ComputeForkDigest(s.cfg.BeaconConfig, s.cfg.GenesisConfig) + if err != nil { + log.Error("[Gossip] Failed to calculate fork choice", "err", err) + return + } + if prevDigest != digest { + subs := s.subManager.subscriptions + for path, sub := range subs { + s.subManager.unsubscribe(path) + newSub, err := s.SubscribeGossip(sub.gossip_topic) + if err != nil { + log.Error("[Gossip] Failed to resubscribe to topic", "err", err) + return + } + newSub.Listen() + } + prevDigest = digest + } + } + } +} + func (s *Sentinel) SubscribeGossip(topic GossipTopic, opts ...pubsub.TopicOpt) (sub *GossipSubscription, err error) { digest, err := fork.ComputeForkDigest(s.cfg.BeaconConfig, s.cfg.GenesisConfig) if err != nil { @@ -157,7 +193,7 @@ func (s *Sentinel) SubscribeGossip(topic GossipTopic, opts ...pubsub.TopicOpt) ( if err != nil { return nil, fmt.Errorf("failed to join topic %s, err=%w", path, err) } - topicScoreParams := 
s.topicScoreParams(string(topic.Name)) + topicScoreParams := s.topicScoreParams(topic.Name) if topicScoreParams != nil { sub.topic.SetScoreParams(topicScoreParams) } @@ -178,7 +214,7 @@ func (s *Sentinel) Unsubscribe(topic GossipTopic, opts ...pubsub.TopicOpt) (err func (s *Sentinel) topicScoreParams(topic string) *pubsub.TopicScoreParams { switch { - case strings.Contains(topic, string(BeaconBlockTopic)): + case strings.Contains(topic, gossip.TopicNameBeaconBlock): return s.defaultBlockTopicParams() /*case strings.Contains(topic, GossipAggregateAndProofMessage): return defaultAggregateTopicParams(activeValidators), nil @@ -239,7 +275,7 @@ func (g *GossipManager) Close() { type GossipSubscription struct { gossip_topic GossipTopic host peer.ID - ch chan *pubsub.Message + ch chan *GossipMessage ctx context.Context topic *pubsub.Topic @@ -286,9 +322,15 @@ func (s *GossipSubscription) Close() { } } +type GossipMessage struct { + From peer.ID + TopicName string + Data []byte +} + // this is a helper to begin running the gossip subscription. // function should not be used outside of the constructor for gossip subscription -func (s *GossipSubscription) run(ctx context.Context, sub *pubsub.Subscription, topic string) { +func (s *GossipSubscription) run(ctx context.Context, sub *pubsub.Subscription, topicName string) { defer func() { if r := recover(); r != nil { log.Error("[Sentinel Gossip] Message Handler Crashed", "err", r) @@ -306,13 +348,17 @@ func (s *GossipSubscription) run(ctx context.Context, sub *pubsub.Subscription, if errors.Is(err, context.Canceled) { return } - log.Warn("[Sentinel] fail to decode gossip packet", "err", err, "topic", topic) + log.Warn("[Sentinel] fail to decode gossip packet", "err", err, "topicName", topicName) return } if msg.GetFrom() == s.host { continue } - s.ch <- msg + s.ch <- &GossipMessage{ + From: msg.GetFrom(), + TopicName: topicName, + Data: common.Copy(msg.Data), + } } } } diff --git a/cl/sentinel/handlers/blocks.go b/cl/sentinel/handlers/blocks.go index 21f4aca2a33..d3b09c28c79 100644 --- a/cl/sentinel/handlers/blocks.go +++ b/cl/sentinel/handlers/blocks.go @@ -14,24 +14,160 @@ package handlers import ( + "io" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/cl/fork" + "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies" "github.com/ledgerwatch/erigon/cl/sentinel/communication/ssz_snappy" - "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon/cl/utils" "github.com/libp2p/go-libp2p/core/network" ) -// func (c *ConsensusHandlers) blocksByRangeHandlerPROTODONOTTOUCH69(stream network.Stream) error { -// log.Trace("Got block by range handler call") -// return ssz_snappy.EncodeAndWrite(stream, &emptyString{}, ResourceUnavaiablePrefix) -// } +const MAX_REQUEST_BLOCKS = 96 + +func (c *ConsensusHandlers) beaconBlocksByRangeHandler(s network.Stream) error { + peerId := s.Conn().RemotePeer().String() + if err := c.checkRateLimit(peerId, "beaconBlocksByRange", rateLimits.beaconBlocksByRangeLimit); err != nil { + ssz_snappy.EncodeAndWrite(s, &emptyString{}, RateLimitedPrefix) + return err + } + + req := &cltypes.BeaconBlocksByRangeRequest{} + if err := ssz_snappy.DecodeAndReadNoForkDigest(s, req, clparams.Phase0Version); err != nil { + return err + } + + tx, err := c.indiciesDB.BeginRo(c.ctx) + if err != nil { + return err + } + defer tx.Rollback()
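+ // Each block in the response below is framed as: one result byte (0 = success), + // the 4-byte fork digest for the block's epoch, and then the length-prefixed, + // snappy-framed SSZ block streamed straight from storage.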
+ // Cap the requested count at MAX_REQUEST_BLOCKS. + if int(req.Count) > MAX_REQUEST_BLOCKS { + req.Count = MAX_REQUEST_BLOCKS + } + + beaconBlockRoots, slots, err := beacon_indicies.ReadBeaconBlockRootsInSlotRange(c.ctx, tx, req.StartSlot, req.Count) + if err != nil { + return err + } + + if len(beaconBlockRoots) == 0 || len(slots) == 0 { + return ssz_snappy.EncodeAndWrite(s, &emptyString{}, ResourceUnavaiablePrefix) + } + + for i, slot := range slots { + r, err := c.beaconDB.BlockReader(c.ctx, slot, beaconBlockRoots[i]) + if err != nil { + return err + } + defer r.Close() + + version := c.beaconConfig.GetCurrentStateVersion(slot / c.beaconConfig.SlotsPerEpoch) + // Read the fork digest + forkDigest, err := fork.ComputeForkDigestForVersion( + utils.Uint32ToBytes4(c.beaconConfig.GetForkVersionByVersion(version)), + c.genesisConfig.GenesisValidatorRoot, + ) + if err != nil { + return err + } + + if _, err := s.Write([]byte{0}); err != nil { + return err + } + + if _, err := s.Write(forkDigest[:]); err != nil { + return err + } + _, err = io.Copy(s, r) + if err != nil { + return err + } + } -func (c *ConsensusHandlers) blocksByRangeHandler(stream network.Stream) error { - log.Trace("Got block by range handler call") - return ssz_snappy.EncodeAndWrite(stream, &emptyString{}, ResourceUnavaiablePrefix) + return nil } -func (c *ConsensusHandlers) beaconBlocksByRootHandler(stream network.Stream) error { - log.Trace("Got beacon block by root handler call") - return ssz_snappy.EncodeAndWrite(stream, &emptyString{}, ResourceUnavaiablePrefix) +func (c *ConsensusHandlers) beaconBlocksByRootHandler(s network.Stream) error { + peerId := s.Conn().RemotePeer().String() + if err := c.checkRateLimit(peerId, "beaconBlocksByRoot", rateLimits.beaconBlocksByRootLimit); err != nil { + ssz_snappy.EncodeAndWrite(s, &emptyString{}, RateLimitedPrefix) + return err + } + + var req solid.HashListSSZ = solid.NewHashList(100) + if err := ssz_snappy.DecodeAndReadNoForkDigest(s, req, clparams.Phase0Version); err != nil { + return err + } + + blockRoots := []libcommon.Hash{} + for i := 0; i < req.Length(); i++ { + blockRoot := req.Get(i) + blockRoots = append(blockRoots, blockRoot) + // Cap the number of collected roots at MAX_REQUEST_BLOCKS. 
+ if len(blockRoots) >= MAX_REQUEST_BLOCKS { + break + } + } + if len(blockRoots) == 0 { + return ssz_snappy.EncodeAndWrite(s, &emptyString{}, ResourceUnavaiablePrefix) + } + tx, err := c.indiciesDB.BeginRo(c.ctx) + if err != nil { + return err + } + defer tx.Rollback() + + for i, blockRoot := range blockRoots { + slot, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, blockRoot) + if slot == nil { + continue + } + if err != nil { + return err + } + + r, err := c.beaconDB.BlockReader(c.ctx, *slot, blockRoots[i]) + if err != nil { + return err + } + defer r.Close() + + if _, err := s.Write([]byte{0}); err != nil { + return err + } + + version := c.beaconConfig.GetCurrentStateVersion(*slot / c.beaconConfig.SlotsPerEpoch) + // Read the fork digest + forkDigest, err := fork.ComputeForkDigestForVersion( + utils.Uint32ToBytes4(c.beaconConfig.GetForkVersionByVersion(version)), + c.genesisConfig.GenesisValidatorRoot, + ) + if err != nil { + return err + } + + if _, err := s.Write(forkDigest[:]); err != nil { + return err + } + + // Read block from DB + block := cltypes.NewSignedBeaconBlock(c.beaconConfig) + + if err := ssz_snappy.DecodeAndReadNoForkDigest(r, block, clparams.Phase0Version); err != nil { + return err + } + if err := ssz_snappy.EncodeAndWrite(s, block); err != nil { + return err + } + } + + return nil } type emptyString struct{} diff --git a/cl/sentinel/handlers/blocks_by_range_test.go b/cl/sentinel/handlers/blocks_by_range_test.go new file mode 100644 index 00000000000..84500364c21 --- /dev/null +++ b/cl/sentinel/handlers/blocks_by_range_test.go @@ -0,0 +1,146 @@ +package handlers + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "io" + "testing" + + "github.com/golang/snappy" + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/fork" + "github.com/ledgerwatch/erigon/cl/persistence" + "github.com/ledgerwatch/erigon/cl/sentinel/communication" + "github.com/ledgerwatch/erigon/cl/sentinel/communication/ssz_snappy" + "github.com/ledgerwatch/erigon/cl/sentinel/peers" + "github.com/ledgerwatch/erigon/cl/utils" + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + "github.com/stretchr/testify/require" +) + +func TestBlocksByRootHandler(t *testing.T) { + ctx := context.Background() + + listenAddrHost := "/ip4/127.0.0.1/tcp/5000" + host, err := libp2p.New(libp2p.ListenAddrStrings(listenAddrHost)) + require.NoError(t, err) + + listenAddrHost1 := "/ip4/127.0.0.1/tcp/5001" + host1, err := libp2p.New(libp2p.ListenAddrStrings(listenAddrHost1)) + require.NoError(t, err) + + err = host.Connect(ctx, peer.AddrInfo{ + ID: host1.ID(), + Addrs: host1.Addrs(), + }) + require.NoError(t, err) + + peersPool := peers.NewPool() + beaconDB, indiciesDB := setupStore(t) + store := persistence.NewBeaconChainDatabaseFilesystem(beaconDB, nil, &clparams.MainnetBeaconConfig) + + tx, _ := indiciesDB.BeginRw(ctx) + + startSlot := uint64(100) + count := uint64(10) + step := uint64(1) + + expBlocks := populateDatabaseWithBlocks(t, store, tx, startSlot, count) + tx.Commit() + + genesisCfg, _, beaconCfg := clparams.GetConfigsByNetwork(1) + c := NewConsensusHandlers( + ctx, + beaconDB, + indiciesDB, + host, + peersPool, + beaconCfg, + genesisCfg, + &cltypes.Metadata{}, true, + ) + c.Start() + req := &cltypes.BeaconBlocksByRangeRequest{ + StartSlot: startSlot, + Count: count, + Step: step, + } + var reqBuf 
bytes.Buffer + if err := ssz_snappy.EncodeAndWrite(&reqBuf, req); err != nil { + return + } + + reqData := libcommon.CopyBytes(reqBuf.Bytes()) + stream, err := host1.NewStream(ctx, host.ID(), protocol.ID(communication.BeaconBlocksByRangeProtocolV2)) + require.NoError(t, err) + + _, err = stream.Write(reqData) + require.NoError(t, err) + + firstByte := make([]byte, 1) + _, err = stream.Read(firstByte) + require.NoError(t, err) + require.Equal(t, firstByte[0], byte(0)) + + for i := 0; i < int(count); i++ { + forkDigest := make([]byte, 4) + + _, err := stream.Read(forkDigest) + if err != nil { + if err == io.EOF { + t.Fatal("Stream is empty") + } else { + require.NoError(t, err) + } + } + + encodedLn, _, err := ssz_snappy.ReadUvarint(stream) + require.NoError(t, err) + + raw := make([]byte, encodedLn) + sr := snappy.NewReader(stream) + bytesRead := 0 + for bytesRead < int(encodedLn) { + n, err := sr.Read(raw[bytesRead:]) + require.NoError(t, err) + bytesRead += n + } + + // Fork digests + respForkDigest := binary.BigEndian.Uint32(forkDigest) + if respForkDigest == 0 { + require.NoError(t, fmt.Errorf("null fork digest")) + } + + version, err := fork.ForkDigestVersion(utils.Uint32ToBytes4(respForkDigest), beaconCfg, genesisCfg.GenesisValidatorRoot) + if err != nil { + require.NoError(t, err) + } + + block := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig) + if err = block.DecodeSSZ(raw, int(version)); err != nil { + require.NoError(t, err) + return + } + require.Equal(t, expBlocks[i].Block.Slot, block.Block.Slot) + require.Equal(t, expBlocks[i].Block.StateRoot, block.Block.StateRoot) + require.Equal(t, expBlocks[i].Block.ParentRoot, block.Block.ParentRoot) + require.Equal(t, expBlocks[i].Block.ProposerIndex, block.Block.ProposerIndex) + require.Equal(t, expBlocks[i].Block.Body.ExecutionPayload.BlockNumber, block.Block.Body.ExecutionPayload.BlockNumber) + stream.Read(make([]byte, 1)) + } + + _, err = stream.Read(make([]byte, 1)) + if err != io.EOF { + t.Fatal("Stream is not empty") + } + + defer indiciesDB.Close() + defer tx.Rollback() +} diff --git a/cl/sentinel/handlers/blocks_by_root_test.go b/cl/sentinel/handlers/blocks_by_root_test.go new file mode 100644 index 00000000000..b917644b145 --- /dev/null +++ b/cl/sentinel/handlers/blocks_by_root_test.go @@ -0,0 +1,145 @@ +package handlers + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "io" + "testing" + + "github.com/golang/snappy" + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/cl/fork" + "github.com/ledgerwatch/erigon/cl/persistence" + "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies" + "github.com/ledgerwatch/erigon/cl/sentinel/communication" + "github.com/ledgerwatch/erigon/cl/sentinel/communication/ssz_snappy" + "github.com/ledgerwatch/erigon/cl/sentinel/peers" + "github.com/ledgerwatch/erigon/cl/utils" + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + "github.com/stretchr/testify/require" +) + +func TestBlocksByRangeHandler(t *testing.T) { + ctx := context.Background() + + listenAddrHost := "/ip4/127.0.0.1/tcp/6000" + host, err := libp2p.New(libp2p.ListenAddrStrings(listenAddrHost)) + require.NoError(t, err) + + listenAddrHost1 := "/ip4/127.0.0.1/tcp/6001" + host1, err := libp2p.New(libp2p.ListenAddrStrings(listenAddrHost1)) + require.NoError(t, 
err) + + err = host.Connect(ctx, peer.AddrInfo{ + ID: host1.ID(), + Addrs: host1.Addrs(), + }) + require.NoError(t, err) + + peersPool := peers.NewPool() + beaconDB, indiciesDB := setupStore(t) + store := persistence.NewBeaconChainDatabaseFilesystem(beaconDB, nil, &clparams.MainnetBeaconConfig) + + tx, _ := indiciesDB.BeginRw(ctx) + + startSlot := uint64(100) + count := uint64(10) + + expBlocks := populateDatabaseWithBlocks(t, store, tx, startSlot, count) + var blockRoots []libcommon.Hash + blockRoots, _, _ = beacon_indicies.ReadBeaconBlockRootsInSlotRange(ctx, tx, startSlot, startSlot+count) + tx.Commit() + + genesisCfg, _, beaconCfg := clparams.GetConfigsByNetwork(1) + c := NewConsensusHandlers( + ctx, + beaconDB, + indiciesDB, + host, + peersPool, + beaconCfg, + genesisCfg, + &cltypes.Metadata{}, true, + ) + c.Start() + var req solid.HashListSSZ = solid.NewHashList(len(expBlocks)) + + for _, block := range blockRoots { + req.Append(block) + } + var reqBuf bytes.Buffer + if err := ssz_snappy.EncodeAndWrite(&reqBuf, req); err != nil { + return + } + + reqData := libcommon.CopyBytes(reqBuf.Bytes()) + stream, err := host1.NewStream(ctx, host.ID(), protocol.ID(communication.BeaconBlocksByRootProtocolV2)) + require.NoError(t, err) + + _, err = stream.Write(reqData) + require.NoError(t, err) + + firstByte := make([]byte, 1) + _, err = stream.Read(firstByte) + require.NoError(t, err) + require.Equal(t, firstByte[0], byte(0)) + + for i := 0; i < len(blockRoots); i++ { + forkDigest := make([]byte, 4) + _, err := stream.Read(forkDigest) + if err != nil && err != io.EOF { + require.NoError(t, err) + } + + encodedLn, _, err := ssz_snappy.ReadUvarint(stream) + require.NoError(t, err) + + raw := make([]byte, encodedLn) + sr := snappy.NewReader(stream) + bytesRead := 0 + for bytesRead < int(encodedLn) { + n, err := sr.Read(raw[bytesRead:]) + if err != nil { + require.NoError(t, err) + } + bytesRead += n + } + + // Fork digests + respForkDigest := binary.BigEndian.Uint32(forkDigest) + if respForkDigest == 0 { + require.NoError(t, fmt.Errorf("null fork digest")) + } + version, err := fork.ForkDigestVersion(utils.Uint32ToBytes4(respForkDigest), beaconCfg, genesisCfg.GenesisValidatorRoot) + if err != nil { + require.NoError(t, err) + } + + block := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig) + if err = block.DecodeSSZ(raw, int(version)); err != nil { + require.NoError(t, err) + return + } + require.Equal(t, expBlocks[i].Block.Slot, block.Block.Slot) + require.Equal(t, expBlocks[i].Block.StateRoot, block.Block.StateRoot) + require.Equal(t, expBlocks[i].Block.ParentRoot, block.Block.ParentRoot) + require.Equal(t, expBlocks[i].Block.ProposerIndex, block.Block.ProposerIndex) + require.Equal(t, expBlocks[i].Block.Body.ExecutionPayload.BlockNumber, block.Block.Body.ExecutionPayload.BlockNumber) + stream.Read(make([]byte, 1)) + } + + _, err = stream.Read(make([]byte, 1)) + if err != io.EOF { + t.Fatal("Stream is not empty") + } + + defer indiciesDB.Close() + defer tx.Rollback() +} diff --git a/cl/sentinel/handlers/handlers.go b/cl/sentinel/handlers/handlers.go index 051b6d4fdfb..a97bfb57cd0 100644 --- a/cl/sentinel/handlers/handlers.go +++ b/cl/sentinel/handlers/handlers.go @@ -16,10 +16,12 @@ package handlers import ( "context" "errors" + "math" "strings" "sync" "time" + "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/cl/sentinel/communication" "github.com/ledgerwatch/erigon/cl/sentinel/peers" "github.com/ledgerwatch/erigon/cl/utils" @@ -35,21 +37,27 @@ import ( ) type 
RateLimits struct { - pingLimit int - goodbyeLimit int - metadataV1Limit int - metadataV2Limit int - statusLimit int + pingLimit int + goodbyeLimit int + metadataV1Limit int + metadataV2Limit int + statusLimit int + beaconBlocksByRangeLimit int + beaconBlocksByRootLimit int } const punishmentPeriod = time.Minute - -var defaultRateLimits = RateLimits{ - pingLimit: 5000, - goodbyeLimit: 5000, - metadataV1Limit: 5000, - metadataV2Limit: 5000, - statusLimit: 5000, +const defaultRateLimit = math.MaxInt +const defaultBlockHandlerRateLimit = 200 + +var rateLimits = RateLimits{ + pingLimit: defaultRateLimit, + goodbyeLimit: defaultRateLimit, + metadataV1Limit: defaultRateLimit, + metadataV2Limit: defaultRateLimit, + statusLimit: defaultRateLimit, + beaconBlocksByRangeLimit: defaultBlockHandlerRateLimit, + beaconBlocksByRootLimit: defaultBlockHandlerRateLimit, } type ConsensusHandlers struct { @@ -60,37 +68,45 @@ type ConsensusHandlers struct { genesisConfig *clparams.GenesisConfig ctx context.Context beaconDB persistence.RawBeaconBlockChain + indiciesDB kv.RoDB peerRateLimits sync.Map punishmentEndTimes sync.Map + + enableBlocks bool } const ( SuccessfulResponsePrefix = 0x00 - RateLimitedPrefix = 0x02 - ResourceUnavaiablePrefix = 0x03 + RateLimitedPrefix = 0x01 + ResourceUnavaiablePrefix = 0x02 ) -func NewConsensusHandlers(ctx context.Context, db persistence.RawBeaconBlockChain, host host.Host, - peers *peers.Pool, beaconConfig *clparams.BeaconChainConfig, genesisConfig *clparams.GenesisConfig, metadata *cltypes.Metadata) *ConsensusHandlers { +func NewConsensusHandlers(ctx context.Context, db persistence.RawBeaconBlockChain, indiciesDB kv.RoDB, host host.Host, + peers *peers.Pool, beaconConfig *clparams.BeaconChainConfig, genesisConfig *clparams.GenesisConfig, metadata *cltypes.Metadata, enabledBlocks bool) *ConsensusHandlers { c := &ConsensusHandlers{ host: host, metadata: metadata, beaconDB: db, + indiciesDB: indiciesDB, genesisConfig: genesisConfig, beaconConfig: beaconConfig, ctx: ctx, peerRateLimits: sync.Map{}, punishmentEndTimes: sync.Map{}, + enableBlocks: enabledBlocks, } hm := map[string]func(s network.Stream) error{ - communication.PingProtocolV1: c.pingHandler, - communication.GoodbyeProtocolV1: c.goodbyeHandler, - communication.StatusProtocolV1: c.statusHandler, - communication.MetadataProtocolV1: c.metadataV1Handler, - communication.MetadataProtocolV2: c.metadataV2Handler, - communication.BeaconBlocksByRangeProtocolV1: c.blocksByRangeHandler, - communication.BeaconBlocksByRootProtocolV1: c.beaconBlocksByRootHandler, + communication.PingProtocolV1: c.pingHandler, + communication.GoodbyeProtocolV1: c.goodbyeHandler, + communication.StatusProtocolV1: c.statusHandler, + communication.MetadataProtocolV1: c.metadataV1Handler, + communication.MetadataProtocolV2: c.metadataV2Handler, + } + + if c.enableBlocks { + hm[communication.BeaconBlocksByRangeProtocolV2] = c.beaconBlocksByRangeHandler + hm[communication.BeaconBlocksByRootProtocolV2] = c.beaconBlocksByRootHandler } c.handlers = map[protocol.ID]network.StreamHandler{} @@ -106,12 +122,16 @@ func (c *ConsensusHandlers) checkRateLimit(peerId string, method string, limit i if punishmentEndTime, ok := c.punishmentEndTimes.Load(keyHash); ok { if time.Now().Before(punishmentEndTime.(time.Time)) { return errors.New("rate limit exceeded, punishment period in effect") - } else { - c.punishmentEndTimes.Delete(keyHash) } + c.punishmentEndTimes.Delete(keyHash) + } + + value, ok := c.peerRateLimits.Load(keyHash) + if !ok { + value = 
rate.NewLimiter(rate.Every(time.Minute), limit) + c.peerRateLimits.Store(keyHash, value) } - value, _ := c.peerRateLimits.LoadOrStore(keyHash, rate.NewLimiter(rate.Every(time.Minute), limit)) limiter := value.(*rate.Limiter) if !limiter.Allow() { @@ -146,6 +166,7 @@ func (c *ConsensusHandlers) wrapStreamHandler(name string, fn func(s network.Str log.Trace("[pubsubhandler] stream handler", l) // TODO: maybe we should log this _ = s.Reset() + _ = s.Close() return } err = s.Close() diff --git a/cl/sentinel/handlers/heartbeats.go b/cl/sentinel/handlers/heartbeats.go index 4dc04556916..b06774a77fe 100644 --- a/cl/sentinel/handlers/heartbeats.go +++ b/cl/sentinel/handlers/heartbeats.go @@ -25,9 +25,8 @@ import ( func (c *ConsensusHandlers) pingHandler(s network.Stream) error { peerId := s.Conn().RemotePeer().String() - if err := c.checkRateLimit(peerId, "ping", defaultRateLimits.pingLimit); err != nil { + if err := c.checkRateLimit(peerId, "ping", rateLimits.pingLimit); err != nil { ssz_snappy.EncodeAndWrite(s, &emptyString{}, RateLimitedPrefix) - defer s.Close() return err } return ssz_snappy.EncodeAndWrite(s, &cltypes.Ping{ @@ -37,9 +36,8 @@ func (c *ConsensusHandlers) pingHandler(s network.Stream) error { func (c *ConsensusHandlers) goodbyeHandler(s network.Stream) error { peerId := s.Conn().RemotePeer().String() - if err := c.checkRateLimit(peerId, "goodbye", defaultRateLimits.goodbyeLimit); err != nil { + if err := c.checkRateLimit(peerId, "goodbye", rateLimits.goodbyeLimit); err != nil { ssz_snappy.EncodeAndWrite(s, &emptyString{}, RateLimitedPrefix) - defer s.Close() return err } return ssz_snappy.EncodeAndWrite(s, &cltypes.Ping{ @@ -49,9 +47,8 @@ func (c *ConsensusHandlers) goodbyeHandler(s network.Stream) error { func (c *ConsensusHandlers) metadataV1Handler(s network.Stream) error { peerId := s.Conn().RemotePeer().String() - if err := c.checkRateLimit(peerId, "metadataV1", defaultRateLimits.metadataV1Limit); err != nil { + if err := c.checkRateLimit(peerId, "metadataV1", rateLimits.metadataV1Limit); err != nil { ssz_snappy.EncodeAndWrite(s, &emptyString{}, RateLimitedPrefix) - defer s.Close() return err } return ssz_snappy.EncodeAndWrite(s, &cltypes.Metadata{ @@ -62,9 +59,9 @@ func (c *ConsensusHandlers) metadataV1Handler(s network.Stream) error { func (c *ConsensusHandlers) metadataV2Handler(s network.Stream) error { peerId := s.Conn().RemotePeer().String() - if err := c.checkRateLimit(peerId, "metadataV2", defaultRateLimits.metadataV2Limit); err != nil { + + if err := c.checkRateLimit(peerId, "metadataV2", rateLimits.metadataV2Limit); err != nil { ssz_snappy.EncodeAndWrite(s, &emptyString{}, RateLimitedPrefix) - defer s.Close() return err } return ssz_snappy.EncodeAndWrite(s, c.metadata, SuccessfulResponsePrefix) @@ -73,12 +70,10 @@ func (c *ConsensusHandlers) metadataV2Handler(s network.Stream) error { // TODO: Actually respond with proper status func (c *ConsensusHandlers) statusHandler(s network.Stream) error { peerId := s.Conn().RemotePeer().String() - if err := c.checkRateLimit(peerId, "status", defaultRateLimits.statusLimit); err != nil { + if err := c.checkRateLimit(peerId, "status", rateLimits.statusLimit); err != nil { ssz_snappy.EncodeAndWrite(s, &emptyString{}, RateLimitedPrefix) - defer s.Close() return err } - defer s.Close() status := &cltypes.Status{} if err := ssz_snappy.DecodeAndReadNoForkDigest(s, status, clparams.Phase0Version); err != nil { return err diff --git a/cl/sentinel/handlers/utils_test.go b/cl/sentinel/handlers/utils_test.go new file mode 100644 index 
00000000000..f197fea46ff --- /dev/null +++ b/cl/sentinel/handlers/utils_test.go @@ -0,0 +1,61 @@ +package handlers + +import ( + "context" + "testing" + + "github.com/ledgerwatch/erigon-lib/common" + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/persistence" + "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies" + "github.com/spf13/afero" + "github.com/stretchr/testify/require" +) + +func setupStore(t *testing.T) (persistence.RawBeaconBlockChain, kv.RwDB) { + db := memdb.NewTestDB(t) + af := afero.NewMemMapFs() + rawDB := persistence.NewAferoRawBlockSaver(af, &clparams.MainnetBeaconConfig) + return rawDB, db +} + +func populateDatabaseWithBlocks(t *testing.T, store persistence.BeaconChainDatabase, tx kv.RwTx, startSlot, count uint64) []*cltypes.SignedBeaconBlock { + + mockParentRoot := common.Hash{1} + blocks := make([]*cltypes.SignedBeaconBlock, 0, count) + for i := uint64(0); i <= count; i++ { + slot := startSlot + i + block := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig) + block.Block.Slot = slot + block.Block.StateRoot = libcommon.Hash{byte(i)} + block.Block.ParentRoot = mockParentRoot + block.EncodingSizeSSZ() + bodyRoot, _ := block.Block.Body.HashSSZ() + canonical := true + + // Populate BeaconChainDatabase + store.WriteBlock(context.Background(), tx, block, canonical) + + // Populate indiciesDB + require.NoError(t, beacon_indicies.WriteBeaconBlockHeaderAndIndicies( + context.Background(), + tx, + &cltypes.SignedBeaconBlockHeader{ + Signature: block.Signature, + Header: &cltypes.BeaconBlockHeader{ + Slot: block.Block.Slot, + ParentRoot: block.Block.ParentRoot, + ProposerIndex: block.Block.ProposerIndex, + Root: block.Block.StateRoot, + BodyRoot: bodyRoot, + }, + }, + canonical)) + blocks = append(blocks, block) + } + return blocks +} diff --git a/cl/sentinel/sentinel.go b/cl/sentinel/sentinel.go index 839906fb1bb..f5b36861699 100644 --- a/cl/sentinel/sentinel.go +++ b/cl/sentinel/sentinel.go @@ -22,6 +22,7 @@ import ( "time" "github.com/go-chi/chi/v5" + "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/cl/sentinel/handlers" "github.com/ledgerwatch/erigon/cl/sentinel/handshake" "github.com/ledgerwatch/erigon/cl/sentinel/httpreqresp" @@ -74,7 +75,8 @@ type Sentinel struct { metadataV2 *cltypes.Metadata handshaker *handshake.HandShaker - db persistence.RawBeaconBlockChain + db persistence.RawBeaconBlockChain + indiciesDB kv.RoDB discoverConfig discover.Config pubsub *pubsub.PubSub @@ -90,7 +92,7 @@ func (s *Sentinel) createLocalNode( udpPort, tcpPort int, tmpDir string, ) (*enode.LocalNode, error) { - db, err := enode.OpenDB(s.ctx, "", tmpDir) + db, err := enode.OpenDB(s.ctx, "", tmpDir, s.logger) if err != nil { return nil, fmt.Errorf("could not open node's peer database: %w", err) } @@ -166,7 +168,7 @@ func (s *Sentinel) createListener() (*discover.UDPv5, error) { } // Start stream handlers - handlers.NewConsensusHandlers(s.ctx, s.db, s.host, s.peers, s.cfg.BeaconConfig, s.cfg.GenesisConfig, s.metadataV2).Start() + handlers.NewConsensusHandlers(s.ctx, s.db, s.indiciesDB, s.host, s.peers, s.cfg.BeaconConfig, s.cfg.GenesisConfig, s.metadataV2, s.cfg.EnableBlocks).Start() net, err := discover.ListenV5(s.ctx, "any", conn, localNode, discCfg) if err != nil { @@ -180,14 +182,16 @@ func New( ctx context.Context, cfg 
*SentinelConfig, db persistence.RawBeaconBlockChain, + indiciesDB kv.RoDB, logger log.Logger, ) (*Sentinel, error) { s := &Sentinel{ - ctx: ctx, - cfg: cfg, - db: db, - metrics: true, - logger: logger, + ctx: ctx, + cfg: cfg, + db: db, + indiciesDB: indiciesDB, + metrics: true, + logger: logger, } // Setup discovery @@ -258,7 +262,7 @@ func (s *Sentinel) ReqRespHandler() http.Handler { return s.httpApi } -func (s *Sentinel) RecvGossip() <-chan *pubsub.Message { +func (s *Sentinel) RecvGossip() <-chan *GossipMessage { return s.subManager.Recv() } @@ -286,6 +290,7 @@ func (s *Sentinel) Start() error { s.subManager = NewGossipManager(s.ctx) go s.listenForPeers() + go s.forkWatcher() return nil } diff --git a/cl/sentinel/sentinel_gossip_test.go b/cl/sentinel/sentinel_gossip_test.go new file mode 100644 index 00000000000..5ef8b2082e8 --- /dev/null +++ b/cl/sentinel/sentinel_gossip_test.go @@ -0,0 +1,105 @@ +package sentinel + +import ( + "context" + "math" + "testing" + "time" + + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/persistence" + "github.com/ledgerwatch/log/v3" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" +) + +func TestSentinelGossipOnHardFork(t *testing.T) { + listenAddrHost := "127.0.0.1" + + ctx := context.Background() + db, _, f, _, _ := loadChain(t) + raw := persistence.NewAferoRawBlockSaver(f, &clparams.MainnetBeaconConfig) + genesisConfig, networkConfig, beaconConfig := clparams.GetConfigsByNetwork(clparams.MainnetNetwork) + bcfg := *beaconConfig + + bcfg.AltairForkEpoch = math.MaxUint64 + bcfg.BellatrixForkEpoch = math.MaxUint64 + bcfg.CapellaForkEpoch = math.MaxUint64 + bcfg.DenebForkEpoch = math.MaxUint64 + bcfg.InitializeForkSchedule() + + sentinel1, err := New(ctx, &SentinelConfig{ + NetworkConfig: networkConfig, + BeaconConfig: &bcfg, + GenesisConfig: genesisConfig, + IpAddr: listenAddrHost, + Port: 7070, + EnableBlocks: true, + }, raw, db, log.New()) + require.NoError(t, err) + defer sentinel1.Stop() + + require.NoError(t, sentinel1.Start()) + h := sentinel1.host + + sentinel2, err := New(ctx, &SentinelConfig{ + NetworkConfig: networkConfig, + BeaconConfig: &bcfg, + GenesisConfig: genesisConfig, + IpAddr: listenAddrHost, + Port: 7077, + EnableBlocks: true, + TCPPort: 9123, + }, raw, db, log.New()) + require.NoError(t, err) + defer sentinel2.Stop() + + require.NoError(t, sentinel2.Start()) + h2 := sentinel2.host + + sub1, err := sentinel1.SubscribeGossip(BeaconBlockSsz) + require.NoError(t, err) + defer sub1.Close() + + require.NoError(t, sub1.Listen()) + + sub2, err := sentinel2.SubscribeGossip(BeaconBlockSsz) + require.NoError(t, err) + defer sub2.Close() + require.NoError(t, sub2.Listen()) + + err = h.Connect(ctx, peer.AddrInfo{ + ID: h2.ID(), + Addrs: h2.Addrs(), + }) + require.NoError(t, err) + time.Sleep(5 * time.Second) + + ch := sentinel2.RecvGossip() + msg := []byte("hello") + go func() { + // delay to make sure that the connection is established + sub1.Publish(msg) + }() + previousTopic := "" + + ans := <-ch + require.Equal(t, ans.Data, msg) + previousTopic = ans.TopicName + + bcfg.AltairForkEpoch = clparams.MainnetBeaconConfig.AltairForkEpoch + bcfg.InitializeForkSchedule() + time.Sleep(5 * time.Second) + + msg = []byte("hello1") + go func() { + // delay to make sure that the connection is established + sub1 = sentinel1.subManager.GetMatchingSubscription(BeaconBlockSsz.Name) + sub1.Publish(msg) + }() + + ans = <-ch + require.Equal(t, ans.Data, msg) + require.NotEqual(t, previousTopic, 
ans.TopicName) + +} diff --git a/cl/sentinel/sentinel_requests_test.go b/cl/sentinel/sentinel_requests_test.go new file mode 100644 index 00000000000..1686c97e872 --- /dev/null +++ b/cl/sentinel/sentinel_requests_test.go @@ -0,0 +1,316 @@ +package sentinel + +import ( + "bytes" + "context" + "encoding/binary" + "io" + "testing" + + "github.com/golang/snappy" + "github.com/ledgerwatch/erigon-lib/common/datadir" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/ledgerwatch/erigon/cl/antiquary" + "github.com/ledgerwatch/erigon/cl/antiquary/tests" + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/cl/fork" + "github.com/ledgerwatch/erigon/cl/persistence" + state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state" + "github.com/ledgerwatch/erigon/cl/phase1/core/state" + "github.com/ledgerwatch/erigon/cl/sentinel/communication" + "github.com/ledgerwatch/erigon/cl/sentinel/communication/ssz_snappy" + "github.com/ledgerwatch/erigon/cl/utils" + "github.com/ledgerwatch/log/v3" + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + "github.com/spf13/afero" + "github.com/stretchr/testify/require" +) + +func loadChain(t *testing.T) (db kv.RwDB, blocks []*cltypes.SignedBeaconBlock, f afero.Fs, preState, postState *state.CachingBeaconState) { + blocks, preState, postState = tests.GetPhase0Random() + db = memdb.NewTestDB(t) + var reader *tests.MockBlockReader + reader, f = tests.LoadChain(blocks, postState, db, t) + + ctx := context.Background() + vt := state_accessors.NewStaticValidatorTable() + a := antiquary.NewAntiquary(ctx, preState, vt, &clparams.MainnetBeaconConfig, datadir.New("/tmp"), nil, db, nil, reader, nil, log.New(), true, true, f) + require.NoError(t, a.IncrementBeaconState(ctx, blocks[len(blocks)-1].Block.Slot+33)) + return +} + +func TestSentinelBlocksByRange(t *testing.T) { + listenAddrHost := "127.0.0.1" + + ctx := context.Background() + db, blocks, f, _, _ := loadChain(t) + raw := persistence.NewAferoRawBlockSaver(f, &clparams.MainnetBeaconConfig) + genesisConfig, networkConfig, beaconConfig := clparams.GetConfigsByNetwork(clparams.MainnetNetwork) + sentinel, err := New(ctx, &SentinelConfig{ + NetworkConfig: networkConfig, + BeaconConfig: beaconConfig, + GenesisConfig: genesisConfig, + IpAddr: listenAddrHost, + Port: 7070, + EnableBlocks: true, + }, raw, db, log.New()) + require.NoError(t, err) + defer sentinel.Stop() + + require.NoError(t, sentinel.Start()) + h := sentinel.host + + listenAddrHost1 := "/ip4/127.0.0.1/tcp/3202" + host1, err := libp2p.New(libp2p.ListenAddrStrings(listenAddrHost1)) + require.NoError(t, err) + + err = h.Connect(ctx, peer.AddrInfo{ + ID: host1.ID(), + Addrs: host1.Addrs(), + }) + require.NoError(t, err) + + stream, err := host1.NewStream(ctx, h.ID(), protocol.ID(communication.BeaconBlocksByRangeProtocolV2)) + require.NoError(t, err) + + req := &cltypes.BeaconBlocksByRangeRequest{ + StartSlot: blocks[0].Block.Slot, + Count: 6, + } + + if err := ssz_snappy.EncodeAndWrite(stream, req); err != nil { + return + } + + code := make([]byte, 1) + _, err = stream.Read(code) + require.NoError(t, err) + require.Equal(t, code[0], uint8(0)) + + var w bytes.Buffer + _, err = io.Copy(&w, stream) + require.NoError(t, err) + + responsePacket := make([]*cltypes.SignedBeaconBlock, 0) + + r := bytes.NewReader(w.Bytes()) + for i 
:= 0; i < len(blocks); i++ { + forkDigest := make([]byte, 4) + if _, err := r.Read(forkDigest); err != nil { + if err == io.EOF { + break + } + require.NoError(t, err) + } + + // Read varint for length of message. + encodedLn, _, err := ssz_snappy.ReadUvarint(r) + require.NoError(t, err) + + // Read bytes using snappy into a new raw buffer of size encodedLn. + raw := make([]byte, encodedLn) + sr := snappy.NewReader(r) + bytesRead := 0 + for bytesRead < int(encodedLn) { + n, err := sr.Read(raw[bytesRead:]) + require.NoError(t, err) + bytesRead += n + } + // Fork digests + respForkDigest := binary.BigEndian.Uint32(forkDigest) + require.NoError(t, err) + + version, err := fork.ForkDigestVersion(utils.Uint32ToBytes4(respForkDigest), beaconConfig, genesisConfig.GenesisValidatorRoot) + require.NoError(t, err) + + responseChunk := cltypes.NewSignedBeaconBlock(beaconConfig) + + require.NoError(t, responseChunk.DecodeSSZ(raw, int(version))) + + responsePacket = append(responsePacket, responseChunk) + // TODO(issues/5884): figure out why there is this extra byte. + r.ReadByte() + } + require.Equal(t, len(responsePacket), len(blocks)) + for i := 0; i < len(blocks); i++ { + root1, err := responsePacket[i].HashSSZ() + require.NoError(t, err) + + root2, err := blocks[i].HashSSZ() + require.NoError(t, err) + + require.Equal(t, root1, root2) + } + +} + +func TestSentinelBlocksByRoots(t *testing.T) { + listenAddrHost := "127.0.0.1" + + ctx := context.Background() + db, blocks, f, _, _ := loadChain(t) + raw := persistence.NewAferoRawBlockSaver(f, &clparams.MainnetBeaconConfig) + genesisConfig, networkConfig, beaconConfig := clparams.GetConfigsByNetwork(clparams.MainnetNetwork) + sentinel, err := New(ctx, &SentinelConfig{ + NetworkConfig: networkConfig, + BeaconConfig: beaconConfig, + GenesisConfig: genesisConfig, + IpAddr: listenAddrHost, + Port: 7070, + EnableBlocks: true, + }, raw, db, log.New()) + require.NoError(t, err) + defer sentinel.Stop() + + require.NoError(t, sentinel.Start()) + h := sentinel.host + + listenAddrHost1 := "/ip4/127.0.0.1/tcp/5021" + host1, err := libp2p.New(libp2p.ListenAddrStrings(listenAddrHost1)) + require.NoError(t, err) + + err = h.Connect(ctx, peer.AddrInfo{ + ID: host1.ID(), + Addrs: host1.Addrs(), + }) + require.NoError(t, err) + + stream, err := host1.NewStream(ctx, h.ID(), protocol.ID(communication.BeaconBlocksByRootProtocolV2)) + require.NoError(t, err) + + req := solid.NewHashList(1232) + rt, err := blocks[0].Block.HashSSZ() + require.NoError(t, err) + + req.Append(rt) + rt, err = blocks[1].Block.HashSSZ() + require.NoError(t, err) + req.Append(rt) + + if err := ssz_snappy.EncodeAndWrite(stream, req); err != nil { + return + } + + code := make([]byte, 1) + _, err = stream.Read(code) + require.NoError(t, err) + require.Equal(t, code[0], uint8(0)) + + var w bytes.Buffer + _, err = io.Copy(&w, stream) + require.NoError(t, err) + + responsePacket := make([]*cltypes.SignedBeaconBlock, 0) + + r := bytes.NewReader(w.Bytes()) + for i := 0; i < len(blocks); i++ { + forkDigest := make([]byte, 4) + if _, err := r.Read(forkDigest); err != nil { + if err == io.EOF { + break + } + require.NoError(t, err) + } + + // Read varint for length of message. + encodedLn, _, err := ssz_snappy.ReadUvarint(r) + require.NoError(t, err) + + // Read bytes using snappy into a new raw buffer of size encodedLn.
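+ // Each chunk on the wire is framed as: a 4-byte fork digest, then a uvarint
+ // holding the uncompressed SSZ length, then the payload compressed with
+ // framed snappy. A minimal sketch of reading one frame, assuming the
+ // hypothetical helper below (readFrame is not part of this package):
+ //
+ //	func readFrame(r *bytes.Reader) (digest [4]byte, payload []byte, err error) {
+ //		if _, err = io.ReadFull(r, digest[:]); err != nil {
+ //			return // io.EOF here simply means there are no more chunks
+ //		}
+ //		ln, _, err := ssz_snappy.ReadUvarint(r) // uncompressed SSZ length
+ //		if err != nil {
+ //			return
+ //		}
+ //		payload = make([]byte, ln)
+ //		_, err = io.ReadFull(snappy.NewReader(r), payload)
+ //		return
+ //	}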
+ raw := make([]byte, encodedLn) + sr := snappy.NewReader(r) + bytesRead := 0 + for bytesRead < int(encodedLn) { + n, err := sr.Read(raw[bytesRead:]) + require.NoError(t, err) + bytesRead += n + } + // Fork digests + respForkDigest := binary.BigEndian.Uint32(forkDigest) + require.NoError(t, err) + + version, err := fork.ForkDigestVersion(utils.Uint32ToBytes4(respForkDigest), beaconConfig, genesisConfig.GenesisValidatorRoot) + require.NoError(t, err) + + responseChunk := cltypes.NewSignedBeaconBlock(beaconConfig) + + require.NoError(t, responseChunk.DecodeSSZ(raw, int(version))) + + responsePacket = append(responsePacket, responseChunk) + // TODO(issues/5884): figure out why there is this extra byte. + r.ReadByte() + } + + require.Equal(t, len(responsePacket), len(blocks)) + for i := 0; i < len(responsePacket); i++ { + root1, err := responsePacket[i].HashSSZ() + require.NoError(t, err) + + root2, err := blocks[i].HashSSZ() + require.NoError(t, err) + + require.Equal(t, root1, root2) + } +} + +func TestSentinelStatusRequest(t *testing.T) { + t.Skip("TODO: fix me") + listenAddrHost := "127.0.0.1" + + ctx := context.Background() + db, blocks, f, _, _ := loadChain(t) + raw := persistence.NewAferoRawBlockSaver(f, &clparams.MainnetBeaconConfig) + genesisConfig, networkConfig, beaconConfig := clparams.GetConfigsByNetwork(clparams.MainnetNetwork) + sentinel, err := New(ctx, &SentinelConfig{ + NetworkConfig: networkConfig, + BeaconConfig: beaconConfig, + GenesisConfig: genesisConfig, + IpAddr: listenAddrHost, + Port: 7070, + EnableBlocks: true, + }, raw, db, log.New()) + require.NoError(t, err) + defer sentinel.Stop() + + require.NoError(t, sentinel.Start()) + h := sentinel.host + + listenAddrHost1 := "/ip4/127.0.0.1/tcp/5001" + host1, err := libp2p.New(libp2p.ListenAddrStrings(listenAddrHost1)) + require.NoError(t, err) + + err = h.Connect(ctx, peer.AddrInfo{ + ID: host1.ID(), + Addrs: host1.Addrs(), + }) + require.NoError(t, err) + req := &cltypes.Status{ + HeadRoot: blocks[0].Block.ParentRoot, + HeadSlot: 1234, + } + sentinel.SetStatus(req) + stream, err := host1.NewStream(ctx, h.ID(), protocol.ID(communication.StatusProtocolV1)) + require.NoError(t, err) + + if err := ssz_snappy.EncodeAndWrite(stream, req); err != nil { + return + } + + code := make([]byte, 1) + _, err = stream.Read(code) + require.NoError(t, err) + require.Equal(t, code[0], uint8(0)) + + resp := &cltypes.Status{} + if err := ssz_snappy.DecodeAndReadNoForkDigest(stream, resp, 0); err != nil { + return + } + require.NoError(t, err) + + require.Equal(t, resp, req) +} diff --git a/cl/sentinel/service/notifiers.go b/cl/sentinel/service/notifiers.go index 8af9b991a73..7297dc81802 100644 --- a/cl/sentinel/service/notifiers.go +++ b/cl/sentinel/service/notifiers.go @@ -4,7 +4,7 @@ import ( "fmt" "sync" - "github.com/ledgerwatch/erigon-lib/gointerfaces/sentinel" + "github.com/ledgerwatch/erigon/cl/gossip" ) const ( @@ -12,10 +12,9 @@ const ( ) type gossipObject struct { - data []byte // gossip data - t sentinel.GossipType // determine which gossip message we are notifying of - pid string // pid is the peer id of the sender - blobIndex *uint32 // index of the blob + data []byte // gossip data + t string // determine which gossip message we are notifying of + pid string // pid is the peer id of the sender } type gossipNotifier struct { @@ -30,7 +29,7 @@ func newGossipNotifier() *gossipNotifier { } } -func (g *gossipNotifier) notify(t sentinel.GossipType, data []byte, pid string) { +func (g *gossipNotifier) notify(t string, data []byte, 
pid string) { g.mu.Lock() defer g.mu.Unlock() @@ -43,18 +42,15 @@ func (g *gossipNotifier) notify(t sentinel.GossipType, data []byte, pid string) } } -func (g *gossipNotifier) notifyBlob(t sentinel.GossipType, data []byte, pid string, blobIndex int) { +func (g *gossipNotifier) notifyBlob(data []byte, pid string, blobIndex int) { g.mu.Lock() defer g.mu.Unlock() - index := new(uint32) - *index = uint32(blobIndex) for _, ch := range g.notifiers { ch <- gossipObject{ - data: data, - t: t, - pid: pid, - blobIndex: index, + data: data, + t: gossip.TopicNameBlobSidecar(blobIndex), + pid: pid, } } } diff --git a/cl/sentinel/service/service.go b/cl/sentinel/service/service.go index 1c72d1fe4c6..d43b40cb6b5 100644 --- a/cl/sentinel/service/service.go +++ b/cl/sentinel/service/service.go @@ -3,7 +3,6 @@ package service import ( "bytes" "context" - "errors" "fmt" "io" "net/http" @@ -13,6 +12,7 @@ import ( "time" "github.com/ledgerwatch/erigon-lib/diagnostics" + "github.com/ledgerwatch/erigon/cl/gossip" "github.com/ledgerwatch/erigon/cl/sentinel" "github.com/ledgerwatch/erigon/cl/sentinel/httpreqresp" @@ -21,7 +21,6 @@ import ( "github.com/ledgerwatch/erigon/cl/cltypes" "github.com/ledgerwatch/erigon/cl/utils" "github.com/ledgerwatch/log/v3" - pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/peer" ) @@ -51,7 +50,7 @@ func NewSentinelServer(ctx context.Context, sentinel *sentinel.Sentinel, logger // extractBlobSideCarIndex takes a topic and extract the blob sidecar func extractBlobSideCarIndex(topic string) int { // compute the index prefixless - startIndex := strings.Index(topic, string(sentinel.BlobSidecarTopic)) + len(sentinel.BlobSidecarTopic) + startIndex := strings.Index(topic, gossip.TopicNamePrefixBlobSidecar) + len(gossip.TopicNamePrefixBlobSidecar) endIndex := strings.Index(topic[:startIndex], "/") blobIndex, err := strconv.Atoi(topic[startIndex:endIndex]) if err != nil { @@ -78,28 +77,30 @@ func (s *SentinelServer) PublishGossip(_ context.Context, msg *sentinelrpc.Gossi // Snappify payload before sending it to gossip compressedData := utils.CompressSnappy(msg.Data) - s.trackPeerStatistics(msg.GetPeer().Pid, false, msg.Type.String(), "unknown", len(compressedData)) + s.trackPeerStatistics(msg.GetPeer().Pid, false, msg.Name, "unknown", len(compressedData)) var subscription *sentinel.GossipSubscription - switch msg.Type { - case sentinelrpc.GossipType_BeaconBlockGossipType: - subscription = manager.GetMatchingSubscription(string(sentinel.BeaconBlockTopic)) - case sentinelrpc.GossipType_AggregateAndProofGossipType: - subscription = manager.GetMatchingSubscription(string(sentinel.BeaconAggregateAndProofTopic)) - case sentinelrpc.GossipType_VoluntaryExitGossipType: - subscription = manager.GetMatchingSubscription(string(sentinel.VoluntaryExitTopic)) - case sentinelrpc.GossipType_ProposerSlashingGossipType: - subscription = manager.GetMatchingSubscription(string(sentinel.ProposerSlashingTopic)) - case sentinelrpc.GossipType_AttesterSlashingGossipType: - subscription = manager.GetMatchingSubscription(string(sentinel.AttesterSlashingTopic)) - case sentinelrpc.GossipType_BlobSidecarType: - if msg.BlobIndex == nil { - return &sentinelrpc.EmptyMessage{}, errors.New("cannot publish sidecar blob with no index") - } - subscription = manager.GetMatchingSubscription(fmt.Sprintf(string(sentinel.BlobSidecarTopic), *msg.BlobIndex)) + // TODO: this is still wrong... 
we should build a subscription here to match exactly, meaning that downstream consumers should be + // in charge of keeping track of fork id. + switch msg.Name { + case gossip.TopicNameBeaconBlock: + subscription = manager.GetMatchingSubscription(msg.Name) + case gossip.TopicNameBeaconAggregateAndProof: + subscription = manager.GetMatchingSubscription(msg.Name) + case gossip.TopicNameVoluntaryExit: + subscription = manager.GetMatchingSubscription(msg.Name) + case gossip.TopicNameProposerSlashing: + subscription = manager.GetMatchingSubscription(msg.Name) + case gossip.TopicNameAttesterSlashing: + subscription = manager.GetMatchingSubscription(msg.Name) default: - return &sentinelrpc.EmptyMessage{}, nil + switch { + case gossip.IsTopicBlobSidecar(msg.Name): + subscription = manager.GetMatchingSubscription(msg.Name) + default: + return &sentinelrpc.EmptyMessage{}, nil + } } if subscription == nil { return &sentinelrpc.EmptyMessage{}, nil @@ -123,11 +124,10 @@ func (s *SentinelServer) SubscribeGossip(_ *sentinelrpc.EmptyMessage, stream sen case packet := <-ch: if err := stream.Send(&sentinelrpc.GossipData{ Data: packet.data, - Type: packet.t, + Name: packet.t, Peer: &sentinelrpc.Peer{ Pid: packet.pid, }, - BlobIndex: packet.blobIndex, }); err != nil { s.logger.Warn("[Sentinel] Could not relay gossip packet", "reason", err) } @@ -273,43 +273,43 @@ func (s *SentinelServer) ListenToGossip() { } } -func (s *SentinelServer) handleGossipPacket(pkt *pubsub.Message) error { +func (s *SentinelServer) handleGossipPacket(pkt *sentinel.GossipMessage) error { var err error - s.logger.Trace("[Sentinel Gossip] Received Packet", "topic", pkt.Topic) - - data := pkt.GetData() + s.logger.Trace("[Sentinel Gossip] Received Packet", "topic", pkt.TopicName) + data := pkt.Data + topic := pkt.TopicName // If we use snappy codec then decompress it accordingly. - if strings.Contains(*pkt.Topic, sentinel.SSZSnappyCodec) { + if strings.Contains(topic, sentinel.SSZSnappyCodec) { data, err = utils.DecompressSnappy(data) if err != nil { return err } } - textPid, err := pkt.ReceivedFrom.MarshalText() + textPid, err := pkt.From.MarshalText() if err != nil { return err } - msgType, msgCap := parseTopic(pkt.GetTopic()) + msgType, msgCap := parseTopic(topic) s.trackPeerStatistics(string(textPid), true, msgType, msgCap, len(data)) // Check to which gossip it belongs to. 
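+ // Gossip topic strings follow the eth2 convention
+ // /eth2/<fork digest>/<name>/<encoding>, for example (digest illustrative):
+ //   /eth2/4a26c58b/beacon_block/ssz_snappy   -> gossip.TopicNameBeaconBlock
+ //   /eth2/4a26c58b/blob_sidecar_3/ssz_snappy -> blob notifier, index 3
+ // so matching on the name segment is enough to route a packet. A sketch of
+ // the kind of check gossip.IsTopicBlobSidecar performs (assumed, for clarity):
+ //
+ //	func isBlobSidecarTopic(topic string) bool {
+ //		return strings.Contains(topic, "blob_sidecar_")
+ //	}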
- if strings.Contains(*pkt.Topic, string(sentinel.BeaconBlockTopic)) { - s.gossipNotifier.notify(sentinelrpc.GossipType_BeaconBlockGossipType, data, string(textPid)) - } else if strings.Contains(*pkt.Topic, string(sentinel.BeaconAggregateAndProofTopic)) { - s.gossipNotifier.notify(sentinelrpc.GossipType_AggregateAndProofGossipType, data, string(textPid)) - } else if strings.Contains(*pkt.Topic, string(sentinel.VoluntaryExitTopic)) { - s.gossipNotifier.notify(sentinelrpc.GossipType_VoluntaryExitGossipType, data, string(textPid)) - } else if strings.Contains(*pkt.Topic, string(sentinel.ProposerSlashingTopic)) { - s.gossipNotifier.notify(sentinelrpc.GossipType_ProposerSlashingGossipType, data, string(textPid)) - } else if strings.Contains(*pkt.Topic, string(sentinel.AttesterSlashingTopic)) { - s.gossipNotifier.notify(sentinelrpc.GossipType_AttesterSlashingGossipType, data, string(textPid)) - } else if strings.Contains(*pkt.Topic, string(sentinel.BlsToExecutionChangeTopic)) { - s.gossipNotifier.notify(sentinelrpc.GossipType_BlsToExecutionChangeGossipType, data, string(textPid)) - } else if strings.Contains(*pkt.Topic, string(sentinel.BlobSidecarTopic)) { + if strings.Contains(topic, string(gossip.TopicNameBeaconBlock)) { + s.gossipNotifier.notify(gossip.TopicNameBeaconBlock, data, string(textPid)) + } else if strings.Contains(topic, string(gossip.TopicNameBeaconAggregateAndProof)) { + s.gossipNotifier.notify(gossip.TopicNameBeaconAggregateAndProof, data, string(textPid)) + } else if strings.Contains(topic, string(gossip.TopicNameVoluntaryExit)) { + s.gossipNotifier.notify(gossip.TopicNameVoluntaryExit, data, string(textPid)) + } else if strings.Contains(topic, string(gossip.TopicNameProposerSlashing)) { + s.gossipNotifier.notify(gossip.TopicNameProposerSlashing, data, string(textPid)) + } else if strings.Contains(topic, string(gossip.TopicNameAttesterSlashing)) { + s.gossipNotifier.notify(gossip.TopicNameAttesterSlashing, data, string(textPid)) + } else if strings.Contains(topic, string(gossip.TopicNameBlsToExecutionChange)) { + s.gossipNotifier.notify(gossip.TopicNameBlsToExecutionChange, data, string(textPid)) + } else if gossip.IsTopicBlobSidecar(topic) { // extract the index - s.gossipNotifier.notifyBlob(sentinelrpc.GossipType_BlobSidecarType, data, string(textPid), extractBlobSideCarIndex(*pkt.Topic)) + s.gossipNotifier.notifyBlob(data, string(textPid), extractBlobSideCarIndex(topic)) } return nil } diff --git a/cl/sentinel/service/start.go b/cl/sentinel/service/start.go index efcd95ac77a..50ad1b38cf1 100644 --- a/cl/sentinel/service/start.go +++ b/cl/sentinel/service/start.go @@ -8,6 +8,7 @@ import ( "github.com/ledgerwatch/erigon-lib/direct" sentinelrpc "github.com/ledgerwatch/erigon-lib/gointerfaces/sentinel" + "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/cl/cltypes" "github.com/ledgerwatch/erigon/cl/persistence" "github.com/ledgerwatch/log/v3" @@ -20,8 +21,8 @@ type ServerConfig struct { Addr string } -func createSentinel(cfg *sentinel.SentinelConfig, db persistence.RawBeaconBlockChain, logger log.Logger) (*sentinel.Sentinel, error) { - sent, err := sentinel.New(context.Background(), cfg, db, logger) +func createSentinel(cfg *sentinel.SentinelConfig, db persistence.RawBeaconBlockChain, indiciesDB kv.RwDB, logger log.Logger) (*sentinel.Sentinel, error) { + sent, err := sentinel.New(context.Background(), cfg, db, indiciesDB, logger) if err != nil { return nil, err } @@ -57,9 +58,9 @@ func createSentinel(cfg *sentinel.SentinelConfig, db 
persistence.RawBeaconBlockC return sent, nil } -func StartSentinelService(cfg *sentinel.SentinelConfig, db persistence.RawBeaconBlockChain, srvCfg *ServerConfig, creds credentials.TransportCredentials, initialStatus *cltypes.Status, logger log.Logger) (sentinelrpc.SentinelClient, error) { +func StartSentinelService(cfg *sentinel.SentinelConfig, db persistence.RawBeaconBlockChain, indiciesDB kv.RwDB, srvCfg *ServerConfig, creds credentials.TransportCredentials, initialStatus *cltypes.Status, logger log.Logger) (sentinelrpc.SentinelClient, error) { ctx := context.Background() - sent, err := createSentinel(cfg, db, logger) + sent, err := createSentinel(cfg, db, indiciesDB, logger) if err != nil { return nil, err } diff --git a/cl/spectest/Makefile b/cl/spectest/Makefile index f4f5be19693..42877b2a330 100644 --- a/cl/spectest/Makefile +++ b/cl/spectest/Makefile @@ -3,7 +3,7 @@ tests: GIT_LFS_SKIP_SMUDGE=1 git clone https://github.com/ethereum/consensus-spec-tests - cd consensus-spec-tests && git checkout 70dc28b18c71f3ae080c02f51bd3421e0b60609b && git lfs pull --exclude=tests/general,tests/minimal && cd .. + cd consensus-spec-tests && git checkout 99549a414c10baa9e69abcb08eb256fc1a8d54f6 && git lfs pull --exclude=tests/general,tests/minimal && cd .. mv consensus-spec-tests/tests . rm -rf consensus-spec-tests rm -rf tests/minimal diff --git a/cl/spectest/consensus_tests/epoch_processing.go b/cl/spectest/consensus_tests/epoch_processing.go index 390c2ae4289..8fea8841bbd 100644 --- a/cl/spectest/consensus_tests/epoch_processing.go +++ b/cl/spectest/consensus_tests/epoch_processing.go @@ -1,12 +1,14 @@ package consensus_tests import ( - "github.com/ledgerwatch/erigon/spectest" "io/fs" "os" "testing" + "github.com/ledgerwatch/erigon/spectest" + "github.com/ledgerwatch/erigon/cl/abstract" + "github.com/ledgerwatch/erigon/cl/clparams" "github.com/ledgerwatch/erigon/cl/phase1/core/state" "github.com/ledgerwatch/erigon/cl/transition/impl/eth2/statechange" @@ -66,7 +68,11 @@ var historicalRootsUpdateTest = NewEpochProcessing(func(s abstract.BeaconState) }) var inactivityUpdateTest = NewEpochProcessing(func(s abstract.BeaconState) error { - return statechange.ProcessInactivityScores(s, state.EligibleValidatorsIndicies(s), statechange.GetUnslashedIndiciesSet(s)) + var unslashedIndiciesSet [][]bool + if s.Version() >= clparams.AltairVersion { + unslashedIndiciesSet = statechange.GetUnslashedIndiciesSet(s.BeaconConfig(), state.PreviousEpoch(s), s.ValidatorSet(), s.PreviousEpochParticipation()) + } + return statechange.ProcessInactivityScores(s, state.EligibleValidatorsIndicies(s), unslashedIndiciesSet) }) var justificationFinalizationTest = NewEpochProcessing(func(s abstract.BeaconState) error { @@ -91,7 +97,11 @@ var registryUpdatesTest = NewEpochProcessing(func(s abstract.BeaconState) error }) var rewardsAndPenaltiesTest = NewEpochProcessing(func(s abstract.BeaconState) error { - return statechange.ProcessRewardsAndPenalties(s, state.EligibleValidatorsIndicies(s), statechange.GetUnslashedIndiciesSet(s)) + var unslashedIndiciesSet [][]bool + if s.Version() >= clparams.AltairVersion { + unslashedIndiciesSet = statechange.GetUnslashedIndiciesSet(s.BeaconConfig(), state.PreviousEpoch(s), s.ValidatorSet(), s.PreviousEpochParticipation()) + } + return statechange.ProcessRewardsAndPenalties(s, state.EligibleValidatorsIndicies(s), unslashedIndiciesSet) }) var slashingsTest = NewEpochProcessing(func(s abstract.BeaconState) error { diff --git a/cl/transition/compat.go b/cl/transition/compat.go index 
94a70a958d6..bb5d6f06185 100644 --- a/cl/transition/compat.go +++ b/cl/transition/compat.go @@ -13,7 +13,7 @@ var _ machine2.Interface = (*eth2.Impl)(nil) var DefaultMachine = &eth2.Impl{} var ValidatingMachine = &eth2.Impl{FullValidation: true} -func TransitionState(s abstract.BeaconState, block *cltypes.SignedBeaconBlock, fullValidation bool) error { - cvm := &eth2.Impl{FullValidation: fullValidation} +func TransitionState(s abstract.BeaconState, block *cltypes.SignedBeaconBlock, blockRewardsCollector *eth2.BlockRewardsCollector, fullValidation bool) error { + cvm := &eth2.Impl{FullValidation: fullValidation, BlockRewardsCollector: blockRewardsCollector} return machine2.TransitionState(cvm, s, block) } diff --git a/cl/transition/impl/eth2/block_processing_test.go b/cl/transition/impl/eth2/block_processing_test.go index 7838ce55ae2..4a5ac87a878 100644 --- a/cl/transition/impl/eth2/block_processing_test.go +++ b/cl/transition/impl/eth2/block_processing_test.go @@ -24,5 +24,5 @@ func TestBlockProcessing(t *testing.T) { require.NoError(t, utils.DecodeSSZSnappy(s, capellaState, int(clparams.CapellaVersion))) block := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig) require.NoError(t, utils.DecodeSSZSnappy(block, capellaBlock, int(clparams.CapellaVersion))) - require.NoError(t, transition.TransitionState(s, block, true)) // All checks already made in transition state + require.NoError(t, transition.TransitionState(s, block, nil, true)) // All checks already made in transition state } diff --git a/cl/transition/impl/eth2/impl.go b/cl/transition/impl/eth2/impl.go index dcd233bca0c..9fc7490c94e 100644 --- a/cl/transition/impl/eth2/impl.go +++ b/cl/transition/impl/eth2/impl.go @@ -6,6 +6,14 @@ type Impl = impl var _ machine.Interface = (*impl)(nil) +type BlockRewardsCollector struct { + Attestations uint64 + AttesterSlashings uint64 + ProposerSlashings uint64 + SyncAggregate uint64 +} + type impl struct { - FullValidation bool + FullValidation bool + BlockRewardsCollector *BlockRewardsCollector } diff --git a/cl/transition/impl/eth2/operations.go b/cl/transition/impl/eth2/operations.go index 13335129d4b..1f8c5b917a3 100644 --- a/cl/transition/impl/eth2/operations.go +++ b/cl/transition/impl/eth2/operations.go @@ -71,8 +71,11 @@ func (I *impl) ProcessProposerSlashing(s abstract.BeaconState, propSlashing *clt } // Set whistleblower index to 0 so current proposer gets reward. - s.SlashValidator(h1.ProposerIndex, nil) - return nil + pr, err := s.SlashValidator(h1.ProposerIndex, nil) + if I.BlockRewardsCollector != nil { + I.BlockRewardsCollector.ProposerSlashings += pr + } + return err } func (I *impl) ProcessAttesterSlashing(s abstract.BeaconState, attSlashing *cltypes.AttesterSlashing) error { @@ -109,10 +112,13 @@ func (I *impl) ProcessAttesterSlashing(s abstract.BeaconState, attSlashing *clty return err } if validator.IsSlashable(currentEpoch) { - err := s.SlashValidator(ind, nil) + pr, err := s.SlashValidator(ind, nil) if err != nil { return fmt.Errorf("unable to slash validator: %d", ind) } + if I.BlockRewardsCollector != nil { + I.BlockRewardsCollector.AttesterSlashings += pr + } slashedAny = true } } @@ -305,7 +311,7 @@ func (I *impl) ProcessExecutionPayload(s abstract.BeaconState, payload *cltypes.
} func (I *impl) ProcessSyncAggregate(s abstract.BeaconState, sync *cltypes.SyncAggregate) error { - votedKeys, err := processSyncAggregate(s, sync) + votedKeys, err := I.processSyncAggregate(s, sync) if err != nil { return err } @@ -335,7 +341,7 @@ func (I *impl) ProcessSyncAggregate(s abstract.BeaconState, sync *cltypes.SyncAg // processSyncAggregate applies all the logic in the spec function `process_sync_aggregate` except // verifying the BLS signatures. It returns the modified beacons state and the list of validators' // public keys that voted, for future signature verification. -func processSyncAggregate(s abstract.BeaconState, sync *cltypes.SyncAggregate) ([][]byte, error) { +func (I *impl) processSyncAggregate(s abstract.BeaconState, sync *cltypes.SyncAggregate) ([][]byte, error) { currentSyncCommittee := s.CurrentSyncCommittee() if currentSyncCommittee == nil { @@ -382,6 +388,9 @@ func processSyncAggregate(s abstract.BeaconState, sync *cltypes.SyncAggregate) ( } } + if I.BlockRewardsCollector != nil { + I.BlockRewardsCollector.SyncAggregate = earnedProposerReward + } return votedKeys, state.IncreaseBalance(s, proposerIndex, earnedProposerReward) } @@ -478,7 +487,7 @@ func (I *impl) ProcessAttestations(s abstract.BeaconState, attestations *solid.L c := h.Tag("attestation_step", "process") var err error if err := solid.RangeErr[*solid.Attestation](attestations, func(i int, a *solid.Attestation, _ int) error { - if attestingIndiciesSet[i], err = processAttestation(s, a, baseRewardPerIncrement); err != nil { + if attestingIndiciesSet[i], err = I.processAttestation(s, a, baseRewardPerIncrement); err != nil { return err } return nil @@ -505,7 +514,7 @@ func (I *impl) ProcessAttestations(s abstract.BeaconState, attestations *solid.L return nil } -func processAttestationPostAltair(s abstract.BeaconState, attestation *solid.Attestation, baseRewardPerIncrement uint64) ([]uint64, error) { +func (I *impl) processAttestationPostAltair(s abstract.BeaconState, attestation *solid.Attestation, baseRewardPerIncrement uint64) ([]uint64, error) { data := attestation.AttestantionData() currentEpoch := state.Epoch(s) stateSlot := s.Slot() @@ -560,11 +569,14 @@ func processAttestationPostAltair(s abstract.BeaconState, attestation *solid.Att c.PutSince() proposerRewardDenominator := (beaconConfig.WeightDenominator - beaconConfig.ProposerWeight) * beaconConfig.WeightDenominator / beaconConfig.ProposerWeight reward := proposerRewardNumerator / proposerRewardDenominator + if I.BlockRewardsCollector != nil { + I.BlockRewardsCollector.Attestations += reward + } return attestingIndicies, state.IncreaseBalance(s, proposer, reward) } // processAttestationsPhase0 implements the rules for phase0 processing. -func processAttestationPhase0(s abstract.BeaconState, attestation *solid.Attestation) ([]uint64, error) { +func (I *impl) processAttestationPhase0(s abstract.BeaconState, attestation *solid.Attestation) ([]uint64, error) { data := attestation.AttestantionData() committee, err := s.GetBeaconCommitee(data.Slot(), data.ValidatorIndex()) if err != nil { @@ -675,7 +687,7 @@ func processAttestationPhase0(s abstract.BeaconState, attestation *solid.Attesta } // ProcessAttestation takes an attestation and process it. 
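+ // With the collector threaded through these (now) methods, a caller can
+ // tally the proposer rewards earned while applying a block. A minimal usage
+ // sketch via the TransitionState wrapper from cl/transition/compat.go:
+ //
+ //	rewards := &eth2.BlockRewardsCollector{}
+ //	if err := transition.TransitionState(s, block, rewards, false); err != nil {
+ //		return err
+ //	}
+ //	total := rewards.Attestations + rewards.AttesterSlashings +
+ //		rewards.ProposerSlashings + rewards.SyncAggregate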
-func processAttestation(s abstract.BeaconState, attestation *solid.Attestation, baseRewardPerIncrement uint64) ([]uint64, error) { +func (I *impl) processAttestation(s abstract.BeaconState, attestation *solid.Attestation, baseRewardPerIncrement uint64) ([]uint64, error) { data := attestation.AttestantionData() currentEpoch := state.Epoch(s) previousEpoch := state.PreviousEpoch(s) @@ -693,9 +705,9 @@ func processAttestation(s abstract.BeaconState, attestation *solid.Attestation, } // check if we need to use rules for phase0 or post-altair. if s.Version() == clparams.Phase0Version { - return processAttestationPhase0(s, attestation) + return I.processAttestationPhase0(s, attestation) } - return processAttestationPostAltair(s, attestation, baseRewardPerIncrement) + return I.processAttestationPostAltair(s, attestation, baseRewardPerIncrement) } func verifyAttestations(s abstract.BeaconState, attestations *solid.ListSSZ[*solid.Attestation], attestingIndicies [][]uint64) (bool, error) { diff --git a/cl/transition/impl/eth2/statechange/process_epoch.go b/cl/transition/impl/eth2/statechange/process_epoch.go index 304a218ebc7..f42c5a1263d 100644 --- a/cl/transition/impl/eth2/statechange/process_epoch.go +++ b/cl/transition/impl/eth2/statechange/process_epoch.go @@ -7,20 +7,16 @@ import ( "github.com/ledgerwatch/erigon/cl/phase1/core/state" ) -func GetUnslashedIndiciesSet(s abstract.BeaconState) [][]bool { - if s.Version() == clparams.Phase0Version { - return nil - } - weights := s.BeaconConfig().ParticipationWeights() +func GetUnslashedIndiciesSet(cfg *clparams.BeaconChainConfig, previousEpoch uint64, validatorSet *solid.ValidatorSet, previousEpochPartecipation *solid.BitList) [][]bool { + weights := cfg.ParticipationWeights() flagsUnslashedIndiciesSet := make([][]bool, len(weights)) for i := range weights { - flagsUnslashedIndiciesSet[i] = make([]bool, s.ValidatorLength()) + flagsUnslashedIndiciesSet[i] = make([]bool, validatorSet.Length()) } - previousEpoch := state.PreviousEpoch(s) - s.ForEachValidator(func(validator solid.Validator, validatorIndex, total int) bool { + validatorSet.Range(func(validatorIndex int, validator solid.Validator, total int) bool { for i := range weights { - flagsUnslashedIndiciesSet[i][validatorIndex] = state.IsUnslashedParticipatingIndex(s, previousEpoch, uint64(validatorIndex), i) + flagsUnslashedIndiciesSet[i][validatorIndex] = state.IsUnslashedParticipatingIndex(validatorSet, previousEpochPartecipation, previousEpoch, uint64(validatorIndex), i) } return true }) @@ -31,8 +27,10 @@ func GetUnslashedIndiciesSet(s abstract.BeaconState) [][]bool { func ProcessEpoch(s abstract.BeaconState) error { eligibleValidators := state.EligibleValidatorsIndicies(s) // start := time.Now() - - unslashedIndiciesSet := GetUnslashedIndiciesSet(s) + var unslashedIndiciesSet [][]bool + if s.Version() >= clparams.AltairVersion { + unslashedIndiciesSet = GetUnslashedIndiciesSet(s.BeaconConfig(), state.PreviousEpoch(s), s.ValidatorSet(), s.PreviousEpochParticipation()) + } if err := ProcessJustificationBitsAndFinality(s, unslashedIndiciesSet); err != nil { return err } diff --git a/cl/transition/impl/eth2/statechange/process_epoch_test.go b/cl/transition/impl/eth2/statechange/process_epoch_test.go index 18c7377afff..98970ce9985 100644 --- a/cl/transition/impl/eth2/statechange/process_epoch_test.go +++ b/cl/transition/impl/eth2/statechange/process_epoch_test.go @@ -91,7 +91,11 @@ var startingSlashingsResetState []byte func TestProcessRewardsAndPenalties(t *testing.T) { 
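+ // Phase0 states carry no Altair participation flags, so each call site now
+ // guards on the fork version itself and leaves the set nil pre-Altair;
+ // GetUnslashedIndiciesSet no longer performs that check internally.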
runEpochTransitionConsensusTest(t, startingRewardsPenaltyState, expectedRewardsPenaltyState, func(s abstract.BeaconState) error { - return ProcessRewardsAndPenalties(s, state.EligibleValidatorsIndicies(s), GetUnslashedIndiciesSet(s)) + var unslashedIndiciesSet [][]bool + if s.Version() >= clparams.AltairVersion { + unslashedIndiciesSet = GetUnslashedIndiciesSet(s.BeaconConfig(), state.PreviousEpoch(s), s.ValidatorSet(), s.PreviousEpochParticipation()) + } + return ProcessRewardsAndPenalties(s, state.EligibleValidatorsIndicies(s), unslashedIndiciesSet) }) } @@ -161,6 +165,11 @@ var startingInactivityScoresState []byte func TestInactivityScores(t *testing.T) { runEpochTransitionConsensusTest(t, startingInactivityScoresState, expectedInactivityScoresState, func(s abstract.BeaconState) error { - return ProcessInactivityScores(s, state.EligibleValidatorsIndicies(s), GetUnslashedIndiciesSet(s)) + var unslashedIndiciesSet [][]bool + if s.Version() >= clparams.AltairVersion { + unslashedIndiciesSet = GetUnslashedIndiciesSet(s.BeaconConfig(), state.PreviousEpoch(s), s.ValidatorSet(), s.PreviousEpochParticipation()) + } + + return ProcessInactivityScores(s, state.EligibleValidatorsIndicies(s), unslashedIndiciesSet) }) } diff --git a/cmd/bootnode/main.go b/cmd/bootnode/main.go index eedde266ad4..d3ea50e2e6f 100644 --- a/cmd/bootnode/main.go +++ b/cmd/bootnode/main.go @@ -120,7 +120,7 @@ func main() { ctx, cancel := common.RootContext() defer cancel() - db, err := enode.OpenDB(ctx, "" /* path */, "" /* tmpDir */) + db, err := enode.OpenDB(ctx, "" /* path */, "" /* tmpDir */, logger) if err != nil { panic(err) } diff --git a/cmd/capcli/cli.go b/cmd/capcli/cli.go index d6f239d2ca1..4a5be83f9ba 100644 --- a/cmd/capcli/cli.go +++ b/cmd/capcli/cli.go @@ -4,15 +4,17 @@ import ( "context" "fmt" "math" + "net/url" "os" "strings" "time" - "github.com/ledgerwatch/erigon/eth/ethconfig/estimate" + "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/turbo/debug" - lg "github.com/anacrolix/log" libcommon "github.com/ledgerwatch/erigon-lib/common" + + lg "github.com/anacrolix/log" "github.com/ledgerwatch/erigon-lib/direct" downloader3 "github.com/ledgerwatch/erigon-lib/downloader" "github.com/ledgerwatch/erigon-lib/metrics" @@ -30,6 +32,7 @@ import ( persistence2 "github.com/ledgerwatch/erigon/cl/persistence" "github.com/ledgerwatch/erigon/cmd/caplin/caplin1" "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/eth/ethconfig/estimate" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" @@ -76,6 +79,7 @@ var CLI struct { DownloadSnapshots DownloadSnapshots `cmd:"" help:"download snapshots from webseed"` LoopSnapshots LoopSnapshots `cmd:"" help:"loop over snapshots"` RetrieveHistoricalState RetrieveHistoricalState `cmd:"" help:"retrieve historical state from db"` + ChainEndpoint ChainEndpoint `cmd:"" help:"chain endpoint"` } type chainCfg struct { @@ -396,7 +400,9 @@ func (c *Chain) Run(ctx *Context) error { log.Info("Started chain download", "chain", c.Chain) dirs := datadir.New(c.Datadir) - csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, dirs.Snap, log.Root()) + snapshotVersion := snapcfg.KnownCfg(c.Chain, 0).Version + + csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs.Snap, snapshotVersion, log.Root()) rawDB, _ := persistence.AferoRawBeaconBlockChainFromOsPath(beaconConfig, dirs.CaplinHistory) beaconDB, db, err := 
caplin1.OpenCaplinDatabase(ctx, db_config.DatabaseConfiguration{PruneDepth: math.MaxUint64}, beaconConfig, rawDB, dirs.CaplinIndexing, nil, false) @@ -433,13 +439,133 @@ func (c *Chain) Run(ctx *Context) error { } downloader := network.NewBackwardBeaconDownloader(ctx, beacon, db) - cfg := stages.StageHistoryReconstruction(downloader, antiquary.NewAntiquary(ctx, nil, nil, nil, dirs, nil, nil, nil, nil, nil, nil, false, nil), csn, beaconDB, db, nil, genesisConfig, beaconConfig, true, true, bRoot, bs.Slot(), "/tmp", log.Root()) + cfg := stages.StageHistoryReconstruction(downloader, antiquary.NewAntiquary(ctx, nil, nil, nil, dirs, nil, nil, nil, nil, nil, nil, false, false, nil), csn, beaconDB, db, nil, genesisConfig, beaconConfig, true, true, bRoot, bs.Slot(), "/tmp", 300*time.Millisecond, log.Root()) return stages.SpawnStageHistoryDownload(cfg, ctx, log.Root()) } +type ChainEndpoint struct { + Endpoint string `help:"endpoint" default:""` + chainCfg + outputFolder +} + +func (c *ChainEndpoint) Run(ctx *Context) error { + genesisConfig, _, beaconConfig, _, err := clparams.GetConfigsByNetworkName(c.Chain) + if err != nil { + return err + } + log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StderrHandler)) + + dirs := datadir.New(c.Datadir) + rawDB, _ := persistence.AferoRawBeaconBlockChainFromOsPath(beaconConfig, dirs.CaplinHistory) + beaconDB, db, err := caplin1.OpenCaplinDatabase(ctx, db_config.DatabaseConfiguration{PruneDepth: math.MaxUint64}, beaconConfig, rawDB, dirs.CaplinIndexing, nil, false) + if err != nil { + return err + } + defer db.Close() + + baseUri, err := url.JoinPath(c.Endpoint, "eth/v2/beacon/blocks") + if err != nil { + return err + } + log.Info("Hooked", "uri", baseUri) + // Let's fetch the head first + currentBlock, err := core.RetrieveBlock(ctx, beaconConfig, genesisConfig, fmt.Sprintf("%s/head", baseUri), nil) + if err != nil { + return err + } + currentRoot, err := currentBlock.Block.HashSSZ() + if err != nil { + return err + } + tx, err := db.BeginRw(ctx) + if err != nil { + return err + } + defer tx.Rollback() + + log.Info("Starting with", "root", libcommon.Hash(currentRoot), "slot", currentBlock.Block.Slot) + currentRoot = currentBlock.Block.ParentRoot + if err := beaconDB.WriteBlock(ctx, tx, currentBlock, true); err != nil { + return err + } + if err := tx.Commit(); err != nil { + return err + } + previousLogBlock := currentBlock.Block.Slot + + logInterval := time.NewTicker(30 * time.Second) + defer logInterval.Stop() + + loopStep := func() (bool, error) { + tx, err := db.BeginRw(ctx) + if err != nil { + return false, err + } + defer tx.Rollback() + + stringifiedRoot := common.Bytes2Hex(currentRoot[:]) + // Fetch the block with the current root + currentBlock, err := core.RetrieveBlock(ctx, beaconConfig, genesisConfig, fmt.Sprintf("%s/0x%s", baseUri, stringifiedRoot), (*libcommon.Hash)(&currentRoot)) + if err != nil { + return false, err + } + currentRoot, err = currentBlock.Block.HashSSZ() + if err != nil { + return false, err + } + if err := beaconDB.WriteBlock(ctx, tx, currentBlock, true); err != nil { + return false, err + } + currentRoot = currentBlock.Block.ParentRoot + currentSlot := currentBlock.Block.Slot + // Walk back through ancestors already in the db, marking them canonical, until we find a gap + for { + // check if the expected root is in db + slot, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, currentRoot) + if err != nil { + return false, err + } + if slot == nil || *slot == 0 { + break + } + if err := beacon_indicies.MarkRootCanonical(ctx, tx, *slot, currentRoot); err
!= nil { + return false, err + } + currentRoot, err = beacon_indicies.ReadParentBlockRoot(ctx, tx, currentRoot) + if err != nil { + return false, err + } + } + if err := tx.Commit(); err != nil { + return false, err + } + select { + case <-logInterval.C: + // up to 2 decimal places + rate := float64(previousLogBlock-currentSlot) / 30 + log.Info("Successfully processed", "slot", currentSlot, "blk/sec", fmt.Sprintf("%.2f", rate)) + previousLogBlock = currentBlock.Block.Slot + case <-ctx.Done(): + default: + } + return currentSlot != 0, nil + } + var keepGoing bool + for keepGoing, err = loopStep(); keepGoing && err == nil; keepGoing, err = loopStep() { + if !keepGoing { + break + } + } + + return err +} + type DumpSnapshots struct { chainCfg outputFolder + + To uint64 `name:"to" help:"slot to dump"` } func (c *DumpSnapshots) Run(ctx *Context) error { @@ -460,11 +586,17 @@ func (c *DumpSnapshots) Run(ctx *Context) error { } var to uint64 db.View(ctx, func(tx kv.Tx) (err error) { - to, err = beacon_indicies.ReadHighestFinalized(tx) + if c.To == 0 { + to, err = beacon_indicies.ReadHighestFinalized(tx) + return + } + to = c.To return }) - return freezeblocks.DumpBeaconBlocks(ctx, db, beaconDB, 0, to, snaptype.Erigon2RecentMergeLimit, dirs.Tmp, dirs.Snap, estimate.CompressSnapshot.Workers(), log.LvlInfo, log.Root()) + snapshotVersion := snapcfg.KnownCfg(c.Chain, 0).Version + + return freezeblocks.DumpBeaconBlocks(ctx, db, beaconDB, snapshotVersion, 0, to, snaptype.Erigon2MergeLimit, dirs.Tmp, dirs.Snap, estimate.CompressSnapshot.Workers(), log.LvlInfo, log.Root()) } type CheckSnapshots struct { @@ -501,9 +633,10 @@ func (c *CheckSnapshots) Run(ctx *Context) error { return err } - to = (to / snaptype.Erigon2RecentMergeLimit) * snaptype.Erigon2RecentMergeLimit + to = (to / snaptype.Erigon2MergeLimit) * snaptype.Erigon2MergeLimit + snapshotVersion := snapcfg.KnownCfg(c.Chain, 0).Version - csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, dirs.Snap, log.Root()) + csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs.Snap, snapshotVersion, log.Root()) if err := csn.ReopenFolder(); err != nil { return err } @@ -582,9 +715,11 @@ func (c *LoopSnapshots) Run(ctx *Context) error { return err } - to = (to / snaptype.Erigon2RecentMergeLimit) * snaptype.Erigon2RecentMergeLimit + to = (to / snaptype.Erigon2MergeLimit) * snaptype.Erigon2MergeLimit + + snapshotVersion := snapcfg.KnownCfg(c.Chain, 0).Version - csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, dirs.Snap, log.Root()) + csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs.Snap, snapshotVersion, log.Root()) if err := csn.ReopenFolder(); err != nil { return err } @@ -641,7 +776,6 @@ func (d *DownloadSnapshots) Run(ctx *Context) error { if err != nil { return err } - downloaderCfg.DownloadTorrentFilesFromWebseed = true downlo, err := downloader.New(ctx, downloaderCfg, dirs, log.Root(), log.LvlInfo, true) if err != nil { return err @@ -655,7 +789,14 @@ func (d *DownloadSnapshots) Run(ctx *Context) error { if err != nil { return fmt.Errorf("new server: %w", err) } - return snapshotsync.WaitForDownloader("CapCliDownloader", ctx, false, snapshotsync.OnlyCaplin, s, tx, freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.NewSnapCfg(false, false, false), dirs.Snap, log.Root()), freezeblocks.NewBorRoSnapshots(ethconfig.NewSnapCfg(false, false, false), dirs.Snap, log.Root())), params.ChainConfigByChainName(d.Chain), 
direct.NewDownloaderClient(bittorrentServer)) + + snapshotVersion := snapcfg.KnownCfg(d.Chain, 0).Version + + return snapshotsync.WaitForDownloader(ctx, "CapCliDownloader", false, snapshotsync.OnlyCaplin, s, tx, + freezeblocks.NewBlockReader( + freezeblocks.NewRoSnapshots(ethconfig.NewSnapCfg(false, false, false), dirs.Snap, snapshotVersion, log.Root()), + freezeblocks.NewBorRoSnapshots(ethconfig.NewSnapCfg(false, false, false), dirs.Snap, snapshotVersion, log.Root())), + params.ChainConfigByChainName(d.Chain), direct.NewDownloaderClient(bittorrentServer)) } type RetrieveHistoricalState struct { @@ -684,19 +825,20 @@ func (r *RetrieveHistoricalState) Run(ctx *Context) error { return err } defer tx.Rollback() - allSnapshots := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{}, dirs.Snap, log.Root()) + snapshotVersion := snapcfg.KnownCfg(r.Chain, 0).Version + + allSnapshots := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{}, dirs.Snap, snapshotVersion, log.Root()) if err := allSnapshots.ReopenFolder(); err != nil { return err } if err := state_accessors.ReadValidatorsTable(tx, vt); err != nil { return err } - fmt.Println(allSnapshots.BlocksAvailable(), allSnapshots.Dir()) var bor *freezeblocks.BorRoSnapshots blockReader := freezeblocks.NewBlockReader(allSnapshots, bor) - eth1Getter := getters.NewExecutionSnapshotReader(ctx, blockReader, db) - csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, dirs.Snap, log.Root()) + eth1Getter := getters.NewExecutionSnapshotReader(ctx, beaconConfig, blockReader, db) + csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs.Snap, snapshotVersion, log.Root()) if err := csn.ReopenFolder(); err != nil { return err } @@ -712,6 +854,16 @@ func (r *RetrieveHistoricalState) Run(ctx *Context) error { if err != nil { return err } + endTime := time.Since(start) + hRoot, err := haveState.HashSSZ() + if err != nil { + return err + } + log.Info("Got state", "slot", haveState.Slot(), "root", libcommon.Hash(hRoot), "elapsed", endTime) + + if err := haveState.InitBeaconState(); err != nil { + return err + } v := haveState.Version() // encode and decode the state @@ -723,12 +875,10 @@ func (r *RetrieveHistoricalState) Run(ctx *Context) error { if err := haveState.DecodeSSZ(enc, int(v)); err != nil { return err } - endTime := time.Since(start) - hRoot, err := haveState.HashSSZ() + hRoot, err = haveState.HashSSZ() if err != nil { return err } - log.Info("Got state", "slot", haveState.Slot(), "root", libcommon.Hash(hRoot), "elapsed", endTime) if r.CompareFile == "" { return nil } diff --git a/cmd/caplin/caplin1/run.go b/cmd/caplin/caplin1/run.go index 0aba101d066..f236622a0f1 100644 --- a/cmd/caplin/caplin1/run.go +++ b/cmd/caplin/caplin1/run.go @@ -26,6 +26,7 @@ import ( "github.com/ledgerwatch/erigon/cl/persistence/db_config" "github.com/ledgerwatch/erigon/cl/persistence/format/snapshot_format" state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state" + "github.com/ledgerwatch/erigon/cl/persistence/state/historical_states_reader" "github.com/ledgerwatch/erigon/cl/phase1/core/state" "github.com/ledgerwatch/erigon/cl/phase1/execution_client" "github.com/ledgerwatch/erigon/cl/phase1/forkchoice" @@ -86,13 +87,10 @@ func OpenCaplinDatabase(ctx context.Context, func RunCaplinPhase1(ctx context.Context, sentinel sentinel.SentinelClient, engine execution_client.ExecutionEngine, beaconConfig *clparams.BeaconChainConfig, genesisConfig *clparams.GenesisConfig, state *state.CachingBeaconState, - caplinFreezer 
freezer.Freezer, dirs datadir.Dirs, cfg beacon_router_configuration.RouterConfiguration, eth1Getter snapshot_format.ExecutionBlockReaderByNumber, - snDownloader proto_downloader.DownloaderClient, backfilling bool, states bool) error { + caplinFreezer freezer.Freezer, dirs datadir.Dirs, snapshotVersion uint8, cfg beacon_router_configuration.RouterConfiguration, eth1Getter snapshot_format.ExecutionBlockReaderByNumber, + snDownloader proto_downloader.DownloaderClient, backfilling bool, states bool, historyDB persistence.BeaconChainDatabase, indexDB kv.RwDB) error { rawDB, af := persistence.AferoRawBeaconBlockChainFromOsPath(beaconConfig, dirs.CaplinHistory) - beaconDB, db, err := OpenCaplinDatabase(ctx, db_config.DefaultDatabaseConfiguration, beaconConfig, rawDB, dirs.CaplinIndexing, engine, false) - if err != nil { - return err - } + ctx, cn := context.WithCancel(ctx) defer cn() @@ -100,8 +98,8 @@ func RunCaplinPhase1(ctx context.Context, sentinel sentinel.SentinelClient, engi logger := log.New("app", "caplin") - csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, dirs.Snap, logger) - rcsn := freezeblocks.NewBeaconSnapshotReader(csn, eth1Getter, beaconDB, beaconConfig) + csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs.Snap, snapshotVersion, logger) + rcsn := freezeblocks.NewBeaconSnapshotReader(csn, eth1Getter, historyDB, beaconConfig) if caplinFreezer != nil { if err := freezer2.PutObjectSSZIntoFreezer("beaconState", "caplin_core", 0, state, caplinFreezer); err != nil { @@ -113,7 +111,7 @@ func RunCaplinPhase1(ctx context.Context, sentinel sentinel.SentinelClient, engi caplinFcuPath := path.Join(dirs.Tmp, "caplin-forkchoice") os.RemoveAll(caplinFcuPath) - err = os.MkdirAll(caplinFcuPath, 0o755) + err := os.MkdirAll(caplinFcuPath, 0o755) if err != nil { return err } @@ -148,21 +146,6 @@ func RunCaplinPhase1(ctx context.Context, sentinel sentinel.SentinelClient, engi }() } - syncedDataManager := synced_data.NewSyncedDataManager(cfg.Active, beaconConfig) - if cfg.Active { - apiHandler := handler.NewApiHandler(genesisConfig, beaconConfig, rawDB, db, forkChoice, pool, rcsn, syncedDataManager) - headApiHandler := &validatorapi.ValidatorApiHandler{ - FC: forkChoice, - BeaconChainCfg: beaconConfig, - GenesisCfg: genesisConfig, - } - go beacon.ListenAndServe(&beacon.LayeredBeaconHandler{ - ValidatorApi: headApiHandler, - ArchiveApi: apiHandler, - }, cfg) - log.Info("Beacon API started", "addr", cfg.Address) - } - { // start the gossip manager go gossipManager.Start(ctx) logger.Info("Started Ethereum 2.0 Gossip Service") @@ -184,7 +167,7 @@ func RunCaplinPhase1(ctx context.Context, sentinel sentinel.SentinelClient, engi }() } - tx, err := db.BeginRw(ctx) + tx, err := indexDB.BeginRw(ctx) if err != nil { return err } @@ -214,7 +197,7 @@ func RunCaplinPhase1(ctx context.Context, sentinel sentinel.SentinelClient, engi if err != nil { return err } - antiq := antiquary.NewAntiquary(ctx, genesisState, vTables, beaconConfig, dirs, snDownloader, db, csn, rcsn, beaconDB, logger, states, af) + antiq := antiquary.NewAntiquary(ctx, genesisState, vTables, beaconConfig, dirs, snDownloader, indexDB, csn, rcsn, historyDB, logger, states, backfilling, af) // Create the antiquary go func() { if err := antiq.Loop(); err != nil { @@ -226,11 +209,27 @@ func RunCaplinPhase1(ctx context.Context, sentinel sentinel.SentinelClient, engi return err } - stageCfg := stages.ClStagesCfg(beaconRpc, antiq, genesisConfig, beaconConfig, state, engine, gossipManager, forkChoice, 
beaconDB, db, csn, dirs.Tmp, dbConfig, backfilling, syncedDataManager) + statesReader := historical_states_reader.NewHistoricalStatesReader(beaconConfig, rcsn, vTables, af, genesisState) + syncedDataManager := synced_data.NewSyncedDataManager(cfg.Active, beaconConfig) + if cfg.Active { + apiHandler := handler.NewApiHandler(genesisConfig, beaconConfig, rawDB, indexDB, forkChoice, pool, rcsn, syncedDataManager, statesReader) + headApiHandler := &validatorapi.ValidatorApiHandler{ + FC: forkChoice, + BeaconChainCfg: beaconConfig, + GenesisCfg: genesisConfig, + } + go beacon.ListenAndServe(&beacon.LayeredBeaconHandler{ + ValidatorApi: headApiHandler, + ArchiveApi: apiHandler, + }, cfg) + log.Info("Beacon API started", "addr", cfg.Address) + } + + stageCfg := stages.ClStagesCfg(beaconRpc, antiq, genesisConfig, beaconConfig, state, engine, gossipManager, forkChoice, historyDB, indexDB, csn, dirs.Tmp, dbConfig, backfilling, syncedDataManager) sync := stages.ConsensusClStages(ctx, stageCfg) logger.Info("[Caplin] starting clstages loop") - err = sync.StartWithStage(ctx, "WaitForPeers", logger, stageCfg) + err = sync.StartWithStage(ctx, "DownloadHistoricalBlocks", logger, stageCfg) logger.Info("[Caplin] exiting clstages loop") if err != nil { return err diff --git a/cmd/caplin/main.go b/cmd/caplin/main.go index 91b3c0b35e4..00457207ebb 100644 --- a/cmd/caplin/main.go +++ b/cmd/caplin/main.go @@ -16,10 +16,13 @@ import ( "fmt" "os" + "github.com/ledgerwatch/erigon-lib/chain/snapcfg" "github.com/ledgerwatch/erigon/cl/beacon/beacon_router_configuration" "github.com/ledgerwatch/erigon/cl/cltypes" "github.com/ledgerwatch/erigon/cl/fork" freezer2 "github.com/ledgerwatch/erigon/cl/freezer" + "github.com/ledgerwatch/erigon/cl/persistence" + "github.com/ledgerwatch/erigon/cl/persistence/db_config" "github.com/ledgerwatch/erigon/cl/phase1/core" "github.com/ledgerwatch/erigon/cl/phase1/core/state" execution_client2 "github.com/ledgerwatch/erigon/cl/phase1/execution_client" @@ -87,7 +90,7 @@ func runCaplinNode(cliCtx *cli.Context) error { NetworkConfig: cfg.NetworkCfg, BeaconConfig: cfg.BeaconCfg, NoDiscovery: cfg.NoDiscovery, - }, nil, &service.ServerConfig{Network: cfg.ServerProtocol, Addr: cfg.ServerAddr}, nil, &cltypes.Status{ + }, nil, nil, &service.ServerConfig{Network: cfg.ServerProtocol, Addr: cfg.ServerAddr}, nil, &cltypes.Status{ ForkDigest: forkDigest, FinalizedRoot: state.FinalizedCheckpoint().BlockRoot(), FinalizedEpoch: state.FinalizedCheckpoint().Epoch(), @@ -120,13 +123,20 @@ func runCaplinNode(cliCtx *cli.Context) error { Root: cfg.RecordDir, } } + rawBeaconBlockChainDb, _ := persistence.AferoRawBeaconBlockChainFromOsPath(cfg.BeaconCfg, cfg.Dirs.CaplinHistory) + historyDB, indiciesDB, err := caplin1.OpenCaplinDatabase(ctx, db_config.DefaultDatabaseConfiguration, cfg.BeaconCfg, rawBeaconBlockChainDb, cfg.Dirs.CaplinIndexing, executionEngine, false) + if err != nil { + return err + } + + snapshotVersion := snapcfg.KnownCfg(cliCtx.String(utils.ChainFlag.Name), 0).Version - return caplin1.RunCaplinPhase1(ctx, sentinel, executionEngine, cfg.BeaconCfg, cfg.GenesisCfg, state, caplinFreezer, cfg.Dirs, beacon_router_configuration.RouterConfiguration{ + return caplin1.RunCaplinPhase1(ctx, sentinel, executionEngine, cfg.BeaconCfg, cfg.GenesisCfg, state, caplinFreezer, cfg.Dirs, snapshotVersion, beacon_router_configuration.RouterConfiguration{ Protocol: cfg.BeaconProtocol, Address: cfg.BeaconAddr, ReadTimeTimeout: cfg.BeaconApiReadTimeout, WriteTimeout: cfg.BeaconApiWriteTimeout, IdleTimeout: 
cfg.BeaconApiWriteTimeout, Active: !cfg.NoBeaconApi, - }, nil, nil, false, false) + }, nil, nil, false, false, historyDB, indiciesDB) } diff --git a/cmd/devnet/args/node_args.go b/cmd/devnet/args/node_args.go index 50c73c0e96f..566b90a5009 100644 --- a/cmd/devnet/args/node_args.go +++ b/cmd/devnet/args/node_args.go @@ -182,18 +182,18 @@ func (n *BlockProducer) IsBlockProducer() bool { return true } -type NonBlockProducer struct { +type BlockConsumer struct { NodeArgs HttpApi string `arg:"--http.api" default:"admin,eth,debug,net,trace,web3,erigon,txpool" json:"http.api"` TorrentPort string `arg:"--torrent.port" default:"42070" json:"torrent.port"` NoDiscover string `arg:"--nodiscover" flag:"" default:"true" json:"nodiscover"` } -func (n *NonBlockProducer) IsBlockProducer() bool { +func (n *BlockConsumer) IsBlockProducer() bool { return false } -func (n *NonBlockProducer) Account() *accounts.Account { +func (n *BlockConsumer) Account() *accounts.Account { return nil } diff --git a/cmd/devnet/args/node_args_test.go b/cmd/devnet/args/node_args_test.go index a67370b19ea..d6247bd56ec 100644 --- a/cmd/devnet/args/node_args_test.go +++ b/cmd/devnet/args/node_args_test.go @@ -36,7 +36,7 @@ func TestNodeArgs(t *testing.T) { t.Fatal(asMap, "not found") } - nodeArgs, _ = args.AsArgs(args.NonBlockProducer{ + nodeArgs, _ = args.AsArgs(args.BlockConsumer{ NodeArgs: args.NodeArgs{ DataDir: filepath.Join("data", fmt.Sprintf("%d", 2)), StaticPeers: "enode", diff --git a/cmd/devnet/devnet/context.go b/cmd/devnet/devnet/context.go index 54d9faccbc7..b26c4b5fde1 100644 --- a/cmd/devnet/devnet/context.go +++ b/cmd/devnet/devnet/context.go @@ -145,6 +145,10 @@ func CurrentNetwork(ctx context.Context) *Network { } } + if devnet, ok := ctx.Value(ckDevnet).(Devnet); ok { + return devnet.SelectNetwork(ctx, 0) + } + return nil } diff --git a/cmd/devnet/main.go b/cmd/devnet/main.go index 69f66e7a795..655d0d88aff 100644 --- a/cmd/devnet/main.go +++ b/cmd/devnet/main.go @@ -10,6 +10,7 @@ import ( "syscall" "time" + "github.com/ledgerwatch/erigon/cmd/devnet/networks" "github.com/ledgerwatch/erigon/cmd/devnet/services" "github.com/ledgerwatch/erigon/cmd/devnet/services/polygon" @@ -23,7 +24,6 @@ import ( "github.com/ledgerwatch/erigon/cmd/devnet/devnetutils" "github.com/ledgerwatch/erigon/cmd/devnet/requests" "github.com/ledgerwatch/erigon/cmd/devnet/scenarios" - "github.com/ledgerwatch/erigon/cmd/devnet/tests" "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon/cmd/utils/flags" @@ -119,6 +119,18 @@ var ( Usage: "internal flag", } + txCountFlag = cli.IntFlag{ + Name: "txcount", + Usage: "Transaction count (scenario dependent - may be total or recurring)", + Value: 100, + } + + BlockProducersFlag = cli.UintFlag{ + Name: "block-producers", + Usage: "The number of block producers to instantiate in the network", + Value: 1, + } + WaitFlag = cli.BoolFlag{ Name: "wait", Usage: "Wait until interrupted after all scenarios have run", @@ -156,6 +168,8 @@ func main() { &insecureFlag, &metricsURLsFlag, &WaitFlag, + &txCountFlag, + &BlockProducersFlag, &logging.LogVerbosityFlag, &logging.LogConsoleVerbosityFlag, &logging.LogDirVerbosityFlag, @@ -175,7 +189,7 @@ func setupLogger(ctx *cli.Context) (log.Logger, error) { return nil, err } - logger := logging.SetupLoggerCtx("devnet", ctx, false /* rootLogger */) + logger := logging.SetupLoggerCtx("devnet", ctx, log.LvlInfo, log.LvlInfo, false /* rootLogger */) // Make root logger fail log.Root().SetHandler(PanicHandler{}) @@ -241,7 +255,8 @@ func mainContext(ctx *cli.Context)
error { go connectDiagnosticsIfEnabled(ctx, logger) enabledScenarios := strings.Split(ctx.String(ScenariosFlag.Name), ",") - if err = allScenarios(runCtx).Run(runCtx, enabledScenarios...); err != nil { + + if err = allScenarios(ctx, runCtx).Run(runCtx, enabledScenarios...); err != nil { return err } @@ -256,7 +271,7 @@ func mainContext(ctx *cli.Context) error { return nil } -func allScenarios(runCtx devnet.Context) scenarios.Scenarios { +func allScenarios(cliCtx *cli.Context, runCtx devnet.Context) scenarios.Scenarios { // unsubscribe from all the subscriptions made defer services.UnsubscribeAll() @@ -313,6 +328,11 @@ func allScenarios(runCtx devnet.Context) scenarios.Scenarios { //{Text: "BatchProcessTransfers", Args: []any{"child-funder", 1, 10, 2, 2}}, }, }, + "block-production": { + Steps: []*scenarios.Step{ + {Text: "SendTxLoad", Args: []any{recipientAddress, accounts.DevAddress, sendValue, cliCtx.Uint(txCountFlag.Name)}}, + }, + }, } } @@ -321,21 +341,22 @@ func initDevnet(ctx *cli.Context, logger log.Logger) (devnet.Devnet, error) { chainName := ctx.String(ChainFlag.Name) baseRpcHost := ctx.String(BaseRpcHostFlag.Name) baseRpcPort := ctx.Int(BaseRpcPortFlag.Name) + producerCount := int(ctx.Uint(BlockProducersFlag.Name)) switch chainName { case networkname.BorDevnetChainName: if ctx.Bool(WithoutHeimdallFlag.Name) { - return tests.NewBorDevnetWithoutHeimdall(dataDir, baseRpcHost, baseRpcPort, logger), nil + return networks.NewBorDevnetWithoutHeimdall(dataDir, baseRpcHost, baseRpcPort, logger), nil } else if ctx.Bool(LocalHeimdallFlag.Name) { heimdallGrpcAddr := ctx.String(HeimdallGrpcAddressFlag.Name) sprintSize := uint64(ctx.Int(BorSprintSizeFlag.Name)) - return tests.NewBorDevnetWithLocalHeimdall(dataDir, baseRpcHost, baseRpcPort, heimdallGrpcAddr, sprintSize, logger), nil + return networks.NewBorDevnetWithLocalHeimdall(dataDir, baseRpcHost, baseRpcPort, heimdallGrpcAddr, sprintSize, producerCount, logger), nil } else { - return tests.NewBorDevnetWithRemoteHeimdall(dataDir, baseRpcHost, baseRpcPort, logger), nil + return networks.NewBorDevnetWithRemoteHeimdall(dataDir, baseRpcHost, baseRpcPort, producerCount, logger), nil } case networkname.DevChainName: - return tests.NewDevDevnet(dataDir, baseRpcHost, baseRpcPort, logger), nil + return networks.NewDevDevnet(dataDir, baseRpcHost, baseRpcPort, producerCount, logger), nil default: return nil, fmt.Errorf("unknown network: '%s'", chainName) diff --git a/cmd/devnet/tests/devnet_bor.go b/cmd/devnet/networks/devnet_bor.go similarity index 87% rename from cmd/devnet/tests/devnet_bor.go rename to cmd/devnet/networks/devnet_bor.go index 003c662742b..af9b1f218ec 100644 --- a/cmd/devnet/tests/devnet_bor.go +++ b/cmd/devnet/networks/devnet_bor.go @@ -1,4 +1,4 @@ -package tests +package networks import ( "time" @@ -47,7 +47,7 @@ func NewBorDevnetWithoutHeimdall( }, AccountSlots: 200, }, - &args.NonBlockProducer{ + &args.BlockConsumer{ NodeArgs: args.NodeArgs{ ConsoleVerbosity: "0", DirVerbosity: "5", @@ -67,6 +67,7 @@ func NewBorDevnetWithHeimdall( heimdall *polygon.Heimdall, heimdallGrpcAddr string, checkpointOwner *accounts.Account, + producerCount int, withMilestones bool, logger log.Logger, ) devnet.Devnet { @@ -77,6 +78,23 @@ func NewBorDevnetWithHeimdall( services = append(services, heimdall) } + var nodes []devnet.Node + + if producerCount == 0 { + producerCount++ + } + + for i := 0; i < producerCount; i++ { + nodes = append(nodes, &args.BlockProducer{ + NodeArgs: args.NodeArgs{ + ConsoleVerbosity: "0", + DirVerbosity: "5", + 
HeimdallGrpcAddr: heimdallGrpcAddr, + }, + AccountSlots: 20000, + }) + } + borNetwork := devnet.Network{ DataDir: dataDir, Chain: networkname.BorDevnetChainName, @@ -91,39 +109,14 @@ func NewBorDevnetWithHeimdall( Alloc: types.GenesisAlloc{ faucetSource.Address: {Balance: accounts.EtherAmount(200_000)}, }, - Nodes: []devnet.Node{ - &args.BlockProducer{ + Nodes: append(nodes, + &args.BlockConsumer{ NodeArgs: args.NodeArgs{ ConsoleVerbosity: "0", DirVerbosity: "5", HeimdallGrpcAddr: heimdallGrpcAddr, }, - AccountSlots: 200, - }, - &args.BlockProducer{ - NodeArgs: args.NodeArgs{ - ConsoleVerbosity: "0", - DirVerbosity: "5", - HeimdallGrpcAddr: heimdallGrpcAddr, - }, - AccountSlots: 200, - }, - /*&args.BlockProducer{ - Node: args.Node{ - ConsoleVerbosity: "0", - DirVerbosity: "5", - HeimdallGrpcAddr: heimdallGrpcAddr, - }, - AccountSlots: 200, - },*/ - &args.NonBlockProducer{ - NodeArgs: args.NodeArgs{ - ConsoleVerbosity: "0", - DirVerbosity: "5", - HeimdallGrpcAddr: heimdallGrpcAddr, - }, - }, - }, + }), } devNetwork := devnet.Network{ @@ -150,7 +143,7 @@ func NewBorDevnetWithHeimdall( DevPeriod: 5, AccountSlots: 200, }, - &args.NonBlockProducer{ + &args.BlockConsumer{ NodeArgs: args.NodeArgs{ ConsoleVerbosity: "0", DirVerbosity: "3", @@ -169,6 +162,7 @@ func NewBorDevnetWithRemoteHeimdall( dataDir string, baseRpcHost string, baseRpcPort int, + producerCount int, logger log.Logger, ) devnet.Devnet { heimdallGrpcAddr := "" @@ -181,6 +175,7 @@ func NewBorDevnetWithRemoteHeimdall( nil, heimdallGrpcAddr, checkpointOwner, + producerCount, withMilestones, logger) } @@ -191,6 +186,7 @@ func NewBorDevnetWithLocalHeimdall( baseRpcPort int, heimdallGrpcAddr string, sprintSize uint64, + producerCount int, logger log.Logger, ) devnet.Devnet { config := *params.BorDevnetChainConfig @@ -216,6 +212,7 @@ func NewBorDevnetWithLocalHeimdall( heimdall, heimdallGrpcAddr, checkpointOwner, + producerCount, // milestones are not supported yet on the local heimdall false, logger) diff --git a/cmd/devnet/tests/devnet_dev.go b/cmd/devnet/networks/devnet_dev.go similarity index 76% rename from cmd/devnet/tests/devnet_dev.go rename to cmd/devnet/networks/devnet_dev.go index f4aeed1d0f7..1429805400d 100644 --- a/cmd/devnet/tests/devnet_dev.go +++ b/cmd/devnet/networks/devnet_dev.go @@ -1,4 +1,4 @@ -package tests +package networks import ( "github.com/ledgerwatch/erigon-lib/chain/networkname" @@ -14,10 +14,27 @@ func NewDevDevnet( dataDir string, baseRpcHost string, baseRpcPort int, + producerCount int, logger log.Logger, ) devnet.Devnet { faucetSource := accounts.NewAccount("faucet-source") + var nodes []devnet.Node + + if producerCount == 0 { + producerCount++ + } + + for i := 0; i < producerCount; i++ { + nodes = append(nodes, &args.BlockProducer{ + NodeArgs: args.NodeArgs{ + ConsoleVerbosity: "0", + DirVerbosity: "5", + }, + AccountSlots: 200, + }) + } + network := devnet.Network{ DataDir: dataDir, Chain: networkname.DevChainName, @@ -32,21 +49,13 @@ func NewDevDevnet( account_services.NewFaucet(networkname.DevChainName, faucetSource), }, MaxNumberOfEmptyBlockChecks: 30, - Nodes: []devnet.Node{ - &args.BlockProducer{ + Nodes: append(nodes, + &args.BlockConsumer{ NodeArgs: args.NodeArgs{ ConsoleVerbosity: "0", DirVerbosity: "5", }, - AccountSlots: 200, - }, - &args.NonBlockProducer{ - NodeArgs: args.NodeArgs{ - ConsoleVerbosity: "0", - DirVerbosity: "5", - }, - }, - }, + }), } return devnet.Devnet{&network} diff --git a/cmd/devnet/services/polygon/heimdall.go b/cmd/devnet/services/polygon/heimdall.go index 
0581a703949..11529c93c14 100644 --- a/cmd/devnet/services/polygon/heimdall.go +++ b/cmd/devnet/services/polygon/heimdall.go @@ -202,7 +202,7 @@ func (h *Heimdall) FetchCheckpointCount(ctx context.Context) (int64, error) { return 0, fmt.Errorf("TODO") } -func (h *Heimdall) FetchMilestone(ctx context.Context) (*milestone.Milestone, error) { +func (h *Heimdall) FetchMilestone(ctx context.Context, number int64) (*milestone.Milestone, error) { return nil, fmt.Errorf("TODO") } diff --git a/cmd/devnet/tests/context.go b/cmd/devnet/tests/context.go index 7a1a27f645b..7f9ead04583 100644 --- a/cmd/devnet/tests/context.go +++ b/cmd/devnet/tests/context.go @@ -4,17 +4,19 @@ import ( "fmt" "os" "runtime" + "strconv" "testing" "github.com/ledgerwatch/erigon-lib/chain/networkname" "github.com/ledgerwatch/erigon/cmd/devnet/devnet" + "github.com/ledgerwatch/erigon/cmd/devnet/networks" "github.com/ledgerwatch/erigon/cmd/devnet/services" "github.com/ledgerwatch/erigon/cmd/devnet/services/polygon" "github.com/ledgerwatch/erigon/turbo/debug" "github.com/ledgerwatch/log/v3" ) -func initDevnet(chainName string, dataDir string, logger log.Logger) (devnet.Devnet, error) { +func initDevnet(chainName string, dataDir string, producerCount int, logger log.Logger) (devnet.Devnet, error) { const baseRpcHost = "localhost" const baseRpcPort = 8545 @@ -22,17 +24,17 @@ func initDevnet(chainName string, dataDir string, logger log.Logger) (devnet.Dev case networkname.BorDevnetChainName: heimdallGrpcAddr := polygon.HeimdallGrpcAddressDefault const sprintSize uint64 = 0 - return NewBorDevnetWithLocalHeimdall(dataDir, baseRpcHost, baseRpcPort, heimdallGrpcAddr, sprintSize, logger), nil + return networks.NewBorDevnetWithLocalHeimdall(dataDir, baseRpcHost, baseRpcPort, heimdallGrpcAddr, sprintSize, producerCount, logger), nil case networkname.DevChainName: - return NewDevDevnet(dataDir, baseRpcHost, baseRpcPort, logger), nil + return networks.NewDevDevnet(dataDir, baseRpcHost, baseRpcPort, producerCount, logger), nil case "": envChainName, _ := os.LookupEnv("DEVNET_CHAIN") if envChainName == "" { envChainName = networkname.DevChainName } - return initDevnet(envChainName, dataDir, logger) + return initDevnet(envChainName, dataDir, producerCount, logger) default: return nil, fmt.Errorf("unknown network: '%s'", chainName) @@ -48,8 +50,15 @@ func ContextStart(t *testing.T, chainName string) (devnet.Context, error) { logger := log.New() dataDir := t.TempDir() + envProducerCount, _ := os.LookupEnv("PRODUCER_COUNT") + if envProducerCount == "" { + envProducerCount = "1" + } + + producerCount, _ := strconv.ParseUint(envProducerCount, 10, 64) + var network devnet.Devnet - network, err := initDevnet(chainName, dataDir, logger) + network, err := initDevnet(chainName, dataDir, int(producerCount), logger) if err != nil { return nil, fmt.Errorf("ContextStart initDevnet failed: %w", err) } diff --git a/cmd/devnet/transactions/tx.go b/cmd/devnet/transactions/tx.go index f56775094e9..dcaafd43865 100644 --- a/cmd/devnet/transactions/tx.go +++ b/cmd/devnet/transactions/tx.go @@ -27,6 +27,7 @@ func init() { scenarios.StepHandler(CheckTxPoolContent), scenarios.StepHandler(SendTxWithDynamicFee), scenarios.StepHandler(AwaitBlocks), + scenarios.StepHandler(SendTxLoad), ) } @@ -93,7 +94,7 @@ func SendTxWithDynamicFee(ctx context.Context, to, from string, amount uint64) ( // get the latest nonce for the next transaction logger := devnet.Logger(ctx) - lowerThanBaseFeeTxs, higherThanBaseFeeTxs, err := CreateManyEIP1559TransactionsRefWithBaseFee2(ctx, to, 
from) + lowerThanBaseFeeTxs, higherThanBaseFeeTxs, err := CreateManyEIP1559TransactionsRefWithBaseFee2(ctx, to, from, 200) if err != nil { logger.Error("failed CreateManyEIP1559TransactionsRefWithBaseFee", "error", err) return nil, err @@ -112,7 +113,7 @@ func SendTxWithDynamicFee(ctx context.Context, to, from string, amount uint64) ( return nil, err } - CheckTxPoolContent(ctx, 100, 0, 100) + CheckTxPoolContent(ctx, len(higherThanBaseFeeHashlist), 0, len(lowerThanBaseFeeHashlist)) CheckTxPoolContent(ctx, -1, -1, -1) @@ -125,6 +126,55 @@ func SendTxWithDynamicFee(ctx context.Context, to, from string, amount uint64) ( return append(lowerThanBaseFeeHashlist, higherThanBaseFeeHashlist...), nil } +func SendTxLoad(ctx context.Context, to, from string, amount uint64, txPerSec uint) error { + logger := devnet.Logger(ctx) + + batchCount := txPerSec / 4 + + if batchCount < 1 { + batchCount = 1 + } + + ms250 := 250 * time.Millisecond + + for { + start := time.Now() + + lowtx, hightx, err := CreateManyEIP1559TransactionsRefWithBaseFee2(ctx, to, from, int(batchCount)) + + if err != nil { + logger.Error("failed Create Txs", "error", err) + return err + } + + _, err = SendManyTransactions(ctx, lowtx) + + if err != nil { + logger.Error("failed SendManyTransactions(lowerThanBaseFeeTxs)", "error", err) + return err + } + + _, err = SendManyTransactions(ctx, hightx) + + if err != nil { + logger.Error("failed SendManyTransactions(higherThanBaseFeeTxs)", "error", err) + return err + } + + select { + case <-ctx.Done(): + return nil + default: + } + + duration := time.Since(start) + + if duration < ms250 { + time.Sleep(ms250 - duration) + } + } +} + func AwaitBlocks(ctx context.Context, sleepTime time.Duration) error { logger := devnet.Logger(ctx) @@ -154,7 +204,6 @@ func AwaitBlocks(ctx context.Context, sleepTime time.Duration) error { } const gasPrice = 912_345_678 -const gasAmount = 875_000_000 func CreateManyEIP1559TransactionsRefWithBaseFee(ctx context.Context, to, from string, logger log.Logger) ([]types.Transaction, []types.Transaction, error) { toAddress := libcommon.HexToAddress(to) @@ -177,7 +226,7 @@ func CreateManyEIP1559TransactionsRefWithBaseFee(ctx context.Context, to, from s return lowerBaseFeeTransactions, higherBaseFeeTransactions, nil } -func CreateManyEIP1559TransactionsRefWithBaseFee2(ctx context.Context, to, from string) ([]types.Transaction, []types.Transaction, error) { +func CreateManyEIP1559TransactionsRefWithBaseFee2(ctx context.Context, to, from string, count int) ([]types.Transaction, []types.Transaction, error) { toAddress := libcommon.HexToAddress(to) fromAddress := libcommon.HexToAddress(from) @@ -188,7 +237,10 @@ func CreateManyEIP1559TransactionsRefWithBaseFee2(ctx context.Context, to, from devnet.Logger(ctx).Info("BaseFeePerGas2", "val", baseFeePerGas) - lowerBaseFeeTransactions, higherBaseFeeTransactions, err := signEIP1559TxsLowerAndHigherThanBaseFee2(ctx, 100, 100, baseFeePerGas, toAddress, fromAddress) + lower := count - devnetutils.RandomInt(count) + higher := count - lower + + lowerBaseFeeTransactions, higherBaseFeeTransactions, err := signEIP1559TxsLowerAndHigherThanBaseFee2(ctx, lower, higher, baseFeePerGas, toAddress, fromAddress) if err != nil { return nil, nil, fmt.Errorf("failed signEIP1559TxsLowerAndHigherThanBaseFee2: %v", err) @@ -207,7 +259,7 @@ func CreateTransaction(node devnet.Node, to, from string, value uint64) (types.T if strings.HasPrefix(to, "0x") { toAddress = libcommon.HexToAddress(from) } else { - return nil, libcommon.Address{}, fmt.Errorf("Unknown
to account: %s", to) + return nil, libcommon.Address{}, fmt.Errorf("unknown to account: %s", to) } } else { toAddress = toAccount.Address @@ -216,7 +268,7 @@ func CreateTransaction(node devnet.Node, to, from string, value uint64) (types.T fromAccount := accounts.GetAccount(from) if fromAccount == nil { - return nil, libcommon.Address{}, fmt.Errorf("Unknown from account: %s", from) + return nil, libcommon.Address{}, fmt.Errorf("unknown from account: %s", from) } res, err := node.GetTransactionCount(fromAccount.Address, rpc.PendingBlock) diff --git a/cmd/downloader/main.go b/cmd/downloader/main.go index 7dc310a0aa6..7db4324abb6 100644 --- a/cmd/downloader/main.go +++ b/cmd/downloader/main.go @@ -20,7 +20,7 @@ import ( "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/common/dir" "github.com/ledgerwatch/erigon-lib/downloader" - downloadercfg2 "github.com/ledgerwatch/erigon-lib/downloader/downloadercfg" + "github.com/ledgerwatch/erigon-lib/downloader/downloadercfg" "github.com/ledgerwatch/erigon-lib/downloader/snaptype" proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader" "github.com/ledgerwatch/erigon-lib/kv" @@ -106,6 +106,9 @@ func init() { rootCmd.AddCommand(torrentCat) rootCmd.AddCommand(torrentMagnet) + withDataDir(manifestCmd) + rootCmd.AddCommand(manifestCmd) + withDataDir(printTorrentHashes) printTorrentHashes.PersistentFlags().BoolVar(&forceRebuild, "rebuild", false, "Force re-create .torrent files") printTorrentHashes.Flags().StringVar(&targetFile, "targetfile", "", "write output to file") @@ -161,7 +164,7 @@ func Downloader(ctx context.Context, logger log.Logger) error { if err := checkChainName(ctx, dirs, chain); err != nil { return err } - torrentLogLevel, _, err := downloadercfg2.Int2LogLevel(torrentVerbosity) + torrentLogLevel, _, err := downloadercfg.Int2LogLevel(torrentVerbosity) if err != nil { return err } @@ -183,7 +186,7 @@ func Downloader(ctx context.Context, logger log.Logger) error { if known, ok := snapcfg.KnownWebseeds[chain]; ok { webseedsList = append(webseedsList, known...) } - cfg, err := downloadercfg2.New(dirs, version, torrentLogLevel, downloadRate, uploadRate, torrentPort, torrentConnsPerFile, torrentDownloadSlots, staticPeers, webseedsList, chain) + cfg, err := downloadercfg.New(dirs, version, torrentLogLevel, downloadRate, uploadRate, torrentPort, torrentConnsPerFile, torrentDownloadSlots, staticPeers, webseedsList, chain) if err != nil { return err } @@ -198,7 +201,8 @@ func Downloader(ctx context.Context, logger log.Logger) error { } downloadernat.DoNat(natif, cfg.ClientConfig, logger) - cfg.DownloadTorrentFilesFromWebseed = true // enable it only for standalone mode now. 
feature is not fully ready yet + cfg.AddTorrentsFromDisk = true // always true unless using uploader - which wants control of torrent files + d, err := downloader.New(ctx, cfg, dirs, logger, log.LvlInfo, seedbox) if err != nil { return err @@ -241,7 +245,7 @@ var createTorrent = &cobra.Command{ RunE: func(cmd *cobra.Command, args []string) error { //logger := debug.SetupCobra(cmd, "integration") dirs := datadir.New(datadirCli) - err := downloader.BuildTorrentFilesIfNeed(cmd.Context(), dirs) + err := downloader.BuildTorrentFilesIfNeed(cmd.Context(), dirs, downloader.NewAtomicTorrentFiles(dirs.Snap)) if err != nil { return err } @@ -261,6 +265,18 @@ var printTorrentHashes = &cobra.Command{ }, } +var manifestCmd = &cobra.Command{ + Use: "manifest", + Example: "go run ./cmd/downloader manifest --datadir ", + RunE: func(cmd *cobra.Command, args []string) error { + logger := debug.SetupCobra(cmd, "downloader") + if err := manifest(cmd.Context(), logger); err != nil { + log.Error(err.Error()) + } + return nil + }, +} + var torrentVerify = &cobra.Command{ Use: "torrent_verify", Example: "go run ./cmd/downloader torrent_verify ", @@ -312,12 +328,61 @@ var torrentMagnet = &cobra.Command{ }, } +func manifest(ctx context.Context, logger log.Logger) error { + dirs := datadir.New(datadirCli) + extList := []string{ + ".torrent", + ".seg", ".idx", // e2 + ".kv", ".kvi", ".bt", ".kvei", // e3 domain + ".v", ".vi", //e3 hist + ".ef", ".efi", //e3 idx + ".txt", //salt.txt + } + l, _ := dir.ListFiles(dirs.Snap, extList...) + for _, fPath := range l { + _, fName := filepath.Split(fPath) + fmt.Printf("%s\n", fName) + } + l, _ = dir.ListFiles(dirs.SnapDomain, extList...) + for _, fPath := range l { + _, fName := filepath.Split(fPath) + fmt.Printf("domain/%s\n", fName) + } + l, _ = dir.ListFiles(dirs.SnapHistory, extList...) + for _, fPath := range l { + _, fName := filepath.Split(fPath) + if strings.Contains(fName, "commitment") { + continue + } + fmt.Printf("history/%s\n", fName) + } + l, _ = dir.ListFiles(dirs.SnapIdx, extList...) + for _, fPath := range l { + _, fName := filepath.Split(fPath) + if strings.Contains(fName, "commitment") { + continue + } + fmt.Printf("idx/%s\n", fName) + } + l, _ = dir.ListFiles(dirs.SnapAccessors, extList...) + for _, fPath := range l { + _, fName := filepath.Split(fPath) + if strings.Contains(fName, "commitment") { + continue + } + fmt.Printf("accessors/%s\n", fName) + } + return nil +} + func doPrintTorrentHashes(ctx context.Context, logger log.Logger) error { dirs := datadir.New(datadirCli) if err := datadir.ApplyMigrations(dirs); err != nil { return err } + tf := downloader.NewAtomicTorrentFiles(dirs.Snap) + if forceRebuild { // remove and create .torrent files (will re-read all snapshots) //removePieceCompletionStorage(snapDir) files, err := downloader.AllTorrentPaths(dirs) @@ -329,22 +394,20 @@ func doPrintTorrentHashes(ctx context.Context, logger log.Logger) error { return err } } - if err := downloader.BuildTorrentFilesIfNeed(ctx, dirs); err != nil { + if err := downloader.BuildTorrentFilesIfNeed(ctx, dirs, tf); err != nil { return fmt.Errorf("BuildTorrentFilesIfNeed: %w", err) } } res := map[string]string{} - torrents, err := downloader.AllTorrentSpecs(dirs) + torrents, err := downloader.AllTorrentSpecs(dirs, tf) if err != nil { return err } + for _, t := range torrents { // we don't release commitment history in this time. let's skip it here.
- if strings.HasPrefix(t.DisplayName, "history/commitment") { - continue - } - if strings.HasPrefix(t.DisplayName, "idx/commitment") { + if strings.HasPrefix(t.DisplayName, "history/v1-commitment") || strings.HasPrefix(t.DisplayName, "idx/v1-commitment") { continue } res[t.DisplayName] = t.InfoHash.String() @@ -434,7 +497,7 @@ func StartGrpc(snServer *downloader.GrpcServer, addr string, creds *credentials. // Add pre-configured func addPreConfiguredHashes(ctx context.Context, d *downloader.Downloader) error { - for _, it := range snapcfg.KnownCfg(chain).Preverified { + for _, it := range snapcfg.KnownCfg(chain, 0).Preverified { if err := d.AddMagnetLink(ctx, snaptype.Hex2InfoHash(it.Hash), it.Name); err != nil { return err } diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index cde453423ba..6f88d47e85e 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -154,7 +154,7 @@ func runCmd(ctx *cli.Context) error { defer db.Close() if ctx.String(GenesisFlag.Name) != "" { gen := readGenesis(ctx.String(GenesisFlag.Name)) - core.MustCommitGenesis(gen, db, "") + core.MustCommitGenesis(gen, db, "", log.Root()) genesisConfig = gen chainConfig = gen.Config } else { diff --git a/cmd/hack/hack.go b/cmd/hack/hack.go index 42aa5932bdf..b6585f90f42 100644 --- a/cmd/hack/hack.go +++ b/cmd/hack/hack.go @@ -8,7 +8,6 @@ import ( "encoding/json" "flag" "fmt" - "github.com/ledgerwatch/erigon-lib/kv/dbutils" "math/big" "net/http" _ "net/http/pprof" //nolint:gosec @@ -19,6 +18,8 @@ import ( "strings" "time" + "github.com/ledgerwatch/erigon-lib/kv/dbutils" + "github.com/RoaringBitmap/roaring/roaring64" "github.com/holiman/uint256" "github.com/ledgerwatch/log/v3" @@ -59,15 +60,16 @@ import ( ) var ( - action = flag.String("action", "", "action to execute") - cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`") - block = flag.Int("block", 1, "specifies a block number for operation") - blockTotal = flag.Int("blocktotal", 1, "specifies a total amount of blocks to process (will offset from head block if <= 0)") - account = flag.String("account", "0x", "specifies account to investigate") - name = flag.String("name", "", "name to add to the file names") - chaindata = flag.String("chaindata", "chaindata", "path to the chaindata database file") - bucket = flag.String("bucket", "", "bucket in the database") - hash = flag.String("hash", "0x00", "image for preimage or state root for testBlockHashes action") + action = flag.String("action", "", "action to execute") + cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`") + block = flag.Int("block", 1, "specifies a block number for operation") + blockTotal = flag.Int("blocktotal", 1, "specifies a total amount of blocks to process (will offset from head block if <= 0)") + account = flag.String("account", "0x", "specifies account to investigate") + name = flag.String("name", "", "name to add to the file names") + chaindata = flag.String("chaindata", "chaindata", "path to the chaindata database file") + bucket = flag.String("bucket", "", "bucket in the database") + hash = flag.String("hash", "0x00", "image for preimage or state root for testBlockHashes action") + snapshotVersion = flag.Uint("snapshots.version", 1, "specifies the snapshot file version") ) func dbSlice(chaindata string, bucket string, prefix []byte) { @@ -91,10 +93,10 @@ } // Searches 1000 blocks from the given one to try to find the one with the given state root hash -func testBlockHashes(chaindata string, block
int, stateRoot libcommon.Hash) { +func testBlockHashes(chaindata string, snapshotVersion uint8, block int, stateRoot libcommon.Hash) { ethDb := mdbx.MustOpen(chaindata) defer ethDb.Close() - br, _ := blocksIO(ethDb) + br, _ := blocksIO(ethDb, snapshotVersion) tool.Check(ethDb.View(context.Background(), func(tx kv.Tx) error { blocksToSearch := 10000000 for i := uint64(block); i < uint64(block+blocksToSearch); i++ { @@ -130,7 +132,7 @@ func printCurrentBlockNumber(chaindata string) { }) } -func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) { +func blocksIO(db kv.RoDB, snapshotVersion uint8) (services.FullBlockReader, *blockio.BlockWriter) { var histV3 bool if err := db.View(context.Background(), func(tx kv.Tx) error { histV3, _ = kvcfg.HistoryV3.Enabled(tx) @@ -138,15 +140,15 @@ func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) { }); err != nil { panic(err) } - br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()), nil /* BorSnapshots */) + br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", snapshotVersion, log.New()), nil /* BorSnapshots */) bw := blockio.NewBlockWriter(histV3) return br, bw } -func printTxHashes(chaindata string, block uint64) error { +func printTxHashes(chaindata string, snapshotVersion uint8, block uint64) error { db := mdbx.MustOpen(chaindata) defer db.Close() - br, _ := blocksIO(db) + br, _ := blocksIO(db, snapshotVersion) if err := db.View(context.Background(), func(tx kv.Tx) error { for b := block; b < block+1; b++ { block, _ := br.BlockByNumber(context.Background(), tx, b) @@ -458,10 +460,10 @@ func getBlockTotal(tx kv.Tx, blockFrom uint64, blockTotalOrOffset int64) uint64 return 1 } -func extractHashes(chaindata string, blockStep uint64, blockTotalOrOffset int64, name string) error { +func extractHashes(chaindata string, snapshotVersion uint8, blockStep uint64, blockTotalOrOffset int64, name string) error { db := mdbx.MustOpen(chaindata) defer db.Close() - br, _ := blocksIO(db) + br, _ := blocksIO(db, snapshotVersion) f, err := os.Create(fmt.Sprintf("preverified_hashes_%s.go", name)) if err != nil { @@ -533,12 +535,12 @@ func extractHeaders(chaindata string, block uint64, blockTotalOrOffset int64) er return nil } -func extractBodies(datadir string) error { +func extractBodies(datadir string, snapshotVersion uint8) error { snaps := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{ Enabled: true, KeepBlocks: true, Produce: false, - }, filepath.Join(datadir, "snapshots"), log.New()) + }, filepath.Join(datadir, "snapshots"), snapshotVersion, log.New()) snaps.ReopenFolder() /* method Iterate was removed, need re-implement @@ -577,7 +579,7 @@ func extractBodies(datadir string) error { */ db := mdbx.MustOpen(filepath.Join(datadir, "chaindata")) defer db.Close() - br, _ := blocksIO(db) + br, _ := blocksIO(db, snapshotVersion) tx, err := db.BeginRo(context.Background()) if err != nil { @@ -1023,7 +1025,7 @@ func scanReceipts3(chaindata string, block uint64) error { return nil } -func scanReceipts2(chaindata string) error { +func scanReceipts2(chaindata string, snapshotVersion uint8) error { f, err := os.Create("receipts.txt") if err != nil { return err @@ -1037,7 +1039,7 @@ func scanReceipts2(chaindata string) error { if err != nil { return err } - br, _ := blocksIO(dbdb) + br, _ := blocksIO(dbdb, snapshotVersion) defer tx.Rollback() blockNum, err := historyv2.AvailableFrom(tx) @@ -1386,7 +1388,7 @@ 
func main() { flow.TestGenCfg() case "testBlockHashes": - testBlockHashes(*chaindata, *block, libcommon.HexToHash(*hash)) + testBlockHashes(*chaindata, uint8(*snapshotVersion), *block, libcommon.HexToHash(*hash)) case "readAccount": if err := readAccount(*chaindata, libcommon.HexToAddress(*account)); err != nil { @@ -1424,7 +1426,7 @@ func main() { err = extractHeaders(*chaindata, uint64(*block), int64(*blockTotal)) case "extractHashes": - err = extractHashes(*chaindata, uint64(*block), int64(*blockTotal), *name) + err = extractHashes(*chaindata, uint8(*snapshotVersion), uint64(*block), int64(*blockTotal), *name) case "defrag": err = hackdb.Defrag() @@ -1433,13 +1435,13 @@ func main() { err = hackdb.TextInfo(*chaindata, &strings.Builder{}) case "extractBodies": - err = extractBodies(*chaindata) + err = extractBodies(*chaindata, uint8(*snapshotVersion)) case "repairCurrent": repairCurrent() case "printTxHashes": - printTxHashes(*chaindata, uint64(*block)) + printTxHashes(*chaindata, uint8(*snapshotVersion), uint64(*block)) case "snapSizes": err = snapSizes(*chaindata) @@ -1466,7 +1468,7 @@ func main() { err = scanTxs(*chaindata) case "scanReceipts2": - err = scanReceipts2(*chaindata) + err = scanReceipts2(*chaindata, uint8(*snapshotVersion)) case "scanReceipts3": err = scanReceipts3(*chaindata, uint64(*block)) diff --git a/cmd/integration/commands/flags.go b/cmd/integration/commands/flags.go index 22e583d0fff..ea80e124110 100644 --- a/cmd/integration/commands/flags.go +++ b/cmd/integration/commands/flags.go @@ -40,6 +40,7 @@ var ( _forceSetHistoryV3 bool workers, reconWorkers uint64 + snapshotVersion uint8 = 1 ) func must(err error) { @@ -170,3 +171,7 @@ func withCommitment(cmd *cobra.Command) { cmd.Flags().StringVar(&commitmentTrie, "commitment.trie", "hex", "hex - use Hex Patricia Hashed Trie for commitments, bin - use of binary patricia trie") cmd.Flags().IntVar(&commitmentFreq, "commitment.freq", 1000000, "how many blocks to skip between calculating commitment") } + +func withSnapshotVersion(cmd *cobra.Command) { + cmd.Flags().Uint8Var(&snapshotVersion, "snapshots.version", 1, "specifies the snapshot file version") +} diff --git a/cmd/integration/commands/reset_state.go b/cmd/integration/commands/reset_state.go index c281447bfd5..0f995b2cdec 100644 --- a/cmd/integration/commands/reset_state.go +++ b/cmd/integration/commands/reset_state.go @@ -9,6 +9,7 @@ import ( "text/tabwriter" "github.com/ledgerwatch/erigon/turbo/backup" + "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" @@ -30,26 +31,26 @@ var cmdResetState = &cobra.Command{ Short: "Reset StateStages (5,6,7,8,9,10) and buckets", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return } ctx, _ := common.RootContext() defer db.Close() - sn, borSn, agg := allSnapshots(ctx, db, logger) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() - if err := db.View(ctx, func(tx kv.Tx) error { return printStages(tx, sn, agg) }); err != nil { + if err := db.View(ctx, func(tx kv.Tx) error { return printStages(tx, sn, borSn, agg) }); err != nil { if !errors.Is(err, context.Canceled) { logger.Error(err.Error()) } return } - if err = reset2.ResetState(db, ctx, chain, ""); err
!= nil { + if err = reset2.ResetState(db, ctx, chain, "", log.Root()); err != nil { if !errors.Is(err, context.Canceled) { logger.Error(err.Error()) } @@ -58,7 +59,7 @@ var cmdResetState = &cobra.Command{ // set genesis after reset all buckets fmt.Printf("After reset: \n") - if err := db.View(ctx, func(tx kv.Tx) error { return printStages(tx, sn, agg) }); err != nil { + if err := db.View(ctx, func(tx kv.Tx) error { return printStages(tx, sn, borSn, agg) }); err != nil { if !errors.Is(err, context.Canceled) { logger.Error(err.Error()) } @@ -73,7 +74,7 @@ var cmdClearBadBlocks = &cobra.Command{ RunE: func(cmd *cobra.Command, args []string) error { logger := debug.SetupCobra(cmd, "integration") ctx, _ := common.RootContext() - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return err @@ -90,14 +91,15 @@ func init() { withConfig(cmdResetState) withDataDir(cmdResetState) withChain(cmdResetState) - + withSnapshotVersion(cmdResetState) rootCmd.AddCommand(cmdResetState) withDataDir(cmdClearBadBlocks) + withSnapshotVersion(cmdClearBadBlocks) rootCmd.AddCommand(cmdClearBadBlocks) } -func printStages(tx kv.Tx, snapshots *freezeblocks.RoSnapshots, agg *state.AggregatorV3) error { +func printStages(tx kv.Tx, snapshots *freezeblocks.RoSnapshots, borSn *freezeblocks.BorRoSnapshots, agg *state.AggregatorV3) error { var err error var progress uint64 w := new(tabwriter.Writer) @@ -121,18 +123,16 @@ func printStages(tx kv.Tx, snapshots *freezeblocks.RoSnapshots, agg *state.Aggre } fmt.Fprintf(w, "--\n") fmt.Fprintf(w, "prune distance: %s\n\n", pm.String()) - fmt.Fprintf(w, "blocks.v2: %t, blocks=%d, segments=%d, indices=%d\n\n", snapshots.Cfg().Enabled, snapshots.BlocksAvailable(), snapshots.SegmentsMax(), snapshots.IndicesMax()) + fmt.Fprintf(w, "blocks.v2: %t, blocks=%d, segments=%d, indices=%d\n", snapshots.Cfg().Enabled, snapshots.BlocksAvailable(), snapshots.SegmentsMax(), snapshots.IndicesMax()) + fmt.Fprintf(w, "blocks.bor.v2: segments=%d, indices=%d\n\n", borSn.SegmentsMax(), borSn.IndicesMax()) h3, err := kvcfg.HistoryV3.Enabled(tx) if err != nil { return err } - lastK, lastV, err := rawdbv3.Last(tx, kv.MaxTxNum) - if err != nil { - return err - } _, lastBlockInHistSnap, _ := rawdbv3.TxNums.FindBlockNum(tx, agg.EndTxNumMinimax()) - fmt.Fprintf(w, "history.v3: %t, idx steps: %.02f, lastMaxTxNum=%d->%d, lastBlockInSnap=%d\n\n", h3, rawdbhelpers.IdxStepsCountV3(tx), u64or0(lastK), u64or0(lastV), lastBlockInHistSnap) + _lb, _lt, _ := rawdbv3.TxNums.Last(tx) + fmt.Fprintf(w, "history.v3: %t, idx steps: %.02f, lastBlockInSnap=%d, TxNums_Index(%d,%d)\n\n", h3, rawdbhelpers.IdxStepsCountV3(tx), lastBlockInHistSnap, _lb, _lt) s1, err := tx.ReadSequence(kv.EthTx) if err != nil { return err diff --git a/cmd/integration/commands/root.go b/cmd/integration/commands/root.go index 95120c4f822..e90e38b2222 100644 --- a/cmd/integration/commands/root.go +++ b/cmd/integration/commands/root.go @@ -72,7 +72,7 @@ func dbCfg(label kv.Label, path string) kv2.MdbxOpts { return opts } -func openDB(opts kv2.MdbxOpts, applyMigrations bool, logger log.Logger) (kv.RwDB, error) { +func openDB(opts kv2.MdbxOpts, applyMigrations bool, snapshotVersion uint8, logger log.Logger) (kv.RwDB, error) { db := opts.MustOpen() if applyMigrations { migrator := migrations.NewMigrator(opts.GetLabel()) @@ -105,7 +105,7 @@ func openDB(opts kv2.MdbxOpts, applyMigrations bool, logger log.Logger) 
(kv.RwDB return nil, err } if h3 { - _, _, agg := allSnapshots(context.Background(), db, logger) + _, _, agg := allSnapshots(context.Background(), db, snapshotVersion, logger) tdb, err := temporal.New(db, agg, systemcontracts.SystemContractCodeLookup[chain]) if err != nil { return nil, err diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index f497b7786af..fdfa42c44a2 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -65,7 +65,7 @@ var cmdStageSnapshots = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -86,7 +86,7 @@ var cmdStageHeaders = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -107,7 +107,7 @@ var cmdStageBorHeimdall = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -128,7 +128,7 @@ var cmdStageBodies = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -149,7 +149,7 @@ var cmdStageSenders = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -170,7 +170,7 @@ var cmdStageExec = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -193,7 +193,7 @@ var cmdStageTrie = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -214,7 +214,7 @@ var cmdStageHashState = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -235,7 +235,7 @@ var cmdStageHistory = &cobra.Command{ Short: "", Run: func(cmd 
*cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -256,7 +256,7 @@ var cmdLogIndex = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -277,7 +277,7 @@ var cmdCallTraces = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -298,7 +298,7 @@ var cmdStageTxLookup = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -318,7 +318,7 @@ var cmdPrintStages = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), false, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), false, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -339,7 +339,7 @@ var cmdPrintMigrations = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), false, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), false, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -359,7 +359,7 @@ var cmdRemoveMigration = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), false, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), false, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -381,7 +381,7 @@ var cmdRunMigrations = &cobra.Command{ logger := debug.SetupCobra(cmd, "integration") //non-accede and exclusive mode - to apply create new tables if need. 
cfg := dbCfg(kv.ChainDB, chaindata).Flags(func(u uint) uint { return u &^ mdbx.Accede }).Exclusive() - db, err := openDB(cfg, true, logger) + db, err := openDB(cfg, true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -396,7 +396,7 @@ var cmdSetPrune = &cobra.Command{ Short: "Override existing --prune flag value (if you know what you are doing)", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -416,13 +416,13 @@ var cmdSetSnap = &cobra.Command{ Short: "Override existing --snapshots flag value (if you know what you are doing)", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return } defer db.Close() - sn, borSn, agg := allSnapshots(cmd.Context(), db, logger) + sn, borSn, agg := allSnapshots(cmd.Context(), db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -452,7 +452,7 @@ var cmdForceSetHistoryV3 = &cobra.Command{ Short: "Override existing --history.v3 flag value (if you know what you are doing)", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -474,6 +474,7 @@ func init() { withDataDir(cmdPrintStages) withChain(cmdPrintStages) withHeimdall(cmdPrintStages) + withSnapshotVersion(cmdPrintStages) rootCmd.AddCommand(cmdPrintStages) withConfig(cmdStageSenders) @@ -484,11 +485,13 @@ func init() { withDataDir(cmdStageSenders) withChain(cmdStageSenders) withHeimdall(cmdStageSenders) + withSnapshotVersion(cmdStageSenders) rootCmd.AddCommand(cmdStageSenders) withConfig(cmdStageSnapshots) withDataDir(cmdStageSnapshots) withReset(cmdStageSnapshots) + withSnapshotVersion(cmdStageSnapshots) rootCmd.AddCommand(cmdStageSnapshots) withConfig(cmdStageHeaders) @@ -498,6 +501,7 @@ func init() { withReset(cmdStageHeaders) withChain(cmdStageHeaders) withHeimdall(cmdStageHeaders) + withSnapshotVersion(cmdStageHeaders) rootCmd.AddCommand(cmdStageHeaders) withConfig(cmdStageBorHeimdall) @@ -505,6 +509,7 @@ func init() { withReset(cmdStageBorHeimdall) withChain(cmdStageBorHeimdall) withHeimdall(cmdStageBorHeimdall) + withSnapshotVersion(cmdStageBorHeimdall) rootCmd.AddCommand(cmdStageBorHeimdall) withConfig(cmdStageBodies) @@ -512,6 +517,7 @@ func init() { withUnwind(cmdStageBodies) withChain(cmdStageBodies) withHeimdall(cmdStageBodies) + withSnapshotVersion(cmdStageBodies) rootCmd.AddCommand(cmdStageBodies) withConfig(cmdStageExec) @@ -526,6 +532,7 @@ func init() { withChain(cmdStageExec) withHeimdall(cmdStageExec) withWorkers(cmdStageExec) + withSnapshotVersion(cmdStageExec) rootCmd.AddCommand(cmdStageExec) withConfig(cmdStageHashState) @@ -537,6 +544,7 @@ func init() { withBatchSize(cmdStageHashState) withChain(cmdStageHashState) withHeimdall(cmdStageHashState) + withSnapshotVersion(cmdStageHashState) rootCmd.AddCommand(cmdStageHashState) withConfig(cmdStageTrie) @@ -548,6 +556,7 @@ 
func init() { withIntegrityChecks(cmdStageTrie) withChain(cmdStageTrie) withHeimdall(cmdStageTrie) + withSnapshotVersion(cmdStageTrie) rootCmd.AddCommand(cmdStageTrie) withConfig(cmdStageHistory) @@ -558,6 +567,7 @@ func init() { withPruneTo(cmdStageHistory) withChain(cmdStageHistory) withHeimdall(cmdStageHistory) + withSnapshotVersion(cmdStageHistory) rootCmd.AddCommand(cmdStageHistory) withConfig(cmdLogIndex) @@ -568,6 +578,7 @@ func init() { withPruneTo(cmdLogIndex) withChain(cmdLogIndex) withHeimdall(cmdLogIndex) + withSnapshotVersion(cmdLogIndex) rootCmd.AddCommand(cmdLogIndex) withConfig(cmdCallTraces) @@ -578,6 +589,7 @@ func init() { withPruneTo(cmdCallTraces) withChain(cmdCallTraces) withHeimdall(cmdCallTraces) + withSnapshotVersion(cmdCallTraces) rootCmd.AddCommand(cmdCallTraces) withConfig(cmdStageTxLookup) @@ -588,10 +600,12 @@ func init() { withPruneTo(cmdStageTxLookup) withChain(cmdStageTxLookup) withHeimdall(cmdStageTxLookup) + withSnapshotVersion(cmdStageTxLookup) rootCmd.AddCommand(cmdStageTxLookup) withConfig(cmdPrintMigrations) withDataDir(cmdPrintMigrations) + withSnapshotVersion(cmdPrintMigrations) rootCmd.AddCommand(cmdPrintMigrations) withConfig(cmdRemoveMigration) @@ -599,23 +613,27 @@ func init() { withMigration(cmdRemoveMigration) withChain(cmdRemoveMigration) withHeimdall(cmdRemoveMigration) + withSnapshotVersion(cmdRemoveMigration) rootCmd.AddCommand(cmdRemoveMigration) withConfig(cmdRunMigrations) withDataDir(cmdRunMigrations) withChain(cmdRunMigrations) withHeimdall(cmdRunMigrations) + withSnapshotVersion(cmdRunMigrations) rootCmd.AddCommand(cmdRunMigrations) withConfig(cmdSetSnap) withDataDir2(cmdSetSnap) withChain(cmdSetSnap) + withSnapshotVersion(cmdSetSnap) cmdSetSnap.Flags().Bool("snapshots", false, "") must(cmdSetSnap.MarkFlagRequired("snapshots")) rootCmd.AddCommand(cmdSetSnap) withConfig(cmdForceSetHistoryV3) withDataDir2(cmdForceSetHistoryV3) + withSnapshotVersion(cmdForceSetHistoryV3) cmdForceSetHistoryV3.Flags().BoolVar(&_forceSetHistoryV3, "history.v3", false, "") must(cmdForceSetHistoryV3.MarkFlagRequired("history.v3")) rootCmd.AddCommand(cmdForceSetHistoryV3) @@ -623,6 +641,7 @@ func init() { withConfig(cmdSetPrune) withDataDir(cmdSetPrune) withChain(cmdSetPrune) + withSnapshotVersion(cmdSetPrune) cmdSetPrune.Flags().StringVar(&pruneFlag, "prune", "hrtc", "") cmdSetPrune.Flags().Uint64Var(&pruneH, "prune.h.older", 0, "") cmdSetPrune.Flags().Uint64Var(&pruneR, "prune.r.older", 0, "") @@ -658,7 +677,7 @@ func stageHeaders(db kv.RwDB, ctx context.Context, logger log.Logger) error { return err } - sn, borSn, agg := allSnapshots(ctx, db, logger) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -756,7 +775,7 @@ func stageBorHeimdall(db kv.RwDB, ctx context.Context, logger log.Logger) error } func stageBodies(db kv.RwDB, ctx context.Context, logger log.Logger) error { - sn, borSn, agg := allSnapshots(ctx, db, logger) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -773,7 +792,7 @@ func stageBodies(db kv.RwDB, ctx context.Context, logger log.Logger) error { } u := sync.NewUnwindState(stages.Bodies, s.BlockNumber-unwind, s.BlockNumber) - cfg := stagedsync.StageBodiesCfg(db, nil, nil, nil, nil, 0, *chainConfig, br, historyV3, bw) + cfg := stagedsync.StageBodiesCfg(db, nil, nil, nil, nil, 0, *chainConfig, br, historyV3, bw, nil) if err := stagedsync.UnwindBodiesStage(u, tx, cfg, ctx); err != nil { return 
err } @@ -796,7 +815,7 @@ func stageBodies(db kv.RwDB, ctx context.Context, logger log.Logger) error { func stageSenders(db kv.RwDB, ctx context.Context, logger log.Logger) error { tmpdir := datadir.New(datadirCli).Tmp chainConfig := fromdb.ChainConfig(db) - sn, borSn, agg := allSnapshots(ctx, db, logger) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -863,7 +882,7 @@ func stageSenders(db kv.RwDB, ctx context.Context, logger log.Logger) error { return err } - cfg := stagedsync.StageSendersCfg(db, chainConfig, false, tmpdir, pm, br, nil) + cfg := stagedsync.StageSendersCfg(db, chainConfig, false, tmpdir, pm, br, nil, nil) if unwind > 0 { u := sync.NewUnwindState(stages.Senders, s.BlockNumber-unwind, s.BlockNumber) if err = stagedsync.UnwindSendersStage(u, tx, cfg, ctx); err != nil { @@ -894,7 +913,7 @@ func stageExec(db kv.RwDB, ctx context.Context, logger log.Logger) error { engine, vmConfig, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger) must(sync.SetCurrentStage(stages.Execution)) - sn, borSn, agg := allSnapshots(ctx, db, logger) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -903,7 +922,7 @@ func stageExec(db kv.RwDB, ctx context.Context, logger log.Logger) error { return reset2.WarmupExec(ctx, db) } if reset { - return reset2.ResetExec(ctx, db, chain, "") + return reset2.ResetExec(ctx, db, chain, "", logger) } if txtrace { @@ -976,7 +995,7 @@ func stageExec(db kv.RwDB, ctx context.Context, logger log.Logger) error { func stageTrie(db kv.RwDB, ctx context.Context, logger log.Logger) error { dirs, pm, historyV3 := datadir.New(datadirCli), fromdb.PruneMode(db), kvcfg.HistoryV3.FromDB(db) - sn, borSn, agg := allSnapshots(ctx, db, logger) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -1034,7 +1053,7 @@ func stageTrie(db kv.RwDB, ctx context.Context, logger log.Logger) error { func stageHashState(db kv.RwDB, ctx context.Context, logger log.Logger) error { dirs, pm, historyV3 := datadir.New(datadirCli), fromdb.PruneMode(db), kvcfg.HistoryV3.FromDB(db) - sn, borSn, agg := allSnapshots(ctx, db, logger) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -1212,7 +1231,7 @@ func stageHistory(db kv.RwDB, ctx context.Context, logger log.Logger) error { if historyV3 { return fmt.Errorf("this stage is disable in --history.v3=true") } - sn, borSn, agg := allSnapshots(ctx, db, logger) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -1271,7 +1290,7 @@ func stageHistory(db kv.RwDB, ctx context.Context, logger log.Logger) error { if err != nil { return err } - _ = printStages(tx, sn, agg) + _ = printStages(tx, sn, borSn, agg) } else { if err := stagedsync.SpawnAccountHistoryIndex(stageAcc, tx, cfg, ctx, logger); err != nil { return err @@ -1288,7 +1307,7 @@ func stageTxLookup(db kv.RwDB, ctx context.Context, logger log.Logger) error { _, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger) chainConfig := fromdb.ChainConfig(db) must(sync.SetCurrentStage(stages.TxLookup)) - sn, borSn, agg := allSnapshots(ctx, db, logger) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -1338,11 +1357,11 @@ func stageTxLookup(db kv.RwDB, ctx 
context.Context, logger log.Logger) error { } func printAllStages(db kv.RoDB, ctx context.Context, logger log.Logger) error { - sn, borSn, agg := allSnapshots(ctx, db, logger) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() - return db.View(ctx, func(tx kv.Tx) error { return printStages(tx, sn, agg) }) + return db.View(ctx, func(tx kv.Tx) error { return printStages(tx, sn, borSn, agg) }) } func printAppliedMigrations(db kv.RwDB, ctx context.Context, logger log.Logger) error { @@ -1374,7 +1393,7 @@ var _allSnapshotsSingleton *freezeblocks.RoSnapshots var _allBorSnapshotsSingleton *freezeblocks.BorRoSnapshots var _aggSingleton *libstate.AggregatorV3 -func allSnapshots(ctx context.Context, db kv.RoDB, logger log.Logger) (*freezeblocks.RoSnapshots, *freezeblocks.BorRoSnapshots, *libstate.AggregatorV3) { +func allSnapshots(ctx context.Context, db kv.RoDB, version uint8, logger log.Logger) (*freezeblocks.RoSnapshots, *freezeblocks.BorRoSnapshots, *libstate.AggregatorV3) { openSnapshotOnce.Do(func() { var useSnapshots bool _ = db.View(context.Background(), func(tx kv.Tx) error { @@ -1385,8 +1404,8 @@ func allSnapshots(ctx context.Context, db kv.RoDB, logger log.Logger) (*freezebl dir.MustExist(dirs.SnapHistory) snapCfg := ethconfig.NewSnapCfg(useSnapshots, true, true) - _allSnapshotsSingleton = freezeblocks.NewRoSnapshots(snapCfg, dirs.Snap, logger) - _allBorSnapshotsSingleton = freezeblocks.NewBorRoSnapshots(snapCfg, dirs.Snap, logger) + _allSnapshotsSingleton = freezeblocks.NewRoSnapshots(snapCfg, dirs.Snap, version, logger) + _allBorSnapshotsSingleton = freezeblocks.NewBorRoSnapshots(snapCfg, dirs.Snap, snapshotVersion, logger) var err error _aggSingleton, err = libstate.NewAggregatorV3(ctx, dirs.SnapHistory, dirs.Tmp, ethconfig.HistoryV3AggregationStep, db, logger) @@ -1402,11 +1421,11 @@ func allSnapshots(ctx context.Context, db kv.RoDB, logger log.Logger) (*freezebl if err := _allSnapshotsSingleton.ReopenFolder(); err != nil { panic(err) } - _allSnapshotsSingleton.LogStat() + _allSnapshotsSingleton.LogStat("all") if err := _allBorSnapshotsSingleton.ReopenFolder(); err != nil { panic(err) } - _allBorSnapshotsSingleton.LogStat() + _allBorSnapshotsSingleton.LogStat("all") db.View(context.Background(), func(tx kv.Tx) error { _aggSingleton.LogStats(tx, func(endTxNumMinimax uint64) uint64 { _, histBlockNumProgress, _ := rawdbv3.TxNums.FindBlockNum(tx, endTxNumMinimax) @@ -1425,7 +1444,7 @@ var _blockWriterSingleton *blockio.BlockWriter func blocksIO(db kv.RoDB, logger log.Logger) (services.FullBlockReader, *blockio.BlockWriter) { openBlockReaderOnce.Do(func() { - sn, borSn, _ := allSnapshots(context.Background(), db, logger) + sn, borSn, _ := allSnapshots(context.Background(), db, snapshotVersion, logger) histV3 := kvcfg.HistoryV3.FromDB(db) _blockReaderSingleton = freezeblocks.NewBlockReader(sn, borSn) _blockWriterSingleton = blockio.NewBlockWriter(histV3) @@ -1447,7 +1466,7 @@ func allDomains(ctx context.Context, db kv.RoDB, stepSize uint64, mode libstate. 
dir.MustExist(dirs.SnapHistory) snapCfg := ethconfig.NewSnapCfg(useSnapshots, true, true) - _allSnapshotsSingleton = freezeblocks.NewRoSnapshots(snapCfg, dirs.Snap, logger) + _allSnapshotsSingleton = freezeblocks.NewRoSnapshots(snapCfg, dirs.Snap, snapshotVersion, logger) var err error _aggDomainSingleton, err = libstate.NewAggregator(filepath.Join(dirs.DataDir, "state"), dirs.Tmp, stepSize, mode, trie, logger) @@ -1462,7 +1481,7 @@ func allDomains(ctx context.Context, db kv.RoDB, stepSize uint64, mode libstate. if err := _allSnapshotsSingleton.ReopenFolder(); err != nil { panic(err) } - _allSnapshotsSingleton.LogStat() + _allSnapshotsSingleton.LogStat("all:singleton") //db.View(context.Background(), func(tx kv.Tx) error { // _aggSingleton.LogStats(tx, func(endTxNumMinimax uint64) uint64 { // _, histBlockNumProgress, _ := rawdbv3.TxNums.FindBlockNum(tx, endTxNumMinimax) @@ -1539,7 +1558,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig, cfg.Miner = *miningConfig } cfg.Dirs = datadir.New(datadirCli) - allSn, _, agg := allSnapshots(ctx, db, logger) + allSn, _, agg := allSnapshots(ctx, db, snapshotVersion, logger) cfg.Snapshot = allSn.Cfg() blockReader, blockWriter := blocksIO(db, logger) @@ -1569,7 +1588,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig, } notifications := &shards.Notifications{} - blockRetire := freezeblocks.NewBlockRetire(1, dirs, blockReader, blockWriter, db, notifications.Events, logger) + blockRetire := freezeblocks.NewBlockRetire(1, dirs, blockReader, blockWriter, db, chainConfig, notifications.Events, logger) var ( snapDb kv.RwDB @@ -1583,7 +1602,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig, } stages := stages2.NewDefaultStages(context.Background(), db, snapDb, p2p.Config{}, &cfg, sentryControlServer, notifications, nil, blockReader, blockRetire, agg, nil, nil, heimdallClient, recents, signatures, logger) - sync := stagedsync.New(stages, stagedsync.DefaultUnwindOrder, stagedsync.DefaultPruneOrder, logger) + sync := stagedsync.New(cfg.Sync, stages, stagedsync.DefaultUnwindOrder, stagedsync.DefaultPruneOrder, logger) miner := stagedsync.NewMiningState(&cfg.Miner) miningCancel := make(chan struct{}) @@ -1593,9 +1612,10 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig, }() miningSync := stagedsync.New( + cfg.Sync, stagedsync.MiningStages(ctx, stagedsync.StageMiningCreateBlockCfg(db, miner, *chainConfig, engine, nil, nil, dirs.Tmp, blockReader), - stagedsync.StageBorHeimdallCfg(db, snapDb, miner, *chainConfig, heimdallClient, blockReader, nil, nil, recents, signatures), + stagedsync.StageBorHeimdallCfg(db, snapDb, miner, *chainConfig, heimdallClient, blockReader, nil, nil, nil, recents, signatures), stagedsync.StageMiningExecCfg(db, miner, events, *chainConfig, engine, &vm.Config{}, dirs.Tmp, nil, 0, nil, nil, blockReader), stagedsync.StageHashStateCfg(db, dirs, historyV3), stagedsync.StageTrieCfg(db, false, true, false, dirs.Tmp, blockReader, nil, historyV3, agg), diff --git a/cmd/integration/commands/state_domains.go b/cmd/integration/commands/state_domains.go index fabde89f2f4..cacbb6238de 100644 --- a/cmd/integration/commands/state_domains.go +++ b/cmd/integration/commands/state_domains.go @@ -5,12 +5,13 @@ import ( "encoding/hex" "errors" "fmt" - "github.com/ledgerwatch/erigon-lib/metrics" "path/filepath" "runtime" "strings" "time" + "github.com/ledgerwatch/erigon-lib/metrics" + "github.com/holiman/uint256" 
"github.com/ledgerwatch/log/v3" "github.com/spf13/cobra" @@ -92,7 +93,7 @@ var readDomains = &cobra.Command{ } dirs := datadir.New(datadirCli) - chainDb, err := openDB(dbCfg(kv.ChainDB, dirs.Chaindata), true, logger) + chainDb, err := openDB(dbCfg(kv.ChainDB, dirs.Chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return diff --git a/cmd/integration/commands/state_stages.go b/cmd/integration/commands/state_stages.go index 3401cf669de..63733b5fc2f 100644 --- a/cmd/integration/commands/state_stages.go +++ b/cmd/integration/commands/state_stages.go @@ -6,11 +6,12 @@ import ( "encoding/json" "errors" "fmt" - "github.com/ledgerwatch/erigon-lib/kv/dbutils" "os" "sort" "time" + "github.com/ledgerwatch/erigon-lib/kv/dbutils" + "github.com/c2h5oh/datasize" chain2 "github.com/ledgerwatch/erigon-lib/chain" common2 "github.com/ledgerwatch/erigon-lib/common" @@ -63,7 +64,7 @@ Examples: erigoncli.ApplyFlagsForEthConfigCobra(cmd.Flags(), ethConfig) miningConfig := params.MiningConfig{} utils.SetupMinerCobra(cmd, &miningConfig) - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -93,7 +94,7 @@ var loopIhCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") ctx, _ := common2.RootContext() - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -117,7 +118,7 @@ var loopExecCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") ctx, _ := common2.RootContext() - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -147,6 +148,7 @@ func init() { withChain(stateStages) withHeimdall(stateStages) withWorkers(stateStages) + withSnapshotVersion(stateStages) rootCmd.AddCommand(stateStages) withConfig(loopIhCmd) @@ -155,6 +157,7 @@ func init() { withUnwind(loopIhCmd) withChain(loopIhCmd) withHeimdall(loopIhCmd) + withSnapshotVersion(loopIhCmd) rootCmd.AddCommand(loopIhCmd) withConfig(loopExecCmd) @@ -164,6 +167,7 @@ func init() { withChain(loopExecCmd) withHeimdall(loopExecCmd) withWorkers(loopExecCmd) + withSnapshotVersion(loopExecCmd) rootCmd.AddCommand(loopExecCmd) } @@ -173,7 +177,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. return err } - sn, borSn, agg := allSnapshots(ctx, db, logger1) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger1) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -313,7 +317,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. stateStages.MockExecFunc(stages.Execution, execUntilFunc(execToBlock)) _ = stateStages.SetCurrentStage(stages.Execution) - if err := stateStages.Run(db, tx, false /* firstCycle */); err != nil { + if _, err := stateStages.Run(db, tx, false /* firstCycle */); err != nil { return err } @@ -371,7 +375,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. 
//}) _ = miningStages.SetCurrentStage(stages.MiningCreateBlock) - if err := miningStages.Run(db, tx, false /* firstCycle */); err != nil { + if _, err := miningStages.Run(db, tx, false /* firstCycle */); err != nil { return err } tx.Rollback() @@ -450,7 +454,7 @@ func checkMinedBlock(b1, b2 *types.Block, chainConfig *chain2.Config) { } func loopIh(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) error { - sn, borSn, agg := allSnapshots(ctx, db, logger) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -464,7 +468,7 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) e } defer tx.Rollback() sync.DisableStages(stages.Snapshots, stages.Headers, stages.BlockHashes, stages.Bodies, stages.Senders, stages.Execution, stages.AccountHistoryIndex, stages.StorageHistoryIndex, stages.TxLookup, stages.Finish) - if err = sync.Run(db, tx, false /* firstCycle */); err != nil { + if _, err = sync.Run(db, tx, false /* firstCycle */); err != nil { return err } execStage := stage(sync, tx, nil, stages.HashState) @@ -488,7 +492,7 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) e sync.DisableStages(stages.IntermediateHashes) _ = sync.SetCurrentStage(stages.HashState) - if err = sync.Run(db, tx, false /* firstCycle */); err != nil { + if _, err = sync.Run(db, tx, false /* firstCycle */); err != nil { return err } must(tx.Commit()) @@ -508,7 +512,7 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) e _ = sync.SetCurrentStage(stages.IntermediateHashes) t := time.Now() - if err = sync.Run(db, tx, false /* firstCycle */); err != nil { + if _, err = sync.Run(db, tx, false /* firstCycle */); err != nil { return err } logger.Warn("loop", "time", time.Since(t).String()) @@ -524,7 +528,7 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) e func loopExec(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) error { chainConfig := fromdb.ChainConfig(db) dirs, pm := datadir.New(datadirCli), fromdb.PruneMode(db) - sn, borSn, agg := allSnapshots(ctx, db, logger) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -579,7 +583,7 @@ func loopExec(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) _ = sync.SetCurrentStage(stages.Execution) t := time.Now() - if err = sync.Run(db, tx, initialCycle); err != nil { + if _, err = sync.Run(db, tx, initialCycle); err != nil { return err } logger.Info("[Integration] ", "loop time", time.Since(t)) diff --git a/cmd/observer/observer/server.go b/cmd/observer/observer/server.go index 99c2cb4bbc2..7eb2bfab476 100644 --- a/cmd/observer/observer/server.go +++ b/cmd/observer/observer/server.go @@ -85,7 +85,7 @@ func NewServer(ctx context.Context, flags CommandFlags, logger log.Logger) (*Ser } func makeLocalNode(ctx context.Context, nodeDBPath string, privateKey *ecdsa.PrivateKey, chain string, logger log.Logger) (*enode.LocalNode, error) { - db, err := enode.OpenDB(ctx, nodeDBPath, "") + db, err := enode.OpenDB(ctx, nodeDBPath, "", logger) if err != nil { return nil, err } diff --git a/cmd/p2psim/main.go b/cmd/p2psim/main.go index 3f567847bbd..e7202866500 100644 --- a/cmd/p2psim/main.go +++ b/cmd/p2psim/main.go @@ -39,12 +39,14 @@ import ( "context" "encoding/json" "fmt" - "github.com/ledgerwatch/erigon-lib/common" "io" "os" "strings" "text/tabwriter" + 
"github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon/turbo/logging" "github.com/urfave/cli/v2" @@ -70,7 +72,7 @@ func main() { }, } app.Before = func(ctx *cli.Context) error { - logger := logging.SetupLoggerCtx("p2psim", ctx, false /* rootLogger */) + logger := logging.SetupLoggerCtx("p2psim", ctx, log.LvlInfo, log.LvlInfo, false /* rootLogger */) client = simulations.NewClient(ctx.String("api"), logger) return nil } diff --git a/cmd/rpcdaemon/README.md b/cmd/rpcdaemon/README.md index 7a275d2ac5b..79833756911 100644 --- a/cmd/rpcdaemon/README.md +++ b/cmd/rpcdaemon/README.md @@ -1,21 +1,21 @@ - [Introduction](#introduction) - [Getting Started](#getting-started) - * [Running locally](#running-locally) - * [Running remotely](#running-remotely) - * [Healthcheck](#healthcheck) - * [Testing](#testing) + - [Running locally](#running-locally) + - [Running remotely](#running-remotely) + - [Healthcheck](#healthcheck) + - [Testing](#testing) - [FAQ](#faq) - * [Relations between prune options and rpc methods](#relations-between-prune-options-and-rpc-method) - * [RPC Implementation Status](#rpc-implementation-status) - * [Securing the communication between RPC daemon and Erigon instance via TLS and authentication](#securing-the-communication-between-rpc-daemon-and-erigon-instance-via-tls-and-authentication) - * [Ethstats](#ethstats) - * [Allowing only specific methods (Allowlist)](#allowing-only-specific-methods--allowlist-) - * [Trace transactions progress](#trace-transactions-progress) - * [Clients getting timeout, but server load is low](#clients-getting-timeout--but-server-load-is-low) - * [Server load too high](#server-load-too-high) - * [Faster Batch requests](#faster-batch-requests) + - [Relations between prune options and rpc methods](#relations-between-prune-options-and-rpc-method) + - [RPC Implementation Status](#rpc-implementation-status) + - [Securing the communication between RPC daemon and Erigon instance via TLS and authentication](#securing-the-communication-between-rpc-daemon-and-erigon-instance-via-tls-and-authentication) + - [Ethstats](#ethstats) + - [Allowing only specific methods (Allowlist)](#allowing-only-specific-methods--allowlist-) + - [Trace transactions progress](#trace-transactions-progress) + - [Clients getting timeout, but server load is low](#clients-getting-timeout--but-server-load-is-low) + - [Server load too high](#server-load-too-high) + - [Faster Batch requests](#faster-batch-requests) - [For Developers](#for-developers) - * [Code generation](#code-generation) + - [Code generation](#code-generation) ## Introduction @@ -72,7 +72,7 @@ it may scale well for some workloads that are heavy on the current state queries ### Healthcheck -There are 2 options for running healtchecks, POST request, or GET request with custom headers. Both options are available +There are 2 options for running healtchecks, POST request, or GET request with custom headers. Both options are available at the `/health` endpoint. #### POST request @@ -99,7 +99,7 @@ Not adding a check disables that. `eth` namespace to be listed in `http.api`. Example request -```http POST http://localhost:8545/health --raw '{"min_peer_count": 3, "known_block": "0x1F"}'``` +`http POST http://localhost:8545/health --raw '{"min_peer_count": 3, "known_block": "0x1F"}'` Example response ``` @@ -114,19 +114,21 @@ Example response If the healthcheck is successful it will return a 200 status code. -If the healthcheck fails for any reason a status 500 will be returned. 
This is true if one of the criteria requested +If the healthcheck fails for any reason a status 500 will be returned. This is true if one of the criteria requested fails its check. -You can set any number of values on the `X-ERIGON-HEALTHCHECK` header. Ones that are not included are skipped in the +You can set any number of values on the `X-ERIGON-HEALTHCHECK` header. Ones that are not included are skipped in the checks. Available Options: + - `synced` - will check if the node has completed syncing - `min_peer_count` - will check that the node has at least `` many peers - `check_block` - will check that the node is at least ahead of the `` specified - `max_seconds_behind` - will check that the node is no more than `` behind from its latest block Example Request + ``` curl --location --request GET 'http://localhost:8545/health' \ --header 'X-ERIGON-HEALTHCHECK: min_peer_count1' \ @@ -135,6 +137,7 @@ curl --location --request GET 'http://localhost:8545/health' \ ``` Example Response + ``` { "check_block":"DISABLED", @@ -194,7 +197,6 @@ If the `--http.url` flag is set, then `--http.addr` and `--http.port` with both note that this is NOT geth-style IPC. for that, read the next section, IPC endpoint(geth-compatible) - ### HTTPS, HTTP2, and H2C Erigon supports HTTPS, HTTP2, and H2C out of the box. H2C is served by the default HTTP handler. @@ -207,7 +209,6 @@ The HTTPS server will inherit all other configuration parameters from http, for If the `--https.url` flag is set, then `--https.addr` and `--https.port` with both be ignored. - ### IPC endpoint (geth compatible) erigon supports the geth-style unix socket IPC. you can enable this with `--socket.enabled` flag, @@ -225,7 +226,7 @@ Label "remote" means: `--private.api.addr` flag is required. The following table shows the current implementation status of Erigon's RPC daemon. | Command | Avail | Notes | -| ------------------------------------------ |---------|--------------------------------------| +| ------------------------------------------ | ------- | ------------------------------------ | | admin_nodeInfo | Yes | | | admin_peers | Yes | | | admin_addPeer | Yes | | @@ -280,7 +281,7 @@ The following table shows the current implementation status of Erigon's RPC daem | eth_getFilterChanges | Yes | | | eth_uninstallFilter | Yes | | | eth_getLogs | Yes | | -| interned spe | | | +| interned spe | | | | eth_accounts | No | deprecated | | eth_sendRawTransaction | Yes | `remote`. | | eth_sendTransaction | - | not yet implemented | @@ -337,6 +338,7 @@ The following table shows the current implementation status of Erigon's RPC daem | trace_transaction | Yes | | | | | | | txpool_content | Yes | `remote` | +| txpool_contentFrom | Yes | `remote` | | txpool_status | Yes | `remote` | | | | | | eth_getCompilers | No | deprecated | @@ -371,10 +373,10 @@ The following table shows the current implementation status of Erigon's RPC daem ### GraphQL -| Command | Avail | Notes | -|--------------------------------------------|---------|--------------------------------------| -| GetBlockDetails | Yes | | -| GetChainID | Yes | | +| Command | Avail | Notes | +| --------------- | ----- | ----- | +| GetBlockDetails | Yes | | +| GetChainID | Yes | | This table is constantly updated. Please visit again. @@ -530,10 +532,7 @@ with `rpc.accessList` flag. 
```json { - "allow": [ - "net_version", - "web3_eth_getBlockByHash" - ] + "allow": ["net_version", "web3_eth_getBlockByHash"] } ``` @@ -568,7 +567,7 @@ Currently batch requests are spawn multiple goroutines and process all sub-reque huge batch to other users - added flag `--rpc.batch.concurrency` (default: 2). Increase it to process large batches faster. -Known Issue: if at least 1 request is "streamable" (has parameter of type *jsoniter.Stream) - then whole batch will +Known Issue: if at least 1 request is "streamable" (has parameter of type \*jsoniter.Stream) - then whole batch will processed sequentially (on 1 goroutine). ## For Developers diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go index 2d8b381a254..18127a88e02 100644 --- a/cmd/rpcdaemon/cli/config.go +++ b/cmd/rpcdaemon/cli/config.go @@ -14,6 +14,7 @@ import ( "time" "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/erigon-lib/chain/snapcfg" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/common/dir" @@ -372,15 +373,17 @@ func RemoteServices(ctx context.Context, cfg *httpcfg.HttpCfg, logger log.Logger logger.Info("Use --snapshots=false") } + snapshotVersion := snapcfg.KnownCfg(cc.ChainName, 0).Version + // Configure sapshots - allSnapshots = freezeblocks.NewRoSnapshots(cfg.Snap, cfg.Dirs.Snap, logger) - allBorSnapshots = freezeblocks.NewBorRoSnapshots(cfg.Snap, cfg.Dirs.Snap, logger) + allSnapshots = freezeblocks.NewRoSnapshots(cfg.Snap, cfg.Dirs.Snap, snapshotVersion, logger) + allBorSnapshots = freezeblocks.NewBorRoSnapshots(cfg.Snap, cfg.Dirs.Snap, snapshotVersion, logger) // To povide good UX - immediatly can read snapshots after RPCDaemon start, even if Erigon is down // Erigon does store list of snapshots in db: means RPCDaemon can read this list now, but read by `remoteKvClient.Snapshots` after establish grpc connection allSnapshots.OptimisticReopenWithDB(db) allBorSnapshots.OptimisticalyReopenWithDB(db) - allSnapshots.LogStat() - allBorSnapshots.LogStat() + allSnapshots.LogStat("remote") + allBorSnapshots.LogStat("remote") if agg, err = libstate.NewAggregatorV3(ctx, cfg.Dirs.SnapHistory, cfg.Dirs.Tmp, ethconfig.HistoryV3AggregationStep, db, logger); err != nil { return nil, nil, nil, nil, nil, nil, nil, ff, nil, fmt.Errorf("create aggregator: %w", err) @@ -404,12 +407,12 @@ func RemoteServices(ctx context.Context, cfg *httpcfg.HttpCfg, logger log.Logger if err := allSnapshots.ReopenList(reply.BlocksFiles, true); err != nil { logger.Error("[snapshots] reopen", "err", err) } else { - allSnapshots.LogStat() + allSnapshots.LogStat("reopen") } if err := allBorSnapshots.ReopenList(reply.BlocksFiles, true); err != nil { logger.Error("[bor snapshots] reopen", "err", err) } else { - allSnapshots.LogStat() + allBorSnapshots.LogStat("reopen") } _ = reply.HistoryFiles @@ -780,7 +783,7 @@ func isWebsocket(r *http.Request) bool { // obtainJWTSecret loads the jwt-secret, either from the provided config, // or from the default location. If neither of those are present, it generates // a new secret and stores to the default location. -func obtainJWTSecret(cfg *httpcfg.HttpCfg, logger log.Logger) ([]byte, error) { +func ObtainJWTSecret(cfg *httpcfg.HttpCfg, logger log.Logger) ([]byte, error) { // try reading from file logger.Info("Reading JWT secret", "path", cfg.JWTSecretPath) // If we run the rpcdaemon and datadir is not specified we just use jwt.hex in current directory. 
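Since `obtainJWTSecret` is being exported as `ObtainJWTSecret`, code outside the package can reuse the same load-or-generate logic. A minimal caller sketch (hypothetical wiring; only the `ObtainJWTSecret` signature and the `JWTSecretPath` field are taken from the change above):

```go
package main

import (
	"fmt"

	"github.com/ledgerwatch/erigon/cmd/rpcdaemon/cli"
	"github.com/ledgerwatch/erigon/cmd/rpcdaemon/cli/httpcfg"
	"github.com/ledgerwatch/log/v3"
)

func main() {
	// Reads the secret from jwt.hex if present; otherwise a new secret is
	// generated and stored at that path, per the doc comment above.
	cfg := &httpcfg.HttpCfg{JWTSecretPath: "jwt.hex"}

	secret, err := cli.ObtainJWTSecret(cfg, log.Root())
	if err != nil {
		panic(err)
	}

	fmt.Printf("loaded %d-byte JWT secret\n", len(secret))
}
```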
@@ -840,7 +843,7 @@ func createEngineListener(cfg *httpcfg.HttpCfg, engineApi []rpc.API, logger log. return nil, nil, "", fmt.Errorf("could not start register RPC engine api: %w", err) } - jwtSecret, err := obtainJWTSecret(cfg, logger) + jwtSecret, err := ObtainJWTSecret(cfg, logger) if err != nil { return nil, nil, "", err } diff --git a/cmd/rpcdaemon/health/check_time.go b/cmd/rpcdaemon/health/check_time.go index d604521656c..e78f8aee858 100644 --- a/cmd/rpcdaemon/health/check_time.go +++ b/cmd/rpcdaemon/health/check_time.go @@ -27,7 +27,7 @@ func checkTime( timestamp = int(cs) } } - if timestamp > seconds { + if timestamp < seconds { return fmt.Errorf("%w: got ts: %d, need: %d", errTimestampTooOld, timestamp, seconds) } diff --git a/cmd/rpcdaemon/health/health_test.go b/cmd/rpcdaemon/health/health_test.go index a843923edfe..f46146a4feb 100644 --- a/cmd/rpcdaemon/health/health_test.go +++ b/cmd/rpcdaemon/health/health_test.go @@ -245,7 +245,7 @@ func TestProcessHealthcheckIfNeeded_HeadersTests(t *testing.T) { netApiResponse: hexutil.Uint(1), netApiError: nil, ethApiBlockResult: map[string]interface{}{ - "timestamp": time.Now().Add(1 * time.Second).Unix(), + "timestamp": uint64(time.Now().Add(-10 * time.Second).Unix()), }, ethApiBlockError: nil, ethApiSyncingResult: false, @@ -264,7 +264,7 @@ func TestProcessHealthcheckIfNeeded_HeadersTests(t *testing.T) { netApiResponse: hexutil.Uint(1), netApiError: nil, ethApiBlockResult: map[string]interface{}{ - "timestamp": uint64(time.Now().Add(1 * time.Hour).Unix()), + "timestamp": uint64(time.Now().Add(-1 * time.Hour).Unix()), }, ethApiBlockError: nil, ethApiSyncingResult: false, @@ -319,7 +319,7 @@ func TestProcessHealthcheckIfNeeded_HeadersTests(t *testing.T) { netApiResponse: hexutil.Uint(10), netApiError: nil, ethApiBlockResult: map[string]interface{}{ - "timestamp": time.Now().Add(1 * time.Second).Unix(), + "timestamp": uint64(time.Now().Add(1 * time.Second).Unix()), }, ethApiBlockError: nil, ethApiSyncingResult: false, diff --git a/cmd/rpctest/main.go b/cmd/rpctest/main.go index 7f5c8a8847d..44b53f85d11 100644 --- a/cmd/rpctest/main.go +++ b/cmd/rpctest/main.go @@ -73,6 +73,19 @@ func main() { } with(benchEthCallCmd, withErigonUrl, withGethUrl, withNeedCompare, withBlockNum, withRecord, withErrorFile, withLatest) + var benchEthCreateAccessListCmd = &cobra.Command{ + Use: "benchEthCreateAccessList", + Short: "", + Long: ``, + Run: func(cmd *cobra.Command, args []string) { + err := rpctest.BenchEthCreateAccessList(erigonURL, gethURL, needCompare, latest, blockFrom, blockTo, recordFile, errorFile) + if err != nil { + logger.Error(err.Error()) + } + }, + } + with(benchEthCreateAccessListCmd, withErigonUrl, withGethUrl, withNeedCompare, withBlockNum, withRecord, withErrorFile, withLatest) + var benchEthGetBlockByHash = &cobra.Command{ Use: "benchEthGetBlockByHash", Short: "", @@ -434,6 +447,7 @@ func main() { benchEthGetBlockByNumber2Cmd, benchEthGetBlockByHash, benchEthCallCmd, + benchEthCreateAccessListCmd, benchEthGetTransactionByHashCmd, bench1Cmd, bench2Cmd, diff --git a/cmd/rpctest/rpctest/bench_ethcreateaccesslist.go b/cmd/rpctest/rpctest/bench_ethcreateaccesslist.go new file mode 100644 index 00000000000..af9f1e59ac0 --- /dev/null +++ b/cmd/rpctest/rpctest/bench_ethcreateaccesslist.go @@ -0,0 +1,113 @@ +package rpctest + +import ( + "bufio" + "fmt" + "net/http" + "os" + "time" +) + +// BenchEthCreateAccessList compares response of Erigon with Geth +// but also can be used for comparing RPCDaemon with Geth or infura +// parameters: +// 
needCompare - if false - doesn't call Geth and doesn't compare responses
+//
+//	use the false value to generate vegeta files; it's faster, and vegeta files can be generated for both Geth and Erigon
+// recordFile stores all eth_createAccessList requests that returned success
+// errorFile stores information when Erigon and Geth don't return the same data
+func BenchEthCreateAccessList(erigonURL, gethURL string, needCompare, latest bool, blockFrom, blockTo uint64, recordFileName string, errorFileName string) error {
+	setRoutes(erigonURL, gethURL)
+	var client = &http.Client{
+		Timeout: time.Second * 600,
+	}
+
+	var rec *bufio.Writer
+	var errs *bufio.Writer
+	var resultsCh chan CallResult = nil
+	var nTransactions = 0
+
+	if errorFileName != "" {
+		f, err := os.Create(errorFileName)
+		if err != nil {
+			return fmt.Errorf("Cannot create file %s for errorFile: %v\n", errorFileName, err)
+		}
+		defer f.Close()
+		errs = bufio.NewWriter(f)
+		defer errs.Flush()
+	}
+
+	if recordFileName != "" {
+		frec, errRec := os.Create(recordFileName)
+		if errRec != nil {
+			return fmt.Errorf("Cannot create file %s for recordFile: %v\n", recordFileName, errRec)
+		}
+		defer frec.Close()
+		rec = bufio.NewWriter(frec)
+		defer rec.Flush()
+	}
+
+	if !needCompare {
+		resultsCh = make(chan CallResult, 1000)
+		defer close(resultsCh)
+		go vegetaWrite(true, []string{"eth_createAccessList"}, resultsCh)
+	}
+	var res CallResult
+
+	reqGen := &RequestGenerator{
+		client: client,
+	}
+
+	reqGen.reqID++
+
+	for bn := blockFrom; bn <= blockTo; bn++ {
+		reqGen.reqID++
+		var b EthBlockByNumber
+		res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b)
+		if res.Err != nil {
+			return fmt.Errorf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err)
+		}
+
+		if b.Error != nil {
+			return fmt.Errorf("Error retrieving block (Erigon): %d %s\n", b.Error.Code, b.Error.Message)
+		}
+
+		if needCompare {
+			var bg EthBlockByNumber
+			res = reqGen.Geth("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &bg)
+			if res.Err != nil {
+				return fmt.Errorf("Could not retrieve block (geth) %d: %v\n", bn, res.Err)
+			}
+			if bg.Error != nil {
+				return fmt.Errorf("Error retrieving block (geth): %d %s\n", bg.Error.Code, bg.Error.Message)
+			}
+			if !compareBlocks(&b, &bg) {
+				if rec != nil {
+					fmt.Fprintf(rec, "Block difference for block=%d\n", bn)
+					rec.Flush()
+					continue
+				} else {
+					return fmt.Errorf("Block one or more fields are different for block %d\n", bn)
+				}
+			}
+		}
+
+		for _, tx := range b.Result.Transactions {
+			reqGen.reqID++
+			nTransactions++
+
+			request := reqGen.ethCreateAccessList(tx.From, tx.To, &tx.Gas, &tx.GasPrice, &tx.Value, tx.Input, bn-1)
+			errCtx := fmt.Sprintf(" bn=%d hash=%s", bn, tx.Hash)
+
+			if err := requestAndCompare(request, "eth_createAccessList", errCtx, reqGen, needCompare, rec, errs, resultsCh,
+				false); err != nil {
+				return err
+			}
+		}
+
+		fmt.Println("\nProcessed Transactions: ", nTransactions)
+	}
+	return nil
+}
diff --git a/cmd/rpctest/rpctest/request_generator.go b/cmd/rpctest/rpctest/request_generator.go
index dfb75763005..a38ea1ef3a4 100644
--- a/cmd/rpctest/rpctest/request_generator.go
+++ b/cmd/rpctest/rpctest/request_generator.go
@@ -236,6 +236,28 @@ func (g *RequestGenerator) ethCall(from libcommon.Address, to *libcommon.Address
 	return sb.String()
 }

+func (g *RequestGenerator) ethCreateAccessList(from libcommon.Address, to *libcommon.Address, gas *hexutil.Big, gasPrice *hexutil.Big, value *hexutil.Big, data hexutility.Bytes, bn uint64)
string { + var sb strings.Builder + fmt.Fprintf(&sb, `{ "jsonrpc": "2.0", "method": "eth_createAccessList", "params": [{"from":"0x%x"`, from) + if to != nil { + fmt.Fprintf(&sb, `,"to":"0x%x"`, *to) + } + if gas != nil { + fmt.Fprintf(&sb, `,"gas":"%s"`, gas) + } + if gasPrice != nil { + fmt.Fprintf(&sb, `,"gasPrice":"%s"`, gasPrice) + } + if len(data) > 0 { + fmt.Fprintf(&sb, `,"data":"%s"`, data) + } + if value != nil { + fmt.Fprintf(&sb, `,"value":"%s"`, value) + } + fmt.Fprintf(&sb, `},"0x%x"], "id":%d}`, bn, g.reqID) + return sb.String() +} + func (g *RequestGenerator) ethCallLatest(from libcommon.Address, to *libcommon.Address, gas *hexutil.Big, gasPrice *hexutil.Big, value *hexutil.Big, data hexutility.Bytes) string { var sb strings.Builder fmt.Fprintf(&sb, `{ "jsonrpc": "2.0", "method": "eth_call", "params": [{"from":"0x%x"`, from) diff --git a/cmd/sentinel/main.go b/cmd/sentinel/main.go index 3208d9c3e36..9453bf492b2 100644 --- a/cmd/sentinel/main.go +++ b/cmd/sentinel/main.go @@ -54,7 +54,7 @@ func runSentinelNode(cliCtx *cli.Context) error { BeaconConfig: cfg.BeaconCfg, NoDiscovery: cfg.NoDiscovery, LocalDiscovery: cfg.LocalDiscovery, - }, nil, &service.ServerConfig{Network: cfg.ServerProtocol, Addr: cfg.ServerAddr}, nil, nil, log.Root()) + }, nil, nil, &service.ServerConfig{Network: cfg.ServerProtocol, Addr: cfg.ServerAddr}, nil, nil, log.Root()) if err != nil { log.Error("[Sentinel] Could not start sentinel", "err", err) return err diff --git a/cmd/silkworm_api/snapshot_idx.go b/cmd/silkworm_api/snapshot_idx.go index 4265ef19471..8f728ddf06f 100644 --- a/cmd/silkworm_api/snapshot_idx.go +++ b/cmd/silkworm_api/snapshot_idx.go @@ -6,6 +6,7 @@ import ( "path/filepath" "time" + "github.com/ledgerwatch/erigon-lib/chain/snapcfg" "github.com/ledgerwatch/erigon-lib/common/background" "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/downloader/snaptype" @@ -37,7 +38,7 @@ func main() { }, }, Action: func(cCtx *cli.Context) error { - return buildIndex(cCtx, cCtx.String("datadir"), cCtx.StringSlice("snapshot_path")) + return buildIndex(cCtx, cCtx.String("datadir"), cCtx.StringSlice("snapshot_path"), 0) }, } @@ -55,7 +56,7 @@ func FindIf(segments []snaptype.FileInfo, predicate func(snaptype.FileInfo) bool return snaptype.FileInfo{}, false // Return zero value and false if not found } -func buildIndex(cliCtx *cli.Context, dataDir string, snapshotPaths []string) error { +func buildIndex(cliCtx *cli.Context, dataDir string, snapshotPaths []string, minBlock uint64) error { logger, _, err := debug.Setup(cliCtx, true /* rootLogger */) if err != nil { return err @@ -75,7 +76,7 @@ func buildIndex(cliCtx *cli.Context, dataDir string, snapshotPaths []string) err chainConfig := fromdb.ChainConfig(chainDB) - segments, _, err := freezeblocks.Segments(dirs.Snap) + segments, _, err := freezeblocks.Segments(dirs.Snap, snapcfg.KnownCfg(chainConfig.ChainName, 0).Version, minBlock) if err != nil { return err } @@ -97,7 +98,7 @@ func buildIndex(cliCtx *cli.Context, dataDir string, snapshotPaths []string) err jobProgress := &background.Progress{} ps.Add(jobProgress) defer ps.Delete(jobProgress) - return freezeblocks.HeadersIdx(ctx, chainConfig, segment.Path, segment.From, dirs.Tmp, jobProgress, logLevel, logger) + return freezeblocks.HeadersIdx(ctx, segment.Path, segment.Version, segment.From, dirs.Tmp, jobProgress, logLevel, logger) }) case snaptype.Bodies: g.Go(func() error { @@ -112,7 +113,7 @@ func buildIndex(cliCtx *cli.Context, dataDir string, snapshotPaths []string) 
err
 				ps.Add(jobProgress)
 				defer ps.Delete(jobProgress)
 				dir, _ := filepath.Split(segment.Path)
-				return freezeblocks.TransactionsIdx(ctx, chainConfig, segment.From, segment.To, dir, dirs.Tmp, jobProgress, logLevel, logger)
+				return freezeblocks.TransactionsIdx(ctx, chainConfig, segment.Version, segment.From, segment.To, dir, dirs.Tmp, jobProgress, logLevel, logger)
 			})
 		}
 	}
diff --git a/cmd/snapshots/README.md b/cmd/snapshots/README.md
new file mode 100644
index 00000000000..110f2d20ab4
--- /dev/null
+++ b/cmd/snapshots/README.md
@@ -0,0 +1,79 @@
+# Snapshots - a tool for managing remote snapshots
+
+In the root of the `Erigon` project, use this command to build the tool:
+
+```shell
+make snapshots
+```
+
+It can then be run using the following command:
+
+```shell
+./build/bin/snapshots sub-command options...
+```
+
+Snapshots supports the following sub commands:
+
+## cmp - compare snapshots
+
+This command takes the following form:
+
+```shell
+snapshots cmp <location> <location>
+```
+
+This will cause the .seg files from each location to be copied to the local machine, indexed and then have their rlp contents compared.
+
+Optionally a `<start block>` and an `<end block>` may be specified to limit the scope of the operation.
+
+It is also possible to set the `--types` flag to limit the types of segment files being downloaded and compared. The currently supported types are `header` and `body`.
+
+## copy - copy snapshots
+
+This command can be used to copy segment files from one location to another.
+
+This command takes the following form:
+
+```shell
+snapshots copy <source> <destination>
+```
+
+Optionally a `<start block>` and an `<end block>` may be specified to limit the scope of the operation.
+
+## verify - verify snapshots
+
+-- TBD
+
+## manifest - manage the manifest file in the root of remote snapshot locations
+
+The `manifest` command supports the following actions:
+
+| Action | Description |
+|--------|-------------|
+| list   | list the manifest from its storage location |
+| update | update the manifest to match the files available at its storage location |
+| verify | verify that the manifest matches the files available at its storage location |
+
+All actions take a `<location>` argument which specifies the remote location which contains the manifest.
+
+Optionally a `<start block>` and an `<end block>` may be specified to limit the scope of the operation.
+
+## torrent - manage snapshot torrent files
+
+The `torrent` command supports the following actions:
+
+| Action | Description |
+|--------|-------------|
+| list   | list the torrents available at the specified storage location |
+| hashes | list the torrent hashes (in toml format) at the specified storage location |
+| update | re-create the torrents for the contents available at their storage location |
+| verify | verify that the torrent contents are available at their storage location |
+
+All actions take a `<location>` argument which specifies the remote location which contains the torrents.
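+
+For example, a hypothetical session against an rclone remote labelled `r2` (the remote name and bucket path below are illustrative, not part of this tool):
+
+```shell
+# list the torrent files held at the remote location
+./build/bin/snapshots torrent list r2:erigon-snapshots
+
+# print the hashes of those torrents in toml format
+./build/bin/snapshots torrent hashes r2:erigon-snapshots
+```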
+ +Optionally a ``` and optionally an `` may be specified to limit the scope of the operation + + + + + diff --git a/cmd/snapshots/cmp/cmp.go b/cmd/snapshots/cmp/cmp.go new file mode 100644 index 00000000000..2ba6e0fde47 --- /dev/null +++ b/cmd/snapshots/cmp/cmp.go @@ -0,0 +1,788 @@ +package cmp + +import ( + "bytes" + "context" + "fmt" + "io/fs" + "os" + "path/filepath" + "strconv" + "sync/atomic" + "time" + + "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/downloader" + "github.com/ledgerwatch/erigon-lib/downloader/snaptype" + "github.com/ledgerwatch/erigon/cmd/snapshots/flags" + "github.com/ledgerwatch/erigon/cmd/snapshots/sync" + "github.com/ledgerwatch/erigon/cmd/utils" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/turbo/logging" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" + "github.com/ledgerwatch/log/v3" + "github.com/urfave/cli/v2" + "golang.org/x/sync/errgroup" +) + +var Command = cli.Command{ + Action: cmp, + Name: "cmp", + Usage: "Compare snapshot segments", + ArgsUsage: " ", + Flags: []cli.Flag{ + &flags.SegTypes, + &utils.DataDirFlag, + &logging.LogVerbosityFlag, + &logging.LogConsoleVerbosityFlag, + &logging.LogDirVerbosityFlag, + &utils.WebSeedsFlag, + &utils.NATFlag, + &utils.DisableIPV6, + &utils.DisableIPV4, + &utils.TorrentDownloadRateFlag, + &utils.TorrentUploadRateFlag, + &utils.TorrentVerbosityFlag, + &utils.TorrentPortFlag, + &utils.TorrentMaxPeersFlag, + &utils.TorrentConnsPerFileFlag, + }, + Description: ``, +} + +func cmp(cliCtx *cli.Context) error { + + logger := sync.Logger(cliCtx.Context) + + var loc1, loc2 *sync.Locator + + var rcCli *downloader.RCloneClient + var torrentCli *sync.TorrentClient + + dataDir := cliCtx.String(utils.DataDirFlag.Name) + var tempDir string + + if len(dataDir) == 0 { + dataDir, err := os.MkdirTemp("", "snapshot-cpy-") + if err != nil { + return err + } + tempDir = dataDir + defer os.RemoveAll(dataDir) + } else { + tempDir = filepath.Join(dataDir, "temp") + + if err := os.MkdirAll(tempDir, 0755); err != nil { + return err + } + } + + cliCtx.Context = sync.WithTempDir(cliCtx.Context, tempDir) + + var err error + + checkRemote := func(src string) error { + if rcCli == nil { + rcCli, err = downloader.NewRCloneClient(logger) + + if err != nil { + return err + } + } + + return sync.CheckRemote(rcCli, src) + } + + var chain string + + pos := 0 + + if cliCtx.Args().Len() > pos { + val := cliCtx.Args().Get(pos) + + if loc1, err = sync.ParseLocator(val); err != nil { + return err + } + + switch loc1.LType { + case sync.RemoteFs: + if err = checkRemote(loc1.Src); err != nil { + return err + } + + chain = loc1.Chain + } + } + + pos++ + + if cliCtx.Args().Len() > pos { + val := cliCtx.Args().Get(pos) + + if loc2, err = sync.ParseLocator(val); err != nil { + return err + } + + switch loc2.LType { + case sync.RemoteFs: + if err = checkRemote(loc2.Src); err != nil { + return err + } + + chain = loc2.Chain + } + + pos++ + } + + if loc1.LType == sync.TorrentFs || loc2.LType == sync.TorrentFs { + torrentCli, err = sync.NewTorrentClient(cliCtx, chain) + if err != nil { + return fmt.Errorf("can't create torrent: %w", err) + } + } + + typeValues := cliCtx.StringSlice(flags.SegTypes.Name) + snapTypes := make([]snaptype.Type, 0, len(typeValues)) + + for _, val := range typeValues { + segType, ok := 
snaptype.ParseFileType(val)
+
+		if !ok {
+			return fmt.Errorf("unknown file type: %s", val)
+		}
+
+		snapTypes = append(snapTypes, segType)
+	}
+
+	var firstBlock, lastBlock uint64
+
+	if cliCtx.Args().Len() > pos {
+		if firstBlock, err = strconv.ParseUint(cliCtx.Args().Get(pos), 10, 64); err != nil {
+			return err
+		}
+		pos++
+	}
+
+	if cliCtx.Args().Len() > pos {
+		if lastBlock, err = strconv.ParseUint(cliCtx.Args().Get(pos), 10, 64); err != nil {
+			return err
+		}
+	}
+
+	var session1 sync.DownloadSession
+	var session2 sync.DownloadSession
+
+	if rcCli != nil {
+		if loc1.LType == sync.RemoteFs {
+			session1, err = rcCli.NewSession(cliCtx.Context, filepath.Join(tempDir, "l1"), loc1.Src+":"+loc1.Root)
+
+			if err != nil {
+				return err
+			}
+		}
+
+		if loc2.LType == sync.RemoteFs {
+			session2, err = rcCli.NewSession(cliCtx.Context, filepath.Join(tempDir, "l2"), loc2.Src+":"+loc2.Root)
+
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	if torrentCli != nil {
+		if loc1.LType == sync.TorrentFs {
+			session1 = sync.NewTorrentSession(torrentCli, chain)
+		}
+
+		if loc2.LType == sync.TorrentFs {
+			session2 = sync.NewTorrentSession(torrentCli, chain)
+		}
+	}
+
+	if session1 == nil {
+		return fmt.Errorf("no first session established")
+	}
+
+	if session2 == nil {
+		return fmt.Errorf("no second session established")
+	}
+
+	logger.Info(fmt.Sprintf("Starting compare: %s==%s", loc1.String(), loc2.String()), "first", firstBlock, "last", lastBlock, "types", snapTypes, "dir", tempDir)
+
+	logger.Info("Reading s1 dir", "remoteFs", session1.RemoteFsRoot(), "label", session1.Label())
+	files, err := sync.DownloadManifest(cliCtx.Context, session1)
+
+	if err != nil {
+		files, err = session1.ReadRemoteDir(cliCtx.Context, true)
+	}
+
+	if err != nil {
+		return err
+	}
+
+	h1ents, b1ents := splitEntries(files, loc1.Version, firstBlock, lastBlock)
+
+	logger.Info("Reading s2 dir", "remoteFs", session2.RemoteFsRoot(), "label", session2.Label())
+	files, err = sync.DownloadManifest(cliCtx.Context, session2)
+
+	if err != nil {
+		files, err = session2.ReadRemoteDir(cliCtx.Context, true)
+	}
+
+	if err != nil {
+		return err
+	}
+
+	h2ents, b2ents := splitEntries(files, loc2.Version, firstBlock, lastBlock)
+
+	c := comparitor{
+		chain:    chain,
+		loc1:     loc1,
+		loc2:     loc2,
+		session1: session1,
+		session2: session2,
+	}
+
+	var funcs []func(ctx context.Context) (time.Duration, time.Duration, time.Duration, error)
+
+	bodyWorkers := 4
+	headerWorkers := 4
+
+	if len(snapTypes) == 0 {
+		funcs = append(funcs, func(ctx context.Context) (time.Duration, time.Duration, time.Duration, error) {
+			return c.compareHeaders(ctx, h1ents, h2ents, headerWorkers, logger)
+		}, func(ctx context.Context) (time.Duration, time.Duration, time.Duration, error) {
+			return c.compareBodies(ctx, b1ents, b2ents, bodyWorkers, logger)
+		})
+	} else {
+		for _, snapType := range snapTypes {
+			if snapType == snaptype.Headers {
+				funcs = append(funcs, func(ctx context.Context) (time.Duration, time.Duration, time.Duration, error) {
+					return c.compareHeaders(ctx, h1ents, h2ents, headerWorkers, logger)
+				})
+			}
+
+			if snapType == snaptype.Bodies {
+				funcs = append(funcs, func(ctx context.Context) (time.Duration, time.Duration, time.Duration, error) {
+					return c.compareBodies(ctx, b1ents, b2ents, bodyWorkers, logger)
+				})
+			}
+		}
+	}
+
+	if len(funcs) > 0 {
+		startTime := time.Now()
+
+		var downloadTime uint64
+		var indexTime uint64
+		var compareTime uint64
+
+		g, ctx := errgroup.WithContext(cliCtx.Context)
+		g.SetLimit(len(funcs))
+
+		for _, f := range funcs {
+			func(ctx context.Context, f func(ctx context.Context) (time.Duration, time.Duration,
time.Duration, error)) { + g.Go(func() error { + dt, it, ct, err := f(ctx) + + atomic.AddUint64(&downloadTime, uint64(dt)) + atomic.AddUint64(&indexTime, uint64(it)) + atomic.AddUint64(&compareTime, uint64(ct)) + + return err + }) + }(ctx, f) + } + + err = g.Wait() + + if err == nil { + logger.Info(fmt.Sprintf("Finished compare: %s==%s", loc1.String(), loc2.String()), "elapsed", time.Since(startTime), + "downloading", time.Duration(downloadTime), "indexing", time.Duration(indexTime), "comparing", time.Duration(compareTime)) + } else { + logger.Info(fmt.Sprintf("Failed compare: %s==%s", loc1.String(), loc2.String()), "err", err, "elapsed", time.Since(startTime), + "downloading", time.Duration(downloadTime), "indexing", time.Duration(indexTime), "comparing", time.Duration(compareTime)) + } + + } + return nil +} + +type BodyEntry struct { + From, To uint64 + Body, Transactions fs.DirEntry +} + +func splitEntries(files []fs.DirEntry, version uint8, firstBlock, lastBlock uint64) (hents []fs.DirEntry, bents []*BodyEntry) { + for _, ent := range files { + if info, err := ent.Info(); err == nil { + if snapInfo, ok := info.Sys().(downloader.SnapInfo); ok && snapInfo.Version() > 0 { + if version == snapInfo.Version() && + (firstBlock == 0 || snapInfo.From() >= firstBlock) && + (lastBlock == 0 || snapInfo.From() < lastBlock) { + + if snapInfo.Type() == snaptype.Headers { + hents = append(hents, ent) + } + + if snapInfo.Type() == snaptype.Bodies { + found := false + + for _, bent := range bents { + if snapInfo.From() == bent.From && + snapInfo.To() == bent.To { + bent.Body = ent + found = true + } + } + + if !found { + bents = append(bents, &BodyEntry{snapInfo.From(), snapInfo.To(), ent, nil}) + } + } + + if snapInfo.Type() == snaptype.Transactions { + found := false + + for _, bent := range bents { + if snapInfo.From() == bent.From && + snapInfo.To() == bent.To { + bent.Transactions = ent + found = true + + } + } + + if !found { + bents = append(bents, &BodyEntry{snapInfo.From(), snapInfo.To(), nil, ent}) + } + } + } + } + } + } + + return hents, bents +} + +type comparitor struct { + chain string + loc1, loc2 *sync.Locator + session1 sync.DownloadSession + session2 sync.DownloadSession +} + +func (c comparitor) chainConfig() *chain.Config { + return params.ChainConfigByChainName(c.chain) +} + +func (c comparitor) compareHeaders(ctx context.Context, f1ents []fs.DirEntry, f2ents []fs.DirEntry, workers int, logger log.Logger) (time.Duration, time.Duration, time.Duration, error) { + var downloadTime uint64 + var compareTime uint64 + + g, ctx := errgroup.WithContext(ctx) + g.SetLimit(workers) + + for i1, ent1 := range f1ents { + var snapInfo1 downloader.SnapInfo + + if info, err := ent1.Info(); err == nil { + snapInfo1, _ = info.Sys().(downloader.SnapInfo) + } + + if snapInfo1 == nil { + continue + } + + for i2, ent2 := range f2ents { + + var snapInfo2 downloader.SnapInfo + + ent2Info, err := ent2.Info() + + if err == nil { + snapInfo2, _ = ent2Info.Sys().(downloader.SnapInfo) + } + + if snapInfo2 == nil || + snapInfo1.Type() != snapInfo2.Type() || + snapInfo1.From() != snapInfo2.From() || + snapInfo1.To() != snapInfo2.To() { + continue + } + + i1, i2, ent1, ent2 := i1, i2, ent1, ent2 + + g.Go(func() error { + g, gctx := errgroup.WithContext(ctx) + g.SetLimit(2) + + g.Go(func() error { + logger.Info(fmt.Sprintf("Downloading %s", ent1.Name()), "entry", fmt.Sprint(i1+1, "/", len(f1ents))) + startTime := time.Now() + defer func() { + atomic.AddUint64(&downloadTime, uint64(time.Since(startTime))) + }() + + 
err := c.session1.Download(gctx, ent1.Name()) + + if err != nil { + return err + } + + return nil + }) + + g.Go(func() error { + startTime := time.Now() + defer func() { + atomic.AddUint64(&downloadTime, uint64(time.Since(startTime))) + }() + + logger.Info(fmt.Sprintf("Downloading %s", ent2.Name()), "entry", fmt.Sprint(i2+1, "/", len(f2ents)), "size", datasize.ByteSize(ent2Info.Size())) + err := c.session2.Download(gctx, ent2.Name()) + + if err != nil { + return err + } + + return nil + }) + + if err := g.Wait(); err != nil { + return err + } + + f1snaps := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{ + Enabled: true, + Produce: false, + NoDownloader: true, + }, c.session1.LocalFsRoot(), c.loc1.Version, logger) + + f1snaps.ReopenList([]string{ent1.Name()}, false) + + f2snaps := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{ + Enabled: true, + Produce: false, + NoDownloader: true, + }, c.session2.LocalFsRoot(), c.loc2.Version, logger) + + f2snaps.ReopenList([]string{ent2.Name()}, false) + + err = func() error { + logger.Info(fmt.Sprintf("Comparing %s %s", ent1.Name(), ent2.Name())) + startTime := time.Now() + + defer func() { + atomic.AddUint64(&compareTime, uint64(time.Since(startTime))) + }() + + blockReader1 := freezeblocks.NewBlockReader(f1snaps, nil) + blockReader2 := freezeblocks.NewBlockReader(f2snaps, nil) + + g, gctx = errgroup.WithContext(ctx) + g.SetLimit(2) + + h2chan := make(chan *types.Header) + + g.Go(func() error { + blockReader2.HeadersRange(gctx, func(h2 *types.Header) error { + select { + case h2chan <- h2: + return nil + case <-gctx.Done(): + return gctx.Err() + } + }) + + close(h2chan) + return nil + }) + + g.Go(func() error { + err := blockReader1.HeadersRange(gctx, func(h1 *types.Header) error { + select { + case h2 := <-h2chan: + if h2 == nil { + return fmt.Errorf("header %d unknown", h1.Number.Uint64()) + } + + if h1.Number.Uint64() != h2.Number.Uint64() { + return fmt.Errorf("mismatched headers: expected %d, Got: %d", h1.Number.Uint64(), h2.Number.Uint64()) + } + + var h1buf, h2buf bytes.Buffer + + h1.EncodeRLP(&h1buf) + h2.EncodeRLP(&h2buf) + + if !bytes.Equal(h1buf.Bytes(), h2buf.Bytes()) { + return fmt.Errorf("%d: headers do not match", h1.Number.Uint64()) + } + + return nil + case <-gctx.Done(): + return gctx.Err() + } + }) + + return err + }) + + return g.Wait() + }() + + files := f1snaps.OpenFiles() + f1snaps.Close() + + files = append(files, f2snaps.OpenFiles()...) 
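+				// both snapshot sets are closed only after their open file paths have been captured, so the downloaded segments can be removed below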
+ f2snaps.Close() + + for _, file := range files { + os.Remove(file) + } + + return err + }) + } + } + + err := g.Wait() + + return time.Duration(downloadTime), 0, time.Duration(compareTime), err +} + +func (c comparitor) compareBodies(ctx context.Context, f1ents []*BodyEntry, f2ents []*BodyEntry, workers int, logger log.Logger) (time.Duration, time.Duration, time.Duration, error) { + var downloadTime uint64 + var indexTime uint64 + var compareTime uint64 + + g, ctx := errgroup.WithContext(ctx) + g.SetLimit(workers) + + for i1, ent1 := range f1ents { + for i2, ent2 := range f2ents { + if ent1.From != ent2.From || + ent1.To != ent2.To { + continue + } + + i1, i2, ent1, ent2 := i1, i2, ent1, ent2 + + g.Go(func() error { + g, ctx := errgroup.WithContext(ctx) + g.SetLimit(4) + + b1err := make(chan error, 1) + + g.Go(func() error { + + err := func() error { + startTime := time.Now() + + defer func() { + atomic.AddUint64(&downloadTime, uint64(time.Since(startTime))) + }() + + logger.Info(fmt.Sprintf("Downloading %s", ent1.Body.Name()), "entry", fmt.Sprint(i1+1, "/", len(f1ents))) + return c.session1.Download(ctx, ent1.Body.Name()) + }() + + b1err <- err + + if err != nil { + return fmt.Errorf("can't download %s: %w", ent1.Body.Name(), err) + } + + startTime := time.Now() + + defer func() { + atomic.AddUint64(&indexTime, uint64(time.Since(startTime))) + }() + + logger.Info(fmt.Sprintf("Indexing %s", ent1.Body.Name())) + return freezeblocks.BodiesIdx(ctx, + filepath.Join(c.session1.LocalFsRoot(), ent1.Body.Name()), ent1.From, c.session1.LocalFsRoot(), nil, log.LvlDebug, logger) + }) + + g.Go(func() error { + err := func() error { + startTime := time.Now() + + defer func() { + atomic.AddUint64(&downloadTime, uint64(time.Since(startTime))) + }() + logger.Info(fmt.Sprintf("Downloading %s", ent1.Transactions.Name()), "entry", fmt.Sprint(i1+1, "/", len(f1ents))) + return c.session1.Download(ctx, ent1.Transactions.Name()) + }() + + if err != nil { + return fmt.Errorf("can't download %s: %w", ent1.Transactions.Name(), err) + } + + select { + case <-ctx.Done(): + return ctx.Err() + case err = <-b1err: + if err != nil { + return fmt.Errorf("can't create transaction index: no bodies: %w", err) + } + } + + startTime := time.Now() + + defer func() { + atomic.AddUint64(&indexTime, uint64(time.Since(startTime))) + }() + + logger.Info(fmt.Sprintf("Indexing %s", ent1.Transactions.Name())) + return freezeblocks.TransactionsIdx(ctx, c.chainConfig(), c.loc1.Version, ent1.From, ent1.To, + c.session1.LocalFsRoot(), c.session1.LocalFsRoot(), nil, log.LvlDebug, logger) + }) + + b2err := make(chan error, 1) + + g.Go(func() error { + err := func() error { + startTime := time.Now() + + defer func() { + atomic.AddUint64(&downloadTime, uint64(time.Since(startTime))) + }() + + logger.Info(fmt.Sprintf("Downloading %s", ent2.Body.Name()), "entry", fmt.Sprint(i2+1, "/", len(f2ents))) + return c.session2.Download(ctx, ent2.Body.Name()) + }() + + b2err <- err + + if err != nil { + return fmt.Errorf("can't download %s: %w", ent2.Body.Name(), err) + } + + startTime := time.Now() + + defer func() { + atomic.AddUint64(&indexTime, uint64(time.Since(startTime))) + }() + + logger.Info(fmt.Sprintf("Indexing %s", ent2.Body.Name())) + return freezeblocks.BodiesIdx(ctx, + filepath.Join(c.session2.LocalFsRoot(), ent2.Body.Name()), ent2.From, c.session1.LocalFsRoot(), nil, log.LvlDebug, logger) + }) + + g.Go(func() error { + err := func() error { + startTime := time.Now() + + defer func() { + atomic.AddUint64(&downloadTime, 
uint64(time.Since(startTime))) + }() + logger.Info(fmt.Sprintf("Downloading %s", ent2.Transactions.Name()), "entry", fmt.Sprint(i2+1, "/", len(f2ents))) + return c.session2.Download(ctx, ent2.Transactions.Name()) + }() + + if err != nil { + return fmt.Errorf("can't download %s: %w", ent2.Transactions.Name(), err) + } + + select { + case <-ctx.Done(): + return ctx.Err() + case err = <-b2err: + if err != nil { + return fmt.Errorf("can't create transaction index: no bodies: %w", err) + } + } + + startTime := time.Now() + + defer func() { + atomic.AddUint64(&indexTime, uint64(time.Since(startTime))) + }() + + logger.Info(fmt.Sprintf("Indexing %s", ent2.Transactions.Name())) + return freezeblocks.TransactionsIdx(ctx, c.chainConfig(), c.loc2.Version, ent2.From, ent2.To, + c.session2.LocalFsRoot(), c.session2.LocalFsRoot(), nil, log.LvlDebug, logger) + }) + + if err := g.Wait(); err != nil { + return err + } + + f1snaps := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{ + Enabled: true, + Produce: false, + NoDownloader: true, + }, c.session1.LocalFsRoot(), c.loc1.Version, logger) + + f1snaps.ReopenList([]string{ent1.Body.Name(), ent1.Transactions.Name()}, false) + + f2snaps := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{ + Enabled: true, + Produce: false, + NoDownloader: true, + }, c.session2.LocalFsRoot(), c.loc2.Version, logger) + + f2snaps.ReopenList([]string{ent2.Body.Name(), ent2.Transactions.Name()}, false) + + err := func() error { + logger.Info(fmt.Sprintf("Comparing %s %s", ent1.Body.Name(), ent2.Body.Name())) + + startTime := time.Now() + + defer func() { + atomic.AddUint64(&compareTime, uint64(time.Since(startTime))) + }() + + blockReader1 := freezeblocks.NewBlockReader(f1snaps, nil) + blockReader2 := freezeblocks.NewBlockReader(f2snaps, nil) + + return func() error { + for i := ent1.From; i < ent1.To; i++ { + body1, err := blockReader1.BodyWithTransactions(ctx, nil, common.Hash{}, i) + + if err != nil { + return fmt.Errorf("%d: can't get body 1: %w", i, err) + } + + body2, err := blockReader2.BodyWithTransactions(ctx, nil, common.Hash{}, i) + + if err != nil { + return fmt.Errorf("%d: can't get body 2: %w", i, err) + } + + var b1buf, b2buf bytes.Buffer + + body1.EncodeRLP(&b1buf) + body2.EncodeRLP(&b2buf) + + if !bytes.Equal(b1buf.Bytes(), b2buf.Bytes()) { + return fmt.Errorf("%d: bodies do not match", i) + } + } + + return nil + }() + }() + + files := f1snaps.OpenFiles() + f1snaps.Close() + + files = append(files, f2snaps.OpenFiles()...) 
+				f2snaps.Close()
+
+				for _, file := range files {
+					os.Remove(file)
+				}
+
+				return err
+			})
+		}
+	}
+
+	err := g.Wait()
+
+	return time.Duration(downloadTime), time.Duration(indexTime), time.Duration(compareTime), err
+}
diff --git a/cmd/snapshots/copy/copy.go b/cmd/snapshots/copy/copy.go
new file mode 100644
index 00000000000..4faebc1c6bc
--- /dev/null
+++ b/cmd/snapshots/copy/copy.go
@@ -0,0 +1,333 @@
+package copy
+
+import (
+	"context"
+	"fmt"
+	"io/fs"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"github.com/ledgerwatch/erigon-lib/downloader"
+	"github.com/ledgerwatch/erigon-lib/downloader/snaptype"
+	"github.com/ledgerwatch/erigon/cmd/snapshots/flags"
+	"github.com/ledgerwatch/erigon/cmd/snapshots/sync"
+	"github.com/ledgerwatch/erigon/cmd/utils"
+	"github.com/ledgerwatch/erigon/turbo/logging"
+	"github.com/urfave/cli/v2"
+)
+
+var (
+	TorrentsFlag = cli.BoolFlag{
+		Name:     "torrents",
+		Usage:    `Include torrent files in copy`,
+		Required: false,
+	}
+
+	HashesFlag = cli.BoolFlag{
+		Name:     "hashes",
+		Usage:    `Include hash .toml in copy`,
+		Required: false,
+	}
+
+	ManifestFlag = cli.BoolFlag{
+		Name:     "manifest",
+		Usage:    `Include manifest .txt in copy`,
+		Required: false,
+	}
+
+	VersionFlag = cli.IntFlag{
+		Name:     "version",
+		Usage:    `File versions to copy`,
+		Required: false,
+		Value:    0,
+	}
+)
+
+var Command = cli.Command{
+	Action:    copy,
+	Name:      "copy",
+	Usage:     "copy snapshot segments",
+	ArgsUsage: " ",
+	Flags: []cli.Flag{
+		&VersionFlag,
+		&flags.SegTypes,
+		&TorrentsFlag,
+		&HashesFlag,
+		&ManifestFlag,
+		&utils.DataDirFlag,
+		&logging.LogVerbosityFlag,
+		&logging.LogConsoleVerbosityFlag,
+		&logging.LogDirVerbosityFlag,
+		&utils.WebSeedsFlag,
+		&utils.NATFlag,
+		&utils.DisableIPV6,
+		&utils.DisableIPV4,
+		&utils.TorrentDownloadRateFlag,
+		&utils.TorrentUploadRateFlag,
+		&utils.TorrentVerbosityFlag,
+		&utils.TorrentPortFlag,
+		&utils.TorrentMaxPeersFlag,
+		&utils.TorrentConnsPerFileFlag,
+	},
+	Description: ``,
+}
+
+func copy(cliCtx *cli.Context) error {
+	logger := sync.Logger(cliCtx.Context)
+
+	logger.Info("Starting copy")
+
+	var src, dst *sync.Locator
+	var err error
+
+	var rcCli *downloader.RCloneClient
+	var torrentCli *sync.TorrentClient
+
+	pos := 0
+
+	if cliCtx.Args().Len() > pos {
+		val := cliCtx.Args().Get(pos)
+
+		if src, err = sync.ParseLocator(val); err != nil {
+			return err
+		}
+	}
+
+	pos++
+
+	if cliCtx.Args().Len() > pos {
+		val := cliCtx.Args().Get(pos)
+
+		if dst, err = sync.ParseLocator(val); err != nil {
+			return err
+		}
+
+		pos++
+	}
+
+	switch dst.LType {
+	case sync.TorrentFs:
+		return fmt.Errorf("can't copy to torrent - need intermediate local fs")
+
+	case sync.RemoteFs:
+		if rcCli == nil {
+			rcCli, err = downloader.NewRCloneClient(logger)
+
+			if err != nil {
+				return err
+			}
+		}
+
+		if err = sync.CheckRemote(rcCli, dst.Src); err != nil {
+			return err
+		}
+	}
+
+	switch src.LType {
+	case sync.TorrentFs:
+		torrentCli, err = sync.NewTorrentClient(cliCtx, dst.Chain)
+		if err != nil {
+			return fmt.Errorf("can't create torrent: %w", err)
+		}
+
+	case sync.RemoteFs:
+		if rcCli == nil {
+			rcCli, err = downloader.NewRCloneClient(logger)
+
+			if err != nil {
+				return err
+			}
+		}
+
+		if err = sync.CheckRemote(rcCli, src.Src); err != nil {
+			return err
+		}
+	}
+
+	typeValues := cliCtx.StringSlice(flags.SegTypes.Name)
+	snapTypes := make([]snaptype.Type, 0, len(typeValues))
+
+	for _, val := range typeValues {
+		segType, ok := snaptype.ParseFileType(val)
+
+		if !ok {
+			return fmt.Errorf("unknown file type: %s", val)
+		}
+
+		snapTypes = append(snapTypes,
segType) + } + + torrents := cliCtx.Bool(TorrentsFlag.Name) + hashes := cliCtx.Bool(HashesFlag.Name) + manifest := cliCtx.Bool(ManifestFlag.Name) + + var firstBlock, lastBlock uint64 + + version := cliCtx.Int(VersionFlag.Name) + + if version != 0 { + dst.Version = uint8(version) + } + + if cliCtx.Args().Len() > pos { + if firstBlock, err = strconv.ParseUint(cliCtx.Args().Get(pos), 10, 64); err != nil { + return err + } + + pos++ + } + + if cliCtx.Args().Len() > pos { + if lastBlock, err = strconv.ParseUint(cliCtx.Args().Get(pos), 10, 64); err != nil { + return err + } + } + + switch src.LType { + case sync.LocalFs: + switch dst.LType { + case sync.LocalFs: + return localToLocal(src, dst, firstBlock, lastBlock, snapTypes, torrents, hashes, manifest) + case sync.RemoteFs: + return localToRemote(rcCli, src, dst, firstBlock, lastBlock, snapTypes, torrents, hashes, manifest) + default: + return fmt.Errorf("unhandled torrent destination: %s", dst) + } + + case sync.RemoteFs: + switch dst.LType { + case sync.LocalFs: + return remoteToLocal(cliCtx.Context, rcCli, src, dst, firstBlock, lastBlock, snapTypes, torrents, hashes, manifest) + case sync.RemoteFs: + return remoteToRemote(rcCli, src, dst, firstBlock, lastBlock, snapTypes, torrents, hashes, manifest) + default: + return fmt.Errorf("unhandled torrent destination: %s", dst) + } + + case sync.TorrentFs: + switch dst.LType { + case sync.LocalFs: + return torrentToLocal(torrentCli, src, dst, firstBlock, lastBlock, snapTypes, torrents, hashes, manifest) + case sync.RemoteFs: + return torrentToRemote(torrentCli, rcCli, src, dst, firstBlock, lastBlock, snapTypes, torrents, hashes, manifest) + default: + return fmt.Errorf("unhandled torrent destination: %s", dst) + } + + } + return nil +} + +func torrentToLocal(torrentCli *sync.TorrentClient, src *sync.Locator, dst *sync.Locator, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { + return fmt.Errorf("TODO") +} + +func torrentToRemote(torrentCli *sync.TorrentClient, rcCli *downloader.RCloneClient, src *sync.Locator, dst *sync.Locator, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { + return fmt.Errorf("TODO") +} + +func localToRemote(rcCli *downloader.RCloneClient, src *sync.Locator, dst *sync.Locator, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { + return fmt.Errorf("TODO") +} + +func localToLocal(src *sync.Locator, dst *sync.Locator, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { + return fmt.Errorf("TODO") +} + +func remoteToLocal(ctx context.Context, rcCli *downloader.RCloneClient, src *sync.Locator, dst *sync.Locator, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { + logger := sync.Logger(ctx) + + if rcCli == nil { + return fmt.Errorf("no remote downloader") + } + + session, err := rcCli.NewSession(ctx, dst.Root, src.Src+":"+src.Root) + + if err != nil { + return err + } + + logger.Info("Reading src dir", "remoteFs", session.RemoteFsRoot(), "label", session.Label()) + fileEntries, err := session.ReadRemoteDir(ctx, true) + + if err != nil { + return err + } + + files := selectFiles(fileEntries, dst.Version, from, to, snapTypes, torrents, hashes, manifest) + + logger.Info(fmt.Sprintf("Downloading %s", files)) + + return session.Download(ctx, files...) 
+}
+
+func remoteToRemote(rcCli *downloader.RCloneClient, src *sync.Locator, dst *sync.Locator, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error {
+	return fmt.Errorf("TODO")
+}
+
+type sinf struct {
+	snaptype.FileInfo
+}
+
+func (i sinf) Version() uint8 {
+	return i.FileInfo.Version
+}
+
+func (i sinf) From() uint64 {
+	return i.FileInfo.From
+}
+
+func (i sinf) To() uint64 {
+	return i.FileInfo.To
+}
+
+func (i sinf) Type() snaptype.Type {
+	return i.FileInfo.T
+}
+
+func selectFiles(entries []fs.DirEntry, version uint8, firstBlock, lastBlock uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) []string {
+	var files []string
+
+	for _, ent := range entries {
+		if info, err := ent.Info(); err == nil {
+			snapInfo, _ := info.Sys().(downloader.SnapInfo)
+
+			if torrents {
+				if ext := filepath.Ext(info.Name()); ext == ".torrent" {
+					fileName := strings.TrimSuffix(info.Name(), ".torrent")
+
+					if fileInfo, ok := snaptype.ParseFileName("", fileName); ok {
+						snapInfo = sinf{fileInfo}
+					}
+				}
+			}
+
+			switch {
+			case snapInfo != nil && snapInfo.Type() != snaptype.Unknown:
+				if (version == 0 || version == snapInfo.Version()) &&
+					(firstBlock == 0 || snapInfo.From() >= firstBlock) &&
+					(lastBlock == 0 || snapInfo.From() < lastBlock) {
+
+					if len(snapTypes) == 0 {
+						files = append(files, info.Name())
+					} else {
+						for _, snapType := range snapTypes {
+							if snapType == snapInfo.Type() {
+								files = append(files, info.Name())
+								break
+							}
+						}
+					}
+				}
+
+			case manifest:
+				// manifest selection is not implemented here yet
+
+			case hashes:
+				// hash file selection is not implemented here yet
+
+			}
+		}
+	}
+
+	return files
+}
diff --git a/cmd/snapshots/flags/flags.go b/cmd/snapshots/flags/flags.go
new file mode 100644
index 00000000000..b905ffa1cc0
--- /dev/null
+++ b/cmd/snapshots/flags/flags.go
@@ -0,0 +1,11 @@
+package flags
+
+import "github.com/urfave/cli/v2"
+
+var (
+	SegTypes = cli.StringSliceFlag{
+		Name:     "types",
+		Usage:    `Segment types to process, e.g. headers,bodies,transactions`,
+		Required: false,
+	}
+)
diff --git a/cmd/snapshots/main.go b/cmd/snapshots/main.go
new file mode 100644
index 00000000000..47e2f447616
--- /dev/null
+++ b/cmd/snapshots/main.go
@@ -0,0 +1,112 @@
+package main
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"os/signal"
+	"path/filepath"
+	"syscall"
+
+	"github.com/ledgerwatch/erigon/cmd/snapshots/cmp"
+	"github.com/ledgerwatch/erigon/cmd/snapshots/copy"
+	"github.com/ledgerwatch/erigon/cmd/snapshots/manifest"
+	"github.com/ledgerwatch/erigon/cmd/snapshots/sync"
+	"github.com/ledgerwatch/erigon/cmd/snapshots/torrents"
+	"github.com/ledgerwatch/erigon/cmd/snapshots/verify"
+	"github.com/ledgerwatch/erigon/cmd/utils"
+	"github.com/ledgerwatch/erigon/params"
+	"github.com/ledgerwatch/erigon/turbo/debug"
+	"github.com/ledgerwatch/erigon/turbo/logging"
+	"github.com/ledgerwatch/log/v3"
+	"github.com/urfave/cli/v2"
+)
+
+func main() {
+	logging.LogVerbosityFlag.Value = log.LvlError.String()
+	logging.LogConsoleVerbosityFlag.Value = log.LvlError.String()
+
+	app := cli.NewApp()
+	app.Name = "snapshots"
+	app.Version = params.VersionWithCommit(params.GitCommit)
+
+	app.Commands = []*cli.Command{
+		&cmp.Command,
+		&copy.Command,
+		&verify.Command,
+		&torrents.Command,
+		&manifest.Command,
+	}
+
+	app.Flags = []cli.Flag{}
+
+	app.UsageText = app.Name + ` [command] [flags]`
+
+	app.Action = func(context *cli.Context) error {
+		if context.Args().Present() {
+			var goodNames []string
+			for _, c := range app.VisibleCommands() {
+				goodNames = append(goodNames, c.Name)
+			}
+			_, _ = fmt.Fprintf(os.Stderr, "Command '%s' not found. 
Available commands: %s\n", context.Args().First(), goodNames) + cli.ShowAppHelpAndExit(context, 1) + } + + return nil + } + + for _, command := range app.Commands { + command.Before = func(ctx *cli.Context) error { + debug.RaiseFdLimit() + + logger, err := setupLogger(ctx) + + if err != nil { + return err + } + + var cancel context.CancelFunc + + ctx.Context, cancel = context.WithCancel(sync.WithLogger(ctx.Context, logger)) + + go handleTerminationSignals(cancel, logger) + + return nil + } + } + + if err := app.Run(os.Args); err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func setupLogger(ctx *cli.Context) (log.Logger, error) { + dataDir := ctx.String(utils.DataDirFlag.Name) + + if len(dataDir) > 0 { + logsDir := filepath.Join(dataDir, "logs") + + if err := os.MkdirAll(logsDir, 0755); err != nil { + return nil, err + } + } + + logger := logging.SetupLoggerCtx("snapshots-"+ctx.Command.Name, ctx, log.LvlError, log.LvlInfo, false) + + return logger, nil +} + +func handleTerminationSignals(stopFunc func(), logger log.Logger) { + signalCh := make(chan os.Signal, 1) + signal.Notify(signalCh, syscall.SIGTERM, syscall.SIGINT) + + switch s := <-signalCh; s { + case syscall.SIGTERM: + logger.Info("Stopping") + stopFunc() + case syscall.SIGINT: + logger.Info("Terminating") + os.Exit(-int(syscall.SIGINT)) + } +} diff --git a/cmd/snapshots/manifest/manifest.go b/cmd/snapshots/manifest/manifest.go new file mode 100644 index 00000000000..54e803fb0c2 --- /dev/null +++ b/cmd/snapshots/manifest/manifest.go @@ -0,0 +1,365 @@ +package manifest + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io/fs" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/ledgerwatch/erigon-lib/downloader" + "github.com/ledgerwatch/erigon-lib/downloader/snaptype" + "github.com/ledgerwatch/erigon/cmd/snapshots/sync" + "github.com/ledgerwatch/erigon/cmd/utils" + "github.com/ledgerwatch/erigon/turbo/logging" + "github.com/urfave/cli/v2" +) + +var ( + VersionFlag = cli.IntFlag{ + Name: "version", + Usage: `Manifest file versions`, + Required: false, + Value: 0, + } +) + +var Command = cli.Command{ + Action: func(cliCtx *cli.Context) error { + return manifest(cliCtx, "list") + }, + Name: "manifest", + Usage: "manifest utilities", + Subcommands: []*cli.Command{ + { + Action: func(cliCtx *cli.Context) error { + return manifest(cliCtx, "list") + }, + Name: "list", + Usage: "list manifest from storage location", + ArgsUsage: "", + }, + { + Action: func(cliCtx *cli.Context) error { + return manifest(cliCtx, "update") + }, + Name: "update", + Usage: "update the manifest to match the files available at its storage location", + ArgsUsage: "", + }, + { + Action: func(cliCtx *cli.Context) error { + return manifest(cliCtx, "verify") + }, + Name: "verify", + Usage: "verify that manifest matches the files available at its storage location", + ArgsUsage: "", + }, + }, + Flags: []cli.Flag{ + &VersionFlag, + &utils.DataDirFlag, + &logging.LogVerbosityFlag, + &logging.LogConsoleVerbosityFlag, + &logging.LogDirVerbosityFlag, + }, + Description: ``, +} + +func manifest(cliCtx *cli.Context, command string) error { + logger := sync.Logger(cliCtx.Context) + + var src *sync.Locator + var err error + + var rcCli *downloader.RCloneClient + + pos := 0 + + if cliCtx.Args().Len() == 0 { + return fmt.Errorf("missing manifest location") + } + + arg := cliCtx.Args().Get(pos) + + if src, err = sync.ParseLocator(arg); err != nil { + return err + } + + switch src.LType { + case sync.RemoteFs: + if rcCli == nil { + 
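+			// manifest locations are accessed through rclone: create the client
+			// lazily here, then make sure the named remote actually exists before
+			// anything is read from or uploaded to it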
rcCli, err = downloader.NewRCloneClient(logger) + + if err != nil { + return err + } + } + + if err = sync.CheckRemote(rcCli, src.Src); err != nil { + return err + } + } + + var srcSession *downloader.RCloneSession + + tempDir, err := os.MkdirTemp("", "snapshot-manifest-") + + if err != nil { + return err + } + + defer os.RemoveAll(tempDir) + + if rcCli != nil { + if src != nil && src.LType == sync.RemoteFs { + srcSession, err = rcCli.NewSession(cliCtx.Context, tempDir, src.Src+":"+src.Root) + + if err != nil { + return err + } + } + } + + if src != nil && srcSession == nil { + return fmt.Errorf("no src session established") + } + + logger.Debug("Starting manifest " + command) + + var version *uint8 + + if val := cliCtx.Int(VersionFlag.Name); val != 0 { + v := uint8(val) + version = &v + } + + switch command { + case "update": + return updateManifest(cliCtx.Context, tempDir, srcSession, version) + case "verify": + return verifyManifest(cliCtx.Context, srcSession, version, os.Stdout) + default: + return listManifest(cliCtx.Context, srcSession, os.Stdout) + } +} + +func listManifest(ctx context.Context, srcSession *downloader.RCloneSession, out *os.File) error { + entries, err := DownloadManifest(ctx, srcSession) + + if err != nil { + return err + } + + for _, fi := range entries { + fmt.Fprintln(out, fi.Name()) + } + + return nil +} + +func updateManifest(ctx context.Context, tmpDir string, srcSession *downloader.RCloneSession, version *uint8) error { + entities, err := srcSession.ReadRemoteDir(ctx, true) + + if err != nil { + return err + } + + manifestFile := "manifest.txt" + + fileMap := map[string]string{} + torrentMap := map[string]string{} + + for _, fi := range entities { + var file string + var files map[string]string + + if filepath.Ext(fi.Name()) == ".torrent" { + file = strings.TrimSuffix(fi.Name(), ".torrent") + files = torrentMap + } else { + file = fi.Name() + files = fileMap + } + + info, ok := snaptype.ParseFileName("", file) + + if !ok || (version != nil && *version != info.Version) { + continue + } + + files[file] = fi.Name() + } + + var files []string + + for file := range fileMap { + if torrent, ok := torrentMap[file]; ok { + files = append(files, file, torrent) + } + } + + sort.Strings(files) + + manifestEntries := bytes.Buffer{} + + for _, file := range files { + fmt.Fprintln(&manifestEntries, file) + } + + _ = os.WriteFile(filepath.Join(tmpDir, manifestFile), manifestEntries.Bytes(), 0644) + defer os.Remove(filepath.Join(tmpDir, manifestFile)) + + return srcSession.Upload(ctx, manifestFile) +} + +func verifyManifest(ctx context.Context, srcSession *downloader.RCloneSession, version *uint8, out *os.File) error { + manifestEntries, err := DownloadManifest(ctx, srcSession) + + if err != nil { + return fmt.Errorf("verification failed: can't read manifest: %w", err) + } + + manifestFiles := map[string]struct{}{} + + for _, fi := range manifestEntries { + var file string + + if filepath.Ext(fi.Name()) == ".torrent" { + file = strings.TrimSuffix(fi.Name(), ".torrent") + } else { + file = fi.Name() + } + + info, ok := snaptype.ParseFileName("", file) + + if !ok || (version != nil && *version != info.Version) { + continue + } + + manifestFiles[fi.Name()] = struct{}{} + } + + dirEntries, err := srcSession.ReadRemoteDir(ctx, true) + + if err != nil { + return fmt.Errorf("verification failed: can't read dir: %w", err) + } + + dirFiles := map[string]struct{}{} + + for _, fi := range dirEntries { + + var file string + + if filepath.Ext(fi.Name()) == ".torrent" { + file = 
strings.TrimSuffix(fi.Name(), ".torrent")
+		} else {
+			file = fi.Name()
+		}
+
+		info, ok := snaptype.ParseFileName("", file)
+
+		if !ok || (version != nil && *version != info.Version) {
+			continue
+		}
+
+		if _, ok := manifestFiles[fi.Name()]; ok {
+			delete(manifestFiles, fi.Name())
+		} else {
+			dirFiles[fi.Name()] = struct{}{}
+		}
+	}
+
+	var missing string
+	var extra string
+
+	if len(manifestFiles) != 0 {
+		files := make([]string, 0, len(manifestFiles))
+
+		for file := range manifestFiles {
+			files = append(files, file)
+		}
+
+		missing = fmt.Sprintf(": manifest files not in src: %s", files)
+	}
+
+	if len(dirFiles) != 0 {
+		files := make([]string, 0, len(dirFiles))
+
+		for file := range dirFiles {
+			files = append(files, file)
+		}
+
+		extra = fmt.Sprintf(": src files not in manifest: %s", files)
+	}
+
+	if len(missing) > 0 || len(extra) != 0 {
+		return fmt.Errorf("manifest does not match src contents%s%s", missing, extra)
+	}
+	return nil
+}
+
+type dirEntry struct {
+	name string
+}
+
+func (e dirEntry) Name() string {
+	return e.name
+}
+
+func (e dirEntry) IsDir() bool {
+	return false
+}
+
+func (e dirEntry) Type() fs.FileMode {
+	return e.Mode()
+}
+
+func (e dirEntry) Size() int64 {
+	return -1
+}
+
+func (e dirEntry) Mode() fs.FileMode {
+	return fs.ModeIrregular
+}
+
+func (e dirEntry) ModTime() time.Time {
+	return time.Time{}
+}
+
+func (e dirEntry) Sys() any {
+	return nil
+}
+
+func (e dirEntry) Info() (fs.FileInfo, error) {
+	return e, nil
+}
+
+func DownloadManifest(ctx context.Context, session *downloader.RCloneSession) ([]fs.DirEntry, error) {
+
+	reader, err := session.Cat(ctx, "manifest.txt")
+
+	if err != nil {
+		return nil, err
+	}
+
+	var entries []fs.DirEntry
+
+	scanner := bufio.NewScanner(reader)
+
+	for scanner.Scan() {
+		entries = append(entries, dirEntry{scanner.Text()})
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, err
+	}
+
+	return entries, nil
+}
diff --git a/cmd/snapshots/sync/context.go b/cmd/snapshots/sync/context.go
new file mode 100644
index 00000000000..fce2de1215c
--- /dev/null
+++ b/cmd/snapshots/sync/context.go
@@ -0,0 +1,38 @@
+package sync
+
+import (
+	"context"
+
+	"github.com/ledgerwatch/log/v3"
+)
+
+type ctxKey int
+
+const (
+	ckLogger ctxKey = iota
+	ckTempDir
+)
+
+func WithLogger(ctx context.Context, logger log.Logger) context.Context {
+	return context.WithValue(ctx, ckLogger, logger)
+}
+
+func Logger(ctx context.Context) log.Logger {
+	if logger, ok := ctx.Value(ckLogger).(log.Logger); ok {
+		return logger
+	}
+
+	return log.Root()
+}
+
+func WithTempDir(ctx context.Context, tempDir string) context.Context {
+	return context.WithValue(ctx, ckTempDir, tempDir)
+}
+
+func TempDir(ctx context.Context) string {
+	if tempDir, ok := ctx.Value(ckTempDir).(string); ok {
+		return tempDir
+	}
+
+	return ""
+}
diff --git a/cmd/snapshots/sync/sync.go b/cmd/snapshots/sync/sync.go
new file mode 100644
index 00000000000..c01626f0678
--- /dev/null
+++ b/cmd/snapshots/sync/sync.go
@@ -0,0 +1,444 @@
+package sync
+
+import (
+	"bufio"
+	"context"
+	"fmt"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/anacrolix/torrent"
+	"github.com/anacrolix/torrent/metainfo"
+	"github.com/anacrolix/torrent/storage"
+	"github.com/c2h5oh/datasize"
+	"github.com/ledgerwatch/erigon-lib/chain/snapcfg"
+	"github.com/ledgerwatch/erigon-lib/common"
+	"github.com/ledgerwatch/erigon-lib/common/datadir"
+	"github.com/ledgerwatch/erigon-lib/downloader"
+	
"github.com/ledgerwatch/erigon-lib/downloader/downloadercfg" + "github.com/ledgerwatch/erigon-lib/downloader/snaptype" + "github.com/ledgerwatch/erigon/cmd/downloader/downloadernat" + "github.com/ledgerwatch/erigon/cmd/utils" + "github.com/ledgerwatch/erigon/p2p/nat" + "github.com/ledgerwatch/erigon/params" + "github.com/urfave/cli/v2" + "golang.org/x/exp/slices" + "golang.org/x/sync/errgroup" +) + +type LType int + +const ( + TorrentFs LType = iota + LocalFs + RemoteFs +) + +type Locator struct { + LType LType + Src string + Root string + Version uint8 + Chain string +} + +func (l Locator) String() string { + var val string + + switch l.LType { + case TorrentFs: + val = "torrent" + case LocalFs: + val = l.Root + case RemoteFs: + val = l.Src + ":" + l.Root + } + + if l.Version > 0 { + val += fmt.Sprint(":v", l.Version) + } + + return val +} + +var locatorExp, _ = regexp.Compile(`^(?:(\w+)\:)?([^\:]*)(?:\:(v\d+))?`) +var srcExp, _ = regexp.Compile(`^erigon-v\d+-snapshots-(.*)$`) + +func ParseLocator(value string) (*Locator, error) { + if matches := locatorExp.FindStringSubmatch(value); len(matches) > 0 { + var loc Locator + + switch { + case matches[1] == "torrent": + loc.LType = TorrentFs + + if len(matches[2]) > 0 { + version, err := strconv.ParseUint(matches[2][1:], 10, 8) + if err != nil { + return nil, fmt.Errorf("can't parse version: %s: %w", matches[3], err) + } + + loc.Version = uint8(version) + } + + case len(matches[1]) > 0: + loc.LType = RemoteFs + loc.Src = matches[1] + loc.Root = matches[2] + + if matches := srcExp.FindStringSubmatch(loc.Root); len(matches) > 1 { + loc.Chain = matches[1] + } + + if len(matches[3]) > 0 { + version, err := strconv.ParseUint(matches[3][1:], 10, 8) + if err != nil { + return nil, fmt.Errorf("can't parse version: %s: %w", matches[3], err) + } + + loc.Version = uint8(version) + } + + default: + loc.LType = LocalFs + loc.Root = downloader.Clean(matches[2]) + } + + return &loc, nil + } + + if path, err := filepath.Abs(value); err == nil { + return &Locator{ + LType: LocalFs, + Root: path, + }, nil + } + + return nil, fmt.Errorf("Invalid locator syntax") +} + +type TorrentClient struct { + *torrent.Client + cfg *torrent.ClientConfig +} + +func NewTorrentClient(cliCtx *cli.Context, chain string) (*TorrentClient, error) { + logger := Logger(cliCtx.Context) + tempDir := TempDir(cliCtx.Context) + + torrentDir := filepath.Join(tempDir, "torrents", chain) + + dirs := datadir.New(torrentDir) + + webseedsList := common.CliString2Array(cliCtx.String(utils.WebSeedsFlag.Name)) + + if known, ok := snapcfg.KnownWebseeds[chain]; ok { + webseedsList = append(webseedsList, known...) 
+ } + + var downloadRate, uploadRate datasize.ByteSize + + if err := downloadRate.UnmarshalText([]byte(cliCtx.String(utils.TorrentDownloadRateFlag.Name))); err != nil { + return nil, err + } + + if err := uploadRate.UnmarshalText([]byte(cliCtx.String(utils.TorrentUploadRateFlag.Name))); err != nil { + return nil, err + } + + logLevel, _, err := downloadercfg.Int2LogLevel(cliCtx.Int(utils.TorrentVerbosityFlag.Name)) + + if err != nil { + return nil, err + } + + version := "erigon: " + params.VersionWithCommit(params.GitCommit) + + cfg, err := downloadercfg.New(dirs, version, logLevel, downloadRate, uploadRate, + cliCtx.Int(utils.TorrentPortFlag.Name), + cliCtx.Int(utils.TorrentConnsPerFileFlag.Name), 0, nil, webseedsList, chain) + + if err != nil { + return nil, err + } + + err = os.RemoveAll(torrentDir) + + if err != nil { + return nil, fmt.Errorf("can't clean torrent dir: %w", err) + } + + if err := os.MkdirAll(torrentDir, 0755); err != nil { + return nil, err + } + + cfg.ClientConfig.DataDir = torrentDir + + cfg.ClientConfig.PieceHashersPerTorrent = 32 * runtime.NumCPU() + cfg.ClientConfig.DisableIPv6 = cliCtx.Bool(utils.DisableIPV6.Name) + cfg.ClientConfig.DisableIPv4 = cliCtx.Bool(utils.DisableIPV4.Name) + + natif, err := nat.Parse(utils.NATFlag.Value) + + if err != nil { + return nil, fmt.Errorf("invalid nat option %s: %w", utils.NATFlag.Value, err) + } + + downloadernat.DoNat(natif, cfg.ClientConfig, logger) + + cfg.ClientConfig.DefaultStorage = storage.NewMMap(torrentDir) + + cli, err := torrent.NewClient(cfg.ClientConfig) + + if err != nil { + return nil, fmt.Errorf("can't create torrent client: %w", err) + } + + return &TorrentClient{cli, cfg.ClientConfig}, nil +} + +type torrentSession struct { + cli *TorrentClient + items map[string]snapcfg.PreverifiedItem +} + +type fileInfo struct { + info snapcfg.PreverifiedItem +} + +func (fi *fileInfo) Name() string { + return fi.info.Name +} + +func (fi *fileInfo) Size() int64 { + return 0 +} + +func (fi *fileInfo) Mode() fs.FileMode { + return fs.ModeIrregular +} + +func (fi *fileInfo) ModTime() time.Time { + return time.Time{} +} + +func (fi *fileInfo) IsDir() bool { + return false +} + +type torrentInfo struct { + snapInfo *snaptype.FileInfo + hash string +} + +func (i *torrentInfo) Version() uint8 { + if i.snapInfo != nil { + return i.snapInfo.Version + } + + return 0 +} + +func (i *torrentInfo) From() uint64 { + if i.snapInfo != nil { + return i.snapInfo.From + } + + return 0 +} + +func (i *torrentInfo) To() uint64 { + if i.snapInfo != nil { + return i.snapInfo.To + } + + return 0 +} + +func (i *torrentInfo) Type() snaptype.Type { + if i.snapInfo != nil { + return i.snapInfo.T + } + + return 0 +} + +func (i *torrentInfo) Hash() string { + return i.hash +} + +func (fi *fileInfo) Sys() any { + info := torrentInfo{hash: fi.info.Hash} + if snapInfo, ok := snaptype.ParseFileName("", fi.Name()); ok { + info.snapInfo = &snapInfo + } + + return &info +} + +type dirEntry struct { + info *fileInfo +} + +func (e dirEntry) Name() string { + return e.info.Name() +} + +func (e dirEntry) IsDir() bool { + return e.info.IsDir() +} + +func (e dirEntry) Type() fs.FileMode { + return fs.ModeIrregular +} + +func (e dirEntry) Info() (fs.FileInfo, error) { + return e.info, nil +} + +func (s *torrentSession) ReadRemoteDir(ctx context.Context, refresh bool) ([]fs.DirEntry, error) { + var entries = make([]fs.DirEntry, 0, len(s.items)) + + for _, info := range s.items { + entries = append(entries, &dirEntry{&fileInfo{info}}) + } + + slices.SortFunc(entries, 
func(a, b fs.DirEntry) int { + return strings.Compare(a.Name(), b.Name()) + }) + + return entries, nil +} + +func (s *torrentSession) LocalFsRoot() string { + return s.cli.cfg.DataDir +} + +func (s *torrentSession) RemoteFsRoot() string { + return "" +} + +func (s *torrentSession) Download(ctx context.Context, files ...string) error { + g, ctx := errgroup.WithContext(ctx) + g.SetLimit(len(files)) + + for _, f := range files { + file := f + + g.Go(func() error { + it, ok := s.items[file] + + if !ok { + return fs.ErrNotExist + } + + t, err := func() (*torrent.Torrent, error) { + infoHash := snaptype.Hex2InfoHash(it.Hash) + + for _, t := range s.cli.Torrents() { + if t.Name() == file { + return t, nil + } + } + + mi := &metainfo.MetaInfo{AnnounceList: downloader.Trackers} + magnet := mi.Magnet(&infoHash, &metainfo.Info{Name: file}) + spec, err := torrent.TorrentSpecFromMagnetUri(magnet.String()) + + if err != nil { + return nil, err + } + + spec.DisallowDataDownload = true + + t, _, err := s.cli.AddTorrentSpec(spec) + if err != nil { + return nil, err + } + + return t, nil + }() + + if err != nil { + return err + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-t.GotInfo(): + } + + if !t.Complete.Bool() { + t.AllowDataDownload() + t.DownloadAll() + select { + case <-ctx.Done(): + return ctx.Err() + case <-t.Complete.On(): + } + } + + closed := t.Closed() + t.Drop() + <-closed + + return nil + }) + } + + return g.Wait() +} + +func (s *torrentSession) Label() string { + return "torrents" +} + +func NewTorrentSession(cli *TorrentClient, chain string) *torrentSession { + session := &torrentSession{cli, map[string]snapcfg.PreverifiedItem{}} + for _, it := range snapcfg.KnownCfg(chain, 0).Preverified { + session.items[it.Name] = it + } + + return session +} + +func DownloadManifest(ctx context.Context, session DownloadSession) ([]fs.DirEntry, error) { + if session, ok := session.(*downloader.RCloneSession); ok { + reader, err := session.Cat(ctx, "manifest.txt") + + if err != nil { + return nil, err + } + + var entries []fs.DirEntry + + scanner := bufio.NewScanner(reader) + + for scanner.Scan() { + entries = append(entries, dirEntry{&fileInfo{snapcfg.PreverifiedItem{Name: scanner.Text()}}}) + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + return entries, nil + } + + return nil, fmt.Errorf("not implemented for %T", session) +} + +type DownloadSession interface { + Download(ctx context.Context, files ...string) error + ReadRemoteDir(ctx context.Context, refresh bool) ([]fs.DirEntry, error) + LocalFsRoot() string + RemoteFsRoot() string + Label() string +} diff --git a/cmd/snapshots/sync/util.go b/cmd/snapshots/sync/util.go new file mode 100644 index 00000000000..a0a69547bd6 --- /dev/null +++ b/cmd/snapshots/sync/util.go @@ -0,0 +1,32 @@ +package sync + +import ( + "context" + "fmt" + + "github.com/ledgerwatch/erigon-lib/downloader" +) + +func CheckRemote(rcCli *downloader.RCloneClient, src string) error { + + remotes, err := rcCli.ListRemotes(context.Background()) + + if err != nil { + return err + } + + hasRemote := false + + for _, remote := range remotes { + if src == remote { + hasRemote = true + break + } + } + + if !hasRemote { + return fmt.Errorf("unknown remote: %s", src) + } + + return nil +} diff --git a/cmd/snapshots/torrents/torrents.go b/cmd/snapshots/torrents/torrents.go new file mode 100644 index 00000000000..01f01ab6e14 --- /dev/null +++ b/cmd/snapshots/torrents/torrents.go @@ -0,0 +1,504 @@ +package torrents + +import ( + "context" + "fmt" + "os" 
+ "path/filepath" + "strconv" + "strings" + gosync "sync" + "time" + + "golang.org/x/exp/slices" + + "github.com/ledgerwatch/log/v3" + + "github.com/anacrolix/torrent/metainfo" + "github.com/ledgerwatch/erigon-lib/downloader" + "github.com/ledgerwatch/erigon-lib/downloader/snaptype" + "github.com/ledgerwatch/erigon/cmd/snapshots/manifest" + "github.com/ledgerwatch/erigon/cmd/snapshots/sync" + "github.com/ledgerwatch/erigon/cmd/utils" + "github.com/ledgerwatch/erigon/turbo/logging" + "github.com/urfave/cli/v2" + "golang.org/x/sync/errgroup" +) + +var Command = cli.Command{ + Action: func(cliCtx *cli.Context) error { + return torrents(cliCtx, "list") + }, + Name: "torrent", + Usage: "torrent utilities", + Subcommands: []*cli.Command{ + { + Action: func(cliCtx *cli.Context) error { + return torrents(cliCtx, "list") + }, + Name: "list", + Usage: "list torrents available at the specified storage location", + ArgsUsage: "", + }, + { + Action: func(cliCtx *cli.Context) error { + return torrents(cliCtx, "hashes") + }, + Name: "hashes", + Usage: "list the hashes (in toml format) at the specified storage location", + ArgsUsage: " ", + }, + { + Action: func(cliCtx *cli.Context) error { + return torrents(cliCtx, "update") + }, + Name: "update", + Usage: "update re-create the torrents for the contents available at its storage location", + ArgsUsage: " ", + }, + { + Action: func(cliCtx *cli.Context) error { + return torrents(cliCtx, "verify") + }, + Name: "verify", + Usage: "verify that manifest contents are available at its storage location", + ArgsUsage: " ", + }, + }, + Flags: []cli.Flag{ + &utils.DataDirFlag, + &logging.LogVerbosityFlag, + &logging.LogConsoleVerbosityFlag, + &logging.LogDirVerbosityFlag, + }, + Description: ``, +} + +func torrents(cliCtx *cli.Context, command string) error { + logger := sync.Logger(cliCtx.Context) + + var src *sync.Locator + var err error + + var firstBlock, lastBlock uint64 + + pos := 0 + + if src, err = sync.ParseLocator(cliCtx.Args().Get(pos)); err != nil { + return err + } + + pos++ + + if cliCtx.Args().Len() > pos { + if src, err = sync.ParseLocator(cliCtx.Args().Get(pos)); err != nil { + return err + } + + if err != nil { + return err + } + } + + pos++ + + if cliCtx.Args().Len() > pos { + firstBlock, err = strconv.ParseUint(cliCtx.Args().Get(pos), 10, 64) + if err != nil { + return err + } + } + + pos++ + + if cliCtx.Args().Len() > pos { + lastBlock, err = strconv.ParseUint(cliCtx.Args().Get(pos), 10, 64) + + if err != nil { + return err + } + } + + if src == nil { + return fmt.Errorf("missing data source") + } + + var rcCli *downloader.RCloneClient + + switch src.LType { + case sync.RemoteFs: + if rcCli == nil { + rcCli, err = downloader.NewRCloneClient(logger) + + if err != nil { + return err + } + } + + if err = sync.CheckRemote(rcCli, src.Src); err != nil { + return err + } + } + + var srcSession *downloader.RCloneSession + + dataDir := cliCtx.String(utils.DataDirFlag.Name) + var tempDir string + + if len(dataDir) == 0 { + dataDir, err := os.MkdirTemp("", "snapshot-torrents-") + if err != nil { + return err + } + tempDir = dataDir + defer os.RemoveAll(dataDir) + } else { + tempDir = filepath.Join(dataDir, "temp") + + if err := os.MkdirAll(tempDir, 0755); err != nil { + return err + } + } + + if rcCli != nil { + if src != nil && src.LType == sync.RemoteFs { + srcSession, err = rcCli.NewSession(cliCtx.Context, filepath.Join(tempDir, "src"), src.Src+":"+src.Root) + + if err != nil { + return err + } + } + } + + if src != nil && srcSession == nil { + return 
fmt.Errorf("no src session established") + } + + logger.Debug("Starting torrents " + command) + + switch command { + case "hashes": + return torrentHashes(cliCtx.Context, srcSession, firstBlock, lastBlock) + case "update": + startTime := time.Now() + + logger.Info(fmt.Sprintf("Starting update: %s", src.String()), "first", firstBlock, "last", lastBlock, "dir", tempDir) + + err := updateTorrents(cliCtx.Context, srcSession, firstBlock, lastBlock, logger) + + if err == nil { + logger.Info(fmt.Sprintf("Finished update: %s", src.String()), "elapsed", time.Since(startTime)) + } else { + logger.Info(fmt.Sprintf("Aborted update: %s", src.String()), "err", err) + } + + return err + + case "verify": + startTime := time.Now() + + logger.Info(fmt.Sprintf("Starting verify: %s", src.String()), "first", firstBlock, "last", lastBlock, "dir", tempDir) + + err := verifyTorrents(cliCtx.Context, srcSession, firstBlock, lastBlock, logger) + + if err == nil { + logger.Info(fmt.Sprintf("Verified: %s", src.String()), "elapsed", time.Since(startTime)) + } else { + logger.Info(fmt.Sprintf("Verification failed: %s", src.String()), "err", err) + } + + return err + } + + return listTorrents(cliCtx.Context, srcSession, os.Stdout, firstBlock, lastBlock) +} + +func listTorrents(ctx context.Context, srcSession *downloader.RCloneSession, out *os.File, from uint64, to uint64) error { + entries, err := manifest.DownloadManifest(ctx, srcSession) + + if err != nil { + entries, err = srcSession.ReadRemoteDir(ctx, true) + } + + if err != nil { + return err + } + + for _, fi := range entries { + if filepath.Ext(fi.Name()) == ".torrent" { + if from > 0 || to > 0 { + info, _ := snaptype.ParseFileName("", strings.TrimSuffix(fi.Name(), ".torrent")) + + if from > 0 && info.From < from { + continue + } + + if to > 0 && info.From > to { + continue + } + } + + fmt.Fprintln(out, fi.Name()) + } + } + + return nil +} + +func torrentHashes(ctx context.Context, srcSession *downloader.RCloneSession, from uint64, to uint64) error { + entries, err := manifest.DownloadManifest(ctx, srcSession) + + if err != nil { + return err + } + + type hashInfo struct { + name, hash string + } + + var hashes []hashInfo + var hashesMutex gosync.Mutex + + g, gctx := errgroup.WithContext(ctx) + g.SetLimit(16) + + for _, fi := range entries { + if filepath.Ext(fi.Name()) == ".torrent" { + if from > 0 || to > 0 { + info, _ := snaptype.ParseFileName("", strings.TrimSuffix(fi.Name(), ".torrent")) + + if from > 0 && info.From < from { + continue + } + + if to > 0 && info.From > to { + continue + } + } + + file := fi.Name() + + g.Go(func() error { + var mi *metainfo.MetaInfo + + errs := 0 + + for { + reader, err := srcSession.Cat(gctx, file) + + if err != nil { + return fmt.Errorf("can't read remote torrent: %s: %w", file, err) + } + + mi, err = metainfo.Load(reader) + + if err != nil { + errs++ + + if errs == 4 { + return fmt.Errorf("can't parse remote torrent: %s: %w", file, err) + } + + continue + } + + break + } + + info, err := mi.UnmarshalInfo() + + if err != nil { + return fmt.Errorf("can't unmarshal torrent info: %s: %w", file, err) + } + + hashesMutex.Lock() + defer hashesMutex.Unlock() + hashes = append(hashes, hashInfo{info.Name, mi.HashInfoBytes().String()}) + + return nil + }) + } + } + + if err := g.Wait(); err != nil { + return err + } + + slices.SortFunc(hashes, func(a, b hashInfo) int { + return strings.Compare(a.name, b.name) + }) + + for _, hi := range hashes { + fmt.Printf("'%s' = '%s'\n", hi.name, hi.hash) + } + + return nil +} + +func 
updateTorrents(ctx context.Context, srcSession *downloader.RCloneSession, from uint64, to uint64, logger log.Logger) error { + entries, err := manifest.DownloadManifest(ctx, srcSession) + + if err != nil { + return err + } + + g, gctx := errgroup.WithContext(ctx) + g.SetLimit(16) + + torrentFiles := downloader.NewAtomicTorrentFiles(srcSession.LocalFsRoot()) + + for _, fi := range entries { + if filepath.Ext(fi.Name()) == ".torrent" { + file := strings.TrimSuffix(fi.Name(), ".torrent") + + g.Go(func() error { + if from > 0 || to > 0 { + info, _ := snaptype.ParseFileName("", file) + + if from > 0 && info.From < from { + return nil + } + + if to > 0 && info.From > to { + return nil + } + } + + logger.Info(fmt.Sprintf("Updating %s", file+".torrent")) + + err := srcSession.Download(gctx, file) + + if err != nil { + return err + } + + defer os.Remove(filepath.Join(srcSession.LocalFsRoot(), file)) + + err = downloader.BuildTorrentIfNeed(gctx, file, srcSession.LocalFsRoot(), torrentFiles) + + if err != nil { + return err + } + + defer os.Remove(filepath.Join(srcSession.LocalFsRoot(), file+".torrent")) + + return srcSession.Upload(gctx, file+".torrent") + }) + } + } + + return g.Wait() +} + +func verifyTorrents(ctx context.Context, srcSession *downloader.RCloneSession, from uint64, to uint64, logger log.Logger) error { + entries, err := manifest.DownloadManifest(ctx, srcSession) + + if err != nil { + return err + } + + g, gctx := errgroup.WithContext(ctx) + g.SetLimit(16) + + torrentFiles := downloader.NewAtomicTorrentFiles(srcSession.LocalFsRoot()) + + for _, fi := range entries { + if filepath.Ext(fi.Name()) == ".torrent" { + file := strings.TrimSuffix(fi.Name(), ".torrent") + + g.Go(func() error { + if from > 0 || to > 0 { + info, _ := snaptype.ParseFileName("", file) + + if from > 0 && info.From < from { + return nil + } + + if to > 0 && info.From > to { + return nil + } + } + + logger.Info(fmt.Sprintf("Validating %s", file+".torrent")) + + var mi *metainfo.MetaInfo + + errs := 0 + + for { + reader, err := srcSession.Cat(gctx, file+".torrent") + + if err != nil { + return fmt.Errorf("can't read remote torrent: %s: %w", file+".torrent", err) + } + + mi, err = metainfo.Load(reader) + + if err != nil { + errs++ + + if errs == 4 { + return fmt.Errorf("can't parse remote torrent: %s: %w", file+".torrent", err) + } + + continue + } + + break + } + + info, err := mi.UnmarshalInfo() + + if err != nil { + return fmt.Errorf("can't unmarshal torrent info: %s: %w", file+".torrent", err) + } + + if info.Name != file { + return fmt.Errorf("torrent name does not match file: %s", file) + } + + err = srcSession.Download(gctx, file) + + if err != nil { + return err + } + + defer os.Remove(filepath.Join(srcSession.LocalFsRoot(), file)) + + err = downloader.BuildTorrentIfNeed(gctx, file, srcSession.LocalFsRoot(), torrentFiles) + + if err != nil { + return err + } + + torrentPath := filepath.Join(srcSession.LocalFsRoot(), file+".torrent") + + defer os.Remove(torrentPath) + + lmi, err := metainfo.LoadFromFile(torrentPath) + + if err != nil { + return fmt.Errorf("can't load local torrent from: %s: %w", torrentPath, err) + } + + if lmi.HashInfoBytes() != mi.HashInfoBytes() { + return fmt.Errorf("computed local hash does not match torrent: %s: expected: %s, got: %s", file+".torrent", lmi.HashInfoBytes(), mi.HashInfoBytes()) + } + + localInfo, err := lmi.UnmarshalInfo() + + if err != nil { + return fmt.Errorf("can't unmarshal local torrent info: %s: %w", torrentPath, err) + } + + if localInfo.Name != info.Name { + 
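+					// the torrent rebuilt from the downloaded file must describe the
+					// same name as the remote torrent; a mismatch means the torrent
+					// at the storage location is stale or mis-named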
return fmt.Errorf("computed local name does not match torrent: %s: expected: %s, got: %s", file+".torrent", localInfo.Name, info.Name) + } + + return nil + }) + } + } + + return g.Wait() +} diff --git a/cmd/snapshots/verify/verify.go b/cmd/snapshots/verify/verify.go new file mode 100644 index 00000000000..bb0fbc83b70 --- /dev/null +++ b/cmd/snapshots/verify/verify.go @@ -0,0 +1,249 @@ +package verify + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + + "github.com/ledgerwatch/erigon-lib/downloader" + "github.com/ledgerwatch/erigon-lib/downloader/snaptype" + "github.com/ledgerwatch/erigon/cmd/snapshots/flags" + "github.com/ledgerwatch/erigon/cmd/snapshots/sync" + "github.com/ledgerwatch/erigon/cmd/utils" + "github.com/urfave/cli/v2" +) + +var ( + SrcFlag = cli.StringFlag{ + Name: "src", + Usage: `Source location for verification files (torrent,hash,manifest)`, + Required: false, + } + DstFlag = cli.StringFlag{ + Name: "dst", + Usage: `Destination location containiong copies to be verified`, + Required: true, + } + ChainFlag = cli.StringFlag{ + Name: "chain", + Usage: `The chain being validated, required if not included src or dst naming`, + Required: false, + } + TorrentsFlag = cli.BoolFlag{ + Name: "torrents", + Usage: `Verify against torrent files`, + Required: false, + } + + HashesFlag = cli.BoolFlag{ + Name: "hashes", + Usage: `Verify against hash .toml contents`, + Required: false, + } + + ManifestFlag = cli.BoolFlag{ + Name: "manifest", + Usage: `Verify against manifest .txt contents`, + Required: false, + } +) + +var Command = cli.Command{ + Action: verify, + Name: "verify", + Usage: "verify snapshot segments against hashes and torrents", + ArgsUsage: " ", + Flags: []cli.Flag{ + &SrcFlag, + &DstFlag, + &ChainFlag, + &flags.SegTypes, + &TorrentsFlag, + &HashesFlag, + &ManifestFlag, + &utils.WebSeedsFlag, + &utils.NATFlag, + &utils.DisableIPV6, + &utils.DisableIPV4, + &utils.TorrentDownloadRateFlag, + &utils.TorrentUploadRateFlag, + &utils.TorrentVerbosityFlag, + &utils.TorrentPortFlag, + &utils.TorrentMaxPeersFlag, + &utils.TorrentConnsPerFileFlag, + }, + Description: ``, +} + +func verify(cliCtx *cli.Context) error { + logger := sync.Logger(cliCtx.Context) + + logger.Info("Starting verify") + + var src, dst *sync.Locator + var err error + + var rcCli *downloader.RCloneClient + var torrentCli *sync.TorrentClient + + if src, err = sync.ParseLocator(cliCtx.String(SrcFlag.Name)); err != nil { + return err + } + + if dst, err = sync.ParseLocator(cliCtx.String(DstFlag.Name)); err != nil { + return err + } + + chain := cliCtx.String(ChainFlag.Name) + + switch dst.LType { + case sync.TorrentFs: + torrentCli, err = sync.NewTorrentClient(cliCtx, dst.Chain) + if err != nil { + return fmt.Errorf("can't create torrent: %w", err) + } + + case sync.RemoteFs: + if rcCli == nil { + rcCli, err = downloader.NewRCloneClient(logger) + + if err != nil { + return err + } + } + + if err = sync.CheckRemote(rcCli, src.Src); err != nil { + return err + } + + if len(chain) == 0 { + chain = dst.Chain + } + } + + switch src.LType { + case sync.TorrentFs: + if torrentCli == nil { + torrentCli, err = sync.NewTorrentClient(cliCtx, dst.Chain) + if err != nil { + return fmt.Errorf("can't create torrent: %w", err) + } + } + + case sync.RemoteFs: + if rcCli == nil { + rcCli, err = downloader.NewRCloneClient(logger) + + if err != nil { + return err + } + } + + if err = sync.CheckRemote(rcCli, src.Src); err != nil { + return err + } + + if len(chain) == 0 { + chain = src.Chain + } + } + + typeValues := 
cliCtx.StringSlice(flags.SegTypes.Name)
+	snapTypes := make([]snaptype.Type, 0, len(typeValues))
+
+	for _, val := range typeValues {
+		segType, ok := snaptype.ParseFileType(val)
+
+		if !ok {
+			return fmt.Errorf("unknown file type: %s", val)
+		}
+
+		snapTypes = append(snapTypes, segType)
+	}
+
+	torrents := cliCtx.Bool(TorrentsFlag.Name)
+	hashes := cliCtx.Bool(HashesFlag.Name)
+	manifest := cliCtx.Bool(ManifestFlag.Name)
+
+	var firstBlock, lastBlock uint64
+
+	if cliCtx.Args().Len() > 0 {
+		if firstBlock, err = strconv.ParseUint(cliCtx.Args().Get(0), 10, 64); err != nil {
+			return err
+		}
+	}
+
+	if cliCtx.Args().Len() > 1 {
+		if lastBlock, err = strconv.ParseUint(cliCtx.Args().Get(1), 10, 64); err != nil {
+			return err
+		}
+	}
+
+	var srcSession sync.DownloadSession
+	var dstSession sync.DownloadSession
+
+	dataDir := cliCtx.String(utils.DataDirFlag.Name)
+	var tempDir string
+
+	if len(dataDir) == 0 {
+		dataDir, err := os.MkdirTemp("", "snapshot-verify-")
+		if err != nil {
+			return err
+		}
+		tempDir = dataDir
+		defer os.RemoveAll(dataDir)
+	} else {
+		tempDir = filepath.Join(dataDir, "temp")
+
+		if err := os.MkdirAll(tempDir, 0755); err != nil {
+			return err
+		}
+	}
+
+	if rcCli != nil {
+		if src != nil && src.LType == sync.RemoteFs {
+			srcSession, err = rcCli.NewSession(cliCtx.Context, filepath.Join(tempDir, "src"), src.Src+":"+src.Root)
+
+			if err != nil {
+				return err
+			}
+		}
+
+		if dst.LType == sync.RemoteFs {
+			dstSession, err = rcCli.NewSession(cliCtx.Context, filepath.Join(tempDir, "dst"), dst.Src+":"+dst.Root)
+
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	if torrentCli != nil {
+		if src != nil && src.LType == sync.TorrentFs {
+			srcSession = sync.NewTorrentSession(torrentCli, chain)
+		}
+
+		if dst.LType == sync.TorrentFs {
+			dstSession = sync.NewTorrentSession(torrentCli, chain)
+		}
+	}
+
+	if src != nil && srcSession == nil {
+		return fmt.Errorf("no src session established")
+	}
+
+	if dstSession == nil {
+		return fmt.Errorf("no dst session established")
+	}
+
+	if srcSession == nil {
+		srcSession = dstSession
+	}
+
+	return verifySnapshots(srcSession, dstSession, firstBlock, lastBlock, snapTypes, torrents, hashes, manifest)
+}
+
+func verifySnapshots(srcSession sync.DownloadSession, dstSession sync.DownloadSession, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error {
+	return fmt.Errorf("TODO")
+}
diff --git a/cmd/state/commands/check_change_sets.go b/cmd/state/commands/check_change_sets.go
index 85308fdcd3a..19eceebb5ca 100644
--- a/cmd/state/commands/check_change_sets.go
+++ b/cmd/state/commands/check_change_sets.go
@@ -46,6 +46,7 @@ func init() {
 	withBlock(checkChangeSetsCmd)
 	withDataDir(checkChangeSetsCmd)
 	withSnapshotBlocks(checkChangeSetsCmd)
+	withSnapshotVersion(checkChangeSetsCmd)
 	checkChangeSetsCmd.Flags().StringVar(&historyfile, "historyfile", "", "path to the file where the changesets and history are expected to be. 
If omitted, the same as /erigon/chaindata")
 	checkChangeSetsCmd.Flags().BoolVar(&nocheck, "nocheck", false, "set to turn off the changeset checking and only execute transaction (for performance testing)")
 	rootCmd.AddCommand(checkChangeSetsCmd)
@@ -56,13 +57,13 @@ var checkChangeSetsCmd = &cobra.Command{
 	Short: "Re-executes historical transactions in read-only mode and checks that their outputs match the database ChangeSets",
 	RunE: func(cmd *cobra.Command, args []string) error {
 		logger := debug.SetupCobra(cmd, "check_change_sets")
-		return CheckChangeSets(cmd.Context(), genesis, block, chaindata, historyfile, nocheck, logger)
+		return CheckChangeSets(cmd.Context(), genesis, snapshotVersion, block, chaindata, historyfile, nocheck, logger)
 	},
 }
 
 // CheckChangeSets re-executes historical transactions in read-only mode
 // and checks that their outputs match the database ChangeSets.
-func CheckChangeSets(ctx context.Context, genesis *types.Genesis, blockNum uint64, chaindata string, historyfile string, nocheck bool, logger log.Logger) error {
+func CheckChangeSets(ctx context.Context, genesis *types.Genesis, snapshotVersion uint8, blockNum uint64, chaindata string, historyfile string, nocheck bool, logger log.Logger) error {
 	if len(historyfile) == 0 {
 		historyfile = chaindata
 	}
@@ -81,7 +82,7 @@
 	if err != nil {
 		return err
 	}
-	allSnapshots := freezeblocks.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadirCli, "snapshots"), logger)
+	allSnapshots := freezeblocks.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadirCli, "snapshots"), snapshotVersion, logger)
 	defer allSnapshots.Close()
 	if err := allSnapshots.ReopenFolder(); err != nil {
 		return fmt.Errorf("reopen snapshot segments: %w", err)
diff --git a/cmd/state/commands/global_flags_vars.go b/cmd/state/commands/global_flags_vars.go
index dd81e19aee6..a45471410b7 100644
--- a/cmd/state/commands/global_flags_vars.go
+++ b/cmd/state/commands/global_flags_vars.go
@@ -19,6 +19,7 @@ var (
 	snapshotsCli bool
 	chain        string
 	logdir       string
+	snapshotVersion uint8
 )
 
 func must(err error) {
@@ -39,6 +40,10 @@ func withDataDir(cmd *cobra.Command) {
 	must(cmd.MarkFlagDirname("chaindata"))
 }
 
+func withSnapshotVersion(cmd *cobra.Command) {
+	cmd.Flags().Uint8Var(&snapshotVersion, "snapshots.version", 1, "specifies the snapshot file version")
+}
+
 func withStatsfile(cmd *cobra.Command) {
 	cmd.Flags().StringVar(&statsfile, "statsfile", "stateless.csv", "path where to write the stats file")
 	must(cmd.MarkFlagFilename("statsfile", "csv"))
diff --git a/cmd/state/commands/opcode_tracer.go b/cmd/state/commands/opcode_tracer.go
index c9cebb45e03..72901c7b1fa 100644
--- a/cmd/state/commands/opcode_tracer.go
+++ b/cmd/state/commands/opcode_tracer.go
@@ -44,6 +44,7 @@ var (
 func init() {
 	withBlock(opcodeTracerCmd)
 	withDataDir(opcodeTracerCmd)
+	withSnapshotVersion(opcodeTracerCmd)
 	opcodeTracerCmd.Flags().Uint64Var(&numBlocks, "numBlocks", 1, "number of blocks to run the operation on")
 	opcodeTracerCmd.Flags().BoolVar(&saveOpcodes, "saveOpcodes", false, "set to save the opcodes")
 	opcodeTracerCmd.Flags().BoolVar(&saveBBlocks, "saveBBlocks", false, "set to save the basic blocks")
@@ -56,7 +57,7 @@ var opcodeTracerCmd = &cobra.Command{
 	Short: "Re-executes historical transactions in read-only mode and traces them at the opcode level",
 	RunE: func(cmd *cobra.Command, args []string) error {
 		logger := log.New("opcode-tracer", genesis.Config.ChainID)
-		return 
OpcodeTracer(genesis, block, chaindata, numBlocks, saveOpcodes, saveBBlocks, logger)
+		return OpcodeTracer(genesis, snapshotVersion, block, chaindata, numBlocks, saveOpcodes, saveBBlocks, logger)
 	},
 }
 
@@ -395,7 +396,7 @@ type segPrefix struct {
 
 // OpcodeTracer re-executes historical transactions in read-only mode
 // and traces them at the opcode level
-func OpcodeTracer(genesis *types.Genesis, blockNum uint64, chaindata string, numBlocks uint64,
+func OpcodeTracer(genesis *types.Genesis, snapshotVersion uint8, blockNum uint64, chaindata string, numBlocks uint64,
 	saveOpcodes bool, saveBblocks bool, logger log.Logger) error {
 	blockNumOrig := blockNum
@@ -428,7 +429,7 @@
 		}
 		return nil
 	})
-	blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()), nil /* BorSnapshots */)
+	blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", snapshotVersion, log.New()), nil /* BorSnapshots */)
 	chainConfig := genesis.Config
 	vmConfig := vm.Config{Tracer: ot, Debug: true}
diff --git a/cmd/state/commands/state_root.go b/cmd/state/commands/state_root.go
index 8945289cff3..18e32915fe6 100644
--- a/cmd/state/commands/state_root.go
+++ b/cmd/state/commands/state_root.go
@@ -35,6 +35,7 @@ import (
 func init() {
 	withBlock(stateRootCmd)
 	withDataDir(stateRootCmd)
+	withSnapshotVersion(stateRootCmd)
 	rootCmd.AddCommand(stateRootCmd)
 }
 
@@ -43,11 +44,11 @@ var stateRootCmd = &cobra.Command{
 	Short: "Experimental command to re-execute blocks from beginning and compute state root",
 	RunE: func(cmd *cobra.Command, args []string) error {
 		logger := debug.SetupCobra(cmd, "stateroot")
-		return StateRoot(cmd.Context(), genesis, block, datadirCli, logger)
+		return StateRoot(cmd.Context(), genesis, snapshotVersion, block, datadirCli, logger)
 	},
 }
 
-func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) {
+func blocksIO(db kv.RoDB, snapshotVersion uint8) (services.FullBlockReader, *blockio.BlockWriter) {
 	var histV3 bool
 	if err := db.View(context.Background(), func(tx kv.Tx) error {
 		histV3, _ = kvcfg.HistoryV3.Enabled(tx)
@@ -55,12 +56,12 @@ func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) {
 	}); err != nil {
 		panic(err)
 	}
-	br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()), nil /* BorSnapshots */)
+	br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", snapshotVersion, log.New()), nil /* BorSnapshots */)
 	bw := blockio.NewBlockWriter(histV3)
 	return br, bw
 }
 
-func StateRoot(ctx context.Context, genesis *types.Genesis, blockNum uint64, datadir string, logger log.Logger) error {
+func StateRoot(ctx context.Context, genesis *types.Genesis, snapshotVersion uint8, blockNum uint64, datadir string, logger log.Logger) error {
 	sigs := make(chan os.Signal, 1)
 	interruptCh := make(chan bool, 1)
 	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
@@ -93,7 +94,7 @@ func StateRoot(ctx context.Context, genesis *types.Genesis, blockNum uint64, dat
 		return err2
 	}
 	defer db.Close()
-	blockReader, _ := blocksIO(db)
+	blockReader, _ := blocksIO(db, snapshotVersion)
 	chainConfig := genesis.Config
 	vmConfig := vm.Config{}
@@ -108,7 +109,7 @@
 	if rwTx, err = db.BeginRw(ctx); err != nil {
 		return err
 	}
-	_, 
genesisIbs, err4 := core.GenesisToBlock(genesis, "") + _, genesisIbs, err4 := core.GenesisToBlock(genesis, "", logger) if err4 != nil { return err4 } diff --git a/cmd/state/commands/verify_txlookup.go b/cmd/state/commands/verify_txlookup.go index 8dd27671015..3b5c4707c22 100644 --- a/cmd/state/commands/verify_txlookup.go +++ b/cmd/state/commands/verify_txlookup.go @@ -8,6 +8,7 @@ import ( func init() { withDataDir(verifyTxLookupCmd) + withSnapshotVersion(verifyTxLookupCmd) rootCmd.AddCommand(verifyTxLookupCmd) } @@ -16,6 +17,6 @@ var verifyTxLookupCmd = &cobra.Command{ Short: "Generate tx lookup index", RunE: func(cmd *cobra.Command, args []string) error { logger := debug.SetupCobra(cmd, "verify_txlookup") - return verify.ValidateTxLookups(chaindata, logger) + return verify.ValidateTxLookups(chaindata, snapshotVersion, logger) }, } diff --git a/cmd/state/exec3/state.go b/cmd/state/exec3/state.go index 4e3297219d0..b687065e9b5 100644 --- a/cmd/state/exec3/state.go +++ b/cmd/state/exec3/state.go @@ -143,7 +143,7 @@ func (rw *Worker) RunTxTaskNoLock(txTask *exec22.TxTask) { if txTask.BlockNum == 0 { // Genesis block // fmt.Printf("txNum=%d, blockNum=%d, Genesis\n", txTask.TxNum, txTask.BlockNum) - _, ibs, err = core.GenesisToBlock(rw.genesis, "") + _, ibs, err = core.GenesisToBlock(rw.genesis, "", logger) if err != nil { panic(err) } diff --git a/cmd/state/exec3/state_recon.go b/cmd/state/exec3/state_recon.go index 0172f9a3653..1700c342816 100644 --- a/cmd/state/exec3/state_recon.go +++ b/cmd/state/exec3/state_recon.go @@ -297,7 +297,7 @@ func (rw *ReconWorker) runTxTask(txTask *exec22.TxTask) error { if txTask.BlockNum == 0 && txTask.TxIndex == -1 { //fmt.Printf("txNum=%d, blockNum=%d, Genesis\n", txTask.TxNum, txTask.BlockNum) // Genesis block - _, ibs, err = core.GenesisToBlock(rw.genesis, "") + _, ibs, err = core.GenesisToBlock(rw.genesis, "", logger) if err != nil { return err } diff --git a/cmd/state/verify/verify_txlookup.go b/cmd/state/verify/verify_txlookup.go index 3a7351d11b8..625ef1fc717 100644 --- a/cmd/state/verify/verify_txlookup.go +++ b/cmd/state/verify/verify_txlookup.go @@ -20,7 +20,7 @@ import ( "github.com/ledgerwatch/log/v3" ) -func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) { +func blocksIO(db kv.RoDB, snapshotVersion uint8) (services.FullBlockReader, *blockio.BlockWriter) { var histV3 bool if err := db.View(context.Background(), func(tx kv.Tx) error { histV3, _ = kvcfg.HistoryV3.Enabled(tx) @@ -28,14 +28,14 @@ func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) { }); err != nil { panic(err) } - br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()), nil /* BorSnapshots */) + br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", snapshotVersion, log.New()), nil /* BorSnapshots */) bw := blockio.NewBlockWriter(histV3) return br, bw } -func ValidateTxLookups(chaindata string, logger log.Logger) error { +func ValidateTxLookups(chaindata string, snapshotVersion uint8, logger log.Logger) error { db := mdbx.MustOpen(chaindata) - br, _ := blocksIO(db) + br, _ := blocksIO(db, snapshotVersion) tx, err := db.BeginRo(context.Background()) if err != nil { return err diff --git a/cmd/tooling/cli.go b/cmd/tooling/cli.go index 622bc9e6ca6..a30a30a4ad8 100644 --- a/cmd/tooling/cli.go +++ b/cmd/tooling/cli.go @@ -12,6 +12,7 @@ import ( "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" 
"golang.org/x/net/context" + "github.com/ledgerwatch/erigon-lib/chain/snapcfg" "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/downloader/snaptype" "github.com/ledgerwatch/erigon/cl/persistence" @@ -78,7 +79,10 @@ func (c *BucketCaplinAutomation) Run(ctx *Context) error { tickerTriggerer := time.NewTicker(c.UploadPeriod) defer tickerTriggerer.Stop() // do the checking at first run - if err := checkSnapshots(ctx, beaconConfig, dirs); err != nil { + + snapshotVersion := snapcfg.KnownCfg(c.Chain, 0).Version + + if err := checkSnapshots(ctx, beaconConfig, dirs, snapshotVersion); err != nil { return err } log.Info("Uploading snapshots to R2 bucket") @@ -93,7 +97,9 @@ func (c *BucketCaplinAutomation) Run(ctx *Context) error { select { case <-tickerTriggerer.C: log.Info("Checking snapshots") - if err := checkSnapshots(ctx, beaconConfig, dirs); err != nil { + snapshotVersion := snapcfg.KnownCfg(c.Chain, 0).Version + + if err := checkSnapshots(ctx, beaconConfig, dirs, snapshotVersion); err != nil { return err } log.Info("Finishing snapshots") @@ -111,7 +117,7 @@ func (c *BucketCaplinAutomation) Run(ctx *Context) error { } } -func checkSnapshots(ctx context.Context, beaconConfig *clparams.BeaconChainConfig, dirs datadir.Dirs) error { +func checkSnapshots(ctx context.Context, beaconConfig *clparams.BeaconChainConfig, dirs datadir.Dirs, snapshotVersion uint8) error { rawDB, _ := persistence.AferoRawBeaconBlockChainFromOsPath(beaconConfig, dirs.CaplinHistory) _, db, err := caplin1.OpenCaplinDatabase(ctx, db_config.DatabaseConfiguration{PruneDepth: math.MaxUint64}, beaconConfig, rawDB, dirs.CaplinIndexing, nil, false) if err != nil { @@ -130,9 +136,9 @@ func checkSnapshots(ctx context.Context, beaconConfig *clparams.BeaconChainConfi return err } - to = (to / snaptype.Erigon2RecentMergeLimit) * snaptype.Erigon2RecentMergeLimit + to = (to / snaptype.Erigon2MergeLimit) * snaptype.Erigon2MergeLimit - csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, dirs.Snap, log.Root()) + csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs.Snap, snapshotVersion, log.Root()) if err := csn.ReopenFolder(); err != nil { return err } diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 4d30daa13ce..240d5a84e58 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -148,6 +148,7 @@ var ( TxPoolDisableFlag = cli.BoolFlag{ Name: "txpool.disable", Usage: "Experimental external pool and block producer, see ./cmd/txpool/readme.md for more info. 
Disabling internal txpool and block producer.", + Value: false, } TxPoolGossipDisableFlag = cli.BoolFlag{ Name: "txpool.gossip.disable", @@ -1335,7 +1336,7 @@ func setGPOCobra(f *pflag.FlagSet, cfg *gaspricecfg.Config) { func setTxPool(ctx *cli.Context, fullCfg *ethconfig.Config) { cfg := &fullCfg.DeprecatedTxPool - if ctx.IsSet(TxPoolDisableFlag.Name) { + if ctx.IsSet(TxPoolDisableFlag.Name) || TxPoolDisableFlag.Value { cfg.Disable = true } if ctx.IsSet(TxPoolLocalsFlag.Name) { diff --git a/consensus/aura/aura_test.go b/consensus/aura/aura_test.go index 1772905f68a..dd9a94fee8b 100644 --- a/consensus/aura/aura_test.go +++ b/consensus/aura/aura_test.go @@ -17,6 +17,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/turbo/stages/mock" "github.com/ledgerwatch/erigon/turbo/trie" + "github.com/ledgerwatch/log/v3" ) // Check that the first block of Gnosis Chain, which doesn't have any transactions, @@ -24,7 +25,7 @@ import ( func TestEmptyBlock(t *testing.T) { require := require.New(t) genesis := core.GnosisGenesisBlock() - genesisBlock, _, err := core.GenesisToBlock(genesis, "") + genesisBlock, _, err := core.GenesisToBlock(genesis, "", log.Root()) require.NoError(err) genesis.Config.TerminalTotalDifficultyPassed = false diff --git a/consensus/bor/bor.go b/consensus/bor/bor.go index fe913ed779e..ed49c131b6d 100644 --- a/consensus/bor/bor.go +++ b/consensus/bor/bor.go @@ -26,7 +26,6 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/bor/finality" @@ -55,8 +54,6 @@ const ( ) const ( - spanLength = 6400 // Number of blocks in a span - zerothSpanEnd = 255 // End block of 0th span snapshotPersistInterval = 1024 // Number of blocks after which to persist the vote snapshot to the database inmemorySnapshots = 128 // Number of recent vote snapshots to keep in memory inmemorySignatures = 4096 // Number of recent block signatures to keep in memory @@ -114,7 +111,7 @@ var ( // errInvalidSpanValidators is returned if a block contains an // invalid list of validators (i.e. non divisible by 40 bytes). - ErrInvalidSpanValidators = errors.New("invalid validator list on sprint end block") + errInvalidSpanValidators = errors.New("invalid validator list on sprint end block") // errInvalidMixDigest is returned if a block's mix digest is non-zero. errInvalidMixDigest = errors.New("non-zero mix digest") @@ -545,7 +542,7 @@ func (c *Bor) verifyHeader(chain consensus.ChainHeaderReader, header *types.Head } if isSprintEnd && signersBytes%validatorHeaderBytesLength != 0 { - return ErrInvalidSpanValidators + return errInvalidSpanValidators } // Ensure that the mix digest is zero as we don't have fork protection currently @@ -926,10 +923,7 @@ func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header, s // where it fetches producers internally. As we fetch data from span // in Erigon, use directly the `GetCurrentProducers` function. 
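+	// Spans are 6400 blocks long and span 0 ends at block 255 (the constants
+	// removed above), so span.IDAt(number+1) resolves the span that the next
+	// block belongs to, replacing the previous inline calculation.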
if isSprintStart(number+1, c.config.CalculateSprint(number)) { - var spanID uint64 - if number+1 > zerothSpanEnd { - spanID = 1 + (number+1-zerothSpanEnd-1)/spanLength - } + spanID := span.IDAt(number + 1) newValidators, err := c.spanner.GetCurrentProducers(spanID, c.authorizedSigner.Load().signer, chain) if err != nil { return errUnknownValidators diff --git a/consensus/bor/bor_test.go b/consensus/bor/bor_test.go index 352686e5034..373b3bd10d5 100644 --- a/consensus/bor/bor_test.go +++ b/consensus/bor/bor_test.go @@ -105,7 +105,7 @@ func (h test_heimdall) FetchCheckpointCount(ctx context.Context) (int64, error) return 0, fmt.Errorf("TODO") } -func (h test_heimdall) FetchMilestone(ctx context.Context) (*milestone.Milestone, error) { +func (h test_heimdall) FetchMilestone(ctx context.Context, number int64) (*milestone.Milestone, error) { return nil, fmt.Errorf("TODO") } diff --git a/consensus/bor/finality/whitelist_helpers.go b/consensus/bor/finality/whitelist_helpers.go index 54dbff49690..ddeb1e19dfb 100644 --- a/consensus/bor/finality/whitelist_helpers.go +++ b/consensus/bor/finality/whitelist_helpers.go @@ -71,7 +71,7 @@ func fetchWhitelistMilestone(ctx context.Context, heimdallClient heimdall.IHeimd ) // fetch latest milestone - milestone, err := heimdallClient.FetchMilestone(ctx) + milestone, err := heimdallClient.FetchMilestone(ctx, -1) if errors.Is(err, heimdall.ErrServiceUnavailable) { config.logger.Debug("[bor.heimdall] Failed to fetch latest milestone for whitelisting", "err", err) return num, hash, err diff --git a/consensus/bor/genesis.go b/consensus/bor/genesis_contract.go similarity index 74% rename from consensus/bor/genesis.go rename to consensus/bor/genesis_contract.go index 24b0964f454..7a232733bf2 100644 --- a/consensus/bor/genesis.go +++ b/consensus/bor/genesis_contract.go @@ -7,7 +7,7 @@ import ( "github.com/ledgerwatch/erigon/rlp" ) -//go:generate mockgen -destination=./genesis_contract_mock.go -package=bor . GenesisContract +//go:generate mockgen -destination=./mock/genesis_contract_mock.go -package=mock . 
GenesisContract type GenesisContract interface { CommitState(event rlp.RawValue, syscall consensus.SystemCall) error LastStateId(syscall consensus.SystemCall) (*big.Int, error) diff --git a/consensus/bor/heimdall/client.go b/consensus/bor/heimdall/client.go index 717da531f60..0085a636024 100644 --- a/consensus/bor/heimdall/client.go +++ b/consensus/bor/heimdall/client.go @@ -8,16 +8,18 @@ import ( "io" "net/http" "net/url" + "path" "sort" + "strings" "time" - "github.com/ledgerwatch/erigon-lib/metrics" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/metrics" "github.com/ledgerwatch/erigon/consensus/bor/clerk" "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint" "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone" "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" - "github.com/ledgerwatch/log/v3" ) var ( @@ -33,7 +35,8 @@ var ( const ( stateFetchLimit = 50 apiHeimdallTimeout = 10 * time.Second - retryCall = 5 * time.Second + retryBackOff = time.Second + maxRetries = 5 ) type StateSyncEventsResponse struct { @@ -46,27 +49,42 @@ type SpanResponse struct { Result span.HeimdallSpan `json:"result"` } -type HeimdallClient struct { - urlString string - client http.Client - closeCh chan struct{} - logger log.Logger +type Client struct { + urlString string + client HttpClient + retryBackOff time.Duration + maxRetries int + closeCh chan struct{} + logger log.Logger } type Request struct { - client http.Client + client HttpClient url *url.URL start time.Time } -func NewHeimdallClient(urlString string, logger log.Logger) *HeimdallClient { - return &HeimdallClient{ - urlString: urlString, - logger: logger, - client: http.Client{ - Timeout: apiHeimdallTimeout, - }, - closeCh: make(chan struct{}), +//go:generate mockgen -destination=./mock/http_client_mock.go -package=mock . 
HttpClient +type HttpClient interface { + Do(req *http.Request) (*http.Response, error) + CloseIdleConnections() +} + +func NewHeimdallClient(urlString string, logger log.Logger) *Client { + httpClient := &http.Client{ + Timeout: apiHeimdallTimeout, + } + return newHeimdallClient(urlString, httpClient, retryBackOff, maxRetries, logger) +} + +func newHeimdallClient(urlString string, httpClient HttpClient, retryBackOff time.Duration, maxRetries int, logger log.Logger) *Client { + return &Client{ + urlString: urlString, + logger: logger, + client: httpClient, + retryBackOff: retryBackOff, + maxRetries: maxRetries, + closeCh: make(chan struct{}), } } @@ -77,8 +95,9 @@ const ( fetchCheckpoint = "/checkpoints/%s" fetchCheckpointCount = "/checkpoints/count" - fetchMilestone = "/milestone/latest" - fetchMilestoneCount = "/milestone/count" + fetchMilestoneAt = "/milestone/%d" + fetchMilestoneLatest = "/milestone/latest" + fetchMilestoneCount = "/milestone/count" fetchLastNoAckMilestone = "/milestone/lastNoAck" fetchNoAckMilestone = "/milestone/noAck/%s" @@ -87,21 +106,29 @@ const ( fetchSpanFormat = "bor/span/%d" ) -func (h *HeimdallClient) StateSyncEvents(ctx context.Context, fromID uint64, to int64) ([]*clerk.EventRecordWithTime, error) { +func (c *Client) StateSyncEvents(ctx context.Context, fromID uint64, to int64) ([]*clerk.EventRecordWithTime, error) { eventRecords := make([]*clerk.EventRecordWithTime, 0) for { - url, err := stateSyncURL(h.urlString, fromID, to) + url, err := stateSyncURL(c.urlString, fromID, to) if err != nil { return nil, err } - h.logger.Debug("[bor.heimdall] Fetching state sync events", "queryParams", url.RawQuery) + c.logger.Debug("[bor.heimdall] Fetching state sync events", "queryParams", url.RawQuery) ctx = withRequestType(ctx, stateSyncRequest) - response, err := FetchWithRetry[StateSyncEventsResponse](ctx, h.client, url, h.closeCh, h.logger) + response, err := FetchWithRetry[StateSyncEventsResponse](ctx, c, url) if err != nil { + if errors.Is(err, ErrNoResponse) { + // for more info check https://github.com/maticnetwork/heimdall/pull/993 + c.logger.Warn( + "[bor.heimdall] check heimdall logs to see if it is in sync - no response when querying state sync events", + "path", url.Path, + "queryParams", url.RawQuery, + ) + } return nil, err } @@ -126,15 +153,15 @@ func (h *HeimdallClient) StateSyncEvents(ctx context.Context, fromID uint64, to return eventRecords, nil } -func (h *HeimdallClient) Span(ctx context.Context, spanID uint64) (*span.HeimdallSpan, error) { - url, err := spanURL(h.urlString, spanID) +func (c *Client) Span(ctx context.Context, spanID uint64) (*span.HeimdallSpan, error) { + url, err := spanURL(c.urlString, spanID) if err != nil { return nil, err } ctx = withRequestType(ctx, spanRequest) - response, err := FetchWithRetry[SpanResponse](ctx, h.client, url, h.closeCh, h.logger) + response, err := FetchWithRetry[SpanResponse](ctx, c, url) if err != nil { return nil, err } @@ -143,15 +170,15 @@ func (h *HeimdallClient) Span(ctx context.Context, spanID uint64) (*span.Heimdal } // FetchCheckpoint fetches the checkpoint from heimdall -func (h *HeimdallClient) FetchCheckpoint(ctx context.Context, number int64) (*checkpoint.Checkpoint, error) { - url, err := checkpointURL(h.urlString, number) +func (c *Client) FetchCheckpoint(ctx context.Context, number int64) (*checkpoint.Checkpoint, error) { + url, err := checkpointURL(c.urlString, number) if err != nil { return nil, err } ctx = withRequestType(ctx, checkpointRequest) - response, err := 
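// The HttpClient interface above is the seam that makes Client testable:
// anything with Do and CloseIdleConnections can be injected through
// newHeimdallClient. A minimal hand-rolled stub, sketched here as an
// alternative to the gomock-generated MockHttpClient used by the tests
// later in this diff (this type is illustrative, not part of the change):

type stubHttpClient struct {
	do func(*http.Request) (*http.Response, error)
}

func (s stubHttpClient) Do(req *http.Request) (*http.Response, error) { return s.do(req) }

func (s stubHttpClient) CloseIdleConnections() {}

// e.g. newHeimdallClient(urlString, stubHttpClient{do: ...}, time.Millisecond, 1, logger)
// exercises the retry and decoding paths without a live Heimdall node.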
FetchWithRetry[checkpoint.CheckpointResponse](ctx, h.client, url, h.closeCh, h.logger) + response, err := FetchWithRetry[checkpoint.CheckpointResponse](ctx, c, url) if err != nil { return nil, err } @@ -159,17 +186,29 @@ func (h *HeimdallClient) FetchCheckpoint(ctx context.Context, number int64) (*ch return &response.Result, nil } -// FetchMilestone fetches the checkpoint from heimdall -func (h *HeimdallClient) FetchMilestone(ctx context.Context) (*milestone.Milestone, error) { - url, err := milestoneURL(h.urlString) +func isInvalidMilestoneIndexError(err error) bool { + return errors.Is(err, ErrNotSuccessfulResponse) && + strings.Contains(err.Error(), "Invalid milestone index") +} + +// FetchMilestone fetches a milestone from heimdall +func (c *Client) FetchMilestone(ctx context.Context, number int64) (*milestone.Milestone, error) { + url, err := milestoneURL(c.urlString, number) if err != nil { return nil, err } ctx = withRequestType(ctx, milestoneRequest) - response, err := FetchWithRetry[milestone.MilestoneResponse](ctx, h.client, url, h.closeCh, h.logger) + isRecoverableError := func(err error) bool { + return !isInvalidMilestoneIndexError(err) + } + + response, err := FetchWithRetryEx[milestone.MilestoneResponse](ctx, c, url, isRecoverableError) if err != nil { + if isInvalidMilestoneIndexError(err) { + return nil, fmt.Errorf("%w: number %d", ErrNotInMilestoneList, number) + } return nil, err } @@ -177,15 +216,15 @@ func (h *HeimdallClient) FetchMilestone(ctx context.Context) (*milestone.Milesto } // FetchCheckpointCount fetches the checkpoint count from heimdall -func (h *HeimdallClient) FetchCheckpointCount(ctx context.Context) (int64, error) { - url, err := checkpointCountURL(h.urlString) +func (c *Client) FetchCheckpointCount(ctx context.Context) (int64, error) { + url, err := checkpointCountURL(c.urlString) if err != nil { return 0, err } ctx = withRequestType(ctx, checkpointCountRequest) - response, err := FetchWithRetry[checkpoint.CheckpointCountResponse](ctx, h.client, url, h.closeCh, h.logger) + response, err := FetchWithRetry[checkpoint.CheckpointCountResponse](ctx, c, url) if err != nil { return 0, err } @@ -194,15 +233,15 @@ func (h *HeimdallClient) FetchCheckpointCount(ctx context.Context) (int64, error } // FetchMilestoneCount fetches the milestone count from heimdall -func (h *HeimdallClient) FetchMilestoneCount(ctx context.Context) (int64, error) { - url, err := milestoneCountURL(h.urlString) +func (c *Client) FetchMilestoneCount(ctx context.Context) (int64, error) { + url, err := milestoneCountURL(c.urlString) if err != nil { return 0, err } ctx = withRequestType(ctx, milestoneCountRequest) - response, err := FetchWithRetry[milestone.MilestoneCountResponse](ctx, h.client, url, h.closeCh, h.logger) + response, err := FetchWithRetry[milestone.MilestoneCountResponse](ctx, c, url) if err != nil { return 0, err } @@ -211,15 +250,15 @@ func (h *HeimdallClient) FetchMilestoneCount(ctx context.Context) (int64, error) } // FetchLastNoAckMilestone fetches the last no-ack-milestone from heimdall -func (h *HeimdallClient) FetchLastNoAckMilestone(ctx context.Context) (string, error) { - url, err := lastNoAckMilestoneURL(h.urlString) +func (c *Client) FetchLastNoAckMilestone(ctx context.Context) (string, error) { + url, err := lastNoAckMilestoneURL(c.urlString) if err != nil { return "", err } ctx = withRequestType(ctx, milestoneLastNoAckRequest) - response, err := FetchWithRetry[milestone.MilestoneLastNoAckResponse](ctx, h.client, url, h.closeCh, h.logger) + response, err := 
FetchWithRetry[milestone.MilestoneLastNoAckResponse](ctx, c, url) if err != nil { return "", err } @@ -228,15 +267,15 @@ func (h *HeimdallClient) FetchLastNoAckMilestone(ctx context.Context) (string, e } // FetchNoAckMilestone fetches the last no-ack-milestone from heimdall -func (h *HeimdallClient) FetchNoAckMilestone(ctx context.Context, milestoneID string) error { - url, err := noAckMilestoneURL(h.urlString, milestoneID) +func (c *Client) FetchNoAckMilestone(ctx context.Context, milestoneID string) error { + url, err := noAckMilestoneURL(c.urlString, milestoneID) if err != nil { return err } ctx = withRequestType(ctx, milestoneNoAckRequest) - response, err := FetchWithRetry[milestone.MilestoneNoAckResponse](ctx, h.client, url, h.closeCh, h.logger) + response, err := FetchWithRetry[milestone.MilestoneNoAckResponse](ctx, c, url) if err != nil { return err } @@ -248,17 +287,17 @@ func (h *HeimdallClient) FetchNoAckMilestone(ctx context.Context, milestoneID st return nil } -// FetchMilestoneID fetches the bool result from Heimdal whether the ID corresponding +// FetchMilestoneID fetches the bool result from Heimdall whether the ID corresponding // to the given milestone is in process in Heimdall -func (h *HeimdallClient) FetchMilestoneID(ctx context.Context, milestoneID string) error { - url, err := milestoneIDURL(h.urlString, milestoneID) +func (c *Client) FetchMilestoneID(ctx context.Context, milestoneID string) error { + url, err := milestoneIDURL(c.urlString, milestoneID) if err != nil { return err } ctx = withRequestType(ctx, milestoneIDRequest) - response, err := FetchWithRetry[milestone.MilestoneIDResponse](ctx, h.client, url, h.closeCh, h.logger) + response, err := FetchWithRetry[milestone.MilestoneIDResponse](ctx, c, url) if err != nil { return err @@ -272,65 +311,53 @@ func (h *HeimdallClient) FetchMilestoneID(ctx context.Context, milestoneID strin } // FetchWithRetry returns data from heimdall with retry -func FetchWithRetry[T any](ctx context.Context, client http.Client, url *url.URL, closeCh chan struct{}, logger log.Logger) (*T, error) { - // request data once - request := &Request{client: client, url: url, start: time.Now()} - result, err := Fetch[T](ctx, request) - if err == nil { - return result, nil - } +func FetchWithRetry[T any](ctx context.Context, client *Client, url *url.URL) (*T, error) { + return FetchWithRetryEx[T](ctx, client, url, nil) +} - // 503 (Service Unavailable) is thrown when an endpoint isn't activated - // yet in heimdall. E.g. when the hardfork hasn't hit yet but heimdall - // is upgraded. 
- if errors.Is(err, ErrServiceUnavailable) { - logger.Debug("[bor.heimdall] service unavailable at the moment", "path", url.Path, "error", err) - return nil, err - } +// FetchWithRetryEx returns data from heimdall with retry +func FetchWithRetryEx[T any](ctx context.Context, client *Client, url *url.URL, isRecoverableError func(error) bool) (result *T, err error) { + attempt := 0 + // create a new ticker for retrying the request + ticker := time.NewTicker(client.retryBackOff) + defer ticker.Stop() - // attempt counter - attempt := 1 + for attempt < client.maxRetries { + attempt++ - logger.Warn("[bor.heimdall] an error while fetching", "path", url.Path, "attempt", attempt, "error", err) + request := &Request{client: client.client, url: url, start: time.Now()} + result, err = Fetch[T](ctx, request) + if err == nil { + return result, nil + } - // create a new ticker for retrying the request - ticker := time.NewTicker(retryCall) - defer ticker.Stop() + // 503 (Service Unavailable) is thrown when an endpoint isn't activated + // yet in heimdall. E.g. when the hard fork hasn't hit yet but heimdall + // is upgraded. + if errors.Is(err, ErrServiceUnavailable) { + client.logger.Debug("[bor.heimdall] service unavailable at the moment", "path", url.Path, "queryParams", url.RawQuery, "attempt", attempt, "err", err) + return nil, err + } - const logEach = 5 + if (isRecoverableError != nil) && !isRecoverableError(err) { + return nil, err + } -retryLoop: - for { - attempt++ + client.logger.Warn("[bor.heimdall] an error while fetching", "path", url.Path, "queryParams", url.RawQuery, "attempt", attempt, "err", err) select { case <-ctx.Done(): - logger.Debug("[bor.heimdall] request canceled", "reason", ctx.Err(), "path", url.Path, "attempt", attempt) + client.logger.Debug("[bor.heimdall] request canceled", "reason", ctx.Err(), "path", url.Path, "queryParams", url.RawQuery, "attempt", attempt) return nil, ctx.Err() - case <-closeCh: - logger.Debug("[bor.heimdall] shutdown detected, terminating request", "path", url.Path) - + case <-client.closeCh: + client.logger.Debug("[bor.heimdall] shutdown detected, terminating request", "path", url.Path, "queryParams", url.RawQuery) return nil, ErrShutdownDetected case <-ticker.C: - request = &Request{client: client, url: url, start: time.Now()} - result, err = Fetch[T](ctx, request) - - if errors.Is(err, ErrServiceUnavailable) { - logger.Debug("[bor.heimdall] service unavailable at the moment", "path", url.Path, "attempt", attempt, "error", err) - return nil, err - } - - if err != nil { - if attempt%logEach == 0 { - logger.Warn("[bor.heimdall] an error while trying fetching", "path", url.Path, "attempt", attempt, "error", err) - } - - continue retryLoop - } - - return result, nil + // retry } } + + return nil, err } // Fetch fetches response from heimdall @@ -350,7 +377,7 @@ func Fetch[T any](ctx context.Context, request *Request) (*T, error) { return nil, err } - if body == nil { + if len(body) == 0 { return nil, ErrNoResponse } @@ -385,18 +412,21 @@ func checkpointURL(urlString string, number int64) (*url.URL, error) { return makeURL(urlString, url, "") } -func milestoneURL(urlString string) (*url.URL, error) { - url := fetchMilestone - return makeURL(urlString, url, "") -} - func checkpointCountURL(urlString string) (*url.URL, error) { return makeURL(urlString, fetchCheckpointCount, "") } +func milestoneURL(urlString string, number int64) (*url.URL, error) { + if number == -1 { + return makeURL(urlString, fetchMilestoneLatest, "") + } + return makeURL(urlString, 
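// FetchWithRetryEx above bounds the old open-ended retry loop: at most
// maxRetries attempts, a fixed retryBackOff between them, and an optional
// isRecoverableError predicate that can mark an error permanent (used by
// FetchMilestone to stop on "Invalid milestone index"). The same shape as a
// standalone generic helper, assuming the context and time imports already
// present in this file (a sketch for illustration, not part of the diff):

func retry[T any](ctx context.Context, maxRetries int, backOff time.Duration,
	recoverable func(error) bool, fn func() (T, error)) (result T, err error) {
	ticker := time.NewTicker(backOff)
	defer ticker.Stop()
	for attempt := 0; attempt < maxRetries; attempt++ {
		if result, err = fn(); err == nil {
			return result, nil // success
		}
		if recoverable != nil && !recoverable(err) {
			return result, err // permanent error: fail fast
		}
		select {
		case <-ctx.Done():
			return result, ctx.Err() // caller gave up
		case <-ticker.C: // back off, then try again
		}
	}
	return result, err // attempts exhausted: surface the last error
}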
fmt.Sprintf(fetchMilestoneAt, number), "") +} + func milestoneCountURL(urlString string) (*url.URL, error) { return makeURL(urlString, fetchMilestoneCount, "") } + func lastNoAckMilestoneURL(urlString string) (*url.URL, error) { return makeURL(urlString, fetchLastNoAckMilestone, "") } @@ -415,14 +445,14 @@ func makeURL(urlString, rawPath, rawQuery string) (*url.URL, error) { return nil, err } - u.Path = rawPath + u.Path = path.Join(u.Path, rawPath) u.RawQuery = rawQuery return u, err } // internal fetch method -func internalFetch(ctx context.Context, client http.Client, u *url.URL) ([]byte, error) { +func internalFetch(ctx context.Context, client HttpClient, u *url.URL) ([]byte, error) { req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil) if err != nil { return nil, err @@ -433,15 +463,12 @@ func internalFetch(ctx context.Context, client http.Client, u *url.URL) ([]byte, return nil, err } - defer res.Body.Close() + defer func() { + _ = res.Body.Close() + }() if res.StatusCode == http.StatusServiceUnavailable { - return nil, fmt.Errorf("%w: response code %d", ErrServiceUnavailable, res.StatusCode) - } - - // check status code - if res.StatusCode != 200 && res.StatusCode != 204 { - return nil, fmt.Errorf("%w: %s:response code %d", ErrNotSuccessfulResponse, u.String(), res.StatusCode) + return nil, fmt.Errorf("%w: url='%s', status=%d", ErrServiceUnavailable, u.String(), res.StatusCode) } // unmarshall data from buffer @@ -455,10 +482,15 @@ func internalFetch(ctx context.Context, client http.Client, u *url.URL) ([]byte, return nil, err } + // check status code + if res.StatusCode != 200 { + return nil, fmt.Errorf("%w: url='%s', status=%d, body='%s'", ErrNotSuccessfulResponse, u.String(), res.StatusCode, string(body)) + } + return body, nil } -func internalFetchWithTimeout(ctx context.Context, client http.Client, url *url.URL) ([]byte, error) { +func internalFetchWithTimeout(ctx context.Context, client HttpClient, url *url.URL) ([]byte, error) { ctx, cancel := context.WithTimeout(ctx, apiHeimdallTimeout) defer cancel() @@ -467,7 +499,7 @@ func internalFetchWithTimeout(ctx context.Context, client http.Client, url *url. } // Close sends a signal to stop the running process -func (h *HeimdallClient) Close() { - close(h.closeCh) - h.client.CloseIdleConnections() +func (c *Client) Close() { + close(c.closeCh) + c.client.CloseIdleConnections() } diff --git a/consensus/bor/heimdall/client_test.go b/consensus/bor/heimdall/client_test.go new file mode 100644 index 00000000000..8f3c88e7671 --- /dev/null +++ b/consensus/bor/heimdall/client_test.go @@ -0,0 +1,64 @@ +package heimdall + +import ( + "context" + "io" + "net/http" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/ledgerwatch/log/v3" + "github.com/stretchr/testify/require" + + "github.com/ledgerwatch/erigon/consensus/bor/heimdall/mock" + "github.com/ledgerwatch/erigon/turbo/testlog" +) + +type emptyBodyReadCloser struct{} + +func (ebrc emptyBodyReadCloser) Read(_ []byte) (n int, err error) { + return 0, io.EOF +} + +func (ebrc emptyBodyReadCloser) Close() error { + return nil +} + +func TestHeimdallClientFetchesTerminateUponTooManyErrors(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + httpClient := mock.NewMockHttpClient(ctrl) + httpClient.EXPECT(). + Do(gomock.Any()). + Return(&http.Response{ + StatusCode: 404, + Body: emptyBodyReadCloser{}, + }, nil). 
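// With u.Path = path.Join(u.Path, rawPath) above, makeURL now preserves any
// path prefix already present in the configured Heimdall URL. A quick check,
// using a hypothetical base URL:
//
//	u, _ := url.Parse("https://heimdall.example.com/api")
//	u.Path = path.Join(u.Path, "/milestone/latest")
//	fmt.Println(u.String()) // https://heimdall.example.com/api/milestone/latest
//
// The previous assignment u.Path = rawPath would have dropped the /api prefix.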
+ Times(5) + logger := testlog.Logger(t, log.LvlDebug) + heimdallClient := newHeimdallClient("https://dummyheimdal.com", httpClient, 100*time.Millisecond, 5, logger) + + spanRes, err := heimdallClient.Span(ctx, 1534) + require.Nil(t, spanRes) + require.Error(t, err) +} + +func TestHeimdallClientStateSyncEventsReturnsErrNoResponseWhenHttp200WithEmptyBody(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + httpClient := mock.NewMockHttpClient(ctrl) + httpClient.EXPECT(). + Do(gomock.Any()). + Return(&http.Response{ + StatusCode: 200, + Body: emptyBodyReadCloser{}, + }, nil). + Times(2) + logger := testlog.Logger(t, log.LvlDebug) + heimdallClient := newHeimdallClient("https://dummyheimdal.com", httpClient, time.Millisecond, 2, logger) + + spanRes, err := heimdallClient.StateSyncEvents(ctx, 100, time.Now().Unix()) + require.Nil(t, spanRes) + require.ErrorIs(t, err, ErrNoResponse) +} diff --git a/consensus/bor/heimdall/heimall.go b/consensus/bor/heimdall/heimdall.go similarity index 88% rename from consensus/bor/heimdall/heimall.go rename to consensus/bor/heimdall/heimdall.go index 2ef405290f2..6d81f1aac2b 100644 --- a/consensus/bor/heimdall/heimall.go +++ b/consensus/bor/heimdall/heimdall.go @@ -14,13 +14,13 @@ func MilestoneRewindPending() bool { return generics.BorMilestoneRewind.Load() != nil && *generics.BorMilestoneRewind.Load() != 0 } -//go:generate mockgen -destination=../../tests/bor/mocks/IHeimdallClient.go -package=mocks . IHeimdallClient +//go:generate mockgen -destination=./mock/heimdall_client_mock.go -package=mock . IHeimdallClient type IHeimdallClient interface { StateSyncEvents(ctx context.Context, fromID uint64, to int64) ([]*clerk.EventRecordWithTime, error) Span(ctx context.Context, spanID uint64) (*span.HeimdallSpan, error) FetchCheckpoint(ctx context.Context, number int64) (*checkpoint.Checkpoint, error) FetchCheckpointCount(ctx context.Context) (int64, error) - FetchMilestone(ctx context.Context) (*milestone.Milestone, error) + FetchMilestone(ctx context.Context, number int64) (*milestone.Milestone, error) FetchMilestoneCount(ctx context.Context) (int64, error) FetchNoAckMilestone(ctx context.Context, milestoneID string) error //Fetch the bool value whether milestone corresponding to the given id failed in the Heimdall FetchLastNoAckMilestone(ctx context.Context) (string, error) //Fetch latest failed milestone id @@ -33,7 +33,7 @@ type HeimdallServer interface { Span(ctx context.Context, spanID uint64) (*span.HeimdallSpan, error) FetchCheckpoint(ctx context.Context, number int64) (*checkpoint.Checkpoint, error) FetchCheckpointCount(ctx context.Context) (int64, error) - FetchMilestone(ctx context.Context) (*milestone.Milestone, error) + FetchMilestone(ctx context.Context, number int64) (*milestone.Milestone, error) FetchMilestoneCount(ctx context.Context) (int64, error) FetchNoAckMilestone(ctx context.Context, milestoneID string) error FetchLastNoAckMilestone(ctx context.Context) (string, error) diff --git a/tests/bor/mocks/IHeimdallClient.go b/consensus/bor/heimdall/mock/heimdall_client_mock.go similarity index 54% rename from tests/bor/mocks/IHeimdallClient.go rename to consensus/bor/heimdall/mock/heimdall_client_mock.go index 1737cae8852..e7d29b17ee6 100644 --- a/tests/bor/mocks/IHeimdallClient.go +++ b/consensus/bor/heimdall/mock/heimdall_client_mock.go @@ -1,8 +1,8 @@ // Code generated by MockGen. DO NOT EDIT. 
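// In the first test above, each 404 maps to ErrNotSuccessfulResponse, which a
// nil predicate treats as recoverable, so the client issues exactly
// maxRetries = 5 requests before surfacing the error; hence Times(5). In the
// second, HTTP 200 with an empty body trips the new len(body) == 0 check and
// yields ErrNoResponse, likewise retried, so maxRetries = 2 gives Times(2).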
-// Source: github.com/ledgerwatch/erigon/consensus/bor (interfaces: IHeimdallClient) +// Source: github.com/ledgerwatch/erigon/consensus/bor/heimdall (interfaces: IHeimdallClient) -// Package mocks is a generated GoMock package. -package mocks +// Package mock is a generated GoMock package. +package mock import ( context "context" @@ -11,6 +11,7 @@ import ( gomock "github.com/golang/mock/gomock" clerk "github.com/ledgerwatch/erigon/consensus/bor/clerk" checkpoint "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint" + milestone "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone" span "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" ) @@ -79,6 +80,79 @@ func (mr *MockIHeimdallClientMockRecorder) FetchCheckpointCount(arg0 interface{} return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchCheckpointCount", reflect.TypeOf((*MockIHeimdallClient)(nil).FetchCheckpointCount), arg0) } +// FetchLastNoAckMilestone mocks base method. +func (m *MockIHeimdallClient) FetchLastNoAckMilestone(arg0 context.Context) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchLastNoAckMilestone", arg0) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchLastNoAckMilestone indicates an expected call of FetchLastNoAckMilestone. +func (mr *MockIHeimdallClientMockRecorder) FetchLastNoAckMilestone(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchLastNoAckMilestone", reflect.TypeOf((*MockIHeimdallClient)(nil).FetchLastNoAckMilestone), arg0) +} + +// FetchMilestone mocks base method. +func (m *MockIHeimdallClient) FetchMilestone(arg0 context.Context, arg1 int64) (*milestone.Milestone, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchMilestone", arg0, arg1) + ret0, _ := ret[0].(*milestone.Milestone) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchMilestone indicates an expected call of FetchMilestone. +func (mr *MockIHeimdallClientMockRecorder) FetchMilestone(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchMilestone", reflect.TypeOf((*MockIHeimdallClient)(nil).FetchMilestone), arg0, arg1) +} + +// FetchMilestoneCount mocks base method. +func (m *MockIHeimdallClient) FetchMilestoneCount(arg0 context.Context) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchMilestoneCount", arg0) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchMilestoneCount indicates an expected call of FetchMilestoneCount. +func (mr *MockIHeimdallClientMockRecorder) FetchMilestoneCount(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchMilestoneCount", reflect.TypeOf((*MockIHeimdallClient)(nil).FetchMilestoneCount), arg0) +} + +// FetchMilestoneID mocks base method. +func (m *MockIHeimdallClient) FetchMilestoneID(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchMilestoneID", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// FetchMilestoneID indicates an expected call of FetchMilestoneID. +func (mr *MockIHeimdallClientMockRecorder) FetchMilestoneID(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchMilestoneID", reflect.TypeOf((*MockIHeimdallClient)(nil).FetchMilestoneID), arg0, arg1) +} + +// FetchNoAckMilestone mocks base method. 
+func (m *MockIHeimdallClient) FetchNoAckMilestone(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchNoAckMilestone", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// FetchNoAckMilestone indicates an expected call of FetchNoAckMilestone. +func (mr *MockIHeimdallClientMockRecorder) FetchNoAckMilestone(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchNoAckMilestone", reflect.TypeOf((*MockIHeimdallClient)(nil).FetchNoAckMilestone), arg0, arg1) +} + // Span mocks base method. func (m *MockIHeimdallClient) Span(arg0 context.Context, arg1 uint64) (*span.HeimdallSpan, error) { m.ctrl.T.Helper() diff --git a/consensus/bor/heimdall/mock/http_client_mock.go b/consensus/bor/heimdall/mock/http_client_mock.go new file mode 100644 index 00000000000..aa6310b1715 --- /dev/null +++ b/consensus/bor/heimdall/mock/http_client_mock.go @@ -0,0 +1,62 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ledgerwatch/erigon/consensus/bor/heimdall (interfaces: HttpClient) + +// Package mock is a generated GoMock package. +package mock + +import ( + http "net/http" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockHttpClient is a mock of HttpClient interface. +type MockHttpClient struct { + ctrl *gomock.Controller + recorder *MockHttpClientMockRecorder +} + +// MockHttpClientMockRecorder is the mock recorder for MockHttpClient. +type MockHttpClientMockRecorder struct { + mock *MockHttpClient +} + +// NewMockHttpClient creates a new mock instance. +func NewMockHttpClient(ctrl *gomock.Controller) *MockHttpClient { + mock := &MockHttpClient{ctrl: ctrl} + mock.recorder = &MockHttpClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockHttpClient) EXPECT() *MockHttpClientMockRecorder { + return m.recorder +} + +// CloseIdleConnections mocks base method. +func (m *MockHttpClient) CloseIdleConnections() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "CloseIdleConnections") +} + +// CloseIdleConnections indicates an expected call of CloseIdleConnections. +func (mr *MockHttpClientMockRecorder) CloseIdleConnections() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseIdleConnections", reflect.TypeOf((*MockHttpClient)(nil).CloseIdleConnections)) +} + +// Do mocks base method. +func (m *MockHttpClient) Do(arg0 *http.Request) (*http.Response, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Do", arg0) + ret0, _ := ret[0].(*http.Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Do indicates an expected call of Do. +func (mr *MockHttpClientMockRecorder) Do(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Do", reflect.TypeOf((*MockHttpClient)(nil).Do), arg0) +} diff --git a/consensus/bor/heimdall/span/span_id.go b/consensus/bor/heimdall/span/span_id.go new file mode 100644 index 00000000000..7c4113bf848 --- /dev/null +++ b/consensus/bor/heimdall/span/span_id.go @@ -0,0 +1,33 @@ +package span + +import "github.com/ledgerwatch/erigon-lib/chain" + +const ( + spanLength = 6400 // Number of blocks in a span + zerothSpanEnd = 255 // End block of 0th span +) + +// IDAt returns the corresponding span id for the given block number. 
+func IDAt(blockNum uint64) uint64 { + if blockNum > zerothSpanEnd { + return 1 + (blockNum-zerothSpanEnd-1)/spanLength + } + return 0 +} + +// EndBlockNum returns the number of the last block in the given span. +func EndBlockNum(spanID uint64) uint64 { + if spanID > 0 { + return spanID*spanLength + zerothSpanEnd + } + return zerothSpanEnd +} + +// BlockInLastSprintOfSpan returns true if a block num is within the last sprint of a span and false otherwise. +func BlockInLastSprintOfSpan(blockNum uint64, config *chain.BorConfig) bool { + spanNum := IDAt(blockNum) + endBlockNum := EndBlockNum(spanNum) + sprintLen := config.CalculateSprint(blockNum) + startBlockNum := endBlockNum - sprintLen + 1 + return startBlockNum <= blockNum && blockNum <= endBlockNum +} diff --git a/consensus/bor/heimdall/span/span_id_test.go b/consensus/bor/heimdall/span/span_id_test.go new file mode 100644 index 00000000000..8ab45ed425d --- /dev/null +++ b/consensus/bor/heimdall/span/span_id_test.go @@ -0,0 +1,44 @@ +package span + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/ledgerwatch/erigon-lib/chain" +) + +func TestSpanIDAt(t *testing.T) { + assert.Equal(t, uint64(0), IDAt(0)) + assert.Equal(t, uint64(0), IDAt(1)) + assert.Equal(t, uint64(0), IDAt(2)) + assert.Equal(t, uint64(0), IDAt(zerothSpanEnd)) + assert.Equal(t, uint64(1), IDAt(zerothSpanEnd+1)) + assert.Equal(t, uint64(1), IDAt(zerothSpanEnd+2)) + assert.Equal(t, uint64(1), IDAt(6655)) + assert.Equal(t, uint64(2), IDAt(6656)) + assert.Equal(t, uint64(2), IDAt(6657)) + assert.Equal(t, uint64(2), IDAt(13055)) + assert.Equal(t, uint64(3), IDAt(13056)) + assert.Equal(t, uint64(6839), IDAt(43763456)) +} + +func TestSpanEndBlockNum(t *testing.T) { + assert.Equal(t, uint64(zerothSpanEnd), EndBlockNum(0)) + assert.Equal(t, uint64(6655), EndBlockNum(1)) + assert.Equal(t, uint64(13055), EndBlockNum(2)) + assert.Equal(t, uint64(43769855), EndBlockNum(6839)) +} + +func TestBlockInLastSprintOfSpan(t *testing.T) { + config := &chain.BorConfig{ + Sprint: map[string]uint64{ + "0": 16, + }, + } + assert.True(t, BlockInLastSprintOfSpan(6640, config)) + assert.True(t, BlockInLastSprintOfSpan(6645, config)) + assert.True(t, BlockInLastSprintOfSpan(6655, config)) + assert.False(t, BlockInLastSprintOfSpan(6639, config)) + assert.False(t, BlockInLastSprintOfSpan(6656, config)) +} diff --git a/consensus/bor/heimdallgrpc/milestone.go b/consensus/bor/heimdallgrpc/milestone.go index ab39cbb3952..a42bab955c5 100644 --- a/consensus/bor/heimdallgrpc/milestone.go +++ b/consensus/bor/heimdallgrpc/milestone.go @@ -24,9 +24,10 @@ func (h *HeimdallGRPCClient) FetchMilestoneCount(ctx context.Context) (int64, er return res.Result.Count, nil } -func (h *HeimdallGRPCClient) FetchMilestone(ctx context.Context) (*milestone.Milestone, error) { +func (h *HeimdallGRPCClient) FetchMilestone(ctx context.Context, number int64) (*milestone.Milestone, error) { h.logger.Info("Fetching milestone") + // TODO: use number res, err := h.client.FetchMilestone(ctx, nil) if err != nil { return nil, err diff --git a/consensus/bor/genesis_contract_mock.go b/consensus/bor/mock/genesis_contract_mock.go similarity index 90% rename from consensus/bor/genesis_contract_mock.go rename to consensus/bor/mock/genesis_contract_mock.go index 6cba2c64b18..9ad12ae63d4 100644 --- a/consensus/bor/genesis_contract_mock.go +++ b/consensus/bor/mock/genesis_contract_mock.go @@ -1,8 +1,8 @@ // Code generated by MockGen. DO NOT EDIT. 
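// A worked example for the span helpers above, consistent with
// span_id_test.go: span 1 covers blocks 256..6655, since EndBlockNum(1) =
// 1*6400 + 255 = 6655. With a sprint length of 16, the last sprint of span 1
// is blocks 6640..6655 (startBlockNum = 6655 - 16 + 1 = 6640), so
// BlockInLastSprintOfSpan reports true for 6640, 6645 and 6655, and false
// for 6639 and 6656.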
// Source: github.com/ledgerwatch/erigon/consensus/bor (interfaces: GenesisContract) -// Package bor is a generated GoMock package. -package bor +// Package mock is a generated GoMock package. +package mock import ( big "math/big" @@ -10,7 +10,7 @@ import ( gomock "github.com/golang/mock/gomock" consensus "github.com/ledgerwatch/erigon/consensus" - clerk "github.com/ledgerwatch/erigon/consensus/bor/clerk" + rlp "github.com/ledgerwatch/erigon/rlp" ) // MockGenesisContract is a mock of GenesisContract interface. @@ -37,7 +37,7 @@ func (m *MockGenesisContract) EXPECT() *MockGenesisContractMockRecorder { } // CommitState mocks base method. -func (m *MockGenesisContract) CommitState(arg0 *clerk.EventRecordWithTime, arg1 consensus.SystemCall) error { +func (m *MockGenesisContract) CommitState(arg0 rlp.RawValue, arg1 consensus.SystemCall) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CommitState", arg0, arg1) ret0, _ := ret[0].(error) diff --git a/consensus/bor/span_mock.go b/consensus/bor/mock/spanner_mock.go similarity index 93% rename from consensus/bor/span_mock.go rename to consensus/bor/mock/spanner_mock.go index ced3dee6a0d..70db933edd2 100644 --- a/consensus/bor/span_mock.go +++ b/consensus/bor/mock/spanner_mock.go @@ -1,8 +1,8 @@ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ledgerwatch/erigon/consensus/bor (interfaces: Spanner) -// Package bor is a generated GoMock package. -package bor +// Package mock is a generated GoMock package. +package mock import ( reflect "reflect" @@ -52,7 +52,7 @@ func (mr *MockSpannerMockRecorder) CommitSpan(arg0, arg1 interface{}) *gomock.Ca } // GetCurrentProducers mocks base method. -func (m *MockSpanner) GetCurrentProducers(arg0 uint64, arg1 common.Address, arg2 func(uint64) (*span.HeimdallSpan, error)) ([]*valset.Validator, error) { +func (m *MockSpanner) GetCurrentProducers(arg0 uint64, arg1 common.Address, arg2 consensus.ChainHeaderReader) ([]*valset.Validator, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetCurrentProducers", arg0, arg1, arg2) ret0, _ := ret[0].([]*valset.Validator) @@ -82,7 +82,7 @@ func (mr *MockSpannerMockRecorder) GetCurrentSpan(arg0 interface{}) *gomock.Call } // GetCurrentValidators mocks base method. -func (m *MockSpanner) GetCurrentValidators(arg0 uint64, arg1 common.Address, arg2 func(uint64) (*span.HeimdallSpan, error)) ([]*valset.Validator, error) { +func (m *MockSpanner) GetCurrentValidators(arg0 uint64, arg1 common.Address, arg2 consensus.ChainHeaderReader) ([]*valset.Validator, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetCurrentValidators", arg0, arg1, arg2) ret0, _ := ret[0].([]*valset.Validator) diff --git a/consensus/bor/span.go b/consensus/bor/spanner.go similarity index 88% rename from consensus/bor/span.go rename to consensus/bor/spanner.go index 41e8abec8db..77769ea835e 100644 --- a/consensus/bor/span.go +++ b/consensus/bor/spanner.go @@ -7,7 +7,7 @@ import ( "github.com/ledgerwatch/erigon/consensus/bor/valset" ) -//go:generate mockgen -destination=./span_mock.go -package=bor . Spanner +//go:generate mockgen -destination=./mock/spanner_mock.go -package=mock . 
Spanner type Spanner interface { GetCurrentSpan(syscall consensus.SystemCall) (*span.Span, error) GetCurrentValidators(spanId uint64, signer libcommon.Address, chain consensus.ChainHeaderReader) ([]*valset.Validator, error) diff --git a/consensus/consensus.go b/consensus/consensus.go index 0a98706fa34..d9ba40fffc1 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -21,19 +21,20 @@ import ( "math/big" "github.com/holiman/uint256" + "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/log/v3" ) // ChainHeaderReader defines a small collection of methods needed to access the local // blockchain during header verification. +// +//go:generate mockgen -destination=./mock/chain_header_reader_mock.go -package=mock . ChainHeaderReader type ChainHeaderReader interface { // Config retrieves the blockchain's chain configuration. Config() *chain.Config diff --git a/consensus/mock/chain_header_reader_mock.go b/consensus/mock/chain_header_reader_mock.go new file mode 100644 index 00000000000..5131b49e374 --- /dev/null +++ b/consensus/mock/chain_header_reader_mock.go @@ -0,0 +1,150 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ledgerwatch/erigon/consensus (interfaces: ChainHeaderReader) + +// Package mock is a generated GoMock package. +package mock + +import ( + big "math/big" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + chain "github.com/ledgerwatch/erigon-lib/chain" + common "github.com/ledgerwatch/erigon-lib/common" + types "github.com/ledgerwatch/erigon/core/types" +) + +// MockChainHeaderReader is a mock of ChainHeaderReader interface. +type MockChainHeaderReader struct { + ctrl *gomock.Controller + recorder *MockChainHeaderReaderMockRecorder +} + +// MockChainHeaderReaderMockRecorder is the mock recorder for MockChainHeaderReader. +type MockChainHeaderReaderMockRecorder struct { + mock *MockChainHeaderReader +} + +// NewMockChainHeaderReader creates a new mock instance. +func NewMockChainHeaderReader(ctrl *gomock.Controller) *MockChainHeaderReader { + mock := &MockChainHeaderReader{ctrl: ctrl} + mock.recorder = &MockChainHeaderReaderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockChainHeaderReader) EXPECT() *MockChainHeaderReaderMockRecorder { + return m.recorder +} + +// BorSpan mocks base method. +func (m *MockChainHeaderReader) BorSpan(arg0 uint64) []byte { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BorSpan", arg0) + ret0, _ := ret[0].([]byte) + return ret0 +} + +// BorSpan indicates an expected call of BorSpan. +func (mr *MockChainHeaderReaderMockRecorder) BorSpan(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BorSpan", reflect.TypeOf((*MockChainHeaderReader)(nil).BorSpan), arg0) +} + +// Config mocks base method. +func (m *MockChainHeaderReader) Config() *chain.Config { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Config") + ret0, _ := ret[0].(*chain.Config) + return ret0 +} + +// Config indicates an expected call of Config. 
+func (mr *MockChainHeaderReaderMockRecorder) Config() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Config", reflect.TypeOf((*MockChainHeaderReader)(nil).Config)) +} + +// CurrentHeader mocks base method. +func (m *MockChainHeaderReader) CurrentHeader() *types.Header { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CurrentHeader") + ret0, _ := ret[0].(*types.Header) + return ret0 +} + +// CurrentHeader indicates an expected call of CurrentHeader. +func (mr *MockChainHeaderReaderMockRecorder) CurrentHeader() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CurrentHeader", reflect.TypeOf((*MockChainHeaderReader)(nil).CurrentHeader)) +} + +// FrozenBlocks mocks base method. +func (m *MockChainHeaderReader) FrozenBlocks() uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FrozenBlocks") + ret0, _ := ret[0].(uint64) + return ret0 +} + +// FrozenBlocks indicates an expected call of FrozenBlocks. +func (mr *MockChainHeaderReaderMockRecorder) FrozenBlocks() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FrozenBlocks", reflect.TypeOf((*MockChainHeaderReader)(nil).FrozenBlocks)) +} + +// GetHeader mocks base method. +func (m *MockChainHeaderReader) GetHeader(arg0 common.Hash, arg1 uint64) *types.Header { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHeader", arg0, arg1) + ret0, _ := ret[0].(*types.Header) + return ret0 +} + +// GetHeader indicates an expected call of GetHeader. +func (mr *MockChainHeaderReaderMockRecorder) GetHeader(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHeader", reflect.TypeOf((*MockChainHeaderReader)(nil).GetHeader), arg0, arg1) +} + +// GetHeaderByHash mocks base method. +func (m *MockChainHeaderReader) GetHeaderByHash(arg0 common.Hash) *types.Header { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHeaderByHash", arg0) + ret0, _ := ret[0].(*types.Header) + return ret0 +} + +// GetHeaderByHash indicates an expected call of GetHeaderByHash. +func (mr *MockChainHeaderReaderMockRecorder) GetHeaderByHash(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHeaderByHash", reflect.TypeOf((*MockChainHeaderReader)(nil).GetHeaderByHash), arg0) +} + +// GetHeaderByNumber mocks base method. +func (m *MockChainHeaderReader) GetHeaderByNumber(arg0 uint64) *types.Header { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHeaderByNumber", arg0) + ret0, _ := ret[0].(*types.Header) + return ret0 +} + +// GetHeaderByNumber indicates an expected call of GetHeaderByNumber. +func (mr *MockChainHeaderReaderMockRecorder) GetHeaderByNumber(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHeaderByNumber", reflect.TypeOf((*MockChainHeaderReader)(nil).GetHeaderByNumber), arg0) +} + +// GetTd mocks base method. +func (m *MockChainHeaderReader) GetTd(arg0 common.Hash, arg1 uint64) *big.Int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTd", arg0, arg1) + ret0, _ := ret[0].(*big.Int) + return ret0 +} + +// GetTd indicates an expected call of GetTd. 
+func (mr *MockChainHeaderReaderMockRecorder) GetTd(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTd", reflect.TypeOf((*MockChainHeaderReader)(nil).GetTd), arg0, arg1) +} diff --git a/core/genesis_test.go b/core/genesis_test.go index d5039236e7d..174a9df7c8b 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -50,12 +50,12 @@ func TestGenesisBlockRoots(t *testing.T) { require := require.New(t) var err error - block, _, _ := core.GenesisToBlock(core.MainnetGenesisBlock(), "") + block, _, _ := core.GenesisToBlock(core.MainnetGenesisBlock(), "", log.Root()) if block.Hash() != params.MainnetGenesisHash { t.Errorf("wrong mainnet genesis hash, got %v, want %v", block.Hash(), params.MainnetGenesisHash) } - block, _, err = core.GenesisToBlock(core.GnosisGenesisBlock(), "") + block, _, err = core.GenesisToBlock(core.GnosisGenesisBlock(), "", log.Root()) require.NoError(err) if block.Root() != params.GnosisGenesisStateRoot { t.Errorf("wrong Gnosis Chain genesis state root, got %v, want %v", block.Root(), params.GnosisGenesisStateRoot) @@ -64,7 +64,7 @@ func TestGenesisBlockRoots(t *testing.T) { t.Errorf("wrong Gnosis Chain genesis hash, got %v, want %v", block.Hash(), params.GnosisGenesisHash) } - block, _, err = core.GenesisToBlock(core.ChiadoGenesisBlock(), "") + block, _, err = core.GenesisToBlock(core.ChiadoGenesisBlock(), "", log.Root()) require.NoError(err) if block.Root() != params.ChiadoGenesisStateRoot { t.Errorf("wrong Chiado genesis state root, got %v, want %v", block.Root(), params.ChiadoGenesisStateRoot) diff --git a/core/genesis_write.go b/core/genesis_write.go index 7c7f9750fe9..f30ca60bc92 100644 --- a/core/genesis_write.go +++ b/core/genesis_write.go @@ -111,7 +111,7 @@ func WriteGenesisBlock(tx kv.RwTx, genesis *types.Genesis, overrideCancunTime *b custom = false } applyOverrides(genesis.Config) - block, _, err1 := write(tx, genesis, tmpDir) + block, _, err1 := write(tx, genesis, tmpDir, logger) if err1 != nil { return genesis.Config, nil, err1 } @@ -123,7 +123,7 @@ func WriteGenesisBlock(tx kv.RwTx, genesis *types.Genesis, overrideCancunTime *b // Check whether the genesis block is already written. 
if genesis != nil { - block, _, err1 := GenesisToBlock(genesis, tmpDir) + block, _, err1 := GenesisToBlock(genesis, tmpDir, logger) if err1 != nil { return genesis.Config, nil, err1 } @@ -180,8 +180,8 @@ func WriteGenesisBlock(tx kv.RwTx, genesis *types.Genesis, overrideCancunTime *b return newCfg, storedBlock, nil } -func WriteGenesisState(g *types.Genesis, tx kv.RwTx, tmpDir string) (*types.Block, *state.IntraBlockState, error) { - block, statedb, err := GenesisToBlock(g, tmpDir) +func WriteGenesisState(g *types.Genesis, tx kv.RwTx, tmpDir string, logger log.Logger) (*types.Block, *state.IntraBlockState, error) { + block, statedb, err := GenesisToBlock(g, tmpDir, logger) if err != nil { return nil, nil, err } @@ -229,13 +229,13 @@ func WriteGenesisState(g *types.Genesis, tx kv.RwTx, tmpDir string) (*types.Bloc } return block, statedb, nil } -func MustCommitGenesis(g *types.Genesis, db kv.RwDB, tmpDir string) *types.Block { +func MustCommitGenesis(g *types.Genesis, db kv.RwDB, tmpDir string, logger log.Logger) *types.Block { tx, err := db.BeginRw(context.Background()) if err != nil { panic(err) } defer tx.Rollback() - block, _, err := write(tx, g, tmpDir) + block, _, err := write(tx, g, tmpDir, logger) if err != nil { panic(err) } @@ -248,8 +248,8 @@ func MustCommitGenesis(g *types.Genesis, db kv.RwDB, tmpDir string) *types.Block // Write writes the block and state of a genesis specification to the database. // The block is committed as the canonical head block. -func write(tx kv.RwTx, g *types.Genesis, tmpDir string) (*types.Block, *state.IntraBlockState, error) { - block, statedb, err2 := WriteGenesisState(g, tx, tmpDir) +func write(tx kv.RwTx, g *types.Genesis, tmpDir string, logger log.Logger) (*types.Block, *state.IntraBlockState, error) { + block, statedb, err2 := WriteGenesisState(g, tx, tmpDir, logger) if err2 != nil { return block, statedb, err2 } @@ -309,9 +309,9 @@ func write(tx kv.RwTx, g *types.Genesis, tmpDir string) (*types.Block, *state.In } // GenesisBlockForTesting creates and writes a block in which addr has the given wei balance. -func GenesisBlockForTesting(db kv.RwDB, addr libcommon.Address, balance *big.Int, tmpDir string) *types.Block { +func GenesisBlockForTesting(db kv.RwDB, addr libcommon.Address, balance *big.Int, tmpDir string, logger log.Logger) *types.Block { g := types.Genesis{Alloc: types.GenesisAlloc{addr: {Balance: balance}}, Config: params.TestChainConfig} - block := MustCommitGenesis(&g, db, tmpDir) + block := MustCommitGenesis(&g, db, tmpDir, logger) return block } @@ -320,14 +320,14 @@ type GenAccount struct { Balance *big.Int } -func GenesisWithAccounts(db kv.RwDB, accs []GenAccount, tmpDir string) *types.Block { +func GenesisWithAccounts(db kv.RwDB, accs []GenAccount, tmpDir string, logger log.Logger) *types.Block { g := types.Genesis{Config: params.TestChainConfig} allocs := make(map[libcommon.Address]types.GenesisAccount) for _, acc := range accs { allocs[acc.Addr] = types.GenesisAccount{Balance: acc.Balance} } g.Alloc = allocs - block := MustCommitGenesis(&g, db, tmpDir) + block := MustCommitGenesis(&g, db, tmpDir, logger) return block } @@ -489,7 +489,7 @@ func DeveloperGenesisBlock(period uint64, faucet libcommon.Address) *types.Genes // ToBlock creates the genesis block and writes state of a genesis specification // to the given database (or discards it if nil). 
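// The logger now threaded through write/WriteGenesisState ends up here: the
// hunk below has GenesisToBlock take the caller's log.Logger and use it for
// the temporary in-memory MDBX instead of a throwaway log.New(), so
// genesis-state construction logs through the node's normal logging setup.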
-func GenesisToBlock(g *types.Genesis, tmpDir string) (*types.Block, *state.IntraBlockState, error) { +func GenesisToBlock(g *types.Genesis, tmpDir string, logger log.Logger) (*types.Block, *state.IntraBlockState, error) { _ = g.Alloc //nil-check head := &types.Header{ @@ -556,7 +556,7 @@ func GenesisToBlock(g *types.Genesis, tmpDir string) (*types.Block, *state.Intra // TODO(yperbasis): use memdb.MemoryMutation instead defer wg.Done() - genesisTmpDB := mdbx.NewMDBX(log.New()).InMem(tmpDir).MapSize(2 * datasize.GB).GrowthStep(1 * datasize.MB).MustOpen() + genesisTmpDB := mdbx.NewMDBX(logger).InMem(tmpDir).MapSize(2 * datasize.GB).GrowthStep(1 * datasize.MB).MustOpen() defer genesisTmpDB.Close() var tx kv.RwTx if tx, err = genesisTmpDB.BeginRw(context.Background()); err != nil { diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index c15de0e3de3..905643429fe 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -43,11 +43,6 @@ import ( "github.com/ledgerwatch/erigon/rlp" ) -const ( - spanLength = 6400 // Number of blocks in a span - zerothSpanEnd = 255 // End block of 0th span -) - // ReadCanonicalHash retrieves the hash assigned to a canonical block number. func ReadCanonicalHash(db kv.Getter, number uint64) (common.Hash, error) { data, err := db.GetOne(kv.HeaderCanonical, hexutility.EncodeTs(number)) @@ -1074,7 +1069,7 @@ func PruneBlocks(tx kv.RwTx, blockTo uint64, blocksDeleteLimit int) error { // keeps genesis in db: [1, to) // doesn't change sequences of kv.EthTx and kv.NonCanonicalTxs // doesn't delete Receipts, Senders, Canonical markers, TotalDifficulty -func PruneBorBlocks(tx kv.RwTx, blockTo uint64, blocksDeleteLimit int) error { +func PruneBorBlocks(tx kv.RwTx, blockTo uint64, blocksDeleteLimit int, spanIDAt func(number uint64) uint64) error { c, err := tx.Cursor(kv.BorEventNums) if err != nil { return err @@ -1109,10 +1104,7 @@ func PruneBorBlocks(tx kv.RwTx, blockTo uint64, blocksDeleteLimit int) error { if err != nil { return err } - var firstSpanToKeep uint64 - if blockTo > zerothSpanEnd { - firstSpanToKeep = 1 + (blockTo-zerothSpanEnd-1)/spanLength - } + firstSpanToKeep := spanIDAt(blockTo) c2, err := tx.RwCursor(kv.BorSpans) if err != nil { return err diff --git a/core/rawdb/blockio/block_writer.go b/core/rawdb/blockio/block_writer.go index 73264cda1fa..096f1dd0fac 100644 --- a/core/rawdb/blockio/block_writer.go +++ b/core/rawdb/blockio/block_writer.go @@ -114,6 +114,6 @@ func (w *BlockWriter) PruneBlocks(ctx context.Context, tx kv.RwTx, blockTo uint6 // keeps genesis in db // doesn't change sequences of kv.EthTx and kv.NonCanonicalTxs // doesn't delete Receipts, Senders, Canonical markers, TotalDifficulty -func (w *BlockWriter) PruneBorBlocks(ctx context.Context, tx kv.RwTx, blockTo uint64, blocksDeleteLimit int) error { - return rawdb.PruneBorBlocks(tx, blockTo, blocksDeleteLimit) +func (w *BlockWriter) PruneBorBlocks(ctx context.Context, tx kv.RwTx, blockTo uint64, blocksDeleteLimit int, spanIDAt func(number uint64) uint64) error { + return rawdb.PruneBorBlocks(tx, blockTo, blocksDeleteLimit, spanIDAt) } diff --git a/core/rawdb/rawdbreset/reset_stages.go b/core/rawdb/rawdbreset/reset_stages.go index cb5e238b8c8..b791e0e84cf 100644 --- a/core/rawdb/rawdbreset/reset_stages.go +++ b/core/rawdb/rawdbreset/reset_stages.go @@ -20,7 +20,7 @@ import ( "github.com/ledgerwatch/log/v3" ) -func ResetState(db kv.RwDB, ctx context.Context, chain string, tmpDir string) error { +func ResetState(db kv.RwDB, ctx context.Context, chain 
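// PruneBorBlocks above no longer hardcodes spanLength/zerothSpanEnd; the
// caller injects the block-to-span mapping, keeping rawdb free of bor
// imports. A plausible call site, assuming the span package from this diff
// is imported (a sketch, not a line from the change):
//
//	import "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span"
//
//	err := blockWriter.PruneBorBlocks(ctx, tx, blockTo, blocksDeleteLimit, span.IDAt)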
string, tmpDir string, logger log.Logger) error { // don't reset senders here if err := Reset(ctx, db, stages.HashState); err != nil { return err @@ -44,7 +44,7 @@ func ResetState(db kv.RwDB, ctx context.Context, chain string, tmpDir string) er return err } - if err := ResetExec(ctx, db, chain, tmpDir); err != nil { + if err := ResetExec(ctx, db, chain, tmpDir, logger); err != nil { return err } return nil @@ -130,7 +130,7 @@ func WarmupExec(ctx context.Context, db kv.RwDB) (err error) { return } -func ResetExec(ctx context.Context, db kv.RwDB, chain string, tmpDir string) (err error) { +func ResetExec(ctx context.Context, db kv.RwDB, chain string, tmpDir string, logger log.Logger) (err error) { historyV3 := kvcfg.HistoryV3.FromDB(db) if historyV3 { stateHistoryBuckets = append(stateHistoryBuckets, stateHistoryV3Buckets...) @@ -156,7 +156,7 @@ func ResetExec(ctx context.Context, db kv.RwDB, chain string, tmpDir string) (er } if !historyV3 { genesis := core.GenesisBlockByChainName(chain) - if _, _, err := core.WriteGenesisState(genesis, tx, tmpDir); err != nil { + if _, _, err := core.WriteGenesisState(genesis, tx, tmpDir, logger); err != nil { return err } } diff --git a/core/rlp_test.go b/core/rlp_test.go index 65b98dbff43..0d03b0cdd75 100644 --- a/core/rlp_test.go +++ b/core/rlp_test.go @@ -25,6 +25,7 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon/core/state/temporal" + "github.com/ledgerwatch/log/v3" "golang.org/x/crypto/sha3" "github.com/ledgerwatch/erigon/common/u256" @@ -35,7 +36,7 @@ import ( "github.com/ledgerwatch/erigon/rlp" ) -func getBlock(tb testing.TB, transactions int, uncles int, dataSize int, tmpDir string) *types.Block { +func getBlock(tb testing.TB, transactions int, uncles int, dataSize int, tmpDir string, logger log.Logger) *types.Block { _, db, _ := temporal.NewTestDB(tb, datadir.New(tmpDir), nil) var ( aa = libcommon.HexToAddress("0x000000000000000000000000000000000000aaaa") @@ -49,7 +50,7 @@ func getBlock(tb testing.TB, transactions int, uncles int, dataSize int, tmpDir Config: params.TestChainConfig, Alloc: types.GenesisAlloc{address: {Balance: funds}}, } - genesis = MustCommitGenesis(gspec, db, tmpDir) + genesis = MustCommitGenesis(gspec, db, tmpDir, logger) ) // We need to generate as many blocks +1 as uncles @@ -91,7 +92,7 @@ func TestRlpIterator(t *testing.T) { func testRlpIterator(t *testing.T, txs, uncles, datasize int) { desc := fmt.Sprintf("%d txs [%d datasize] and %d uncles", txs, datasize, uncles) - bodyRlp, _ := rlp.EncodeToBytes(getBlock(t, txs, uncles, datasize, "").Body()) + bodyRlp, _ := rlp.EncodeToBytes(getBlock(t, txs, uncles, datasize, "", log.Root()).Body()) it, err := rlp.NewListIterator(bodyRlp) if err != nil { t.Fatal(err) @@ -150,7 +151,7 @@ func BenchmarkHashing(b *testing.B) { blockRlp []byte ) { - block := getBlock(b, 200, 2, 50, "") + block := getBlock(b, 200, 2, 50, "", log.Root()) bodyRlp, _ = rlp.EncodeToBytes(block.Body()) blockRlp, _ = rlp.EncodeToBytes(block) } diff --git a/diagnostics/diagnostic.go b/diagnostics/diagnostic.go index c045057d451..69470f7a0d9 100644 --- a/diagnostics/diagnostic.go +++ b/diagnostics/diagnostic.go @@ -25,7 +25,9 @@ func NewDiagnosticClient(ctx *cli.Context, metricsMux *http.ServeMux, node *node func (d *DiagnosticClient) Setup() { d.runSnapshotListener() - d.runTorrentListener() + d.runSegmentDownloadingListener() + d.runSegmentIndexingListener() + d.runSegmentIndexingFinishedListener() } func (d 
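// Setup above now fans out into three listeners, defined below: segment
// download updates (stored per name in SegmentsDownloading), incremental
// indexing progress (merged by addOrUpdateSegmentIndexingState), and a
// finished signal that pins a segment's Percent at 100. The merge is a
// linear scan over the segments slice, which keeps insertion order stable
// for display; a map keyed by SegmentName would make the upsert O(1).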
*DiagnosticClient) runSnapshotListener() { @@ -68,7 +70,7 @@ func (d *DiagnosticClient) SnapshotDownload() diaglib.SnapshotDownloadStatistics return d.snapshotDownload } -func (d *DiagnosticClient) runTorrentListener() { +func (d *DiagnosticClient) runSegmentDownloadingListener() { go func() { ctx, ch, cancel := diaglib.Context[diaglib.SegmentDownloadStatistics](context.Background(), 1) defer cancel() @@ -82,12 +84,92 @@ func (d *DiagnosticClient) runTorrentListener() { cancel() return case info := <-ch: - if d.snapshotDownload.Segments == nil { - d.snapshotDownload.Segments = map[string]diaglib.SegmentDownloadStatistics{} + if d.snapshotDownload.SegmentsDownloading == nil { + d.snapshotDownload.SegmentsDownloading = map[string]diaglib.SegmentDownloadStatistics{} } - d.snapshotDownload.Segments[info.Name] = info + d.snapshotDownload.SegmentsDownloading[info.Name] = info } } }() } + +func (d *DiagnosticClient) runSegmentIndexingListener() { + go func() { + ctx, ch, cancel := diaglib.Context[diaglib.SnapshotIndexingStatistics](context.Background(), 1) + defer cancel() + + rootCtx, _ := common.RootContext() + + diaglib.StartProviders(ctx, diaglib.TypeOf(diaglib.SnapshotIndexingStatistics{}), log.Root()) + for { + select { + case <-rootCtx.Done(): + cancel() + return + case info := <-ch: + d.addOrUpdateSegmentIndexingState(info) + } + } + }() +} + +func (d *DiagnosticClient) runSegmentIndexingFinishedListener() { + go func() { + ctx, ch, cancel := diaglib.Context[diaglib.SnapshotSegmentIndexingFinishedUpdate](context.Background(), 1) + defer cancel() + + rootCtx, _ := common.RootContext() + + diaglib.StartProviders(ctx, diaglib.TypeOf(diaglib.SnapshotSegmentIndexingFinishedUpdate{}), log.Root()) + for { + select { + case <-rootCtx.Done(): + cancel() + return + case info := <-ch: + found := false + for i := range d.snapshotDownload.SegmentIndexing.Segments { + if d.snapshotDownload.SegmentIndexing.Segments[i].SegmentName == info.SegmentName { + found = true + d.snapshotDownload.SegmentIndexing.Segments[i].Percent = 100 + } + } + + if !found { + d.snapshotDownload.SegmentIndexing.Segments = append(d.snapshotDownload.SegmentIndexing.Segments, diaglib.SnapshotSegmentIndexingStatistics{ + SegmentName: info.SegmentName, + Percent: 100, + Alloc: 0, + Sys: 0, + }) + } + } + } + }() +} + +func (d *DiagnosticClient) addOrUpdateSegmentIndexingState(upd diaglib.SnapshotIndexingStatistics) { + if d.snapshotDownload.SegmentIndexing.Segments == nil { + d.snapshotDownload.SegmentIndexing.Segments = []diaglib.SnapshotSegmentIndexingStatistics{} + } + + for i := range upd.Segments { + found := false + for j := range d.snapshotDownload.SegmentIndexing.Segments { + if d.snapshotDownload.SegmentIndexing.Segments[j].SegmentName == upd.Segments[i].SegmentName { + d.snapshotDownload.SegmentIndexing.Segments[j].Percent = upd.Segments[i].Percent + d.snapshotDownload.SegmentIndexing.Segments[j].Alloc = upd.Segments[i].Alloc + d.snapshotDownload.SegmentIndexing.Segments[j].Sys = upd.Segments[i].Sys + found = true + break + } + } + + if !found { + d.snapshotDownload.SegmentIndexing.Segments = append(d.snapshotDownload.SegmentIndexing.Segments, upd.Segments[i]) + } + } + + d.snapshotDownload.SegmentIndexing.TimeElapsed = upd.TimeElapsed +} diff --git a/erigon-lib/chain/chain_config.go b/erigon-lib/chain/chain_config.go index 6e93c59ff90..cecba46e21c 100644 --- a/erigon-lib/chain/chain_config.go +++ b/erigon-lib/chain/chain_config.go @@ -476,7 +476,7 @@ type BorConfig struct { } // String implements the stringer interface, 
returning the consensus engine details. -func (b *BorConfig) String() string { +func (c *BorConfig) String() string { return "bor" } diff --git a/erigon-lib/chain/snapcfg/util.go b/erigon-lib/chain/snapcfg/util.go index 8f5ecf2f3ae..db1e42d3276 100644 --- a/erigon-lib/chain/snapcfg/util.go +++ b/erigon-lib/chain/snapcfg/util.go @@ -49,23 +49,54 @@ func doSort(in preverified) Preverified { } var ( - MainnetChainSnapshotCfg = newCfg(Mainnet) - // HoleskyChainSnapshotCfg = newCfg(Holesky, HoleskyHistory) - SepoliaChainSnapshotCfg = newCfg(Sepolia) - GoerliChainSnapshotCfg = newCfg(Goerli) - MumbaiChainSnapshotCfg = newCfg(Mumbai) - AmoyChainSnapshotCfg = newCfg(Amoy) - BorMainnetChainSnapshotCfg = newCfg(BorMainnet) - GnosisChainSnapshotCfg = newCfg(Gnosis) - ChiadoChainSnapshotCfg = newCfg(Chiado) + isDefaultVersion bool = true + snapshotVersion uint8 = 1 ) -func newCfg(preverified Preverified) *Cfg { - return &Cfg{ExpectBlocks: maxBlockNum(preverified), Preverified: preverified} +func SnapshotVersion(version uint8) { + snapshotVersion = version + isDefaultVersion = false } -func maxBlockNum(preverified Preverified) uint64 { +func newCfg(preverified Preverified, version uint8) *Cfg { + + if version == 0 { + version = snapshotVersion + + var pv Preverified + + for _, p := range preverified { + if v, _, ok := strings.Cut(p.Name, "-"); ok && strings.HasPrefix(v, "v") { + if v, err := strconv.ParseUint(v[1:], 10, 8); err == nil && uint64(version) == v { + pv = append(pv, p) + } + } + } + + // don't do this check if the SnapshotVersion has been explicitly set + if len(pv) == 0 && isDefaultVersion { + version = maxVersion(preverified) + + for _, p := range preverified { + if v, _, ok := strings.Cut(p.Name, "-"); ok && strings.HasPrefix(v, "v") { + if v, err := strconv.ParseUint(v[1:], 10, 8); err == nil && uint64(version) == v { + pv = append(pv, p) + } + } + } + } + + preverified = pv + } + + maxBlockNum, version := cfgInfo(preverified, version) + return &Cfg{ExpectBlocks: maxBlockNum, Preverified: preverified, Version: version} +} + +func cfgInfo(preverified Preverified, defaultVersion uint8) (uint64, uint8) { max := uint64(0) + version := defaultVersion + for _, p := range preverified { _, fileName := filepath.Split(p.Name) ext := filepath.Ext(fileName) @@ -84,37 +115,61 @@ func maxBlockNum(preverified Preverified) uint64 { if max < to { max = to } + + if vp := parts[0]; strings.HasPrefix(vp, "v") { + if v, err := strconv.ParseUint(vp[1:], 10, 8); err == nil { + version = uint8(v) + } + } } if max == 0 { // to prevent underflow - return 0 + return 0, version } - return max*1_000 - 1 + return max*1_000 - 1, version } type Cfg struct { ExpectBlocks uint64 + Version uint8 Preverified Preverified } -var KnownCfgs = map[string]*Cfg{ - networkname.MainnetChainName: MainnetChainSnapshotCfg, +var knownPreverified = map[string]Preverified{ + networkname.MainnetChainName: Mainnet, // networkname.HoleskyChainName: HoleskyChainSnapshotCfg, - networkname.SepoliaChainName: SepoliaChainSnapshotCfg, - networkname.GoerliChainName: GoerliChainSnapshotCfg, - networkname.MumbaiChainName: MumbaiChainSnapshotCfg, - networkname.AmoyChainName: AmoyChainSnapshotCfg, - networkname.BorMainnetChainName: BorMainnetChainSnapshotCfg, - networkname.GnosisChainName: GnosisChainSnapshotCfg, - networkname.ChiadoChainName: ChiadoChainSnapshotCfg, + networkname.SepoliaChainName: Sepolia, + networkname.GoerliChainName: Goerli, + networkname.MumbaiChainName: Mumbai, + networkname.AmoyChainName: Amoy, + 
networkname.BorMainnetChainName: BorMainnet,
+	networkname.GnosisChainName:     Gnosis,
+	networkname.ChiadoChainName:     Chiado,
 }
 
 // KnownCfg returns the list of preverified hashes for the given network, applying the whiteList filter if it's not empty
-func KnownCfg(networkName string) *Cfg {
-	c, ok := KnownCfgs[networkName]
+func KnownCfg(networkName string, version uint8) *Cfg {
+	c, ok := knownPreverified[networkName]
 	if !ok {
-		return newCfg(Preverified{})
+		return newCfg(Preverified{}, version)
+	}
+	return newCfg(c, version)
+}
+
+func maxVersion(pv Preverified) uint8 {
+	var max uint8
+
+	for _, p := range pv {
+		if v, _, ok := strings.Cut(p.Name, "-"); ok && strings.HasPrefix(v, "v") {
+			if v, err := strconv.ParseUint(v[1:], 10, 8); err == nil {
+				version := uint8(v)
+				if max < version {
+					max = version
+				}
+			}
+		}
 	}
-	return newCfg(c.Preverified)
+
+	return max
 }
 
 var KnownWebseeds = map[string][]string{
diff --git a/erigon-lib/common/background/progress.go b/erigon-lib/common/background/progress.go
index 5a4f702bfa5..44b99a07b35 100644
--- a/erigon-lib/common/background/progress.go
+++ b/erigon-lib/common/background/progress.go
@@ -96,3 +96,21 @@ func (s *ProgressSet) String() string {
 	})
 	return sb.String()
 }
+
+func (s *ProgressSet) DiagnossticsData() map[string]int {
+	s.lock.RLock()
+	defer s.lock.RUnlock()
+	var arr = make(map[string]int, s.list.Len())
+	s.list.Scan(func(_ int, p *Progress) bool {
+		if p == nil {
+			return true
+		}
+		namePtr := p.Name.Load()
+		if namePtr == nil {
+			return true
+		}
+		arr[*namePtr] = p.percent()
+		return true
+	})
+	return arr
+}
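The new dbg_env.go below gathers the scattered os.LookupEnv plumbing into typed helpers that print a consistent "[dbg] env ..." trace line. A hypothetical call site (the env names and defaults are invented for illustration):

	// hypothetical usage of the helpers defined below; env names are examples
	mapSize := dbg.EnvDataSize("MDBX_MAP_SIZE", 2*datasize.GB)
	verbose := dbg.EnvBool("DL_VERBOSE", false)
	workers := dbg.EnvInt("DL_WORKERS", 1) // NB: EnvInt panics on values outside [0, 4]
	name := dbg.EnvString("DL_NAME", "default")

diff --git a/erigon-lib/common/dbg/dbg_env.go b/erigon-lib/common/dbg/dbg_env.go
new file mode 100644
index 00000000000..4e4ba1e8cf4
--- /dev/null
+++ b/erigon-lib/common/dbg/dbg_env.go
@@ -0,0 +1,57 @@
+package dbg
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+
+	"github.com/c2h5oh/datasize"
+)
+
+func EnvString(envVarName string, defaultVal string) string {
+	v, _ := os.LookupEnv(envVarName)
+	if v != "" {
+		fmt.Printf("[dbg] env %s=%s\n", envVarName, v)
+		return v
+	}
+	return defaultVal
+}
+func EnvBool(envVarName string, defaultVal bool) bool {
+	v, _ := os.LookupEnv(envVarName)
+	if v == "true" {
+		fmt.Printf("[dbg] env %s=%t\n", envVarName, true)
+		return true
+	}
+	if v == "false" {
+		fmt.Printf("[dbg] env %s=%t\n", envVarName, false)
+		return false
+	}
+	return defaultVal
+}
+func EnvInt(envVarName string, defaultVal int) int {
+	v, _ := os.LookupEnv(envVarName)
+	if v != "" {
+		i, err := strconv.Atoi(v)
+		if err != nil {
+			panic(err)
+		}
+		if i < 0 || i > 4 {
+			panic(i)
+		}
+		fmt.Printf("[dbg] env %s=%d\n", envVarName, i)
+		return i
+	}
+	return defaultVal
+}
+func EnvDataSize(envVarName string, defaultVal datasize.ByteSize) datasize.ByteSize {
+	v, _ := os.LookupEnv(envVarName)
+	if v != "" {
+		val, err := datasize.ParseString(v)
+		if err != nil {
+			panic(err)
+		}
+		fmt.Printf("[dbg] env %s=%s\n", envVarName, val)
+		return val
+	}
+	return defaultVal
+}
diff --git a/erigon-lib/common/dbg/experiments.go b/erigon-lib/common/dbg/experiments.go
index ff4f966d63f..e9df7ace44e 100644
--- a/erigon-lib/common/dbg/experiments.go
+++ b/erigon-lib/common/dbg/experiments.go
@@ -26,6 +26,8 @@ import (
 	"github.com/ledgerwatch/log/v3"
 )
 
+var StagesOnlyBlocks = EnvBool("STAGES_ONLY_BLOCKS", false)
+
 var doMemstat = true
 
 func init() {
@@ -276,8 +278,24 @@ func StopAfterReconst() bool {
 		v, _ := os.LookupEnv("STOP_AFTER_RECONSTITUTE")
 		if v == "true" {
 			stopAfterReconst = true
-			log.Info("[Experiment]", "STOP_AFTER_RECONSTITUTE", writeMap)
+			log.Info("[Experiment]", "STOP_AFTER_RECONSTITUTE", stopAfterReconst)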
 		}
 	})
 	return stopAfterReconst
 }
+
+var (
+	snapshotVersion     uint8
+	snapshotVersionOnce sync.Once
+)
+
+func SnapshotVersion() uint8 {
+	snapshotVersionOnce.Do(func() {
+		v, _ := os.LookupEnv("SNAPSHOT_VERSION")
+		if i, _ := strconv.ParseUint(v, 10, 8); i > 0 {
+			snapshotVersion = uint8(i)
+			log.Info("[Experiment]", "SNAPSHOT_VERSION", snapshotVersion)
+		}
+	})
+	return snapshotVersion
+}
diff --git a/erigon-lib/common/dir/rw_dir.go b/erigon-lib/common/dir/rw_dir.go
index 0bbf76d8f5f..2d0e7066493 100644
--- a/erigon-lib/common/dir/rw_dir.go
+++ b/erigon-lib/common/dir/rw_dir.go
@@ -49,6 +49,17 @@ func FileExist(path string) bool {
 	return true
 }
 
+func FileNonZero(path string) bool {
+	fi, err := os.Stat(path)
+	if err != nil {
+		// bail out on any stat error, not only os.IsNotExist:
+		// otherwise fi is nil and the calls below would panic
+		return false
+	}
+	if !fi.Mode().IsRegular() {
+		return false
+	}
+	return fi.Size() > 0
+}
+
 // nolint
 func WriteFileWithFsync(name string, data []byte, perm os.FileMode) error {
 	f, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
diff --git a/erigon-lib/compress/compress.go b/erigon-lib/compress/compress.go
index c9ef174d621..d2b57d458d4 100644
--- a/erigon-lib/compress/compress.go
+++ b/erigon-lib/compress/compress.go
@@ -127,6 +127,7 @@ func (c *Compressor) Close() {
 }
 
 func (c *Compressor) SetTrace(trace bool) { c.trace = trace }
+func (c *Compressor) Workers() int        { return c.workers }
 
 func (c *Compressor) Count() int { return int(c.wordsCount) }
 
diff --git a/erigon-lib/compress/decompress.go b/erigon-lib/compress/decompress.go
index 52e6bad505c..7f058628691 100644
--- a/erigon-lib/compress/decompress.go
+++ b/erigon-lib/compress/decompress.go
@@ -347,6 +347,10 @@ func (d *Decompressor) ModTime() time.Time {
 	return d.modTime
 }
 
+func (d *Decompressor) IsOpen() bool {
+	return d != nil && d.f != nil
+}
+
 func (d *Decompressor) Close() {
 	if d.f != nil {
 		if err := mmap.Munmap(d.mmapHandle1, d.mmapHandle2); err != nil {
diff --git a/erigon-lib/diagnostics/entities.go b/erigon-lib/diagnostics/entities.go
index b8bc8c8328c..747697a5c09 100644
--- a/erigon-lib/diagnostics/entities.go
+++ b/erigon-lib/diagnostics/entities.go
@@ -41,7 +41,8 @@ type SnapshotDownloadStatistics struct {
 	Alloc                uint64                               `json:"alloc"`
 	Sys                  uint64                               `json:"sys"`
 	DownloadFinished     bool                                 `json:"downloadFinished"`
-	Segments             map[string]SegmentDownloadStatistics `json:"segments"`
+	SegmentsDownloading  map[string]SegmentDownloadStatistics `json:"segmentsDownloading"`
+	SegmentIndexing      SnapshotIndexingStatistics           `json:"segmentsIndexing"`
 	TorrentMetadataReady int32                                `json:"torrentMetadataReady"`
 }
 
@@ -55,6 +56,22 @@ type SegmentDownloadStatistics struct {
 	PeersRate       uint64 `json:"peersRate"`
 }
 
+type SnapshotIndexingStatistics struct {
+	Segments    []SnapshotSegmentIndexingStatistics `json:"segments"`
+	TimeElapsed float64                             `json:"timeElapsed"`
+}
+
+type SnapshotSegmentIndexingStatistics struct {
+	SegmentName string `json:"segmentName"`
+	Percent     int    `json:"percent"`
+	Alloc       uint64 `json:"alloc"`
+	Sys         uint64 `json:"sys"`
+}
+
+type SnapshotSegmentIndexingFinishedUpdate struct {
+	SegmentName string `json:"segmentName"`
+}
+
 func (ti SnapshotDownloadStatistics) Type() Type {
 	return TypeOf(ti)
 }
@@ -62,3 +79,11 @@ func (ti SnapshotDownloadStatistics) Type() Type {
 func (ti SegmentDownloadStatistics) Type() Type {
 	return TypeOf(ti)
 }
+
+func (ti SnapshotIndexingStatistics) Type() Type {
+	return TypeOf(ti)
+}
+
+func (ti SnapshotSegmentIndexingFinishedUpdate) Type() Type {
+	return TypeOf(ti)
+}
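The three new indexing entities travel over the same typed diagnostics bus as SegmentDownloadStatistics; the listeners added in diagnostics/diagnostic.go above subscribe by type. A minimal producer-side sketch (the segment name is an example):

	// publish a finished-segment update; any running listener for this
	// type (see runSegmentIndexingFinishedListener above) receives it
	diagnostics.Send(diagnostics.SnapshotSegmentIndexingFinishedUpdate{
		SegmentName: "v1-000000-000500-headers.seg", // example name
	})

diff --git 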
a/erigon-lib/downloader/downloader.go b/erigon-lib/downloader/downloader.go
index 3305455928a..6f7d87a8d60 100644
--- a/erigon-lib/downloader/downloader.go
+++ b/erigon-lib/downloader/downloader.go
@@ -68,6 +68,8 @@ type Downloader struct {
 	webseeds  *WebSeeds
 	logger    log.Logger
 	verbosity log.Lvl
+
+	torrentFiles *TorrentFiles
 }
 
 type AggStats struct {
@@ -112,14 +114,18 @@ func New(ctx context.Context, cfg *downloadercfg.Cfg, dirs datadir.Dirs, logger
 		webseeds:  &WebSeeds{logger: logger, verbosity: verbosity, downloadTorrentFile: cfg.DownloadTorrentFilesFromWebseed, torrentsWhitelist: cfg.ExpectedTorrentFilesHashes},
 		logger:    logger,
 		verbosity: verbosity,
+		torrentFiles: &TorrentFiles{dir: cfg.Dirs.Snap},
 	}
+	d.webseeds.torrentFiles = d.torrentFiles
 	d.ctx, d.stopMainLoop = context.WithCancel(ctx)
 
-	if err := d.BuildTorrentFilesIfNeed(d.ctx); err != nil {
-		return nil, err
-	}
-	if err := d.addTorrentFilesFromDisk(false); err != nil {
-		return nil, err
+	if cfg.AddTorrentsFromDisk {
+		if err := d.BuildTorrentFilesIfNeed(d.ctx); err != nil {
+			return nil, err
+		}
+		if err := d.addTorrentFilesFromDisk(false); err != nil {
+			return nil, err
+		}
 	}
 
 	// CornerCase: no peers -> no announcements to trackers -> no magnetlink resolution (but magnetlink has filename)
@@ -140,13 +146,13 @@ func New(ctx context.Context, cfg *downloadercfg.Cfg, dirs datadir.Dirs, logger
 	return d, nil
 }
 
-const prohibitNewDownloadsFileName = "prohibit_new_downloads.lock"
+const ProhibitNewDownloadsFileName = "prohibit_new_downloads.lock"
 
 // Erigon "download once" - means restart/upgrade/downgrade will not download files (and will be fast)
 // After "download once" - Erigon will produce and seed new files
 // Downloader will be able to: seed new files (already existing on FS), download incomplete parts of existing files (if Verify found some bad parts)
 func (d *Downloader) prohibitNewDownloads() error {
-	fPath := filepath.Join(d.SnapDir(), prohibitNewDownloadsFileName)
+	fPath := filepath.Join(d.SnapDir(), ProhibitNewDownloadsFileName)
 	f, err := os.Create(fPath)
 	if err != nil {
 		return err
@@ -158,7 +164,7 @@ func (d *Downloader) prohibitNewDownloads() error {
 	return nil
 }
 func (d *Downloader) newDownloadsAreProhibited() bool {
-	return dir.FileExist(filepath.Join(d.SnapDir(), prohibitNewDownloadsFileName))
+	return dir.FileExist(filepath.Join(d.SnapDir(), ProhibitNewDownloadsFileName))
 }
 
 func (d *Downloader) MainLoopInBackground(silent bool) {
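Exporting the lock-file name as ProhibitNewDownloadsFileName lets code outside this package detect "download once" mode, and the new cfg.AddTorrentsFromDisk flag lets embedders construct a Downloader without the startup disk scan. A sketch of such an external check (snapDir and the surrounding wiring are assumptions):

	// outside the downloader package; snapDir is an assumption here
	prohibited := dir.FileExist(filepath.Join(snapDir, downloader.ProhibitNewDownloadsFileName))
	if prohibited {
		log.Info("[snapshots] new downloads are prohibited (download-once mode)")
	}

@@ -334,13 +340,13 @@ func (d *Downloader) mainLoop(silent bool) error {
 func (d *Downloader) SnapDir() string { return d.cfg.Dirs.Snap }
 
 func (d *Downloader) ReCalcStats(interval time.Duration) {
+	d.statsLock.Lock()
+	defer d.statsLock.Unlock()
 	//Call these methods outside of the `statsLock` critical section, because they have their own locks with contention
 	torrents := d.torrentClient.Torrents()
 	connStats := d.torrentClient.ConnStats()
 	peers := make(map[torrent.PeerID]struct{}, 16)
 
-	d.statsLock.Lock()
-	defer d.statsLock.Unlock()
 	prevStats, stats := d.stats, d.stats
 
 	stats.Completed = true
@@ -356,42 +362,46 @@ func (d *Downloader) ReCalcStats(interval time.Duration) {
 		select {
 		case <-t.GotInfo():
 			stats.MetadataReady++
+
+			// call methods once - to reduce internal mutex contention
 			peersOfThisFile := t.PeerConns()
 			weebseedPeersOfThisFile := t.WebseedPeerConns()
+			bytesCompleted := t.BytesCompleted()
+			tLen := t.Length()
+			torrentName := t.Name()
+
 			for _, peer := range peersOfThisFile {
 				stats.ConnectionsTotal++
 				peers[peer.PeerID] = struct{}{}
 			}
-			stats.BytesCompleted += uint64(t.BytesCompleted())
-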
stats.BytesTotal += uint64(t.Length()) + stats.BytesCompleted += uint64(bytesCompleted) + stats.BytesTotal += uint64(tLen) - progress := float32(float64(100) * (float64(t.BytesCompleted()) / float64(t.Length()))) + progress := float32(float64(100) * (float64(bytesCompleted) / float64(tLen))) if progress == 0 { - zeroProgress = append(zeroProgress, t.Name()) + zeroProgress = append(zeroProgress, torrentName) } - d.logger.Log(d.verbosity, "[snapshots] progress", "file", t.Name(), "progress", fmt.Sprintf("%.2f%%", progress), "peers", len(peersOfThisFile), "webseeds", len(weebseedPeersOfThisFile)) - isDiagEnabled := diagnostics.TypeOf(diagnostics.SegmentDownloadStatistics{}).Enabled() - if d.verbosity >= log.LvlInfo || isDiagEnabled { - webseedRates, websRates := getWebseedsRatesForlogs(weebseedPeersOfThisFile) - rates, peersRates := getPeersRatesForlogs(peersOfThisFile) - // more detailed statistic: download rate of each peer (for each file) - if !t.Complete.Bool() && progress != 0 { - d.logger.Info(fmt.Sprintf("[snapshots] webseed peers file=%s", t.Name()), webseedRates...) - d.logger.Info(fmt.Sprintf("[snapshots] bittorrent peers file=%s", t.Name()), rates...) - } + webseedRates, websRates := getWebseedsRatesForlogs(weebseedPeersOfThisFile, torrentName) + rates, peersRates := getPeersRatesForlogs(peersOfThisFile, torrentName) + // more detailed statistic: download rate of each peer (for each file) + if !t.Complete.Bool() && progress != 0 { + d.logger.Log(d.verbosity, "[snapshots] progress", "file", torrentName, "progress", fmt.Sprintf("%.2f%%", progress), "peers", len(peersOfThisFile), "webseeds", len(weebseedPeersOfThisFile)) + d.logger.Log(d.verbosity, "[snapshots] webseed peers", webseedRates...) + d.logger.Log(d.verbosity, "[snapshots] bittorrent peers", rates...) 
+ } - if isDiagEnabled { - diagnostics.Send(diagnostics.SegmentDownloadStatistics{ - Name: t.Name(), - TotalBytes: uint64(t.Length()), - DownloadedBytes: uint64(t.BytesCompleted()), - WebseedsCount: len(weebseedPeersOfThisFile), - PeersCount: len(peersOfThisFile), - WebseedsRate: websRates, - PeersRate: peersRates, - }) - } + isDiagEnabled := diagnostics.TypeOf(diagnostics.SegmentDownloadStatistics{}).Enabled() + if isDiagEnabled { + diagnostics.Send(diagnostics.SegmentDownloadStatistics{ + Name: torrentName, + TotalBytes: uint64(tLen), + DownloadedBytes: uint64(bytesCompleted), + WebseedsCount: len(weebseedPeersOfThisFile), + PeersCount: len(peersOfThisFile), + WebseedsRate: websRates, + PeersRate: peersRates, + }) } default: @@ -433,10 +443,11 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { d.stats = stats } -func getWebseedsRatesForlogs(weebseedPeersOfThisFile []*torrent.Peer) ([]interface{}, uint64) { +func getWebseedsRatesForlogs(weebseedPeersOfThisFile []*torrent.Peer, fName string) ([]interface{}, uint64) { totalRate := uint64(0) averageRate := uint64(0) webseedRates := make([]interface{}, 0, len(weebseedPeersOfThisFile)*2) + webseedRates = append(webseedRates, "file", fName) for _, peer := range weebseedPeersOfThisFile { urlS := strings.Trim(strings.TrimPrefix(peer.String(), "webseed peer for "), "\"") if urlObj, err := url.Parse(urlS); err == nil { @@ -456,10 +467,11 @@ func getWebseedsRatesForlogs(weebseedPeersOfThisFile []*torrent.Peer) ([]interfa return webseedRates, averageRate } -func getPeersRatesForlogs(peersOfThisFile []*torrent.PeerConn) ([]interface{}, uint64) { +func getPeersRatesForlogs(peersOfThisFile []*torrent.PeerConn, fName string) ([]interface{}, uint64) { totalRate := uint64(0) averageRate := uint64(0) rates := make([]interface{}, 0, len(peersOfThisFile)*2) + rates = append(rates, "file", fName) for _, peer := range peersOfThisFile { dr := uint64(peer.DownloadRate()) @@ -574,15 +586,15 @@ func (d *Downloader) AddNewSeedableFile(ctx context.Context, name string) error } // if we don't have the torrent file we build it if we have the .seg file - torrentFilePath, err := BuildTorrentIfNeed(ctx, name, d.SnapDir()) + err := BuildTorrentIfNeed(ctx, name, d.SnapDir(), d.torrentFiles) if err != nil { return fmt.Errorf("AddNewSeedableFile: %w", err) } - ts, err := loadTorrent(torrentFilePath) + ts, err := d.torrentFiles.LoadByName(name) if err != nil { return fmt.Errorf("AddNewSeedableFile: %w", err) } - err = addTorrentFile(ctx, ts, d.torrentClient, d.webseeds) + _, _, err = addTorrentFile(ctx, ts, d.torrentClient, d.webseeds) if err != nil { return fmt.Errorf("addTorrentFile: %w", err) } @@ -619,11 +631,13 @@ func (d *Downloader) AddMagnetLink(ctx context.Context, infoHash metainfo.Hash, if err != nil { return err } - spec.DisallowDataDownload = true - t, _, err := d.torrentClient.AddTorrentSpec(spec) + t, ok, err := addTorrentFile(ctx, spec, d.torrentClient, d.webseeds) if err != nil { return err } + if !ok { + return nil + } d.wg.Add(1) go func(t *torrent.Torrent) { defer d.wg.Done() @@ -634,7 +648,7 @@ func (d *Downloader) AddMagnetLink(ctx context.Context, infoHash metainfo.Hash, } mi := t.Metainfo() - if err := CreateTorrentFileIfNotExists(d.SnapDir(), t.Info(), &mi); err != nil { + if err := CreateTorrentFileIfNotExists(d.SnapDir(), t.Info(), &mi, d.torrentFiles); err != nil { d.logger.Warn("[snapshots] create torrent file", "err", err) return } @@ -667,12 +681,12 @@ func (d *Downloader) addTorrentFilesFromDisk(quiet bool) error { logEvery := 
time.NewTicker(20 * time.Second)
 	defer logEvery.Stop()
 
-	files, err := AllTorrentSpecs(d.cfg.Dirs)
+	files, err := AllTorrentSpecs(d.cfg.Dirs, d.torrentFiles)
 	if err != nil {
 		return err
 	}
 	for i, ts := range files {
-		err := addTorrentFile(d.ctx, ts, d.torrentClient, d.webseeds)
+		_, _, err := addTorrentFile(d.ctx, ts, d.torrentClient, d.webseeds)
 		if err != nil {
 			return err
 		}
@@ -687,7 +701,7 @@ func (d *Downloader) addTorrentFilesFromDisk(quiet bool) error {
 	return nil
 }
 func (d *Downloader) BuildTorrentFilesIfNeed(ctx context.Context) error {
-	return BuildTorrentFilesIfNeed(ctx, d.cfg.Dirs)
+	return BuildTorrentFilesIfNeed(ctx, d.cfg.Dirs, d.torrentFiles)
 }
 func (d *Downloader) Stats() AggStats {
 	d.statsLock.RLock()
diff --git a/erigon-lib/downloader/downloader_grpc_server.go b/erigon-lib/downloader/downloader_grpc_server.go
index 2787fbc280f..5d4e763e8fe 100644
--- a/erigon-lib/downloader/downloader_grpc_server.go
+++ b/erigon-lib/downloader/downloader_grpc_server.go
@@ -108,7 +108,7 @@ func (s *GrpcServer) Delete(ctx context.Context, request *proto_downloader.Delet
 		fPath := filepath.Join(s.d.SnapDir(), name)
 		_ = os.Remove(fPath)
-		_ = os.Remove(fPath + ".torrent")
+		s.d.torrentFiles.Delete(name)
 	}
 	return &emptypb.Empty{}, nil
 }
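With .torrent handling centralized, Delete above routes through the same lock-guarded TorrentFiles registry that now guards creation and loading (see torrent_files.go further down). A sketch of driving the registry directly (snapDir and the segment name are examples):

	// the same registry the gRPC server uses internally
	tf := downloader.NewAtomicTorrentFiles(snapDir) // snapDir is an assumption
	if tf.Exists("v1-000500-001000-bodies.seg") {
		_ = tf.Delete("v1-000500-001000-bodies.seg")
	}

diff --git a/erigon-lib/downloader/downloader_test.go b/erigon-lib/downloader/downloader_test.go
index f94e3fa0d4b..5fd4153a9e1 100644
--- a/erigon-lib/downloader/downloader_test.go
+++ b/erigon-lib/downloader/downloader_test.go
@@ -48,19 +48,20 @@ func TestNoEscape(t *testing.T) {
 	dirs := datadir.New(t.TempDir())
 	ctx := context.Background()
 
+	tf := NewAtomicTorrentFiles(dirs.Snap)
+
 	// allow adding files only if they are inside snapshots dir
-	_, err := BuildTorrentIfNeed(ctx, "a.seg", dirs.Snap)
+	err := BuildTorrentIfNeed(ctx, "a.seg", dirs.Snap, tf)
 	require.NoError(err)
-	_, err = BuildTorrentIfNeed(ctx, "b/a.seg", dirs.Snap)
+	err = BuildTorrentIfNeed(ctx, "b/a.seg", dirs.Snap, tf)
 	require.NoError(err)
-	_, err = BuildTorrentIfNeed(ctx, filepath.Join(dirs.Snap, "a.seg"), dirs.Snap)
+	err = BuildTorrentIfNeed(ctx, filepath.Join(dirs.Snap, "a.seg"), dirs.Snap, tf)
 	require.NoError(err)
-	_, err = BuildTorrentIfNeed(ctx, filepath.Join(dirs.Snap, "b", "a.seg"), dirs.Snap)
+	err = BuildTorrentIfNeed(ctx, filepath.Join(dirs.Snap, "b", "a.seg"), dirs.Snap, tf)
 	require.NoError(err)
 
 	// reject escaping snapshots dir
-	_, err = BuildTorrentIfNeed(ctx, filepath.Join(dirs.Chaindata, "b", "a.seg"), dirs.Snap)
+	err = BuildTorrentIfNeed(ctx, filepath.Join(dirs.Chaindata, "b", "a.seg"), dirs.Snap, tf)
 	require.Error(err)
-	_, err = BuildTorrentIfNeed(ctx, "./../a.seg", dirs.Snap)
+	err = BuildTorrentIfNeed(ctx, "./../a.seg", dirs.Snap, tf)
 	require.Error(err)
 }
diff --git a/erigon-lib/downloader/downloadercfg/downloadercfg.go b/erigon-lib/downloader/downloadercfg/downloadercfg.go
index 335429714c3..6a466c2fea5 100644
--- a/erigon-lib/downloader/downloadercfg/downloadercfg.go
+++ b/erigon-lib/downloader/downloadercfg/downloadercfg.go
@@ -17,9 +17,9 @@
 package downloadercfg
 
 import (
-	"io/ioutil"
 	"net"
 	"net/url"
+	"os"
 	"path/filepath"
 	"runtime"
 	"strings"
@@ -54,6 +54,7 @@ type Cfg struct {
 	WebSeedS3Tokens                 []string
 	ExpectedTorrentFilesHashes      snapcfg.Preverified
 	DownloadTorrentFilesFromWebseed bool
+	AddTorrentsFromDisk             bool
 	ChainName                       string
 
 	Dirs datadir.Dirs
@@ -188,17 +189,17 @@ func New(dirs datadir.Dirs, version string, verbosity lg.Level, downloadRate, up
 		webseedFileProviders = append(webseedFileProviders, localCfgFile)
 	}
 	//TODO: if we don't pass the "downloaded files list" here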
(which we store in db), a synced erigon will download new .torrent files, and erigon can't work with "unfinished" files.
-	snapCfg := snapcfg.KnownCfg(chainName)
+	snapCfg := snapcfg.KnownCfg(chainName, 0)
 	return &Cfg{Dirs: dirs, ChainName: chainName, ClientConfig: torrentConfig, DownloadSlots: downloadSlots,
 		WebSeedUrls: webseedHttpProviders, WebSeedFiles: webseedFileProviders, WebSeedS3Tokens: webseedS3Providers,
-		DownloadTorrentFilesFromWebseed: false, ExpectedTorrentFilesHashes: snapCfg.Preverified,
+		DownloadTorrentFilesFromWebseed: true, AddTorrentsFromDisk: true, ExpectedTorrentFilesHashes: snapCfg.Preverified,
 	}, nil
 }
 
 func getIpv6Enabled() bool {
 	if runtime.GOOS == "linux" {
-		file, err := ioutil.ReadFile("/sys/module/ipv6/parameters/disable")
+		file, err := os.ReadFile("/sys/module/ipv6/parameters/disable")
 		if err != nil {
 			log.Warn("could not read /sys/module/ipv6/parameters/disable for ipv6 detection")
 			return false
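The new rclone.go below wraps an out-of-process `rclone rcd` daemon and drives it through its local HTTP remote-control API (sync/sync, operations/list, config/listremotes). Going by the TestDownload flow in rclone_test.go further down, a typical session looks roughly like this (the remote and file names are examples):

	// sketch mirroring the TestDownload flow in rclone_test.go below;
	// localDir and the bucket name are assumptions
	func syncFromRemote(ctx context.Context, localDir string) error {
		cli, err := downloader.NewRCloneClient(log.Root())
		if err != nil {
			return err
		}
		session, err := cli.NewSession(ctx, localDir, "r2:some-bucket")
		if err != nil {
			return err
		}
		defer session.Stop()
		return session.Download(ctx, "manifest.txt")
	}

diff --git a/erigon-lib/downloader/rclone.go b/erigon-lib/downloader/rclone.go
new file mode 100644
index 00000000000..4f43eaba6fd
--- /dev/null
+++ b/erigon-lib/downloader/rclone.go
@@ -0,0 +1,783 @@
+package downloader
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/fs"
+	"net"
+	"net/http"
+	"os"
+	"os/exec"
+	"os/signal"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"syscall"
+	"time"
+
+	"golang.org/x/exp/slices"
+
+	"github.com/ledgerwatch/erigon-lib/common/dbg"
+	"github.com/ledgerwatch/erigon-lib/downloader/snaptype"
+	"github.com/ledgerwatch/log/v3"
+	"github.com/spaolacci/murmur3"
+	"golang.org/x/sync/errgroup"
+)
+
+type rcloneInfo struct {
+	sync.Mutex
+	file       string
+	snapInfo   *snaptype.FileInfo
+	remoteInfo remoteInfo
+	localInfo  fs.FileInfo
+}
+
+func (i *rcloneInfo) Version() uint8 {
+	if i.snapInfo != nil {
+		return i.snapInfo.Version
+	}
+
+	return 0
+}
+
+func (i *rcloneInfo) From() uint64 {
+	if i.snapInfo != nil {
+		return i.snapInfo.From
+	}
+
+	return 0
+}
+
+func (i *rcloneInfo) To() uint64 {
+	if i.snapInfo != nil {
+		return i.snapInfo.To
+	}
+
+	return 0
+}
+
+func (i *rcloneInfo) Type() snaptype.Type {
+	if i.snapInfo != nil {
+		return i.snapInfo.T
+	}
+
+	return snaptype.Unknown
+}
+
+type RCloneClient struct {
+	rclone        *exec.Cmd
+	rcloneUrl     string
+	rcloneSession *http.Client
+	logger        log.Logger
+}
+
+func (c *RCloneClient) start(logger log.Logger) error {
+	c.logger = logger
+
+	rclone, _ := exec.LookPath("rclone")
+
+	if len(rclone) == 0 {
+		logger.Warn("[rclone] Uploading disabled: rclone not found in PATH")
+		return fmt.Errorf("rclone not found in PATH")
+	}
+
+	if p, err := freePort(); err == nil {
+		ctx, cancel := context.WithCancel(context.Background())
+
+		addr := fmt.Sprintf("127.0.0.1:%d", p)
+		c.rclone = exec.CommandContext(ctx, rclone, "rcd", "--rc-addr", addr, "--rc-no-auth")
+		c.rcloneUrl = "http://" + addr
+		c.rcloneSession = &http.Client{} // no timeout - we're doing sync calls
+
+		if err := c.rclone.Start(); err != nil {
+			cancel()
+			logger.Warn("[rclone] Uploading disabled: rclone didn't start", "err", err)
+			return fmt.Errorf("rclone didn't start: %w", err)
+		} else {
+			logger.Info("[rclone] rclone started", "addr", addr)
+		}
+
+		go func() {
+			signalCh := make(chan os.Signal, 1)
+			signal.Notify(signalCh, syscall.SIGTERM, syscall.SIGINT)
+
+			switch s := <-signalCh; s {
+			case syscall.SIGTERM, syscall.SIGINT:
+				cancel()
+			}
+		}()
+	}
+
+	return nil
+}
+
+func (c *RCloneClient) ListRemotes(ctx context.Context) ([]string, error) {
+	result, err := c.cmd(ctx, 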
"config/listremotes", nil) + + if err != nil { + return nil, err + } + + remotes := struct { + Remotes []string `json:"remotes"` + }{} + + err = json.Unmarshal(result, &remotes) + + if err != nil { + return nil, err + } + + return remotes.Remotes, nil +} + +func (u *RCloneClient) sync(ctx context.Context, request *rcloneRequest) error { + _, err := u.cmd(ctx, "sync/sync", request) + return err +} + +/* +return retryConnects(ctx, func(ctx context.Context) error { + return client.CallContext(ctx, result, string(method), args...) +}) +} +*/ + +func isConnectionError(err error) bool { + var opErr *net.OpError + if errors.As(err, &opErr) { + return opErr.Op == "dial" + } + return false +} + +const connectionTimeout = time.Second * 5 + +func retry(ctx context.Context, op func(context.Context) error, isRecoverableError func(error) bool, delay time.Duration, lastErr error) error { + err := op(ctx) + if err == nil { + return nil + } + if errors.Is(err, context.DeadlineExceeded) && lastErr != nil { + return lastErr + } + if !isRecoverableError(err) { + return err + } + + delayTimer := time.NewTimer(delay) + select { + case <-delayTimer.C: + return retry(ctx, op, isRecoverableError, delay, err) + case <-ctx.Done(): + if errors.Is(ctx.Err(), context.DeadlineExceeded) { + return err + } + return ctx.Err() + } +} + +func (u *RCloneClient) cmd(ctx context.Context, path string, args interface{}) ([]byte, error) { + requestBody, err := json.Marshal(args) + + if err != nil { + return nil, err + } + + request, err := http.NewRequestWithContext(ctx, http.MethodPost, + u.rcloneUrl+"/"+path, bytes.NewBuffer(requestBody)) + + if err != nil { + return nil, err + } + + request.Header.Set("Content-Type", "application/json") + + ctx, cancel := context.WithTimeout(ctx, connectionTimeout) + defer cancel() + + var response *http.Response + + err = retry(ctx, func(ctx context.Context) error { + response, err = u.rcloneSession.Do(request) //nolint:bodyclose + return err + }, isConnectionError, time.Millisecond*200, nil) + + if err != nil { + return nil, err + } + + defer response.Body.Close() + + if response.StatusCode != http.StatusOK { + responseBody := struct { + Error string `json:"error"` + }{} + + if err := json.NewDecoder(response.Body).Decode(&responseBody); err == nil && len(responseBody.Error) > 0 { + u.logger.Warn("[rclone] cmd failed", "path", path, "status", response.Status, "err", responseBody.Error) + return nil, fmt.Errorf("cmd: %s failed: %s: %s", path, response.Status, responseBody.Error) + } else { + u.logger.Warn("[rclone] cmd failed", "path", path, "status", response.Status) + return nil, fmt.Errorf("cmd: %s failed: %s", path, response.Status) + } + } + + return io.ReadAll(response.Body) +} + +type RCloneSession struct { + *RCloneClient + sync.Mutex + files map[string]*rcloneInfo + oplock sync.Mutex + remoteFs string + localFs string + syncQueue chan syncRequest + syncScheduled atomic.Bool + activeSyncCount atomic.Int32 + cancel context.CancelFunc +} + +var rcClient RCloneClient +var rcClientStart sync.Once + +func NewRCloneClient(logger log.Logger) (*RCloneClient, error) { + var err error + + rcClientStart.Do(func() { + err = rcClient.start(logger) + }) + + if err != nil { + return nil, err + } + + return &rcClient, nil +} + +func freePort() (port int, err error) { + if a, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0"); err != nil { + return 0, err + } else { + if l, err := net.ListenTCP("tcp", a); err != nil { + return 0, err + } else { + defer l.Close() + return l.Addr().(*net.TCPAddr).Port, nil 
+ } + } +} + +func (c *RCloneClient) NewSession(ctx context.Context, localFs string, remoteFs string) (*RCloneSession, error) { + ctx, cancel := context.WithCancel(ctx) + + session := &RCloneSession{ + RCloneClient: c, + files: map[string]*rcloneInfo{}, + remoteFs: remoteFs, + localFs: localFs, + cancel: cancel, + syncQueue: make(chan syncRequest, 100), + } + + go func() { + if _, err := session.ReadRemoteDir(ctx, true); err == nil { + session.syncFiles(ctx) + } + }() + + return session, nil +} + +func (c *RCloneSession) RemoteFsRoot() string { + return c.remoteFs +} + +func (c *RCloneSession) LocalFsRoot() string { + return c.localFs +} + +func (c *RCloneSession) Stop() { + c.cancel() +} + +type syncRequest struct { + ctx context.Context + info map[string]*rcloneInfo + cerr chan error + request *rcloneRequest + retryTime time.Duration +} + +func (c *RCloneSession) Upload(ctx context.Context, files ...string) error { + c.Lock() + + reqInfo := map[string]*rcloneInfo{} + + for _, file := range files { + info, ok := c.files[file] + + if !ok || info.localInfo == nil { + localInfo, err := os.Stat(filepath.Join(c.localFs, file)) + + if err != nil { + c.Unlock() + return fmt.Errorf("can't upload: %s: %w", file, err) + } + + if !localInfo.Mode().IsRegular() || localInfo.Size() == 0 { + c.Unlock() + return fmt.Errorf("can't upload: %s: %s", file, "file is not uploadable") + } + + if ok { + info.localInfo = localInfo + } else { + info := &rcloneInfo{ + file: file, + localInfo: localInfo, + } + + if snapInfo, ok := snaptype.ParseFileName(c.localFs, file); ok { + info.snapInfo = &snapInfo + } + + c.files[file] = info + } + } else { + reqInfo[file] = info + } + } + + c.Unlock() + + cerr := make(chan error, 1) + + c.syncQueue <- syncRequest{ctx, reqInfo, cerr, + &rcloneRequest{ + Group: c.Label(), + SrcFs: c.localFs, + DstFs: c.remoteFs, + Filter: rcloneFilter{ + IncludeRule: files, + }}, 0} + + return <-cerr +} + +func (c *RCloneSession) Download(ctx context.Context, files ...string) error { + c.Lock() + + if len(c.files) == 0 { + c.Unlock() + _, err := c.ReadRemoteDir(ctx, false) + if err != nil { + return fmt.Errorf("can't download: %s: %w", files, err) + } + c.Lock() + } + + reqInfo := map[string]*rcloneInfo{} + + for _, file := range files { + info, ok := c.files[file] + + if !ok || info.remoteInfo.Size == 0 { + c.Unlock() + return fmt.Errorf("can't download: %s: %w", file, os.ErrNotExist) + } + + reqInfo[file] = info + } + + c.Unlock() + + cerr := make(chan error, 1) + + c.syncQueue <- syncRequest{ctx, reqInfo, cerr, + &rcloneRequest{ + SrcFs: c.remoteFs, + DstFs: c.localFs, + Filter: rcloneFilter{ + IncludeRule: files, + }}, 0} + + return <-cerr +} + +func (c *RCloneSession) Cat(ctx context.Context, file string) (io.Reader, error) { + rclone, err := exec.LookPath("rclone") + + if err != nil { + return nil, err + } + + cmd := exec.CommandContext(ctx, rclone, "cat", c.remoteFs+"/"+file) + + stdout, err := cmd.StdoutPipe() + + if err != nil { + return nil, err + } + + if err := cmd.Start(); err != nil { + return nil, err + } + + return stdout, nil +} + +func (c *RCloneSession) ReadLocalDir(ctx context.Context) ([]fs.DirEntry, error) { + return os.ReadDir(c.localFs) +} + +func (c *RCloneSession) Label() string { + return strconv.FormatUint(murmur3.Sum64([]byte(c.localFs+"<->"+c.remoteFs)), 36) +} + +type remoteInfo struct { + Name string + Size uint64 + ModTime time.Time +} + +type SnapInfo interface { + Version() uint8 + From() uint64 + To() uint64 + Type() snaptype.Type +} + +type fileInfo struct { 
+ *rcloneInfo +} + +func (fi *fileInfo) Name() string { + return fi.file +} + +func (fi *fileInfo) Size() int64 { + return int64(fi.remoteInfo.Size) +} + +func (fi *fileInfo) Mode() fs.FileMode { + return fs.ModeIrregular +} + +func (fi *fileInfo) ModTime() time.Time { + return fi.remoteInfo.ModTime +} + +func (fi *fileInfo) IsDir() bool { + return false +} + +func (fi *fileInfo) Sys() any { + return fi.rcloneInfo +} + +type dirEntry struct { + info *fileInfo +} + +func (e dirEntry) Name() string { + return e.info.Name() +} + +func (e dirEntry) IsDir() bool { + return e.info.IsDir() +} + +func (e dirEntry) Type() fs.FileMode { + return e.info.Mode() +} + +func (e dirEntry) Info() (fs.FileInfo, error) { + return e.info, nil +} + +var ErrAccessDenied = errors.New("access denied") + +func (c *RCloneSession) ReadRemoteDir(ctx context.Context, refresh bool) ([]fs.DirEntry, error) { + if len(c.remoteFs) == 0 { + return nil, fmt.Errorf("remote fs undefined") + } + + c.oplock.Lock() + defer c.oplock.Unlock() + + c.Lock() + fileCount := len(c.files) + c.Unlock() + + if fileCount == 0 || refresh { + listBody, err := json.Marshal(struct { + Fs string `json:"fs"` + Remote string `json:"remote"` + }{ + Fs: c.remoteFs, + Remote: "", + }) + + if err != nil { + return nil, fmt.Errorf("can't marshal list request: %w", err) + } + + listRequest, err := http.NewRequestWithContext(ctx, http.MethodPost, + c.rcloneUrl+"/operations/list", bytes.NewBuffer(listBody)) + + if err != nil { + return nil, fmt.Errorf("can't create list request: %w", err) + } + + listRequest.Header.Set("Content-Type", "application/json") + + var response *http.Response + + for i := 0; i < 10; i++ { + response, err = c.rcloneSession.Do(listRequest) //nolint:bodyclose + if err == nil { + break + } + time.Sleep(2 * time.Second) + } + + if err != nil { + return nil, fmt.Errorf("can't get remote list: %w", err) + } + + defer response.Body.Close() + + if response.StatusCode != http.StatusOK { + body, _ := io.ReadAll(response.Body) + e := struct { + Error string `json:"error"` + }{} + + if err := json.Unmarshal(body, &e); err == nil { + if strings.Contains(e.Error, "AccessDenied") { + return nil, fmt.Errorf("can't get remote list: %w", ErrAccessDenied) + } + } + + return nil, fmt.Errorf("can't get remote list: %s: %s", response.Status, string(body)) + } + + responseBody := struct { + List []remoteInfo `json:"list"` + }{} + + if err := json.NewDecoder(response.Body).Decode(&responseBody); err != nil { + return nil, fmt.Errorf("can't decode remote list: %w", err) + } + + for _, fi := range responseBody.List { + localInfo, _ := os.Stat(filepath.Join(c.localFs, fi.Name)) + + c.Lock() + if rcinfo, ok := c.files[fi.Name]; ok { + rcinfo.localInfo = localInfo + rcinfo.remoteInfo = fi + + if snapInfo, ok := snaptype.ParseFileName(c.localFs, fi.Name); ok { + rcinfo.snapInfo = &snapInfo + } else { + rcinfo.snapInfo = nil + } + + } else { + info := &rcloneInfo{ + file: fi.Name, + localInfo: localInfo, + remoteInfo: fi, + } + + if snapInfo, ok := snaptype.ParseFileName(c.localFs, fi.Name); ok { + info.snapInfo = &snapInfo + } + + c.files[fi.Name] = info + } + c.Unlock() + } + } + + var entries = make([]fs.DirEntry, 0, len(c.files)) + + for _, info := range c.files { + if info.remoteInfo.Size > 0 { + entries = append(entries, &dirEntry{&fileInfo{info}}) + } + } + + slices.SortFunc(entries, func(a, b fs.DirEntry) int { + return strings.Compare(a.Name(), b.Name()) + }) + + return entries, nil +} + +type rcloneFilter struct { + IncludeRule []string 
`json:"IncludeRule"` +} + +type rcloneRequest struct { + Async bool `json:"_async,omitempty"` + Config map[string]interface{} `json:"_config,omitempty"` + Group string `json:"group"` + SrcFs string `json:"srcFs"` + DstFs string `json:"dstFs"` + Filter rcloneFilter `json:"_filter"` +} + +func (c *RCloneSession) syncFiles(ctx context.Context) { + if !c.syncScheduled.CompareAndSwap(false, true) { + return + } + + g, gctx := errgroup.WithContext(ctx) + g.SetLimit(16) + + minRetryTime := 30 * time.Second + maxRetryTime := 300 * time.Second + + retry := func(request syncRequest) { + switch { + case request.retryTime == 0: + request.retryTime = minRetryTime + case request.retryTime < maxRetryTime: + request.retryTime += request.retryTime + default: + request.retryTime = maxRetryTime + } + + retryTimer := time.NewTicker(request.retryTime) + + select { + case <-request.ctx.Done(): + request.cerr <- request.ctx.Err() + return + case <-retryTimer.C: + } + + c.Lock() + syncQueue := c.syncQueue + c.Unlock() + + if syncQueue != nil { + syncQueue <- request + } else { + request.cerr <- fmt.Errorf("no sync queue available") + } + } + + go func() { + logEvery := time.NewTicker(20 * time.Second) + defer logEvery.Stop() + + select { + case <-gctx.Done(): + if syncCount := int(c.activeSyncCount.Load()) + len(c.syncQueue); syncCount > 0 { + log.Info("[rclone] Synced files", "processed", fmt.Sprintf("%d/%d", c.activeSyncCount.Load(), syncCount)) + } + + c.Lock() + syncQueue := c.syncQueue + c.syncQueue = nil + c.Unlock() + + if syncQueue != nil { + close(syncQueue) + } + + return + case <-logEvery.C: + if syncCount := int(c.activeSyncCount.Load()) + len(c.syncQueue); syncCount > 0 { + log.Info("[rclone] Syncing files", "progress", fmt.Sprintf("%d/%d", c.activeSyncCount.Load(), syncCount)) + } + } + }() + + go func() { + for req := range c.syncQueue { + + if gctx.Err() != nil { + req.cerr <- gctx.Err() + continue + } + + func(req syncRequest) { + g.Go(func() error { + c.activeSyncCount.Add(1) + + defer func() { + c.activeSyncCount.Add(-1) + if r := recover(); r != nil { + log.Error("[rclone] snapshot sync failed", "err", r, "stack", dbg.Stack()) + + if gctx.Err() != nil { + req.cerr <- gctx.Err() + } + + var err error + var ok bool + + if err, ok = r.(error); ok { + req.cerr <- fmt.Errorf("snapshot sync failed: %w", err) + } else { + req.cerr <- fmt.Errorf("snapshot sync failed: %s", r) + } + + return + } + }() + + if req.ctx.Err() != nil { + req.cerr <- req.ctx.Err() + return nil //nolint:nilerr + } + + if err := c.sync(gctx, req.request); err != nil { + + if gctx.Err() != nil { + req.cerr <- gctx.Err() + } else { + go retry(req) + } + + return nil //nolint:nilerr + } + + for _, info := range req.info { + localInfo, _ := os.Stat(filepath.Join(c.localFs, info.file)) + + info.Lock() + info.localInfo = localInfo + info.remoteInfo = remoteInfo{ + Name: info.file, + Size: uint64(localInfo.Size()), + ModTime: localInfo.ModTime(), + } + info.Unlock() + } + + req.cerr <- nil + return nil + }) + }(req) + } + + c.syncScheduled.Store(false) + + if err := g.Wait(); err != nil { + c.logger.Debug("[rclone] uploading failed", "err", err) + } + }() +} diff --git a/erigon-lib/downloader/rclone_test.go b/erigon-lib/downloader/rclone_test.go new file mode 100644 index 00000000000..9e58dc333a7 --- /dev/null +++ b/erigon-lib/downloader/rclone_test.go @@ -0,0 +1,100 @@ +package downloader_test + +import ( + "context" + "errors" + "io" + "os" + "os/exec" + "testing" + + "github.com/ledgerwatch/erigon-lib/downloader" + 
"github.com/ledgerwatch/log/v3" +) + +func hasRClone() bool { + rclone, _ := exec.LookPath("rclone") + + if len(rclone) == 0 { + return false + } + + return true +} + +func TestDownload(t *testing.T) { + t.Skip() + if !hasRClone() { + t.Skip("rclone not available") + } + + ctx := context.Background() + + tmpDir := t.TempDir() + remoteDir := "r2:erigon-v2-snapshots-bor-mainnet" + + cli, err := downloader.NewRCloneClient(log.Root()) + + if err != nil { + t.Fatal(err) + } + + rcc, err := cli.NewSession(ctx, tmpDir, remoteDir) + + if err != nil { + t.Fatal(err) + } + + dir, err := rcc.ReadRemoteDir(ctx, true) + + if err != nil { + if errors.Is(err, downloader.ErrAccessDenied) { + t.Skip("rclone dir not accessible") + } + + t.Fatal(err) + } + + for _, entry := range dir { + if len(entry.Name()) == 0 { + t.Fatal("unexpected nil file name") + } + //fmt.Println(entry.Name()) + } + + err = rcc.Download(ctx, "manifest.txt") + + if err != nil { + t.Fatal(err) + } + + h0, err := os.ReadFile("manifest.txt") + + if err != nil { + t.Fatal(err) + } + + if len(h0) == 0 { + t.Fatal("unexpected nil file") + } + //fmt.Print(string(h0)) + + reader, err := rcc.Cat(ctx, "manifest.txt") + + if err != nil { + t.Fatal(err) + } + + h1, err := io.ReadAll(reader) + + if err != nil { + t.Fatal(err) + } + + if string(h0) != string(h1) { + t.Fatal("Download and Cat contents mismatched") + } + //fmt.Print(string(h1)) + + rcc.Stop() +} diff --git a/erigon-lib/downloader/snaptype/files.go b/erigon-lib/downloader/snaptype/files.go index 069707cfe29..274c91bd35f 100644 --- a/erigon-lib/downloader/snaptype/files.go +++ b/erigon-lib/downloader/snaptype/files.go @@ -35,17 +35,15 @@ import ( type Type int const ( + Unknown Type = -1 Headers Type = iota Bodies Transactions BorEvents BorSpans - NumberOfTypes BeaconBlocks ) -var BorSnapshotTypes = []Type{BorEvents, BorSpans} - func (ft Type) String() string { switch ft { case Headers: @@ -80,7 +78,7 @@ func ParseFileType(s string) (Type, bool) { case "beaconblocks": return BeaconBlocks, true default: - return NumberOfTypes, false + return Unknown, false } } @@ -94,16 +92,25 @@ func (it IdxType) String() string { return string(it) } var BlockSnapshotTypes = []Type{Headers, Bodies, Transactions} +var BorSnapshotTypes = []Type{BorEvents, BorSpans} + var ( ErrInvalidFileName = fmt.Errorf("invalid compressed file name") ) -func FileName(from, to uint64, fileType string) string { - return fmt.Sprintf("v1-%06d-%06d-%s", from/1_000, to/1_000, fileType) +func FileName(version uint8, from, to uint64, fileType string) string { + return fmt.Sprintf("v%d-%06d-%06d-%s", version, from/1_000, to/1_000, fileType) +} + +func SegmentFileName(version uint8, from, to uint64, t Type) string { + return FileName(version, from, to, t.String()) + ".seg" +} +func DatFileName(version uint8, from, to uint64, fType string) string { + return FileName(version, from, to, fType) + ".dat" +} +func IdxFileName(version uint8, from, to uint64, fType string) string { + return FileName(version, from, to, fType) + ".idx" } -func SegmentFileName(from, to uint64, t Type) string { return FileName(from, to, t.String()) + ".seg" } -func DatFileName(from, to uint64, fType string) string { return FileName(from, to, fType) + ".dat" } -func IdxFileName(from, to uint64, fType string) string { return FileName(from, to, fType) + ".idx" } func FilterExt(in []FileInfo, expectExt string) (out []FileInfo) { for _, f := range in { @@ -114,8 +121,8 @@ func FilterExt(in []FileInfo, expectExt string) (out []FileInfo) { } return out } -func 
FilesWithExt(dir, expectExt string) ([]FileInfo, error) { - files, err := ParseDir(dir) +func FilesWithExt(dir string, version uint8, expectExt string) ([]FileInfo, error) { + files, err := ParseDir(dir, version) if err != nil { return nil, err } @@ -139,8 +146,16 @@ func ParseFileName(dir, fileName string) (res FileInfo, ok bool) { if len(parts) < 4 { return res, ok } - version := parts[0] - _ = version + + var version uint8 + if len(parts[0]) > 1 && parts[0][0] == 'v' { + v, err := strconv.ParseUint(parts[0][1:], 10, 64) + if err != nil { + return + } + version = uint8(v) + } + from, err := strconv.ParseUint(parts[1], 10, 64) if err != nil { return @@ -153,7 +168,8 @@ func ParseFileName(dir, fileName string) (res FileInfo, ok bool) { if !ok { return res, ok } - return FileInfo{From: from * 1_000, To: to * 1_000, Path: filepath.Join(dir, fileName), T: ft, Ext: ext}, ok + + return FileInfo{Version: version, From: from * 1_000, To: to * 1_000, Path: filepath.Join(dir, fileName), T: ft, Ext: ext}, ok } const Erigon3SeedableSteps = 32 @@ -164,10 +180,12 @@ const Erigon3SeedableSteps = 32 // - avoiding having too much files: // more files(shards) - means "more metadata", "more lookups for non-indexed queries", "more dictionaries", "more bittorrent connections", ... // less files - means small files will be removed after merge (no peers for this files). -const Erigon2RecentMergeLimit = 100_000 //nolint -const Erigon2MergeLimit = 500_000 +const Erigon2OldMergeLimit = 500_000 +const Erigon2MergeLimit = 100_000 const Erigon2MinSegmentSize = 1_000 +var MergeSteps = []uint64{100_000, 10_000} + // FileInfo - parsed file metadata type FileInfo struct { Version uint8 @@ -178,13 +196,18 @@ type FileInfo struct { func (f FileInfo) TorrentFileExists() bool { return dir.FileExist(f.Path + ".torrent") } func (f FileInfo) Seedable() bool { - return f.To-f.From == Erigon2MergeLimit || f.To-f.From == Erigon2RecentMergeLimit + return f.To-f.From == Erigon2MergeLimit || f.To-f.From == Erigon2OldMergeLimit } func (f FileInfo) NeedTorrentFile() bool { return f.Seedable() && !f.TorrentFileExists() } +func (f FileInfo) Name() string { return filepath.Base(f.Path) } -func IdxFiles(dir string) (res []FileInfo, err error) { return FilesWithExt(dir, ".idx") } -func Segments(dir string) (res []FileInfo, err error) { return FilesWithExt(dir, ".seg") } -func TmpFiles(dir string) (res []string, err error) { +func IdxFiles(dir string, version uint8) (res []FileInfo, err error) { + return FilesWithExt(dir, version, ".idx") +} +func Segments(dir string, version uint8) (res []FileInfo, err error) { + return FilesWithExt(dir, version, ".seg") +} +func TmpFiles(dir string, version uint8) (res []string, err error) { files, err := os.ReadDir(dir) if err != nil { if errors.Is(err, os.ErrNotExist) { @@ -192,20 +215,24 @@ func TmpFiles(dir string) (res []string, err error) { } return nil, err } + + v := fmt.Sprint("v", version) + for _, f := range files { - if f.IsDir() || len(f.Name()) < 3 { + if f.IsDir() || len(f.Name()) < 3 || !strings.HasPrefix(f.Name(), v) { continue } if filepath.Ext(f.Name()) != ".tmp" { continue } + res = append(res, filepath.Join(dir, f.Name())) } return res, nil } // ParseDir - reading dir ( -func ParseDir(dir string) (res []FileInfo, err error) { +func ParseDir(dir string, version uint8) (res []FileInfo, err error) { files, err := os.ReadDir(dir) if err != nil { if errors.Is(err, os.ErrNotExist) { @@ -213,12 +240,15 @@ func ParseDir(dir string) (res []FileInfo, err error) { } return nil, err } + + v := 
fmt.Sprint("v", version) + for _, f := range files { fileInfo, err := f.Info() if err != nil { return nil, err } - if f.IsDir() || fileInfo.Size() == 0 || len(f.Name()) < 3 { + if f.IsDir() || fileInfo.Size() == 0 || len(f.Name()) < 3 || !strings.HasPrefix(f.Name(), v) { continue } diff --git a/erigon-lib/downloader/torrent_files.go b/erigon-lib/downloader/torrent_files.go new file mode 100644 index 00000000000..1e27c8e0e40 --- /dev/null +++ b/erigon-lib/downloader/torrent_files.go @@ -0,0 +1,106 @@ +package downloader + +import ( + "fmt" + "os" + "path/filepath" + "sync" + + "github.com/anacrolix/torrent" + "github.com/anacrolix/torrent/metainfo" + dir2 "github.com/ledgerwatch/erigon-lib/common/dir" +) + +// TorrentFiles - does provide thread-safe CRUD operations on .torrent files +type TorrentFiles struct { + lock sync.Mutex + dir string +} + +func NewAtomicTorrentFiles(dir string) *TorrentFiles { + return &TorrentFiles{dir: dir} +} + +func (tf *TorrentFiles) Exists(name string) bool { + tf.lock.Lock() + defer tf.lock.Unlock() + return tf.exists(name) +} + +func (tf *TorrentFiles) exists(name string) bool { + fPath := filepath.Join(tf.dir, name) + return dir2.FileExist(fPath + ".torrent") +} +func (tf *TorrentFiles) Delete(name string) error { + tf.lock.Lock() + defer tf.lock.Unlock() + return tf.delete(name) +} + +func (tf *TorrentFiles) delete(name string) error { + fPath := filepath.Join(tf.dir, name) + return os.Remove(fPath + ".torrent") +} + +func (tf *TorrentFiles) Create(torrentFilePath string, res []byte) error { + tf.lock.Lock() + defer tf.lock.Unlock() + return tf.create(torrentFilePath, res) +} +func (tf *TorrentFiles) create(torrentFilePath string, res []byte) error { + if len(res) == 0 { + return fmt.Errorf("try to write 0 bytes to file: %s", torrentFilePath) + } + f, err := os.Create(torrentFilePath) + if err != nil { + return err + } + defer f.Close() + if _, err = f.Write(res); err != nil { + return err + } + if err = f.Sync(); err != nil { + return err + } + return nil +} + +func (tf *TorrentFiles) CreateTorrentFromMetaInfo(fPath string, mi *metainfo.MetaInfo) error { + tf.lock.Lock() + defer tf.lock.Unlock() + return tf.createTorrentFromMetaInfo(fPath, mi) +} +func (tf *TorrentFiles) createTorrentFromMetaInfo(fPath string, mi *metainfo.MetaInfo) error { + file, err := os.Create(fPath) + if err != nil { + return err + } + defer file.Close() + if err := mi.Write(file); err != nil { + return err + } + file.Sync() + return nil +} + +func (tf *TorrentFiles) LoadByName(fName string) (*torrent.TorrentSpec, error) { + tf.lock.Lock() + defer tf.lock.Unlock() + fPath := filepath.Join(tf.dir, fName+".torrent") + return tf.load(fPath) +} + +func (tf *TorrentFiles) LoadByPath(fPath string) (*torrent.TorrentSpec, error) { + tf.lock.Lock() + defer tf.lock.Unlock() + return tf.load(fPath) +} + +func (tf *TorrentFiles) load(fPath string) (*torrent.TorrentSpec, error) { + mi, err := metainfo.LoadFromFile(fPath) + if err != nil { + return nil, fmt.Errorf("LoadFromFile: %w, file=%s", err, fPath) + } + mi.AnnounceList = Trackers + return torrent.TorrentSpecFromMetaInfoErr(mi) +} diff --git a/erigon-lib/downloader/util.go b/erigon-lib/downloader/util.go index 053e830c851..02437c38e5f 100644 --- a/erigon-lib/downloader/util.go +++ b/erigon-lib/downloader/util.go @@ -19,7 +19,6 @@ package downloader import ( "context" "fmt" - "os" "path/filepath" "regexp" "runtime" @@ -142,36 +141,37 @@ func ensureCantLeaveDir(fName, root string) (string, error) { return fName, nil } -func 
BuildTorrentIfNeed(ctx context.Context, fName, root string) (torrentFilePath string, err error) { +func BuildTorrentIfNeed(ctx context.Context, fName, root string, torrentFiles *TorrentFiles) (err error) { select { case <-ctx.Done(): - return "", ctx.Err() + return ctx.Err() default: } fName, err = ensureCantLeaveDir(fName, root) if err != nil { - return "", err + return err } - fPath := filepath.Join(root, fName) - if dir2.FileExist(fPath + ".torrent") { - return fPath, nil + if torrentFiles.Exists(fName) { + return nil } + + fPath := filepath.Join(root, fName) if !dir2.FileExist(fPath) { - return fPath, nil + return nil } info := &metainfo.Info{PieceLength: downloadercfg.DefaultPieceSize, Name: fName} if err := info.BuildFromFilePath(fPath); err != nil { - return "", fmt.Errorf("createTorrentFileFromSegment: %w", err) + return fmt.Errorf("createTorrentFileFromSegment: %w", err) } info.Name = fName - return fPath + ".torrent", CreateTorrentFileFromInfo(root, info, nil) + return CreateTorrentFileFromInfo(root, info, nil, torrentFiles) } // BuildTorrentFilesIfNeed - create .torrent files from .seg files (big IO) - if .seg files were added manually -func BuildTorrentFilesIfNeed(ctx context.Context, dirs datadir.Dirs) error { +func BuildTorrentFilesIfNeed(ctx context.Context, dirs datadir.Dirs, torrentFiles *TorrentFiles) error { logEvery := time.NewTicker(20 * time.Second) defer logEvery.Stop() @@ -188,7 +188,7 @@ func BuildTorrentFilesIfNeed(ctx context.Context, dirs datadir.Dirs) error { file := file g.Go(func() error { defer i.Add(1) - if _, err := BuildTorrentIfNeed(ctx, file, dirs.Snap); err != nil { + if err := BuildTorrentIfNeed(ctx, file, dirs.Snap, torrentFiles); err != nil { return err } return nil @@ -213,12 +213,11 @@ Loop: return nil } -func CreateTorrentFileIfNotExists(root string, info *metainfo.Info, mi *metainfo.MetaInfo) error { - fPath := filepath.Join(root, info.Name) - if dir2.FileExist(fPath + ".torrent") { +func CreateTorrentFileIfNotExists(root string, info *metainfo.Info, mi *metainfo.MetaInfo, torrentFiles *TorrentFiles) error { + if torrentFiles.Exists(info.Name) { return nil } - if err := CreateTorrentFileFromInfo(root, info, mi); err != nil { + if err := CreateTorrentFileFromInfo(root, info, mi, torrentFiles); err != nil { return err } return nil @@ -241,25 +240,13 @@ func CreateMetaInfo(info *metainfo.Info, mi *metainfo.MetaInfo) (*metainfo.MetaI } return mi, nil } -func CreateTorrentFromMetaInfo(root string, info *metainfo.Info, mi *metainfo.MetaInfo) error { - torrentFileName := filepath.Join(root, info.Name+".torrent") - file, err := os.Create(torrentFileName) - if err != nil { - return err - } - defer file.Close() - if err := mi.Write(file); err != nil { - return err - } - file.Sync() - return nil -} -func CreateTorrentFileFromInfo(root string, info *metainfo.Info, mi *metainfo.MetaInfo) (err error) { +func CreateTorrentFileFromInfo(root string, info *metainfo.Info, mi *metainfo.MetaInfo, torrentFiles *TorrentFiles) (err error) { mi, err = CreateMetaInfo(info, mi) if err != nil { return err } - return CreateTorrentFromMetaInfo(root, info, mi) + fPath := filepath.Join(root, info.Name+".torrent") + return torrentFiles.CreateTorrentFromMetaInfo(fPath, mi) } func AllTorrentPaths(dirs datadir.Dirs) ([]string, error) { @@ -275,7 +262,7 @@ func AllTorrentPaths(dirs datadir.Dirs) ([]string, error) { return files, nil } -func AllTorrentSpecs(dirs datadir.Dirs) (res []*torrent.TorrentSpec, err error) { +func AllTorrentSpecs(dirs datadir.Dirs, torrentFiles 
*TorrentFiles) (res []*torrent.TorrentSpec, err error) { files, err := AllTorrentPaths(dirs) if err != nil { return nil, err } @@ -284,7 +271,7 @@ func AllTorrentSpecs(dirs datadir.Dirs) (res []*torrent.TorrentSpec, err error) if len(fPath) == 0 { continue } - a, err := loadTorrent(fPath) + a, err := torrentFiles.LoadByPath(fPath) if err != nil { return nil, fmt.Errorf("AllTorrentSpecs: %w", err) } @@ -293,42 +280,60 @@ func AllTorrentSpecs(dirs datadir.Dirs) (res []*torrent.TorrentSpec, err error) return res, nil } -func loadTorrent(torrentFilePath string) (*torrent.TorrentSpec, error) { - mi, err := metainfo.LoadFromFile(torrentFilePath) - if err != nil { - return nil, fmt.Errorf("LoadFromFile: %w, file=%s", err, torrentFilePath) - } - mi.AnnounceList = Trackers - return torrent.TorrentSpecFromMetaInfoErr(mi) -} - // addTorrentFile - adds a .torrent file to torrentClient (and checks its hashes); if a .torrent file is // added for the first time - a pieces verification process will start (disk IO heavy) - progress is // kept in `piece completion storage` (surviving reboot). Once it is done - no disk IO is needed again. // No need to call torrent.VerifyData manually -func addTorrentFile(ctx context.Context, ts *torrent.TorrentSpec, torrentClient *torrent.Client, webseeds *WebSeeds) error { +func addTorrentFile(ctx context.Context, ts *torrent.TorrentSpec, torrentClient *torrent.Client, webseeds *WebSeeds) (t *torrent.Torrent, ok bool, err error) { + ts.ChunkSize = downloadercfg.DefaultNetworkChunkSize + ts.DisallowDataDownload = true + ts.DisableInitialPieceCheck = true + //re-try on panic, with 0 ChunkSize (the lib doesn't allow changing this field for existing torrents) + defer func() { + rec := recover() + if rec != nil { + ts.ChunkSize = 0 + t, ok, err = _addTorrentFile(ctx, ts, torrentClient, webseeds) + } + }() + + t, ok, err = _addTorrentFile(ctx, ts, torrentClient, webseeds) + if err != nil { + ts.ChunkSize = 0 + return _addTorrentFile(ctx, ts, torrentClient, webseeds) + } + return t, ok, err +} + +func _addTorrentFile(ctx context.Context, ts *torrent.TorrentSpec, torrentClient *torrent.Client, webseeds *WebSeeds) (t *torrent.Torrent, ok bool, err error) { select { case <-ctx.Done(): - return ctx.Err() + return nil, false, ctx.Err() default: } - wsUrls, ok := webseeds.ByFileName(ts.DisplayName) - if ok { - ts.Webseeds = append(ts.Webseeds, wsUrls...) 
- } - _, ok = torrentClient.Torrent(ts.InfoHash) - if !ok { // can set ChunkSize only for new torrents - ts.ChunkSize = downloadercfg.DefaultNetworkChunkSize - } else { - ts.ChunkSize = 0 + ts.Webseeds, _ = webseeds.ByFileName(ts.DisplayName) + var have bool + t, have = torrentClient.Torrent(ts.InfoHash) + if !have { + t, _, err := torrentClient.AddTorrentSpec(ts) + if err != nil { + return nil, false, fmt.Errorf("addTorrentFile %s: %w", ts.DisplayName, err) + } + return t, true, nil } - ts.DisallowDataDownload = true - _, _, err := torrentClient.AddTorrentSpec(ts) - if err != nil { - return fmt.Errorf("addTorrentFile %s: %w", ts.DisplayName, err) + + select { + case <-t.GotInfo(): + t.AddWebSeeds(ts.Webseeds) + default: + t, _, err = torrentClient.AddTorrentSpec(ts) + if err != nil { + return nil, false, fmt.Errorf("addTorrentFile %s: %w", ts.DisplayName, err) + } } - return nil + + return t, true, nil } func savePeerID(db kv.RwDB, peerID torrent.PeerID) error { @@ -355,21 +360,3 @@ func readPeerID(db kv.RoDB) (peerID []byte, err error) { func IsLocal(path string) bool { return isLocal(path) } - -func saveTorrent(torrentFilePath string, res []byte) error { - if len(res) == 0 { - return fmt.Errorf("try to write 0 bytes to file: %s", torrentFilePath) - } - f, err := os.Create(torrentFilePath) - if err != nil { - return err - } - defer f.Close() - if _, err = f.Write(res); err != nil { - return err - } - if err = f.Sync(); err != nil { - return err - } - return nil -} diff --git a/erigon-lib/downloader/webseed.go b/erigon-lib/downloader/webseed.go index 0c42f17b28d..2c42b29b62b 100644 --- a/erigon-lib/downloader/webseed.go +++ b/erigon-lib/downloader/webseed.go @@ -40,6 +40,8 @@ type WebSeeds struct { logger log.Logger verbosity log.Lvl + + torrentFiles *TorrentFiles } func (d *WebSeeds) Discover(ctx context.Context, s3tokens []string, urls []*url.URL, files []string, rootDir string) { @@ -237,6 +239,7 @@ func (d *WebSeeds) downloadTorrentFilesFromProviders(ctx context.Context, rootDi } var addedNew int e, ctx := errgroup.WithContext(ctx) + e.SetLimit(1024) urlsByName := d.TorrentUrls() //TODO: // - what to do if node already synced? 
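// NOTE: a minimal, runnable sketch (not part of this PR) of the bounded-concurrency
// pattern behind the e.SetLimit(1024) call above: the errgroup caps how many download
// goroutines run at once, so a long list of webseed .torrent files cannot exhaust
// sockets or file handles. fetchAll, fetchOne and the urls slice are hypothetical
// stand-ins, not Erigon APIs.
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func fetchAll(ctx context.Context, urls []string, fetchOne func(context.Context, string) error) error {
	g, ctx := errgroup.WithContext(ctx)
	g.SetLimit(1024) // at most 1024 goroutines in flight; further g.Go calls block until a slot frees
	for _, u := range urls {
		u := u // capture the loop variable for the closure (needed before Go 1.22)
		g.Go(func() error { return fetchOne(ctx, u) })
	}
	return g.Wait() // first non-nil error cancels ctx and is returned
}

func main() {
	urls := []string{"a.torrent", "b.torrent"}
	err := fetchAll(context.Background(), urls, func(ctx context.Context, u string) error {
		fmt.Println("fetched", u)
		return nil
	})
	fmt.Println("done, err =", err)
}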
@@ -261,7 +264,7 @@ func (d *WebSeeds) downloadTorrentFilesFromProviders(ctx context.Context, rootDi continue } d.logger.Log(d.verbosity, "[snapshots] got from webseed", "name", name) - if err := saveTorrent(tPath, res); err != nil { + if err := d.torrentFiles.Create(tPath, res); err != nil { d.logger.Debug("[snapshots] saveTorrent", "err", err) continue } diff --git a/erigon-lib/go.mod b/erigon-lib/go.mod index f041253f649..95e0dd8f5ec 100644 --- a/erigon-lib/go.mod +++ b/erigon-lib/go.mod @@ -4,7 +4,7 @@ go 1.20 require ( github.com/erigontech/mdbx-go v0.27.21 - github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20231210032908-6ff6f4c91c60 + github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240101230756-23fbc6c56a1d github.com/ledgerwatch/interfaces v0.0.0-20231209102305-b17e86fbe07d github.com/ledgerwatch/log/v3 v3.9.0 github.com/ledgerwatch/secp256k1 v1.0.0 diff --git a/erigon-lib/go.sum b/erigon-lib/go.sum index 9cec96b6b6b..005ce3b3ce1 100644 --- a/erigon-lib/go.sum +++ b/erigon-lib/go.sum @@ -291,8 +291,8 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20231210032908-6ff6f4c91c60 h1:bsZ6XWPJkNp1DeVHkaX9/+/Tqg7+r5/IkRPlyc4Ztq4= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20231210032908-6ff6f4c91c60/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240101230756-23fbc6c56a1d h1:rMqDEGLdmVgGdpDmaNp4Do1vc9BtUQ3rjFD9gQBRSx0= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240101230756-23fbc6c56a1d/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/interfaces v0.0.0-20231209102305-b17e86fbe07d h1:7aB9lKmUGAaWt4TzXnGLzJSZkhyuqREMmaao+Gn5Ky0= github.com/ledgerwatch/interfaces v0.0.0-20231209102305-b17e86fbe07d/go.mod h1:ugQv1QllJzBny3cKZKxUrSnykkjkBgm27eQM6dnGAcc= github.com/ledgerwatch/log/v3 v3.9.0 h1:iDwrXe0PVwBC68Dd94YSsHbMgQ3ufsgjzXtFNFVZFRk= diff --git a/erigon-lib/gointerfaces/downloader/downloader_grpc.pb.go b/erigon-lib/gointerfaces/downloader/downloader_grpc.pb.go index d3a0468ff2c..369c9b494c4 100644 --- a/erigon-lib/gointerfaces/downloader/downloader_grpc.pb.go +++ b/erigon-lib/gointerfaces/downloader/downloader_grpc.pb.go @@ -31,7 +31,7 @@ const ( // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type DownloaderClient interface { - // Erigon "download once" - means restart/upgrade will not download files (and will be fast) + // Erigon "download once" - means restart/upgrade/downgrade will not download files (and will be fast) // After "download once" - Erigon will produce and seed new files // Downloader will able: seed new files (already existing on FS), download uncomplete parts of existing files (if Verify found some bad parts) ProhibitNewDownloads(ctx context.Context, in *ProhibitNewDownloadsRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) @@ -101,7 +101,7 @@ func (c *downloaderClient) Stats(ctx context.Context, in *StatsRequest, opts ... 
// All implementations must embed UnimplementedDownloaderServer // for forward compatibility type DownloaderServer interface { - // Erigon "download once" - means restart/upgrade will not download files (and will be fast) + // Erigon "download once" - means restart/upgrade/downgrade will not download files (and will be fast) // After "download once" - Erigon will produce and seed new files // Downloader will able: seed new files (already existing on FS), download uncomplete parts of existing files (if Verify found some bad parts) ProhibitNewDownloads(context.Context, *ProhibitNewDownloadsRequest) (*emptypb.Empty, error) diff --git a/erigon-lib/gointerfaces/sentinel/sentinel.pb.go b/erigon-lib/gointerfaces/sentinel/sentinel.pb.go index 0fc32fe89d8..35477c388e4 100644 --- a/erigon-lib/gointerfaces/sentinel/sentinel.pb.go +++ b/erigon-lib/gointerfaces/sentinel/sentinel.pb.go @@ -21,68 +21,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -type GossipType int32 - -const ( - // Global gossip topics. - GossipType_BeaconBlockGossipType GossipType = 0 - GossipType_AggregateAndProofGossipType GossipType = 1 - GossipType_VoluntaryExitGossipType GossipType = 2 - GossipType_ProposerSlashingGossipType GossipType = 3 - GossipType_AttesterSlashingGossipType GossipType = 4 - GossipType_BlobSidecarType GossipType = 5 - GossipType_BlsToExecutionChangeGossipType GossipType = 6 -) - -// Enum value maps for GossipType. -var ( - GossipType_name = map[int32]string{ - 0: "BeaconBlockGossipType", - 1: "AggregateAndProofGossipType", - 2: "VoluntaryExitGossipType", - 3: "ProposerSlashingGossipType", - 4: "AttesterSlashingGossipType", - 5: "BlobSidecarType", - 6: "BlsToExecutionChangeGossipType", - } - GossipType_value = map[string]int32{ - "BeaconBlockGossipType": 0, - "AggregateAndProofGossipType": 1, - "VoluntaryExitGossipType": 2, - "ProposerSlashingGossipType": 3, - "AttesterSlashingGossipType": 4, - "BlobSidecarType": 5, - "BlsToExecutionChangeGossipType": 6, - } -) - -func (x GossipType) Enum() *GossipType { - p := new(GossipType) - *p = x - return p -} - -func (x GossipType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (GossipType) Descriptor() protoreflect.EnumDescriptor { - return file_p2psentinel_sentinel_proto_enumTypes[0].Descriptor() -} - -func (GossipType) Type() protoreflect.EnumType { - return &file_p2psentinel_sentinel_proto_enumTypes[0] -} - -func (x GossipType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use GossipType.Descriptor instead. 
-func (GossipType) EnumDescriptor() ([]byte, []int) { - return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{0} -} - type EmptyMessage struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -173,10 +111,9 @@ type GossipData struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` // SSZ encoded data - Type GossipType `protobuf:"varint,2,opt,name=type,proto3,enum=sentinel.GossipType" json:"type,omitempty"` - Peer *Peer `protobuf:"bytes,3,opt,name=peer,proto3,oneof" json:"peer,omitempty"` - BlobIndex *uint32 `protobuf:"varint,4,opt,name=blob_index,json=blobIndex,proto3,oneof" json:"blob_index,omitempty"` // Blob identifier for EIP4844 + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` // SSZ encoded data + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Peer *Peer `protobuf:"bytes,3,opt,name=peer,proto3,oneof" json:"peer,omitempty"` } func (x *GossipData) Reset() { @@ -218,11 +155,11 @@ func (x *GossipData) GetData() []byte { return nil } -func (x *GossipData) GetType() GossipType { +func (x *GossipData) GetName() string { if x != nil { - return x.Type + return x.Name } - return GossipType_BeaconBlockGossipType + return "" } func (x *GossipData) GetPeer() *Peer { @@ -232,13 +169,6 @@ func (x *GossipData) GetPeer() *Peer { return nil } -func (x *GossipData) GetBlobIndex() uint32 { - if x != nil && x.BlobIndex != nil { - return *x.BlobIndex - } - return 0 -} - type Status struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -492,92 +422,73 @@ var file_p2psentinel_sentinel_proto_rawDesc = []byte{ 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x0e, 0x0a, 0x0c, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x18, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x70, 0x69, 0x64, 0x22, 0xaf, 0x01, 0x0a, 0x0a, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x44, 0x61, - 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, - 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x27, 0x0a, 0x04, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, - 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, - 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, - 0x62, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x01, 0x52, - 0x09, 0x62, 0x6c, 0x6f, 0x62, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x88, 0x01, 0x01, 0x42, 0x07, 0x0a, - 0x05, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x5f, - 0x69, 0x6e, 0x64, 0x65, 0x78, 0x22, 0xcd, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x6f, 0x72, 0x6b, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x66, 0x6f, 0x72, 0x6b, 0x44, 0x69, 0x67, 0x65, 0x73, - 0x74, 0x12, 0x32, 0x0a, 0x0e, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x72, - 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 
0x79, 0x70, 0x65, - 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0d, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, - 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, - 0x65, 0x64, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, - 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x28, - 0x0a, 0x09, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x08, - 0x68, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x68, 0x65, 0x61, 0x64, - 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x68, 0x65, 0x61, - 0x64, 0x53, 0x6c, 0x6f, 0x74, 0x22, 0x23, 0x0a, 0x09, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x37, 0x0a, 0x0b, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, + 0x70, 0x69, 0x64, 0x22, 0x66, 0x0a, 0x0a, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x44, 0x61, 0x74, + 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x04, 0x70, 0x65, 0x65, + 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, + 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x88, + 0x01, 0x01, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x22, 0xcd, 0x01, 0x0a, 0x06, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x6f, 0x72, 0x6b, 0x5f, 0x64, + 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x66, 0x6f, 0x72, + 0x6b, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x0e, 0x66, 0x69, 0x6e, 0x61, 0x6c, + 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0d, 0x66, 0x69, + 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x66, + 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x45, + 0x70, 0x6f, 0x63, 0x68, 0x12, 0x28, 0x0a, 0x09, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x72, 0x6f, 0x6f, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, + 0x48, 0x32, 0x35, 0x36, 0x52, 0x08, 0x68, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x08, 0x68, 0x65, 0x61, 0x64, 0x53, 0x6c, 0x6f, 0x74, 0x22, 0x23, 0x0a, 0x09, 0x50, + 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, + 0x22, 0x37, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, + 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, + 0x61, 
0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x5c, 0x0a, 0x0c, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, - 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, - 0x70, 0x69, 0x63, 0x22, 0x5c, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, - 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x22, 0x0a, - 0x04, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x73, 0x65, - 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x52, 0x04, 0x70, 0x65, 0x65, - 0x72, 0x2a, 0xde, 0x01, 0x0a, 0x0a, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x19, 0x0a, 0x15, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x47, - 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x00, 0x12, 0x1f, 0x0a, 0x1b, 0x41, - 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, - 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x01, 0x12, 0x1b, 0x0a, 0x17, - 0x56, 0x6f, 0x6c, 0x75, 0x6e, 0x74, 0x61, 0x72, 0x79, 0x45, 0x78, 0x69, 0x74, 0x47, 0x6f, 0x73, - 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x72, 0x6f, - 0x70, 0x6f, 0x73, 0x65, 0x72, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x47, 0x6f, 0x73, - 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x03, 0x12, 0x1e, 0x0a, 0x1a, 0x41, 0x74, 0x74, - 0x65, 0x73, 0x74, 0x65, 0x72, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x47, 0x6f, 0x73, - 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x42, 0x6c, 0x6f, - 0x62, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x54, 0x79, 0x70, 0x65, 0x10, 0x05, 0x12, 0x22, - 0x0a, 0x1e, 0x42, 0x6c, 0x73, 0x54, 0x6f, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, - 0x10, 0x06, 0x32, 0x90, 0x04, 0x0a, 0x08, 0x53, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x12, - 0x41, 0x0a, 0x0f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x47, 0x6f, 0x73, 0x73, - 0x69, 0x70, 0x12, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x14, 0x2e, 0x73, 0x65, 0x6e, - 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x44, 0x61, 0x74, 0x61, - 0x30, 0x01, 0x12, 0x3c, 0x0a, 0x0b, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x15, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, - 0x6e, 0x65, 0x6c, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x61, 0x74, 0x61, - 0x12, 0x35, 0x0a, 0x09, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x10, 0x2e, - 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, - 0x16, 0x2e, 0x73, 0x65, 0x6e, 
0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x50, 0x65, - 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x13, 0x2e, 0x73, 0x65, - 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x12, 0x31, 0x0a, 0x07, 0x42, 0x61, 0x6e, 0x50, 0x65, 0x65, 0x72, 0x12, 0x0e, 0x2e, 0x73, 0x65, - 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x16, 0x2e, 0x73, 0x65, - 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x12, 0x33, 0x0a, 0x09, 0x55, 0x6e, 0x62, 0x61, 0x6e, 0x50, 0x65, 0x65, 0x72, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x12, 0x22, 0x0a, 0x04, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0e, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, + 0x72, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x32, 0x90, 0x04, 0x0a, 0x08, 0x53, 0x65, 0x6e, 0x74, + 0x69, 0x6e, 0x65, 0x6c, 0x12, 0x41, 0x0a, 0x0f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, + 0x65, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, + 0x65, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, + 0x14, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x47, 0x6f, 0x73, 0x73, 0x69, + 0x70, 0x44, 0x61, 0x74, 0x61, 0x30, 0x01, 0x12, 0x3c, 0x0a, 0x0b, 0x53, 0x65, 0x6e, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, + 0x6c, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x1a, 0x16, 0x2e, + 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x35, 0x0a, 0x09, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x10, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, + 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, + 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x1a, 0x13, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x07, 0x42, 0x61, 0x6e, 0x50, 0x65, 0x65, 0x72, 0x12, 0x0e, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x0c, 0x50, 0x65, 0x6e, 0x61, - 0x6c, 0x69, 0x7a, 0x65, 0x50, 0x65, 0x65, 0x72, 0x12, 0x0e, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, - 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, - 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x12, 0x34, 0x0a, 0x0a, 0x52, 0x65, 0x77, 0x61, 0x72, 0x64, 0x50, 0x65, 0x65, 0x72, 0x12, 0x0e, - 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 
0x50, 0x65, 0x65, 0x72, 0x1a, 0x16, - 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3d, 0x0a, 0x0d, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, - 0x68, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x14, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, - 0x65, 0x6c, 0x2e, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x44, 0x61, 0x74, 0x61, 0x1a, 0x16, 0x2e, + 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x33, 0x0a, 0x09, 0x55, 0x6e, 0x62, 0x61, + 0x6e, 0x50, 0x65, 0x65, 0x72, 0x12, 0x0e, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, + 0x2e, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, + 0x0c, 0x50, 0x65, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x50, 0x65, 0x65, 0x72, 0x12, 0x0e, 0x2e, + 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x15, 0x5a, 0x13, 0x2e, 0x2f, 0x73, 0x65, 0x6e, 0x74, 0x69, - 0x6e, 0x65, 0x6c, 0x3b, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x52, 0x65, 0x77, 0x61, 0x72, 0x64, 0x50, + 0x65, 0x65, 0x72, 0x12, 0x0e, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, + 0x65, 0x65, 0x72, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3d, 0x0a, 0x0d, 0x50, + 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x14, 0x2e, 0x73, + 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x44, 0x61, + 0x74, 0x61, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x15, 0x5a, 0x13, 0x2e, 0x2f, + 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x3b, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, + 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -592,48 +503,45 @@ func file_p2psentinel_sentinel_proto_rawDescGZIP() []byte { return file_p2psentinel_sentinel_proto_rawDescData } -var file_p2psentinel_sentinel_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_p2psentinel_sentinel_proto_msgTypes = make([]protoimpl.MessageInfo, 7) var file_p2psentinel_sentinel_proto_goTypes = []interface{}{ - (GossipType)(0), // 0: sentinel.GossipType - (*EmptyMessage)(nil), // 1: sentinel.EmptyMessage - (*Peer)(nil), // 2: sentinel.Peer - (*GossipData)(nil), // 3: sentinel.GossipData - (*Status)(nil), // 4: sentinel.Status - (*PeerCount)(nil), // 5: sentinel.PeerCount - (*RequestData)(nil), // 6: sentinel.RequestData - (*ResponseData)(nil), // 7: sentinel.ResponseData - (*types.H256)(nil), // 8: types.H256 + (*EmptyMessage)(nil), // 0: sentinel.EmptyMessage + (*Peer)(nil), // 1: sentinel.Peer + (*GossipData)(nil), // 2: sentinel.GossipData + (*Status)(nil), // 3: sentinel.Status + (*PeerCount)(nil), // 4: sentinel.PeerCount + (*RequestData)(nil), // 5: sentinel.RequestData + (*ResponseData)(nil), // 6: sentinel.ResponseData + (*types.H256)(nil), // 7: types.H256 } var file_p2psentinel_sentinel_proto_depIdxs = []int32{ - 0, // 0: sentinel.GossipData.type:type_name -> sentinel.GossipType - 2, // 1: 
sentinel.GossipData.peer:type_name -> sentinel.Peer - 8, // 2: sentinel.Status.finalized_root:type_name -> types.H256 - 8, // 3: sentinel.Status.head_root:type_name -> types.H256 - 2, // 4: sentinel.ResponseData.peer:type_name -> sentinel.Peer - 1, // 5: sentinel.Sentinel.SubscribeGossip:input_type -> sentinel.EmptyMessage - 6, // 6: sentinel.Sentinel.SendRequest:input_type -> sentinel.RequestData - 4, // 7: sentinel.Sentinel.SetStatus:input_type -> sentinel.Status - 1, // 8: sentinel.Sentinel.GetPeers:input_type -> sentinel.EmptyMessage - 2, // 9: sentinel.Sentinel.BanPeer:input_type -> sentinel.Peer - 2, // 10: sentinel.Sentinel.UnbanPeer:input_type -> sentinel.Peer - 2, // 11: sentinel.Sentinel.PenalizePeer:input_type -> sentinel.Peer - 2, // 12: sentinel.Sentinel.RewardPeer:input_type -> sentinel.Peer - 3, // 13: sentinel.Sentinel.PublishGossip:input_type -> sentinel.GossipData - 3, // 14: sentinel.Sentinel.SubscribeGossip:output_type -> sentinel.GossipData - 7, // 15: sentinel.Sentinel.SendRequest:output_type -> sentinel.ResponseData - 1, // 16: sentinel.Sentinel.SetStatus:output_type -> sentinel.EmptyMessage - 5, // 17: sentinel.Sentinel.GetPeers:output_type -> sentinel.PeerCount - 1, // 18: sentinel.Sentinel.BanPeer:output_type -> sentinel.EmptyMessage - 1, // 19: sentinel.Sentinel.UnbanPeer:output_type -> sentinel.EmptyMessage - 1, // 20: sentinel.Sentinel.PenalizePeer:output_type -> sentinel.EmptyMessage - 1, // 21: sentinel.Sentinel.RewardPeer:output_type -> sentinel.EmptyMessage - 1, // 22: sentinel.Sentinel.PublishGossip:output_type -> sentinel.EmptyMessage - 14, // [14:23] is the sub-list for method output_type - 5, // [5:14] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name + 1, // 0: sentinel.GossipData.peer:type_name -> sentinel.Peer + 7, // 1: sentinel.Status.finalized_root:type_name -> types.H256 + 7, // 2: sentinel.Status.head_root:type_name -> types.H256 + 1, // 3: sentinel.ResponseData.peer:type_name -> sentinel.Peer + 0, // 4: sentinel.Sentinel.SubscribeGossip:input_type -> sentinel.EmptyMessage + 5, // 5: sentinel.Sentinel.SendRequest:input_type -> sentinel.RequestData + 3, // 6: sentinel.Sentinel.SetStatus:input_type -> sentinel.Status + 0, // 7: sentinel.Sentinel.GetPeers:input_type -> sentinel.EmptyMessage + 1, // 8: sentinel.Sentinel.BanPeer:input_type -> sentinel.Peer + 1, // 9: sentinel.Sentinel.UnbanPeer:input_type -> sentinel.Peer + 1, // 10: sentinel.Sentinel.PenalizePeer:input_type -> sentinel.Peer + 1, // 11: sentinel.Sentinel.RewardPeer:input_type -> sentinel.Peer + 2, // 12: sentinel.Sentinel.PublishGossip:input_type -> sentinel.GossipData + 2, // 13: sentinel.Sentinel.SubscribeGossip:output_type -> sentinel.GossipData + 6, // 14: sentinel.Sentinel.SendRequest:output_type -> sentinel.ResponseData + 0, // 15: sentinel.Sentinel.SetStatus:output_type -> sentinel.EmptyMessage + 4, // 16: sentinel.Sentinel.GetPeers:output_type -> sentinel.PeerCount + 0, // 17: sentinel.Sentinel.BanPeer:output_type -> sentinel.EmptyMessage + 0, // 18: sentinel.Sentinel.UnbanPeer:output_type -> sentinel.EmptyMessage + 0, // 19: sentinel.Sentinel.PenalizePeer:output_type -> sentinel.EmptyMessage + 0, // 20: sentinel.Sentinel.RewardPeer:output_type -> sentinel.EmptyMessage + 0, // 21: sentinel.Sentinel.PublishGossip:output_type -> sentinel.EmptyMessage + 13, // [13:22] is the sub-list for method output_type + 4, // [4:13] is the sub-list 
for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name } func init() { file_p2psentinel_sentinel_proto_init() } @@ -733,14 +641,13 @@ func file_p2psentinel_sentinel_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_p2psentinel_sentinel_proto_rawDesc, - NumEnums: 1, + NumEnums: 0, NumMessages: 7, NumExtensions: 0, NumServices: 1, }, GoTypes: file_p2psentinel_sentinel_proto_goTypes, DependencyIndexes: file_p2psentinel_sentinel_proto_depIdxs, - EnumInfos: file_p2psentinel_sentinel_proto_enumTypes, MessageInfos: file_p2psentinel_sentinel_proto_msgTypes, }.Build() File_p2psentinel_sentinel_proto = out.File diff --git a/erigon-lib/kv/membatchwithdb/memory_mutation.go b/erigon-lib/kv/membatchwithdb/memory_mutation.go index c00f93cb2c6..9e53fec72d7 100644 --- a/erigon-lib/kv/membatchwithdb/memory_mutation.go +++ b/erigon-lib/kv/membatchwithdb/memory_mutation.go @@ -45,8 +45,8 @@ type MemoryMutation struct { // defer batch.Close() // ... some calculations on `batch` // batch.Commit() -func NewMemoryBatch(tx kv.Tx, tmpDir string) *MemoryMutation { - tmpDB := mdbx.NewMDBX(log.New()).InMem(tmpDir).GrowthStep(64 * datasize.MB).MapSize(512 * datasize.GB).MustOpen() +func NewMemoryBatch(tx kv.Tx, tmpDir string, logger log.Logger) *MemoryMutation { + tmpDB := mdbx.NewMDBX(logger).InMem(tmpDir).GrowthStep(64 * datasize.MB).MapSize(512 * datasize.GB).MustOpen() memTx, err := tmpDB.BeginRw(context.Background()) if err != nil { panic(err) diff --git a/erigon-lib/kv/membatchwithdb/memory_mutation_test.go b/erigon-lib/kv/membatchwithdb/memory_mutation_test.go index 6bbc7d00da6..4ad18d8a1f8 100644 --- a/erigon-lib/kv/membatchwithdb/memory_mutation_test.go +++ b/erigon-lib/kv/membatchwithdb/memory_mutation_test.go @@ -21,6 +21,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/log/v3" ) func initializeDbNonDupSort(rwTx kv.RwTx) { @@ -35,7 +36,7 @@ func TestPutAppendHas(t *testing.T) { initializeDbNonDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) require.NoError(t, batch.Append(kv.HashedAccounts, []byte("AAAA"), []byte("value1.5"))) require.Error(t, batch.Append(kv.HashedAccounts, []byte("AAAA"), []byte("value1.3"))) require.NoError(t, batch.Put(kv.HashedAccounts, []byte("AAAA"), []byte("value1.3"))) @@ -64,7 +65,7 @@ func TestLastMiningDB(t *testing.T) { initializeDbNonDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4")) batch.Put(kv.HashedAccounts, []byte("BCAA"), []byte("value5")) @@ -88,7 +89,7 @@ func TestLastMiningMem(t *testing.T) { initializeDbNonDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4")) batch.Put(kv.HashedAccounts, []byte("DCAA"), []byte("value5")) @@ -111,7 +112,7 @@ func TestDeleteMining(t *testing.T) { _, rwTx := memdb.NewTestTx(t) initializeDbNonDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4")) batch.Put(kv.HashedAccounts, []byte("DCAA"), []byte("value5")) batch.Put(kv.HashedAccounts, []byte("FCAA"), []byte("value5")) @@ -137,7 +138,7 @@ func TestFlush(t *testing.T) { _, 
rwTx := memdb.NewTestTx(t) initializeDbNonDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4")) batch.Put(kv.HashedAccounts, []byte("AAAA"), []byte("value5")) batch.Put(kv.HashedAccounts, []byte("FCAA"), []byte("value5")) @@ -158,7 +159,7 @@ func TestForEach(t *testing.T) { initializeDbNonDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) batch.Put(kv.HashedAccounts, []byte("FCAA"), []byte("value5")) require.NoError(t, batch.Flush(rwTx)) @@ -200,7 +201,7 @@ func TestForPrefix(t *testing.T) { initializeDbNonDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) var keys1 []string var values1 []string @@ -239,7 +240,7 @@ func TestForAmount(t *testing.T) { initializeDbNonDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) defer batch.Close() var keys []string @@ -272,7 +273,7 @@ func TestGetOneAfterClearBucket(t *testing.T) { initializeDbNonDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) defer batch.Close() err := batch.ClearBucket(kv.HashedAccounts) @@ -295,7 +296,7 @@ func TestSeekExactAfterClearBucket(t *testing.T) { initializeDbNonDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) defer batch.Close() err := batch.ClearBucket(kv.HashedAccounts) @@ -331,7 +332,7 @@ func TestFirstAfterClearBucket(t *testing.T) { initializeDbNonDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) defer batch.Close() err := batch.ClearBucket(kv.HashedAccounts) @@ -359,7 +360,7 @@ func TestIncReadSequence(t *testing.T) { initializeDbNonDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) defer batch.Close() _, err := batch.IncrementSequence(kv.HashedAccounts, uint64(12)) @@ -382,7 +383,7 @@ func TestNext(t *testing.T) { initializeDbDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) defer batch.Close() batch.Put(kv.AccountChangeSet, []byte("key1"), []byte("value1.2")) @@ -426,7 +427,7 @@ func TestNextNoDup(t *testing.T) { initializeDbDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) defer batch.Close() batch.Put(kv.AccountChangeSet, []byte("key2"), []byte("value2.1")) @@ -453,7 +454,7 @@ func TestDeleteCurrentDuplicates(t *testing.T) { initializeDbDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) defer batch.Close() cursor, err := batch.RwCursorDupSort(kv.AccountChangeSet) @@ -488,7 +489,7 @@ func TestSeekBothRange(t *testing.T) { rwTx.Put(kv.AccountChangeSet, []byte("key1"), []byte("value1.1")) rwTx.Put(kv.AccountChangeSet, []byte("key3"), []byte("value3.3")) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) defer batch.Close() cursor, err := batch.RwCursorDupSort(kv.AccountChangeSet) @@ -522,7 +523,7 @@ func TestAutoConversion(t *testing.T) { initializeDbAutoConversion(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) defer batch.Close() c, err := batch.RwCursor(kv.PlainState) @@ -578,7 +579,7 @@ func TestAutoConversionDelete(t *testing.T) { initializeDbAutoConversion(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) defer batch.Close() 
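	// NewMemoryBatch overlays rwTx with a temporary in-memory MDBX database
	// (changes reach rwTx only on Flush); the log.Root() argument is the logger
	// that NewMemoryBatch now takes explicitly instead of building log.New() itself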
c, err := batch.RwCursor(kv.PlainState) @@ -615,7 +616,7 @@ func TestAutoConversionSeekBothRange(t *testing.T) { initializeDbAutoConversion(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) defer batch.Close() c, err := batch.RwCursorDupSort(kv.PlainState) diff --git a/erigon-lib/kv/tables.go b/erigon-lib/kv/tables.go index 423b7303464..75435eef207 100644 --- a/erigon-lib/kv/tables.go +++ b/erigon-lib/kv/tables.go @@ -461,22 +461,26 @@ const ( LightClientUpdates = "LightClientUpdates" // Beacon historical data // ValidatorIndex => [Field] - ValidatorPublicKeys = "ValidatorPublickeys" + ValidatorPublicKeys = "ValidatorPublickeys" + InvertedValidatorPublicKeys = "InvertedValidatorPublickeys" // ValidatorIndex + Slot => [Field] ValidatorEffectiveBalance = "ValidatorEffectiveBalance" ValidatorSlashings = "ValidatorSlashings" ValidatorBalance = "ValidatorBalance" StaticValidators = "StaticValidators" StateEvents = "StateEvents" + ActiveValidatorIndicies = "ActiveValidatorIndicies" // External data - StateRoot = "StateRoot" - BlockRoot = "BlockRoot" - MinimalBeaconState = "MinimalBeaconState" + StateRoot = "StateRoot" + BlockRoot = "BlockRoot" + // Differentiate data stored per-slot vs per-epoch + SlotData = "SlotData" + EpochData = "EpochData" + // State fields InactivityScores = "InactivityScores" PreviousEpochParticipation = "PreviousEpochParticipation" CurrentEpochParticipation = "CurrentEpochParticipation" - Checkpoints = "Checkpoints" NextSyncCommittee = "NextSyncCommittee" CurrentSyncCommittee = "CurrentSyncCommittee" HistoricalRoots = "HistoricalRoots" @@ -653,6 +657,7 @@ var ChaindataTables = []string{ LastBeaconSnapshot, // State Reconstitution ValidatorPublicKeys, + InvertedValidatorPublicKeys, ValidatorEffectiveBalance, ValidatorBalance, ValidatorSlashings, @@ -661,14 +666,14 @@ var ChaindataTables = []string{ // Other stuff (related to state reconstitution) BlockRoot, StateRoot, - MinimalBeaconState, + SlotData, + EpochData, RandaoMixes, Proposers, StatesProcessingProgress, PreviousEpochParticipation, CurrentEpochParticipation, InactivityScores, - Checkpoints, NextSyncCommittee, CurrentSyncCommittee, HistoricalRoots, @@ -677,6 +682,7 @@ var ChaindataTables = []string{ PreviousEpochAttestations, Eth1DataVotes, IntraRandaoMixes, + ActiveValidatorIndicies, } const ( diff --git a/erigon-lib/recsplit/index.go b/erigon-lib/recsplit/index.go index 277db2d5fdf..c10fa0205d4 100644 --- a/erigon-lib/recsplit/index.go +++ b/erigon-lib/recsplit/index.go @@ -178,6 +178,7 @@ func (idx *Index) ModTime() time.Time { return idx.modTime } func (idx *Index) BaseDataID() uint64 { return idx.baseDataID } func (idx *Index) FilePath() string { return idx.filePath } func (idx *Index) FileName() string { return idx.fileName } +func (idx *Index) IsOpen() bool { return idx != nil && idx.f != nil } func (idx *Index) Close() { if idx == nil { diff --git a/erigon-lib/recsplit/recsplit.go b/erigon-lib/recsplit/recsplit.go index a019ca9b3f9..fc41a824c9b 100644 --- a/erigon-lib/recsplit/recsplit.go +++ b/erigon-lib/recsplit/recsplit.go @@ -556,6 +556,9 @@ func (rs *RecSplit) Build(ctx context.Context) error { if rs.indexF, err = os.Create(rs.tmpFilePath); err != nil { return fmt.Errorf("create index file %s: %w", rs.indexFile, err) } + + rs.logger.Debug("[index] created", "file", rs.tmpFilePath, "fs", rs.indexF) + defer rs.indexF.Close() rs.indexW = bufio.NewWriterSize(rs.indexF, etl.BufIOSize) // Write minimal app-specific dataID in this index file @@ -680,9 +683,12 @@ func 
(rs *RecSplit) Build(ctx context.Context) error { if err = rs.indexF.Close(); err != nil { return err } + if err = os.Rename(rs.tmpFilePath, rs.indexFile); err != nil { + rs.logger.Warn("[index] rename", "file", rs.tmpFilePath, "err", err) return err } + return nil } diff --git a/erigon-lib/sse/README.md b/erigon-lib/sse/README.md deleted file mode 100644 index 6cd1b2090c5..00000000000 --- a/erigon-lib/sse/README.md +++ /dev/null @@ -1,8 +0,0 @@ -## sse - -sse implement server side events also known as eventsource - -see the specification here: https://html.spec.whatwg.org/multipage/server-sent-events.html - - - diff --git a/erigon-lib/sse/conn.go b/erigon-lib/sse/conn.go deleted file mode 100644 index e6a39224ea7..00000000000 --- a/erigon-lib/sse/conn.go +++ /dev/null @@ -1,40 +0,0 @@ -package sse - -import ( - "bufio" - "net/http" - "strings" -) - -// EventSink tracks a event source connection between a client and a server -type EventSink struct { - wr http.ResponseWriter - r *http.Request - bw *bufio.Writer - enc *Encoder - - LastEventId string -} - -func Upgrade(wr http.ResponseWriter, r *http.Request) (*EventSink, error) { - if !strings.EqualFold(r.Header.Get("Content-Type"), "text/event-stream") { - return nil, ErrInvalidContentType - } - o := &EventSink{ - wr: wr, - r: r, - bw: bufio.NewWriter(wr), - } - o.LastEventId = r.Header.Get("Last-Event-ID") - wr.Header().Add("Content-Type", "text/event-stream") - o.enc = NewEncoder(o.bw) - return o, nil -} - -func (e *EventSink) Encode(p *Packet) error { - err := e.enc.Encode(p) - if err != nil { - return err - } - return e.bw.Flush() -} diff --git a/erigon-lib/sse/encoder.go b/erigon-lib/sse/encoder.go deleted file mode 100644 index f1924f10531..00000000000 --- a/erigon-lib/sse/encoder.go +++ /dev/null @@ -1,82 +0,0 @@ -package sse - -import "io" - -// Packet represents an event to send -// the order in this struct is the order that they will be sent. -type Packet struct { - - // as a special case, an empty value of event will not write an event header - Event string - - // additional headers to be added. - // using the reserved headers event, header, data, id is undefined behavior - // note that this is the canonical way to send the "retry" header - Header map[string]string - - // the io.Reader to source the data from - Data io.Reader - - // whether or not to send an id, and if so, what id to send - // a nil id means to not send an id. - // empty string means to simply send the string "id\n" - // otherwise, the id is sent as is - // id is always sent at the end of the packet - ID *string -} - -func ID(x string) *string { - return &x -} - -// Encoder works at a higher level than the encoder. -// it works on the packet level. 
-type Encoder struct { - wr *Writer - - firstWriteDone bool -} - -func NewEncoder(w io.Writer) *Encoder { - wr := NewWriter(w) - return &Encoder{ - wr: wr, - } -} - -func (e *Encoder) Encode(p *Packet) error { - if e.firstWriteDone { - err := e.wr.Next() - if err != nil { - return err - } - } - e.firstWriteDone = true - if len(p.Event) > 0 { - if err := e.wr.Header("event", p.Event); err != nil { - return err - } - } - if p.Header != nil { - for k, v := range p.Header { - if err := e.wr.Header(k, v); err != nil { - return err - } - } - } - if p.Data != nil { - if err := e.wr.WriteData(p.Data); err != nil { - return err - } - } - err := e.wr.Flush() - if err != nil { - return err - } - if p.ID != nil { - if err := e.wr.Header("id", *p.ID); err != nil { - return err - } - } - return nil -} diff --git a/erigon-lib/sse/encoder_test.go b/erigon-lib/sse/encoder_test.go deleted file mode 100644 index 9415f86403b..00000000000 --- a/erigon-lib/sse/encoder_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package sse - -import ( - "bytes" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestEncoderSimple(t *testing.T) { - type testCase struct { - xs []*Packet - w string - } - cases := []testCase{{ - []*Packet{ - {Event: "hello", Data: strings.NewReader("some data")}, - {Data: strings.NewReader("some other data with no event header")}, - }, - "event: hello\ndata: some data\n\ndata: some other data with no event header\n", - }, - { - []*Packet{ - {Event: "hello", Data: strings.NewReader("some \n funky\r\n data\r")}, - {Data: strings.NewReader("some other data with an id"), ID: ID("dogs")}, - }, - "event: hello\ndata: some \ndata: funky\r\ndata: data\r\ndata: some other data with an id\nid: dogs\n", - }, - } - for _, v := range cases { - buf := &bytes.Buffer{} - enc := NewEncoder(buf) - for _, p := range v.xs { - require.NoError(t, enc.Encode(p)) - } - assert.EqualValues(t, v.w, buf.String()) - } -} diff --git a/erigon-lib/sse/errors.go b/erigon-lib/sse/errors.go deleted file mode 100644 index 8bf380295fb..00000000000 --- a/erigon-lib/sse/errors.go +++ /dev/null @@ -1,8 +0,0 @@ -package sse - -import "errors" - -var ( - ErrInvalidUTF8Bytes = errors.New("invalid utf8 bytes") - ErrInvalidContentType = errors.New("invalid content type") -) diff --git a/erigon-lib/sse/writer.go b/erigon-lib/sse/writer.go deleted file mode 100644 index a261d93dbc9..00000000000 --- a/erigon-lib/sse/writer.go +++ /dev/null @@ -1,170 +0,0 @@ -package sse - -import ( - "io" - "unicode/utf8" - //"github.com/segmentio/asm/utf8" -- can switch to this library in the future if needed -) - -type Option func(*Options) - -func OptionValidateUtf8(enable bool) Option { - return func(o *Options) { - o.validateUTF8 = true - } -} - -type Options struct { - validateUTF8 bool -} - -func (e *Options) ValidateUTF8() bool { - return e.validateUTF8 -} - -type writeState struct { - inMessage bool - trailingCarriage bool -} - -// writer is not thread safe. 
it is meant for internal usage -type Writer struct { - raw io.Writer - - es writeState - - w io.Writer - - o Options -} - -func NewWriter(w io.Writer, opts ...Option) *Writer { - o := &Options{} - for _, v := range opts { - v(o) - } - return &Writer{ - raw: w, - w: w, - o: *o, - } -} - -func (e *Writer) writeByte(x byte) error { - _, err := e.w.Write([]byte{x}) - return err -} -func (e *Writer) writeString(s string) (int, error) { - return e.w.Write([]byte(s)) -} - -func (e *Writer) Flush() error { - if e.es.inMessage { - // we are in a message, so write a newline to terminate it, as the user did not - err := e.writeByte('\n') - if err != nil { - return err - } - e.es.inMessage = false - } - // and reset the trailingCarriage state as well - e.es.trailingCarriage = false - return nil -} - -// next should be called at the end of an event. it will call Flush and then write a newline -func (e *Writer) Next() error { - - if err := e.Flush(); err != nil { - return err - } - // we write a newline, indicating now that this is a new event - if err := e.writeByte('\n'); err != nil { - return err - } - return nil -} - -// Event will start writing an event with the name topic to the stream -func (e *Writer) Header(name string, topic string) error { - if topic == "" { - return nil - } - if e.o.ValidateUTF8() { - if !utf8.ValidString(topic) { - return ErrInvalidUTF8Bytes - } - } - if len(topic) > 0 { - if _, err := e.writeString(name + ": "); err != nil { - return err - } - // write the supplied topic - if _, err := e.writeString(topic); err != nil { - return err - } - } - if err := e.writeByte('\n'); err != nil { - return err - } - - return nil -} - -// a convenient wrapper for writing data from io.Reader so that one can easily replay events. -func (e *Writer) WriteData(r io.Reader) (err error) { - if _, err = io.Copy(e, r); err != nil { - return err - } - return -} - -// Write underlying write method for piping data. be careful using this! -func (e *Writer) Write(xs []byte) (n int, err error) { - if e.o.ValidateUTF8() && !utf8.Valid(xs) { - return 0, ErrInvalidUTF8Bytes - } - for _, x := range xs { - // now, see if there was a trailing carriage left over from the last write - // only check and write the data if we are do not have a trailing carriage - if !e.es.trailingCarriage { - e.checkMessage() - } - if e.es.trailingCarriage { - // if there is, see if the character is a newline - if x != '\n' { - // its not a newline, so the trailing carriage was a valid end of message. 
write a new data field - e.es.inMessage = false - e.checkMessage() - } - // in the case that the character is a newline - // we will just write the newline and inMessage=false will be set in the case below - - // in both cases, the trailing carriage is dealt with - e.es.trailingCarriage = false - } - // write the byte no matter what - err = e.writeByte(x) - if err != nil { - return - } - // if success, note that we wrote another byte - n++ - if x == '\n' { - // end message if it's a newline always - e.es.inMessage = false - } else if x == '\r' { - // if x is a carriage return, mark it as trailing carriage - e.es.trailingCarriage = true - e.es.inMessage = false - } - } - return -} - -func (e *Writer) checkMessage() { - if !e.es.inMessage { - e.es.inMessage = true - e.writeString("data: ") - } -} diff --git a/erigon-lib/sse/writer_test.go b/erigon-lib/sse/writer_test.go deleted file mode 100644 index d25c18cad2f..00000000000 --- a/erigon-lib/sse/writer_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package sse - -import ( - "bytes" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestEncoderWrite(t *testing.T) { - type testCase struct { - e string - i string - w string - } - cases := []testCase{{ - "", - "foo bar\nbar foo\nwowwwwza\n", - `data: foo bar -data: bar foo -data: wowwwwza -`}, { - "hello", - "there\nfriend", - `event: hello -data: there -data: friend -`}, - } - - for _, v := range cases { - buf := &bytes.Buffer{} - enc := NewWriter(buf) - err := enc.Header("event", v.e) - require.NoError(t, err) - _, err = enc.Write([]byte(v.i)) - require.NoError(t, err) - require.NoError(t, enc.Flush()) - assert.EqualValues(t, buf.String(), v.w) - } -} - -func TestEncoderWriteData(t *testing.T) { - type testCase struct { - e string - i string - w string - } - cases := []testCase{{ - "", - "foo bar\nbar foo\nwowwwwza\n", - `data: foo bar -data: bar foo -data: wowwwwza -`}, { - "hello", - "there\nfriend", - `event: hello -data: there -data: friend -`}, - } - - for _, v := range cases { - buf := &bytes.Buffer{} - enc := NewWriter(buf) - err := enc.Header("event", v.e) - require.NoError(t, err) - err = enc.WriteData(strings.NewReader(v.i)) - require.NoError(t, err) - require.NoError(t, enc.Flush()) - assert.EqualValues(t, v.w, buf.String()) - } -} diff --git a/erigon-lib/state/aggregator_v3.go b/erigon-lib/state/aggregator_v3.go index af226789c8a..ae7bd5ad31d 100644 --- a/erigon-lib/state/aggregator_v3.go +++ b/erigon-lib/state/aggregator_v3.go @@ -242,6 +242,9 @@ func (a *AggregatorV3) HasBackgroundFilesBuild() bool { return a.ps.Has() } func (a *AggregatorV3) BackgroundProgress() string { return a.ps.String() } func (a *AggregatorV3) Files() (res []string) { + if a == nil { + return res + } a.filesMutationLock.Lock() defer a.filesMutationLock.Unlock() @@ -656,6 +659,9 @@ func (a *AggregatorV3) integrateFiles(sf AggV3StaticFiles, txNumFrom, txNumTo ui } func (a *AggregatorV3) HasNewFrozenFiles() bool { + if a == nil { + return false + } return a.needSaveFilesListInDB.CompareAndSwap(true, false) } diff --git a/erigon-lib/txpool/pool.go b/erigon-lib/txpool/pool.go index 1a4b3b8ad74..69dfb39324f 100644 --- a/erigon-lib/txpool/pool.go +++ b/erigon-lib/txpool/pool.go @@ -1780,7 +1780,7 @@ func MainLoop(ctx context.Context, db kv.RwDB, coreDB kv.RoDB, p *TxPool, newTxs hashSentTo := send.AnnouncePooledTxs(localTxTypes, localTxSizes, localTxHashes, localTxsBroadcastMaxPeers*2) for i := 0; i < localTxHashes.Len(); i++ { hash := localTxHashes.At(i) 
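			// hashSentTo[i], returned by send.AnnouncePooledTxs above, is logged
			// below for the i-th announced local tx hash together with the
			// current pending base fee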
- p.logger.Info("local tx announced", "tx_hash", hex.EncodeToString(hash), "to peer", hashSentTo[i], "baseFee", p.pendingBaseFee.Load()) + p.logger.Info("Local tx announced", "txHash", hex.EncodeToString(hash), "to peer", hashSentTo[i], "baseFee", p.pendingBaseFee.Load()) } // broadcast remote transactions diff --git a/erigon-lib/txpool/txpooluitl/all_components.go b/erigon-lib/txpool/txpooluitl/all_components.go index 8ab31eca862..ffa6fdf0310 100644 --- a/erigon-lib/txpool/txpooluitl/all_components.go +++ b/erigon-lib/txpool/txpooluitl/all_components.go @@ -102,7 +102,7 @@ func SaveChainConfigIfNeed(ctx context.Context, coreDB kv.RoDB, txPoolDB kv.RwDB func AllComponents(ctx context.Context, cfg txpoolcfg.Config, cache kvcache.Cache, newTxs chan types.Announcements, chainDB kv.RoDB, sentryClients []direct.SentryClient, stateChangesClient txpool.StateChangesClient, logger log.Logger) (kv.RwDB, *txpool.TxPool, *txpool.Fetch, *txpool.Send, *txpool.GrpcServer, error) { - opts := mdbx.NewMDBX(log.New()).Label(kv.TxPoolDB).Path(cfg.DBDir). + opts := mdbx.NewMDBX(logger).Label(kv.TxPoolDB).Path(cfg.DBDir). WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg { return kv.TxpoolTablesCfg }). WriteMergeThreshold(3 * 8192). PageSize(uint64(16 * datasize.KB)). diff --git a/eth/backend.go b/eth/backend.go index 90d943e3788..f389a9463c4 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -33,6 +33,7 @@ import ( lru "github.com/hashicorp/golang-lru/arc/v2" "github.com/ledgerwatch/erigon-lib/chain/networkname" + "github.com/ledgerwatch/erigon-lib/chain/snapcfg" "github.com/ledgerwatch/erigon-lib/diagnostics" "github.com/ledgerwatch/erigon-lib/downloader/downloadergrpc" "github.com/ledgerwatch/erigon-lib/kv/kvcfg" @@ -40,6 +41,7 @@ import ( "github.com/ledgerwatch/erigon/cl/cltypes" "github.com/ledgerwatch/erigon/cl/fork" "github.com/ledgerwatch/erigon/cl/persistence" + "github.com/ledgerwatch/erigon/cl/persistence/db_config" "github.com/ledgerwatch/erigon/cl/persistence/format/snapshot_format/getters" clcore "github.com/ledgerwatch/erigon/cl/phase1/core" "github.com/ledgerwatch/erigon/cl/phase1/execution_client" @@ -72,7 +74,7 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/direct" - downloader3 "github.com/ledgerwatch/erigon-lib/downloader" + downloader "github.com/ledgerwatch/erigon-lib/downloader" "github.com/ledgerwatch/erigon-lib/downloader/downloadercfg" proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader" "github.com/ledgerwatch/erigon-lib/gointerfaces/grpcutil" @@ -192,7 +194,7 @@ type Ethereum struct { txPoolGrpcServer txpool_proto.TxpoolServer notifyMiningAboutNewTxs chan struct{} forkValidator *engine_helpers.ForkValidator - downloader *downloader3.Downloader + downloader *downloader.Downloader agg *libstate.AggregatorV3 blockSnapshots *freezeblocks.RoSnapshots @@ -318,9 +320,11 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger logger.Info("Initialised chain configuration", "config", chainConfig, "genesis", genesis.Hash()) + snapshotVersion := snapcfg.KnownCfg(chainConfig.ChainName, 0).Version + // Check if we have an already initialized chain and fall back to // that if so. Otherwise we need to generate a new genesis spec. 
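	// snapshotVersion, resolved above via snapcfg.KnownCfg for this chain, is
	// threaded into setUpBlockReader below so snapshot files are opened with
	// the version the embedded snapshot config expects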
- blockReader, blockWriter, allSnapshots, agg, err := setUpBlockReader(ctx, chainKv, config.Dirs, config.Snapshot, config.HistoryV3, chainConfig.Bor != nil, logger) + blockReader, blockWriter, allSnapshots, agg, err := setUpBlockReader(ctx, chainKv, config.Dirs, snapshotVersion, config.Snapshot, config.HistoryV3, chainConfig.Bor != nil, logger) if err != nil { return nil, err } @@ -629,9 +633,10 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger } // proof-of-work mining mining := stagedsync.New( + config.Sync, stagedsync.MiningStages(backend.sentryCtx, stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miner, *backend.chainConfig, backend.engine, backend.txPoolDB, nil, tmpdir, backend.blockReader), - stagedsync.StageBorHeimdallCfg(backend.chainDB, snapDb, miner, *backend.chainConfig, heimdallClient, backend.blockReader, nil, nil, recents, signatures), + stagedsync.StageBorHeimdallCfg(backend.chainDB, snapDb, miner, *backend.chainConfig, heimdallClient, backend.blockReader, nil, nil, nil, recents, signatures), stagedsync.StageMiningExecCfg(backend.chainDB, miner, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, nil, 0, backend.txPool, backend.txPoolDB, blockReader), stagedsync.StageHashStateCfg(backend.chainDB, dirs, config.HistoryV3), stagedsync.StageTrieCfg(backend.chainDB, false, true, true, tmpdir, blockReader, nil, config.HistoryV3, backend.agg), @@ -649,9 +654,10 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger miningStatePos := stagedsync.NewProposingState(&config.Miner) miningStatePos.MiningConfig.Etherbase = param.SuggestedFeeRecipient proposingSync := stagedsync.New( + config.Sync, stagedsync.MiningStages(backend.sentryCtx, stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miningStatePos, *backend.chainConfig, backend.engine, backend.txPoolDB, param, tmpdir, backend.blockReader), - stagedsync.StageBorHeimdallCfg(backend.chainDB, snapDb, miningStatePos, *backend.chainConfig, heimdallClient, backend.blockReader, nil, nil, recents, signatures), + stagedsync.StageBorHeimdallCfg(backend.chainDB, snapDb, miningStatePos, *backend.chainConfig, heimdallClient, backend.blockReader, nil, nil, nil, recents, signatures), stagedsync.StageMiningExecCfg(backend.chainDB, miningStatePos, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, interrupt, param.PayloadId, backend.txPool, backend.txPoolDB, blockReader), stagedsync.StageHashStateCfg(backend.chainDB, dirs, config.HistoryV3), stagedsync.StageTrieCfg(backend.chainDB, false, true, true, tmpdir, blockReader, nil, config.HistoryV3, backend.agg), @@ -659,7 +665,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger ), stagedsync.MiningUnwindOrder, stagedsync.MiningPruneOrder, logger) // We start the mining step - if err := stages2.MiningStep(ctx, backend.chainDB, proposingSync, tmpdir); err != nil { + if err := stages2.MiningStep(ctx, backend.chainDB, proposingSync, tmpdir, logger); err != nil { return nil, err } block := <-miningStatePos.MiningResultPOSCh @@ -669,9 +675,8 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger // Initialize ethbackend ethBackendRPC := privateapi.NewEthBackendServer(ctx, backend, backend.chainDB, backend.notifications.Events, blockReader, logger, latestBlockBuiltStore) // intiialize engine backend - var engine *execution_client.ExecutionClientDirect - blockRetire := freezeblocks.NewBlockRetire(1, dirs, 
blockReader, blockWriter, backend.chainDB, backend.notifications.Events, logger)
+	blockRetire := freezeblocks.NewBlockRetire(1, dirs, blockReader, blockWriter, backend.chainDB, backend.chainConfig, backend.notifications.Events, logger)

 	miningRPC = privateapi.NewMiningServer(ctx, backend, ethashApi, logger)
@@ -778,7 +783,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
 		}
 	}()

-	if err := backend.StartMining(context.Background(), backend.chainDB, mining, backend.config.Miner, backend.gasPrice, backend.sentriesClient.Hd.QuitPoWMining, tmpdir); err != nil {
+	if err := backend.StartMining(context.Background(), backend.chainDB, mining, backend.config.Miner, backend.gasPrice, backend.sentriesClient.Hd.QuitPoWMining, tmpdir, logger); err != nil {
 		return nil, err
 	}

@@ -788,13 +793,13 @@
 		blockReader, blockRetire, backend.agg, backend.silkworm, backend.forkValidator, heimdallClient, recents, signatures, logger)
 	backend.syncUnwindOrder = stagedsync.DefaultUnwindOrder
 	backend.syncPruneOrder = stagedsync.DefaultPruneOrder
-	backend.stagedSync = stagedsync.New(backend.syncStages, backend.syncUnwindOrder, backend.syncPruneOrder, logger)
+	backend.stagedSync = stagedsync.New(config.Sync, backend.syncStages, backend.syncUnwindOrder, backend.syncPruneOrder, logger)

 	hook := stages2.NewHook(backend.sentryCtx, backend.chainDB, backend.notifications, backend.stagedSync, backend.blockReader, backend.chainConfig, backend.logger, backend.sentriesClient.UpdateHead)

 	checkStateRoot := true
-	pipelineStages := stages2.NewPipelineStages(ctx, chainKv, config, backend.sentriesClient, backend.notifications, backend.downloaderClient, blockReader, blockRetire, backend.agg, backend.silkworm, backend.forkValidator, logger, checkStateRoot)
-	backend.pipelineStagedSync = stagedsync.New(pipelineStages, stagedsync.PipelineUnwindOrder, stagedsync.PipelinePruneOrder, logger)
+	pipelineStages := stages2.NewPipelineStages(ctx, chainKv, config, stack.Config().P2P, backend.sentriesClient, backend.notifications, backend.downloaderClient, blockReader, blockRetire, backend.agg, backend.silkworm, backend.forkValidator, logger, checkStateRoot)
+	backend.pipelineStagedSync = stagedsync.New(config.Sync, pipelineStages, stagedsync.PipelineUnwindOrder, stagedsync.PipelinePruneOrder, logger)
 	backend.eth1ExecutionServer = eth1.NewEthereumExecutionModule(blockReader, chainKv, backend.pipelineStagedSync, backend.forkValidator, chainConfig, assembleBlockPOS, hook, backend.notifications.Accumulator, backend.notifications.StateChangesConsumer, logger, backend.engine, config.HistoryV3)
 	executionRpc := direct.NewExecutionClientDirect(backend.eth1ExecutionServer)
 	engineBackendRPC := engineapi.NewEngineServer(
@@ -809,9 +814,25 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
 		false,
 		config.Miner.EnabledPOS)
 	backend.engineBackendRPC = engineBackendRPC
-	engine, err = execution_client.NewExecutionClientDirect(ctx, eth1_chain_reader.NewChainReaderEth1(ctx, chainConfig, executionRpc, 1000))
-	if err != nil {
-		return nil, err
+
+	var engine execution_client.ExecutionEngine
+
+	// Gnosis has too few blocks on its network for phase2 to work. Once we have proper snapshot automation, it can go back to normal.
+ if config.NetworkID == uint64(clparams.GnosisNetwork) { + // Read the jwt secret + jwtSecret, err := cli.ObtainJWTSecret(&stack.Config().Http, logger) + if err != nil { + return nil, err + } + engine, err = execution_client.NewExecutionClientRPC(ctx, jwtSecret, stack.Config().Http.AuthRpcHTTPListenAddress, stack.Config().Http.AuthRpcPort) + if err != nil { + return nil, err + } + } else { + engine, err = execution_client.NewExecutionClientDirect(ctx, eth1_chain_reader.NewChainReaderEth1(ctx, chainConfig, executionRpc, 1000)) + if err != nil { + return nil, err + } } // If we choose not to run a consensus layer, run our embedded. @@ -831,6 +852,10 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger } rawBeaconBlockChainDb, _ := persistence.AferoRawBeaconBlockChainFromOsPath(beaconCfg, dirs.CaplinHistory) + historyDB, indiciesDB, err := caplin1.OpenCaplinDatabase(ctx, db_config.DefaultDatabaseConfiguration, beaconCfg, rawBeaconBlockChainDb, dirs.CaplinIndexing, engine, false) + if err != nil { + return nil, err + } client, err := service.StartSentinelService(&sentinel.SentinelConfig{ IpAddr: config.LightClientDiscoveryAddr, @@ -840,7 +865,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger NetworkConfig: networkCfg, BeaconConfig: beaconCfg, TmpDir: tmpdir, - }, rawBeaconBlockChainDb, &service.ServerConfig{Network: "tcp", Addr: fmt.Sprintf("%s:%d", config.SentinelAddr, config.SentinelPort)}, creds, &cltypes.Status{ + }, rawBeaconBlockChainDb, indiciesDB, &service.ServerConfig{Network: "tcp", Addr: fmt.Sprintf("%s:%d", config.SentinelAddr, config.SentinelPort)}, creds, &cltypes.Status{ ForkDigest: forkDigest, FinalizedRoot: state.FinalizedCheckpoint().BlockRoot(), FinalizedEpoch: state.FinalizedCheckpoint().Epoch(), @@ -854,8 +879,8 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger backend.sentinel = client go func() { - eth1Getter := getters.NewExecutionSnapshotReader(ctx, blockReader, backend.chainDB) - if err := caplin1.RunCaplinPhase1(ctx, client, engine, beaconCfg, genesisCfg, state, nil, dirs, config.BeaconRouter, eth1Getter, backend.downloaderClient, config.CaplinConfig.Backfilling, config.CaplinConfig.Archive); err != nil { + eth1Getter := getters.NewExecutionSnapshotReader(ctx, beaconCfg, blockReader, backend.chainDB) + if err := caplin1.RunCaplinPhase1(ctx, client, engine, beaconCfg, genesisCfg, state, nil, dirs, snapshotVersion, config.BeaconRouter, eth1Getter, backend.downloaderClient, config.CaplinConfig.Backfilling, config.CaplinConfig.Archive, historyDB, indiciesDB); err != nil { logger.Error("could not start caplin", "err", err) } ctxCancel() @@ -988,7 +1013,7 @@ func (s *Ethereum) shouldPreserve(block *types.Block) bool { //nolint // StartMining starts the miner with the given number of CPU threads. If mining // is already running, this method adjust the number of threads allowed to use // and updates the minimum price required by the transaction pool. 
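// Above, `engine` is now declared as the execution_client.ExecutionEngine
// interface and bound to one of two implementations: a JWT-authenticated RPC
// client on Gnosis, the in-process direct client everywhere else. A sketch of
// the shape of that branch (the interface and types here are illustrative,
// not the real execution_client definitions):
package main

import "fmt"

type ExecutionEngine interface{ Name() string }

type rpcEngine struct{ addr string }

func (e rpcEngine) Name() string { return "rpc@" + e.addr }

type directEngine struct{}

func (directEngine) Name() string { return "direct" }

// newEngine picks the transport per chain but returns the same interface,
// so downstream consumers (caplin, sentinel) stay transport-agnostic.
func newEngine(isGnosis bool, authRPCAddr string) ExecutionEngine {
	if isGnosis {
		return rpcEngine{addr: authRPCAddr}
	}
	return directEngine{}
}

func main() {
	fmt.Println(newEngine(true, "127.0.0.1:8551").Name()) // rpc@127.0.0.1:8551
	fmt.Println(newEngine(false, "").Name())              // direct
}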
-func (s *Ethereum) StartMining(ctx context.Context, db kv.RwDB, mining *stagedsync.Sync, cfg params.MiningConfig, gasPrice *uint256.Int, quitCh chan struct{}, tmpDir string) error {
+func (s *Ethereum) StartMining(ctx context.Context, db kv.RwDB, mining *stagedsync.Sync, cfg params.MiningConfig, gasPrice *uint256.Int, quitCh chan struct{}, tmpDir string, logger log.Logger) error {
 	var borcfg *bor.Bor
 	if b, ok := s.engine.(*bor.Bor); ok {
@@ -1116,7 +1141,7 @@ func (s *Ethereum) StartMining(ctx context.Context, db kv.RwDB, mining *stagedsy
 				works = true
 				hasWork = false
 				mineEvery.Reset(cfg.Recommit)
-				go func() { errc <- stages2.MiningStep(ctx, db, mining, tmpDir) }()
+				go func() { errc <- stages2.MiningStep(ctx, db, mining, tmpDir, logger) }()
 			}
 		}
 	}()
@@ -1175,28 +1200,22 @@ func (s *Ethereum) setUpSnapDownloader(ctx context.Context, downloaderCfg *downl
 	if s.config.Snapshot.NoDownloader {
 		return nil
 	}
-	var discover bool
-	if err := s.chainDB.View(ctx, func(tx kv.Tx) error {
-		p, err := stages.GetStageProgress(tx, stages.Snapshots)
-		if err != nil {
-			return err
-		}
-		discover = p == 0
-		return nil
-	}); err != nil {
-		return err
-	}
 	if s.config.Snapshot.DownloaderAddr != "" {
 		// connect to external Downloader
 		s.downloaderClient, err = downloadergrpc.NewClient(ctx, s.config.Snapshot.DownloaderAddr)
 	} else {
 		// start embedded Downloader
-		s.downloader, err = downloader3.New(ctx, downloaderCfg, s.config.Dirs, s.logger, log.LvlDebug, discover)
+		if uploadFs := s.config.Sync.UploadLocation; len(uploadFs) > 0 {
+			downloaderCfg.AddTorrentsFromDisk = false
+		}
+
+		discover := true
+		s.downloader, err = downloader.New(ctx, downloaderCfg, s.config.Dirs, s.logger, log.LvlDebug, discover)
 		if err != nil {
 			return err
 		}
 		s.downloader.MainLoopInBackground(true)
-		bittorrentServer, err := downloader3.NewGrpcServer(s.downloader)
+		bittorrentServer, err := downloader.NewGrpcServer(s.downloader)
 		if err != nil {
 			return fmt.Errorf("new server: %w", err)
 		}
@@ -1221,14 +1240,21 @@ func (s *Ethereum) setUpSnapDownloader(ctx context.Context, downloaderCfg *downl
 	return err
 }

-func setUpBlockReader(ctx context.Context, db kv.RwDB, dirs datadir.Dirs, snConfig ethconfig.BlocksFreezing, histV3 bool, isBor bool, logger log.Logger) (services.FullBlockReader, *blockio.BlockWriter, *freezeblocks.RoSnapshots, *libstate.AggregatorV3, error) {
-	allSnapshots := freezeblocks.NewRoSnapshots(snConfig, dirs.Snap, logger)
+func setUpBlockReader(ctx context.Context, db kv.RwDB, dirs datadir.Dirs, snapshotVersion uint8, snConfig ethconfig.BlocksFreezing, histV3 bool, isBor bool, logger log.Logger) (services.FullBlockReader, *blockio.BlockWriter, *freezeblocks.RoSnapshots, *libstate.AggregatorV3, error) {
+	allSnapshots := freezeblocks.NewRoSnapshots(snConfig, dirs.Snap, snapshotVersion, logger)
+
 	var allBorSnapshots *freezeblocks.BorRoSnapshots
 	if isBor {
-		allBorSnapshots = freezeblocks.NewBorRoSnapshots(snConfig, dirs.Snap, logger)
+		allBorSnapshots = freezeblocks.NewBorRoSnapshots(snConfig, dirs.Snap, snapshotVersion, logger)
 	}
+
 	var err error
-	if !snConfig.NoDownloader {
+	if snConfig.NoDownloader {
+		allSnapshots.ReopenFolder()
+		if isBor {
+			allBorSnapshots.ReopenFolder()
+		}
+	} else {
 		allSnapshots.OptimisticalyReopenWithDB(db)
 		if isBor {
 			allBorSnapshots.OptimisticalyReopenWithDB(db)
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index f103987ce1b..61274a41939 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -42,6 +42,7 @@ import (
 	"github.com/ledgerwatch/erigon/eth/gasprice/gaspricecfg"
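// setUpSnapDownloader above drops the stage-progress probe and always passes
// discover=true, while uploader mode (Sync.UploadLocation set) switches off
// AddTorrentsFromDisk so stale local torrents are not re-seeded. A sketch of
// that decision (the struct is abbreviated; only AddTorrentsFromDisk comes
// from the real downloadercfg):
package main

import "fmt"

type downloaderCfg struct {
	AddTorrentsFromDisk bool
}

func configure(uploadLocation string) (downloaderCfg, bool) {
	cfg := downloaderCfg{AddTorrentsFromDisk: true}
	if len(uploadLocation) > 0 {
		// an uploading node produces fresh snapshot files; anything already
		// on disk must not be advertised to peers
		cfg.AddTorrentsFromDisk = false
	}
	discover := true // peer discovery is now unconditional
	return cfg, discover
}

func main() {
	cfg, discover := configure("s3://snapshots-bucket") // hypothetical location
	fmt.Println(cfg.AddTorrentsFromDisk, discover)      // false true
}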
"github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/rpc" ) // AggregationStep number of transactions in smallest static file @@ -77,6 +78,7 @@ var Defaults = Config{ ReconWorkerCount: estimate.ReconstituteState.Workers(), BodyCacheLimit: 256 * 1024 * 1024, BodyDownloadTimeoutSeconds: 2, + PruneLimit: 100, }, Ethash: ethashcfg.Config{ CachesInMem: 2, @@ -99,7 +101,7 @@ var Defaults = Config{ ImportMode: false, Snapshot: BlocksFreezing{ - Enabled: false, + Enabled: true, KeepBlocks: false, Produce: true, }, @@ -269,6 +271,13 @@ type Sync struct { BodyCacheLimit datasize.ByteSize BodyDownloadTimeoutSeconds int // TODO: change to duration + PruneLimit int //the maxumum records to delete from the DB during pruning + BreakAfterStage string + LoopBlockLimit uint + + UploadLocation string + UploadFrom rpc.BlockNumber + FrozenBlockLimit uint64 } // Chains where snapshots are enabled by default diff --git a/eth/stagedsync/default_stages.go b/eth/stagedsync/default_stages.go index e4b52eefdb4..a882729abdc 100644 --- a/eth/stagedsync/default_stages.go +++ b/eth/stagedsync/default_stages.go @@ -3,6 +3,7 @@ package stagedsync import ( "context" + "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" @@ -39,7 +40,7 @@ func DefaultStages(ctx context.Context, return nil }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { - return SnapshotsPrune(p, firstCycle, snapshots, ctx, tx) + return SnapshotsPrune(p, firstCycle, snapshots, ctx, tx, logger) }, }, { @@ -116,6 +117,7 @@ func DefaultStages(ctx context.Context, { ID: stages.Execution, Description: "Execute blocks w/o hash checks", + Disabled: dbg.StagesOnlyBlocks, Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { return SpawnExecuteBlocksStage(s, u, tx, 0, ctx, exec, firstCycle, logger) }, @@ -129,7 +131,7 @@ func DefaultStages(ctx context.Context, { ID: stages.HashState, Description: "Hash the key in the state", - Disabled: bodies.historyV3 && ethconfig.EnableHistoryV4InTest, + Disabled: bodies.historyV3 || ethconfig.EnableHistoryV4InTest || dbg.StagesOnlyBlocks, Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { return SpawnHashStateStage(s, tx, hashState, ctx, logger) }, @@ -143,7 +145,7 @@ func DefaultStages(ctx context.Context, { ID: stages.IntermediateHashes, Description: "Generate intermediate hashes and computing state root", - Disabled: bodies.historyV3 && ethconfig.EnableHistoryV4InTest, + Disabled: bodies.historyV3 || ethconfig.EnableHistoryV4InTest || dbg.StagesOnlyBlocks, Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { if exec.chainConfig.IsPrague(0) { _, err := SpawnVerkleTrie(s, u, tx, trieCfg, ctx, logger) @@ -166,7 +168,7 @@ func DefaultStages(ctx context.Context, ID: stages.CallTraces, Description: "Generate call traces index", DisabledDescription: "Work In Progress", - Disabled: bodies.historyV3, + Disabled: bodies.historyV3 || dbg.StagesOnlyBlocks, Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { return SpawnCallTraces(s, tx, callTraces, ctx, logger) }, @@ -180,7 +182,7 @@ func DefaultStages(ctx context.Context, { ID: 
stages.AccountHistoryIndex, Description: "Generate account history index", - Disabled: bodies.historyV3, + Disabled: bodies.historyV3 || dbg.StagesOnlyBlocks, Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { return SpawnAccountHistoryIndex(s, tx, history, ctx, logger) }, @@ -194,7 +196,7 @@ func DefaultStages(ctx context.Context, { ID: stages.StorageHistoryIndex, Description: "Generate storage history index", - Disabled: bodies.historyV3, + Disabled: bodies.historyV3 || dbg.StagesOnlyBlocks, Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { return SpawnStorageHistoryIndex(s, tx, history, ctx, logger) }, @@ -208,7 +210,7 @@ func DefaultStages(ctx context.Context, { ID: stages.LogIndex, Description: "Generate receipt logs index", - Disabled: bodies.historyV3, + Disabled: bodies.historyV3 || dbg.StagesOnlyBlocks, Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { return SpawnLogIndex(s, tx, logIndex, ctx, 0, logger) }, @@ -222,6 +224,7 @@ func DefaultStages(ctx context.Context, { ID: stages.TxLookup, Description: "Generate tx lookup index", + Disabled: dbg.StagesOnlyBlocks, Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { return SpawnTxLookup(s, tx, 0 /* toBlock */, txLookup, ctx, logger) }, @@ -263,7 +266,7 @@ func PipelineStages(ctx context.Context, snapshots SnapshotsCfg, blockHashCfg Bl return nil }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { - return SnapshotsPrune(p, firstCycle, snapshots, ctx, tx) + return SnapshotsPrune(p, firstCycle, snapshots, ctx, tx, logger) }, }, { @@ -427,6 +430,215 @@ func PipelineStages(ctx context.Context, snapshots SnapshotsCfg, blockHashCfg Bl } } +// when uploading - potentially from zero we need to include headers and bodies stages otherwise we won't recover the POW portion of the chain +func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers HeadersCfg, blockHashCfg BlockHashesCfg, senders SendersCfg, bodies BodiesCfg, exec ExecuteBlockCfg, hashState HashStateCfg, trieCfg TrieCfg, history HistoryCfg, logIndex LogIndexCfg, callTraces CallTracesCfg, txLookup TxLookupCfg, finish FinishCfg, test bool) []*Stage { + return []*Stage{ + { + ID: stages.Snapshots, + Description: "Download snapshots", + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { + if badBlockUnwind { + return nil + } + return SpawnStageSnapshots(s, ctx, tx, snapshots, firstCycle, logger) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { + return nil + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return SnapshotsPrune(p, firstCycle, snapshots, ctx, tx, logger) + }, + }, + { + ID: stages.Headers, + Description: "Download headers", + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { + if badBlockUnwind { + return nil + } + return SpawnStageHeaders(s, u, ctx, tx, headers, firstCycle, test, logger) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { + return HeadersUnwind(u, s, tx, headers, test) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) 
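// dbg.StagesOnlyBlocks now disables every state-building stage above, leaving
// only header/body download active. The dbg package pattern is a process-wide
// flag read once at startup; a sketch under the assumption that it comes from
// an environment variable (the variable name here is illustrative, the real
// one is defined in erigon-lib/common/dbg):
package main

import (
	"fmt"
	"os"
)

// stagesOnlyBlocks mimics dbg.StagesOnlyBlocks: evaluated once, then used to
// gate stage construction everywhere.
var stagesOnlyBlocks = os.Getenv("STAGES_ONLY_BLOCKS") == "true"

type Stage struct {
	ID       string
	Disabled bool
}

func main() {
	exec := Stage{ID: "Execution", Disabled: stagesOnlyBlocks}
	fmt.Printf("%+v\n", exec)
}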
error { + return nil + }, + }, + { + ID: stages.BlockHashes, + Description: "Write block hashes", + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { + return SpawnBlockHashStage(s, tx, blockHashCfg, ctx, logger) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { + return UnwindBlockHashStage(u, tx, blockHashCfg, ctx) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return PruneBlockHashStage(p, tx, blockHashCfg, ctx) + }, + }, + { + ID: stages.Bodies, + Description: "Download block bodies", + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { + return BodiesForward(s, u, ctx, tx, bodies, test, firstCycle, logger) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { + return UnwindBodiesStage(u, tx, bodies, ctx) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return nil + }, + }, + { + ID: stages.Senders, + Description: "Recover senders from tx signatures", + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { + return SpawnRecoverSendersStage(senders, s, u, tx, 0, ctx, logger) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { + return UnwindSendersStage(u, tx, senders, ctx) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return PruneSendersStage(p, tx, senders, ctx) + }, + }, + { + ID: stages.Execution, + Description: "Execute blocks w/o hash checks", + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { + return SpawnExecuteBlocksStage(s, u, tx, 0, ctx, exec, firstCycle, logger) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { + return UnwindExecutionStage(u, s, tx, ctx, exec, firstCycle, logger) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return PruneExecutionStage(p, tx, exec, ctx, firstCycle) + }, + }, + { + ID: stages.HashState, + Description: "Hash the key in the state", + Disabled: exec.historyV3 && ethconfig.EnableHistoryV4InTest, + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { + return SpawnHashStateStage(s, tx, hashState, ctx, logger) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { + return UnwindHashStateStage(u, s, tx, hashState, ctx, logger) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return PruneHashStateStage(p, tx, hashState, ctx) + }, + }, + { + ID: stages.IntermediateHashes, + Description: "Generate intermediate hashes and computing state root", + Disabled: exec.historyV3 && ethconfig.EnableHistoryV4InTest, + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { + if exec.chainConfig.IsPrague(0) { + _, err := SpawnVerkleTrie(s, u, tx, trieCfg, ctx, logger) + return err + } + _, err := SpawnIntermediateHashesStage(s, u, tx, trieCfg, ctx, logger) + return err + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { + if 
exec.chainConfig.IsPrague(0) { + return UnwindVerkleTrie(u, s, tx, trieCfg, ctx, logger) + } + return UnwindIntermediateHashesStage(u, s, tx, trieCfg, ctx, logger) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return PruneIntermediateHashesStage(p, tx, trieCfg, ctx) + }, + }, + { + ID: stages.CallTraces, + Description: "Generate call traces index", + DisabledDescription: "Work In Progress", + Disabled: exec.historyV3, + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { + return SpawnCallTraces(s, tx, callTraces, ctx, logger) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { + return UnwindCallTraces(u, s, tx, callTraces, ctx, logger) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return PruneCallTraces(p, tx, callTraces, ctx, logger) + }, + }, + { + ID: stages.AccountHistoryIndex, + Description: "Generate account history index", + Disabled: exec.historyV3, + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { + return SpawnAccountHistoryIndex(s, tx, history, ctx, logger) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { + return UnwindAccountHistoryIndex(u, s, tx, history, ctx) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return PruneAccountHistoryIndex(p, tx, history, ctx, logger) + }, + }, + { + ID: stages.StorageHistoryIndex, + Description: "Generate storage history index", + Disabled: exec.historyV3, + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { + return SpawnStorageHistoryIndex(s, tx, history, ctx, logger) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { + return UnwindStorageHistoryIndex(u, s, tx, history, ctx) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return PruneStorageHistoryIndex(p, tx, history, ctx, logger) + }, + }, + { + ID: stages.LogIndex, + Description: "Generate receipt logs index", + Disabled: exec.historyV3, + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { + return SpawnLogIndex(s, tx, logIndex, ctx, 0, logger) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { + return UnwindLogIndex(u, s, tx, logIndex, ctx) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return PruneLogIndex(p, tx, logIndex, ctx, logger) + }, + }, + { + ID: stages.TxLookup, + Description: "Generate tx lookup index", + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { + return SpawnTxLookup(s, tx, 0 /* toBlock */, txLookup, ctx, logger) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { + return UnwindTxLookup(u, s, tx, txLookup, ctx, logger) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return PruneTxLookup(p, tx, txLookup, ctx, firstCycle, logger) + }, + }, + { + ID: stages.Finish, + Description: "Final: update current block for the RPC API", + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, 
_ Unwinder, tx kv.RwTx, logger log.Logger) error { + return FinishForward(s, tx, finish, firstCycle) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { + return UnwindFinish(u, tx, finish, ctx) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return PruneFinish(p, tx, finish, ctx) + }, + }, + } +} + // StateStages are all stages necessary for basic unwind and stage computation, it is primarily used to process side forks and memory execution. func StateStages(ctx context.Context, headers HeadersCfg, bodies BodiesCfg, blockHashCfg BlockHashesCfg, senders SendersCfg, exec ExecuteBlockCfg, hashState HashStateCfg, trieCfg TrieCfg) []*Stage { return []*Stage{ diff --git a/eth/stagedsync/exec3.go b/eth/stagedsync/exec3.go index 3191ccf0b5f..41513721d95 100644 --- a/eth/stagedsync/exec3.go +++ b/eth/stagedsync/exec3.go @@ -29,7 +29,6 @@ import ( "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" "github.com/ledgerwatch/erigon-lib/metrics" libstate "github.com/ledgerwatch/erigon-lib/state" - state2 "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/cmd/state/exec22" "github.com/ledgerwatch/erigon/cmd/state/exec3" "github.com/ledgerwatch/erigon/common/math" @@ -760,7 +759,7 @@ func blockWithSenders(db kv.RoDB, tx kv.Tx, blockReader services.BlockReader, bl return blockReader.BlockByNumber(context.Background(), tx, blockNum) } -func processResultQueue(in *exec22.QueueWithRetry, rws *exec22.ResultsQueue, outputTxNumIn uint64, rs *state.StateV3, agg *state2.AggregatorV3, applyTx kv.Tx, backPressure chan struct{}, applyWorker *exec3.Worker, canRetry, forceStopAtBlockEnd bool) (outputTxNum uint64, conflicts, triggers int, processedBlockNum uint64, stopedAtBlockEnd bool, err error) { +func processResultQueue(in *exec22.QueueWithRetry, rws *exec22.ResultsQueue, outputTxNumIn uint64, rs *state.StateV3, agg *libstate.AggregatorV3, applyTx kv.Tx, backPressure chan struct{}, applyWorker *exec3.Worker, canRetry, forceStopAtBlockEnd bool) (outputTxNum uint64, conflicts, triggers int, processedBlockNum uint64, stopedAtBlockEnd bool, err error) { rwsIt := rws.Iter() defer rwsIt.Close() @@ -1044,7 +1043,7 @@ func reconstituteStep(last bool, return err } if b == nil { - return fmt.Errorf("could not find block %d\n", bn) + return fmt.Errorf("could not find block %d", bn) } txs := b.Transactions() header := b.HeaderNoCopy() @@ -1334,7 +1333,7 @@ func safeCloseTxTaskCh(ch chan *exec22.TxTask) { func ReconstituteState(ctx context.Context, s *StageState, dirs datadir.Dirs, workerCount int, batchSize datasize.ByteSize, chainDb kv.RwDB, blockReader services.FullBlockReader, - logger log.Logger, agg *state2.AggregatorV3, engine consensus.Engine, + logger log.Logger, agg *libstate.AggregatorV3, engine consensus.Engine, chainConfig *chain.Config, genesis *types.Genesis) (err error) { startTime := time.Now() defer agg.EnableMadvNormal().DisableReadAhead() diff --git a/eth/stagedsync/stage_bodies.go b/eth/stagedsync/stage_bodies.go index bf2feb97c9b..62811264323 100644 --- a/eth/stagedsync/stage_bodies.go +++ b/eth/stagedsync/stage_bodies.go @@ -35,6 +35,7 @@ type BodiesCfg struct { blockReader services.FullBlockReader blockWriter *blockio.BlockWriter historyV3 bool + loopBreakCheck func(int) bool } func StageBodiesCfg(db kv.RwDB, bd *bodydownload.BodyDownload, @@ -43,8 +44,12 @@ func StageBodiesCfg(db kv.RwDB, bd *bodydownload.BodyDownload, chanConfig chain.Config, blockReader services.FullBlockReader, historyV3 
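// Each entry in UploaderPipelineStages above wires the same callback triple.
// A condensed sketch of that contract (signatures drastically simplified from
// the real Stage type):
package main

import "fmt"

type Stage struct {
	ID      string
	Forward func() error // advance this stage's progress
	Unwind  func() error // roll progress back after a reorg or bad block
	Prune   func() error // trim history the node no longer needs to keep
}

// runForward drives all stages forward in order, as the sync loop does.
func runForward(stages []Stage) error {
	for _, s := range stages {
		if s.Forward == nil {
			continue
		}
		if err := s.Forward(); err != nil {
			return fmt.Errorf("stage %s: %w", s.ID, err)
		}
	}
	return nil
}

func main() {
	noop := func() error { return nil }
	err := runForward([]Stage{
		{ID: "Headers", Forward: noop, Unwind: noop, Prune: noop},
		{ID: "Bodies", Forward: noop, Unwind: noop, Prune: noop},
	})
	fmt.Println(err) // <nil>
}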
bool, - blockWriter *blockio.BlockWriter) BodiesCfg { - return BodiesCfg{db: db, bd: bd, bodyReqSend: bodyReqSend, penalise: penalise, blockPropagator: blockPropagator, timeout: timeout, chanConfig: chanConfig, blockReader: blockReader, historyV3: historyV3, blockWriter: blockWriter} + blockWriter *blockio.BlockWriter, + loopBreakCheck func(int) bool) BodiesCfg { + return BodiesCfg{ + db: db, bd: bd, bodyReqSend: bodyReqSend, penalise: penalise, blockPropagator: blockPropagator, + timeout: timeout, chanConfig: chanConfig, blockReader: blockReader, + historyV3: historyV3, blockWriter: blockWriter, loopBreakCheck: loopBreakCheck} } // BodiesForward progresses Bodies stage in the forward direction @@ -59,6 +64,9 @@ func BodiesForward( logger log.Logger, ) error { var doUpdate bool + + startTime := time.Now() + if s.BlockNumber < cfg.blockReader.FrozenBlocks() { s.BlockNumber = cfg.blockReader.FrozenBlocks() doUpdate = true @@ -221,6 +229,10 @@ func BodiesForward( } } cfg.bd.AdvanceLow() + + if cfg.loopBreakCheck != nil && cfg.loopBreakCheck(int(i)) { + return true, nil + } } d5 += time.Since(start) @@ -282,7 +294,10 @@ func BodiesForward( return libcommon.ErrStopped } if bodyProgress > s.BlockNumber+16 { - logger.Info(fmt.Sprintf("[%s] Processed", logPrefix), "highest", bodyProgress) + blocks := bodyProgress - s.BlockNumber + secs := time.Since(startTime).Seconds() + logger.Info(fmt.Sprintf("[%s] Processed", logPrefix), "highest", bodyProgress, + "blocks", blocks, "in", secs, "blk/sec", uint64(float64(blocks)/secs)) } return nil } @@ -304,6 +319,7 @@ func logDownloadingBodies(logPrefix string, committed, remaining uint64, totalDe "wasted/sec", libcommon.ByteCount(uint64(wastedSpeed)), "remaining", remaining, "delivered", totalDelivered, + "blk/sec", totalDelivered/uint64(logInterval/time.Second), "cache", libcommon.ByteCount(uint64(bodyCacheSize)), "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), diff --git a/eth/stagedsync/stage_bor_heimdall.go b/eth/stagedsync/stage_bor_heimdall.go index 41c5645b6d0..6a36a553059 100644 --- a/eth/stagedsync/stage_bor_heimdall.go +++ b/eth/stagedsync/stage_bor_heimdall.go @@ -5,6 +5,7 @@ import ( "context" "encoding/binary" "encoding/json" + "errors" "fmt" "math/big" "sort" @@ -12,6 +13,9 @@ import ( "time" lru "github.com/hashicorp/golang-lru/arc/v2" + "github.com/ledgerwatch/log/v3" + "golang.org/x/sync/errgroup" + "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" libcommon "github.com/ledgerwatch/erigon-lib/common" @@ -32,13 +36,9 @@ import ( "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" - "github.com/ledgerwatch/log/v3" - "golang.org/x/sync/errgroup" ) const ( - spanLength = 6400 // Number of blocks in a span - zerothSpanEnd = 255 // End block of 0th span inmemorySnapshots = 128 // Number of recent vote snapshots to keep in memory inmemorySignatures = 4096 // Number of recent block signatures to keep in memory snapshotPersistInterval = 1024 // Number of blocks after which to persist the vote snapshot to the database @@ -46,6 +46,11 @@ const ( extraSeal = 65 // Fixed number of extra-data suffix bytes reserved for signer seal ) +var ( + ErrHeaderValidatorsLengthMismatch = errors.New("header validators length mismatch") + ErrHeaderValidatorsBytesMismatch = errors.New("header validators bytes mismatch") +) + type BorHeimdallCfg struct { db kv.RwDB snapDb kv.RwDB // Database to store and retrieve snapshot 
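// loopBreakCheck, threaded through BodiesCfg above (and SendersCfg and
// BorHeimdallCfg below), lets the caller cap how much work a single stage
// invocation performs before committing and yielding. A generic sketch of
// the shape, not the erigon stage code:
package main

import "fmt"

// processBlocks runs until done or until check(progressed) asks it to stop.
func processBlocks(from, to uint64, check func(progressed int) bool) uint64 {
	n := from
	for ; n < to; n++ {
		// ... per-block stage work would happen here ...
		if check != nil && check(int(n-from)) {
			break
		}
	}
	return n
}

func main() {
	limit := 1000
	stopAt := processBlocks(0, 1_000_000, func(p int) bool { return p >= limit })
	fmt.Println(stopAt) // 1000: the stage yields, commits, and resumes later
}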
checkpoints @@ -56,6 +61,7 @@ type BorHeimdallCfg struct { hd *headerdownload.HeaderDownload penalize func(context.Context, []headerdownload.PenaltyItem) stateReceiverABI abi.ABI + loopBreakCheck func(int) bool recents *lru.ARCCache[libcommon.Hash, *bor.Snapshot] signatures *lru.ARCCache[libcommon.Hash, libcommon.Address] } @@ -69,6 +75,7 @@ func StageBorHeimdallCfg( blockReader services.FullBlockReader, hd *headerdownload.HeaderDownload, penalize func(context.Context, []headerdownload.PenaltyItem), + loopBreakCheck func(int) bool, recents *lru.ARCCache[libcommon.Hash, *bor.Snapshot], signatures *lru.ARCCache[libcommon.Hash, libcommon.Address], ) BorHeimdallCfg { @@ -82,6 +89,7 @@ func StageBorHeimdallCfg( hd: hd, penalize: penalize, stateReceiverABI: contract.StateReceiver(), + loopBreakCheck: loopBreakCheck, recents: recents, signatures: signatures, } @@ -201,17 +209,25 @@ func BorHeimdallForward( if err != nil { return err } - var nextSpanId uint64 + var lastSpanId uint64 if k != nil { - nextSpanId = binary.BigEndian.Uint64(k) + 1 + lastSpanId = binary.BigEndian.Uint64(k) } snapshotLastSpanId := cfg.blockReader.(LastFrozen).LastFrozenSpanID() - if snapshotLastSpanId+1 > nextSpanId { - nextSpanId = snapshotLastSpanId + 1 + if snapshotLastSpanId > lastSpanId { + lastSpanId = snapshotLastSpanId + } + var nextSpanId uint64 + if lastSpanId > 0 { + nextSpanId = lastSpanId + 1 } var endSpanID uint64 - if headNumber > zerothSpanEnd { - endSpanID = 2 + (headNumber-zerothSpanEnd)/spanLength + if span.IDAt(headNumber) > 0 { + endSpanID = span.IDAt(headNumber + 1) + } + + if span.BlockInLastSprintOfSpan(headNumber, cfg.chainConfig.Bor) { + endSpanID++ } lastBlockNum := s.BlockNumber @@ -231,7 +247,6 @@ func BorHeimdallForward( var blockNum uint64 var fetchTime time.Duration var eventRecords int - var lastSpanId uint64 logTimer := time.NewTicker(logInterval) defer logTimer.Stop() @@ -260,7 +275,7 @@ func BorHeimdallForward( return err } if header == nil { - return fmt.Errorf("["+s.LogPrefix()+"] header not found: %d", blockNum) + return fmt.Errorf("header not found: %d", blockNum) } // Whitelist service is called to check if the bor chain is @@ -272,12 +287,20 @@ func BorHeimdallForward( {Penalty: headerdownload.BadBlockPenalty, PeerID: cfg.hd.SourcePeerId(header.Hash())}}) dataflow.HeaderDownloadStates.AddChange(blockNum, dataflow.HeaderInvalidated) s.state.UnwindTo(blockNum-1, ForkReset(header.Hash())) - return fmt.Errorf("["+s.LogPrefix()+"] verification failed for header %d: %x", blockNum, header.Hash()) + return fmt.Errorf("verification failed for header %d: %x", blockNum, header.Hash()) + } + } + + sprintLength := cfg.chainConfig.Bor.CalculateSprint(blockNum) + spanID := span.IDAt(blockNum) + if (spanID > 0) && ((blockNum+1)%sprintLength == 0) { + if err = checkHeaderExtraData(u, ctx, chain, blockNum, header, cfg.chainConfig.Bor); err != nil { + return err } } } - if blockNum%cfg.chainConfig.Bor.CalculateSprint(blockNum) == 0 { + if blockNum > 0 && blockNum%cfg.chainConfig.Bor.CalculateSprint(blockNum) == 0 { var callTime time.Duration var records int if lastEventId, records, callTime, err = fetchAndWriteBorEvents(ctx, cfg.blockReader, cfg.chainConfig.Bor, header, lastEventId, cfg.chainConfig.ChainID.String(), tx, cfg.heimdallClient, cfg.stateReceiverABI, s.LogPrefix(), logger); err != nil { @@ -291,30 +314,28 @@ func BorHeimdallForward( var snap *bor.Snapshot if header != nil { - snap = loadSnapshot(blockNum, header.Hash(), cfg.chainConfig.Bor, recents, signatures, cfg.snapDb, logger) + if 
cfg.blockReader.BorSnapshots().SegmentsMin() == 0 { + snap = loadSnapshot(blockNum, header.Hash(), cfg.chainConfig.Bor, recents, signatures, cfg.snapDb, logger) - if snap == nil { - snap, err = initValidatorSets(ctx, tx, cfg.blockReader, cfg.chainConfig.Bor, - chain, blockNum, recents, signatures, cfg.snapDb, logger, s.LogPrefix()) + if snap == nil { + snap, err = initValidatorSets(ctx, tx, cfg.blockReader, cfg.chainConfig.Bor, + cfg.heimdallClient, chain, blockNum, recents, signatures, cfg.snapDb, logger, s.LogPrefix()) - if err != nil { - return fmt.Errorf("can't initialise validator sets: %w", err) + if err != nil { + return fmt.Errorf("can't initialise validator sets: %w", err) + } } - } - if err = persistValidatorSets(ctx, snap, u, tx, cfg.blockReader, cfg.chainConfig.Bor, chain, blockNum, header.Hash(), recents, signatures, cfg.snapDb, logger, s.LogPrefix()); err != nil { - return fmt.Errorf("can't persist validator sets: %w", err) - } - - if !mine { - sprintLength := cfg.chainConfig.Bor.CalculateSprint(blockNum) - if blockNum > zerothSpanEnd && ((blockNum+1)%sprintLength == 0) { - if err = checkHeaderExtraData(u, ctx, chain, blockNum, header, cfg.chainConfig.Bor); err != nil { - return err - } + if err = persistValidatorSets(ctx, snap, u, tx, cfg.blockReader, cfg.chainConfig.Bor, chain, blockNum, header.Hash(), recents, signatures, cfg.snapDb, logger, s.LogPrefix()); err != nil { + return fmt.Errorf("can't persist validator sets: %w", err) } } } + + if cfg.loopBreakCheck != nil && cfg.loopBreakCheck(int(blockNum-lastBlockNum)) { + break + } + } if err = s.Update(tx, headNumber); err != nil { @@ -340,10 +361,7 @@ func checkHeaderExtraData( header *types.Header, config *chain.BorConfig, ) error { - var spanID uint64 - if blockNum+1 > zerothSpanEnd { - spanID = 1 + (blockNum+1-zerothSpanEnd-1)/spanLength - } + spanID := span.IDAt(blockNum + 1) spanBytes := chain.BorSpan(spanID) var sp span.HeimdallSpan if err := json.Unmarshal(spanBytes, &sp); err != nil { @@ -362,12 +380,12 @@ func checkHeaderExtraData( } if len(producerSet) != len(headerVals) { - return bor.ErrInvalidSpanValidators + return ErrHeaderValidatorsLengthMismatch } for i, val := range producerSet { if !bytes.Equal(val.HeaderBytes(), headerVals[i].HeaderBytes()) { - return bor.ErrInvalidSpanValidators + return ErrHeaderValidatorsBytesMismatch } } return nil @@ -645,9 +663,10 @@ func persistValidatorSets( func initValidatorSets( ctx context.Context, - tx kv.Tx, + tx kv.RwTx, blockReader services.FullBlockReader, config *chain.BorConfig, + heimdallClient heimdall.IHeimdallClient, chain consensus.ChainHeaderReader, blockNum uint64, recents *lru.ARCCache[libcommon.Hash, *bor.Snapshot], @@ -673,8 +692,17 @@ func initValidatorSets( // get validators and current span zeroSpanBytes, err := blockReader.Span(ctx, tx, 0) + if err != nil { - return nil, err + if _, err := fetchAndWriteSpans(ctx, 0, tx, heimdallClient, logPrefix, logger); err != nil { + return nil, err + } + + zeroSpanBytes, err = blockReader.Span(ctx, tx, 0) + + if err != nil { + return nil, err + } } if zeroSpanBytes == nil { @@ -791,10 +819,7 @@ func BorHeimdallUnwind(u *UnwindState, ctx context.Context, s *StageState, tx kv return err } defer spanCursor.Close() - var lastSpanToKeep uint64 - if u.UnwindPoint > zerothSpanEnd { - lastSpanToKeep = 1 + (u.UnwindPoint-zerothSpanEnd-1)/spanLength - } + lastSpanToKeep := span.IDAt(u.UnwindPoint) var spanIdBytes [8]byte binary.BigEndian.PutUint64(spanIdBytes[:], lastSpanToKeep+1) for k, _, err = 
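// span.IDAt replaces the zerothSpanEnd/spanLength arithmetic this diff
// deletes. Reconstructed from the removed code and the span boundaries the
// new tests below assert (span 0 = blocks 0..255, span 1 = 256..6655, ...):
package main

import "fmt"

const (
	zerothSpanEnd = 255  // last block of span 0
	spanLength    = 6400 // blocks per span after span 0
)

// spanIDAt returns the bor span id that contains blockNum.
func spanIDAt(blockNum uint64) uint64 {
	if blockNum <= zerothSpanEnd {
		return 0
	}
	return 1 + (blockNum-zerothSpanEnd-1)/spanLength
}

func main() {
	fmt.Println(spanIDAt(255), spanIDAt(256), spanIDAt(6655), spanIDAt(6656))
	// 0 1 1 2 — matching the StartBlock/EndBlock expectations in the tests
}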
spanCursor.Seek(spanIdBytes[:]); err == nil && k != nil; k, _, err = spanCursor.Next() { diff --git a/eth/stagedsync/stage_bor_heimdall_test.go b/eth/stagedsync/stage_bor_heimdall_test.go new file mode 100644 index 00000000000..45a27311905 --- /dev/null +++ b/eth/stagedsync/stage_bor_heimdall_test.go @@ -0,0 +1,213 @@ +package stagedsync_test + +import ( + "bytes" + "context" + "errors" + "math/big" + "testing" + "time" + + "github.com/ledgerwatch/log/v3" + "github.com/stretchr/testify/require" + + "github.com/ledgerwatch/erigon/consensus/bor" + "github.com/ledgerwatch/erigon/consensus/bor/valset" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/eth/stagedsync" + "github.com/ledgerwatch/erigon/eth/stagedsync/stagedsynctest" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" +) + +func TestBorHeimdallForwardPersistsSpans(t *testing.T) { + t.Parallel() + + ctx := context.Background() + numBlocks := 4000 + testHarness := stagedsynctest.InitHarness(ctx, t, stagedsynctest.HarnessCfg{ + ChainConfig: stagedsynctest.BorDevnetChainConfigWithNoBlockSealDelays(), + GenerateChainNumBlocks: numBlocks, + LogLvl: log.LvlInfo, + }) + // pretend-update previous stage progress + testHarness.SaveStageProgress(ctx, t, stages.Headers, uint64(numBlocks)) + + // run stage under test + testHarness.RunStageForward(t, stages.BorHeimdall) + + // asserts + spans, err := testHarness.ReadSpansFromDB(ctx) + require.NoError(t, err) + require.Len(t, spans, 2) + require.Equal(t, uint64(0), spans[0].ID) + require.Equal(t, uint64(0), spans[0].StartBlock) + require.Equal(t, uint64(255), spans[0].EndBlock) + require.Equal(t, uint64(1), spans[1].ID) + require.Equal(t, uint64(256), spans[1].StartBlock) + require.Equal(t, uint64(6655), spans[1].EndBlock) +} + +func TestBorHeimdallForwardFetchesNextSpanDuringLastSprintOfCurrentSpan(t *testing.T) { + // heimdall prepares the next span a number of sprints before the end of the current one + // we should be fetching the next span once we reach the last sprint of the current span + // this mimics the behaviour in bor + t.Parallel() + + ctx := context.Background() + numBlocks := 6640 + testHarness := stagedsynctest.InitHarness(ctx, t, stagedsynctest.HarnessCfg{ + ChainConfig: stagedsynctest.BorDevnetChainConfigWithNoBlockSealDelays(), + GenerateChainNumBlocks: numBlocks, + LogLvl: log.LvlInfo, + }) + // pretend-update previous stage progress + testHarness.SaveStageProgress(ctx, t, stages.Headers, uint64(numBlocks)) + + // run stage under test + testHarness.RunStageForward(t, stages.BorHeimdall) + + // asserts + spans, err := testHarness.ReadSpansFromDB(ctx) + require.NoError(t, err) + require.Len(t, spans, 3) + require.Equal(t, uint64(0), spans[0].ID) + require.Equal(t, uint64(0), spans[0].StartBlock) + require.Equal(t, uint64(255), spans[0].EndBlock) + require.Equal(t, uint64(1), spans[1].ID) + require.Equal(t, uint64(256), spans[1].StartBlock) + require.Equal(t, uint64(6655), spans[1].EndBlock) + require.Equal(t, uint64(2), spans[2].ID) + require.Equal(t, uint64(6656), spans[2].StartBlock) + require.Equal(t, uint64(13055), spans[2].EndBlock) +} + +func TestBorHeimdallForwardPersistsStateSyncEvents(t *testing.T) { + t.Parallel() + + ctx := context.Background() + numBlocks := 96 + testHarness := stagedsynctest.InitHarness(ctx, t, stagedsynctest.HarnessCfg{ + ChainConfig: stagedsynctest.BorDevnetChainConfigWithNoBlockSealDelays(), + GenerateChainNumBlocks: numBlocks, + 
LogLvl: log.LvlInfo, + }) + // pretend-update previous stage progress + testHarness.SaveStageProgress(ctx, t, stages.Headers, uint64(numBlocks)) + + // run stage under test + testHarness.RunStageForward(t, stages.BorHeimdall) + + // asserts + // 1 event per sprint expected + events, err := testHarness.ReadStateSyncEventsFromDB(ctx) + require.NoError(t, err) + require.Len(t, events, 6) + + firstEventNumPerBlock, err := testHarness.ReadFirstStateSyncEventNumPerBlockFromDB(ctx) + require.NoError(t, err) + require.Len(t, firstEventNumPerBlock, 6) + require.Equal(t, uint64(1), firstEventNumPerBlock[16]) + require.Equal(t, uint64(2), firstEventNumPerBlock[32]) + require.Equal(t, uint64(3), firstEventNumPerBlock[48]) + require.Equal(t, uint64(4), firstEventNumPerBlock[64]) + require.Equal(t, uint64(5), firstEventNumPerBlock[80]) + require.Equal(t, uint64(6), firstEventNumPerBlock[96]) +} + +func TestBorHeimdallForwardErrHeaderValidatorsLengthMismatch(t *testing.T) { + t.Parallel() + + ctx := context.Background() + numBlocks := 271 + validatorKey1, err := crypto.GenerateKey() + require.NoError(t, err) + validatorKey2, err := crypto.GenerateKey() + require.NoError(t, err) + testHarness := stagedsynctest.InitHarness(ctx, t, stagedsynctest.HarnessCfg{ + ChainConfig: stagedsynctest.BorDevnetChainConfigWithNoBlockSealDelays(), + GenerateChainNumBlocks: numBlocks, + LogLvl: log.LvlInfo, + HeimdallProducersOverride: map[uint64][]valset.Validator{ + 1: { + *valset.NewValidator(crypto.PubkeyToAddress(validatorKey1.PublicKey), 1), + *valset.NewValidator(crypto.PubkeyToAddress(validatorKey2.PublicKey), 1), + }, + }, + }) + // pretend-update previous stage progress + testHarness.SaveStageProgress(ctx, t, stages.Headers, uint64(numBlocks)) + + // run stage under test + testHarness.RunStageForwardWithErrorIs(t, stages.BorHeimdall, stagedsync.ErrHeaderValidatorsLengthMismatch) +} + +func TestBorHeimdallForwardErrHeaderValidatorsBytesMismatch(t *testing.T) { + t.Parallel() + + ctx := context.Background() + numBlocks := 271 + validatorKey1, err := crypto.GenerateKey() + require.NoError(t, err) + testHarness := stagedsynctest.InitHarness(ctx, t, stagedsynctest.HarnessCfg{ + ChainConfig: stagedsynctest.BorDevnetChainConfigWithNoBlockSealDelays(), + GenerateChainNumBlocks: numBlocks, + LogLvl: log.LvlInfo, + HeimdallProducersOverride: map[uint64][]valset.Validator{ + 1: { + *valset.NewValidator(crypto.PubkeyToAddress(validatorKey1.PublicKey), 1), + }, + }, + }) + // pretend-update previous stage progress + testHarness.SaveStageProgress(ctx, t, stages.Headers, uint64(numBlocks)) + + // run stage under test + testHarness.RunStageForwardWithErrorIs(t, stages.BorHeimdall, stagedsync.ErrHeaderValidatorsBytesMismatch) +} + +func TestBorHeimdallForwardDetectsUnauthorizedSignerError(t *testing.T) { + t.Parallel() + + ctx := context.Background() + numBlocks := 312 + chainConfig := stagedsynctest.BorDevnetChainConfigWithNoBlockSealDelays() + testHarness := stagedsynctest.InitHarness(ctx, t, stagedsynctest.HarnessCfg{ + ChainConfig: chainConfig, + GenerateChainNumBlocks: numBlocks, + LogLvl: log.LvlInfo, + }) + + // prepare invalid header and insert it in the db + latestHeader, err := testHarness.ReadHeaderByNumber(ctx, uint64(numBlocks)) + require.NoError(t, err) + gasLimit := uint64(15500) + invalidHeader := core.MakeEmptyHeader(latestHeader, chainConfig, uint64(time.Now().Unix()), &gasLimit) + invalidHeader.Number = new(big.Int).Add(latestHeader.Number, big.NewInt(1)) + invalidHeader.Extra = bytes.Repeat([]byte{0x00}, 
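// The unauthorized-signer test around this point finishes by unwrapping the
// unwind reason with errors.As. A minimal sketch of asserting on a typed
// error that travels through fmt.Errorf("%w", ...) wrapping (the error type
// here is a stand-in for bor.UnauthorizedSignerError):
package main

import (
	"errors"
	"fmt"
)

type UnauthorizedSignerError struct{ Number uint64 }

func (e *UnauthorizedSignerError) Error() string {
	return fmt.Sprintf("unauthorized signer at block %d", e.Number)
}

func verifyHeader() error {
	// wrap the typed error the way an unwind reason would carry it
	return fmt.Errorf("bad header: %w", &UnauthorizedSignerError{Number: 313})
}

func main() {
	var target *UnauthorizedSignerError
	if err := verifyHeader(); errors.As(err, &target) {
		fmt.Println("unauthorized signer at block", target.Number) // 313
	}
}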
types.ExtraVanityLength+types.ExtraSealLength) + validatorKey1, err := crypto.GenerateKey() + require.NoError(t, err) + sighash, err := crypto.Sign(crypto.Keccak256(bor.BorRLP(invalidHeader, chainConfig.Bor)), validatorKey1) + require.NoError(t, err) + copy(invalidHeader.Extra[len(invalidHeader.Extra)-types.ExtraSealLength:], sighash) + testHarness.SaveHeader(ctx, t, invalidHeader) + // pretend-update previous stage progress + testHarness.SaveStageProgress(ctx, t, stages.Headers, invalidHeader.Number.Uint64()) + require.Equal(t, uint64(numBlocks+1), testHarness.GetStageProgress(ctx, t, stages.Headers)) + require.Equal(t, uint64(0), testHarness.GetStageProgress(ctx, t, stages.BorHeimdall)) + + // run stage under test + testHarness.RunStageForward(t, stages.BorHeimdall) + + // asserts + require.Equal(t, uint64(numBlocks+1), testHarness.GetStageProgress(ctx, t, stages.BorHeimdall)) + require.Equal(t, invalidHeader.Number.Uint64()-1, testHarness.StateSyncUnwindPoint()) + unwindReason := testHarness.StateSyncUnwindReason() + require.Equal(t, invalidHeader.Hash(), *unwindReason.Block) + var unauthorizedSignerErr *bor.UnauthorizedSignerError + ok := errors.As(unwindReason.Err, &unauthorizedSignerErr) + require.True(t, ok) + require.Equal(t, invalidHeader.Number.Uint64(), unauthorizedSignerErr.Number) + require.Equal(t, crypto.PubkeyToAddress(validatorKey1.PublicKey).Bytes(), unauthorizedSignerErr.Signer) +} diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 6dc0a9a3b27..aa60d348ae7 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -15,6 +15,8 @@ import ( "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/core/rawdb/blockio" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon/common" @@ -49,7 +51,8 @@ type HeadersCfg struct { forkValidator *engine_helpers.ForkValidator notifications *shards.Notifications - loopBreakCheck func() bool + syncConfig ethconfig.Sync + loopBreakCheck func(int) bool } func StageHeadersCfg( @@ -57,6 +60,7 @@ func StageHeadersCfg( headerDownload *headerdownload.HeaderDownload, bodyDownload *bodydownload.BodyDownload, chainConfig chain.Config, + syncConfig ethconfig.Sync, headerReqSend func(context.Context, *headerdownload.HeaderRequest) ([64]byte, bool), announceNewHashes func(context.Context, []headerdownload.Announce), penalize func(context.Context, []headerdownload.PenaltyItem), @@ -67,12 +71,13 @@ func StageHeadersCfg( tmpdir string, notifications *shards.Notifications, forkValidator *engine_helpers.ForkValidator, - loopBreakCheck func() bool) HeadersCfg { + loopBreakCheck func(int) bool) HeadersCfg { return HeadersCfg{ db: db, hd: headerDownload, bodyDownload: bodyDownload, chainConfig: chainConfig, + syncConfig: syncConfig, headerReqSend: headerReqSend, announceNewHashes: announceNewHashes, penalize: penalize, @@ -128,20 +133,21 @@ func HeadersPOW( useExternalTx bool, logger log.Logger, ) error { - var headerProgress uint64 var err error + startTime := time.Now() + if err = cfg.hd.ReadProgressFromDb(tx); err != nil { return err } cfg.hd.SetPOSSync(false) cfg.hd.SetFetchingNew(true) defer cfg.hd.SetFetchingNew(false) - headerProgress = cfg.hd.Progress() + startProgress := cfg.hd.Progress() logPrefix := s.LogPrefix() // Check if this is called straight after the unwinds, which means we need to create new canonical 
markings - hash, err := cfg.blockReader.CanonicalHash(ctx, tx, headerProgress) + hash, err := cfg.blockReader.CanonicalHash(ctx, tx, startProgress) if err != nil { return err } @@ -149,7 +155,7 @@ func HeadersPOW( defer logEvery.Stop() if hash == (libcommon.Hash{}) { headHash := rawdb.ReadHeadHeaderHash(tx) - if err = fixCanonicalChain(logPrefix, logEvery, headerProgress, headHash, tx, cfg.blockReader, logger); err != nil { + if err = fixCanonicalChain(logPrefix, logEvery, startProgress, headHash, tx, cfg.blockReader, logger); err != nil { return err } if !useExternalTx { @@ -165,21 +171,23 @@ func HeadersPOW( return nil } - logger.Info(fmt.Sprintf("[%s] Waiting for headers...", logPrefix), "from", headerProgress) + logger.Info(fmt.Sprintf("[%s] Waiting for headers...", logPrefix), "from", startProgress) - localTd, err := rawdb.ReadTd(tx, hash, headerProgress) + localTd, err := rawdb.ReadTd(tx, hash, startProgress) if err != nil { return err } + /* TEMP TESTING if localTd == nil { - return fmt.Errorf("localTD is nil: %d, %x", headerProgress, hash) + return fmt.Errorf("localTD is nil: %d, %x", startProgress, hash) } - headerInserter := headerdownload.NewHeaderInserter(logPrefix, localTd, headerProgress, cfg.blockReader) + TEMP TESTING */ + headerInserter := headerdownload.NewHeaderInserter(logPrefix, localTd, startProgress, cfg.blockReader) cfg.hd.SetHeaderReader(&ChainReaderImpl{config: &cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}) stopped := false var noProgressCounter uint = 0 - prevProgress := headerProgress + prevProgress := startProgress var wasProgress bool var lastSkeletonTime time.Time var peer [64]byte @@ -187,14 +195,15 @@ func HeadersPOW( Loop: for !stopped { - transitionedToPoS, err := rawdb.Transitioned(tx, headerProgress, cfg.chainConfig.TerminalTotalDifficulty) + transitionedToPoS, err := rawdb.Transitioned(tx, startProgress, cfg.chainConfig.TerminalTotalDifficulty) if err != nil { return err } if transitionedToPoS { - if err := s.Update(tx, headerProgress); err != nil { + if err := s.Update(tx, startProgress); err != nil { return err } + s.state.posTransition = &startProgress break } @@ -241,8 +250,9 @@ Loop: } } // Load headers into the database - var inSync bool - if inSync, err = cfg.hd.InsertHeaders(headerInserter.NewFeedHeaderFunc(tx, cfg.blockReader), cfg.chainConfig.TerminalTotalDifficulty, logPrefix, logEvery.C, uint64(currentTime.Unix())); err != nil { + inSync, err := cfg.hd.InsertHeaders(headerInserter.NewFeedHeaderFunc(tx, cfg.blockReader), cfg.syncConfig.LoopBlockLimit, cfg.chainConfig.TerminalTotalDifficulty, logPrefix, logEvery.C, uint64(currentTime.Unix())) + + if err != nil { return err } @@ -255,7 +265,15 @@ Loop: } } - if cfg.loopBreakCheck != nil && cfg.loopBreakCheck() { + if cfg.syncConfig.LoopBlockLimit > 0 { + if bodyProgress, err := stages.GetStageProgress(tx, stages.Bodies); err == nil { + if cfg.hd.Progress() > bodyProgress && cfg.hd.Progress()-bodyProgress > uint64(cfg.syncConfig.LoopBlockLimit*2) { + break + } + } + } + + if cfg.loopBreakCheck != nil && cfg.loopBreakCheck(int(cfg.hd.Progress()-startProgress)) { break } @@ -324,7 +342,16 @@ Loop: return libcommon.ErrStopped } // We do not print the following line if the stage was interrupted - logger.Info(fmt.Sprintf("[%s] Processed", logPrefix), "highest inserted", headerInserter.GetHighest(), "age", common.PrettyAge(time.Unix(int64(headerInserter.GetHighestTimestamp()), 0))) + + if s.state.posTransition != nil { + logger.Info(fmt.Sprintf("[%s] Transitioned to POS", logPrefix), 
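// HeadersPOW above now breaks its download loop once header progress runs
// more than 2*LoopBlockLimit ahead of body progress, so headers cannot race
// arbitrarily far past the stages consuming them. The check, extracted into
// a standalone helper:
package main

import "fmt"

func headersShouldYield(headerProgress, bodyProgress, loopBlockLimit uint64) bool {
	if loopBlockLimit == 0 {
		return false // no limit configured
	}
	return headerProgress > bodyProgress &&
		headerProgress-bodyProgress > loopBlockLimit*2
}

func main() {
	fmt.Println(headersShouldYield(12_000, 1_000, 5_000)) // true: gap 11k > 10k
	fmt.Println(headersShouldYield(6_000, 1_000, 5_000))  // false: gap 5k <= 10k
}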
"block", *s.state.posTransition) + } else { + headers := headerInserter.GetHighest() - startProgress + secs := time.Since(startTime).Seconds() + logger.Info(fmt.Sprintf("[%s] Processed", logPrefix), + "highest", headerInserter.GetHighest(), "age", common.PrettyAge(time.Unix(int64(headerInserter.GetHighestTimestamp()), 0)), + "headers", headers, "in", secs, "blk/sec", uint64(float64(headers)/secs)) + } return nil } diff --git a/eth/stagedsync/stage_interhashes_test.go b/eth/stagedsync/stage_interhashes_test.go index 3bf6c7faac3..107369b1659 100644 --- a/eth/stagedsync/stage_interhashes_test.go +++ b/eth/stagedsync/stage_interhashes_test.go @@ -3,9 +3,10 @@ package stagedsync_test import ( "context" "encoding/binary" - "github.com/ledgerwatch/erigon-lib/kv/dbutils" "testing" + "github.com/ledgerwatch/erigon-lib/kv/dbutils" + libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/common/length" @@ -80,7 +81,7 @@ func TestAccountAndStorageTrie(t *testing.T) { // ---------------------------------------------------------------- historyV3 := false - blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()), freezeblocks.NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New())) + blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", 1, log.New()), freezeblocks.NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", 1, log.New())) cfg := stagedsync.StageTrieCfg(db, false, true, false, t.TempDir(), blockReader, nil, historyV3, nil) _, err := stagedsync.RegenerateIntermediateHashes("IH", tx, cfg, libcommon.Hash{} /* expectedRootHash */, ctx, log.New()) assert.Nil(t, err) @@ -202,7 +203,7 @@ func TestAccountTrieAroundExtensionNode(t *testing.T) { hash6 := libcommon.HexToHash("0x3100000000000000000000000000000000000000000000000000000000000000") assert.Nil(t, tx.Put(kv.HashedAccounts, hash6[:], encoded)) - blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()), freezeblocks.NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New())) + blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", 1, log.New()), freezeblocks.NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", 1, log.New())) _, err := stagedsync.RegenerateIntermediateHashes("IH", tx, stagedsync.StageTrieCfg(db, false, true, false, t.TempDir(), blockReader, nil, historyV3, nil), libcommon.Hash{} /* expectedRootHash */, ctx, log.New()) assert.Nil(t, err) @@ -265,7 +266,7 @@ func TestStorageDeletion(t *testing.T) { // Populate account & storage trie DB tables // ---------------------------------------------------------------- historyV3 := false - blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()), freezeblocks.NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New())) + blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", 1, log.New()), freezeblocks.NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", 1, log.New())) cfg := stagedsync.StageTrieCfg(db, false, true, false, t.TempDir(), blockReader, nil, historyV3, nil) _, err = stagedsync.RegenerateIntermediateHashes("IH", tx, cfg, 
libcommon.Hash{} /* expectedRootHash */, ctx, log.New()) assert.Nil(t, err) @@ -384,7 +385,7 @@ func TestHiveTrieRoot(t *testing.T) { common.FromHex("02081bc16d674ec80000"))) historyV3 := false - blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()), freezeblocks.NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New())) + blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", 1, log.New()), freezeblocks.NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", 1, log.New())) cfg := stagedsync.StageTrieCfg(db, false, true, false, t.TempDir(), blockReader, nil, historyV3, nil) logger := log.New() _, err := stagedsync.RegenerateIntermediateHashes("IH", tx, cfg, libcommon.Hash{} /* expectedRootHash */, ctx, logger) diff --git a/eth/stagedsync/stage_senders.go b/eth/stagedsync/stage_senders.go index 453562c4e20..3de8e13904b 100644 --- a/eth/stagedsync/stage_senders.go +++ b/eth/stagedsync/stage_senders.go @@ -44,9 +44,10 @@ type SendersCfg struct { chainConfig *chain.Config hd *headerdownload.HeaderDownload blockReader services.FullBlockReader + loopBreakCheck func(int) bool } -func StageSendersCfg(db kv.RwDB, chainCfg *chain.Config, badBlockHalt bool, tmpdir string, prune prune.Mode, blockReader services.FullBlockReader, hd *headerdownload.HeaderDownload) SendersCfg { +func StageSendersCfg(db kv.RwDB, chainCfg *chain.Config, badBlockHalt bool, tmpdir string, prune prune.Mode, blockReader services.FullBlockReader, hd *headerdownload.HeaderDownload, loopBreakCheck func(int) bool) SendersCfg { const sendersBatchSize = 10000 const sendersBlockSize = 4096 @@ -62,8 +63,8 @@ func StageSendersCfg(db kv.RwDB, chainCfg *chain.Config, badBlockHalt bool, tmpd chainConfig: chainCfg, prune: prune, hd: hd, - - blockReader: blockReader, + blockReader: blockReader, + loopBreakCheck: loopBreakCheck, } } @@ -198,6 +199,10 @@ Loop: break } + if cfg.loopBreakCheck != nil && cfg.loopBreakCheck(int(blockNumber-startFrom)) { + break + } + has, err := cfg.blockReader.HasSenders(ctx, tx, blockHash, blockNumber) if err != nil { return err diff --git a/eth/stagedsync/stage_senders_test.go b/eth/stagedsync/stage_senders_test.go index 15bda777a11..bda8d5e90f4 100644 --- a/eth/stagedsync/stage_senders_test.go +++ b/eth/stagedsync/stage_senders_test.go @@ -128,7 +128,7 @@ func TestSenders(t *testing.T) { require.NoError(stages.SaveStageProgress(tx, stages.Bodies, 3)) - cfg := stagedsync.StageSendersCfg(db, params.TestChainConfig, false, "", prune.Mode{}, br, nil) + cfg := stagedsync.StageSendersCfg(db, params.TestChainConfig, false, "", prune.Mode{}, br, nil, nil) err = stagedsync.SpawnRecoverSendersStage(cfg, &stagedsync.StageState{ID: stages.Senders}, nil, tx, 3, m.Ctx, log.New()) require.NoError(err) diff --git a/eth/stagedsync/stage_snapshots.go b/eth/stagedsync/stage_snapshots.go index 3e1c03f3625..7be716c40b5 100644 --- a/eth/stagedsync/stage_snapshots.go +++ b/eth/stagedsync/stage_snapshots.go @@ -1,17 +1,35 @@ package stagedsync import ( + "bufio" + "bytes" "context" "encoding/binary" + "errors" "fmt" + "io/fs" "math/big" + "os" + "path/filepath" "reflect" + "runtime" + "sort" + "strings" + "sync" + "sync/atomic" "time" + "github.com/anacrolix/torrent" "github.com/ledgerwatch/log/v3" + "golang.org/x/sync/errgroup" "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/erigon-lib/chain/snapcfg" "github.com/ledgerwatch/erigon-lib/common/datadir" + 
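The senders stage above now threads the same `func(int) bool` break check the headers stage uses; the argument is the number of blocks processed in the current cycle (`blockNumber-startFrom`). One plausible way to build such a callback from the loop limit — the constructor is an assumption for illustration, not part of this diff:

```go
package sketch

// newLoopBreakCheck returns a callback in the shape the stage configs now
// expect: given the blocks processed so far this cycle, report whether the
// stage should yield back to the sync loop. Zero disables the budget.
func newLoopBreakCheck(loopBlockLimit uint) func(int) bool {
	return func(processed int) bool {
		return loopBlockLimit > 0 && processed >= int(loopBlockLimit)
	}
}
```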
"github.com/ledgerwatch/erigon-lib/common/dbg" + "github.com/ledgerwatch/erigon-lib/common/dir" + "github.com/ledgerwatch/erigon-lib/downloader" + "github.com/ledgerwatch/erigon-lib/downloader/snaptype" "github.com/ledgerwatch/erigon-lib/etl" proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader" "github.com/ledgerwatch/erigon-lib/kv" @@ -21,11 +39,15 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/ethconfig/estimate" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/turbo/shards" "github.com/ledgerwatch/erigon/turbo/silkworm" "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" ) type SnapshotsCfg struct { @@ -36,39 +58,79 @@ type SnapshotsCfg struct { blockRetire services.BlockRetire snapshotDownloader proto_downloader.DownloaderClient blockReader services.FullBlockReader - dbEventNotifier services.DBEventNotifier + notifier *shards.Notifications - historyV3 bool - caplin bool - agg *state.AggregatorV3 - silkworm *silkworm.Silkworm + historyV3 bool + caplin bool + agg *state.AggregatorV3 + silkworm *silkworm.Silkworm + snapshotUploader *snapshotUploader + syncConfig ethconfig.Sync } func StageSnapshotsCfg(db kv.RwDB, chainConfig chain.Config, + syncConfig ethconfig.Sync, dirs datadir.Dirs, blockRetire services.BlockRetire, snapshotDownloader proto_downloader.DownloaderClient, blockReader services.FullBlockReader, - dbEventNotifier services.DBEventNotifier, + notifier *shards.Notifications, historyV3 bool, agg *state.AggregatorV3, caplin bool, silkworm *silkworm.Silkworm, ) SnapshotsCfg { - return SnapshotsCfg{ + cfg := SnapshotsCfg{ db: db, chainConfig: chainConfig, dirs: dirs, blockRetire: blockRetire, snapshotDownloader: snapshotDownloader, blockReader: blockReader, - dbEventNotifier: dbEventNotifier, + notifier: notifier, historyV3: historyV3, caplin: caplin, agg: agg, silkworm: silkworm, + syncConfig: syncConfig, } + + if uploadFs := cfg.syncConfig.UploadLocation; len(uploadFs) > 0 { + + cfg.snapshotUploader = &snapshotUploader{ + cfg: &cfg, + uploadFs: uploadFs, + version: snapcfg.KnownCfg(chainConfig.ChainName, 0).Version, + torrentFiles: downloader.NewAtomicTorrentFiles(cfg.dirs.Snap), + } + + cfg.blockRetire.SetWorkers(estimate.CompressSnapshot.Workers()) + + freezingCfg := cfg.blockReader.FreezingCfg() + + if freezingCfg.Enabled && freezingCfg.Produce { + u := cfg.snapshotUploader + + if maxSeedable := u.maxSeedableHeader(); u.cfg.syncConfig.FrozenBlockLimit > 0 && maxSeedable > u.cfg.syncConfig.FrozenBlockLimit { + blockLimit := maxSeedable - u.minBlockNumber() + + if u.cfg.syncConfig.FrozenBlockLimit < blockLimit { + blockLimit = u.cfg.syncConfig.FrozenBlockLimit + } + + if snapshots, ok := u.cfg.blockReader.Snapshots().(*freezeblocks.RoSnapshots); ok { + snapshots.SetSegmentsMin(maxSeedable - blockLimit) + } + + if snapshots, ok := u.cfg.blockReader.BorSnapshots().(*freezeblocks.BorRoSnapshots); ok { + snapshots.SetSegmentsMin(maxSeedable - blockLimit) + } + } + } + } + + return cfg } func SpawnStageSnapshots( @@ -99,7 +161,12 @@ func SpawnStageSnapshots( if minProgress == 0 || progress < minProgress { minProgress = progress } + + if stage == stages.SyncStage(cfg.syncConfig.BreakAfterStage) { + break + } } + if minProgress > s.BlockNumber { 
if err = s.Update(tx, minProgress); err != nil { return err @@ -126,21 +193,60 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R cstate = snapshotsync.AlsoCaplin } - if err := snapshotsync.WaitForDownloader(s.LogPrefix(), ctx, cfg.historyV3, cstate, cfg.agg, tx, cfg.blockReader, &cfg.chainConfig, cfg.snapshotDownloader); err != nil { - return err + if cfg.snapshotUploader != nil { + u := cfg.snapshotUploader + + u.init(ctx, logger) + + if cfg.syncConfig.UploadFrom != rpc.EarliestBlockNumber { + u.downloadLatestSnapshots(ctx, cfg.syncConfig.UploadFrom, u.version) + } + + if maxSeedable := u.maxSeedableHeader(); u.cfg.syncConfig.FrozenBlockLimit > 0 && maxSeedable > u.cfg.syncConfig.FrozenBlockLimit { + blockLimit := maxSeedable - u.minBlockNumber() + + if u.cfg.syncConfig.FrozenBlockLimit < blockLimit { + blockLimit = u.cfg.syncConfig.FrozenBlockLimit + } + + if snapshots, ok := u.cfg.blockReader.Snapshots().(*freezeblocks.RoSnapshots); ok { + snapshots.SetSegmentsMin(maxSeedable - blockLimit) + } + + if snapshots, ok := u.cfg.blockReader.BorSnapshots().(*freezeblocks.BorRoSnapshots); ok { + snapshots.SetSegmentsMin(maxSeedable - blockLimit) + } + } + + if err := cfg.blockReader.Snapshots().ReopenFolder(); err != nil { + return err + } + + if cfg.chainConfig.Bor != nil { + if err := cfg.blockReader.BorSnapshots().ReopenFolder(); err != nil { + return err + } + } + if cfg.notifier.Events != nil { // can notify right here, even that write txn is not commit + cfg.notifier.Events.OnNewSnapshot() + } + } else { + if err := snapshotsync.WaitForDownloader(ctx, s.LogPrefix(), cfg.historyV3, cstate, cfg.agg, tx, cfg.blockReader, &cfg.chainConfig, cfg.snapshotDownloader); err != nil { + return err + } } // It's ok to notify before tx.Commit(), because RPCDaemon does read list of files by gRPC (not by reading from db) - if cfg.dbEventNotifier != nil { - cfg.dbEventNotifier.OnNewSnapshot() + if cfg.notifier.Events != nil { + cfg.notifier.Events.OnNewSnapshot() } - cfg.blockReader.Snapshots().LogStat() + cfg.blockReader.Snapshots().LogStat("download") cfg.agg.LogStats(tx, func(endTxNumMinimax uint64) uint64 { _, histBlockNumProgress, _ := rawdbv3.TxNums.FindBlockNum(tx, endTxNumMinimax) return histBlockNumProgress }) - if err := cfg.blockRetire.BuildMissedIndicesIfNeed(ctx, s.LogPrefix(), cfg.dbEventNotifier, &cfg.chainConfig); err != nil { + if err := cfg.blockRetire.BuildMissedIndicesIfNeed(ctx, s.LogPrefix(), cfg.notifier.Events, &cfg.chainConfig); err != nil { return err } @@ -157,8 +263,8 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R if err := cfg.agg.BuildMissedIndices(ctx, indexWorkers); err != nil { return err } - if cfg.dbEventNotifier != nil { - cfg.dbEventNotifier.OnNewSnapshot() + if cfg.notifier.Events != nil { + cfg.notifier.Events.OnNewSnapshot() } } @@ -173,6 +279,7 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R if err := FillDBFromSnapshots(s.LogPrefix(), ctx, tx, cfg.dirs, cfg.blockReader, cfg.agg, logger); err != nil { return err } + return nil } @@ -300,7 +407,7 @@ func FillDBFromSnapshots(logPrefix string, ctx context.Context, tx kv.RwTx, dirs /* ====== PRUNING ====== */ // snapshots pruning sections works more as a retiring of blocks // retiring blocks means moving block data from db into snapshots -func SnapshotsPrune(s *PruneState, initialCycle bool, cfg SnapshotsCfg, ctx context.Context, tx kv.RwTx) (err error) { +func SnapshotsPrune(s *PruneState, initialCycle bool, cfg 
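The `FrozenBlockLimit` clamp appears twice in this file — once in `StageSnapshotsCfg` and again in the uploader branch of `DownloadAndIndexSnapshotsIfNeed` above — so the arithmetic may be worth factoring out. A sketch of what it computes (pure function; the helper name is assumed):

```go
package sketch

// segmentsMin returns the lowest block to keep visible when a frozen-block
// limit is set: at most `limit` blocks below the highest seedable header,
// but never below the lowest block actually on disk. ok is false when no
// clamping applies (limit disabled, or not enough frozen data yet).
func segmentsMin(maxSeedable, minAvailable, limit uint64) (segMin uint64, ok bool) {
	if limit == 0 || maxSeedable <= limit {
		return 0, false
	}
	blockLimit := maxSeedable - minAvailable
	if limit < blockLimit {
		blockLimit = limit
	}
	return maxSeedable - blockLimit, true
}
```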
SnapshotsCfg, ctx context.Context, tx kv.RwTx, logger log.Logger) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) @@ -311,32 +418,71 @@ func SnapshotsPrune(s *PruneState, initialCycle bool, cfg SnapshotsCfg, ctx cont } freezingCfg := cfg.blockReader.FreezingCfg() + if freezingCfg.Enabled { - if err := cfg.blockRetire.PruneAncientBlocks(tx, 100, cfg.chainConfig.Bor != nil); err != nil { - return err - } - } - if freezingCfg.Enabled && freezingCfg.Produce { - //TODO: initialSync maybe save files progress here - if cfg.blockRetire.HasNewFrozenFiles() || cfg.agg.HasNewFrozenFiles() { - if err := rawdb.WriteSnapshots(tx, cfg.blockReader.FrozenFiles(), cfg.agg.Files()); err != nil { - return err + if freezingCfg.Produce { + //TODO: initialSync maybe save files progress here + if cfg.blockRetire.HasNewFrozenFiles() || cfg.agg.HasNewFrozenFiles() { + if err := rawdb.WriteSnapshots(tx, cfg.blockReader.FrozenFiles(), cfg.agg.Files()); err != nil { + return err + } } - } - cfg.blockRetire.RetireBlocksInBackground(ctx, s.ForwardProgress, cfg.chainConfig.Bor != nil, log.LvlInfo, func(downloadRequest []services.DownloadRequest) error { - if cfg.snapshotDownloader == nil || reflect.ValueOf(cfg.snapshotDownloader).IsNil() { - return nil + var minBlockNumber uint64 + + if cfg.snapshotUploader != nil { + minBlockNumber = cfg.snapshotUploader.minBlockNumber() } - return snapshotsync.RequestSnapshotsDownload(ctx, downloadRequest, cfg.snapshotDownloader) - }, func(l []string) error { - if cfg.snapshotDownloader == nil || reflect.ValueOf(cfg.snapshotDownloader).IsNil() { + + cfg.blockRetire.RetireBlocksInBackground(ctx, minBlockNumber, s.ForwardProgress, log.LvlInfo, func(downloadRequest []services.DownloadRequest) error { + if cfg.snapshotDownloader != nil && !reflect.ValueOf(cfg.snapshotDownloader).IsNil() { + if err := snapshotsync.RequestSnapshotsDownload(ctx, downloadRequest, cfg.snapshotDownloader); err != nil { + return err + } + } + return nil - } - _, err := cfg.snapshotDownloader.Delete(ctx, &proto_downloader.DeleteRequest{Paths: l}) + }, func(l []string) error { + //if cfg.snapshotUploader != nil { + // TODO - we need to also remove files from the uploader (100k->500K transition) + //} + + if !(cfg.snapshotDownloader == nil || reflect.ValueOf(cfg.snapshotDownloader).IsNil()) { + _, err := cfg.snapshotDownloader.Delete(ctx, &proto_downloader.DeleteRequest{Paths: l}) + return err + } + + return nil + }) + + //cfg.agg.BuildFilesInBackground() + } + + if err := cfg.blockRetire.PruneAncientBlocks(tx, cfg.syncConfig.PruneLimit); err != nil { return err - }) - //cfg.agg.BuildFilesInBackground() + } + } + + if cfg.snapshotUploader != nil { + // if we're uploading make sure that the DB does not get too far + // ahead of the snapshot production process - otherwise DB will + // grow larger than necessary - we may also want to increase the + // workers + if s.ForwardProgress > cfg.blockReader.FrozenBlocks()+300_000 { + func() { + checkEvery := time.NewTicker(logInterval) + defer checkEvery.Stop() + + for s.ForwardProgress > cfg.blockReader.FrozenBlocks()+300_000 { + select { + case <-ctx.Done(): + return + case <-checkEvery.C: + log.Info(fmt.Sprintf("[%s] Waiting for snapshots...", s.LogPrefix()), "progress", s.ForwardProgress, "frozen", cfg.blockReader.FrozenBlocks(), "gap", s.ForwardProgress-cfg.blockReader.FrozenBlocks()) + } + } + }() + } } if !useExternalTx { @@ -347,3 +493,779 @@ func SnapshotsPrune(s *PruneState, initialCycle bool, cfg SnapshotsCfg, ctx cont 
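The pruning hunk above adds back-pressure for uploader mode: whenever execution progress is more than 300,000 blocks ahead of the frozen snapshots, the stage parks on a ticker until freezing catches up or the context is cancelled. The wait pattern in isolation (the progress functions are stand-ins; interval and gap follow the diff's constants):

```go
package sketch

import (
	"context"
	"log"
	"time"
)

// waitForGap blocks until ahead() no longer exceeds behind() by more than
// maxGap, re-checking on every tick and returning early on cancellation.
func waitForGap(ctx context.Context, ahead, behind func() uint64, maxGap uint64, interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for ahead() > behind()+maxGap {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			log.Printf("waiting for snapshots: progress=%d frozen=%d gap=%d",
				ahead(), behind(), ahead()-behind())
		}
	}
}
```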
return nil } + +type uploadState struct { + sync.Mutex + file string + info *snaptype.FileInfo + torrent *torrent.TorrentSpec + buildingTorrent bool + uploads []string + remote bool + hasRemoteTorrent bool + //remoteHash string + local bool + localHash string +} + +type snapshotUploader struct { + cfg *SnapshotsCfg + files map[string]*uploadState + uploadFs string + rclone *downloader.RCloneClient + uploadSession *downloader.RCloneSession + uploadScheduled atomic.Bool + uploading atomic.Bool + manifestMutex sync.Mutex + version uint8 + torrentFiles *downloader.TorrentFiles +} + +func (u *snapshotUploader) init(ctx context.Context, logger log.Logger) { + if u.files == nil { + freezingCfg := u.cfg.blockReader.FreezingCfg() + + if freezingCfg.Enabled && freezingCfg.Produce { + u.files = map[string]*uploadState{} + u.start(ctx, logger) + } + } +} + +func (u *snapshotUploader) maxUploadedHeader() uint64 { + var max uint64 + + if len(u.files) > 0 { + for _, state := range u.files { + if state.local && state.remote { + if state.info != nil { + if state.info.T == snaptype.Headers { + if state.info.To > max { + max = state.info.To + } + } + } else { + if info, ok := snaptype.ParseFileName(u.cfg.dirs.Snap, state.file); ok { + if info.T == snaptype.Headers { + if info.To > max { + max = info.To + } + } + state.info = &info + } + } + } + } + } + + return max +} + +type dirEntry struct { + name string +} + +func (e dirEntry) Name() string { + return e.name +} + +func (e dirEntry) IsDir() bool { + return false +} + +func (e dirEntry) Type() fs.FileMode { + return e.Mode() +} + +func (e dirEntry) Size() int64 { + return -1 +} + +func (e dirEntry) Mode() fs.FileMode { + return fs.ModeIrregular +} + +func (e dirEntry) ModTime() time.Time { + return time.Time{} +} + +func (e dirEntry) Sys() any { + return nil +} + +func (e dirEntry) Info() (fs.FileInfo, error) { + return e, nil +} + +var checkKnownSizes = false + +func (u *snapshotUploader) seedable(fi snaptype.FileInfo) bool { + if !fi.Seedable() { + return false + } + + if checkKnownSizes { + for _, it := range snapcfg.KnownCfg(u.cfg.chainConfig.ChainName, 1).Preverified { + info, _ := snaptype.ParseFileName("", it.Name) + + if fi.From == info.From { + return fi.To == info.To + } + + if fi.From < info.From { + return info.To-info.From == fi.To-fi.From + } + + if fi.From < info.To { + return false + } + } + } + + return true +} + +func (u *snapshotUploader) downloadManifest(ctx context.Context) ([]fs.DirEntry, error) { + u.manifestMutex.Lock() + defer u.manifestMutex.Unlock() + + reader, err := u.uploadSession.Cat(ctx, "manifest.txt") + + if err != nil { + return nil, err + } + + var entries []fs.DirEntry + + scanner := bufio.NewScanner(reader) + + for scanner.Scan() { + entries = append(entries, dirEntry{scanner.Text()}) + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + return entries, nil +} + +func (u *snapshotUploader) uploadManifest(ctx context.Context, remoteRefresh bool) error { + u.manifestMutex.Lock() + defer u.manifestMutex.Unlock() + + if remoteRefresh { + u.refreshFromRemote(ctx) + } + + manifestFile := "manifest.txt" + + fileMap := map[string]string{} + + for file, state := range u.files { + if state.remote { + if state.hasRemoteTorrent { + fileMap[file] = file + ".torrent" + } else { + fileMap[file] = "" + } + } + } + + files := make([]string, 0, len(fileMap)) + + for file, torrent := range fileMap { + files = append(files, file) + + if len(torrent) > 0 { + files = append(files, torrent) + } + } + + sort.Strings(files) +
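`manifest.txt` as used above is deliberately simple: a sorted list of remote object names, one per line, with each segment optionally followed by its `.torrent` companion. A stdlib-only round trip of that format (file names are illustrative):

```go
package sketch

import (
	"bufio"
	"fmt"
	"sort"
	"strings"
)

// writeManifest renders the uploader's manifest: one name per line, sorted,
// with torrent companions listed alongside their segment files.
func writeManifest(fileToTorrent map[string]string) string {
	names := make([]string, 0, 2*len(fileToTorrent))
	for file, torrent := range fileToTorrent {
		names = append(names, file)
		if torrent != "" {
			names = append(names, torrent)
		}
	}
	sort.Strings(names)

	var b strings.Builder
	for _, n := range names {
		fmt.Fprintln(&b, n)
	}
	return b.String()
}

// readManifest is the inverse used on download: one entry per scanned line.
func readManifest(manifest string) []string {
	var names []string
	s := bufio.NewScanner(strings.NewReader(manifest))
	for s.Scan() {
		names = append(names, s.Text())
	}
	return names
}
```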
+ manifestEntries := bytes.Buffer{} + + for _, file := range files { + fmt.Fprintln(&manifestEntries, file) + } + + _ = os.WriteFile(filepath.Join(u.cfg.dirs.Snap, manifestFile), manifestEntries.Bytes(), 0644) + defer os.Remove(filepath.Join(u.cfg.dirs.Snap, manifestFile)) + + return u.uploadSession.Upload(ctx, manifestFile) +} + +func (u *snapshotUploader) refreshFromRemote(ctx context.Context) { + remoteFiles, err := u.uploadSession.ReadRemoteDir(ctx, true) + + if err != nil { + return + } + + u.updateRemotes(remoteFiles) +} + +func (u *snapshotUploader) updateRemotes(remoteFiles []fs.DirEntry) { + for _, fi := range remoteFiles { + var file string + var hasTorrent bool + + if hasTorrent = filepath.Ext(fi.Name()) == ".torrent"; hasTorrent { + file = strings.TrimSuffix(fi.Name(), ".torrent") + } else { + file = fi.Name() + } + + // if we have found the file & its torrent we don't + // need to attempt another sync operation + if state, ok := u.files[file]; ok { + state.remote = true + + if hasTorrent { + state.hasRemoteTorrent = true + } + + } else { + info, ok := snaptype.ParseFileName(u.cfg.dirs.Snap, fi.Name()) + + if !ok || info.Version != u.version { + continue + } + + u.files[file] = &uploadState{ + file: file, + info: &info, + local: dir.FileNonZero(info.Path), + hasRemoteTorrent: hasTorrent, + } + } + } +} + +func (u *snapshotUploader) downloadLatestSnapshots(ctx context.Context, blockNumber rpc.BlockNumber, version uint8) error { + + entries, err := u.downloadManifest(ctx) + + if err != nil { + entries, err = u.uploadSession.ReadRemoteDir(ctx, true) + } + + if err != nil { + return err + } + + lastSegments := map[snaptype.Type]fs.FileInfo{} + torrents := map[string]string{} + + for _, ent := range entries { + if info, err := ent.Info(); err == nil { + + if info.Size() > -1 && info.Size() <= 32 { + continue + } + + snapInfo, ok := info.Sys().(downloader.SnapInfo) + + if ok && snapInfo.Type() != snaptype.Unknown && snapInfo.Version() == version { + if last, ok := lastSegments[snapInfo.Type()]; ok { + if lastInfo, ok := last.Sys().(downloader.SnapInfo); ok && snapInfo.To() > lastInfo.To() { + lastSegments[snapInfo.Type()] = info + } + } else { + lastSegments[snapInfo.Type()] = info + } + } else { + if ext := filepath.Ext(info.Name()); ext == ".torrent" { + fileName := strings.TrimSuffix(info.Name(), ".torrent") + torrents[fileName] = info.Name() + } + } + } + } + + var min uint64 + + for _, info := range lastSegments { + if lastInfo, ok := info.Sys().(downloader.SnapInfo); ok { + if min == 0 || lastInfo.From() < min { + min = lastInfo.From() + } + } + } + + for segType, info := range lastSegments { + if lastInfo, ok := info.Sys().(downloader.SnapInfo); ok { + if lastInfo.From() > min { + for _, ent := range entries { + if info, err := ent.Info(); err == nil { + snapInfo, ok := info.Sys().(downloader.SnapInfo) + + if ok && snapInfo.Type() == segType && + snapInfo.Version() == version && + snapInfo.From() == min { + lastSegments[segType] = info + } + } + } + } + } + } + + downloads := make([]string, 0, len(lastSegments)) + + for _, info := range lastSegments { + downloads = append(downloads, info.Name()) + if torrent, ok := torrents[info.Name()]; ok { + downloads = append(downloads, torrent) + } + } + + if len(downloads) > 0 { + return u.uploadSession.Download(ctx, downloads...) 
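`downloadLatestSnapshots` above works in two passes: first keep the newest segment per type, then pull any type whose winner starts above the common minimum back down, so every downloaded range begins at the same block. Reduced to plain data (types and ranges are illustrative):

```go
package main

import "fmt"

type segment struct {
	typ      string
	from, to uint64
}

// latestAligned picks the newest segment per type, then re-selects any type
// whose winner begins above the smallest `from`, so the set is aligned.
func latestAligned(segs []segment) map[string]segment {
	last := map[string]segment{}
	for _, s := range segs {
		if cur, ok := last[s.typ]; !ok || s.to > cur.to {
			last[s.typ] = s
		}
	}

	var min uint64
	for _, s := range last {
		if min == 0 || s.from < min {
			min = s.from
		}
	}

	for typ, s := range last {
		if s.from > min {
			for _, cand := range segs {
				if cand.typ == typ && cand.from == min {
					last[typ] = cand
				}
			}
		}
	}
	return last
}

func main() {
	fmt.Println(latestAligned([]segment{
		{"headers", 0, 500_000}, {"headers", 500_000, 1_000_000},
		{"bodies", 0, 500_000},
	}))
	// headers falls back to its 0-500k segment because bodies only reach 500k
}
```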
+ } + + return nil +} + +func (u *snapshotUploader) maxSeedableHeader() uint64 { + var max uint64 + + if list, err := snaptype.Segments(u.cfg.dirs.Snap, u.version); err == nil { + for _, info := range list { + if u.seedable(info) && info.T == snaptype.Headers && info.To > max { + max = info.To + } + } + } + + return max +} + +func (u *snapshotUploader) minBlockNumber() uint64 { + var min uint64 + + if list, err := snaptype.Segments(u.cfg.dirs.Snap, u.version); err == nil { + for _, info := range list { + if u.seedable(info) && (min == 0 || info.From < min) { + min = info.From + } + } + } + + return min +} + +func expandHomeDir(dirpath string) string { + home, err := os.UserHomeDir() + if err != nil { + return dirpath + } + prefix := fmt.Sprintf("~%c", os.PathSeparator) + if strings.HasPrefix(dirpath, prefix) { + return filepath.Join(home, dirpath[len(prefix):]) + } else if dirpath == "~" { + return home + } + return dirpath +} + +func isLocalFs(ctx context.Context, rclient *downloader.RCloneClient, fs string) bool { + + remotes, _ := rclient.ListRemotes(ctx) + + if remote, _, ok := strings.Cut(fs, ":"); ok { + for _, r := range remotes { + if remote == r { + return false + } + } + + return filepath.VolumeName(fs) == remote + } + + return true +} + +func (u *snapshotUploader) start(ctx context.Context, logger log.Logger) { + var err error + + u.rclone, err = downloader.NewRCloneClient(logger) + + if err != nil { + logger.Warn("[uploader] Uploading disabled: rclone start failed", "err", err) + return + } + + uploadFs := u.uploadFs + + if isLocalFs(ctx, u.rclone, uploadFs) { + uploadFs = expandHomeDir(filepath.Clean(uploadFs)) + + uploadFs, err = filepath.Abs(uploadFs) + + if err != nil { + logger.Warn("[uploader] Uploading disabled: invalid upload fs", "err", err, "fs", u.uploadFs) + return + } + + if err := os.MkdirAll(uploadFs, 0755); err != nil { + logger.Warn("[uploader] Uploading disabled: can't create upload fs", "err", err, "fs", u.uploadFs) + return + } + } + + u.uploadSession, err = u.rclone.NewSession(ctx, u.cfg.dirs.Snap, uploadFs) + + if err != nil { + logger.Warn("[uploader] Uploading disabled: rclone session failed", "err", err) + return + } + + go func() { + + remoteFiles, _ := u.downloadManifest(ctx) + refreshFromRemote := false + + if len(remoteFiles) > 0 { + u.updateRemotes(remoteFiles) + refreshFromRemote = true + } else { + u.refreshFromRemote(ctx) + } + + go u.uploadManifest(ctx, refreshFromRemote) + + logger.Debug("[snapshot uploader] starting snapshot subscription...") + snapshotSubCh, snapshotSubClean := u.cfg.notifier.Events.AddNewSnapshotSubscription() + defer snapshotSubClean() + + logger.Info("[snapshot uploader] subscription established") + + defer func() { + if err != nil { + if !errors.Is(err, context.Canceled) { + logger.Warn("[snapshot uploader] subscription closed", "reason", err) + } + } else { + logger.Warn("[snapshot uploader] subscription closed") + } + }() + + u.scheduleUpload(ctx, logger) + + for { + select { + case <-ctx.Done(): + err = ctx.Err() + return + case <-snapshotSubCh: + logger.Info("[snapshot uploader] new snapshot received") + u.scheduleUpload(ctx, logger) + } + } + }() +} + +func (u *snapshotUploader) scheduleUpload(ctx context.Context, logger log.Logger) { + if !u.uploadScheduled.CompareAndSwap(false, true) { + return + } + + if u.uploading.CompareAndSwap(false, true) { + go func() { + defer u.uploading.Store(false) + for u.uploadScheduled.Load() { + u.uploadScheduled.Store(false) + u.upload(ctx, logger) + } + }() + } +} + +func (u
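`scheduleUpload` above coalesces bursts of snapshot events into at most one running upload plus one queued re-run: one atomic records the request, the other gates the single worker, and the worker drains the request flag in a loop. The pattern in isolation — note the narrow window where a request can land after the worker's final flag check but before it clears the running bit; such a request simply waits for the next snapshot event:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type coalescer struct {
	scheduled atomic.Bool // a run has been requested
	running   atomic.Bool // a worker goroutine is active
	wg        sync.WaitGroup
}

// schedule collapses bursts of calls into a single queued re-run while a
// worker is active, and starts a worker when none is running.
func (c *coalescer) schedule(work func()) {
	if !c.scheduled.CompareAndSwap(false, true) {
		return // already queued; the active worker will pick it up
	}
	if c.running.CompareAndSwap(false, true) {
		c.wg.Add(1)
		go func() {
			defer c.wg.Done()
			defer c.running.Store(false)
			for c.scheduled.Load() {
				c.scheduled.Store(false)
				work()
			}
		}()
	}
}

func main() {
	var c coalescer
	var runs atomic.Int32
	for i := 0; i < 100; i++ {
		c.schedule(func() { runs.Add(1) })
	}
	c.wg.Wait()
	fmt.Println("runs:", runs.Load()) // typically far fewer than 100
}
```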
*snapshotUploader) removeBefore(before uint64) { + list, err := snaptype.Segments(u.cfg.dirs.Snap, u.version) + + if err != nil { + return + } + + var toReopen []string + var borToReopen []string + + var toRemove []string //nolint:prealloc + + for _, f := range list { + if f.To > before { + switch f.T { + case snaptype.BorEvents, snaptype.BorSpans: + borToReopen = append(borToReopen, filepath.Base(f.Path)) + default: + toReopen = append(toReopen, filepath.Base(f.Path)) + } + + continue + } + + toRemove = append(toRemove, f.Path) + } + + if len(toRemove) > 0 { + if snapshots, ok := u.cfg.blockReader.Snapshots().(*freezeblocks.RoSnapshots); ok { + snapshots.SetSegmentsMin(before) + snapshots.ReopenList(toReopen, true) + } + + if snapshots, ok := u.cfg.blockReader.BorSnapshots().(*freezeblocks.BorRoSnapshots); ok { + snapshots.ReopenList(borToReopen, true) + snapshots.SetSegmentsMin(before) + } + + for _, f := range toRemove { + _ = os.Remove(f) + _ = os.Remove(f + ".torrent") + ext := filepath.Ext(f) + withoutExt := f[:len(f)-len(ext)] + _ = os.Remove(withoutExt + ".idx") + + if strings.HasSuffix(withoutExt, "transactions") { + _ = os.Remove(withoutExt + "-to-block.idx") + } + } + } +} + +func (u *snapshotUploader) upload(ctx context.Context, logger log.Logger) { + defer func() { + if r := recover(); r != nil { + log.Error("[snapshot uploader] snapshot upload failed", "err", r, "stack", dbg.Stack()) + } + }() + + retryTime := 30 * time.Second + maxRetryTime := 300 * time.Second + + var uploadCount int + + for { + var processList []*uploadState + + for _, f := range u.cfg.blockReader.FrozenFiles() { + if state, ok := u.files[f]; !ok { + if fi, ok := snaptype.ParseFileName(u.cfg.dirs.Snap, f); ok { + if u.seedable(fi) { + state := &uploadState{ + file: f, + info: &fi, + local: true, + } + + if fi.TorrentFileExists() { + state.torrent, _ = u.torrentFiles.LoadByName(f) + } + + u.files[f] = state + processList = append(processList, state) + } + } + } else { + func() { + state.Lock() + defer state.Unlock() + + state.local = true + + if state.torrent == nil && state.info.TorrentFileExists() { + state.torrent, _ = u.torrentFiles.LoadByName(f) + if state.torrent != nil { + state.localHash = state.torrent.InfoHash.String() + } + } + + if !state.remote { + processList = append(processList, state) + } + }() + } + } + + var torrentList []*uploadState + + for _, state := range processList { + func() { + state.Lock() + defer state.Unlock() + if !(state.torrent != nil || state.buildingTorrent) { + torrentList = append(torrentList, state) + state.buildingTorrent = true + } + }() + } + + if len(torrentList) > 0 { + g, gctx := errgroup.WithContext(ctx) + g.SetLimit(runtime.GOMAXPROCS(-1) * 4) + var i atomic.Int32 + + go func() { + logEvery := time.NewTicker(20 * time.Second) + defer logEvery.Stop() + + for int(i.Load()) < len(torrentList) { + select { + case <-gctx.Done(): + return + case <-logEvery.C: + if int(i.Load()) == len(torrentList) { + return + } + log.Info("[snapshot uploader] Creating .torrent files", "progress", fmt.Sprintf("%d/%d", i.Load(), len(torrentList))) + } + } + }() + + for _, s := range torrentList { + state := s + + g.Go(func() error { + defer i.Add(1) + + err := downloader.BuildTorrentIfNeed(gctx, state.file, u.cfg.dirs.Snap, u.torrentFiles) + + state.Lock() + state.buildingTorrent = false + state.Unlock() + + if err != nil { + return err + } + + torrent, err := u.torrentFiles.LoadByName(state.file) + + if err != nil { + return err + } + + state.Lock() + state.torrent = torrent + 
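`removeBefore` above has to delete a segment's whole family, not just the `.seg` file: its `.torrent`, the `.idx` sharing its stem, and for transaction segments the extra `-to-block.idx`. The sibling cleanup on its own (the suffix rules mirror the diff; paths are illustrative):

```go
package sketch

import (
	"os"
	"path/filepath"
	"strings"
)

// removeSegmentFamily deletes a snapshot segment together with every side
// file derived from its name. Errors are deliberately ignored, as in the
// uploader: a missing sibling is not a failure.
func removeSegmentFamily(segPath string) {
	_ = os.Remove(segPath)
	_ = os.Remove(segPath + ".torrent")

	stem := strings.TrimSuffix(segPath, filepath.Ext(segPath))
	_ = os.Remove(stem + ".idx")

	// transaction segments carry a second index mapping tx id -> block
	if strings.HasSuffix(stem, "transactions") {
		_ = os.Remove(stem + "-to-block.idx")
	}
}
```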
state.Unlock() + + state.localHash = state.torrent.InfoHash.String() + + logger.Info("[snapshot uploader] built torrent", "file", state.file, "hash", state.localHash) + + return nil + }) + } + + if err := g.Wait(); err != nil { + logger.Debug(".torrent file creation failed", "err", err) + } + } + + var f atomic.Int32 + + var uploadList []*uploadState + + for _, state := range processList { + err := func() error { + state.Lock() + defer state.Unlock() + if !state.remote && state.torrent != nil && len(state.uploads) == 0 && u.rclone != nil { + state.uploads = []string{state.file, state.file + ".torrent"} + uploadList = append(uploadList, state) + } + + return nil + }() + + if err != nil { + logger.Debug("upload failed", "file", state.file, "err", err) + } + } + + if len(uploadList) > 0 { + log.Info("[snapshot uploader] Starting upload", "count", len(uploadList)) + + g, gctx := errgroup.WithContext(ctx) + g.SetLimit(16) + var i atomic.Int32 + + go func() { + logEvery := time.NewTicker(20 * time.Second) + defer logEvery.Stop() + + for int(i.Load()) < len(processList) { + select { + case <-gctx.Done(): + log.Info("[snapshot uploader] Uploaded files", "processed", fmt.Sprintf("%d/%d/%d", i.Load(), len(processList), f.Load())) + return + case <-logEvery.C: + if int(i.Load()+f.Load()) == len(processList) { + return + } + log.Info("[snapshot uploader] Uploading files", "progress", fmt.Sprintf("%d/%d/%d", i.Load(), len(processList), f.Load())) + } + } + }() + + for _, s := range uploadList { + state := s + func() { + state.Lock() + defer state.Unlock() + + g.Go(func() error { + defer i.Add(1) + defer func() { + state.Lock() + state.uploads = nil + state.Unlock() + }() + + if err := u.uploadSession.Upload(gctx, state.uploads...); err != nil { + f.Add(1) + return nil + } + + uploadCount++ + + state.Lock() + state.remote = true + state.hasRemoteTorrent = true + state.Unlock() + return nil + }) + }() + } + + if err := g.Wait(); err != nil { + logger.Debug("[snapshot uploader] upload failed", "err", err) + } + } + + if f.Load() == 0 { + break + } + + time.Sleep(retryTime) + + if retryTime < maxRetryTime { + retryTime += retryTime + } else { + retryTime = maxRetryTime + } + } + + var err error + + if uploadCount > 0 { + err = u.uploadManifest(ctx, false) + } + + if err == nil { + if maxUploaded := u.maxUploadedHeader(); u.cfg.syncConfig.FrozenBlockLimit > 0 && maxUploaded > u.cfg.syncConfig.FrozenBlockLimit { + u.removeBefore(maxUploaded - u.cfg.syncConfig.FrozenBlockLimit) + } + } +} diff --git a/eth/stagedsync/stagedsynctest/chain_configs.go b/eth/stagedsync/stagedsynctest/chain_configs.go new file mode 100644 index 00000000000..99b81e24442 --- /dev/null +++ b/eth/stagedsync/stagedsynctest/chain_configs.go @@ -0,0 +1,20 @@ +package stagedsynctest + +import ( + "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/erigon/params" +) + +func BorDevnetChainConfigWithNoBlockSealDelays() *chain.Config { + // take care not to mutate global var (shallow copy) + chainConfigCopy := *params.BorDevnetChainConfig + borConfigCopy := *chainConfigCopy.Bor + borConfigCopy.Period = map[string]uint64{ + "0": 0, + } + borConfigCopy.ProducerDelay = map[string]uint64{ + "0": 0, + } + chainConfigCopy.Bor = &borConfigCopy + return &chainConfigCopy +} diff --git a/eth/stagedsync/stagedsynctest/harness.go b/eth/stagedsync/stagedsynctest/harness.go new file mode 100644 index 00000000000..0b3100d8022 --- /dev/null +++ b/eth/stagedsync/stagedsynctest/harness.go @@ -0,0 +1,595 @@ +package stagedsynctest + +import ( + 
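The retry loop in `upload` above doubles its delay after each failed round, aiming at a 300s ceiling; as written it doubles before capping, so one 480s sleep can slip through (30, 60, 120, 240, 480, then 300 thereafter). A sketch of the schedule with the cap applied at the boundary instead:

```go
package main

import (
	"fmt"
	"time"
)

// backoff yields the uploader's retry delay for a given attempt: start at
// 30s, double each round, and clamp to 300s.
func backoff(attempt int) time.Duration {
	d := 30 * time.Second
	const ceiling = 300 * time.Second
	for i := 0; i < attempt && d < ceiling; i++ {
		d *= 2
	}
	if d > ceiling {
		d = ceiling
	}
	return d
}

func main() {
	for i := 0; i < 6; i++ {
		fmt.Println(i, backoff(i)) // 30s 1m 2m 4m 5m 5m
	}
}
```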
"context" + "crypto/ecdsa" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "math/big" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/holiman/uint256" + "github.com/ledgerwatch/log/v3" + "github.com/stretchr/testify/require" + + "github.com/ledgerwatch/erigon-lib/chain" + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/ledgerwatch/erigon/consensus" + "github.com/ledgerwatch/erigon/consensus/bor" + "github.com/ledgerwatch/erigon/consensus/bor/clerk" + "github.com/ledgerwatch/erigon/consensus/bor/contract" + heimdallmock "github.com/ledgerwatch/erigon/consensus/bor/heimdall/mock" + "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" + bormock "github.com/ledgerwatch/erigon/consensus/bor/mock" + "github.com/ledgerwatch/erigon/consensus/bor/valset" + consensusmock "github.com/ledgerwatch/erigon/consensus/mock" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/eth/stagedsync" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/turbo/stages/mock" + "github.com/ledgerwatch/erigon/turbo/testlog" +) + +func InitHarness(ctx context.Context, t *testing.T, cfg HarnessCfg) Harness { + logger := testlog.Logger(t, cfg.LogLvl) + genesisInit := createGenesisInitData(t, cfg.ChainConfig) + m := mock.MockWithGenesis(t, genesisInit.genesis, genesisInit.genesisAllocPrivateKey, false) + chainDataDB := m.DB + blockReader := m.BlockReader + borConsensusDB := memdb.NewTestDB(t) + ctrl := gomock.NewController(t) + heimdallClient := heimdallmock.NewMockIHeimdallClient(ctrl) + bhCfg := stagedsync.StageBorHeimdallCfg( + chainDataDB, + borConsensusDB, + stagedsync.NewProposingState(ðconfig.Defaults.Miner), + *cfg.ChainConfig, + heimdallClient, + blockReader, + nil, // headerDownloader + nil, // penalize + nil, // not used + nil, // not used + nil, + ) + stateSyncStages := stagedsync.DefaultStages( + ctx, + stagedsync.SnapshotsCfg{}, + stagedsync.HeadersCfg{}, + bhCfg, + stagedsync.BlockHashesCfg{}, + stagedsync.BodiesCfg{}, + stagedsync.SendersCfg{}, + stagedsync.ExecuteBlockCfg{}, + stagedsync.HashStateCfg{}, + stagedsync.TrieCfg{}, + stagedsync.HistoryCfg{}, + stagedsync.LogIndexCfg{}, + stagedsync.CallTracesCfg{}, + stagedsync.TxLookupCfg{}, + stagedsync.FinishCfg{}, + true, + ) + stateSync := stagedsync.New(ethconfig.Defaults.Sync, stateSyncStages, stagedsync.DefaultUnwindOrder, stagedsync.DefaultPruneOrder, logger) + validatorKey, err := crypto.GenerateKey() + require.NoError(t, err) + validatorAddress := crypto.PubkeyToAddress(validatorKey.PublicKey) + h := Harness{ + logger: logger, + chainDataDB: chainDataDB, + borConsensusDB: borConsensusDB, + chainConfig: cfg.ChainConfig, + blockReader: blockReader, + stateSyncStages: stateSyncStages, + stateSync: stateSync, + bhCfg: bhCfg, + heimdallClient: heimdallClient, + heimdallProducersOverride: cfg.GetOrCreateDefaultHeimdallProducersOverride(), + sealedHeaders: make(map[uint64]*types.Header), + borSpanner: bormock.NewMockSpanner(ctrl), + validatorAddress: validatorAddress, + validatorKey: validatorKey, + genesisInitData: genesisInit, + } + + if cfg.ChainConfig.Bor != nil { + h.setHeimdallNextMockSpan(logger) + h.mockBorSpanner() + 
h.mockHeimdallClient() + } + + h.generateChain(ctx, t, ctrl, cfg) + + return h +} + +type genesisInitData struct { + genesis *types.Genesis + genesisAllocPrivateKey *ecdsa.PrivateKey + genesisAllocPrivateKeys map[libcommon.Address]*ecdsa.PrivateKey + fundedAddresses []libcommon.Address +} + +type HarnessCfg struct { + ChainConfig *chain.Config + GenerateChainNumBlocks int + LogLvl log.Lvl + HeimdallProducersOverride map[uint64][]valset.Validator +} + +func (hc *HarnessCfg) GetOrCreateDefaultHeimdallProducersOverride() map[uint64][]valset.Validator { + if hc.HeimdallProducersOverride == nil { + hc.HeimdallProducersOverride = map[uint64][]valset.Validator{} + } + + return hc.HeimdallProducersOverride +} + +type Harness struct { + logger log.Logger + chainDataDB kv.RwDB + borConsensusDB kv.RwDB + chainConfig *chain.Config + blockReader services.BlockReader + stateSyncStages []*stagedsync.Stage + stateSync *stagedsync.Sync + bhCfg stagedsync.BorHeimdallCfg + heimdallClient *heimdallmock.MockIHeimdallClient + heimdallNextMockSpan *span.HeimdallSpan + heimdallLastEventID uint64 + heimdallLastEventHeaderNum uint64 + heimdallProducersOverride map[uint64][]valset.Validator // spanID -> selected producers override + sealedHeaders map[uint64]*types.Header + borSpanner *bormock.MockSpanner + validatorAddress libcommon.Address + validatorKey *ecdsa.PrivateKey + genesisInitData *genesisInitData +} + +func (h *Harness) Logger() log.Logger { + return h.logger +} + +func (h *Harness) SaveStageProgress(ctx context.Context, t *testing.T, stageID stages.SyncStage, progress uint64) { + rwTx, err := h.chainDataDB.BeginRw(ctx) + require.NoError(t, err) + defer rwTx.Rollback() + + err = stages.SaveStageProgress(rwTx, stageID, progress) + require.NoError(t, err) + err = rwTx.Commit() + require.NoError(t, err) +} + +func (h *Harness) GetStageProgress(ctx context.Context, t *testing.T, stageID stages.SyncStage) uint64 { + roTx, err := h.chainDataDB.BeginRo(ctx) + require.NoError(t, err) + defer roTx.Rollback() + + progress, err := stages.GetStageProgress(roTx, stageID) + require.NoError(t, err) + return progress +} + +func (h *Harness) StateSyncUnwindPoint() uint64 { + return h.stateSync.UnwindPoint() +} + +func (h *Harness) StateSyncUnwindReason() stagedsync.UnwindReason { + return h.stateSync.UnwindReason() +} + +func (h *Harness) RunStageForward(t *testing.T, id stages.SyncStage) { + h.RunStageForwardWithErrorIs(t, id, nil) +} + +func (h *Harness) RunStageForwardWithErrorIs(t *testing.T, id stages.SyncStage, wantErr error) { + err := h.RunStageForwardWithReturnError(t, id) + require.ErrorIs(t, err, wantErr) +} + +func (h *Harness) RunStageForwardWithReturnError(t *testing.T, id stages.SyncStage) error { + err := h.stateSync.SetCurrentStage(id) + require.NoError(t, err) + + stage, found := h.findStateSyncStageByID(id) + require.True(t, found) + + stageState, err := h.stateSync.StageState(id, nil, h.chainDataDB) + require.NoError(t, err) + + return stage.Forward(true, false, stageState, h.stateSync, nil, h.logger) +} + +func (h *Harness) ReadSpansFromDB(ctx context.Context) (spans []*span.HeimdallSpan, err error) { + err = h.chainDataDB.View(ctx, func(tx kv.Tx) error { + spanIter, err := tx.Range(kv.BorSpans, nil, nil) + if err != nil { + return err + } + + for spanIter.HasNext() { + keyBytes, spanBytes, err := spanIter.Next() + if err != nil { + return err + } + + spanKey := binary.BigEndian.Uint64(keyBytes) + var heimdallSpan span.HeimdallSpan + if err = json.Unmarshal(spanBytes, &heimdallSpan); err != nil { + 
return err + } + + if spanKey != heimdallSpan.ID { + return fmt.Errorf("span key and id mismatch %d!=%d", spanKey, heimdallSpan.ID) + } + + spans = append(spans, &heimdallSpan) + } + + return nil + }) + if err != nil { + return nil, err + } + + return spans, nil +} + +func (h *Harness) ReadStateSyncEventsFromDB(ctx context.Context) (eventIDs []uint64, err error) { + err = h.chainDataDB.View(ctx, func(tx kv.Tx) error { + eventsIter, err := tx.Range(kv.BorEvents, nil, nil) + if err != nil { + return err + } + + for eventsIter.HasNext() { + keyBytes, _, err := eventsIter.Next() + if err != nil { + return err + } + + eventIDs = append(eventIDs, binary.BigEndian.Uint64(keyBytes)) + } + + return nil + }) + if err != nil { + return nil, err + } + + return eventIDs, nil +} + +func (h *Harness) ReadFirstStateSyncEventNumPerBlockFromDB(ctx context.Context) (nums map[uint64]uint64, err error) { + nums = map[uint64]uint64{} + err = h.chainDataDB.View(ctx, func(tx kv.Tx) error { + eventNumsIter, err := tx.Range(kv.BorEventNums, nil, nil) + if err != nil { + return err + } + + for eventNumsIter.HasNext() { + blockNumBytes, firstEventNumBytes, err := eventNumsIter.Next() + if err != nil { + return err + } + + blockNum := binary.BigEndian.Uint64(blockNumBytes) + firstEventNum := binary.BigEndian.Uint64(firstEventNumBytes) + nums[blockNum] = firstEventNum + } + + return nil + }) + if err != nil { + return nil, err + } + + return nums, nil +} + +func (h *Harness) ReadHeaderByNumber(ctx context.Context, number uint64) (header *types.Header, err error) { + err = h.chainDataDB.View(ctx, func(tx kv.Tx) error { + header = rawdb.ReadHeaderByNumber(tx, number) + if header == nil { + return errors.New("header not found by harness") + } + + return nil + }) + + return +} + +func createGenesisInitData(t *testing.T, chainConfig *chain.Config) *genesisInitData { + t.Helper() + accountPrivateKey, err := crypto.GenerateKey() + require.NoError(t, err) + accountAddress := crypto.PubkeyToAddress(accountPrivateKey.PublicKey) + + return &genesisInitData{ + genesisAllocPrivateKey: accountPrivateKey, + genesis: &types.Genesis{ + Config: chainConfig, + Alloc: types.GenesisAlloc{ + accountAddress: { + Balance: new(big.Int).Exp(big.NewInt(1_000), big.NewInt(18), nil), + }, + }, + }, + genesisAllocPrivateKeys: map[libcommon.Address]*ecdsa.PrivateKey{ + accountAddress: accountPrivateKey, + }, + fundedAddresses: []libcommon.Address{ + accountAddress, + }, + } +} + +func (h *Harness) generateChain(ctx context.Context, t *testing.T, ctrl *gomock.Controller, cfg HarnessCfg) { + consensusEngine := h.consensusEngine(t, cfg) + var parentBlock *types.Block + err := h.chainDataDB.View(ctx, func(tx kv.Tx) (err error) { + parentBlock, err = h.blockReader.BlockByNumber(ctx, tx, 0) + return err + }) + require.NoError(t, err) + h.sealedHeaders[parentBlock.Number().Uint64()] = parentBlock.Header() + mockChainHR := h.mockChainHeaderReader(ctrl) + + chainPack, err := core.GenerateChain( + h.chainConfig, + parentBlock, + consensusEngine, + h.chainDataDB, + cfg.GenerateChainNumBlocks, + func(i int, gen *core.BlockGen) { + // seal parent block first so that we can Prepare the current header + if gen.GetParent().Number().Uint64() > 0 { + h.seal(t, mockChainHR, consensusEngine, gen.GetParent()) + } + + h.logger.Info("Preparing mock header", "headerNum", gen.GetHeader().Number) + gen.GetHeader().ParentHash = h.sealedHeaders[gen.GetParent().Number().Uint64()].Hash() + if err := consensusEngine.Prepare(mockChainHR, gen.GetHeader(), nil); err != nil { + 
t.Fatal(err) + } + + h.logger.Info("Adding 1 mock tx to block", "blockNum", gen.GetHeader().Number) + chainID := uint256.Int{} + overflow := chainID.SetFromBig(h.chainConfig.ChainID) + require.False(t, overflow) + from := h.genesisInitData.fundedAddresses[0] + tx, err := types.SignTx( + types.NewEIP1559Transaction( + chainID, + gen.TxNonce(from), + from, // send to itself + new(uint256.Int), + 21000, + new(uint256.Int), + new(uint256.Int), + uint256.NewInt(937500001), + nil, + ), + *types.LatestSignerForChainID(h.chainConfig.ChainID), + h.genesisInitData.genesisAllocPrivateKeys[from], + ) + require.NoError(t, err) + gen.AddTx(tx) + }, + ) + require.NoError(t, err) + + h.seal(t, mockChainHR, consensusEngine, chainPack.TopBlock) + sealedHeadersList := make([]*types.Header, len(h.sealedHeaders)) + for num, header := range h.sealedHeaders { + sealedHeadersList[num] = header + } + + h.saveHeaders(ctx, t, sealedHeadersList) +} + +func (h *Harness) seal(t *testing.T, chr consensus.ChainHeaderReader, eng consensus.Engine, block *types.Block) { + h.logger.Info("Sealing mock block", "blockNum", block.Number()) + sealRes, sealStop := make(chan *types.Block, 1), make(chan struct{}, 1) + if err := eng.Seal(chr, block, sealRes, sealStop); err != nil { + t.Fatal(err) + } + + sealedParentBlock := <-sealRes + h.sealedHeaders[sealedParentBlock.Number().Uint64()] = sealedParentBlock.Header() +} + +func (h *Harness) consensusEngine(t *testing.T, cfg HarnessCfg) consensus.Engine { + if h.chainConfig.Bor != nil { + genesisContracts := contract.NewGenesisContractsClient( + h.chainConfig, + h.chainConfig.Bor.ValidatorContract, + h.chainConfig.Bor.StateReceiverContract, + h.logger, + ) + + borConsensusEng := bor.New( + h.chainConfig, + h.borConsensusDB, + nil, + h.borSpanner, + h.heimdallClient, + genesisContracts, + h.logger, + ) + + borConsensusEng.Authorize(h.validatorAddress, func(_ libcommon.Address, _ string, msg []byte) ([]byte, error) { + return crypto.Sign(crypto.Keccak256(msg), h.validatorKey) + }) + + return borConsensusEng + } + + t.Fatalf("unimplemented consensus engine init for cfg %v", cfg.ChainConfig) + return nil +} + +func (h *Harness) SaveHeader(ctx context.Context, t *testing.T, header *types.Header) { + h.saveHeaders(ctx, t, []*types.Header{header}) +} + +func (h *Harness) saveHeaders(ctx context.Context, t *testing.T, headers []*types.Header) { + rwTx, err := h.chainDataDB.BeginRw(ctx) + require.NoError(t, err) + defer rwTx.Rollback() + + for _, header := range headers { + err = rawdb.WriteHeader(rwTx, header) + require.NoError(t, err) + + err = rawdb.WriteCanonicalHash(rwTx, header.Hash(), header.Number.Uint64()) + require.NoError(t, err) + } + + err = rwTx.Commit() + require.NoError(t, err) +} + +func (h *Harness) mockChainHeaderReader(ctrl *gomock.Controller) consensus.ChainHeaderReader { + mockChainHR := consensusmock.NewMockChainHeaderReader(ctrl) + mockChainHR. + EXPECT(). + GetHeader(gomock.Any(), gomock.Any()). + DoAndReturn(func(_ libcommon.Hash, number uint64) *types.Header { + return h.sealedHeaders[number] + }). + AnyTimes() + + mockChainHR. + EXPECT(). + GetHeaderByNumber(gomock.Any()). + DoAndReturn(func(number uint64) *types.Header { + return h.sealedHeaders[number] + }). + AnyTimes() + + mockChainHR. + EXPECT(). + FrozenBlocks(). + Return(uint64(0)).
+ AnyTimes() + + return mockChainHR +} + +func (h *Harness) setHeimdallNextMockSpan(logger log.Logger) { + validators := []*valset.Validator{ + { + ID: 1, + Address: h.validatorAddress, + VotingPower: 1000, + ProposerPriority: 1, + }, + } + + validatorSet := valset.NewValidatorSet(validators, logger) + selectedProducers := make([]valset.Validator, len(validators)) + for i := range validators { + selectedProducers[i] = *validators[i] + } + + h.heimdallNextMockSpan = &span.HeimdallSpan{ + Span: span.Span{ + ID: 0, + StartBlock: 0, + EndBlock: 255, + }, + ValidatorSet: *validatorSet, + SelectedProducers: selectedProducers, + } +} + +func (h *Harness) mockBorSpanner() { + h.borSpanner. + EXPECT(). + GetCurrentValidators(gomock.Any(), gomock.Any(), gomock.Any()). + Return(h.heimdallNextMockSpan.ValidatorSet.Validators, nil). + AnyTimes() + + h.borSpanner. + EXPECT(). + GetCurrentProducers(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(_ uint64, _ libcommon.Address, _ consensus.ChainHeaderReader) ([]*valset.Validator, error) { + res := make([]*valset.Validator, len(h.heimdallNextMockSpan.SelectedProducers)) + for i := range h.heimdallNextMockSpan.SelectedProducers { + res[i] = &h.heimdallNextMockSpan.SelectedProducers[i] + } + + return res, nil + }). + AnyTimes() +} + +func (h *Harness) mockHeimdallClient() { + h.heimdallClient. + EXPECT(). + Span(gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, spanID uint64) (*span.HeimdallSpan, error) { + res := h.heimdallNextMockSpan + h.heimdallNextMockSpan = &span.HeimdallSpan{ + Span: span.Span{ + ID: res.ID + 1, + StartBlock: res.EndBlock + 1, + EndBlock: res.EndBlock + 6400, + }, + ValidatorSet: res.ValidatorSet, + SelectedProducers: res.SelectedProducers, + } + + if selectedProducers, ok := h.heimdallProducersOverride[res.ID]; ok { + res.SelectedProducers = selectedProducers + } + + return res, nil + }). + AnyTimes() + + h.heimdallClient. + EXPECT(). + StateSyncEvents(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, _ uint64, _ int64) ([]*clerk.EventRecordWithTime, error) { + h.heimdallLastEventID++ + h.heimdallLastEventHeaderNum += h.chainConfig.Bor.CalculateSprint(h.heimdallLastEventHeaderNum) + stateSyncDelay := h.chainConfig.Bor.CalculateStateSyncDelay(h.heimdallLastEventHeaderNum) + newEvent := clerk.EventRecordWithTime{ + EventRecord: clerk.EventRecord{ + ID: h.heimdallLastEventID, + ChainID: h.chainConfig.ChainID.String(), + }, + Time: time.Unix(int64(h.sealedHeaders[h.heimdallLastEventHeaderNum].Time-stateSyncDelay-1), 0), + } + + // 1 per sprint + return []*clerk.EventRecordWithTime{&newEvent}, nil + }). 
+ AnyTimes() +} + +func (h *Harness) findStateSyncStageByID(id stages.SyncStage) (*stagedsync.Stage, bool) { + for _, s := range h.stateSyncStages { + if s.ID == id { + return s, true + } + } + + return nil, false +} diff --git a/eth/stagedsync/sync.go b/eth/stagedsync/sync.go index 1112fad19bf..cf1cd273fac 100644 --- a/eth/stagedsync/sync.go +++ b/eth/stagedsync/sync.go @@ -5,18 +5,21 @@ import ( "fmt" "time" + "github.com/ledgerwatch/log/v3" + libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/log/v3" - + "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" ) type Sync struct { + cfg ethconfig.Sync unwindPoint *uint64 // used to run stages prevUnwindPoint *uint64 // used to get value from outside of staged sync after cycle (for example to notify RPCDaemon) unwindReason UnwindReason + posTransition *uint64 stages []*Stage unwindOrder []*Stage @@ -34,8 +37,21 @@ type Timing struct { took time.Duration } -func (s *Sync) Len() int { return len(s.stages) } -func (s *Sync) PrevUnwindPoint() *uint64 { return s.prevUnwindPoint } +func (s *Sync) Len() int { + return len(s.stages) +} + +func (s *Sync) UnwindPoint() uint64 { + return *s.unwindPoint +} + +func (s *Sync) UnwindReason() UnwindReason { + return s.unwindReason +} + +func (s *Sync) PrevUnwindPoint() *uint64 { + return s.prevUnwindPoint +} func (s *Sync) NewUnwindState(id stages.SyncStage, unwindPoint, currentProgress uint64) *UnwindState { return &UnwindState{id, unwindPoint, currentProgress, UnwindReason{nil, nil}, s} @@ -138,7 +154,7 @@ func (s *Sync) SetCurrentStage(id stages.SyncStage) error { return fmt.Errorf("stage not found with id: %v", id) } -func New(stagesList []*Stage, unwindOrder UnwindOrder, pruneOrder PruneOrder, logger log.Logger) *Sync { +func New(cfg ethconfig.Sync, stagesList []*Stage, unwindOrder UnwindOrder, pruneOrder PruneOrder, logger log.Logger) *Sync { unwindStages := make([]*Stage, len(stagesList)) for i, stageIndex := range unwindOrder { for _, s := range stagesList { @@ -163,6 +179,7 @@ func New(stagesList []*Stage, unwindOrder UnwindOrder, pruneOrder PruneOrder, lo } return &Sync{ + cfg: cfg, stages: stagesList, currentStage: 0, unwindOrder: unwindStages, @@ -269,6 +286,11 @@ func (s *Sync) RunNoInterrupt(db kv.RwDB, tx kv.RwTx, firstCycle bool) error { return libcommon.ErrStopped } + if string(stage.ID) == s.cfg.BreakAfterStage { // break process loop + s.logger.Warn("--sync.loop.break caused stage break") + break + } + s.NextStage() } @@ -280,10 +302,12 @@ func (s *Sync) RunNoInterrupt(db kv.RwDB, tx kv.RwTx, firstCycle bool) error { return nil } -func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool) error { +func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool) (bool, error) { s.prevUnwindPoint = nil s.timings = s.timings[:0] + hasMore := false + for !s.IsDone() { var badBlockUnwind bool if s.unwindPoint != nil { @@ -292,7 +316,7 @@ func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool) error { continue } if err := s.unwindStage(firstCycle, s.unwindOrder[j], db, tx); err != nil { - return err + return false, err } } s.prevUnwindPoint = s.unwindPoint @@ -302,7 +326,7 @@ func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool) error { } s.unwindReason = UnwindReason{} if err := s.SetCurrentStage(s.stages[0].ID); err != nil { - return err + return false, err } // If there were unwinds at the start, a heavier but invalid chain may 
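A test built on the harness above would look roughly like this — a hedged sketch: the constructors and accessors are from this file, but the progress values and assertions are illustrative, and `stages.BorHeimdall` is assumed to be the stage under test on this branch:

```go
package stagedsynctest_test

import (
	"context"
	"testing"

	"github.com/ledgerwatch/log/v3"
	"github.com/stretchr/testify/require"

	"github.com/ledgerwatch/erigon/eth/stagedsync/stagedsynctest"
	"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
)

func TestBorHeimdallForward(t *testing.T) {
	ctx := context.Background()

	h := stagedsynctest.InitHarness(ctx, t, stagedsynctest.HarnessCfg{
		ChainConfig:            stagedsynctest.BorDevnetChainConfigWithNoBlockSealDelays(),
		GenerateChainNumBlocks: 32,
		LogLvl:                 log.LvlInfo,
	})

	// pretend headers are already synced, then run the stage under test
	h.SaveStageProgress(ctx, t, stages.Headers, 32)
	h.RunStageForward(t, stages.BorHeimdall)

	spans, err := h.ReadSpansFromDB(ctx)
	require.NoError(t, err)
	require.NotEmpty(t, spans)
}
```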
be present, so // we relax the rules for Stage1 @@ -318,7 +342,7 @@ func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool) error { if string(stage.ID) == dbg.StopBeforeStage() { // stop process for debugging reasons s.logger.Warn("STOP_BEFORE_STAGE env flag forced to stop app") - return libcommon.ErrStopped + return false, libcommon.ErrStopped } if stage.Disabled || stage.Forward == nil { @@ -329,23 +353,46 @@ func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool) error { } if err := s.runStage(stage, db, tx, firstCycle, badBlockUnwind); err != nil { - return err + return false, err } if string(stage.ID) == dbg.StopAfterStage() { // stop process for debugging reasons s.logger.Warn("STOP_AFTER_STAGE env flag forced to stop app") - return libcommon.ErrStopped + return false, libcommon.ErrStopped + } + + if string(stage.ID) == s.cfg.BreakAfterStage { // break process loop + s.logger.Warn("--sync.loop.break caused stage break") + if s.posTransition != nil { + ptx := tx + + if ptx == nil { + if tx, err := db.BeginRw(context.Background()); err == nil { + ptx = tx + defer tx.Rollback() + } + } + + if ptx != nil { + if progress, err := stages.GetStageProgress(ptx, stage.ID); err == nil { + hasMore = progress < *s.posTransition + } + } + } else { + hasMore = true + } + break } s.NextStage() } if err := s.SetCurrentStage(s.stages[0].ID); err != nil { - return err + return false, err } s.currentStage = 0 - return nil + return hasMore, nil } func (s *Sync) RunPrune(db kv.RwDB, tx kv.RwTx, firstCycle bool) error { diff --git a/eth/stagedsync/sync_test.go b/eth/stagedsync/sync_test.go index 8992c31c5a0..48a6c475478 100644 --- a/eth/stagedsync/sync_test.go +++ b/eth/stagedsync/sync_test.go @@ -10,6 +10,7 @@ import ( "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/assert" + "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" ) @@ -41,9 +42,9 @@ func TestStagesSuccess(t *testing.T) { }, }, } - state := New(s, nil, nil, log.New()) + state := New(ethconfig.Defaults.Sync, s, nil, nil, log.New()) db, tx := memdb.NewTestTx(t) - err := state.Run(db, tx, true /* initialCycle */) + _, err := state.Run(db, tx, true /* initialCycle */) assert.NoError(t, err) expectedFlow := []stages.SyncStage{ @@ -81,9 +82,9 @@ func TestDisabledStages(t *testing.T) { }, }, } - state := New(s, nil, nil, log.New()) + state := New(ethconfig.Defaults.Sync, s, nil, nil, log.New()) db, tx := memdb.NewTestTx(t) - err := state.Run(db, tx, true /* initialCycle */) + _, err := state.Run(db, tx, true /* initialCycle */) assert.NoError(t, err) expectedFlow := []stages.SyncStage{ @@ -121,9 +122,9 @@ func TestErroredStage(t *testing.T) { }, }, } - state := New(s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil, log.New()) + state := New(ethconfig.Defaults.Sync, s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil, log.New()) db, tx := memdb.NewTestTx(t) - err := state.Run(db, tx, true /* initialCycle */) + _, err := state.Run(db, tx, true /* initialCycle */) assert.Equal(t, fmt.Errorf("[2/3 Bodies] %w", expectedErr), err) expectedFlow := []stages.SyncStage{ @@ -204,9 +205,9 @@ func TestUnwindSomeStagesBehindUnwindPoint(t *testing.T) { }, }, } - state := New(s, []stages.SyncStage{s[3].ID, s[2].ID, s[1].ID, s[0].ID}, nil, log.New()) + state := New(ethconfig.Defaults.Sync, s, []stages.SyncStage{s[3].ID, s[2].ID, s[1].ID, s[0].ID}, nil, log.New()) db, tx := memdb.NewTestTx(t) - err := state.Run(db, tx, true /* initialCycle */) + _, err := state.Run(db, tx, true /* initialCycle 
*/) assert.NoError(t, err) expectedFlow := []stages.SyncStage{ @@ -297,9 +298,9 @@ func TestUnwind(t *testing.T) { }, }, } - state := New(s, []stages.SyncStage{s[3].ID, s[2].ID, s[1].ID, s[0].ID}, nil, log.New()) + state := New(ethconfig.Defaults.Sync, s, []stages.SyncStage{s[3].ID, s[2].ID, s[1].ID, s[0].ID}, nil, log.New()) db, tx := memdb.NewTestTx(t) - err := state.Run(db, tx, true /* initialCycle */) + _, err := state.Run(db, tx, true /* initialCycle */) assert.NoError(t, err) expectedFlow := []stages.SyncStage{ @@ -326,7 +327,7 @@ func TestUnwind(t *testing.T) { flow = flow[:0] state.unwindOrder = []*Stage{s[3], s[2], s[1], s[0]} state.UnwindTo(100, UnwindReason{}) - err = state.Run(db, tx, true /* initialCycle */) + _, err = state.Run(db, tx, true /* initialCycle */) assert.NoError(t, err) expectedFlow = []stages.SyncStage{ @@ -386,9 +387,9 @@ func TestUnwindEmptyUnwinder(t *testing.T) { }, }, } - state := New(s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil, log.New()) + state := New(ethconfig.Defaults.Sync, s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil, log.New()) db, tx := memdb.NewTestTx(t) - err := state.Run(db, tx, true /* initialCycle */) + _, err := state.Run(db, tx, true /* initialCycle */) assert.NoError(t, err) expectedFlow := []stages.SyncStage{ @@ -442,13 +443,13 @@ func TestSyncDoTwice(t *testing.T) { }, } - state := New(s, nil, nil, log.New()) + state := New(ethconfig.Defaults.Sync, s, nil, nil, log.New()) db, tx := memdb.NewTestTx(t) - err := state.Run(db, tx, true /* initialCycle */) + _, err := state.Run(db, tx, true /* initialCycle */) assert.NoError(t, err) - state = New(s, nil, nil, log.New()) - err = state.Run(db, tx, true /* initialCycle */) + state = New(ethconfig.Defaults.Sync, s, nil, nil, log.New()) + _, err = state.Run(db, tx, true /* initialCycle */) assert.NoError(t, err) expectedFlow := []stages.SyncStage{ @@ -500,15 +501,15 @@ func TestStateSyncInterruptRestart(t *testing.T) { }, } - state := New(s, nil, nil, log.New()) + state := New(ethconfig.Defaults.Sync, s, nil, nil, log.New()) db, tx := memdb.NewTestTx(t) - err := state.Run(db, tx, true /* initialCycle */) + _, err := state.Run(db, tx, true /* initialCycle */) assert.Equal(t, fmt.Errorf("[2/3 Bodies] %w", expectedErr), err) expectedErr = nil - state = New(s, nil, nil, log.New()) - err = state.Run(db, tx, true /* initialCycle */) + state = New(ethconfig.Defaults.Sync, s, nil, nil, log.New()) + _, err = state.Run(db, tx, true /* initialCycle */) assert.NoError(t, err) expectedFlow := []stages.SyncStage{ @@ -579,9 +580,9 @@ func TestSyncInterruptLongUnwind(t *testing.T) { }, }, } - state := New(s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil, log.New()) + state := New(ethconfig.Defaults.Sync, s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil, log.New()) db, tx := memdb.NewTestTx(t) - err := state.Run(db, tx, true /* initialCycle */) + _, err := state.Run(db, tx, true /* initialCycle */) assert.Error(t, errInterrupted, err) //state = NewState(s) @@ -589,7 +590,7 @@ func TestSyncInterruptLongUnwind(t *testing.T) { //err = state.LoadUnwindInfo(tx) //assert.NoError(t, err) //state.UnwindTo(500, libcommon.Hash{}) - err = state.Run(db, tx, true /* initialCycle */) + _, err = state.Run(db, tx, true /* initialCycle */) assert.NoError(t, err) expectedFlow := []stages.SyncStage{ diff --git a/go.mod b/go.mod index 306eb4dd623..c6951f2f5ff 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,6 @@ go 1.20 require ( github.com/erigontech/mdbx-go v0.27.21 github.com/erigontech/silkworm-go v0.10.0 - 
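`Run` returning `(hasMore, error)`, as exercised by the test updates above, lets the caller tell a finished cycle apart from one stopped early by `--sync.loop.break` with work still pending before the PoS transition. The caller-side shape this implies (a sketch; the real sync driver carries more state):

```go
package sketch

import (
	"context"

	"github.com/ledgerwatch/erigon-lib/kv"
	"github.com/ledgerwatch/erigon/eth/stagedsync"
)

// driveSync keeps running staged-sync cycles while Run reports pending work.
func driveSync(ctx context.Context, db kv.RwDB, stagedSync *stagedsync.Sync) error {
	firstCycle := true
	for {
		hasMore, err := stagedSync.Run(db, nil, firstCycle)
		if err != nil {
			return err
		}
		firstCycle = false

		if !hasMore {
			return nil // break stage reached with nothing left to do
		}
		if err := ctx.Err(); err != nil {
			return err // cancelled between cycles
		}
	}
}
```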
github.com/ledgerwatch/erigon-lib v1.0.0 github.com/ledgerwatch/log/v3 v3.9.0 github.com/ledgerwatch/secp256k1 v1.0.0 ) @@ -37,6 +36,7 @@ require ( github.com/emicklei/dot v1.6.0 github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c github.com/gballet/go-verkle v0.0.0-20221121182333-31427a1f2d35 + github.com/gfx-labs/sse v0.0.0-20231226060816-f747e26a9baa github.com/go-chi/chi/v5 v5.0.10 github.com/goccy/go-json v0.9.11 github.com/gofrs/flock v0.8.1 @@ -57,6 +57,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/klauspost/compress v1.17.3 + github.com/ledgerwatch/erigon-lib v1.0.0 github.com/libp2p/go-libp2p v0.31.0 github.com/libp2p/go-libp2p-mplex v0.9.0 github.com/libp2p/go-libp2p-pubsub v0.9.3 @@ -66,7 +67,6 @@ require ( github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 github.com/pelletier/go-toml v1.9.5 github.com/pelletier/go-toml/v2 v2.1.0 - github.com/pierrec/lz4 v2.6.1+incompatible github.com/pion/randutil v0.1.0 github.com/pion/stun v0.6.0 github.com/protolambda/ztyp v0.2.2 @@ -87,7 +87,7 @@ require ( github.com/vektah/gqlparser/v2 v2.5.10 github.com/xsleonard/go-merkle v1.1.0 go.uber.org/zap v1.26.0 - golang.org/x/crypto v0.16.0 + golang.org/x/crypto v0.17.0 golang.org/x/exp v0.0.0-20230905200255-921286631fa9 golang.org/x/net v0.19.0 golang.org/x/sync v0.5.0 @@ -186,7 +186,7 @@ require ( github.com/koron/go-ssdp v0.0.4 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect - github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20231210032908-6ff6f4c91c60 // indirect + github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240101230756-23fbc6c56a1d // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect @@ -252,7 +252,7 @@ require ( github.com/raulk/go-watchdog v1.3.0 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rivo/uniseg v0.2.0 // indirect - github.com/rogpeppe/go-internal v1.10.0 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sirupsen/logrus v1.9.0 // indirect @@ -270,6 +270,7 @@ require ( golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.16.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect lukechampine.com/blake3 v1.2.1 // indirect lukechampine.com/uint128 v1.3.0 // indirect diff --git a/go.sum b/go.sum index 553a8e7d8f8..2f82abc2cc8 100644 --- a/go.sum +++ b/go.sum @@ -309,6 +309,8 @@ github.com/garslo/gogen v0.0.0-20170307003452-d6ebae628c7c h1:uYNKzPntb8c6DKvP9E github.com/garslo/gogen v0.0.0-20170307003452-d6ebae628c7c/go.mod h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8= github.com/gballet/go-verkle v0.0.0-20221121182333-31427a1f2d35 h1:I8QswD9gf3VEpr7bpepKKOm7ChxFITIG+oc1I5/S0no= github.com/gballet/go-verkle v0.0.0-20221121182333-31427a1f2d35/go.mod h1:DMDd04jjQgdynaAwbEgiRERIGpC8fDjx0+y06an7Psg= +github.com/gfx-labs/sse v0.0.0-20231226060816-f747e26a9baa h1:b6fBm4SLM8jywQHNmc3ZCl6zQEhEyZl6bp7is4en72M= +github.com/gfx-labs/sse v0.0.0-20231226060816-f747e26a9baa/go.mod h1:K0FMPjMrIaS1+/SeZeOVkGVjDVERZJW53inQL00FjLE= github.com/ghodss/yaml v1.0.0/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= @@ -540,8 +542,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20231210032908-6ff6f4c91c60 h1:bsZ6XWPJkNp1DeVHkaX9/+/Tqg7+r5/IkRPlyc4Ztq4= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20231210032908-6ff6f4c91c60/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240101230756-23fbc6c56a1d h1:rMqDEGLdmVgGdpDmaNp4Do1vc9BtUQ3rjFD9gQBRSx0= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240101230756-23fbc6c56a1d/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/log/v3 v3.9.0 h1:iDwrXe0PVwBC68Dd94YSsHbMgQ3ufsgjzXtFNFVZFRk= github.com/ledgerwatch/log/v3 v3.9.0/go.mod h1:EiAY6upmI/6LkNhOVxb4eVsmsP11HZCnZ3PlJMjYiqE= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= @@ -683,8 +685,6 @@ github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6 github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= -github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pion/datachannel v1.5.2 h1:piB93s8LGmbECrpO84DnkIVWasRMk3IimbcXkTQLE6E= github.com/pion/datachannel v1.5.2/go.mod h1:FTGQWaHrdCwIJ1rw6xBIfZVkslikjShim5yr05XFuCQ= github.com/pion/dtls/v2 v2.1.3/go.mod h1:o6+WvyLDAlXF7YiPB/RlskRoeK+/JtuaZa5emwQcWus= @@ -791,8 +791,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 h1:Lt9DzQALzHoDwMBGJ6v8ObDPR0dzr2a6sXTB1Fq7IHs= @@ -954,8 +954,9 @@ golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220516162934-403b01795ae8/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod 
h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= -golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -995,6 +996,7 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1050,6 +1052,7 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1149,6 +1152,7 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= @@ -1156,6 +1160,8 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1351,6 +1357,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/cenkalti/backoff.v1 v1.1.0 h1:Arh75ttbsvlpVA7WtVpH4u9h6Zl46xuptxqLxPiSo4Y= +gopkg.in/cenkalti/backoff.v1 v1.1.0/go.mod h1:J6Vskwqd+OMVJl8C33mmtxTBs2gyzfv7UDAkHu8BrjI= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/migrations/migrations.go b/migrations/migrations.go index 9f734d1ac9e..7bcd4824d5e 100644 --- a/migrations/migrations.go +++ b/migrations/migrations.go @@ -4,9 +4,10 @@ import ( "bytes" "context" "fmt" - "github.com/ledgerwatch/erigon-lib/common" "path/filepath" + + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/core/rawdb" @@ -35,6 +36,7 @@ var migrations = map[kv.Label][]Migration{ dbSchemaVersion5, TxsBeginEnd, TxsV3, + ProhibitNewDownloadsLock, }, kv.TxPoolDB: {}, kv.SentryDB: {}, diff --git a/migrations/prohibit_new_downloads_lock.go b/migrations/prohibit_new_downloads_lock.go new file mode 100644 index 00000000000..77dcc27fb5a --- /dev/null +++ b/migrations/prohibit_new_downloads_lock.go @@ -0,0 +1,49 @@ +package migrations + +import ( + "context" + "os" + "path/filepath" + + "github.com/ledgerwatch/erigon-lib/common/datadir" + "github.com/ledgerwatch/erigon-lib/common/dir" + "github.com/ledgerwatch/erigon-lib/downloader" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/log/v3" +) + +var ProhibitNewDownloadsLock = Migration{ + Name: "prohibit_new_downloads_lock", + Up: func(db kv.RwDB, dirs datadir.Dirs, progress []byte, BeforeCommit Callback, logger log.Logger) (err error) { + tx, err := db.BeginRw(context.Background()) + if err != nil { + return err + } + defer tx.Rollback() + + snapshotsStageProgress, err := stages.GetStageProgress(tx, stages.Snapshots) + if err != nil { + return err + } + if snapshotsStageProgress > 0 { + fPath := filepath.Join(dirs.Snap, downloader.ProhibitNewDownloadsFileName) + if !dir.FileExist(fPath) { + f, err := os.Create(fPath) + if err != nil { + return err + } + defer f.Close() + if err := f.Sync(); err != nil { + return err + } + } + } + + // This migration is a no-op, but it forces the migration mechanism to apply it and thus write the DB schema version info + if err := BeforeCommit(tx, nil, true); err != nil { + return err + } + return tx.Commit() + }, +} diff --git a/p2p/discover/lookup.go b/p2p/discover/lookup.go index 87ba2c2d55e..5bc2ba47982 100644 --- a/p2p/discover/lookup.go +++ b/p2p/discover/lookup.go @@ -132,12 +132,18 @@ func (it *lookup) startQueries() bool { return it.queries > 0 } +// ctxKey is an unexported context-key type; using it instead of a raw string key avoids collisions with context values set by other packages +type ctxKey int + +const ( + ckNoSlowdown ctxKey = iota +) + func disableLookupSlowdown(ctx context.Context) context.Context { - return context.WithValue(ctx, "p2p.discover.lookup.noSlowdown", true) + 
return context.WithValue(ctx, ckNoSlowdown, true) } func isDisabledLookupSlowdown(ctx context.Context) bool { - return ctx.Value("p2p.discover.lookup.noSlowdown") != nil + return ctx.Value(ckNoSlowdown) != nil } func (it *lookup) slowdown() { diff --git a/p2p/discover/table_test.go b/p2p/discover/table_test.go index c9c0153bf78..e2a2354408c 100644 --- a/p2p/discover/table_test.go +++ b/p2p/discover/table_test.go @@ -29,6 +29,7 @@ import ( "github.com/ledgerwatch/erigon/p2p/enode" "github.com/ledgerwatch/erigon/p2p/enr" "github.com/ledgerwatch/erigon/p2p/netutil" + "github.com/ledgerwatch/log/v3" ) func TestTable_pingReplace(t *testing.T) { @@ -49,7 +50,7 @@ func TestTable_pingReplace(t *testing.T) { func testPingReplace(t *testing.T, newNodeIsResponding, lastInBucketIsResponding bool) { transport := newPingRecorder() tmpDir := t.TempDir() - tab, db := newTestTable(transport, tmpDir) + tab, db := newTestTable(transport, tmpDir, log.Root()) defer db.Close() defer tab.close() @@ -118,7 +119,7 @@ func testTableBumpNoDuplicatesRun(t *testing.T, bucketCountGen byte, bumpCountGe if len(bumps) > 0 { tmpDir := t.TempDir() - tab, db := newTestTable(newPingRecorder(), tmpDir) + tab, db := newTestTable(newPingRecorder(), tmpDir, log.Root()) defer db.Close() defer tab.close() @@ -170,7 +171,7 @@ func TestTable_bumpNoDuplicates_examples(t *testing.T) { func TestTable_IPLimit(t *testing.T) { transport := newPingRecorder() tmpDir := t.TempDir() - tab, db := newTestTable(transport, tmpDir) + tab, db := newTestTable(transport, tmpDir, log.Root()) defer db.Close() defer tab.close() @@ -188,7 +189,7 @@ func TestTable_IPLimit(t *testing.T) { func TestTable_BucketIPLimit(t *testing.T) { transport := newPingRecorder() tmpDir := t.TempDir() - tab, db := newTestTable(transport, tmpDir) + tab, db := newTestTable(transport, tmpDir, log.Root()) defer db.Close() defer tab.close() @@ -224,7 +225,7 @@ func testTableFindNodeByIDRun(t *testing.T, nodesCountGen uint16, resultsCountGe // for any node table, Target and N transport := newPingRecorder() tmpDir := t.TempDir() - tab, db := newTestTable(transport, tmpDir) + tab, db := newTestTable(transport, tmpDir, log.Root()) defer db.Close() defer tab.close() @@ -328,7 +329,7 @@ func testTableReadRandomNodesGetAllRun(t *testing.T, nodesCountGen uint16, rand buf := make([]*enode.Node, nodesCount) transport := newPingRecorder() tmpDir := t.TempDir() - tab, db := newTestTable(transport, tmpDir) + tab, db := newTestTable(transport, tmpDir, log.Root()) defer db.Close() defer tab.close() <-tab.initDone @@ -392,7 +393,7 @@ func generateNode(rand *rand.Rand) *node { func TestTable_addVerifiedNode(t *testing.T) { tmpDir := t.TempDir() - tab, db := newTestTable(newPingRecorder(), tmpDir) + tab, db := newTestTable(newPingRecorder(), tmpDir, log.Root()) <-tab.initDone defer db.Close() defer tab.close() @@ -425,7 +426,7 @@ func TestTable_addVerifiedNode(t *testing.T) { func TestTable_addSeenNode(t *testing.T) { tmpDir := t.TempDir() - tab, db := newTestTable(newPingRecorder(), tmpDir) + tab, db := newTestTable(newPingRecorder(), tmpDir, log.Root()) <-tab.initDone defer db.Close() defer tab.close() @@ -460,7 +461,7 @@ func TestTable_addSeenNode(t *testing.T) { func TestTable_revalidateSyncRecord(t *testing.T) { transport := newPingRecorder() tmpDir := t.TempDir() - tab, db := newTestTable(transport, tmpDir) + tab, db := newTestTable(transport, tmpDir, log.Root()) <-tab.initDone defer db.Close() defer tab.close() diff --git a/p2p/discover/table_util_test.go b/p2p/discover/table_util_test.go 
index e4613192884..72fea0258ae 100644 --- a/p2p/discover/table_util_test.go +++ b/p2p/discover/table_util_test.go @@ -43,8 +43,8 @@ func init() { nullNode = enode.SignNull(&r, enode.ID{}) } -func newTestTable(t transport, tmpDir string) (*Table, *enode.DB) { - db, err := enode.OpenDB(context.Background(), "", tmpDir) +func newTestTable(t transport, tmpDir string, logger log.Logger) (*Table, *enode.DB) { + db, err := enode.OpenDB(context.Background(), "", tmpDir, logger) if err != nil { panic(err) } diff --git a/p2p/discover/v4_udp_test.go b/p2p/discover/v4_udp_test.go index 8811e4c41df..923bca651d4 100644 --- a/p2p/discover/v4_udp_test.go +++ b/p2p/discover/v4_udp_test.go @@ -82,7 +82,7 @@ func newUDPTestContext(ctx context.Context, t *testing.T, logger log.Logger) *ud tmpDir := t.TempDir() var err error - test.db, err = enode.OpenDB(ctx, "", tmpDir) + test.db, err = enode.OpenDB(ctx, "", tmpDir, logger) if err != nil { panic(err) } @@ -619,7 +619,7 @@ func startLocalhostV4(ctx context.Context, t *testing.T, cfg Config, logger log. cfg.PrivateKey = newkey() tmpDir := t.TempDir() - db, err := enode.OpenDB(context.Background(), "", tmpDir) + db, err := enode.OpenDB(context.Background(), "", tmpDir, logger) if err != nil { panic(err) } diff --git a/p2p/discover/v5_udp_test.go b/p2p/discover/v5_udp_test.go index 9e0c70f6f07..c4e9c350885 100644 --- a/p2p/discover/v5_udp_test.go +++ b/p2p/discover/v5_udp_test.go @@ -41,7 +41,7 @@ import ( func startLocalhostV5(t *testing.T, cfg Config, logger log.Logger) *UDPv5 { cfg.PrivateKey = newkey() tmpDir := t.TempDir() - db, err := enode.OpenDB(context.Background(), "", tmpDir) + db, err := enode.OpenDB(context.Background(), "", tmpDir, logger) if err != nil { panic(err) } @@ -573,7 +573,7 @@ func newUDPV5TestContext(ctx context.Context, t *testing.T, logger log.Logger) * t.Cleanup(test.close) var err error tmpDir := t.TempDir() - test.db, err = enode.OpenDB(context.Background(), "", tmpDir) + test.db, err = enode.OpenDB(context.Background(), "", tmpDir, logger) if err != nil { panic(err) } @@ -627,7 +627,7 @@ func (test *udpV5Test) getNode(key *ecdsa.PrivateKey, addr *net.UDPAddr, logger ln := test.nodesByID[id] if ln == nil { tmpDir := test.t.TempDir() - db, err := enode.OpenDB(context.Background(), "", tmpDir) + db, err := enode.OpenDB(context.Background(), "", tmpDir, logger) if err != nil { panic(err) } diff --git a/p2p/discover/v5wire/encoding_test.go b/p2p/discover/v5wire/encoding_test.go index 0eee9ed8d64..f28d5278642 100644 --- a/p2p/discover/v5wire/encoding_test.go +++ b/p2p/discover/v5wire/encoding_test.go @@ -537,7 +537,7 @@ func (t *handshakeTest) close() { } func (n *handshakeTestNode) init(key *ecdsa.PrivateKey, ip net.IP, clock mclock.Clock, tmpDir string, logger log.Logger) { - db, err := enode.OpenDB(context.Background(), "", tmpDir) + db, err := enode.OpenDB(context.Background(), "", tmpDir, logger) if err != nil { panic(err) } diff --git a/p2p/enode/localnode_test.go b/p2p/enode/localnode_test.go index 2046dfd23f1..8e5b51b8e40 100644 --- a/p2p/enode/localnode_test.go +++ b/p2p/enode/localnode_test.go @@ -29,7 +29,7 @@ import ( ) func newLocalNodeForTesting(tmpDir string, logger log.Logger) (*LocalNode, *DB) { - db, err := OpenDB(context.Background(), "", tmpDir) + db, err := OpenDB(context.Background(), "", tmpDir, logger) if err != nil { panic(err) } diff --git a/p2p/enode/nodedb.go b/p2p/enode/nodedb.go index fb4b27e4c85..34f1cc1f334 100644 --- a/p2p/enode/nodedb.go +++ b/p2p/enode/nodedb.go @@ -82,8 +82,7 @@ type DB struct { // 
OpenDB opens a node database for storing and retrieving info about known peers in the // network. If no path is given, an in-memory, temporary database is constructed. -func OpenDB(ctx context.Context, path string, tmpDir string) (*DB, error) { - logger := log.New() //TODO: move higher +func OpenDB(ctx context.Context, path string, tmpDir string, logger log.Logger) (*DB, error) { if path == "" { return newMemoryDB(logger, tmpDir) } diff --git a/p2p/enode/nodedb_test.go b/p2p/enode/nodedb_test.go index 313f424b947..4e72954ba1d 100644 --- a/p2p/enode/nodedb_test.go +++ b/p2p/enode/nodedb_test.go @@ -25,6 +25,8 @@ import ( "reflect" "testing" "time" + + "github.com/ledgerwatch/log/v3" ) var keytestID = HexID("51232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439") @@ -89,7 +91,7 @@ var nodeDBInt64Tests = []struct { func TestDBInt64(t *testing.T) { tmpDir := t.TempDir() - db, err := OpenDB(context.Background(), "", tmpDir) + db, err := OpenDB(context.Background(), "", tmpDir, log.Root()) if err != nil { panic(err) } @@ -125,7 +127,7 @@ func TestDBFetchStore(t *testing.T) { inst := time.Now() num := 314 - db, err := OpenDB(context.Background(), "", tmpDir) + db, err := OpenDB(context.Background(), "", tmpDir, log.Root()) if err != nil { panic(err) } @@ -268,7 +270,7 @@ func TestDBSeedQuery(t *testing.T) { } func testSeedQuery(tmpDir string) error { - db, err := OpenDB(context.Background(), "", tmpDir) + db, err := OpenDB(context.Background(), "", tmpDir, log.Root()) if err != nil { panic(err) } @@ -318,7 +320,7 @@ func TestDBPersistency(t *testing.T) { ) // Create a persistent database and store some values - db, err := OpenDB(context.Background(), filepath.Join(root, "database"), root) + db, err := OpenDB(context.Background(), filepath.Join(root, "database"), root, log.Root()) if err != nil { t.Fatalf("failed to create persistent database: %v", err) } @@ -329,7 +331,7 @@ func TestDBPersistency(t *testing.T) { db.Close() // Reopen the database and check the value - db, err = OpenDB(context.Background(), filepath.Join(root, "database"), root) + db, err = OpenDB(context.Background(), filepath.Join(root, "database"), root, log.Root()) if err != nil { t.Fatalf("failed to open persistent database: %v", err) } @@ -432,7 +434,7 @@ var nodeDBExpirationNodes = []struct { func TestDBExpiration(t *testing.T) { tmpDir := t.TempDir() - db, err := OpenDB(context.Background(), "", tmpDir) + db, err := OpenDB(context.Background(), "", tmpDir, log.Root()) if err != nil { panic(err) } @@ -479,7 +481,7 @@ func TestDBExpiration(t *testing.T) { // in the database.
func TestDBExpireV5(t *testing.T) { tmpDir := t.TempDir() - db, err := OpenDB(context.Background(), "", tmpDir) + db, err := OpenDB(context.Background(), "", tmpDir, log.Root()) if err != nil { panic(err) } diff --git a/p2p/sentry/sentry_grpc_server_test.go b/p2p/sentry/sentry_grpc_server_test.go index 93b0c4b8e8d..5cd8739c9a8 100644 --- a/p2p/sentry/sentry_grpc_server_test.go +++ b/p2p/sentry/sentry_grpc_server_test.go @@ -22,6 +22,7 @@ import ( "github.com/ledgerwatch/erigon/core/state/temporal" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/p2p" + "github.com/ledgerwatch/log/v3" ) func testSentryServer(db kv.Getter, genesis *types.Genesis, genesisHash libcommon.Hash) *GrpcServer { @@ -88,8 +89,8 @@ func testForkIDSplit(t *testing.T, protocol uint) { gspecNoFork = &types.Genesis{Config: configNoFork} gspecProFork = &types.Genesis{Config: configProFork} - genesisNoFork = core.MustCommitGenesis(gspecNoFork, dbNoFork, "") - genesisProFork = core.MustCommitGenesis(gspecProFork, dbProFork, "") + genesisNoFork = core.MustCommitGenesis(gspecNoFork, dbNoFork, "", log.Root()) + genesisProFork = core.MustCommitGenesis(gspecProFork, dbProFork, "", log.Root()) ) var s1, s2 *GrpcServer @@ -177,7 +178,7 @@ func TestSentryServerImpl_SetStatusInitPanic(t *testing.T) { configNoFork := &chain.Config{HomesteadBlock: big.NewInt(1), ChainID: big.NewInt(1)} _, dbNoFork, _ := temporal.NewTestDB(t, datadir.New(t.TempDir()), nil) gspecNoFork := &types.Genesis{Config: configNoFork} - genesisNoFork := core.MustCommitGenesis(gspecNoFork, dbNoFork, "") + genesisNoFork := core.MustCommitGenesis(gspecNoFork, dbNoFork, "", log.Root()) ss := &GrpcServer{p2p: &p2p.Config{}} _, err := ss.SetStatus(context.Background(), &proto_sentry.StatusData{ diff --git a/p2p/sentry/simulator/sentry_simulator.go b/p2p/sentry/simulator/sentry_simulator.go new file mode 100644 index 00000000000..51eb8c2de68 --- /dev/null +++ b/p2p/sentry/simulator/sentry_simulator.go @@ -0,0 +1,453 @@ +package simulator + +import ( + "bytes" + "context" + "fmt" + "path/filepath" + + "github.com/ledgerwatch/erigon-lib/chain/snapcfg" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/downloader/snaptype" + "github.com/ledgerwatch/erigon-lib/gointerfaces" + sentry_if "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" + "github.com/ledgerwatch/erigon-lib/gointerfaces/types" + core_types "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/eth/protocols/eth" + "github.com/ledgerwatch/erigon/p2p" + "github.com/ledgerwatch/erigon/p2p/discover/v4wire" + "github.com/ledgerwatch/erigon/p2p/enode" + "github.com/ledgerwatch/erigon/p2p/sentry" + "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" + "github.com/ledgerwatch/log/v3" + "google.golang.org/protobuf/types/known/emptypb" +) + +type server struct { + sentry_if.UnimplementedSentryServer + ctx context.Context + peers map[[64]byte]*p2p.Peer + messageReceivers map[sentry_if.MessageId][]sentry_if.Sentry_MessagesServer + logger log.Logger + knownSnapshots *freezeblocks.RoSnapshots + activeSnapshots *freezeblocks.RoSnapshots + blockReader *freezeblocks.BlockReader + downloader *TorrentClient +} + +func newPeer(name string, caps []p2p.Cap) (*p2p.Peer, error) { + key, err := crypto.GenerateKey() + + if err != nil { + return nil, err + } + + return p2p.NewPeer(enode.PubkeyToIDV4(&key.PublicKey), 
v4wire.EncodePubkey(&key.PublicKey), name, caps, true), nil +} + +func NewSentry(ctx context.Context, chain string, snapshotLocation string, peerCount int, logger log.Logger) (sentry_if.SentryServer, error) { + peers := map[[64]byte]*p2p.Peer{} + + for i := 0; i < peerCount; i++ { + peer, err := newPeer(fmt.Sprint("peer-", i), nil) + + if err != nil { + return nil, err + } + peers[peer.Pubkey()] = peer + } + + cfg := snapcfg.KnownCfg(chain, 0) + + knownSnapshots := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{ + Enabled: true, + Produce: false, + NoDownloader: true, + }, "", cfg.Version, logger) + + files := make([]string, 0, len(cfg.Preverified)) + + for _, item := range cfg.Preverified { + files = append(files, item.Name) + } + + knownSnapshots.InitSegments(files) + + //s.knownSnapshots.ReopenList([]string{ent2.Name()}, false) + activeSnapshots := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{ + Enabled: true, + Produce: false, + NoDownloader: true, + }, snapshotLocation, cfg.Version, logger) + + if err := activeSnapshots.ReopenFolder(); err != nil { + return nil, err + } + + downloader, err := NewTorrentClient(ctx, chain, snapshotLocation, logger) + + if err != nil { + return nil, err + } + + s := &server{ + ctx: ctx, + peers: peers, + messageReceivers: map[sentry_if.MessageId][]sentry_if.Sentry_MessagesServer{}, + knownSnapshots: knownSnapshots, + activeSnapshots: activeSnapshots, + blockReader: freezeblocks.NewBlockReader(activeSnapshots, nil), + logger: logger, + downloader: downloader, + } + + go func() { + <-ctx.Done() + s.Close() + }() + + return s, nil +} + +func (s *server) Close() { + s.downloader.Close() + if closer, ok := s.downloader.cfg.DefaultStorage.(interface{ Close() error }); ok { + closer.Close() + } + s.activeSnapshots.Close() +} + +func (s *server) NodeInfo(context.Context, *emptypb.Empty) (*types.NodeInfoReply, error) { + return nil, fmt.Errorf("TODO") +} + +func (s *server) PeerById(ctx context.Context, in *sentry_if.PeerByIdRequest) (*sentry_if.PeerByIdReply, error) { + peerId := sentry.ConvertH512ToPeerID(in.PeerId) + + peer, ok := s.peers[peerId] + + if !ok { + return nil, fmt.Errorf("unknown peer") + } + + info := peer.Info() + + return &sentry_if.PeerByIdReply{ + Peer: &types.PeerInfo{ + Id: info.ID, + Name: info.Name, + Enode: info.Enode, + Enr: info.ENR, + Caps: info.Caps, + ConnLocalAddr: info.Network.LocalAddress, + ConnRemoteAddr: info.Network.RemoteAddress, + ConnIsInbound: info.Network.Inbound, + ConnIsTrusted: info.Network.Trusted, + ConnIsStatic: info.Network.Static, + }, + }, nil +} + +func (s *server) PeerCount(context.Context, *sentry_if.PeerCountRequest) (*sentry_if.PeerCountReply, error) { + return &sentry_if.PeerCountReply{Count: uint64(len(s.peers))}, nil +} + +func (s *server) PeerEvents(*sentry_if.PeerEventsRequest, sentry_if.Sentry_PeerEventsServer) error { + return fmt.Errorf("TODO") +} + +func (s *server) PeerMinBlock(context.Context, *sentry_if.PeerMinBlockRequest) (*emptypb.Empty, error) { + return nil, fmt.Errorf("TODO") +} + +func (s *server) Peers(context.Context, *emptypb.Empty) (*sentry_if.PeersReply, error) { + reply := &sentry_if.PeersReply{} + + for _, peer := range s.peers { + info := peer.Info() + + reply.Peers = append(reply.Peers, + &types.PeerInfo{ + Id: info.ID, + Name: info.Name, + Enode: info.Enode, + Enr: info.ENR, + Caps: info.Caps, + ConnLocalAddr: info.Network.LocalAddress, + ConnRemoteAddr: info.Network.RemoteAddress, + ConnIsInbound: info.Network.Inbound, + ConnIsTrusted: info.Network.Trusted, + 
ConnIsStatic: info.Network.Static, + }) + } + + return reply, nil +} + +func (s *server) SendMessageById(ctx context.Context, in *sentry_if.SendMessageByIdRequest) (*sentry_if.SentPeers, error) { + peerId := sentry.ConvertH512ToPeerID(in.PeerId) + + if err := s.sendMessageById(ctx, peerId, in.Data); err != nil { + return nil, err + } + + return &sentry_if.SentPeers{ + Peers: []*types.H512{in.PeerId}, + }, nil +} + +func (s *server) sendMessageById(ctx context.Context, peerId [64]byte, messageData *sentry_if.OutboundMessageData) error { + peer, ok := s.peers[peerId] + + if !ok { + return fmt.Errorf("unknown peer") + } + + switch messageData.Id { + case sentry_if.MessageId_GET_BLOCK_HEADERS_65: + packet := &eth.GetBlockHeadersPacket{} + if err := rlp.DecodeBytes(messageData.Data, packet); err != nil { + return fmt.Errorf("failed to decode packet: %w", err) + } + + go s.processGetBlockHeaders(ctx, peer, 0, packet) + + case sentry_if.MessageId_GET_BLOCK_HEADERS_66: + packet := &eth.GetBlockHeadersPacket66{} + if err := rlp.DecodeBytes(messageData.Data, packet); err != nil { + return fmt.Errorf("failed to decode packet: %w", err) + } + + go s.processGetBlockHeaders(ctx, peer, packet.RequestId, packet.GetBlockHeadersPacket) + + default: + return fmt.Errorf("unhandled message id: %s", messageData.Id) + } + + return nil +} + +func (s *server) SendMessageByMinBlock(ctx context.Context, request *sentry_if.SendMessageByMinBlockRequest) (*sentry_if.SentPeers, error) { + return s.UnimplementedSentryServer.SendMessageByMinBlock(ctx, request) +} + +func (s *server) SendMessageToAll(ctx context.Context, data *sentry_if.OutboundMessageData) (*sentry_if.SentPeers, error) { + sentPeers := &sentry_if.SentPeers{} + + for _, peer := range s.peers { + peerKey := peer.Pubkey() + + if err := s.sendMessageById(ctx, peerKey, data); err != nil { + return sentPeers, err + } + + sentPeers.Peers = append(sentPeers.Peers, gointerfaces.ConvertBytesToH512(peerKey[:])) + } + + return sentPeers, nil +} + +func (s *server) SendMessageToRandomPeers(ctx context.Context, request *sentry_if.SendMessageToRandomPeersRequest) (*sentry_if.SentPeers, error) { + sentPeers := &sentry_if.SentPeers{} + + var i uint64 + + for _, peer := range s.peers { + peerKey := peer.Pubkey() + + if err := s.sendMessageById(ctx, peerKey, request.Data); err != nil { + return sentPeers, err + } + + sentPeers.Peers = append(sentPeers.Peers, gointerfaces.ConvertBytesToH512(peerKey[:])) + + i++ + + if i == request.MaxPeers { + break + } + } + + return sentPeers, nil + +} + +func (s *server) Messages(request *sentry_if.MessagesRequest, receiver sentry_if.Sentry_MessagesServer) error { + for _, messageId := range request.Ids { + receivers := s.messageReceivers[messageId] + s.messageReceivers[messageId] = append(receivers, receiver) + } + + <-s.ctx.Done() + + return nil +} + +func (s *server) processGetBlockHeaders(ctx context.Context, peer *p2p.Peer, requestId uint64, request *eth.GetBlockHeadersPacket) { + r65 := s.messageReceivers[sentry_if.MessageId_BLOCK_HEADERS_65] + r66 := s.messageReceivers[sentry_if.MessageId_BLOCK_HEADERS_66] + + if len(r65)+len(r66) > 0 { + + peerKey := peer.Pubkey() + peerId := gointerfaces.ConvertBytesToH512(peerKey[:]) + + headers, err := s.getHeaders(ctx, request.Origin, request.Amount, request.Skip, request.Reverse) + + if err != nil { + s.logger.Warn("Can't get headers", "error", err) + return + } + + if len(r65) > 0 { + var data bytes.Buffer + + err := rlp.Encode(&data, headers) + + if err != nil { + s.logger.Warn("Can't encode 
headers", "error", err) + return + } + + for _, receiver := range r65 { + receiver.Send(&sentry_if.InboundMessage{ + Id: sentry_if.MessageId_BLOCK_HEADERS_65, + Data: data.Bytes(), + PeerId: peerId, + }) + } + } + + if len(r66) > 0 { + var data bytes.Buffer + + err := rlp.Encode(&data, ð.BlockHeadersPacket66{ + RequestId: requestId, + BlockHeadersPacket: headers, + }) + + if err != nil { + fmt.Printf("Error (move to logger): %s", err) + return + } + + for _, receiver := range r66 { + receiver.Send(&sentry_if.InboundMessage{ + Id: sentry_if.MessageId_BLOCK_HEADERS_66, + Data: data.Bytes(), + PeerId: peerId, + }) + } + } + } +} + +func (s *server) getHeaders(ctx context.Context, origin eth.HashOrNumber, amount uint64, skip uint64, reverse bool) (eth.BlockHeadersPacket, error) { + + var headers eth.BlockHeadersPacket + + var next uint64 + + nextBlockNum := func(blockNum uint64) uint64 { + inc := uint64(1) + + if skip != 0 { + inc = skip + } + + if reverse { + return blockNum - inc + } else { + return blockNum + inc + } + } + + if origin.Hash != (common.Hash{}) { + header, err := s.getHeaderByHash(ctx, origin.Hash) + + if err != nil { + return nil, err + } + + headers = append(headers, header) + + next = nextBlockNum(header.Number.Uint64()) + } else { + header, err := s.getHeader(ctx, origin.Number) + + if err != nil { + return nil, err + } + + headers = append(headers, header) + + next = nextBlockNum(header.Number.Uint64()) + } + + for len(headers) < int(amount) { + header, err := s.getHeader(ctx, next) + + if err != nil { + return nil, err + } + + headers = append(headers, header) + + next = nextBlockNum(header.Number.Uint64()) + } + + return headers, nil +} + +func (s *server) getHeader(ctx context.Context, blockNum uint64) (*core_types.Header, error) { + header, err := s.blockReader.Header(ctx, nil, common.Hash{}, blockNum) + + if err != nil { + return nil, err + } + + if header == nil { + view := s.knownSnapshots.View() + defer view.Close() + + if seg, ok := view.HeadersSegment(blockNum); ok { + if err := s.downloadHeaders(ctx, seg); err != nil { + return nil, err + } + } + + s.activeSnapshots.ReopenSegments([]snaptype.Type{snaptype.Headers}) + + header, err = s.blockReader.Header(ctx, nil, common.Hash{}, blockNum) + + if err != nil { + return nil, err + } + } + + return header, nil +} + +func (s *server) getHeaderByHash(ctx context.Context, hash common.Hash) (*core_types.Header, error) { + return s.blockReader.HeaderByHash(ctx, nil, hash) +} + +func (s *server) downloadHeaders(ctx context.Context, header *freezeblocks.HeaderSegment) error { + fileName := snaptype.SegmentFileName(s.knownSnapshots.Version(), header.From(), header.To(), snaptype.Headers) + + s.logger.Info(fmt.Sprintf("Downloading %s", fileName)) + + err := s.downloader.Download(ctx, fileName) + + if err != nil { + return fmt.Errorf("can't download %s: %w", fileName, err) + } + + s.logger.Info(fmt.Sprintf("Indexing %s", fileName)) + + return freezeblocks.HeadersIdx(ctx, + filepath.Join(s.downloader.LocalFsRoot(), fileName), s.knownSnapshots.Version(), header.From(), s.downloader.LocalFsRoot(), nil, log.LvlDebug, s.logger) +} diff --git a/p2p/sentry/simulator/simulator_test.go b/p2p/sentry/simulator/simulator_test.go new file mode 100644 index 00000000000..3821bb88bf7 --- /dev/null +++ b/p2p/sentry/simulator/simulator_test.go @@ -0,0 +1,203 @@ +//go:build integration + +package simulator_test + +import ( + "bytes" + "context" + "testing" + + "github.com/ledgerwatch/erigon-lib/direct" + 
"github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" + sentry_if "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" + "github.com/ledgerwatch/erigon/eth/protocols/eth" + "github.com/ledgerwatch/erigon/p2p/sentry/simulator" + "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/log/v3" +) + +func TestSimulatorStart(t *testing.T) { + + ctx, cancel := context.WithCancel(context.Background()) + + defer cancel() + + logger := log.New() + logger.SetHandler(log.StdoutHandler) + dataDir := t.TempDir() + + sim, err := simulator.NewSentry(ctx, "mumbai", dataDir, 1, logger) + + if err != nil { + t.Fatal(err) + } + + simClient := direct.NewSentryClientDirect(66, sim) + + peerCount, err := simClient.PeerCount(ctx, &sentry.PeerCountRequest{}) + + if err != nil { + t.Fatal(err) + } + + if peerCount.Count != 1 { + t.Fatal("Invalid response count: expected:", 1, "got:", peerCount.Count) + } + + receiver, err := simClient.Messages(ctx, &sentry.MessagesRequest{ + Ids: []sentry.MessageId{sentry.MessageId_BLOCK_HEADERS_66}, + }) + + if err != nil { + t.Fatal(err) + } + + getHeaders66 := ð.GetBlockHeadersPacket66{ + RequestId: 1, + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + Origin: eth.HashOrNumber{Number: 10}, + Amount: 10, + }, + } + + var data bytes.Buffer + + err = rlp.Encode(&data, getHeaders66) + + if err != nil { + t.Fatal(err) + } + + peers, err := simClient.SendMessageToAll(ctx, &sentry.OutboundMessageData{ + Id: sentry_if.MessageId_GET_BLOCK_HEADERS_66, + Data: data.Bytes(), + }) + + if err != nil { + t.Fatal(err) + } + + if len(peers.Peers) != int(peerCount.Count) { + t.Fatal("Unexpected peer count expected:", peerCount.Count, len(peers.Peers)) + } + + message, err := receiver.Recv() + + if err != nil { + t.Fatal(err) + } + + if message.Id != sentry_if.MessageId_BLOCK_HEADERS_66 { + t.Fatal("unexpected message id expected:", sentry_if.MessageId_BLOCK_HEADERS_66, "got:", message.Id) + } + + var expectedPeer bool + + for _, peer := range peers.Peers { + if message.PeerId.String() == peer.String() { + expectedPeer = true + break + } + } + + if !expectedPeer { + t.Fatal("message received from unexpected peer:", message.PeerId) + } + + packet := ð.BlockHeadersPacket66{} + + if err := rlp.DecodeBytes(message.Data, packet); err != nil { + t.Fatal("failed to decode packet:", err) + } + + if len(packet.BlockHeadersPacket) != 10 { + t.Fatal("unexpected header count: expected:", 10, "got:", len(packet.BlockHeadersPacket)) + } + + blockNum := uint64(10) + + for _, header := range packet.BlockHeadersPacket { + if header.Number.Uint64() != blockNum { + t.Fatal("unexpected block number: expected:", blockNum, "got:", header.Number) + } + + blockNum++ + } + + simClient65 := direct.NewSentryClientDirect(65, sim) + + getHeaders65 := ð.GetBlockHeadersPacket{ + Origin: eth.HashOrNumber{Number: 100}, + Amount: 50, + } + + data.Reset() + + err = rlp.Encode(&data, getHeaders65) + + if err != nil { + t.Fatal(err) + } + + peers65, err := simClient65.SendMessageById(ctx, &sentry_if.SendMessageByIdRequest{ + Data: &sentry.OutboundMessageData{ + Id: sentry_if.MessageId_GET_BLOCK_HEADERS_65, + Data: data.Bytes(), + }, + PeerId: peers.Peers[0], + }) + + if err != nil { + t.Fatal(err) + } + + if len(peers65.Peers) != 1 { + t.Fatal("message sent to unexpected number of peers:", len(peers65.Peers)) + } + + if peers65.Peers[0].String() != peers.Peers[0].String() { + t.Fatal("message sent to unexpected number of peers", peers65.Peers[0]) + } + + receiver65, err := simClient65.Messages(ctx, 
&sentry.MessagesRequest{ + Ids: []sentry.MessageId{sentry.MessageId_BLOCK_HEADERS_65}, + }) + + if err != nil { + t.Fatal(err) + } + + message, err = receiver65.Recv() + + if err != nil { + t.Fatal(err) + } + + if message.Id != sentry_if.MessageId_BLOCK_HEADERS_65 { + t.Fatal("unexpected message id expected:", sentry_if.MessageId_BLOCK_HEADERS_65, "got:", message.Id) + } + + if message.PeerId.String() != peers.Peers[0].String() { + t.Fatal("message received from unexpected peer:", message.PeerId) + } + + packet65 := eth.BlockHeadersPacket{} + + if err := rlp.DecodeBytes(message.Data, &packet65); err != nil { + t.Fatal("failed to decode packet:", err) + } + + if len(packet65) != 50 { + t.Fatal("unexpected header count: expected:", 50, "got:", len(packet65)) + } + + blockNum = uint64(100) + + for _, header := range packet65 { + if header.Number.Uint64() != blockNum { + t.Fatal("unexpected block number: expected:", blockNum, "got:", header.Number) + } + + blockNum++ + } +} diff --git a/p2p/sentry/simulator/syncutil.go b/p2p/sentry/simulator/syncutil.go new file mode 100644 index 00000000000..c38877b4fc3 --- /dev/null +++ b/p2p/sentry/simulator/syncutil.go @@ -0,0 +1,195 @@ +package simulator + +import ( + "context" + "fmt" + "io/fs" + "os" + "path/filepath" + "runtime" + + "github.com/anacrolix/torrent" + "github.com/anacrolix/torrent/metainfo" + "github.com/anacrolix/torrent/storage" + "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/erigon-lib/chain/snapcfg" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/datadir" + "github.com/ledgerwatch/erigon-lib/downloader" + "github.com/ledgerwatch/erigon-lib/downloader/downloadercfg" + "github.com/ledgerwatch/erigon-lib/downloader/snaptype" + "github.com/ledgerwatch/erigon/cmd/downloader/downloadernat" + "github.com/ledgerwatch/erigon/cmd/utils" + "github.com/ledgerwatch/erigon/p2p/nat" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/log/v3" + "golang.org/x/sync/errgroup" +) + +// The code in this file is taken from cmd/snapshots - which is yet to be merged +// to devel - once that is done this file can be removed + +type TorrentClient struct { + *torrent.Client + cfg *torrent.ClientConfig + items map[string]snapcfg.PreverifiedItem +} + +func NewTorrentClient(ctx context.Context, chain string, torrentDir string, logger log.Logger) (*TorrentClient, error) { + + relativeDataDir := torrentDir + if torrentDir != "" { + var err error + absdatadir, err := filepath.Abs(torrentDir) + if err != nil { + panic(err) + } + torrentDir = absdatadir + } + + dirs := datadir.Dirs{ + RelativeDataDir: relativeDataDir, + DataDir: torrentDir, + Snap: torrentDir, + } + + webseedsList := common.CliString2Array(utils.WebSeedsFlag.Value) + + if known, ok := snapcfg.KnownWebseeds[chain]; ok { + webseedsList = append(webseedsList, known...)
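+ // the chain's built-in webseed URLs (HTTP mirrors of the preverified snapshot files) extend whatever list was supplied via utils.WebSeedsFlag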
+ } + + var downloadRate, uploadRate datasize.ByteSize + + if err := downloadRate.UnmarshalText([]byte(utils.TorrentDownloadRateFlag.Value)); err != nil { + return nil, err + } + + if err := uploadRate.UnmarshalText([]byte(utils.TorrentUploadRateFlag.Value)); err != nil { + return nil, err + } + + logLevel, _, err := downloadercfg.Int2LogLevel(utils.TorrentVerbosityFlag.Value) + + if err != nil { + return nil, err + } + + version := "erigon: " + params.VersionWithCommit(params.GitCommit) + + cfg, err := downloadercfg.New(dirs, version, logLevel, downloadRate, uploadRate, + utils.TorrentPortFlag.Value, utils.TorrentConnsPerFileFlag.Value, 0, nil, webseedsList, chain) + + if err != nil { + return nil, err + } + + if err := os.MkdirAll(torrentDir, 0755); err != nil { + return nil, err + } + + cfg.ClientConfig.DataDir = torrentDir + + cfg.ClientConfig.PieceHashersPerTorrent = 32 * runtime.NumCPU() + cfg.ClientConfig.DisableIPv6 = utils.DisableIPV6.Value + cfg.ClientConfig.DisableIPv4 = utils.DisableIPV4.Value + + natif, err := nat.Parse(utils.NATFlag.Value) + + if err != nil { + return nil, fmt.Errorf("invalid nat option %s: %w", utils.NATFlag.Value, err) + } + + downloadernat.DoNat(natif, cfg.ClientConfig, logger) + + cfg.ClientConfig.DefaultStorage = storage.NewMMap(torrentDir) + + cli, err := torrent.NewClient(cfg.ClientConfig) + + if err != nil { + return nil, fmt.Errorf("can't create torrent client: %w", err) + } + + items := map[string]snapcfg.PreverifiedItem{} + for _, it := range snapcfg.KnownCfg(chain, 0).Preverified { + items[it.Name] = it + } + + return &TorrentClient{cli, cfg.ClientConfig, items}, nil +} + +func (s *TorrentClient) LocalFsRoot() string { + return s.cfg.DataDir +} + +func (s *TorrentClient) Download(ctx context.Context, files ...string) error { + g, ctx := errgroup.WithContext(ctx) + g.SetLimit(len(files)) + + for _, f := range files { + file := f + + g.Go(func() error { + it, ok := s.items[file] + + if !ok { + return fs.ErrNotExist + } + + t, err := func() (*torrent.Torrent, error) { + infoHash := snaptype.Hex2InfoHash(it.Hash) + + for _, t := range s.Torrents() { + if t.Name() == file { + return t, nil + } + } + + mi := &metainfo.MetaInfo{AnnounceList: downloader.Trackers} + magnet := mi.Magnet(&infoHash, &metainfo.Info{Name: file}) + spec, err := torrent.TorrentSpecFromMagnetUri(magnet.String()) + + if err != nil { + return nil, err + } + + spec.DisallowDataDownload = true + + t, _, err := s.AddTorrentSpec(spec) + if err != nil { + return nil, err + } + + return t, nil + }() + + if err != nil { + return err + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-t.GotInfo(): + } + + if !t.Complete.Bool() { + t.AllowDataDownload() + t.DownloadAll() + select { + case <-ctx.Done(): + return ctx.Err() + case <-t.Complete.On(): + } + } + + closed := t.Closed() + t.Drop() + <-closed + + return nil + }) + } + + return g.Wait() +} diff --git a/p2p/server.go b/p2p/server.go index 7ba83014a3e..c42f88d0355 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -558,7 +558,7 @@ func (srv *Server) setupLocalNode() error { } sort.Sort(capsByNameAndVersion(srv.ourHandshake.Caps)) // Create the local node - db, err := enode.OpenDB(srv.quitCtx, srv.Config.NodeDatabase, srv.Config.TmpDir) + db, err := enode.OpenDB(srv.quitCtx, srv.Config.NodeDatabase, srv.Config.TmpDir, srv.logger) if err != nil { return err } diff --git a/polygon/sync/db.go b/polygon/sync/db.go new file mode 100644 index 00000000000..560ab2bc1dd --- /dev/null +++ b/polygon/sync/db.go @@ -0,0 +1,8 @@ +package 
sync + +import "github.com/ledgerwatch/erigon/core/types" + +//go:generate mockgen -destination=./mock/db_mock.go -package=mock . DB +type DB interface { + WriteHeaders(headers []*types.Header) error +} diff --git a/polygon/sync/header_downloader.go b/polygon/sync/header_downloader.go new file mode 100644 index 00000000000..76a8f29f33a --- /dev/null +++ b/polygon/sync/header_downloader.go @@ -0,0 +1,214 @@ +package sync + +import ( + "context" + "fmt" + "math" + "sort" + "sync" + "time" + + lru "github.com/hashicorp/golang-lru/v2" + "github.com/ledgerwatch/log/v3" + + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/polygon/sync/peerinfo" +) + +const headerDownloaderLogPrefix = "HeaderDownloader" + +func NewHeaderDownloader(logger log.Logger, sentry Sentry, db DB, heimdall Heimdall, verify HeaderVerifier) *HeaderDownloader { + statePointHeadersMemo, err := lru.New[common.Hash, []*types.Header](sentry.MaxPeers()) + if err != nil { + panic(err) + } + + return &HeaderDownloader{ + logger: logger, + sentry: sentry, + db: db, + heimdall: heimdall, + verify: verify, + statePointHeadersMemo: statePointHeadersMemo, + } +} + +type HeaderDownloader struct { + logger log.Logger + sentry Sentry + db DB + heimdall Heimdall + verify HeaderVerifier + statePointHeadersMemo *lru.Cache[common.Hash, []*types.Header] // statePoint.rootHash->[headers part of state point] +} + +func (hd *HeaderDownloader) DownloadUsingCheckpoints(ctx context.Context, start uint64) error { + checkpoints, err := hd.heimdall.FetchCheckpoints(ctx, start) + if err != nil { + return err + } + + err = hd.downloadUsingStatePoints(ctx, statePointsFromCheckpoints(checkpoints)) + if err != nil { + return err + } + + return nil +} + +func (hd *HeaderDownloader) DownloadUsingMilestones(ctx context.Context, start uint64) error { + milestones, err := hd.heimdall.FetchMilestones(ctx, start) + if err != nil { + return err + } + + err = hd.downloadUsingStatePoints(ctx, statePointsFromMilestones(milestones)) + if err != nil { + return err + } + + return nil +} + +func (hd *HeaderDownloader) downloadUsingStatePoints(ctx context.Context, statePoints statePoints) error { + for len(statePoints) > 0 { + allPeers := hd.sentry.PeersWithBlockNumInfo() + if len(allPeers) == 0 { + hd.logger.Warn(fmt.Sprintf("[%s] zero peers, will try again", headerDownloaderLogPrefix)) + continue + } + + sort.Sort(allPeers) // sort by block num in asc order + peers := hd.choosePeers(allPeers, statePoints) + if len(peers) == 0 { + hd.logger.Warn( + fmt.Sprintf("[%s] can't use any peers to sync, will try again", headerDownloaderLogPrefix), + "start", statePoints[0].startBlock, + "end", statePoints[len(statePoints)-1].endBlock, + "minPeerBlockNum", allPeers[0].BlockNum, + "minPeerID", allPeers[0].ID, + ) + continue + } + + peerCount := len(peers) + statePointsBatch := statePoints[:peerCount] + hd.logger.Info( + fmt.Sprintf("[%s] downloading headers", headerDownloaderLogPrefix), + "start", statePointsBatch[0].startBlock, + "end", statePointsBatch[len(statePointsBatch)-1].endBlock, + "kind", statePointsBatch[0].kind, + "peerCount", peerCount, + ) + + headerBatches := make([][]*types.Header, len(statePointsBatch)) + maxStatePointLength := float64(0) + wg := sync.WaitGroup{} + for i, point := range statePointsBatch { + maxStatePointLength = math.Max(float64(point.length()), maxStatePointLength) + wg.Add(1) + go func(i int, statePoint *statePoint, peerID string) { + defer wg.Done() + + if headers, ok := 
hd.statePointHeadersMemo.Get(statePoint.rootHash); ok { + headerBatches[i] = headers + return + } + + headers, err := hd.sentry.DownloadHeaders(ctx, statePoint.startBlock, statePoint.endBlock, peerID) + if err != nil { + hd.logger.Debug( + fmt.Sprintf("[%s] issue downloading headers, will try again", headerDownloaderLogPrefix), + "err", err, + "start", statePoint.startBlock, + "end", statePoint.endBlock, + "rootHash", statePoint.rootHash, + "kind", statePoint.kind, + "peerID", peerID, + ) + return + } + + if err := hd.verify(statePoint, headers); err != nil { + hd.logger.Debug( + fmt.Sprintf( + "[%s] bad headers received from peer for state point - penalizing and will try again", + headerDownloaderLogPrefix, + ), + "start", statePoint.startBlock, + "end", statePoint.endBlock, + "rootHash", statePoint.rootHash, + "kind", statePoint.kind, + "peerID", peerID, + ) + + hd.sentry.Penalize(peerID) + return + } + + hd.statePointHeadersMemo.Add(statePoint.rootHash, headers) + headerBatches[i] = headers + }(i, point, peers[i].ID) + } + + wg.Wait() + headers := make([]*types.Header, 0, int(maxStatePointLength)*peerCount) + gapIndex := -1 + for i, headerBatch := range headerBatches { + if len(headerBatch) == 0 { + hd.logger.Debug( + fmt.Sprintf("[%s] no headers, will try again", headerDownloaderLogPrefix), + "start", statePointsBatch[i].startBlock, + "end", statePointsBatch[i].endBlock, + "rootHash", statePointsBatch[i].rootHash, + "kind", statePointsBatch[i].kind, + ) + + gapIndex = i + break + } + + headers = append(headers, headerBatch...) + } + + if gapIndex >= 0 { + statePoints = statePoints[gapIndex:] + } else { + statePoints = statePoints[len(statePointsBatch):] + } + + dbWriteStartTime := time.Now() + if err := hd.db.WriteHeaders(headers); err != nil { + return err + } + + hd.logger.Debug( + fmt.Sprintf("[%s] wrote headers to db", headerDownloaderLogPrefix), + "numHeaders", len(headers), + "time", time.Since(dbWriteStartTime), + ) + } + + return nil +} + +// choosePeers assumes peers are sorted in ascending order based on block num +func (hd *HeaderDownloader) choosePeers(peers peerinfo.PeersWithBlockNumInfo, statePoints statePoints) peerinfo.PeersWithBlockNumInfo { + var peersIdx int + chosenPeers := make(peerinfo.PeersWithBlockNumInfo, 0, len(peers)) + for _, statePoint := range statePoints { + if peersIdx >= len(peers) { + break + } + + peer := peers[peersIdx] + if peer.BlockNum.Cmp(statePoint.endBlock) > -1 { + chosenPeers = append(chosenPeers, peer) + } + + peersIdx++ + } + + return chosenPeers +} diff --git a/polygon/sync/header_downloader_test.go b/polygon/sync/header_downloader_test.go new file mode 100644 index 00000000000..f60ef0c6557 --- /dev/null +++ b/polygon/sync/header_downloader_test.go @@ -0,0 +1,284 @@ +package sync + +import ( + "context" + "errors" + "fmt" + "math" + "math/big" + "testing" + + "github.com/golang/mock/gomock" + "github.com/ledgerwatch/log/v3" + "github.com/stretchr/testify/require" + + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint" + "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/polygon/sync/mock" + "github.com/ledgerwatch/erigon/polygon/sync/peerinfo" + "github.com/ledgerwatch/erigon/turbo/testlog" +) + +func newHeaderDownloaderTest(t *testing.T) *headerDownloaderTest { + return newHeaderDownloaderTestWithOpts(t, headerDownloaderTestOpts{}) +} + +func newHeaderDownloaderTestWithOpts(t *testing.T, 
opts headerDownloaderTestOpts) *headerDownloaderTest { + ctrl := gomock.NewController(t) + heimdall := mock.NewMockHeimdall(ctrl) + sentry := mock.NewMockSentry(ctrl) + sentry.EXPECT().MaxPeers().Return(100).Times(1) + db := mock.NewMockDB(ctrl) + logger := testlog.Logger(t, log.LvlDebug) + headerVerifier := opts.getOrCreateDefaultHeaderVerifier() + headerDownloader := NewHeaderDownloader(logger, sentry, db, heimdall, headerVerifier) + return &headerDownloaderTest{ + heimdall: heimdall, + sentry: sentry, + db: db, + headerDownloader: headerDownloader, + } +} + +type headerDownloaderTestOpts struct { + headerVerifier HeaderVerifier +} + +func (opts headerDownloaderTestOpts) getOrCreateDefaultHeaderVerifier() HeaderVerifier { + if opts.headerVerifier == nil { + return func(_ *statePoint, _ []*types.Header) error { + return nil + } + } + + return opts.headerVerifier +} + +type headerDownloaderTest struct { + heimdall *mock.MockHeimdall + sentry *mock.MockSentry + db *mock.MockDB + headerDownloader *HeaderDownloader +} + +func (hdt headerDownloaderTest) fakePeers(count int, blockNums ...*big.Int) peerinfo.PeersWithBlockNumInfo { + peers := make(peerinfo.PeersWithBlockNumInfo, count) + for i := range peers { + var blockNum *big.Int + if i < len(blockNums) { + blockNum = blockNums[i] + } else { + blockNum = new(big.Int).SetUint64(math.MaxUint64) + } + + peers[i] = &peerinfo.PeerWithBlockNumInfo{ + ID: fmt.Sprintf("peer%d", i+1), + BlockNum: blockNum, + } + } + + return peers +} + +func (hdt headerDownloaderTest) fakeCheckpoints(count int) []*checkpoint.Checkpoint { + checkpoints := make([]*checkpoint.Checkpoint, count) + for i := range checkpoints { + num := i + 1 + checkpoints[i] = &checkpoint.Checkpoint{ + StartBlock: big.NewInt(int64(num)), + EndBlock: big.NewInt(int64(num)), + RootHash: common.BytesToHash([]byte(fmt.Sprintf("0x%d", num))), + } + } + + return checkpoints +} + +func (hdt headerDownloaderTest) fakeMilestones(count int) []*milestone.Milestone { + milestones := make([]*milestone.Milestone, count) + for i := range milestones { + num := i + 1 + milestones[i] = &milestone.Milestone{ + StartBlock: big.NewInt(int64(num)), + EndBlock: big.NewInt(int64(num)), + Hash: common.BytesToHash([]byte(fmt.Sprintf("0x%d", num))), + } + } + + return milestones +} + +type downloadHeadersMock func(context.Context, *big.Int, *big.Int, string) ([]*types.Header, error) + +func (hdt headerDownloaderTest) defaultDownloadHeadersMock() downloadHeadersMock { + return func(ctx context.Context, start *big.Int, end *big.Int, peerID string) ([]*types.Header, error) { + res := make([]*types.Header, new(big.Int).Sub(end, start).Uint64()+1) + for i := new(big.Int).Set(start); i.Cmp(end) < 1; i.Add(i, new(big.Int).SetUint64(1)) { + res[new(big.Int).Sub(i, start).Uint64()] = &types.Header{Number: new(big.Int).Set(i)} + } + return res, nil + } +} + +func (hdt headerDownloaderTest) defaultWriteHeadersMock(capture *[]*types.Header) func([]*types.Header) error { + return func(headers []*types.Header) error { + *capture = append(*capture, headers...) + return nil + } +} + +func TestHeaderDownloadUsingMilestones(t *testing.T) { + test := newHeaderDownloaderTest(t) + test.heimdall.EXPECT(). + FetchMilestones(gomock.Any(), gomock.Any()). + Return(test.fakeMilestones(4), nil). + Times(1) + test.sentry.EXPECT(). + PeersWithBlockNumInfo(). + Return(test.fakePeers(8)). + Times(1) + test.sentry.EXPECT(). + DownloadHeaders(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(test.defaultDownloadHeadersMock()). 
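+ // one DownloadHeaders call per milestone: with 4 milestones and 8 available peers, the whole range is fetched in a single batch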
+ Times(4) + var persistedHeaders []*types.Header + test.db.EXPECT(). + WriteHeaders(gomock.Any()). + DoAndReturn(test.defaultWriteHeadersMock(&persistedHeaders)). + Times(1) + + err := test.headerDownloader.DownloadUsingMilestones(context.Background(), 1) + require.NoError(t, err) + require.Len(t, persistedHeaders, 4) + // check headers are written in order + require.Equal(t, uint64(1), persistedHeaders[0].Number.Uint64()) + require.Equal(t, uint64(2), persistedHeaders[1].Number.Uint64()) + require.Equal(t, uint64(3), persistedHeaders[2].Number.Uint64()) + require.Equal(t, uint64(4), persistedHeaders[3].Number.Uint64()) +} + +func TestHeaderDownloadUsingCheckpoints(t *testing.T) { + test := newHeaderDownloaderTest(t) + test.heimdall.EXPECT(). + FetchCheckpoints(gomock.Any(), gomock.Any()). + Return(test.fakeCheckpoints(8), nil). + Times(1) + test.sentry.EXPECT(). + PeersWithBlockNumInfo(). + Return(test.fakePeers(2)). + Times(4) + test.sentry.EXPECT(). + DownloadHeaders(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(test.defaultDownloadHeadersMock()). + Times(8) + var persistedHeaders []*types.Header + test.db.EXPECT(). + WriteHeaders(gomock.Any()). + DoAndReturn(test.defaultWriteHeadersMock(&persistedHeaders)). + Times(4) + + err := test.headerDownloader.DownloadUsingCheckpoints(context.Background(), 1) + require.NoError(t, err) + require.Len(t, persistedHeaders, 8) + // check headers are written in order + require.Equal(t, uint64(1), persistedHeaders[0].Number.Uint64()) + require.Equal(t, uint64(2), persistedHeaders[1].Number.Uint64()) + require.Equal(t, uint64(3), persistedHeaders[2].Number.Uint64()) + require.Equal(t, uint64(4), persistedHeaders[3].Number.Uint64()) + require.Equal(t, uint64(5), persistedHeaders[4].Number.Uint64()) + require.Equal(t, uint64(6), persistedHeaders[5].Number.Uint64()) + require.Equal(t, uint64(7), persistedHeaders[6].Number.Uint64()) + require.Equal(t, uint64(8), persistedHeaders[7].Number.Uint64()) +} + +func TestHeaderDownloadWhenInvalidStateThenPenalizePeerAndReDownload(t *testing.T) { + var firstTimeInvalidReturned bool + firstTimeInvalidReturnedPtr := &firstTimeInvalidReturned + test := newHeaderDownloaderTestWithOpts(t, headerDownloaderTestOpts{ + headerVerifier: func(statePoint *statePoint, headers []*types.Header) error { + if statePoint.startBlock.Cmp(new(big.Int).SetUint64(2)) == 0 && !*firstTimeInvalidReturnedPtr { + *firstTimeInvalidReturnedPtr = true + return errors.New("invalid checkpoint") + } + return nil + }, + }) + test.heimdall.EXPECT(). + FetchCheckpoints(gomock.Any(), gomock.Any()). + Return(test.fakeCheckpoints(6), nil). + Times(1) + test.sentry.EXPECT(). + PeersWithBlockNumInfo(). + Return(test.fakePeers(3)). + Times(3) + test.sentry.EXPECT(). + DownloadHeaders(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(test.defaultDownloadHeadersMock()). + // request 1,2,3 in parallel + // -> 2 fails + // requests 2,3,4 in parallel + // 3 is cached + // requests 5,6 in parallel + // in total 6 requests + 1 request for re-requesting checkpoint 2 + // total = 7 (note this also tests caching works) + Times(7) + test.sentry.EXPECT(). + Penalize(gomock.Eq("peer2")). + Times(1) + var persistedHeadersFirstTime, persistedHeadersRemaining []*types.Header + gomock.InOrder( + test.db.EXPECT(). + WriteHeaders(gomock.Any()). + DoAndReturn(test.defaultWriteHeadersMock(&persistedHeadersFirstTime)). + Times(1), + test.db.EXPECT(). + WriteHeaders(gomock.Any()). 
+ DoAndReturn(test.defaultWriteHeadersMock(&persistedHeadersRemaining)). + Times(2), + ) + + err := test.headerDownloader.DownloadUsingCheckpoints(context.Background(), 1) + require.NoError(t, err) + require.Len(t, persistedHeadersFirstTime, 1) + require.Len(t, persistedHeadersRemaining, 5) +} + +func TestHeaderDownloadWhenZeroPeersTriesAgain(t *testing.T) { + test := newHeaderDownloaderTest(t) + test.heimdall.EXPECT(). + FetchCheckpoints(gomock.Any(), gomock.Any()). + Return(test.fakeCheckpoints(8), nil). + Times(1) + test.sentry.EXPECT(). + DownloadHeaders(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(test.defaultDownloadHeadersMock()). + Times(8) + var persistedHeaders []*types.Header + test.db.EXPECT(). + WriteHeaders(gomock.Any()). + DoAndReturn(test.defaultWriteHeadersMock(&persistedHeaders)). + Times(4) + gomock.InOrder( + // first, no peers at all + test.sentry.EXPECT(). + PeersWithBlockNumInfo(). + Return(nil). + Times(1), + // second, 2 peers but not synced enough for us to use + test.sentry.EXPECT(). + PeersWithBlockNumInfo(). + Return(test.fakePeers(2, new(big.Int).SetUint64(0), new(big.Int).SetUint64(0))). + Times(1), + // then, 2 fully synced peers that we can use + test.sentry.EXPECT(). + PeersWithBlockNumInfo(). + Return(test.fakePeers(2)). + Times(4), + ) + + err := test.headerDownloader.DownloadUsingCheckpoints(context.Background(), 1) + require.NoError(t, err) + require.Len(t, persistedHeaders, 8) +} diff --git a/polygon/sync/header_verifier.go b/polygon/sync/header_verifier.go new file mode 100644 index 00000000000..6898f384926 --- /dev/null +++ b/polygon/sync/header_verifier.go @@ -0,0 +1,5 @@ +package sync + +import "github.com/ledgerwatch/erigon/core/types" + +type HeaderVerifier func(statePoint *statePoint, headers []*types.Header) error diff --git a/polygon/sync/heimdall.go b/polygon/sync/heimdall.go new file mode 100644 index 00000000000..f1addfad9e5 --- /dev/null +++ b/polygon/sync/heimdall.go @@ -0,0 +1,181 @@ +package sync + +import ( + "context" + "errors" + "math/big" + "time" + + "github.com/ledgerwatch/log/v3" + + "github.com/ledgerwatch/erigon/consensus/bor/heimdall" + "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint" + "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone" + "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" +) + +// Heimdall is a wrapper of Heimdall HTTP API +// +//go:generate mockgen -destination=./mock/heimdall_mock.go -package=mock . 
Heimdall +type Heimdall interface { + FetchCheckpoints(ctx context.Context, start uint64) ([]*checkpoint.Checkpoint, error) + FetchMilestones(ctx context.Context, start uint64) ([]*milestone.Milestone, error) + FetchSpan(ctx context.Context, start uint64) (*span.HeimdallSpan, error) + OnMilestoneEvent(ctx context.Context, callback func(*milestone.Milestone)) error +} + +// ErrIncompleteMilestoneRange happens when FetchMilestones is called with an old start block because old milestones are evicted +var ErrIncompleteMilestoneRange = errors.New("milestone range doesn't contain the start block") + +type HeimdallImpl struct { + client heimdall.IHeimdallClient + pollDelay time.Duration + logger log.Logger +} + +func NewHeimdall(client heimdall.IHeimdallClient, logger log.Logger) Heimdall { + impl := HeimdallImpl{ + client: client, + pollDelay: time.Second, + logger: logger, + } + return &impl +} + +func cmpNumToRange(n uint64, min *big.Int, max *big.Int) int { + num := new(big.Int).SetUint64(n) + if num.Cmp(min) < 0 { + return -1 + } + if num.Cmp(max) > 0 { + return 1 + } + return 0 +} + +func cmpBlockNumToCheckpointRange(n uint64, c *checkpoint.Checkpoint) int { + return cmpNumToRange(n, c.StartBlock, c.EndBlock) +} + +func cmpBlockNumToMilestoneRange(n uint64, m *milestone.Milestone) int { + return cmpNumToRange(n, m.StartBlock, m.EndBlock) +} + +func reverse[T any](s []T) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} + +func (impl *HeimdallImpl) FetchCheckpoints(ctx context.Context, start uint64) ([]*checkpoint.Checkpoint, error) { + count, err := impl.client.FetchCheckpointCount(ctx) + if err != nil { + return nil, err + } + + var checkpoints []*checkpoint.Checkpoint + + for i := count; i >= 1; i-- { + c, err := impl.client.FetchCheckpoint(ctx, i) + if err != nil { + return nil, err + } + + cmpResult := cmpBlockNumToCheckpointRange(start, c) + // the start block is past the last checkpoint + if cmpResult > 0 { + return nil, nil + } + + checkpoints = append(checkpoints, c) + + // the checkpoint contains the start block + if cmpResult == 0 { + break + } + } + + reverse(checkpoints) + return checkpoints, nil +} + +func (impl *HeimdallImpl) FetchMilestones(ctx context.Context, start uint64) ([]*milestone.Milestone, error) { + count, err := impl.client.FetchMilestoneCount(ctx) + if err != nil { + return nil, err + } + + var milestones []*milestone.Milestone + + for i := count; i >= 1; i-- { + m, err := impl.client.FetchMilestone(ctx, i) + if err != nil { + if errors.Is(err, heimdall.ErrNotInMilestoneList) { + reverse(milestones) + return milestones, ErrIncompleteMilestoneRange + } + return nil, err + } + + cmpResult := cmpBlockNumToMilestoneRange(start, m) + // the start block is past the last milestone + if cmpResult > 0 { + return nil, nil + } + + milestones = append(milestones, m) + + // the milestone contains the start block + if cmpResult == 0 { + break + } + } + + reverse(milestones) + return milestones, nil +} + +func (impl *HeimdallImpl) FetchSpan(ctx context.Context, start uint64) (*span.HeimdallSpan, error) { + return impl.client.Span(ctx, span.IDAt(start)) +} + +func (impl *HeimdallImpl) OnMilestoneEvent(ctx context.Context, callback func(*milestone.Milestone)) error { + currentCount, err := impl.client.FetchMilestoneCount(ctx) + if err != nil { + return err + } + + go func() { + for { + count, err := impl.client.FetchMilestoneCount(ctx) + if err != nil { + if !errors.Is(err, context.Canceled) { +
impl.logger.Error("HeimdallImpl.OnMilestoneEvent FetchMilestoneCount error", "err", err) + } + break + } + + if count <= currentCount { + pollDelayTimer := time.NewTimer(impl.pollDelay) + select { + case <-ctx.Done(): + return + case <-pollDelayTimer.C: + } + } else { + currentCount = count + m, err := impl.client.FetchMilestone(ctx, count) + if err != nil { + if !errors.Is(err, context.Canceled) { + impl.logger.Error("HeimdallImpl.OnMilestoneEvent FetchMilestone error", "err", err) + } + break + } + + go callback(m) + } + } + }() + + return nil +} diff --git a/polygon/sync/heimdall_test.go b/polygon/sync/heimdall_test.go new file mode 100644 index 00000000000..2036feb84d5 --- /dev/null +++ b/polygon/sync/heimdall_test.go @@ -0,0 +1,250 @@ +package sync + +import ( + "context" + "math/big" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/ledgerwatch/log/v3" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + heimdallclient "github.com/ledgerwatch/erigon/consensus/bor/heimdall" + "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint" + "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone" + heimdallmock "github.com/ledgerwatch/erigon/consensus/bor/heimdall/mock" +) + +func makeCheckpoint(start uint64, len uint) *checkpoint.Checkpoint { + c := checkpoint.Checkpoint{ + StartBlock: new(big.Int).SetUint64(start), + EndBlock: new(big.Int).SetUint64(start + uint64(len) - 1), + Timestamp: uint64(time.Now().Unix()), + } + return &c +} + +func makeMilestone(start uint64, len uint) *milestone.Milestone { + m := milestone.Milestone{ + StartBlock: new(big.Int).SetUint64(start), + EndBlock: new(big.Int).SetUint64(start + uint64(len) - 1), + Timestamp: uint64(time.Now().Unix()), + } + return &m +} + +type heimdallTest struct { + ctx context.Context + client *heimdallmock.MockIHeimdallClient + heimdall Heimdall + logger log.Logger +} + +func newHeimdallTest(t *testing.T) heimdallTest { + logger := log.New() + ctx := context.Background() + + ctrl := gomock.NewController(t) + t.Cleanup(ctrl.Finish) + + client := heimdallmock.NewMockIHeimdallClient(ctrl) + heimdall := NewHeimdall(client, logger) + + return heimdallTest{ + ctx, + client, + heimdall, + logger, + } +} + +func (test heimdallTest) setupCheckpoints(count int) []*checkpoint.Checkpoint { + var expectedCheckpoints []*checkpoint.Checkpoint + for i := 0; i < count; i++ { + c := makeCheckpoint(uint64(i*256), 256) + expectedCheckpoints = append(expectedCheckpoints, c) + } + + client := test.client + client.EXPECT().FetchCheckpointCount(gomock.Any()).Return(int64(len(expectedCheckpoints)), nil) + client.EXPECT().FetchCheckpoint(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, number int64) (*checkpoint.Checkpoint, error) { + return expectedCheckpoints[number-1], nil + }).AnyTimes() + + return expectedCheckpoints +} + +func (test heimdallTest) setupMilestones(count int) []*milestone.Milestone { + var expectedMilestones []*milestone.Milestone + for i := 0; i < count; i++ { + m := makeMilestone(uint64(i*16), 16) + expectedMilestones = append(expectedMilestones, m) + } + + client := test.client + client.EXPECT().FetchMilestoneCount(gomock.Any()).Return(int64(len(expectedMilestones)), nil) + client.EXPECT().FetchMilestone(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, number int64) (*milestone.Milestone, error) { + return expectedMilestones[number-1], nil + }).AnyTimes() + + return expectedMilestones +} + +func TestFetchCheckpoints1(t *testing.T) { + 
test := newHeimdallTest(t) + expectedCheckpoint := test.setupCheckpoints(1)[0] + + checkpoints, err := test.heimdall.FetchCheckpoints(test.ctx, 0) + require.Nil(t, err) + + require.Equal(t, 1, len(checkpoints)) + assert.Equal(t, expectedCheckpoint.Timestamp, checkpoints[0].Timestamp) +} + +func TestFetchCheckpointsPastLast(t *testing.T) { + test := newHeimdallTest(t) + _ = test.setupCheckpoints(1)[0] + + checkpoints, err := test.heimdall.FetchCheckpoints(test.ctx, 500) + require.Nil(t, err) + + require.Equal(t, 0, len(checkpoints)) +} + +func TestFetchCheckpoints10(t *testing.T) { + test := newHeimdallTest(t) + expectedCheckpoints := test.setupCheckpoints(10) + + checkpoints, err := test.heimdall.FetchCheckpoints(test.ctx, 0) + require.Nil(t, err) + + require.Equal(t, len(expectedCheckpoints), len(checkpoints)) + for i := 0; i < len(checkpoints); i++ { + assert.Equal(t, expectedCheckpoints[i].StartBlock.Uint64(), checkpoints[i].StartBlock.Uint64()) + } +} + +func TestFetchCheckpointsMiddleStart(t *testing.T) { + test := newHeimdallTest(t) + expectedCheckpoints := test.setupCheckpoints(10) + const offset = 6 + + checkpoints, err := test.heimdall.FetchCheckpoints(test.ctx, expectedCheckpoints[offset].StartBlock.Uint64()) + require.Nil(t, err) + + require.Equal(t, len(expectedCheckpoints)-offset, len(checkpoints)) + for i := 0; i < len(checkpoints); i++ { + assert.Equal(t, expectedCheckpoints[offset+i].StartBlock.Uint64(), checkpoints[i].StartBlock.Uint64()) + } +} + +func TestFetchMilestones1(t *testing.T) { + test := newHeimdallTest(t) + expectedMilestone := test.setupMilestones(1)[0] + + milestones, err := test.heimdall.FetchMilestones(test.ctx, 0) + require.Nil(t, err) + + require.Equal(t, 1, len(milestones)) + assert.Equal(t, expectedMilestone.Timestamp, milestones[0].Timestamp) +} + +func TestFetchMilestonesPastLast(t *testing.T) { + test := newHeimdallTest(t) + _ = test.setupMilestones(1)[0] + + milestones, err := test.heimdall.FetchMilestones(test.ctx, 500) + require.Nil(t, err) + + require.Equal(t, 0, len(milestones)) +} + +func TestFetchMilestones10(t *testing.T) { + test := newHeimdallTest(t) + expectedMilestones := test.setupMilestones(10) + + milestones, err := test.heimdall.FetchMilestones(test.ctx, 0) + require.Nil(t, err) + + require.Equal(t, len(expectedMilestones), len(milestones)) + for i := 0; i < len(milestones); i++ { + assert.Equal(t, expectedMilestones[i].StartBlock.Uint64(), milestones[i].StartBlock.Uint64()) + } +} + +func TestFetchMilestonesMiddleStart(t *testing.T) { + test := newHeimdallTest(t) + expectedMilestones := test.setupMilestones(10) + const offset = 6 + + milestones, err := test.heimdall.FetchMilestones(test.ctx, expectedMilestones[offset].StartBlock.Uint64()) + require.Nil(t, err) + + require.Equal(t, len(expectedMilestones)-offset, len(milestones)) + for i := 0; i < len(milestones); i++ { + assert.Equal(t, expectedMilestones[offset+i].StartBlock.Uint64(), milestones[i].StartBlock.Uint64()) + } +} + +func TestFetchMilestonesStartingBeforeEvictionPoint(t *testing.T) { + test := newHeimdallTest(t) + + var expectedMilestones []*milestone.Milestone + for i := 0; i < 20; i++ { + m := makeMilestone(uint64(i*16), 16) + expectedMilestones = append(expectedMilestones, m) + } + const keptMilestones = 5 + + client := test.client + client.EXPECT().FetchMilestoneCount(gomock.Any()).Return(int64(len(expectedMilestones)), nil) + client.EXPECT().FetchMilestone(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, number int64) (*milestone.Milestone, error) 
{ + if int(number) <= len(expectedMilestones)-keptMilestones { + return nil, heimdallclient.ErrNotInMilestoneList + } + return expectedMilestones[number-1], nil + }).AnyTimes() + + milestones, err := test.heimdall.FetchMilestones(test.ctx, 0) + require.NotNil(t, err) + require.ErrorIs(t, err, ErrIncompleteMilestoneRange) + + require.Equal(t, keptMilestones, len(milestones)) + for i := 0; i < len(milestones); i++ { + assert.Equal(t, expectedMilestones[len(expectedMilestones)-len(milestones)+i].StartBlock.Uint64(), milestones[i].StartBlock.Uint64()) + } +} + +func TestOnMilestoneEvent(t *testing.T) { + test := newHeimdallTest(t) + + var cancel context.CancelFunc + test.ctx, cancel = context.WithCancel(test.ctx) + defer cancel() + + client := test.client + count := new(int64) + client.EXPECT().FetchMilestoneCount(gomock.Any()).DoAndReturn(func(ctx context.Context) (int64, error) { + c := *count + if c == 2 { + cancel() + return 0, ctx.Err() + } + *count += 1 + return c, nil + }).AnyTimes() + + expectedMilestone := makeMilestone(0, 12) + client.EXPECT().FetchMilestone(gomock.Any(), gomock.Any()).Return(expectedMilestone, nil) + + eventChan := make(chan *milestone.Milestone) + err := test.heimdall.OnMilestoneEvent(test.ctx, func(m *milestone.Milestone) { + eventChan <- m + }) + require.Nil(t, err) + + m := <-eventChan + assert.Equal(t, expectedMilestone.Timestamp, m.Timestamp) +} diff --git a/polygon/sync/mock/db_mock.go b/polygon/sync/mock/db_mock.go new file mode 100644 index 00000000000..22e6fa6b482 --- /dev/null +++ b/polygon/sync/mock/db_mock.go @@ -0,0 +1,49 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ledgerwatch/erigon/polygon/sync (interfaces: DB) + +// Package mock is a generated GoMock package. +package mock + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + types "github.com/ledgerwatch/erigon/core/types" +) + +// MockDB is a mock of DB interface. +type MockDB struct { + ctrl *gomock.Controller + recorder *MockDBMockRecorder +} + +// MockDBMockRecorder is the mock recorder for MockDB. +type MockDBMockRecorder struct { + mock *MockDB +} + +// NewMockDB creates a new mock instance. +func NewMockDB(ctrl *gomock.Controller) *MockDB { + mock := &MockDB{ctrl: ctrl} + mock.recorder = &MockDBMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDB) EXPECT() *MockDBMockRecorder { + return m.recorder +} + +// WriteHeaders mocks base method. +func (m *MockDB) WriteHeaders(arg0 []*types.Header) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteHeaders", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteHeaders indicates an expected call of WriteHeaders. +func (mr *MockDBMockRecorder) WriteHeaders(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteHeaders", reflect.TypeOf((*MockDB)(nil).WriteHeaders), arg0) +} diff --git a/polygon/sync/mock/heimdall_mock.go b/polygon/sync/mock/heimdall_mock.go new file mode 100644 index 00000000000..c38947dc559 --- /dev/null +++ b/polygon/sync/mock/heimdall_mock.go @@ -0,0 +1,97 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ledgerwatch/erigon/polygon/sync (interfaces: Heimdall) + +// Package mock is a generated GoMock package. 
+package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + checkpoint "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint" + milestone "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone" + span "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" +) + +// MockHeimdall is a mock of Heimdall interface. +type MockHeimdall struct { + ctrl *gomock.Controller + recorder *MockHeimdallMockRecorder +} + +// MockHeimdallMockRecorder is the mock recorder for MockHeimdall. +type MockHeimdallMockRecorder struct { + mock *MockHeimdall +} + +// NewMockHeimdall creates a new mock instance. +func NewMockHeimdall(ctrl *gomock.Controller) *MockHeimdall { + mock := &MockHeimdall{ctrl: ctrl} + mock.recorder = &MockHeimdallMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockHeimdall) EXPECT() *MockHeimdallMockRecorder { + return m.recorder +} + +// FetchCheckpoints mocks base method. +func (m *MockHeimdall) FetchCheckpoints(arg0 context.Context, arg1 uint64) ([]*checkpoint.Checkpoint, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchCheckpoints", arg0, arg1) + ret0, _ := ret[0].([]*checkpoint.Checkpoint) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchCheckpoints indicates an expected call of FetchCheckpoints. +func (mr *MockHeimdallMockRecorder) FetchCheckpoints(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchCheckpoints", reflect.TypeOf((*MockHeimdall)(nil).FetchCheckpoints), arg0, arg1) +} + +// FetchMilestones mocks base method. +func (m *MockHeimdall) FetchMilestones(arg0 context.Context, arg1 uint64) ([]*milestone.Milestone, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchMilestones", arg0, arg1) + ret0, _ := ret[0].([]*milestone.Milestone) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchMilestones indicates an expected call of FetchMilestones. +func (mr *MockHeimdallMockRecorder) FetchMilestones(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchMilestones", reflect.TypeOf((*MockHeimdall)(nil).FetchMilestones), arg0, arg1) +} + +// FetchSpan mocks base method. +func (m *MockHeimdall) FetchSpan(arg0 context.Context, arg1 uint64) (*span.HeimdallSpan, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchSpan", arg0, arg1) + ret0, _ := ret[0].(*span.HeimdallSpan) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchSpan indicates an expected call of FetchSpan. +func (mr *MockHeimdallMockRecorder) FetchSpan(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchSpan", reflect.TypeOf((*MockHeimdall)(nil).FetchSpan), arg0, arg1) +} + +// OnMilestoneEvent mocks base method. +func (m *MockHeimdall) OnMilestoneEvent(arg0 context.Context, arg1 func(*milestone.Milestone)) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "OnMilestoneEvent", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// OnMilestoneEvent indicates an expected call of OnMilestoneEvent. 
+func (mr *MockHeimdallMockRecorder) OnMilestoneEvent(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnMilestoneEvent", reflect.TypeOf((*MockHeimdall)(nil).OnMilestoneEvent), arg0, arg1) +} diff --git a/polygon/sync/mock/sentry_mock.go b/polygon/sync/mock/sentry_mock.go new file mode 100644 index 00000000000..09da633586b --- /dev/null +++ b/polygon/sync/mock/sentry_mock.go @@ -0,0 +1,93 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ledgerwatch/erigon/polygon/sync (interfaces: Sentry) + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + big "math/big" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + types "github.com/ledgerwatch/erigon/core/types" + peerinfo "github.com/ledgerwatch/erigon/polygon/sync/peerinfo" +) + +// MockSentry is a mock of Sentry interface. +type MockSentry struct { + ctrl *gomock.Controller + recorder *MockSentryMockRecorder +} + +// MockSentryMockRecorder is the mock recorder for MockSentry. +type MockSentryMockRecorder struct { + mock *MockSentry +} + +// NewMockSentry creates a new mock instance. +func NewMockSentry(ctrl *gomock.Controller) *MockSentry { + mock := &MockSentry{ctrl: ctrl} + mock.recorder = &MockSentryMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSentry) EXPECT() *MockSentryMockRecorder { + return m.recorder +} + +// DownloadHeaders mocks base method. +func (m *MockSentry) DownloadHeaders(arg0 context.Context, arg1, arg2 *big.Int, arg3 string) ([]*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DownloadHeaders", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DownloadHeaders indicates an expected call of DownloadHeaders. +func (mr *MockSentryMockRecorder) DownloadHeaders(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DownloadHeaders", reflect.TypeOf((*MockSentry)(nil).DownloadHeaders), arg0, arg1, arg2, arg3) +} + +// MaxPeers mocks base method. +func (m *MockSentry) MaxPeers() int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MaxPeers") + ret0, _ := ret[0].(int) + return ret0 +} + +// MaxPeers indicates an expected call of MaxPeers. +func (mr *MockSentryMockRecorder) MaxPeers() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MaxPeers", reflect.TypeOf((*MockSentry)(nil).MaxPeers)) +} + +// PeersWithBlockNumInfo mocks base method. +func (m *MockSentry) PeersWithBlockNumInfo() peerinfo.PeersWithBlockNumInfo { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PeersWithBlockNumInfo") + ret0, _ := ret[0].(peerinfo.PeersWithBlockNumInfo) + return ret0 +} + +// PeersWithBlockNumInfo indicates an expected call of PeersWithBlockNumInfo. +func (mr *MockSentryMockRecorder) PeersWithBlockNumInfo() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeersWithBlockNumInfo", reflect.TypeOf((*MockSentry)(nil).PeersWithBlockNumInfo)) +} + +// Penalize mocks base method. +func (m *MockSentry) Penalize(arg0 string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Penalize", arg0) +} + +// Penalize indicates an expected call of Penalize. 
+func (mr *MockSentryMockRecorder) Penalize(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Penalize", reflect.TypeOf((*MockSentry)(nil).Penalize), arg0) +} diff --git a/polygon/sync/peerinfo/peer_with_block_num_info.go b/polygon/sync/peerinfo/peer_with_block_num_info.go new file mode 100644 index 00000000000..643aa078deb --- /dev/null +++ b/polygon/sync/peerinfo/peer_with_block_num_info.go @@ -0,0 +1,22 @@ +package peerinfo + +import "math/big" + +type PeerWithBlockNumInfo struct { + ID string + BlockNum *big.Int +} + +type PeersWithBlockNumInfo []*PeerWithBlockNumInfo + +func (peers PeersWithBlockNumInfo) Len() int { + return len(peers) +} + +func (peers PeersWithBlockNumInfo) Less(i int, j int) bool { + return peers[i].BlockNum.Cmp(peers[j].BlockNum) < 1 +} + +func (peers PeersWithBlockNumInfo) Swap(i int, j int) { + peers[i], peers[j] = peers[j], peers[i] +} diff --git a/polygon/sync/sentry.go b/polygon/sync/sentry.go new file mode 100644 index 00000000000..aa8e5198cc6 --- /dev/null +++ b/polygon/sync/sentry.go @@ -0,0 +1,17 @@ +package sync + +import ( + "context" + "math/big" + + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/polygon/sync/peerinfo" +) + +//go:generate mockgen -destination=./mock/sentry_mock.go -package=mock . Sentry +type Sentry interface { + MaxPeers() int + PeersWithBlockNumInfo() peerinfo.PeersWithBlockNumInfo + DownloadHeaders(ctx context.Context, start *big.Int, end *big.Int, peerID string) ([]*types.Header, error) + Penalize(peerID string) +} diff --git a/polygon/sync/state_point.go b/polygon/sync/state_point.go new file mode 100644 index 00000000000..dfd61da3858 --- /dev/null +++ b/polygon/sync/state_point.go @@ -0,0 +1,47 @@ +package sync + +import ( + "math/big" + + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint" + "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone" +) + +func statePointFromCheckpoint(checkpoint *checkpoint.Checkpoint) *statePoint { + return &statePoint{ + proposer: checkpoint.Proposer, + startBlock: new(big.Int).Set(checkpoint.StartBlock), + endBlock: new(big.Int).Set(checkpoint.EndBlock), + rootHash: checkpoint.RootHash, + chainId: checkpoint.BorChainID, + timestamp: checkpoint.Timestamp, + kind: checkpointKind, + } +} + +func statePointFromMilestone(milestone *milestone.Milestone) *statePoint { + return &statePoint{ + proposer: milestone.Proposer, + startBlock: new(big.Int).Set(milestone.StartBlock), + endBlock: new(big.Int).Set(milestone.EndBlock), + rootHash: milestone.Hash, + chainId: milestone.BorChainID, + timestamp: milestone.Timestamp, + kind: milestoneKind, + } +} + +type statePoint struct { + proposer common.Address + startBlock *big.Int + endBlock *big.Int + rootHash common.Hash + chainId string + timestamp uint64 + kind statePointKind +} + +func (sp *statePoint) length() int { + return int(new(big.Int).Sub(sp.endBlock, sp.startBlock).Int64() + 1) +} diff --git a/polygon/sync/state_point_kind.go b/polygon/sync/state_point_kind.go new file mode 100644 index 00000000000..c61cb5e84bc --- /dev/null +++ b/polygon/sync/state_point_kind.go @@ -0,0 +1,8 @@ +package sync + +type statePointKind string + +const ( + checkpointKind = statePointKind("checkpoint") + milestoneKind = statePointKind("milestone") +) diff --git a/polygon/sync/state_points.go b/polygon/sync/state_points.go new file mode 100644 index 00000000000..5577f24d2f4 --- /dev/null +++ 
b/polygon/sync/state_points.go @@ -0,0 +1,26 @@ +package sync + +import ( + "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint" + "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone" +) + +func statePointsFromCheckpoints(checkpoints []*checkpoint.Checkpoint) statePoints { + statePoints := make(statePoints, len(checkpoints)) + for i, checkpoint := range checkpoints { + statePoints[i] = statePointFromCheckpoint(checkpoint) + } + + return statePoints +} + +func statePointsFromMilestones(milestones []*milestone.Milestone) statePoints { + statePoints := make(statePoints, len(milestones)) + for i, milestone := range milestones { + statePoints[i] = statePointFromMilestone(milestone) + } + + return statePoints +} + +type statePoints []*statePoint diff --git a/tests/state_test_util.go b/tests/state_test_util.go index c10b4d69184..32cf2e639ec 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -46,6 +46,7 @@ import ( "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/trie" + "github.com/ledgerwatch/log/v3" ) // StateTest checks transaction processing without block context. @@ -182,7 +183,7 @@ func (t *StateTest) RunNoVerify(tx kv.RwTx, subtest StateSubtest, vmconfig vm.Co return nil, libcommon.Hash{}, UnsupportedForkError{subtest.Fork} } vmconfig.ExtraEips = eips - block, _, err := core.GenesisToBlock(t.genesis(config), "") + block, _, err := core.GenesisToBlock(t.genesis(config), "", log.Root()) if err != nil { return nil, libcommon.Hash{}, UnsupportedForkError{subtest.Fork} } diff --git a/turbo/app/README.md b/turbo/app/README.md new file mode 100644 index 00000000000..c7bc4b7f61d --- /dev/null +++ b/turbo/app/README.md @@ -0,0 +1,71 @@ +# Erigon Sub Commands + +## Backup + +## Import + +## Init + +## Support + +## Snapshots + +This sub command can be used for manipulating snapshot files. + +### Uploader + +The `snapshots uploader` command starts a version of erigon customized for uploading snapshot files to +a remote location. + +It breaks the stage execution process after the senders stage and then uses the snapshot stage to write +the resulting headers, bodies and (in the case of Polygon) bor spans and events to snapshot files. Because +this process avoids execution, it runs significantly faster than a standard erigon configuration. + +The uploader uses rclone to send seedable snapshot files (100K or 500K blocks) to a remote storage location specified +in the rclone config file.
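+ +The remote must already be defined in the rclone config before the uploader can use it. As a minimal, hypothetical sketch (assuming an S3-compatible store such as Cloudflare R2; the remote name `r2`, the credentials and the endpoint below are all placeholders), such a remote could be created with: + +```shell +# hypothetical example - substitute your own provider, credentials and endpoint +rclone config create r2 s3 \ + provider=Cloudflare \ + access_key_id=<access-key> \ + secret_access_key=<secret-key> \ + endpoint=https://<account-id>.r2.cloudflarestorage.com +``` + +Once defined, an `upload.location` value such as `r2:erigon-v2-snapshots-bor-mainnet` refers to the `erigon-v2-snapshots-bor-mainnet` bucket on that remote.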
+ +The **uploader** is configured to minimize disk usage by doing the following: + +* It removes snapshots once they have been uploaded +* It aggressively prunes the database once entities are transferred to snapshots + +In addition to this it has the following performance-related features: + +* It maximises the workers allocated to snapshot processing to improve throughput +* It can be started from scratch by downloading the latest snapshots from the remote location to seed processing + +The following configuration can be used to upload blocks from genesis where: + +| Flag | Description | +|---|---| +| sync.loop.prune.limit=500000 | Sets the number of records to be pruned from the database per iteration to 500,000 (as opposed to the default of 100) | +| upload.location=r2:erigon-v2-snapshots-bor-mainnet | Specifies the rclone location to upload snapshots to | +| upload.from=earliest | Sets the upload start location to be the earliest available block, which will be 0 in the case of a fresh installation, or otherwise the last block in the chaindata db | +| upload.snapshot.limit=1500000 | Tells the uploader to keep a maximum of 1,500,000 blocks in the `snapshots` directory before deleting the oldest snapshots | +| snapshot.version=2 | Indicates the version to be appended to snapshot file names when they are created | + + +```shell +erigon/build/bin/erigon snapshots uploader --datadir=~/snapshots/bor-mainnet --chain=bor-mainnet \ + --bor.heimdall=https://heimdall-api.polygon.technology --bor.milestone=false --sync.loop.prune.limit=500000 \ + --upload.location=r2:erigon-v2-snapshots-bor-mainnet --upload.from=earliest --snapshot.version=2 \ + --upload.snapshot.limit=1500000 +``` + +In order to start from the latest uploaded block when starting with an empty drive, set the `upload.from` flag to `latest`, e.g. + +```shell +--upload.from=latest +``` + +The configuration of the uploader implicitly sets the following flag values on start-up: + +```shell + --sync.loop.break=Senders + --sync.loop.block.limit=100000 + --sync.loop.prune.limit=100000 + --upload.snapshot.limit=1500000 + --nodownloader=true + --http.enabled=false + --txpool.disable=true +``` diff --git a/turbo/app/make_app.go b/turbo/app/make_app.go index a591af54265..d4e5f17bbb2 100644 --- a/turbo/app/make_app.go +++ b/turbo/app/make_app.go @@ -51,23 +51,8 @@ func MakeApp(name string, action cli.ActionFunc, cliFlags []cli.Flag) *cli.App { // run default action return action(context) } - app.Flags = append(cliFlags, debug.Flags...) // debug flags are required - app.Flags = append(app.Flags, utils.MetricFlags...) - app.Flags = append(app.Flags, logging.Flags...) - app.Flags = append(app.Flags, &utils.ConfigFlag) - // remove exact duplicate flags, keeping only the first one. this will allow easier composition later down the line - allFlags := app.Flags - newFlags := make([]cli.Flag, 0, len(allFlags)) - seen := map[string]struct{}{} - for _, vv := range allFlags { - v := vv - if _, ok := seen[v.String()]; ok { - continue - } - newFlags = append(newFlags, v) - } - app.Flags = newFlags + app.Flags = appFlags(cliFlags) app.After = func(ctx *cli.Context) error { debug.Exit() @@ -83,6 +68,28 @@ func MakeApp(name string, action cli.ActionFunc, cliFlags []cli.Flag) *cli.App { return app } +func appFlags(cliFlags []cli.Flag) []cli.Flag { + + flags := append(cliFlags, debug.Flags...) // debug flags are required + flags = append(flags, utils.MetricFlags...) + flags = append(flags, logging.Flags...) + flags = append(flags, &utils.ConfigFlag) + + // remove exact duplicate flags, keeping only the first one.
this will allow easier composition later down the line + allFlags := flags + newFlags := make([]cli.Flag, 0, len(allFlags)) + seen := map[string]struct{}{} + for _, vv := range allFlags { + v := vv + if _, ok := seen[v.String()]; ok { + continue + } + newFlags = append(newFlags, v) + } + + return newFlags +} + // MigrateFlags makes all global flag values available in the // context. This should be called as early as possible in app.Before. // diff --git a/turbo/app/snapshots_cmd.go b/turbo/app/snapshots_cmd.go index 9dd63620653..2d333c576b7 100644 --- a/turbo/app/snapshots_cmd.go +++ b/turbo/app/snapshots_cmd.go @@ -3,23 +3,27 @@ package app import ( "bufio" "bytes" + "context" "encoding/binary" "errors" "fmt" "io" + "net/http" "os" "path/filepath" "runtime" "time" "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/erigon-lib/chain/snapcfg" "github.com/ledgerwatch/erigon-lib/common/dbg" + "github.com/ledgerwatch/erigon-lib/common/dir" + "github.com/ledgerwatch/erigon-lib/metrics" "github.com/ledgerwatch/log/v3" "github.com/urfave/cli/v2" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/datadir" - "github.com/ledgerwatch/erigon-lib/common/dir" "github.com/ledgerwatch/erigon-lib/compress" "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" @@ -32,11 +36,15 @@ import ( "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/rawdb/blockio" + "github.com/ledgerwatch/erigon/diagnostics" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/ethconfig/estimate" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/params" + erigoncli "github.com/ledgerwatch/erigon/turbo/cli" "github.com/ledgerwatch/erigon/turbo/debug" "github.com/ledgerwatch/erigon/turbo/logging" + "github.com/ledgerwatch/erigon/turbo/node" "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" ) @@ -80,6 +88,27 @@ var snapshotCommand = cli.Command{ &SnapshotEveryFlag, }), }, + { + Name: "uploader", + Action: doUploaderCommand, + Usage: "run erigon in snapshot upload mode (no execution)", + Flags: uploaderCommandFlags([]cli.Flag{ + &SnapshotVersionFlag, + &erigoncli.UploadLocationFlag, + &erigoncli.UploadFromFlag, + &erigoncli.FrozenBlockLimitFlag, + }), + Before: func(context *cli.Context) error { + erigoncli.SyncLoopBreakAfterFlag.Value = "Senders" + erigoncli.SyncLoopBlockLimitFlag.Value = 100000 + erigoncli.SyncLoopPruneLimitFlag.Value = 100000 + erigoncli.FrozenBlockLimitFlag.Value = 1500000 + utils.NoDownloaderFlag.Value = true + utils.HTTPEnabledFlag.Value = false + utils.TxPoolDisableFlag.Value = true + return nil + }, + }, { Name: "uncompress", Action: doUncompress, @@ -134,6 +163,11 @@ var ( Usage: "Do operation every N blocks", Value: 1_000, } + SnapshotVersionFlag = cli.IntFlag{ + Name: "snapshot.version", + Usage: "Snapshot files version.", + Value: 1, + } SnapshotRebuildFlag = cli.BoolFlag{ Name: "rebuild", Usage: "Force rebuild", @@ -256,31 +290,57 @@ func doIndicesCommand(cliCtx *cli.Context) error { if rebuild { panic("not implemented") } - cfg := ethconfig.NewSnapCfg(true, true, false) - allSnapshots := freezeblocks.NewRoSnapshots(cfg, dirs.Snap, logger) - if err := allSnapshots.ReopenFolder(); err != nil { + cfg := ethconfig.NewSnapCfg(true, false, true) + blockSnaps, borSnaps, br, agg, err := openSnaps(ctx, cfg, dirs, snapcfg.KnownCfg(chainConfig.ChainName, 0).Version, chainDB, logger) + + if err != nil { return 
err } - allSnapshots.LogStat() - indexWorkers := estimate.IndexSnapshot.Workers() - if err := freezeblocks.BuildMissedIndices("Indexing", ctx, dirs, chainConfig, indexWorkers, logger); err != nil { + defer blockSnaps.Close() + defer borSnaps.Close() + defer agg.Close() + if err := br.BuildMissedIndicesIfNeed(ctx, "Indexing", nil, chainConfig); err != nil { return err } - agg, err := libstate.NewAggregatorV3(ctx, dirs.SnapHistory, dirs.Tmp, ethconfig.HistoryV3AggregationStep, chainDB, logger) + err = agg.BuildMissedIndices(ctx, estimate.IndexSnapshot.Workers()) if err != nil { return err } - err = agg.OpenFolder() + + return nil +} + +func openSnaps(ctx context.Context, cfg ethconfig.BlocksFreezing, dirs datadir.Dirs, version uint8, chainDB kv.RwDB, logger log.Logger) ( + blockSnaps *freezeblocks.RoSnapshots, borSnaps *freezeblocks.BorRoSnapshots, br *freezeblocks.BlockRetire, agg *libstate.AggregatorV3, err error, +) { + blockSnaps = freezeblocks.NewRoSnapshots(cfg, dirs.Snap, version, logger) + if err = blockSnaps.ReopenFolder(); err != nil { + return + } + blockSnaps.LogStat("open") + + borSnaps = freezeblocks.NewBorRoSnapshots(cfg, dirs.Snap, version, logger) + if err = borSnaps.ReopenFolder(); err != nil { + return + } + borSnaps.LogStat("open") + + agg, err = libstate.NewAggregatorV3(ctx, dirs.SnapHistory, dirs.Tmp, ethconfig.HistoryV3AggregationStep, chainDB, logger) if err != nil { - return err + return } - err = agg.BuildMissedIndices(ctx, indexWorkers) + agg.SetWorkers(estimate.CompressSnapshot.Workers()) + err = agg.OpenFolder() if err != nil { - return err + return } - return nil + blockReader := freezeblocks.NewBlockReader(blockSnaps, borSnaps) + blockWriter := blockio.NewBlockWriter(fromdb.HistV3(chainDB)) + chainConfig := fromdb.ChainConfig(chainDB) + br = freezeblocks.NewBlockRetire(estimate.CompressSnapshot.Workers(), dirs, blockReader, blockWriter, chainDB, chainConfig, nil, logger) + return } func doUncompress(cliCtx *cli.Context) error { @@ -399,36 +459,34 @@ func doRetireCommand(cliCtx *cli.Context) error { from := cliCtx.Uint64(SnapshotFromFlag.Name) to := cliCtx.Uint64(SnapshotToFlag.Name) every := cliCtx.Uint64(SnapshotEveryFlag.Name) + version := uint8(cliCtx.Int(SnapshotVersionFlag.Name)) + db := mdbx.NewMDBX(logger).Label(kv.ChainDB).Path(dirs.Chaindata).MustOpen() defer db.Close() cfg := ethconfig.NewSnapCfg(true, false, true) - blockSnapshots := freezeblocks.NewRoSnapshots(cfg, dirs.Snap, logger) - borSnapshots := freezeblocks.NewBorRoSnapshots(cfg, dirs.Snap, logger) - if err := blockSnapshots.ReopenFolder(); err != nil { - return err - } - blockReader := freezeblocks.NewBlockReader(blockSnapshots, borSnapshots) - blockWriter := blockio.NewBlockWriter(fromdb.HistV3(db)) - - br := freezeblocks.NewBlockRetire(estimate.CompressSnapshot.Workers(), dirs, blockReader, blockWriter, db, nil, logger) - agg, err := libstate.NewAggregatorV3(ctx, dirs.SnapHistory, dirs.Tmp, ethconfig.HistoryV3AggregationStep, db, logger) + blockSnaps, borSnaps, br, agg, err := openSnaps(ctx, cfg, dirs, version, db, logger) if err != nil { return err } - err = agg.OpenFolder() - if err != nil { + defer blockSnaps.Close() + defer borSnaps.Close() + defer agg.Close() + + chainConfig := fromdb.ChainConfig(db) + if err := br.BuildMissedIndicesIfNeed(ctx, "retire", nil, chainConfig); err != nil { return err } - agg.SetWorkers(estimate.CompressSnapshot.Workers()) + agg.CleanDir() + var forwardProgress uint64 if to == 0 { - var forwardProgress uint64 db.View(ctx, func(tx kv.Tx) error { 
forwardProgress, err = stages.GetStageProgress(tx, stages.Senders) return err }) + blockReader, _ := br.IO() from2, to2, ok := freezeblocks.CanRetire(forwardProgress, blockReader.FrozenBlocks()) if ok { from, to, every = from2, to2, to2-from2 @@ -436,52 +494,25 @@ func doRetireCommand(cliCtx *cli.Context) error { } logger.Info("Params", "from", from, "to", to, "every", every) - { - logEvery := time.NewTicker(10 * time.Second) - defer logEvery.Stop() - - for j := 0; j < 10_000; j++ { // prune happens by small steps, so need many runs - if err := db.Update(ctx, func(tx kv.RwTx) error { - if err := br.PruneAncientBlocks(tx, 100, false /* includeBor */); err != nil { - return err - } - - select { - case <-ctx.Done(): - return ctx.Err() - case <-logEvery.C: - firstNonGenesisHeader, err := rawdbv3.SecondKey(tx, kv.Headers) - if err != nil { - return err - } - if len(firstNonGenesisHeader) > 0 { - logger.Info("Prunning old blocks", "progress", binary.BigEndian.Uint64(firstNonGenesisHeader)) - } - default: - } - return nil - }); err != nil { - return err - } - } + if err := br.RetireBlocks(ctx, 0, forwardProgress, log.LvlInfo, nil, nil); err != nil { + return err } - for i := from; i < to; i += every { - if err := br.RetireBlocks(ctx, i, i+every, log.LvlInfo, nil, nil); err != nil { - panic(err) - } - if err := br.RetireBorBlocks(ctx, i, i+every, log.LvlInfo, nil, nil); err != nil { - panic(err) + if err := db.Update(ctx, func(tx kv.RwTx) error { + blockReader, _ := br.IO() + if err := rawdb.WriteSnapshots(tx, blockReader.FrozenFiles(), agg.Files()); err != nil { + return err } - if err := db.Update(ctx, func(tx kv.RwTx) error { - if err := rawdb.WriteSnapshots(tx, blockReader.FrozenFiles(), agg.Files()); err != nil { + return nil + }); err != nil { + return err + } + + for j := 0; j < 10_000; j++ { // prune happens by small steps, so need many runs + if err := db.UpdateNosync(ctx, func(tx kv.RwTx) error { + if err := br.PruneAncientBlocks(tx, 100); err != nil { return err } - for j := 0; j < 10_000; j++ { // prune happens by small steps, so need many runs - if err := br.PruneAncientBlocks(tx, 100, true /* includeBor */); err != nil { - return err - } - } return nil }); err != nil { return err @@ -533,7 +564,7 @@ func doRetireCommand(cliCtx *cli.Context) error { return err } if err := db.UpdateNosync(ctx, func(tx kv.RwTx) error { - return rawdb.WriteSnapshots(tx, blockSnapshots.Files(), agg.Files()) + return rawdb.WriteSnapshots(tx, blockSnaps.Files(), agg.Files()) }); err != nil { return err } @@ -552,10 +583,61 @@ func doRetireCommand(cliCtx *cli.Context) error { } logger.Info("Prune state history") if err := db.Update(ctx, func(tx kv.RwTx) error { - return rawdb.WriteSnapshots(tx, blockSnapshots.Files(), agg.Files()) + return rawdb.WriteSnapshots(tx, blockSnaps.Files(), agg.Files()) }); err != nil { return err } return nil } + +func uploaderCommandFlags(flags []cli.Flag) []cli.Flag { + return joinFlags(erigoncli.DefaultFlags, flags, []cli.Flag{ + &erigoncli.SyncLoopBreakAfterFlag, + &erigoncli.SyncLoopBlockLimitFlag, + &erigoncli.SyncLoopPruneLimitFlag, + }) +} + +func doUploaderCommand(cliCtx *cli.Context) error { + var logger log.Logger + var err error + var metricsMux *http.ServeMux + + if logger, metricsMux, err = debug.Setup(cliCtx, true /* root logger */); err != nil { + return err + } + + // initializing the node and providing the current git commit there + + logger.Info("Build info", "git_branch", params.GitBranch, "git_tag", params.GitTag, "git_commit", params.GitCommit) + 
erigonInfoGauge := metrics.GetOrCreateGauge(fmt.Sprintf(`erigon_info{version="%s",commit="%s"}`, params.Version, params.GitCommit)) + erigonInfoGauge.Set(1) + + if version := uint8(cliCtx.Int(SnapshotVersionFlag.Name)); version != 0 { + snapcfg.SnapshotVersion(version) + } + + nodeCfg := node.NewNodConfigUrfave(cliCtx, logger) + if err := datadir.ApplyMigrations(nodeCfg.Dirs); err != nil { + return err + } + + ethCfg := node.NewEthConfigUrfave(cliCtx, nodeCfg, logger) + + ethNode, err := node.New(cliCtx.Context, nodeCfg, ethCfg, logger) + if err != nil { + log.Error("Erigon startup", "err", err) + return err + } + + if metricsMux != nil { + diagnostics.Setup(cliCtx, metricsMux, ethNode) + } + + err = ethNode.Serve() + if err != nil { + log.Error("error while serving an Erigon node", "err", err) + } + return err +} diff --git a/turbo/cli/flags.go b/turbo/cli/flags.go index 29d7b023056..4ab09e4358b 100644 --- a/turbo/cli/flags.go +++ b/turbo/cli/flags.go @@ -10,6 +10,7 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/rpc/rpccfg" "github.com/c2h5oh/datasize" @@ -148,6 +149,42 @@ var ( Value: "", } + SyncLoopPruneLimitFlag = cli.UintFlag{ + Name: "sync.loop.prune.limit", + Usage: "Sets the maximum number of blocks to prune per loop iteration", + Value: 100, + } + + SyncLoopBreakAfterFlag = cli.StringFlag{ + Name: "sync.loop.break", + Usage: "Sets the last stage of the sync loop to run", + Value: "", + } + + SyncLoopBlockLimitFlag = cli.UintFlag{ + Name: "sync.loop.block.limit", + Usage: "Sets the maximum number of blocks to process per loop iteration", + Value: 0, // unlimited + } + + UploadLocationFlag = cli.StringFlag{ + Name: "upload.location", + Usage: "Location to upload snapshot segments to", + Value: "", + } + + UploadFromFlag = cli.StringFlag{ + Name: "upload.from", + Usage: "Blocks to upload from: number, or 'earliest' (start of the chain), 'latest' (last segment previously uploaded)", + Value: "latest", + } + + FrozenBlockLimitFlag = cli.UintFlag{ + Name: "upload.snapshot.limit", + Usage: "Sets the maximum number of snapshot blocks to hold on the local disk when uploading", + Value: 1500000, + } + BadBlockFlag = cli.StringFlag{ Name: "bad.block", Usage: "Marks block with given hex string as bad and forces initial reorg before normal staged sync", @@ -255,6 +292,32 @@ func ApplyFlagsForEthConfig(ctx *cli.Context, cfg *ethconfig.Config, logger log. cfg.Sync.LoopThrottle = syncLoopThrottle } + if limit := ctx.Uint(SyncLoopPruneLimitFlag.Name); limit > 0 { + cfg.Sync.PruneLimit = int(limit) + } + + if stage := ctx.String(SyncLoopBreakAfterFlag.Name); len(stage) > 0 { + cfg.Sync.BreakAfterStage = stage + } + + if limit := ctx.Uint(SyncLoopBlockLimitFlag.Name); limit > 0 { + cfg.Sync.LoopBlockLimit = limit + } + + if location := ctx.String(UploadLocationFlag.Name); len(location) > 0 { + cfg.Sync.UploadLocation = location + } + + if blockno := ctx.String(UploadFromFlag.Name); len(blockno) > 0 { + cfg.Sync.UploadFrom = rpc.AsBlockNumber(blockno) + } else { + cfg.Sync.UploadFrom = rpc.LatestBlockNumber + } + + if limit := ctx.Uint(FrozenBlockLimitFlag.Name); limit > 0 { + cfg.Sync.FrozenBlockLimit = uint64(limit) + } + if ctx.String(BadBlockFlag.Name) != "" { bytes, err := hexutil.Decode(ctx.String(BadBlockFlag.Name)) if err != nil { @@ -269,7 +332,7 @@ func ApplyFlagsForEthConfig(ctx *cli.Context, cfg *ethconfig.Config, logger log.
downloadRate := ctx.String(utils.TorrentDownloadRateFlag.Name) uploadRate := ctx.String(utils.TorrentUploadRateFlag.Name) - logger.Info("[Downloader] Runnning with", "ipv6-enabled", !disableIPV6, "ipv4-enabled", !disableIPV4, "download.rate", downloadRate, "upload.rate", uploadRate) + logger.Info("[Downloader] Running with", "ipv6-enabled", !disableIPV6, "ipv4-enabled", !disableIPV4, "download.rate", downloadRate, "upload.rate", uploadRate) if ctx.Bool(utils.DisableIPV6.Name) { cfg.Downloader.ClientConfig.DisableIPv6 = true } @@ -354,10 +417,15 @@ func setEmbeddedRpcDaemon(ctx *cli.Context, cfg *nodecfg.Config, logger log.Logg } apis := ctx.String(utils.HTTPApiFlag.Name) - logger.Info("starting HTTP APIs", "APIs", apis) c := &httpcfg.HttpCfg{ - Enabled: ctx.Bool(utils.HTTPEnabledFlag.Name), + Enabled: func() bool { + if ctx.IsSet(utils.HTTPEnabledFlag.Name) { + return ctx.Bool(utils.HTTPEnabledFlag.Name) + } + + return true + }(), HttpServerEnabled: ctx.Bool(utils.HTTPServerEnabledFlag.Name), Dirs: cfg.Dirs, @@ -408,6 +476,11 @@ func setEmbeddedRpcDaemon(ctx *cli.Context, cfg *nodecfg.Config, logger log.Logg StateCache: kvcache.DefaultCoherentConfig, RPCSlowLogThreshold: ctx.Duration(utils.RPCSlowFlag.Name), } + + if c.Enabled { + logger.Info("starting HTTP APIs", "APIs", apis) + } + if ctx.IsSet(utils.HttpCompressionFlag.Name) { c.HttpCompression = ctx.Bool(utils.HttpCompressionFlag.Name) } else { diff --git a/turbo/debug/flags.go b/turbo/debug/flags.go index 1be6efa51b3..a2bd0f10a94 100644 --- a/turbo/debug/flags.go +++ b/turbo/debug/flags.go @@ -184,7 +184,7 @@ func Setup(ctx *cli.Context, rootLogger bool) (log.Logger, *http.ServeMux, error RaiseFdLimit() - logger := logging.SetupLoggerCtx("erigon", ctx, rootLogger) + logger := logging.SetupLoggerCtx("erigon", ctx, log.LvlInfo, log.LvlInfo, rootLogger) if traceFile := ctx.String(traceFlag.Name); traceFile != "" { if err := Handler.StartGoTrace(traceFile); err != nil { diff --git a/turbo/engineapi/engine_helpers/fork_validator.go b/turbo/engineapi/engine_helpers/fork_validator.go index 9f742279a95..b0b12de0fdf 100644 --- a/turbo/engineapi/engine_helpers/fork_validator.go +++ b/turbo/engineapi/engine_helpers/fork_validator.go @@ -17,9 +17,10 @@ import ( "context" "errors" "fmt" - "github.com/ledgerwatch/erigon/cl/phase1/core/state/lru" "sync" + "github.com/ledgerwatch/erigon/cl/phase1/core/state/lru" + libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/membatchwithdb" @@ -132,7 +133,7 @@ func (fv *ForkValidator) FlushExtendingFork(tx kv.RwTx, accumulator *shards.Accu // if the payload extends the canonical chain, then we stack it in extendingFork without any unwind. // if the payload is a fork then we unwind to the point where the fork meets the canonical chain, and there we check whether it is valid. // if for any reason none of the actions above can be performed due to lack of information, we accept the payload and avoid validation. 
-func (fv *ForkValidator) ValidatePayload(tx kv.Tx, header *types.Header, body *types.RawBody, extendCanonical bool) (status engine_types.EngineStatus, latestValidHash libcommon.Hash, validationError error, criticalError error) { +func (fv *ForkValidator) ValidatePayload(tx kv.Tx, header *types.Header, body *types.RawBody, extendCanonical bool, logger log.Logger) (status engine_types.EngineStatus, latestValidHash libcommon.Hash, validationError error, criticalError error) { fv.lock.Lock() defer fv.lock.Unlock() if fv.validatePayload == nil { @@ -149,7 +150,7 @@ func (fv *ForkValidator) ValidatePayload(tx kv.Tx, header *types.Header, body *t log.Debug("Execution ForkValidator.ValidatePayload", "extendCanonical", extendCanonical) if extendCanonical { - extendingFork := membatchwithdb.NewMemoryBatch(tx, fv.tmpDir) + extendingFork := membatchwithdb.NewMemoryBatch(tx, fv.tmpDir, logger) defer extendingFork.Close() fv.extendingForkNotifications = &shards.Notifications{ @@ -186,7 +187,7 @@ func (fv *ForkValidator) ValidatePayload(tx kv.Tx, header *types.Header, body *t return } - log.Debug("Execution ForkValidator.ValidatePayload", "foundCanonical", foundCanonical, "currentHash", currentHash, "unwindPoint", unwindPoint) + logger.Debug("Execution ForkValidator.ValidatePayload", "foundCanonical", foundCanonical, "currentHash", currentHash, "unwindPoint", unwindPoint) var bodiesChain []*types.RawBody var headersChain []*types.Header @@ -222,13 +223,13 @@ func (fv *ForkValidator) ValidatePayload(tx kv.Tx, header *types.Header, body *t if criticalError != nil { return } - log.Debug("Execution ForkValidator.ValidatePayload", "foundCanonical", foundCanonical, "currentHash", currentHash, "unwindPoint", unwindPoint) + logger.Debug("Execution ForkValidator.ValidatePayload", "foundCanonical", foundCanonical, "currentHash", currentHash, "unwindPoint", unwindPoint) } // Do not set an unwind point if we are already there. 
if unwindPoint == fv.currentHeight { unwindPoint = 0 } - batch := membatchwithdb.NewMemoryBatch(tx, fv.tmpDir) + batch := membatchwithdb.NewMemoryBatch(tx, fv.tmpDir, logger) defer batch.Rollback() notifications := &shards.Notifications{ Events: shards.NewEvents(), diff --git a/turbo/execution/eth1/ethereum_execution.go b/turbo/execution/eth1/ethereum_execution.go index baa1e8e82df..c8a6f6a2a04 100644 --- a/turbo/execution/eth1/ethereum_execution.go +++ b/turbo/execution/eth1/ethereum_execution.go @@ -183,7 +183,7 @@ func (e *EthereumExecutionModule) ValidateChain(ctx context.Context, req *execut extendingHash := e.forkValidator.ExtendingForkHeadHash() extendCanonical := extendingHash == libcommon.Hash{} && header.ParentHash == currentHeadHash - status, lvh, validationError, criticalError := e.forkValidator.ValidatePayload(tx, header, body.RawBody(), extendCanonical) + status, lvh, validationError, criticalError := e.forkValidator.ValidatePayload(tx, header, body.RawBody(), extendCanonical, e.logger) if criticalError != nil { return nil, criticalError } @@ -229,18 +229,25 @@ func (e *EthereumExecutionModule) purgeBadChain(ctx context.Context, tx kv.RwTx, func (e *EthereumExecutionModule) Start(ctx context.Context) { e.semaphore.Acquire(ctx, 1) defer e.semaphore.Release(1) - // Run the forkchoice - if err := e.executionPipeline.Run(e.db, nil, true); err != nil { - if !errors.Is(err, context.Canceled) { - e.logger.Error("Could not start execution service", "err", err) + + more := true + + for more { + var err error + + if more, err = e.executionPipeline.Run(e.db, nil, true); err != nil { + if !errors.Is(err, context.Canceled) { + e.logger.Error("Could not start execution service", "err", err) + } + continue } - return - } - if err := e.executionPipeline.RunPrune(e.db, nil, true); err != nil { - if !errors.Is(err, context.Canceled) { - e.logger.Error("Could not start execution service", "err", err) + + if err := e.executionPipeline.RunPrune(e.db, nil, true); err != nil { + if !errors.Is(err, context.Canceled) { + e.logger.Error("Could not start execution service", "err", err) + } + continue } - return } } diff --git a/turbo/execution/eth1/forkchoice.go b/turbo/execution/eth1/forkchoice.go index 1ed0851c587..574735846c6 100644 --- a/turbo/execution/eth1/forkchoice.go +++ b/turbo/execution/eth1/forkchoice.go @@ -305,7 +305,7 @@ func (e *EthereumExecutionModule) updateForkChoice(ctx context.Context, blockHas } } // Run the forkchoice - if err := e.executionPipeline.Run(e.db, tx, false); err != nil { + if _, err := e.executionPipeline.Run(e.db, tx, false); err != nil { err = fmt.Errorf("updateForkChoice: %w", err) sendForkchoiceErrorWithoutWaiting(outcomeCh, err) return diff --git a/turbo/jsonrpc/eth_call.go b/turbo/jsonrpc/eth_call.go index 667bb801d1a..84871efc594 100644 --- a/turbo/jsonrpc/eth_call.go +++ b/turbo/jsonrpc/eth_call.go @@ -355,7 +355,7 @@ func (api *APIImpl) GetProof(ctx context.Context, address libcommon.Address, sto if latestBlock-blockNr > uint64(api.MaxGetProofRewindBlockCount) { return nil, fmt.Errorf("requested block is too old, block must be within %d blocks of the head block number (currently %d)", uint64(api.MaxGetProofRewindBlockCount), latestBlock) } - batch := membatchwithdb.NewMemoryBatch(tx, api.dirs.Tmp) + batch := membatchwithdb.NewMemoryBatch(tx, api.dirs.Tmp, api.logger) defer batch.Rollback() unwindState := &stagedsync.UnwindState{UnwindPoint: blockNr} @@ -489,7 +489,14 @@ func (api *APIImpl) CreateAccessList(ctx context.Context, args ethapi2.CallArgs, } if 
reply.Found { nonce = reply.Nonce + 1 + } else { + a, err := stateReader.ReadAccountData(*args.From) + if err != nil { + return nil, err + } + nonce = a.Nonce + 1 } + args.Nonce = (*hexutil.Uint64)(&nonce) } to = crypto.CreateAddress(*args.From, uint64(*args.Nonce)) diff --git a/turbo/jsonrpc/gen_traces_test.go b/turbo/jsonrpc/gen_traces_test.go index 85b90bb5a00..a5f4c7bdc7a 100644 --- a/turbo/jsonrpc/gen_traces_test.go +++ b/turbo/jsonrpc/gen_traces_test.go @@ -49,6 +49,7 @@ func TestGeneratedDebugApi(t *testing.T) { expectedJSON := ` [ { + "txHash": "0xb42edc1d46932ef34be0ba49402dc94e3d2319c066f02945f6828cd344fcfa7b", "result": { "calls": [ { diff --git a/turbo/jsonrpc/tracing.go b/turbo/jsonrpc/tracing.go index 6a0c1c755d6..bdf129af64c 100644 --- a/turbo/jsonrpc/tracing.go +++ b/turbo/jsonrpc/tracing.go @@ -111,6 +111,9 @@ func (api *PrivateDebugAPIImpl) traceBlock(ctx context.Context, blockNrOrHash rp for idx, txn := range txns { stream.WriteObjectStart() + stream.WriteObjectField("txHash") + stream.WriteString(txn.Hash().Hex()) + stream.WriteMore() stream.WriteObjectField("result") select { default: diff --git a/turbo/jsonrpc/txpool_api.go b/turbo/jsonrpc/txpool_api.go index 05663be4a88..96ff0435cad 100644 --- a/turbo/jsonrpc/txpool_api.go +++ b/turbo/jsonrpc/txpool_api.go @@ -3,6 +3,7 @@ package jsonrpc import ( "context" "fmt" + "github.com/ledgerwatch/erigon-lib/common/hexutil" libcommon "github.com/ledgerwatch/erigon-lib/common" @@ -14,9 +15,10 @@ import ( "github.com/ledgerwatch/erigon/core/types" ) -// NetAPI the interface for the net_ RPC commands +// TxPoolAPI the interface for the txpool_ RPC commands type TxPoolAPI interface { Content(ctx context.Context) (map[string]map[string]map[string]*RPCTransaction, error) + ContentFrom(ctx context.Context, addr libcommon.Address) (map[string]map[string]*RPCTransaction, error) } // TxPoolAPIImpl data structure to store things needed for txpool_ commands @@ -116,6 +118,76 @@ func (api *TxPoolAPIImpl) Content(ctx context.Context) (map[string]map[string]ma return content, nil } +func (api *TxPoolAPIImpl) ContentFrom(ctx context.Context, addr libcommon.Address) (map[string]map[string]*RPCTransaction, error) { + reply, err := api.pool.All(ctx, &proto_txpool.AllRequest{}) + if err != nil { + return nil, err + } + + content := map[string]map[string]*RPCTransaction{ + "pending": make(map[string]*RPCTransaction), + "baseFee": make(map[string]*RPCTransaction), + "queued": make(map[string]*RPCTransaction), + } + + pending := make([]types.Transaction, 0, 4) + baseFee := make([]types.Transaction, 0, 4) + queued := make([]types.Transaction, 0, 4) + for i := range reply.Txs { + txn, err := types.DecodeWrappedTransaction(reply.Txs[i].RlpTx) + if err != nil { + return nil, fmt.Errorf("decoding transaction from: %x: %w", reply.Txs[i].RlpTx, err) + } + sender := gointerfaces.ConvertH160toAddress(reply.Txs[i].Sender) + if sender != addr { + continue + } + + switch reply.Txs[i].TxnType { + case proto_txpool.AllReply_PENDING: + pending = append(pending, txn) + case proto_txpool.AllReply_BASE_FEE: + baseFee = append(baseFee, txn) + case proto_txpool.AllReply_QUEUED: + queued = append(queued, txn) + } + } + + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + cc, err := api.chainConfig(tx) + if err != nil { + return nil, err + } + + curHeader := rawdb.ReadCurrentHeader(tx) + if curHeader == nil { + return nil, nil + } + // Flatten the pending transactions + dump := make(map[string]*RPCTransaction) + for _, txn := 
range pending { + dump[fmt.Sprintf("%d", txn.GetNonce())] = newRPCPendingTransaction(txn, curHeader, cc) + } + content["pending"] = dump + // Flatten the baseFee transactions + dump = make(map[string]*RPCTransaction) + for _, txn := range baseFee { + dump[fmt.Sprintf("%d", txn.GetNonce())] = newRPCPendingTransaction(txn, curHeader, cc) + } + content["baseFee"] = dump + // Flatten the queued transactions + dump = make(map[string]*RPCTransaction) + for _, txn := range queued { + dump[fmt.Sprintf("%d", txn.GetNonce())] = newRPCPendingTransaction(txn, curHeader, cc) + } + content["queued"] = dump + return content, nil +} + // Status returns the number of pending and queued transactions in the pool. func (api *TxPoolAPIImpl) Status(ctx context.Context) (map[string]hexutil.Uint, error) { reply, err := api.pool.Status(ctx, &proto_txpool.StatusRequest{}) diff --git a/turbo/logging/logging.go b/turbo/logging/logging.go index 988fa7fb5da..f36b0999b4c 100644 --- a/turbo/logging/logging.go +++ b/turbo/logging/logging.go @@ -3,7 +3,6 @@ package logging import ( "flag" "os" - "path" "path/filepath" "strconv" @@ -21,7 +20,8 @@ import ( // This function is used in Erigon itself. // Note: urfave and cobra are two CLI frameworks/libraries for the same functionality // and it would make sense to choose one over another -func SetupLoggerCtx(filePrefix string, ctx *cli.Context, rootHandler bool) log.Logger { +func SetupLoggerCtx(filePrefix string, ctx *cli.Context, + consoleDefaultLevel log.Lvl, dirDefaultLevel log.Lvl, rootHandler bool) log.Logger { var consoleJson = ctx.Bool(LogJsonFlag.Name) || ctx.Bool(LogConsoleJsonFlag.Name) var dirJson = ctx.Bool(LogDirJsonFlag.Name) @@ -30,13 +30,13 @@ func SetupLoggerCtx(filePrefix string, ctx *cli.Context, rootHandler bool) log.L // try verbosity flag consoleLevel, lErr = tryGetLogLevel(ctx.String(LogVerbosityFlag.Name)) if lErr != nil { - consoleLevel = log.LvlInfo + consoleLevel = consoleDefaultLevel } } dirLevel, dErr := tryGetLogLevel(ctx.String(LogDirVerbosityFlag.Name)) if dErr != nil { - dirLevel = log.LvlInfo + dirLevel = dirDefaultLevel } dirPath := "" @@ -202,7 +202,7 @@ func initSeparatedLogging( } lumberjack := &lumberjack.Logger{ - Filename: path.Join(dirPath, filePrefix+".log"), + Filename: filepath.Join(dirPath, filePrefix+".log"), MaxSize: 100, // megabytes MaxBackups: 3, MaxAge: 28, //days diff --git a/turbo/services/interfaces.go b/turbo/services/interfaces.go index afed450b734..0857520ac88 100644 --- a/turbo/services/interfaces.go +++ b/turbo/services/interfaces.go @@ -90,17 +90,19 @@ type FullBlockReader interface { } type BlockSnapshots interface { - LogStat() + LogStat(label string) ReopenFolder() error SegmentsMax() uint64 + SegmentsMin() uint64 } // BlockRetire - freezing blocks: moving old data from DB to snapshot files type BlockRetire interface { - PruneAncientBlocks(tx kv.RwTx, limit int, includeBor bool) error - RetireBlocksInBackground(ctx context.Context, maxBlockNumInDB uint64, includeBor bool, lvl log.Lvl, seedNewSnapshots func(downloadRequest []DownloadRequest) error, onDelete func(l []string) error) + PruneAncientBlocks(tx kv.RwTx, limit int) error + RetireBlocksInBackground(ctx context.Context, minBlockNum uint64, maxBlockNum uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []DownloadRequest) error, onDelete func(l []string) error) HasNewFrozenFiles() bool BuildMissedIndicesIfNeed(ctx context.Context, logPrefix string, notifier DBEventNotifier, cc *chain.Config) error + SetWorkers(workers int) } /* @@ -124,6 +126,7 
@@ type DBEventNotifier interface { } type DownloadRequest struct { + Version uint8 Path string TorrentHash string } diff --git a/turbo/snapshotsync/freezeblocks/beacon_block_reader.go b/turbo/snapshotsync/freezeblocks/beacon_block_reader.go index 40ce7813aa1..a17578c2f48 100644 --- a/turbo/snapshotsync/freezeblocks/beacon_block_reader.go +++ b/turbo/snapshotsync/freezeblocks/beacon_block_reader.go @@ -6,6 +6,7 @@ import ( "fmt" "sync" + "github.com/klauspost/compress/zstd" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/cl/clparams" @@ -13,16 +14,19 @@ import ( "github.com/ledgerwatch/erigon/cl/persistence" "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies" "github.com/ledgerwatch/erigon/cl/persistence/format/snapshot_format" - "github.com/pierrec/lz4" ) var buffersPool = sync.Pool{ New: func() interface{} { return &bytes.Buffer{} }, } -var lz4ReaderPool = sync.Pool{ +var decompressorPool = sync.Pool{ New: func() interface{} { - return lz4.NewReader(nil) + r, err := zstd.NewReader(nil) + if err != nil { + panic(err) + } + return r }, } @@ -98,12 +102,12 @@ func (r *beaconSnapshotReader) ReadBlockBySlot(ctx context.Context, tx kv.Tx, sl buffer.Reset() buffer.Write(buf) - lzReader := lz4ReaderPool.Get().(*lz4.Reader) - defer lz4ReaderPool.Put(lzReader) - lzReader.Reset(buffer) + reader := decompressorPool.Get().(*zstd.Decoder) + defer decompressorPool.Put(reader) + reader.Reset(buffer) // Use pooled buffers and readers to avoid allocations. - return snapshot_format.ReadBlockFromSnapshot(lzReader, r.eth1Getter, r.cfg) + return snapshot_format.ReadBlockFromSnapshot(reader, r.eth1Getter, r.cfg) } func (r *beaconSnapshotReader) ReadBlockByRoot(ctx context.Context, tx kv.Tx, root libcommon.Hash) (*cltypes.SignedBeaconBlock, error) { @@ -121,6 +125,9 @@ func (r *beaconSnapshotReader) ReadBlockByRoot(ctx context.Context, tx kv.Tx, ro var buf []byte if *slot > r.sn.BlocksAvailable() { data, err := r.beaconDB.GetBlock(ctx, tx, *slot) + if data == nil { + return nil, err + } return data.Data, err } if r.eth1Getter == nil { @@ -166,12 +173,12 @@ func (r *beaconSnapshotReader) ReadBlockByRoot(ctx context.Context, tx kv.Tx, ro buffer.Reset() buffer.Write(buf) - lzReader := lz4ReaderPool.Get().(*lz4.Reader) - defer lz4ReaderPool.Put(lzReader) - lzReader.Reset(buffer) + reader := decompressorPool.Get().(*zstd.Decoder) + defer decompressorPool.Put(reader) + reader.Reset(buffer) // Use pooled buffers and readers to avoid allocations. 
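// Note: a pooled zstd.Decoder is re-armed via Reset on each read, so its internal buffers are reused rather than reallocated per block - the same pooling pattern the lz4 readers used before this change.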
- return snapshot_format.ReadBlockFromSnapshot(lzReader, r.eth1Getter, r.cfg) + return snapshot_format.ReadBlockFromSnapshot(reader, r.eth1Getter, r.cfg) } func (r *beaconSnapshotReader) ReadHeaderByRoot(ctx context.Context, tx kv.Tx, root libcommon.Hash) (*cltypes.SignedBeaconBlockHeader, error) { diff --git a/turbo/snapshotsync/freezeblocks/block_reader.go b/turbo/snapshotsync/freezeblocks/block_reader.go index 6dec6ac3a09..21fae498f05 100644 --- a/turbo/snapshotsync/freezeblocks/block_reader.go +++ b/turbo/snapshotsync/freezeblocks/block_reader.go @@ -15,10 +15,10 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/recsplit" - "github.com/ledgerwatch/erigon/eth/ethconfig" - + "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/services" ) @@ -246,7 +246,8 @@ type BlockReader struct { } func NewBlockReader(snapshots services.BlockSnapshots, borSnapshots services.BlockSnapshots) *BlockReader { - return &BlockReader{sn: snapshots.(*RoSnapshots), borSn: borSnapshots.(*BorRoSnapshots)} + borSn, _ := borSnapshots.(*BorRoSnapshots) + return &BlockReader{sn: snapshots.(*RoSnapshots), borSn: borSn} } func (r *BlockReader) CanPruneTo(currentBlockInDB uint64) uint64 { @@ -261,8 +262,13 @@ func (r *BlockReader) BorSnapshots() services.BlockSnapshots { return nil } -func (r *BlockReader) FrozenBlocks() uint64 { return r.sn.BlocksAvailable() } -func (r *BlockReader) FrozenBorBlocks() uint64 { return r.borSn.BlocksAvailable() } +func (r *BlockReader) FrozenBlocks() uint64 { return r.sn.BlocksAvailable() } +func (r *BlockReader) FrozenBorBlocks() uint64 { + if r.borSn != nil { + return r.borSn.BlocksAvailable() + } + return 0 +} func (r *BlockReader) FrozenFiles() []string { files := r.sn.Files() if r.borSn != nil { @@ -278,16 +284,18 @@ func (r *BlockReader) HeadersRange(ctx context.Context, walker func(header *type } func (r *BlockReader) HeaderByNumber(ctx context.Context, tx kv.Getter, blockHeight uint64) (h *types.Header, err error) { - blockHash, err := rawdb.ReadCanonicalHash(tx, blockHeight) - if err != nil { - return nil, err - } - if blockHash == (common.Hash{}) { - return nil, nil - } - h = rawdb.ReadHeader(tx, blockHash, blockHeight) - if h != nil { - return h, nil + if tx != nil { + blockHash, err := rawdb.ReadCanonicalHash(tx, blockHeight) + if err != nil { + return nil, err + } + if blockHash == (common.Hash{}) { + return nil, nil + } + h = rawdb.ReadHeader(tx, blockHash, blockHeight) + if h != nil { + return h, nil + } } view := r.sn.View() @@ -365,9 +373,11 @@ func (r *BlockReader) CanonicalHash(ctx context.Context, tx kv.Getter, blockHeig } func (r *BlockReader) Header(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (h *types.Header, err error) { - h = rawdb.ReadHeader(tx, hash, blockHeight) - if h != nil { - return h, nil + if tx != nil { + h = rawdb.ReadHeader(tx, hash, blockHeight) + if h != nil { + return h, nil + } } view := r.sn.View() @@ -384,13 +394,14 @@ func (r *BlockReader) Header(ctx context.Context, tx kv.Getter, hash common.Hash } func (r *BlockReader) BodyWithTransactions(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (body *types.Body, err error) { - - body, err = rawdb.ReadBodyWithTransactions(tx, hash, blockHeight) - 
if err != nil { - return nil, err - } - if body != nil { - return body, nil + if tx != nil { + body, err = rawdb.ReadBodyWithTransactions(tx, hash, blockHeight) + if err != nil { + return nil, err + } + if body != nil { + return body, nil + } } view := r.sn.View() @@ -576,7 +587,7 @@ func (r *BlockReader) headerFromSnapshot(blockHeight uint64, sn *HeaderSegment, func (r *BlockReader) headerFromSnapshotByHash(hash common.Hash, sn *HeaderSegment, buf []byte) (*types.Header, error) { defer func() { if rec := recover(); rec != nil { - panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, sn.ranges.from, sn.ranges.to, dbg.Stack())) + panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, sn.from, sn.to, dbg.Stack())) } }() // avoid crash because Erigon's core does many things @@ -628,7 +639,7 @@ func (r *BlockReader) bodyFromSnapshot(blockHeight uint64, sn *BodySegment, buf func (r *BlockReader) bodyForStorageFromSnapshot(blockHeight uint64, sn *BodySegment, buf []byte) (*types.BodyForStorage, []byte, error) { defer func() { if rec := recover(); rec != nil { - panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, sn.ranges.from, sn.ranges.to, dbg.Stack())) + panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, sn.from, sn.to, dbg.Stack())) } }() // avoid crash because Erigon's core does many things @@ -658,7 +669,7 @@ func (r *BlockReader) bodyForStorageFromSnapshot(blockHeight uint64, sn *BodySeg func (r *BlockReader) txsFromSnapshot(baseTxnID uint64, txsAmount uint32, txsSeg *TxnSegment, buf []byte) (txs []types.Transaction, senders []common.Address, err error) { defer func() { if rec := recover(); rec != nil { - panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, txsSeg.ranges.from, txsSeg.ranges.to, dbg.Stack())) + panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, txsSeg.from, txsSeg.to, dbg.Stack())) } }() // avoid crash because Erigon's core does many things @@ -842,7 +853,7 @@ func (r *BlockReader) IterateFrozenBodies(f func(blockNum, baseTxNum, txAmount u var buf []byte g := sn.seg.MakeGetter() - blockNum := sn.ranges.from + blockNum := sn.from var b types.BodyForStorage for g.HasNext() { buf, _ = g.Next(buf[:0]) @@ -952,6 +963,10 @@ func (r *BlockReader) EventLookup(ctx context.Context, tx kv.Getter, txnHash com return *n, true, nil } + if r.borSn == nil { + return 0, false, nil + } + view := r.borSn.View() defer view.Close() @@ -1044,10 +1059,10 @@ func (r *BlockReader) EventsByBlock(ctx context.Context, tx kv.Tx, hash common.H result := []rlp.RawValue{} for i := len(segments) - 1; i >= 0; i-- { sn := segments[i] - if sn.ranges.from > blockHeight { + if sn.from > blockHeight { continue } - if sn.ranges.to <= blockHeight { + if sn.to <= blockHeight { continue } if sn.IdxBorTxnHash == nil { @@ -1070,13 +1085,27 @@ func (r *BlockReader) EventsByBlock(ctx context.Context, tx kv.Tx, hash common.H } func (r *BlockReader) LastFrozenEventID() uint64 { + if r.borSn == nil { + return 0 + } + view := r.borSn.View() defer view.Close() segments := view.Events() if len(segments) == 0 { return 0 } - lastSegment := segments[len(segments)-1] + // find the last segment which has a built index + var lastSegment *BorEventSegment + for i := len(segments) - 1; i >= 0; i-- { + if segments[i].IdxBorTxnHash != nil { + lastSegment = segments[i] + break + } + } + if lastSegment == nil { + return 0 + } var lastEventID uint64 gg := lastSegment.seg.MakeGetter() var buf []byte @@ -1088,25 +1117,39 @@ func (r *BlockReader) LastFrozenEventID() uint64 { } func (r *BlockReader) 
LastFrozenSpanID() uint64 { + if r.borSn == nil { + return 0 + } + view := r.borSn.View() defer view.Close() segments := view.Spans() if len(segments) == 0 { return 0 } - lastSegment := segments[len(segments)-1] - var lastSpanID uint64 - if lastSegment.ranges.to > zerothSpanEnd { - lastSpanID = (lastSegment.ranges.to - zerothSpanEnd - 1) / spanLength + // find the last segment which has a built index + var lastSegment *BorSpanSegment + for i := len(segments) - 1; i >= 0; i-- { + if segments[i].idx != nil { + lastSegment = segments[i] + break + } + } + if lastSegment == nil { + return 0 + } + + lastSpanID := span.IDAt(lastSegment.to) + if lastSpanID > 0 { + lastSpanID-- } return lastSpanID } func (r *BlockReader) Span(ctx context.Context, tx kv.Getter, spanId uint64) ([]byte, error) { - // Compute starting block of the span var endBlock uint64 if spanId > 0 { - endBlock = (spanId)*spanLength + zerothSpanEnd + endBlock = span.EndBlockNum(spanId) } var buf [8]byte binary.BigEndian.PutUint64(buf[:], spanId) @@ -1117,7 +1160,7 @@ func (r *BlockReader) Span(ctx context.Context, tx kv.Getter, spanId uint64) ([] return nil, err } if v == nil { - return nil, fmt.Errorf("span %d not found (db)", spanId) + return nil, fmt.Errorf("span %d not found (db), frozenBlocks=%d", spanId, maxBlockNumInFiles) } return common.Copy(v), nil } @@ -1129,17 +1172,11 @@ func (r *BlockReader) Span(ctx context.Context, tx kv.Getter, spanId uint64) ([] if sn.idx == nil { continue } - var spanFrom uint64 - if sn.ranges.from > zerothSpanEnd { - spanFrom = 1 + (sn.ranges.from-zerothSpanEnd-1)/spanLength - } + spanFrom := span.IDAt(sn.from) if spanId < spanFrom { continue } - var spanTo uint64 - if sn.ranges.to > zerothSpanEnd { - spanTo = 1 + (sn.ranges.to-zerothSpanEnd-1)/spanLength - } + spanTo := span.IDAt(sn.to) if spanId >= spanTo { continue } @@ -1175,10 +1212,10 @@ func (r *BlockReader) Integrity(ctx context.Context) error { view := r.sn.View() defer view.Close() for _, seg := range view.Headers() { - if err := r.ensureHeaderNumber(seg.ranges.from, seg); err != nil { + if err := r.ensureHeaderNumber(seg.from, seg); err != nil { return err } - if err := r.ensureHeaderNumber(seg.ranges.to-1, seg); err != nil { + if err := r.ensureHeaderNumber(seg.to-1, seg); err != nil { return err } } diff --git a/turbo/snapshotsync/freezeblocks/block_reader_test.go b/turbo/snapshotsync/freezeblocks/block_reader_test.go new file mode 100644 index 00000000000..a408ea2b820 --- /dev/null +++ b/turbo/snapshotsync/freezeblocks/block_reader_test.go @@ -0,0 +1,226 @@ +package freezeblocks + +import ( + "context" + "encoding/binary" + "os" + "path/filepath" + "testing" + + "github.com/ledgerwatch/log/v3" + "github.com/stretchr/testify/require" + + "github.com/ledgerwatch/erigon-lib/common/length" + "github.com/ledgerwatch/erigon-lib/compress" + "github.com/ledgerwatch/erigon-lib/downloader/snaptype" + "github.com/ledgerwatch/erigon-lib/recsplit" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/turbo/testlog" +) + +func TestBlockReaderLastFrozenSpanIDWhenSegmentFilesArePresent(t *testing.T) { + t.Parallel() + + logger := testlog.Logger(t, log.LvlInfo) + dir := t.TempDir() + createTestBorEventSegmentFile(t, 0, 500_000, 132, dir, logger) + createTestSegmentFile(t, 0, 500_000, snaptype.BorSpans, dir, 1, logger) + borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 1, logger) + defer borRoSnapshots.Close() + err := borRoSnapshots.ReopenFolder() + require.NoError(t, err) + + blockReader 
:= &BlockReader{borSn: borRoSnapshots} + require.Equal(t, uint64(78), blockReader.LastFrozenSpanID()) +} + +func TestBlockReaderLastFrozenSpanIDWhenSegmentFilesAreNotPresent(t *testing.T) { + t.Parallel() + + logger := testlog.Logger(t, log.LvlInfo) + dir := t.TempDir() + borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 1, logger) + defer borRoSnapshots.Close() + err := borRoSnapshots.ReopenFolder() + require.NoError(t, err) + + blockReader := &BlockReader{borSn: borRoSnapshots} + require.Equal(t, uint64(0), blockReader.LastFrozenSpanID()) +} + +func TestBlockReaderLastFrozenSpanIDReturnsLastSegWithIdx(t *testing.T) { + t.Parallel() + + logger := testlog.Logger(t, log.LvlInfo) + dir := t.TempDir() + createTestBorEventSegmentFile(t, 0, 500_000, 132, dir, logger) + createTestBorEventSegmentFile(t, 500_000, 1_000_000, 264, dir, logger) + createTestBorEventSegmentFile(t, 1_000_000, 1_500_000, 528, dir, logger) + createTestSegmentFile(t, 0, 500_000, snaptype.BorSpans, dir, 1, logger) + createTestSegmentFile(t, 500_000, 1_000_000, snaptype.BorSpans, dir, 1, logger) + createTestSegmentFile(t, 1_000_000, 1_500_000, snaptype.BorSpans, dir, 1, logger) + // delete idx file for last bor span segment to simulate segment with missing idx file + idxFileToDelete := filepath.Join(dir, snaptype.IdxFileName(1, 1_000_000, 1_500_000, snaptype.BorSpans.String())) + err := os.Remove(idxFileToDelete) + require.NoError(t, err) + borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 1, logger) + defer borRoSnapshots.Close() + err = borRoSnapshots.ReopenFolder() + require.NoError(t, err) + + blockReader := &BlockReader{borSn: borRoSnapshots} + require.Equal(t, uint64(156), blockReader.LastFrozenSpanID()) +} + +func TestBlockReaderLastFrozenSpanIDReturnsZeroWhenAllSegmentsDoNotHaveIdx(t *testing.T) { + t.Parallel() + + logger := testlog.Logger(t, log.LvlInfo) + dir := t.TempDir() + createTestBorEventSegmentFile(t, 0, 500_000, 132, dir, logger) + createTestBorEventSegmentFile(t, 500_000, 1_000_000, 264, dir, logger) + createTestBorEventSegmentFile(t, 1_000_000, 1_500_000, 528, dir, logger) + createTestSegmentFile(t, 0, 500_000, snaptype.BorSpans, dir, 1, logger) + createTestSegmentFile(t, 500_000, 1_000_000, snaptype.BorSpans, dir, 1, logger) + createTestSegmentFile(t, 1_000_000, 1_500_000, snaptype.BorSpans, dir, 1, logger) + // delete idx file for all bor span segments to simulate segments with missing idx files + idxFileToDelete := filepath.Join(dir, snaptype.IdxFileName(1, 1, 500_000, snaptype.BorSpans.String())) + err := os.Remove(idxFileToDelete) + require.NoError(t, err) + idxFileToDelete = filepath.Join(dir, snaptype.IdxFileName(1, 500_000, 1_000_000, snaptype.BorSpans.String())) + err = os.Remove(idxFileToDelete) + require.NoError(t, err) + idxFileToDelete = filepath.Join(dir, snaptype.IdxFileName(1, 1_000_000, 1_500_000, snaptype.BorSpans.String())) + err = os.Remove(idxFileToDelete) + require.NoError(t, err) + borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 1, logger) + defer borRoSnapshots.Close() + err = borRoSnapshots.ReopenFolder() + require.NoError(t, err) + + blockReader := &BlockReader{borSn: borRoSnapshots} + require.Equal(t, uint64(0), blockReader.LastFrozenSpanID()) +} + +func TestBlockReaderLastFrozenEventIDWhenSegmentFilesArePresent(t *testing.T) { + t.Parallel() + + logger := testlog.Logger(t, log.LvlInfo) + dir := t.TempDir() + createTestBorEventSegmentFile(t, 0, 500_000, 132, dir, logger) + 
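// a BorSpans segment is created alongside the BorEvents one: a bor block range is only admitted when every bor snapshot type is present for it +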
createTestSegmentFile(t, 0, 500_000, snaptype.BorSpans, dir, 1, logger) + borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 1, logger) + defer borRoSnapshots.Close() + err := borRoSnapshots.ReopenFolder() + require.NoError(t, err) + + blockReader := &BlockReader{borSn: borRoSnapshots} + require.Equal(t, uint64(132), blockReader.LastFrozenEventID()) +} + +func TestBlockReaderLastFrozenEventIDWhenSegmentFilesAreNotPresent(t *testing.T) { + t.Parallel() + + logger := testlog.Logger(t, log.LvlInfo) + dir := t.TempDir() + borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 1, logger) + defer borRoSnapshots.Close() + err := borRoSnapshots.ReopenFolder() + require.NoError(t, err) + + blockReader := &BlockReader{borSn: borRoSnapshots} + require.Equal(t, uint64(0), blockReader.LastFrozenEventID()) +} + +func TestBlockReaderLastFrozenEventIDReturnsLastSegWithIdx(t *testing.T) { + t.Parallel() + + logger := testlog.Logger(t, log.LvlInfo) + dir := t.TempDir() + createTestBorEventSegmentFile(t, 0, 500_000, 132, dir, logger) + createTestBorEventSegmentFile(t, 500_000, 1_000_000, 264, dir, logger) + createTestBorEventSegmentFile(t, 1_000_000, 1_500_000, 528, dir, logger) + createTestSegmentFile(t, 0, 500_000, snaptype.BorSpans, dir, 1, logger) + createTestSegmentFile(t, 500_000, 1_000_000, snaptype.BorSpans, dir, 1, logger) + createTestSegmentFile(t, 1_000_000, 1_500_000, snaptype.BorSpans, dir, 1, logger) + // delete idx file for last bor events segment to simulate segment with missing idx file + idxFileToDelete := filepath.Join(dir, snaptype.IdxFileName(1, 1_000_000, 1_500_000, snaptype.BorEvents.String())) + err := os.Remove(idxFileToDelete) + require.NoError(t, err) + borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 1, logger) + defer borRoSnapshots.Close() + err = borRoSnapshots.ReopenFolder() + require.NoError(t, err) + + blockReader := &BlockReader{borSn: borRoSnapshots} + require.Equal(t, uint64(264), blockReader.LastFrozenEventID()) +} + +func TestBlockReaderLastFrozenEventIDReturnsZeroWhenAllSegmentsDoNotHaveIdx(t *testing.T) { + t.Parallel() + + logger := testlog.Logger(t, log.LvlInfo) + dir := t.TempDir() + createTestBorEventSegmentFile(t, 0, 500_000, 132, dir, logger) + createTestBorEventSegmentFile(t, 500_000, 1_000_000, 264, dir, logger) + createTestBorEventSegmentFile(t, 1_000_000, 1_500_000, 528, dir, logger) + createTestSegmentFile(t, 0, 500_000, snaptype.BorSpans, dir, 1, logger) + createTestSegmentFile(t, 500_000, 1_000_000, snaptype.BorSpans, dir, 1, logger) + createTestSegmentFile(t, 1_000_000, 1_500_000, snaptype.BorSpans, dir, 1, logger) + // delete idx files for all bor events segment to simulate segment files with missing idx files + idxFileToDelete := filepath.Join(dir, snaptype.IdxFileName(1, 0, 500_000, snaptype.BorEvents.String())) + err := os.Remove(idxFileToDelete) + require.NoError(t, err) + idxFileToDelete = filepath.Join(dir, snaptype.IdxFileName(1, 500_000, 1_000_000, snaptype.BorEvents.String())) + err = os.Remove(idxFileToDelete) + require.NoError(t, err) + idxFileToDelete = filepath.Join(dir, snaptype.IdxFileName(1, 1_000_000, 1_500_000, snaptype.BorEvents.String())) + err = os.Remove(idxFileToDelete) + require.NoError(t, err) + borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 1, logger) + defer borRoSnapshots.Close() + err = borRoSnapshots.ReopenFolder() + require.NoError(t, err) + + blockReader := &BlockReader{borSn: borRoSnapshots} + 
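// with every event index removed, the backwards scan in LastFrozenEventID finds no indexed segment and falls back to 0 +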
require.Equal(t, uint64(0), blockReader.LastFrozenEventID()) +} + +func createTestBorEventSegmentFile(t *testing.T, from, to, eventId uint64, dir string, logger log.Logger) { + compressor, err := compress.NewCompressor( + context.Background(), + "test", + filepath.Join(dir, snaptype.SegmentFileName(1, from, to, snaptype.BorEvents)), + dir, + 100, + 1, + log.LvlDebug, + logger, + ) + require.NoError(t, err) + defer compressor.Close() + compressor.DisableFsync() + data := make([]byte, length.Hash+length.BlockNum+8) + binary.BigEndian.PutUint64(data[length.Hash+length.BlockNum:length.Hash+length.BlockNum+8], eventId) + err = compressor.AddWord(data) + require.NoError(t, err) + err = compressor.Compress() + require.NoError(t, err) + idx, err := recsplit.NewRecSplit( + recsplit.RecSplitArgs{ + KeyCount: 1, + BucketSize: 10, + TmpDir: dir, + IndexFile: filepath.Join(dir, snaptype.IdxFileName(1, from, to, snaptype.BorEvents.String())), + LeafSize: 8, + }, + logger, + ) + require.NoError(t, err) + defer idx.Close() + idx.DisableFsync() + err = idx.AddKey([]byte{1}, 0) + require.NoError(t, err) + err = idx.Build(context.Background()) + require.NoError(t, err) +} diff --git a/turbo/snapshotsync/freezeblocks/block_snapshots.go b/turbo/snapshotsync/freezeblocks/block_snapshots.go index 7893bb1a10c..e4b36c123d3 100644 --- a/turbo/snapshotsync/freezeblocks/block_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/block_snapshots.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "os" - "path" "path/filepath" "reflect" "runtime" @@ -18,6 +17,10 @@ import ( "time" "github.com/holiman/uint256" + "github.com/ledgerwatch/log/v3" + "golang.org/x/exp/slices" + "golang.org/x/sync/errgroup" + "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/chain/snapcfg" common2 "github.com/ledgerwatch/erigon-lib/common" @@ -28,12 +31,14 @@ import ( dir2 "github.com/ledgerwatch/erigon-lib/common/dir" "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/compress" + "github.com/ledgerwatch/erigon-lib/diagnostics" "github.com/ledgerwatch/erigon-lib/downloader/snaptype" "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/recsplit" types2 "github.com/ledgerwatch/erigon-lib/types" "github.com/ledgerwatch/erigon/cmd/hack/tool/fromdb" + "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/rawdb/blockio" "github.com/ledgerwatch/erigon/core/types" @@ -46,28 +51,28 @@ import ( "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/silkworm" - "github.com/ledgerwatch/log/v3" - "golang.org/x/exp/slices" - "golang.org/x/sync/errgroup" ) type HeaderSegment struct { seg *compress.Decompressor // value: first_byte_of_header_hash + header_rlp idxHeaderHash *recsplit.Index // header_hash -> headers_segment_offset - ranges Range + Range + version uint8 } type BodySegment struct { seg *compress.Decompressor // value: rlp(types.BodyForStorage) idxBodyNumber *recsplit.Index // block_num_u64 -> bodies_segment_offset - ranges Range + Range + version uint8 } type TxnSegment struct { Seg *compress.Decompressor // value: first_byte_of_transaction_hash + sender_address + transaction_rlp IdxTxnHash *recsplit.Index // transaction_hash -> transactions_segment_offset IdxTxnHash2BlockNum *recsplit.Index // transaction_hash -> block_number - ranges Range + Range + version uint8 } func (sn 
*HeaderSegment) closeIdx() { @@ -86,10 +91,25 @@ func (sn *HeaderSegment) close() { sn.closeSeg() sn.closeIdx() } + +func (sn *HeaderSegment) openFiles() []string { + var files []string + + if sn.seg.IsOpen() { + files = append(files, sn.seg.FilePath()) + } + + if sn.idxHeaderHash != nil { + files = append(files, sn.idxHeaderHash.FilePath()) + } + + return files +} + func (sn *HeaderSegment) reopenSeg(dir string) (err error) { sn.closeSeg() - fileName := snaptype.SegmentFileName(sn.ranges.from, sn.ranges.to, snaptype.Headers) - sn.seg, err = compress.NewDecompressor(path.Join(dir, fileName)) + fileName := snaptype.SegmentFileName(sn.version, sn.from, sn.to, snaptype.Headers) + sn.seg, err = compress.NewDecompressor(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, fileName: %s", err, fileName) } @@ -116,8 +136,8 @@ func (sn *HeaderSegment) reopenIdx(dir string) (err error) { if sn.seg == nil { return nil } - fileName := snaptype.IdxFileName(sn.ranges.from, sn.ranges.to, snaptype.Headers.String()) - sn.idxHeaderHash, err = recsplit.OpenIndex(path.Join(dir, fileName)) + fileName := snaptype.IdxFileName(sn.version, sn.from, sn.to, snaptype.Headers.String()) + sn.idxHeaderHash, err = recsplit.OpenIndex(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, fileName: %s", err, fileName) } @@ -142,10 +162,24 @@ func (sn *BodySegment) close() { sn.closeIdx() } +func (sn *BodySegment) openFiles() []string { + var files []string + + if sn.seg.IsOpen() { + files = append(files, sn.seg.FilePath()) + } + + if sn.idxBodyNumber != nil { + files = append(files, sn.idxBodyNumber.FilePath()) + } + + return files +} + func (sn *BodySegment) reopenSeg(dir string) (err error) { sn.closeSeg() - fileName := snaptype.SegmentFileName(sn.ranges.from, sn.ranges.to, snaptype.Bodies) - sn.seg, err = compress.NewDecompressor(path.Join(dir, fileName)) + fileName := snaptype.SegmentFileName(sn.version, sn.from, sn.to, snaptype.Bodies) + sn.seg, err = compress.NewDecompressor(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, fileName: %s", err, fileName) } @@ -173,8 +207,8 @@ func (sn *BodySegment) reopenIdx(dir string) (err error) { if sn.seg == nil { return nil } - fileName := snaptype.IdxFileName(sn.ranges.from, sn.ranges.to, snaptype.Bodies.String()) - sn.idxBodyNumber, err = recsplit.OpenIndex(path.Join(dir, fileName)) + fileName := snaptype.IdxFileName(sn.version, sn.from, sn.to, snaptype.Bodies.String()) + sn.idxBodyNumber, err = recsplit.OpenIndex(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, fileName: %s", err, fileName) } @@ -201,10 +235,29 @@ func (sn *TxnSegment) close() { sn.closeSeg() sn.closeIdx() } + +func (sn *TxnSegment) openFiles() []string { + var files []string + + if sn.Seg.IsOpen() { + files = append(files, sn.Seg.FilePath()) + } + + if sn.IdxTxnHash != nil && sn.IdxTxnHash.IsOpen() { + files = append(files, sn.IdxTxnHash.FilePath()) + } + + if sn.IdxTxnHash2BlockNum != nil && sn.IdxTxnHash2BlockNum.IsOpen() { + files = append(files, sn.IdxTxnHash2BlockNum.FilePath()) + } + + return files +} + func (sn *TxnSegment) reopenSeg(dir string) (err error) { sn.closeSeg() - fileName := snaptype.SegmentFileName(sn.ranges.from, sn.ranges.to, snaptype.Transactions) - sn.Seg, err = compress.NewDecompressor(path.Join(dir, fileName)) + fileName := snaptype.SegmentFileName(sn.version, sn.from, sn.to, snaptype.Transactions) + sn.Seg, err = compress.NewDecompressor(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, 
fileName: %s", err, fileName) } @@ -215,8 +268,8 @@ func (sn *TxnSegment) reopenIdx(dir string) (err error) { if sn.Seg == nil { return nil } - fileName := snaptype.IdxFileName(sn.ranges.from, sn.ranges.to, snaptype.Transactions.String()) - sn.IdxTxnHash, err = recsplit.OpenIndex(path.Join(dir, fileName)) + fileName := snaptype.IdxFileName(sn.version, sn.from, sn.to, snaptype.Transactions.String()) + sn.IdxTxnHash, err = recsplit.OpenIndex(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, fileName: %s", err, fileName) } @@ -236,8 +289,8 @@ func (sn *TxnSegment) reopenIdx(dir string) (err error) { } */ - fileName = snaptype.IdxFileName(sn.ranges.from, sn.ranges.to, snaptype.Transactions2Block.String()) - sn.IdxTxnHash2BlockNum, err = recsplit.OpenIndex(path.Join(dir, fileName)) + fileName = snaptype.IdxFileName(sn.version, sn.from, sn.to, snaptype.Transactions2Block.String()) + sn.IdxTxnHash2BlockNum, err = recsplit.OpenIndex(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, fileName: %s", err, fileName) } @@ -286,7 +339,7 @@ func (s *bodySegments) ViewSegment(blockNum uint64, f func(*BodySegment) error) s.lock.RLock() defer s.lock.RUnlock() for _, seg := range s.segments { - if !(blockNum >= seg.ranges.from && blockNum < seg.ranges.to) { + if !(blockNum >= seg.from && blockNum < seg.to) { continue } return true, f(seg) @@ -308,7 +361,7 @@ func (s *txnSegments) ViewSegment(blockNum uint64, f func(*TxnSegment) error) (f s.lock.RLock() defer s.lock.RUnlock() for _, seg := range s.segments { - if !(blockNum >= seg.ranges.from && blockNum < seg.ranges.to) { + if !(blockNum >= seg.from && blockNum < seg.to) { continue } return true, f(seg) @@ -328,7 +381,11 @@ type RoSnapshots struct { segmentsMax atomic.Uint64 // all types of .seg files are available - up to this number idxMax atomic.Uint64 // all types of .idx files are available - up to this number cfg ethconfig.BlocksFreezing + version uint8 logger log.Logger + + // allows for pruning segments - this is the min availible segment + segmentsMin atomic.Uint64 } // NewRoSnapshots - opens all snapshots. 
But to simplify everything: @@ -336,21 +393,24 @@ type RoSnapshots struct { // - all snapshots of a given block range must exist - to make that block range available // - gaps are not allowed // - segments have [from:to) semantics -func NewRoSnapshots(cfg ethconfig.BlocksFreezing, snapDir string, logger log.Logger) *RoSnapshots { - return &RoSnapshots{dir: snapDir, cfg: cfg, Headers: &headerSegments{}, Bodies: &bodySegments{}, Txs: &txnSegments{}, logger: logger} +func NewRoSnapshots(cfg ethconfig.BlocksFreezing, snapDir string, version uint8, logger log.Logger) *RoSnapshots { + return &RoSnapshots{dir: snapDir, cfg: cfg, version: version, Headers: &headerSegments{}, Bodies: &bodySegments{}, Txs: &txnSegments{}, logger: logger} } +func (s *RoSnapshots) Version() uint8 { return s.version } func (s *RoSnapshots) Cfg() ethconfig.BlocksFreezing { return s.cfg } func (s *RoSnapshots) Dir() string { return s.dir } func (s *RoSnapshots) SegmentsReady() bool { return s.segmentsReady.Load() } func (s *RoSnapshots) IndicesReady() bool { return s.indicesReady.Load() } func (s *RoSnapshots) IndicesMax() uint64 { return s.idxMax.Load() } func (s *RoSnapshots) SegmentsMax() uint64 { return s.segmentsMax.Load() } +func (s *RoSnapshots) SegmentsMin() uint64 { return s.segmentsMin.Load() } +func (s *RoSnapshots) SetSegmentsMin(min uint64) { s.segmentsMin.Store(min) } func (s *RoSnapshots) BlocksAvailable() uint64 { return cmp.Min(s.segmentsMax.Load(), s.idxMax.Load()) } -func (s *RoSnapshots) LogStat() { +func (s *RoSnapshots) LogStat(label string) { var m runtime.MemStats dbg.ReadMemStats(&m) - s.logger.Info("[snapshots] Blocks Stat", + s.logger.Info(fmt.Sprintf("[snapshots:%s] Blocks Stat", label), "blocks", fmt.Sprintf("%dk", (s.BlocksAvailable()+1)/1000), "indices", fmt.Sprintf("%dk", (s.IndicesMax()+1)/1000), "alloc", common2.ByteCount(m.Alloc), "sys", common2.ByteCount(m.Sys)) @@ -442,19 +502,19 @@ func (s *RoSnapshots) idxAvailability() uint64 { if seg.idxHeaderHash == nil { break } - headers = seg.ranges.to - 1 + headers = seg.to - 1 } for _, seg := range s.Bodies.segments { if seg.idxBodyNumber == nil { break } - bodies = seg.ranges.to - 1 + bodies = seg.to - 1 } for _, seg := range s.Txs.segments { if seg.IdxTxnHash == nil || seg.IdxTxnHash2BlockNum == nil { break } - txs = seg.ranges.to - 1 + txs = seg.to - 1 } return cmp.Min(headers, cmp.Min(bodies, txs)) } @@ -484,7 +544,7 @@ func (s *RoSnapshots) Files() (list []string) { if seg.seg == nil { continue } - if seg.ranges.from > maxBlockNumInFiles { + if seg.from > maxBlockNumInFiles { continue } _, fName := filepath.Split(seg.seg.FilePath()) @@ -494,7 +554,7 @@ func (s *RoSnapshots) Files() (list []string) { if seg.seg == nil { continue } - if seg.ranges.from > maxBlockNumInFiles { + if seg.from > maxBlockNumInFiles { continue } _, fName := filepath.Split(seg.seg.FilePath()) @@ -504,7 +564,7 @@ func (s *RoSnapshots) Files() (list []string) { if seg.Seg == nil { continue } - if seg.ranges.from > maxBlockNumInFiles { + if seg.from > maxBlockNumInFiles { continue } _, fName := filepath.Split(seg.Seg.FilePath()) @@ -514,8 +574,39 @@ func (s *RoSnapshots) Files() (list []string) { return list } +func (s *RoSnapshots) OpenFiles() (list []string) { + s.Headers.lock.RLock() + defer s.Headers.lock.RUnlock() + s.Bodies.lock.RLock() + defer s.Bodies.lock.RUnlock() + s.Txs.lock.RLock() + defer s.Txs.lock.RUnlock() + + for _, header := range s.Headers.segments { + list = append(list, header.openFiles()...) 
+ } + + for _, body := range s.Bodies.segments { + list = append(list, body.openFiles()...) + } + + for _, txs := range s.Txs.segments { + list = append(list, txs.openFiles()...) + } + + return list +} + // ReopenList stops on optimistic=false, continue opening files on optimistic=true func (s *RoSnapshots) ReopenList(fileNames []string, optimistic bool) error { + return s.rebuildSegments(fileNames, true, optimistic) +} + +func (s *RoSnapshots) InitSegments(fileNames []string) error { + return s.rebuildSegments(fileNames, false, true) +} + +func (s *RoSnapshots) rebuildSegments(fileNames []string, open bool, optimistic bool) error { s.Headers.lock.Lock() defer s.Headers.lock.Unlock() s.Bodies.lock.Lock() @@ -549,22 +640,25 @@ Loop: } } if !exists { - sn = &HeaderSegment{ranges: Range{f.From, f.To}} + sn = &HeaderSegment{version: f.Version, Range: Range{f.From, f.To}} } - if err := sn.reopenSeg(s.dir); err != nil { - if errors.Is(err, os.ErrNotExist) { + + if open { + if err := sn.reopenSeg(s.dir); err != nil { + if errors.Is(err, os.ErrNotExist) { + if optimistic { + continue Loop + } else { + break Loop + } + } if optimistic { + s.logger.Warn("[snapshots] open segment", "err", err) continue Loop } else { - break Loop + return err } } - if optimistic { - s.logger.Warn("[snapshots] open segment", "err", err) - continue Loop - } else { - return err - } } if !exists { @@ -572,8 +666,11 @@ Loop: // then make segment available even if index open may fail s.Headers.segments = append(s.Headers.segments, sn) } - if err := sn.reopenIdxIfNeed(s.dir, optimistic); err != nil { - return err + + if open { + if err := sn.reopenIdxIfNeed(s.dir, optimistic); err != nil { + return err + } } case snaptype.Bodies: var sn *BodySegment @@ -589,28 +686,34 @@ Loop: } } if !exists { - sn = &BodySegment{ranges: Range{f.From, f.To}} + sn = &BodySegment{version: f.Version, Range: Range{f.From, f.To}} } - if err := sn.reopenSeg(s.dir); err != nil { - if errors.Is(err, os.ErrNotExist) { + + if open { + if err := sn.reopenSeg(s.dir); err != nil { + if errors.Is(err, os.ErrNotExist) { + if optimistic { + continue Loop + } else { + break Loop + } + } if optimistic { + s.logger.Warn("[snapshots] open segment", "err", err) continue Loop } else { - break Loop + return err } } - if optimistic { - s.logger.Warn("[snapshots] open segment", "err", err) - continue Loop - } else { - return err - } } if !exists { s.Bodies.segments = append(s.Bodies.segments, sn) } - if err := sn.reopenIdxIfNeed(s.dir, optimistic); err != nil { - return err + + if open { + if err := sn.reopenIdxIfNeed(s.dir, optimistic); err != nil { + return err + } } case snaptype.Transactions: var sn *TxnSegment @@ -626,28 +729,35 @@ Loop: } } if !exists { - sn = &TxnSegment{ranges: Range{f.From, f.To}} + sn = &TxnSegment{version: f.Version, Range: Range{f.From, f.To}} } - if err := sn.reopenSeg(s.dir); err != nil { - if errors.Is(err, os.ErrNotExist) { + + if open { + if err := sn.reopenSeg(s.dir); err != nil { + if errors.Is(err, os.ErrNotExist) { + if optimistic { + continue Loop + } else { + break Loop + } + } if optimistic { + s.logger.Warn("[snapshots] open segment", "err", err) continue Loop } else { - break Loop + return err } } - if optimistic { - s.logger.Warn("[snapshots] open segment", "err", err) - continue Loop - } else { - return err - } } + if !exists { s.Txs.segments = append(s.Txs.segments, sn) } - if err := sn.reopenIdxIfNeed(s.dir, optimistic); err != nil { - return err + + if open { + if err := sn.reopenIdxIfNeed(s.dir, optimistic); err != 
nil { + return err + } } default: processed = false @@ -677,7 +787,7 @@ func (s *RoSnapshots) Ranges() (ranges []Range) { defer view.Close() for _, sn := range view.Headers() { - ranges = append(ranges, sn.ranges) + ranges = append(ranges, sn.Range) } return ranges } @@ -685,7 +795,14 @@ func (s *RoSnapshots) Ranges() (ranges []Range) { func (s *RoSnapshots) OptimisticalyReopenFolder() { _ = s.ReopenFolder() } func (s *RoSnapshots) OptimisticalyReopenWithDB(db kv.RoDB) { _ = s.ReopenWithDB(db) } func (s *RoSnapshots) ReopenFolder() error { - files, _, err := Segments(s.dir) + return s.ReopenSegments(snaptype.BlockSnapshotTypes) +} + +func (s *RoSnapshots) ReopenSegments(types []snaptype.Type) error { + files, _, err := segments(s.dir, s.version, 0, func(dir string, in []snaptype.FileInfo) (res []snaptype.FileInfo) { + return typeOfSegmentsMustExist(dir, in, types) + }) + if err != nil { return err } @@ -696,6 +813,7 @@ func (s *RoSnapshots) ReopenFolder() error { } return s.ReopenList(list, false) } + func (s *RoSnapshots) ReopenWithDB(db kv.RoDB) error { if err := db.View(context.Background(), func(tx kv.Tx) error { snList, _, err := rawdb.ReadSnapshots(tx) @@ -806,15 +924,15 @@ func (s *RoSnapshots) PrintDebug() { defer s.Txs.lock.RUnlock() fmt.Println(" == Snapshots, Header") for _, sn := range s.Headers.segments { - fmt.Printf("%d, %t\n", sn.ranges.from, sn.idxHeaderHash == nil) + fmt.Printf("%d, %t\n", sn.from, sn.idxHeaderHash == nil) } fmt.Println(" == Snapshots, Body") for _, sn := range s.Bodies.segments { - fmt.Printf("%d, %t\n", sn.ranges.from, sn.idxBodyNumber == nil) + fmt.Printf("%d, %t\n", sn.from, sn.idxBodyNumber == nil) } fmt.Println(" == Snapshots, Txs") for _, sn := range s.Txs.segments { - fmt.Printf("%d, %t, %t\n", sn.ranges.from, sn.IdxTxnHash == nil, sn.IdxTxnHash2BlockNum == nil) + fmt.Printf("%d, %t, %t\n", sn.from, sn.IdxTxnHash == nil, sn.IdxTxnHash2BlockNum == nil) } } @@ -876,7 +994,7 @@ func buildIdx(ctx context.Context, sn snaptype.FileInfo, chainConfig *chain.Conf //log.Info("[snapshots] build idx", "file", fName) switch sn.T { case snaptype.Headers: - if err := HeadersIdx(ctx, chainConfig, sn.Path, sn.From, tmpDir, p, lvl, logger); err != nil { + if err := HeadersIdx(ctx, sn.Path, sn.Version, sn.From, tmpDir, p, lvl, logger); err != nil { return err } case snaptype.Bodies: @@ -885,17 +1003,17 @@ func buildIdx(ctx context.Context, sn snaptype.FileInfo, chainConfig *chain.Conf } case snaptype.Transactions: dir, _ := filepath.Split(sn.Path) - if err := TransactionsIdx(ctx, chainConfig, sn.From, sn.To, dir, tmpDir, p, lvl, logger); err != nil { + if err := TransactionsIdx(ctx, chainConfig, sn.Version, sn.From, sn.To, dir, tmpDir, p, lvl, logger); err != nil { return err } case snaptype.BorEvents: dir, _ := filepath.Split(sn.Path) - if err := BorEventsIdx(ctx, sn.Path, sn.From, sn.To, dir, tmpDir, p, lvl, logger); err != nil { + if err := BorEventsIdx(ctx, sn.Path, sn.Version, sn.From, sn.To, dir, tmpDir, p, lvl, logger); err != nil { return err } case snaptype.BorSpans: dir, _ := filepath.Split(sn.Path) - if err := BorSpansIdx(ctx, sn.Path, sn.From, sn.To, dir, tmpDir, p, lvl, logger); err != nil { + if err := BorSpansIdx(ctx, sn.Path, sn.Version, sn.From, sn.To, dir, tmpDir, p, lvl, logger); err != nil { return err } } @@ -903,11 +1021,11 @@ func buildIdx(ctx context.Context, sn snaptype.FileInfo, chainConfig *chain.Conf return nil } -func BuildMissedIndices(logPrefix string, ctx context.Context, dirs datadir.Dirs, chainConfig *chain.Config, workers int, 
logger log.Logger) error { +func BuildMissedIndices(logPrefix string, ctx context.Context, dirs datadir.Dirs, version uint8, minIndex uint64, chainConfig *chain.Config, workers int, logger log.Logger) error { dir, tmpDir := dirs.Snap, dirs.Tmp //log.Log(lvl, "[snapshots] Build indices", "from", min) - segments, _, err := Segments(dir) + segments, _, err := Segments(dir, version, minIndex) if err != nil { return err } @@ -927,6 +1045,7 @@ func BuildMissedIndices(logPrefix string, ctx context.Context, dirs datadir.Dirs case <-logEvery.C: var m runtime.MemStats dbg.ReadMemStats(&m) + sendDiagnostics(startIndexingTime, ps.DiagnossticsData(), m.Alloc, m.Sys) logger.Info(fmt.Sprintf("[%s] Indexing", logPrefix), "progress", ps.String(), "total-indexing-time", time.Since(startIndexingTime).Round(time.Second).String(), "alloc", common2.ByteCount(m.Alloc), "sys", common2.ByteCount(m.Sys)) case <-finish: return @@ -949,6 +1068,7 @@ func BuildMissedIndices(logPrefix string, ctx context.Context, dirs datadir.Dirs g.Go(func() error { p := &background.Progress{} ps.Add(p) + defer notifySegmentIndexingFinished(sn.Name()) defer ps.Delete(p) return buildIdx(gCtx, sn, chainConfig, tmpDir, p, log.LvlInfo, logger) }) @@ -966,13 +1086,12 @@ func BuildMissedIndices(logPrefix string, ctx context.Context, dirs datadir.Dirs case <-ctx.Done(): return ctx.Err() } - } -func BuildBorMissedIndices(logPrefix string, ctx context.Context, dirs datadir.Dirs, chainConfig *chain.Config, workers int, logger log.Logger) error { +func BuildBorMissedIndices(logPrefix string, ctx context.Context, dirs datadir.Dirs, version uint8, minIndex uint64, chainConfig *chain.Config, workers int, logger log.Logger) error { dir, tmpDir := dirs.Snap, dirs.Tmp - segments, _, err := BorSegments(dir) + segments, _, err := BorSegments(dir, version, minIndex) if err != nil { return err } @@ -981,7 +1100,7 @@ func BuildBorMissedIndices(logPrefix string, ctx context.Context, dirs datadir.D g, gCtx := errgroup.WithContext(ctx) g.SetLimit(workers) - for _, t := range []snaptype.Type{snaptype.BorEvents, snaptype.BorSpans} { + for _, t := range snaptype.BorSnapshotTypes { for _, segment := range segments { if segment.T != t { continue @@ -993,6 +1112,7 @@ func BuildBorMissedIndices(logPrefix string, ctx context.Context, dirs datadir.D g.Go(func() error { p := &background.Progress{} ps.Add(p) + defer notifySegmentIndexingFinished(sn.Name()) defer ps.Delete(p) return buildIdx(gCtx, sn, chainConfig, tmpDir, p, log.LvlInfo, logger) }) @@ -1015,13 +1135,38 @@ func BuildBorMissedIndices(logPrefix string, ctx context.Context, dirs datadir.D case <-logEvery.C: var m runtime.MemStats dbg.ReadMemStats(&m) + sendDiagnostics(startIndexingTime, ps.DiagnossticsData(), m.Alloc, m.Sys) logger.Info(fmt.Sprintf("[%s] Indexing", logPrefix), "progress", ps.String(), "total-indexing-time", time.Since(startIndexingTime).Round(time.Second).String(), "alloc", common2.ByteCount(m.Alloc), "sys", common2.ByteCount(m.Sys)) } } } -func noGaps(in []snaptype.FileInfo) (out []snaptype.FileInfo, missingSnapshots []Range) { - var prevTo uint64 +func notifySegmentIndexingFinished(name string) { + diagnostics.Send( + diagnostics.SnapshotSegmentIndexingFinishedUpdate{ + SegmentName: name, + }, + ) +} + +func sendDiagnostics(startIndexingTime time.Time, indexPercent map[string]int, alloc uint64, sys uint64) { + segmentsStats := make([]diagnostics.SnapshotSegmentIndexingStatistics, 0, len(indexPercent)) + for k, v := range indexPercent { + segmentsStats = append(segmentsStats, 
diagnostics.SnapshotSegmentIndexingStatistics{ + SegmentName: k, + Percent: v, + Alloc: alloc, + Sys: sys, + }) + } + diagnostics.Send(diagnostics.SnapshotIndexingStatistics{ + Segments: segmentsStats, + TimeElapsed: time.Since(startIndexingTime).Round(time.Second).Seconds(), + }) +} + +func noGaps(in []snaptype.FileInfo, from uint64) (out []snaptype.FileInfo, missingSnapshots []Range) { + prevTo := from for _, f := range in { if f.To <= prevTo { continue @@ -1036,14 +1181,14 @@ func noGaps(in []snaptype.FileInfo) (out []snaptype.FileInfo, missingSnapshots [ return out, missingSnapshots } -func allTypeOfSegmentsMustExist(dir string, in []snaptype.FileInfo) (res []snaptype.FileInfo) { +func typeOfSegmentsMustExist(dir string, in []snaptype.FileInfo, types []snaptype.Type) (res []snaptype.FileInfo) { MainLoop: for _, f := range in { if f.From == f.To { continue } - for _, t := range snaptype.BlockSnapshotTypes { - p := filepath.Join(dir, snaptype.SegmentFileName(f.From, f.To, t)) + for _, t := range types { + p := filepath.Join(dir, snaptype.SegmentFileName(f.Version, f.From, f.To, t)) if !dir2.FileExist(p) { continue MainLoop } @@ -1053,21 +1198,12 @@ MainLoop: return res } +func allTypeOfSegmentsMustExist(dir string, in []snaptype.FileInfo) (res []snaptype.FileInfo) { + return typeOfSegmentsMustExist(dir, in, snaptype.BlockSnapshotTypes) +} + func borSegmentsMustExist(dir string, in []snaptype.FileInfo) (res []snaptype.FileInfo) { -MainLoop: - for _, f := range in { - if f.From == f.To { - continue - } - for _, t := range []snaptype.Type{snaptype.BorEvents, snaptype.BorSpans} { - p := filepath.Join(dir, snaptype.SegmentFileName(f.From, f.To, t)) - if !dir2.FileExist(p) { - continue MainLoop - } - } - res = append(res, f) - } - return res + return typeOfSegmentsMustExist(dir, in, []snaptype.Type{snaptype.BorEvents, snaptype.BorSpans}) } // noOverlaps - keep largest ranges and avoid overlap @@ -1095,8 +1231,8 @@ func noOverlaps(in []snaptype.FileInfo) (res []snaptype.FileInfo) { return res } -func SegmentsCaplin(dir string) (res []snaptype.FileInfo, missingSnapshots []Range, err error) { - list, err := snaptype.Segments(dir) +func SegmentsCaplin(dir string, version uint8, minBlock uint64) (res []snaptype.FileInfo, missingSnapshots []Range, err error) { + list, err := snaptype.Segments(dir, version) if err != nil { return nil, missingSnapshots, err } @@ -1110,15 +1246,19 @@ func SegmentsCaplin(dir string) (res []snaptype.FileInfo, missingSnapshots []Ran } l = append(l, f) } - l, m = noGaps(noOverlaps(l)) + l, m = noGaps(noOverlaps(l), minBlock) res = append(res, l...) missingSnapshots = append(missingSnapshots, m...) 
} return res, missingSnapshots, nil } -func Segments(dir string) (res []snaptype.FileInfo, missingSnapshots []Range, err error) { - list, err := snaptype.Segments(dir) +func Segments(dir string, version uint8, minBlock uint64) (res []snaptype.FileInfo, missingSnapshots []Range, err error) { + return segments(dir, version, minBlock, allTypeOfSegmentsMustExist) +} + +func segments(dir string, version uint8, minBlock uint64, segmentsTypeCheck func(dir string, in []snaptype.FileInfo) []snaptype.FileInfo) (res []snaptype.FileInfo, missingSnapshots []Range, err error) { + list, err := snaptype.Segments(dir, version) if err != nil { return nil, missingSnapshots, err } @@ -1131,7 +1271,7 @@ func Segments(dir string) (res []snaptype.FileInfo, missingSnapshots []Range, er } l = append(l, f) } - l, m = noGaps(noOverlaps(allTypeOfSegmentsMustExist(dir, l))) + l, m = noGaps(noOverlaps(segmentsTypeCheck(dir, l)), minBlock) res = append(res, l...) missingSnapshots = append(missingSnapshots, m...) } @@ -1143,7 +1283,7 @@ func Segments(dir string) (res []snaptype.FileInfo, missingSnapshots []Range, er } l = append(l, f) } - l, _ = noGaps(noOverlaps(allTypeOfSegmentsMustExist(dir, l))) + l, _ = noGaps(noOverlaps(segmentsTypeCheck(dir, l)), minBlock) res = append(res, l...) } { @@ -1154,7 +1294,7 @@ func Segments(dir string) (res []snaptype.FileInfo, missingSnapshots []Range, er } l = append(l, f) } - l, _ = noGaps(noOverlaps(allTypeOfSegmentsMustExist(dir, l))) + l, _ = noGaps(noOverlaps(segmentsTypeCheck(dir, l)), minBlock) res = append(res, l...) } @@ -1173,6 +1313,7 @@ func chooseSegmentEnd(from, to, blocksPerFile uint64) uint64 { } type BlockRetire struct { + maxScheduledBlock atomic.Uint64 working atomic.Bool needSaveFilesListInDB atomic.Bool @@ -1185,12 +1326,23 @@ type BlockRetire struct { blockReader services.FullBlockReader blockWriter *blockio.BlockWriter dirs datadir.Dirs + chainConfig *chain.Config } -func NewBlockRetire(workers int, dirs datadir.Dirs, blockReader services.FullBlockReader, blockWriter *blockio.BlockWriter, db kv.RoDB, notifier services.DBEventNotifier, logger log.Logger) *BlockRetire { - return &BlockRetire{workers: workers, tmpDir: dirs.Tmp, dirs: dirs, blockReader: blockReader, blockWriter: blockWriter, db: db, notifier: notifier, logger: logger} +func NewBlockRetire(compressWorkers int, dirs datadir.Dirs, blockReader services.FullBlockReader, blockWriter *blockio.BlockWriter, db kv.RoDB, chainConfig *chain.Config, notifier services.DBEventNotifier, logger log.Logger) *BlockRetire { + return &BlockRetire{workers: compressWorkers, tmpDir: dirs.Tmp, dirs: dirs, blockReader: blockReader, blockWriter: blockWriter, db: db, chainConfig: chainConfig, notifier: notifier, logger: logger} } +func (br *BlockRetire) SetWorkers(workers int) { + br.workers = workers +} + +func (br *BlockRetire) IO() (services.FullBlockReader, *blockio.BlockWriter) { + return br.blockReader, br.blockWriter +} + +func (br *BlockRetire) Writer() *RoSnapshots { return br.blockReader.Snapshots().(*RoSnapshots) } + func (br *BlockRetire) snapshots() *RoSnapshots { return br.blockReader.Snapshots().(*RoSnapshots) } func (br *BlockRetire) borSnapshots() *BorRoSnapshots { @@ -1249,29 +1401,34 @@ func CanDeleteTo(curBlockNum uint64, blocksInSnapshots uint64) (blockTo uint64) return cmp.Min(hardLimit, blocksInSnapshots+1) } -func (br *BlockRetire) RetireBlocks(ctx context.Context, blockFrom, blockTo uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDelete func(l []string) 
error) error { - chainConfig := fromdb.ChainConfig(br.db) +func (br *BlockRetire) retireBlocks(ctx context.Context, minBlockNum uint64, maxBlockNum uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDelete func(l []string) error) (bool, error) { notifier, logger, blockReader, tmpDir, db, workers := br.notifier, br.logger, br.blockReader, br.tmpDir, br.db, br.workers - logger.Log(lvl, "[snapshots] Retire Blocks", "range", fmt.Sprintf("%dk-%dk", blockFrom/1000, blockTo/1000)) snapshots := br.snapshots() firstTxNum := blockReader.(*BlockReader).FirstTxNumNotInSnapshots() - // in future we will do it in background - if err := DumpBlocks(ctx, blockFrom, blockTo, snaptype.Erigon2MergeLimit, tmpDir, snapshots.Dir(), firstTxNum, db, workers, lvl, logger, blockReader); err != nil { - return fmt.Errorf("DumpBlocks: %w", err) - } - if err := snapshots.ReopenFolder(); err != nil { - return fmt.Errorf("reopen: %w", err) - } - snapshots.LogStat() - if notifier != nil && !reflect.ValueOf(notifier).IsNil() { // notify about new snapshots of any size - notifier.OnNewSnapshot() + blockFrom, blockTo, ok := CanRetire(maxBlockNum, minBlockNum) + + if ok { + logger.Log(lvl, "[snapshots] Retire Blocks", "range", fmt.Sprintf("%dk-%dk", blockFrom/1000, blockTo/1000)) + // in future we will do it in background + if err := DumpBlocks(ctx, snapshots.version, blockFrom, blockTo, snaptype.Erigon2MergeLimit, tmpDir, snapshots.Dir(), firstTxNum, db, workers, lvl, logger, blockReader); err != nil { + return ok, fmt.Errorf("DumpBlocks: %w", err) + } + if err := snapshots.ReopenFolder(); err != nil { + return ok, fmt.Errorf("reopen: %w", err) + } + snapshots.LogStat("retire") + if notifier != nil && !reflect.ValueOf(notifier).IsNil() { // notify about new snapshots of any size + notifier.OnNewSnapshot() + } } - merger := NewMerger(tmpDir, workers, lvl, db, chainConfig, logger) + + merger := NewMerger(tmpDir, workers, lvl, db, br.chainConfig, logger) rangesToMerge := merger.FindMergeRanges(snapshots.Ranges(), snapshots.BlocksAvailable()) if len(rangesToMerge) == 0 { - return nil + return ok, nil } + ok = true // have something to merge onMerge := func(r Range) error { if notifier != nil && !reflect.ValueOf(notifier).IsNil() { // notify about new snapshots of any size notifier.OnNewSnapshot() @@ -1289,13 +1446,13 @@ func (br *BlockRetire) RetireBlocks(ctx context.Context, blockFrom, blockTo uint } err := merger.Merge(ctx, snapshots, rangesToMerge, snapshots.Dir(), true /* doIndex */, onMerge, onDelete) if err != nil { - return err + return ok, err } - return nil + return ok, nil } -func (br *BlockRetire) PruneAncientBlocks(tx kv.RwTx, limit int, includeBor bool) error { +func (br *BlockRetire) PruneAncientBlocks(tx kv.RwTx, limit int) error { if br.blockReader.FreezingCfg().KeepBlocks { return nil } @@ -1304,133 +1461,200 @@ func (br *BlockRetire) PruneAncientBlocks(tx kv.RwTx, limit int, includeBor bool return err } canDeleteTo := CanDeleteTo(currentProgress, br.blockReader.FrozenBlocks()) + + br.logger.Info("[snapshots] Prune Blocks", "to", canDeleteTo, "limit", limit) if err := br.blockWriter.PruneBlocks(context.Background(), tx, canDeleteTo, limit); err != nil { - return nil + return err } + includeBor := br.chainConfig.Bor != nil if includeBor { canDeleteTo := CanDeleteTo(currentProgress, br.blockReader.FrozenBorBlocks()) - if err := br.blockWriter.PruneBorBlocks(context.Background(), tx, canDeleteTo, limit); err != nil { - return nil + br.logger.Info("[snapshots] Prune Bor 
Blocks", "to", canDeleteTo, "limit", limit) + + if err := br.blockWriter.PruneBorBlocks(context.Background(), tx, canDeleteTo, limit, span.IDAt); err != nil { + return err } } return nil } -func (br *BlockRetire) RetireBlocksInBackground(ctx context.Context, forwardProgress uint64, includeBor bool, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDeleteSnapshots func(l []string) error) { - ok := br.working.CompareAndSwap(false, true) - if !ok { - // go-routine is still working +func (br *BlockRetire) RetireBlocksInBackground(ctx context.Context, minBlockNum uint64, maxBlockNum uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDeleteSnapshots func(l []string) error) { + if maxBlockNum > br.maxScheduledBlock.Load() { + br.maxScheduledBlock.Store(maxBlockNum) + } + + if !br.working.CompareAndSwap(false, true) { return } + go func() { + defer br.working.Store(false) - blockFrom, blockTo, ok := CanRetire(forwardProgress, br.blockReader.FrozenBlocks()) - if ok { - if err := br.RetireBlocks(ctx, blockFrom, blockTo, lvl, seedNewSnapshots, onDeleteSnapshots); err != nil { - br.logger.Warn("[snapshots] retire blocks", "err", err, "fromBlock", blockFrom, "toBlock", blockTo) + for { + maxBlockNum := br.maxScheduledBlock.Load() + + err := br.RetireBlocks(ctx, minBlockNum, maxBlockNum, lvl, seedNewSnapshots, onDeleteSnapshots) + + if err != nil { + br.logger.Warn("[snapshots] retire blocks", "err", err) + return } - } - if includeBor { - blockFrom, blockTo, ok = CanRetire(forwardProgress, br.blockReader.FrozenBorBlocks()) - if ok { - if err := br.RetireBorBlocks(ctx, blockFrom, blockTo, lvl, seedNewSnapshots, onDeleteSnapshots); err != nil { - br.logger.Warn("[bor snapshots] retire blocks", "err", err, "fromBlock", blockFrom, "toBlock", blockTo) - } + if maxBlockNum == br.maxScheduledBlock.Load() { + return } } }() } -func (br *BlockRetire) BuildMissedIndicesIfNeed(ctx context.Context, logPrefix string, notifier services.DBEventNotifier, cc *chain.Config) error { - snapshots := br.snapshots() - snapshots.LogStat() - - // Create .idx files - if snapshots.IndicesMax() < snapshots.SegmentsMax() { +func (br *BlockRetire) RetireBlocks(ctx context.Context, minBlockNum uint64, maxBlockNum uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDeleteSnapshots func(l []string) error) (err error) { + includeBor := br.chainConfig.Bor != nil - if !snapshots.Cfg().Produce && snapshots.IndicesMax() == 0 { - return fmt.Errorf("please remove --snap.stop, erigon can't work without creating basic indices") + if includeBor { + // "bor snaps" can be behind "block snaps", it's ok: for example because of `kill -9` in the middle of merge + if frozen := br.blockReader.FrozenBlocks(); frozen > minBlockNum { + minBlockNum = frozen } - if snapshots.Cfg().Produce { - if !snapshots.SegmentsReady() { - return fmt.Errorf("not all snapshot segments are available") - } - - // wait for Downloader service to download all expected snapshots - if snapshots.IndicesMax() < snapshots.SegmentsMax() { - indexWorkers := estimate.IndexSnapshot.Workers() - if err := BuildMissedIndices(logPrefix, ctx, br.dirs, cc, indexWorkers, br.logger); err != nil { - return fmt.Errorf("BuildMissedIndices: %w", err) - } - } - if err := snapshots.ReopenFolder(); err != nil { + for br.blockReader.FrozenBorBlocks() < minBlockNum { + ok, err := br.retireBorBlocks(ctx, minBlockNum, maxBlockNum, lvl, seedNewSnapshots, onDeleteSnapshots) + if 
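
// ---- editor's note: explanatory sketch, not part of this diff ----
// RetireBlocksInBackground above replaces "skip if busy" with a coalescing
// worker: every caller raises maxScheduledBlock, at most one goroutine wins the
// CompareAndSwap, and that goroutine loops until the high-water mark stops
// moving, so requests arriving mid-run are folded into the next pass instead of
// being dropped. A minimal standalone version of the pattern (illustrative
// names, not an Erigon API):
package sketch

import "sync/atomic"

type coalescingWorker struct {
	maxScheduled atomic.Uint64 // highest target anyone has scheduled
	working      atomic.Bool   // true while the single worker goroutine runs
}

func (w *coalescingWorker) Schedule(target uint64, run func(upTo uint64) error) {
	if target > w.maxScheduled.Load() {
		w.maxScheduled.Store(target)
	}
	if !w.working.CompareAndSwap(false, true) {
		return // a worker is already running; it will observe the new target
	}
	go func() {
		defer w.working.Store(false)
		for {
			upTo := w.maxScheduled.Load()
			if err := run(upTo); err != nil {
				return // the caller logs the error, as the diff does
			}
			if upTo == w.maxScheduled.Load() {
				return // nothing new was scheduled while we were working
			}
		}
	}()
}
// ---- end note ----
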
err != nil { return err } - snapshots.LogStat() - if notifier != nil { - notifier.OnNewSnapshot() + if !ok { + break } } } - if cc.Bor != nil { - borSnapshots := br.borSnapshots() - borSnapshots.LogStat() - // Create .idx files - if borSnapshots.IndicesMax() < borSnapshots.SegmentsMax() { - - if !borSnapshots.Cfg().Produce && borSnapshots.IndicesMax() == 0 { - return fmt.Errorf("please remove --snap.stop, erigon can't work without creating basic indices") - } - if borSnapshots.Cfg().Produce { - if !borSnapshots.SegmentsReady() { - return fmt.Errorf("not all bor snapshot segments are available") - } + var ok, okBor bool + for { + if frozen := br.blockReader.FrozenBlocks(); frozen > minBlockNum { + minBlockNum = frozen + } - // wait for Downloader service to download all expected snapshots - if borSnapshots.IndicesMax() < borSnapshots.SegmentsMax() { - indexWorkers := estimate.IndexSnapshot.Workers() - if err := BuildBorMissedIndices(logPrefix, ctx, br.dirs, cc, indexWorkers, br.logger); err != nil { - return fmt.Errorf("BuildBorMissedIndices: %w", err) - } - } + ok, err = br.retireBlocks(ctx, minBlockNum, maxBlockNum, lvl, seedNewSnapshots, onDeleteSnapshots) + if err != nil { + return err + } - if err := borSnapshots.ReopenFolder(); err != nil { - return err - } - borSnapshots.LogStat() - if notifier != nil { - notifier.OnNewSnapshot() - } + if includeBor { + okBor, err = br.retireBorBlocks(ctx, minBlockNum, maxBlockNum, lvl, seedNewSnapshots, onDeleteSnapshots) + if err != nil { + return err } } + haveMore := ok || okBor + if !haveMore { + break + } + } + + return nil +} + +func (br *BlockRetire) BuildMissedIndicesIfNeed(ctx context.Context, logPrefix string, notifier services.DBEventNotifier, cc *chain.Config) error { + if err := br.buildMissedIndicesIfNeed(ctx, logPrefix, notifier, cc); err != nil { + return err + } + + if err := br.buildBorMissedIndicesIfNeed(ctx, logPrefix, notifier, cc); err != nil { + return err + } + + return nil +} + +func (br *BlockRetire) buildMissedIndicesIfNeed(ctx context.Context, logPrefix string, notifier services.DBEventNotifier, cc *chain.Config) error { + snapshots := br.snapshots() + if snapshots.IndicesMax() >= snapshots.SegmentsMax() { + return nil + } + snapshots.LogStat("missed-idx") + if !snapshots.Cfg().Produce && snapshots.IndicesMax() == 0 { + return fmt.Errorf("please remove --snap.stop, erigon can't work without creating basic indices") + } + if !snapshots.Cfg().Produce { + return nil + } + if !snapshots.SegmentsReady() { + return fmt.Errorf("not all snapshot segments are available") + } + + // wait for Downloader service to download all expected snapshots + indexWorkers := estimate.IndexSnapshot.Workers() + if err := BuildMissedIndices(logPrefix, ctx, br.dirs, snapshots.Version(), snapshots.SegmentsMin(), cc, indexWorkers, br.logger); err != nil { + return fmt.Errorf("BuildMissedIndices: %w", err) + } + + if err := snapshots.ReopenFolder(); err != nil { + return err + } + snapshots.LogStat("missed-idx:reopen") + if notifier != nil { + notifier.OnNewSnapshot() + } + return nil +} + +func (br *BlockRetire) buildBorMissedIndicesIfNeed(ctx context.Context, logPrefix string, notifier services.DBEventNotifier, cc *chain.Config) error { + if cc.Bor == nil { + return nil + } + + borSnapshots := br.borSnapshots() + if borSnapshots.IndicesMax() >= borSnapshots.SegmentsMax() { + return nil + } + + borSnapshots.LogStat("bor:missed-idx") + if !borSnapshots.Cfg().Produce && borSnapshots.IndicesMax() == 0 { + return fmt.Errorf("please remove --snap.stop, 
erigon can't work without creating basic indices") + } + if !borSnapshots.Cfg().Produce { + return nil + } + if !borSnapshots.SegmentsReady() { + return fmt.Errorf("not all bor snapshot segments are available") + } + + // wait for Downloader service to download all expected snapshots + indexWorkers := estimate.IndexSnapshot.Workers() + if err := BuildBorMissedIndices(logPrefix, ctx, br.dirs, borSnapshots.Version(), borSnapshots.SegmentsMin(), cc, indexWorkers, br.logger); err != nil { + return fmt.Errorf("BuildBorMissedIndices: %w", err) + } + + if err := borSnapshots.ReopenFolder(); err != nil { + return err + } + borSnapshots.LogStat("bor:missed-idx:reopen") + if notifier != nil { + notifier.OnNewSnapshot() } return nil } -func DumpBlocks(ctx context.Context, blockFrom, blockTo, blocksPerFile uint64, tmpDir, snapDir string, firstTxNum uint64, chainDB kv.RoDB, workers int, lvl log.Lvl, logger log.Logger, blockReader services.FullBlockReader) error { +func DumpBlocks(ctx context.Context, version uint8, blockFrom, blockTo, blocksPerFile uint64, tmpDir, snapDir string, firstTxNum uint64, chainDB kv.RoDB, workers int, lvl log.Lvl, logger log.Logger, blockReader services.FullBlockReader) error { if blocksPerFile == 0 { return nil } chainConfig := fromdb.ChainConfig(chainDB) for i := blockFrom; i < blockTo; i = chooseSegmentEnd(i, blockTo, blocksPerFile) { - if err := dumpBlocksRange(ctx, i, chooseSegmentEnd(i, blockTo, blocksPerFile), tmpDir, snapDir, firstTxNum, chainDB, *chainConfig, workers, lvl, logger, blockReader); err != nil { + if err := dumpBlocksRange(ctx, version, i, chooseSegmentEnd(i, blockTo, blocksPerFile), tmpDir, snapDir, firstTxNum, chainDB, *chainConfig, workers, lvl, logger, blockReader); err != nil { return err } } return nil } -func dumpBlocksRange(ctx context.Context, blockFrom, blockTo uint64, tmpDir, snapDir string, firstTxNum uint64, chainDB kv.RoDB, chainConfig chain.Config, workers int, lvl log.Lvl, logger log.Logger, blockReader services.FullBlockReader) error { +func dumpBlocksRange(ctx context.Context, version uint8, blockFrom, blockTo uint64, tmpDir, snapDir string, firstTxNum uint64, chainDB kv.RoDB, chainConfig chain.Config, workers int, lvl log.Lvl, logger log.Logger, blockReader services.FullBlockReader) error { logEvery := time.NewTicker(20 * time.Second) defer logEvery.Stop() { - segName := snaptype.SegmentFileName(blockFrom, blockTo, snaptype.Headers) + segName := snaptype.SegmentFileName(version, blockFrom, blockTo, snaptype.Headers) f, _ := snaptype.ParseFileName(snapDir, segName) sn, err := compress.NewCompressor(ctx, "Snapshot Headers", f.Path, tmpDir, compress.MinPatternScore, workers, log.LvlTrace, logger) @@ -1454,7 +1678,7 @@ func dumpBlocksRange(ctx context.Context, blockFrom, blockTo uint64, tmpDir, sna } { - segName := snaptype.SegmentFileName(blockFrom, blockTo, snaptype.Bodies) + segName := snaptype.SegmentFileName(version, blockFrom, blockTo, snaptype.Bodies) f, _ := snaptype.ParseFileName(snapDir, segName) sn, err := compress.NewCompressor(ctx, "Snapshot Bodies", f.Path, tmpDir, compress.MinPatternScore, workers, log.LvlTrace, logger) @@ -1478,7 +1702,7 @@ func dumpBlocksRange(ctx context.Context, blockFrom, blockTo uint64, tmpDir, sna } { - segName := snaptype.SegmentFileName(blockFrom, blockTo, snaptype.Transactions) + segName := snaptype.SegmentFileName(version, blockFrom, blockTo, snaptype.Transactions) f, _ := snaptype.ParseFileName(snapDir, segName) sn, err := compress.NewCompressor(ctx, "Snapshot Txs", f.Path, tmpDir, 
compress.MinPatternScore, workers, log.LvlTrace, logger) @@ -1498,9 +1722,9 @@ func dumpBlocksRange(ctx context.Context, blockFrom, blockTo uint64, tmpDir, sna } snapDir, fileName := filepath.Split(f.Path) ext := filepath.Ext(fileName) - logger.Log(lvl, "[snapshots] Compression", "ratio", sn.Ratio.String(), "file", fileName[:len(fileName)-len(ext)]) - - _, expectedCount, err = txsAmountBasedOnBodiesSnapshots(snapDir, blockFrom, blockTo) + logger.Log(lvl, "[snapshots] Compression start", "file", fileName[:len(fileName)-len(ext)], "workers", sn.Workers()) + t := time.Now() + _, expectedCount, err = txsAmountBasedOnBodiesSnapshots(snapDir, version, blockFrom, blockTo) if err != nil { return err } @@ -1510,6 +1734,7 @@ func dumpBlocksRange(ctx context.Context, blockFrom, blockTo uint64, tmpDir, sna if err := sn.Compress(); err != nil { return fmt.Errorf("compress: %w", err) } + logger.Log(lvl, "[snapshots] Compression", "took", time.Since(t), "ratio", sn.Ratio.String(), "file", fileName[:len(fileName)-len(ext)]) p := &background.Progress{} if err := buildIdx(ctx, f, &chainConfig, tmpDir, p, lvl, logger); err != nil { @@ -1522,24 +1747,24 @@ func dumpBlocksRange(ctx context.Context, blockFrom, blockTo uint64, tmpDir, sna func hasIdxFile(sn snaptype.FileInfo, logger log.Logger) bool { dir, _ := filepath.Split(sn.Path) - fName := snaptype.IdxFileName(sn.From, sn.To, sn.T.String()) + fName := snaptype.IdxFileName(sn.Version, sn.From, sn.To, sn.T.String()) var result = true switch sn.T { case snaptype.Headers, snaptype.Bodies, snaptype.BorEvents, snaptype.BorSpans, snaptype.BeaconBlocks: - idx, err := recsplit.OpenIndex(path.Join(dir, fName)) + idx, err := recsplit.OpenIndex(filepath.Join(dir, fName)) if err != nil { return false } idx.Close() case snaptype.Transactions: - idx, err := recsplit.OpenIndex(path.Join(dir, fName)) + idx, err := recsplit.OpenIndex(filepath.Join(dir, fName)) if err != nil { return false } idx.Close() - fName = snaptype.IdxFileName(sn.From, sn.To, snaptype.Transactions2Block.String()) - idx, err = recsplit.OpenIndex(path.Join(dir, fName)) + fName = snaptype.IdxFileName(sn.Version, sn.From, sn.To, snaptype.Transactions2Block.String()) + idx, err = recsplit.OpenIndex(filepath.Join(dir, fName)) if err != nil { return false } @@ -1548,6 +1773,12 @@ func hasIdxFile(sn snaptype.FileInfo, logger log.Logger) bool { return result } +var bufPool = sync.Pool{ + New: func() any { + return make([]byte, 16*4096) + }, +} + // DumpTxs - [from, to) // Format: hash[0]_1byte + sender_address_2bytes + txnRlp func DumpTxs(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, chainConfig *chain.Config, workers int, lvl log.Lvl, logger log.Logger, collect func([]byte) error) (expectedCount int, err error) { @@ -1559,12 +1790,12 @@ func DumpTxs(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, chainCo chainID, _ := uint256.FromBig(chainConfig.ChainID) numBuf := make([]byte, 8) - parseCtx := types2.NewTxParseContext(*chainID) - parseCtx.WithSender(false) - slot := types2.TxSlot{} - var sender [20]byte - parse := func(v, valueBuf []byte, senders []common2.Address, j int) ([]byte, error) { - if _, err := parseCtx.ParseTransaction(v, 0, &slot, sender[:], false /* hasEnvelope */, false /* wrappedWithBlobs */, nil); err != nil { + + parse := func(ctx *types2.TxParseContext, v, valueBuf []byte, senders []common2.Address, j int) ([]byte, error) { + var sender [20]byte + slot := types2.TxSlot{} + + if _, err := ctx.ParseTransaction(v, 0, &slot, sender[:], false /* hasEnvelope */, false /* 
wrappedWithBlobs */, nil); err != nil { return valueBuf, err } if len(senders) > 0 { @@ -1577,8 +1808,8 @@ func DumpTxs(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, chainCo valueBuf = append(valueBuf, v...) return valueBuf, nil } - valueBuf := make([]byte, 16*4096) - addSystemTx := func(tx kv.Tx, txId uint64) error { + + addSystemTx := func(ctx *types2.TxParseContext, tx kv.Tx, txId uint64) error { binary.BigEndian.PutUint64(numBuf, txId) tv, err := tx.GetOne(kv.EthTx, numBuf) if err != nil { @@ -1591,8 +1822,12 @@ func DumpTxs(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, chainCo return nil } - parseCtx.WithSender(false) - valueBuf, err = parse(tv, valueBuf, nil, 0) + ctx.WithSender(false) + + valueBuf := bufPool.Get().([]byte) + defer bufPool.Put(valueBuf) //nolint + + valueBuf, err = parse(ctx, tv, valueBuf, nil, 0) if err != nil { return err } @@ -1637,30 +1872,89 @@ func DumpTxs(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, chainCo return false, err } - j := 0 + workers := estimate.AlmostAllCPUs() + + if workers > 3 { + workers = workers / 3 * 2 + } - if err := addSystemTx(tx, body.BaseTxId); err != nil { + if workers > int(body.TxAmount-2) { + if int(body.TxAmount-2) > 1 { + workers = int(body.TxAmount - 2) + } else { + workers = 1 + } + } + + parsers := errgroup.Group{} + parsers.SetLimit(workers) + + valueBufs := make([][]byte, workers) + parseCtxs := make([]*types2.TxParseContext, workers) + + for i := 0; i < workers; i++ { + valueBuf := bufPool.Get().([]byte) + defer bufPool.Put(valueBuf) //nolint + valueBufs[i] = valueBuf + parseCtxs[i] = types2.NewTxParseContext(*chainID) + } + + if err := addSystemTx(parseCtxs[0], tx, body.BaseTxId); err != nil { return false, err } + binary.BigEndian.PutUint64(numBuf, body.BaseTxId+1) + + collected := -1 + collectorLock := sync.Mutex{} + collections := sync.NewCond(&collectorLock) + + var j int + if err := tx.ForAmount(kv.EthTx, numBuf, body.TxAmount-2, func(_, tv []byte) error { - parseCtx.WithSender(len(senders) == 0) - valueBuf, err = parse(tv, valueBuf, senders, j) - if err != nil { - return fmt.Errorf("%w, block: %d", err, blockNum) - } - // first tx byte => sender adress => tx rlp - if err := collect(valueBuf); err != nil { - return err - } + tx := j j++ + parsers.Go(func() error { + parseCtx := parseCtxs[tx%workers] + + parseCtx.WithSender(len(senders) == 0) + parseCtx.WithAllowPreEip2s(blockNum <= chainConfig.HomesteadBlock.Uint64()) + + valueBuf, err := parse(parseCtx, tv, valueBufs[tx%workers], senders, tx) + + if err != nil { + return fmt.Errorf("%w, block: %d", err, blockNum) + } + + collectorLock.Lock() + defer collectorLock.Unlock() + + for collected < tx-1 { + collections.Wait() + } + + // first tx byte => sender address => tx rlp + if err := collect(valueBuf); err != nil { + return err + } + + collected = tx + collections.Broadcast() + + return nil + }) + return nil }); err != nil { return false, fmt.Errorf("ForAmount: %w", err) } - if err := addSystemTx(tx, body.BaseTxId+uint64(body.TxAmount)-1); err != nil { + if err := parsers.Wait(); err != nil { + return false, fmt.Errorf("ForAmount parser: %w", err) + } + + if err := addSystemTx(parseCtxs[0], tx, body.BaseTxId+uint64(body.TxAmount)-1); err != nil { return false, err } @@ -1803,8 +2097,8 @@ func DumpBodies(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, firs var EmptyTxHash = common2.Hash{} -func txsAmountBasedOnBodiesSnapshots(snapDir string, blockFrom, blockTo uint64) (firstTxID uint64, expectedCount int, err error) 
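
// ---- editor's note: explanatory sketch, not part of this diff ----
// DumpTxs above fans transaction parsing out to an errgroup but must still hand
// results to collect() in input order, which it enforces with a Mutex+Cond and a
// "collected" counter. A self-contained version of that ordered fan-out/fan-in
// (the diff additionally reuses per-worker buffers from bufPool, elided here;
// the failed flag keeps an error from deadlocking waiters, a detail worth adding
// in production code):
package sketch

import (
	"sync"

	"golang.org/x/sync/errgroup"
)

func parseOrdered(items [][]byte, workers int, parse func([]byte) ([]byte, error), collect func([]byte) error) error {
	g := errgroup.Group{}
	g.SetLimit(workers)

	mu := sync.Mutex{}
	cond := sync.NewCond(&mu)
	collected, failed := -1, false

	for i := range items {
		i := i // capture per iteration (required before Go 1.22)
		g.Go(func() error {
			out, err := parse(items[i]) // CPU-bound work, runs in parallel
			if err != nil {
				mu.Lock()
				failed = true
				cond.Broadcast() // unblock goroutines waiting on our slot
				mu.Unlock()
				return err
			}
			mu.Lock()
			defer mu.Unlock()
			for collected < i-1 && !failed {
				cond.Wait() // predecessor not collected yet
			}
			if failed {
				return nil // another item already failed; its error wins
			}
			if err := collect(out); err != nil {
				failed = true
				cond.Broadcast()
				return err
			}
			collected = i
			cond.Broadcast() // wake whoever is waiting on slot i+1
			return nil
		})
	}
	return g.Wait()
}
// ---- end note ----
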
{ - bodySegmentPath := filepath.Join(snapDir, snaptype.SegmentFileName(blockFrom, blockTo, snaptype.Bodies)) +func txsAmountBasedOnBodiesSnapshots(snapDir string, version uint8, blockFrom, blockTo uint64) (firstTxID uint64, expectedCount int, err error) { + bodySegmentPath := filepath.Join(snapDir, snaptype.SegmentFileName(version, blockFrom, blockTo, snaptype.Bodies)) bodiesSegment, err := compress.NewDecompressor(bodySegmentPath) if err != nil { return @@ -1840,25 +2134,25 @@ func txsAmountBasedOnBodiesSnapshots(snapDir string, blockFrom, blockTo uint64) return } -func TransactionsIdx(ctx context.Context, chainConfig *chain.Config, blockFrom, blockTo uint64, snapDir string, tmpDir string, p *background.Progress, lvl log.Lvl, logger log.Logger) (err error) { +func TransactionsIdx(ctx context.Context, chainConfig *chain.Config, version uint8, blockFrom, blockTo uint64, snapDir string, tmpDir string, p *background.Progress, lvl log.Lvl, logger log.Logger) (err error) { defer func() { if rec := recover(); rec != nil { err = fmt.Errorf("TransactionsIdx: at=%d-%d, %v, %s", blockFrom, blockTo, rec, dbg.Stack()) } }() firstBlockNum := blockFrom - firstTxID, expectedCount, err := txsAmountBasedOnBodiesSnapshots(snapDir, blockFrom, blockTo) + firstTxID, expectedCount, err := txsAmountBasedOnBodiesSnapshots(snapDir, version, blockFrom, blockTo) if err != nil { return err } - bodySegmentPath := filepath.Join(snapDir, snaptype.SegmentFileName(blockFrom, blockTo, snaptype.Bodies)) + bodySegmentPath := filepath.Join(snapDir, snaptype.SegmentFileName(version, blockFrom, blockTo, snaptype.Bodies)) bodiesSegment, err := compress.NewDecompressor(bodySegmentPath) if err != nil { return } defer bodiesSegment.Close() - segFileName := snaptype.SegmentFileName(blockFrom, blockTo, snaptype.Transactions) + segFileName := snaptype.SegmentFileName(version, blockFrom, blockTo, snaptype.Transactions) segmentFilePath := filepath.Join(snapDir, segFileName) d, err := compress.NewDecompressor(segmentFilePath) if err != nil { @@ -1868,8 +2162,11 @@ func TransactionsIdx(ctx context.Context, chainConfig *chain.Config, blockFrom, if d.Count() != expectedCount { return fmt.Errorf("TransactionsIdx: at=%d-%d, pre index building, expect: %d, got %d", blockFrom, blockTo, expectedCount, d.Count()) } - p.Name.Store(&segFileName) - p.Total.Store(uint64(d.Count() * 2)) + + if p != nil { + p.Name.Store(&segFileName) + p.Total.Store(uint64(d.Count() * 2)) + } txnHashIdx, err := recsplit.NewRecSplit(recsplit.RecSplitArgs{ KeyCount: d.Count(), @@ -1877,7 +2174,7 @@ func TransactionsIdx(ctx context.Context, chainConfig *chain.Config, blockFrom, BucketSize: 2000, LeafSize: 8, TmpDir: tmpDir, - IndexFile: filepath.Join(snapDir, snaptype.IdxFileName(blockFrom, blockTo, snaptype.Transactions.String())), + IndexFile: filepath.Join(snapDir, snaptype.IdxFileName(version, blockFrom, blockTo, snaptype.Transactions.String())), BaseDataID: firstTxID, EtlBufLimit: etl.BufferOptimalSize / 2, }, logger) @@ -1891,7 +2188,7 @@ func TransactionsIdx(ctx context.Context, chainConfig *chain.Config, blockFrom, BucketSize: 2000, LeafSize: 8, TmpDir: tmpDir, - IndexFile: filepath.Join(snapDir, snaptype.IdxFileName(blockFrom, blockTo, snaptype.Transactions2Block.String())), + IndexFile: filepath.Join(snapDir, snaptype.IdxFileName(version, blockFrom, blockTo, snaptype.Transactions2Block.String())), BaseDataID: firstBlockNum, EtlBufLimit: etl.BufferOptimalSize / 2, }, logger) @@ -1923,7 +2220,10 @@ RETRY: } for g.HasNext() { - p.Processed.Add(1) + if p != nil { + 
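
// ---- editor's note: explanatory sketch, not part of this diff ----
// TransactionsIdx, HeadersIdx and BodiesIdx all follow the same .idx recipe: one
// recsplit.RecSplit per index, every key added together with the offset of its
// value in the segment, then Build. This sketch shows that shape using only the
// RecSplitArgs fields visible in this diff; the real code also retries Build
// with a fresh salt on hash collision (the RETRY label above), elided here:
package sketch

import (
	"context"

	"github.com/ledgerwatch/erigon-lib/recsplit"
	"github.com/ledgerwatch/log/v3"
)

func buildIndexSketch(keyCount int, baseDataID uint64, idxPath, tmpDir string, logger log.Logger,
	forEachKey func(add func(key []byte, offset uint64) error) error) error {
	rs, err := recsplit.NewRecSplit(recsplit.RecSplitArgs{
		KeyCount:   keyCount,
		BucketSize: 2000,
		LeafSize:   8,
		TmpDir:     tmpDir,
		IndexFile:  idxPath,
		BaseDataID: baseDataID, // e.g. firstTxID, so lookups yield global tx numbers
	}, logger)
	if err != nil {
		return err
	}
	// feed every key (e.g. a tx hash) together with its segment offset
	if err := forEachKey(rs.AddKey); err != nil {
		return err
	}
	return rs.Build(context.Background())
}
// ---- end note ----
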
p.Processed.Add(1) + } + word, nextPos = g.Next(word[:0]) select { case <-ctx.Done(): @@ -1992,7 +2292,7 @@ RETRY: } // HeadersIdx - headerHash -> offset (analog of kv.HeaderNumber) -func HeadersIdx(ctx context.Context, chainConfig *chain.Config, segmentFilePath string, firstBlockNumInSegment uint64, tmpDir string, p *background.Progress, lvl log.Lvl, logger log.Logger) (err error) { +func HeadersIdx(ctx context.Context, segmentFilePath string, version uint8, firstBlockNumInSegment uint64, tmpDir string, p *background.Progress, lvl log.Lvl, logger log.Logger) (err error) { defer func() { if rec := recover(); rec != nil { _, fName := filepath.Split(segmentFilePath) @@ -2006,15 +2306,20 @@ func HeadersIdx(ctx context.Context, chainConfig *chain.Config, segmentFilePath } defer d.Close() - _, fname := filepath.Split(segmentFilePath) - p.Name.Store(&fname) - p.Total.Store(uint64(d.Count())) + if p != nil { + _, fname := filepath.Split(segmentFilePath) + p.Name.Store(&fname) + p.Total.Store(uint64(d.Count())) + } hasher := crypto.NewKeccakState() defer cryptopool.ReturnToPoolKeccak256(hasher) var h common2.Hash if err := Idx(ctx, d, firstBlockNumInSegment, tmpDir, log.LvlDebug, func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error { - p.Processed.Add(1) + if p != nil { + p.Processed.Add(1) + } + headerRlp := word[1:] hasher.Reset() hasher.Write(headerRlp) @@ -2045,12 +2350,16 @@ func BodiesIdx(ctx context.Context, segmentFilePath string, firstBlockNumInSegme } defer d.Close() - _, fname := filepath.Split(segmentFilePath) - p.Name.Store(&fname) - p.Total.Store(uint64(d.Count())) + if p != nil { + _, fname := filepath.Split(segmentFilePath) + p.Name.Store(&fname) + p.Total.Store(uint64(d.Count())) + } if err := Idx(ctx, d, firstBlockNumInSegment, tmpDir, log.LvlDebug, func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error { - p.Processed.Add(1) + if p != nil { + p.Processed.Add(1) + } n := binary.PutUvarint(num, i) if err := idx.AddKey(num[:n], offset); err != nil { return err @@ -2151,19 +2460,20 @@ type Merger struct { chainConfig *chain.Config chainDB kv.RoDB logger log.Logger + noFsync bool // fsync is enabled by default, but tests can manually disable } func NewMerger(tmpDir string, compressWorkers int, lvl log.Lvl, chainDB kv.RoDB, chainConfig *chain.Config, logger log.Logger) *Merger { return &Merger{tmpDir: tmpDir, compressWorkers: compressWorkers, lvl: lvl, chainDB: chainDB, chainConfig: chainConfig, logger: logger} } +func (m *Merger) DisableFsync() { m.noFsync = true } type Range struct { from, to uint64 } -func (r Range) From() uint64 { return r.from } -func (r Range) To() uint64 { return r.to } -func (r Range) IsRecent(max uint64) bool { return max-r.to < snaptype.Erigon2MergeLimit } +func (r Range) From() uint64 { return r.from } +func (r Range) To() uint64 { return r.to } type Ranges []Range @@ -2171,22 +2481,14 @@ func (r Ranges) String() string { return fmt.Sprintf("%d", r) } -var MergeSteps = []uint64{500_000, 100_000, 10_000} -var RecentMergeSteps = []uint64{100_000, 10_000} - func (m *Merger) FindMergeRanges(currentRanges []Range, maxBlockNum uint64) (toMerge []Range) { for i := len(currentRanges) - 1; i > 0; i-- { r := currentRanges[i] - isRecent := r.IsRecent(maxBlockNum) - mergeLimit, mergeSteps := uint64(snaptype.Erigon2MergeLimit), MergeSteps - if isRecent { - mergeLimit, mergeSteps = snaptype.Erigon2RecentMergeLimit, RecentMergeSteps - } - + mergeLimit := uint64(snaptype.Erigon2MergeLimit) if r.to-r.from >= mergeLimit { continue } - for _, span := 
range mergeSteps { + for _, span := range snaptype.MergeSteps { if r.to%span != 0 { continue } @@ -2232,7 +2534,7 @@ func (v *View) Bodies() []*BodySegment { return v.s.Bodies.segments } func (v *View) Txs() []*TxnSegment { return v.s.Txs.segments } func (v *View) HeadersSegment(blockNum uint64) (*HeaderSegment, bool) { for _, seg := range v.Headers() { - if !(blockNum >= seg.ranges.from && blockNum < seg.ranges.to) { + if !(blockNum >= seg.from && blockNum < seg.to) { continue } return seg, true @@ -2241,7 +2543,7 @@ func (v *View) HeadersSegment(blockNum uint64) (*HeaderSegment, bool) { } func (v *View) BodiesSegment(blockNum uint64) (*BodySegment, bool) { for _, seg := range v.Bodies() { - if !(blockNum >= seg.ranges.from && blockNum < seg.ranges.to) { + if !(blockNum >= seg.from && blockNum < seg.to) { continue } return seg, true @@ -2250,7 +2552,7 @@ func (v *View) BodiesSegment(blockNum uint64) (*BodySegment, bool) { } func (v *View) TxsSegment(blockNum uint64) (*TxnSegment, bool) { for _, seg := range v.Txs() { - if !(blockNum >= seg.ranges.from && blockNum < seg.ranges.to) { + if !(blockNum >= seg.from && blockNum < seg.to) { continue } return seg, true @@ -2268,10 +2570,10 @@ func (m *Merger) filesByRange(snapshots *RoSnapshots, from, to uint64) (map[snap tSegments := view.Txs() for i, sn := range hSegments { - if sn.ranges.from < from { + if sn.from < from { continue } - if sn.ranges.to > to { + if sn.to > to { break } toMerge[snaptype.Headers] = append(toMerge[snaptype.Headers], hSegments[i].seg.FilePath()) @@ -2296,11 +2598,12 @@ func (m *Merger) Merge(ctx context.Context, snapshots *RoSnapshots, mergeRanges } for _, t := range snaptype.BlockSnapshotTypes { - segName := snaptype.SegmentFileName(r.from, r.to, t) + segName := snaptype.SegmentFileName(snapshots.version, r.from, r.to, t) f, ok := snaptype.ParseFileName(snapDir, segName) if !ok { continue } + if err := m.merge(ctx, toMerge[t], f.Path, logEvery); err != nil { return fmt.Errorf("mergeByAppendSegments: %w", err) } @@ -2314,25 +2617,28 @@ func (m *Merger) Merge(ctx context.Context, snapshots *RoSnapshots, mergeRanges if err := snapshots.ReopenFolder(); err != nil { return fmt.Errorf("ReopenSegments: %w", err) } - snapshots.LogStat() - if err := onMerge(r); err != nil { - return err + snapshots.LogStat("merge") + + if onMerge != nil { + if err := onMerge(r); err != nil { + return err + } } + for _, t := range snaptype.BlockSnapshotTypes { if len(toMerge[t]) == 0 { continue } - if err := onDelete(toMerge[t]); err != nil { - return err + if onDelete != nil { + if err := onDelete(toMerge[t]); err != nil { + return err + } } - } - time.Sleep(1 * time.Second) // i working on blocking API - to ensure client does not use old snapsthos - and then delete them - for _, t := range snaptype.BlockSnapshotTypes { - m.removeOldFiles(toMerge[t], snapDir) + m.removeOldFiles(toMerge[t], snapDir, snapshots.Version()) } } - m.logger.Log(m.lvl, "[snapshots] Merge done", "from", mergeRanges[0].from) + m.logger.Log(m.lvl, "[snapshots] Merge done", "from", mergeRanges[0].from, "to", mergeRanges[0].to) return nil } @@ -2355,6 +2661,9 @@ func (m *Merger) merge(ctx context.Context, toMerge []string, targetFile string, return err } defer f.Close() + if m.noFsync { + f.DisableFsync() + } _, fName := filepath.Split(targetFile) m.logger.Debug("[snapshots] merge", "file", fName) @@ -2382,9 +2691,10 @@ func (m *Merger) merge(ctx context.Context, toMerge []string, targetFile string, return nil } -func (m *Merger) removeOldFiles(toDel []string, snapDir 
string) { +func (m *Merger) removeOldFiles(toDel []string, snapDir string, version uint8) { for _, f := range toDel { _ = os.Remove(f) + _ = os.Remove(f + ".torrent") ext := filepath.Ext(f) withoutExt := f[:len(f)-len(ext)] _ = os.Remove(withoutExt + ".idx") @@ -2393,7 +2703,7 @@ func (m *Merger) removeOldFiles(toDel []string, snapDir string) { _ = os.Remove(withoutExt + "-to-block.idx") } } - tmpFiles, err := snaptype.TmpFiles(snapDir) + tmpFiles, err := snaptype.TmpFiles(snapDir, version) if err != nil { return } diff --git a/turbo/snapshotsync/freezeblocks/block_snapshots_test.go b/turbo/snapshotsync/freezeblocks/block_snapshots_test.go index 46690935591..2cb17f77d80 100644 --- a/turbo/snapshotsync/freezeblocks/block_snapshots_test.go +++ b/turbo/snapshotsync/freezeblocks/block_snapshots_test.go @@ -19,10 +19,11 @@ import ( "github.com/ledgerwatch/erigon/params" ) -func createTestSegmentFile(t *testing.T, from, to uint64, name snaptype.Type, dir string, logger log.Logger) { - c, err := compress.NewCompressor(context.Background(), "test", filepath.Join(dir, snaptype.SegmentFileName(from, to, name)), dir, 100, 1, log.LvlDebug, logger) +func createTestSegmentFile(t *testing.T, from, to uint64, name snaptype.Type, dir string, version uint8, logger log.Logger) { + c, err := compress.NewCompressor(context.Background(), "test", filepath.Join(dir, snaptype.SegmentFileName(version, from, to, name)), dir, 100, 1, log.LvlDebug, logger) require.NoError(t, err) defer c.Close() + c.DisableFsync() err = c.AddWord([]byte{1}) require.NoError(t, err) err = c.Compress() @@ -31,11 +32,12 @@ func createTestSegmentFile(t *testing.T, from, to uint64, name snaptype.Type, di KeyCount: 1, BucketSize: 10, TmpDir: dir, - IndexFile: filepath.Join(dir, snaptype.IdxFileName(from, to, name.String())), + IndexFile: filepath.Join(dir, snaptype.IdxFileName(1, from, to, name.String())), LeafSize: 8, }, logger) require.NoError(t, err) defer idx.Close() + idx.DisableFsync() err = idx.AddKey([]byte{1}, 0) require.NoError(t, err) err = idx.Build(context.Background()) @@ -45,7 +47,7 @@ func createTestSegmentFile(t *testing.T, from, to uint64, name snaptype.Type, di KeyCount: 1, BucketSize: 10, TmpDir: dir, - IndexFile: filepath.Join(dir, snaptype.IdxFileName(from, to, snaptype.Transactions2Block.String())), + IndexFile: filepath.Join(dir, snaptype.IdxFileName(1, from, to, snaptype.Transactions2Block.String())), LeafSize: 8, }, logger) require.NoError(t, err) @@ -59,6 +61,7 @@ func createTestSegmentFile(t *testing.T, from, to uint64, name snaptype.Type, di func TestFindMergeRange(t *testing.T) { merger := NewMerger("x", 1, log.LvlInfo, nil, params.MainnetChainConfig, nil) + merger.DisableFsync() t.Run("big", func(t *testing.T) { var ranges []Range for i := 0; i < 24; i++ { @@ -66,12 +69,8 @@ func TestFindMergeRange(t *testing.T) { } found := merger.FindMergeRanges(ranges, uint64(24*100_000)) - expect := []Range{ - {0, 500_000}, - {500_000, 1_000_000}, - {1_000_000, 1_500_000}, - } - require.Equal(t, Ranges(expect).String(), Ranges(found).String()) + expect := Ranges{} + require.Equal(t, expect.String(), Ranges(found).String()) }) t.Run("small", func(t *testing.T) { @@ -80,35 +79,14 @@ func TestFindMergeRange(t *testing.T) { ranges = append(ranges, Range{from: uint64(i * 10_000), to: uint64((i + 1) * 10_000)}) } found := merger.FindMergeRanges(ranges, uint64(240*10_000)) - - expect := Ranges{ - {0, 500_000}, - {500_000, 1_000_000}, - {1_000_000, 1_500_000}, - {1_500_000, 1_600_000}, - {1_600_000, 1_700_000}, - {1_700_000, 
1_800_000}, - {1_800_000, 1_900_000}, - {1_900_000, 2_000_000}, - {2_000_000, 2_100_000}, - {2_100_000, 2_200_000}, - {2_200_000, 2_300_000}, - {2_300_000, 2_400_000}, + var expect Ranges + for i := uint64(0); i < 24; i++ { + expect = append(expect, Range{from: i * snaptype.Erigon2MergeLimit, to: (i + 1) * snaptype.Erigon2MergeLimit}) } require.Equal(t, expect.String(), Ranges(found).String()) }) - t.Run("IsRecent", func(t *testing.T) { - require.True(t, Range{500_000, 599_000}.IsRecent(1_000_000)) - require.True(t, Range{500_000, 501_000}.IsRecent(1_000_000)) - require.False(t, Range{499_000, 500_000}.IsRecent(1_000_000)) - require.False(t, Range{400_000, 500_000}.IsRecent(1_000_000)) - require.False(t, Range{400_000, 401_000}.IsRecent(1_000_000)) - - require.False(t, Range{500_000, 501_000}.IsRecent(1_100_000)) - }) - } func TestMergeSnapshots(t *testing.T) { @@ -116,55 +94,48 @@ func TestMergeSnapshots(t *testing.T) { dir, require := t.TempDir(), require.New(t) createFile := func(from, to uint64) { for _, snT := range snaptype.BlockSnapshotTypes { - createTestSegmentFile(t, from, to, snT, dir, logger) + createTestSegmentFile(t, from, to, snT, dir, 1, logger) } } - N := uint64(17) - createFile(0, snaptype.Erigon2MergeLimit) - for i := uint64(snaptype.Erigon2MergeLimit); i < snaptype.Erigon2MergeLimit+N*100_000; i += 100_000 { - createFile(i, i+100_000) + N := uint64(70) + for i := uint64(0); i < N; i++ { + createFile(i*10_000, (i+1)*10_000) } - s := NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, logger) + s := NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 1, logger) defer s.Close() require.NoError(s.ReopenFolder()) { merger := NewMerger(dir, 1, log.LvlInfo, nil, params.MainnetChainConfig, logger) + merger.DisableFsync() ranges := merger.FindMergeRanges(s.Ranges(), s.SegmentsMax()) require.True(len(ranges) > 0) - err := merger.Merge(context.Background(), s, ranges, s.Dir(), false, func(r Range) error { - return nil - }, func(l []string) error { - return nil - }) + err := merger.Merge(context.Background(), s, ranges, s.Dir(), false, nil, nil) require.NoError(err) } - expectedFileName := snaptype.SegmentFileName(500_000, 1_000_000, snaptype.Transactions) + expectedFileName := snaptype.SegmentFileName(1, 100_000, 200_000, snaptype.Transactions) d, err := compress.NewDecompressor(filepath.Join(dir, expectedFileName)) require.NoError(err) defer d.Close() a := d.Count() - require.Equal(5, a) + require.Equal(10, a) { merger := NewMerger(dir, 1, log.LvlInfo, nil, params.MainnetChainConfig, logger) + merger.DisableFsync() ranges := merger.FindMergeRanges(s.Ranges(), s.SegmentsMax()) require.True(len(ranges) == 0) - err := merger.Merge(context.Background(), s, ranges, s.Dir(), false, func(r Range) error { - return nil - }, func(l []string) error { - return nil - }) + err := merger.Merge(context.Background(), s, ranges, s.Dir(), false, nil, nil) require.NoError(err) } - expectedFileName = snaptype.SegmentFileName(1_800_000, 1_900_000, snaptype.Transactions) + expectedFileName = snaptype.SegmentFileName(1, 600_000, 700_000, snaptype.Transactions) d, err = compress.NewDecompressor(filepath.Join(dir, expectedFileName)) require.NoError(err) defer d.Close() a = d.Count() - require.Equal(1, a) + require.Equal(10, a) } func TestCanRetire(t *testing.T) { @@ -175,25 +146,25 @@ func TestCanRetire(t *testing.T) { }{ {0, 1234, 0, 1000, true}, {1_000_000, 1_120_000, 1_000_000, 1_100_000, true}, - {2_500_000, 4_100_000, 2_500_000, 3_000_000, true}, + {2_500_000, 4_100_000, 2_500_000, 
2_600_000, true}, {2_500_000, 2_500_100, 2_500_000, 2_500_000, false}, {1_001_000, 2_000_000, 1_001_000, 1_002_000, true}, } - for _, tc := range cases { + for i, tc := range cases { from, to, can := canRetire(tc.inFrom, tc.inTo) - require.Equal(int(tc.outFrom), int(from)) - require.Equal(int(tc.outTo), int(to)) - require.Equal(tc.can, can, tc.inFrom, tc.inTo) + require.Equal(int(tc.outFrom), int(from), i) + require.Equal(int(tc.outTo), int(to), i) + require.Equal(tc.can, can, tc.inFrom, tc.inTo, i) } } func TestOpenAllSnapshot(t *testing.T) { logger := log.New() dir, require := t.TempDir(), require.New(t) - chainSnapshotCfg := snapcfg.KnownCfg(networkname.MainnetChainName) + chainSnapshotCfg := snapcfg.KnownCfg(networkname.MainnetChainName, 0) chainSnapshotCfg.ExpectBlocks = math.MaxUint64 cfg := ethconfig.BlocksFreezing{Enabled: true} - createFile := func(from, to uint64, name snaptype.Type) { createTestSegmentFile(t, from, to, name, dir, logger) } - s := NewRoSnapshots(cfg, dir, logger) + createFile := func(from, to uint64, name snaptype.Type) { createTestSegmentFile(t, from, to, name, dir, 1, logger) } + s := NewRoSnapshots(cfg, dir, 1, logger) defer s.Close() err := s.ReopenFolder() require.NoError(err) @@ -201,14 +172,14 @@ func TestOpenAllSnapshot(t *testing.T) { s.Close() createFile(500_000, 1_000_000, snaptype.Bodies) - s = NewRoSnapshots(cfg, dir, logger) + s = NewRoSnapshots(cfg, dir, 1, logger) defer s.Close() require.Equal(0, len(s.Bodies.segments)) //because, no headers and transactions snapshot files are created s.Close() createFile(500_000, 1_000_000, snaptype.Headers) createFile(500_000, 1_000_000, snaptype.Transactions) - s = NewRoSnapshots(cfg, dir, logger) + s = NewRoSnapshots(cfg, dir, 1, logger) err = s.ReopenFolder() require.NoError(err) require.Equal(0, len(s.Headers.segments)) @@ -217,7 +188,7 @@ func TestOpenAllSnapshot(t *testing.T) { createFile(0, 500_000, snaptype.Bodies) createFile(0, 500_000, snaptype.Headers) createFile(0, 500_000, snaptype.Transactions) - s = NewRoSnapshots(cfg, dir, logger) + s = NewRoSnapshots(cfg, dir, 1, logger) defer s.Close() err = s.ReopenFolder() @@ -229,11 +200,11 @@ func TestOpenAllSnapshot(t *testing.T) { seg, ok := view.TxsSegment(10) require.True(ok) - require.Equal(int(seg.ranges.to), 500_000) + require.Equal(int(seg.to), 500_000) seg, ok = view.TxsSegment(500_000) require.True(ok) - require.Equal(int(seg.ranges.to), 1_000_000) + require.Equal(int(seg.to), 1_000_000) _, ok = view.TxsSegment(1_000_000) require.False(ok) @@ -241,7 +212,7 @@ func TestOpenAllSnapshot(t *testing.T) { // Erigon may create new snapshots by itself - with high bigger than hardcoded ExpectedBlocks // ExpectedBlocks - says only how much block must come from Torrent chainSnapshotCfg.ExpectBlocks = 500_000 - 1 - s = NewRoSnapshots(cfg, dir, logger) + s = NewRoSnapshots(cfg, dir, 1, logger) err = s.ReopenFolder() require.NoError(err) defer s.Close() @@ -251,7 +222,7 @@ func TestOpenAllSnapshot(t *testing.T) { createFile(500_000, 900_000, snaptype.Bodies) createFile(500_000, 900_000, snaptype.Transactions) chainSnapshotCfg.ExpectBlocks = math.MaxUint64 - s = NewRoSnapshots(cfg, dir, logger) + s = NewRoSnapshots(cfg, dir, 1, logger) defer s.Close() err = s.ReopenFolder() require.NoError(err) diff --git a/turbo/snapshotsync/freezeblocks/bor_snapshots.go b/turbo/snapshotsync/freezeblocks/bor_snapshots.go index 5492b99ba4d..91d181ba960 100644 --- a/turbo/snapshotsync/freezeblocks/bor_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/bor_snapshots.go @@ -7,7 
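
// ---- editor's note: explanatory sketch, not part of this diff ----
// A reconstruction of canRetire that is consistent with the updated
// TestCanRetire table above: retire the largest step-aligned chunk that still
// fits below `to`, with steps assumed to be 100k/10k/1k blocks (hence
// (2_500_000, 4_100_000) now yields 2_500_000-2_600_000 where the old test
// expected a 500k chunk). Documentation of expected behaviour, not the actual
// implementation:
package sketch

func canRetireSketch(from, to uint64) (blockFrom, blockTo uint64, can bool) {
	for _, step := range []uint64{100_000, 10_000, 1_000} {
		if from%step == 0 && from+step <= to {
			return from, from + step, true // e.g. (1_001_000, 2_000_000) -> 1_001_000-1_002_000
		}
	}
	return from, from, false // e.g. (2_500_000, 2_500_100): no aligned chunk fits yet
}
// ---- end note ----
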
+7,6 @@ import ( "errors" "fmt" "os" - "path" "path/filepath" "reflect" "runtime" @@ -15,6 +14,9 @@ import ( "sync/atomic" "time" + "github.com/ledgerwatch/log/v3" + "golang.org/x/exp/slices" + "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/chain/snapcfg" common2 "github.com/ledgerwatch/erigon-lib/common" @@ -28,23 +30,18 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/recsplit" "github.com/ledgerwatch/erigon/cmd/hack/tool/fromdb" + "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/turbo/services" - "github.com/ledgerwatch/log/v3" - "golang.org/x/exp/slices" -) - -const ( - spanLength = 6400 // Number of blocks in a span - zerothSpanEnd = 255 // End block of 0th span ) type BorEventSegment struct { seg *compress.Decompressor // value: event_rlp IdxBorTxnHash *recsplit.Index // bor_transaction_hash -> bor_event_segment_offset - ranges Range + Range + version uint8 } func (sn *BorEventSegment) closeIdx() { @@ -65,8 +62,8 @@ func (sn *BorEventSegment) close() { } func (sn *BorEventSegment) reopenSeg(dir string) (err error) { sn.closeSeg() - fileName := snaptype.SegmentFileName(sn.ranges.from, sn.ranges.to, snaptype.BorEvents) - sn.seg, err = compress.NewDecompressor(path.Join(dir, fileName)) + fileName := snaptype.SegmentFileName(sn.version, sn.from, sn.to, snaptype.BorEvents) + sn.seg, err = compress.NewDecompressor(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, fileName: %s", err, fileName) } @@ -78,8 +75,8 @@ func (sn *BorEventSegment) reopenIdx(dir string) (err error) { return nil } - fileName := snaptype.IdxFileName(sn.ranges.from, sn.ranges.to, snaptype.BorEvents.String()) - sn.IdxBorTxnHash, err = recsplit.OpenIndex(path.Join(dir, fileName)) + fileName := snaptype.IdxFileName(sn.version, sn.from, sn.to, snaptype.BorEvents.String()) + sn.IdxBorTxnHash, err = recsplit.OpenIndex(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, fileName: %s", err, fileName) } @@ -109,9 +106,10 @@ type borEventSegments struct { } type BorSpanSegment struct { - seg *compress.Decompressor // value: span_json - idx *recsplit.Index // span_id -> offset - ranges Range + seg *compress.Decompressor // value: span_json + idx *recsplit.Index // span_id -> offset + Range + version uint8 } func (sn *BorSpanSegment) closeIdx() { @@ -132,8 +130,8 @@ func (sn *BorSpanSegment) close() { } func (sn *BorSpanSegment) reopenSeg(dir string) (err error) { sn.closeSeg() - fileName := snaptype.SegmentFileName(sn.ranges.from, sn.ranges.to, snaptype.BorSpans) - sn.seg, err = compress.NewDecompressor(path.Join(dir, fileName)) + fileName := snaptype.SegmentFileName(sn.version, sn.from, sn.to, snaptype.BorSpans) + sn.seg, err = compress.NewDecompressor(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, fileName: %s", err, fileName) } @@ -144,8 +142,8 @@ func (sn *BorSpanSegment) reopenIdx(dir string) (err error) { if sn.seg == nil { return nil } - fileName := snaptype.IdxFileName(sn.ranges.from, sn.ranges.to, snaptype.BorSpans.String()) - sn.idx, err = recsplit.OpenIndex(path.Join(dir, fileName)) + fileName := snaptype.IdxFileName(sn.version, sn.from, sn.to, snaptype.BorSpans.String()) + sn.idx, err = recsplit.OpenIndex(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, fileName: %s", err, fileName) } @@ -174,28 +172,33 
@@ type borSpanSegments struct { segments []*BorSpanSegment } -func (br *BlockRetire) RetireBorBlocks(ctx context.Context, blockFrom, blockTo uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDelete func(l []string) error) error { +func (br *BlockRetire) retireBorBlocks(ctx context.Context, minBlockNum uint64, maxBlockNum uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDelete func(l []string) error) (bool, error) { chainConfig := fromdb.ChainConfig(br.db) notifier, logger, blockReader, tmpDir, db, workers := br.notifier, br.logger, br.blockReader, br.tmpDir, br.db, br.workers - logger.Log(lvl, "[bor snapshots] Retire Bor Blocks", "range", fmt.Sprintf("%dk-%dk", blockFrom/1000, blockTo/1000)) snapshots := br.borSnapshots() firstTxNum := blockReader.(*BlockReader).FirstTxNumNotInSnapshots() - - if err := DumpBorBlocks(ctx, chainConfig, blockFrom, blockTo, snaptype.Erigon2MergeLimit, tmpDir, snapshots.Dir(), firstTxNum, db, workers, lvl, logger, blockReader); err != nil { - return fmt.Errorf("DumpBorBlocks: %w", err) - } - if err := snapshots.ReopenFolder(); err != nil { - return fmt.Errorf("reopen: %w", err) - } - snapshots.LogStat() - if notifier != nil && !reflect.ValueOf(notifier).IsNil() { // notify about new snapshots of any size - notifier.OnNewSnapshot() + blockFrom, blockTo, ok := CanRetire(maxBlockNum, minBlockNum) + if ok { + logger.Log(lvl, "[bor snapshots] Retire Bor Blocks", "range", fmt.Sprintf("%dk-%dk", blockFrom/1000, blockTo/1000)) + if err := DumpBorBlocks(ctx, chainConfig, snapshots.version, blockFrom, blockTo, snaptype.Erigon2MergeLimit, tmpDir, snapshots.Dir(), firstTxNum, db, workers, lvl, logger, blockReader); err != nil { + return ok, fmt.Errorf("DumpBorBlocks: %w", err) + } + if err := snapshots.ReopenFolder(); err != nil { + return ok, fmt.Errorf("reopen: %w", err) + } + snapshots.LogStat("retire") + if notifier != nil && !reflect.ValueOf(notifier).IsNil() { // notify about new snapshots of any size + notifier.OnNewSnapshot() + } } + merger := NewBorMerger(tmpDir, workers, lvl, db, chainConfig, notifier, logger) - rangesToMerge := merger.FindMergeRanges(snapshots.Ranges(), snapshots.BlocksAvailable()) + rangesToMerge := merger.FindMergeRanges(snapshots.Ranges()) + logger.Warn("[bor snapshots] Retire Bor Blocks", "rangesToMerge", fmt.Sprintf("%s", Ranges(rangesToMerge))) if len(rangesToMerge) == 0 { - return nil + return ok, nil } + ok = true // have something to merge onMerge := func(r Range) error { if notifier != nil && !reflect.ValueOf(notifier).IsNil() { // notify about new snapshots of any size notifier.OnNewSnapshot() @@ -213,30 +216,29 @@ func (br *BlockRetire) RetireBorBlocks(ctx context.Context, blockFrom, blockTo u } err := merger.Merge(ctx, snapshots, rangesToMerge, snapshots.Dir(), true /* doIndex */, onMerge, onDelete) if err != nil { - return err + return ok, err } - return nil + return ok, nil } - -func DumpBorBlocks(ctx context.Context, chainConfig *chain.Config, blockFrom, blockTo, blocksPerFile uint64, tmpDir, snapDir string, firstTxNum uint64, chainDB kv.RoDB, workers int, lvl log.Lvl, logger log.Logger, blockReader services.FullBlockReader) error { +func DumpBorBlocks(ctx context.Context, chainConfig *chain.Config, version uint8, blockFrom, blockTo, blocksPerFile uint64, tmpDir, snapDir string, firstTxNum uint64, chainDB kv.RoDB, workers int, lvl log.Lvl, logger log.Logger, blockReader services.FullBlockReader) error { if blocksPerFile == 0 { return nil 
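
// ---- editor's note: explanatory sketch, not part of this diff ----
// retireBlocks and retireBorBlocks now return (bool, error), where the bool
// means "I dumped or merged something". RetireBlocks earlier in this diff loops
// over both until neither reports progress, driving retirement to a fixed
// point. The convention in isolation (illustrative, not an Erigon API):
package sketch

func retireToFixedPoint(steps ...func() (bool, error)) error {
	for {
		haveMore := false
		for _, step := range steps {
			ok, err := step()
			if err != nil {
				return err
			}
			haveMore = haveMore || ok // any step doing work earns another round
		}
		if !haveMore {
			return nil
		}
	}
}
// ---- end note ----
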
} for i := blockFrom; i < blockTo; i = chooseSegmentEnd(i, blockTo, blocksPerFile) { - if err := dumpBorBlocksRange(ctx, i, chooseSegmentEnd(i, blockTo, blocksPerFile), tmpDir, snapDir, firstTxNum, chainDB, *chainConfig, workers, lvl, logger, blockReader); err != nil { + if err := dumpBorBlocksRange(ctx, version, i, chooseSegmentEnd(i, blockTo, blocksPerFile), tmpDir, snapDir, firstTxNum, chainDB, *chainConfig, workers, lvl, logger, blockReader); err != nil { return err } } return nil } -func dumpBorBlocksRange(ctx context.Context, blockFrom, blockTo uint64, tmpDir, snapDir string, firstTxNum uint64, chainDB kv.RoDB, chainConfig chain.Config, workers int, lvl log.Lvl, logger log.Logger, blockReader services.FullBlockReader) error { +func dumpBorBlocksRange(ctx context.Context, version uint8, blockFrom, blockTo uint64, tmpDir, snapDir string, firstTxNum uint64, chainDB kv.RoDB, chainConfig chain.Config, workers int, lvl log.Lvl, logger log.Logger, blockReader services.FullBlockReader) error { logEvery := time.NewTicker(20 * time.Second) defer logEvery.Stop() { - segName := snaptype.SegmentFileName(blockFrom, blockTo, snaptype.BorEvents) + segName := snaptype.SegmentFileName(version, blockFrom, blockTo, snaptype.BorEvents) f, _ := snaptype.ParseFileName(snapDir, segName) sn, err := compress.NewCompressor(ctx, "Snapshot BorEvents", f.Path, tmpDir, compress.MinPatternScore, workers, log.LvlTrace, logger) @@ -259,7 +261,7 @@ func dumpBorBlocksRange(ctx context.Context, blockFrom, blockTo uint64, tmpDir, } } { - segName := snaptype.SegmentFileName(blockFrom, blockTo, snaptype.BorSpans) + segName := snaptype.SegmentFileName(version, blockFrom, blockTo, snaptype.BorSpans) f, _ := snaptype.ParseFileName(snapDir, segName) sn, err := compress.NewCompressor(ctx, "Snapshot BorSpans", f.Path, tmpDir, compress.MinPatternScore, workers, log.LvlTrace, logger) @@ -374,13 +376,8 @@ func DumpBorEvents(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, w func DumpBorSpans(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, workers int, lvl log.Lvl, logger log.Logger, collect func([]byte) error) error { logEvery := time.NewTicker(20 * time.Second) defer logEvery.Stop() - var spanFrom, spanTo uint64 - if blockFrom > zerothSpanEnd { - spanFrom = 1 + (blockFrom-zerothSpanEnd-1)/spanLength - } - if blockTo > zerothSpanEnd { - spanTo = 1 + (blockTo-zerothSpanEnd-1)/spanLength - } + spanFrom := span.IDAt(blockFrom) + spanTo := span.IDAt(blockTo) from := hexutility.EncodeTs(spanFrom) if err := kv.BigChunks(db, kv.BorSpans, from, func(tx kv.Tx, spanIdBytes, spanBytes []byte) (bool, error) { spanId := binary.BigEndian.Uint64(spanIdBytes) @@ -410,7 +407,7 @@ func DumpBorSpans(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, wo return nil } -func BorEventsIdx(ctx context.Context, segmentFilePath string, blockFrom, blockTo uint64, snapDir string, tmpDir string, p *background.Progress, lvl log.Lvl, logger log.Logger) (err error) { +func BorEventsIdx(ctx context.Context, segmentFilePath string, version uint8, blockFrom, blockTo uint64, snapDir string, tmpDir string, p *background.Progress, lvl log.Lvl, logger log.Logger) (err error) { defer func() { if rec := recover(); rec != nil { err = fmt.Errorf("BorEventsIdx: at=%d-%d, %v, %s", blockFrom, blockTo, rec, dbg.Stack()) @@ -444,7 +441,7 @@ func BorEventsIdx(ctx context.Context, segmentFilePath string, blockFrom, blockT default: } } - var idxFilePath = filepath.Join(snapDir, snaptype.IdxFileName(blockFrom, blockTo, snaptype.BorEvents.String())) + 
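
// ---- editor's note: explanatory sketch, not part of this diff ----
// DumpBorSpans above (and BorSpansIdx below) now call span.IDAt instead of
// inlining the span arithmetic. The constants this diff deletes pin the mapping
// down: spans are 6400 blocks long and span 0 ends at block 255. A
// reconstruction of the helper for readability; the real one lives in
// consensus/bor/heimdall/span:
package sketch

const (
	spanLength    = 6400 // number of blocks in a span
	zerothSpanEnd = 255  // last block of span 0
)

func spanIDAtSketch(blockNum uint64) uint64 {
	if blockNum <= zerothSpanEnd {
		return 0
	}
	// block 256 is the first block of span 1; block 6655 is its last
	return 1 + (blockNum-zerothSpanEnd-1)/spanLength
}
// ---- end note ----
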
var idxFilePath = filepath.Join(snapDir, snaptype.IdxFileName(version, blockFrom, blockTo, snaptype.BorEvents.String())) rs, err := recsplit.NewRecSplit(recsplit.RecSplitArgs{ KeyCount: blockCount, @@ -496,7 +493,7 @@ RETRY: return nil } -func BorSpansIdx(ctx context.Context, segmentFilePath string, blockFrom, blockTo uint64, snapDir string, tmpDir string, p *background.Progress, lvl log.Lvl, logger log.Logger) (err error) { +func BorSpansIdx(ctx context.Context, segmentFilePath string, version uint8, blockFrom, blockTo uint64, snapDir string, tmpDir string, p *background.Progress, lvl log.Lvl, logger log.Logger) (err error) { defer func() { if rec := recover(); rec != nil { err = fmt.Errorf("BorSpansIdx: at=%d-%d, %v, %s", blockFrom, blockTo, rec, dbg.Stack()) @@ -509,12 +506,9 @@ func BorSpansIdx(ctx context.Context, segmentFilePath string, blockFrom, blockTo } defer d.Close() g := d.MakeGetter() - var idxFilePath = filepath.Join(snapDir, snaptype.IdxFileName(blockFrom, blockTo, snaptype.BorSpans.String())) + var idxFilePath = filepath.Join(snapDir, snaptype.IdxFileName(version, blockFrom, blockTo, snaptype.BorSpans.String())) - var baseSpanId uint64 - if blockFrom > zerothSpanEnd { - baseSpanId = 1 + (blockFrom-zerothSpanEnd-1)/spanLength - } + baseSpanId := span.IDAt(blockFrom) rs, err := recsplit.NewRecSplit(recsplit.RecSplitArgs{ KeyCount: d.Count(), @@ -573,6 +567,9 @@ type BorRoSnapshots struct { idxMax atomic.Uint64 // all types of .idx files are available - up to this number cfg ethconfig.BlocksFreezing logger log.Logger + version uint8 + + segmentsMin atomic.Uint64 } // NewBorRoSnapshots - opens all bor snapshots. But to simplify everything: @@ -580,30 +577,33 @@ type BorRoSnapshots struct { // - all snapshots of given blocks range must exist - to make this blocks range available // - gaps are not allowed // - segment have [from:to) semantic -func NewBorRoSnapshots(cfg ethconfig.BlocksFreezing, snapDir string, logger log.Logger) *BorRoSnapshots { - return &BorRoSnapshots{dir: snapDir, cfg: cfg, Events: &borEventSegments{}, Spans: &borSpanSegments{}, logger: logger} +func NewBorRoSnapshots(cfg ethconfig.BlocksFreezing, snapDir string, version uint8, logger log.Logger) *BorRoSnapshots { + return &BorRoSnapshots{dir: snapDir, version: version, cfg: cfg, Events: &borEventSegments{}, Spans: &borSpanSegments{}, logger: logger} } +func (s *BorRoSnapshots) Version() uint8 { return s.version } func (s *BorRoSnapshots) Cfg() ethconfig.BlocksFreezing { return s.cfg } func (s *BorRoSnapshots) Dir() string { return s.dir } func (s *BorRoSnapshots) SegmentsReady() bool { return s.segmentsReady.Load() } func (s *BorRoSnapshots) IndicesReady() bool { return s.indicesReady.Load() } func (s *BorRoSnapshots) IndicesMax() uint64 { return s.idxMax.Load() } func (s *BorRoSnapshots) SegmentsMax() uint64 { return s.segmentsMax.Load() } +func (s *BorRoSnapshots) SegmentsMin() uint64 { return s.segmentsMin.Load() } +func (s *BorRoSnapshots) SetSegmentsMin(min uint64) { s.segmentsMin.Store(min) } func (s *BorRoSnapshots) BlocksAvailable() uint64 { return cmp.Min(s.segmentsMax.Load(), s.idxMax.Load()) } -func (s *BorRoSnapshots) LogStat() { +func (s *BorRoSnapshots) LogStat(label string) { var m runtime.MemStats dbg.ReadMemStats(&m) - s.logger.Info("[bor snapshots] Blocks Stat", + s.logger.Info(fmt.Sprintf("[bor snapshots:%s] Blocks Stat", label), "blocks", fmt.Sprintf("%dk", (s.SegmentsMax()+1)/1000), "indices", fmt.Sprintf("%dk", (s.IndicesMax()+1)/1000), "alloc", common2.ByteCount(m.Alloc), "sys", 
common2.ByteCount(m.Sys)) } -func BorSegments(dir string) (res []snaptype.FileInfo, missingSnapshots []Range, err error) { - list, err := snaptype.Segments(dir) +func BorSegments(dir string, version uint8, min uint64) (res []snaptype.FileInfo, missingSnapshots []Range, err error) { + list, err := snaptype.Segments(dir, version) if err != nil { return nil, missingSnapshots, err } @@ -616,7 +616,7 @@ func BorSegments(dir string) (res []snaptype.FileInfo, missingSnapshots []Range, } l = append(l, f) } - l, m = noGaps(noOverlaps(borSegmentsMustExist(dir, l))) + l, m = noGaps(noOverlaps(borSegmentsMustExist(dir, l)), min) res = append(res, l...) missingSnapshots = append(missingSnapshots, m...) } @@ -628,13 +628,75 @@ func BorSegments(dir string) (res []snaptype.FileInfo, missingSnapshots []Range, } l = append(l, f) } - l, _ = noGaps(noOverlaps(borSegmentsMustExist(dir, l))) + l, _ = noGaps(noOverlaps(borSegmentsMustExist(dir, l)), min) res = append(res, l...) } return res, missingSnapshots, nil } +// this is one off code to fix an issue in 2.49.x->2.52.x which missed +// removal of intermediate segments after a merge operation +func removeBorOverlaps(dir string, version uint8, active []snaptype.FileInfo, max uint64) { + list, err := snaptype.Segments(dir, version) + + if err != nil { + return + } + + var toDel []string + l := make([]snaptype.FileInfo, 0, len(list)) + + for _, f := range list { + if !(f.T == snaptype.BorSpans || f.T == snaptype.BorEvents) { + continue + } + l = append(l, f) + } + + // added overhead to make sure we don't delete in the + // current 500k block segment + if max > 500_001 { + max -= 500_001 + } + + for _, f := range l { + if max < f.From { + continue + } + + for _, a := range active { + if a.T != snaptype.BorSpans { + continue + } + + if f.From < a.From { + continue + } + + if f.From == a.From { + if f.To < a.To { + toDel = append(toDel, f.Path) + } + + break + } + + if f.From < a.To { + toDel = append(toDel, f.Path) + break + } + } + } + + for _, f := range toDel { + _ = os.Remove(f) + ext := filepath.Ext(f) + withoutExt := f[:len(f)-len(ext)] + _ = os.Remove(withoutExt + ".idx") + } +} + func (s *BorRoSnapshots) EnsureExpectedBlocksAreAvailable(cfg *snapcfg.Cfg) error { if s.BlocksAvailable() < cfg.ExpectBlocks { return fmt.Errorf("app must wait until all expected bor snapshots are available. 
Expected: %d, Available: %d", cfg.ExpectBlocks, s.BlocksAvailable()) @@ -701,13 +763,13 @@ func (s *BorRoSnapshots) idxAvailability() uint64 { if seg.IdxBorTxnHash == nil { break } - events = seg.ranges.to - 1 + events = seg.to - 1 } for _, seg := range s.Spans.segments { if seg.idx == nil { break } - spans = seg.ranges.to - 1 + spans = seg.to - 1 } return cmp.Min(events, spans) } @@ -735,7 +797,7 @@ func (s *BorRoSnapshots) Files() (list []string) { if seg.seg == nil { continue } - if seg.ranges.from > max { + if seg.from > max { continue } _, fName := filepath.Split(seg.seg.FilePath()) @@ -745,7 +807,7 @@ func (s *BorRoSnapshots) Files() (list []string) { if seg.seg == nil { continue } - if seg.ranges.from > max { + if seg.from > max { continue } _, fName := filepath.Split(seg.seg.FilePath()) @@ -789,7 +851,7 @@ Loop: } } if !exists { - sn = &BorEventSegment{ranges: Range{f.From, f.To}} + sn = &BorEventSegment{version: f.Version, Range: Range{f.From, f.To}} } if err := sn.reopenSeg(s.dir); err != nil { if errors.Is(err, os.ErrNotExist) { @@ -829,7 +891,7 @@ Loop: } } if !exists { - sn = &BorSpanSegment{ranges: Range{f.From, f.To}} + sn = &BorSpanSegment{version: f.Version, Range: Range{f.From, f.To}} } if err := sn.reopenSeg(s.dir); err != nil { if errors.Is(err, os.ErrNotExist) { @@ -883,7 +945,7 @@ func (s *BorRoSnapshots) Ranges() (ranges []Range) { defer view.Close() for _, sn := range view.Events() { - ranges = append(ranges, sn.ranges) + ranges = append(ranges, sn.Range) } return ranges } @@ -891,10 +953,15 @@ func (s *BorRoSnapshots) Ranges() (ranges []Range) { func (s *BorRoSnapshots) OptimisticalyReopenFolder() { _ = s.ReopenFolder() } func (s *BorRoSnapshots) OptimisticalyReopenWithDB(db kv.RoDB) { _ = s.ReopenWithDB(db) } func (s *BorRoSnapshots) ReopenFolder() error { - files, _, err := BorSegments(s.dir) + files, _, err := BorSegments(s.dir, s.version, s.segmentsMin.Load()) if err != nil { return err } + + // this is one-off code to fix an issue in 2.49.x->2.52.x which missed + // removal of intermediate segments after a merge operation + removeBorOverlaps(s.dir, s.version, files, s.BlocksAvailable()) + list := make([]string, 0, len(files)) for _, f := range files { _, fName := filepath.Split(f.Path) @@ -982,11 +1049,11 @@ func (s *BorRoSnapshots) PrintDebug() { defer s.Spans.lock.RUnlock() fmt.Println(" == BorSnapshots, Event") for _, sn := range s.Events.segments { - fmt.Printf("%d, %t\n", sn.ranges.from, sn.IdxBorTxnHash == nil) + fmt.Printf("%d, %t\n", sn.from, sn.IdxBorTxnHash == nil) } fmt.Println(" == BorSnapshots, Span") for _, sn := range s.Spans.segments { - fmt.Printf("%d, %t\n", sn.ranges.from, sn.idx == nil) + fmt.Printf("%d, %t\n", sn.from, sn.idx == nil) } } @@ -1014,7 +1081,7 @@ func (v *BorView) Events() []*BorEventSegment { return v.s.Events.segments } func (v *BorView) Spans() []*BorSpanSegment { return v.s.Spans.segments } func (v *BorView) EventsSegment(blockNum uint64) (*BorEventSegment, bool) { for _, seg := range v.Events() { - if !(blockNum >= seg.ranges.from && blockNum < seg.ranges.to) { + if !(blockNum >= seg.from && blockNum < seg.to) { continue } return seg, true @@ -1023,7 +1090,7 @@ func (v *BorView) EventsSegment(blockNum uint64) (*BorEventSegment, bool) { } func (v *BorView) SpansSegment(blockNum uint64) (*BorSpanSegment, bool) { for _, seg := range v.Spans() { - if !(blockNum >= seg.ranges.from && blockNum < seg.ranges.to) { + if !(blockNum >= seg.from && blockNum < seg.to) { continue } return seg, true @@ -1045,19 +1112,14 @@ func
NewBorMerger(tmpDir string, compressWorkers int, lvl log.Lvl, chainDB kv.Ro return &BorMerger{tmpDir: tmpDir, compressWorkers: compressWorkers, lvl: lvl, chainDB: chainDB, chainConfig: chainConfig, notifier: notifier, logger: logger} } -func (m *BorMerger) FindMergeRanges(currentRanges []Range, maxBlockNum uint64) (toMerge []Range) { +func (m *BorMerger) FindMergeRanges(currentRanges []Range) (toMerge []Range) { for i := len(currentRanges) - 1; i > 0; i-- { r := currentRanges[i] - isRecent := r.IsRecent(maxBlockNum) - mergeLimit, mergeSteps := uint64(snaptype.Erigon2RecentMergeLimit), MergeSteps - if isRecent { - mergeLimit, mergeSteps = snaptype.Erigon2MergeLimit, RecentMergeSteps - } - + mergeLimit := uint64(snaptype.Erigon2MergeLimit) if r.to-r.from >= mergeLimit { continue } - for _, span := range mergeSteps { + for _, span := range snaptype.MergeSteps { if r.to%span != 0 { continue } @@ -1085,10 +1147,10 @@ func (m *BorMerger) filesByRange(snapshots *BorRoSnapshots, from, to uint64) (ma sSegments := view.Spans() for i, sn := range eSegments { - if sn.ranges.from < from { + if sn.from < from { continue } - if sn.ranges.to > to { + if sn.to > to { break } toMerge[snaptype.BorEvents] = append(toMerge[snaptype.BorEvents], eSegments[i].seg.FilePath()) @@ -1111,8 +1173,8 @@ func (m *BorMerger) Merge(ctx context.Context, snapshots *BorRoSnapshots, mergeR return err } - for _, t := range []snaptype.Type{snaptype.BorEvents, snaptype.BorSpans} { - segName := snaptype.SegmentFileName(r.from, r.to, t) + for _, t := range snaptype.BorSnapshotTypes { + segName := snaptype.SegmentFileName(snapshots.Version(), r.from, r.to, t) f, ok := snaptype.ParseFileName(snapDir, segName) if !ok { continue @@ -1130,20 +1192,24 @@ func (m *BorMerger) Merge(ctx context.Context, snapshots *BorRoSnapshots, mergeR if err := snapshots.ReopenFolder(); err != nil { return fmt.Errorf("ReopenSegments: %w", err) } - snapshots.LogStat() + snapshots.LogStat("merge") if err := onMerge(r); err != nil { return err } - for _, t := range snaptype.BlockSnapshotTypes { + + for _, t := range snaptype.BorSnapshotTypes { if len(toMerge[t]) == 0 { continue } + if err := onDelete(toMerge[t]); err != nil { return err } + } - for _, t := range []snaptype.Type{snaptype.BorEvents, snaptype.BorSpans} { - m.removeOldFiles(toMerge[t], snapDir) + time.Sleep(1 * time.Second) // I'm working on a blocking API - to ensure clients do not use old snapshots - and then delete them + for _, t := range snaptype.BorSnapshotTypes { + m.removeOldFiles(toMerge[t], snapDir, snapshots.Version()) } } m.logger.Log(m.lvl, "[bor snapshots] Merge done", "from", mergeRanges[0].from, "to", mergeRanges[0].to) @@ -1193,14 +1259,14 @@ func (m *BorMerger) merge(ctx context.Context, toMerge []string, targetFile stri return nil } -func (m *BorMerger) removeOldFiles(toDel []string, snapDir string) { +func (m *BorMerger) removeOldFiles(toDel []string, snapDir string, version uint8) { for _, f := range toDel { _ = os.Remove(f) ext := filepath.Ext(f) withoutExt := f[:len(f)-len(ext)] _ = os.Remove(withoutExt + ".idx") } - tmpFiles, err := snaptype.TmpFiles(snapDir) + tmpFiles, err := snaptype.TmpFiles(snapDir, version) if err != nil { return } diff --git a/turbo/snapshotsync/freezeblocks/caplin_snapshots.go b/turbo/snapshotsync/freezeblocks/caplin_snapshots.go index 7862c85ad07..ee7d2e70284 100644 --- a/turbo/snapshotsync/freezeblocks/caplin_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/caplin_snapshots.go @@ -7,11 +7,11 @@ import ( "errors" "fmt" "os" - "path"
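// Note: with the "path" import removed, the hunks below switch path.Join to
// filepath.Join. path.Join always joins with '/', while filepath.Join uses
// the OS-specific separator, which is the correct choice for filesystem
// paths (notably on Windows).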
"path/filepath" "sync" "sync/atomic" + "github.com/klauspost/compress/zstd" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/background" "github.com/ledgerwatch/erigon-lib/common/cmp" @@ -20,18 +20,19 @@ import ( "github.com/ledgerwatch/erigon-lib/downloader/snaptype" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/recsplit" + "github.com/ledgerwatch/erigon/cl/clparams" "github.com/ledgerwatch/erigon/cl/cltypes" "github.com/ledgerwatch/erigon/cl/persistence" "github.com/ledgerwatch/erigon/cl/persistence/format/snapshot_format" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/log/v3" - "github.com/pierrec/lz4" ) type BeaconBlockSegment struct { seg *compress.Decompressor // value: chunked(ssz(SignedBeaconBlocks)) idxSlot *recsplit.Index // slot -> beacon_slot_segment_offset ranges Range + version uint8 } func (sn *BeaconBlockSegment) closeIdx() { @@ -52,8 +53,8 @@ func (sn *BeaconBlockSegment) close() { } func (sn *BeaconBlockSegment) reopenSeg(dir string) (err error) { sn.closeSeg() - fileName := snaptype.SegmentFileName(sn.ranges.from, sn.ranges.to, snaptype.BeaconBlocks) - sn.seg, err = compress.NewDecompressor(path.Join(dir, fileName)) + fileName := snaptype.SegmentFileName(sn.version, sn.ranges.from, sn.ranges.to, snaptype.BeaconBlocks) + sn.seg, err = compress.NewDecompressor(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, fileName: %s", err, fileName) } @@ -81,8 +82,8 @@ func (sn *BeaconBlockSegment) reopenIdx(dir string) (err error) { if sn.seg == nil { return nil } - fileName := snaptype.IdxFileName(sn.ranges.from, sn.ranges.to, snaptype.BeaconBlocks.String()) - sn.idxSlot, err = recsplit.OpenIndex(path.Join(dir, fileName)) + fileName := snaptype.IdxFileName(sn.version, sn.ranges.from, sn.ranges.to, snaptype.BeaconBlocks.String()) + sn.idxSlot, err = recsplit.OpenIndex(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, fileName: %s", err, fileName) } @@ -145,6 +146,11 @@ type CaplinSnapshots struct { idxMax atomic.Uint64 // all types of .idx files are available - up to this number cfg ethconfig.BlocksFreezing logger log.Logger + // allows for pruning segments - this is the min availible segment + segmentsMin atomic.Uint64 + version uint8 + // chain cfg + beaconCfg *clparams.BeaconChainConfig } // NewCaplinSnapshots - opens all snapshots. 
But to simplify everything: @@ -152,10 +158,11 @@ type CaplinSnapshots struct { // - all snapshots of given blocks range must exist - to make this blocks range available // - gaps are not allowed // - segment have [from:to) semantic -func NewCaplinSnapshots(cfg ethconfig.BlocksFreezing, snapDir string, logger log.Logger) *CaplinSnapshots { - return &CaplinSnapshots{dir: snapDir, cfg: cfg, BeaconBlocks: &beaconBlockSegments{}, logger: logger} +func NewCaplinSnapshots(cfg ethconfig.BlocksFreezing, beaconCfg *clparams.BeaconChainConfig, snapDir string, version uint8, logger log.Logger) *CaplinSnapshots { + return &CaplinSnapshots{dir: snapDir, version: version, cfg: cfg, BeaconBlocks: &beaconBlockSegments{}, logger: logger, beaconCfg: beaconCfg} } +func (s *CaplinSnapshots) Version() uint8 { return s.version } func (s *CaplinSnapshots) IndicesMax() uint64 { return s.idxMax.Load() } func (s *CaplinSnapshots) SegmentsMax() uint64 { return s.segmentsMax.Load() } @@ -204,7 +211,7 @@ Loop: } } if !exists { - sn = &BeaconBlockSegment{ranges: Range{f.From, f.To}} + sn = &BeaconBlockSegment{version: s.version, ranges: Range{f.From, f.To}} } if err := sn.reopenSeg(s.dir); err != nil { if errors.Is(err, os.ErrNotExist) { @@ -263,7 +270,7 @@ func (s *CaplinSnapshots) idxAvailability() uint64 { } func (s *CaplinSnapshots) ReopenFolder() error { - files, _, err := SegmentsCaplin(s.dir) + files, _, err := SegmentsCaplin(s.dir, s.version, s.segmentsMin.Load()) if err != nil { return err } @@ -335,8 +342,8 @@ func (v *CaplinView) BeaconBlocksSegment(slot uint64) (*BeaconBlockSegment, bool return nil, false } -func dumpBeaconBlocksRange(ctx context.Context, db kv.RoDB, b persistence.BlockSource, fromSlot uint64, toSlot uint64, tmpDir, snapDir string, workers int, lvl log.Lvl, logger log.Logger) error { - segName := snaptype.SegmentFileName(fromSlot, toSlot, snaptype.BeaconBlocks) +func dumpBeaconBlocksRange(ctx context.Context, db kv.RoDB, b persistence.BlockSource, version uint8, fromSlot uint64, toSlot uint64, tmpDir, snapDir string, workers int, lvl log.Lvl, logger log.Logger) error { + segName := snaptype.SegmentFileName(version, fromSlot, toSlot, snaptype.BeaconBlocks) f, _ := snaptype.ParseFileName(snapDir, segName) sn, err := compress.NewCompressor(ctx, "Snapshot BeaconBlocks", f.Path, tmpDir, compress.MinPatternScore, workers, lvl, logger) @@ -351,8 +358,11 @@ func dumpBeaconBlocksRange(ctx context.Context, db kv.RoDB, b persistence.BlockS } defer tx.Rollback() var w bytes.Buffer - lzWriter := lz4.NewWriter(&w) - defer lzWriter.Close() + compressor, err := zstd.NewWriter(&w, zstd.WithEncoderLevel(zstd.SpeedBetterCompression)) + if err != nil { + return err + } + defer compressor.Close() // Just make a reusable buffer buf := make([]byte, 2048) // Generate .seg file, which is just the list of beacon blocks. 
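The dump loop continued below reuses a single zstd encoder for every block: write the block, finalize the frame with Close (the old lz4 code used Flush, but a zstd frame is only complete once closed), take the bytes, then re-arm the encoder with Reset. A minimal standalone sketch of that reuse pattern with github.com/klauspost/compress/zstd (illustrative only, not part of the patch):

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var w bytes.Buffer
	// Same encoder option the patch uses: better ratio for some extra CPU.
	enc, err := zstd.NewWriter(&w, zstd.WithEncoderLevel(zstd.SpeedBetterCompression))
	if err != nil {
		panic(err)
	}
	for _, payload := range []string{"block one", "block two"} {
		if _, err := enc.Write([]byte(payload)); err != nil {
			panic(err)
		}
		// Close finalizes the zstd frame; only now does w hold a complete,
		// independently decodable unit (the patch closes before AddWord).
		if err := enc.Close(); err != nil {
			panic(err)
		}
		fmt.Printf("%q -> %d compressed bytes\n", payload, w.Len())
		w.Reset()     // drop the emitted frame
		enc.Reset(&w) // re-arm the encoder for the next frame
	}
}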
@@ -365,18 +375,16 @@ func dumpBeaconBlocksRange(ctx context.Context, db kv.RoDB, b persistence.BlockS logger.Log(lvl, "Dumping beacon blocks", "progress", i) } if obj == nil { - if err := sn.AddWord(nil); err != nil { return err } continue } - lzWriter.Reset(&w) - lzWriter.CompressionLevel = 1 - if buf, err = snapshot_format.WriteBlockForSnapshot(lzWriter, obj.Data, buf); err != nil { + + if buf, err = snapshot_format.WriteBlockForSnapshot(compressor, obj.Data, buf); err != nil { return err } - if err := lzWriter.Flush(); err != nil { + if err := compressor.Close(); err != nil { return err } word := w.Bytes() @@ -385,6 +393,7 @@ func dumpBeaconBlocksRange(ctx context.Context, db kv.RoDB, b persistence.BlockS return err } w.Reset() + compressor.Reset(&w) } if err := sn.Compress(); err != nil { return fmt.Errorf("compress: %w", err) @@ -392,10 +401,10 @@ func dumpBeaconBlocksRange(ctx context.Context, db kv.RoDB, b persistence.BlockS // Generate .idx file, which is the slot => offset mapping. p := &background.Progress{} - return BeaconBlocksIdx(ctx, f, path.Join(snapDir, segName), fromSlot, toSlot, tmpDir, p, lvl, logger) + return BeaconBlocksIdx(ctx, f, filepath.Join(snapDir, segName), fromSlot, toSlot, tmpDir, p, lvl, logger) } -func DumpBeaconBlocks(ctx context.Context, db kv.RoDB, b persistence.BlockSource, fromSlot, toSlot, blocksPerFile uint64, tmpDir, snapDir string, workers int, lvl log.Lvl, logger log.Logger) error { +func DumpBeaconBlocks(ctx context.Context, db kv.RoDB, b persistence.BlockSource, version uint8, fromSlot, toSlot, blocksPerFile uint64, tmpDir, snapDir string, workers int, lvl log.Lvl, logger log.Logger) error { if blocksPerFile == 0 { return nil } @@ -406,7 +415,7 @@ func DumpBeaconBlocks(ctx context.Context, db kv.RoDB, b persistence.BlockSource } to := chooseSegmentEnd(i, toSlot, blocksPerFile) logger.Log(lvl, "Dumping beacon blocks", "from", i, "to", to) - if err := dumpBeaconBlocksRange(ctx, db, b, i, to, tmpDir, snapDir, workers, lvl, logger); err != nil { + if err := dumpBeaconBlocksRange(ctx, db, b, version, i, to, tmpDir, snapDir, workers, lvl, logger); err != nil { return err } } @@ -419,7 +428,7 @@ func (s *CaplinSnapshots) BuildMissingIndices(ctx context.Context, logger log.Lo // } // wait for Downloader service to download all expected snapshots - segments, _, err := SegmentsCaplin(s.dir) + segments, _, err := SegmentsCaplin(s.dir, s.version, 0) if err != nil { return err } @@ -473,10 +482,10 @@ func (s *CaplinSnapshots) ReadHeader(slot uint64) (*cltypes.SignedBeaconBlockHea buffer.Reset() buffer.Write(buf) - lzReader := lz4ReaderPool.Get().(*lz4.Reader) - defer lz4ReaderPool.Put(lzReader) - lzReader.Reset(buffer) + reader := decompressorPool.Get().(*zstd.Decoder) + defer decompressorPool.Put(reader) + reader.Reset(buffer) // Use pooled buffers and readers to avoid allocations. 
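decompressorPool is used here, but its declaration sits outside this hunk; presumably it replaces the old lz4ReaderPool with a sync.Pool of *zstd.Decoder. A plausible shape for it, assuming klauspost/compress (the name comes from the usage above; the body is an assumption, not the patch's actual declaration):

import (
	"sync"

	"github.com/klauspost/compress/zstd"
)

// Hypothetical pool definition, mirroring how ReadHeader uses it above.
var decompressorPool = sync.Pool{
	New: func() any {
		// A decoder built with a nil reader is inert until Reset(r)
		// points it at a stream.
		d, err := zstd.NewReader(nil)
		if err != nil {
			panic(err)
		}
		return d
	},
}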
- return snapshot_format.ReadBlockHeaderFromSnapshotWithExecutionData(lzReader) + return snapshot_format.ReadBlockHeaderFromSnapshotWithExecutionData(reader, s.beaconCfg) } diff --git a/turbo/snapshotsync/freezeblocks/dump_test.go b/turbo/snapshotsync/freezeblocks/dump_test.go index 5136f711534..734e7a4728a 100644 --- a/turbo/snapshotsync/freezeblocks/dump_test.go +++ b/turbo/snapshotsync/freezeblocks/dump_test.go @@ -236,10 +236,10 @@ func TestDump(t *testing.T) { logger := log.New() tmpDir, snapDir := t.TempDir(), t.TempDir() - snConfig := snapcfg.KnownCfg(networkname.MainnetChainName) + snConfig := snapcfg.KnownCfg(networkname.MainnetChainName, 0) snConfig.ExpectBlocks = math.MaxUint64 - err := freezeblocks.DumpBlocks(m.Ctx, 0, uint64(test.chainSize), uint64(test.chainSize), tmpDir, snapDir, 0, m.DB, 1, log.LvlInfo, logger, m.BlockReader) + err := freezeblocks.DumpBlocks(m.Ctx, 1, 0, uint64(test.chainSize), uint64(test.chainSize), tmpDir, snapDir, 0, m.DB, 1, log.LvlInfo, logger, m.BlockReader) require.NoError(err) }) } diff --git a/turbo/snapshotsync/snapshotsync.go b/turbo/snapshotsync/snapshotsync.go index 0c9d7663db5..b55104250c3 100644 --- a/turbo/snapshotsync/snapshotsync.go +++ b/turbo/snapshotsync/snapshotsync.go @@ -67,7 +67,7 @@ func RequestSnapshotsDownload(ctx context.Context, downloadRequest []services.Do // WaitForDownloader - wait for Downloader service to download all expected snapshots // for MVP we sync with Downloader only once, in future will send new snapshots also -func WaitForDownloader(logPrefix string, ctx context.Context, histV3 bool, caplin CaplinMode, agg *state.AggregatorV3, tx kv.RwTx, blockReader services.FullBlockReader, cc *chain.Config, snapshotDownloader proto_downloader.DownloaderClient) error { +func WaitForDownloader(ctx context.Context, logPrefix string, histV3 bool, caplin CaplinMode, agg *state.AggregatorV3, tx kv.RwTx, blockReader services.FullBlockReader, cc *chain.Config, snapshotDownloader proto_downloader.DownloaderClient) error { snapshots := blockReader.Snapshots() borSnapshots := blockReader.BorSnapshots() if blockReader.FreezingCfg().NoDownloader { @@ -87,7 +87,9 @@ func WaitForDownloader(logPrefix string, ctx context.Context, histV3 bool, capli // - Erigon "download once": means restart/upgrade/downgrade must not download files (and will be fast) // - After "download once" - Erigon will produce and seed new files - preverifiedBlockSnapshots := snapcfg.KnownCfg(cc.ChainName).Preverified + // send all hashes to the Downloader service + snapCfg := snapcfg.KnownCfg(cc.ChainName, 0) + preverifiedBlockSnapshots := snapCfg.Preverified downloadRequest := make([]services.DownloadRequest, 0, len(preverifiedBlockSnapshots)) // build all download requests diff --git a/turbo/stages/blockchain_test.go b/turbo/stages/blockchain_test.go index 72052da663d..3b20f792c1e 100644 --- a/turbo/stages/blockchain_test.go +++ b/turbo/stages/blockchain_test.go @@ -32,8 +32,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/ledgerwatch/erigon-lib/chain" - chain2 "github.com/ledgerwatch/erigon-lib/chain" + libchain "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/kv" @@ -616,7 +615,7 @@ func TestEIP155Transition(t *testing.T) { funds = big.NewInt(1000000000) deleteAddr = libcommon.Address{1} gspec = &types.Genesis{ - Config: &chain.Config{ChainID: big.NewInt(1), TangerineWhistleBlock: 
big.NewInt(0), SpuriousDragonBlock: big.NewInt(2), HomesteadBlock: new(big.Int)}, + Config: &libchain.Config{ChainID: big.NewInt(1), TangerineWhistleBlock: big.NewInt(0), SpuriousDragonBlock: big.NewInt(2), HomesteadBlock: new(big.Int)}, Alloc: types.GenesisAlloc{address: {Balance: funds}, deleteAddr: {Balance: new(big.Int)}}, } ) @@ -689,7 +688,7 @@ func TestEIP155Transition(t *testing.T) { } // generate an invalid chain id transaction - config := &chain2.Config{ChainID: big.NewInt(2), TangerineWhistleBlock: big.NewInt(0), SpuriousDragonBlock: big.NewInt(2), HomesteadBlock: new(big.Int)} + config := &libchain.Config{ChainID: big.NewInt(2), TangerineWhistleBlock: big.NewInt(0), SpuriousDragonBlock: big.NewInt(2), HomesteadBlock: new(big.Int)} chain, chainErr = core.GenerateChain(config, chain.TopBlock, m.Engine, m.DB, 4, func(i int, block *core.BlockGen) { var ( basicTx = func(signer types.Signer) (types.Transaction, error) { @@ -741,7 +740,7 @@ func doModesTest(t *testing.T, pm prune.Mode) error { funds = big.NewInt(1000000000) deleteAddr = libcommon.Address{1} gspec = &types.Genesis{ - Config: &chain.Config{ChainID: big.NewInt(1), TangerineWhistleBlock: big.NewInt(0), SpuriousDragonBlock: big.NewInt(2), HomesteadBlock: new(big.Int)}, + Config: &libchain.Config{ChainID: big.NewInt(1), TangerineWhistleBlock: big.NewInt(0), SpuriousDragonBlock: big.NewInt(2), HomesteadBlock: new(big.Int)}, Alloc: types.GenesisAlloc{address: {Balance: funds}, deleteAddr: {Balance: new(big.Int)}}, } ) @@ -959,7 +958,7 @@ func TestEIP161AccountRemoval(t *testing.T) { funds = big.NewInt(1000000000) theAddr = libcommon.Address{1} gspec = &types.Genesis{ - Config: &chain.Config{ + Config: &libchain.Config{ ChainID: big.NewInt(1), HomesteadBlock: new(big.Int), TangerineWhistleBlock: new(big.Int), diff --git a/turbo/stages/genesis_test.go b/turbo/stages/genesis_test.go index 67e685eb366..c94b72ee15b 100644 --- a/turbo/stages/genesis_test.go +++ b/turbo/stages/genesis_test.go @@ -87,7 +87,7 @@ func TestSetupGenesis(t *testing.T) { { name: "custom block in DB, genesis == nil", fn: func(db kv.RwDB) (*chain.Config, *types.Block, error) { - core.MustCommitGenesis(&customg, db, tmpdir) + core.MustCommitGenesis(&customg, db, tmpdir, logger) return core.CommitGenesisBlock(db, nil, tmpdir, logger) }, wantHash: customghash, @@ -96,7 +96,7 @@ func TestSetupGenesis(t *testing.T) { { name: "custom block in DB, genesis == sepolia", fn: func(db kv.RwDB) (*chain.Config, *types.Block, error) { - core.MustCommitGenesis(&customg, db, tmpdir) + core.MustCommitGenesis(&customg, db, tmpdir, logger) return core.CommitGenesisBlock(db, core.SepoliaGenesisBlock(), tmpdir, logger) }, wantErr: &types.GenesisMismatchError{Stored: customghash, New: params.SepoliaGenesisHash}, @@ -106,7 +106,7 @@ func TestSetupGenesis(t *testing.T) { { name: "custom block in DB, genesis == bor-mainnet", fn: func(db kv.RwDB) (*chain.Config, *types.Block, error) { - core.MustCommitGenesis(&customg, db, tmpdir) + core.MustCommitGenesis(&customg, db, tmpdir, logger) return core.CommitGenesisBlock(db, core.BorMainnetGenesisBlock(), tmpdir, logger) }, wantErr: &types.GenesisMismatchError{Stored: customghash, New: params.BorMainnetGenesisHash}, @@ -116,7 +116,7 @@ func TestSetupGenesis(t *testing.T) { { name: "custom block in DB, genesis == mumbai", fn: func(db kv.RwDB) (*chain.Config, *types.Block, error) { - core.MustCommitGenesis(&customg, db, tmpdir) + core.MustCommitGenesis(&customg, db, tmpdir, logger) return core.CommitGenesisBlock(db, core.MumbaiGenesisBlock(), 
tmpdir, logger) }, wantErr: &types.GenesisMismatchError{Stored: customghash, New: params.MumbaiGenesisHash}, @@ -126,7 +126,7 @@ func TestSetupGenesis(t *testing.T) { { name: "custom block in DB, genesis == amoy", fn: func(db kv.RwDB) (*chain.Config, *types.Block, error) { - core.MustCommitGenesis(&customg, db, tmpdir) + core.MustCommitGenesis(&customg, db, tmpdir, logger) return core.CommitGenesisBlock(db, core.AmoyGenesisBlock(), tmpdir, logger) }, wantErr: &types.GenesisMismatchError{Stored: customghash, New: params.AmoyGenesisHash}, @@ -136,7 +136,7 @@ func TestSetupGenesis(t *testing.T) { { name: "compatible config in DB", fn: func(db kv.RwDB) (*chain.Config, *types.Block, error) { - core.MustCommitGenesis(&oldcustomg, db, tmpdir) + core.MustCommitGenesis(&oldcustomg, db, tmpdir, logger) return core.CommitGenesisBlock(db, &customg, tmpdir, logger) }, wantHash: customghash, @@ -176,7 +176,7 @@ func TestSetupGenesis(t *testing.T) { t.Run(test.name, func(t *testing.T) { t.Parallel() _, db, _ := temporal.NewTestDB(t, datadir.New(tmpdir), nil) - blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()), freezeblocks.NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New())) + blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", 1, log.New()), freezeblocks.NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", 1, log.New())) config, genesis, err := test.fn(db) // Check the return values. if !reflect.DeepEqual(err, test.wantErr) { diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index f9b64b074ea..3c6fc8fde54 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -623,12 +623,15 @@ func (hd *HeaderDownload) InsertHeader(hf FeedHeaderFunc, terminalTotalDifficult // InsertHeaders attempts to insert headers into the database, verifying them first // It returns true in the first return value if the system is "in sync" -func (hd *HeaderDownload) InsertHeaders(hf FeedHeaderFunc, terminalTotalDifficulty *big.Int, logPrefix string, logChannel <-chan time.Time, currentTime uint64) (bool, error) { +func (hd *HeaderDownload) InsertHeaders(hf FeedHeaderFunc, headerLimit uint, terminalTotalDifficulty *big.Int, logPrefix string, logChannel <-chan time.Time, currentTime uint64) (bool, error) { var more = true var err error var force bool var blocksToTTD uint64 var blockTime uint64 + + startHeight := hd.highestInDb + for more { if more, force, blocksToTTD, blockTime, err = hd.InsertHeader(hf, terminalTotalDifficulty, logPrefix, logChannel); err != nil { return false, err @@ -636,9 +639,13 @@ func (hd *HeaderDownload) InsertHeaders(hf FeedHeaderFunc, terminalTotalDifficul if force { return true, nil } + + if headerLimit > 0 && hd.highestInDb-startHeight > uint64(headerLimit) { + break + } } if blocksToTTD > 0 { - hd.logger.Info("Estimated to reaching TTD", "blocks", blocksToTTD) + hd.logger.Trace("Estimated to reaching TTD", "blocks", blocksToTTD) } hd.lock.RLock() defer hd.lock.RUnlock() diff --git a/turbo/stages/mock/mock_sentry.go b/turbo/stages/mock/mock_sentry.go index fc92b99ec5c..c65625117a8 100644 --- a/turbo/stages/mock/mock_sentry.go +++ b/turbo/stages/mock/mock_sentry.go @@ -51,6 +51,7 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" 
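// The p2p import added below is needed because NewPipelineStages (see the
// stageloop.go changes further down) now takes a p2p.Config parameter,
// which this mock satisfies with an empty p2p.Config{}.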
"github.com/ledgerwatch/erigon/ethdb/prune" + "github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/p2p/sentry/sentry_multi_client" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" @@ -259,8 +260,8 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK cfg.HistoryV3 = histV3 erigonGrpcServeer := remotedbserver.NewKvServer(ctx, db, nil, nil, logger) - allSnapshots := freezeblocks.NewRoSnapshots(ethconfig.Defaults.Snapshot, dirs.Snap, logger) - allBorSnapshots := freezeblocks.NewBorRoSnapshots(ethconfig.Defaults.Snapshot, dirs.Snap, logger) + allSnapshots := freezeblocks.NewRoSnapshots(ethconfig.Defaults.Snapshot, dirs.Snap, 1, logger) + allBorSnapshots := freezeblocks.NewBorRoSnapshots(ethconfig.Defaults.Snapshot, dirs.Snap, 1, logger) mock := &MockSentry{ Ctx: ctx, cancel: ctxCancel, DB: db, agg: agg, tb: tb, @@ -411,9 +412,10 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK miningStatePos := stagedsync.NewProposingState(&cfg.Miner) miningStatePos.MiningConfig.Etherbase = param.SuggestedFeeRecipient proposingSync := stagedsync.New( + cfg.Sync, stagedsync.MiningStages(mock.Ctx, stagedsync.StageMiningCreateBlockCfg(mock.DB, miningStatePos, *mock.ChainConfig, mock.Engine, mock.txPoolDB, param, tmpdir, mock.BlockReader), - stagedsync.StageBorHeimdallCfg(mock.DB, snapDb, miningStatePos, *mock.ChainConfig, nil, mock.BlockReader, nil, nil, recents, signatures), + stagedsync.StageBorHeimdallCfg(mock.DB, snapDb, miningStatePos, *mock.ChainConfig, nil, mock.BlockReader, nil, nil, nil, recents, signatures), stagedsync.StageMiningExecCfg(mock.DB, miningStatePos, mock.Notifications.Events, *mock.ChainConfig, mock.Engine, &vm.Config{}, tmpdir, interrupt, param.PayloadId, mock.TxPool, mock.txPoolDB, mock.BlockReader), stagedsync.StageHashStateCfg(mock.DB, dirs, cfg.HistoryV3), stagedsync.StageTrieCfg(mock.DB, false, true, true, tmpdir, mock.BlockReader, nil, histV3, mock.agg), @@ -421,22 +423,23 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK ), stagedsync.MiningUnwindOrder, stagedsync.MiningPruneOrder, logger) // We start the mining step - if err := stages2.MiningStep(ctx, mock.DB, proposingSync, tmpdir); err != nil { + if err := stages2.MiningStep(ctx, mock.DB, proposingSync, tmpdir, logger); err != nil { return nil, err } block := <-miningStatePos.MiningResultPOSCh return block, nil } - blockRetire := freezeblocks.NewBlockRetire(1, dirs, mock.BlockReader, blockWriter, mock.DB, mock.Notifications.Events, logger) + blockRetire := freezeblocks.NewBlockRetire(1, dirs, mock.BlockReader, blockWriter, mock.DB, mock.ChainConfig, mock.Notifications.Events, logger) mock.Sync = stagedsync.New( + cfg.Sync, stagedsync.DefaultStages(mock.Ctx, - stagedsync.StageSnapshotsCfg(mock.DB, *mock.ChainConfig, dirs, blockRetire, snapshotsDownloader, mock.BlockReader, mock.Notifications.Events, mock.HistoryV3, mock.agg, false, nil), - stagedsync.StageHeadersCfg(mock.DB, mock.sentriesClient.Hd, mock.sentriesClient.Bd, *mock.ChainConfig, sendHeaderRequest, propagateNewBlockHashes, penalize, cfg.BatchSize, false, mock.BlockReader, blockWriter, dirs.Tmp, mock.Notifications, engine_helpers.NewForkValidatorMock(1), nil), - stagedsync.StageBorHeimdallCfg(mock.DB, snapDb, stagedsync.MiningState{}, *mock.ChainConfig, nil /* heimdallClient */, mock.BlockReader, nil, nil, recents, signatures), + stagedsync.StageSnapshotsCfg(mock.DB, *mock.ChainConfig, cfg.Sync, dirs, blockRetire, snapshotsDownloader, 
mock.BlockReader, mock.Notifications, mock.HistoryV3, mock.agg, false, nil), + stagedsync.StageHeadersCfg(mock.DB, mock.sentriesClient.Hd, mock.sentriesClient.Bd, *mock.ChainConfig, cfg.Sync, sendHeaderRequest, propagateNewBlockHashes, penalize, cfg.BatchSize, false, mock.BlockReader, blockWriter, dirs.Tmp, mock.Notifications, engine_helpers.NewForkValidatorMock(1), nil), + stagedsync.StageBorHeimdallCfg(mock.DB, snapDb, stagedsync.MiningState{}, *mock.ChainConfig, nil /* heimdallClient */, mock.BlockReader, nil, nil, nil, recents, signatures), stagedsync.StageBlockHashesCfg(mock.DB, mock.Dirs.Tmp, mock.ChainConfig, blockWriter), - stagedsync.StageBodiesCfg(mock.DB, mock.sentriesClient.Bd, sendBodyRequest, penalize, blockPropagator, cfg.Sync.BodyDownloadTimeoutSeconds, *mock.ChainConfig, mock.BlockReader, cfg.HistoryV3, blockWriter), - stagedsync.StageSendersCfg(mock.DB, mock.ChainConfig, false, dirs.Tmp, prune, mock.BlockReader, mock.sentriesClient.Hd), + stagedsync.StageBodiesCfg(mock.DB, mock.sentriesClient.Bd, sendBodyRequest, penalize, blockPropagator, cfg.Sync.BodyDownloadTimeoutSeconds, *mock.ChainConfig, mock.BlockReader, cfg.HistoryV3, blockWriter, nil), + stagedsync.StageSendersCfg(mock.DB, mock.ChainConfig, false, dirs.Tmp, prune, mock.BlockReader, mock.sentriesClient.Hd, nil), stagedsync.StageExecuteBlocksCfg( mock.DB, prune, @@ -471,9 +474,9 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK ) cfg.Genesis = gspec - pipelineStages := stages2.NewPipelineStages(mock.Ctx, db, &cfg, mock.sentriesClient, mock.Notifications, + pipelineStages := stages2.NewPipelineStages(mock.Ctx, db, &cfg, p2p.Config{}, mock.sentriesClient, mock.Notifications, snapshotsDownloader, mock.BlockReader, blockRetire, mock.agg, nil, forkValidator, logger, checkStateRoot) - mock.posStagedSync = stagedsync.New(pipelineStages, stagedsync.PipelineUnwindOrder, stagedsync.PipelinePruneOrder, logger) + mock.posStagedSync = stagedsync.New(cfg.Sync, pipelineStages, stagedsync.PipelineUnwindOrder, stagedsync.PipelinePruneOrder, logger) mock.Eth1ExecutionService = eth1.NewEthereumExecutionModule(mock.BlockReader, mock.DB, mock.posStagedSync, forkValidator, mock.ChainConfig, assembleBlockPOS, nil, mock.Notifications.Accumulator, mock.Notifications.StateChangesConsumer, logger, engine, histV3) @@ -494,9 +497,10 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK mock.PendingBlocks = miner.PendingResultCh mock.MinedBlocks = miner.MiningResultCh mock.MiningSync = stagedsync.New( + cfg.Sync, stagedsync.MiningStages(mock.Ctx, stagedsync.StageMiningCreateBlockCfg(mock.DB, miner, *mock.ChainConfig, mock.Engine, nil, nil, dirs.Tmp, mock.BlockReader), - stagedsync.StageBorHeimdallCfg(mock.DB, snapDb, miner, *mock.ChainConfig, nil /*heimdallClient*/, mock.BlockReader, nil, nil, recents, signatures), + stagedsync.StageBorHeimdallCfg(mock.DB, snapDb, miner, *mock.ChainConfig, nil /*heimdallClient*/, mock.BlockReader, nil, nil, nil, recents, signatures), stagedsync.StageMiningExecCfg(mock.DB, miner, nil, *mock.ChainConfig, mock.Engine, &vm.Config{}, dirs.Tmp, nil, 0, mock.TxPool, nil, mock.BlockReader), stagedsync.StageHashStateCfg(mock.DB, dirs, cfg.HistoryV3), stagedsync.StageTrieCfg(mock.DB, false, true, false, dirs.Tmp, mock.BlockReader, mock.sentriesClient.Hd, cfg.HistoryV3, mock.agg), diff --git a/turbo/stages/mock/sentry_mock_test.go b/turbo/stages/mock/sentry_mock_test.go index 54b62f1cd3a..e8d95a4256c 100644 --- a/turbo/stages/mock/sentry_mock_test.go +++ 
b/turbo/stages/mock/sentry_mock_test.go @@ -119,7 +119,7 @@ func TestMineBlockWith1Tx(t *testing.T) { } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceed - err = stages.MiningStep(m.Ctx, m.DB, m.MiningSync, "") + err = stages.MiningStep(m.Ctx, m.DB, m.MiningSync, "", log.Root()) require.NoError(err) got := <-m.PendingBlocks diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index de8fee1d567..ae2453afd60 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -146,7 +146,7 @@ func StageLoopIteration(ctx context.Context, db kv.RwDB, tx kv.RwTx, sync *stage return err } } - err = sync.Run(db, tx, initialCycle) + _, err = sync.Run(db, tx, initialCycle) if err != nil { return err } @@ -333,7 +333,7 @@ func (h *Hook) afterRun(tx kv.Tx, finishProgressBefore uint64) error { return nil } -func MiningStep(ctx context.Context, kv kv.RwDB, mining *stagedsync.Sync, tmpDir string) (err error) { +func MiningStep(ctx context.Context, kv kv.RwDB, mining *stagedsync.Sync, tmpDir string, logger log.Logger) (err error) { defer func() { if rec := recover(); rec != nil { err = fmt.Errorf("%+v, trace: %s", rec, dbg.Stack()) @@ -346,10 +346,10 @@ func MiningStep(ctx context.Context, kv kv.RwDB, mining *stagedsync.Sync, tmpDir } defer tx.Rollback() - miningBatch := membatchwithdb.NewMemoryBatch(tx, tmpDir) + miningBatch := membatchwithdb.NewMemoryBatch(tx, tmpDir, logger) defer miningBatch.Rollback() - if err = mining.Run(nil, miningBatch, false /* firstCycle */); err != nil { + if _, err = mining.Run(nil, miningBatch, false /* firstCycle */); err != nil { return err } tx.Rollback() @@ -477,19 +477,36 @@ func NewDefaultStages(ctx context.Context, // Hence we run it in the test mode. runInTestMode := cfg.ImportMode - var loopBreakCheck func() bool + var loopBreakCheck func(int) bool if heimdallClient != nil && flags.Milestone { - loopBreakCheck = heimdall.MilestoneRewindPending + loopBreakCheck = func(int) bool { + return heimdall.MilestoneRewindPending() + } + } + + if cfg.Sync.LoopBlockLimit > 0 { + previousBreakCheck := loopBreakCheck + loopBreakCheck = func(loopCount int) bool { + if loopCount > int(cfg.Sync.LoopBlockLimit) { + return true + } + + if previousBreakCheck != nil { + return previousBreakCheck(loopCount) + } + + return false + } } return stagedsync.DefaultStages(ctx, - stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, dirs, blockRetire, snapDownloader, blockReader, notifications.Events, cfg.HistoryV3, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, silkworm), - stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, notifications, forkValidator, loopBreakCheck), - stagedsync.StageBorHeimdallCfg(db, snapDb, stagedsync.MiningState{}, *controlServer.ChainConfig, heimdallClient, blockReader, controlServer.Hd, controlServer.Penalize, recents, signatures), + stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, cfg.HistoryV3, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, silkworm), + stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, 
dirs.Tmp, notifications, forkValidator, loopBreakCheck), + stagedsync.StageBorHeimdallCfg(db, snapDb, stagedsync.MiningState{}, *controlServer.ChainConfig, heimdallClient, blockReader, controlServer.Hd, controlServer.Penalize, loopBreakCheck, recents, signatures), stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter), - stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, cfg.HistoryV3, blockWriter), - stagedsync.StageSendersCfg(db, controlServer.ChainConfig, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd), + stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, cfg.HistoryV3, blockWriter, loopBreakCheck), + stagedsync.StageSendersCfg(db, controlServer.ChainConfig, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, loopBreakCheck), stagedsync.StageExecuteBlocksCfg( db, cfg.Prune, @@ -523,6 +540,7 @@ func NewDefaultStages(ctx context.Context, func NewPipelineStages(ctx context.Context, db kv.RwDB, cfg *ethconfig.Config, + p2pCfg p2p.Config, controlServer *sentry_multi_client.MultiClient, notifications *shards.Notifications, snapDownloader proto_downloader.DownloaderClient, @@ -541,10 +559,64 @@ func NewPipelineStages(ctx context.Context, // Hence we run it in the test mode. runInTestMode := cfg.ImportMode - return stagedsync.PipelineStages(ctx, - stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, dirs, blockRetire, snapDownloader, blockReader, notifications.Events, cfg.HistoryV3, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, silkworm), + var loopBreakCheck func(int) bool + + if cfg.Sync.LoopBlockLimit > 0 { + previousBreakCheck := loopBreakCheck + loopBreakCheck = func(loopCount int) bool { + if loopCount > int(cfg.Sync.LoopBlockLimit) { + return true + } + + if previousBreakCheck != nil { + return previousBreakCheck(loopCount) + } + + return false + } + } + + if len(cfg.Sync.UploadLocation) == 0 { + return stagedsync.PipelineStages(ctx, + stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, cfg.HistoryV3, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, silkworm), + stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter), + stagedsync.StageSendersCfg(db, controlServer.ChainConfig, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, loopBreakCheck), + stagedsync.StageExecuteBlocksCfg( + db, + cfg.Prune, + cfg.BatchSize, + nil, + controlServer.ChainConfig, + controlServer.Engine, + &vm.Config{}, + notifications.Accumulator, + cfg.StateStream, + /*stateStream=*/ false, + cfg.HistoryV3, + dirs, + blockReader, + controlServer.Hd, + cfg.Genesis, + cfg.Sync, + agg, + silkwormForExecutionStage(silkworm, cfg), + ), + stagedsync.StageHashStateCfg(db, dirs, cfg.HistoryV3), + stagedsync.StageTrieCfg(db, checkStateRoot, true, false, dirs.Tmp, blockReader, controlServer.Hd, cfg.HistoryV3, agg), + stagedsync.StageHistoryCfg(db, cfg.Prune, dirs.Tmp), + stagedsync.StageLogIndexCfg(db, cfg.Prune, dirs.Tmp), + stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, dirs.Tmp), + stagedsync.StageTxLookupCfg(db, cfg.Prune, dirs.Tmp, controlServer.ChainConfig.Bor, blockReader), + stagedsync.StageFinishCfg(db, dirs.Tmp, 
forkValidator), + runInTestMode) + } + + return stagedsync.UploaderPipelineStages(ctx, + stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, cfg.HistoryV3, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, silkworm), + stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, notifications, forkValidator, loopBreakCheck), stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter), - stagedsync.StageSendersCfg(db, controlServer.ChainConfig, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd), + stagedsync.StageSendersCfg(db, controlServer.ChainConfig, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, loopBreakCheck), + stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, cfg.HistoryV3, blockWriter, loopBreakCheck), stagedsync.StageExecuteBlocksCfg( db, cfg.Prune, @@ -573,17 +645,19 @@ func NewPipelineStages(ctx context.Context, stagedsync.StageTxLookupCfg(db, cfg.Prune, dirs.Tmp, controlServer.ChainConfig.Bor, blockReader), stagedsync.StageFinishCfg(db, dirs.Tmp, forkValidator), runInTestMode) + } func NewInMemoryExecution(ctx context.Context, db kv.RwDB, cfg *ethconfig.Config, controlServer *sentry_multi_client.MultiClient, dirs datadir.Dirs, notifications *shards.Notifications, blockReader services.FullBlockReader, blockWriter *blockio.BlockWriter, agg *state.AggregatorV3, silkworm *silkworm.Silkworm, logger log.Logger) *stagedsync.Sync { return stagedsync.New( + cfg.Sync, stagedsync.StateStages(ctx, - stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, false, blockReader, blockWriter, dirs.Tmp, nil, nil, nil), - stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, cfg.HistoryV3, blockWriter), + stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, false, blockReader, blockWriter, dirs.Tmp, nil, nil, nil), + stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, cfg.HistoryV3, blockWriter, nil), stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter), - stagedsync.StageSendersCfg(db, controlServer.ChainConfig, true, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd), + stagedsync.StageSendersCfg(db, controlServer.ChainConfig, true, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, nil), stagedsync.StageExecuteBlocksCfg( db, cfg.Prune,