diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 65edf9dde48..bf973280852 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -27,7 +27,10 @@ jobs: if: ${{ github.event_name == 'push' || !github.event.pull_request.draft }} strategy: matrix: - os: [ ubuntu-22.04 ] # Disabling macos as getting inexplicable "this job failed" messages with no logs, macos-13-xlarge ] # list of os: https://github.com/actions/virtual-environments + # list of os: https://github.com/actions/virtual-environments + os: + - ubuntu-22.04 + - macos-13 runs-on: ${{ matrix.os }} steps: diff --git a/.github/workflows/qa-clean-exit-block-downloading.yml b/.github/workflows/qa-clean-exit-block-downloading.yml new file mode 100644 index 00000000000..73104382ce8 --- /dev/null +++ b/.github/workflows/qa-clean-exit-block-downloading.yml @@ -0,0 +1,81 @@ +name: QA - Clean exit (block downloading) + +on: + push: + branches: + - 'release/**' + pull_request: + branches: + - devel + - 'release/**' + types: + - ready_for_review + +jobs: + long-running-test: + runs-on: self-hosted + env: + ERIGON_REFERENCE_DATA_DIR: /opt/erigon-release/datadir + ERIGON_TESTBED_DATA_DIR: /opt/erigon-testbed/datadir + WORKING_TIME_SECONDS: 600 + + steps: + - name: Check out repository + uses: actions/checkout@v2 + + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: '1.21' + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.x' + + #- name: Install dependencies + # run: | + # sudo apt-get update + # sudo apt-get install -y build-essential make gcc + + - name: Restore Erigon Testbed Data Directory + run: | + rsync -av --delete $ERIGON_REFERENCE_DATA_DIR/ $ERIGON_TESTBED_DATA_DIR/ + + - name: Clean Erigon Build Directory + run: | + make clean + + - name: Build Erigon + run: | + make erigon + working-directory: ${{ github.workspace }} + + #- name: Download Python Script for Logs Checking + # run: | + # curl -o check_erigon_exit.py 'https://gist.githubusercontent.com/mriccobene/8db4030a745de34d527f136f2caa104f/raw/3c1a860cb87d61075e78ce399e17f0ab157cacc6/check_erigon_exit.py' + + - name: Run Erigon, send ctrl-c and check for clean exiting + run: | + # Run Erigon, send ctrl-c and check logs + python3 ${{ github.workspace }}/../../../../erigon-qa/test_system/qa-tests/clean-exit/run_and_check_clean_exit.py ${{ github.workspace }}/build/bin $ERIGON_TESTBED_DATA_DIR $WORKING_TIME_SECONDS + + # Capture monitoring script exit status + monitoring_exit_status=$? 
+ + # Clean up Erigon process if it's still running + if kill -0 $ERIGON_PID 2> /dev/null; then + echo "Terminating Erigon" + kill $ERIGON_PID + wait $ERIGON_PID + else + echo "Erigon has already terminated" + fi + + # Check monitoring script exit status + if [ $monitoring_exit_status -eq 0 ]; then + echo "Monitoring completed successfully" + else + echo "Error detected in Erigon logs or monitoring script exited unexpectedly" + exit 1 + fi diff --git a/.github/workflows/qa-clean-exit.yml b/.github/workflows/qa-clean-exit-snapshot-downloading.yml similarity index 88% rename from .github/workflows/qa-clean-exit.yml rename to .github/workflows/qa-clean-exit-snapshot-downloading.yml index 1e6c9f2767f..7b04664f762 100644 --- a/.github/workflows/qa-clean-exit.yml +++ b/.github/workflows/qa-clean-exit-snapshot-downloading.yml @@ -1,4 +1,4 @@ -name: QA - Clean exit on Ctrl-C +name: QA - Clean exit (snapshot downloading) on: push: @@ -19,14 +19,10 @@ on: jobs: long-running-test: - #if: ${{ github.event_name == 'push' || !github.event.pull_request.draft }} - #strategy: - # matrix: - # os: [ ubuntu-22.04, macos-13-xlarge ] - #runs-on: ${{ matrix.os }} runs-on: self-hosted env: ERIGON_DATA_DIR: ${{ github.workspace }}/erigon_data + WORKING_TIME_SECONDS: 600 steps: - name: Check out repository @@ -35,7 +31,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v4 with: - go-version: '1.20' + go-version: '1.21' - name: Set up Python uses: actions/setup-python@v4 @@ -63,7 +59,7 @@ jobs: - name: Run Erigon, send ctrl-c and check for clean exiting run: | # Run Erigon, send ctrl-c and check logs - python3 ${{ github.workspace }}/../../../../erigon-qa/test_system/qa-tests/clean-exit/run_and_check_clean_exit.py ${{ github.workspace }}/build/bin $ERIGON_DATA_DIR + python3 ${{ github.workspace }}/../../../../erigon-qa/test_system/qa-tests/clean-exit/run_and_check_clean_exit.py ${{ github.workspace }}/build/bin $ERIGON_DATA_DIR $WORKING_TIME_SECONDS # Capture monitoring script exit status monitoring_exit_status=$? 
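The two QA workflows above delegate the actual check to `run_and_check_clean_exit.py`: run Erigon against a prepared datadir for `WORKING_TIME_SECONDS`, deliver a ctrl-c, and verify from the exit status and logs that shutdown was clean. As a rough illustration of that run/interrupt/verify pattern (not the actual script; the function name and the two-minute grace period are assumptions made for this sketch), a minimal Go version:

```go
// Minimal sketch of the clean-exit check the QA workflows perform:
// start the node, let it work, deliver SIGINT (ctrl-c), then require
// it to exit with status 0 within a grace period.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"syscall"
	"time"
)

// runAndCheckCleanExit is a hypothetical name; the grace period is an assumption.
func runAndCheckCleanExit(binary, datadir string, workingTime time.Duration) error {
	cmd := exec.Command(binary, "--datadir", datadir)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Start(); err != nil {
		return err
	}

	time.Sleep(workingTime) // let the node download blocks for a while

	// Equivalent of pressing ctrl-c in the terminal.
	if err := cmd.Process.Signal(syscall.SIGINT); err != nil {
		return err
	}

	// A clean shutdown must complete within the grace period.
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case err := <-done:
		if err != nil {
			return fmt.Errorf("erigon did not exit cleanly: %w", err)
		}
		return nil
	case <-time.After(2 * time.Minute):
		_ = cmd.Process.Kill() // shutdown hung: fail the test
		return fmt.Errorf("erigon did not shut down within the grace period")
	}
}

func main() {
	if err := runAndCheckCleanExit("./build/bin/erigon", os.Getenv("ERIGON_TESTBED_DATA_DIR"), 600*time.Second); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```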
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index d338b4727a9..c4531295ef8 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -47,7 +47,7 @@ jobs: id: prepare run: | TAG=${GITHUB_REF#refs/tags/} - echo ::set-output name=tag_name::${TAG} + echo "tag_name=${TAG}" >> $GITHUB_OUTPUT - name: Set up QEMU uses: docker/setup-qemu-action@v2 diff --git a/.github/workflows/test-integration.yml b/.github/workflows/test-integration.yml index d0bca79ee6f..7e4380be4be 100644 --- a/.github/workflows/test-integration.yml +++ b/.github/workflows/test-integration.yml @@ -18,7 +18,10 @@ jobs: tests: strategy: matrix: - os: [ ubuntu-22.04 ] # Disable macos as it is giving us "This Job Failed" errors with no logs, macos-13-xlarge ] # list of os: https://github.com/actions/virtual-environments + # list of os: https://github.com/actions/virtual-environments + os: + - ubuntu-22.04 + - macos-13 runs-on: ${{ matrix.os }} steps: diff --git a/.gitignore b/.gitignore index 3ce4eeca701..123c1eb2b93 100644 --- a/.gitignore +++ b/.gitignore @@ -98,3 +98,5 @@ node_modules /config.toml /config.yaml /config.yml + +vendor \ No newline at end of file diff --git a/.golangci.yml b/.golangci.yml index ea4a442c1de..a7ec604b677 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -84,7 +84,7 @@ linters-settings: rules: "rules.go" hugeParam: # size in bytes that makes the warning trigger (default 80) - sizeThreshold: 1000 + sizeThreshold: 1100 rangeExprCopy: # size in bytes that makes the warning trigger (default 512) sizeThreshold: 512 diff --git a/Makefile b/Makefile index c275e0af6d4..39cd47ef31a 100644 --- a/Makefile +++ b/Makefile @@ -135,6 +135,8 @@ COMMANDS += sentinel COMMANDS += caplin COMMANDS += caplin-regression COMMANDS += tooling +COMMANDS += snapshots + diff --git a/README.md b/README.md index 3cb2bed9ccd..8ee63718b93 100644 --- a/README.md +++ b/README.md @@ -57,7 +57,7 @@ System Requirements * Gnosis Chain Archive: 600GB (October 2023). -* Polygon Mainnet Archive: 5TB. (April 2022). `--prune.*.older 15768000`: 5.1Tb (Sept 2023). Polygon Mumbai Archive: +* Polygon Mainnet Archive: 8.5TiB (December 2023). `--prune.*.older 15768000`: 5.1TB (September 2023). Polygon Mumbai Archive: 1TB. (April 2022). SSD or NVMe. Do not recommend HDD - on HDD Erigon will always stay N blocks behind chain tip, but not fall behind. 
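The `.golangci.yml` tweak above raises gocritic's `hugeParam` threshold from 1000 to 1100 bytes, so a parameter passed by value is now flagged only once its type exceeds 1100 bytes. A minimal illustration of what that check measures (the struct and function names here are invented for the sketch):

```go
// Illustrative sketch only: the kind of parameter gocritic's hugeParam
// check measures. The struct and function names are invented.
package main

import "fmt"

// Big is 1080 bytes (135 * 8): over the old sizeThreshold of 1000,
// under the new 1100, so passing it by value is no longer flagged.
type Big struct {
	Payload [135]uint64
}

// By value: the whole 1080-byte struct is copied on every call.
// hugeParam would have flagged this under sizeThreshold: 1000.
func sumByValue(b Big) (s uint64) {
	for _, v := range b.Payload {
		s += v
	}
	return s
}

// By pointer: no copy; the usual fix when hugeParam does fire.
func sumByPointer(b *Big) (s uint64) {
	for _, v := range b.Payload {
		s += v
	}
	return s
}

func main() {
	b := Big{}
	b.Payload[0] = 42
	fmt.Println(sumByValue(b), sumByPointer(&b))
}
```

Raising the threshold tolerates types in the 1000 to 1100 byte range rather than forcing their call sites over to pointer parameters.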
diff --git a/cl/abstract/beacon_state.go b/cl/abstract/beacon_state.go index a4d098e3206..cc77a206181 100644 --- a/cl/abstract/beacon_state.go +++ b/cl/abstract/beacon_state.go @@ -22,7 +22,7 @@ type BeaconStateUpgradable interface { } type BeaconStateExtension interface { - SlashValidator(slashedInd uint64, whistleblowerInd *uint64) error + SlashValidator(slashedInd uint64, whistleblowerInd *uint64) (uint64, error) InitiateValidatorExit(index uint64) error GetActiveValidatorsIndices(epoch uint64) (indicies []uint64) GetTotalActiveBalance() uint64 @@ -40,6 +40,7 @@ type BeaconStateExtension interface { ValidatorIndexByPubkey(key [48]byte) (uint64, bool) PreviousStateRoot() common.Hash SetPreviousStateRoot(root common.Hash) + GetValidatorActivationChurnLimit() uint64 } type BeaconStateBasic interface { @@ -162,6 +163,8 @@ type BeaconStateMinimal interface { Eth1Data() *cltypes.Eth1Data Eth1DataVotes() *solid.ListSSZ[*cltypes.Eth1Data] Eth1DepositIndex() uint64 + ValidatorSet() *solid.ValidatorSet + PreviousEpochParticipation() *solid.BitList ForEachValidator(fn func(v solid.Validator, idx int, total int) bool) ValidatorForValidatorIndex(index int) (solid.Validator, error) diff --git a/cl/antiquary/antiquary.go b/cl/antiquary/antiquary.go index 27273ed5d5b..a0f97b41fa5 100644 --- a/cl/antiquary/antiquary.go +++ b/cl/antiquary/antiquary.go @@ -34,7 +34,7 @@ type Antiquary struct { beaconDB persistence.BlockSource backfilled *atomic.Bool cfg *clparams.BeaconChainConfig - states bool + states, blocks bool fs afero.Fs validatorsTable *state_accessors.StaticValidatorTable genesisState *state.CachingBeaconState @@ -43,7 +43,7 @@ type Antiquary struct { balances32 []byte } -func NewAntiquary(ctx context.Context, genesisState *state.CachingBeaconState, validatorsTable *state_accessors.StaticValidatorTable, cfg *clparams.BeaconChainConfig, dirs datadir.Dirs, downloader proto_downloader.DownloaderClient, mainDB kv.RwDB, sn *freezeblocks.CaplinSnapshots, reader freezeblocks.BeaconSnapshotReader, beaconDB persistence.BlockSource, logger log.Logger, states bool, fs afero.Fs) *Antiquary { +func NewAntiquary(ctx context.Context, genesisState *state.CachingBeaconState, validatorsTable *state_accessors.StaticValidatorTable, cfg *clparams.BeaconChainConfig, dirs datadir.Dirs, downloader proto_downloader.DownloaderClient, mainDB kv.RwDB, sn *freezeblocks.CaplinSnapshots, reader freezeblocks.BeaconSnapshotReader, beaconDB persistence.BlockSource, logger log.Logger, states, blocks bool, fs afero.Fs) *Antiquary { backfilled := &atomic.Bool{} backfilled.Store(false) return &Antiquary{ @@ -61,12 +61,13 @@ func NewAntiquary(ctx context.Context, genesisState *state.CachingBeaconState, v fs: fs, validatorsTable: validatorsTable, genesisState: genesisState, + blocks: blocks, } } // Antiquate is the function that starts transactions seeding and shit, very cool but very shit too as a name. 
func (a *Antiquary) Loop() error { - if a.downloader == nil { + if a.downloader == nil || !a.blocks { return nil // Just skip if we don't have a downloader } // Skip if we dont support backfilling for the current network @@ -94,7 +95,6 @@ func (a *Antiquary) Loop() error { return err } // Here we need to start mdbx transaction and lock the thread - log.Info("[Antiquary]: Stopping Caplin to process historical indicies") tx, err := a.mainDB.BeginRw(a.ctx) if err != nil { return err } @@ -110,6 +110,7 @@ func (a *Antiquary) Loop() error { return err } defer logInterval.Stop() + log.Info("[Antiquary]: Stopping Caplin to process historical indices", "from", from, "to", a.sn.BlocksAvailable()) // Now write the snapshots as indicies for i := from; i < a.sn.BlocksAvailable(); i++ { @@ -208,7 +209,7 @@ func (a *Antiquary) Loop() error { if to-from < snaptype.Erigon2MergeLimit { continue } - if err := a.antiquate(from, to); err != nil { + if err := a.antiquate(a.sn.Version(), from, to); err != nil { return err } case <-a.ctx.Done(): @@ -217,12 +218,12 @@ } // Antiquate will antiquate a specific block range (aka. retire snapshots), this should be ran in the background. -func (a *Antiquary) antiquate(from, to uint64) error { +func (a *Antiquary) antiquate(version uint8, from, to uint64) error { if a.downloader == nil { return nil // Just skip if we don't have a downloader } log.Info("[Antiquary]: Antiquating", "from", from, "to", to) - if err := freezeblocks.DumpBeaconBlocks(a.ctx, a.mainDB, a.beaconDB, from, to, snaptype.Erigon2MergeLimit, a.dirs.Tmp, a.dirs.Snap, 1, log.LvlDebug, a.logger); err != nil { + if err := freezeblocks.DumpBeaconBlocks(a.ctx, a.mainDB, a.beaconDB, version, from, to, snaptype.Erigon2MergeLimit, a.dirs.Tmp, a.dirs.Snap, 1, log.LvlDebug, a.logger); err != nil { return err } @@ -248,7 +249,7 @@ func (a *Antiquary) antiquate(from, to uint64) error { } // Notify bittorent to seed the new snapshots if _, err := a.downloader.Add(a.ctx, &proto_downloader.AddRequest{Items: downloadItems}); err != nil { - return err + log.Warn("[Antiquary]: Failed to add items to bittorrent", "err", err) } tx, err := a.mainDB.BeginRw(a.ctx) diff --git a/cl/antiquary/state_antiquary.go b/cl/antiquary/state_antiquary.go index fb12ea39440..892df0a2ceb 100644 --- a/cl/antiquary/state_antiquary.go +++ b/cl/antiquary/state_antiquary.go @@ -28,6 +28,7 @@ import ( "github.com/ledgerwatch/erigon/cl/phase1/core/state/raw" "github.com/ledgerwatch/erigon/cl/phase1/core/state/shuffling" "github.com/ledgerwatch/erigon/cl/transition" + "github.com/ledgerwatch/erigon/cl/transition/impl/eth2" "github.com/ledgerwatch/log/v3" ) @@ -149,40 +150,43 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { return next(k, k, v) } - effectiveBalance := etl.NewCollector(kv.ValidatorEffectiveBalance, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) + etlBufSz := etl.BufferOptimalSize / 8 // 18 collectors * 256mb / 8 = 512mb in worst case + effectiveBalance := etl.NewCollector(kv.ValidatorEffectiveBalance, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger) defer effectiveBalance.Close() - balances := etl.NewCollector(kv.ValidatorBalance, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) + balances := etl.NewCollector(kv.ValidatorBalance, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger) defer balances.Close() - randaoMixes := etl.NewCollector(kv.RandaoMixes, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) 
+ randaoMixes := etl.NewCollector(kv.RandaoMixes, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger) defer randaoMixes.Close() - intraRandaoMixes := etl.NewCollector(kv.IntraRandaoMixes, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) + intraRandaoMixes := etl.NewCollector(kv.IntraRandaoMixes, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger) defer intraRandaoMixes.Close() - proposers := etl.NewCollector(kv.Proposers, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) + proposers := etl.NewCollector(kv.Proposers, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger) defer proposers.Close() - slashings := etl.NewCollector(kv.ValidatorSlashings, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) + slashings := etl.NewCollector(kv.ValidatorSlashings, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger) defer slashings.Close() - blockRoots := etl.NewCollector(kv.BlockRoot, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) + blockRoots := etl.NewCollector(kv.BlockRoot, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger) defer blockRoots.Close() - stateRoots := etl.NewCollector(kv.StateRoot, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) + stateRoots := etl.NewCollector(kv.StateRoot, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger) defer stateRoots.Close() - minimalBeaconStates := etl.NewCollector(kv.MinimalBeaconState, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) - defer minimalBeaconStates.Close() - inactivityScoresC := etl.NewCollector(kv.InactivityScores, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) + slotData := etl.NewCollector(kv.SlotData, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger) + defer slotData.Close() + epochData := etl.NewCollector(kv.EpochData, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger) + defer epochData.Close() + inactivityScoresC := etl.NewCollector(kv.InactivityScores, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger) defer inactivityScoresC.Close() - checkpoints := etl.NewCollector(kv.Checkpoints, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) - defer checkpoints.Close() - nextSyncCommittee := etl.NewCollector(kv.NextSyncCommittee, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) + nextSyncCommittee := etl.NewCollector(kv.NextSyncCommittee, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger) defer nextSyncCommittee.Close() - currentSyncCommittee := etl.NewCollector(kv.CurrentSyncCommittee, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) + currentSyncCommittee := etl.NewCollector(kv.CurrentSyncCommittee, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger) defer currentSyncCommittee.Close() - currentEpochAttestations := etl.NewCollector(kv.CurrentEpochAttestations, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) + currentEpochAttestations := etl.NewCollector(kv.CurrentEpochAttestations, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger) defer currentEpochAttestations.Close() - previousEpochAttestations := etl.NewCollector(kv.PreviousEpochAttestations, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) + previousEpochAttestations := etl.NewCollector(kv.PreviousEpochAttestations, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger) defer previousEpochAttestations.Close() - eth1DataVotes := etl.NewCollector(kv.Eth1DataVotes, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), 
s.logger) + eth1DataVotes := etl.NewCollector(kv.Eth1DataVotes, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger) defer eth1DataVotes.Close() - stateEvents := etl.NewCollector(kv.StateEvents, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger) + stateEvents := etl.NewCollector(kv.StateEvents, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger) defer stateEvents.Close() + activeValidatorIndicies := etl.NewCollector(kv.ActiveValidatorIndicies, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger) + defer activeValidatorIndicies.Close() progress, err := state_accessors.GetStateProcessingProgress(tx) if err != nil { @@ -214,7 +218,7 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { return err } // Collect genesis state if we are at genesis - if err := s.collectGenesisState(ctx, compressedWriter, s.currentState, currentSyncCommittee, nextSyncCommittee, slashings, checkpoints, inactivityScoresC, proposers, minimalBeaconStates, stateEvents, changedValidators); err != nil { + if err := s.collectGenesisState(ctx, compressedWriter, s.currentState, currentSyncCommittee, nextSyncCommittee, slashings, epochData, inactivityScoresC, proposers, slotData, stateEvents, changedValidators); err != nil { return err } } else { @@ -231,7 +235,9 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { return err } log.Info("Recovered Beacon State", "slot", s.currentState.Slot(), "elapsed", end, "root", libcommon.Hash(hashRoot).String()) - + if err := s.currentState.InitBeaconState(); err != nil { + return err + } } s.balances32 = s.balances32[:0] s.balances32 = append(s.balances32, s.currentState.RawBalances()...) @@ -296,19 +302,34 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { return s.validatorsTable.AddWithdrawalCredentials(uint64(index), slot, libcommon.BytesToHash(wc)) }, OnEpochBoundary: func(epoch uint64) error { - k := base_encoding.Encode64ToBytes4(s.cfg.RoundSlotToEpoch(slot)) - v := make([]byte, solid.CheckpointSize*3) - copy(v, s.currentState.CurrentJustifiedCheckpoint()) - copy(v[solid.CheckpointSize:], s.currentState.PreviousJustifiedCheckpoint()) - copy(v[solid.CheckpointSize*2:], s.currentState.FinalizedCheckpoint()) - if err := checkpoints.Collect(k, v); err != nil { + if err := s.storeEpochData(commonBuffer, s.currentState, epochData); err != nil { return err } - prevEpoch := epoch - 1 + var prevEpoch uint64 + if epoch > 0 { + prevEpoch = epoch - 1 + } mix := s.currentState.GetRandaoMixes(prevEpoch) if err := randaoMixes.Collect(base_encoding.Encode64ToBytes4(prevEpoch*s.cfg.SlotsPerEpoch), mix[:]); err != nil { return err } + // Write active validator indicies + actives := s.currentState.GetActiveValidatorsIndices(prevEpoch) + commonBuffer.Reset() + if err := base_encoding.WriteRabbits(actives, commonBuffer); err != nil { + return err + } + if err := activeValidatorIndicies.Collect(base_encoding.Encode64ToBytes4(prevEpoch*s.cfg.SlotsPerEpoch), libcommon.Copy(commonBuffer.Bytes())); err != nil { + return err + } + actives = s.currentState.GetActiveValidatorsIndices(epoch) + commonBuffer.Reset() + if err := base_encoding.WriteRabbits(actives, commonBuffer); err != nil { + return err + } + if err := activeValidatorIndicies.Collect(base_encoding.Encode64ToBytes4(epoch*s.cfg.SlotsPerEpoch), libcommon.Copy(commonBuffer.Bytes())); err != nil { + return err + } // truncate the file return proposers.Collect(base_encoding.Encode64ToBytes4(epoch), 
getProposerDutiesValue(s.currentState)) }, @@ -340,9 +361,11 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { defer progressTimer.Stop() prevSlot := slot first := false + blocksBeforeCommit := 100_000 + blocksProcessed := 0 // This tells us that transition and operations do not happen concurrently and access is safe, so we can optimize for GC. // there is optimized custom cache to recycle big GC overhead. - for ; slot < to; slot++ { + for ; slot < to && blocksProcessed < blocksBeforeCommit; slot++ { slashingOccured = false // Set this to false at the beginning of each slot. key := base_encoding.Encode64ToBytes4(slot) @@ -398,11 +421,13 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { prevValSet = prevValSet[:0] prevValSet = append(prevValSet, s.currentState.RawValidatorSet()...) - fullValidation := slot%100_000 == 0 || first - // We sanity check the state every 100k slots or when we start. - if err := transition.TransitionState(s.currentState, block, fullValidation); err != nil { + fullValidation := slot%1000 == 0 || first + blockRewardsCollector := ð2.BlockRewardsCollector{} + // We sanity check the state every 1k slots or when we start. + if err := transition.TransitionState(s.currentState, block, blockRewardsCollector, fullValidation); err != nil { return err } + blocksProcessed++ first = false @@ -413,7 +438,7 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { } } - if err := s.storeMinimalState(commonBuffer, s.currentState, minimalBeaconStates); err != nil { + if err := s.storeSlotData(commonBuffer, s.currentState, blockRewardsCollector, slotData); err != nil { return err } if err := stateEvents.Collect(base_encoding.Encode64ToBytes4(slot), events.CopyBytes()); err != nil { @@ -502,8 +527,11 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { if err := stateRoots.Load(rwTx, kv.StateRoot, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil { return err } + if err := activeValidatorIndicies.Load(rwTx, kv.ActiveValidatorIndicies, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil { + return err + } - if err := minimalBeaconStates.Load(rwTx, kv.MinimalBeaconState, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil { + if err := slotData.Load(rwTx, kv.SlotData, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil { return err } @@ -515,7 +543,7 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { return err } - if err := checkpoints.Load(rwTx, kv.Checkpoints, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil { + if err := epochData.Load(rwTx, kv.EpochData, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil { return err } @@ -686,7 +714,7 @@ func getProposerDutiesValue(s *state.CachingBeaconState) []byte { return list } -func (s *Antiquary) collectGenesisState(ctx context.Context, compressor *zstd.Encoder, state *state.CachingBeaconState, currentSyncCommittee, nextSyncCommittee, slashings, checkpoints, inactivities, proposersCollector, minimalBeaconStateCollector, stateEvents *etl.Collector, changedValidators map[uint64]struct{}) error { +func (s *Antiquary) collectGenesisState(ctx context.Context, compressor *zstd.Encoder, state *state.CachingBeaconState, currentSyncCommittee, nextSyncCommittee, slashings, epochData, inactivities, proposersCollector, slotDataCollector, stateEvents *etl.Collector, changedValidators map[uint64]struct{}) error { var err error slot := state.Slot() 
epoch := slot / s.cfg.SlotsPerEpoch @@ -720,12 +748,7 @@ func (s *Antiquary) collectGenesisState(ctx context.Context, compressor *zstd.En return err } - k := base_encoding.Encode64ToBytes4(s.cfg.RoundSlotToEpoch(slot)) - v := make([]byte, solid.CheckpointSize*3) - copy(v, state.CurrentJustifiedCheckpoint()) - copy(v[solid.CheckpointSize:], state.PreviousJustifiedCheckpoint()) - copy(v[solid.CheckpointSize*2:], state.FinalizedCheckpoint()) - if err := checkpoints.Collect(k, v); err != nil { + if err := s.storeEpochData(&commonBuffer, state, epochData); err != nil { return err } @@ -734,34 +757,50 @@ func (s *Antiquary) collectGenesisState(ctx context.Context, compressor *zstd.En if err := s.antiquateFullUint64List(inactivities, slot, state.RawInactivityScores(), &commonBuffer, compressor); err != nil { return err } - } - - committee := *state.CurrentSyncCommittee() - if err := currentSyncCommittee.Collect(base_encoding.Encode64ToBytes4(slot), libcommon.Copy(committee[:])); err != nil { - return err - } + committeeSlot := s.cfg.RoundSlotToSyncCommitteePeriod(slot) + committee := *state.CurrentSyncCommittee() + if err := currentSyncCommittee.Collect(base_encoding.Encode64ToBytes4(committeeSlot), libcommon.Copy(committee[:])); err != nil { + return err + } - committee = *state.NextSyncCommittee() - if err := nextSyncCommittee.Collect(base_encoding.Encode64ToBytes4(slot), libcommon.Copy(committee[:])); err != nil { - return err + committee = *state.NextSyncCommittee() + if err := nextSyncCommittee.Collect(base_encoding.Encode64ToBytes4(committeeSlot), libcommon.Copy(committee[:])); err != nil { + return err + } } var b bytes.Buffer - if err := s.storeMinimalState(&b, state, minimalBeaconStateCollector); err != nil { + if err := s.storeSlotData(&b, state, nil, slotDataCollector); err != nil { return err } return stateEvents.Collect(base_encoding.Encode64ToBytes4(slot), events.CopyBytes()) } -func (s *Antiquary) storeMinimalState(buffer *bytes.Buffer, st *state.CachingBeaconState, collector *etl.Collector) error { +func (s *Antiquary) storeSlotData(buffer *bytes.Buffer, st *state.CachingBeaconState, rewardsCollector *eth2.BlockRewardsCollector, collector *etl.Collector) error { + buffer.Reset() + slotData := state_accessors.SlotDataFromBeaconState(st) + if rewardsCollector != nil { + slotData.AttestationsRewards = rewardsCollector.Attestations + slotData.SyncAggregateRewards = rewardsCollector.SyncAggregate + slotData.AttesterSlashings = rewardsCollector.AttesterSlashings + slotData.ProposerSlashings = rewardsCollector.ProposerSlashings + } + if err := slotData.WriteTo(buffer); err != nil { + return err + } + return collector.Collect(base_encoding.Encode64ToBytes4(st.Slot()), libcommon.Copy(buffer.Bytes())) +} + +func (s *Antiquary) storeEpochData(buffer *bytes.Buffer, st *state.CachingBeaconState, collector *etl.Collector) error { buffer.Reset() - minimalBeaconState := state_accessors.MinimalBeaconStateFromBeaconState(st.BeaconState) + epochData := state_accessors.EpochDataFromBeaconState(st) - if err := minimalBeaconState.WriteTo(buffer); err != nil { + if err := epochData.WriteTo(buffer); err != nil { return err } - return collector.Collect(base_encoding.Encode64ToBytes4(st.Slot()), buffer.Bytes()) + roundedSlot := s.cfg.RoundSlotToEpoch(st.Slot()) + return collector.Collect(base_encoding.Encode64ToBytes4(roundedSlot), libcommon.Copy(buffer.Bytes())) } func (s *Antiquary) dumpPayload(k []byte, v []byte, c *etl.Collector, b *bytes.Buffer, compressor *zstd.Encoder) error { diff --git 
a/cl/antiquary/state_antiquary_test.go b/cl/antiquary/state_antiquary_test.go index 352deae3fd7..7f407a4b674 100644 --- a/cl/antiquary/state_antiquary_test.go +++ b/cl/antiquary/state_antiquary_test.go @@ -19,17 +19,18 @@ import ( func runTest(t *testing.T, blocks []*cltypes.SignedBeaconBlock, preState, postState *state.CachingBeaconState) { db := memdb.NewTestDB(t) - reader, _ := tests.LoadChain(blocks, db, t) + reader, _ := tests.LoadChain(blocks, postState, db, t) ctx := context.Background() vt := state_accessors.NewStaticValidatorTable() f := afero.NewMemMapFs() - a := NewAntiquary(ctx, preState, vt, &clparams.MainnetBeaconConfig, datadir.New("/tmp"), nil, db, nil, reader, nil, log.New(), true, f) + a := NewAntiquary(ctx, preState, vt, &clparams.MainnetBeaconConfig, datadir.New("/tmp"), nil, db, nil, reader, nil, log.New(), true, true, f) require.NoError(t, a.IncrementBeaconState(ctx, blocks[len(blocks)-1].Block.Slot+33)) // TODO: add more meaning here, like checking db values, will do so once i see some bugs } func TestStateAntiquaryCapella(t *testing.T) { + t.Skip("TODO: oom") blocks, preState, postState := tests.GetCapellaRandom() runTest(t, blocks, preState, postState) } diff --git a/cl/antiquary/tests/tests.go b/cl/antiquary/tests/tests.go index 47bb2848dc6..ddfb042405d 100644 --- a/cl/antiquary/tests/tests.go +++ b/cl/antiquary/tests/tests.go @@ -13,6 +13,7 @@ import ( "github.com/ledgerwatch/erigon/cl/cltypes" "github.com/ledgerwatch/erigon/cl/persistence" "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies" + state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state" "github.com/ledgerwatch/erigon/cl/phase1/core/state" "github.com/ledgerwatch/erigon/cl/utils" "github.com/spf13/afero" @@ -61,17 +62,35 @@ func (m *MockBlockReader) ReadBlockBySlot(ctx context.Context, tx kv.Tx, slot ui } func (m *MockBlockReader) ReadBlockByRoot(ctx context.Context, tx kv.Tx, blockRoot libcommon.Hash) (*cltypes.SignedBeaconBlock, error) { - panic("implement me") + // do a linear search + for _, v := range m.u { + r, err := v.Block.HashSSZ() + if err != nil { + return nil, err + } + + if r == blockRoot { + return v, nil + } + } + return nil, nil } func (m *MockBlockReader) ReadHeaderByRoot(ctx context.Context, tx kv.Tx, blockRoot libcommon.Hash) (*cltypes.SignedBeaconBlockHeader, error) { - panic("implement me") + block, err := m.ReadBlockByRoot(ctx, tx, blockRoot) + if err != nil { + return nil, err + } + if block == nil { + return nil, nil + } + return block.SignedBeaconBlockHeader(), nil } func (m *MockBlockReader) FrozenSlots() uint64 { panic("implement me") } -func LoadChain(blocks []*cltypes.SignedBeaconBlock, db kv.RwDB, t *testing.T) (*MockBlockReader, afero.Fs) { +func LoadChain(blocks []*cltypes.SignedBeaconBlock, s *state.CachingBeaconState, db kv.RwDB, t *testing.T) (*MockBlockReader, afero.Fs) { tx, err := db.BeginRw(context.Background()) require.NoError(t, err) defer tx.Rollback() @@ -86,6 +105,7 @@ func LoadChain(blocks []*cltypes.SignedBeaconBlock, db kv.RwDB, t *testing.T) (* require.NoError(t, source.WriteBlock(context.Background(), tx, block, true)) require.NoError(t, beacon_indicies.WriteHighestFinalized(tx, block.Block.Slot+64)) } + require.NoError(t, state_accessors.InitializeStaticTables(tx, s)) require.NoError(t, tx.Commit()) return m, fs diff --git a/cl/beacon/beacon_router_configuration/cfg.go b/cl/beacon/beacon_router_configuration/cfg.go index 1a3307b0a1f..75b9cabcb67 100644 --- a/cl/beacon/beacon_router_configuration/cfg.go +++ 
b/cl/beacon/beacon_router_configuration/cfg.go @@ -2,11 +2,14 @@ package beacon_router_configuration import "time" -// TODO(enriavil1): Make this configurable via flags type RouterConfiguration struct { Active bool Protocol string Address string + // Cors data + AllowedOrigins []string + AllowedMethods []string + AllowCredentials bool ReadTimeTimeout time.Duration IdleTimeout time.Duration diff --git a/cl/beacon/beaconhttp/api.go b/cl/beacon/beaconhttp/api.go index 7c649d579dd..6bc32c58ddf 100644 --- a/cl/beacon/beaconhttp/api.go +++ b/cl/beacon/beaconhttp/api.go @@ -7,6 +7,7 @@ import ( "net/http" "reflect" "strings" + "time" "github.com/ledgerwatch/erigon-lib/types/ssz" "github.com/ledgerwatch/erigon/cl/phase1/forkchoice/fork_graph" @@ -53,13 +54,13 @@ func (e *EndpointError) WriteTo(w http.ResponseWriter) { } type EndpointHandler[T any] interface { - Handle(r *http.Request) (T, error) + Handle(w http.ResponseWriter, r *http.Request) (T, error) } -type EndpointHandlerFunc[T any] func(r *http.Request) (T, error) +type EndpointHandlerFunc[T any] func(w http.ResponseWriter, r *http.Request) (T, error) -func (e EndpointHandlerFunc[T]) Handle(r *http.Request) (T, error) { - return e(r) +func (e EndpointHandlerFunc[T]) Handle(w http.ResponseWriter, r *http.Request) (T, error) { + return e(w, r) } func HandleEndpointFunc[T any](h EndpointHandlerFunc[T]) http.HandlerFunc { @@ -68,14 +69,19 @@ func HandleEndpointFunc[T any](h EndpointHandlerFunc[T]) http.HandlerFunc { func HandleEndpoint[T any](h EndpointHandler[T]) http.HandlerFunc { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ans, err := h.Handle(r) + start := time.Now() + ans, err := h.Handle(w, r) + log.Debug("beacon api request", "endpoint", r.URL.Path, "duration", time.Since(start)) if err != nil { - log.Error("beacon api request error", "err", err) - endpointError := WrapEndpointError(err) + var endpointError *EndpointError + if e, ok := err.(*EndpointError); ok { + endpointError = e + } else { + endpointError = WrapEndpointError(err) + } endpointError.WriteTo(w) return } - // TODO: ssz handler // TODO: potentially add a context option to buffer these contentType := r.Header.Get("Accept") contentTypes := strings.Split(contentType, ",") @@ -94,15 +100,31 @@ func HandleEndpoint[T any](h EndpointHandler[T]) http.HandlerFunc { } w.Write(encoded) case contentType == "*/*", contentType == "", slices.Contains(contentTypes, "text/html"), slices.Contains(contentTypes, "application/json"): - w.Header().Add("content-type", "application/json") - err := json.NewEncoder(w).Encode(ans) - if err != nil { - // this error is fatal, log to console - log.Error("beaconapi failed to encode json", "type", reflect.TypeOf(ans), "err", err) + if !isNil(ans) { + w.Header().Add("content-type", "application/json") + err := json.NewEncoder(w).Encode(ans) + if err != nil { + // this error is fatal, log to console + log.Error("beaconapi failed to encode json", "type", reflect.TypeOf(ans), "err", err) + } + } else { + w.WriteHeader(200) } default: http.Error(w, "content type must be application/json or application/octet-stream", http.StatusBadRequest) - } }) } + +func isNil[T any](t T) bool { + v := reflect.ValueOf(t) + kind := v.Kind() + // Must be one of these types to be nillable + return (kind == reflect.Ptr || + kind == reflect.Interface || + kind == reflect.Slice || + kind == reflect.Map || + kind == reflect.Chan || + kind == reflect.Func) && + v.IsNil() +} diff --git a/cl/beacon/beaconhttp/args.go b/cl/beacon/beaconhttp/args.go new file 
mode 100644 index 00000000000..1701620b074 --- /dev/null +++ b/cl/beacon/beaconhttp/args.go @@ -0,0 +1,176 @@ +package beaconhttp + +import ( + "fmt" + "net/http" + "regexp" + "strconv" + + "github.com/go-chi/chi/v5" + "github.com/ledgerwatch/erigon-lib/common" +) + +type chainTag int + +var ( + Head chainTag = 0 + Finalized chainTag = 1 + Justified chainTag = 2 + Genesis chainTag = 3 +) + +// Represent either state id or block id +type SegmentID struct { + tag chainTag + slot *uint64 + root *common.Hash +} + +func (c *SegmentID) Head() bool { + return c.tag == Head && c.slot == nil && c.root == nil +} + +func (c *SegmentID) Finalized() bool { + return c.tag == Finalized +} + +func (c *SegmentID) Justified() bool { + return c.tag == Justified +} + +func (c *SegmentID) Genesis() bool { + return c.tag == Genesis +} + +func (c *SegmentID) GetSlot() *uint64 { + return c.slot +} + +func (c *SegmentID) GetRoot() *common.Hash { + return c.root +} + +func EpochFromRequest(r *http.Request) (uint64, error) { + // Must only be a number + regex := regexp.MustCompile(`^\d+$`) + epoch := chi.URLParam(r, "epoch") + if !regex.MatchString(epoch) { + return 0, fmt.Errorf("invalid path variable: {epoch}") + } + epochMaybe, err := strconv.ParseUint(epoch, 10, 64) + if err != nil { + return 0, err + } + return epochMaybe, nil +} + +func StringFromRequest(r *http.Request, name string) (string, error) { + str := chi.URLParam(r, name) + if str == "" { + return "", nil + } + return str, nil +} + +func BlockIdFromRequest(r *http.Request) (*SegmentID, error) { + regex := regexp.MustCompile(`^(?:0x[0-9a-fA-F]{64}|head|finalized|genesis|\d+)$`) + + blockId := chi.URLParam(r, "block_id") + if !regex.MatchString(blockId) { + return nil, fmt.Errorf("invalid path variable: {block_id}") + } + + if blockId == "head" { + return &SegmentID{tag: Head}, nil + } + if blockId == "finalized" { + return &SegmentID{tag: Finalized}, nil + } + if blockId == "genesis" { + return &SegmentID{tag: Genesis}, nil + } + slotMaybe, err := strconv.ParseUint(blockId, 10, 64) + if err == nil { + return &SegmentID{slot: &slotMaybe}, nil + } + root := common.HexToHash(blockId) + return &SegmentID{ + root: &root, + }, nil +} + +func StateIdFromRequest(r *http.Request) (*SegmentID, error) { + regex := regexp.MustCompile(`^(?:0x[0-9a-fA-F]{64}|head|finalized|genesis|justified|\d+)$`) + + stateId := chi.URLParam(r, "state_id") + if !regex.MatchString(stateId) { + return nil, fmt.Errorf("invalid path variable: {state_id}") + } + + if stateId == "head" { + return &SegmentID{tag: Head}, nil + } + if stateId == "finalized" { + return &SegmentID{tag: Finalized}, nil + } + if stateId == "genesis" { + return &SegmentID{tag: Genesis}, nil + } + if stateId == "justified" { + return &SegmentID{tag: Justified}, nil + } + slotMaybe, err := strconv.ParseUint(stateId, 10, 64) + if err == nil { + return &SegmentID{slot: &slotMaybe}, nil + } + root := common.HexToHash(stateId) + return &SegmentID{ + root: &root, + }, nil +} + +func HashFromQueryParams(r *http.Request, name string) (*common.Hash, error) { + hashStr := r.URL.Query().Get(name) + if hashStr == "" { + return nil, nil + } + // check if hashstr is an hex string + if len(hashStr) != 2+2*32 { + return nil, fmt.Errorf("invalid hash length") + } + if hashStr[:2] != "0x" { + return nil, fmt.Errorf("invalid hash prefix") + } + notHex, err := regexp.MatchString("[^0-9A-Fa-f]", hashStr[2:]) + if err != nil { + return nil, err + } + if notHex { + return nil, fmt.Errorf("invalid hash characters") + } + + hash := 
common.HexToHash(hashStr) + return &hash, nil +} + +// uint64FromQueryParams retrieves a number from the query params, in base 10. +func Uint64FromQueryParams(r *http.Request, name string) (*uint64, error) { + str := r.URL.Query().Get(name) + if str == "" { + return nil, nil + } + num, err := strconv.ParseUint(str, 10, 64) + if err != nil { + return nil, err + } + return &num, nil +} + +// decode a list of strings from the query params +func StringListFromQueryParams(r *http.Request, name string) ([]string, error) { + str := r.URL.Query().Get(name) + if str == "" { + return nil, nil + } + return regexp.MustCompile(`\s*,\s*`).Split(str, -1), nil +} diff --git a/cl/beacon/beaconhttp/beacon_response.go b/cl/beacon/beaconhttp/beacon_response.go new file mode 100644 index 00000000000..c0e340915e6 --- /dev/null +++ b/cl/beacon/beaconhttp/beacon_response.go @@ -0,0 +1,95 @@ +package beaconhttp + +import ( + "encoding/json" + "net/http" + + "github.com/ledgerwatch/erigon-lib/types/ssz" + "github.com/ledgerwatch/erigon/cl/clparams" +) + +type BeaconResponse struct { + Data any + Finalized *bool + Version *clparams.StateVersion + ExecutionOptimistic *bool + + Extra map[string]any +} + +func NewBeaconResponse(data any) *BeaconResponse { + return &BeaconResponse{ + Data: data, + } +} + +func (r *BeaconResponse) With(key string, value any) (out *BeaconResponse) { + out = new(BeaconResponse) + *out = *r + out.Extra[key] = value + return out +} + +func (r *BeaconResponse) WithFinalized(finalized bool) (out *BeaconResponse) { + out = new(BeaconResponse) + *out = *r + out.Finalized = new(bool) + out.ExecutionOptimistic = new(bool) + out.Finalized = &finalized + return out +} + +func (r *BeaconResponse) WithOptimistic(optimistic bool) (out *BeaconResponse) { + out = new(BeaconResponse) + *out = *r + out.ExecutionOptimistic = new(bool) + out.ExecutionOptimistic = &optimistic + return out +} + +func (r *BeaconResponse) WithVersion(version clparams.StateVersion) (out *BeaconResponse) { + out = new(BeaconResponse) + *out = *r + out.Version = new(clparams.StateVersion) + out.Version = &version + return out +} + +func (b *BeaconResponse) MarshalJSON() ([]byte, error) { + o := map[string]any{ + "data": b.Data, + } + if b.Finalized != nil { + o["finalized"] = *b.Finalized + } + if b.Version != nil { + o["version"] = *b.Version + } + if b.ExecutionOptimistic != nil { + o["execution_optimistic"] = *b.ExecutionOptimistic + } + for k, v := range b.Extra { + o[k] = v + } + return json.Marshal(o) +} + +func (b *BeaconResponse) EncodeSSZ(xs []byte) ([]byte, error) { + marshaler, ok := b.Data.(ssz.Marshaler) + if !ok { + return nil, NewEndpointError(http.StatusBadRequest, "This endpoint does not support SSZ response") + } + encoded, err := marshaler.EncodeSSZ(nil) + if err != nil { + return nil, err + } + return encoded, nil +} + +func (b *BeaconResponse) EncodingSizeSSZ() int { + marshaler, ok := b.Data.(ssz.Marshaler) + if !ok { + return 9 + } + return marshaler.EncodingSizeSSZ() +} diff --git a/cl/beacon/beaconhttp/types.go b/cl/beacon/beaconhttp/types.go new file mode 100644 index 00000000000..fc5dfaa3e7e --- /dev/null +++ b/cl/beacon/beaconhttp/types.go @@ -0,0 +1,28 @@ +package beaconhttp + +import ( + "encoding/json" + "strconv" +) + +type IntStr int + +func (i IntStr) MarshalJSON() ([]byte, error) { + return json.Marshal(strconv.FormatInt(int64(i), 10)) +} + +func (i *IntStr) UnmarshalJSON(b []byte) error { + // Try string first + var s string + if err := json.Unmarshal(b, &s); err == nil { + value, err := 
strconv.ParseInt(s, 10, 64) + if err != nil { + return err + } + *i = IntStr(value) + return nil + } + + // Fallback to number + return json.Unmarshal(b, (*int)(i)) +} diff --git a/cl/beacon/beacontest/errors.go b/cl/beacon/beacontest/errors.go new file mode 100644 index 00000000000..ecd79566f06 --- /dev/null +++ b/cl/beacon/beacontest/errors.go @@ -0,0 +1,8 @@ +package beacontest + +import "errors" + +var ( + ErrExpressionMustReturnBool = errors.New("cel expression must return bool") + ErrUnknownType = errors.New("unknown type") +) diff --git a/cl/beacon/beacontest/harness.go b/cl/beacon/beacontest/harness.go new file mode 100644 index 00000000000..84287f46530 --- /dev/null +++ b/cl/beacon/beacontest/harness.go @@ -0,0 +1,436 @@ +package beacontest + +import ( + "bytes" + "context" + "crypto/md5" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "strings" + "testing" + "text/template" + + "github.com/Masterminds/sprig/v3" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/types" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "sigs.k8s.io/yaml" +) + +type HarnessOption func(*Harness) error + +func WithTesting(t *testing.T) func(*Harness) error { + return func(h *Harness) error { + h.t = t + return nil + } +} + +func WithTests(name string, xs []Test) func(*Harness) error { + return func(h *Harness) error { + h.tests[name] = xs + return nil + } +} + +func WithHandler(name string, handler http.Handler) func(*Harness) error { + return func(h *Harness) error { + h.handlers[name] = handler + return nil + } +} + +func WithFilesystem(name string, handler afero.Fs) func(*Harness) error { + return func(h *Harness) error { + h.fss[name] = handler + return nil + } +} +func WithTestFromFs(fs afero.Fs, name string) func(*Harness) error { + return func(h *Harness) error { + filename := name + for _, fn := range []string{name, name + ".yaml", name + ".yml", name + ".json"} { + // check if file exists + _, err := fs.Stat(fn) + if err == nil { + filename = fn + break + } + } + xs, err := afero.ReadFile(fs, filename) + if err != nil { + return err + } + return WithTestFromBytes(name, xs)(h) + } +} + +type Extra struct { + Vars map[string]any `json:"vars"` + + RawBodyZZZZ json.RawMessage `json:"tests"` +} + +func WithTestFromBytes(name string, xs []byte) func(*Harness) error { + return func(h *Harness) error { + var t struct { + T []Test `json:"tests"` + } + x := &Extra{} + s := md5.New() + s.Write(xs) + hsh := hex.EncodeToString(s.Sum(nil)) + // unmarshal just the extra data + err := yaml.Unmarshal(xs, &x, yaml.JSONOpt(func(d *json.Decoder) *json.Decoder { + return d + })) + if err != nil { + return err + } + tmpl := template.Must(template.New(hsh).Funcs(sprig.FuncMap()).Parse(string(xs))) + // execute the template using the extra data as the provided top level object + // we can use the original buffer as the output since the original buffer has already been copied when it was passed into template + buf := bytes.NewBuffer(xs) + buf.Reset() + err = tmpl.Execute(buf, x) + if err != nil { + return err + } + err = yaml.Unmarshal(buf.Bytes(), &t) + if err != nil { + return err + } + if len(t.T) == 0 { + return fmt.Errorf("suite with name %s had no tests", name) + } + h.tests[name] = t.T + return nil + } +} + +type Harness struct { + tests map[string][]Test + t *testing.T + + handlers map[string]http.Handler + fss map[string]afero.Fs +} + +func Execute(options 
...HarnessOption) { + h := &Harness{ + handlers: map[string]http.Handler{}, + tests: map[string][]Test{}, + fss: map[string]afero.Fs{ + "": afero.NewOsFs(), + }, + } + for _, v := range options { + err := v(h) + if err != nil { + h.t.Error(err) + } + } + h.Execute() +} + +func (h *Harness) Execute() { + ctx := context.Background() + for suiteName, tests := range h.tests { + for idx, v := range tests { + v.Actual.h = h + v.Expect.h = h + name := v.Name + if name == "" { + name = "test" + } + fullname := fmt.Sprintf("%s_%s_%d", suiteName, name, idx) + h.t.Run(fullname, func(t *testing.T) { + err := v.Execute(ctx, t) + require.NoError(t, err) + }) + } + } +} + +type Test struct { + Name string `json:"name"` + Expect Source `json:"expect"` + Actual Source `json:"actual"` + Compare Comparison `json:"compare"` +} + +func (c *Test) Execute(ctx context.Context, t *testing.T) error { + a, aCode, err := c.Expect.Execute(ctx) + if err != nil { + return fmt.Errorf("get expect data: %w", err) + } + b, bCode, err := c.Actual.Execute(ctx) + if err != nil { + return fmt.Errorf("get actual data: %w", err) + } + err = c.Compare.Compare(t, a, b, aCode, bCode) + if err != nil { + return fmt.Errorf("compare: %w", err) + } + + return nil +} + +type Comparison struct { + Expr string `json:"expr"` + Exprs []string `json:"exprs"` + Literal bool `json:"literal"` +} + +func (c *Comparison) Compare(t *testing.T, aRaw, bRaw json.RawMessage, aCode, bCode int) error { + var err error + var a, b any + var aType, bType *types.Type + + if !c.Literal { + var aMap, bMap any + err = yaml.Unmarshal(aRaw, &aMap) + if err != nil { + return err + } + err = yaml.Unmarshal(bRaw, &bMap) + if err != nil { + return err + } + a = aMap + b = bMap + if a != nil { + switch reflect.TypeOf(a).Kind() { + case reflect.Slice: + aType = cel.ListType(cel.MapType(cel.StringType, cel.DynType)) + default: + aType = cel.MapType(cel.StringType, cel.DynType) + } + } else { + aType = cel.MapType(cel.StringType, cel.DynType) + } + if b != nil { + switch reflect.TypeOf(b).Kind() { + case reflect.Slice: + bType = cel.ListType(cel.MapType(cel.StringType, cel.DynType)) + default: + bType = cel.MapType(cel.StringType, cel.DynType) + } + } else { + bType = cel.MapType(cel.StringType, cel.DynType) + } + } else { + a = string(aRaw) + b = string(bRaw) + aType = cel.StringType + bType = cel.StringType + } + + exprs := []string{} + // if no default expr set and no exprs are set, then add the default expr + if len(c.Exprs) == 0 && c.Expr == "" { + exprs = append(exprs, "actual_code == 200", "actual == expect") + } + env, err := cel.NewEnv( + cel.Variable("expect", aType), + cel.Variable("actual", bType), + cel.Variable("expect_code", cel.IntType), + cel.Variable("actual_code", cel.IntType), + ) + if err != nil { + return err + } + + for _, expr := range append(c.Exprs, exprs...) 
{ + ast, issues := env.Compile(expr) + if issues != nil && issues.Err() != nil { + return issues.Err() + } + prg, err := env.Program(ast) + if err != nil { + return fmt.Errorf("program construction error: %w", err) + } + res, _, err := prg.Eval(map[string]any{ + "expect": a, + "actual": b, + "expect_code": aCode, + "actual_code": bCode, + }) + if err != nil { + return err + } + if res.Type() != cel.BoolType { + return ErrExpressionMustReturnBool + } + bres, ok := res.Value().(bool) + if !ok { + return ErrExpressionMustReturnBool + } + if !assert.Equal(t, bres, true, `expr: %s`, expr) { + if os.Getenv("HIDE_HARNESS_LOG") != "1" { + t.Logf(`name: %s + expect%d: %v + actual%d: %v + expr: %s + `, t.Name(), aCode, a, bCode, b, expr) + + } + t.FailNow() + } + } + return nil +} + +type Source struct { + // backref to the harness + h *Harness `json:"-"` + + // remote type + Remote *string `json:"remote,omitempty"` + Handler *string `json:"handler,omitempty"` + Method string `json:"method"` + Path string `json:"path"` + Query map[string]string `json:"query"` + Headers map[string]string `json:"headers"` + Body *Source `json:"body,omitempty"` + + // data type + Data any `json:"data,omitempty"` + + // file type + File *string `json:"file,omitempty"` + Fs string `json:"fs,omitempty"` + + // for raw type + Raw *string `json:"raw,omitempty"` +} + +func (s *Source) Execute(ctx context.Context) (json.RawMessage, int, error) { + if s.Raw != nil { + return s.executeRaw(ctx) + } + if s.File != nil { + return s.executeFile(ctx) + } + if s.Remote != nil || s.Handler != nil { + return s.executeRemote(ctx) + } + if s.Data != nil { + return s.executeData(ctx) + } + return s.executeEmpty(ctx) +} +func (s *Source) executeRemote(ctx context.Context) (json.RawMessage, int, error) { + method := "GET" + if s.Method != "" { + method = s.Method + } + method = strings.ToUpper(method) + var body io.Reader + // hydrate the harness + if s.Body != nil { + s.Body.h = s.h + msg, _, err := s.Body.Execute(ctx) + if err != nil { + return nil, 0, fmt.Errorf("getting body: %w", err) + } + body = bytes.NewBuffer(msg) + } + var purl *url.URL + if s.Remote != nil { + niceUrl, err := url.Parse(*s.Remote) + if err != nil { + return nil, 0, err + } + purl = niceUrl + + } else if s.Handler != nil { + handler, ok := s.h.handlers[*s.Handler] + if !ok { + return nil, 0, fmt.Errorf("handler not registered: %s", *s.Handler) + } + server := httptest.NewServer(handler) + defer server.Close() + niceUrl, err := url.Parse(server.URL) + if err != nil { + return nil, 0, err + } + purl = niceUrl + } else { + panic("impossible code path. bug? 
source.Execute() should ensure this never happens") + } + + purl = purl.JoinPath(s.Path) + q := purl.Query() + for k, v := range s.Query { + q.Add(k, v) + } + purl.RawQuery = q.Encode() + request, err := http.NewRequest(method, purl.String(), body) + if err != nil { + return nil, 0, err + } + for k, v := range s.Headers { + request.Header.Set(k, v) + } + resp, err := http.DefaultClient.Do(request) + if err != nil { + return nil, 0, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, resp.StatusCode, nil + } + out, err := io.ReadAll(resp.Body) + if err != nil { + return nil, 200, err + } + return json.RawMessage(out), 200, nil +} + +func (s *Source) executeData(ctx context.Context) (json.RawMessage, int, error) { + ans, err := json.Marshal(s.Data) + if err != nil { + return nil, 400, nil + } + return ans, 200, nil +} + +func (s *Source) executeFile(ctx context.Context) (json.RawMessage, int, error) { + afs, ok := s.h.fss[s.Fs] + if !ok { + return nil, 404, fmt.Errorf("filesystem %s not defined", s.Fs) + } + name := *s.File + filename := name + for _, fn := range []string{name, name + ".yaml", name + ".yml", name + ".json"} { + // check if file exists + _, err := afs.Stat(fn) + if err == nil { + filename = fn + break + } + } + fileBytes, err := afero.ReadFile(afs, filename) + if err != nil { + return nil, 404, err + } + return json.RawMessage(fileBytes), 200, nil +} +func (s *Source) executeRaw(ctx context.Context) (json.RawMessage, int, error) { + return json.RawMessage(*s.Raw), 200, nil +} + +func (s *Source) executeEmpty(ctx context.Context) (json.RawMessage, int, error) { + return []byte("{}"), 200, nil +} diff --git a/cl/beacon/beacontest/harness_test.go b/cl/beacon/beacontest/harness_test.go new file mode 100644 index 00000000000..d6ee904d7e7 --- /dev/null +++ b/cl/beacon/beacontest/harness_test.go @@ -0,0 +1,19 @@ +package beacontest_test + +import ( + "testing" + + _ "embed" + + "github.com/ledgerwatch/erigon/cl/beacon/beacontest" +) + +//go:embed harness_test_data.yml +var testData []byte + +func TestSimpleHarness(t *testing.T) { + beacontest.Execute( + beacontest.WithTesting(t), + beacontest.WithTestFromBytes("test", testData), + ) +} diff --git a/cl/beacon/beacontest/harness_test_data.yml b/cl/beacon/beacontest/harness_test_data.yml new file mode 100644 index 00000000000..4c307021986 --- /dev/null +++ b/cl/beacon/beacontest/harness_test_data.yml @@ -0,0 +1,62 @@ +tests: + - name: "equality expression" + expect: + data: + hello: world + actual: + data: + hello: world + compare: + type: "expr" + expr: "actual == expect" + - name: "neg equality expr" + expect: + data: + hello: world + actual: + data: + hello: worlds + compare: + expr: "actual != expect" + - name: "subkey world" + expect: + data: + hi: world + actual: + data: + hello: world + compare: + expr: "actual.hello == expect.hi" + - name: "default compare" + expect: + data: + hello: world + actual: + data: + hello: world + - name: "default neg compare" + expect: + data: + hello: world + actual: + data: + hello: worlds + compare: + expr: "actual != expect" + - name: "key order doesnt matter for non literal" + expect: + data: + a: 1 + b: 2 + actual: + raw: '{"b":2,"a":1}' + - name: "key order does matter for literal" + expect: + data: + a: 1 + b: 2 + actual: + raw: '{"b":2,"a":1}' + compare: + literal: true + expr: "actual != expect" diff --git a/cl/beacon/beacontest/linux_basepathfs.go b/cl/beacon/beacontest/linux_basepathfs.go new file mode 100644 index 00000000000..a27754c2723 --- /dev/null 
+++ b/cl/beacon/beacontest/linux_basepathfs.go @@ -0,0 +1,252 @@ +package beacontest + +import ( + "io/fs" + "os" + "path" + "runtime" + "strings" + "time" + + "github.com/spf13/afero" +) + +var ( + _ afero.Lstater = (*BasePathFs)(nil) + _ fs.ReadDirFile = (*BasePathFile)(nil) +) + +// This is a version of the afero basepathfs that uses path instead of filepath. +// this is needed to work with things like zipfs and embedfs on windows +type BasePathFs struct { + source afero.Fs + path string +} + +type BasePathFile struct { + afero.File + path string +} + +func (f *BasePathFile) Name() string { + sourcename := f.File.Name() + return strings.TrimPrefix(sourcename, path.Clean(f.path)) +} + +func (f *BasePathFile) ReadDir(n int) ([]fs.DirEntry, error) { + if rdf, ok := f.File.(fs.ReadDirFile); ok { + return rdf.ReadDir(n) + } + return readDirFile{f.File}.ReadDir(n) +} + +func NewBasePathFs(source afero.Fs, path string) afero.Fs { + return &BasePathFs{source: source, path: path} +} + +// on a file outside the base path it returns the given file name and an error, +// else the given file with the base path prepended +func (b *BasePathFs) RealPath(name string) (p string, err error) { + if err := validateBasePathName(name); err != nil { + return name, err + } + + bpath := path.Clean(b.path) + p = path.Clean(path.Join(bpath, name)) + if !strings.HasPrefix(p, bpath) { + return name, os.ErrNotExist + } + + return p, nil +} + +func validateBasePathName(name string) error { + if runtime.GOOS != "windows" { + // Not much to do here; + // the virtual file paths all look absolute on *nix. + return nil + } + + // On Windows a common mistake would be to provide an absolute OS path + // We could strip out the base part, but that would not be very portable. + if path.IsAbs(name) { + return os.ErrNotExist + } + + return nil +} + +func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chtimes", Path: name, Err: err} + } + return b.source.Chtimes(name, atime, mtime) +} + +func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chmod", Path: name, Err: err} + } + return b.source.Chmod(name, mode) +} + +func (b *BasePathFs) Chown(name string, uid, gid int) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chown", Path: name, Err: err} + } + return b.source.Chown(name, uid, gid) +} + +func (b *BasePathFs) Name() string { + return "BasePathFs" +} + +func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "stat", Path: name, Err: err} + } + return b.source.Stat(name) +} + +func (b *BasePathFs) Rename(oldname, newname string) (err error) { + if oldname, err = b.RealPath(oldname); err != nil { + return &os.PathError{Op: "rename", Path: oldname, Err: err} + } + if newname, err = b.RealPath(newname); err != nil { + return &os.PathError{Op: "rename", Path: newname, Err: err} + } + return b.source.Rename(oldname, newname) +} + +func (b *BasePathFs) RemoveAll(name string) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "remove_all", Path: name, Err: err} + } + return b.source.RemoveAll(name) +} + +func (b *BasePathFs) Remove(name string) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "remove", Path: name, Err: err} + } + 
return b.source.Remove(name) +} + +func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f afero.File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "openfile", Path: name, Err: err} + } + sourcef, err := b.source.OpenFile(name, flag, mode) + if err != nil { + return nil, err + } + return &BasePathFile{sourcef, b.path}, nil +} + +func (b *BasePathFs) Open(name string) (f afero.File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "open", Path: name, Err: err} + } + sourcef, err := b.source.Open(name) + if err != nil { + return nil, err + } + return &BasePathFile{File: sourcef, path: b.path}, nil +} + +func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + return b.source.Mkdir(name, mode) +} + +func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + return b.source.MkdirAll(name, mode) +} + +func (b *BasePathFs) Create(name string) (f afero.File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "create", Path: name, Err: err} + } + sourcef, err := b.source.Create(name) + if err != nil { + return nil, err + } + return &BasePathFile{File: sourcef, path: b.path}, nil +} + +func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + name, err := b.RealPath(name) + if err != nil { + return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err} + } + if lstater, ok := b.source.(afero.Lstater); ok { + return lstater.LstatIfPossible(name) + } + fi, err := b.source.Stat(name) + return fi, false, err +} + +func (b *BasePathFs) SymlinkIfPossible(oldname, newname string) error { + oldname, err := b.RealPath(oldname) + if err != nil { + return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err} + } + newname, err = b.RealPath(newname) + if err != nil { + return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err} + } + if linker, ok := b.source.(afero.Linker); ok { + return linker.SymlinkIfPossible(oldname, newname) + } + return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: afero.ErrNoSymlink} +} + +func (b *BasePathFs) ReadlinkIfPossible(name string) (string, error) { + name, err := b.RealPath(name) + if err != nil { + return "", &os.PathError{Op: "readlink", Path: name, Err: err} + } + if reader, ok := b.source.(afero.LinkReader); ok { + return reader.ReadlinkIfPossible(name) + } + return "", &os.PathError{Op: "readlink", Path: name, Err: afero.ErrNoReadlink} +} + +// readDirFile provides an adapter from afero.File to fs.ReadDirFile; it is required for Open to return a correct directory reader +type readDirFile struct { + afero.File +} + +var _ fs.ReadDirFile = readDirFile{} + +func (r readDirFile) ReadDir(n int) ([]fs.DirEntry, error) { + items, err := r.File.Readdir(n) + if err != nil { + return nil, err + } + + ret := make([]fs.DirEntry, len(items)) + for i := range items { + ret[i] = fileInfoDirEntry{FileInfo: items[i]} + } + + return ret, nil +} + +// fileInfoDirEntry provides an adapter from os.FileInfo to fs.DirEntry +type fileInfoDirEntry struct { + fs.FileInfo +} + +var _ fs.DirEntry = fileInfoDirEntry{} + +func (d fileInfoDirEntry) Type() fs.FileMode { return d.FileInfo.Mode().Type() } + +func (d fileInfoDirEntry)
Info() (fs.FileInfo, error) { return d.FileInfo, nil } diff --git a/cl/beacon/building/endpoints.go b/cl/beacon/building/endpoints.go new file mode 100644 index 00000000000..cdf3c133d8c --- /dev/null +++ b/cl/beacon/building/endpoints.go @@ -0,0 +1,25 @@ +package building + +import ( + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" +) + +type BeaconCommitteeSubscription struct { + ValidatorIndex int `json:"validator_index,string"` + CommitteeIndex int `json:"committee_index,string"` + CommitteesAtSlot int `json:"committees_at_slot,string"` + Slot int `json:"slot,string"` + IsAggregator bool `json:"is_aggregator"` +} + +type SyncCommitteeSubscription struct { + ValidatorIndex int `json:"validator_index,string"` + SyncCommitteeIndices []beaconhttp.IntStr `json:"sync_committee_indices"` + UntilEpoch int `json:"until_epoch,string"` +} + +type PrepareBeaconProposer struct { + ValidatorIndex int `json:"validator_index,string"` + FeeRecipient common.Address `json:"fee_recipient"` +} diff --git a/cl/beacon/building/state.go b/cl/beacon/building/state.go new file mode 100644 index 00000000000..e7baf787b8d --- /dev/null +++ b/cl/beacon/building/state.go @@ -0,0 +1,25 @@ +package building + +import ( + "sync" + + "github.com/ledgerwatch/erigon-lib/common" +) + +type State struct { + feeRecipients map[int]common.Address + + mu sync.RWMutex +} + +func NewState() *State { + return &State{ + feeRecipients: map[int]common.Address{}, + } +} + +func (s *State) SetFeeRecipient(idx int, address common.Address) { + s.mu.Lock() + defer s.mu.Unlock() + s.feeRecipients[idx] = address +} diff --git a/cl/beacon/handler/attestation_rewards.go b/cl/beacon/handler/attestation_rewards.go new file mode 100644 index 00000000000..aa823b9b76c --- /dev/null +++ b/cl/beacon/handler/attestation_rewards.go @@ -0,0 +1,450 @@ +package handler + +import ( + "encoding/json" + "io" + "net/http" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies" + state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state" + "github.com/ledgerwatch/erigon/cl/phase1/core/state" + "github.com/ledgerwatch/erigon/cl/transition/impl/eth2/statechange" + "github.com/ledgerwatch/erigon/cl/utils" +) + +type IdealReward struct { + EffectiveBalance int64 `json:"effective_balance,string"` + Head int64 `json:"head,string"` + Target int64 `json:"target,string"` + Source int64 `json:"source,string"` + InclusionDelay int64 `json:"inclusion_delay,string"` + Inactivity int64 `json:"inactivity,string"` +} + +type TotalReward struct { + ValidatorIndex int64 `json:"validator_index,string"` + Head int64 `json:"head,string"` + Target int64 `json:"target,string"` + Source int64 `json:"source,string"` + InclusionDelay int64 `json:"inclusion_delay,string"` + Inactivity int64 `json:"inactivity,string"` +} + +type attestationsRewardsResponse struct { + IdealRewards []IdealReward `json:"ideal_rewards"` + TotalRewards []TotalReward `json:"total_rewards"` +} + +func (a *ApiHandler) getAttestationsRewards(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { + ctx := r.Context() + + tx, err := a.indiciesDB.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + epoch, err := beaconhttp.EpochFromRequest(r) + if err != 
nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + + req := []string{} + // read the entire body + jsonBytes, err := io.ReadAll(r.Body) + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + // parse json body request + if len(jsonBytes) > 0 { + if err := json.Unmarshal(jsonBytes, &req); err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + } + + filterIndicies, err := parseQueryValidatorIndicies(tx, req) + if err != nil { + return nil, err + } + _, headSlot, err := a.forkchoiceStore.GetHead() + if err != nil { + return nil, err + } + headEpoch := headSlot / a.beaconChainCfg.SlotsPerEpoch + if epoch > headEpoch { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "epoch is in the future") + } + // Few cases to handle: + // 1) finalized data + // 2) not finalized data + version := a.beaconChainCfg.GetCurrentStateVersion(epoch) + + // not finalized data: the epoch is newer than the finalized checkpoint, so serve it from fork choice + if epoch > a.forkchoiceStore.FinalizedCheckpoint().Epoch() { + minRange := epoch * a.beaconChainCfg.SlotsPerEpoch + maxRange := (epoch + 1) * a.beaconChainCfg.SlotsPerEpoch + var blockRoot libcommon.Hash + for i := maxRange - 1; i >= minRange; i-- { + blockRoot, err = beacon_indicies.ReadCanonicalBlockRoot(tx, i) + if err != nil { + return nil, err + } + if blockRoot == (libcommon.Hash{}) { + continue + } + s, err := a.forkchoiceStore.GetStateAtBlockRoot(blockRoot, true) + if err != nil { + return nil, err + } + if s == nil { + continue + } + if s.Version() == clparams.Phase0Version { + return a.computeAttestationsRewardsForPhase0(s, filterIndicies, epoch) + } + return a.computeAttestationsRewardsForAltair(s.ValidatorSet(), s.InactivityScores(), s.PreviousEpochParticipation(), state.InactivityLeaking(s), filterIndicies, epoch) + } + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "no block found for this epoch") + } + + if version == clparams.Phase0Version { + minRange := epoch * a.beaconChainCfg.SlotsPerEpoch + maxRange := (epoch + 1) * a.beaconChainCfg.SlotsPerEpoch + for i := maxRange - 1; i >= minRange; i-- { + s, err := a.stateReader.ReadHistoricalState(ctx, tx, i) + if err != nil { + return nil, err + } + if s == nil { + continue + } + if err := s.InitBeaconState(); err != nil { + return nil, err + } + return a.computeAttestationsRewardsForPhase0(s, filterIndicies, epoch) + } + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "no block found for this epoch") + } + lastSlot := epoch*a.beaconChainCfg.SlotsPerEpoch + a.beaconChainCfg.SlotsPerEpoch - 1 + stateProgress, err := state_accessors.GetStateProcessingProgress(tx) + if err != nil { + return nil, err + } + if lastSlot > stateProgress { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "requested range is not yet processed or the node is not archival") + } + validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, lastSlot) + if err != nil { + return nil, err + } + + _, previousIdx, err := a.stateReader.ReadPartecipations(tx, lastSlot) + if err != nil { + return nil, err + } + _, _, finalizedCheckpoint, err := state_accessors.ReadCheckpoints(tx, epoch*a.beaconChainCfg.SlotsPerEpoch) + if err != nil { + return nil, err + } + inactivityScores := solid.NewUint64ListSSZ(int(a.beaconChainCfg.ValidatorRegistryLimit)) + if err := a.stateReader.ReconstructUint64ListDump(tx, lastSlot, kv.InactivityScores, validatorSet.Length(), inactivityScores); err != nil { + return nil, err + } + return
a.computeAttestationsRewardsForAltair( + validatorSet, + inactivityScores, + previousIdx, + a.isInactivityLeaking(epoch, finalizedCheckpoint), + filterIndicies, + epoch) +} + +func (a *ApiHandler) isInactivityLeaking(epoch uint64, finalityCheckpoint solid.Checkpoint) bool { + prevEpoch := epoch + if epoch > 0 { + prevEpoch = epoch - 1 + } + return prevEpoch-finalityCheckpoint.Epoch() > a.beaconChainCfg.MinEpochsToInactivityPenalty +} + +func (a *ApiHandler) baseReward(version clparams.StateVersion, effectiveBalance, activeBalanceRoot uint64) uint64 { + basePerIncrement := a.beaconChainCfg.EffectiveBalanceIncrement * a.beaconChainCfg.BaseRewardFactor / activeBalanceRoot + if version != clparams.Phase0Version { + return (effectiveBalance / a.beaconChainCfg.EffectiveBalanceIncrement) * basePerIncrement + } + return effectiveBalance * a.beaconChainCfg.BaseRewardFactor / activeBalanceRoot / a.beaconChainCfg.BaseRewardsPerEpoch +} + +func (a *ApiHandler) computeAttestationsRewardsForAltair(validatorSet *solid.ValidatorSet, inactivityScores solid.Uint64ListSSZ, previousParticipation *solid.BitList, inactivityLeak bool, filterIndicies []uint64, epoch uint64) (*beaconhttp.BeaconResponse, error) { + totalActiveBalance := uint64(0) + flagsUnslashedIndiciesSet := statechange.GetUnslashedIndiciesSet(a.beaconChainCfg, epoch, validatorSet, previousParticipation) + weights := a.beaconChainCfg.ParticipationWeights() + flagsTotalBalances := make([]uint64, len(weights)) + + prevEpoch := uint64(0) + if epoch > 0 { + prevEpoch = epoch - 1 + } + + validatorSet.Range(func(validatorIndex int, v solid.Validator, l int) bool { + if v.Active(epoch) { + totalActiveBalance += v.EffectiveBalance() + } + + for i := range weights { + if flagsUnslashedIndiciesSet[i][validatorIndex] { + flagsTotalBalances[i] += v.EffectiveBalance() + } + } + return true + }) + version := a.beaconChainCfg.GetCurrentStateVersion(epoch) + inactivityPenaltyDenominator := a.beaconChainCfg.InactivityScoreBias * a.beaconChainCfg.GetPenaltyQuotient(version) + rewardMultipliers := make([]uint64, len(weights)) + for i := range weights { + rewardMultipliers[i] = weights[i] * (flagsTotalBalances[i] / a.beaconChainCfg.EffectiveBalanceIncrement) + } + + rewardDenominator := (totalActiveBalance / a.beaconChainCfg.EffectiveBalanceIncrement) * a.beaconChainCfg.WeightDenominator + var response *attestationsRewardsResponse + if len(filterIndicies) > 0 { + response = &attestationsRewardsResponse{ + IdealRewards: make([]IdealReward, 0, len(filterIndicies)), + TotalRewards: make([]TotalReward, 0, len(filterIndicies)), + } + } else { + response = &attestationsRewardsResponse{ + IdealRewards: make([]IdealReward, 0, validatorSet.Length()), + TotalRewards: make([]TotalReward, 0, validatorSet.Length()), + } + } + // precompute the integer square root of the total active balance for the base reward + totalActiveBalanceSqrt := utils.IntegerSquareRoot(totalActiveBalance) + + fn := func(index uint64, v solid.Validator) error { + effectiveBalance := v.EffectiveBalance() + baseReward := a.baseReward(version, effectiveBalance, totalActiveBalanceSqrt) + // not eligible for rewards? then leave all fields empty + if !(v.Active(prevEpoch) || (v.Slashed() && prevEpoch+1 < v.WithdrawableEpoch())) { + response.IdealRewards = append(response.IdealRewards, IdealReward{EffectiveBalance: int64(effectiveBalance)}) + response.TotalRewards = append(response.TotalRewards, TotalReward{ValidatorIndex: int64(index)}) + return nil + } + idealReward := IdealReward{EffectiveBalance: int64(effectiveBalance)} + totalReward := TotalReward{ValidatorIndex: int64(index)} + if !inactivityLeak { + idealReward.Head = int64(baseReward * rewardMultipliers[a.beaconChainCfg.TimelyHeadFlagIndex] / rewardDenominator) + idealReward.Target = int64(baseReward * rewardMultipliers[a.beaconChainCfg.TimelyTargetFlagIndex] / rewardDenominator) + idealReward.Source = int64(baseReward * rewardMultipliers[a.beaconChainCfg.TimelySourceFlagIndex] / rewardDenominator) + } + // Note: for altair, we don't have the inclusion delay; it is always 0. + for flagIdx := range weights { + if flagsUnslashedIndiciesSet[flagIdx][index] { + if flagIdx == int(a.beaconChainCfg.TimelyHeadFlagIndex) { + totalReward.Head = idealReward.Head + } else if flagIdx == int(a.beaconChainCfg.TimelyTargetFlagIndex) { + totalReward.Target = idealReward.Target + } else if flagIdx == int(a.beaconChainCfg.TimelySourceFlagIndex) { + totalReward.Source = idealReward.Source + } + } else if flagIdx != int(a.beaconChainCfg.TimelyHeadFlagIndex) { + // missing the head flag carries no penalty, so only target and source can go negative here + down := -int64(baseReward * weights[flagIdx] / a.beaconChainCfg.WeightDenominator) + if flagIdx == int(a.beaconChainCfg.TimelyTargetFlagIndex) { + totalReward.Target = down + } else if flagIdx == int(a.beaconChainCfg.TimelySourceFlagIndex) { + totalReward.Source = down + } + } + } + if !flagsUnslashedIndiciesSet[a.beaconChainCfg.TimelyTargetFlagIndex][index] { + inactivityScore := inactivityScores.Get(int(index)) + totalReward.Inactivity = -int64((effectiveBalance * inactivityScore) / inactivityPenaltyDenominator) + } + response.IdealRewards = append(response.IdealRewards, idealReward) + response.TotalRewards = append(response.TotalRewards, totalReward) + return nil + } + + if len(filterIndicies) > 0 { + for _, index := range filterIndicies { + if err := fn(index, validatorSet.Get(int(index))); err != nil { + return nil, err + } + } + } else { + for index := uint64(0); index < uint64(validatorSet.Length()); index++ { + if err := fn(index, validatorSet.Get(int(index))); err != nil { + return nil, err + } + } + } + return newBeaconResponse(response), nil +} + +// computeAttestationsRewardsForPhase0 processes rewards and penalties for a phase0 state.
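+// Per the phase0 spec, each eligible attester can earn up to base_reward * matching_balance_increments / total_balance_increments +// for each of source, target and head, loses one base_reward for every component it missed, and splits the inclusion reward with +// the block proposer: the proposer takes base_reward / PROPOSER_REWARD_QUOTIENT and the attester keeps the remainder scaled by +// 1 / inclusion_delay. The function below mirrors that bookkeeping for a single historical epoch.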
+func (a *ApiHandler) computeAttestationsRewardsForPhase0(s *state.CachingBeaconState, filterIndicies []uint64, epoch uint64) (*beaconhttp.BeaconResponse, error) { + response := &attestationsRewardsResponse{} + beaconConfig := s.BeaconConfig() + if epoch == beaconConfig.GenesisEpoch { + return newBeaconResponse(response), nil + } + prevEpoch := uint64(0) + if epoch > 0 { + prevEpoch = epoch - 1 + } + if len(filterIndicies) > 0 { + response = &attestationsRewardsResponse{ + IdealRewards: make([]IdealReward, 0, len(filterIndicies)), + TotalRewards: make([]TotalReward, 0, len(filterIndicies)), + } + } else { + response = &attestationsRewardsResponse{ + IdealRewards: make([]IdealReward, 0, s.ValidatorLength()), + TotalRewards: make([]TotalReward, 0, s.ValidatorLength()), + } + } + + inactivityLeak := state.InactivityLeaking(s) + rewardDenominator := s.GetTotalActiveBalance() / beaconConfig.EffectiveBalanceIncrement + var unslashedMatchingSourceBalanceIncrements, unslashedMatchingTargetBalanceIncrements, unslashedMatchingHeadBalanceIncrements uint64 + var err error + s.ForEachValidator(func(validator solid.Validator, idx, total int) bool { + if validator.Slashed() { + return true + } + var previousMatchingSourceAttester, previousMatchingTargetAttester, previousMatchingHeadAttester bool + + if previousMatchingSourceAttester, err = s.ValidatorIsPreviousMatchingSourceAttester(idx); err != nil { + return false + } + if previousMatchingTargetAttester, err = s.ValidatorIsPreviousMatchingTargetAttester(idx); err != nil { + return false + } + if previousMatchingHeadAttester, err = s.ValidatorIsPreviousMatchingHeadAttester(idx); err != nil { + return false + } + if previousMatchingSourceAttester { + unslashedMatchingSourceBalanceIncrements += validator.EffectiveBalance() + } + if previousMatchingTargetAttester { + unslashedMatchingTargetBalanceIncrements += validator.EffectiveBalance() + } + if previousMatchingHeadAttester { + unslashedMatchingHeadBalanceIncrements += validator.EffectiveBalance() + } + return true + }) + if err != nil { + return nil, err + } + // Then compute their total increment. 
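+ // Dividing by EFFECTIVE_BALANCE_INCREMENT (1 ETH on mainnet) turns the summed effective balances into increment counts, + // mirroring the spec's normalization and keeping the reward fractions below in pure integer arithmetic.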
+ unslashedMatchingSourceBalanceIncrements /= beaconConfig.EffectiveBalanceIncrement + unslashedMatchingTargetBalanceIncrements /= beaconConfig.EffectiveBalanceIncrement + unslashedMatchingHeadBalanceIncrements /= beaconConfig.EffectiveBalanceIncrement + fn := func(index uint64, currentValidator solid.Validator) error { + baseReward, err := s.BaseReward(index) + if err != nil { + return err + } + var previousMatchingSourceAttester, previousMatchingTargetAttester, previousMatchingHeadAttester bool + + if previousMatchingSourceAttester, err = s.ValidatorIsPreviousMatchingSourceAttester(int(index)); err != nil { + return err + } + if previousMatchingTargetAttester, err = s.ValidatorIsPreviousMatchingTargetAttester(int(index)); err != nil { + return err + } + if previousMatchingHeadAttester, err = s.ValidatorIsPreviousMatchingHeadAttester(int(index)); err != nil { + return err + } + totalReward := TotalReward{ValidatorIndex: int64(index)} + idealReward := IdealReward{EffectiveBalance: int64(currentValidator.EffectiveBalance())} + + // check inclusion delay + if !currentValidator.Slashed() && previousMatchingSourceAttester { + var attestation *solid.PendingAttestation + if attestation, err = s.ValidatorMinPreviousInclusionDelayAttestation(int(index)); err != nil { + return err + } + proposerReward := (baseReward / beaconConfig.ProposerRewardQuotient) + maxAttesterReward := baseReward - proposerReward + idealReward.InclusionDelay = int64(maxAttesterReward / attestation.InclusionDelay()) + totalReward.InclusionDelay = idealReward.InclusionDelay + } + // if it is not eligible for rewards, then do not continue further + if !(currentValidator.Active(prevEpoch) || (currentValidator.Slashed() && prevEpoch+1 < currentValidator.WithdrawableEpoch())) { + response.IdealRewards = append(response.IdealRewards, idealReward) + response.TotalRewards = append(response.TotalRewards, totalReward) + return nil + } + if inactivityLeak { + idealReward.Source = int64(baseReward) + idealReward.Target = int64(baseReward) + idealReward.Head = int64(baseReward) + } else { + idealReward.Source = int64(baseReward * unslashedMatchingSourceBalanceIncrements / rewardDenominator) + idealReward.Target = int64(baseReward * unslashedMatchingTargetBalanceIncrements / rewardDenominator) + idealReward.Head = int64(baseReward * unslashedMatchingHeadBalanceIncrements / rewardDenominator) + } + // we can use a multiplier to account for all attesting + var attested, missed uint64 + if currentValidator.Slashed() { + missed = 3 + } else { + if previousMatchingSourceAttester { + attested++ + totalReward.Source = idealReward.Source + } + if previousMatchingTargetAttester { + attested++ + totalReward.Target = idealReward.Target + } + if previousMatchingHeadAttester { + attested++ + totalReward.Head = idealReward.Head + } + missed = 3 - attested + } + // process inactivities + if inactivityLeak { + proposerReward := baseReward / beaconConfig.ProposerRewardQuotient + totalReward.Inactivity = -int64(beaconConfig.BaseRewardsPerEpoch*baseReward - proposerReward) + if currentValidator.Slashed() || !previousMatchingTargetAttester { + totalReward.Inactivity -= int64(currentValidator.EffectiveBalance() * state.FinalityDelay(s) / beaconConfig.InactivityPenaltyQuotient) + } + } + totalReward.Inactivity -= int64(baseReward * missed) + response.IdealRewards = append(response.IdealRewards, idealReward) + response.TotalRewards = append(response.TotalRewards, totalReward) + return nil + } + if len(filterIndicies) > 0 { + for _, index := range filterIndicies 
{ + v, err := s.ValidatorForValidatorIndex(int(index)) + if err != nil { + return nil, err + } + if err := fn(index, v); err != nil { + return nil, err + } + } + } else { + for index := uint64(0); index < uint64(s.ValidatorLength()); index++ { + v, err := s.ValidatorForValidatorIndex(int(index)) + if err != nil { + return nil, err + } + if err := fn(index, v); err != nil { + return nil, err + } + } + } + return newBeaconResponse(response), nil +} diff --git a/cl/beacon/handler/blocks.go b/cl/beacon/handler/blocks.go index 8f0a274e43e..2fd7d38eebf 100644 --- a/cl/beacon/handler/blocks.go +++ b/cl/beacon/handler/blocks.go @@ -19,22 +19,22 @@ type headerResponse struct { } type getHeadersRequest struct { - Slot *uint64 `json:"slot,omitempty"` + Slot *uint64 `json:"slot,omitempty,string"` ParentRoot *libcommon.Hash `json:"root,omitempty"` } -func (a *ApiHandler) rootFromBlockId(ctx context.Context, tx kv.Tx, blockId *segmentID) (root libcommon.Hash, err error) { +func (a *ApiHandler) rootFromBlockId(ctx context.Context, tx kv.Tx, blockId *beaconhttp.SegmentID) (root libcommon.Hash, err error) { switch { - case blockId.head(): + case blockId.Head(): root, _, err = a.forkchoiceStore.GetHead() if err != nil { return libcommon.Hash{}, err } - case blockId.finalized(): + case blockId.Finalized(): root = a.forkchoiceStore.FinalizedCheckpoint().BlockRoot() - case blockId.justified(): + case blockId.Justified(): root = a.forkchoiceStore.JustifiedCheckpoint().BlockRoot() - case blockId.genesis(): + case blockId.Genesis(): root, err = beacon_indicies.ReadCanonicalBlockRoot(tx, 0) if err != nil { return libcommon.Hash{}, err @@ -42,24 +42,24 @@ func (a *ApiHandler) rootFromBlockId(ctx context.Context, tx kv.Tx, blockId *seg if root == (libcommon.Hash{}) { return libcommon.Hash{}, beaconhttp.NewEndpointError(http.StatusNotFound, "genesis block not found") } - case blockId.getSlot() != nil: - root, err = beacon_indicies.ReadCanonicalBlockRoot(tx, *blockId.getSlot()) + case blockId.GetSlot() != nil: + root, err = beacon_indicies.ReadCanonicalBlockRoot(tx, *blockId.GetSlot()) if err != nil { return libcommon.Hash{}, err } if root == (libcommon.Hash{}) { - return libcommon.Hash{}, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("block not found %d", *blockId.getSlot())) + return libcommon.Hash{}, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("block not found %d", *blockId.GetSlot())) } - case blockId.getRoot() != nil: + case blockId.GetRoot() != nil: // first check if it exists - root = *blockId.getRoot() + root = *blockId.GetRoot() default: return libcommon.Hash{}, beaconhttp.NewEndpointError(http.StatusInternalServerError, "cannot parse block id") } return } -func (a *ApiHandler) getBlock(r *http.Request) (*beaconResponse, error) { +func (a *ApiHandler) getBlock(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { ctx := r.Context() tx, err := a.indiciesDB.BeginRo(ctx) if err != nil { @@ -67,7 +67,7 @@ func (a *ApiHandler) getBlock(r *http.Request) (*beaconResponse, error) { } defer tx.Rollback() - blockId, err := blockIdFromRequest(r) + blockId, err := beaconhttp.BlockIdFromRequest(r) if err != nil { return nil, err } @@ -90,11 +90,11 @@ func (a *ApiHandler) getBlock(r *http.Request) (*beaconResponse, error) { return nil, err } return newBeaconResponse(blk). - withFinalized(root == canonicalRoot && blk.Block.Slot <= a.forkchoiceStore.FinalizedSlot()). 
- withVersion(blk.Version()), nil + WithFinalized(root == canonicalRoot && blk.Block.Slot <= a.forkchoiceStore.FinalizedSlot()). + WithVersion(blk.Version()), nil } -func (a *ApiHandler) getBlindedBlock(r *http.Request) (*beaconResponse, error) { +func (a *ApiHandler) getBlindedBlock(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { ctx := r.Context() tx, err := a.indiciesDB.BeginRo(ctx) if err != nil { @@ -102,7 +102,7 @@ func (a *ApiHandler) getBlindedBlock(r *http.Request) (*beaconResponse, error) { } defer tx.Rollback() - blockId, err := blockIdFromRequest(r) + blockId, err := beaconhttp.BlockIdFromRequest(r) if err != nil { return nil, err } @@ -129,18 +129,18 @@ func (a *ApiHandler) getBlindedBlock(r *http.Request) (*beaconResponse, error) { return nil, err } return newBeaconResponse(blinded). - withFinalized(root == canonicalRoot && blk.Block.Slot <= a.forkchoiceStore.FinalizedSlot()). - withVersion(blk.Version()), nil + WithFinalized(root == canonicalRoot && blk.Block.Slot <= a.forkchoiceStore.FinalizedSlot()). + WithVersion(blk.Version()), nil } -func (a *ApiHandler) getBlockAttestations(r *http.Request) (*beaconResponse, error) { +func (a *ApiHandler) getBlockAttestations(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { ctx := r.Context() tx, err := a.indiciesDB.BeginRo(ctx) if err != nil { return nil, err } defer tx.Rollback() - blockId, err := blockIdFromRequest(r) + blockId, err := beaconhttp.BlockIdFromRequest(r) if err != nil { return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) } @@ -160,18 +160,19 @@ func (a *ApiHandler) getBlockAttestations(r *http.Request) (*beaconResponse, err if err != nil { return nil, err } - return newBeaconResponse(blk.Block.Body.Attestations).withFinalized(root == canonicalRoot && blk.Block.Slot <= a.forkchoiceStore.FinalizedSlot()). - withVersion(blk.Version()), nil + return newBeaconResponse(blk.Block.Body.Attestations). + WithFinalized(root == canonicalRoot && blk.Block.Slot <= a.forkchoiceStore.FinalizedSlot()). 
+ WithVersion(blk.Version()), nil } -func (a *ApiHandler) getBlockRoot(r *http.Request) (*beaconResponse, error) { +func (a *ApiHandler) getBlockRoot(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { ctx := r.Context() tx, err := a.indiciesDB.BeginRo(ctx) if err != nil { return nil, err } defer tx.Rollback() - blockId, err := blockIdFromRequest(r) + blockId, err := beaconhttp.BlockIdFromRequest(r) if err != nil { return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) } @@ -193,5 +194,7 @@ func (a *ApiHandler) getBlockRoot(r *http.Request) (*beaconResponse, error) { if err != nil { return nil, err } - return newBeaconResponse(struct{ Root libcommon.Hash }{Root: root}).withFinalized(canonicalRoot == root && *slot <= a.forkchoiceStore.FinalizedSlot()), nil + return newBeaconResponse(struct { + Root libcommon.Hash `json:"root"` + }{Root: root}).WithFinalized(canonicalRoot == root && *slot <= a.forkchoiceStore.FinalizedSlot()), nil } diff --git a/cl/beacon/handler/builder.go b/cl/beacon/handler/builder.go new file mode 100644 index 00000000000..94166e021c0 --- /dev/null +++ b/cl/beacon/handler/builder.go @@ -0,0 +1,69 @@ +package handler + +import ( + "net/http" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies" + "github.com/ledgerwatch/erigon/cl/phase1/core/state" +) + +func (a *ApiHandler) GetEth1V1BuilderStatesExpectedWit(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { + ctx := r.Context() + + tx, err := a.indiciesDB.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + blockId, err := beaconhttp.StateIdFromRequest(r) + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + root, httpStatus, err := a.blockRootFromStateId(ctx, tx, blockId) + if err != nil { + return nil, beaconhttp.NewEndpointError(httpStatus, err.Error()) + } + slot, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, root) + if err != nil { + return nil, err + } + if slot == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "state not found") + } + if a.beaconChainCfg.GetCurrentStateVersion(*slot/a.beaconChainCfg.SlotsPerEpoch) < clparams.CapellaVersion { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, "the specified state is not a capella state") + } + headRoot, _, err := a.forkchoiceStore.GetHead() + if err != nil { + return nil, err + } + if root == headRoot { + s, cn := a.syncedData.HeadState() + defer cn() + return newBeaconResponse(state.ExpectedWithdrawals(s)).WithFinalized(false), nil + } + lookAhead := 1024 + for currSlot := *slot + 1; currSlot < *slot+uint64(lookAhead); currSlot++ { + if currSlot > a.syncedData.HeadSlot() { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "state not found") + } + blockRoot, err := beacon_indicies.ReadCanonicalBlockRoot(tx, currSlot) + if err != nil { + return nil, err + } + if blockRoot == (libcommon.Hash{}) { + continue + } + blk, err := a.blockReader.ReadBlockByRoot(ctx, tx, blockRoot) + if err != nil { + return nil, err + } + return newBeaconResponse(blk.Block.Body.ExecutionPayload.Withdrawals).WithFinalized(false), nil + } + + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "state not found") +} diff --git a/cl/beacon/handler/committees.go b/cl/beacon/handler/committees.go new file mode 100644 index 
00000000000..c7e527fbe76 --- /dev/null +++ b/cl/beacon/handler/committees.go @@ -0,0 +1,147 @@ +package handler + +import ( + "fmt" + "net/http" + "strconv" + + "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" + "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies" + state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state" + "github.com/ledgerwatch/erigon/cl/phase1/core/state" +) + +type committeeResponse struct { + Index uint64 `json:"index,string"` + Slot uint64 `json:"slot,string"` + Validators []string `json:"validators"` // serialized as strings, but each entry is still a base-10 validator index +} + +func (a *ApiHandler) getCommittees(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { + ctx := r.Context() + + epochReq, err := beaconhttp.Uint64FromQueryParams(r, "epoch") + if err != nil { + return nil, err + } + + index, err := beaconhttp.Uint64FromQueryParams(r, "index") + if err != nil { + return nil, err + } + + slotFilter, err := beaconhttp.Uint64FromQueryParams(r, "slot") + if err != nil { + return nil, err + } + + tx, err := a.indiciesDB.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + blockId, err := beaconhttp.StateIdFromRequest(r) + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + + blockRoot, httpStatus, err := a.blockRootFromStateId(ctx, tx, blockId) + if err != nil { + return nil, beaconhttp.NewEndpointError(httpStatus, err.Error()) + } + + slotPtr, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, blockRoot) + if err != nil { + return nil, err + } + if slotPtr == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not read block slot: %x", blockRoot)) + } + slot := *slotPtr + epoch := slot / a.beaconChainCfg.SlotsPerEpoch + if epochReq != nil { + epoch = *epochReq + } + // check if the filter (if any) is in the epoch + if slotFilter != nil && !(epoch*a.beaconChainCfg.SlotsPerEpoch <= *slotFilter && *slotFilter < (epoch+1)*a.beaconChainCfg.SlotsPerEpoch) { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Sprintf("slot %d is not in epoch %d", *slotFilter, epoch)) + } + resp := make([]*committeeResponse, 0, a.beaconChainCfg.SlotsPerEpoch*a.beaconChainCfg.MaxCommitteesPerSlot) + isFinalized := slot <= a.forkchoiceStore.FinalizedSlot() + if a.forkchoiceStore.LowestAvaiableSlot() <= slot { + // non-finality case + s, cn := a.syncedData.HeadState() + defer cn() + if s == nil { + return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, "node is syncing") + } + if epoch > state.Epoch(s)+1 { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Sprintf("epoch %d is too far in the future", epoch)) + } + // compute the per-slot committee count for the epoch from the head state + committeeCount := s.CommitteeCount(epoch) + // now start obtaining the committees from the head state + for currSlot := epoch * a.beaconChainCfg.SlotsPerEpoch; currSlot < (epoch+1)*a.beaconChainCfg.SlotsPerEpoch; currSlot++ { + if slotFilter != nil && currSlot != *slotFilter { + continue + } + for committeeIndex := uint64(0); committeeIndex < committeeCount; committeeIndex++ { + if index != nil && committeeIndex != *index { + continue + } + data := &committeeResponse{Index: committeeIndex, Slot: currSlot} + idxs, err := s.GetBeaconCommitee(currSlot, committeeIndex) + if err != nil { + return nil, err + } + for _, idx := range idxs { + data.Validators = append(data.Validators, strconv.FormatUint(idx, 10)) + } + resp = append(resp, data) + } + } + return
newBeaconResponse(resp).WithFinalized(isFinalized), nil + } + // finality case + activeIdxs, err := state_accessors.ReadActiveIndicies(tx, epoch*a.beaconChainCfg.SlotsPerEpoch) + if err != nil { + return nil, err + } + + committeesPerSlot := uint64(len(activeIdxs)) / a.beaconChainCfg.SlotsPerEpoch / a.beaconChainCfg.TargetCommitteeSize + if a.beaconChainCfg.MaxCommitteesPerSlot < committeesPerSlot { + committeesPerSlot = a.beaconChainCfg.MaxCommitteesPerSlot + } + if committeesPerSlot < 1 { + committeesPerSlot = 1 + } + + mixPosition := (epoch + a.beaconChainCfg.EpochsPerHistoricalVector - a.beaconChainCfg.MinSeedLookahead - 1) % a.beaconChainCfg.EpochsPerHistoricalVector + mix, err := a.stateReader.ReadRandaoMixBySlotAndIndex(tx, epoch*a.beaconChainCfg.SlotsPerEpoch, mixPosition) + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not read randao mix: %v", err)) + } + + for currSlot := epoch * a.beaconChainCfg.SlotsPerEpoch; currSlot < (epoch+1)*a.beaconChainCfg.SlotsPerEpoch; currSlot++ { + if slotFilter != nil && currSlot != *slotFilter { + continue + } + for committeeIndex := uint64(0); committeeIndex < committeesPerSlot; committeeIndex++ { + if index != nil && committeeIndex != *index { + continue + } + data := &committeeResponse{Index: committeeIndex, Slot: currSlot} + index := (currSlot%a.beaconChainCfg.SlotsPerEpoch)*committeesPerSlot + committeeIndex + committeeCount := committeesPerSlot * a.beaconChainCfg.SlotsPerEpoch + idxs, err := a.stateReader.ComputeCommittee(mix, activeIdxs, currSlot, committeeCount, index) + if err != nil { + return nil, err + } + for _, idx := range idxs { + data.Validators = append(data.Validators, strconv.FormatUint(idx, 10)) + } + resp = append(resp, data) + } + } + return newBeaconResponse(resp).WithFinalized(isFinalized), nil +} diff --git a/cl/beacon/handler/config.go b/cl/beacon/handler/config.go index b0e8972c2d8..8a0914e769a 100644 --- a/cl/beacon/handler/config.go +++ b/cl/beacon/handler/config.go @@ -6,23 +6,23 @@ import ( "sort" libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" "github.com/ledgerwatch/erigon/cl/cltypes" ) -func (a *ApiHandler) getSpec(r *http.Request) (*beaconResponse, error) { +func (a *ApiHandler) getSpec(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { return newBeaconResponse(a.beaconChainCfg), nil } -func (a *ApiHandler) getDepositContract(r *http.Request) (*beaconResponse, error) { - +func (a *ApiHandler) getDepositContract(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { return newBeaconResponse(struct { - ChainId uint64 `json:"chain_id"` + ChainId uint64 `json:"chain_id,string"` DepositContract string `json:"address"` }{ChainId: a.beaconChainCfg.DepositChainID, DepositContract: a.beaconChainCfg.DepositContractAddress}), nil } -func (a *ApiHandler) getForkSchedule(r *http.Request) (*beaconResponse, error) { +func (a *ApiHandler) getForkSchedule(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { response := []cltypes.Fork{} // create first response (unordered and incomplete) for currentVersion, epoch := range a.beaconChainCfg.ForkVersionSchedule { diff --git a/cl/beacon/handler/data_test.go b/cl/beacon/handler/data_test.go new file mode 100644 index 00000000000..3b4276e4c8e --- /dev/null +++ b/cl/beacon/handler/data_test.go @@ -0,0 +1,98 @@ +package handler_test + +import ( + "embed" + "math" + "os" + "strings" + "testing" + + 
"github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/cl/beacon/beacontest" + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/cl/phase1/forkchoice" + "github.com/ledgerwatch/log/v3" + "github.com/spf13/afero" + "github.com/stretchr/testify/require" +) + +//go:embed test_data/* +var testData embed.FS + +var TestDatae = beacontest.NewBasePathFs(afero.FromIOFS{FS: testData}, "test_data") + +//go:embed harness/* +var testHarness embed.FS + +var Harnesses = beacontest.NewBasePathFs(afero.FromIOFS{FS: testHarness}, "harness") + +type harnessConfig struct { + t *testing.T + v clparams.StateVersion + finalized bool + forkmode int +} + +func defaultHarnessOpts(c harnessConfig) []beacontest.HarnessOption { + logger := log.New() + for _, v := range os.Args { + if !strings.Contains(v, "test.v") || strings.Contains(v, "test.v=false") { + logger.SetHandler(log.DiscardHandler()) + } + } + _, blocks, _, _, postState, handler, _, sm, fcu := setupTestingHandler(c.t, c.v, logger) + var err error + + if c.forkmode == 0 { + fcu.HeadVal, err = blocks[len(blocks)-1].Block.HashSSZ() + require.NoError(c.t, err) + fcu.HeadSlotVal = blocks[len(blocks)-1].Block.Slot + + fcu.JustifiedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, fcu.HeadSlotVal/32) + if c.finalized { + fcu.FinalizedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, fcu.HeadSlotVal/32) + fcu.FinalizedSlotVal = math.MaxUint64 + } else { + fcu.FinalizedCheckpointVal = solid.NewCheckpointFromParameters(fcu.HeadVal, fcu.HeadSlotVal/32) + fcu.FinalizedSlotVal = 0 + fcu.StateAtBlockRootVal[fcu.HeadVal] = postState + require.NoError(c.t, sm.OnHeadState(postState)) + } + } + + if c.forkmode == 1 { + sm.OnHeadState(postState) + s, cancel := sm.HeadState() + s.SetSlot(789274827847783) + cancel() + + fcu.HeadSlotVal = 128 + fcu.HeadVal = common.Hash{1, 2, 3} + + fcu.WeightsMock = []forkchoice.ForkNode{ + { + BlockRoot: common.Hash{1, 2, 3}, + ParentRoot: common.Hash{1, 2, 3}, + Slot: 128, + Weight: 1, + }, + { + BlockRoot: common.Hash{1, 2, 2, 4, 5, 3}, + ParentRoot: common.Hash{1, 2, 5}, + Slot: 128, + Weight: 2, + }, + } + + fcu.FinalizedCheckpointVal = solid.NewCheckpointFromParameters(common.Hash{1, 2, 3}, 1) + fcu.JustifiedCheckpointVal = solid.NewCheckpointFromParameters(common.Hash{1, 2, 3}, 2) + + } + + return []beacontest.HarnessOption{ + beacontest.WithTesting(c.t), + beacontest.WithFilesystem("td", TestDatae), + beacontest.WithHandler("i", handler), + } +} diff --git a/cl/beacon/handler/duties_attester.go b/cl/beacon/handler/duties_attester.go new file mode 100644 index 00000000000..3fb6a3f1609 --- /dev/null +++ b/cl/beacon/handler/duties_attester.go @@ -0,0 +1,163 @@ +package handler + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" + state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state" + "github.com/ledgerwatch/erigon/cl/phase1/core/state" +) + +type attesterDutyResponse struct { + Pubkey libcommon.Bytes48 `json:"pubkey"` + ValidatorIndex uint64 `json:"validator_index,string"` + CommitteeIndex uint64 `json:"committee_index,string"` + CommitteeLength uint64 `json:"committee_length,string"` + ValidatorCommitteeIndex uint64 `json:"validator_committee_index,string"` + CommitteesAtSlot uint64 `json:"committees_at_slot,string"` + Slot uint64 `json:"slot,string"` +} + +func (a *ApiHandler) 
getAttesterDuties(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { + epoch, err := beaconhttp.EpochFromRequest(r) + if err != nil { + return nil, err + } + + var idxsStr []string + if err := json.NewDecoder(r.Body).Decode(&idxsStr); err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("could not decode request body: %w. request body is required", err).Error()) + } + if len(idxsStr) == 0 { + return newBeaconResponse([]string{}).WithOptimistic(false), nil + } + idxSet := map[int]struct{}{} + // convert the request to uint64 + for _, idxStr := range idxsStr { + + idx, err := strconv.ParseUint(idxStr, 10, 64) + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("could not parse validator index: %w", err).Error()) + } + if _, ok := idxSet[int(idx)]; ok { + continue + } + idxSet[int(idx)] = struct{}{} + } + + tx, err := a.indiciesDB.BeginRo(r.Context()) + if err != nil { + return nil, err + } + defer tx.Rollback() + + resp := []attesterDutyResponse{} + + // get the duties + if a.forkchoiceStore.LowestAvaiableSlot() <= epoch*a.beaconChainCfg.SlotsPerEpoch { + // non-finality case + s, cn := a.syncedData.HeadState() + defer cn() + if s == nil { + return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, "node is syncing") + } + + if epoch > state.Epoch(s)+1 { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Sprintf("epoch %d is too far in the future", epoch)) + } + + // get active validator indicies + committeeCount := s.CommitteeCount(epoch) + // now start obtaining the committees from the head state + for currSlot := epoch * a.beaconChainCfg.SlotsPerEpoch; currSlot < (epoch+1)*a.beaconChainCfg.SlotsPerEpoch; currSlot++ { + for committeeIndex := uint64(0); committeeIndex < committeeCount; committeeIndex++ { + idxs, err := s.GetBeaconCommitee(currSlot, committeeIndex) + if err != nil { + return nil, err + } + for vIdx, idx := range idxs { + if _, ok := idxSet[int(idx)]; !ok { + continue + } + publicKey, err := s.ValidatorPublicKey(int(idx)) + if err != nil { + return nil, err + } + duty := attesterDutyResponse{ + Pubkey: publicKey, + ValidatorIndex: idx, + CommitteeIndex: committeeIndex, + CommitteeLength: uint64(len(idxs)), + ValidatorCommitteeIndex: uint64(vIdx), + CommitteesAtSlot: committeeCount, + Slot: currSlot, + } + resp = append(resp, duty) + } + } + } + return newBeaconResponse(resp).WithOptimistic(false), nil + } + + stageStateProgress, err := state_accessors.GetStateProcessingProgress(tx) + if err != nil { + return nil, err + } + if (epoch)*a.beaconChainCfg.SlotsPerEpoch >= stageStateProgress { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Sprintf("epoch %d is too far in the future", epoch)) + } + // finality case + activeIdxs, err := state_accessors.ReadActiveIndicies(tx, epoch*a.beaconChainCfg.SlotsPerEpoch) + if err != nil { + return nil, err + } + + committeesPerSlot := uint64(len(activeIdxs)) / a.beaconChainCfg.SlotsPerEpoch / a.beaconChainCfg.TargetCommitteeSize + if a.beaconChainCfg.MaxCommitteesPerSlot < committeesPerSlot { + committeesPerSlot = a.beaconChainCfg.MaxCommitteesPerSlot + } + if committeesPerSlot < 1 { + committeesPerSlot = 1 + } + + mixPosition := (epoch + a.beaconChainCfg.EpochsPerHistoricalVector - a.beaconChainCfg.MinSeedLookahead - 1) % a.beaconChainCfg.EpochsPerHistoricalVector + mix, err := a.stateReader.ReadRandaoMixBySlotAndIndex(tx, epoch*a.beaconChainCfg.SlotsPerEpoch, mixPosition) + if err != 
nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not read randao mix: %v", err)) + } + + for currSlot := epoch * a.beaconChainCfg.SlotsPerEpoch; currSlot < (epoch+1)*a.beaconChainCfg.SlotsPerEpoch; currSlot++ { + for committeeIndex := uint64(0); committeeIndex < committeesPerSlot; committeeIndex++ { + index := (currSlot%a.beaconChainCfg.SlotsPerEpoch)*committeesPerSlot + committeeIndex + committeeCount := committeesPerSlot * a.beaconChainCfg.SlotsPerEpoch + idxs, err := a.stateReader.ComputeCommittee(mix, activeIdxs, currSlot, committeeCount, index) + if err != nil { + return nil, err + } + for vIdx, idx := range idxs { + if _, ok := idxSet[int(idx)]; !ok { + continue + } + publicKey, err := state_accessors.ReadPublicKeyByIndex(tx, idx) + if err != nil { + return nil, err + } + duty := attesterDutyResponse{ + Pubkey: publicKey, + ValidatorIndex: idx, + CommitteeIndex: committeeIndex, + CommitteeLength: uint64(len(idxs)), + ValidatorCommitteeIndex: uint64(vIdx), + CommitteesAtSlot: committeesPerSlot, + Slot: currSlot, + } + resp = append(resp, duty) + } + } + } + return newBeaconResponse(resp).WithOptimistic(false), nil +} diff --git a/cl/beacon/handler/duties_proposer.go b/cl/beacon/handler/duties_proposer.go index 609a8292c41..95fd2dfb648 100644 --- a/cl/beacon/handler/duties_proposer.go +++ b/cl/beacon/handler/duties_proposer.go @@ -7,33 +7,62 @@ import ( "sync" "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" + "github.com/ledgerwatch/erigon/cl/persistence/base_encoding" + state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state" shuffling2 "github.com/ledgerwatch/erigon/cl/phase1/core/state/shuffling" libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv" ) type proposerDuties struct { Pubkey libcommon.Bytes48 `json:"pubkey"` - ValidatorIndex uint64 `json:"validator_index"` - Slot uint64 `json:"slot"` + ValidatorIndex uint64 `json:"validator_index,string"` + Slot uint64 `json:"slot,string"` } -func (a *ApiHandler) getDutiesProposer(r *http.Request) (*beaconResponse, error) { - - epoch, err := epochFromRequest(r) +func (a *ApiHandler) getDutiesProposer(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { + epoch, err := beaconhttp.EpochFromRequest(r) if err != nil { return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) } if epoch < a.forkchoiceStore.FinalizedCheckpoint().Epoch() { - return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, "invalid epoch") + tx, err := a.indiciesDB.BeginRo(r.Context()) + if err != nil { + return nil, err + } + defer tx.Rollback() + key := base_encoding.Encode64ToBytes4(epoch) + indiciesBytes, err := tx.GetOne(kv.Proposers, key) + if err != nil { + return nil, err + } + if len(indiciesBytes) != int(a.beaconChainCfg.SlotsPerEpoch*4) { + return nil, beaconhttp.NewEndpointError(http.StatusInternalServerError, "proposer duties is corrupted") + } + duties := make([]proposerDuties, a.beaconChainCfg.SlotsPerEpoch) + for i := uint64(0); i < a.beaconChainCfg.SlotsPerEpoch; i++ { + validatorIndex := binary.BigEndian.Uint32(indiciesBytes[i*4 : i*4+4]) + var pk libcommon.Bytes48 + pk, err := state_accessors.ReadPublicKeyByIndex(tx, uint64(validatorIndex)) + if err != nil { + return nil, err + } + duties[i] = proposerDuties{ + Pubkey: pk, + ValidatorIndex: uint64(validatorIndex), + Slot: epoch*a.beaconChainCfg.SlotsPerEpoch + i, + } + } + return 
newBeaconResponse(duties).WithFinalized(true).WithVersion(a.beaconChainCfg.GetCurrentStateVersion(epoch)), nil } // We need to compute our duties state, cancel := a.syncedData.HeadState() defer cancel() if state == nil { - return nil, beaconhttp.NewEndpointError(http.StatusInternalServerError, "beacon node is syncing") + return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, "beacon node is syncing") } @@ -89,6 +118,5 @@ func (a *ApiHandler) getDutiesProposer(r *http.Request) (*beaconResponse, error) } wg.Wait() - return newBeaconResponse(duties).withFinalized(false).withVersion(a.beaconChainCfg.GetCurrentStateVersion(epoch)), nil - + return newBeaconResponse(duties).WithFinalized(false).WithVersion(a.beaconChainCfg.GetCurrentStateVersion(epoch)), nil } diff --git a/cl/beacon/handler/duties_sync.go b/cl/beacon/handler/duties_sync.go new file mode 100644 index 00000000000..154b7d40282 --- /dev/null +++ b/cl/beacon/handler/duties_sync.go @@ -0,0 +1,146 @@ +package handler + +import ( + "encoding/json" + "fmt" + "net/http" + "sort" + "strconv" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies" + state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state" +) + +type syncDutyResponse struct { + Pubkey libcommon.Bytes48 `json:"pubkey"` + ValidatorIndex uint64 `json:"validator_index,string"` + ValidatorSyncCommitteeIndicies []string `json:"validator_sync_committee_indicies"` +} + +func (a *ApiHandler) getSyncDuties(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { + epoch, err := beaconhttp.EpochFromRequest(r) + if err != nil { + return nil, err + } + + // compute the sync committee period + period := epoch / a.beaconChainCfg.EpochsPerSyncCommitteePeriod + + var idxsStr []string + if err := json.NewDecoder(r.Body).Decode(&idxsStr); err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("could not decode request body: %w. 
request body is required.", err).Error()) + } + if len(idxsStr) == 0 { + return newBeaconResponse([]string{}).WithOptimistic(false), nil + } + duplicates := map[int]struct{}{} + // convert the request to uint64 + idxs := make([]uint64, 0, len(idxsStr)) + for _, idxStr := range idxsStr { + + idx, err := strconv.ParseUint(idxStr, 10, 64) + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("could not parse validator index: %w", err).Error()) + } + if _, ok := duplicates[int(idx)]; ok { + continue + } + idxs = append(idxs, idx) + duplicates[int(idx)] = struct{}{} + } + + tx, err := a.indiciesDB.BeginRo(r.Context()) + if err != nil { + return nil, err + } + defer tx.Rollback() + + // Try to find a slot in the epoch or close to it + referenceSlot := ((epoch + 1) * a.beaconChainCfg.SlotsPerEpoch) - 1 + + // Walk back from the last slot of the epoch until we find a slot with a canonical block root + var referenceRoot libcommon.Hash + for referenceRoot == (libcommon.Hash{}) { + referenceRoot, err = beacon_indicies.ReadCanonicalBlockRoot(tx, referenceSlot) + if err != nil { + return nil, err + } + if referenceRoot == (libcommon.Hash{}) { + referenceSlot-- + } + } + referencePeriod := (referenceSlot / a.beaconChainCfg.SlotsPerEpoch) / a.beaconChainCfg.EpochsPerSyncCommitteePeriod + // Now try reading the sync committee + currentSyncCommittee, nextSyncCommittee, ok := a.forkchoiceStore.GetSyncCommittees(referenceRoot) + if !ok { + roundedSlotToPeriod := a.beaconChainCfg.RoundSlotToSyncCommitteePeriod(referenceSlot) + switch { + case referencePeriod == period: + currentSyncCommittee, err = state_accessors.ReadCurrentSyncCommittee(tx, roundedSlotToPeriod) + case referencePeriod+1 == period: + nextSyncCommittee, err = state_accessors.ReadNextSyncCommittee(tx, roundedSlotToPeriod) + default: + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not find sync committee for epoch %d", epoch)) + } + if err != nil { + return nil, err + } + } + var syncCommittee *solid.SyncCommittee + // Determine which one to use. TODO(Giulio2002): Make this less redundant.
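+ // A sync committee for period P is exposed as "current" by states in period P and as "next" by states in period P-1, + // so the reference block can only answer duties for its own period or the one immediately after; anything else has to + // fall through to the not-found error below.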
+ switch { + case referencePeriod == period: + syncCommittee = currentSyncCommittee + case referencePeriod+1 == period: + syncCommittee = nextSyncCommittee + default: + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not find sync committee for epoch %d", epoch)) + } + if syncCommittee == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not find sync committee for epoch %d", epoch)) + } + // Now we have the sync committee, we can initialize our response set + dutiesSet := map[uint64]*syncDutyResponse{} + for _, idx := range idxs { + publicKey, err := state_accessors.ReadPublicKeyByIndex(tx, idx) + if err != nil { + return nil, err + } + if publicKey == (libcommon.Bytes48{}) { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not find validator with index %d", idx)) + } + dutiesSet[idx] = &syncDutyResponse{ + Pubkey: publicKey, + ValidatorIndex: idx, + } + } + // Now we can iterate over the sync committee and fill the response + for idx, committeePartecipantPublicKey := range syncCommittee.GetCommittee() { + committeePartecipantIndex, ok, err := state_accessors.ReadValidatorIndexByPublicKey(tx, committeePartecipantPublicKey) + if err != nil { + return nil, err + } + if !ok { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not find validator with public key %x", committeePartecipantPublicKey)) + } + if _, ok := dutiesSet[committeePartecipantIndex]; !ok { + continue + } + dutiesSet[committeePartecipantIndex].ValidatorSyncCommitteeIndicies = append( + dutiesSet[committeePartecipantIndex].ValidatorSyncCommitteeIndicies, + strconv.FormatUint(uint64(idx), 10)) + } + // Now we can convert the map to a slice + duties := make([]*syncDutyResponse, 0, len(dutiesSet)) + for _, duty := range dutiesSet { + if len(duty.ValidatorSyncCommitteeIndicies) == 0 { + continue + } + duties = append(duties, duty) + } + sort.Slice(duties, func(i, j int) bool { + return duties[i].ValidatorIndex < duties[j].ValidatorIndex + }) + + return newBeaconResponse(duties).WithOptimistic(false), nil +} diff --git a/cl/beacon/handler/forkchoice.go b/cl/beacon/handler/forkchoice.go new file mode 100644 index 00000000000..e8ceea1f655 --- /dev/null +++ b/cl/beacon/handler/forkchoice.go @@ -0,0 +1,39 @@ +package handler + +import ( + "encoding/json" + "net/http" + + "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" +) + +func (a *ApiHandler) GetEthV2DebugBeaconHeads(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { + if a.syncedData.Syncing() { + return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, "beacon node is syncing") + } + hash, slotNumber, err := a.forkchoiceStore.GetHead() + if err != nil { + return nil, err + } + return newBeaconResponse( + []interface{}{ + map[string]interface{}{ + "slot": slotNumber, + "root": hash, + "execution_optimistic": false, + }, + }), nil +} + +func (a *ApiHandler) GetEthV1DebugBeaconForkChoice(w http.ResponseWriter, r *http.Request) { + justifiedCheckpoint := a.forkchoiceStore.JustifiedCheckpoint() + finalizedCheckpoint := a.forkchoiceStore.FinalizedCheckpoint() + forkNodes := a.forkchoiceStore.ForkNodes() + if err := json.NewEncoder(w).Encode(map[string]interface{}{ + "justified_checkpoint": justifiedCheckpoint, + "finalized_checkpoint": finalizedCheckpoint, + "fork_choice_nodes": forkNodes, + }); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} diff --git 
a/cl/beacon/handler/format.go b/cl/beacon/handler/format.go index 9f2d5682a28..bb071a0b2fc 100644 --- a/cl/beacon/handler/format.go +++ b/cl/beacon/handler/format.go @@ -1,262 +1,9 @@ package handler import ( - "fmt" - "net/http" - "regexp" - "strconv" - - "github.com/go-chi/chi/v5" - libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon-lib/types/ssz" "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" - "github.com/ledgerwatch/erigon/cl/clparams" ) -type apiError struct { - code int - err error -} - -type beaconResponse struct { - Data any `json:"data,omitempty"` - Finalized *bool `json:"finalized,omitempty"` - Version *clparams.StateVersion `json:"version,omitempty"` - ExecutionOptimistic *bool `json:"execution_optimistic,omitempty"` -} - -func (b *beaconResponse) EncodeSSZ(xs []byte) ([]byte, error) { - marshaler, ok := b.Data.(ssz.Marshaler) - if !ok { - return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, "This endpoint does not support SSZ response") - } - encoded, err := marshaler.EncodeSSZ(nil) - if err != nil { - return nil, err - } - return encoded, nil -} - -func (b *beaconResponse) EncodingSizeSSZ() int { - marshaler, ok := b.Data.(ssz.Marshaler) - if !ok { - return 9 - } - return marshaler.EncodingSizeSSZ() -} - -func newBeaconResponse(data any) *beaconResponse { - return &beaconResponse{ - Data: data, - } -} - -func (r *beaconResponse) withFinalized(finalized bool) (out *beaconResponse) { - out = new(beaconResponse) - *out = *r - out.Finalized = new(bool) - out.ExecutionOptimistic = new(bool) - out.Finalized = &finalized - return out -} - -func (r *beaconResponse) withVersion(version clparams.StateVersion) (out *beaconResponse) { - out = new(beaconResponse) - *out = *r - out.Version = new(clparams.StateVersion) - out.Version = &version - return out -} - -//// In case of it being a json we need to also expose finalization, version, etc... 
-//type beaconHandlerFn func(r *http.Request) *beaconResponse -// -//func beaconHandlerWrapper(fn beaconHandlerFn, supportSSZ bool) func(w http.ResponseWriter, r *http.Request) { -// return func(w http.ResponseWriter, r *http.Request) { -// accept := r.Header.Get("Accept") -// isSSZ := !strings.Contains(accept, "application/json") && strings.Contains(accept, "application/stream-octect") -// start := time.Now() -// defer func() { -// log.Debug("[Beacon API] finished", "method", r.Method, "path", r.URL.Path, "duration", time.Since(start)) -// }() -// -// resp := fn(r) -// if resp.internalError != nil { -// http.Error(w, resp.internalError.Error(), http.StatusInternalServerError) -// log.Debug("[Beacon API] failed", "method", r.Method, "err", resp.internalError.Error(), "ssz", isSSZ) -// return -// } -// -// if resp.apiError != nil { -// http.Error(w, resp.apiError.err.Error(), resp.apiError.code) -// log.Debug("[Beacon API] failed", "method", r.Method, "err", resp.apiError.err.Error(), "ssz", isSSZ) -// return -// } -// -// if isSSZ && supportSSZ { -// data := resp.Data -// // SSZ encoding -// encoded, err := data.(ssz.Marshaler).EncodeSSZ(nil) -// if err != nil { -// http.Error(w, err.Error(), http.StatusInternalServerError) -// log.Debug("[Beacon API] failed", "method", r.Method, "err", err, "accepted", accept) -// return -// } -// w.Header().Set("Content-Type", "application/octet-stream") -// w.Write(encoded) -// return -// } -// w.Header().Set("Content-Type", "application/json") -// if err := json.NewEncoder(w).Encode(resp); err != nil { -// log.Warn("[Beacon API] failed", "method", r.Method, "err", err, "ssz", isSSZ) -// } -// } -//} - -type chainTag int - -var ( - Head chainTag = 0 - Finalized chainTag = 1 - Justified chainTag = 2 - Genesis chainTag = 3 -) - -// Represent either state id or block id -type segmentID struct { - tag chainTag - slot *uint64 - root *libcommon.Hash -} - -func (c *segmentID) head() bool { - return c.tag == Head && c.slot == nil && c.root == nil -} - -func (c *segmentID) finalized() bool { - return c.tag == Finalized -} - -func (c *segmentID) justified() bool { - return c.tag == Justified -} - -func (c *segmentID) genesis() bool { - return c.tag == Genesis -} - -func (c *segmentID) getSlot() *uint64 { - return c.slot -} - -func (c *segmentID) getRoot() *libcommon.Hash { - return c.root -} - -func epochFromRequest(r *http.Request) (uint64, error) { - // Must only be a number - regex := regexp.MustCompile(`^\d+$`) - epoch := chi.URLParam(r, "epoch") - if !regex.MatchString(epoch) { - return 0, fmt.Errorf("invalid path variable: {epoch}") - } - epochMaybe, err := strconv.ParseUint(epoch, 10, 64) - if err != nil { - return 0, err - } - return epochMaybe, nil -} - -func blockIdFromRequest(r *http.Request) (*segmentID, error) { - regex := regexp.MustCompile(`^(?:0x[0-9a-fA-F]{64}|head|finalized|genesis|\d+)$`) - - blockId := chi.URLParam(r, "block_id") - if !regex.MatchString(blockId) { - return nil, fmt.Errorf("invalid path variable: {block_id}") - } - - if blockId == "head" { - return &segmentID{tag: Head}, nil - } - if blockId == "finalized" { - return &segmentID{tag: Finalized}, nil - } - if blockId == "genesis" { - return &segmentID{tag: Genesis}, nil - } - slotMaybe, err := strconv.ParseUint(blockId, 10, 64) - if err == nil { - return &segmentID{slot: &slotMaybe}, nil - } - root := libcommon.HexToHash(blockId) - return &segmentID{ - root: &root, - }, nil -} - -func stateIdFromRequest(r *http.Request) (*segmentID, error) { - regex := 
regexp.MustCompile(`^(?:0x[0-9a-fA-F]{64}|head|finalized|genesis|justified|\d+)$`) - - stateId := chi.URLParam(r, "state_id") - if !regex.MatchString(stateId) { - return nil, fmt.Errorf("invalid path variable: {block_id}") - } - - if stateId == "head" { - return &segmentID{tag: Head}, nil - } - if stateId == "finalized" { - return &segmentID{tag: Finalized}, nil - } - if stateId == "genesis" { - return &segmentID{tag: Genesis}, nil - } - if stateId == "justified" { - return &segmentID{tag: Justified}, nil - } - slotMaybe, err := strconv.ParseUint(stateId, 10, 64) - if err == nil { - return &segmentID{slot: &slotMaybe}, nil - } - root := libcommon.HexToHash(stateId) - return &segmentID{ - root: &root, - }, nil -} - -func hashFromQueryParams(r *http.Request, name string) (*libcommon.Hash, error) { - hashStr := r.URL.Query().Get(name) - if hashStr == "" { - return nil, nil - } - // check if hashstr is an hex string - if len(hashStr) != 2+2*32 { - return nil, fmt.Errorf("invalid hash length") - } - if hashStr[:2] != "0x" { - return nil, fmt.Errorf("invalid hash prefix") - } - notHex, err := regexp.MatchString("[^0-9A-Fa-f]", hashStr[2:]) - if err != nil { - return nil, err - } - if notHex { - return nil, fmt.Errorf("invalid hash characters") - } - - hash := libcommon.HexToHash(hashStr) - return &hash, nil -} - -// uint64FromQueryParams retrieves a number from the query params, in base 10. -func uint64FromQueryParams(r *http.Request, name string) (*uint64, error) { - str := r.URL.Query().Get(name) - if str == "" { - return nil, nil - } - num, err := strconv.ParseUint(str, 10, 64) - if err != nil { - return nil, err - } - return &num, nil +func newBeaconResponse(data any) *beaconhttp.BeaconResponse { + return beaconhttp.NewBeaconResponse(data) } diff --git a/cl/beacon/handler/genesis.go b/cl/beacon/handler/genesis.go index 05af01dd8b5..562c65429d2 100644 --- a/cl/beacon/handler/genesis.go +++ b/cl/beacon/handler/genesis.go @@ -10,12 +10,12 @@ import ( ) type genesisResponse struct { - GenesisTime uint64 `json:"genesis_time,omitempty"` - GenesisValidatorRoot common.Hash `json:"genesis_validator_root,omitempty"` - GenesisForkVersion libcommon.Bytes4 `json:"genesis_fork_version,omitempty"` + GenesisTime uint64 `json:"genesis_time,string"` + GenesisValidatorRoot common.Hash `json:"genesis_validators_root"` + GenesisForkVersion libcommon.Bytes4 `json:"genesis_fork_version"` } -func (a *ApiHandler) getGenesis(r *http.Request) (*beaconResponse, error) { +func (a *ApiHandler) getGenesis(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { if a.genesisCfg == nil { return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "Genesis Config is missing") } diff --git a/cl/beacon/handler/handler.go b/cl/beacon/handler/handler.go index 36416e7bd06..8328efdacff 100644 --- a/cl/beacon/handler/handler.go +++ b/cl/beacon/handler/handler.go @@ -5,10 +5,12 @@ import ( "sync" "github.com/go-chi/chi/v5" + "github.com/ledgerwatch/erigon-lib/gointerfaces/sentinel" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" "github.com/ledgerwatch/erigon/cl/beacon/synced_data" "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" "github.com/ledgerwatch/erigon/cl/persistence" "github.com/ledgerwatch/erigon/cl/persistence/state/historical_states_reader" "github.com/ledgerwatch/erigon/cl/phase1/forkchoice" @@ -28,12 +30,25 @@ type ApiHandler struct { operationsPool pool.OperationsPool syncedData 
*synced_data.SyncedDataManager stateReader *historical_states_reader.HistoricalStatesReader + sentinel sentinel.SentinelClient + + version string // Node's version + + // pools + randaoMixesPool sync.Pool } -func NewApiHandler(genesisConfig *clparams.GenesisConfig, beaconChainConfig *clparams.BeaconChainConfig, source persistence.RawBeaconBlockChain, indiciesDB kv.RoDB, forkchoiceStore forkchoice.ForkChoiceStorage, operationsPool pool.OperationsPool, rcsn freezeblocks.BeaconSnapshotReader, syncedData *synced_data.SyncedDataManager, stateReader *historical_states_reader.HistoricalStatesReader) *ApiHandler { - return &ApiHandler{o: sync.Once{}, genesisCfg: genesisConfig, beaconChainCfg: beaconChainConfig, indiciesDB: indiciesDB, forkchoiceStore: forkchoiceStore, operationsPool: operationsPool, blockReader: rcsn, syncedData: syncedData, stateReader: stateReader} +func NewApiHandler(genesisConfig *clparams.GenesisConfig, beaconChainConfig *clparams.BeaconChainConfig, source persistence.RawBeaconBlockChain, indiciesDB kv.RoDB, forkchoiceStore forkchoice.ForkChoiceStorage, operationsPool pool.OperationsPool, rcsn freezeblocks.BeaconSnapshotReader, syncedData *synced_data.SyncedDataManager, stateReader *historical_states_reader.HistoricalStatesReader, sentinel sentinel.SentinelClient, version string) *ApiHandler { + return &ApiHandler{o: sync.Once{}, genesisCfg: genesisConfig, beaconChainCfg: beaconChainConfig, indiciesDB: indiciesDB, forkchoiceStore: forkchoiceStore, operationsPool: operationsPool, blockReader: rcsn, syncedData: syncedData, stateReader: stateReader, randaoMixesPool: sync.Pool{New: func() interface{} { + return solid.NewHashVector(int(beaconChainConfig.EpochsPerHistoricalVector)) + }}, sentinel: sentinel, version: version} } +func (a *ApiHandler) Init() { + a.o.Do(func() { + a.init() + }) +} func (a *ApiHandler) init() { r := chi.NewRouter() a.mux = r @@ -41,18 +56,28 @@ func (a *ApiHandler) init() { // otterscn specific ones are commented as such r.Route("/eth", func(r chi.Router) { r.Route("/v1", func(r chi.Router) { + r.Get("/builder/states/{state_id}/expected_withdrawals", beaconhttp.HandleEndpointFunc(a.GetEth1V1BuilderStatesExpectedWit)) r.Get("/events", http.NotFound) + r.Route("/node", func(r chi.Router) { + r.Get("/health", a.GetEthV1NodeHealth) + r.Get("/version", a.GetEthV1NodeVersion) + }) + r.Get("/debug/fork_choice", a.GetEthV1DebugBeaconForkChoice) r.Route("/config", func(r chi.Router) { r.Get("/spec", beaconhttp.HandleEndpointFunc(a.getSpec)) r.Get("/deposit_contract", beaconhttp.HandleEndpointFunc(a.getDepositContract)) r.Get("/fork_schedule", beaconhttp.HandleEndpointFunc(a.getForkSchedule)) }) r.Route("/beacon", func(r chi.Router) { - // r.Route("/headers", func(r chi.Router) { - // r.Get("/", beaconhttp.HandleEndpointFunc(a.getHeaders)) - // r.Get("/{block_id}", beaconhttp.HandleEndpointFunc(a.getHeader)) - // }) - r.Get("/headers", beaconhttp.HandleEndpointFunc(a.getHeaders)) + r.Route("/rewards", func(r chi.Router) { + r.Post("/sync_committee/{block_id}", beaconhttp.HandleEndpointFunc(a.getSyncCommitteesRewards)) + r.Get("/blocks/{block_id}", beaconhttp.HandleEndpointFunc(a.getBlockRewards)) + r.Post("/attestations/{epoch}", beaconhttp.HandleEndpointFunc(a.getAttestationsRewards)) + }) + r.Route("/headers", func(r chi.Router) { + r.Get("/", beaconhttp.HandleEndpointFunc(a.getHeaders)) + r.Get("/{block_id}", beaconhttp.HandleEndpointFunc(a.getHeader)) + }) r.Route("/blocks", func(r chi.Router) { r.Post("/", http.NotFound) r.Get("/{block_id}", 
beaconhttp.HandleEndpointFunc(a.getBlock)) @@ -62,53 +87,61 @@ func (a *ApiHandler) init() { r.Get("/genesis", beaconhttp.HandleEndpointFunc(a.getGenesis)) r.Get("/blinded_blocks/{block_id}", beaconhttp.HandleEndpointFunc(a.getBlindedBlock)) r.Route("/pool", func(r chi.Router) { - r.Post("/attestations", http.NotFound) - r.Get("/voluntary_exits", beaconhttp.HandleEndpointFunc(a.poolVoluntaryExits)) - r.Get("/attester_slashings", beaconhttp.HandleEndpointFunc(a.poolAttesterSlashings)) - r.Get("/proposer_slashings", beaconhttp.HandleEndpointFunc(a.poolProposerSlashings)) - r.Get("/bls_to_execution_changes", beaconhttp.HandleEndpointFunc(a.poolBlsToExecutionChanges)) - r.Get("/attestations", beaconhttp.HandleEndpointFunc(a.poolAttestations)) - r.Post("/sync_committees", http.NotFound) + r.Get("/voluntary_exits", beaconhttp.HandleEndpointFunc(a.GetEthV1BeaconPoolVoluntaryExits)) + r.Post("/voluntary_exits", a.PostEthV1BeaconPoolVoluntaryExits) + r.Get("/attester_slashings", beaconhttp.HandleEndpointFunc(a.GetEthV1BeaconPoolAttesterSlashings)) + r.Post("/attester_slashings", a.PostEthV1BeaconPoolAttesterSlashings) + r.Get("/proposer_slashings", beaconhttp.HandleEndpointFunc(a.GetEthV1BeaconPoolProposerSlashings)) + r.Post("/proposer_slashings", a.PostEthV1BeaconPoolProposerSlashings) + r.Get("/bls_to_execution_changes", beaconhttp.HandleEndpointFunc(a.GetEthV1BeaconPoolBLSExecutionChanges)) + r.Post("/bls_to_execution_changes", a.PostEthV1BeaconPoolBlsToExecutionChanges) + r.Get("/attestations", beaconhttp.HandleEndpointFunc(a.GetEthV1BeaconPoolAttestations)) + r.Post("/attestations", http.NotFound) // TODO + r.Post("/sync_committees", http.NotFound) // TODO }) r.Get("/node/syncing", http.NotFound) r.Route("/states", func(r chi.Router) { - r.Get("/head/validators/{index}", http.NotFound) // otterscan - r.Get("/head/committees", http.NotFound) // otterscan r.Route("/{state_id}", func(r chi.Router) { + r.Get("/randao", beaconhttp.HandleEndpointFunc(a.getRandao)) + r.Get("/committees", beaconhttp.HandleEndpointFunc(a.getCommittees)) r.Get("/sync_committees", beaconhttp.HandleEndpointFunc(a.getSyncCommittees)) // otterscan r.Get("/finality_checkpoints", beaconhttp.HandleEndpointFunc(a.getFinalityCheckpoints)) r.Get("/validators", http.NotFound) r.Get("/root", beaconhttp.HandleEndpointFunc(a.getStateRoot)) r.Get("/fork", beaconhttp.HandleEndpointFunc(a.getStateFork)) - r.Get("/validators/{id}", http.NotFound) + r.Get("/validators", beaconhttp.HandleEndpointFunc(a.getAllValidators)) + r.Get("/validator_balances", beaconhttp.HandleEndpointFunc(a.getAllValidatorsBalances)) + r.Get("/validators/{validator_id}", beaconhttp.HandleEndpointFunc(a.getSingleValidator)) }) }) }) r.Route("/validator", func(r chi.Router) { r.Route("/duties", func(r chi.Router) { - r.Post("/attester/{epoch}", http.NotFound) + r.Post("/attester/{epoch}", beaconhttp.HandleEndpointFunc(a.getAttesterDuties)) r.Get("/proposer/{epoch}", beaconhttp.HandleEndpointFunc(a.getDutiesProposer)) - r.Post("/sync/{epoch}", http.NotFound) + r.Post("/sync/{epoch}", beaconhttp.HandleEndpointFunc(a.getSyncDuties)) }) r.Get("/blinded_blocks/{slot}", http.NotFound) r.Get("/attestation_data", http.NotFound) r.Get("/aggregate_attestation", http.NotFound) - r.Post("/aggregate_and_proofs", http.NotFound) + r.Post("/aggregate_and_proofs", a.PostEthV1ValidatorAggregatesAndProof) r.Post("/beacon_committee_subscriptions", http.NotFound) r.Post("/sync_committee_subscriptions", http.NotFound) r.Get("/sync_committee_contribution", http.NotFound) 
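Note for reviewers: several validator endpoints in this route table are now backed by real handlers (attester and sync duties, liveness, aggregates and proofs), all following the Beacon API convention of POSTing a JSON array of validator indices encoded as strings. A sketch of a client call against the liveness route wired below (baseURL and the epoch are placeholders; imports bytes, encoding/json and net/http are assumed):

    // sketch only: query liveness for three validator indices
    indices, err := json.Marshal([]string{"0", "1", "2"})
    if err != nil {
        panic(err)
    }
    resp, err := http.Post(baseURL+"/eth/v1/validator/liveness/260", "application/json", bytes.NewReader(indices))
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    // response body: {"data":[{"index":"0","is_live":true}, ...]}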
r.Post("/contribution_and_proofs", http.NotFound) r.Post("/prepare_beacon_proposer", http.NotFound) + r.Post("/liveness/{epoch}", beaconhttp.HandleEndpointFunc(a.liveness)) }) }) r.Route("/v2", func(r chi.Router) { r.Route("/debug", func(r chi.Router) { r.Route("/beacon", func(r chi.Router) { r.Get("/states/{state_id}", beaconhttp.HandleEndpointFunc(a.getFullState)) + r.Get("/heads", beaconhttp.HandleEndpointFunc(a.GetEthV2DebugBeaconHeads)) }) }) r.Route("/beacon", func(r chi.Router) { - r.Get("/blocks/{block_id}", beaconhttp.HandleEndpointFunc(a.getBlock)) //otterscan + r.Get("/blocks/{block_id}", beaconhttp.HandleEndpointFunc(a.getBlock)) }) r.Route("/validator", func(r chi.Router) { r.Post("/blocks/{slot}", http.NotFound) diff --git a/cl/beacon/handler/harness/attestation_rewards_bellatrix.yml b/cl/beacon/handler/harness/attestation_rewards_bellatrix.yml new file mode 100644 index 00000000000..66603735457 --- /dev/null +++ b/cl/beacon/handler/harness/attestation_rewards_bellatrix.yml @@ -0,0 +1,31 @@ +vars: + finalized_epoch: "99999999" + justified_slot: "160" + justified_epoch: "4" +tests: + ## blocks + - name: all validators + expect: + file: "attestations_1" + fs: td + actual: + handler: i + method: post + path: /eth/v1/beacon/rewards/attestations/{{.Vars.justified_epoch}} + - name: two validators + expect: + file: "attestations_2" + fs: td + actual: + handler: i + method: post + path: /eth/v1/beacon/rewards/attestations/{{.Vars.justified_epoch}} + body: + data: ["1","4"] + - name: not found + actual: + handler: i + method: post + path: /eth/v1/beacon/rewards/attestations/{{.Vars.finalized_epoch}} + compare: + expr: "actual_code == 404" diff --git a/cl/beacon/handler/harness/attestation_rewards_phase0.yml b/cl/beacon/handler/harness/attestation_rewards_phase0.yml new file mode 100644 index 00000000000..9ce844054c8 --- /dev/null +++ b/cl/beacon/handler/harness/attestation_rewards_phase0.yml @@ -0,0 +1,33 @@ +vars: + head_hash: '0xeffdd8ef40c3c901f0724d48e04ce257967cf1da31929f3b6db614f89ef8d660' + bad_hash: '0xbeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeef' + finalized_epoch: "99999999" + justified_slot: "8322" + justified_epoch: "259" +tests: + ## blocks + - name: all validators + expect: + file: "attestations_3" + fs: td + actual: + handler: i + method: post + path: /eth/v1/beacon/rewards/attestations/{{.Vars.justified_epoch}} + - name: two validators + expect: + file: "attestations_4" + fs: td + actual: + handler: i + path: /eth/v1/beacon/rewards/attestations/{{.Vars.justified_epoch}} + method: post + body: + data: ["1","4"] + - name: not found + actual: + handler: i + method: post + path: /eth/v1/beacon/rewards/attestations/{{.Vars.finalized_epoch}} + compare: + expr: "actual_code == 404" diff --git a/cl/beacon/handler/harness/blocks.yml b/cl/beacon/handler/harness/blocks.yml new file mode 100644 index 00000000000..0961b45a3fe --- /dev/null +++ b/cl/beacon/handler/harness/blocks.yml @@ -0,0 +1,100 @@ +vars: + head_hash: '0xeffdd8ef40c3c901f0724d48e04ce257967cf1da31929f3b6db614f89ef8d660' + bad_hash: '0xbeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeef' +tests: + ## blocks + - name: by hash + expect: + file: "block_1" + fs: td + actual: + handler: i + path: /eth/v2/beacon/blocks/{{.Vars.head_hash}} + - name: by head + expect: + file: "block_1" + fs: td + compare: + exprs: + - actual_code == 200 + - actual == expect + actual: + handler: i + path: /eth/v2/beacon/blocks/head + - name: not found + actual: + handler: i + path: 
/eth/v2/beacon/blocks/{{.Vars.bad_hash}}
+    compare:
+      expr: "actual_code == 404"
+  ## blinded blocks
+  - name: blinded by hash
+    expect:
+      file: "blinded_block_1"
+      fs: td
+    actual:
+      handler: i
+      path: /eth/v1/beacon/blinded_blocks/{{.Vars.head_hash}}
+  - name: blinded by head
+    expect:
+      file: "blinded_block_1"
+      fs: td
+    actual:
+      handler: i
+      path: /eth/v1/beacon/blinded_blocks/head
+  - name: blinded not found
+    actual:
+      handler: i
+      path: /eth/v1/beacon/blinded_blocks/{{.Vars.bad_hash}}
+    compare:
+      expr: "actual_code == 404"
+  ### attestations
+  - name: attestations by hash
+    expect:
+      file: "block_1"
+      fs: td
+    actual:
+      handler: i
+      path: /eth/v1/beacon/blocks/{{.Vars.head_hash}}/attestations
+    compare:
+      expr: "size(actual.data) == size(expect.data.message.body.attestations)"
+  - name: attestations by head
+    expect:
+      file: "block_1"
+      fs: td
+    actual:
+      handler: i
+      path: /eth/v1/beacon/blocks/head/attestations
+    compare:
+      exprs:
+        - actual_code == 200
+        - size(actual.data) == size(expect.data.message.body.attestations)
+  - name: attestations not found
+    actual:
+      handler: i
+      path: /eth/v1/beacon/blocks/{{.Vars.bad_hash}}/attestations
+    compare:
+      expr: "actual_code == 404"
+  ### root
+  - name: root by hash
+    actual:
+      handler: i
+      path: /eth/v1/beacon/blocks/{{.Vars.head_hash}}/root
+    compare:
+      exprs:
+        - actual_code == 200
+        - actual.data.root == "{{.Vars.head_hash}}"
+  - name: root by head
+    actual:
+      handler: i
+      path: /eth/v1/beacon/blocks/head/root
+    compare:
+      exprs:
+        - actual_code == 200
+        - actual.data.root == "{{.Vars.head_hash}}"
+  - name: root not found
+    actual:
+      handler: i
+      path: /eth/v1/beacon/blocks/19912929/root
+    compare:
+      expr: "actual_code == 404"
diff --git a/cl/beacon/handler/harness/committees.yml b/cl/beacon/handler/harness/committees.yml
new file mode 100644
index 00000000000..268ef20ed1c
--- /dev/null
+++ b/cl/beacon/handler/harness/committees.yml
@@ -0,0 +1,55 @@
+vars:
+  head_hash: '0xeffdd8ef40c3c901f0724d48e04ce257967cf1da31929f3b6db614f89ef8d660'
+  bad_hash: '0xbeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeef'
+  post_root: '0x933d6650f2999f17012e781f5012981edb549e5935de1c981fce81cdd241d4e1'
+  head_slot: 8322
+  head_epoch: "260"
+tests:
+  - name: slot non antiquated
+    expect:
+      file: "committees_1"
+      fs: td
+    actual:
+      handler: i
+      path: /eth/v1/beacon/states/{{.Vars.post_root}}/committees
+      query:
+        slot: "8322"
+    compare:
+      exprs:
+        - "actual_code == 200"
+        - "expect[3] == actual"
+  - name: empty index non antiquated
+    expect:
+      file: "committees_1"
+      fs: td
+    actual:
+      handler: i
+      path: /eth/v1/beacon/states/{{.Vars.post_root}}/committees
+      query:
+        index: "1"
+    compare:
+      exprs:
+        - "actual_code == 200"
+        - "expect[4] == actual"
+  - name: all queries non antiquated
+    expect:
+      file: "committees_1"
+      fs: td
+    actual:
+      handler: i
+      path: /eth/v1/beacon/states/{{.Vars.post_root}}/committees
+      query:
+        index: "0"
+        slot: "{{sub .Vars.head_slot 32}}"
+        epoch: "{{sub .Vars.head_epoch 1}}"
+    compare:
+      exprs:
+        - "actual_code == 200"
+        - "expect[5] == actual"
+  - name: 404 non antiquated
+    actual:
+      handler: i
+      path: /eth/v1/beacon/states/{{.Vars.bad_hash}}/committees
+    compare:
+      exprs:
+        - "actual_code == 404"
diff --git a/cl/beacon/handler/harness/committees_f.yml b/cl/beacon/handler/harness/committees_f.yml
new file mode 100644
index 00000000000..f71de0b3958
--- /dev/null
+++ b/cl/beacon/handler/harness/committees_f.yml
@@ -0,0 +1,55 @@
+vars:
+  head_hash: '0xeffdd8ef40c3c901f0724d48e04ce257967cf1da31929f3b6db614f89ef8d660'
+ 
bad_hash: '0xbeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeef' + post_root: '0x933d6650f2999f17012e781f5012981edb549e5935de1c981fce81cdd241d4e1' + head_slot: 8322 + head_epoch: "260" +tests: + - name: slot + expect: + file: "committees_1" + fs: td + actual: + handler: i + path: /eth/v1/beacon/states/{{.Vars.post_root}}/committees + query: + slot: "8322" + compare: + exprs: + - "actual_code == 200" + - "expect[0] == actual" + - name: empty index + expect: + file: "committees_1" + fs: td + actual: + handler: i + path: /eth/v1/beacon/states/{{.Vars.post_root}}/committees + query: + index: "1" + compare: + exprs: + - "actual_code == 200" + - "expect[1] == actual" + - name: all queries + expect: + file: "committees_1" + fs: td + actual: + handler: i + path: /eth/v1/beacon/states/{{.Vars.post_root}}/committees + query: + index: "0" + slot: "{{sub .Vars.head_slot 32}}" + epoch: "{{sub .Vars.head_epoch 1}}" + compare: + exprs: + - "actual_code == 200" + - "expect[2] == actual" + - name: "404" + actual: + handler: i + path: /eth/v1/beacon/states/{{.Vars.bad_hash}}/committees + compare: + exprs: + - "actual_code == 404" diff --git a/cl/beacon/handler/harness/config.yml b/cl/beacon/handler/harness/config.yml new file mode 100644 index 00000000000..e9910ba5c6a --- /dev/null +++ b/cl/beacon/handler/harness/config.yml @@ -0,0 +1,32 @@ +tests: + - name: spec + actual: + handler: i + path: /eth/v1/config/spec + compare: + exprs: + - actual_code == 200 + - actual.data.SlotsPerEpoch == 32 + - actual.data.SlotsPerHistoricalRoot == 8192 + - name: fork schedule + actual: + handler: i + path: /eth/v1/config/fork_schedule + compare: + exprs: + - actual_code == 200 + - has(actual.data[0].current_version) + - has(actual.data[0].previous_version) + - has(actual.data[0].epoch) + - has(actual.data[1].current_version) + - has(actual.data[1].previous_version) + - has(actual.data[1].epoch) + - name: deposit contract + actual: + handler: i + path: /eth/v1/config/deposit_contract + compare: + exprs: + - actual_code == 200 + - actual.data.address == "0x00000000219ab540356cBB839Cbe05303d7705Fa" + - actual.data.chain_id == "1" diff --git a/cl/beacon/handler/harness/duties_attester.yml b/cl/beacon/handler/harness/duties_attester.yml new file mode 100644 index 00000000000..840927438ba --- /dev/null +++ b/cl/beacon/handler/harness/duties_attester.yml @@ -0,0 +1,45 @@ +vars: + head_hash: '0xeffdd8ef40c3c901f0724d48e04ce257967cf1da31929f3b6db614f89ef8d660' + bad_hash: '0xbeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeef' + post_root: '0x933d6650f2999f17012e781f5012981edb549e5935de1c981fce81cdd241d4e1' + head_slot: 8322 + head_epoch: "260" +tests: + - name: non empty indices + expect: + file: "duties_1" + fs: td + actual: + handler: i + path: /eth/v1/validator/duties/attester/{{.Vars.head_epoch}} + method: post + body: + data: ["0","1","2","3","4","5","6","7","8","9"] + compare: + exprs: + - "actual_code == 200" + - "expect[0] == actual" + - name: empty index + expect: + file: "duties_1" + fs: td + actual: + handler: i + path: /eth/v1/validator/duties/attester/{{.Vars.head_epoch}} + method: post + body: + data: [] + compare: + exprs: + - "actual_code == 200" + - "expect[1] == actual" + - name: 400 non antiquated + actual: + handler: i + path: /eth/v1/validator/duties/attester/999999999 + method: post + body: + data: ["0","1","2","3","4","5","6","7","8","9"] + compare: + exprs: + - "actual_code == 400" diff --git a/cl/beacon/handler/harness/duties_attester_f.yml 
b/cl/beacon/handler/harness/duties_attester_f.yml new file mode 100644 index 00000000000..840927438ba --- /dev/null +++ b/cl/beacon/handler/harness/duties_attester_f.yml @@ -0,0 +1,45 @@ +vars: + head_hash: '0xeffdd8ef40c3c901f0724d48e04ce257967cf1da31929f3b6db614f89ef8d660' + bad_hash: '0xbeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeef' + post_root: '0x933d6650f2999f17012e781f5012981edb549e5935de1c981fce81cdd241d4e1' + head_slot: 8322 + head_epoch: "260" +tests: + - name: non empty indices + expect: + file: "duties_1" + fs: td + actual: + handler: i + path: /eth/v1/validator/duties/attester/{{.Vars.head_epoch}} + method: post + body: + data: ["0","1","2","3","4","5","6","7","8","9"] + compare: + exprs: + - "actual_code == 200" + - "expect[0] == actual" + - name: empty index + expect: + file: "duties_1" + fs: td + actual: + handler: i + path: /eth/v1/validator/duties/attester/{{.Vars.head_epoch}} + method: post + body: + data: [] + compare: + exprs: + - "actual_code == 200" + - "expect[1] == actual" + - name: 400 non antiquated + actual: + handler: i + path: /eth/v1/validator/duties/attester/999999999 + method: post + body: + data: ["0","1","2","3","4","5","6","7","8","9"] + compare: + exprs: + - "actual_code == 400" diff --git a/cl/beacon/handler/harness/duties_proposer.yml b/cl/beacon/handler/harness/duties_proposer.yml new file mode 100644 index 00000000000..30bded64f3a --- /dev/null +++ b/cl/beacon/handler/harness/duties_proposer.yml @@ -0,0 +1,42 @@ +vars: + head_hash: '0xeffdd8ef40c3c901f0724d48e04ce257967cf1da31929f3b6db614f89ef8d660' + bad_hash: '0xbeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeef' + post_root: '0x933d6650f2999f17012e781f5012981edb549e5935de1c981fce81cdd241d4e1' + head_slot: 8322 + head_epoch: "260" +tests: + - name: proposer duties + actual: + handler: i + path: /eth/v1/validator/duties/proposer/{{.Vars.head_epoch}} + compare: + exprs: + - actual_code == 200 + - size(actual.data) == 32 + - has(actual.data[0].pubkey) + - has(actual.data[0].validator_index) + - has(actual.data[0].slot) + - name: proposer bad epoch + actual: + handler: i + path: /eth/v1/validator/duties/proposer/abc + compare: + expr: "actual_code == 400" + + - name: proposer duties not synced + actual: + handler: i + path: /eth/v1/validator/duties/proposer/1 + compare: + expr: "actual_code == 503" + - name: fcu historical + actual: + handler: i + path: /eth/v1/validator/duties/proposer/{{sub .Vars.head_epoch 1}} + compare: + exprs: + - actual_code == 200 + - size(actual.data) == 32 + - has(actual.data[0].pubkey) + - has(actual.data[0].validator_index) + - has(actual.data[0].slot) diff --git a/cl/beacon/handler/harness/duties_sync_bellatrix.yml b/cl/beacon/handler/harness/duties_sync_bellatrix.yml new file mode 100644 index 00000000000..5d19789235f --- /dev/null +++ b/cl/beacon/handler/harness/duties_sync_bellatrix.yml @@ -0,0 +1,43 @@ +vars: + finalized_epoch: 99999999 + head_slot: 160 + head_epoch: 4 +tests: + - name: non empty indices + expect: + file: "duties_sync_1" + fs: td + actual: + handler: i + path: /eth/v1/validator/duties/sync/{{.Vars.head_epoch}} + method: post + body: + data: ["0","1","2","3","4","5","6","7","8","9"] + compare: + exprs: + - "actual_code == 200" + - "expect[0] == actual" + - name: empty index + expect: + file: "duties_sync_1" + fs: td + actual: + handler: i + path: /eth/v1/validator/duties/sync/{{.Vars.head_epoch}} + method: post + body: + data: [] + compare: + exprs: + - "actual_code == 200" + - "expect[1] == actual" + - name: "404 giant 
epoch" + actual: + handler: i + path: /eth/v1/validator/duties/sync/999999999 + method: post + body: + data: ["0","1","2","3","4","5","6","7","8","9"] + compare: + exprs: + - "actual_code == 404" diff --git a/cl/beacon/handler/harness/fork_choice.yml b/cl/beacon/handler/harness/fork_choice.yml new file mode 100644 index 00000000000..3c1c97b8256 --- /dev/null +++ b/cl/beacon/handler/harness/fork_choice.yml @@ -0,0 +1,24 @@ +vars: +tests: + - name: get fork choice + expect: + file: "forkchoice_1" + fs: td + actual: + handler: i + path: /eth/v2/debug/beacon/heads + compare: + exprs: + - "actual_code == 200" + - "actual == expect[0]" + - name: get heads + expect: + file: "forkchoice_1" + fs: td + actual: + handler: i + path: /eth/v1/debug/fork_choice + compare: + exprs: + - "actual_code == 200" + - "actual == expect[1]" diff --git a/cl/beacon/handler/harness/headers.yml b/cl/beacon/handler/harness/headers.yml new file mode 100644 index 00000000000..38a6a9fbfaa --- /dev/null +++ b/cl/beacon/handler/harness/headers.yml @@ -0,0 +1,38 @@ +vars: + head_hash: '0xeffdd8ef40c3c901f0724d48e04ce257967cf1da31929f3b6db614f89ef8d660' + post_root: '0x933d6650f2999f17012e781f5012981edb549e5935de1c981fce81cdd241d4e1' + bad_hash: '0xbeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeef' + head_slot: 8322 + first_slot: 8288 + head_epoch: "260" + body_root_1: "0x8d07005613673b3684b527f9c4dab5191403177e79b0e0bc1d58f15021abab19" + body_root_2: "0xa6957819a5055b6d760c1b2ec034522cc033a7dd94c743ed936d8f8d0eb5ccce" + block_1_hash: "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" +tests: + - name: not head + actual: + handler: i + path: /eth/v1/beacon/headers/{{.Vars.block_1_hash}} + compare: + exprs: + - actual_code == 200 + - actual.data.canonical == true + - actual.data.header.message.body_root == "{{.Vars.body_root_1}}" + - actual.data.header.message.slot == "{{.Vars.first_slot}}" + - name: head + actual: + handler: i + path: /eth/v1/beacon/headers/head + compare: + exprs: + - actual_code == 200 + - actual.data.canonical == true + - actual.data.header.message.body_root == "{{.Vars.body_root_2}}" + - actual.data.header.message.slot == "{{.Vars.head_slot}}" + - name: not head + actual: + handler: i + path: /eth/v1/beacon/headers/{{.Vars.bad_hash}} + compare: + exprs: + - actual_code == 404 diff --git a/cl/beacon/handler/harness/liveness.yml b/cl/beacon/handler/harness/liveness.yml new file mode 100644 index 00000000000..4d793f7dff9 --- /dev/null +++ b/cl/beacon/handler/harness/liveness.yml @@ -0,0 +1,13 @@ +tests: + - name: spec + actual: + handler: i + path: /eth/v1/validator/liveness/260 + method: post + body: + data: ["0","1","2","3","4","5","6","7","8","9","10"] + compare: + exprs: + - "actual_code==200" + - "size(actual.data) == 11" + - "actual.data.all(x,has(x.is_live) && has(x.index))" diff --git a/cl/beacon/handler/harness_test.go b/cl/beacon/handler/harness_test.go new file mode 100644 index 00000000000..8c0086a09cc --- /dev/null +++ b/cl/beacon/handler/harness_test.go @@ -0,0 +1,53 @@ +package handler_test + +import ( + "testing" + + "github.com/ledgerwatch/erigon/cl/beacon/beacontest" + "github.com/ledgerwatch/erigon/cl/clparams" + + _ "embed" +) + +func TestHarnessPhase0(t *testing.T) { + beacontest.Execute( + append( + defaultHarnessOpts(harnessConfig{t: t, v: clparams.Phase0Version}), + beacontest.WithTestFromFs(Harnesses, "blocks"), + beacontest.WithTestFromFs(Harnesses, "config"), + beacontest.WithTestFromFs(Harnesses, "headers"), + beacontest.WithTestFromFs(Harnesses, 
"attestation_rewards_phase0"), + beacontest.WithTestFromFs(Harnesses, "committees"), + beacontest.WithTestFromFs(Harnesses, "duties_attester"), + beacontest.WithTestFromFs(Harnesses, "duties_proposer"), + )..., + ) +} +func TestHarnessPhase0Finalized(t *testing.T) { + beacontest.Execute( + append( + defaultHarnessOpts(harnessConfig{t: t, v: clparams.Phase0Version, finalized: true}), + beacontest.WithTestFromFs(Harnesses, "liveness"), + beacontest.WithTestFromFs(Harnesses, "duties_attester_f"), + beacontest.WithTestFromFs(Harnesses, "committees_f"), + )..., + ) +} + +func TestHarnessBellatrix(t *testing.T) { + beacontest.Execute( + append( + defaultHarnessOpts(harnessConfig{t: t, v: clparams.BellatrixVersion, finalized: true}), + beacontest.WithTestFromFs(Harnesses, "attestation_rewards_bellatrix"), + beacontest.WithTestFromFs(Harnesses, "duties_sync_bellatrix"), + )..., + ) +} +func TestHarnessForkChoice(t *testing.T) { + beacontest.Execute( + append( + defaultHarnessOpts(harnessConfig{t: t, v: clparams.BellatrixVersion, forkmode: 1}), + beacontest.WithTestFromFs(Harnesses, "fork_choice"), + )..., + ) +} diff --git a/cl/beacon/handler/headers.go b/cl/beacon/handler/headers.go index e6b18607115..43c9a66850f 100644 --- a/cl/beacon/handler/headers.go +++ b/cl/beacon/handler/headers.go @@ -9,14 +9,14 @@ import ( "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies" ) -func (a *ApiHandler) getHeaders(r *http.Request) (*beaconResponse, error) { +func (a *ApiHandler) getHeaders(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { ctx := r.Context() - querySlot, err := uint64FromQueryParams(r, "slot") + querySlot, err := beaconhttp.Uint64FromQueryParams(r, "slot") if err != nil { return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) } - queryParentHash, err := hashFromQueryParams(r, "parent_root") + queryParentHash, err := beaconhttp.HashFromQueryParams(r, "parent_root") if err != nil { return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) } @@ -89,14 +89,14 @@ func (a *ApiHandler) getHeaders(r *http.Request) (*beaconResponse, error) { return newBeaconResponse(headers), nil } -func (a *ApiHandler) getHeader(r *http.Request) (*beaconResponse, error) { +func (a *ApiHandler) getHeader(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { ctx := r.Context() tx, err := a.indiciesDB.BeginRo(ctx) if err != nil { return nil, err } defer tx.Rollback() - blockId, err := blockIdFromRequest(r) + blockId, err := beaconhttp.BlockIdFromRequest(r) if err != nil { return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) } @@ -125,5 +125,5 @@ func (a *ApiHandler) getHeader(r *http.Request) (*beaconResponse, error) { Root: root, Canonical: canonicalRoot == root, Header: signedHeader, - }).withFinalized(canonicalRoot == root && signedHeader.Header.Slot <= a.forkchoiceStore.FinalizedSlot()).withVersion(version), nil + }).WithFinalized(canonicalRoot == root && signedHeader.Header.Slot <= a.forkchoiceStore.FinalizedSlot()).WithVersion(version), nil } diff --git a/cl/beacon/handler/liveness.go b/cl/beacon/handler/liveness.go new file mode 100644 index 00000000000..99ec031d831 --- /dev/null +++ b/cl/beacon/handler/liveness.go @@ -0,0 +1,153 @@ +package handler + +import ( + "encoding/json" + "fmt" + "net/http" + "sort" + "strconv" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" + 
"github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/cl/utils" +) + +type live struct { + Index int `json:"index,string"` + IsLive bool `json:"is_live"` +} + +func (a *ApiHandler) liveness(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { + epoch, err := beaconhttp.EpochFromRequest(r) + if err != nil { + return nil, err + } + maxEpoch := utils.GetCurrentEpoch(a.genesisCfg.GenesisTime, a.beaconChainCfg.SecondsPerSlot, a.beaconChainCfg.SlotsPerEpoch) + if epoch > maxEpoch { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("epoch %d is in the future, max epoch is %d", epoch, maxEpoch).Error()) + } + + var idxsStr []string + if err := json.NewDecoder(r.Body).Decode(&idxsStr); err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("could not decode request body: %w. request body is required.", err).Error()) + } + if len(idxsStr) == 0 { + return newBeaconResponse([]string{}), nil + } + idxSet := map[int]struct{}{} + // convert the request to uint64 + idxs := make([]uint64, 0, len(idxsStr)) + for _, idxStr := range idxsStr { + idx, err := strconv.ParseUint(idxStr, 10, 64) + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("could not parse validator index: %w", err).Error()) + } + if _, ok := idxSet[int(idx)]; ok { + continue + } + idxs = append(idxs, idx) + idxSet[int(idx)] = struct{}{} + } + + tx, err := a.indiciesDB.BeginRo(r.Context()) + if err != nil { + return nil, err + } + defer tx.Rollback() + ctx := r.Context() + liveSet := map[uint64]*live{} + // initialize resp. + for _, idx := range idxs { + liveSet[idx] = &live{Index: int(idx), IsLive: false} + } + var lastBlockRootProcess libcommon.Hash + var lastSlotProcess uint64 + // we need to obtain the relevant data: + // Use the blocks in the epoch as heuristic + for i := epoch * a.beaconChainCfg.SlotsPerEpoch; i < ((epoch+1)*a.beaconChainCfg.SlotsPerEpoch)-1; i++ { + block, err := a.blockReader.ReadBlockBySlot(ctx, tx, i) + if err != nil { + return nil, err + } + if block == nil { + continue + } + updateLivenessWithBlock(block, liveSet) + lastBlockRootProcess, err = block.Block.HashSSZ() + if err != nil { + return nil, err + } + lastSlotProcess = block.Block.Slot + } + // use the epoch partecipation as an additional heuristic + currentEpochPartecipation, previousEpochPartecipation, err := a.obtainCurrentEpochPartecipationFromEpoch(tx, epoch, lastBlockRootProcess, lastSlotProcess) + if err != nil { + return nil, err + } + if currentEpochPartecipation == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not find partecipations for epoch %d, if this was an historical query, turn on --caplin.archive", epoch)) + } + for idx, live := range liveSet { + if live.IsLive { + continue + } + if idx >= uint64(currentEpochPartecipation.Length()) { + continue + } + if currentEpochPartecipation.Get(int(idx)) != 0 { + live.IsLive = true + continue + } + if idx >= uint64(previousEpochPartecipation.Length()) { + continue + } + live.IsLive = previousEpochPartecipation.Get(int(idx)) != 0 + } + + resp := []*live{} + for _, v := range liveSet { + resp = append(resp, v) + } + sort.Slice(resp, func(i, j int) bool { + return resp[i].Index < resp[j].Index + }) + + return newBeaconResponse(resp), nil +} + +func (a *ApiHandler) obtainCurrentEpochPartecipationFromEpoch(tx kv.Tx, epoch uint64, blockRoot libcommon.Hash, blockSlot 
diff --git a/cl/beacon/handler/node.go b/cl/beacon/handler/node.go
new file mode 100644
index 00000000000..063adacdeb5
--- /dev/null
+++ b/cl/beacon/handler/node.go
@@ -0,0 +1,38 @@
+package handler
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"runtime"
+
+	"github.com/ledgerwatch/erigon/cl/beacon/beaconhttp"
+)
+
+func (a *ApiHandler) GetEthV1NodeHealth(w http.ResponseWriter, r *http.Request) {
+	syncingStatus, err := beaconhttp.Uint64FromQueryParams(r, "syncing_status")
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+	syncingCode := http.StatusOK
+	if syncingStatus != nil {
+		syncingCode = int(*syncingStatus)
+	}
+	if a.syncedData.Syncing() {
+		w.WriteHeader(syncingCode)
+		return
+	}
+	w.WriteHeader(http.StatusOK)
+}
+
+func (a *ApiHandler) GetEthV1NodeVersion(w http.ResponseWriter, r *http.Request) {
+	// Get OS and Arch
+	if err := json.NewEncoder(w).Encode(map[string]interface{}{
+		"data": map[string]interface{}{
+			"version": fmt.Sprintf("Caplin/%s %s/%s", a.version, runtime.GOOS, runtime.GOARCH),
+		},
+	}); err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+	}
+}
diff --git a/cl/beacon/handler/pool.go b/cl/beacon/handler/pool.go
index 66614f904f2..6b3a07af7b0 100644
--- a/cl/beacon/handler/pool.go
+++ b/cl/beacon/handler/pool.go
@@ -1,25 +1,225 @@
 package handler
 
 import (
+	"encoding/json"
 	"net/http"
+
+	"github.com/ledgerwatch/erigon-lib/gointerfaces/sentinel"
+	"github.com/ledgerwatch/erigon/cl/beacon/beaconhttp"
+	"github.com/ledgerwatch/erigon/cl/cltypes"
+	"github.com/ledgerwatch/erigon/cl/gossip"
 )
 
-func (a *ApiHandler) poolVoluntaryExits(r *http.Request) (*beaconResponse, error) {
+func (a *ApiHandler) GetEthV1BeaconPoolVoluntaryExits(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) {
 	return newBeaconResponse(a.operationsPool.VoluntaryExistsPool.Raw()), nil
 }
 
-func (a *ApiHandler) poolAttesterSlashings(r *http.Request) (*beaconResponse, error) {
+func (a *ApiHandler) GetEthV1BeaconPoolAttesterSlashings(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) {
 	return newBeaconResponse(a.operationsPool.AttesterSlashingsPool.Raw()), nil
 }
 
-func (a *ApiHandler) poolProposerSlashings(r *http.Request) (*beaconResponse, error) {
+func (a *ApiHandler) 
GetEthV1BeaconPoolProposerSlashings(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { return newBeaconResponse(a.operationsPool.ProposerSlashingsPool.Raw()), nil } -func (a *ApiHandler) poolBlsToExecutionChanges(r *http.Request) (*beaconResponse, error) { +func (a *ApiHandler) GetEthV1BeaconPoolBLSExecutionChanges(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { return newBeaconResponse(a.operationsPool.BLSToExecutionChangesPool.Raw()), nil } -func (a *ApiHandler) poolAttestations(r *http.Request) (*beaconResponse, error) { - return newBeaconResponse(a.operationsPool.AttestationsPool.Raw()), nil +func (a *ApiHandler) GetEthV1BeaconPoolAttestations(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { + slot, err := beaconhttp.Uint64FromQueryParams(r, "slot") + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + committeeIndex, err := beaconhttp.Uint64FromQueryParams(r, "committee_index") + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + atts := a.operationsPool.AttestationsPool.Raw() + if slot == nil && committeeIndex == nil { + return newBeaconResponse(atts), nil + } + ret := make([]any, 0, len(atts)) + for i := range atts { + if slot != nil && atts[i].AttestantionData().Slot() != *slot { + continue + } + if committeeIndex != nil && atts[i].AttestantionData().ValidatorIndex() != *committeeIndex { + continue + } + ret = append(ret, atts[i]) + } + + return newBeaconResponse(ret), nil +} + +func (a *ApiHandler) PostEthV1BeaconPoolVoluntaryExits(w http.ResponseWriter, r *http.Request) { + req := cltypes.SignedVoluntaryExit{} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if err := a.forkchoiceStore.OnVoluntaryExit(&req, false); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + // Broadcast to gossip + if a.sentinel != nil { + encodedSSZ, err := req.EncodeSSZ(nil) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if _, err := a.sentinel.PublishGossip(r.Context(), &sentinel.GossipData{ + Data: encodedSSZ, + Name: gossip.TopicNameVoluntaryExit, + }); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + a.operationsPool.VoluntaryExistsPool.Insert(req.VoluntaryExit.ValidatorIndex, &req) + } + // Only write 200 + w.WriteHeader(http.StatusOK) +} + +func (a *ApiHandler) PostEthV1BeaconPoolAttesterSlashings(w http.ResponseWriter, r *http.Request) { + req := cltypes.NewAttesterSlashing() + if err := json.NewDecoder(r.Body).Decode(req); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if err := a.forkchoiceStore.OnAttesterSlashing(req, false); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + // Broadcast to gossip + if a.sentinel != nil { + encodedSSZ, err := req.EncodeSSZ(nil) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if _, err := a.sentinel.PublishGossip(r.Context(), &sentinel.GossipData{ + Data: encodedSSZ, + Name: gossip.TopicNameAttesterSlashing, + }); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } + // Only write 200 + w.WriteHeader(http.StatusOK) +} + +func (a *ApiHandler) PostEthV1BeaconPoolProposerSlashings(w http.ResponseWriter, r *http.Request) { + req := 
cltypes.ProposerSlashing{}
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+	if err := a.forkchoiceStore.OnProposerSlashing(&req, false); err != nil {
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+	// Broadcast to gossip
+	if a.sentinel != nil {
+		encodedSSZ, err := req.EncodeSSZ(nil)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
+		if _, err := a.sentinel.PublishGossip(r.Context(), &sentinel.GossipData{
+			Data: encodedSSZ,
+			Name: gossip.TopicNameProposerSlashing,
+		}); err != nil {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
+	}
+	// Only write 200
+	w.WriteHeader(http.StatusOK)
+}
+
+type poolingFailure struct {
+	Index   int    `json:"index"`
+	Message string `json:"message"`
+}
+
+type poolingError struct {
+	Code     int              `json:"code"`
+	Message  string           `json:"message"`
+	Failures []poolingFailure `json:"failures"`
+}
+
+func (a *ApiHandler) PostEthV1BeaconPoolBlsToExecutionChanges(w http.ResponseWriter, r *http.Request) {
+	req := []*cltypes.SignedBLSToExecutionChange{}
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+	failures := []poolingFailure{}
+	for _, v := range req {
+		if err := a.forkchoiceStore.OnBlsToExecutionChange(v, false); err != nil {
+			failures = append(failures, poolingFailure{Index: len(failures), Message: err.Error()})
+			continue
+		}
+		// Broadcast to gossip
+		if a.sentinel != nil {
+			encodedSSZ, err := v.EncodeSSZ(nil)
+			if err != nil {
+				http.Error(w, err.Error(), http.StatusInternalServerError)
+				return
+			}
+			if _, err := a.sentinel.PublishGossip(r.Context(), &sentinel.GossipData{
+				Data: encodedSSZ,
+				Name: gossip.TopicNameBlsToExecutionChange,
+			}); err != nil {
+				http.Error(w, err.Error(), http.StatusInternalServerError)
+				return
+			}
+		}
+	}
+
+	if len(failures) > 0 {
+		w.WriteHeader(http.StatusBadRequest)
+		json.NewEncoder(w).Encode(poolingError{Code: http.StatusBadRequest, Message: "some failures", Failures: failures})
+		return
+	}
+	// Only write 200
+	w.WriteHeader(http.StatusOK)
+}
+
+func (a *ApiHandler) PostEthV1ValidatorAggregatesAndProof(w http.ResponseWriter, r *http.Request) {
+	req := []*cltypes.SignedAggregateAndProof{}
+
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	failures := []poolingFailure{}
+	for _, v := range req {
+		if err := a.forkchoiceStore.OnAggregateAndProof(v, false); err != nil {
+			failures = append(failures, poolingFailure{Index: len(failures), Message: err.Error()})
+			continue
+		}
+		// Broadcast to gossip
+		if a.sentinel != nil {
+			encodedSSZ, err := v.EncodeSSZ(nil)
+			if err != nil {
+				http.Error(w, err.Error(), http.StatusInternalServerError)
+				return
+			}
+			if _, err := a.sentinel.PublishGossip(r.Context(), &sentinel.GossipData{
+				Data: encodedSSZ,
+				Name: gossip.TopicNameBeaconAggregateAndProof,
+			}); err != nil {
+				http.Error(w, err.Error(), http.StatusInternalServerError)
+				return
+			}
+		}
+	}
+	// Report partial failures and acknowledge, mirroring the other pool handlers above
+	if len(failures) > 0 {
+		w.WriteHeader(http.StatusBadRequest)
+		json.NewEncoder(w).Encode(poolingError{Code: http.StatusBadRequest, Message: "some failures", Failures: failures})
+		return
+	}
+	w.WriteHeader(http.StatusOK)
+}
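All of the POST pool endpoints above follow the same pattern: decode JSON, validate by feeding the object to the fork choice store, then broadcast over sentinel gossip, answering 400 with a poolingError body when some items fail. A hedged client-side helper for exercising them (sketch only; baseURL is a placeholder, and imports bytes, context, encoding/json, fmt, io and net/http are assumed):

    // submitToPool POSTs a JSON payload to one of the pool routes above and
    // surfaces the error body on a non-200 answer.
    func submitToPool(ctx context.Context, baseURL, route string, payload any) error {
        body, err := json.Marshal(payload)
        if err != nil {
            return err
        }
        req, err := http.NewRequestWithContext(ctx, http.MethodPost, baseURL+route, bytes.NewReader(body))
        if err != nil {
            return err
        }
        req.Header.Set("Content-Type", "application/json")
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        if resp.StatusCode != http.StatusOK {
            msg, _ := io.ReadAll(resp.Body) // may be a poolingError JSON document
            return fmt.Errorf("pool submission failed: %s: %s", resp.Status, msg)
        }
        return nil
    }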
"github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies" + state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state" + "github.com/ledgerwatch/erigon/cl/utils" +) + +type blockRewardsResponse struct { + ProposerIndex uint64 `json:"proposer_index,string"` + Attestations uint64 `json:"attestations,string"` + ProposerSlashings uint64 `json:"proposer_slashings,string"` + AttesterSlashings uint64 `json:"attester_slashings,string"` + SyncAggregate uint64 `json:"sync_aggregate,string"` + Total uint64 `json:"total,string"` +} + +func (a *ApiHandler) getBlockRewards(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { + ctx := r.Context() + tx, err := a.indiciesDB.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + blockId, err := beaconhttp.BlockIdFromRequest(r) + if err != nil { + return nil, err + } + root, err := a.rootFromBlockId(ctx, tx, blockId) + if err != nil { + return nil, err + } + blk, err := a.blockReader.ReadHeaderByRoot(ctx, tx, root) + if err != nil { + return nil, err + } + if blk == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "block not found") + } + slot := blk.Header.Slot + isFinalized := slot <= a.forkchoiceStore.FinalizedSlot() + if slot >= a.forkchoiceStore.LowestAvaiableSlot() { + // finalized case + blkRewards, ok := a.forkchoiceStore.BlockRewards(root) + if !ok { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "block not found") + } + return newBeaconResponse(blockRewardsResponse{ + ProposerIndex: blk.Header.ProposerIndex, + Attestations: blkRewards.Attestations, + ProposerSlashings: blkRewards.ProposerSlashings, + AttesterSlashings: blkRewards.AttesterSlashings, + SyncAggregate: blkRewards.SyncAggregate, + Total: blkRewards.Attestations + blkRewards.ProposerSlashings + blkRewards.AttesterSlashings + blkRewards.SyncAggregate, + }).WithFinalized(isFinalized), nil + } + slotData, err := state_accessors.ReadSlotData(tx, slot) + if err != nil { + return nil, err + } + if slotData == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "could not read historical block rewards, node may not be archive or it still processing historical states") + } + return newBeaconResponse(blockRewardsResponse{ + ProposerIndex: blk.Header.ProposerIndex, + Attestations: slotData.AttestationsRewards, + ProposerSlashings: slotData.ProposerSlashings, + AttesterSlashings: slotData.AttesterSlashings, + SyncAggregate: slotData.SyncAggregateRewards, + Total: slotData.AttestationsRewards + slotData.ProposerSlashings + slotData.AttesterSlashings + slotData.SyncAggregateRewards, + }).WithFinalized(isFinalized), nil +} + +type syncCommitteeReward struct { + ValidatorIndex uint64 `json:"validator_index,string"` + Reward int64 `json:"reward,string"` +} + +func (a *ApiHandler) getSyncCommitteesRewards(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { + ctx := r.Context() + + tx, err := a.indiciesDB.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + // Retrieve all the request data ------------------------------------------- + req := []string{} + // read the entire body + jsonBytes, err := io.ReadAll(r.Body) + if err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error()) + } + // parse json body request + if len(jsonBytes) > 0 { + if err := json.Unmarshal(jsonBytes, &req); err != nil { + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, 
+
+type syncCommitteeReward struct {
+	ValidatorIndex uint64 `json:"validator_index,string"`
+	Reward         int64  `json:"reward,string"`
+}
+
+func (a *ApiHandler) getSyncCommitteesRewards(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) {
+	ctx := r.Context()
+
+	tx, err := a.indiciesDB.BeginRo(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer tx.Rollback()
+	// Retrieve all the request data -------------------------------------------
+	req := []string{}
+	// read the entire body
+	jsonBytes, err := io.ReadAll(r.Body)
+	if err != nil {
+		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+	}
+	// parse json body request
+	if len(jsonBytes) > 0 {
+		if err := json.Unmarshal(jsonBytes, &req); err != nil {
+			return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+		}
+	}
+	filterIndicies, err := parseQueryValidatorIndicies(tx, req)
+	if err != nil {
+		return nil, err
+	}
+
+	blockId, err := beaconhttp.BlockIdFromRequest(r)
+	if err != nil {
+		return nil, err
+	}
+	root, err := a.rootFromBlockId(ctx, tx, blockId)
+	if err != nil {
+		return nil, err
+	}
+	blk, err := a.blockReader.ReadBlockByRoot(ctx, tx, root)
+	if err != nil {
+		return nil, err
+	}
+	if blk == nil {
+		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "block not found")
+	}
+	version := a.beaconChainCfg.GetCurrentStateVersion(blk.Block.Slot / a.beaconChainCfg.SlotsPerEpoch)
+	if version < clparams.AltairVersion {
+		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "sync committee rewards not available before Altair fork")
+	}
+	// retrieve the state we need -----------------------------------------------
+	// We need:
+	// - sync committee of the block
+	// - total active balance of the block
+	canonicalBlockRoot, err := beacon_indicies.ReadCanonicalBlockRoot(tx, blk.Block.Slot)
+	if err != nil {
+		return nil, err
+	}
+
+	isCanonical := canonicalBlockRoot == root
+
+	isFinalized := blk.Block.Slot <= a.forkchoiceStore.FinalizedSlot()
+	var (
+		syncCommittee      *solid.SyncCommittee
+		totalActiveBalance uint64
+	)
+	if isFinalized {
+		if !isCanonical {
+			return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "non-canonical finalized block not found")
+		}
+		epochData, err := state_accessors.ReadEpochData(tx, blk.Block.Slot)
+		if err != nil {
+			return nil, err
+		}
+		if epochData == nil {
+			return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "could not read historical sync committee rewards; node may not be an archive node or may still be processing historical states")
+		}
+		totalActiveBalance = epochData.TotalActiveBalance
+		syncCommittee, err = state_accessors.ReadCurrentSyncCommittee(tx, a.beaconChainCfg.RoundSlotToSyncCommitteePeriod(blk.Block.Slot))
+		if err != nil {
+			return nil, err
+		}
+		if syncCommittee == nil {
+			return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "could not read historical sync committee; node may not be an archive node or may still be processing historical states")
+		}
+	} else {
+		var ok bool
+		syncCommittee, _, ok = a.forkchoiceStore.GetSyncCommittees(root)
+		if !ok {
+			return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "non-finalized sync committee not found")
+		}
+		totalActiveBalance, ok = a.forkchoiceStore.TotalActiveBalance(root)
+		if !ok {
+			return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "non-finalized total active balance not found")
+		}
+	}
+	committee := syncCommittee.GetCommittee()
+	rewards := make([]syncCommitteeReward, 0, len(committee))
+
+	syncAggregate := blk.Block.Body.SyncAggregate
+
+	filterIndiciesSet := make(map[uint64]struct{})
+	for _, v := range filterIndicies {
+		filterIndiciesSet[v] = struct{}{}
+	}
+	// validator index -> accumulated rewards
+	accumulatedRewards := map[uint64]int64{}
+	for _, idx := range filterIndicies {
+		accumulatedRewards[idx] = 0
+	}
+	partecipantReward := int64(a.syncPartecipantReward(totalActiveBalance))
+
+	for committeeIdx, v := range committee {
+		idx, ok, err := state_accessors.ReadValidatorIndexByPublicKey(tx, v)
+		if err != nil {
+			return nil, err
+		}
+		if !ok {
+			return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "sync committee public key not found")
+		}
+		if len(filterIndiciesSet) > 0 {
+			if _, ok := filterIndiciesSet[idx]; !ok {
+				continue
+			}
+		}
+		if syncAggregate.IsSet(uint64(committeeIdx)) {
+			accumulatedRewards[idx] += 
partecipantReward + continue + } + accumulatedRewards[idx] -= partecipantReward + } + for idx, reward := range accumulatedRewards { + rewards = append(rewards, syncCommitteeReward{ + ValidatorIndex: idx, + Reward: reward, + }) + } + sort.Slice(rewards, func(i, j int) bool { + return rewards[i].ValidatorIndex < rewards[j].ValidatorIndex + }) + return newBeaconResponse(rewards).WithFinalized(isFinalized), nil +} + +func (a *ApiHandler) syncPartecipantReward(activeBalance uint64) uint64 { + activeBalanceSqrt := utils.IntegerSquareRoot(activeBalance) + totalActiveIncrements := activeBalance / a.beaconChainCfg.EffectiveBalanceIncrement + baseRewardPerInc := a.beaconChainCfg.EffectiveBalanceIncrement * a.beaconChainCfg.BaseRewardFactor / activeBalanceSqrt + totalBaseRewards := baseRewardPerInc * totalActiveIncrements + maxParticipantRewards := totalBaseRewards * a.beaconChainCfg.SyncRewardWeight / a.beaconChainCfg.WeightDenominator / a.beaconChainCfg.SlotsPerEpoch + return maxParticipantRewards / a.beaconChainCfg.SyncCommitteeSize +} diff --git a/cl/beacon/handler/states.go b/cl/beacon/handler/states.go index bedc8c21c9c..e232084d1e2 100644 --- a/cl/beacon/handler/states.go +++ b/cl/beacon/handler/states.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "strconv" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" @@ -16,48 +17,47 @@ import ( "github.com/ledgerwatch/erigon/cl/utils" ) -func (a *ApiHandler) rootFromStateId(ctx context.Context, tx kv.Tx, stateId *segmentID) (root libcommon.Hash, httpStatusErr int, err error) { - var blockRoot libcommon.Hash +func (a *ApiHandler) blockRootFromStateId(ctx context.Context, tx kv.Tx, stateId *beaconhttp.SegmentID) (root libcommon.Hash, httpStatusErr int, err error) { switch { - case stateId.head(): - blockRoot, _, err = a.forkchoiceStore.GetHead() + case stateId.Head(): + root, _, err = a.forkchoiceStore.GetHead() if err != nil { return libcommon.Hash{}, http.StatusInternalServerError, err } - case stateId.finalized(): - blockRoot = a.forkchoiceStore.FinalizedCheckpoint().BlockRoot() - case stateId.justified(): - blockRoot = a.forkchoiceStore.JustifiedCheckpoint().BlockRoot() - case stateId.genesis(): - blockRoot, err = beacon_indicies.ReadCanonicalBlockRoot(tx, 0) + return + case stateId.Finalized(): + root = a.forkchoiceStore.FinalizedCheckpoint().BlockRoot() + return + case stateId.Justified(): + root = a.forkchoiceStore.JustifiedCheckpoint().BlockRoot() + return + case stateId.Genesis(): + root, err = beacon_indicies.ReadCanonicalBlockRoot(tx, 0) if err != nil { return libcommon.Hash{}, http.StatusInternalServerError, err } - if blockRoot == (libcommon.Hash{}) { + if root == (libcommon.Hash{}) { return libcommon.Hash{}, http.StatusNotFound, fmt.Errorf("genesis block not found") } - case stateId.getSlot() != nil: - blockRoot, err = beacon_indicies.ReadCanonicalBlockRoot(tx, *stateId.getSlot()) + return + case stateId.GetSlot() != nil: + root, err = beacon_indicies.ReadCanonicalBlockRoot(tx, *stateId.GetSlot()) if err != nil { return libcommon.Hash{}, http.StatusInternalServerError, err } - if blockRoot == (libcommon.Hash{}) { - return libcommon.Hash{}, http.StatusNotFound, fmt.Errorf("block not found %d", *stateId.getSlot()) + if root == (libcommon.Hash{}) { + return libcommon.Hash{}, http.StatusNotFound, fmt.Errorf("block not found %d", *stateId.GetSlot()) + } + return + case stateId.GetRoot() != nil: + root, err = beacon_indicies.ReadBlockRootByStateRoot(tx, *stateId.GetRoot()) + if err != nil { + 
+func (a *ApiHandler) syncPartecipantReward(activeBalance uint64) uint64 {
+	activeBalanceSqrt := utils.IntegerSquareRoot(activeBalance)
+	totalActiveIncrements := activeBalance / a.beaconChainCfg.EffectiveBalanceIncrement
+	baseRewardPerInc := a.beaconChainCfg.EffectiveBalanceIncrement * a.beaconChainCfg.BaseRewardFactor / activeBalanceSqrt
+	totalBaseRewards := baseRewardPerInc * totalActiveIncrements
+	maxParticipantRewards := totalBaseRewards * a.beaconChainCfg.SyncRewardWeight / a.beaconChainCfg.WeightDenominator / a.beaconChainCfg.SlotsPerEpoch
+	return maxParticipantRewards / a.beaconChainCfg.SyncCommitteeSize
+}
diff --git a/cl/beacon/handler/states.go b/cl/beacon/handler/states.go
index bedc8c21c9c..e232084d1e2 100644
--- a/cl/beacon/handler/states.go
+++ b/cl/beacon/handler/states.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"net/http"
+	"strconv"

 	libcommon "github.com/ledgerwatch/erigon-lib/common"
 	"github.com/ledgerwatch/erigon-lib/kv"
@@ -16,48 +17,47 @@ import (
 	"github.com/ledgerwatch/erigon/cl/utils"
 )

-func (a *ApiHandler) rootFromStateId(ctx context.Context, tx kv.Tx, stateId *segmentID) (root libcommon.Hash, httpStatusErr int, err error) {
-	var blockRoot libcommon.Hash
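+// blockRootFromStateId resolves a state identifier (head, finalized,
+// justified, genesis, a slot or a state root) to the root of the
+// corresponding beacon block.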
+func (a *ApiHandler) blockRootFromStateId(ctx context.Context, tx kv.Tx, stateId *beaconhttp.SegmentID) (root libcommon.Hash, httpStatusErr int, err error) {
 	switch {
-	case stateId.head():
-		blockRoot, _, err = a.forkchoiceStore.GetHead()
+	case stateId.Head():
+		root, _, err = a.forkchoiceStore.GetHead()
 		if err != nil {
 			return libcommon.Hash{}, http.StatusInternalServerError, err
 		}
-	case stateId.finalized():
-		blockRoot = a.forkchoiceStore.FinalizedCheckpoint().BlockRoot()
-	case stateId.justified():
-		blockRoot = a.forkchoiceStore.JustifiedCheckpoint().BlockRoot()
-	case stateId.genesis():
-		blockRoot, err = beacon_indicies.ReadCanonicalBlockRoot(tx, 0)
+		return
+	case stateId.Finalized():
+		root = a.forkchoiceStore.FinalizedCheckpoint().BlockRoot()
+		return
+	case stateId.Justified():
+		root = a.forkchoiceStore.JustifiedCheckpoint().BlockRoot()
+		return
+	case stateId.Genesis():
+		root, err = beacon_indicies.ReadCanonicalBlockRoot(tx, 0)
 		if err != nil {
 			return libcommon.Hash{}, http.StatusInternalServerError, err
 		}
-		if blockRoot == (libcommon.Hash{}) {
+		if root == (libcommon.Hash{}) {
 			return libcommon.Hash{}, http.StatusNotFound, fmt.Errorf("genesis block not found")
 		}
-	case stateId.getSlot() != nil:
-		blockRoot, err = beacon_indicies.ReadCanonicalBlockRoot(tx, *stateId.getSlot())
+		return
+	case stateId.GetSlot() != nil:
+		root, err = beacon_indicies.ReadCanonicalBlockRoot(tx, *stateId.GetSlot())
 		if err != nil {
 			return libcommon.Hash{}, http.StatusInternalServerError, err
 		}
-		if blockRoot == (libcommon.Hash{}) {
-			return libcommon.Hash{}, http.StatusNotFound, fmt.Errorf("block not found %d", *stateId.getSlot())
+		if root == (libcommon.Hash{}) {
+			return libcommon.Hash{}, http.StatusNotFound, fmt.Errorf("block not found %d", *stateId.GetSlot())
+		}
+		return
+	case stateId.GetRoot() != nil:
+		root, err = beacon_indicies.ReadBlockRootByStateRoot(tx, *stateId.GetRoot())
+		if err != nil {
+			return libcommon.Hash{}, http.StatusInternalServerError, err
 		}
-	case stateId.getRoot() != nil:
-		root = *stateId.getRoot()
 		return
 	default:
 		return libcommon.Hash{}, http.StatusInternalServerError, fmt.Errorf("cannot parse state id")
 	}
-	root, err = beacon_indicies.ReadStateRootByBlockRoot(ctx, tx, blockRoot)
-	if err != nil {
-		return libcommon.Hash{}, http.StatusInternalServerError, err
-	}
-	if root == (libcommon.Hash{}) {
-		return libcommon.Hash{}, http.StatusNotFound, fmt.Errorf("block not found")
-	}
-	return
 }

 type rootResponse struct {
@@ -71,7 +71,7 @@ func previousVersion(v clparams.StateVersion) clparams.StateVersion {
 	return v - 1
 }

-func (a *ApiHandler) getStateFork(r *http.Request) (*beaconResponse, error) {
+func (a *ApiHandler) getStateFork(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) {
 	ctx := r.Context()

 	tx, err := a.indiciesDB.BeginRo(ctx)
@@ -80,11 +80,11 @@ func (a *ApiHandler) getStateFork(r *http.Request) (*beaconResponse, error) {
 	}
 	defer tx.Rollback()

-	blockId, err := stateIdFromRequest(r)
+	blockId, err := beaconhttp.StateIdFromRequest(r)
 	if err != nil {
 		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
 	}
-	root, httpStatus, err := a.rootFromStateId(ctx, tx, blockId)
+	root, httpStatus, err := a.blockRootFromStateId(ctx, tx, blockId)
 	if err != nil {
 		return nil, beaconhttp.NewEndpointError(httpStatus, err.Error())
 	}
@@ -94,7 +94,7 @@ func (a *ApiHandler) getStateFork(r *http.Request) (*beaconResponse, error) {
 		return nil, err
 	}
 	if slot == nil {
-		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, err.Error())
+		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not read block slot: %x", root))
 	}
 	epoch := *slot / a.beaconChainCfg.SlotsPerEpoch

@@ -110,7 +110,7 @@ func (a *ApiHandler) getStateFork(r *http.Request) (*beaconResponse, error) {
 	}), nil
 }

-func (a *ApiHandler) getStateRoot(r *http.Request) (*beaconResponse, error) {
+func (a *ApiHandler) getStateRoot(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) {
 	ctx := r.Context()

 	tx, err := a.indiciesDB.BeginRo(ctx)
@@ -119,11 +119,11 @@ func (a *ApiHandler) getStateRoot(r *http.Request) (*beaconResponse, error) {
 	}
 	defer tx.Rollback()

-	blockId, err := stateIdFromRequest(r)
+	blockId, err := beaconhttp.StateIdFromRequest(r)
 	if err != nil {
 		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
 	}
-	root, httpStatus, err := a.rootFromStateId(ctx, tx, blockId)
+	root, httpStatus, err := a.blockRootFromStateId(ctx, tx, blockId)
 	if err != nil {
 		return nil, beaconhttp.NewEndpointError(httpStatus, err.Error())
 	}
@@ -149,10 +149,10 @@ func (a *ApiHandler) getStateRoot(r *http.Request) (*beaconResponse, error) {
 	}

 	return newBeaconResponse(&rootResponse{Root: stateRoot}).
-		withFinalized(canonicalRoot == root && *slot <= a.forkchoiceStore.FinalizedSlot()), nil
+		WithFinalized(canonicalRoot == root && *slot <= a.forkchoiceStore.FinalizedSlot()), nil
 }

-func (a *ApiHandler) getFullState(r *http.Request) (*beaconResponse, error) {
+func (a *ApiHandler) getFullState(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) {
 	ctx := r.Context()

 	tx, err := a.indiciesDB.BeginRo(ctx)
@@ -161,27 +161,47 @@ func (a *ApiHandler) getFullState(r *http.Request) (*beaconResponse, error) {
 	}
 	defer tx.Rollback()

-	blockId, err := stateIdFromRequest(r)
+	blockId, err := beaconhttp.StateIdFromRequest(r)
 	if err != nil {
 		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
 	}
-	root, httpStatus, err := a.rootFromStateId(ctx, tx, blockId)
+	blockRoot, httpStatus, err := a.blockRootFromStateId(ctx, tx, blockId)
 	if err != nil {
 		return nil, beaconhttp.NewEndpointError(httpStatus, err.Error())
 	}

-	blockRoot, err := beacon_indicies.ReadBlockRootByStateRoot(tx, root)
-	if err != nil {
-		return nil, err
-	}
-
 	state, err := a.forkchoiceStore.GetStateAtBlockRoot(blockRoot, true)
 	if err != nil {
 		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
 	}
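+	// The state may already have left forkchoice's in-memory window; for
+	// canonical blocks fall back to the historical state reader.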
+	if state == nil {
+		slot, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, blockRoot)
+		if err != nil {
+			return nil, err
+		}
+		// Sanity-check the slot and canonical data.
+		if slot == nil {
+			return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not read block slot: %x", blockRoot))
+		}
+		canonicalRoot, err := beacon_indicies.ReadCanonicalBlockRoot(tx, *slot)
+		if err != nil {
+			return nil, err
+		}
+		if canonicalRoot != blockRoot {
+			return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not read state: %x", blockRoot))
+		}
+		state, err := a.stateReader.ReadHistoricalState(ctx, tx, *slot)
+		if err != nil {
+			return nil, err
+		}
+		if state == nil {
+			return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not read state: %x", blockRoot))
+		}
+		return newBeaconResponse(state).WithFinalized(true).WithVersion(state.Version()), nil
+	}

-	return newBeaconResponse(state).withFinalized(false).withVersion(state.Version()), nil
+	return newBeaconResponse(state).WithFinalized(false).WithVersion(state.Version()), nil
 }

 type finalityCheckpointsResponse struct {
@@ -190,7 +210,7 @@ type finalityCheckpointsResponse struct {
 	PreviousJustifiedCheckpoint solid.Checkpoint `json:"previous_justified_checkpoint"`
 }

-func (a *ApiHandler) getFinalityCheckpoints(r *http.Request) (*beaconResponse, error) {
+func (a *ApiHandler) getFinalityCheckpoints(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) {
 	ctx := r.Context()

 	tx, err := a.indiciesDB.BeginRo(ctx)
@@ -198,21 +218,16 @@ func (a *ApiHandler) getFinalityCheckpoints(r *http.Request) (*beaconResponse, e
 		return nil, err
 	}
 	defer tx.Rollback()
-	blockId, err := stateIdFromRequest(r)
+	blockId, err := beaconhttp.StateIdFromRequest(r)
 	if err != nil {
 		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
 	}
-	root, httpStatus, err := a.rootFromStateId(ctx, tx, blockId)
+	blockRoot, httpStatus, err := a.blockRootFromStateId(ctx, tx, blockId)
 	if err != nil {
 		return nil, beaconhttp.NewEndpointError(httpStatus, err.Error())
 	}

-	blockRoot, err := beacon_indicies.ReadBlockRootByStateRoot(tx, root)
-	if err != nil {
-		return nil, err
-	}
-
 	slot, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, blockRoot)
 	if err != nil {
 		return nil, err
@@ -244,15 +259,15 @@ func (a *ApiHandler) getFinalityCheckpoints(r *http.Request) (*beaconResponse, e
 		FinalizedCheckpoint:         finalizedCheckpoint,
 		CurrentJustifiedCheckpoint:  currentJustifiedCheckpoint,
 		PreviousJustifiedCheckpoint: previousJustifiedCheckpoint,
-	}).withFinalized(canonicalRoot == root && *slot <= a.forkchoiceStore.FinalizedSlot()).withVersion(version), nil
+	}).WithFinalized(canonicalRoot == blockRoot && *slot <= a.forkchoiceStore.FinalizedSlot()).WithVersion(version), nil
 }

 type syncCommitteesResponse struct {
-	Validators          []uint64   `json:"validators"`
-	ValidatorAggregates [][]uint64 `json:"validator_aggregates"`
+	Validators          []string   `json:"validators"`
+	ValidatorAggregates [][]string `json:"validator_aggregates"`
 }

-func (a *ApiHandler) getSyncCommittees(r *http.Request) (*beaconResponse, error) {
+func (a *ApiHandler) getSyncCommittees(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) {
 	ctx := r.Context()

 	tx, err := a.indiciesDB.BeginRo(ctx)
@@ -260,21 +275,16 @@ func (a *ApiHandler) getSyncCommittees(r *http.Request) (*beaconResponse, error)
 		return nil, err
 	}
 	defer tx.Rollback()
-	blockId, err := stateIdFromRequest(r)
+	blockId, err := beaconhttp.StateIdFromRequest(r)
 	if err != nil {
 		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
 	}
-	root, httpStatus, err := a.rootFromStateId(ctx, tx, blockId)
+	blockRoot, httpStatus, err := a.blockRootFromStateId(ctx, tx, blockId)
 	if err != nil {
 		return nil, beaconhttp.NewEndpointError(httpStatus, err.Error())
 	}

-	blockRoot, err := beacon_indicies.ReadBlockRootByStateRoot(tx, root)
-	if err != nil {
-		return nil, err
-	}
-
 	slot, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, blockRoot)
 	if err != nil {
 		return nil, err
@@ -302,7 +312,7 @@ func (a *ApiHandler) getSyncCommittees(r *http.Request) (*beaconResponse, error)
 	}
 	// Now fetch the data we need
 	statePeriod := a.beaconChainCfg.SyncCommitteePeriod(*slot)
-	queryEpoch, err := uint64FromQueryParams(r, "epoch")
+	queryEpoch, err := beaconhttp.Uint64FromQueryParams(r, "epoch")
 	if err != nil {
 		return nil, err
 	}
@@ -319,27 +329,91 @@ func (a *ApiHandler) getSyncCommittees(r *http.Request) (*beaconResponse, error)
 	// Lastly construct the response
 	validatorsPerSubcommittee := a.beaconChainCfg.SyncCommitteeSize / a.beaconChainCfg.SyncCommitteeSubnetCount
 	response := syncCommitteesResponse{
-		Validators:          make([]uint64, a.beaconChainCfg.SyncCommitteeSize),
-		ValidatorAggregates: make([][]uint64, a.beaconChainCfg.SyncCommitteeSubnetCount),
+		Validators:          make([]string, a.beaconChainCfg.SyncCommitteeSize),
+		ValidatorAggregates: make([][]string, a.beaconChainCfg.SyncCommitteeSubnetCount),
 	}
 	for i, publicKey := range committee {
 		// get the validator index of the committee
-		validatorIndex, err := state_accessors.ReadValidatorIndexByPublicKey(tx, publicKey)
+		validatorIndex, ok, err := state_accessors.ReadValidatorIndexByPublicKey(tx, publicKey)
 		if err != nil {
 			return nil, err
 		}
-		response.Validators[i] = validatorIndex
+		if !ok {
+			return nil, fmt.Errorf("could not read validator index: %x", publicKey)
+		}
+		idx := strconv.FormatInt(int64(validatorIndex), 10)
+		response.Validators[i] = idx
 		// add the index to the subcommittee
 		subCommitteeIndex := uint64(i) / validatorsPerSubcommittee
 		if len(response.ValidatorAggregates[subCommitteeIndex]) == 0 {
-			response.ValidatorAggregates[subCommitteeIndex] = make([]uint64, validatorsPerSubcommittee)
+			response.ValidatorAggregates[subCommitteeIndex] = make([]string, validatorsPerSubcommittee)
 		}
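+// getRandao returns the RANDAO mix of the requested state at the requested
+// epoch, served from forkchoice while the state is still in memory and from
+// the historical state reader otherwise.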
-		response.ValidatorAggregates[subCommitteeIndex][uint64(i)%validatorsPerSubcommittee] = validatorIndex
+		response.ValidatorAggregates[subCommitteeIndex][uint64(i)%validatorsPerSubcommittee] = idx
 	}
 	canonicalRoot, err := beacon_indicies.ReadCanonicalBlockRoot(tx, *slot)
 	if err != nil {
 		return nil, err
 	}

-	return newBeaconResponse(response).withFinalized(canonicalRoot == root && *slot <= a.forkchoiceStore.FinalizedSlot()), nil
+	return newBeaconResponse(response).WithFinalized(canonicalRoot == blockRoot && *slot <= a.forkchoiceStore.FinalizedSlot()), nil
+}
+
+type randaoResponse struct {
+	Randao libcommon.Hash `json:"randao"`
+}
+
+func (a *ApiHandler) getRandao(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) {
+	ctx := r.Context()
+
+	tx, err := a.indiciesDB.BeginRo(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer tx.Rollback()
+	blockId, err := beaconhttp.StateIdFromRequest(r)
+	if err != nil {
+		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+	}
+
+	blockRoot, httpStatus, err := a.blockRootFromStateId(ctx, tx, blockId)
+	if err != nil {
+		return nil, beaconhttp.NewEndpointError(httpStatus, err.Error())
+	}
+
+	epochReq, err := beaconhttp.Uint64FromQueryParams(r, "epoch")
+	if err != nil {
+		return nil, err
+	}
+	slotPtr, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, blockRoot)
+	if err != nil {
+		return nil, err
+	}
+	if slotPtr == nil {
+		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not read block slot: %x", blockRoot))
+	}
+	slot := *slotPtr
+	epoch := slot / a.beaconChainCfg.SlotsPerEpoch
+	if epochReq != nil {
+		epoch = *epochReq
+	}
+	randaoMixes := a.randaoMixesPool.Get().(solid.HashListSSZ)
+	defer a.randaoMixesPool.Put(randaoMixes)
+
+	if a.forkchoiceStore.RandaoMixes(blockRoot, randaoMixes) {
+		mix := randaoMixes.Get(int(epoch % a.beaconChainCfg.EpochsPerHistoricalVector))
+		return newBeaconResponse(randaoResponse{Randao: mix}).WithFinalized(slot <= a.forkchoiceStore.FinalizedSlot()), nil
+	}
+	// check if the block is canonical
+	canonicalRoot, err := beacon_indicies.ReadCanonicalBlockRoot(tx, slot)
+	if err != nil {
+		return nil, err
+	}
+	if canonicalRoot != blockRoot {
+		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not read randao: %x", blockRoot))
+	}
+	mix, err := a.stateReader.ReadRandaoMixBySlotAndIndex(tx, slot, epoch%a.beaconChainCfg.EpochsPerHistoricalVector)
+	if err != nil {
+		return nil, err
+	}
+	return newBeaconResponse(randaoResponse{Randao: mix}).WithFinalized(slot <= a.forkchoiceStore.FinalizedSlot()), nil
 }
diff --git a/cl/beacon/handler/test_data/attestations_1.json b/cl/beacon/handler/test_data/attestations_1.json
new file mode 100644
index 00000000000..c73603b8912
--- /dev/null
+++ b/cl/beacon/handler/test_data/attestations_1.json
@@ -0,0 +1 @@
+{"data":{"ideal_rewards":[{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balanc
e":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680"
,"source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivi
ty":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","
head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inc
lusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effecti
ve_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target"
:"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0",
"inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"}],"total_rewards":[{"validator_index":"0","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"1","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"2","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"3","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"4","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"5","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"6","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"7","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"8","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"9","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"10","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"11","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"12","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"13","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"14","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"15","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"16","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"17","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivit
y":"0"},{"validator_index":"18","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"19","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"20","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"21","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"22","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"23","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"24","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"25","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"26","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"27","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"28","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"29","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"30","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"31","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"32","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"33","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"34","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"35","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"36","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"37","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"38","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"39","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"40","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"41","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"42","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"43","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"44","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"45","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"46","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"47","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"48","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"49","head":"0","target":"290680","source":"-156520","in
clusion_delay":"0","inactivity":"0"},{"validator_index":"50","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"51","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"52","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"53","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"54","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"55","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"56","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"57","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"58","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"59","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"60","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"61","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"62","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"63","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"64","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"65","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"66","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"67","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"68","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"69","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"70","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"71","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"72","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"73","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"74","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"75","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"76","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"77","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"78","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"79","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"80","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"81","head":"0","target":"2
90680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"82","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"83","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"84","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"85","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"86","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"87","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"88","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"89","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"90","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"91","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"92","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"93","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"94","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"95","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"96","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"97","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"98","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"99","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"100","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"101","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"102","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"103","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"104","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"105","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"106","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"107","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"108","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"109","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"110","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"111","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"112","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"va
lidator_index":"113","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"114","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"115","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"116","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"117","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"118","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"119","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"120","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"121","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"122","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"123","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"124","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"125","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"126","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"127","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"128","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"129","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"130","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"131","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"132","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"133","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"134","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"135","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"136","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"137","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"138","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"139","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"140","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"141","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"142","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"143","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"144","head":"0","target":"290680","s
ource":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"145","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"146","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"147","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"148","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"149","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"150","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"151","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"152","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"153","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"154","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"155","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"156","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"157","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"158","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"159","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"160","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"161","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"162","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"163","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"164","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"165","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"166","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"167","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"168","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"169","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"170","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"171","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"172","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"173","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"174","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"175","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":
"0"},{"validator_index":"176","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"177","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"178","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"179","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"180","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"181","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"182","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"183","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"184","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"185","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"186","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"187","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"188","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"189","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"190","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"191","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"192","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"193","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"194","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"195","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"196","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"197","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"198","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"199","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"200","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"201","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"202","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"203","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"204","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"205","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"206","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"207","head":"0","target":"2
90680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"208","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"209","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"210","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"211","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"212","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"213","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"214","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"215","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"216","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"217","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"218","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"219","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"220","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"221","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"222","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"223","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"224","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"225","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"226","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"227","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"228","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"229","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"230","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"231","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"232","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"233","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"234","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"235","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"236","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"237","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"238","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","ina
ctivity":"0"},{"validator_index":"239","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"240","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"241","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"242","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"243","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"244","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"245","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"246","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"247","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"248","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"249","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"250","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"251","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"252","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"253","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"254","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"255","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"}]}} diff --git a/cl/beacon/handler/test_data/attestations_2.json b/cl/beacon/handler/test_data/attestations_2.json new file mode 100644 index 00000000000..5f3b03eb6a9 --- /dev/null +++ b/cl/beacon/handler/test_data/attestations_2.json @@ -0,0 +1 @@ +{"data":{"ideal_rewards":[{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"290680","source":"0","inclusion_delay":"0","inactivity":"0"}],"total_rewards":[{"validator_index":"1","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"},{"validator_index":"4","head":"0","target":"290680","source":"-156520","inclusion_delay":"0","inactivity":"0"}]}} diff --git a/cl/beacon/handler/test_data/attestations_3.json b/cl/beacon/handler/test_data/attestations_3.json new file mode 100644 index 00000000000..10168612a39 --- /dev/null +++ b/cl/beacon/handler/test_data/attestations_3.json @@ -0,0 +1 @@ 
+{"data":{"ideal_rewards":[{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"20000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"25000000000","head":"84353","target":"84353","source":"84353","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"14217","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"22000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"11151","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4460","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"22302","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"7871","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"66906","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"26000000000","head":"87727","target":"87727","source":"87727","inclusion_delay":"4348","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"6462","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"26762","inactivity":"0"},{"effective_balance":"20000000000","head":"67482","target":"67482","source":"67482","inclusion_delay":"4646","inactivity":"0"},{"eff
ective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"26000000000","head":"87727","target":"87727","source":"87727","inclusion_delay":"4941","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"24000000000","head":"80979","target":"80979","source":"80979","inclusion_delay":"9123","inactivity":"0"},{"effective_balance":"21000000000","head":"70856","target":"70856","source":"70856","inclusion_delay":"17562","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4181","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"25000000000","head":"84353","target":"84353","source":"84353","inclusion_delay":"17423","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"4181","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"4442","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"33453","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"25000000000","head":"84353","target":"84353","source":"84353","inclusion_delay":"4355","inactivity":"0"},{"effective_balance":"22000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5352","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"3231","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"21000000000","head":"70856","target":"70856","source":"7
0856","inclusion_delay":"5854","inactivity":"0"},{"effective_balance":"18000000000","head":"60734","target":"60734","source":"60734","inclusion_delay":"8363","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6372","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"4442","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"11151","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"18000000000","head":"60734","target":"60734","source":"60734","inclusion_delay":"10752","inactivity":"0"},{"effective_balance":"30000000000","head":"101224","target":"101224","source":"101224","inclusion_delay":"3920","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"35543","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"9558","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5575","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"26000000000","head":"87727","target":"87727","source":"87727","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"10293","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","in
clusion_delay":"2221","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"14217","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6690","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"28000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"12164","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"22302","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"14868","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4181","inactivity":"0"},{"effective_balance":"19000000000","head":"64108","target":"64108","source":"64108","inclusion_delay":"5675","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4956","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2632","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"33453","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6372","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"3231","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"23000000000","head":"77605","target":"77605","source":"77605","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"7871","inactivity":"0"},{"effective_balance":"27000000000","head":"91101","target":"91101","source":"91101","inclusion_delay":"3763","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity
":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4460","inactivity":"0"},{"effective_balance":"23000000000","head":"77605","target":"77605","source":"77605","inclusion_delay":"10686","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"10293","inactivity":"0"},{"effective_balance":"24000000000","head":"80979","target":"80979","source":"80979","inclusion_delay":"4561","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"11151","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5575","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"5923","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4460","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"22302","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"33453","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2538","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"4442","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"19116","inactivity":"0"},{"effective_balance":"20000000000","head":"67482","target":"67482","source":"67482","inclusion_delay":"2697","inactivity":"0"},{"effective_balance":"22000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"7871","inactivity":"0"},{"effective_balance":"23000000000","head":"77605","target":"77605","source":"77605","inclusion_delay":"8743","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"66906","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000
000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4460","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"7898","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"17771","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"26762","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"14217","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5352","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"14868","inactivity":"0"},{"effective_balance":"19000000000","head":"64108","target":"64108","source":"64108","inclusion_delay":"5675","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"19000000000","head":"64108","target":"64108","source":"64108","inclusion_delay":"4673","inactivity":"0"},{"effective_balance":"26000000000","head":"87727","target":"87727","source":"87727","inclusion_delay":"4348","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6690","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"4181","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"effective_balance":"27000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"24000000000","head":"80979","target":"80979","source":"80979","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"4442","inactivity":"0"},{"effective_balance":"20000000000","head":"67482","target":"67482","source":"67482","inclusion
_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6690","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"3949","inactivity":"0"},{"effective_balance":"28000000000","head":"94475","target":"94475","source":"94475","inclusion_delay":"7805","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"18000000000","head":"60734","target":"60734","source":"60734","inclusion_delay":"8363","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"6462","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2843","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"26762","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"12164","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"22302","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"5468","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"23000000000","head":"77605","target":"77605","source":"77605","inclusion_delay":"3434","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2369","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"17771","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"31000000000","head":"104598","target":"104598","source":"104598","inclusion_delay":"5401","inactivity":"0"},{"effective_balance":"29000000000","head":"97850","target":"97850","source":"97850","inclusion_delay":"20211","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"11847","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2538","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"19116","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"2230
2","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"14217","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"19116","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2843","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"4181","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"effective_balance":"27000000000","head":"91101","target":"91101","source":"91101","inclusion_delay":"5131","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"9558","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2221","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"effective_balance":"23000000000","head":"77605","target":"77605","source":"77605","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"26000000000","head":"87727","target":"87727","source":"87727","inclusion_delay":"3507","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"5077","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"effective_balance":"18000000000","head":"60734","target":"60734","source":"60734","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"21000000000","head":"70856","target":"70856","source":"70856","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_de
lay":"4316","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6372","inactivity":"0"},{"effective_balance":"29000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"effective_balance":"18000000000","head":"60734","target":"60734","source":"60734","inclusion_delay":"2508","inactivity":"0"},{"effective_balance":"28000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"28000000000","head":"94475","target":"94475","source":"94475","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"28000000000","head":"94475","target":"94475","source":"94475","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"31000000000","head":"104598","target":"104598","source":"104598","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2632","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"effective_balance":"21000000000","head":"70856","target":"70856","source":"70856","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"7871","inactivity":"0"},{"effective_balance":"23000000000","head":"77605","target":"77605","source":"77605","inclusion_delay":"13739","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"19116","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"66906","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"effective_balance":"21000000000","head":"70856","target":"70856","source":"70856","inclusion_delay":"14635","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"effective_balance":"21000000000","head":"70856","target":"70856","source":"70856","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","
source":"107972","inclusion_delay":"4614","inactivity":"0"},{"effective_balance":"29000000000","head":"97850","target":"97850","source":"97850","inclusion_delay":"20211","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"effective_balance":"23000000000","head":"77605","target":"77605","source":"77605","inclusion_delay":"19235","inactivity":"0"},{"effective_balance":"18000000000","head":"60734","target":"60734","source":"60734","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"66906","inactivity":"0"},{"effective_balance":"30000000000","head":"101224","target":"101224","source":"101224","inclusion_delay":"5227","inactivity":"0"},{"effective_balance":"25000000000","head":"84353","target":"84353","source":"84353","inclusion_delay":"3604","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4460","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"27000000000","head":"91101","target":"91101","source":"91101","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"30000000000","head":"101224","target":"101224","source":"101224","inclusion_delay":"6272","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"17771","inactivity":"0"},{"effective_balance":"19000000000","head":"64108","target":"64108","source":"64108","inclusion_delay":"8827","inactivity":"0"},{"effective_balance":"24000000000","head":"80979","target":"80979","source":"80979","inclusion_delay":"20071","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"31000000000","head":"104598","target":"104598","source":"104598","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6690","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"5923","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5352","inactivity":"0"},{"effective_balance":"19000000000","head":"64108","target":"64108","source":"64108","inclusion_delay":"3310","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","tar
get":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"effective_balance":"25000000000","head":"84353","target":"84353","source":"84353","inclusion_delay":"14934","inactivity":"0"},{"effective_balance":"26000000000","head":"87727","target":"87727","source":"87727","inclusion_delay":"54361","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"26762","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"20000000000","head":"67482","target":"67482","source":"67482","inclusion_delay":"3636","inactivity":"0"},{"effective_balance":"27000000000","head":"91101","target":"91101","source":"91101","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"19000000000","head":"64108","target":"64108","source":"64108","inclusion_delay":"6111","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"14868","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"3554","inactivity":"0"},{"effective_balance":"24000000000","head":"80979","target":"80979","source":"80979","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"effective_balance":"25000000000","head":"84353","target":"84353","source":"84353","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"27000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"14868","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"3385","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"5468","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"12164","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_d
elay":"3554","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"9558","inactivity":"0"},{"effective_balance":"20000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6372","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"3385","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2843","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"9558","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"31000000000","head":"104598","target":"104598","source":"104598","inclusion_delay":"4470","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2369","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"effective_balance":"22000000000","head":"74230","target":"74230","source":"74230","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"26762","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"26000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5352","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"3554","inactivity":"0"},{"effective_balance":"23000000000","head":"77605","target":"77605","source":"77605","inclusion_delay":"3316","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"4181","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4181","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_
balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2538","inactivity":"0"},{"effective_balance":"25000000000","head":"84353","target":"84353","source":"84353","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"5468","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2221","inactivity":"0"},{"effective_balance":"27000000000","head":"91101","target":"91101","source":"91101","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"66906","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"29000000000","head":"97850","target":"97850","source":"97850","inclusion_delay":"60633","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"effective_balance":"20000000000","head":"67482","target":"67482","source":"67482","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"7898","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"14217","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"18000000000","head":"60734","target":"60734","source":"60734","inclusion_delay":"4704","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2843","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"25000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"3554","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"10293","inactivity":"0"},{"effective_balance":"28000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"20000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"35543","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5575","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2293","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"10
7972","inclusion_delay":"6690","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"4442","inactivity":"0"},{"effective_balance":"22000000000","head":"74230","target":"74230","source":"74230","inclusion_delay":"5110","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"5923","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2632","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6372","inactivity":"0"},{"effective_balance":"24000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"31000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2369","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"11847","inactivity":"0"},{"effective_balance":"27000000000","head":"91101","target":"91101","source":"91101","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6372","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2369","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"4739","inactivity":"0"},{"effective_balance":"18000000000","head":"60734","target":"60734","source":"60734","inclusion_delay":"4427","inactivity":"0"},{"effective_balance":"28000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5575","inactivity":"0"},{"effective_balance":"28000000000","head":"94475","target":"94475","source":"94475","inclusion_delay":"29271","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5575","inactivity":"0"},{"effective_balance":"30000000000","head":"101224","target":"101224","source":"101224","inclusion_delay":"25089","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"30000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"29000000000","head":"97850","target":"97850","source":"97850","inclusion_delay":"3789","inactivity":"0"},{"effective_b
alance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"21000000000","head":"70856","target":"70856","source":"70856","inclusion_delay":"7317","inactivity":"0"},{"effective_balance":"31000000000","head":"104598","target":"104598","source":"104598","inclusion_delay":"7201","inactivity":"0"},{"effective_balance":"22000000000","head":"74230","target":"74230","source":"74230","inclusion_delay":"5411","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"24000000000","head":"80979","target":"80979","source":"80979","inclusion_delay":"7168","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4460","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"effective_balance":"19000000000","head":"64108","target":"64108","source":"64108","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"5468","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2843","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"10293","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5352","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"3385","inactivity":"0"},{"effective_balance":"26000000000","head":"87727","target":"87727","source":"87727","inclusion_delay":"4530","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"5077","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"effective_balance":"19000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"3554","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"14868","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"22302","inactivity":"0"},{"effective_balance":"32000000000","head":"107972",
"target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"12164","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2632","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"5468","inactivity":"0"},{"effective_balance":"31000000000","head":"104598","target":"104598","source":"104598","inclusion_delay":"4050","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"17771","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2961","inactivity":"0"},{"effective_balance":"24000000000","head":"80979","target":"80979","source":"80979","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"30000000000","head":"101224","target":"101224","source":"101224","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"5077","inactivity":"0"},{"effective_balance":"21000000000","head":"70856","target":"70856","source":"70856","inclusion_delay":"7983","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"5923","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2538","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"14868","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"29000000000","head":"97850","target":"97850","source":"97850","inclusion_delay":"5272","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"
effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"30000000000","head":"101224","target":"101224","source":"101224","inclusion_delay":"4646","inactivity":"0"},{"effective_balance":"19000000000","head":"64108","target":"64108","source":"64108","inclusion_delay":"39725","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"12164","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"3554","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2369","inactivity":"0"},{"effective_balance":"29000000000","head":"97850","target":"97850","source":"97850","inclusion_delay":"4181","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"11847","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"effective_balance":"31000000000","head":"104598","target":"104598","source":"104598","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2632","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head"
:"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2632","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"12164","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"29000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"30000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4956","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"effective_balance":"22000000000","head":"74230","target":"74230","source":"74230","inclusion_delay":"22998","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"11151","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"effective_balance":"17000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2293","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"3385","inactivity":"0"},{"effective_balance":"22000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"2221","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"6690","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"11151","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"4956","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"35543","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","i
nclusion_delay":"5077","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"6462","inactivity":"0"},{"effective_balance":"32000000000","head":"107972","target":"107972","source":"107972","inclusion_delay":"5352","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"32000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"}],"total_rewards":[{"validator_index":"0","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"1","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"2","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-358422"},{"validator_index":"3","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"4","head":"57360","target":"57360","source":"57360","inclusion_delay":"14217","inactivity":"0"},{"validator_index":"5","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"validator_index":"6","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"7","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"8","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"9","head":"107972","target":"107972","source":"107972","inclusion_delay":"11151","inactivity":"0"},{"validator_index":"10","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"validator_index":"11","head":"107972","target":"107972","source":"107972","inclusion_delay":"4460","inactivity":"0"},{"validator_index":"12","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"13","
head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"14","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"validator_index":"15","head":"107972","target":"107972","source":"107972","inclusion_delay":"22302","inactivity":"0"},{"validator_index":"16","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"17","head":"107972","target":"107972","source":"107972","inclusion_delay":"7871","inactivity":"0"},{"validator_index":"18","head":"107972","target":"107972","source":"107972","inclusion_delay":"66906","inactivity":"0"},{"validator_index":"19","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"20","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"validator_index":"21","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"validator_index":"22","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"23","head":"87727","target":"87727","source":"87727","inclusion_delay":"4348","inactivity":"0"},{"validator_index":"24","head":"57360","target":"57360","source":"57360","inclusion_delay":"6462","inactivity":"0"},{"validator_index":"25","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"validator_index":"26","head":"107972","target":"107972","source":"107972","inclusion_delay":"26762","inactivity":"0"},{"validator_index":"27","head":"67482","target":"67482","source":"67482","inclusion_delay":"4646","inactivity":"0"},{"validator_index":"28","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"29","head":"87727","target":"87727","source":"87727","inclusion_delay":"4941","inactivity":"0"},{"validator_index":"30","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"31","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"32","head":"80979","target":"80979","source":"80979","inclusion_delay":"9123","inactivity":"0"},{"validator_index":"33","head":"70856","target":"70856","source":"70856","inclusion_delay":"17562","inactivity":"0"},{"validator_index":"34","head":"107972","target":"107972","source":"107972","inclusion_delay":"4181","inactivity":"0"},{"validator_index":"35","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"validator_index":"36","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"37","head":"84353","target":"84353","source":"84353","inclusion_delay":"17423","inactivity":"0"},{"validator_index":"38","head":"57360","target":"57360","source":"57360","inclusion_delay":"4181","inactivity":"0"},{"validator_index":"39","head":"57360","target":"57360","source":"57360","inclusion_delay":"4442","inactivity":"0"},{"validator_index":"40","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"41","head":"107972","target":"107972","source":"107972","inclusion_delay":"33453","inactivity":"0"},{"validator_index":"42","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"43","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"44","head":"84353","target":"84353","source":"8435
3","inclusion_delay":"4355","inactivity":"0"},{"validator_index":"45","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"46","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"47","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"48","head":"107972","target":"107972","source":"107972","inclusion_delay":"5352","inactivity":"0"},{"validator_index":"49","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"validator_index":"50","head":"57360","target":"57360","source":"57360","inclusion_delay":"3231","inactivity":"0"},{"validator_index":"51","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"52","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"53","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"validator_index":"54","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"validator_index":"55","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"56","head":"70856","target":"70856","source":"70856","inclusion_delay":"5854","inactivity":"0"},{"validator_index":"57","head":"60734","target":"60734","source":"60734","inclusion_delay":"8363","inactivity":"0"},{"validator_index":"58","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"validator_index":"59","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"validator_index":"60","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"validator_index":"61","head":"107972","target":"107972","source":"107972","inclusion_delay":"6372","inactivity":"0"},{"validator_index":"62","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"63","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"64","head":"57360","target":"57360","source":"57360","inclusion_delay":"4442","inactivity":"0"},{"validator_index":"65","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"validator_index":"66","head":"107972","target":"107972","source":"107972","inclusion_delay":"11151","inactivity":"0"},{"validator_index":"67","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"68","head":"60734","target":"60734","source":"60734","inclusion_delay":"10752","inactivity":"0"},{"validator_index":"69","head":"101224","target":"101224","source":"101224","inclusion_delay":"3920","inactivity":"0"},{"validator_index":"70","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"validator_index":"71","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"validator_index":"72","head":"57360","target":"57360","source":"57360","inclusion_delay":"35543","inactivity":"0"},{"validator_index":"73","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"74","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"75","head":"107972","target":"107972","source":"107972","inclusion_delay":"9558","inactivity":
"0"},{"validator_index":"76","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"validator_index":"77","head":"107972","target":"107972","source":"107972","inclusion_delay":"5575","inactivity":"0"},{"validator_index":"78","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"79","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"80","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"81","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-372759"},{"validator_index":"82","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"83","head":"107972","target":"107972","source":"107972","inclusion_delay":"10293","inactivity":"0"},{"validator_index":"84","head":"57360","target":"57360","source":"57360","inclusion_delay":"2221","inactivity":"0"},{"validator_index":"85","head":"57360","target":"57360","source":"57360","inclusion_delay":"14217","inactivity":"0"},{"validator_index":"86","head":"107972","target":"107972","source":"107972","inclusion_delay":"6690","inactivity":"0"},{"validator_index":"87","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"88","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"89","head":"107972","target":"107972","source":"107972","inclusion_delay":"12164","inactivity":"0"},{"validator_index":"90","head":"107972","target":"107972","source":"107972","inclusion_delay":"22302","inactivity":"0"},{"validator_index":"91","head":"107972","target":"107972","source":"107972","inclusion_delay":"14868","inactivity":"0"},{"validator_index":"92","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"93","head":"107972","target":"107972","source":"107972","inclusion_delay":"4181","inactivity":"0"},{"validator_index":"94","head":"64108","target":"64108","source":"64108","inclusion_delay":"5675","inactivity":"0"},{"validator_index":"95","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"validator_index":"96","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"validator_index":"97","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"98","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"validator_index":"99","head":"107972","target":"107972","source":"107972","inclusion_delay":"4956","inactivity":"0"},{"validator_index":"100","head":"57360","target":"57360","source":"57360","inclusion_delay":"2632","inactivity":"0"},{"validator_index":"101","head":"107972","target":"107972","source":"107972","inclusion_delay":"33453","inactivity":"0"},{"validator_index":"102","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"103","head":"107972","target":"107972","source":"107972","inclusion_delay":"6372","inactivity":"0"},{"validator_index":"104","head":"57360","target":"57360","source":"57360","inclusion_delay":"3231","inactivity":"0"},{"validator_index":"105","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"106","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"107","head":"0","target":"0",
"source":"0","inclusion_delay":"0","inactivity":"-329748"},{"validator_index":"108","head":"107972","target":"107972","source":"107972","inclusion_delay":"7871","inactivity":"0"},{"validator_index":"109","head":"91101","target":"91101","source":"91101","inclusion_delay":"3763","inactivity":"0"},{"validator_index":"110","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"validator_index":"111","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"112","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"113","head":"107972","target":"107972","source":"107972","inclusion_delay":"4460","inactivity":"0"},{"validator_index":"114","head":"77605","target":"77605","source":"77605","inclusion_delay":"10686","inactivity":"0"},{"validator_index":"115","head":"107972","target":"107972","source":"107972","inclusion_delay":"10293","inactivity":"0"},{"validator_index":"116","head":"80979","target":"80979","source":"80979","inclusion_delay":"4561","inactivity":"0"},{"validator_index":"117","head":"107972","target":"107972","source":"107972","inclusion_delay":"11151","inactivity":"0"},{"validator_index":"118","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"119","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"120","head":"107972","target":"107972","source":"107972","inclusion_delay":"5575","inactivity":"0"},{"validator_index":"121","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"122","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"123","head":"57360","target":"57360","source":"57360","inclusion_delay":"5923","inactivity":"0"},{"validator_index":"124","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"125","head":"107972","target":"107972","source":"107972","inclusion_delay":"4460","inactivity":"0"},{"validator_index":"126","head":"107972","target":"107972","source":"107972","inclusion_delay":"22302","inactivity":"0"},{"validator_index":"127","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"128","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"129","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"130","head":"107972","target":"107972","source":"107972","inclusion_delay":"33453","inactivity":"0"},{"validator_index":"131","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"132","head":"57360","target":"57360","source":"57360","inclusion_delay":"2538","inactivity":"0"},{"validator_index":"133","head":"57360","target":"57360","source":"57360","inclusion_delay":"4442","inactivity":"0"},{"validator_index":"134","head":"107972","target":"107972","source":"107972","inclusion_delay":"19116","inactivity":"0"},{"validator_index":"135","head":"67482","target":"67482","source":"67482","inclusion_delay":"2697","inactivity":"0"},{"validator_index":"136","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"137","head":"107972","target":"107972","source":"107972","inclusion_delay":"7871","inactivity":"0"},{"validator_index":"138","head":"77605","target":"77605","source":"77605","incl
usion_delay":"8743","inactivity":"0"},{"validator_index":"139","head":"107972","target":"107972","source":"107972","inclusion_delay":"66906","inactivity":"0"},{"validator_index":"140","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"141","head":"107972","target":"107972","source":"107972","inclusion_delay":"4460","inactivity":"0"},{"validator_index":"142","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"143","head":"57360","target":"57360","source":"57360","inclusion_delay":"7898","inactivity":"0"},{"validator_index":"144","head":"57360","target":"57360","source":"57360","inclusion_delay":"17771","inactivity":"0"},{"validator_index":"145","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"146","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"validator_index":"147","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"148","head":"107972","target":"107972","source":"107972","inclusion_delay":"26762","inactivity":"0"},{"validator_index":"149","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"150","head":"57360","target":"57360","source":"57360","inclusion_delay":"14217","inactivity":"0"},{"validator_index":"151","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"152","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"validator_index":"153","head":"107972","target":"107972","source":"107972","inclusion_delay":"5352","inactivity":"0"},{"validator_index":"154","head":"107972","target":"107972","source":"107972","inclusion_delay":"14868","inactivity":"0"},{"validator_index":"155","head":"64108","target":"64108","source":"64108","inclusion_delay":"5675","inactivity":"0"},{"validator_index":"156","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"157","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"158","head":"64108","target":"64108","source":"64108","inclusion_delay":"4673","inactivity":"0"},{"validator_index":"159","head":"87727","target":"87727","source":"87727","inclusion_delay":"4348","inactivity":"0"},{"validator_index":"160","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"161","head":"107972","target":"107972","source":"107972","inclusion_delay":"6690","inactivity":"0"},{"validator_index":"162","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"163","head":"57360","target":"57360","source":"57360","inclusion_delay":"4181","inactivity":"0"},{"validator_index":"164","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"165","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"validator_index":"166","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"167","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-344085"},{"validator_index":"168","head":"57360","target":"57360","source":"57360","inclusion_delay":"4442","inactivity":"0"},{"validator_index":"169","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-286737"},{"validator_index":"
170","head":"107972","target":"107972","source":"107972","inclusion_delay":"6690","inactivity":"0"},{"validator_index":"171","head":"57360","target":"57360","source":"57360","inclusion_delay":"3949","inactivity":"0"},{"validator_index":"172","head":"94475","target":"94475","source":"94475","inclusion_delay":"7805","inactivity":"0"},{"validator_index":"173","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"174","head":"60734","target":"60734","source":"60734","inclusion_delay":"8363","inactivity":"0"},{"validator_index":"175","head":"57360","target":"57360","source":"57360","inclusion_delay":"6462","inactivity":"0"},{"validator_index":"176","head":"57360","target":"57360","source":"57360","inclusion_delay":"2843","inactivity":"0"},{"validator_index":"177","head":"107972","target":"107972","source":"107972","inclusion_delay":"26762","inactivity":"0"},{"validator_index":"178","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"179","head":"107972","target":"107972","source":"107972","inclusion_delay":"12164","inactivity":"0"},{"validator_index":"180","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"181","head":"107972","target":"107972","source":"107972","inclusion_delay":"22302","inactivity":"0"},{"validator_index":"182","head":"57360","target":"57360","source":"57360","inclusion_delay":"5468","inactivity":"0"},{"validator_index":"183","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"validator_index":"184","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"185","head":"77605","target":"77605","source":"77605","inclusion_delay":"3434","inactivity":"0"},{"validator_index":"186","head":"57360","target":"57360","source":"57360","inclusion_delay":"2369","inactivity":"0"},{"validator_index":"187","head":"57360","target":"57360","source":"57360","inclusion_delay":"17771","inactivity":"0"},{"validator_index":"188","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"validator_index":"189","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"190","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"191","head":"104598","target":"104598","source":"104598","inclusion_delay":"5401","inactivity":"0"},{"validator_index":"192","head":"97850","target":"97850","source":"97850","inclusion_delay":"20211","inactivity":"0"},{"validator_index":"193","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"validator_index":"194","head":"57360","target":"57360","source":"57360","inclusion_delay":"11847","inactivity":"0"},{"validator_index":"195","head":"57360","target":"57360","source":"57360","inclusion_delay":"2538","inactivity":"0"},{"validator_index":"196","head":"107972","target":"107972","source":"107972","inclusion_delay":"19116","inactivity":"0"},{"validator_index":"197","head":"107972","target":"107972","source":"107972","inclusion_delay":"22302","inactivity":"0"},{"validator_index":"198","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"validator_index":"199","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"200","head":"57360","target":"57360","source":"57360","inclusion_delay":"14217","inac
tivity":"0"},{"validator_index":"201","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"validator_index":"202","head":"107972","target":"107972","source":"107972","inclusion_delay":"19116","inactivity":"0"},{"validator_index":"203","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"204","head":"57360","target":"57360","source":"57360","inclusion_delay":"2843","inactivity":"0"},{"validator_index":"205","head":"57360","target":"57360","source":"57360","inclusion_delay":"4181","inactivity":"0"},{"validator_index":"206","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"validator_index":"207","head":"91101","target":"91101","source":"91101","inclusion_delay":"5131","inactivity":"0"},{"validator_index":"208","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"209","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"210","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"211","head":"107972","target":"107972","source":"107972","inclusion_delay":"9558","inactivity":"0"},{"validator_index":"212","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"213","head":"57360","target":"57360","source":"57360","inclusion_delay":"2221","inactivity":"0"},{"validator_index":"214","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"validator_index":"215","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-329748"},{"validator_index":"216","head":"87727","target":"87727","source":"87727","inclusion_delay":"3507","inactivity":"0"},{"validator_index":"217","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"validator_index":"218","head":"57360","target":"57360","source":"57360","inclusion_delay":"5077","inactivity":"0"},{"validator_index":"219","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"validator_index":"220","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"validator_index":"221","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"222","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"validator_index":"223","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-258063"},{"validator_index":"224","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-301074"},{"validator_index":"225","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"validator_index":"226","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"227","head":"107972","target":"107972","source":"107972","inclusion_delay":"6372","inactivity":"0"},{"validator_index":"228","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"229","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"validator_index":"230","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"231","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"valid
ator_index":"232","head":"60734","target":"60734","source":"60734","inclusion_delay":"2508","inactivity":"0"},{"validator_index":"233","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"234","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-401433"},{"validator_index":"235","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-401433"},{"validator_index":"236","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"237","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"238","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"239","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-444444"},{"validator_index":"240","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"241","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"242","head":"57360","target":"57360","source":"57360","inclusion_delay":"2632","inactivity":"0"},{"validator_index":"243","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"244","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"validator_index":"245","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-301074"},{"validator_index":"246","head":"107972","target":"107972","source":"107972","inclusion_delay":"7871","inactivity":"0"},{"validator_index":"247","head":"77605","target":"77605","source":"77605","inclusion_delay":"13739","inactivity":"0"},{"validator_index":"248","head":"107972","target":"107972","source":"107972","inclusion_delay":"19116","inactivity":"0"},{"validator_index":"249","head":"107972","target":"107972","source":"107972","inclusion_delay":"66906","inactivity":"0"},{"validator_index":"250","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"validator_index":"251","head":"70856","target":"70856","source":"70856","inclusion_delay":"14635","inactivity":"0"},{"validator_index":"252","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"validator_index":"253","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-301074"},{"validator_index":"254","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"validator_index":"255","head":"97850","target":"97850","source":"97850","inclusion_delay":"20211","inactivity":"0"},{"validator_index":"256","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"257","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"validator_index":"258","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"validator_index":"259","head":"77605","target":"77605","source":"77605","inclusion_delay":"19235","inactivity":"0"},{"validator_index":"260","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-258063"},{"validator_index":"261","head":"107972","target":"107972","source":"107972","inclusion_delay":"66906","inactivity":"0"},{"validator_index":"262","head":"101224","target":"101224","source":"101224","inclusion_delay":"5227","inactivity":"0"},{"validator_index":"263","head":"84353","target":"84353",
"source":"84353","inclusion_delay":"3604","inactivity":"0"},{"validator_index":"264","head":"107972","target":"107972","source":"107972","inclusion_delay":"4460","inactivity":"0"},{"validator_index":"265","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"266","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-387096"},{"validator_index":"267","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"268","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"269","head":"101224","target":"101224","source":"101224","inclusion_delay":"6272","inactivity":"0"},{"validator_index":"270","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"validator_index":"271","head":"57360","target":"57360","source":"57360","inclusion_delay":"17771","inactivity":"0"},{"validator_index":"272","head":"64108","target":"64108","source":"64108","inclusion_delay":"8827","inactivity":"0"},{"validator_index":"273","head":"80979","target":"80979","source":"80979","inclusion_delay":"20071","inactivity":"0"},{"validator_index":"274","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"275","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-444444"},{"validator_index":"276","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"277","head":"107972","target":"107972","source":"107972","inclusion_delay":"6690","inactivity":"0"},{"validator_index":"278","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"validator_index":"279","head":"57360","target":"57360","source":"57360","inclusion_delay":"5923","inactivity":"0"},{"validator_index":"280","head":"107972","target":"107972","source":"107972","inclusion_delay":"5352","inactivity":"0"},{"validator_index":"281","head":"64108","target":"64108","source":"64108","inclusion_delay":"3310","inactivity":"0"},{"validator_index":"282","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"283","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"284","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"validator_index":"285","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"286","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"287","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"validator_index":"288","head":"84353","target":"84353","source":"84353","inclusion_delay":"14934","inactivity":"0"},{"validator_index":"289","head":"87727","target":"87727","source":"87727","inclusion_delay":"54361","inactivity":"0"},{"validator_index":"290","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"291","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"292","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"293","head":"107972","target":"107972","source":"107972","inclusion_delay":"26762","inactivity":"0"},{"validator_index":"294","head":"0","target":"0","source":"0","inclusion_delay":"0","inacti
vity":"-243726"},{"validator_index":"295","head":"67482","target":"67482","source":"67482","inclusion_delay":"3636","inactivity":"0"},{"validator_index":"296","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-387096"},{"validator_index":"297","head":"64108","target":"64108","source":"64108","inclusion_delay":"6111","inactivity":"0"},{"validator_index":"298","head":"107972","target":"107972","source":"107972","inclusion_delay":"14868","inactivity":"0"},{"validator_index":"299","head":"57360","target":"57360","source":"57360","inclusion_delay":"3554","inactivity":"0"},{"validator_index":"300","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-344085"},{"validator_index":"301","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"302","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"validator_index":"303","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-358422"},{"validator_index":"304","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"305","head":"107972","target":"107972","source":"107972","inclusion_delay":"14868","inactivity":"0"},{"validator_index":"306","head":"57360","target":"57360","source":"57360","inclusion_delay":"3385","inactivity":"0"},{"validator_index":"307","head":"57360","target":"57360","source":"57360","inclusion_delay":"5468","inactivity":"0"},{"validator_index":"308","head":"107972","target":"107972","source":"107972","inclusion_delay":"12164","inactivity":"0"},{"validator_index":"309","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"310","head":"57360","target":"57360","source":"57360","inclusion_delay":"3554","inactivity":"0"},{"validator_index":"311","head":"107972","target":"107972","source":"107972","inclusion_delay":"9558","inactivity":"0"},{"validator_index":"312","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"313","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"314","head":"107972","target":"107972","source":"107972","inclusion_delay":"6372","inactivity":"0"},{"validator_index":"315","head":"57360","target":"57360","source":"57360","inclusion_delay":"3385","inactivity":"0"},{"validator_index":"316","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"317","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"318","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"319","head":"57360","target":"57360","source":"57360","inclusion_delay":"2843","inactivity":"0"},{"validator_index":"320","head":"107972","target":"107972","source":"107972","inclusion_delay":"9558","inactivity":"0"},{"validator_index":"321","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"322","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"323","head":"104598","target":"104598","source":"104598","inclusion_delay":"4470","inactivity":"0"},{"validator_index":"324","head":"57360","target":"57360","source":"57360","inclusion_delay":"2369","inactivity":"0"},{"validator_index":"325","head":"107972","target":"107972","source":"107972","inclusion_delay":"8920","inactivity":"0"},{"validator_inde
x":"326","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-315411"},{"validator_index":"327","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"328","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"validator_index":"329","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"330","head":"107972","target":"107972","source":"107972","inclusion_delay":"26762","inactivity":"0"},{"validator_index":"331","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"332","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"333","head":"107972","target":"107972","source":"107972","inclusion_delay":"5352","inactivity":"0"},{"validator_index":"334","head":"57360","target":"57360","source":"57360","inclusion_delay":"3554","inactivity":"0"},{"validator_index":"335","head":"77605","target":"77605","source":"77605","inclusion_delay":"3316","inactivity":"0"},{"validator_index":"336","head":"57360","target":"57360","source":"57360","inclusion_delay":"4181","inactivity":"0"},{"validator_index":"337","head":"107972","target":"107972","source":"107972","inclusion_delay":"4181","inactivity":"0"},{"validator_index":"338","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"339","head":"57360","target":"57360","source":"57360","inclusion_delay":"2538","inactivity":"0"},{"validator_index":"340","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-358422"},{"validator_index":"341","head":"57360","target":"57360","source":"57360","inclusion_delay":"5468","inactivity":"0"},{"validator_index":"342","head":"57360","target":"57360","source":"57360","inclusion_delay":"2221","inactivity":"0"},{"validator_index":"343","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-387096"},{"validator_index":"344","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"345","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"346","head":"107972","target":"107972","source":"107972","inclusion_delay":"66906","inactivity":"0"},{"validator_index":"347","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"348","head":"97850","target":"97850","source":"97850","inclusion_delay":"60633","inactivity":"0"},{"validator_index":"349","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"validator_index":"350","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-286737"},{"validator_index":"351","head":"57360","target":"57360","source":"57360","inclusion_delay":"7898","inactivity":"0"},{"validator_index":"352","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"353","head":"57360","target":"57360","source":"57360","inclusion_delay":"14217","inactivity":"0"},{"validator_index":"354","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"355","head":"60734","target":"60734","source":"60734","inclusion_delay":"4704","inactivity":"0"},{"validator_index":"356","head":"57360","target":"57360","source":"57360","inclusion_delay":"2843","inactivity":"0"},{"validator_index":"357","head":"0","target":"0","source":"0","inc
lusion_delay":"0","inactivity":"0"},{"validator_index":"358","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"359","head":"57360","target":"57360","source":"57360","inclusion_delay":"3554","inactivity":"0"},{"validator_index":"360","head":"107972","target":"107972","source":"107972","inclusion_delay":"10293","inactivity":"0"},{"validator_index":"361","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"362","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"363","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"validator_index":"364","head":"57360","target":"57360","source":"57360","inclusion_delay":"35543","inactivity":"0"},{"validator_index":"365","head":"107972","target":"107972","source":"107972","inclusion_delay":"5575","inactivity":"0"},{"validator_index":"366","head":"57360","target":"57360","source":"57360","inclusion_delay":"2293","inactivity":"0"},{"validator_index":"367","head":"107972","target":"107972","source":"107972","inclusion_delay":"6690","inactivity":"0"},{"validator_index":"368","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"369","head":"57360","target":"57360","source":"57360","inclusion_delay":"4442","inactivity":"0"},{"validator_index":"370","head":"74230","target":"74230","source":"74230","inclusion_delay":"5110","inactivity":"0"},{"validator_index":"371","head":"57360","target":"57360","source":"57360","inclusion_delay":"5923","inactivity":"0"},{"validator_index":"372","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"373","head":"57360","target":"57360","source":"57360","inclusion_delay":"2632","inactivity":"0"},{"validator_index":"374","head":"107972","target":"107972","source":"107972","inclusion_delay":"6372","inactivity":"0"},{"validator_index":"375","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"376","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"377","head":"57360","target":"57360","source":"57360","inclusion_delay":"2369","inactivity":"0"},{"validator_index":"378","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"379","head":"57360","target":"57360","source":"57360","inclusion_delay":"11847","inactivity":"0"},{"validator_index":"380","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-387096"},{"validator_index":"381","head":"107972","target":"107972","source":"107972","inclusion_delay":"6372","inactivity":"0"},{"validator_index":"382","head":"57360","target":"57360","source":"57360","inclusion_delay":"2369","inactivity":"0"},{"validator_index":"383","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"384","head":"57360","target":"57360","source":"57360","inclusion_delay":"4739","inactivity":"0"},{"validator_index":"385","head":"60734","target":"60734","source":"60734","inclusion_delay":"4427","inactivity":"0"},{"validator_index":"386","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"387","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"validator_index":"388","head":"107972","target":"107972","source":"107972","inclusion_delay":"5575","inactivity":"0"},
{"validator_index":"389","head":"94475","target":"94475","source":"94475","inclusion_delay":"29271","inactivity":"0"},{"validator_index":"390","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"391","head":"107972","target":"107972","source":"107972","inclusion_delay":"5575","inactivity":"0"},{"validator_index":"392","head":"101224","target":"101224","source":"101224","inclusion_delay":"25089","inactivity":"0"},{"validator_index":"393","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"394","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"395","head":"97850","target":"97850","source":"97850","inclusion_delay":"3789","inactivity":"0"},{"validator_index":"396","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"397","head":"70856","target":"70856","source":"70856","inclusion_delay":"7317","inactivity":"0"},{"validator_index":"398","head":"104598","target":"104598","source":"104598","inclusion_delay":"7201","inactivity":"0"},{"validator_index":"399","head":"74230","target":"74230","source":"74230","inclusion_delay":"5411","inactivity":"0"},{"validator_index":"400","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"401","head":"80979","target":"80979","source":"80979","inclusion_delay":"7168","inactivity":"0"},{"validator_index":"402","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"403","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"404","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"405","head":"107972","target":"107972","source":"107972","inclusion_delay":"4460","inactivity":"0"},{"validator_index":"406","head":"107972","target":"107972","source":"107972","inclusion_delay":"4316","inactivity":"0"},{"validator_index":"407","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-272400"},{"validator_index":"408","head":"57360","target":"57360","source":"57360","inclusion_delay":"5468","inactivity":"0"},{"validator_index":"409","head":"57360","target":"57360","source":"57360","inclusion_delay":"2843","inactivity":"0"},{"validator_index":"410","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"411","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"validator_index":"412","head":"107972","target":"107972","source":"107972","inclusion_delay":"10293","inactivity":"0"},{"validator_index":"413","head":"107972","target":"107972","source":"107972","inclusion_delay":"5352","inactivity":"0"},{"validator_index":"414","head":"57360","target":"57360","source":"57360","inclusion_delay":"3385","inactivity":"0"},{"validator_index":"415","head":"87727","target":"87727","source":"87727","inclusion_delay":"4530","inactivity":"0"},{"validator_index":"416","head":"57360","target":"57360","source":"57360","inclusion_delay":"5077","inactivity":"0"},{"validator_index":"417","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"validator_index":"418","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"419","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"420","head":"573
60","target":"57360","source":"57360","inclusion_delay":"3554","inactivity":"0"},{"validator_index":"421","head":"107972","target":"107972","source":"107972","inclusion_delay":"14868","inactivity":"0"},{"validator_index":"422","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"423","head":"107972","target":"107972","source":"107972","inclusion_delay":"22302","inactivity":"0"},{"validator_index":"424","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"425","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"validator_index":"426","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"427","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"validator_index":"428","head":"107972","target":"107972","source":"107972","inclusion_delay":"12164","inactivity":"0"},{"validator_index":"429","head":"57360","target":"57360","source":"57360","inclusion_delay":"2632","inactivity":"0"},{"validator_index":"430","head":"57360","target":"57360","source":"57360","inclusion_delay":"5468","inactivity":"0"},{"validator_index":"431","head":"104598","target":"104598","source":"104598","inclusion_delay":"4050","inactivity":"0"},{"validator_index":"432","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"433","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"434","head":"57360","target":"57360","source":"57360","inclusion_delay":"17771","inactivity":"0"},{"validator_index":"435","head":"57360","target":"57360","source":"57360","inclusion_delay":"2961","inactivity":"0"},{"validator_index":"436","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-344085"},{"validator_index":"437","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"438","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-430107"},{"validator_index":"439","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"440","head":"57360","target":"57360","source":"57360","inclusion_delay":"5077","inactivity":"0"},{"validator_index":"441","head":"70856","target":"70856","source":"70856","inclusion_delay":"7983","inactivity":"0"},{"validator_index":"442","head":"57360","target":"57360","source":"57360","inclusion_delay":"5923","inactivity":"0"},{"validator_index":"443","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"444","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"445","head":"57360","target":"57360","source":"57360","inclusion_delay":"2538","inactivity":"0"},{"validator_index":"446","head":"107972","target":"107972","source":"107972","inclusion_delay":"14868","inactivity":"0"},{"validator_index":"447","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"448","head":"97850","target":"97850","source":"97850","inclusion_delay":"5272","inactivity":"0"},{"validator_index":"449","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"450","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"451","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","i
nactivity":"0"},{"validator_index":"452","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"453","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"454","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"455","head":"101224","target":"101224","source":"101224","inclusion_delay":"4646","inactivity":"0"},{"validator_index":"456","head":"64108","target":"64108","source":"64108","inclusion_delay":"39725","inactivity":"0"},{"validator_index":"457","head":"107972","target":"107972","source":"107972","inclusion_delay":"7434","inactivity":"0"},{"validator_index":"458","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"459","head":"107972","target":"107972","source":"107972","inclusion_delay":"12164","inactivity":"0"},{"validator_index":"460","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"461","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"462","head":"57360","target":"57360","source":"57360","inclusion_delay":"3554","inactivity":"0"},{"validator_index":"463","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"464","head":"107972","target":"107972","source":"107972","inclusion_delay":"5817","inactivity":"0"},{"validator_index":"465","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"validator_index":"466","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"467","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"468","head":"57360","target":"57360","source":"57360","inclusion_delay":"2369","inactivity":"0"},{"validator_index":"469","head":"97850","target":"97850","source":"97850","inclusion_delay":"4181","inactivity":"0"},{"validator_index":"470","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"471","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"472","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"validator_index":"473","head":"57360","target":"57360","source":"57360","inclusion_delay":"10155","inactivity":"0"},{"validator_index":"474","head":"57360","target":"57360","source":"57360","inclusion_delay":"11847","inactivity":"0"},{"validator_index":"475","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"validator_index":"476","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-444444"},{"validator_index":"477","head":"107972","target":"107972","source":"107972","inclusion_delay":"6082","inactivity":"0"},{"validator_index":"478","head":"57360","target":"57360","source":"57360","inclusion_delay":"2632","inactivity":"0"},{"validator_index":"479","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"480","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"481","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"validator_index":"482","head":"57360","target":"57360","source":"57360","inclusion_delay":"2632","inactivity":"0"},{"validator_index":"483","head":"107972
","target":"107972","source":"107972","inclusion_delay":"12164","inactivity":"0"},{"validator_index":"484","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"485","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"486","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"487","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"488","head":"107972","target":"107972","source":"107972","inclusion_delay":"4956","inactivity":"0"},{"validator_index":"489","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"validator_index":"490","head":"74230","target":"74230","source":"74230","inclusion_delay":"22998","inactivity":"0"},{"validator_index":"491","head":"107972","target":"107972","source":"107972","inclusion_delay":"11151","inactivity":"0"},{"validator_index":"492","head":"107972","target":"107972","source":"107972","inclusion_delay":"4614","inactivity":"0"},{"validator_index":"493","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"494","head":"57360","target":"57360","source":"57360","inclusion_delay":"2293","inactivity":"0"},{"validator_index":"495","head":"57360","target":"57360","source":"57360","inclusion_delay":"3385","inactivity":"0"},{"validator_index":"496","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"497","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"498","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"499","head":"57360","target":"57360","source":"57360","inclusion_delay":"2221","inactivity":"0"},{"validator_index":"500","head":"107972","target":"107972","source":"107972","inclusion_delay":"6690","inactivity":"0"},{"validator_index":"501","head":"107972","target":"107972","source":"107972","inclusion_delay":"4779","inactivity":"0"},{"validator_index":"502","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-243726"},{"validator_index":"503","head":"107972","target":"107972","source":"107972","inclusion_delay":"11151","inactivity":"0"},{"validator_index":"504","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"-458781"},{"validator_index":"505","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"506","head":"107972","target":"107972","source":"107972","inclusion_delay":"8363","inactivity":"0"},{"validator_index":"507","head":"107972","target":"107972","source":"107972","inclusion_delay":"4956","inactivity":"0"},{"validator_index":"508","head":"57360","target":"57360","source":"57360","inclusion_delay":"35543","inactivity":"0"},{"validator_index":"509","head":"57360","target":"57360","source":"57360","inclusion_delay":"5077","inactivity":"0"},{"validator_index":"510","head":"57360","target":"57360","source":"57360","inclusion_delay":"6462","inactivity":"0"},{"validator_index":"511","head":"107972","target":"107972","source":"107972","inclusion_delay":"5352","inactivity":"0"},{"validator_index":"512","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"513","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"514","head":"0","target":"0","source":"0","inclusion_delay":"0","ina
ctivity":"0"},{"validator_index":"515","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"516","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"517","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"518","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"519","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"520","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"521","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"522","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"523","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"524","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"525","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"526","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"527","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"}]}} diff --git a/cl/beacon/handler/test_data/attestations_4.json b/cl/beacon/handler/test_data/attestations_4.json new file mode 100644 index 00000000000..0904e1e270a --- /dev/null +++ b/cl/beacon/handler/test_data/attestations_4.json @@ -0,0 +1 @@ +{"data":{"ideal_rewards":[{"effective_balance":"20000000000","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"effective_balance":"17000000000","head":"57360","target":"57360","source":"57360","inclusion_delay":"14217","inactivity":"0"}],"total_rewards":[{"validator_index":"1","head":"0","target":"0","source":"0","inclusion_delay":"0","inactivity":"0"},{"validator_index":"4","head":"57360","target":"57360","source":"57360","inclusion_delay":"14217","inactivity":"0"}]}} diff --git a/cl/beacon/handler/test_data/blinded_block_1.json b/cl/beacon/handler/test_data/blinded_block_1.json new file mode 100644 index 00000000000..de776b31230 --- /dev/null +++ b/cl/beacon/handler/test_data/blinded_block_1.json @@ -0,0 +1,1975 @@ +{ + "data": { + "signature": "0x8b915f3b9d2d4c7ccaacf5d56c1152b1e91eafd1f59ba734d09e78996930b63ca550499997fe6d590343aaf5997f0d0c14c986571992ac9ed188de2b31ae4b7d70dfb68edae8b012f72f284dc8da44f4af5a2bdf3dfc9c0897ec4f7165daa07a", + "message": { + "slot": "8322", + "proposer_index": "210", + "parent_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "state_root": "0x933d6650f2999f17012e781f5012981edb549e5935de1c981fce81cdd241d4e1", + "body": { + "randao_reveal": "0xa182a6c7224c53cc43492b7ba87b54e8303094ebcb8c822da09c4224791b461e34d089ac857acf05cd695679c25cffa30404832791fe424fd104e2e96ebbf583dd5ec4dcbc891e7f4e0dea402071dbd294810417221fc41e4f90e4837c694e1a", + "eth1_data": { + "deposit_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "deposit_count": "528", + "block_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "graffiti": "0x0000000000000000000000000000000000000000000000000000000000000000", + "proposer_slashings": [ + { + "signed_header_1": { + "message": { + "slot": "8321", + "proposer_index": "476", + "parent_root": "0x3333333333333333333333333333333333333333333333333333333333333333", + 
"state_root": "0x4444444444444444444444444444444444444444444444444444444444444444", + "body_root": "0x5555555555555555555555555555555555555555555555555555555555555555" + }, + "signature": "0x939584df88598e56fe144105c6933b4727d7b772539e65c57289df64cedee771377e4d0e94f85c25d39a6072997d309c09da8c477267670aa42f26fb0836c72ec5867fa2f34dc0eb7e043ef5d6421282d1515b0f8c7ffd4bbbf56ee8d61ed063" + }, + "signed_header_2": { + "message": { + "slot": "8321", + "proposer_index": "476", + "parent_root": "0x9999999999999999999999999999999999999999999999999999999999999999", + "state_root": "0x4444444444444444444444444444444444444444444444444444444444444444", + "body_root": "0x5555555555555555555555555555555555555555555555555555555555555555" + }, + "signature": "0x8a184441d5d944ed3c18549dd9e4640eda879f9e737ac4211fdddfd30a65e1a2a32a8aa918ca65ad9b863a15e8cfefc412608ca78fd54ea1e5cbbd5697d125cc721aac1b01e8984a33f025c4707623669573244a632ec7f37808c01fab143f58" + } + }, + { + "signed_header_1": { + "message": { + "slot": "8321", + "proposer_index": "406", + "parent_root": "0x3333333333333333333333333333333333333333333333333333333333333333", + "state_root": "0x4444444444444444444444444444444444444444444444444444444444444444", + "body_root": "0x5555555555555555555555555555555555555555555555555555555555555555" + }, + "signature": "0xad97a43e9f28a90ff46b07a7bf65d520b89a78af47dbff1c10e4fc6bb36b4ee9c4f27f2a72c65311a03e7b48e06d86db1149147b14a8803d46f6a457092642dc89d3f2782bd48a373e3125af1a84f5b76d4ff7ddc85ac2650ca4c0f99e1af592" + }, + "signed_header_2": { + "message": { + "slot": "8321", + "proposer_index": "406", + "parent_root": "0x9999999999999999999999999999999999999999999999999999999999999999", + "state_root": "0x4444444444444444444444444444444444444444444444444444444444444444", + "body_root": "0x5555555555555555555555555555555555555555555555555555555555555555" + }, + "signature": "0x88d860d460526de062ee196400e24cb3055de2ff6abb31331d0bfeeebcdc77839d22ad6dfec39d81279f5527d1ffbd7e0a9d6eee7dce5a1cd6f79451537e9dfb6384f595e9d49673c58c181527a599dd4b38154e1322f1607f192ab0394f1411" + } + }, + { + "signed_header_1": { + "message": { + "slot": "8321", + "proposer_index": "281", + "parent_root": "0x3333333333333333333333333333333333333333333333333333333333333333", + "state_root": "0x4444444444444444444444444444444444444444444444444444444444444444", + "body_root": "0x5555555555555555555555555555555555555555555555555555555555555555" + }, + "signature": "0x8a2358ff11a30100a2492001827f54ff6c10dd6dcea66f6814dd1cccc4a49850bbbe36546e4f9b72410042a9d5882e8219a5a01708b8a95ca57984debe78f419a4ac921270a0f0c11c795a6c5ef1e6bfb96712751a4fee61059ca8fbe69639b6" + }, + "signed_header_2": { + "message": { + "slot": "8321", + "proposer_index": "281", + "parent_root": "0x9999999999999999999999999999999999999999999999999999999999999999", + "state_root": "0x4444444444444444444444444444444444444444444444444444444444444444", + "body_root": "0x5555555555555555555555555555555555555555555555555555555555555555" + }, + "signature": "0xb820e03b7bfd21c2d97a4f2bc9dd1fd5325894757f7129646c7a39a02b2c1c8ca33d509b4e83491e79db02ac0490aa3308ee23bfa1f65bf4130ab07e377a8cbd4eace5b69801528322dde425b0a78310504c330da30be7cefc674573dbdb4502" + } + }, + { + "signed_header_1": { + "message": { + "slot": "8321", + "proposer_index": "169", + "parent_root": "0x3333333333333333333333333333333333333333333333333333333333333333", + "state_root": "0x4444444444444444444444444444444444444444444444444444444444444444", + "body_root": 
"0x5555555555555555555555555555555555555555555555555555555555555555" + }, + "signature": "0x88c81a6029f097a9f23e37c7677abfafa2921982e9aebffc35ca700e1aefcd49c2ab5d51c7b28ef3db3aad49d58a6407082ce1ecd7f7bd89cb764242890440b684fc0e1511e047434b25f3ad1a5e238e5bf97f51e9e37d6eed48e0b9fef64333" + }, + "signed_header_2": { + "message": { + "slot": "8321", + "proposer_index": "169", + "parent_root": "0x9999999999999999999999999999999999999999999999999999999999999999", + "state_root": "0x4444444444444444444444444444444444444444444444444444444444444444", + "body_root": "0x5555555555555555555555555555555555555555555555555555555555555555" + }, + "signature": "0x815b492a6a3fb606f01dbc595c8b18b51b7f7a5a86b11f3ae57c48f7506a34606556a3cf2be683ce23cd0c7b2235667613f9dbcf98408b176f134645f122684bd8fe704c7a4eccb7bb7cbe33c6de377be4d742291d35d0ec8d6083c1b17b7261" + } + }, + { + "signed_header_1": { + "message": { + "slot": "8321", + "proposer_index": "397", + "parent_root": "0x3333333333333333333333333333333333333333333333333333333333333333", + "state_root": "0x4444444444444444444444444444444444444444444444444444444444444444", + "body_root": "0x5555555555555555555555555555555555555555555555555555555555555555" + }, + "signature": "0xae352ba8550d04c07591224449bd4967f66f9d639b731795f643b1e3fc5ad28317268dc9e289ce6075e8981a0e37d9440885e4f4292cb4b4656bd0c7bd9fc22d21eb4c7d1b46f1b08cdb1eb08d7a405985e8a406e6d93c5c3fdd20e91baba122" + }, + "signed_header_2": { + "message": { + "slot": "8321", + "proposer_index": "397", + "parent_root": "0x9999999999999999999999999999999999999999999999999999999999999999", + "state_root": "0x4444444444444444444444444444444444444444444444444444444444444444", + "body_root": "0x5555555555555555555555555555555555555555555555555555555555555555" + }, + "signature": "0xb9152f5510f2bfa5ab7b61829823f25f0c879ab9b852fcd90c17f751bed6e687dc523fcda177503509cd1befec36046a056a66f5826e2333b6de67430a16f6194416681ae69a1c3498cf8351abae4fac5d8f0b51b1734633d545d540bf269270" + } + } + ], + "attester_slashings": [ + { + "attestation_1": { + "attesting_indicies": [ + "96", + "353", + "445" + ], + "data": { + "slot": "555", + "index": "0", + "beacon_block_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "17", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + }, + "signature": "0xa7e932307a82913b23743198182a7e3c97675e8a1133e8d946bc59c62b1765046214ca0ea0e13b77e4f8acc8f226498103684f382826a9fff6c6c2ffdf9c65ffeb1680155025f489f676457634581ee4363bdfbe4d46fc4d1d9df93c3df8750d" + }, + "attestation_2": { + "attesting_indicies": [ + "96", + "353", + "445" + ], + "data": { + "slot": "555", + "index": "0", + "beacon_block_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "17", + "root": "0x0101010101010101010101010101010101010101010101010101010101010101" + } + }, + "signature": "0x89aadbd74370dc6d86b6b61c544c1e18949b0d8aa2d706605d1014d0266a043588a829243d343d1c3812621944ea34540aef1fbd34fe51b03a5734ebc5ec31057d1df0004faeca71d8687dd3af806e4332e19f6da5ab1d7da67fe017c2f2e68b" + } + } + ], + "attestations": [ + { + "aggregation_bits": "0xff3f", + "signature": 
"0x8b63c286bff1c5dc6fb2e4878e73631e16db1cd3b07e9d0150b3ede4175635fe8db571cb486398e35923640606643d630bacc148d84e9c1060c32b55fe644e5c2573326b041767c5d45d45509a5403a7f2f1b2dd60e54bed26f407bb367a8642", + "data": { + "slot": "8314", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0xb731b2df4dcaf841d50747f85b332170471895508c3af7e8bada14e58a816fed435460e1694e87e2887f19a0de201c3d0bc1ece52c26c519fd9131b25fa8a69b229c14ffd1c935d9e853aca8ab07eaae98a65daec09b2640b91961685e96d58c", + "data": { + "slot": "8292", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x876c656d7889c15cd355d6652b347a25dc8fd7ffc383f0d14ad436b5f1af9ac09e06168ad71be76b85c3dd44ae79cc0f04f250a0bcc529d06a1032283e2b8b384d582c0ace50bf747264a199647697f159d06be75ecfb24da2a8b625a3087804", + "data": { + "slot": "8293", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x876c656d7889c15cd355d6652b347a25dc8fd7ffc383f0d14ad436b5f1af9ac09e06168ad71be76b85c3dd44ae79cc0f04f250a0bcc529d06a1032283e2b8b384d582c0ace50bf747264a199647697f159d06be75ecfb24da2a8b625a3087804", + "data": { + "slot": "8293", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x98416260b644654a4a90bda6032053f1eb3a12c59a3c7534f1ef348f2108c2837245bce74b3fd9f61ebae24860cc698100f864c4f26966c36431acbf0beea679807ba4eba9adfd1a267ef8d990290a2548af6456b1d0def6639ac47fd30c5542", + "data": { + "slot": "8317", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x805e92ba1e4c88489f178202ca5d7ce96e7b874fe98bdee80ab6423441bd37ff3f0fe724bc088f58050ac2a8f81ec5e80401d76caeb65795b5794e7a20d0384f3bfd162b281a17e96cc98087c651d893b05154203af7a7591afe1056db934ec4", + "data": { + "slot": "8309", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": 
"0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x90428ae131501833950bbeccfa738a2af4cbb5c6a04ae8f5e21d7fdd00dda2452d32e0630cd989a4863e8cb81303e1ca04b8f3abcf0b4c456fd7856c0af8585d206f109972b635bcefd72a537a67839b033a6c61f00af536a5e7107fdb9bf527", + "data": { + "slot": "8313", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x95bbaf8dcff64306f01e0b09b27ebe3c761def7edd75542e213586ee0c6d3fc313ae102760abd1262b4f8c00e57603fa01627390011e3a5dea555c74798d7a3e1da68e00e3cdb9d8e4af112b6ff83951bd926288d24eb82e3f203a3160a4d7a9", + "data": { + "slot": "8312", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x894270af1854ce4e65c6e09bc83c15171d564a2af871d0b442cacea78536e5cd34cf4a906025a6d87e12a172ceeb79990b86a1de7ed4ef40cffeca6b93402c3542682bb2914c34430e23038a57e8490abe809dc9f96f3b2caebed380113280b3", + "data": { + "slot": "8297", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0xa44792a6341475c3eff0c11ad62bc19154d5ed48a4b81869ed49885499d5f768c81f357dd6a3ea60aa2f15a184b098f40a1382cfaea0a8438d62d9cca27c85023245b1838e2e4f1d4e65926f8c6a3032740b36c0a3c8aa69850648ac6c12b3ce", + "data": { + "slot": "8306", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x98aade2cf9dad0e1528edec0e76be15577601b6cbef68353e51748b6286bf08812e42fe8791147a54eeed34782249e3f0cc463e22d6cb1c6050636ca8d070531fe40e16913f2e5560f6e683a6781268ff08d32bc5899b00306a87eecc5603928", + "data": { + "slot": "8290", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": 
"0xab42974cba2e3fa75faa4c1f717caf7c2a953f4964063462ed32629764336124fd2f2f430ddf0291325722206250452c109b14cded0e51171b89b106b6b2044291128c3d6966c804490033b9e5fd06450ea50d22b5ab761892c6c3f4de79e098", + "data": { + "slot": "8291", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x8d852ffa1c5960ba3a5d09837fbdb859bbf9045001b3d1dc1c4d22c6b4bc5b6d506f6ef667b5c7c9fbfb1dd0cfe3617405f56750f8b5eb25b3539d0a4c94822b198c524de92a6c68982ce17f985ff5283cea6ac8dabe41828ce38edb7e9fe223", + "data": { + "slot": "8311", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x8bfaed4667e28ed9e39464c7c57027ae345f22847b6ac1aa7e5f342fdb6cdca9d78a962da68f9e34e0453f68fa363fcd196881e2dd76abcab6814439d73448f404124ad2e2f57b59b0df57699d913e24f79c53f129a09c05f2659e4444f4bb53", + "data": { + "slot": "8320", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "258", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "260", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x97426dbbe61af8a68ac683ba95ad871baade096e9287e2d533c1efba04430b7083283485db5b1624fb03639065e8c754155cfe68986d526c1a771b67e45c0e8c97428dee8c6d80cc68892b961e8352d50f34e2623dc3b7ba2cb5dba28a854079", + "data": { + "slot": "8302", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x94fab4732e767881653a5923ca4a93fc2778d349cef972b077aa0fd2553946f578be6571d7a02fe14aa98b11a77475e115bc8062308b23a23c6ce71cd07c528a6e37d30324d57dcc36fa336575210bce5d71ccabf74f0dd96f839eefc1a49343", + "data": { + "slot": "8296", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x8b63c286bff1c5dc6fb2e4878e73631e16db1cd3b07e9d0150b3ede4175635fe8db571cb486398e35923640606643d630bacc148d84e9c1060c32b55fe644e5c2573326b041767c5d45d45509a5403a7f2f1b2dd60e54bed26f407bb367a8642", + "data": { + "slot": "8314", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": 
"0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x805e92ba1e4c88489f178202ca5d7ce96e7b874fe98bdee80ab6423441bd37ff3f0fe724bc088f58050ac2a8f81ec5e80401d76caeb65795b5794e7a20d0384f3bfd162b281a17e96cc98087c651d893b05154203af7a7591afe1056db934ec4", + "data": { + "slot": "8309", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x90428ae131501833950bbeccfa738a2af4cbb5c6a04ae8f5e21d7fdd00dda2452d32e0630cd989a4863e8cb81303e1ca04b8f3abcf0b4c456fd7856c0af8585d206f109972b635bcefd72a537a67839b033a6c61f00af536a5e7107fdb9bf527", + "data": { + "slot": "8313", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0xb60160a4024734b6c22e6083d755d97b22d107001965d35cd1aa5fc3c1059b4cb482c36c78609c0fa131631eb847d165177c877949e5baebb96a48f6e471c1d1d700619b4adeafa728b4d69de8d03d02854e4240d8e16d790168619cc2027247", + "data": { + "slot": "8318", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x90428ae131501833950bbeccfa738a2af4cbb5c6a04ae8f5e21d7fdd00dda2452d32e0630cd989a4863e8cb81303e1ca04b8f3abcf0b4c456fd7856c0af8585d206f109972b635bcefd72a537a67839b033a6c61f00af536a5e7107fdb9bf527", + "data": { + "slot": "8313", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x8b1fbaba2982cd546a7d19d4af3755163112349a0e6d13f05c69807c709f363359c2cfff8a4afa66bd24445eb12b923615c33892a82d8081575207e4165a1d0c944fd3871ff885662c3921b152e674130879d67a0692b4867ad9fc2d20e24fa3", + "data": { + "slot": "8307", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": 
"0xa44792a6341475c3eff0c11ad62bc19154d5ed48a4b81869ed49885499d5f768c81f357dd6a3ea60aa2f15a184b098f40a1382cfaea0a8438d62d9cca27c85023245b1838e2e4f1d4e65926f8c6a3032740b36c0a3c8aa69850648ac6c12b3ce", + "data": { + "slot": "8306", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x8b1fbaba2982cd546a7d19d4af3755163112349a0e6d13f05c69807c709f363359c2cfff8a4afa66bd24445eb12b923615c33892a82d8081575207e4165a1d0c944fd3871ff885662c3921b152e674130879d67a0692b4867ad9fc2d20e24fa3", + "data": { + "slot": "8307", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0xab42974cba2e3fa75faa4c1f717caf7c2a953f4964063462ed32629764336124fd2f2f430ddf0291325722206250452c109b14cded0e51171b89b106b6b2044291128c3d6966c804490033b9e5fd06450ea50d22b5ab761892c6c3f4de79e098", + "data": { + "slot": "8291", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x9494651d4491cfc326f3439cebc3304aaf50a8e5598217da6df2a13b5cb9f9731cc8934f406c0243786b17f936d5892801fc34fc74fb4f52fec147536375dabd9f892940aacdea196e28cb21320bce9ede79b0a11333569d90e6deeb59869217", + "data": { + "slot": "8300", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x87c3f6fac9ea937a8e8bd4f6dccb7893cb8ea39c65e0313a30e903c220dba2c8597df1d75ee21fd905eab1ebf2261ebf085b13115363d72adc9ccd9527293b7218c39e94c257c94a8c95c32cf909cf58e8b7ece89a9bd21107a413b3fe3172e0", + "data": { + "slot": "8304", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x94fab4732e767881653a5923ca4a93fc2778d349cef972b077aa0fd2553946f578be6571d7a02fe14aa98b11a77475e115bc8062308b23a23c6ce71cd07c528a6e37d30324d57dcc36fa336575210bce5d71ccabf74f0dd96f839eefc1a49343", + "data": { + "slot": "8296", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": 
"0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x97426dbbe61af8a68ac683ba95ad871baade096e9287e2d533c1efba04430b7083283485db5b1624fb03639065e8c754155cfe68986d526c1a771b67e45c0e8c97428dee8c6d80cc68892b961e8352d50f34e2623dc3b7ba2cb5dba28a854079", + "data": { + "slot": "8302", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x94fab4732e767881653a5923ca4a93fc2778d349cef972b077aa0fd2553946f578be6571d7a02fe14aa98b11a77475e115bc8062308b23a23c6ce71cd07c528a6e37d30324d57dcc36fa336575210bce5d71ccabf74f0dd96f839eefc1a49343", + "data": { + "slot": "8296", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x98416260b644654a4a90bda6032053f1eb3a12c59a3c7534f1ef348f2108c2837245bce74b3fd9f61ebae24860cc698100f864c4f26966c36431acbf0beea679807ba4eba9adfd1a267ef8d990290a2548af6456b1d0def6639ac47fd30c5542", + "data": { + "slot": "8317", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x8b1fbaba2982cd546a7d19d4af3755163112349a0e6d13f05c69807c709f363359c2cfff8a4afa66bd24445eb12b923615c33892a82d8081575207e4165a1d0c944fd3871ff885662c3921b152e674130879d67a0692b4867ad9fc2d20e24fa3", + "data": { + "slot": "8307", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x894270af1854ce4e65c6e09bc83c15171d564a2af871d0b442cacea78536e5cd34cf4a906025a6d87e12a172ceeb79990b86a1de7ed4ef40cffeca6b93402c3542682bb2914c34430e23038a57e8490abe809dc9f96f3b2caebed380113280b3", + "data": { + "slot": "8297", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": 
"0x98aade2cf9dad0e1528edec0e76be15577601b6cbef68353e51748b6286bf08812e42fe8791147a54eeed34782249e3f0cc463e22d6cb1c6050636ca8d070531fe40e16913f2e5560f6e683a6781268ff08d32bc5899b00306a87eecc5603928", + "data": { + "slot": "8290", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x805e92ba1e4c88489f178202ca5d7ce96e7b874fe98bdee80ab6423441bd37ff3f0fe724bc088f58050ac2a8f81ec5e80401d76caeb65795b5794e7a20d0384f3bfd162b281a17e96cc98087c651d893b05154203af7a7591afe1056db934ec4", + "data": { + "slot": "8309", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0xa44792a6341475c3eff0c11ad62bc19154d5ed48a4b81869ed49885499d5f768c81f357dd6a3ea60aa2f15a184b098f40a1382cfaea0a8438d62d9cca27c85023245b1838e2e4f1d4e65926f8c6a3032740b36c0a3c8aa69850648ac6c12b3ce", + "data": { + "slot": "8306", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0xa44792a6341475c3eff0c11ad62bc19154d5ed48a4b81869ed49885499d5f768c81f357dd6a3ea60aa2f15a184b098f40a1382cfaea0a8438d62d9cca27c85023245b1838e2e4f1d4e65926f8c6a3032740b36c0a3c8aa69850648ac6c12b3ce", + "data": { + "slot": "8306", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x980e36beab885b1f2d8460e7ece21054e9d235fea5429836bc6df687e0c2f41b7556d9c86cd9c1ca7a69e5a51991b8d617eea619ba8e312d568e38f8de8adb8b4a9ec3e9dab2d47df45b35d9f2488236c042d66cd0916fee70e8a3295353b0ed", + "data": { + "slot": "8308", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x90428ae131501833950bbeccfa738a2af4cbb5c6a04ae8f5e21d7fdd00dda2452d32e0630cd989a4863e8cb81303e1ca04b8f3abcf0b4c456fd7856c0af8585d206f109972b635bcefd72a537a67839b033a6c61f00af536a5e7107fdb9bf527", + "data": { + "slot": "8313", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": 
"0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0xb60160a4024734b6c22e6083d755d97b22d107001965d35cd1aa5fc3c1059b4cb482c36c78609c0fa131631eb847d165177c877949e5baebb96a48f6e471c1d1d700619b4adeafa728b4d69de8d03d02854e4240d8e16d790168619cc2027247", + "data": { + "slot": "8318", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0xb731b2df4dcaf841d50747f85b332170471895508c3af7e8bada14e58a816fed435460e1694e87e2887f19a0de201c3d0bc1ece52c26c519fd9131b25fa8a69b229c14ffd1c935d9e853aca8ab07eaae98a65daec09b2640b91961685e96d58c", + "data": { + "slot": "8292", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x8b63c286bff1c5dc6fb2e4878e73631e16db1cd3b07e9d0150b3ede4175635fe8db571cb486398e35923640606643d630bacc148d84e9c1060c32b55fe644e5c2573326b041767c5d45d45509a5403a7f2f1b2dd60e54bed26f407bb367a8642", + "data": { + "slot": "8314", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x906fd8d1a45b719a36eb5e09b5e13f9d0fb7faaa194d84b90e0b2b811ce299f385bf18bb07844620ec032b6f267d04781480dc303081be7c5d8ba735bccd682dd3ddb6345bae13bd96068eb86b148e73b8931b642705b1696d9ada4159b1dd65", + "data": { + "slot": "8305", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x9494651d4491cfc326f3439cebc3304aaf50a8e5598217da6df2a13b5cb9f9731cc8934f406c0243786b17f936d5892801fc34fc74fb4f52fec147536375dabd9f892940aacdea196e28cb21320bce9ede79b0a11333569d90e6deeb59869217", + "data": { + "slot": "8300", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": 
"0x98416260b644654a4a90bda6032053f1eb3a12c59a3c7534f1ef348f2108c2837245bce74b3fd9f61ebae24860cc698100f864c4f26966c36431acbf0beea679807ba4eba9adfd1a267ef8d990290a2548af6456b1d0def6639ac47fd30c5542", + "data": { + "slot": "8317", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x90428ae131501833950bbeccfa738a2af4cbb5c6a04ae8f5e21d7fdd00dda2452d32e0630cd989a4863e8cb81303e1ca04b8f3abcf0b4c456fd7856c0af8585d206f109972b635bcefd72a537a67839b033a6c61f00af536a5e7107fdb9bf527", + "data": { + "slot": "8313", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x980e36beab885b1f2d8460e7ece21054e9d235fea5429836bc6df687e0c2f41b7556d9c86cd9c1ca7a69e5a51991b8d617eea619ba8e312d568e38f8de8adb8b4a9ec3e9dab2d47df45b35d9f2488236c042d66cd0916fee70e8a3295353b0ed", + "data": { + "slot": "8308", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x90428ae131501833950bbeccfa738a2af4cbb5c6a04ae8f5e21d7fdd00dda2452d32e0630cd989a4863e8cb81303e1ca04b8f3abcf0b4c456fd7856c0af8585d206f109972b635bcefd72a537a67839b033a6c61f00af536a5e7107fdb9bf527", + "data": { + "slot": "8313", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x815ca84fb30789d731ebf977b6ecdd60c30818202d464acdc2947143f62342c4a5d01c6cdb32b1e223d032c746fa98d30899164e6ab37828e6d049f32e46a5c59d742d82005f9a629938761e3abce454cec104352665cd81bbcffa2fce22a935", + "data": { + "slot": "8299", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x90428ae131501833950bbeccfa738a2af4cbb5c6a04ae8f5e21d7fdd00dda2452d32e0630cd989a4863e8cb81303e1ca04b8f3abcf0b4c456fd7856c0af8585d206f109972b635bcefd72a537a67839b033a6c61f00af536a5e7107fdb9bf527", + "data": { + "slot": "8313", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": 
"0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x906fd8d1a45b719a36eb5e09b5e13f9d0fb7faaa194d84b90e0b2b811ce299f385bf18bb07844620ec032b6f267d04781480dc303081be7c5d8ba735bccd682dd3ddb6345bae13bd96068eb86b148e73b8931b642705b1696d9ada4159b1dd65", + "data": { + "slot": "8305", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x906fd8d1a45b719a36eb5e09b5e13f9d0fb7faaa194d84b90e0b2b811ce299f385bf18bb07844620ec032b6f267d04781480dc303081be7c5d8ba735bccd682dd3ddb6345bae13bd96068eb86b148e73b8931b642705b1696d9ada4159b1dd65", + "data": { + "slot": "8305", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x876c656d7889c15cd355d6652b347a25dc8fd7ffc383f0d14ad436b5f1af9ac09e06168ad71be76b85c3dd44ae79cc0f04f250a0bcc529d06a1032283e2b8b384d582c0ace50bf747264a199647697f159d06be75ecfb24da2a8b625a3087804", + "data": { + "slot": "8293", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x876c656d7889c15cd355d6652b347a25dc8fd7ffc383f0d14ad436b5f1af9ac09e06168ad71be76b85c3dd44ae79cc0f04f250a0bcc529d06a1032283e2b8b384d582c0ace50bf747264a199647697f159d06be75ecfb24da2a8b625a3087804", + "data": { + "slot": "8293", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x8bfaed4667e28ed9e39464c7c57027ae345f22847b6ac1aa7e5f342fdb6cdca9d78a962da68f9e34e0453f68fa363fcd196881e2dd76abcab6814439d73448f404124ad2e2f57b59b0df57699d913e24f79c53f129a09c05f2659e4444f4bb53", + "data": { + "slot": "8320", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "258", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "260", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": 
"0xabdb4b0a06e2d036021b0cd847fb6e8f4d2deca86e60788a6ae2bb9bd55b62ebf35716290f958e075812e8dfcba2beef00b002459e5932d7e7478cf00e91300f9f53a84f593ce40afb1f3c07b1db789ba5da757d313a9ee4cac6b2e28ed2f929", + "data": { + "slot": "8298", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0xabdb4b0a06e2d036021b0cd847fb6e8f4d2deca86e60788a6ae2bb9bd55b62ebf35716290f958e075812e8dfcba2beef00b002459e5932d7e7478cf00e91300f9f53a84f593ce40afb1f3c07b1db789ba5da757d313a9ee4cac6b2e28ed2f929", + "data": { + "slot": "8298", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x912fe61ef99df1c96d7e5e6bd01ee5a6be73389978c7f4670c4e978beb6b8e4d640f238c6ba3426e935ac8f8527d118c06f464b08f6527ebebac793728ccc1190ee6701838c6f2b3b06391dc2d69232e63af11023ffe8e1c66eb3bd1075085a6", + "data": { + "slot": "8310", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x90428ae131501833950bbeccfa738a2af4cbb5c6a04ae8f5e21d7fdd00dda2452d32e0630cd989a4863e8cb81303e1ca04b8f3abcf0b4c456fd7856c0af8585d206f109972b635bcefd72a537a67839b033a6c61f00af536a5e7107fdb9bf527", + "data": { + "slot": "8313", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x815ca84fb30789d731ebf977b6ecdd60c30818202d464acdc2947143f62342c4a5d01c6cdb32b1e223d032c746fa98d30899164e6ab37828e6d049f32e46a5c59d742d82005f9a629938761e3abce454cec104352665cd81bbcffa2fce22a935", + "data": { + "slot": "8299", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x8d852ffa1c5960ba3a5d09837fbdb859bbf9045001b3d1dc1c4d22c6b4bc5b6d506f6ef667b5c7c9fbfb1dd0cfe3617405f56750f8b5eb25b3539d0a4c94822b198c524de92a6c68982ce17f985ff5283cea6ac8dabe41828ce38edb7e9fe223", + "data": { + "slot": "8311", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": 
"0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0xab42974cba2e3fa75faa4c1f717caf7c2a953f4964063462ed32629764336124fd2f2f430ddf0291325722206250452c109b14cded0e51171b89b106b6b2044291128c3d6966c804490033b9e5fd06450ea50d22b5ab761892c6c3f4de79e098", + "data": { + "slot": "8291", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0xa44792a6341475c3eff0c11ad62bc19154d5ed48a4b81869ed49885499d5f768c81f357dd6a3ea60aa2f15a184b098f40a1382cfaea0a8438d62d9cca27c85023245b1838e2e4f1d4e65926f8c6a3032740b36c0a3c8aa69850648ac6c12b3ce", + "data": { + "slot": "8306", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x912fe61ef99df1c96d7e5e6bd01ee5a6be73389978c7f4670c4e978beb6b8e4d640f238c6ba3426e935ac8f8527d118c06f464b08f6527ebebac793728ccc1190ee6701838c6f2b3b06391dc2d69232e63af11023ffe8e1c66eb3bd1075085a6", + "data": { + "slot": "8310", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0xabdb4b0a06e2d036021b0cd847fb6e8f4d2deca86e60788a6ae2bb9bd55b62ebf35716290f958e075812e8dfcba2beef00b002459e5932d7e7478cf00e91300f9f53a84f593ce40afb1f3c07b1db789ba5da757d313a9ee4cac6b2e28ed2f929", + "data": { + "slot": "8298", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0xab42974cba2e3fa75faa4c1f717caf7c2a953f4964063462ed32629764336124fd2f2f430ddf0291325722206250452c109b14cded0e51171b89b106b6b2044291128c3d6966c804490033b9e5fd06450ea50d22b5ab761892c6c3f4de79e098", + "data": { + "slot": "8291", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": 
"0x876c656d7889c15cd355d6652b347a25dc8fd7ffc383f0d14ad436b5f1af9ac09e06168ad71be76b85c3dd44ae79cc0f04f250a0bcc529d06a1032283e2b8b384d582c0ace50bf747264a199647697f159d06be75ecfb24da2a8b625a3087804", + "data": { + "slot": "8293", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x805e92ba1e4c88489f178202ca5d7ce96e7b874fe98bdee80ab6423441bd37ff3f0fe724bc088f58050ac2a8f81ec5e80401d76caeb65795b5794e7a20d0384f3bfd162b281a17e96cc98087c651d893b05154203af7a7591afe1056db934ec4", + "data": { + "slot": "8309", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x95bbaf8dcff64306f01e0b09b27ebe3c761def7edd75542e213586ee0c6d3fc313ae102760abd1262b4f8c00e57603fa01627390011e3a5dea555c74798d7a3e1da68e00e3cdb9d8e4af112b6ff83951bd926288d24eb82e3f203a3160a4d7a9", + "data": { + "slot": "8312", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x815ca84fb30789d731ebf977b6ecdd60c30818202d464acdc2947143f62342c4a5d01c6cdb32b1e223d032c746fa98d30899164e6ab37828e6d049f32e46a5c59d742d82005f9a629938761e3abce454cec104352665cd81bbcffa2fce22a935", + "data": { + "slot": "8299", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x87c3f6fac9ea937a8e8bd4f6dccb7893cb8ea39c65e0313a30e903c220dba2c8597df1d75ee21fd905eab1ebf2261ebf085b13115363d72adc9ccd9527293b7218c39e94c257c94a8c95c32cf909cf58e8b7ece89a9bd21107a413b3fe3172e0", + "data": { + "slot": "8304", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x8b1fbaba2982cd546a7d19d4af3755163112349a0e6d13f05c69807c709f363359c2cfff8a4afa66bd24445eb12b923615c33892a82d8081575207e4165a1d0c944fd3871ff885662c3921b152e674130879d67a0692b4867ad9fc2d20e24fa3", + "data": { + "slot": "8307", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": 
"0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0xa46775d208c119b097221ead6ee9afbf011258b03da07138d01fef8d5bd4681ecbab6f36687e8ae644191acebc94800a002b136de6ff892e4e0910d05402def66858ee8ad8f4b706fab163fe742959dcb86fa90d0b822e5937092852962acbb1", + "data": { + "slot": "8294", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x97426dbbe61af8a68ac683ba95ad871baade096e9287e2d533c1efba04430b7083283485db5b1624fb03639065e8c754155cfe68986d526c1a771b67e45c0e8c97428dee8c6d80cc68892b961e8352d50f34e2623dc3b7ba2cb5dba28a854079", + "data": { + "slot": "8302", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + } + ], + "deposits": [ + { + "proof": [ + "0x1a02000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "data": { + "pubkey": "0xa19c8e80ddc1caad60a172b66eb24e83ef200d77034b3e16bbee4d95e929a5c1a473563973338d22e7a566fdbd352f65", + "withdrawal_credentials": "0x00edbcfc97a6985ac86187522426240ed81b6493c880d0798360149ec8ce96d8", + "amount": "32000000000", + "signature": "0xb9b4b512b2c67a3e89edcbef91fc0ccd88c9a8c8654c51a130ffb2ab539c22a0c6b84928e8db4ca8a9d04f2dee312c3817a2bf360b6f5f2f3d1ba69b43cf4671290f7f58621887ad4dd1c9fe6d02cc59443e12447a20b38913f67597b0e3cc93" + } + }, + { + "proof": [ + "0x1a02000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "data": { + "pubkey": "0xb1f92d1a612942fb266c1e436f8d417282efa2805d5a5a819e3d07e358a70efbf0cc1671412ee986cd342c3d2255a324", + "withdrawal_credentials": "0x004ac0f181a01d43a7de32602b440cfbe3a091bb8c108c1fa35726ed301743f9", + "amount": "32000000000", + "signature": 
"0x8dbd6f9b4ce0a5277f66da9ec41776cff88a647ae1b4dde221a3bf41b9d4af1e77d0cff23185796815448f2e8148126a046b4b60947a32a1e201b4e979c91b395c1d4804ead1324d699eaa9c481efa69484a7946a0bad9788e50cf05847a30c4" + } + }, + { + "proof": [ + "0x1a02000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "data": { + "pubkey": "0xb532643cb8824a2fbd9196c10961f3ad2f0e319c3612bb15a51a3454593f44726383f006425c2e5952b156a6e14aceb0", + "withdrawal_credentials": "0x00f68c08152911b76f556f9d6dfc66d54e5abd63de04dc073d6b03f333ac00f3", + "amount": "32000000000", + "signature": "0x97852e8c02386bcc8a2dd51c70c48661c79bc1f89f9dce113a60fcde345abedf96fa186c4230013cf61f3546c5d9877a0eab7a5a4f4e4e0e4bcd917dc8368a88e3b8380de9e96ed36bfd605d55956af64a17b877f12762acfdd1c3effe4b4d42" + } + }, + { + "proof": [ + "0x1a02000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "data": { + "pubkey": "0xa7a1c0bbad929dc02699e92597a66266bbd9533419693270c9b56bbdea643cd2ded9664da3c9fd8db2389277b5e585cc", + "withdrawal_credentials": "0x00e64188226da03f1f3d787ef65d86690aaa24d44e5ac92c99c413463ec47c26", + "amount": "32000000000", + "signature": "0xb0e97772997255840a5758e5325b9d1c56a292500838c5b2b697b7dd207c65a2ef928ebb9466d57782edf79f9b74bbbb069235c752f6527e8d8eb1c785d99326da78680056ee3084811b980185287259af64607e218d67a3b8f24d27c0659ce2" + } + }, + { + "proof": [ + "0x1a02000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "data": { + "pubkey": "0x9919842dee455266e4dc77c74088bddbfdb535b9a1bbe75a3cced0e428598038365afe11c7578e4dbd8fe4cae7237543", + "withdrawal_credentials": "0x000a2baaef8f6cc730d6a5474879aed4fe8c95da787cc2e15c3cdba14a9cef12", + "amount": "32000000000", + "signature": "0x99ef1ab7cfbe40d0a1e136138a4a8094e8f54a59c8d05052749b7af14931274fad1c0a44577de51099f2700505fa8861023b7bddabb274249a091acb3a4f7543f877da3792dad7897351c7a01343116a65959812fd55cc4ce4197b05f698761f" + } + }, + { + "proof": [ + "0x1a02000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "data": { + "pubkey": "0xb4ed73c02a816ba9d23ba0e023970772f82dd3a32a85eefd922958e33bcab7f9c85e20372e49107665926cca852b8b9a", + "withdrawal_credentials": "0x0017c0e8e177a6d58e4f8b93b2b66b13aef9c186cfccb9466d857a474b32b0d4", + "amount": "32000000000", + "signature": "0xa6dfce815f61ce81bf107bf5ccc1beae5f32b63a55e836a5983b63b90c0e7eac873387107c145ab59c32679091cfd28a0dbf2b73f75cd5ab01b75c6ba984b83c796c92b77adba152ab2a20132324fc4b20c8ec002663f16edec9308bb8f3d298" + } + }, + { + "proof": [ + "0x1a02000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "data": { + "pubkey": 
"0xb0d0dfaf7479f59319beb513bee16e1af576a0740a7a124a9947ec7c3826dbc0a5d5db15519e8423d7aa683f638f3da3", + "withdrawal_credentials": "0x00a61d2fddabb70c2db059af7e298b0395ef882dda24ae144f2b7ac88026e55d", + "amount": "32000000000", + "signature": "0x85a06ab8d9d576cb2810a88635b7a462d1cfb238db066b8caeba7f36562bb903630f8f24d157747debad5428c4f42a9a0a08dfd53c687cd7c3e17ec539f353357bbd89b7111246c99cc7fab24b8cd33a88cddf845f7d27c8a33079aa097069e3" + } + }, + { + "proof": [ + "0x1a02000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "data": { + "pubkey": "0xb69614adf68d58f7d67110d7ced171ab934cb973f19c60cbb83161468655c42fe19a80a8e903030650bfaa9613a1ab2d", + "withdrawal_credentials": "0x0037c021fdef99bcf9fb90c02440571ab2faa0238485ed72e427b69dc8dddc91", + "amount": "32000000000", + "signature": "0x957f48b82d761d3e7f2e34eeff5922358d87f9b31c51e5af37a54fedeab7cfc09c3068f6ef5c97e0323dabff706bc7520113d51841c6dc2eaa044c8526bdaebcf35476c0b08cccb69ab0bab07c8e7ca2d6573b0ae96c32ae3d18764ae7ea78e0" + } + }, + { + "proof": [ + "0x1a02000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "data": { + "pubkey": "0xac897c8892a6f3effcd276e4f44f410644846a333db600ad12e1099020196b2f8104563c04d78fedf5afc5d87b91b1b5", + "withdrawal_credentials": "0x0075f9178dd8a199c55d5cebb9dccb00508e619d5b9abd2b7cd5ad3f671c5a9f", + "amount": "32000000000", + "signature": "0x95a886b35ead6f8fc09d33975108857abffc32d53db6546a7251d32ca6d1706e899155b3883b05e65a041e44c51db8480703f13cccc6575cd2d50d0506485b9669a096bb1a2d4879008c15b8c1cdcd2e1a5c4f12885311e24dd87dc32e1bce87" + } + }, + { + "proof": [ + "0x1a02000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "data": { + "pubkey": "0x8794fd3f4e5e66e6e81735d5726943833b82d1efd7d877e495a8c36955b7dfb95b3f6cfcef865fd7969fa2e17e628ab9", + "withdrawal_credentials": "0x0087adf1a29896ae52be67356ee9a4a5035450764c278382f8940d554668c208", + "amount": "32000000000", + "signature": "0xb42aa548fd9068db7916757390f6d011ad890b9f27a75d4676dd9edcd9017f5d7e2cec215a04502fcff253aa821865fb0c30549e7b5d5e62cc8df0264dc3b55538f15cfd375f9cb022a94c2a39201d757a502701acd50554dc4da29173c945bd" + } + } + ], + "voluntary_exits": [ + { + "message": { + "epoch": "260", + "validator_index": "504" + }, + "signature": "0x8fedc3077271b41f631d6062cc1cc8c8f074e486e9e692f198c5f82b94d2bb3b0fbf71cbac043cee94b56a7a06adf06d07bb7ecf06d8f699add17972ceb54b25e6021c3a2a727afd3370e960afbf345a75fddd2d221ba85a5f7b07e5607eec1e" + }, + { + "message": { + "epoch": "260", + "validator_index": "503" + }, + "signature": "0xa44079752dfa36b925f0ff675dfd10b5b7cc0c178839356d0bda9c83b6df01f6bfdd904af92373002bfac40277941d2809c4152fc61007ae4f2c73e550ed02f425419efae0461d8829746c7a3d36dcae5bc37158ede7dd30ccc33930783b6194" + }, + { + "message": { + "epoch": "260", + "validator_index": "502" + }, + "signature": "0xb193b547c2d45341c9aedd0a22f4afc565d9aaa3a04889df2f8ad608bb31b44a0391c69383f0f4725cea291332c081ff0a48e850d246dd0be40880bf17316eb4b2eaf4b8b6ba6d59c93aea3af98988f05cb2ddf61d8637f943864ebfe7c9707c" + }, + { + "message": { + "epoch": "260", + "validator_index": "501" + }, + "signature": "0x88afe9a0215d2a67c451fcbdc358237c4d5dce6b46973ae527afb7f8fb1da800d6a3dd7f6387028a57737b354b7db88803bd6f2a59c7fb84229f42e6c6ea1b7510cb2a28026ff8f2eefb8fc7e2a83115197b7a1bd35fbf0afcc69e4b6e581911" + }, + { + "message": { + "epoch": "260", + "validator_index": "500" + }, + "signature": "0xa2f2399070bcfa3f50894d7170d1343ab5f52d6bdc155124e867bcde936aee4e0bb69f164dee5fa07d47abccb8844ec101126caf0402f1a757934f8e7b5904a60cedc283b5e9801f2a71f80cda16e910d72518d469a9a40cd94b8ad3cca10136" + }, + { + "message": { + "epoch": "260", + "validator_index": "499" + }, + "signature": 
"0x86abacd204c85cfc40d71853422001e44134b1900138fccb409928b7e663270476e3d7a7e0aaa103c693cad3629da1aa056cac30c8aab1a4eb50d81bb0711db3dba1d741562b103f67f495996b18fad779d3d9cc508763ab883a7cd6858bdc51" + }, + { + "message": { + "epoch": "260", + "validator_index": "498" + }, + "signature": "0xb86533e02779dd0f959dbf1b0fa195126ccc945fd0a7c5b7370aefc16f8f130d083c0c1c58a5c18e8119d7912dd532d91765dd26ad5ef3991238bc093bab79d511b1d8484482eec9b6b4a98f4a8928819ea58fc857ed80b59fe9cb7a33fe60a2" + }, + { + "message": { + "epoch": "260", + "validator_index": "495" + }, + "signature": "0x80a5c7c52a246dcaaf67caf6285ea518581835af668d1a64723b321b167464e238248c0017d5265be373c9079d7b529b10aedc37835683e5e1320c3ad6fa1f72d52046a49b061935e1631565912d2f2482434007957fe9903edecf4dad8e5bb8" + }, + { + "message": { + "epoch": "260", + "validator_index": "494" + }, + "signature": "0xb6a0e4cdc1815f03166218963ec9cc4c5d607a67d659d1227386e16f90d3e39c6cddf696e3534f3824ca5aff8c734bab153f3bab701247cdcea16db31c94846c1cd3781b1861485ad813d025bf0a486c592dd1f9afa1134e8288e4fef44d2f3c" + }, + { + "message": { + "epoch": "260", + "validator_index": "492" + }, + "signature": "0xad850276510c2e41d059df6a1cefab9f1b66463da47b0fc772b21ed90c13e1bd6f86def8b2ecb867f4f752612d9d25e30a151aa6ef630a1b6ddaa4420c240b37df0234ee332373fe132b0101a0486900c5733762beeacd95429dd34c34230d13" + }, + { + "message": { + "epoch": "260", + "validator_index": "491" + }, + "signature": "0x837669180ba01b65157087f49c7af19acb1439016eca9c699b7136da7e9bbc89d6bddc7a030388bbb7e149ebd521c4810f457846b9cf913f7ee6f01db4363d3ce92fc732e52359917d36c7e4a08158653f1a9a78a608c4b56ff3e155b2783974" + } + ], + "sync_aggregate": { + "sync_committee_bits": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "signature": "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "execution_payload_header": { + "parent_hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "fee_recipient": "0x0000000000000000000000000000000000000000", + "state_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "receipts_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "prev_randao": "0x0000000000000000000000000000000000000000000000000000000000000000", + "block_number": "0", + "gas_limit": "0", + "gas_used": "0", + "time": "0", + "extra_data": null, + "base_fee_per_gas": "0x0000000000000000000000000000000000000000000000000000000000000000", + "block_hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactions_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "withdrawals_root": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "execution_changes": [], + "blob_kzg_commitments": [] + } + } + }, + 
"finalized": false, + "version": 0, + "execution_optimistic": false +} diff --git a/cl/beacon/handler/test_data/block_1.json b/cl/beacon/handler/test_data/block_1.json new file mode 100644 index 00000000000..05289c754eb --- /dev/null +++ b/cl/beacon/handler/test_data/block_1.json @@ -0,0 +1,1974 @@ +{ + "data": { + "signature": "0x8b915f3b9d2d4c7ccaacf5d56c1152b1e91eafd1f59ba734d09e78996930b63ca550499997fe6d590343aaf5997f0d0c14c986571992ac9ed188de2b31ae4b7d70dfb68edae8b012f72f284dc8da44f4af5a2bdf3dfc9c0897ec4f7165daa07a", + "message": { + "slot": "8322", + "proposer_index": "210", + "parent_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "state_root": "0x933d6650f2999f17012e781f5012981edb549e5935de1c981fce81cdd241d4e1", + "body": { + "randao_reveal": "0xa182a6c7224c53cc43492b7ba87b54e8303094ebcb8c822da09c4224791b461e34d089ac857acf05cd695679c25cffa30404832791fe424fd104e2e96ebbf583dd5ec4dcbc891e7f4e0dea402071dbd294810417221fc41e4f90e4837c694e1a", + "eth1_data": { + "deposit_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "deposit_count": "528", + "block_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "graffiti": "0x0000000000000000000000000000000000000000000000000000000000000000", + "proposer_slashings": [ + { + "signed_header_1": { + "message": { + "slot": "8321", + "proposer_index": "476", + "parent_root": "0x3333333333333333333333333333333333333333333333333333333333333333", + "state_root": "0x4444444444444444444444444444444444444444444444444444444444444444", + "body_root": "0x5555555555555555555555555555555555555555555555555555555555555555" + }, + "signature": "0x939584df88598e56fe144105c6933b4727d7b772539e65c57289df64cedee771377e4d0e94f85c25d39a6072997d309c09da8c477267670aa42f26fb0836c72ec5867fa2f34dc0eb7e043ef5d6421282d1515b0f8c7ffd4bbbf56ee8d61ed063" + }, + "signed_header_2": { + "message": { + "slot": "8321", + "proposer_index": "476", + "parent_root": "0x9999999999999999999999999999999999999999999999999999999999999999", + "state_root": "0x4444444444444444444444444444444444444444444444444444444444444444", + "body_root": "0x5555555555555555555555555555555555555555555555555555555555555555" + }, + "signature": "0x8a184441d5d944ed3c18549dd9e4640eda879f9e737ac4211fdddfd30a65e1a2a32a8aa918ca65ad9b863a15e8cfefc412608ca78fd54ea1e5cbbd5697d125cc721aac1b01e8984a33f025c4707623669573244a632ec7f37808c01fab143f58" + } + }, + { + "signed_header_1": { + "message": { + "slot": "8321", + "proposer_index": "406", + "parent_root": "0x3333333333333333333333333333333333333333333333333333333333333333", + "state_root": "0x4444444444444444444444444444444444444444444444444444444444444444", + "body_root": "0x5555555555555555555555555555555555555555555555555555555555555555" + }, + "signature": "0xad97a43e9f28a90ff46b07a7bf65d520b89a78af47dbff1c10e4fc6bb36b4ee9c4f27f2a72c65311a03e7b48e06d86db1149147b14a8803d46f6a457092642dc89d3f2782bd48a373e3125af1a84f5b76d4ff7ddc85ac2650ca4c0f99e1af592" + }, + "signed_header_2": { + "message": { + "slot": "8321", + "proposer_index": "406", + "parent_root": "0x9999999999999999999999999999999999999999999999999999999999999999", + "state_root": "0x4444444444444444444444444444444444444444444444444444444444444444", + "body_root": "0x5555555555555555555555555555555555555555555555555555555555555555" + }, + "signature": 
"0x88d860d460526de062ee196400e24cb3055de2ff6abb31331d0bfeeebcdc77839d22ad6dfec39d81279f5527d1ffbd7e0a9d6eee7dce5a1cd6f79451537e9dfb6384f595e9d49673c58c181527a599dd4b38154e1322f1607f192ab0394f1411" + } + }, + { + "signed_header_1": { + "message": { + "slot": "8321", + "proposer_index": "281", + "parent_root": "0x3333333333333333333333333333333333333333333333333333333333333333", + "state_root": "0x4444444444444444444444444444444444444444444444444444444444444444", + "body_root": "0x5555555555555555555555555555555555555555555555555555555555555555" + }, + "signature": "0x8a2358ff11a30100a2492001827f54ff6c10dd6dcea66f6814dd1cccc4a49850bbbe36546e4f9b72410042a9d5882e8219a5a01708b8a95ca57984debe78f419a4ac921270a0f0c11c795a6c5ef1e6bfb96712751a4fee61059ca8fbe69639b6" + }, + "signed_header_2": { + "message": { + "slot": "8321", + "proposer_index": "281", + "parent_root": "0x9999999999999999999999999999999999999999999999999999999999999999", + "state_root": "0x4444444444444444444444444444444444444444444444444444444444444444", + "body_root": "0x5555555555555555555555555555555555555555555555555555555555555555" + }, + "signature": "0xb820e03b7bfd21c2d97a4f2bc9dd1fd5325894757f7129646c7a39a02b2c1c8ca33d509b4e83491e79db02ac0490aa3308ee23bfa1f65bf4130ab07e377a8cbd4eace5b69801528322dde425b0a78310504c330da30be7cefc674573dbdb4502" + } + }, + { + "signed_header_1": { + "message": { + "slot": "8321", + "proposer_index": "169", + "parent_root": "0x3333333333333333333333333333333333333333333333333333333333333333", + "state_root": "0x4444444444444444444444444444444444444444444444444444444444444444", + "body_root": "0x5555555555555555555555555555555555555555555555555555555555555555" + }, + "signature": "0x88c81a6029f097a9f23e37c7677abfafa2921982e9aebffc35ca700e1aefcd49c2ab5d51c7b28ef3db3aad49d58a6407082ce1ecd7f7bd89cb764242890440b684fc0e1511e047434b25f3ad1a5e238e5bf97f51e9e37d6eed48e0b9fef64333" + }, + "signed_header_2": { + "message": { + "slot": "8321", + "proposer_index": "169", + "parent_root": "0x9999999999999999999999999999999999999999999999999999999999999999", + "state_root": "0x4444444444444444444444444444444444444444444444444444444444444444", + "body_root": "0x5555555555555555555555555555555555555555555555555555555555555555" + }, + "signature": "0x815b492a6a3fb606f01dbc595c8b18b51b7f7a5a86b11f3ae57c48f7506a34606556a3cf2be683ce23cd0c7b2235667613f9dbcf98408b176f134645f122684bd8fe704c7a4eccb7bb7cbe33c6de377be4d742291d35d0ec8d6083c1b17b7261" + } + }, + { + "signed_header_1": { + "message": { + "slot": "8321", + "proposer_index": "397", + "parent_root": "0x3333333333333333333333333333333333333333333333333333333333333333", + "state_root": "0x4444444444444444444444444444444444444444444444444444444444444444", + "body_root": "0x5555555555555555555555555555555555555555555555555555555555555555" + }, + "signature": "0xae352ba8550d04c07591224449bd4967f66f9d639b731795f643b1e3fc5ad28317268dc9e289ce6075e8981a0e37d9440885e4f4292cb4b4656bd0c7bd9fc22d21eb4c7d1b46f1b08cdb1eb08d7a405985e8a406e6d93c5c3fdd20e91baba122" + }, + "signed_header_2": { + "message": { + "slot": "8321", + "proposer_index": "397", + "parent_root": "0x9999999999999999999999999999999999999999999999999999999999999999", + "state_root": "0x4444444444444444444444444444444444444444444444444444444444444444", + "body_root": "0x5555555555555555555555555555555555555555555555555555555555555555" + }, + "signature": 
"0xb9152f5510f2bfa5ab7b61829823f25f0c879ab9b852fcd90c17f751bed6e687dc523fcda177503509cd1befec36046a056a66f5826e2333b6de67430a16f6194416681ae69a1c3498cf8351abae4fac5d8f0b51b1734633d545d540bf269270" + } + } + ], + "attester_slashings": [ + { + "attestation_1": { + "attesting_indicies": [ + "96", + "353", + "445" + ], + "data": { + "slot": "555", + "index": "0", + "beacon_block_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "17", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + }, + "signature": "0xa7e932307a82913b23743198182a7e3c97675e8a1133e8d946bc59c62b1765046214ca0ea0e13b77e4f8acc8f226498103684f382826a9fff6c6c2ffdf9c65ffeb1680155025f489f676457634581ee4363bdfbe4d46fc4d1d9df93c3df8750d" + }, + "attestation_2": { + "attesting_indicies": [ + "96", + "353", + "445" + ], + "data": { + "slot": "555", + "index": "0", + "beacon_block_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "17", + "root": "0x0101010101010101010101010101010101010101010101010101010101010101" + } + }, + "signature": "0x89aadbd74370dc6d86b6b61c544c1e18949b0d8aa2d706605d1014d0266a043588a829243d343d1c3812621944ea34540aef1fbd34fe51b03a5734ebc5ec31057d1df0004faeca71d8687dd3af806e4332e19f6da5ab1d7da67fe017c2f2e68b" + } + } + ], + "attestations": [ + { + "aggregation_bits": "0xff3f", + "signature": "0x8b63c286bff1c5dc6fb2e4878e73631e16db1cd3b07e9d0150b3ede4175635fe8db571cb486398e35923640606643d630bacc148d84e9c1060c32b55fe644e5c2573326b041767c5d45d45509a5403a7f2f1b2dd60e54bed26f407bb367a8642", + "data": { + "slot": "8314", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0xb731b2df4dcaf841d50747f85b332170471895508c3af7e8bada14e58a816fed435460e1694e87e2887f19a0de201c3d0bc1ece52c26c519fd9131b25fa8a69b229c14ffd1c935d9e853aca8ab07eaae98a65daec09b2640b91961685e96d58c", + "data": { + "slot": "8292", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x876c656d7889c15cd355d6652b347a25dc8fd7ffc383f0d14ad436b5f1af9ac09e06168ad71be76b85c3dd44ae79cc0f04f250a0bcc529d06a1032283e2b8b384d582c0ace50bf747264a199647697f159d06be75ecfb24da2a8b625a3087804", + "data": { + "slot": "8293", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": 
"0x876c656d7889c15cd355d6652b347a25dc8fd7ffc383f0d14ad436b5f1af9ac09e06168ad71be76b85c3dd44ae79cc0f04f250a0bcc529d06a1032283e2b8b384d582c0ace50bf747264a199647697f159d06be75ecfb24da2a8b625a3087804", + "data": { + "slot": "8293", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x98416260b644654a4a90bda6032053f1eb3a12c59a3c7534f1ef348f2108c2837245bce74b3fd9f61ebae24860cc698100f864c4f26966c36431acbf0beea679807ba4eba9adfd1a267ef8d990290a2548af6456b1d0def6639ac47fd30c5542", + "data": { + "slot": "8317", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x805e92ba1e4c88489f178202ca5d7ce96e7b874fe98bdee80ab6423441bd37ff3f0fe724bc088f58050ac2a8f81ec5e80401d76caeb65795b5794e7a20d0384f3bfd162b281a17e96cc98087c651d893b05154203af7a7591afe1056db934ec4", + "data": { + "slot": "8309", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x90428ae131501833950bbeccfa738a2af4cbb5c6a04ae8f5e21d7fdd00dda2452d32e0630cd989a4863e8cb81303e1ca04b8f3abcf0b4c456fd7856c0af8585d206f109972b635bcefd72a537a67839b033a6c61f00af536a5e7107fdb9bf527", + "data": { + "slot": "8313", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x95bbaf8dcff64306f01e0b09b27ebe3c761def7edd75542e213586ee0c6d3fc313ae102760abd1262b4f8c00e57603fa01627390011e3a5dea555c74798d7a3e1da68e00e3cdb9d8e4af112b6ff83951bd926288d24eb82e3f203a3160a4d7a9", + "data": { + "slot": "8312", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x894270af1854ce4e65c6e09bc83c15171d564a2af871d0b442cacea78536e5cd34cf4a906025a6d87e12a172ceeb79990b86a1de7ed4ef40cffeca6b93402c3542682bb2914c34430e23038a57e8490abe809dc9f96f3b2caebed380113280b3", + "data": { + "slot": "8297", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": 
"0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0xa44792a6341475c3eff0c11ad62bc19154d5ed48a4b81869ed49885499d5f768c81f357dd6a3ea60aa2f15a184b098f40a1382cfaea0a8438d62d9cca27c85023245b1838e2e4f1d4e65926f8c6a3032740b36c0a3c8aa69850648ac6c12b3ce", + "data": { + "slot": "8306", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x98aade2cf9dad0e1528edec0e76be15577601b6cbef68353e51748b6286bf08812e42fe8791147a54eeed34782249e3f0cc463e22d6cb1c6050636ca8d070531fe40e16913f2e5560f6e683a6781268ff08d32bc5899b00306a87eecc5603928", + "data": { + "slot": "8290", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0xab42974cba2e3fa75faa4c1f717caf7c2a953f4964063462ed32629764336124fd2f2f430ddf0291325722206250452c109b14cded0e51171b89b106b6b2044291128c3d6966c804490033b9e5fd06450ea50d22b5ab761892c6c3f4de79e098", + "data": { + "slot": "8291", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x8d852ffa1c5960ba3a5d09837fbdb859bbf9045001b3d1dc1c4d22c6b4bc5b6d506f6ef667b5c7c9fbfb1dd0cfe3617405f56750f8b5eb25b3539d0a4c94822b198c524de92a6c68982ce17f985ff5283cea6ac8dabe41828ce38edb7e9fe223", + "data": { + "slot": "8311", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x8bfaed4667e28ed9e39464c7c57027ae345f22847b6ac1aa7e5f342fdb6cdca9d78a962da68f9e34e0453f68fa363fcd196881e2dd76abcab6814439d73448f404124ad2e2f57b59b0df57699d913e24f79c53f129a09c05f2659e4444f4bb53", + "data": { + "slot": "8320", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "258", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "260", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": 
"0x97426dbbe61af8a68ac683ba95ad871baade096e9287e2d533c1efba04430b7083283485db5b1624fb03639065e8c754155cfe68986d526c1a771b67e45c0e8c97428dee8c6d80cc68892b961e8352d50f34e2623dc3b7ba2cb5dba28a854079", + "data": { + "slot": "8302", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x94fab4732e767881653a5923ca4a93fc2778d349cef972b077aa0fd2553946f578be6571d7a02fe14aa98b11a77475e115bc8062308b23a23c6ce71cd07c528a6e37d30324d57dcc36fa336575210bce5d71ccabf74f0dd96f839eefc1a49343", + "data": { + "slot": "8296", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x8b63c286bff1c5dc6fb2e4878e73631e16db1cd3b07e9d0150b3ede4175635fe8db571cb486398e35923640606643d630bacc148d84e9c1060c32b55fe644e5c2573326b041767c5d45d45509a5403a7f2f1b2dd60e54bed26f407bb367a8642", + "data": { + "slot": "8314", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x805e92ba1e4c88489f178202ca5d7ce96e7b874fe98bdee80ab6423441bd37ff3f0fe724bc088f58050ac2a8f81ec5e80401d76caeb65795b5794e7a20d0384f3bfd162b281a17e96cc98087c651d893b05154203af7a7591afe1056db934ec4", + "data": { + "slot": "8309", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x90428ae131501833950bbeccfa738a2af4cbb5c6a04ae8f5e21d7fdd00dda2452d32e0630cd989a4863e8cb81303e1ca04b8f3abcf0b4c456fd7856c0af8585d206f109972b635bcefd72a537a67839b033a6c61f00af536a5e7107fdb9bf527", + "data": { + "slot": "8313", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0xb60160a4024734b6c22e6083d755d97b22d107001965d35cd1aa5fc3c1059b4cb482c36c78609c0fa131631eb847d165177c877949e5baebb96a48f6e471c1d1d700619b4adeafa728b4d69de8d03d02854e4240d8e16d790168619cc2027247", + "data": { + "slot": "8318", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": 
"0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x90428ae131501833950bbeccfa738a2af4cbb5c6a04ae8f5e21d7fdd00dda2452d32e0630cd989a4863e8cb81303e1ca04b8f3abcf0b4c456fd7856c0af8585d206f109972b635bcefd72a537a67839b033a6c61f00af536a5e7107fdb9bf527", + "data": { + "slot": "8313", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x8b1fbaba2982cd546a7d19d4af3755163112349a0e6d13f05c69807c709f363359c2cfff8a4afa66bd24445eb12b923615c33892a82d8081575207e4165a1d0c944fd3871ff885662c3921b152e674130879d67a0692b4867ad9fc2d20e24fa3", + "data": { + "slot": "8307", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0xa44792a6341475c3eff0c11ad62bc19154d5ed48a4b81869ed49885499d5f768c81f357dd6a3ea60aa2f15a184b098f40a1382cfaea0a8438d62d9cca27c85023245b1838e2e4f1d4e65926f8c6a3032740b36c0a3c8aa69850648ac6c12b3ce", + "data": { + "slot": "8306", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x8b1fbaba2982cd546a7d19d4af3755163112349a0e6d13f05c69807c709f363359c2cfff8a4afa66bd24445eb12b923615c33892a82d8081575207e4165a1d0c944fd3871ff885662c3921b152e674130879d67a0692b4867ad9fc2d20e24fa3", + "data": { + "slot": "8307", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0xab42974cba2e3fa75faa4c1f717caf7c2a953f4964063462ed32629764336124fd2f2f430ddf0291325722206250452c109b14cded0e51171b89b106b6b2044291128c3d6966c804490033b9e5fd06450ea50d22b5ab761892c6c3f4de79e098", + "data": { + "slot": "8291", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": 
"0x9494651d4491cfc326f3439cebc3304aaf50a8e5598217da6df2a13b5cb9f9731cc8934f406c0243786b17f936d5892801fc34fc74fb4f52fec147536375dabd9f892940aacdea196e28cb21320bce9ede79b0a11333569d90e6deeb59869217", + "data": { + "slot": "8300", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x87c3f6fac9ea937a8e8bd4f6dccb7893cb8ea39c65e0313a30e903c220dba2c8597df1d75ee21fd905eab1ebf2261ebf085b13115363d72adc9ccd9527293b7218c39e94c257c94a8c95c32cf909cf58e8b7ece89a9bd21107a413b3fe3172e0", + "data": { + "slot": "8304", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x94fab4732e767881653a5923ca4a93fc2778d349cef972b077aa0fd2553946f578be6571d7a02fe14aa98b11a77475e115bc8062308b23a23c6ce71cd07c528a6e37d30324d57dcc36fa336575210bce5d71ccabf74f0dd96f839eefc1a49343", + "data": { + "slot": "8296", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x97426dbbe61af8a68ac683ba95ad871baade096e9287e2d533c1efba04430b7083283485db5b1624fb03639065e8c754155cfe68986d526c1a771b67e45c0e8c97428dee8c6d80cc68892b961e8352d50f34e2623dc3b7ba2cb5dba28a854079", + "data": { + "slot": "8302", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x94fab4732e767881653a5923ca4a93fc2778d349cef972b077aa0fd2553946f578be6571d7a02fe14aa98b11a77475e115bc8062308b23a23c6ce71cd07c528a6e37d30324d57dcc36fa336575210bce5d71ccabf74f0dd96f839eefc1a49343", + "data": { + "slot": "8296", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x98416260b644654a4a90bda6032053f1eb3a12c59a3c7534f1ef348f2108c2837245bce74b3fd9f61ebae24860cc698100f864c4f26966c36431acbf0beea679807ba4eba9adfd1a267ef8d990290a2548af6456b1d0def6639ac47fd30c5542", + "data": { + "slot": "8317", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": 
"0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x8b1fbaba2982cd546a7d19d4af3755163112349a0e6d13f05c69807c709f363359c2cfff8a4afa66bd24445eb12b923615c33892a82d8081575207e4165a1d0c944fd3871ff885662c3921b152e674130879d67a0692b4867ad9fc2d20e24fa3", + "data": { + "slot": "8307", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x894270af1854ce4e65c6e09bc83c15171d564a2af871d0b442cacea78536e5cd34cf4a906025a6d87e12a172ceeb79990b86a1de7ed4ef40cffeca6b93402c3542682bb2914c34430e23038a57e8490abe809dc9f96f3b2caebed380113280b3", + "data": { + "slot": "8297", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x98aade2cf9dad0e1528edec0e76be15577601b6cbef68353e51748b6286bf08812e42fe8791147a54eeed34782249e3f0cc463e22d6cb1c6050636ca8d070531fe40e16913f2e5560f6e683a6781268ff08d32bc5899b00306a87eecc5603928", + "data": { + "slot": "8290", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x805e92ba1e4c88489f178202ca5d7ce96e7b874fe98bdee80ab6423441bd37ff3f0fe724bc088f58050ac2a8f81ec5e80401d76caeb65795b5794e7a20d0384f3bfd162b281a17e96cc98087c651d893b05154203af7a7591afe1056db934ec4", + "data": { + "slot": "8309", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0xa44792a6341475c3eff0c11ad62bc19154d5ed48a4b81869ed49885499d5f768c81f357dd6a3ea60aa2f15a184b098f40a1382cfaea0a8438d62d9cca27c85023245b1838e2e4f1d4e65926f8c6a3032740b36c0a3c8aa69850648ac6c12b3ce", + "data": { + "slot": "8306", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": 
"0xa44792a6341475c3eff0c11ad62bc19154d5ed48a4b81869ed49885499d5f768c81f357dd6a3ea60aa2f15a184b098f40a1382cfaea0a8438d62d9cca27c85023245b1838e2e4f1d4e65926f8c6a3032740b36c0a3c8aa69850648ac6c12b3ce", + "data": { + "slot": "8306", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x980e36beab885b1f2d8460e7ece21054e9d235fea5429836bc6df687e0c2f41b7556d9c86cd9c1ca7a69e5a51991b8d617eea619ba8e312d568e38f8de8adb8b4a9ec3e9dab2d47df45b35d9f2488236c042d66cd0916fee70e8a3295353b0ed", + "data": { + "slot": "8308", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x90428ae131501833950bbeccfa738a2af4cbb5c6a04ae8f5e21d7fdd00dda2452d32e0630cd989a4863e8cb81303e1ca04b8f3abcf0b4c456fd7856c0af8585d206f109972b635bcefd72a537a67839b033a6c61f00af536a5e7107fdb9bf527", + "data": { + "slot": "8313", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0xb60160a4024734b6c22e6083d755d97b22d107001965d35cd1aa5fc3c1059b4cb482c36c78609c0fa131631eb847d165177c877949e5baebb96a48f6e471c1d1d700619b4adeafa728b4d69de8d03d02854e4240d8e16d790168619cc2027247", + "data": { + "slot": "8318", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0xb731b2df4dcaf841d50747f85b332170471895508c3af7e8bada14e58a816fed435460e1694e87e2887f19a0de201c3d0bc1ece52c26c519fd9131b25fa8a69b229c14ffd1c935d9e853aca8ab07eaae98a65daec09b2640b91961685e96d58c", + "data": { + "slot": "8292", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x8b63c286bff1c5dc6fb2e4878e73631e16db1cd3b07e9d0150b3ede4175635fe8db571cb486398e35923640606643d630bacc148d84e9c1060c32b55fe644e5c2573326b041767c5d45d45509a5403a7f2f1b2dd60e54bed26f407bb367a8642", + "data": { + "slot": "8314", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": 
"0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x906fd8d1a45b719a36eb5e09b5e13f9d0fb7faaa194d84b90e0b2b811ce299f385bf18bb07844620ec032b6f267d04781480dc303081be7c5d8ba735bccd682dd3ddb6345bae13bd96068eb86b148e73b8931b642705b1696d9ada4159b1dd65", + "data": { + "slot": "8305", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x9494651d4491cfc326f3439cebc3304aaf50a8e5598217da6df2a13b5cb9f9731cc8934f406c0243786b17f936d5892801fc34fc74fb4f52fec147536375dabd9f892940aacdea196e28cb21320bce9ede79b0a11333569d90e6deeb59869217", + "data": { + "slot": "8300", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x98416260b644654a4a90bda6032053f1eb3a12c59a3c7534f1ef348f2108c2837245bce74b3fd9f61ebae24860cc698100f864c4f26966c36431acbf0beea679807ba4eba9adfd1a267ef8d990290a2548af6456b1d0def6639ac47fd30c5542", + "data": { + "slot": "8317", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x90428ae131501833950bbeccfa738a2af4cbb5c6a04ae8f5e21d7fdd00dda2452d32e0630cd989a4863e8cb81303e1ca04b8f3abcf0b4c456fd7856c0af8585d206f109972b635bcefd72a537a67839b033a6c61f00af536a5e7107fdb9bf527", + "data": { + "slot": "8313", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x980e36beab885b1f2d8460e7ece21054e9d235fea5429836bc6df687e0c2f41b7556d9c86cd9c1ca7a69e5a51991b8d617eea619ba8e312d568e38f8de8adb8b4a9ec3e9dab2d47df45b35d9f2488236c042d66cd0916fee70e8a3295353b0ed", + "data": { + "slot": "8308", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": 
"0x90428ae131501833950bbeccfa738a2af4cbb5c6a04ae8f5e21d7fdd00dda2452d32e0630cd989a4863e8cb81303e1ca04b8f3abcf0b4c456fd7856c0af8585d206f109972b635bcefd72a537a67839b033a6c61f00af536a5e7107fdb9bf527", + "data": { + "slot": "8313", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x815ca84fb30789d731ebf977b6ecdd60c30818202d464acdc2947143f62342c4a5d01c6cdb32b1e223d032c746fa98d30899164e6ab37828e6d049f32e46a5c59d742d82005f9a629938761e3abce454cec104352665cd81bbcffa2fce22a935", + "data": { + "slot": "8299", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x90428ae131501833950bbeccfa738a2af4cbb5c6a04ae8f5e21d7fdd00dda2452d32e0630cd989a4863e8cb81303e1ca04b8f3abcf0b4c456fd7856c0af8585d206f109972b635bcefd72a537a67839b033a6c61f00af536a5e7107fdb9bf527", + "data": { + "slot": "8313", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x906fd8d1a45b719a36eb5e09b5e13f9d0fb7faaa194d84b90e0b2b811ce299f385bf18bb07844620ec032b6f267d04781480dc303081be7c5d8ba735bccd682dd3ddb6345bae13bd96068eb86b148e73b8931b642705b1696d9ada4159b1dd65", + "data": { + "slot": "8305", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x906fd8d1a45b719a36eb5e09b5e13f9d0fb7faaa194d84b90e0b2b811ce299f385bf18bb07844620ec032b6f267d04781480dc303081be7c5d8ba735bccd682dd3ddb6345bae13bd96068eb86b148e73b8931b642705b1696d9ada4159b1dd65", + "data": { + "slot": "8305", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x876c656d7889c15cd355d6652b347a25dc8fd7ffc383f0d14ad436b5f1af9ac09e06168ad71be76b85c3dd44ae79cc0f04f250a0bcc529d06a1032283e2b8b384d582c0ace50bf747264a199647697f159d06be75ecfb24da2a8b625a3087804", + "data": { + "slot": "8293", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": 
"0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x876c656d7889c15cd355d6652b347a25dc8fd7ffc383f0d14ad436b5f1af9ac09e06168ad71be76b85c3dd44ae79cc0f04f250a0bcc529d06a1032283e2b8b384d582c0ace50bf747264a199647697f159d06be75ecfb24da2a8b625a3087804", + "data": { + "slot": "8293", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x8bfaed4667e28ed9e39464c7c57027ae345f22847b6ac1aa7e5f342fdb6cdca9d78a962da68f9e34e0453f68fa363fcd196881e2dd76abcab6814439d73448f404124ad2e2f57b59b0df57699d913e24f79c53f129a09c05f2659e4444f4bb53", + "data": { + "slot": "8320", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "258", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "260", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0xabdb4b0a06e2d036021b0cd847fb6e8f4d2deca86e60788a6ae2bb9bd55b62ebf35716290f958e075812e8dfcba2beef00b002459e5932d7e7478cf00e91300f9f53a84f593ce40afb1f3c07b1db789ba5da757d313a9ee4cac6b2e28ed2f929", + "data": { + "slot": "8298", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0xabdb4b0a06e2d036021b0cd847fb6e8f4d2deca86e60788a6ae2bb9bd55b62ebf35716290f958e075812e8dfcba2beef00b002459e5932d7e7478cf00e91300f9f53a84f593ce40afb1f3c07b1db789ba5da757d313a9ee4cac6b2e28ed2f929", + "data": { + "slot": "8298", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x912fe61ef99df1c96d7e5e6bd01ee5a6be73389978c7f4670c4e978beb6b8e4d640f238c6ba3426e935ac8f8527d118c06f464b08f6527ebebac793728ccc1190ee6701838c6f2b3b06391dc2d69232e63af11023ffe8e1c66eb3bd1075085a6", + "data": { + "slot": "8310", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": 
"0x90428ae131501833950bbeccfa738a2af4cbb5c6a04ae8f5e21d7fdd00dda2452d32e0630cd989a4863e8cb81303e1ca04b8f3abcf0b4c456fd7856c0af8585d206f109972b635bcefd72a537a67839b033a6c61f00af536a5e7107fdb9bf527", + "data": { + "slot": "8313", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x815ca84fb30789d731ebf977b6ecdd60c30818202d464acdc2947143f62342c4a5d01c6cdb32b1e223d032c746fa98d30899164e6ab37828e6d049f32e46a5c59d742d82005f9a629938761e3abce454cec104352665cd81bbcffa2fce22a935", + "data": { + "slot": "8299", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x8d852ffa1c5960ba3a5d09837fbdb859bbf9045001b3d1dc1c4d22c6b4bc5b6d506f6ef667b5c7c9fbfb1dd0cfe3617405f56750f8b5eb25b3539d0a4c94822b198c524de92a6c68982ce17f985ff5283cea6ac8dabe41828ce38edb7e9fe223", + "data": { + "slot": "8311", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0xab42974cba2e3fa75faa4c1f717caf7c2a953f4964063462ed32629764336124fd2f2f430ddf0291325722206250452c109b14cded0e51171b89b106b6b2044291128c3d6966c804490033b9e5fd06450ea50d22b5ab761892c6c3f4de79e098", + "data": { + "slot": "8291", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0xa44792a6341475c3eff0c11ad62bc19154d5ed48a4b81869ed49885499d5f768c81f357dd6a3ea60aa2f15a184b098f40a1382cfaea0a8438d62d9cca27c85023245b1838e2e4f1d4e65926f8c6a3032740b36c0a3c8aa69850648ac6c12b3ce", + "data": { + "slot": "8306", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0x912fe61ef99df1c96d7e5e6bd01ee5a6be73389978c7f4670c4e978beb6b8e4d640f238c6ba3426e935ac8f8527d118c06f464b08f6527ebebac793728ccc1190ee6701838c6f2b3b06391dc2d69232e63af11023ffe8e1c66eb3bd1075085a6", + "data": { + "slot": "8310", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": 
"0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0xabdb4b0a06e2d036021b0cd847fb6e8f4d2deca86e60788a6ae2bb9bd55b62ebf35716290f958e075812e8dfcba2beef00b002459e5932d7e7478cf00e91300f9f53a84f593ce40afb1f3c07b1db789ba5da757d313a9ee4cac6b2e28ed2f929", + "data": { + "slot": "8298", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0xab42974cba2e3fa75faa4c1f717caf7c2a953f4964063462ed32629764336124fd2f2f430ddf0291325722206250452c109b14cded0e51171b89b106b6b2044291128c3d6966c804490033b9e5fd06450ea50d22b5ab761892c6c3f4de79e098", + "data": { + "slot": "8291", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x876c656d7889c15cd355d6652b347a25dc8fd7ffc383f0d14ad436b5f1af9ac09e06168ad71be76b85c3dd44ae79cc0f04f250a0bcc529d06a1032283e2b8b384d582c0ace50bf747264a199647697f159d06be75ecfb24da2a8b625a3087804", + "data": { + "slot": "8293", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x805e92ba1e4c88489f178202ca5d7ce96e7b874fe98bdee80ab6423441bd37ff3f0fe724bc088f58050ac2a8f81ec5e80401d76caeb65795b5794e7a20d0384f3bfd162b281a17e96cc98087c651d893b05154203af7a7591afe1056db934ec4", + "data": { + "slot": "8309", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x95bbaf8dcff64306f01e0b09b27ebe3c761def7edd75542e213586ee0c6d3fc313ae102760abd1262b4f8c00e57603fa01627390011e3a5dea555c74798d7a3e1da68e00e3cdb9d8e4af112b6ff83951bd926288d24eb82e3f203a3160a4d7a9", + "data": { + "slot": "8312", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": 
"0x815ca84fb30789d731ebf977b6ecdd60c30818202d464acdc2947143f62342c4a5d01c6cdb32b1e223d032c746fa98d30899164e6ab37828e6d049f32e46a5c59d742d82005f9a629938761e3abce454cec104352665cd81bbcffa2fce22a935", + "data": { + "slot": "8299", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x87c3f6fac9ea937a8e8bd4f6dccb7893cb8ea39c65e0313a30e903c220dba2c8597df1d75ee21fd905eab1ebf2261ebf085b13115363d72adc9ccd9527293b7218c39e94c257c94a8c95c32cf909cf58e8b7ece89a9bd21107a413b3fe3172e0", + "data": { + "slot": "8304", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x8b1fbaba2982cd546a7d19d4af3755163112349a0e6d13f05c69807c709f363359c2cfff8a4afa66bd24445eb12b923615c33892a82d8081575207e4165a1d0c944fd3871ff885662c3921b152e674130879d67a0692b4867ad9fc2d20e24fa3", + "data": { + "slot": "8307", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff7f", + "signature": "0xa46775d208c119b097221ead6ee9afbf011258b03da07138d01fef8d5bd4681ecbab6f36687e8ae644191acebc94800a002b136de6ff892e4e0910d05402def66858ee8ad8f4b706fab163fe742959dcb86fa90d0b822e5937092852962acbb1", + "data": { + "slot": "8294", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + }, + { + "aggregation_bits": "0xff3f", + "signature": "0x97426dbbe61af8a68ac683ba95ad871baade096e9287e2d533c1efba04430b7083283485db5b1624fb03639065e8c754155cfe68986d526c1a771b67e45c0e8c97428dee8c6d80cc68892b961e8352d50f34e2623dc3b7ba2cb5dba28a854079", + "data": { + "slot": "8302", + "index": "0", + "beacon_block_root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed", + "source": { + "epoch": "257", + "root": "0x31885d5a2405876b7203f9cc1a7e115b9977412107c51c81ab4fd49bde93905e" + }, + "target": { + "epoch": "259", + "root": "0x86979f6f6dc7626064ef0d38d4dffb89e91d1d4c18492e3fb7d7ee93cedca3ed" + } + } + } + ], + "deposits": [ + { + "proof": [ + "0x1a02000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "data": { + "pubkey": "0xa19c8e80ddc1caad60a172b66eb24e83ef200d77034b3e16bbee4d95e929a5c1a473563973338d22e7a566fdbd352f65", + "withdrawal_credentials": "0x00edbcfc97a6985ac86187522426240ed81b6493c880d0798360149ec8ce96d8", + "amount": "32000000000", + "signature": "0xb9b4b512b2c67a3e89edcbef91fc0ccd88c9a8c8654c51a130ffb2ab539c22a0c6b84928e8db4ca8a9d04f2dee312c3817a2bf360b6f5f2f3d1ba69b43cf4671290f7f58621887ad4dd1c9fe6d02cc59443e12447a20b38913f67597b0e3cc93" + } + }, + { + "proof": [ + "0x1a02000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "data": { + "pubkey": "0xb1f92d1a612942fb266c1e436f8d417282efa2805d5a5a819e3d07e358a70efbf0cc1671412ee986cd342c3d2255a324", + "withdrawal_credentials": "0x004ac0f181a01d43a7de32602b440cfbe3a091bb8c108c1fa35726ed301743f9", + "amount": "32000000000", + "signature": "0x8dbd6f9b4ce0a5277f66da9ec41776cff88a647ae1b4dde221a3bf41b9d4af1e77d0cff23185796815448f2e8148126a046b4b60947a32a1e201b4e979c91b395c1d4804ead1324d699eaa9c481efa69484a7946a0bad9788e50cf05847a30c4" + } + }, + { + "proof": [ + "0x1a02000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "data": { + "pubkey": "0xb532643cb8824a2fbd9196c10961f3ad2f0e319c3612bb15a51a3454593f44726383f006425c2e5952b156a6e14aceb0", + "withdrawal_credentials": "0x00f68c08152911b76f556f9d6dfc66d54e5abd63de04dc073d6b03f333ac00f3", + "amount": "32000000000", + "signature": "0x97852e8c02386bcc8a2dd51c70c48661c79bc1f89f9dce113a60fcde345abedf96fa186c4230013cf61f3546c5d9877a0eab7a5a4f4e4e0e4bcd917dc8368a88e3b8380de9e96ed36bfd605d55956af64a17b877f12762acfdd1c3effe4b4d42" + } + }, + { + "proof": [ + "0x1a02000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "data": { + "pubkey": "0xa7a1c0bbad929dc02699e92597a66266bbd9533419693270c9b56bbdea643cd2ded9664da3c9fd8db2389277b5e585cc", + "withdrawal_credentials": "0x00e64188226da03f1f3d787ef65d86690aaa24d44e5ac92c99c413463ec47c26", + "amount": "32000000000", + "signature": "0xb0e97772997255840a5758e5325b9d1c56a292500838c5b2b697b7dd207c65a2ef928ebb9466d57782edf79f9b74bbbb069235c752f6527e8d8eb1c785d99326da78680056ee3084811b980185287259af64607e218d67a3b8f24d27c0659ce2" + } + }, + { + "proof": [ + "0x1a02000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "data": { + "pubkey": "0x9919842dee455266e4dc77c74088bddbfdb535b9a1bbe75a3cced0e428598038365afe11c7578e4dbd8fe4cae7237543", + "withdrawal_credentials": "0x000a2baaef8f6cc730d6a5474879aed4fe8c95da787cc2e15c3cdba14a9cef12", + "amount": "32000000000", + "signature": "0x99ef1ab7cfbe40d0a1e136138a4a8094e8f54a59c8d05052749b7af14931274fad1c0a44577de51099f2700505fa8861023b7bddabb274249a091acb3a4f7543f877da3792dad7897351c7a01343116a65959812fd55cc4ce4197b05f698761f" + } + }, + { + "proof": [ + "0x1a02000000000000000000000000000000000000000000000000000000000000", + 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "data": { + "pubkey": "0xb4ed73c02a816ba9d23ba0e023970772f82dd3a32a85eefd922958e33bcab7f9c85e20372e49107665926cca852b8b9a", + "withdrawal_credentials": "0x0017c0e8e177a6d58e4f8b93b2b66b13aef9c186cfccb9466d857a474b32b0d4", + "amount": "32000000000", + "signature": "0xa6dfce815f61ce81bf107bf5ccc1beae5f32b63a55e836a5983b63b90c0e7eac873387107c145ab59c32679091cfd28a0dbf2b73f75cd5ab01b75c6ba984b83c796c92b77adba152ab2a20132324fc4b20c8ec002663f16edec9308bb8f3d298" + } + }, + { + "proof": [ + "0x1a02000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "data": { + "pubkey": "0xb0d0dfaf7479f59319beb513bee16e1af576a0740a7a124a9947ec7c3826dbc0a5d5db15519e8423d7aa683f638f3da3", + "withdrawal_credentials": "0x00a61d2fddabb70c2db059af7e298b0395ef882dda24ae144f2b7ac88026e55d", + "amount": "32000000000", + "signature": "0x85a06ab8d9d576cb2810a88635b7a462d1cfb238db066b8caeba7f36562bb903630f8f24d157747debad5428c4f42a9a0a08dfd53c687cd7c3e17ec539f353357bbd89b7111246c99cc7fab24b8cd33a88cddf845f7d27c8a33079aa097069e3" + } + }, + { + "proof": [ + "0x1a02000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "data": { + "pubkey": "0xb69614adf68d58f7d67110d7ced171ab934cb973f19c60cbb83161468655c42fe19a80a8e903030650bfaa9613a1ab2d", + "withdrawal_credentials": "0x0037c021fdef99bcf9fb90c02440571ab2faa0238485ed72e427b69dc8dddc91", + "amount": "32000000000", + "signature": "0x957f48b82d761d3e7f2e34eeff5922358d87f9b31c51e5af37a54fedeab7cfc09c3068f6ef5c97e0323dabff706bc7520113d51841c6dc2eaa044c8526bdaebcf35476c0b08cccb69ab0bab07c8e7ca2d6573b0ae96c32ae3d18764ae7ea78e0" + } + }, + { + "proof": [ + "0x1a02000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "data": { + "pubkey": "0xac897c8892a6f3effcd276e4f44f410644846a333db600ad12e1099020196b2f8104563c04d78fedf5afc5d87b91b1b5", + "withdrawal_credentials": "0x0075f9178dd8a199c55d5cebb9dccb00508e619d5b9abd2b7cd5ad3f671c5a9f", + "amount": "32000000000", + "signature": "0x95a886b35ead6f8fc09d33975108857abffc32d53db6546a7251d32ca6d1706e899155b3883b05e65a041e44c51db8480703f13cccc6575cd2d50d0506485b9669a096bb1a2d4879008c15b8c1cdcd2e1a5c4f12885311e24dd87dc32e1bce87" + } + }, + { + "proof": [ + "0x1a02000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "data": { + "pubkey": "0x8794fd3f4e5e66e6e81735d5726943833b82d1efd7d877e495a8c36955b7dfb95b3f6cfcef865fd7969fa2e17e628ab9", + "withdrawal_credentials": "0x0087adf1a29896ae52be67356ee9a4a5035450764c278382f8940d554668c208", + "amount": "32000000000", + "signature": 
"0xb42aa548fd9068db7916757390f6d011ad890b9f27a75d4676dd9edcd9017f5d7e2cec215a04502fcff253aa821865fb0c30549e7b5d5e62cc8df0264dc3b55538f15cfd375f9cb022a94c2a39201d757a502701acd50554dc4da29173c945bd" + } + } + ], + "voluntary_exits": [ + { + "message": { + "epoch": "260", + "validator_index": "504" + }, + "signature": "0x8fedc3077271b41f631d6062cc1cc8c8f074e486e9e692f198c5f82b94d2bb3b0fbf71cbac043cee94b56a7a06adf06d07bb7ecf06d8f699add17972ceb54b25e6021c3a2a727afd3370e960afbf345a75fddd2d221ba85a5f7b07e5607eec1e" + }, + { + "message": { + "epoch": "260", + "validator_index": "503" + }, + "signature": "0xa44079752dfa36b925f0ff675dfd10b5b7cc0c178839356d0bda9c83b6df01f6bfdd904af92373002bfac40277941d2809c4152fc61007ae4f2c73e550ed02f425419efae0461d8829746c7a3d36dcae5bc37158ede7dd30ccc33930783b6194" + }, + { + "message": { + "epoch": "260", + "validator_index": "502" + }, + "signature": "0xb193b547c2d45341c9aedd0a22f4afc565d9aaa3a04889df2f8ad608bb31b44a0391c69383f0f4725cea291332c081ff0a48e850d246dd0be40880bf17316eb4b2eaf4b8b6ba6d59c93aea3af98988f05cb2ddf61d8637f943864ebfe7c9707c" + }, + { + "message": { + "epoch": "260", + "validator_index": "501" + }, + "signature": "0x88afe9a0215d2a67c451fcbdc358237c4d5dce6b46973ae527afb7f8fb1da800d6a3dd7f6387028a57737b354b7db88803bd6f2a59c7fb84229f42e6c6ea1b7510cb2a28026ff8f2eefb8fc7e2a83115197b7a1bd35fbf0afcc69e4b6e581911" + }, + { + "message": { + "epoch": "260", + "validator_index": "500" + }, + "signature": "0xa2f2399070bcfa3f50894d7170d1343ab5f52d6bdc155124e867bcde936aee4e0bb69f164dee5fa07d47abccb8844ec101126caf0402f1a757934f8e7b5904a60cedc283b5e9801f2a71f80cda16e910d72518d469a9a40cd94b8ad3cca10136" + }, + { + "message": { + "epoch": "260", + "validator_index": "499" + }, + "signature": "0x86abacd204c85cfc40d71853422001e44134b1900138fccb409928b7e663270476e3d7a7e0aaa103c693cad3629da1aa056cac30c8aab1a4eb50d81bb0711db3dba1d741562b103f67f495996b18fad779d3d9cc508763ab883a7cd6858bdc51" + }, + { + "message": { + "epoch": "260", + "validator_index": "498" + }, + "signature": "0xb86533e02779dd0f959dbf1b0fa195126ccc945fd0a7c5b7370aefc16f8f130d083c0c1c58a5c18e8119d7912dd532d91765dd26ad5ef3991238bc093bab79d511b1d8484482eec9b6b4a98f4a8928819ea58fc857ed80b59fe9cb7a33fe60a2" + }, + { + "message": { + "epoch": "260", + "validator_index": "495" + }, + "signature": "0x80a5c7c52a246dcaaf67caf6285ea518581835af668d1a64723b321b167464e238248c0017d5265be373c9079d7b529b10aedc37835683e5e1320c3ad6fa1f72d52046a49b061935e1631565912d2f2482434007957fe9903edecf4dad8e5bb8" + }, + { + "message": { + "epoch": "260", + "validator_index": "494" + }, + "signature": "0xb6a0e4cdc1815f03166218963ec9cc4c5d607a67d659d1227386e16f90d3e39c6cddf696e3534f3824ca5aff8c734bab153f3bab701247cdcea16db31c94846c1cd3781b1861485ad813d025bf0a486c592dd1f9afa1134e8288e4fef44d2f3c" + }, + { + "message": { + "epoch": "260", + "validator_index": "492" + }, + "signature": "0xad850276510c2e41d059df6a1cefab9f1b66463da47b0fc772b21ed90c13e1bd6f86def8b2ecb867f4f752612d9d25e30a151aa6ef630a1b6ddaa4420c240b37df0234ee332373fe132b0101a0486900c5733762beeacd95429dd34c34230d13" + }, + { + "message": { + "epoch": "260", + "validator_index": "491" + }, + "signature": "0x837669180ba01b65157087f49c7af19acb1439016eca9c699b7136da7e9bbc89d6bddc7a030388bbb7e149ebd521c4810f457846b9cf913f7ee6f01db4363d3ce92fc732e52359917d36c7e4a08158653f1a9a78a608c4b56ff3e155b2783974" + } + ], + "sync_aggregate": { + "sync_committee_bits": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "signature": "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "execution_payload": { + "parent_hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "fee_recipient": "0x0000000000000000000000000000000000000000", + "state_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "receipts_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "prev_randao": "0x0000000000000000000000000000000000000000000000000000000000000000", + "block_number": "0", + "gas_limit": "0", + "gas_used": "0", + "timestamp": "0", + "extra_data": null, + "base_fee_per_gas": "0x0000000000000000000000000000000000000000000000000000000000000000", + "block_hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactions": null + }, + "execution_changes": [], + "blob_kzg_commitments": [] + } + } + }, + "finalized": false, + "version": 0, + "execution_optimistic": false +} diff --git a/cl/beacon/handler/test_data/committees_1.yaml b/cl/beacon/handler/test_data/committees_1.yaml new file mode 100644 index 00000000000..feaabdcafe0 --- /dev/null +++ b/cl/beacon/handler/test_data/committees_1.yaml @@ -0,0 +1,6 @@ +- {"data":[{"index":"0","slot":"8322","validators":["0","104","491","501","379","318","275","504","75","280","105","399","35","401"]}],"execution_optimistic":false,"finalized":true} +- {"data":[],"finalized":true,"execution_optimistic":false} +- {"data":[{"index":"0","slot":"8290","validators":["127","377","274","85","309","420","423","398","153","480","273","429","374","260"]}],"execution_optimistic":false,"finalized":true} +- {"data":[{"index":"0","slot":"8322","validators":["0","104","491","501","379","318","275","504","75","280","105","399","35","401"]}],"finalized":false,"execution_optimistic":false} +- {"data":[],"finalized":false,"execution_optimistic":false} +- {"data":[{"index":"0","slot":"8290","validators":["127","377","274","85","309","420","423","398","153","480","273","429","374","260"]}],"finalized":false,"execution_optimistic":false} diff --git a/cl/beacon/handler/test_data/duties_1.yaml b/cl/beacon/handler/test_data/duties_1.yaml new file mode 100644 index 00000000000..f1a51002917 --- /dev/null +++ b/cl/beacon/handler/test_data/duties_1.yaml @@ -0,0 +1,2 @@ +- 
{"data":[{"pubkey":"0x97f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb","validator_index":"0","committee_index":"0","committee_length":"14","validator_committee_index":"0","committees_at_slot":"1","slot":"8322"},{"pubkey":"0xb0e7791fb972fe014159aa33a98622da3cdc98ff707965e536d8636b5fcc5ac7a91a8c46e59a00dca575af0f18fb13dc","validator_index":"4","committee_index":"0","committee_length":"13","validator_committee_index":"5","committees_at_slot":"1","slot":"8327"},{"pubkey":"0xb928f3beb93519eecf0145da903b40a4c97dca00b21f12ac0df3be9116ef2ef27b2ae6bcd4c5bc2d54ef5a70627efcb7","validator_index":"6","committee_index":"0","committee_length":"13","validator_committee_index":"10","committees_at_slot":"1","slot":"8327"},{"pubkey":"0xa6e82f6da4520f85c5d27d8f329eccfa05944fd1096b20734c894966d12a9e2a9a9744529d7212d33883113a0cadb909","validator_index":"5","committee_index":"0","committee_length":"14","validator_committee_index":"10","committees_at_slot":"1","slot":"8329"},{"pubkey":"0x89ece308f9d1f0131765212deca99697b112d61f9be9a5f1f3780a51335b3ff981747a0b2ca2179b96d2c0c9024e5224","validator_index":"2","committee_index":"0","committee_length":"14","validator_committee_index":"11","committees_at_slot":"1","slot":"8331"},{"pubkey":"0xaf81da25ecf1c84b577fefbedd61077a81dc43b00304015b2b596ab67f00e41c86bb00ebd0f90d4b125eb0539891aeed","validator_index":"9","committee_index":"0","committee_length":"14","validator_committee_index":"8","committees_at_slot":"1","slot":"8342"},{"pubkey":"0xac9b60d5afcbd5663a8a44b7c5a02f19e9a77ab0a35bd65809bb5c67ec582c897feb04decc694b13e08587f3ff9b5b60","validator_index":"3","committee_index":"0","committee_length":"13","validator_committee_index":"6","committees_at_slot":"1","slot":"8348"}],"execution_optimistic":false} +- {"data":[],"execution_optimistic":false} diff --git a/cl/beacon/handler/test_data/duties_sync_1.yaml b/cl/beacon/handler/test_data/duties_sync_1.yaml new file mode 100644 index 00000000000..9d655806893 --- /dev/null +++ b/cl/beacon/handler/test_data/duties_sync_1.yaml @@ -0,0 +1,2 @@ +- 
{"data":[{"pubkey":"0x97f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb","validator_index":"0","validator_sync_committee_indicies":["30","286"]},{"pubkey":"0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e","validator_index":"1","validator_sync_committee_indicies":["120","376"]},{"pubkey":"0x89ece308f9d1f0131765212deca99697b112d61f9be9a5f1f3780a51335b3ff981747a0b2ca2179b96d2c0c9024e5224","validator_index":"2","validator_sync_committee_indicies":["138","394"]},{"pubkey":"0xac9b60d5afcbd5663a8a44b7c5a02f19e9a77ab0a35bd65809bb5c67ec582c897feb04decc694b13e08587f3ff9b5b60","validator_index":"3","validator_sync_committee_indicies":["10","266"]},{"pubkey":"0xb0e7791fb972fe014159aa33a98622da3cdc98ff707965e536d8636b5fcc5ac7a91a8c46e59a00dca575af0f18fb13dc","validator_index":"4","validator_sync_committee_indicies":["114","370"]},{"pubkey":"0xa6e82f6da4520f85c5d27d8f329eccfa05944fd1096b20734c894966d12a9e2a9a9744529d7212d33883113a0cadb909","validator_index":"5","validator_sync_committee_indicies":["103","359"]},{"pubkey":"0xb928f3beb93519eecf0145da903b40a4c97dca00b21f12ac0df3be9116ef2ef27b2ae6bcd4c5bc2d54ef5a70627efcb7","validator_index":"6","validator_sync_committee_indicies":["163","419"]},{"pubkey":"0xa85ae765588126f5e860d019c0e26235f567a9c0c0b2d8ff30f3e8d436b1082596e5e7462d20f5be3764fd473e57f9cf","validator_index":"7","validator_sync_committee_indicies":["197","453"]},{"pubkey":"0x99cdf3807146e68e041314ca93e1fee0991224ec2a74beb2866816fd0826ce7b6263ee31e953a86d1b72cc2215a57793","validator_index":"8","validator_sync_committee_indicies":["175","431"]},{"pubkey":"0xaf81da25ecf1c84b577fefbedd61077a81dc43b00304015b2b596ab67f00e41c86bb00ebd0f90d4b125eb0539891aeed","validator_index":"9","validator_sync_committee_indicies":["53","309"]}],"execution_optimistic":false} +- {"data":[],"execution_optimistic":false} diff --git a/cl/beacon/handler/test_data/forkchoice_1.yaml b/cl/beacon/handler/test_data/forkchoice_1.yaml new file mode 100644 index 00000000000..448f1545a4b --- /dev/null +++ b/cl/beacon/handler/test_data/forkchoice_1.yaml @@ -0,0 +1,2 @@ +- {"data":[{"execution_optimistic":false,"root":"0x0102030000000000000000000000000000000000000000000000000000000000","slot":128}]} +- {"finalized_checkpoint":{"epoch":"1","root":"0x0102030000000000000000000000000000000000000000000000000000000000"},"fork_choice_nodes":[{"slot":"128","block_root":"0x0102030000000000000000000000000000000000000000000000000000000000","parent_root":"0x0102030000000000000000000000000000000000000000000000000000000000","justified_epoch":"0","finalized_epoch":"0","weight":"1","validity":"","execution_block_hash":"0x0000000000000000000000000000000000000000000000000000000000000000"},{"slot":"128","block_root":"0x0102020405030000000000000000000000000000000000000000000000000000","parent_root":"0x0102050000000000000000000000000000000000000000000000000000000000","justified_epoch":"0","finalized_epoch":"0","weight":"2","validity":"","execution_block_hash":"0x0000000000000000000000000000000000000000000000000000000000000000"}],"justified_checkpoint":{"epoch":"2","root":"0x0102030000000000000000000000000000000000000000000000000000000000"}} diff --git a/cl/beacon/handler/test_data/rewards_1.yaml b/cl/beacon/handler/test_data/rewards_1.yaml new file mode 100644 index 00000000000..28d6ba50e08 --- /dev/null +++ b/cl/beacon/handler/test_data/rewards_1.yaml @@ -0,0 +1,3 @@ +- 
{"data":{"proposer_index":"203","attestations":"332205","proposer_slashings":"0","attester_slashings":"0","sync_aggregate":"0","total":"332205"},"finalized":true,"execution_optimistic":false} +- {"data":{"proposer_index":"98","attestations":"332205","proposer_slashings":"0","attester_slashings":"0","sync_aggregate":"0","total":"332205"},"finalized":true,"execution_optimistic":false} +- {"data":[{"validator_index":"1","reward":"-698"},{"validator_index":"4","reward":"-698"}],"execution_optimistic":false,"finalized":true} diff --git a/cl/beacon/handler/test_data/rewards_2.json b/cl/beacon/handler/test_data/rewards_2.json new file mode 100644 index 00000000000..e73b214703a --- /dev/null +++ b/cl/beacon/handler/test_data/rewards_2.json @@ -0,0 +1 @@ +{"data":[{"validator_index":"0","reward":"-698"},{"validator_index":"1","reward":"-698"},{"validator_index":"2","reward":"-698"},{"validator_index":"3","reward":"-698"},{"validator_index":"4","reward":"-698"},{"validator_index":"5","reward":"-698"},{"validator_index":"6","reward":"-698"},{"validator_index":"7","reward":"-698"},{"validator_index":"8","reward":"-698"},{"validator_index":"9","reward":"-698"},{"validator_index":"10","reward":"-698"},{"validator_index":"11","reward":"-698"},{"validator_index":"12","reward":"-698"},{"validator_index":"13","reward":"-698"},{"validator_index":"14","reward":"-698"},{"validator_index":"15","reward":"-698"},{"validator_index":"16","reward":"-698"},{"validator_index":"17","reward":"-698"},{"validator_index":"18","reward":"-698"},{"validator_index":"19","reward":"-698"},{"validator_index":"20","reward":"-698"},{"validator_index":"21","reward":"-698"},{"validator_index":"22","reward":"-698"},{"validator_index":"23","reward":"-698"},{"validator_index":"24","reward":"-698"},{"validator_index":"25","reward":"-698"},{"validator_index":"26","reward":"-698"},{"validator_index":"27","reward":"-698"},{"validator_index":"28","reward":"-698"},{"validator_index":"29","reward":"-698"},{"validator_index":"30","reward":"-698"},{"validator_index":"31","reward":"-698"},{"validator_index":"32","reward":"-698"},{"validator_index":"33","reward":"-698"},{"validator_index":"34","reward":"-698"},{"validator_index":"35","reward":"-698"},{"validator_index":"36","reward":"-698"},{"validator_index":"37","reward":"-698"},{"validator_index":"38","reward":"-698"},{"validator_index":"39","reward":"-698"},{"validator_index":"40","reward":"-698"},{"validator_index":"41","reward":"-698"},{"validator_index":"42","reward":"-698"},{"validator_index":"43","reward":"-698"},{"validator_index":"44","reward":"-698"},{"validator_index":"45","reward":"-698"},{"validator_index":"46","reward":"-698"},{"validator_index":"47","reward":"-698"},{"validator_index":"48","reward":"-698"},{"validator_index":"49","reward":"-698"},{"validator_index":"50","reward":"-698"},{"validator_index":"51","reward":"-698"},{"validator_index":"52","reward":"-698"},{"validator_index":"53","reward":"-698"},{"validator_index":"54","reward":"-698"},{"validator_index":"55","reward":"-698"},{"validator_index":"56","reward":"-698"},{"validator_index":"57","reward":"-698"},{"validator_index":"58","reward":"-698"},{"validator_index":"59","reward":"-698"},{"validator_index":"60","reward":"-698"},{"validator_index":"61","reward":"-698"},{"validator_index":"62","reward":"-698"},{"validator_index":"63","reward":"-698"},{"validator_index":"64","reward":"-698"},{"validator_index":"65","reward":"-698"},{"validator_index":"66","reward":"-698"},{"validator_index":"67","reward":"-698"},{"validator
_index":"68","reward":"-698"},{"validator_index":"69","reward":"-698"},{"validator_index":"70","reward":"-698"},{"validator_index":"71","reward":"-698"},{"validator_index":"72","reward":"-698"},{"validator_index":"73","reward":"-698"},{"validator_index":"74","reward":"-698"},{"validator_index":"75","reward":"-698"},{"validator_index":"76","reward":"-698"},{"validator_index":"77","reward":"-698"},{"validator_index":"78","reward":"-698"},{"validator_index":"79","reward":"-698"},{"validator_index":"80","reward":"-698"},{"validator_index":"81","reward":"-698"},{"validator_index":"82","reward":"-698"},{"validator_index":"83","reward":"-698"},{"validator_index":"84","reward":"-698"},{"validator_index":"85","reward":"-698"},{"validator_index":"86","reward":"-698"},{"validator_index":"87","reward":"-698"},{"validator_index":"88","reward":"-698"},{"validator_index":"89","reward":"-698"},{"validator_index":"90","reward":"-698"},{"validator_index":"91","reward":"-698"},{"validator_index":"92","reward":"-698"},{"validator_index":"93","reward":"-698"},{"validator_index":"94","reward":"-698"},{"validator_index":"95","reward":"-698"},{"validator_index":"96","reward":"-698"},{"validator_index":"97","reward":"-698"},{"validator_index":"98","reward":"-698"},{"validator_index":"99","reward":"-698"},{"validator_index":"100","reward":"-698"},{"validator_index":"101","reward":"-698"},{"validator_index":"102","reward":"-698"},{"validator_index":"103","reward":"-698"},{"validator_index":"104","reward":"-698"},{"validator_index":"105","reward":"-698"},{"validator_index":"106","reward":"-698"},{"validator_index":"107","reward":"-698"},{"validator_index":"108","reward":"-698"},{"validator_index":"109","reward":"-698"},{"validator_index":"110","reward":"-698"},{"validator_index":"111","reward":"-698"},{"validator_index":"112","reward":"-698"},{"validator_index":"113","reward":"-698"},{"validator_index":"114","reward":"-698"},{"validator_index":"115","reward":"-698"},{"validator_index":"116","reward":"-698"},{"validator_index":"117","reward":"-698"},{"validator_index":"118","reward":"-698"},{"validator_index":"119","reward":"-698"},{"validator_index":"120","reward":"-698"},{"validator_index":"121","reward":"-698"},{"validator_index":"122","reward":"-698"},{"validator_index":"123","reward":"-698"},{"validator_index":"124","reward":"-698"},{"validator_index":"125","reward":"-698"},{"validator_index":"126","reward":"-698"},{"validator_index":"127","reward":"-698"},{"validator_index":"128","reward":"-698"},{"validator_index":"129","reward":"-698"},{"validator_index":"130","reward":"-698"},{"validator_index":"131","reward":"-698"},{"validator_index":"132","reward":"-698"},{"validator_index":"133","reward":"-698"},{"validator_index":"134","reward":"-698"},{"validator_index":"135","reward":"-698"},{"validator_index":"136","reward":"-698"},{"validator_index":"137","reward":"-698"},{"validator_index":"138","reward":"-698"},{"validator_index":"139","reward":"-698"},{"validator_index":"140","reward":"-698"},{"validator_index":"141","reward":"-698"},{"validator_index":"142","reward":"-698"},{"validator_index":"143","reward":"-698"},{"validator_index":"144","reward":"-698"},{"validator_index":"145","reward":"-698"},{"validator_index":"146","reward":"-698"},{"validator_index":"147","reward":"-698"},{"validator_index":"148","reward":"-698"},{"validator_index":"149","reward":"-698"},{"validator_index":"150","reward":"-698"},{"validator_index":"151","reward":"-698"},{"validator_index":"152","reward":"-698"},{"validator_index":"153","re
ward":"-698"},{"validator_index":"154","reward":"-698"},{"validator_index":"155","reward":"-698"},{"validator_index":"156","reward":"-698"},{"validator_index":"157","reward":"-698"},{"validator_index":"158","reward":"-698"},{"validator_index":"159","reward":"-698"},{"validator_index":"160","reward":"-698"},{"validator_index":"161","reward":"-698"},{"validator_index":"162","reward":"-698"},{"validator_index":"163","reward":"-698"},{"validator_index":"164","reward":"-698"},{"validator_index":"165","reward":"-698"},{"validator_index":"166","reward":"-698"},{"validator_index":"167","reward":"-698"},{"validator_index":"168","reward":"-698"},{"validator_index":"169","reward":"-698"},{"validator_index":"170","reward":"-698"},{"validator_index":"171","reward":"-698"},{"validator_index":"172","reward":"-698"},{"validator_index":"173","reward":"-698"},{"validator_index":"174","reward":"-698"},{"validator_index":"175","reward":"-698"},{"validator_index":"176","reward":"-698"},{"validator_index":"177","reward":"-698"},{"validator_index":"178","reward":"-698"},{"validator_index":"179","reward":"-698"},{"validator_index":"180","reward":"-698"},{"validator_index":"181","reward":"-698"},{"validator_index":"182","reward":"-698"},{"validator_index":"183","reward":"-698"},{"validator_index":"184","reward":"-698"},{"validator_index":"185","reward":"-698"},{"validator_index":"186","reward":"-698"},{"validator_index":"187","reward":"-698"},{"validator_index":"188","reward":"-698"},{"validator_index":"189","reward":"-698"},{"validator_index":"190","reward":"-698"},{"validator_index":"191","reward":"-698"},{"validator_index":"192","reward":"-698"},{"validator_index":"193","reward":"-698"},{"validator_index":"194","reward":"-698"},{"validator_index":"195","reward":"-698"},{"validator_index":"196","reward":"-698"},{"validator_index":"197","reward":"-698"},{"validator_index":"198","reward":"-698"},{"validator_index":"199","reward":"-698"},{"validator_index":"200","reward":"-698"},{"validator_index":"201","reward":"-698"},{"validator_index":"202","reward":"-698"},{"validator_index":"203","reward":"-698"},{"validator_index":"204","reward":"-698"},{"validator_index":"205","reward":"-698"},{"validator_index":"206","reward":"-698"},{"validator_index":"207","reward":"-698"},{"validator_index":"208","reward":"-698"},{"validator_index":"209","reward":"-698"},{"validator_index":"210","reward":"-698"},{"validator_index":"211","reward":"-698"},{"validator_index":"212","reward":"-698"},{"validator_index":"213","reward":"-698"},{"validator_index":"214","reward":"-698"},{"validator_index":"215","reward":"-698"},{"validator_index":"216","reward":"-698"},{"validator_index":"217","reward":"-698"},{"validator_index":"218","reward":"-698"},{"validator_index":"219","reward":"-698"},{"validator_index":"220","reward":"-698"},{"validator_index":"221","reward":"-698"},{"validator_index":"222","reward":"-698"},{"validator_index":"223","reward":"-698"},{"validator_index":"224","reward":"-698"},{"validator_index":"225","reward":"-698"},{"validator_index":"226","reward":"-698"},{"validator_index":"227","reward":"-698"},{"validator_index":"228","reward":"-698"},{"validator_index":"229","reward":"-698"},{"validator_index":"230","reward":"-698"},{"validator_index":"231","reward":"-698"},{"validator_index":"232","reward":"-698"},{"validator_index":"233","reward":"-698"},{"validator_index":"234","reward":"-698"},{"validator_index":"235","reward":"-698"},{"validator_index":"236","reward":"-698"},{"validator_index":"237","reward":"-698"},{"validator_i
ndex":"238","reward":"-698"},{"validator_index":"239","reward":"-698"},{"validator_index":"240","reward":"-698"},{"validator_index":"241","reward":"-698"},{"validator_index":"242","reward":"-698"},{"validator_index":"243","reward":"-698"},{"validator_index":"244","reward":"-698"},{"validator_index":"245","reward":"-698"},{"validator_index":"246","reward":"-698"},{"validator_index":"247","reward":"-698"},{"validator_index":"248","reward":"-698"},{"validator_index":"249","reward":"-698"},{"validator_index":"250","reward":"-698"},{"validator_index":"251","reward":"-698"},{"validator_index":"252","reward":"-698"},{"validator_index":"253","reward":"-698"},{"validator_index":"254","reward":"-698"},{"validator_index":"255","reward":"-698"}],"finalized":true,"execution_optimistic":false} diff --git a/cl/beacon/handler/test_data/states_1.yaml b/cl/beacon/handler/test_data/states_1.yaml new file mode 100644 index 00000000000..f5e63003f7a --- /dev/null +++ b/cl/beacon/handler/test_data/states_1.yaml @@ -0,0 +1,2 @@ +finality_checkpoint: {"data":{"finalized_checkpoint":{"epoch":"1","root":"0xde46b0f2ed5e72f0cec20246403b14c963ec995d7c2825f3532b0460c09d5693"},"current_justified_checkpoint":{"epoch":"3","root":"0xa6e47f164b1a3ca30ea3b2144bd14711de442f51e5b634750a12a1734e24c987"},"previous_justified_checkpoint":{"epoch":"2","root":"0x4c3ee7969e485696669498a88c17f70e6999c40603e2f4338869004392069063"}},"finalized":false,"version":2,"execution_optimistic":false} +randao: {"data":{"randao":"0xdeec617717272914bfd73e02ca1da113a83cf4cf33cd4939486509e2da4ccf4e"},"finalized":false,"execution_optimistic":false} diff --git a/cl/beacon/handler/test_data/sync_committees_1.json b/cl/beacon/handler/test_data/sync_committees_1.json new file mode 100644 index 00000000000..b593004c7b7 --- /dev/null +++ b/cl/beacon/handler/test_data/sync_committees_1.json @@ -0,0 +1 @@ 
+{"data":{"validators":["109","134","145","89","181","81","159","168","34","251","3","205","213","202","99","121","80","149","18","65","201","227","116","69","100","74","160","198","16","131","0","73","210","122","209","217","97","237","136","98","229","248","176","95","150","171","238","191","200","220","33","219","126","9","214","124","56","86","169","208","125","85","25","88","13","190","153","183","96","165","180","90","164","104","240","123","118","196","163","222","231","127","241","77","68","32","62","79","44","58","14","187","151","243","139","142","174","106","228","102","223","31","120","5","43","255","179","66","119","170","60","152","167","194","4","112","156","233","254","203","1","55","53","19","92","21","28","42","141","162","146","57","23","45","158","93","212","38","2","206","246","225","195","189","47","193","224","242","76","138","84","140","111","51","135","113","41","133","207","30","82","175","161","6","249","83","234","155","244","177","108","252","94","143","173","8","154","75","50","49","39","36","182","101","48","12","172","87","250","59","24","157","215","218","72","185","71","7","253","114","230","226","110","46","166","91","130","20","137","117","132","204","221","52","197","188","11","232","67","115","245","26","35","103","186","37","27","235","64","40","70","239","236","211","61","29","216","199","63","54","78","105","184","15","10","147","247","22","144","107","128","17","178","148","129","192","109","134","145","89","181","81","159","168","34","251","3","205","213","202","99","121","80","149","18","65","201","227","116","69","100","74","160","198","16","131","0","73","210","122","209","217","97","237","136","98","229","248","176","95","150","171","238","191","200","220","33","219","126","9","214","124","56","86","169","208","125","85","25","88","13","190","153","183","96","165","180","90","164","104","240","123","118","196","163","222","231","127","241","77","68","32","62","79","44","58","14","187","151","243","139","142","174","106","228","102","223","31","120","5","43","255","179","66","119","170","60","152","167","194","4","112","156","233","254","203","1","55","53","19","92","21","28","42","141","162","146","57","23","45","158","93","212","38","2","206","246","225","195","189","47","193","224","242","76","138","84","140","111","51","135","113","41","133","207","30","82","175","161","6","249","83","234","155","244","177","108","252","94","143","173","8","154","75","50","49","39","36","182","101","48","12","172","87","250","59","24","157","215","218","72","185","71","7","253","114","230","226","110","46","166","91","130","20","137","117","132","204","221","52","197","188","11","232","67","115","245","26","35","103","186","37","27","235","64","40","70","239","236","211","61","29","216","199","63","54","78","105","184","15","10","147","247","22","144","107","128","17","178","148","129","192"],"validator_aggregates":[["109","134","145","89","181","81","159","168","34","251","3","205","213","202","99","121","80","149","18","65","201","227","116","69","100","74","160","198","16","131","0","73","210","122","209","217","97","237","136","98","229","248","176","95","150","171","238","191","200","220","33","219","126","9","214","124","56","86","169","208","125","85","25","88","13","190","153","183","96","165","180","90","164","104","240","123","118","196","163","222","231","127","241","77","68","32","62","79","44","58","14","187","151","243","139","142","174","106","228","102","223","31","120","5","43","255","179","66","119","170","60","152","167","194","4","112","1
56","233","254","203","1","55","53","19","92","21","28","42"],["141","162","146","57","23","45","158","93","212","38","2","206","246","225","195","189","47","193","224","242","76","138","84","140","111","51","135","113","41","133","207","30","82","175","161","6","249","83","234","155","244","177","108","252","94","143","173","8","154","75","50","49","39","36","182","101","48","12","172","87","250","59","24","157","215","218","72","185","71","7","253","114","230","226","110","46","166","91","130","20","137","117","132","204","221","52","197","188","11","232","67","115","245","26","35","103","186","37","27","235","64","40","70","239","236","211","61","29","216","199","63","54","78","105","184","15","10","147","247","22","144","107","128","17","178","148","129","192"],["109","134","145","89","181","81","159","168","34","251","3","205","213","202","99","121","80","149","18","65","201","227","116","69","100","74","160","198","16","131","0","73","210","122","209","217","97","237","136","98","229","248","176","95","150","171","238","191","200","220","33","219","126","9","214","124","56","86","169","208","125","85","25","88","13","190","153","183","96","165","180","90","164","104","240","123","118","196","163","222","231","127","241","77","68","32","62","79","44","58","14","187","151","243","139","142","174","106","228","102","223","31","120","5","43","255","179","66","119","170","60","152","167","194","4","112","156","233","254","203","1","55","53","19","92","21","28","42"],["141","162","146","57","23","45","158","93","212","38","2","206","246","225","195","189","47","193","224","242","76","138","84","140","111","51","135","113","41","133","207","30","82","175","161","6","249","83","234","155","244","177","108","252","94","143","173","8","154","75","50","49","39","36","182","101","48","12","172","87","250","59","24","157","215","218","72","185","71","7","253","114","230","226","110","46","166","91","130","20","137","117","132","204","221","52","197","188","11","232","67","115","245","26","35","103","186","37","27","235","64","40","70","239","236","211","61","29","216","199","63","54","78","105","184","15","10","147","247","22","144","107","128","17","178","148","129","192"]]},"finalized":false,"execution_optimistic":false} diff --git a/cl/beacon/handler/test_data/validators_1.yaml b/cl/beacon/handler/test_data/validators_1.yaml new file mode 100644 index 00000000000..fa092bd7602 --- /dev/null +++ b/cl/beacon/handler/test_data/validators_1.yaml @@ -0,0 +1,9 @@ +- 
{"data":[{"index":"1","status":"withdrawal_possible","balance":"20125000000","validator":{"pubkey":"0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e","withdrawal_credentials":"0x001f09ed305c0767d56f1b3bdb25f301298027f8e98a8e0cd2dcbcc660723d7b","effective_balance":"20000000000","slashed":false,"activation_eligibility_epoch":"0","activation_epoch":"0","exit_epoch":"253","withdrawable_epoch":"257"}},{"index":"2","status":"active_slashed","balance":"25678253779","validator":{"pubkey":"0x89ece308f9d1f0131765212deca99697b112d61f9be9a5f1f3780a51335b3ff981747a0b2ca2179b96d2c0c9024e5224","withdrawal_credentials":"0x006adc4a1e4caba37c54d56d2411fd0df3a102f8489a4c1be535f4fd5f8810c9","effective_balance":"25000000000","slashed":true,"activation_eligibility_epoch":"0","activation_epoch":"0","exit_epoch":"261","withdrawable_epoch":"8448"}},{"index":"3","status":"active_slashed","balance":"35998164834","validator":{"pubkey":"0xac9b60d5afcbd5663a8a44b7c5a02f19e9a77ab0a35bd65809bb5c67ec582c897feb04decc694b13e08587f3ff9b5b60","withdrawal_credentials":"0x0081c852078a2ad430d438d7eaefc39646f53895292596bbe199e2d7d1884ab8","effective_balance":"32000000000","slashed":true,"activation_eligibility_epoch":"0","activation_epoch":"0","exit_epoch":"261","withdrawable_epoch":"8448"}}],"finalized":true,"execution_optimistic":false} +- {"data":[{"index":"2","status":"active_slashed","balance":"25678253779","validator":{"pubkey":"0x89ece308f9d1f0131765212deca99697b112d61f9be9a5f1f3780a51335b3ff981747a0b2ca2179b96d2c0c9024e5224","withdrawal_credentials":"0x006adc4a1e4caba37c54d56d2411fd0df3a102f8489a4c1be535f4fd5f8810c9","effective_balance":"25000000000","slashed":true,"activation_eligibility_epoch":"0","activation_epoch":"0","exit_epoch":"261","withdrawable_epoch":"8448"}},{"index":"3","status":"active_slashed","balance":"35998164834","validator":{"pubkey":"0xac9b60d5afcbd5663a8a44b7c5a02f19e9a77ab0a35bd65809bb5c67ec582c897feb04decc694b13e08587f3ff9b5b60","withdrawal_credentials":"0x0081c852078a2ad430d438d7eaefc39646f53895292596bbe199e2d7d1884ab8","effective_balance":"32000000000","slashed":true,"activation_eligibility_epoch":"0","activation_epoch":"0","exit_epoch":"261","withdrawable_epoch":"8448"}}],"finalized":true,"execution_optimistic":false} +- {"data":[{"index":"1","status":"withdrawal_possible","balance":"20125000000","validator":{"pubkey":"0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e","withdrawal_credentials":"0x001f09ed305c0767d56f1b3bdb25f301298027f8e98a8e0cd2dcbcc660723d7b","effective_balance":"20000000000","slashed":false,"activation_eligibility_epoch":"0","activation_epoch":"0","exit_epoch":"253","withdrawable_epoch":"257"}}],"finalized":true,"execution_optimistic":false} + +- {"data":[{"index":"1","balance":"20125000000"},{"index":"2","balance":"25678253779"},{"index":"3","balance":"35998164834"}],"finalized":true,"execution_optimistic":false} +- {"data":[{"index":"1","balance":"20125000000"}],"finalized":true,"execution_optimistic":false} + +- 
{"data":{"index":"1","status":"withdrawal_possible","balance":"20125000000","validator":{"pubkey":"0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e","withdrawal_credentials":"0x001f09ed305c0767d56f1b3bdb25f301298027f8e98a8e0cd2dcbcc660723d7b","effective_balance":"20000000000","slashed":false,"activation_eligibility_epoch":"0","activation_epoch":"0","exit_epoch":"253","withdrawable_epoch":"257"}},"finalized":true,"execution_optimistic":false} +- {"data":{"index":"1","status":"withdrawal_possible","balance":"20125000000","validator":{"pubkey":"0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e","withdrawal_credentials":"0x001f09ed305c0767d56f1b3bdb25f301298027f8e98a8e0cd2dcbcc660723d7b","effective_balance":"20000000000","slashed":false,"activation_eligibility_epoch":"0","activation_epoch":"0","exit_epoch":"253","withdrawable_epoch":"257"}},"finalized":true,"execution_optimistic":false} diff --git a/cl/beacon/handler/utils_test.go b/cl/beacon/handler/utils_test.go new file mode 100644 index 00000000000..a23d8654aab --- /dev/null +++ b/cl/beacon/handler/utils_test.go @@ -0,0 +1,70 @@ +package handler_test + +import ( + "context" + "testing" + + "github.com/ledgerwatch/erigon-lib/common/datadir" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/ledgerwatch/erigon/cl/antiquary" + "github.com/ledgerwatch/erigon/cl/antiquary/tests" + "github.com/ledgerwatch/erigon/cl/beacon/handler" + "github.com/ledgerwatch/erigon/cl/beacon/synced_data" + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/persistence" + state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state" + "github.com/ledgerwatch/erigon/cl/persistence/state/historical_states_reader" + "github.com/ledgerwatch/erigon/cl/phase1/core/state" + "github.com/ledgerwatch/erigon/cl/phase1/forkchoice" + "github.com/ledgerwatch/erigon/cl/pool" + "github.com/ledgerwatch/log/v3" + "github.com/spf13/afero" + "github.com/stretchr/testify/require" +) + +func setupTestingHandler(t *testing.T, v clparams.StateVersion, logger log.Logger) (db kv.RwDB, blocks []*cltypes.SignedBeaconBlock, f afero.Fs, preState, postState *state.CachingBeaconState, h *handler.ApiHandler, opPool pool.OperationsPool, syncedData *synced_data.SyncedDataManager, fcu *forkchoice.ForkChoiceStorageMock) { + bcfg := clparams.MainnetBeaconConfig + if v == clparams.Phase0Version { + blocks, preState, postState = tests.GetPhase0Random() + } else if v == clparams.BellatrixVersion { + bcfg.AltairForkEpoch = 1 + bcfg.BellatrixForkEpoch = 1 + blocks, preState, postState = tests.GetBellatrixRandom() + } else { + require.FailNow(t, "unknown state version") + } + fcu = forkchoice.NewForkChoiceStorageMock() + db = memdb.NewTestDB(t) + var reader *tests.MockBlockReader + reader, f = tests.LoadChain(blocks, postState, db, t) + + rawDB := persistence.NewAferoRawBlockSaver(f, &clparams.MainnetBeaconConfig) + bcfg.InitializeForkSchedule() + + ctx := context.Background() + vt := state_accessors.NewStaticValidatorTable() + a := antiquary.NewAntiquary(ctx, preState, vt, &bcfg, datadir.New("/tmp"), nil, db, nil, reader, nil, logger, true, true, f) + require.NoError(t, a.IncrementBeaconState(ctx, blocks[len(blocks)-1].Block.Slot+33)) + // historical states reader below + statesReader := historical_states_reader.NewHistoricalStatesReader(&bcfg, reader, vt, f, preState) + opPool = 
pool.NewOperationsPool(&bcfg) + fcu.Pool = opPool + syncedData = synced_data.NewSyncedDataManager(true, &bcfg) + gC := clparams.GenesisConfigs[clparams.MainnetNetwork] + h = handler.NewApiHandler( + &gC, + &bcfg, + rawDB, + db, + fcu, + opPool, + reader, + syncedData, + statesReader, + nil, + "test-version") + h.Init() + return +} diff --git a/cl/beacon/handler/validators.go b/cl/beacon/handler/validators.go new file mode 100644 index 00000000000..f08a9a850b6 --- /dev/null +++ b/cl/beacon/handler/validators.go @@ -0,0 +1,543 @@ +package handler + +import ( + "encoding/hex" + "fmt" + "math" + "net/http" + "strconv" + "strings" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies" + state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state" + "github.com/ledgerwatch/erigon/cl/phase1/core/state" + "golang.org/x/exp/slices" +) + +type validatorStatus int + +var validatorJsonTemplate = "{\"index\":\"%d\",\"status\":\"%s\",\"balance\":\"%d\",\"validator\":{\"pubkey\":\"0x%x\",\"withdrawal_credentials\":\"0x%x\",\"effective_balance\":\"%d\",\"slashed\":%t,\"activation_eligibility_epoch\":\"%d\",\"activation_epoch\":\"%d\",\"exit_epoch\":\"%d\",\"withdrawable_epoch\":\"%d\"}}" + +const ( + validatorPendingInitialized validatorStatus = 1 //"pending_initialized" + validatorPendingQueued validatorStatus = 2 //"pending_queued" + validatorActiveOngoing validatorStatus = 3 //"active_ongoing" + validatorActiveExiting validatorStatus = 4 //"active_exiting" + validatorActiveSlashed validatorStatus = 5 //"active_slashed" + validatorExitedUnslashed validatorStatus = 6 //"exited_unslashed" + validatorExitedSlashed validatorStatus = 7 //"exited_slashed" + validatorWithdrawalPossible validatorStatus = 8 //"withdrawal_possible" + validatorWithdrawalDone validatorStatus = 9 //"withdrawal_done" + validatorActive validatorStatus = 10 //"active" + validatorPending validatorStatus = 11 //"pending" + validatorExited validatorStatus = 12 //"exited" + validatorWithdrawal validatorStatus = 13 //"withdrawal" +) + +func validatorStatusFromString(s string) (validatorStatus, error) { + switch s { + case "pending_initialized": + return validatorPendingInitialized, nil + case "pending_queued": + return validatorPendingQueued, nil + case "active_ongoing": + return validatorActiveOngoing, nil + case "active_exiting": + return validatorActiveExiting, nil + case "active_slashed": + return validatorActiveSlashed, nil + case "exited_unslashed": + return validatorExitedUnslashed, nil + case "exited_slashed": + return validatorExitedSlashed, nil + case "withdrawal_possible": + return validatorWithdrawalPossible, nil + case "withdrawal_done": + return validatorWithdrawalDone, nil + case "active": + return validatorActive, nil + case "pending": + return validatorPending, nil + case "exited": + return validatorExited, nil + case "withdrawal": + return validatorWithdrawal, nil + default: + return 0, fmt.Errorf("invalid validator status %s", s) + } +} + +func validatorStatusFromValidator(v solid.Validator, currentEpoch uint64, balance uint64) validatorStatus { + activationEpoch := v.ActivationEpoch() + // pending section + if activationEpoch > currentEpoch { + activationEligibilityEpoch := v.ActivationEligibilityEpoch() + if activationEligibilityEpoch == math.MaxUint64 { + return validatorPendingInitialized + } + 
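A minimal sketch of how a test in this package might drive this helper (hypothetical test name and route; assumes net/http and net/http/httptest imports, that ApiHandler serves HTTP directly after Init, and that SyncedDataManager exposes an OnHeadState setter):

func TestValidatorsHeadSketch(t *testing.T) {
	// Only the handler, the post-state and the synced-data manager matter here.
	_, _, _, _, postState, h, _, syncedData, _ := setupTestingHandler(t, clparams.Phase0Version, log.Root())
	syncedData.OnHeadState(postState) // pretend the node is synced to the post-state

	req := httptest.NewRequest(http.MethodGet, "/eth/v1/beacon/states/head/validators", nil)
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)
	require.Equal(t, http.StatusOK, rec.Code)
}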
return validatorPendingQueued
+	}
+
+	exitEpoch := v.ExitEpoch()
+	// active section
+	if activationEpoch <= currentEpoch && currentEpoch < exitEpoch {
+		if exitEpoch == math.MaxUint64 {
+			return validatorActiveOngoing
+		}
+		slashed := v.Slashed()
+		if slashed {
+			return validatorActiveSlashed
+		}
+		return validatorActiveExiting
+	}
+
+	withdrawableEpoch := v.WithdrawableEpoch()
+	// exited section
+	if exitEpoch <= currentEpoch && currentEpoch < withdrawableEpoch {
+		if v.Slashed() {
+			return validatorExitedSlashed
+		}
+		return validatorExitedUnslashed
+	}
+
+	if balance == 0 {
+		return validatorWithdrawalDone
+	}
+	return validatorWithdrawalPossible
+
+}
+
+func (s validatorStatus) String() string {
+	switch s {
+	case validatorPendingInitialized:
+		return "pending_initialized"
+	case validatorPendingQueued:
+		return "pending_queued"
+	case validatorActiveOngoing:
+		return "active_ongoing"
+	case validatorActiveExiting:
+		return "active_exiting"
+	case validatorActiveSlashed:
+		return "active_slashed"
+	case validatorExitedUnslashed:
+		return "exited_unslashed"
+	case validatorExitedSlashed:
+		return "exited_slashed"
+	case validatorWithdrawalPossible:
+		return "withdrawal_possible"
+	case validatorWithdrawalDone:
+		return "withdrawal_done"
+	case validatorActive:
+		return "active"
+	case validatorPending:
+		return "pending"
+	case validatorExited:
+		return "exited"
+	case validatorWithdrawal:
+		return "withdrawal"
+	default:
+		panic("invalid validator status")
+	}
+}
+
+const maxValidatorsLookupFilter = 32
+
+func parseStatuses(s []string) ([]validatorStatus, error) {
+	seenAlready := make(map[validatorStatus]struct{})
+	statuses := make([]validatorStatus, 0, len(s))
+
+	if len(s) > maxValidatorsLookupFilter {
+		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, "too many statuses requested")
+	}
+
+	for _, status := range s {
+		s, err := validatorStatusFromString(status)
+		if err != nil {
+			return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+		}
+		if _, ok := seenAlready[s]; ok {
+			continue
+		}
+		seenAlready[s] = struct{}{}
+		statuses = append(statuses, s)
+	}
+	return statuses, nil
+}
+
+func checkValidValidatorId(s string) (bool, error) {
+	// If it starts with 0x, then it must be a 48-byte, 0x-prefixed hex string
+	if len(s) == 98 && s[:2] == "0x" {
+		// check if it is a valid hex string
+		if _, err := hex.DecodeString(s[2:]); err != nil {
+			return false, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+		}
+		return true, nil
+	}
+	// If it is not 0x-prefixed, then it must be a number; check that it parses as base-10
+	if _, err := strconv.ParseUint(s, 10, 64); err != nil {
+		return false, beaconhttp.NewEndpointError(http.StatusBadRequest, "invalid validator id")
+	}
+	return false, nil
+}
+
+func (a *ApiHandler) getAllValidators(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) {
+	ctx := r.Context()
+
+	tx, err := a.indiciesDB.BeginRo(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer tx.Rollback()
+
+	blockId, err := beaconhttp.StateIdFromRequest(r)
+	if err != nil {
+		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+	}
+
+	blockRoot, httpStatus, err := a.blockRootFromStateId(ctx, tx, blockId)
+	if err != nil {
+		return nil, beaconhttp.NewEndpointError(httpStatus, err.Error())
+	}
+
+	queryFilters, err := beaconhttp.StringListFromQueryParams(r, "status")
+	if err != nil {
+		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+	}
+
+	validatorIds, err := beaconhttp.StringListFromQueryParams(r, "id")
+	if err != nil {
+		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+	}
+
+	if len(validatorIds) > maxValidatorsLookupFilter {
+		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, "too many validators requested")
+	}
+	filterIndicies, err := parseQueryValidatorIndicies(tx, validatorIds)
+	if err != nil {
+		return nil, err
+	}
+	// Check the filters' validity
+	statusFilters, err := parseStatuses(queryFilters)
+	if err != nil {
+		return nil, err
+	}
+
+	if blockId.Head() { // If the request points at head, serve it from the head state we always keep in memory.
+		s, cn := a.syncedData.HeadState()
+		defer cn()
+		if s == nil {
+			return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "node is not synced")
+		}
+		return responseValidators(filterIndicies, statusFilters, state.Epoch(s), s.Balances(), s.Validators(), false)
+	}
+	slot, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, blockRoot)
+	if err != nil {
+		return nil, err
+	}
+
+	if slot == nil {
+		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "state not found")
+	}
+	stateEpoch := *slot / a.beaconChainCfg.SlotsPerEpoch
+	state, err := a.forkchoiceStore.GetStateAtBlockRoot(blockRoot, true)
+	if err != nil {
+		return nil, err
+	}
+	if state == nil {
+		// The state is no longer in fork choice: fall back to the historical states reader.
+		validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, *slot)
+		if err != nil {
+			return nil, err
+		}
+		balances, err := a.stateReader.ReadValidatorsBalances(tx, *slot)
+		if err != nil {
+			return nil, err
+		}
+		return responseValidators(filterIndicies, statusFilters, stateEpoch, balances, validatorSet, true)
+	}
+	return responseValidators(filterIndicies, statusFilters, stateEpoch, state.Balances(), state.Validators(), *slot <= a.forkchoiceStore.FinalizedSlot())
+}
+
+func parseQueryValidatorIndex(tx kv.Tx, id string) (uint64, error) {
+	isPublicKey, err := checkValidValidatorId(id)
+	if err != nil {
+		return 0, err
+	}
+	if isPublicKey {
+		var b48 libcommon.Bytes48
+		if err := b48.UnmarshalText([]byte(id)); err != nil {
+			return 0, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+		}
+		has, err := tx.Has(kv.InvertedValidatorPublicKeys, b48[:])
+		if err != nil {
+			return 0, err
+		}
+		if !has {
+			return math.MaxUint64, nil
+		}
+		idx, ok, err := state_accessors.ReadValidatorIndexByPublicKey(tx, b48)
+		if err != nil {
+			return 0, err
+		}
+		if !ok {
+			return 0, beaconhttp.NewEndpointError(http.StatusNotFound, "validator not found")
+		}
+		return idx, nil
+	}
+	idx, err := strconv.ParseUint(id, 10, 64)
+	if err != nil {
+		return 0, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+	}
+	return idx, nil
+
+}
+
+func parseQueryValidatorIndicies(tx kv.Tx, ids []string) ([]uint64, error) {
+	filterIndicies := make([]uint64, 0, len(ids))
+
+	for _, id := range ids {
+		idx, err := parseQueryValidatorIndex(tx, id)
+		if err != nil {
+			return nil, err
+		}
+		filterIndicies = append(filterIndicies, idx)
+	}
+	return filterIndicies, nil
+}
+
+func (a *ApiHandler) getSingleValidator(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) {
+	ctx := r.Context()
+
+	tx, err := a.indiciesDB.BeginRo(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer tx.Rollback()
+
+	blockId, err := beaconhttp.StateIdFromRequest(r)
+	if err != nil {
+		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+	}
+
+	blockRoot, httpStatus, err := a.blockRootFromStateId(ctx, tx, blockId)
+	if err != nil {
+		return nil, beaconhttp.NewEndpointError(httpStatus, err.Error())
+	}
+
+	validatorId, err := beaconhttp.StringFromRequest(r, "validator_id")
+	if err != nil {
+		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+	}
+
+	validatorIndex, err := parseQueryValidatorIndex(tx, validatorId)
+	if err != nil {
+		return nil, err
+	}
+
+	if blockId.Head() { // If the request points at head, serve it from the head state we always keep in memory.
+		s, cn := a.syncedData.HeadState()
+		defer cn()
+		if s == nil { // check for nil before dereferencing the head state
+			return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "node is not synced")
+		}
+		if s.ValidatorLength() <= int(validatorIndex) {
+			return newBeaconResponse([]int{}).WithFinalized(false), nil
+		}
+		return responseValidator(validatorIndex, state.Epoch(s), s.Balances(), s.Validators(), false)
+	}
+	slot, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, blockRoot)
+	if err != nil {
+		return nil, err
+	}
+
+	if slot == nil {
+		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "state not found")
+	}
+	stateEpoch := *slot / a.beaconChainCfg.SlotsPerEpoch
+	state, err := a.forkchoiceStore.GetStateAtBlockRoot(blockRoot, true)
+	if err != nil {
+		return nil, err
+	}
+	if state == nil {
+		validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, *slot)
+		if err != nil {
+			return nil, err
+		}
+		balances, err := a.stateReader.ReadValidatorsBalances(tx, *slot)
+		if err != nil {
+			return nil, err
+		}
+		return responseValidator(validatorIndex, stateEpoch, balances, validatorSet, true)
+	}
+	return responseValidator(validatorIndex, stateEpoch, state.Balances(), state.Validators(), *slot <= a.forkchoiceStore.FinalizedSlot())
+}
+
+func (a *ApiHandler) getAllValidatorsBalances(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) {
+	ctx := r.Context()
+
+	tx, err := a.indiciesDB.BeginRo(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer tx.Rollback()
+
+	blockId, err := beaconhttp.StateIdFromRequest(r)
+	if err != nil {
+		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+	}
+
+	blockRoot, httpStatus, err := a.blockRootFromStateId(ctx, tx, blockId)
+	if err != nil {
+		return nil, beaconhttp.NewEndpointError(httpStatus, err.Error())
+	}
+
+	validatorIds, err := beaconhttp.StringListFromQueryParams(r, "id")
+	if err != nil {
+		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+	}
+
+	if len(validatorIds) > maxValidatorsLookupFilter {
+		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, "too many validators requested")
+	}
+	filterIndicies, err := parseQueryValidatorIndicies(tx, validatorIds)
+	if err != nil {
+		return nil, err
+	}
+
+	if blockId.Head() { // If the request points at head, serve it from the head state we always keep in memory.
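+		// The head state is the in-memory copy kept by the synced-data manager,
+		// so "head" requests are served without touching the database; any other
+		// state id falls back to fork choice or the historical reader below.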
+ s, cn := a.syncedData.HeadState() + defer cn() + if s == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "node is not synced") + } + return responseValidatorsBalances(filterIndicies, state.Epoch(s), s.Balances(), false) + } + slot, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, blockRoot) + if err != nil { + return nil, err + } + + if slot == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "state not found") + } + stateEpoch := *slot / a.beaconChainCfg.SlotsPerEpoch + state, err := a.forkchoiceStore.GetStateAtBlockRoot(blockRoot, true) + if err != nil { + return nil, err + } + if state == nil { + balances, err := a.stateReader.ReadValidatorsBalances(tx, *slot) + if err != nil { + return nil, err + } + return responseValidatorsBalances(filterIndicies, stateEpoch, balances, true) + } + return responseValidatorsBalances(filterIndicies, stateEpoch, state.Balances(), *slot <= a.forkchoiceStore.FinalizedSlot()) +} + +type directString string + +func (d directString) MarshalJSON() ([]byte, error) { + return []byte(d), nil +} + +func responseValidators(filterIndicies []uint64, filterStatuses []validatorStatus, stateEpoch uint64, balances solid.Uint64ListSSZ, validators *solid.ValidatorSet, finalized bool) (*beaconhttp.BeaconResponse, error) { + var b strings.Builder + b.WriteString("[") + first := true + var err error + validators.Range(func(i int, v solid.Validator, l int) bool { + if len(filterIndicies) > 0 && !slices.Contains(filterIndicies, uint64(i)) { + return true + } + status := validatorStatusFromValidator(v, stateEpoch, balances.Get(i)) + if shouldStatusBeFiltered(status, filterStatuses) { + return true + } + if !first { + if _, err = b.WriteString(","); err != nil { + return false + } + } + first = false + if _, err = b.WriteString(fmt.Sprintf(validatorJsonTemplate, i, status.String(), balances.Get(i), v.PublicKey(), v.WithdrawalCredentials(), v.EffectiveBalance(), v.Slashed(), v.ActivationEligibilityEpoch(), v.ActivationEpoch(), v.ExitEpoch(), v.WithdrawableEpoch())); err != nil { + return false + } + return true + }) + if err != nil { + return nil, err + } + + _, err = b.WriteString("]\n") + + return newBeaconResponse(directString(b.String())).WithFinalized(finalized), err +} + +func responseValidator(idx uint64, stateEpoch uint64, balances solid.Uint64ListSSZ, validators *solid.ValidatorSet, finalized bool) (*beaconhttp.BeaconResponse, error) { + var b strings.Builder + var err error + if validators.Length() <= int(idx) { + return newBeaconResponse([]int{}).WithFinalized(finalized), nil + } + + v := validators.Get(int(idx)) + status := validatorStatusFromValidator(v, stateEpoch, balances.Get(int(idx))) + + if _, err = b.WriteString(fmt.Sprintf(validatorJsonTemplate, idx, status.String(), balances.Get(int(idx)), v.PublicKey(), v.WithdrawalCredentials(), v.EffectiveBalance(), v.Slashed(), v.ActivationEligibilityEpoch(), v.ActivationEpoch(), v.ExitEpoch(), v.WithdrawableEpoch())); err != nil { + return nil, err + } + + _, err = b.WriteString("\n") + + return newBeaconResponse(directString(b.String())).WithFinalized(finalized), err +} + +func responseValidatorsBalances(filterIndicies []uint64, stateEpoch uint64, balances solid.Uint64ListSSZ, finalized bool) (*beaconhttp.BeaconResponse, error) { + var b strings.Builder + b.WriteString("[") + jsonTemplate := "{\"index\":\"%d\",\"balance\":\"%d\"}" + first := true + var err error + balances.Range(func(i int, v uint64, l int) bool { + if len(filterIndicies) > 0 && 
!slices.Contains(filterIndicies, uint64(i)) { + return true + } + + if !first { + if _, err = b.WriteString(","); err != nil { + return false + } + } + first = false + if _, err = b.WriteString(fmt.Sprintf(jsonTemplate, i, v)); err != nil { + return false + } + return true + }) + if err != nil { + return nil, err + } + + _, err = b.WriteString("]\n") + + return newBeaconResponse(directString(b.String())).WithFinalized(finalized), err +} + +func shouldStatusBeFiltered(status validatorStatus, statuses []validatorStatus) bool { + if len(statuses) == 0 { + return false + } + for _, s := range statuses { + if (s == status) || (s == validatorActive && (status == validatorActiveOngoing || status == validatorActiveExiting || status == validatorActiveSlashed)) || + (s == validatorPending && (status == validatorPendingInitialized || status == validatorPendingQueued)) || + (s == validatorExited && (status == validatorExitedUnslashed || status == validatorExitedSlashed)) || + (s == validatorWithdrawal && (status == validatorWithdrawalPossible || status == validatorWithdrawalDone)) { + return false + } + } + return true // filter if no filter condition is met +} diff --git a/cl/beacon/router.go b/cl/beacon/router.go index 018e138342c..4ffb605c7fc 100644 --- a/cl/beacon/router.go +++ b/cl/beacon/router.go @@ -7,6 +7,7 @@ import ( "strings" "github.com/go-chi/chi/v5" + "github.com/go-chi/cors" "github.com/ledgerwatch/erigon/cl/beacon/beacon_router_configuration" "github.com/ledgerwatch/erigon/cl/beacon/handler" "github.com/ledgerwatch/erigon/cl/beacon/validatorapi" @@ -25,6 +26,14 @@ func ListenAndServe(beaconHandler *LayeredBeaconHandler, routerCfg beacon_router } defer listener.Close() mux := chi.NewRouter() + + mux.Use(cors.Handler( + cors.Options{ + AllowedOrigins: routerCfg.AllowedOrigins, + AllowedMethods: routerCfg.AllowedMethods, + AllowCredentials: routerCfg.AllowCredentials, + MaxAge: 4, + })) // enforce json content type mux.Use(func(h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { diff --git a/cl/beacon/synced_data/synced_data.go b/cl/beacon/synced_data/synced_data.go index abc04251670..0d6f7e0789f 100644 --- a/cl/beacon/synced_data/synced_data.go +++ b/cl/beacon/synced_data/synced_data.go @@ -28,18 +28,18 @@ func (s *SyncedDataManager) OnHeadState(newState *state.CachingBeaconState) (err if !s.enabled { return } - // Schedule update. 
-	go func() {
-		s.mu.Lock()
-		defer s.mu.Unlock()
-		if s.headState == nil {
-			s.headState, err = newState.Copy()
-		}
-		err = newState.CopyInto(s.headState)
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.headState == nil {
+		s.headState, err = newState.Copy()
 		if err != nil {
-			log.Error("failed to copy head state", "err", err)
+			return err
 		}
-	}()
+	}
+	err = newState.CopyInto(s.headState)
+	if err != nil {
+		log.Error("failed to copy head state", "err", err)
+	}
 	return
 }
 
@@ -59,7 +59,7 @@ func (s *SyncedDataManager) Syncing() bool {
 	s.mu.RLock()
 	defer s.mu.RUnlock()
 	if s.headState == nil {
-		return false
+		return true
 	}
 
 	headEpoch := utils.GetCurrentEpoch(s.headState.GenesisTime(), s.cfg.SecondsPerSlot, s.cfg.SlotsPerEpoch)
diff --git a/cl/beacon/validatorapi/endpoints.go b/cl/beacon/validatorapi/get.go
similarity index 73%
rename from cl/beacon/validatorapi/endpoints.go
rename to cl/beacon/validatorapi/get.go
index ed06d471a2f..00cbbf1e374 100644
--- a/cl/beacon/validatorapi/endpoints.go
+++ b/cl/beacon/validatorapi/get.go
@@ -5,10 +5,11 @@ import (
 	"net/http"
 	"strconv"
 	"strings"
-	"unicode"
 
+	"github.com/gfx-labs/sse"
 	"github.com/go-chi/chi/v5"
 	"github.com/ledgerwatch/erigon-lib/common"
+	"github.com/ledgerwatch/erigon-lib/common/hexutil"
 	"github.com/ledgerwatch/erigon-lib/common/hexutility"
 	"github.com/ledgerwatch/erigon/cl/beacon/beaconhttp"
 	"github.com/ledgerwatch/erigon/cl/clparams"
@@ -17,7 +18,7 @@ import (
 	"github.com/ledgerwatch/erigon/cl/utils"
 )
 
-func (v *ValidatorApiHandler) GetEthV1NodeSyncing(r *http.Request) (any, error) {
+func (v *ValidatorApiHandler) GetEthV1NodeSyncing(w http.ResponseWriter, r *http.Request) (any, error) {
 	_, slot, err := v.FC.GetHead()
 	if err != nil {
 		return nil, err
@@ -41,26 +42,24 @@ func (v *ValidatorApiHandler) GetEthV1NodeSyncing(r *http.Request) (any, error)
 	}
 
 	return map[string]any{
-		"head_slot":     strconv.FormatUint(slot, 10),
-		"sync_distance": syncDistance,
-		"is_syncing":    isSyncing,
-		"el_offline":    elOffline,
-		// TODO: figure out how to populat this field
-		"is_optimistic": true,
-	}, nil
-}
-
-func (v *ValidatorApiHandler) EventSourceGetV1Events(w http.ResponseWriter, r *http.Request) {
+		"data": map[string]any{
+			"head_slot":     strconv.FormatUint(slot, 10),
+			"sync_distance": syncDistance,
+			"is_syncing":    isSyncing,
+			"el_offline":    elOffline,
+			// TODO: figure out how to populate this field
+			"is_optimistic": true,
+		}}, nil
 }
 
-func (v *ValidatorApiHandler) GetEthV1ConfigSpec(r *http.Request) (*clparams.BeaconChainConfig, error) {
+func (v *ValidatorApiHandler) GetEthV1ConfigSpec(w http.ResponseWriter, r *http.Request) (*clparams.BeaconChainConfig, error) {
 	if v.BeaconChainCfg == nil {
 		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "beacon config not found")
 	}
 	return v.BeaconChainCfg, nil
 }
 
-func (v *ValidatorApiHandler) GetEthV1BeaconGenesis(r *http.Request) (any, error) {
+func (v *ValidatorApiHandler) GetEthV1BeaconGenesis(w http.ResponseWriter, r *http.Request) (any, error) {
 	if v.GenesisCfg == nil {
 		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "genesis config not found")
 	}
@@ -69,13 +68,14 @@ func (v *ValidatorApiHandler) GetEthV1BeaconGenesis(r *http.Request) (any, error
 		return nil, beaconhttp.NewEndpointError(http.StatusInternalServerError, err.Error())
 	}
 	return map[string]any{
-		"genesis_time":           v.GenesisCfg.GenesisTime,
-		"genesis_validator_root": v.GenesisCfg.GenesisValidatorRoot,
-		"genesis_fork_version":   hexutility.Bytes(digest[:]),
-	}, nil
+		"data": map[string]any{
+			"genesis_time": 
v.GenesisCfg.GenesisTime, + "genesis_validator_root": v.GenesisCfg.GenesisValidatorRoot, + "genesis_fork_version": hexutility.Bytes(digest[:]), + }}, nil } -func (v *ValidatorApiHandler) GetEthV1BeaconStatesStateIdFork(r *http.Request) (any, error) { +func (v *ValidatorApiHandler) GetEthV1BeaconStatesStateIdFork(w http.ResponseWriter, r *http.Request) (any, error) { stateId := chi.URLParam(r, "state_id") state, err := v.privateGetStateFromStateId(stateId) if err != nil { @@ -95,7 +95,8 @@ func (v *ValidatorApiHandler) GetEthV1BeaconStatesStateIdFork(r *http.Request) ( }, }, nil } -func (v *ValidatorApiHandler) GetEthV1BeaconStatesStateIdValidatorsValidatorId(r *http.Request) (any, error) { + +func (v *ValidatorApiHandler) GetEthV1BeaconStatesStateIdValidatorsValidatorId(w http.ResponseWriter, r *http.Request) (any, error) { stateId := chi.URLParam(r, "state_id") // grab the correct state for the given state id beaconState, err := v.privateGetStateFromStateId(stateId) @@ -206,44 +207,59 @@ func (v *ValidatorApiHandler) GetEthV1BeaconStatesStateIdValidatorsValidatorId(r }, nil } -func (v *ValidatorApiHandler) privateGetStateFromStateId(stateId string) (*state.CachingBeaconState, error) { - switch { - case stateId == "head": - // Now check the head - headRoot, _, err := v.FC.GetHead() - if err != nil { - return nil, err - } - return v.FC.GetStateAtBlockRoot(headRoot, true) - case stateId == "genesis": - // not supported - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "genesis block not found") - case stateId == "finalized": - return v.FC.GetStateAtBlockRoot(v.FC.FinalizedCheckpoint().BlockRoot(), true) - case stateId == "justified": - return v.FC.GetStateAtBlockRoot(v.FC.JustifiedCheckpoint().BlockRoot(), true) - case strings.HasPrefix(stateId, "0x"): - // assume is hex has, so try to parse - hsh := common.Hash{} - err := hsh.UnmarshalText([]byte(stateId)) - if err != nil { - return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Sprintf("Invalid state ID: %s", stateId)) - } - return v.FC.GetStateAtStateRoot(hsh, true) - case isInt(stateId): - // ignore the error bc isInt check succeeded. yes this doesn't protect for overflow, they will request slot 0 and it will fail. 
good
-		val, _ := strconv.ParseUint(stateId, 10, 64)
-		return v.FC.GetStateAtSlot(val, true)
-	default:
-		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Sprintf("Invalid state ID: %s", stateId))
+func (v *ValidatorApiHandler) GetEthV1EthNodeSyncing(w http.ResponseWriter, r *http.Request) (any, error) {
+	// TODO: populate this map
+	o := map[string]any{
+		"data": map[string]any{},
 	}
+	return o, nil
 }
 
+func (v *ValidatorApiHandler) GetEthV3ValidatorBlocksSlot(w http.ResponseWriter, r *http.Request) (any, error) {
+	// TODO: populate this map
+	o := map[string]any{
+		"data": map[string]any{},
+	}
 
-func isInt(s string) bool {
-	for _, c := range s {
-		if !unicode.IsDigit(c) {
-			return false
-		}
+	slotString := chi.URLParam(r, "slot")
+	slot, err := strconv.ParseUint(slotString, 10, 64)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse slot: %w", err)
+	}
+	randaoRevealString := r.URL.Query().Get("randao_reveal")
+	randaoReveal, err := hexutil.Decode(randaoRevealString)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse randao_reveal: %w", err)
+	}
+	graffitiString := r.URL.Query().Get("graffiti")
+	if graffitiString == "" {
+		graffitiString = "0x"
+	}
+	graffiti, err := hexutil.Decode(graffitiString)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse graffiti: %w", err)
+	}
+	skipRandaoVerification := r.URL.Query().Has("skip_randao_verification")
+	//if skipRandaoVerification {
+	//	if isInfinity(randaoReveal) {
+	//		return nil, beaconhttp.NewEndpointError(400, "randao reveal must be set to infinity if skip randao verification is set")
+	//	}
+	//}
+	_, _, _, _ = slot, graffiti, randaoReveal, skipRandaoVerification
+	return o, nil
+}
+
+func (v *ValidatorApiHandler) EventSourceGetV1Events(w http.ResponseWriter, r *http.Request) {
+	sink, err := sse.DefaultUpgrader.Upgrade(w, r)
+	if err != nil {
+		// OK to ignore this error.
+		return
+	}
+	topics := r.URL.Query()["topics"]
+	for _, topic := range topics {
+		sink.Encode(&sse.Event{
+			Event: []byte(topic),
+			Data:  nil,
+		})
+		// OK to ignore this error; maybe log it later
 	}
-	return true
 }
diff --git a/cl/beacon/validatorapi/handler.go b/cl/beacon/validatorapi/handler.go
index 5f4f28fcb91..41e9190fa6d 100644
--- a/cl/beacon/validatorapi/handler.go
+++ b/cl/beacon/validatorapi/handler.go
@@ -6,6 +6,7 @@ import (
 
 	"github.com/go-chi/chi/v5"
 	"github.com/ledgerwatch/erigon/cl/beacon/beaconhttp"
+	"github.com/ledgerwatch/erigon/cl/beacon/building"
 	"github.com/ledgerwatch/erigon/cl/clparams"
 	"github.com/ledgerwatch/erigon/cl/phase1/forkchoice"
 )
@@ -16,6 +17,8 @@ type ValidatorApiHandler struct {
 	BeaconChainCfg *clparams.BeaconChainConfig
 	GenesisCfg     *clparams.GenesisConfig
 
+	state *building.State
+
 	o   sync.Once
 	mux *chi.Mux
 }
@@ -23,6 +26,7 @@ type ValidatorApiHandler struct {
 func (v *ValidatorApiHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	v.o.Do(func() {
 		v.mux = chi.NewRouter()
+		v.state = building.NewState()
 		v.Route(v.mux)
 	})
 	v.mux.ServeHTTP(w, r)
@@ -32,38 +36,39 @@ func (v *ValidatorApiHandler) Route(r chi.Router) {
 	r.Route("/eth", func(r chi.Router) {
 		r.Route("/v1", func(r chi.Router) {
 			r.Route("/beacon", func(r chi.Router) {
-				r.Get("/genesis", beaconhttp.HandleEndpointFunc(v.GetEthV1BeaconGenesis))
 				r.Route("/states", func(r chi.Router) {
 					r.Route("/{state_id}", func(r chi.Router) {
 						r.Get("/fork", beaconhttp.HandleEndpointFunc(v.GetEthV1BeaconStatesStateIdFork))
-						r.Get("/validators/{validator_id}", beaconhttp.HandleEndpointFunc(v.GetEthV1BeaconStatesStateIdValidatorsValidatorId))
+						// r.Get("/validators/{validator_id}", beaconhttp.HandleEndpointFunc(v.GetEthV1BeaconStatesStateIdValidatorsValidatorId))
 					})
 				})
-				r.Post("/binded_blocks", http.NotFound)
-				r.Post("/blocks", http.NotFound)
+				r.Post("/blocks", beaconhttp.HandleEndpointFunc(v.PostEthV1BeaconBlocks))
+				r.Post("/blinded_blocks", beaconhttp.HandleEndpointFunc(v.PostEthV1BeaconBlindedBlocks))
 				r.Route("/pool", func(r chi.Router) {
-					r.Post("/attestations", http.NotFound)
-					r.Post("/sync_committees", http.NotFound)
+					r.Post("/attestations", beaconhttp.HandleEndpointFunc(v.PostEthV1BeaconPoolAttestations))
+					r.Post("/sync_committees", beaconhttp.HandleEndpointFunc(v.PostEthV1BeaconPoolSyncCommittees))
 				})
-				r.Get("/node/syncing", beaconhttp.HandleEndpointFunc(v.GetEthV1NodeSyncing))
 			})
-			r.Get("/config/spec", beaconhttp.HandleEndpointFunc(v.GetEthV1ConfigSpec))
-			r.Get("/events", http.NotFound)
+			r.Route("/node", func(r chi.Router) {
+				r.Get("/syncing", beaconhttp.HandleEndpointFunc(v.GetEthV1NodeSyncing))
+			})
+			r.Get("/events", v.EventSourceGetV1Events)
 			r.Route("/validator", func(r chi.Router) {
-				r.Route("/duties", func(r chi.Router) {
-					r.Post("/attester/{epoch}", http.NotFound)
-					r.Get("/proposer/{epoch}", http.NotFound)
-					r.Post("/sync/{epoch}", http.NotFound)
-				})
+				// implemented by archive api (for now)
+				// r.Route("/duties", func(r chi.Router) {
+				// 	r.Post("/attester/{epoch}", http.NotFound)
+				// 	r.Post("/sync/{epoch}", http.NotFound)
+				// 	r.Get("/proposer/{epoch}", http.NotFound)
+				// })
 				// r.Get("/blinded_blocks/{slot}", http.NotFound) - deprecated
 				r.Get("/attestation_data", http.NotFound)
 				r.Get("/aggregate_attestation", http.NotFound)
-				r.Post("/aggregate_and_proofs", http.NotFound)
-				r.Post("/beacon_committee_subscriptions", http.NotFound)
-				r.Post("/sync_committee_subscriptions", http.NotFound)
+				r.Post("/aggregate_and_proofs", beaconhttp.HandleEndpointFunc(v.PostEthV1ValidatorAggregateAndProofs))
+				r.Post("/beacon_committee_subscriptions", beaconhttp.HandleEndpointFunc(v.PostEthV1ValidatorBeaconCommitteeSubscriptions))
+				r.Post("/sync_committee_subscriptions", beaconhttp.HandleEndpointFunc(v.PostEthV1ValidatorSyncCommitteeSubscriptions))
 				r.Get("/sync_committee_contribution", http.NotFound)
-				r.Post("/contribution_and_proofs", http.NotFound)
-				r.Post("/prepare_beacon_proposer", http.NotFound)
+				r.Post("/contribution_and_proofs", beaconhttp.HandleEndpointFunc(v.PostEthV1ValidatorContributionAndProofs))
+				r.Post("/prepare_beacon_proposer", beaconhttp.HandleEndpointFunc(v.PostEthV1ValidatorPrepareBeaconProposer))
 			})
 		})
 		r.Route("/v2", func(r chi.Router) {
@@ -73,14 +78,15 @@ func (v *ValidatorApiHandler) Route(r chi.Router) {
 				})
 			})
 			r.Route("/beacon", func(r chi.Router) {
-				r.Post("/blocks/{block_id}", http.NotFound)
+				r.Post("/blocks", beaconhttp.HandleEndpointFunc(v.PostEthV2BeaconBlocks))
+				r.Post("/blinded_blocks", beaconhttp.HandleEndpointFunc(v.PostEthV2BeaconBlindedBlocks))
 			})
 			r.Route("/validator", func(r chi.Router) {
-				r.Post("/blocks/{slot}", http.NotFound)
+				r.Post("/blocks/{slot}", beaconhttp.HandleEndpointFunc(v.GetEthV3ValidatorBlocksSlot))
 			})
 		})
 		r.Route("/v3", func(r chi.Router) {
-			r.Route("/beacon", func(r chi.Router) {
+			r.Route("/validator", func(r chi.Router) {
 				r.Get("/blocks/{block_id}", http.NotFound)
 			})
 		})
diff --git a/cl/beacon/validatorapi/helpers.go b/cl/beacon/validatorapi/helpers.go
new file mode 100644
index 00000000000..af0319e0ce5
--- /dev/null
+++ b/cl/beacon/validatorapi/helpers.go
@@ -0,0 +1,55 @@
+package validatorapi
+
+import (
+	"fmt"
+	"net/http"
+	"strconv"
+	"strings"
+	"unicode"
+
+	"github.com/ledgerwatch/erigon-lib/common"
+	"github.com/ledgerwatch/erigon/cl/beacon/beaconhttp"
+	"github.com/ledgerwatch/erigon/cl/phase1/core/state"
+)
+
+func (v *ValidatorApiHandler) privateGetStateFromStateId(stateId string) (*state.CachingBeaconState, error) {
+	switch {
+	case stateId == "head":
+		// Now check the head
+		headRoot, _, err := v.FC.GetHead()
+		if err != nil {
+			return nil, err
+		}
+		return v.FC.GetStateAtBlockRoot(headRoot, true)
+	case stateId == "genesis":
+		// not supported
+		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "genesis block not found")
+	case stateId == "finalized":
+		return v.FC.GetStateAtBlockRoot(v.FC.FinalizedCheckpoint().BlockRoot(), true)
+	case stateId == "justified":
+		return v.FC.GetStateAtBlockRoot(v.FC.JustifiedCheckpoint().BlockRoot(), true)
+	case strings.HasPrefix(stateId, "0x"):
+		// assume it is a hex hash, so try to parse it
+		hsh := common.Hash{}
+		err := hsh.UnmarshalText([]byte(stateId))
+		if err != nil {
+			return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Sprintf("Invalid state ID: %s", stateId))
+		}
+		return v.FC.GetStateAtStateRoot(hsh, true)
+	case isInt(stateId):
+		// ignore the error bc isInt check succeeded. yes this doesn't protect for overflow, they will request slot 0 and it will fail. 
good + val, _ := strconv.ParseUint(stateId, 10, 64) + return v.FC.GetStateAtSlot(val, true) + default: + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Sprintf("Invalid state ID: %s", stateId)) + } +} + +func isInt(s string) bool { + for _, c := range s { + if !unicode.IsDigit(c) { + return false + } + } + return true +} diff --git a/cl/beacon/validatorapi/post.go b/cl/beacon/validatorapi/post.go new file mode 100644 index 00000000000..207eec480e8 --- /dev/null +++ b/cl/beacon/validatorapi/post.go @@ -0,0 +1,145 @@ +package validatorapi + +import ( + "encoding/json" + "net/http" + + "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" + "github.com/ledgerwatch/erigon/cl/beacon/building" + "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" +) + +func (v *ValidatorApiHandler) PostEthV1ValidatorPrepareBeaconProposer(w http.ResponseWriter, r *http.Request) (*int, error) { + var req []building.PrepareBeaconProposer + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return nil, beaconhttp.NewEndpointError(400, "invalid request: "+err.Error()) + } + for _, x := range req { + v.state.SetFeeRecipient(x.ValidatorIndex, x.FeeRecipient) + } + return nil, nil +} + +func (v *ValidatorApiHandler) PostEthV1ValidatorContributionAndProofs(w http.ResponseWriter, r *http.Request) (*int, error) { + var req []*cltypes.ContributionAndProof + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return nil, beaconhttp.NewEndpointError(400, "invalid request: "+err.Error()) + } + // TODO: this endpoint + return nil, beaconhttp.NewEndpointError(404, "not implemented") +} + +func (v *ValidatorApiHandler) PostEthV1ValidatorSyncCommitteeSubscriptions(w http.ResponseWriter, r *http.Request) (*int, error) { + var req []building.SyncCommitteeSubscription + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return nil, beaconhttp.NewEndpointError(400, "invalid request: "+err.Error()) + } + // TODO: this endpoint + return nil, beaconhttp.NewEndpointError(404, "not implemented") +} + +func (v *ValidatorApiHandler) PostEthV1ValidatorBeaconCommitteeSubscriptions(w http.ResponseWriter, r *http.Request) (*int, error) { + var req []building.BeaconCommitteeSubscription + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return nil, beaconhttp.NewEndpointError(400, "invalid request: "+err.Error()) + } + // TODO: this endpoint + return nil, beaconhttp.NewEndpointError(404, "not implemented") +} + +func (v *ValidatorApiHandler) PostEthV1ValidatorAggregateAndProofs(w http.ResponseWriter, r *http.Request) (*int, error) { + var req []cltypes.SignedAggregateAndProof + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return nil, beaconhttp.NewEndpointError(400, "invalid request: "+err.Error()) + } + // TODO: this endpoint + return nil, beaconhttp.NewEndpointError(404, "not implemented") +} + +func (v *ValidatorApiHandler) PostEthV1BeaconPoolSyncCommittees(w http.ResponseWriter, r *http.Request) (*int, error) { + var req []*solid.SyncCommittee + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return nil, beaconhttp.NewEndpointError(400, "invalid request: "+err.Error()) + } + // TODO: this endpoint + return nil, beaconhttp.NewEndpointError(404, "not implemented") +} + +func (v *ValidatorApiHandler) PostEthV1BeaconPoolAttestations(w http.ResponseWriter, r *http.Request) (*int, error) { + var req []*solid.Attestation + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return nil, 
beaconhttp.NewEndpointError(400, "invalid request: "+err.Error()) + } + // TODO: this endpoint + return nil, beaconhttp.NewEndpointError(404, "not implemented") +} + +func (v *ValidatorApiHandler) PostEthV1BeaconBlocks(w http.ResponseWriter, r *http.Request) (*int, error) { + ethConsensusVersion := r.Header.Get("Eth-Consensus-Version") + var req cltypes.SignedBeaconBlock + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return nil, beaconhttp.NewEndpointError(400, "invalid request: "+err.Error()) + } + // TODO: this endpoint + _ = ethConsensusVersion + return nil, beaconhttp.NewEndpointError(404, "not implemented") +} + +func (v *ValidatorApiHandler) PostEthV2BeaconBlocks(w http.ResponseWriter, r *http.Request) (*int, error) { + broadcastValidation := r.URL.Query().Get("broadcast_validation") + if broadcastValidation == "" { + broadcastValidation = "gossip" + } + ethConsensusVersion := r.Header.Get("Eth-Consensus-Version") + if ethConsensusVersion == "" { + return nil, beaconhttp.NewEndpointError(400, "no eth consensus version set") + } + var req cltypes.SignedBeaconBlock + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return nil, beaconhttp.NewEndpointError(400, "invalid request: "+err.Error()) + } + // TODO: this endpoint + _, _ = broadcastValidation, ethConsensusVersion + return nil, beaconhttp.NewEndpointError(404, "not implemented") +} + +func (v *ValidatorApiHandler) PostEthV1BeaconBlindedBlocks(w http.ResponseWriter, r *http.Request) (*int, error) { + ethConsensusVersion := r.Header.Get("Eth-Consensus-Version") + var req cltypes.SignedBlindedBeaconBlock + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return nil, beaconhttp.NewEndpointError(400, "invalid request: "+err.Error()) + } + // TODO: this endpoint + _ = ethConsensusVersion + return nil, beaconhttp.NewEndpointError(404, "not implemented") +} + +func (v *ValidatorApiHandler) PostEthV2BeaconBlindedBlocks(w http.ResponseWriter, r *http.Request) (*int, error) { + broadcastValidation := r.URL.Query().Get("broadcast_validation") + if broadcastValidation == "" { + broadcastValidation = "gossip" + } + ethConsensusVersion := r.Header.Get("Eth-Consensus-Version") + if ethConsensusVersion == "" { + return nil, beaconhttp.NewEndpointError(400, "no eth consensus version set") + } + var req cltypes.SignedBlindedBeaconBlock + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return nil, beaconhttp.NewEndpointError(400, "invalid request: "+err.Error()) + } + // TODO: this endpoint + _, _ = broadcastValidation, ethConsensusVersion + return nil, beaconhttp.NewEndpointError(404, "not implemented") +} diff --git a/cl/clparams/config.go b/cl/clparams/config.go index a0bb995e3fe..ec7ba8516fd 100644 --- a/cl/clparams/config.go +++ b/cl/clparams/config.go @@ -308,20 +308,21 @@ type BeaconChainConfig struct { JustificationBitsLength uint64 `yaml:"JUSTIFICATION_BITS_LENGTH"` // JustificationBitsLength defines number of epochs to track when implementing k-finality in Casper FFG. // Misc constants. - PresetBase string `yaml:"PRESET_BASE" spec:"true"` // PresetBase represents the underlying spec preset this config is based on. - ConfigName string `yaml:"CONFIG_NAME" spec:"true"` // ConfigName for allowing an easy human-readable way of knowing what chain is being used. - TargetCommitteeSize uint64 `yaml:"TARGET_COMMITTEE_SIZE" spec:"true"` // TargetCommitteeSize is the number of validators in a committee when the chain is healthy. 
- MaxValidatorsPerCommittee uint64 `yaml:"MAX_VALIDATORS_PER_COMMITTEE" spec:"true"` // MaxValidatorsPerCommittee defines the upper bound of the size of a committee. - MaxCommitteesPerSlot uint64 `yaml:"MAX_COMMITTEES_PER_SLOT" spec:"true"` // MaxCommitteesPerSlot defines the max amount of committee in a single slot. - MinPerEpochChurnLimit uint64 `yaml:"MIN_PER_EPOCH_CHURN_LIMIT" spec:"true"` // MinPerEpochChurnLimit is the minimum amount of churn allotted for validator rotations. - ChurnLimitQuotient uint64 `yaml:"CHURN_LIMIT_QUOTIENT" spec:"true"` // ChurnLimitQuotient is used to determine the limit of how many validators can rotate per epoch. - ShuffleRoundCount uint64 `yaml:"SHUFFLE_ROUND_COUNT" spec:"true"` // ShuffleRoundCount is used for retrieving the permuted index. - MinGenesisActiveValidatorCount uint64 `yaml:"MIN_GENESIS_ACTIVE_VALIDATOR_COUNT" spec:"true"` // MinGenesisActiveValidatorCount defines how many validator deposits needed to kick off beacon chain. - MinGenesisTime uint64 `yaml:"MIN_GENESIS_TIME" spec:"true"` // MinGenesisTime is the time that needed to pass before kicking off beacon chain. - TargetAggregatorsPerCommittee uint64 `yaml:"TARGET_AGGREGATORS_PER_COMMITTEE" spec:"true"` // TargetAggregatorsPerCommittee defines the number of aggregators inside one committee. - HysteresisQuotient uint64 `yaml:"HYSTERESIS_QUOTIENT" spec:"true"` // HysteresisQuotient defines the hysteresis quotient for effective balance calculations. - HysteresisDownwardMultiplier uint64 `yaml:"HYSTERESIS_DOWNWARD_MULTIPLIER" spec:"true"` // HysteresisDownwardMultiplier defines the hysteresis downward multiplier for effective balance calculations. - HysteresisUpwardMultiplier uint64 `yaml:"HYSTERESIS_UPWARD_MULTIPLIER" spec:"true"` // HysteresisUpwardMultiplier defines the hysteresis upward multiplier for effective balance calculations. + PresetBase string `yaml:"PRESET_BASE" spec:"true"` // PresetBase represents the underlying spec preset this config is based on. + ConfigName string `yaml:"CONFIG_NAME" spec:"true"` // ConfigName for allowing an easy human-readable way of knowing what chain is being used. + TargetCommitteeSize uint64 `yaml:"TARGET_COMMITTEE_SIZE" spec:"true"` // TargetCommitteeSize is the number of validators in a committee when the chain is healthy. + MaxValidatorsPerCommittee uint64 `yaml:"MAX_VALIDATORS_PER_COMMITTEE" spec:"true"` // MaxValidatorsPerCommittee defines the upper bound of the size of a committee. + MaxCommitteesPerSlot uint64 `yaml:"MAX_COMMITTEES_PER_SLOT" spec:"true"` // MaxCommitteesPerSlot defines the max amount of committee in a single slot. + MinPerEpochChurnLimit uint64 `yaml:"MIN_PER_EPOCH_CHURN_LIMIT" spec:"true"` // MinPerEpochChurnLimit is the minimum amount of churn allotted for validator rotations. + ChurnLimitQuotient uint64 `yaml:"CHURN_LIMIT_QUOTIENT" spec:"true"` // ChurnLimitQuotient is used to determine the limit of how many validators can rotate per epoch. + MaxPerEpochActivationChurnLimit uint64 `yaml:"MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT" spec:"true"` // MaxPerEpochActivationChurnLimit defines the maximum amount of churn allowed in one epoch from deneb. + ShuffleRoundCount uint64 `yaml:"SHUFFLE_ROUND_COUNT" spec:"true"` // ShuffleRoundCount is used for retrieving the permuted index. + MinGenesisActiveValidatorCount uint64 `yaml:"MIN_GENESIS_ACTIVE_VALIDATOR_COUNT" spec:"true"` // MinGenesisActiveValidatorCount defines how many validator deposits needed to kick off beacon chain. 
+ MinGenesisTime uint64 `yaml:"MIN_GENESIS_TIME" spec:"true"` // MinGenesisTime is the time that needed to pass before kicking off beacon chain. + TargetAggregatorsPerCommittee uint64 `yaml:"TARGET_AGGREGATORS_PER_COMMITTEE" spec:"true"` // TargetAggregatorsPerCommittee defines the number of aggregators inside one committee. + HysteresisQuotient uint64 `yaml:"HYSTERESIS_QUOTIENT" spec:"true"` // HysteresisQuotient defines the hysteresis quotient for effective balance calculations. + HysteresisDownwardMultiplier uint64 `yaml:"HYSTERESIS_DOWNWARD_MULTIPLIER" spec:"true"` // HysteresisDownwardMultiplier defines the hysteresis downward multiplier for effective balance calculations. + HysteresisUpwardMultiplier uint64 `yaml:"HYSTERESIS_UPWARD_MULTIPLIER" spec:"true"` // HysteresisUpwardMultiplier defines the hysteresis upward multiplier for effective balance calculations. // Gwei value constants. MinDepositAmount uint64 `yaml:"MIN_DEPOSIT_AMOUNT" spec:"true"` // MinDepositAmount is the minimum amount of Gwei a validator can send to the deposit contract at once (lower amounts will be reverted). @@ -573,18 +574,19 @@ var MainnetBeaconConfig BeaconChainConfig = BeaconChainConfig{ GenesisDelay: 604800, // 1 week. // Misc constant. - TargetCommitteeSize: 128, - MaxValidatorsPerCommittee: 2048, - MaxCommitteesPerSlot: 64, - MinPerEpochChurnLimit: 4, - ChurnLimitQuotient: 1 << 16, - ShuffleRoundCount: 90, - MinGenesisActiveValidatorCount: 16384, - MinGenesisTime: 1606824000, // Dec 1, 2020, 12pm UTC. - TargetAggregatorsPerCommittee: 16, - HysteresisQuotient: 4, - HysteresisDownwardMultiplier: 1, - HysteresisUpwardMultiplier: 5, + TargetCommitteeSize: 128, + MaxValidatorsPerCommittee: 2048, + MaxCommitteesPerSlot: 64, + MinPerEpochChurnLimit: 4, + ChurnLimitQuotient: 1 << 16, + MaxPerEpochActivationChurnLimit: 8, + ShuffleRoundCount: 90, + MinGenesisActiveValidatorCount: 16384, + MinGenesisTime: 1606824000, // Dec 1, 2020, 12pm UTC. + TargetAggregatorsPerCommittee: 16, + HysteresisQuotient: 4, + HysteresisDownwardMultiplier: 1, + HysteresisUpwardMultiplier: 5, // Gwei value constants. 
MinDepositAmount: 1 * 1e9, @@ -803,6 +805,8 @@ func sepoliaConfig() BeaconChainConfig { cfg.BellatrixForkVersion = 0x90000071 cfg.CapellaForkEpoch = 56832 cfg.CapellaForkVersion = 0x90000072 + cfg.DenebForkEpoch = 132608 + cfg.DenebForkVersion = 0x90000073 cfg.TerminalTotalDifficulty = "17000000000000000" cfg.DepositContractAddress = "0x7f02C3E3c98b133055B8B348B2Ac625669Ed295D" cfg.InitializeForkSchedule() @@ -824,7 +828,8 @@ func goerliConfig() BeaconChainConfig { cfg.BellatrixForkVersion = 0x02001020 cfg.CapellaForkEpoch = 162304 cfg.CapellaForkVersion = 0x03001020 - cfg.DenebForkVersion = 0x40001020 + cfg.DenebForkEpoch = 231680 + cfg.DenebForkVersion = 0x04001020 cfg.TerminalTotalDifficulty = "10790000" cfg.DepositContractAddress = "0xff50ed3d0ec03aC01D4C79aAd74928BFF48a7b2b" cfg.InitializeForkSchedule() @@ -880,13 +885,15 @@ func chiadoConfig() BeaconChainConfig { cfg.AltairForkVersion = 0x0100006f cfg.BellatrixForkEpoch = 180 cfg.BellatrixForkVersion = 0x0200006f + cfg.CapellaForkEpoch = 244224 + cfg.CapellaForkVersion = 0x0300006f + cfg.DenebForkEpoch = 516608 + cfg.DenebForkVersion = 0x0400006f cfg.TerminalTotalDifficulty = "231707791542740786049188744689299064356246512" cfg.DepositContractAddress = "0xb97036A26259B7147018913bD58a774cf91acf25" cfg.BaseRewardFactor = 25 cfg.SlotsPerEpoch = 16 cfg.EpochsPerSyncCommitteePeriod = 512 - cfg.CapellaForkEpoch = math.MaxUint64 - cfg.DenebForkEpoch = math.MaxUint64 cfg.InitializeForkSchedule() return cfg } diff --git a/cl/cltypes/aggregate.go b/cl/cltypes/aggregate.go index a21ade55f6a..5819b6f323c 100644 --- a/cl/cltypes/aggregate.go +++ b/cl/cltypes/aggregate.go @@ -12,9 +12,9 @@ import ( * to be aggregated and the BLS signature of the attestation. */ type AggregateAndProof struct { - AggregatorIndex uint64 - Aggregate *solid.Attestation - SelectionProof libcommon.Bytes96 + AggregatorIndex uint64 `json:"aggregator_index,string"` + Aggregate *solid.Attestation `json:"aggregate"` + SelectionProof libcommon.Bytes96 `json:"selection_proof"` } func (a *AggregateAndProof) EncodeSSZ(dst []byte) ([]byte, error) { @@ -39,8 +39,8 @@ func (a *AggregateAndProof) HashSSZ() ([32]byte, error) { } type SignedAggregateAndProof struct { - Message *AggregateAndProof - Signature libcommon.Bytes96 + Message *AggregateAndProof `json:"message"` + Signature libcommon.Bytes96 `json:"signature"` } func (a *SignedAggregateAndProof) EncodeSSZ(dst []byte) ([]byte, error) { @@ -65,7 +65,7 @@ func (a *SignedAggregateAndProof) HashSSZ() ([32]byte, error) { * and signature is the aggregate BLS signature of the committee. 
*/
 type SyncAggregate struct {
-	SyncCommiteeBits      libcommon.Bytes64 `json:"sync_commitee_bits"`
+	SyncCommiteeBits      libcommon.Bytes64 `json:"sync_committee_bits"`
 	SyncCommiteeSignature libcommon.Bytes96 `json:"signature"`
 }
 
@@ -82,6 +82,13 @@ func (agg *SyncAggregate) Sum() int {
 	return ret
 }
 
+func (agg *SyncAggregate) IsSet(idx uint64) bool {
+	if idx >= 512 { // SyncCommiteeBits is 64 bytes, i.e. 512 bits
+		return false
+	}
+	return agg.SyncCommiteeBits[idx/8]&(1<<(idx%8)) > 0
+}
+
 func (agg *SyncAggregate) EncodeSSZ(buf []byte) ([]byte, error) {
 	return append(buf, append(agg.SyncCommiteeBits[:], agg.SyncCommiteeSignature[:]...)...), nil
 }
diff --git a/cl/cltypes/beacon_block.go b/cl/cltypes/beacon_block.go
index 8aca8dd92a6..d9763803706 100644
--- a/cl/cltypes/beacon_block.go
+++ b/cl/cltypes/beacon_block.go
@@ -28,8 +28,8 @@ type SignedBeaconBlock struct {
 }
 
 type BeaconBlock struct {
-	Slot          uint64         `json:"slot"`
-	ProposerIndex uint64         `json:"proposer_index"`
+	Slot          uint64         `json:"slot,string"`
+	ProposerIndex uint64         `json:"proposer_index,string"`
 	ParentRoot    libcommon.Hash `json:"parent_root"`
 	StateRoot     libcommon.Hash `json:"state_root"`
 	Body          *BeaconBody    `json:"body"`
@@ -41,7 +41,7 @@ type BeaconBody struct {
 	// Data related to the Ethereum 1.0 chain
 	Eth1Data *Eth1Data `json:"eth1_data"`
 	// A byte array used to customize validators' behavior
-	Graffiti libcommon.Hash `json:"graffit"`
+	Graffiti libcommon.Hash `json:"graffiti"`
 	// A list of slashing events for validators who included invalid blocks in the chain
 	ProposerSlashings *solid.ListSSZ[*ProposerSlashing] `json:"proposer_slashings"`
 	// A list of slashing events for validators who included invalid attestations in the chain
@@ -293,3 +293,7 @@ func (*BeaconBody) Static() bool {
 func (*BeaconBlock) Static() bool {
 	return false
 }
+
+func (b *BeaconBody) ExecutionPayloadMerkleProof() ([][32]byte, error) {
+	return merkle_tree.MerkleProof(4, 9, b.getSchema(false)...)
+}
diff --git a/cl/cltypes/beacon_header.go b/cl/cltypes/beacon_header.go
index 94064339d23..4c67066f3df 100644
--- a/cl/cltypes/beacon_header.go
+++ b/cl/cltypes/beacon_header.go
@@ -13,8 +13,8 @@ import (
  * It contains the hash of the block body, and state root data.
  */
 type BeaconBlockHeader struct {
-	Slot          uint64         `json:"slot"`
-	ProposerIndex uint64         `json:"proposer_index"`
+	Slot          uint64         `json:"slot,string"`
+	ProposerIndex uint64         `json:"proposer_index,string"`
 	ParentRoot    libcommon.Hash `json:"parent_root"`
 	Root          libcommon.Hash `json:"state_root"`
 	BodyRoot      libcommon.Hash `json:"body_root"`
diff --git a/cl/cltypes/blinded_beacon_block.go b/cl/cltypes/blinded_beacon_block.go
index 75a1698a7ea..f15a6d83c66 100644
--- a/cl/cltypes/blinded_beacon_block.go
+++ b/cl/cltypes/blinded_beacon_block.go
@@ -19,8 +19,8 @@ type SignedBlindedBeaconBlock struct {
 }
 
 type BlindedBeaconBlock struct {
-	Slot          uint64             `json:"slot"`
-	ProposerIndex uint64             `json:"proposer_index"`
+	Slot          uint64             `json:"slot,string"`
+	ProposerIndex uint64             `json:"proposer_index,string"`
 	ParentRoot    libcommon.Hash     `json:"parent_root"`
 	StateRoot     libcommon.Hash     `json:"state_root"`
 	Body          *BlindedBeaconBody `json:"body"`
@@ -32,7 +32,7 @@ type BlindedBeaconBody struct {
 	// Data related to the Ethereum 1.0 chain
 	Eth1Data *Eth1Data `json:"eth1_data"`
 	// A byte array used to customize validators' behavior
-	Graffiti libcommon.Hash `json:"graffit"`
+	Graffiti libcommon.Hash `json:"graffiti"`
 	// A list of slashing events for validators who included invalid blocks in the chain
 	ProposerSlashings *solid.ListSSZ[*ProposerSlashing] `json:"proposer_slashings"`
 	// A list of slashing events for validators who included invalid attestations in the chain
@@ -309,6 +309,10 @@ func (b *SignedBlindedBeaconBlock) Clone() clonable.Clonable {
 	return NewSignedBlindedBeaconBlock(b.Block.Body.beaconCfg)
 }
 
+func (b *BlindedBeaconBody) ExecutionPayloadMerkleProof() ([][32]byte, error) {
+	return merkle_tree.MerkleProof(4, 9, b.getSchema(false)...)
+}
+
 // make sure that the type implements the interface ssz2.ObjectSSZ
 var _ ssz2.ObjectSSZ = (*BlindedBeaconBody)(nil)
 var _ ssz2.ObjectSSZ = (*BlindedBeaconBlock)(nil)
diff --git a/cl/cltypes/bls_to_execution_change.go b/cl/cltypes/bls_to_execution_change.go
index 676154f15fc..8fa1a9000a9 100644
--- a/cl/cltypes/bls_to_execution_change.go
+++ b/cl/cltypes/bls_to_execution_change.go
@@ -11,7 +11,7 @@ import (
 
 // Change to EL engine
 type BLSToExecutionChange struct {
-	ValidatorIndex uint64            `json:"validator_index"`
+	ValidatorIndex uint64            `json:"validator_index,string"`
 	From           libcommon.Bytes48 `json:"from"`
 	To             libcommon.Address `json:"to"`
 }
diff --git a/cl/cltypes/contribution.go b/cl/cltypes/contribution.go
new file mode 100644
index 00000000000..a64ea8b10c3
--- /dev/null
+++ b/cl/cltypes/contribution.go
@@ -0,0 +1,111 @@
+package cltypes
+
+import (
+	libcommon "github.com/ledgerwatch/erigon-lib/common"
+	"github.com/ledgerwatch/erigon/cl/cltypes/solid"
+	"github.com/ledgerwatch/erigon/cl/merkle_tree"
+	ssz2 "github.com/ledgerwatch/erigon/cl/ssz"
+)
+
+/*
+ * ContributionAndProof contains the index of the aggregator, the contribution
+ * to be aggregated and the BLS signature of the contribution.
+ */
+type ContributionAndProof struct {
+	AggregatorIndex uint64              `json:"aggregator_index,string"`
+	SelectionProof  libcommon.Bytes96   `json:"selection_proof"`
+	Contribution    *solid.Contribution `json:"contribution"`
+}
+
+func (a *ContributionAndProof) EncodeSSZ(dst []byte) ([]byte, error) {
+	return ssz2.MarshalSSZ(dst, a.AggregatorIndex, a.Contribution, a.SelectionProof[:])
+}
+
+func (a *ContributionAndProof) Static() bool {
+	return false
+}
+
+func (a *ContributionAndProof) DecodeSSZ(buf []byte, version int) error {
+	a.Contribution = new(solid.Contribution)
+	return ssz2.UnmarshalSSZ(buf, version, &a.AggregatorIndex, a.Contribution, a.SelectionProof[:])
+}
+
+func (a *ContributionAndProof) EncodingSizeSSZ() int {
+	return 108 + a.Contribution.EncodingSizeSSZ()
+}
+
+func (a *ContributionAndProof) HashSSZ() ([32]byte, error) {
+	return merkle_tree.HashTreeRoot(a.AggregatorIndex, a.Contribution, a.SelectionProof[:])
+}
+
+type SignedContributionAndProof struct {
+	Message   *ContributionAndProof `json:"message"`
+	Signature libcommon.Bytes96     `json:"signature"`
+}
+
+func (a *SignedContributionAndProof) EncodeSSZ(dst []byte) ([]byte, error) {
+	return ssz2.MarshalSSZ(dst, a.Message, a.Signature[:])
+}
+
+func (a *SignedContributionAndProof) DecodeSSZ(buf []byte, version int) error {
+	a.Message = new(ContributionAndProof)
+	return ssz2.UnmarshalSSZ(buf, version, a.Message, a.Signature[:])
+}
+
+func (a *SignedContributionAndProof) EncodingSizeSSZ() int {
+	return 100 + a.Message.EncodingSizeSSZ()
+}
+
+func (a *SignedContributionAndProof) HashSSZ() ([32]byte, error) {
+	return merkle_tree.HashTreeRoot(a.Message, a.Signature[:])
+}
+
+/*
+ * SyncContribution determines the successful committee; bits show the active participants,
+ * and signature is the aggregate BLS signature of the committee.
+ */
+type SyncContribution struct {
+	SyncCommiteeBits      libcommon.Bytes64 `json:"sync_committee_bits"`
+	SyncCommiteeSignature libcommon.Bytes96 `json:"signature"`
+}
+
+// return sum of the committee bits
+func (agg *SyncContribution) Sum() int {
+	ret := 0
+	for i := range agg.SyncCommiteeBits {
+		for bit := 1; bit <= 128; bit *= 2 {
+			if agg.SyncCommiteeBits[i]&byte(bit) > 0 {
+				ret++
+			}
+		}
+	}
+	return ret
+}
+
+func (agg *SyncContribution) IsSet(idx uint64) bool {
+	if idx >= 512 { // SyncCommiteeBits is 64 bytes, i.e. 512 bits
+		return false
+	}
+	return agg.SyncCommiteeBits[idx/8]&(1<<(idx%8)) > 0
+}
+
+func (agg *SyncContribution) EncodeSSZ(buf []byte) ([]byte, error) {
+	return append(buf, append(agg.SyncCommiteeBits[:], agg.SyncCommiteeSignature[:]...)...), nil
+}
+
+func (*SyncContribution) Static() bool {
+	return true
+}
+
+func (agg *SyncContribution) DecodeSSZ(buf []byte, version int) error {
+	return ssz2.UnmarshalSSZ(buf, version, agg.SyncCommiteeBits[:], agg.SyncCommiteeSignature[:])
+}
+
+func (agg *SyncContribution) EncodingSizeSSZ() int {
+	return 160
+}
+
+func (agg *SyncContribution) HashSSZ() ([32]byte, error) {
+	return merkle_tree.HashTreeRoot(agg.SyncCommiteeBits[:], agg.SyncCommiteeSignature[:])
+
+}
diff --git a/cl/cltypes/eth1_block.go b/cl/cltypes/eth1_block.go
index 7d474d4f91d..67f54233f42 100644
--- a/cl/cltypes/eth1_block.go
+++ b/cl/cltypes/eth1_block.go
@@ -22,18 +22,18 @@ type Eth1Block struct {
 	ReceiptsRoot libcommon.Hash `json:"receipts_root"`
 	LogsBloom    types.Bloom    `json:"logs_bloom"`
 	PrevRandao   libcommon.Hash `json:"prev_randao"`
-	BlockNumber  uint64         `json:"block_number"`
-	GasLimit     uint64         `json:"gas_limit"`
-	GasUsed      uint64         `json:"gas_used"`
-	Time         uint64         `json:"timestamp"`
+	BlockNumber  uint64         `json:"block_number,string"`
+	GasLimit     uint64         `json:"gas_limit,string"`
+	GasUsed      uint64         `json:"gas_used,string"`
+	Time         uint64         `json:"timestamp,string"`
 	Extra        *solid.ExtraData `json:"extra_data"`
 	BaseFeePerGas libcommon.Hash  `json:"base_fee_per_gas"`
 	// Extra fields
 	BlockHash    libcommon.Hash              `json:"block_hash"`
 	Transactions *solid.TransactionsSSZ      `json:"transactions"`
 	Withdrawals  *solid.ListSSZ[*Withdrawal] `json:"withdrawals,omitempty"`
-	BlobGasUsed   uint64 `json:"blob_gas_used,omitempty"`
-	ExcessBlobGas uint64 `json:"excess_blob_gas,omitempty"`
+	BlobGasUsed   uint64 `json:"blob_gas_used,omitempty,string"`
+	ExcessBlobGas uint64 `json:"excess_blob_gas,omitempty,string"`
 	// internals
 	version   clparams.StateVersion
 	beaconCfg *clparams.BeaconChainConfig
diff --git a/cl/cltypes/eth1_data.go b/cl/cltypes/eth1_data.go
index 716105dee89..ee9695d67ca 100644
--- a/cl/cltypes/eth1_data.go
+++ b/cl/cltypes/eth1_data.go
@@ -10,7 +10,7 @@ import (
 
 type Eth1Data struct {
 	Root         libcommon.Hash `json:"deposit_root"`
-	DepositCount uint64         `json:"deposit_count"`
+	DepositCount uint64         `json:"deposit_count,string"`
 	BlockHash    libcommon.Hash `json:"block_hash"`
 }
diff --git a/cl/cltypes/eth1_header.go b/cl/cltypes/eth1_header.go
index a8c8f679b82..e74270fc6ed 100644
--- a/cl/cltypes/eth1_header.go
+++ b/cl/cltypes/eth1_header.go
@@ -20,18 +20,18 @@ type Eth1Header struct {
 	ReceiptsRoot libcommon.Hash `json:"receipts_root"`
 	LogsBloom    types.Bloom    `json:"logs_bloom"`
 	PrevRandao   libcommon.Hash `json:"prev_randao"`
-	BlockNumber  uint64         `json:"block_number"`
-	GasLimit     uint64         `json:"gas_limit"`
-	GasUsed      uint64         `json:"gas_used"`
-	Time         uint64         `json:"time"`
+	BlockNumber  uint64         `json:"block_number,string"`
+	GasLimit     uint64         `json:"gas_limit,string"`
+	GasUsed      uint64         `json:"gas_used,string"`
+	Time         uint64         `json:"time,string"`
 	Extra 
*solid.ExtraData `json:"extra_data"` BaseFeePerGas libcommon.Hash `json:"base_fee_per_gas"` // Extra fields BlockHash libcommon.Hash `json:"block_hash"` TransactionsRoot libcommon.Hash `json:"transactions_root"` WithdrawalsRoot libcommon.Hash `json:"withdrawals_root,omitempty"` - BlobGasUsed uint64 `json:"blob_gas_used,omitempty"` - ExcessBlobGas uint64 `json:"excess_blob_gas,omitempty"` + BlobGasUsed uint64 `json:"blob_gas_used,omitempty,string"` + ExcessBlobGas uint64 `json:"excess_blob_gas,omitempty,string"` // internals version clparams.StateVersion } @@ -57,7 +57,7 @@ func (e *Eth1Header) Capella() { e.WithdrawalsRoot = libcommon.Hash{} } -// Capella converts the header to capella version. +// Deneb converts the header to deneb version. func (e *Eth1Header) Deneb() { e.version = clparams.DenebVersion e.BlobGasUsed = 0 diff --git a/cl/cltypes/fork.go b/cl/cltypes/fork.go index 9059a927112..eff0f292047 100644 --- a/cl/cltypes/fork.go +++ b/cl/cltypes/fork.go @@ -10,7 +10,7 @@ import ( type Fork struct { PreviousVersion libcommon.Bytes4 `json:"previous_version"` CurrentVersion libcommon.Bytes4 `json:"current_version"` - Epoch uint64 `json:"epoch"` + Epoch uint64 `json:"epoch,string"` } func (*Fork) Static() bool { diff --git a/cl/cltypes/indexed_attestation.go b/cl/cltypes/indexed_attestation.go index 4173429d8a0..1464c8eb158 100644 --- a/cl/cltypes/indexed_attestation.go +++ b/cl/cltypes/indexed_attestation.go @@ -16,6 +16,13 @@ type IndexedAttestation struct { Signature libcommon.Bytes96 `json:"signature"` } +func NewIndexedAttestation() *IndexedAttestation { + return &IndexedAttestation{ + AttestingIndices: solid.NewRawUint64List(2048, nil), + Data: solid.NewAttestationData(), + } +} + func (i *IndexedAttestation) Static() bool { return false } diff --git a/cl/cltypes/light_client.go b/cl/cltypes/light_client.go new file mode 100644 index 00000000000..b68b1d6bec9 --- /dev/null +++ b/cl/cltypes/light_client.go @@ -0,0 +1,289 @@ +package cltypes + +import ( + "github.com/ledgerwatch/erigon-lib/types/clonable" + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/cl/merkle_tree" + ssz2 "github.com/ledgerwatch/erigon/cl/ssz" +) + +const ( + ExecutionBranchSize = 4 + SyncCommitteeBranchSize = 5 + CurrentSyncCommitteeBranchSize = 5 + FinalizedBranchSize = 6 +) + +type LightClientHeader struct { + Beacon *BeaconBlockHeader `json:"beacon"` + + ExecutionPayloadHeader *Eth1Header `json:"execution_payload_header,omitempty"` + ExecutionBranch solid.HashVectorSSZ `json:"execution_branch,omitempty"` + + version clparams.StateVersion +} + +func NewLightClientHeader(version clparams.StateVersion) *LightClientHeader { + return &LightClientHeader{ + version: version, + Beacon: &BeaconBlockHeader{}, + ExecutionBranch: solid.NewHashVector(ExecutionBranchSize), + ExecutionPayloadHeader: NewEth1Header(version), + } +} + +func (l *LightClientHeader) EncodeSSZ(buf []byte) ([]byte, error) { + return ssz2.MarshalSSZ(buf, l.getSchema()...) +} + +func (l *LightClientHeader) DecodeSSZ(buf []byte, version int) error { + l.version = clparams.StateVersion(version) + l.Beacon = &BeaconBlockHeader{} + l.ExecutionBranch = solid.NewHashVector(ExecutionBranchSize) + l.ExecutionPayloadHeader = NewEth1Header(l.version) + return ssz2.UnmarshalSSZ(buf, version, l.getSchema()...) 
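+	// getSchema is version-aware: for Capella and later it also includes the
+	// execution payload header and its Merkle branch (see getSchema below).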
+} + +func (l *LightClientHeader) EncodingSizeSSZ() int { + size := l.Beacon.EncodingSizeSSZ() + if l.version >= clparams.CapellaVersion { + size += l.ExecutionPayloadHeader.EncodingSizeSSZ() + 4 // the extra 4 is for the offset + size += l.ExecutionBranch.EncodingSizeSSZ() + } + return size +} + +func (l *LightClientHeader) HashSSZ() ([32]byte, error) { + return merkle_tree.HashTreeRoot(l.getSchema()...) +} + +func (l *LightClientHeader) Static() bool { + return l.version < clparams.CapellaVersion +} + +func (l *LightClientHeader) Clone() clonable.Clonable { + return NewLightClientHeader(l.version) +} + +func (l *LightClientHeader) getSchema() []interface{} { + schema := []interface{}{ + l.Beacon, + } + if l.version >= clparams.CapellaVersion { + schema = append(schema, l.ExecutionPayloadHeader, l.ExecutionBranch) + } + return schema +} + +type LightClientUpdate struct { + AttestedHeader *LightClientHeader `json:"attested_header"` + NextSyncCommittee *solid.SyncCommittee `json:"next_sync_committee"` + NextSyncCommitteeBranch solid.HashVectorSSZ `json:"next_sync_committee_branch"` + FinalizedHeader *LightClientHeader `json:"finalized_header"` + FinalityBranch solid.HashVectorSSZ `json:"finality_branch"` + SyncAggregate *SyncAggregate `json:"sync_aggregate"` + SignatureSlot uint64 `json:"signature_slot,string"` +} + +func NewLightClientUpdate(version clparams.StateVersion) *LightClientUpdate { + return &LightClientUpdate{ + AttestedHeader: NewLightClientHeader(version), + NextSyncCommittee: &solid.SyncCommittee{}, + NextSyncCommitteeBranch: solid.NewHashVector(CurrentSyncCommitteeBranchSize), + FinalizedHeader: NewLightClientHeader(version), + FinalityBranch: solid.NewHashVector(FinalizedBranchSize), + SyncAggregate: &SyncAggregate{}, + } +} + +func (l *LightClientUpdate) EncodeSSZ(buf []byte) ([]byte, error) { + return ssz2.MarshalSSZ(buf, l.AttestedHeader, l.NextSyncCommittee, l.NextSyncCommitteeBranch, l.FinalizedHeader, l.FinalityBranch, l.SyncAggregate, &l.SignatureSlot) +} + +func (l *LightClientUpdate) DecodeSSZ(buf []byte, version int) error { + l.AttestedHeader = NewLightClientHeader(clparams.StateVersion(version)) + l.NextSyncCommittee = &solid.SyncCommittee{} + l.NextSyncCommitteeBranch = solid.NewHashVector(CurrentSyncCommitteeBranchSize) + l.FinalizedHeader = NewLightClientHeader(clparams.StateVersion(version)) + l.FinalityBranch = solid.NewHashVector(FinalizedBranchSize) + l.SyncAggregate = &SyncAggregate{} + return ssz2.UnmarshalSSZ(buf, version, l.AttestedHeader, l.NextSyncCommittee, l.NextSyncCommitteeBranch, l.FinalizedHeader, l.FinalityBranch, l.SyncAggregate, &l.SignatureSlot) +} + +func (l *LightClientUpdate) EncodingSizeSSZ() int { + size := l.AttestedHeader.EncodingSizeSSZ() + if !l.AttestedHeader.Static() { + size += 4 // the extra 4 is for the offset + } + size += l.NextSyncCommittee.EncodingSizeSSZ() + size += l.NextSyncCommitteeBranch.EncodingSizeSSZ() + size += l.FinalizedHeader.EncodingSizeSSZ() + if !l.FinalizedHeader.Static() { + size += 4 // the extra 4 is for the offset + } + size += l.FinalityBranch.EncodingSizeSSZ() + size += l.SyncAggregate.EncodingSizeSSZ() + size += 8 // for the slot + return size +} + +func (l *LightClientUpdate) HashSSZ() ([32]byte, error) { + return merkle_tree.HashTreeRoot(l.AttestedHeader, l.NextSyncCommittee, l.NextSyncCommitteeBranch, l.FinalizedHeader, l.FinalityBranch, l.SyncAggregate, &l.SignatureSlot) +} + +func (l *LightClientUpdate) Clone() clonable.Clonable { + v := clparams.Phase0Version + if l.AttestedHeader != nil { + v 
= l.AttestedHeader.version + } + return NewLightClientUpdate(v) +} + +type LightClientBootstrap struct { + Header *LightClientHeader `json:"header"` + CurrentSyncCommittee *solid.SyncCommittee `json:"current_sync_committee"` + CurrentSyncCommitteeBranch solid.HashVectorSSZ `json:"current_sync_committee_branch"` +} + +func NewLightClientBootstrap(version clparams.StateVersion) *LightClientBootstrap { + return &LightClientBootstrap{ + Header: NewLightClientHeader(version), + CurrentSyncCommittee: &solid.SyncCommittee{}, + CurrentSyncCommitteeBranch: solid.NewHashVector(CurrentSyncCommitteeBranchSize), + } +} + +func (l *LightClientBootstrap) EncodeSSZ(buf []byte) ([]byte, error) { + return ssz2.MarshalSSZ(buf, l.Header, l.CurrentSyncCommittee, l.CurrentSyncCommitteeBranch) +} + +func (l *LightClientBootstrap) DecodeSSZ(buf []byte, version int) error { + l.Header = NewLightClientHeader(clparams.StateVersion(version)) + l.CurrentSyncCommittee = &solid.SyncCommittee{} + l.CurrentSyncCommitteeBranch = solid.NewHashVector(CurrentSyncCommitteeBranchSize) + return ssz2.UnmarshalSSZ(buf, version, l.Header, l.CurrentSyncCommittee, l.CurrentSyncCommitteeBranch) +} + +func (l *LightClientBootstrap) EncodingSizeSSZ() int { + size := l.Header.EncodingSizeSSZ() + if !l.Header.Static() { + size += 4 // the extra 4 is for the offset + } + size += l.CurrentSyncCommittee.EncodingSizeSSZ() + size += l.CurrentSyncCommitteeBranch.EncodingSizeSSZ() + return size +} + +func (l *LightClientBootstrap) HashSSZ() ([32]byte, error) { + return merkle_tree.HashTreeRoot(l.Header, l.CurrentSyncCommittee, l.CurrentSyncCommitteeBranch) +} + +func (l *LightClientBootstrap) Clone() clonable.Clonable { + v := clparams.Phase0Version + if l.Header != nil { + v = l.Header.version + } + return NewLightClientBootstrap(v) +} + +type LightClientFinalityUpdate struct { + AttestedHeader *LightClientHeader `json:"attested_header"` + FinalizedHeader *LightClientHeader `json:"finalized_header"` + FinalityBranch solid.HashVectorSSZ `json:"finality_branch"` + SyncAggregate *SyncAggregate `json:"sync_aggregate"` + SignatureSlot uint64 `json:"signature_slot,string"` +} + +func NewLightClientFinalityUpdate(version clparams.StateVersion) *LightClientFinalityUpdate { + return &LightClientFinalityUpdate{ + AttestedHeader: NewLightClientHeader(version), + FinalizedHeader: NewLightClientHeader(version), + FinalityBranch: solid.NewHashVector(FinalizedBranchSize), + SyncAggregate: &SyncAggregate{}, + } +} + +func (l *LightClientFinalityUpdate) EncodeSSZ(buf []byte) ([]byte, error) { + return ssz2.MarshalSSZ(buf, l.AttestedHeader, l.FinalizedHeader, l.FinalityBranch, l.SyncAggregate, &l.SignatureSlot) +} + +func (l *LightClientFinalityUpdate) DecodeSSZ(buf []byte, version int) error { + l.AttestedHeader = NewLightClientHeader(clparams.StateVersion(version)) + l.FinalizedHeader = NewLightClientHeader(clparams.StateVersion(version)) + l.FinalityBranch = solid.NewHashVector(FinalizedBranchSize) + l.SyncAggregate = &SyncAggregate{} + return ssz2.UnmarshalSSZ(buf, version, l.AttestedHeader, l.FinalizedHeader, l.FinalityBranch, l.SyncAggregate, &l.SignatureSlot) +} + +func (l *LightClientFinalityUpdate) EncodingSizeSSZ() int { + size := l.AttestedHeader.EncodingSizeSSZ() + if !l.AttestedHeader.Static() { + size += 4 // the extra 4 is for the offset + } + size += l.FinalizedHeader.EncodingSizeSSZ() + if !l.FinalizedHeader.Static() { + size += 4 // the extra 4 is for the offset + } + size += l.FinalityBranch.EncodingSizeSSZ() + size += 
l.SyncAggregate.EncodingSizeSSZ() + size += 8 // for the slot + return size +} + +func (l *LightClientFinalityUpdate) HashSSZ() ([32]byte, error) { + return merkle_tree.HashTreeRoot(l.AttestedHeader, l.FinalizedHeader, l.FinalityBranch, l.SyncAggregate, &l.SignatureSlot) +} + +func (l *LightClientFinalityUpdate) Clone() clonable.Clonable { + v := clparams.Phase0Version + if l.AttestedHeader != nil { + v = l.AttestedHeader.version + } + return NewLightClientFinalityUpdate(v) +} + +type LightClientOptimisticUpdate struct { + AttestedHeader *LightClientHeader `json:"attested_header"` + SyncAggregate *SyncAggregate `json:"sync_aggregate"` + SignatureSlot uint64 `json:"signature_slot,string"` +} + +func NewLightClientOptimisticUpdate(version clparams.StateVersion) *LightClientOptimisticUpdate { + return &LightClientOptimisticUpdate{ + AttestedHeader: NewLightClientHeader(version), + SyncAggregate: &SyncAggregate{}, + } +} + +func (l *LightClientOptimisticUpdate) EncodeSSZ(buf []byte) ([]byte, error) { + return ssz2.MarshalSSZ(buf, l.AttestedHeader, l.SyncAggregate, &l.SignatureSlot) +} + +func (l *LightClientOptimisticUpdate) DecodeSSZ(buf []byte, version int) error { + l.AttestedHeader = NewLightClientHeader(clparams.StateVersion(version)) + l.SyncAggregate = &SyncAggregate{} + return ssz2.UnmarshalSSZ(buf, version, l.AttestedHeader, l.SyncAggregate, &l.SignatureSlot) +} + +func (l *LightClientOptimisticUpdate) EncodingSizeSSZ() int { + size := l.AttestedHeader.EncodingSizeSSZ() + if !l.AttestedHeader.Static() { + size += 4 // the extra 4 is for the offset + } + size += l.SyncAggregate.EncodingSizeSSZ() + size += 8 // for the slot + return size +} + +func (l *LightClientOptimisticUpdate) HashSSZ() ([32]byte, error) { + return merkle_tree.HashTreeRoot(l.AttestedHeader, l.SyncAggregate, &l.SignatureSlot) +} + +func (l *LightClientOptimisticUpdate) Clone() clonable.Clonable { + v := clparams.Phase0Version + if l.AttestedHeader != nil { + v = l.AttestedHeader.version + } + return NewLightClientOptimisticUpdate(v) +} diff --git a/cl/cltypes/slashings.go b/cl/cltypes/slashings.go index f849b7b50ee..b7ef50dc656 100644 --- a/cl/cltypes/slashings.go +++ b/cl/cltypes/slashings.go @@ -33,6 +33,13 @@ type AttesterSlashing struct { Attestation_2 *IndexedAttestation `json:"attestation_2"` } +func NewAttesterSlashing() *AttesterSlashing { + return &AttesterSlashing{ + Attestation_1: NewIndexedAttestation(), + Attestation_2: NewIndexedAttestation(), + } +} + func (a *AttesterSlashing) EncodeSSZ(dst []byte) ([]byte, error) { return ssz2.MarshalSSZ(dst, a.Attestation_1, a.Attestation_2) } diff --git a/cl/cltypes/solid/attestation.go b/cl/cltypes/solid/attestation.go index 68e9e131e39..42097e11d9f 100644 --- a/cl/cltypes/solid/attestation.go +++ b/cl/cltypes/solid/attestation.go @@ -65,6 +65,7 @@ func (a *Attestation) UnmarshalJSON(buf []byte) error { Signature libcommon.Bytes96 `json:"signature"` Data AttestationData `json:"data"` } + tmp.Data = NewAttestationData() if err := json.Unmarshal(buf, &tmp); err != nil { return err } diff --git a/cl/cltypes/solid/attestation_data.go b/cl/cltypes/solid/attestation_data.go index 6c505f58105..188571957ab 100644 --- a/cl/cltypes/solid/attestation_data.go +++ b/cl/cltypes/solid/attestation_data.go @@ -40,8 +40,8 @@ func NewAttestionDataFromParameters( func (a AttestationData) MarshalJSON() ([]byte, error) { return json.Marshal(struct { - Slot uint64 `json:"slot"` - Index uint64 `json:"index"` + Slot uint64 `json:"slot,string"` + Index uint64 `json:"index,string"` 
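+		// note (added for clarity): the ",string" option makes encoding/json
+		// render these uint64 fields as decimal strings, the convention used
+		// by beacon-node JSON APIs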
 		BeaconBlockRoot libcommon.Hash `json:"beacon_block_root"`
 		Source          Checkpoint     `json:"source"`
 		Target          Checkpoint     `json:"target"`
@@ -56,12 +56,14 @@ func (a AttestationData) MarshalJSON() ([]byte, error) {
 func (a AttestationData) UnmarshalJSON(buf []byte) error {
 	var tmp struct {
-		Slot            uint64         `json:"slot"`
-		Index           uint64         `json:"index"`
+		Slot            uint64         `json:"slot,string"`
+		Index           uint64         `json:"index,string"`
 		BeaconBlockRoot libcommon.Hash `json:"beacon_block_root"`
 		Source          Checkpoint     `json:"source"`
 		Target          Checkpoint     `json:"target"`
 	}
+	tmp.Source = NewCheckpoint()
+	tmp.Target = NewCheckpoint()
 	if err := json.Unmarshal(buf, &tmp); err != nil {
 		return err
 	}
diff --git a/cl/cltypes/solid/bitlist.go b/cl/cltypes/solid/bitlist.go
index cf14cf0644c..f54d55af93a 100644
--- a/cl/cltypes/solid/bitlist.go
+++ b/cl/cltypes/solid/bitlist.go
@@ -65,6 +65,13 @@ func (u *BitList) CopyTo(target IterableSSZ[byte]) {
 	}
 }
+func (u *BitList) Copy() *BitList {
+	n := NewBitList(u.l, u.c)
+	n.u = make([]byte, len(u.u), cap(u.u))
+	copy(n.u, u.u)
+	return n
+}
+
 // Range allows us to do something to each bit in the list, just like a Power Rangers roll call.
 func (u *BitList) Range(fn func(index int, value byte, length int) bool) {
 	for i, v := range u.u {
diff --git a/cl/cltypes/solid/checkpoint.go b/cl/cltypes/solid/checkpoint.go
index 87ce50436aa..948bd8344eb 100644
--- a/cl/cltypes/solid/checkpoint.go
+++ b/cl/cltypes/solid/checkpoint.go
@@ -35,14 +35,14 @@ func NewCheckpoint() Checkpoint {
 func (c Checkpoint) MarshalJSON() ([]byte, error) {
 	return json.Marshal(struct {
-		Epoch uint64         `json:"epoch"`
+		Epoch uint64         `json:"epoch,string"`
 		Root  libcommon.Hash `json:"root"`
 	}{Epoch: c.Epoch(), Root: c.BlockRoot()})
 }
 func (c Checkpoint) UnmarshalJSON(buf []byte) error {
 	var tmp struct {
-		Epoch uint64         `json:"epoch"`
+		Epoch uint64         `json:"epoch,string"`
 		Root  libcommon.Hash `json:"root"`
 	}
 	if err := json.Unmarshal(buf, &tmp); err != nil {
diff --git a/cl/cltypes/solid/contribution.go b/cl/cltypes/solid/contribution.go
new file mode 100644
index 00000000000..36e0806e897
--- /dev/null
+++ b/cl/cltypes/solid/contribution.go
@@ -0,0 +1,231 @@
+package solid
+
+import (
+	"encoding/binary"
+	"encoding/json"
+
+	"github.com/ledgerwatch/erigon-lib/common"
+	libcommon "github.com/ledgerwatch/erigon-lib/common"
+	"github.com/ledgerwatch/erigon-lib/common/hexutility"
+	"github.com/ledgerwatch/erigon-lib/common/length"
+	"github.com/ledgerwatch/erigon-lib/types/clonable"
+	"github.com/ledgerwatch/erigon-lib/types/ssz"
+	"github.com/ledgerwatch/erigon/cl/merkle_tree"
+	ssz2 "github.com/ledgerwatch/erigon/cl/ssz"
+)
+
+const (
+	// slot:              8 bytes  // offset 0
+	// beaconBlockHash:   32 bytes // offset 8
+	// subcommitteeIndex: 8 bytes  // offset 40
+	// aggregationBits:   16 bytes // offset 48
+	// signature:         96 bytes // offset 64
+	// total = 160
+	contributionStaticBufferSize = 8 + 32 + 8 + 16 + 96
+)
+
+// Contribution represents a sync committee contribution, packed into a fixed
+// 160-byte buffer at the offsets documented above.
+type Contribution [160]byte
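+
+// An illustrative (non-normative) sketch of how the fixed offsets are used:
+//
+//	c := NewContributionFromParameters(slot, blockRoot, subIdx, bits, sig)
+//	c.Slot()            // decodes bytes [0:8) as little-endian
+//	c.BeaconBlockRoot() // reads bytes [8:40)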
+
+// Static returns whether the contribution is static or not. For Contribution, it is always false.
+func (*Contribution) Static() bool {
+	return false
+}
+
+// NewContributionFromParameters creates a new Contribution instance using the provided parameters.
+func NewContributionFromParameters(
+	slot uint64,
+	beaconBlockRoot libcommon.Hash,
+	subcommitteeIndex uint64,
+	aggregationBits [16]byte,
+	signature libcommon.Bytes96,
+) *Contribution {
+	a := &Contribution{}
+	a.SetSlot(slot)
+	a.SetBeaconBlockRoot(beaconBlockRoot)
+	a.SetSubcommitteeIndex(subcommitteeIndex)
+	a.SetAggregationBits(aggregationBits)
+	a.SetSignature(signature)
+	return a
+}
+
+func (a Contribution) MarshalJSON() ([]byte, error) {
+	ab := a.AggregationBits()
+	return json.Marshal(struct {
+		Slot              uint64            `json:"slot,string"`
+		BeaconBlockRoot   libcommon.Hash    `json:"beacon_block_root"`
+		SubcommitteeIndex uint64            `json:"subcommittee_index,string"`
+		AggregationBits   hexutility.Bytes  `json:"aggregation_bits"`
+		Signature         libcommon.Bytes96 `json:"signature"`
+	}{
+		Slot:              a.Slot(),
+		BeaconBlockRoot:   a.BeaconBlockRoot(),
+		SubcommitteeIndex: a.SubcommitteeIndex(),
+		AggregationBits:   hexutility.Bytes(ab[:]),
+		Signature:         a.Signature(),
+	})
+}
+
+func (a *Contribution) UnmarshalJSON(buf []byte) error {
+	var tmp struct {
+		Slot              uint64            `json:"slot,string"`
+		BeaconBlockRoot   libcommon.Hash    `json:"beacon_block_root"`
+		SubcommitteeIndex uint64            `json:"subcommittee_index,string"`
+		AggregationBits   hexutility.Bytes  `json:"aggregation_bits"`
+		Signature         libcommon.Bytes96 `json:"signature"`
+	}
+	if err := json.Unmarshal(buf, &tmp); err != nil {
+		return err
+	}
+	a.SetSlot(tmp.Slot)
+	a.SetBeaconBlockRoot(tmp.BeaconBlockRoot)
+	a.SetSubcommitteeIndex(tmp.SubcommitteeIndex)
+	o := [16]byte{}
+	copy(o[:], tmp.AggregationBits)
+	a.SetAggregationBits(o)
+	a.SetSignature(tmp.Signature)
+	return nil
+}
+func (a Contribution) Slot() uint64 {
+	return binary.LittleEndian.Uint64(a[:8])
+}
+func (a Contribution) BeaconBlockRoot() (o libcommon.Hash) {
+	copy(o[:], a[8:40]) // the block root occupies bytes [8:40)
+	return
+}
+func (a Contribution) SubcommitteeIndex() uint64 {
+	return binary.LittleEndian.Uint64(a[40:48])
+}
+func (a Contribution) AggregationBits() (o [16]byte) {
+	copy(o[:], a[48:64])
+	return
+}
+func (a Contribution) Signature() (o libcommon.Bytes96) {
+	copy(o[:], a[64:160]) // the signature occupies bytes [64:160)
+	return
+}
+
+// The setters use pointer receivers: with a value receiver on an array type,
+// a write would only mutate a copy and be lost.
+func (a *Contribution) SetSlot(slot uint64) {
+	binary.LittleEndian.PutUint64(a[:8], slot)
+}
+
+func (a *Contribution) SetBeaconBlockRoot(hsh common.Hash) {
+	copy(a[8:40], hsh[:])
+}
+
+func (a *Contribution) SetSubcommitteeIndex(subcommitteeIndex uint64) {
+	binary.LittleEndian.PutUint64(a[40:48], subcommitteeIndex)
+}
+
+func (a *Contribution) SetAggregationBits(xs [16]byte) {
+	copy(a[48:64], xs[:])
+}
+
+// SetSignature sets the signature of the Contribution instance.
+func (a *Contribution) SetSignature(signature [96]byte) {
+	copy(a[64:], signature[:])
+}
+
+// EncodingSizeSSZ returns the size of the Contribution instance when encoded in SSZ format.
+func (a *Contribution) EncodingSizeSSZ() (size int) {
+	return 160
+}
+
+// DecodeSSZ decodes the provided buffer into the Contribution instance.
+func (a *Contribution) DecodeSSZ(buf []byte, _ int) error {
+	if len(buf) < contributionStaticBufferSize {
+		return ssz.ErrLowBufferSize
+	}
+	copy((*a)[:], buf)
+	return nil
+}
+
+// EncodeSSZ encodes the Contribution instance into the provided buffer.
+func (a *Contribution) EncodeSSZ(dst []byte) ([]byte, error) {
+	buf := dst
+	buf = append(buf, (*a)[:]...)
+	return buf, nil
+}
+
+// CopyHashBufferTo copies the hash buffer of the Contribution instance to the provided byte slice.
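+// The 160-byte scratch buffer is laid out as five 32-byte leaves: slot,
+// beacon block root, subcommittee index, aggregation bits, and the merkle
+// root of the signature (which is hashed first, then moved into place).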
+func (a *Contribution) CopyHashBufferTo(o []byte) error {
+	for i := 0; i < 160; i++ {
+		o[i] = 0
+	}
+
+	// hash the signature first, then store its root in the fifth leaf
+	copy(o[:128], a[64:160])
+	if err := merkle_tree.InPlaceRoot(o); err != nil {
+		return err
+	}
+	copy(o[128:160], o[:32])
+
+	// re-zero the first four leaves before laying out the fixed fields, so
+	// each leaf is properly zero-padded
+	for i := 0; i < 128; i++ {
+		o[i] = 0
+	}
+	copy(o[:32], a[:8])
+	copy(o[32:64], a[8:40])
+	copy(o[64:96], a[40:48])
+	copy(o[96:128], a[48:64])
+	return nil
+}
+
+// HashSSZ hashes the Contribution instance using SSZ.
+// It creates a byte slice `leaves` with a size based on length.Hash,
+// then fills this slice with the values from the Contribution's hash buffer.
+func (a *Contribution) HashSSZ() (o [32]byte, err error) {
+	leaves := make([]byte, length.Hash*5)
+	if err = a.CopyHashBufferTo(leaves); err != nil {
+		return
+	}
+	err = merkle_tree.MerkleRootFromFlatLeaves(leaves, o[:])
+	return
+}
+
+// Clone creates a new clone of the Contribution instance.
+// This can be useful for creating copies without changing the original object.
+func (*Contribution) Clone() clonable.Clonable {
+	return &Contribution{}
+}
+
+type ContributionAndProof struct {
+	AggregatorIndex uint64            `json:"aggregator_index,string"`
+	Message         *Contribution     `json:"message"`
+	Signature       libcommon.Bytes96 `json:"selection_proof"`
+}
+
+// AggregatorIndex is part of the SSZ schema: encode, decode and hash must all
+// include it alongside the message and the selection proof.
+func (a *ContributionAndProof) EncodeSSZ(dst []byte) ([]byte, error) {
+	return ssz2.MarshalSSZ(dst, &a.AggregatorIndex, a.Message, a.Signature[:])
+}
+
+func (a *ContributionAndProof) DecodeSSZ(buf []byte, version int) error {
+	a.Message = new(Contribution)
+	return ssz2.UnmarshalSSZ(buf, version, &a.AggregatorIndex, a.Message, a.Signature[:])
+}
+
+func (a *ContributionAndProof) EncodingSizeSSZ() int {
+	return 104 + a.Message.EncodingSizeSSZ() // 8 (aggregator index) + 96 (selection proof)
+}
+
+func (a *ContributionAndProof) HashSSZ() ([32]byte, error) {
+	return merkle_tree.HashTreeRoot(&a.AggregatorIndex, a.Message, a.Signature[:])
+}
+
+type SignedContributionAndProof struct {
+	Message   *ContributionAndProof `json:"message"`
+	Signature libcommon.Bytes96     `json:"signature"`
+}
+
+func (a *SignedContributionAndProof) EncodeSSZ(dst []byte) ([]byte, error) {
+	return ssz2.MarshalSSZ(dst, a.Message, a.Signature[:])
+}
+
+func (a *SignedContributionAndProof) DecodeSSZ(buf []byte, version int) error {
+	a.Message = new(ContributionAndProof)
+	return ssz2.UnmarshalSSZ(buf, version, a.Message, a.Signature[:])
+}
+
+func (a *SignedContributionAndProof) EncodingSizeSSZ() int {
+	return 96 + a.Message.EncodingSizeSSZ() // message is fixed-size, so no offset is needed
+}
+
+func (a *SignedContributionAndProof) HashSSZ() ([32]byte, error) {
+	return merkle_tree.HashTreeRoot(a.Message, a.Signature[:])
+}
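+
+// For reference, assuming the fixed layouts above: a Contribution encodes to
+// 160 bytes, a ContributionAndProof to 8 + 160 + 96 = 264 bytes, and a
+// SignedContributionAndProof to 264 + 96 = 360 bytes.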
diff --git a/cl/cltypes/solid/pending_attestation.go b/cl/cltypes/solid/pending_attestation.go
index e17b48b07ad..02788f33089 100644
--- a/cl/cltypes/solid/pending_attestation.go
+++ b/cl/cltypes/solid/pending_attestation.go
@@ -115,8 +115,8 @@ func (a *PendingAttestation) MarshalJSON() ([]byte, error) {
 	return json.Marshal(struct {
 		AggregationBits hexutility.Bytes `json:"aggregation_bits"`
 		AttestationData AttestationData  `json:"attestation_data"`
-		InclusionDelay  uint64           `json:"inclusion_delay"`
-		ProposerIndex   uint64           `json:"proposer_index"`
+		InclusionDelay  uint64           `json:"inclusion_delay,string"`
+		ProposerIndex   uint64           `json:"proposer_index,string"`
 	}{
 		AggregationBits: a.AggregationBits(),
 		AttestationData: a.AttestantionData(),
@@ -130,8 +130,8 @@ func (a *PendingAttestation) UnmarshalJSON(input []byte) error {
 	var tmp struct {
 		AggregationBits hexutility.Bytes `json:"aggregation_bits"`
 		AttestationData AttestationData  `json:"attestation_data"`
-		InclusionDelay  uint64           `json:"inclusion_delay"`
-		ProposerIndex   uint64           `json:"proposer_index"`
+		InclusionDelay  uint64           `json:"inclusion_delay,string"`
+		ProposerIndex   uint64           `json:"proposer_index,string"`
 	}
 	if err = json.Unmarshal(input, &tmp); err != nil {
 		return err
diff --git a/cl/cltypes/solid/uint64_raw_list.go b/cl/cltypes/solid/uint64_raw_list.go
index b520700bfd8..e93b580a165 100644
--- a/cl/cltypes/solid/uint64_raw_list.go
+++ b/cl/cltypes/solid/uint64_raw_list.go
@@ -3,6 +3,7 @@ package solid
 import (
 	"encoding/binary"
 	"encoding/json"
+	"strconv"
 	libcommon "github.com/ledgerwatch/erigon-lib/common"
 	"github.com/ledgerwatch/erigon-lib/common/length"
@@ -154,14 +155,26 @@ func (arr *RawUint64List) Pop() uint64 {
 }
 func (arr *RawUint64List) MarshalJSON() ([]byte, error) {
-	return json.Marshal(arr.u)
+	// convert it to a list of strings; FormatUint is used because values such
+	// as FAR_FUTURE_EPOCH do not fit in an int64
+	strs := make([]string, len(arr.u))
+	for i, v := range arr.u {
+		strs[i] = strconv.FormatUint(v, 10)
+	}
+	return json.Marshal(strs)
 }
 func (arr *RawUint64List) UnmarshalJSON(data []byte) error {
-	arr.cachedHash = libcommon.Hash{}
-	if err := json.Unmarshal(data, &arr.u); err != nil {
+	arr.cachedHash = libcommon.Hash{} // invalidate any cached hash
+	var strs []string
+	if err := json.Unmarshal(data, &strs); err != nil {
 		return err
 	}
-	arr.c = len(arr.u)
+	arr.u = make([]uint64, len(strs))
+	for i, s := range strs {
+		v, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return err
+		}
+		arr.u[i] = v
+	}
 	return nil
 }
diff --git a/cl/cltypes/solid/uint64slice_byte.go b/cl/cltypes/solid/uint64slice_byte.go
index a642c6278c1..8cdaa7fa7fe 100644
--- a/cl/cltypes/solid/uint64slice_byte.go
+++ b/cl/cltypes/solid/uint64slice_byte.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"encoding/binary"
 	"encoding/json"
+	"strconv"
 	"github.com/ledgerwatch/erigon-lib/common"
 	"github.com/ledgerwatch/erigon-lib/common/length"
@@ -79,9 +80,9 @@ func (arr *byteBasedUint64Slice) CopyTo(target *byteBasedUint64Slice) {
 }
 func (arr *byteBasedUint64Slice) MarshalJSON() ([]byte, error) {
-	list := make([]uint64, arr.l)
+	list := make([]string, arr.l)
 	for i := 0; i < arr.l; i++ {
-		list[i] = arr.Get(i)
+		list[i] = strconv.FormatUint(arr.Get(i), 10)
 	}
 	return json.Marshal(list)
 }
diff --git a/cl/cltypes/solid/validator.go b/cl/cltypes/solid/validator.go
index 63353e1ee66..ce8e7245676 100644
--- a/cl/cltypes/solid/validator.go
+++ b/cl/cltypes/solid/validator.go
@@ -195,12 +195,12 @@ func (v Validator) MarshalJSON() ([]byte, error) {
 	return json.Marshal(struct {
 		PublicKey                  common.Bytes48 `json:"public_key"`
 		WithdrawalCredentials      common.Hash    `json:"withdrawal_credentials"`
-		EffectiveBalance           uint64         `json:"effective_balance"`
+		EffectiveBalance           uint64         `json:"effective_balance,string"`
 		Slashed                    bool           `json:"slashed"`
-		ActivationEligibilityEpoch uint64         `json:"activation_eligibility_epoch"`
-		ActivationEpoch            uint64         `json:"activation_epoch"`
-		ExitEpoch                  uint64         `json:"exit_epoch"`
-		WithdrawableEpoch          uint64         `json:"withdrawable_epoch"`
+		ActivationEligibilityEpoch uint64         `json:"activation_eligibility_epoch,string"`
+		ActivationEpoch            uint64         `json:"activation_epoch,string"`
+		ExitEpoch                  uint64         `json:"exit_epoch,string"`
+		WithdrawableEpoch          uint64         `json:"withdrawable_epoch,string"`
 	}{
 		PublicKey:             v.PublicKey(),
 		WithdrawalCredentials: v.WithdrawalCredentials(),
@@ -218,12 +218,12 @@ func (v *Validator) UnmarshalJSON(input []byte) error {
 	var tmp struct {
 		PublicKey                  common.Bytes48 `json:"public_key"`
 		WithdrawalCredentials      common.Hash    `json:"withdrawal_credentials"`
-		EffectiveBalance           uint64         `json:"effective_balance"`
+		EffectiveBalance           uint64         `json:"effective_balance,string"`
 		Slashed                    bool           `json:"slashed"`
-
ActivationEligibilityEpoch uint64 `json:"activation_eligibility_epoch"` - ActivationEpoch uint64 `json:"activation_epoch"` - ExitEpoch uint64 `json:"exit_epoch"` - WithdrawableEpoch uint64 `json:"withdrawable_epoch"` + ActivationEligibilityEpoch uint64 `json:"activation_eligibility_epoch,string"` + ActivationEpoch uint64 `json:"activation_epoch,string"` + ExitEpoch uint64 `json:"exit_epoch,string"` + WithdrawableEpoch uint64 `json:"withdrawable_epoch,string"` } if err = json.Unmarshal(input, &tmp); err != nil { return err diff --git a/cl/cltypes/validator.go b/cl/cltypes/validator.go index 7d328bf21cf..645b4fec5a0 100644 --- a/cl/cltypes/validator.go +++ b/cl/cltypes/validator.go @@ -18,7 +18,7 @@ const ( type DepositData struct { PubKey libcommon.Bytes48 `json:"pubkey"` WithdrawalCredentials libcommon.Hash `json:"withdrawal_credentials"` - Amount uint64 `json:"amount"` + Amount uint64 `json:"amount,string"` Signature libcommon.Bytes96 `json:"signature"` } @@ -72,8 +72,8 @@ func (d *Deposit) HashSSZ() ([32]byte, error) { } type VoluntaryExit struct { - Epoch uint64 `json:"epoch"` - ValidatorIndex uint64 `json:"validator_index"` + Epoch uint64 `json:"epoch,string"` + ValidatorIndex uint64 `json:"validator_index,string"` } func (e *VoluntaryExit) EncodeSSZ(buf []byte) ([]byte, error) { diff --git a/cl/cltypes/withdrawal.go b/cl/cltypes/withdrawal.go index 8923820b243..ffaae3234d9 100644 --- a/cl/cltypes/withdrawal.go +++ b/cl/cltypes/withdrawal.go @@ -11,10 +11,10 @@ import ( ) type Withdrawal struct { - Index uint64 `json:"index"` // monotonically increasing identifier issued by consensus layer - Validator uint64 `json:"validatorIndex"` // index of validator associated with withdrawal - Address libcommon.Address `json:"address"` // target address for withdrawn ether - Amount uint64 `json:"amount"` // value of withdrawal in GWei + Index uint64 `json:"index,string"` // monotonically increasing identifier issued by consensus layer + Validator uint64 `json:"validatorIndex,string"` // index of validator associated with withdrawal + Address libcommon.Address `json:"address"` // target address for withdrawn ether + Amount uint64 `json:"amount,string"` // value of withdrawal in GWei } func (obj *Withdrawal) EncodeSSZ(buf []byte) ([]byte, error) { diff --git a/cl/fork/fork_test.go b/cl/fork/fork_test.go index 8e272facf2f..f20fc90c9ac 100644 --- a/cl/fork/fork_test.go +++ b/cl/fork/fork_test.go @@ -56,7 +56,7 @@ func TestGoerliForkDigest(t *testing.T) { require.NoError(t, err) _, err = ComputeForkId(&beaconCfg, &genesisCfg) require.NoError(t, err) - require.Equal(t, [4]uint8{0x62, 0x89, 0x41, 0xef}, digest) + require.Equal(t, [4]uint8{0xa7, 0x5d, 0xcc, 0xf2}, digest) } func TestSepoliaForkDigest(t *testing.T) { @@ -66,7 +66,7 @@ func TestSepoliaForkDigest(t *testing.T) { require.NoError(t, err) _, err = ComputeForkId(&beaconCfg, &genesisCfg) require.NoError(t, err) - require.Equal(t, [4]uint8{0x47, 0xeb, 0x72, 0xb3}, digest) + require.Equal(t, [4]uint8{0xd3, 0x1f, 0x61, 0x91}, digest) } // ForkDigestVersion diff --git a/cl/gossip/gossip.go b/cl/gossip/gossip.go new file mode 100644 index 00000000000..05d335273ed --- /dev/null +++ b/cl/gossip/gossip.go @@ -0,0 +1,25 @@ +package gossip + +import ( + "strconv" + "strings" +) + +const ( + TopicNameBeaconBlock = "beacon_block" + TopicNameBeaconAggregateAndProof = "beacon_aggregate_and_proof" + TopicNameVoluntaryExit = "voluntary_exit" + TopicNameProposerSlashing = "proposer_slashing" + TopicNameAttesterSlashing = "attester_slashing" + 
TopicNameBlsToExecutionChange = "bls_to_execution_change"
+
+	TopicNamePrefixBlobSidecar = "blob_sidecar_"
+)
+
+func TopicNameBlobSidecar(d int) string {
+	return TopicNamePrefixBlobSidecar + strconv.Itoa(d)
+}
+
+func IsTopicBlobSidecar(d string) bool {
+	return strings.Contains(d, TopicNamePrefixBlobSidecar)
+}
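+
+// Example (illustrative only): TopicNameBlobSidecar(3) yields "blob_sidecar_3".
+// IsTopicBlobSidecar matches by substring, so it also recognizes the name when
+// it is embedded in a full gossip topic such as
+// "/eth2/<fork_digest>/blob_sidecar_3/ssz_snappy".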
diff --git a/cl/merkle_tree/merkle_root.go b/cl/merkle_tree/merkle_root.go
index ba231d4853f..a4b9791f966 100644
--- a/cl/merkle_tree/merkle_root.go
+++ b/cl/merkle_tree/merkle_root.go
@@ -10,6 +10,7 @@ import (
 	"github.com/ledgerwatch/erigon-lib/common"
 	"github.com/ledgerwatch/erigon-lib/common/length"
 	"github.com/ledgerwatch/erigon-lib/types/ssz"
+	"github.com/ledgerwatch/erigon/cl/utils"
 	"github.com/prysmaticlabs/gohashtree"
 )
@@ -116,3 +117,44 @@ func MerkleRootFromFlatLeaves(leaves []byte, out []byte) (err error) {
 func MerkleRootFromFlatLeavesWithLimit(leaves []byte, out []byte, limit uint64) (err error) {
 	return globalHasher.merkleizeTrieLeavesFlat(leaves, out, limit)
 }
+
+// MerkleProof computes the Merkle proof for a given schema of objects.
+func MerkleProof(depth, proofIndex int, schema ...interface{}) ([][32]byte, error) {
+	// Calculate the total number of leaves needed based on the schema length
+	maxDepth := GetDepth(uint64(len(schema)))
+	if utils.PowerOf2(uint64(maxDepth)) != uint64(len(schema)) {
+		maxDepth++
+	}
+
+	if depth != int(maxDepth) { // TODO: Add support for lower depths
+		return nil, fmt.Errorf("depth is different than maximum depth, have %d, want %d", depth, maxDepth)
+	}
+	var err error
+	proof := make([][32]byte, maxDepth)
+	currentSizeDepth := utils.PowerOf2(uint64(maxDepth))
+	for len(schema) != int(currentSizeDepth) { // Augment the schema to be a power of 2
+		schema = append(schema, make([]byte, 32))
+	}
+
+	for i := 0; i < depth; i++ {
+		// The index falls in the right half: hash the left half as the sibling
+		if proofIndex >= int(currentSizeDepth)/2 {
+			proof[depth-i-1], err = HashTreeRoot(schema[0 : currentSizeDepth/2]...)
+			if err != nil {
+				return nil, err
+			}
+			schema = schema[currentSizeDepth/2:] // explore the right branch
+			proofIndex -= int(currentSizeDepth) / 2
+			currentSizeDepth /= 2
+			continue
+		}
+		// The index falls in the left half: hash the right half as the sibling
+		proof[depth-i-1], err = HashTreeRoot(schema[currentSizeDepth/2:]...)
+		if err != nil {
+			return nil, err
+		}
+		schema = schema[0 : currentSizeDepth/2] // explore the left branch
+		currentSizeDepth /= 2
+	}
+	return proof, nil
+}
diff --git a/cl/persistence/base_encoding/primitives_test.go b/cl/persistence/base_encoding/primitives_test.go
index 11c80684ecf..a2b18d0c0fb 100644
--- a/cl/persistence/base_encoding/primitives_test.go
+++ b/cl/persistence/base_encoding/primitives_test.go
@@ -3,7 +3,6 @@ package base_encoding
 import (
 	"bytes"
 	"encoding/binary"
-	"fmt"
 	"testing"
 	"github.com/stretchr/testify/require"
@@ -70,7 +69,6 @@ func TestDiff64Effective(t *testing.T) {
 	out := b.Bytes()
 	new2, err := ApplyCompressedSerializedUint64ListDiff(previous, nil, out)
 	require.NoError(t, err)
-	fmt.Println(previous)
 	require.Equal(t, new2, expected)
 }
diff --git a/cl/persistence/base_encoding/rabbit.go b/cl/persistence/base_encoding/rabbit.go
new file mode 100644
index 00000000000..7478d17fd5b
--- /dev/null
+++ b/cl/persistence/base_encoding/rabbit.go
@@ -0,0 +1,86 @@
+package base_encoding
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+// WriteRabbits run-length encodes a sorted list of uint64s as alternating
+// counts of present and missing values, zstd-compressed.
+func WriteRabbits(in []uint64, w io.Writer) error {
+	// Retrieve a compressor first
+	compressor := compressorPool.Get().(*zstd.Encoder)
+	defer compressorPool.Put(compressor)
+	compressor.Reset(w)
+
+	expectedNum := uint64(0)
+	count := 0
+	// write length
+	if err := binary.Write(compressor, binary.LittleEndian, uint64(len(in))); err != nil {
+		return err
+	}
+	for _, element := range in {
+		if expectedNum != element {
+			// e.g. [1,2,5,6]
+			// write the contiguous run seen so far
+			if err := binary.Write(compressor, binary.LittleEndian, uint64(count)); err != nil {
+				return err
+			}
+			// write the gap to the next non-contiguous element
+			if err := binary.Write(compressor, binary.LittleEndian, element-expectedNum); err != nil {
+				return err
+			}
+			count = 0
+		}
+		count++
+		expectedNum = element + 1
+
+	}
+	// write the last contiguous run
+	if err := binary.Write(compressor, binary.LittleEndian, uint64(count)); err != nil {
+		return err
+	}
+	return compressor.Close()
+}
+
+func ReadRabbits(out []uint64, r io.Reader) ([]uint64, error) {
+	// Create a decompressor first
+	decompressor, err := zstd.NewReader(r)
+	if err != nil {
+		return nil, err
+	}
+	defer decompressor.Close()
+
+	var length uint64
+	if err := binary.Read(decompressor, binary.LittleEndian, &length); err != nil {
+		return nil, err
+	}
+
+	if cap(out) < int(length) {
+		out = make([]uint64, 0, length)
+	}
+	out = out[:0]
+	var count uint64
+	var current uint64
+	active := true
+	for err != io.EOF {
+		err = binary.Read(decompressor, binary.LittleEndian, &count)
+		if errors.Is(err, io.EOF) {
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+		if active {
+			for i := current; i < current+count; i++ {
+				out = append(out, i)
+			}
+			current += count
+		} else {
+			current += count
+		}
+		active = !active
+	}
+	return out, nil
+}
diff --git a/cl/persistence/base_encoding/rabbit_test.go b/cl/persistence/base_encoding/rabbit_test.go
new file mode 100644
index 00000000000..fe98fe3bc34
--- /dev/null
+++ b/cl/persistence/base_encoding/rabbit_test.go
@@ -0,0 +1,22 @@
+package base_encoding
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestRabbit(t *testing.T) {
+	list := []uint64{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 17, 23, 90}
+	var w bytes.Buffer
+	if err := WriteRabbits(list, &w); err != nil {
+		t.Fatal(err)
+	}
+	var out []uint64
+	out, err := ReadRabbits(out, &w)
+	if err != nil {
+		t.Fatal(err)
+	}
+	require.Equal(t, list, out)
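+
+	// A small extra check (not in the original test, assuming the encoding's
+	// length prefix): an empty list should round-trip as well.
+	w.Reset()
+	if err := WriteRabbits([]uint64{}, &w); err != nil {
+		t.Fatal(err)
+	}
+	out, err = ReadRabbits(out[:0], &w)
+	if err != nil {
+		t.Fatal(err)
+	}
+	require.Empty(t, out)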
+} diff --git a/cl/persistence/format/snapshot_format/blocks.go b/cl/persistence/format/snapshot_format/blocks.go index 3692a6de30b..11f68d4b8e5 100644 --- a/cl/persistence/format/snapshot_format/blocks.go +++ b/cl/persistence/format/snapshot_format/blocks.go @@ -156,3 +156,32 @@ func ReadBlockHeaderFromSnapshotWithExecutionData(r io.Reader, cfg *clparams.Bea blockHash := blindedBlock.Block.Body.ExecutionPayload.BlockHash return blockHeader, blockNumber, blockHash, nil } + +func ReadBlindedBlockFromSnapshot(r io.Reader, cfg *clparams.BeaconChainConfig) (*cltypes.SignedBlindedBeaconBlock, error) { + buffer := buffersPool.Get().(*bytes.Buffer) + defer buffersPool.Put(buffer) + buffer.Reset() + + blindedBlock := cltypes.NewSignedBlindedBeaconBlock(cfg) + + // Read the metadata + metadataSlab := make([]byte, 33) + v, _, err := readMetadataForBlock(r, metadataSlab) + if err != nil { + return nil, err + } + // Read the length + length := make([]byte, 8) + if _, err := io.ReadFull(r, length); err != nil { + return nil, err + } + // Read the block + if _, err := io.CopyN(buffer, r, int64(binary.BigEndian.Uint64(length))); err != nil { + return nil, err + } + // Decode the block in blinded + if err := blindedBlock.DecodeSSZ(buffer.Bytes(), int(v)); err != nil { + return nil, err + } + return blindedBlock, nil +} diff --git a/cl/persistence/format/snapshot_format/blocks_test.go b/cl/persistence/format/snapshot_format/blocks_test.go index 8c357fd4b01..eae38978810 100644 --- a/cl/persistence/format/snapshot_format/blocks_test.go +++ b/cl/persistence/format/snapshot_format/blocks_test.go @@ -74,12 +74,22 @@ func TestBlockSnapshotEncoding(t *testing.T) { require.NoError(t, err) hash3, err := header.HashSSZ() require.NoError(t, err) - + // now do it with blinded require.Equal(t, hash1, hash2) require.Equal(t, header.Signature, blk.Signature) require.Equal(t, header.Header.Slot, blk.Block.Slot) + b.Reset() + _, err = snapshot_format.WriteBlockForSnapshot(&b, blk, nil) + require.NoError(t, err) + blk4, err := snapshot_format.ReadBlindedBlockFromSnapshot(&b, &clparams.MainnetBeaconConfig) + require.NoError(t, err) + + hash4, err := blk4.HashSSZ() + require.NoError(t, err) + require.Equal(t, hash1, hash4) + if blk.Version() >= clparams.BellatrixVersion { require.Equal(t, bn, blk.Block.Body.ExecutionPayload.BlockNumber) require.Equal(t, bHash, blk.Block.Body.ExecutionPayload.BlockHash) diff --git a/cl/persistence/state/epoch_data.go b/cl/persistence/state/epoch_data.go new file mode 100644 index 00000000000..7b6ed7e963e --- /dev/null +++ b/cl/persistence/state/epoch_data.go @@ -0,0 +1,77 @@ +package state_accessors + +import ( + "encoding/binary" + "io" + + "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/cl/phase1/core/state" + ssz2 "github.com/ledgerwatch/erigon/cl/ssz" +) + +// EpochData stores the data for the epoch (valid throughout the epoch) +type EpochData struct { + TotalActiveBalance uint64 + JustificationBits *cltypes.JustificationBits + Fork *cltypes.Fork + CurrentJustifiedCheckpoint solid.Checkpoint + PreviousJustifiedCheckpoint solid.Checkpoint + FinalizedCheckpoint solid.Checkpoint + HistoricalSummariesLength uint64 + HistoricalRootsLength uint64 +} + +func EpochDataFromBeaconState(s *state.CachingBeaconState) *EpochData { + justificationCopy := &cltypes.JustificationBits{} + jj := s.JustificationBits() + copy(justificationCopy[:], jj[:]) + return &EpochData{ + Fork: s.Fork(), + JustificationBits: justificationCopy, + 
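+		// Everything in this struct is constant across the epoch; per-slot
+		// values are kept in the separate SlotData record.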
TotalActiveBalance:          s.GetTotalActiveBalance(),
+		CurrentJustifiedCheckpoint:  s.CurrentJustifiedCheckpoint(),
+		PreviousJustifiedCheckpoint: s.PreviousJustifiedCheckpoint(),
+		FinalizedCheckpoint:         s.FinalizedCheckpoint(),
+		HistoricalSummariesLength:   s.HistoricalSummariesLength(),
+		HistoricalRootsLength:       s.HistoricalRootsLength(),
+	}
+}
+
+// WriteTo serializes the epoch data into the writer, prefixed with its SSZ length.
+func (m *EpochData) WriteTo(w io.Writer) error {
+	buf, err := ssz2.MarshalSSZ(nil, m.getSchema()...)
+	if err != nil {
+		return err
+	}
+	lenB := make([]byte, 8)
+	binary.LittleEndian.PutUint64(lenB, uint64(len(buf)))
+	if _, err := w.Write(lenB); err != nil {
+		return err
+	}
+	_, err = w.Write(buf)
+	return err
+}
+
+// ReadFrom deserializes the epoch data from the reader, expecting the same length prefix.
+func (m *EpochData) ReadFrom(r io.Reader) error {
+	m.JustificationBits = &cltypes.JustificationBits{}
+	m.Fork = &cltypes.Fork{}
+	m.FinalizedCheckpoint = solid.NewCheckpoint()
+	m.CurrentJustifiedCheckpoint = solid.NewCheckpoint()
+	m.PreviousJustifiedCheckpoint = solid.NewCheckpoint()
+	lenB := make([]byte, 8)
+	if _, err := io.ReadFull(r, lenB); err != nil {
+		return err
+	}
+	payloadLen := binary.LittleEndian.Uint64(lenB) // named to avoid shadowing the builtin len
+	buf := make([]byte, payloadLen)
+	if _, err := io.ReadFull(r, buf); err != nil {
+		return err
+	}
+	return ssz2.UnmarshalSSZ(buf, 0, m.getSchema()...)
+}
+
+func (m *EpochData) getSchema() []interface{} {
+	return []interface{}{&m.TotalActiveBalance, m.JustificationBits, m.Fork, m.CurrentJustifiedCheckpoint, m.PreviousJustifiedCheckpoint, m.FinalizedCheckpoint, &m.HistoricalSummariesLength, &m.HistoricalRootsLength}
+}
diff --git a/cl/persistence/state/epoch_data_test.go b/cl/persistence/state/epoch_data_test.go
new file mode 100644
index 00000000000..95079cc58bb
--- /dev/null
+++ b/cl/persistence/state/epoch_data_test.go
@@ -0,0 +1,35 @@
+package state_accessors
+
+import (
+	"bytes"
+	"testing"
+
+	libcommon "github.com/ledgerwatch/erigon-lib/common"
+	"github.com/ledgerwatch/erigon/cl/cltypes"
+	"github.com/ledgerwatch/erigon/cl/cltypes/solid"
+	"github.com/stretchr/testify/require"
+)
+
+func TestEpochData(t *testing.T) {
+	e := &EpochData{
+		TotalActiveBalance:          123,
+		JustificationBits:           &cltypes.JustificationBits{true},
+		Fork:                        &cltypes.Fork{},
+		CurrentJustifiedCheckpoint:  solid.NewCheckpointFromParameters(libcommon.Hash{}, 123),
+		PreviousJustifiedCheckpoint: solid.NewCheckpointFromParameters(libcommon.Hash{}, 123),
+		FinalizedCheckpoint:         solid.NewCheckpointFromParameters(libcommon.Hash{}, 123),
+		HistoricalSummariesLength:   235,
+		HistoricalRootsLength:       345,
+	}
+	var b bytes.Buffer
+	if err := e.WriteTo(&b); err != nil {
+		t.Fatal(err)
+	}
+
+	e2 := &EpochData{}
+	if err := e2.ReadFrom(&b); err != nil {
+		t.Fatal(err)
+	}
+
+	require.Equal(t, e, e2)
+}
diff --git a/cl/persistence/state/historical_states_reader/attesting_indicies.go b/cl/persistence/state/historical_states_reader/attesting_indicies.go
index e3d5f717b59..22868b02248 100644
--- a/cl/persistence/state/historical_states_reader/attesting_indicies.go
+++ b/cl/persistence/state/historical_states_reader/attesting_indicies.go
@@ -3,20 +3,24 @@ package historical_states_reader
 import (
 	"fmt"
+	libcommon "github.com/ledgerwatch/erigon-lib/common"
+	"github.com/ledgerwatch/erigon-lib/kv"
 	"github.com/ledgerwatch/erigon/cl/clparams"
 	"github.com/ledgerwatch/erigon/cl/cltypes/solid"
+	"github.com/ledgerwatch/erigon/cl/persistence/base_encoding"
+	state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state"
"github.com/ledgerwatch/erigon/cl/phase1/core/state/shuffling" "github.com/ledgerwatch/erigon/cl/utils" ) -func (r *HistoricalStatesReader) attestingIndicies(attestation solid.AttestationData, aggregationBits []byte, checkBitsLength bool, randaoMixes solid.HashVectorSSZ, idxs []uint64) ([]uint64, error) { +func (r *HistoricalStatesReader) attestingIndicies(attestation solid.AttestationData, aggregationBits []byte, checkBitsLength bool, mix libcommon.Hash, idxs []uint64) ([]uint64, error) { slot := attestation.Slot() committeesPerSlot := committeeCount(r.cfg, slot/r.cfg.SlotsPerEpoch, idxs) committeeIndex := attestation.ValidatorIndex() index := (slot%r.cfg.SlotsPerEpoch)*committeesPerSlot + committeeIndex count := committeesPerSlot * r.cfg.SlotsPerEpoch - committee, err := r.computeCommittee(randaoMixes, idxs, attestation.Slot(), count, index) + committee, err := r.ComputeCommittee(mix, idxs, attestation.Slot(), count, index) if err != nil { return nil, err } @@ -40,7 +44,7 @@ func (r *HistoricalStatesReader) attestingIndicies(attestation solid.Attestation } // computeCommittee uses cache to compute compittee -func (r *HistoricalStatesReader) computeCommittee(randaoMixes solid.HashVectorSSZ, indicies []uint64, slot uint64, count, index uint64) ([]uint64, error) { +func (r *HistoricalStatesReader) ComputeCommittee(mix libcommon.Hash, indicies []uint64, slot uint64, count, index uint64) ([]uint64, error) { cfg := r.cfg lenIndicies := uint64(len(indicies)) @@ -48,12 +52,9 @@ func (r *HistoricalStatesReader) computeCommittee(randaoMixes solid.HashVectorSS end := (lenIndicies * (index + 1)) / count var shuffledIndicies []uint64 epoch := slot / cfg.SlotsPerEpoch - - mixPosition := (epoch + cfg.EpochsPerHistoricalVector - cfg.MinSeedLookahead - 1) % - cfg.EpochsPerHistoricalVector - // Input for the seed hash. 
- mix := randaoMixes.Get(int(mixPosition)) - + /* + mixPosition := (epoch + cfg.EpochsPerHistoricalVector - cfg.MinSeedLookahead - 1) % cfg.EpochsPerHistoricalVector + */ if shuffledIndicesInterface, ok := r.shuffledSetsCache.Get(epoch); ok { shuffledIndicies = shuffledIndicesInterface } else { @@ -75,3 +76,92 @@ func committeeCount(cfg *clparams.BeaconChainConfig, epoch uint64, idxs []uint64 } return committeCount } + +func (r *HistoricalStatesReader) readHistoricalBlockRoot(tx kv.Tx, slot, index uint64) (libcommon.Hash, error) { + slotSubIndex := slot % r.cfg.SlotsPerHistoricalRoot + needFromGenesis := true + + var slotLookup uint64 + if index <= slotSubIndex { + if slot > (slotSubIndex - index) { + slotLookup = slot - (slotSubIndex - index) + needFromGenesis = false + } + } else { + if slot > (slotSubIndex + (r.cfg.SlotsPerHistoricalRoot - index)) { + slotLookup = slot - (slotSubIndex + (r.cfg.SlotsPerHistoricalRoot - index)) + needFromGenesis = false + } + } + + if needFromGenesis { + return r.genesisState.GetBlockRootAtSlot(slot) + } + br, err := tx.GetOne(kv.BlockRoot, base_encoding.Encode64ToBytes4(slotLookup)) + if err != nil { + return libcommon.Hash{}, err + } + if len(br) != 32 { + return libcommon.Hash{}, fmt.Errorf("invalid block root length %d", len(br)) + } + return libcommon.BytesToHash(br), nil + +} + +func (r *HistoricalStatesReader) getAttestationParticipationFlagIndicies(tx kv.Tx, stateSlot uint64, data solid.AttestationData, inclusionDelay uint64, skipAssert bool) ([]uint8, error) { + currentCheckpoint, previousCheckpoint, _, err := state_accessors.ReadCheckpoints(tx, r.cfg.RoundSlotToEpoch(stateSlot)) + if err != nil { + return nil, err + } + if currentCheckpoint == nil { + currentCheckpoint = r.genesisState.CurrentJustifiedCheckpoint() + } + if previousCheckpoint == nil { + previousCheckpoint = r.genesisState.PreviousJustifiedCheckpoint() + } + + var justifiedCheckpoint solid.Checkpoint + // get checkpoint from epoch + if data.Target().Epoch() == stateSlot/r.cfg.SlotsPerEpoch { + justifiedCheckpoint = currentCheckpoint + } else { + justifiedCheckpoint = previousCheckpoint + } + // Matching roots + if !data.Source().Equal(justifiedCheckpoint) && !skipAssert { + // jsonify the data.Source and justifiedCheckpoint + jsonSource, err := data.Source().MarshalJSON() + if err != nil { + return nil, err + } + jsonJustifiedCheckpoint, err := justifiedCheckpoint.MarshalJSON() + if err != nil { + return nil, err + } + return nil, fmt.Errorf("GetAttestationParticipationFlagIndicies: source does not match. 
source: %s, justifiedCheckpoint: %s", jsonSource, jsonJustifiedCheckpoint) + } + i := (data.Target().Epoch() * r.cfg.SlotsPerEpoch) % r.cfg.SlotsPerHistoricalRoot + targetRoot, err := r.readHistoricalBlockRoot(tx, stateSlot, i) + if err != nil { + return nil, err + } + + i = data.Slot() % r.cfg.SlotsPerHistoricalRoot + headRoot, err := r.readHistoricalBlockRoot(tx, stateSlot, i) + if err != nil { + return nil, err + } + matchingTarget := data.Target().BlockRoot() == targetRoot + matchingHead := matchingTarget && data.BeaconBlockRoot() == headRoot + participationFlagIndicies := []uint8{} + if inclusionDelay <= utils.IntegerSquareRoot(r.cfg.SlotsPerEpoch) { + participationFlagIndicies = append(participationFlagIndicies, r.cfg.TimelySourceFlagIndex) + } + if matchingTarget && inclusionDelay <= r.cfg.SlotsPerEpoch { + participationFlagIndicies = append(participationFlagIndicies, r.cfg.TimelyTargetFlagIndex) + } + if matchingHead && inclusionDelay == r.cfg.MinAttestationInclusionDelay { + participationFlagIndicies = append(participationFlagIndicies, r.cfg.TimelyHeadFlagIndex) + } + return participationFlagIndicies, nil +} diff --git a/cl/persistence/state/historical_states_reader/historical_states_reader.go b/cl/persistence/state/historical_states_reader/historical_states_reader.go index 611bb1d1a63..5cf69b590b2 100644 --- a/cl/persistence/state/historical_states_reader/historical_states_reader.go +++ b/cl/persistence/state/historical_states_reader/historical_states_reader.go @@ -23,6 +23,8 @@ import ( "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" "github.com/spf13/afero" "golang.org/x/exp/slices" + + libcommon "github.com/ledgerwatch/erigon-lib/common" ) type HistoricalStatesReader struct { @@ -65,7 +67,7 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. return nil, fmt.Errorf("slot %d is greater than latest processed state %d", slot, latestProcessedState) } - if slot == 0 { + if slot == r.genesisState.Slot() { return r.genesisState.Copy() } // Read the current block (we need the block header) + other stuff @@ -74,26 +76,34 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. return nil, err } if block == nil { - return nil, fmt.Errorf("block at slot %d not found", slot) + return nil, nil } blockHeader := block.SignedBeaconBlockHeader().Header blockHeader.Root = common.Hash{} - // Read the minimal beacon state which have the small fields. - minimalBeaconState, err := state_accessors.ReadMinimalBeaconState(tx, slot) + // Read the epoch and per-slot data. + slotData, err := state_accessors.ReadSlotData(tx, slot) if err != nil { return nil, err } - // State not found - if minimalBeaconState == nil { + if slotData == nil { + return nil, nil + } + roundedSlot := r.cfg.RoundSlotToEpoch(slot) + + epochData, err := state_accessors.ReadEpochData(tx, roundedSlot) + if err != nil { + return nil, err + } + if epochData == nil { return nil, nil } // Versioning - ret.SetVersion(minimalBeaconState.Version) + ret.SetVersion(slotData.Version) ret.SetGenesisTime(r.genesisState.GenesisTime()) ret.SetGenesisValidatorsRoot(r.genesisState.GenesisValidatorsRoot()) ret.SetSlot(slot) - ret.SetFork(minimalBeaconState.Fork) + ret.SetFork(epochData.Fork) // History stateRoots, blockRoots := solid.NewHashVector(int(r.cfg.SlotsPerHistoricalRoot)), solid.NewHashVector(int(r.cfg.SlotsPerHistoricalRoot)) ret.SetLatestBlockHeader(blockHeader) @@ -109,7 +119,7 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. 
ret.SetStateRoots(stateRoots) historicalRoots := solid.NewHashList(int(r.cfg.HistoricalRootsLimit)) - if err := state_accessors.ReadHistoricalRoots(tx, minimalBeaconState.HistoricalRootsLength, func(idx int, root common.Hash) error { + if err := state_accessors.ReadHistoricalRoots(tx, epochData.HistoricalRootsLength, func(idx int, root common.Hash) error { historicalRoots.Append(root) return nil }); err != nil { @@ -119,12 +129,12 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. // Eth1 eth1DataVotes := solid.NewStaticListSSZ[*cltypes.Eth1Data](int(r.cfg.Eth1DataVotesLength()), 72) - if err := r.readEth1DataVotes(tx, minimalBeaconState.Eth1DataLength, slot, eth1DataVotes); err != nil { + if err := r.readEth1DataVotes(tx, slotData.Eth1DataLength, slot, eth1DataVotes); err != nil { return nil, err } ret.SetEth1DataVotes(eth1DataVotes) - ret.SetEth1Data(minimalBeaconState.Eth1Data) - ret.SetEth1DepositIndex(minimalBeaconState.Eth1DepositIndex) + ret.SetEth1Data(slotData.Eth1Data) + ret.SetEth1DepositIndex(slotData.Eth1DepositIndex) // Registry (Validators + Balances) balancesBytes, err := r.reconstructBalances(tx, slot, kv.ValidatorBalance) if err != nil { @@ -136,7 +146,7 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. } ret.SetBalances(balances) - validatorSet, currActiveIdxs, prevActiveIdxs, err := r.readValidatorsForHistoricalState(tx, slot, minimalBeaconState.ValidatorLength) + validatorSet, err := r.ReadValidatorsForHistoricalState(tx, slot) if err != nil { return nil, fmt.Errorf("failed to read validators: %w", err) } @@ -149,14 +159,14 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. ret.SetRandaoMixes(randaoMixes) slashingsVector := solid.NewUint64VectorSSZ(int(r.cfg.EpochsPerSlashingsVector)) // Slashings - err = r.reconstructUint64ListDump(tx, slot, kv.ValidatorSlashings, int(r.cfg.EpochsPerSlashingsVector), slashingsVector) + err = r.ReconstructUint64ListDump(tx, slot, kv.ValidatorSlashings, int(r.cfg.EpochsPerSlashingsVector), slashingsVector) if err != nil { return nil, fmt.Errorf("failed to read slashings: %w", err) } ret.SetSlashings(slashingsVector) // Finality - currentCheckpoint, previousCheckpoint, finalizedCheckpoint, err := state_accessors.ReadCheckpoints(tx, r.cfg.RoundSlotToEpoch(slot)) + currentCheckpoint, previousCheckpoint, finalizedCheckpoint, err := state_accessors.ReadCheckpoints(tx, roundedSlot) if err != nil { return nil, err } @@ -169,20 +179,20 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. 
if finalizedCheckpoint == nil { finalizedCheckpoint = r.genesisState.FinalizedCheckpoint() } - ret.SetJustificationBits(*minimalBeaconState.JustificationBits) + ret.SetJustificationBits(*epochData.JustificationBits) ret.SetPreviousJustifiedCheckpoint(previousCheckpoint) ret.SetCurrentJustifiedCheckpoint(currentCheckpoint) ret.SetFinalizedCheckpoint(finalizedCheckpoint) // Participation if ret.Version() == clparams.Phase0Version { - currentAtts, previousAtts, err := r.readPendingEpochs(tx, slot, minimalBeaconState.CurrentEpochAttestationsLength, minimalBeaconState.PreviousEpochAttestationsLength) + currentAtts, previousAtts, err := r.readPendingEpochs(tx, slot, slotData.CurrentEpochAttestationsLength, slotData.PreviousEpochAttestationsLength) if err != nil { return nil, fmt.Errorf("failed to read pending attestations: %w", err) } ret.SetCurrentEpochAttestations(currentAtts) ret.SetPreviousEpochAttestations(previousAtts) } else { - currentIdxs, previousIdxs, err := r.readPartecipations(tx, slot, minimalBeaconState.ValidatorLength, currActiveIdxs, prevActiveIdxs, ret, currentCheckpoint, previousCheckpoint) + currentIdxs, previousIdxs, err := r.ReadPartecipations(tx, slot) if err != nil { return nil, fmt.Errorf("failed to read participations: %w", err) } @@ -191,11 +201,11 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. } if ret.Version() < clparams.AltairVersion { - return ret, ret.InitBeaconState() + return ret, nil } inactivityScores := solid.NewUint64ListSSZ(int(r.cfg.ValidatorRegistryLimit)) // Inactivity - err = r.reconstructUint64ListDump(tx, slot, kv.InactivityScores, int(minimalBeaconState.ValidatorLength), inactivityScores) + err = r.ReconstructUint64ListDump(tx, slot, kv.InactivityScores, int(slotData.ValidatorLength), inactivityScores) if err != nil { return nil, fmt.Errorf("failed to read inactivity scores: %w", err) } @@ -222,7 +232,7 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. ret.SetNextSyncCommittee(nextSyncCommittee) // Execution if ret.Version() < clparams.BellatrixVersion { - return ret, ret.InitBeaconState() + return ret, nil } payloadHeader, err := block.Block.Body.ExecutionPayload.PayloadHeader() if err != nil { @@ -230,22 +240,22 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. 
} ret.SetLatestExecutionPayloadHeader(payloadHeader) if ret.Version() < clparams.CapellaVersion { - return ret, ret.InitBeaconState() + return ret, nil } // Withdrawals - ret.SetNextWithdrawalIndex(minimalBeaconState.NextWithdrawalIndex) - ret.SetNextWithdrawalValidatorIndex(minimalBeaconState.NextWithdrawalValidatorIndex) + ret.SetNextWithdrawalIndex(slotData.NextWithdrawalIndex) + ret.SetNextWithdrawalValidatorIndex(slotData.NextWithdrawalValidatorIndex) // Deep history valid from Capella onwards historicalSummaries := solid.NewStaticListSSZ[*cltypes.HistoricalSummary](int(r.cfg.HistoricalRootsLimit), 64) - if err := state_accessors.ReadHistoricalSummaries(tx, minimalBeaconState.HistoricalSummariesLength, func(idx int, historicalSummary *cltypes.HistoricalSummary) error { + if err := state_accessors.ReadHistoricalSummaries(tx, epochData.HistoricalSummariesLength, func(idx int, historicalSummary *cltypes.HistoricalSummary) error { historicalSummaries.Append(historicalSummary) return nil }); err != nil { return nil, fmt.Errorf("failed to read historical summaries: %w", err) } ret.SetHistoricalSummaries(historicalSummaries) - return ret, ret.InitBeaconState() + return ret, nil } func (r *HistoricalStatesReader) readHistoryHashVector(tx kv.Tx, genesisVector solid.HashVectorSSZ, slot, size uint64, table string, out solid.HashVectorSSZ) (err error) { @@ -480,7 +490,7 @@ func (r *HistoricalStatesReader) reconstructBalances(tx kv.Tx, slot uint64, diff return currentList, err } -func (r *HistoricalStatesReader) reconstructUint64ListDump(tx kv.Tx, slot uint64, bkt string, size int, out solid.Uint64ListSSZ) error { +func (r *HistoricalStatesReader) ReconstructUint64ListDump(tx kv.Tx, slot uint64, bkt string, size int, out solid.Uint64ListSSZ) error { diffCursor, err := tx.Cursor(bkt) if err != nil { return err @@ -520,44 +530,39 @@ func (r *HistoricalStatesReader) reconstructUint64ListDump(tx kv.Tx, slot uint64 return out.DecodeSSZ(currentList, 0) } -func (r *HistoricalStatesReader) readValidatorsForHistoricalState(tx kv.Tx, slot, validatorSetLength uint64) (*solid.ValidatorSet, []uint64, []uint64, error) { +func (r *HistoricalStatesReader) ReadValidatorsForHistoricalState(tx kv.Tx, slot uint64) (*solid.ValidatorSet, error) { + // Read the minimal beacon state which have the small fields. 
+ sd, err := state_accessors.ReadSlotData(tx, slot) + if err != nil { + return nil, err + } + // State not found + if sd == nil { + return nil, nil + } + validatorSetLength := sd.ValidatorLength + out := solid.NewValidatorSetWithLength(int(r.cfg.ValidatorRegistryLimit), int(validatorSetLength)) // Read the static validator field which are hot in memory (this is > 70% of the whole beacon state) - activeIds := make([]uint64, 0, validatorSetLength) - epoch := slot / r.cfg.SlotsPerEpoch - - prevActiveIds := make([]uint64, 0, validatorSetLength) - if epoch == 0 { - prevActiveIds = activeIds - } r.validatorTable.ForEach(func(validatorIndex uint64, validator *state_accessors.StaticValidator) bool { if validatorIndex >= validatorSetLength { return false } currValidator := out.Get(int(validatorIndex)) validator.ToValidator(currValidator, slot) - if currValidator.Active(epoch) { - activeIds = append(activeIds, validatorIndex) - } - if epoch == 0 { - return true - } - if currValidator.Active(epoch - 1) { - prevActiveIds = append(prevActiveIds, validatorIndex) - } return true }) // Read the balances bytesEffectiveBalances, err := r.reconstructDiffedUint64List(tx, slot, kv.ValidatorEffectiveBalance, "effective_balances") if err != nil { - return nil, nil, nil, err + return nil, err } for i := 0; i < int(validatorSetLength); i++ { out.Get(i). SetEffectiveBalanceFromBytes(bytesEffectiveBalances[(i * 8) : (i*8)+8]) } - return out, activeIds, prevActiveIds, nil + return out, nil } func (r *HistoricalStatesReader) readPendingEpochs(tx kv.Tx, slot uint64, currentEpochAttestationsLength, previousEpochAttestationsLength uint64) (*solid.ListSSZ[*solid.PendingAttestation], *solid.ListSSZ[*solid.PendingAttestation], error) { @@ -580,18 +585,42 @@ func (r *HistoricalStatesReader) readPendingEpochs(tx kv.Tx, slot uint64, curren } // readParticipations shuffles active indicies and returns the participation flags for the given epoch. -func (r *HistoricalStatesReader) readPartecipations(tx kv.Tx, slot uint64, validatorLength uint64, - currentActiveIndicies, previousActiveIndicies []uint64, ret *state.CachingBeaconState, - currentJustifiedCheckpoint, previousJustifiedCheckpoint solid.Checkpoint) (*solid.BitList, *solid.BitList, error) { - randaoMixes := ret.RandaoMixes() +func (r *HistoricalStatesReader) ReadPartecipations(tx kv.Tx, slot uint64) (*solid.BitList, *solid.BitList, error) { var beginSlot uint64 epoch, prevEpoch := r.computeRelevantEpochs(slot) beginSlot = prevEpoch * r.cfg.SlotsPerEpoch + currentActiveIndicies, err := state_accessors.ReadActiveIndicies(tx, epoch*r.cfg.SlotsPerEpoch) + if err != nil { + return nil, nil, err + } + var previousActiveIndicies []uint64 + if epoch == 0 { + previousActiveIndicies = currentActiveIndicies + } else { + previousActiveIndicies, err = state_accessors.ReadActiveIndicies(tx, (epoch-1)*r.cfg.SlotsPerEpoch) + if err != nil { + return nil, nil, err + } + } + + // Read the minimal beacon state which have the small fields. 
+ sd, err := state_accessors.ReadSlotData(tx, slot) + if err != nil { + return nil, nil, err + } + // State not found + if sd == nil { + return nil, nil, nil + } + validatorLength := sd.ValidatorLength + currentIdxs := solid.NewBitList(int(validatorLength), int(r.cfg.ValidatorRegistryLimit)) previousIdxs := solid.NewBitList(int(validatorLength), int(r.cfg.ValidatorRegistryLimit)) // trigger the cache for shuffled sets in parallel - r.tryCachingEpochsInParallell(randaoMixes, [][]uint64{currentActiveIndicies, previousActiveIndicies}, []uint64{epoch, prevEpoch}) + if err := r.tryCachingEpochsInParallell(tx, [][]uint64{currentActiveIndicies, previousActiveIndicies}, []uint64{epoch, prevEpoch}); err != nil { + return nil, nil, err + } // Read the previous idxs for i := beginSlot; i <= slot; i++ { // Read the block @@ -602,7 +631,6 @@ func (r *HistoricalStatesReader) readPartecipations(tx kv.Tx, slot uint64, valid if block == nil { continue } - ret.SetSlot(i) currentEpoch := i / r.cfg.SlotsPerEpoch // Read the participation flags @@ -624,13 +652,21 @@ func (r *HistoricalStatesReader) readPartecipations(tx kv.Tx, slot uint64, valid activeIndicies = previousActiveIndicies } + attestationEpoch := data.Slot() / r.cfg.SlotsPerEpoch + + mixPosition := (attestationEpoch + r.cfg.EpochsPerHistoricalVector - r.cfg.MinSeedLookahead - 1) % r.cfg.EpochsPerHistoricalVector + mix, err := r.ReadRandaoMixBySlotAndIndex(tx, data.Slot(), mixPosition) + if err != nil { + return false + } + var attestingIndicies []uint64 - attestingIndicies, err = r.attestingIndicies(attestation.AttestantionData(), attestation.AggregationBits(), true, randaoMixes, activeIndicies) + attestingIndicies, err = r.attestingIndicies(data, attestation.AggregationBits(), true, mix, activeIndicies) if err != nil { return false } var participationFlagsIndicies []uint8 - participationFlagsIndicies, err = ret.GetAttestationParticipationFlagIndicies(data, ret.Slot()-data.Slot(), true) + participationFlagsIndicies, err = r.getAttestationParticipationFlagIndicies(tx, i, data, i-data.Slot(), true) if err != nil { return false } @@ -670,15 +706,84 @@ func (r *HistoricalStatesReader) computeRelevantEpochs(slot uint64) (uint64, uin return epoch, epoch - 1 } -func (r *HistoricalStatesReader) tryCachingEpochsInParallell(randaoMixes solid.HashVectorSSZ, activeIdxs [][]uint64, epochs []uint64) { +func (r *HistoricalStatesReader) tryCachingEpochsInParallell(tx kv.Tx, activeIdxs [][]uint64, epochs []uint64) error { var wg sync.WaitGroup wg.Add(len(epochs)) for i, epoch := range epochs { - go func(epoch uint64, idxs []uint64) { + mixPosition := (epoch + r.cfg.EpochsPerHistoricalVector - r.cfg.MinSeedLookahead - 1) % r.cfg.EpochsPerHistoricalVector + mix, err := r.ReadRandaoMixBySlotAndIndex(tx, epochs[0]*r.cfg.SlotsPerEpoch, mixPosition) + if err != nil { + return err + } + + go func(mix libcommon.Hash, epoch uint64, idxs []uint64) { defer wg.Done() - _, _ = r.computeCommittee(randaoMixes, idxs, epoch*r.cfg.SlotsPerEpoch, r.cfg.TargetCommitteeSize, 0) - }(epoch, activeIdxs[i]) + + _, _ = r.ComputeCommittee(mix, idxs, epoch*r.cfg.SlotsPerEpoch, r.cfg.TargetCommitteeSize, 0) + }(mix, epoch, activeIdxs[i]) } wg.Wait() + return nil +} +func (r *HistoricalStatesReader) ReadValidatorsBalances(tx kv.Tx, slot uint64) (solid.Uint64ListSSZ, error) { + sd, err := state_accessors.ReadSlotData(tx, slot) + if err != nil { + return nil, err + } + // State not found + if sd == nil { + return nil, nil + } + + balances, err := r.reconstructBalances(tx, slot, 
kv.ValidatorBalance) + if err != nil { + return nil, err + } + balancesList := solid.NewUint64ListSSZ(int(r.cfg.ValidatorRegistryLimit)) + + return balancesList, balancesList.DecodeSSZ(balances, 0) +} + +func (r *HistoricalStatesReader) ReadRandaoMixBySlotAndIndex(tx kv.Tx, slot, index uint64) (libcommon.Hash, error) { + epoch := slot / r.cfg.SlotsPerEpoch + epochSubIndex := epoch % r.cfg.EpochsPerHistoricalVector + if index == epochSubIndex { + intraRandaoMix, err := tx.GetOne(kv.IntraRandaoMixes, base_encoding.Encode64ToBytes4(slot)) + if err != nil { + return libcommon.Hash{}, err + } + if len(intraRandaoMix) != 32 { + return libcommon.Hash{}, fmt.Errorf("invalid intra randao mix length %d", len(intraRandaoMix)) + } + return libcommon.BytesToHash(intraRandaoMix), nil + } + needFromGenesis := true + var epochLookup uint64 + if index <= epochSubIndex { + if epoch > (epochSubIndex - index) { + needFromGenesis = false + epochLookup = epoch - (epochSubIndex - index) + } + } else { + if epoch > (epochSubIndex + (r.cfg.EpochsPerHistoricalVector - index)) { + needFromGenesis = false + epochLookup = epoch - (epochSubIndex + (r.cfg.EpochsPerHistoricalVector - index)) + } + } + if epochLookup < r.genesisState.Slot()/r.cfg.SlotsPerEpoch { + needFromGenesis = true + } + + if needFromGenesis { + return r.genesisState.GetRandaoMixes(epoch), nil + } + mixBytes, err := tx.GetOne(kv.RandaoMixes, base_encoding.Encode64ToBytes4(epochLookup*r.cfg.SlotsPerEpoch)) + if err != nil { + return libcommon.Hash{}, err + } + if len(mixBytes) != 32 { + return libcommon.Hash{}, fmt.Errorf("invalid mix length %d", len(mixBytes)) + } + return libcommon.BytesToHash(mixBytes), nil } diff --git a/cl/persistence/state/historical_states_reader/historical_states_reader_test.go b/cl/persistence/state/historical_states_reader/historical_states_reader_test.go index c3e64b3ed11..cec53451589 100644 --- a/cl/persistence/state/historical_states_reader/historical_states_reader_test.go +++ b/cl/persistence/state/historical_states_reader/historical_states_reader_test.go @@ -21,12 +21,12 @@ import ( func runTest(t *testing.T, blocks []*cltypes.SignedBeaconBlock, preState, postState *state.CachingBeaconState) { db := memdb.NewTestDB(t) - reader, _ := tests.LoadChain(blocks, db, t) + reader, _ := tests.LoadChain(blocks, postState, db, t) ctx := context.Background() vt := state_accessors.NewStaticValidatorTable() f := afero.NewMemMapFs() - a := antiquary.NewAntiquary(ctx, preState, vt, &clparams.MainnetBeaconConfig, datadir.New("/tmp"), nil, db, nil, reader, nil, log.New(), true, f) + a := antiquary.NewAntiquary(ctx, preState, vt, &clparams.MainnetBeaconConfig, datadir.New("/tmp"), nil, db, nil, reader, nil, log.New(), true, true, f) require.NoError(t, a.IncrementBeaconState(ctx, blocks[len(blocks)-1].Block.Slot+33)) // Now lets test it against the reader tx, err := db.BeginRw(ctx) @@ -48,21 +48,18 @@ func runTest(t *testing.T, blocks []*cltypes.SignedBeaconBlock, preState, postSt func TestStateAntiquaryCapella(t *testing.T) { t.Skip() - //t.Skip() blocks, preState, postState := tests.GetCapellaRandom() runTest(t, blocks, preState, postState) } func TestStateAntiquaryPhase0(t *testing.T) { t.Skip() - // t.Skip() blocks, preState, postState := tests.GetPhase0Random() runTest(t, blocks, preState, postState) } func TestStateAntiquaryBellatrix(t *testing.T) { t.Skip() - // t.Skip() blocks, preState, postState := tests.GetBellatrixRandom() runTest(t, blocks, preState, postState) } diff --git a/cl/persistence/state/minimal_state.go 
b/cl/persistence/state/slot_data.go similarity index 68% rename from cl/persistence/state/minimal_state.go rename to cl/persistence/state/slot_data.go index b22923767b2..2d0e865bfc2 100644 --- a/cl/persistence/state/minimal_state.go +++ b/cl/persistence/state/slot_data.go @@ -7,11 +7,11 @@ import ( "github.com/ledgerwatch/erigon/cl/clparams" "github.com/ledgerwatch/erigon/cl/cltypes" - "github.com/ledgerwatch/erigon/cl/phase1/core/state/raw" + "github.com/ledgerwatch/erigon/cl/phase1/core/state" ssz2 "github.com/ledgerwatch/erigon/cl/ssz" ) -type MinimalBeaconState struct { +type SlotData struct { // Block Header and Execution Headers can be retrieved from block snapshots Version clparams.StateVersion // Lengths @@ -19,42 +19,39 @@ type MinimalBeaconState struct { Eth1DataLength uint64 PreviousEpochAttestationsLength uint64 CurrentEpochAttestationsLength uint64 - HistoricalSummariesLength uint64 - HistoricalRootsLength uint64 // Phase0 - Eth1Data *cltypes.Eth1Data - Eth1DepositIndex uint64 - JustificationBits *cltypes.JustificationBits - Fork *cltypes.Fork + Eth1Data *cltypes.Eth1Data + Eth1DepositIndex uint64 // Capella NextWithdrawalIndex uint64 NextWithdrawalValidatorIndex uint64 + + // BlockRewards for proposer + AttestationsRewards uint64 + SyncAggregateRewards uint64 + ProposerSlashings uint64 + AttesterSlashings uint64 } -func MinimalBeaconStateFromBeaconState(s *raw.BeaconState) *MinimalBeaconState { +func SlotDataFromBeaconState(s *state.CachingBeaconState) *SlotData { justificationCopy := &cltypes.JustificationBits{} jj := s.JustificationBits() copy(justificationCopy[:], jj[:]) - return &MinimalBeaconState{ - Fork: s.Fork(), + return &SlotData{ ValidatorLength: uint64(s.ValidatorLength()), Eth1DataLength: uint64(s.Eth1DataVotes().Len()), PreviousEpochAttestationsLength: uint64(s.PreviousEpochAttestations().Len()), CurrentEpochAttestationsLength: uint64(s.CurrentEpochAttestations().Len()), - HistoricalSummariesLength: s.HistoricalSummariesLength(), - HistoricalRootsLength: s.HistoricalRootsLength(), Version: s.Version(), Eth1Data: s.Eth1Data(), Eth1DepositIndex: s.Eth1DepositIndex(), - JustificationBits: justificationCopy, NextWithdrawalIndex: s.NextWithdrawalIndex(), NextWithdrawalValidatorIndex: s.NextWithdrawalValidatorIndex(), } - } // Serialize serializes the state into a byte slice with zstd compression. -func (m *MinimalBeaconState) WriteTo(w io.Writer) error { +func (m *SlotData) WriteTo(w io.Writer) error { buf, err := ssz2.MarshalSSZ(nil, m.getSchema()...) if err != nil { return err @@ -75,10 +72,8 @@ func (m *MinimalBeaconState) WriteTo(w io.Writer) error { } // Deserialize deserializes the state from a byte slice with zstd compression. -func (m *MinimalBeaconState) ReadFrom(r io.Reader) error { +func (m *SlotData) ReadFrom(r io.Reader) error { m.Eth1Data = &cltypes.Eth1Data{} - m.JustificationBits = &cltypes.JustificationBits{} - m.Fork = &cltypes.Fork{} var err error versionByte := make([]byte, 1) @@ -105,8 +100,8 @@ func (m *MinimalBeaconState) ReadFrom(r io.Reader) error { return ssz2.UnmarshalSSZ(buf, int(m.Version), m.getSchema()...) 
} -func (m *MinimalBeaconState) getSchema() []interface{} { - schema := []interface{}{m.Eth1Data, m.Fork, &m.Eth1DepositIndex, m.JustificationBits, &m.ValidatorLength, &m.Eth1DataLength, &m.PreviousEpochAttestationsLength, &m.CurrentEpochAttestationsLength, &m.HistoricalSummariesLength, &m.HistoricalRootsLength} +func (m *SlotData) getSchema() []interface{} { + schema := []interface{}{m.Eth1Data, &m.Eth1DepositIndex, &m.ValidatorLength, &m.Eth1DataLength, &m.PreviousEpochAttestationsLength, &m.CurrentEpochAttestationsLength, &m.AttestationsRewards, &m.SyncAggregateRewards, &m.ProposerSlashings, &m.AttesterSlashings} if m.Version >= clparams.CapellaVersion { schema = append(schema, &m.NextWithdrawalIndex, &m.NextWithdrawalValidatorIndex) } diff --git a/cl/persistence/state/minimal_state_test.go b/cl/persistence/state/slot_data_test.go similarity index 73% rename from cl/persistence/state/minimal_state_test.go rename to cl/persistence/state/slot_data_test.go index acfb7c22f07..6378d159a26 100644 --- a/cl/persistence/state/minimal_state_test.go +++ b/cl/persistence/state/slot_data_test.go @@ -9,13 +9,11 @@ import ( "github.com/stretchr/testify/require" ) -func TestMinimalState(t *testing.T) { - m := &MinimalBeaconState{ +func TestSlotData(t *testing.T) { + m := &SlotData{ Version: clparams.CapellaVersion, Eth1Data: &cltypes.Eth1Data{}, - Fork: &cltypes.Fork{}, Eth1DepositIndex: 0, - JustificationBits: &cltypes.JustificationBits{}, NextWithdrawalIndex: 0, NextWithdrawalValidatorIndex: 0, } @@ -23,7 +21,7 @@ func TestMinimalState(t *testing.T) { if err := m.WriteTo(&b); err != nil { t.Fatal(err) } - m2 := &MinimalBeaconState{} + m2 := &SlotData{} if err := m2.ReadFrom(&b); err != nil { t.Fatal(err) } diff --git a/cl/persistence/state/state_accessors.go b/cl/persistence/state/state_accessors.go index 893b17c7fce..234641de968 100644 --- a/cl/persistence/state/state_accessors.go +++ b/cl/persistence/state/state_accessors.go @@ -123,13 +123,16 @@ func ReadPublicKeyByIndex(tx kv.Tx, index uint64) (libcommon.Bytes48, error) { return ret, err } -func ReadValidatorIndexByPublicKey(tx kv.Tx, key libcommon.Bytes48) (uint64, error) { +func ReadValidatorIndexByPublicKey(tx kv.Tx, key libcommon.Bytes48) (uint64, bool, error) { var index []byte var err error if index, err = tx.GetOne(kv.InvertedValidatorPublicKeys, key[:]); err != nil { - return 0, err + return 0, false, err + } + if len(index) == 0 { + return 0, false, nil } - return base_encoding.Decode64FromBytes4(index), nil + return base_encoding.Decode64FromBytes4(index), true, nil } func GetStateProcessingProgress(tx kv.Tx) (uint64, error) { @@ -147,9 +150,23 @@ func SetStateProcessingProgress(tx kv.RwTx, progress uint64) error { return tx.Put(kv.StatesProcessingProgress, kv.StatesProcessingKey, base_encoding.Encode64ToBytes4(progress)) } -func ReadMinimalBeaconState(tx kv.Tx, slot uint64) (*MinimalBeaconState, error) { - minimalState := &MinimalBeaconState{} - v, err := tx.GetOne(kv.MinimalBeaconState, base_encoding.Encode64ToBytes4(slot)) +func ReadSlotData(tx kv.Tx, slot uint64) (*SlotData, error) { + sd := &SlotData{} + v, err := tx.GetOne(kv.SlotData, base_encoding.Encode64ToBytes4(slot)) + if err != nil { + return nil, err + } + if len(v) == 0 { + return nil, nil + } + buf := bytes.NewBuffer(v) + + return sd, sd.ReadFrom(buf) +} + +func ReadEpochData(tx kv.Tx, slot uint64) (*EpochData, error) { + ed := &EpochData{} + v, err := tx.GetOne(kv.EpochData, base_encoding.Encode64ToBytes4(slot)) if err != nil { return nil, err } @@ -158,20 +175,26 @@ 
func ReadMinimalBeaconState(tx kv.Tx, slot uint64) (*MinimalBeaconState, error) } buf := bytes.NewBuffer(v) - return minimalState, minimalState.ReadFrom(buf) + return ed, ed.ReadFrom(buf) } // ReadCheckpoints reads the checkpoints from the database, Current, Previous and Finalized func ReadCheckpoints(tx kv.Tx, slot uint64) (current solid.Checkpoint, previous solid.Checkpoint, finalized solid.Checkpoint, err error) { - v, err := tx.GetOne(kv.Checkpoints, base_encoding.Encode64ToBytes4(slot)) + ed := &EpochData{} + v, err := tx.GetOne(kv.EpochData, base_encoding.Encode64ToBytes4(slot)) if err != nil { return nil, nil, nil, err } if len(v) == 0 { return nil, nil, nil, nil } + buf := bytes.NewBuffer(v) + + if err := ed.ReadFrom(buf); err != nil { + return nil, nil, nil, err + } // Current, Pre - return solid.Checkpoint(v[0:40]), solid.Checkpoint(v[40:80]), solid.Checkpoint(v[80:120]), nil + return ed.CurrentJustifiedCheckpoint, ed.PreviousJustifiedCheckpoint, ed.FinalizedCheckpoint, nil } // ReadCheckpoints reads the checkpoints from the database, Current, Previous and Finalized @@ -240,7 +263,13 @@ func ReadCurrentEpochAttestations(tx kv.Tx, slot uint64, limit int) (*solid.List return nil, err } if len(v) == 0 { - return nil, nil + has, err := tx.Has(kv.CurrentEpochAttestations, base_encoding.Encode64ToBytes4(slot)) + if err != nil { + return nil, err + } + if !has { + return nil, nil + } } attestations := solid.NewDynamicListSSZ[*solid.PendingAttestation](limit) reader, err := zstd.NewReader(bytes.NewReader(v)) @@ -264,7 +293,13 @@ func ReadPreviousEpochAttestations(tx kv.Tx, slot uint64, limit int) (*solid.Lis return nil, err } if len(v) == 0 { - return nil, nil + has, err := tx.Has(kv.PreviousEpochAttestations, base_encoding.Encode64ToBytes4(slot)) + if err != nil { + return nil, err + } + if !has { + return nil, nil + } } attestations := solid.NewDynamicListSSZ[*solid.PendingAttestation](limit) reader, err := zstd.NewReader(bytes.NewReader(v)) @@ -309,5 +344,17 @@ func ReadValidatorsTable(tx kv.Tx, out *StaticValidatorTable) error { } out.slot = slot return err +} +func ReadActiveIndicies(tx kv.Tx, slot uint64) ([]uint64, error) { + key := base_encoding.Encode64ToBytes4(slot) + v, err := tx.GetOne(kv.ActiveValidatorIndicies, key) + if err != nil { + return nil, err + } + if len(v) == 0 { + return nil, nil + } + buf := bytes.NewBuffer(v) + return base_encoding.ReadRabbits(nil, buf) } diff --git a/cl/phase1/core/state/accessors.go b/cl/phase1/core/state/accessors.go index e5faa8e2eb8..90974b2cce8 100644 --- a/cl/phase1/core/state/accessors.go +++ b/cl/phase1/core/state/accessors.go @@ -1,6 +1,7 @@ package state import ( + "encoding/binary" "fmt" "github.com/Giulio2002/bls" @@ -28,6 +29,12 @@ func Epoch(b abstract.BeaconStateBasic) uint64 { return GetEpochAtSlot(b.BeaconConfig(), b.Slot()) } +func IsAggregator(cfg *clparams.BeaconChainConfig, committeeLength, slot, committeeIndex uint64, slotSignature libcommon.Bytes96) bool { + modulo := utils.Max64(1, committeeLength/cfg.TargetAggregatorsPerCommittee) + hashSlotSignatue := utils.Sha256(slotSignature[:]) + return binary.LittleEndian.Uint64(hashSlotSignatue[:8])%modulo == 0 +} + // GetTotalBalance return the sum of all balances within the given validator set. 
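The IsAggregator helper added above follows the spec's is_aggregator rule: take the SHA-256 of the validator's slot signature, read its first eight bytes as a little-endian integer, and select the validator when that value is divisible by max(1, committee_length / TARGET_AGGREGATORS_PER_COMMITTEE). A self-contained sketch of the same arithmetic, with hypothetical inputs (a zeroed signature and a committee of 130):

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

func main() {
	// Hypothetical committee of 130 members; mainnet targets 16 aggregators
	// per committee, so roughly one member in eight self-selects.
	const committeeLength uint64 = 130
	const targetAggregatorsPerCommittee uint64 = 16
	modulo := committeeLength / targetAggregatorsPerCommittee // 8
	if modulo < 1 {
		modulo = 1
	}
	var slotSignature [96]byte // placeholder for the validator's BLS slot signature
	digest := sha256.Sum256(slotSignature[:])
	isAggregator := binary.LittleEndian.Uint64(digest[:8])%modulo == 0
	fmt.Println("aggregator:", isAggregator)
}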
func GetTotalBalance(b abstract.BeaconStateBasic, validatorSet []uint64) (uint64, error) { var ( @@ -82,12 +89,9 @@ func InactivityLeaking(b abstract.BeaconState) bool { } // IsUnslashedParticipatingIndex -func IsUnslashedParticipatingIndex(b abstract.BeaconState, epoch, index uint64, flagIdx int) bool { - validator, err := b.ValidatorForValidatorIndex(int(index)) - if err != nil { - return false - } - return validator.Active(epoch) && cltypes.ParticipationFlags(b.EpochParticipation(false).Get(int(index))).HasFlag(flagIdx) && !validator.Slashed() +func IsUnslashedParticipatingIndex(validatorSet *solid.ValidatorSet, previousEpochParticipation *solid.BitList, epoch, index uint64, flagIdx int) bool { + validator := validatorSet.Get(int(index)) + return validator.Active(epoch) && cltypes.ParticipationFlags(previousEpochParticipation.Get(int(index))).HasFlag(flagIdx) && !validator.Slashed() } // EligibleValidatorsIndicies Implementation of get_eligible_validator_indices as defined in the eth 2.0 specs. diff --git a/cl/phase1/core/state/cache.go b/cl/phase1/core/state/cache.go index fcd410f080d..63ada362378 100644 --- a/cl/phase1/core/state/cache.go +++ b/cl/phase1/core/state/cache.go @@ -334,11 +334,16 @@ func readUint64WithBuffer(r io.Reader, buf []byte, out *uint64) error { // internal encoding/decoding algos func (b *CachingBeaconState) encodeActiveValidatorsCache(w io.Writer, num []byte) error { - keys := b.activeValidatorsCache.Keys() - lists := make([][]uint64, len(keys)) - - for i, key := range keys { - lists[i], _ = b.activeValidatorsCache.Get(key) + keysA := b.activeValidatorsCache.Keys() + keys := make([]uint64, 0, len(keysA)) + lists := make([][]uint64, 0, len(keys)) + for _, key := range keysA { + l, ok := b.activeValidatorsCache.Get(key) + if !ok || len(l) == 0 { + continue + } + keys = append(keys, key) + lists = append(lists, l) } // Write the total length if err := writeUint64WithBuffer(w, uint64(len(keys)), num); err != nil { @@ -396,11 +401,17 @@ func (b *CachingBeaconState) decodeActiveValidatorsCache(r io.Reader, num []byte // internal encoding/decoding algos func (b *CachingBeaconState) encodeShuffledSetsCache(w io.Writer, num []byte) error { - keys := b.shuffledSetsCache.Keys() - lists := make([][]uint64, len(keys)) - - for i, key := range keys { - lists[i], _ = b.shuffledSetsCache.Get(key) + keysA := b.shuffledSetsCache.Keys() + keys := make([]common.Hash, 0, len(keysA)) + lists := make([][]uint64, 0, len(keys)) + + for _, key := range keysA { + l, ok := b.shuffledSetsCache.Get(key) + if !ok || len(l) == 0 { + continue + } + keys = append(keys, key) + lists = append(lists, l) } // Write the total length if err := writeUint64WithBuffer(w, uint64(len(keys)), num); err != nil { diff --git a/cl/phase1/core/state/cache_accessors.go b/cl/phase1/core/state/cache_accessors.go index 99f2f17a9bb..80685ec5a24 100644 --- a/cl/phase1/core/state/cache_accessors.go +++ b/cl/phase1/core/state/cache_accessors.go @@ -171,7 +171,10 @@ func (b *CachingBeaconState) GetAttestationParticipationFlagIndicies(data solid. 
if inclusionDelay <= utils.IntegerSquareRoot(b.BeaconConfig().SlotsPerEpoch) { participationFlagIndicies = append(participationFlagIndicies, b.BeaconConfig().TimelySourceFlagIndex) } - if matchingTarget && inclusionDelay <= b.BeaconConfig().SlotsPerEpoch { + if b.Version() < clparams.DenebVersion && matchingTarget && inclusionDelay <= b.BeaconConfig().SlotsPerEpoch { + participationFlagIndicies = append(participationFlagIndicies, b.BeaconConfig().TimelyTargetFlagIndex) + } + if b.Version() >= clparams.DenebVersion && matchingTarget { participationFlagIndicies = append(participationFlagIndicies, b.BeaconConfig().TimelyTargetFlagIndex) } if matchingHead && inclusionDelay == b.BeaconConfig().MinAttestationInclusionDelay { @@ -295,3 +298,11 @@ func (b *CachingBeaconState) GetValidatorChurnLimit() uint64 { activeIndsCount := uint64(len(b.GetActiveValidatorsIndices(Epoch(b)))) return utils.Max64(activeIndsCount/b.BeaconConfig().ChurnLimitQuotient, b.BeaconConfig().MinPerEpochChurnLimit) } + +// https://github.com/ethereum/consensus-specs/blob/dev/specs/deneb/beacon-chain.md#new-get_validator_activation_churn_limit +func (b *CachingBeaconState) GetValidatorActivationChurnLimit() uint64 { + if b.Version() >= clparams.DenebVersion { + return utils.Min64(b.BeaconConfig().MaxPerEpochActivationChurnLimit, b.GetValidatorChurnLimit()) + } + return b.GetValidatorChurnLimit() +} diff --git a/cl/phase1/core/state/cache_mutators.go b/cl/phase1/core/state/cache_mutators.go index 05f9080c91a..b5ee40db296 100644 --- a/cl/phase1/core/state/cache_mutators.go +++ b/cl/phase1/core/state/cache_mutators.go @@ -16,46 +16,46 @@ func (b *CachingBeaconState) getSlashingProposerReward(whistleBlowerReward uint6 return whistleBlowerReward * b.BeaconConfig().ProposerWeight / b.BeaconConfig().WeightDenominator } -func (b *CachingBeaconState) SlashValidator(slashedInd uint64, whistleblowerInd *uint64) error { +func (b *CachingBeaconState) SlashValidator(slashedInd uint64, whistleblowerInd *uint64) (uint64, error) { epoch := Epoch(b) if err := b.InitiateValidatorExit(slashedInd); err != nil { - return err + return 0, err } // Record changes in changeset slashingsIndex := int(epoch % b.BeaconConfig().EpochsPerSlashingsVector) // Change the validator to be slashed if err := b.SetValidatorSlashed(int(slashedInd), true); err != nil { - return err + return 0, err } currentWithdrawableEpoch, err := b.ValidatorWithdrawableEpoch(int(slashedInd)) if err != nil { - return err + return 0, err } newWithdrawableEpoch := utils.Max64(currentWithdrawableEpoch, epoch+b.BeaconConfig().EpochsPerSlashingsVector) if err := b.SetWithdrawableEpochForValidatorAtIndex(int(slashedInd), newWithdrawableEpoch); err != nil { - return err + return 0, err } // Update slashings vector currentEffectiveBalance, err := b.ValidatorEffectiveBalance(int(slashedInd)) if err != nil { - return err + return 0, err } b.SetSlashingSegmentAt(slashingsIndex, b.SlashingSegmentAt(slashingsIndex)+currentEffectiveBalance) newEffectiveBalance, err := b.ValidatorEffectiveBalance(int(slashedInd)) if err != nil { - return err + return 0, err } if err := DecreaseBalance(b, slashedInd, newEffectiveBalance/b.BeaconConfig().GetMinSlashingPenaltyQuotient(b.Version())); err != nil { - return err + return 0, err } proposerInd, err := b.GetBeaconProposerIndex() if err != nil { - return fmt.Errorf("unable to get beacon proposer index: %v", err) + return 0, fmt.Errorf("unable to get beacon proposer index: %v", err) } if whistleblowerInd == nil { whistleblowerInd = new(uint64) @@ -64,9 
+64,13 @@ func (b *CachingBeaconState) SlashValidator(slashedInd uint64, whistleblowerInd whistleBlowerReward := newEffectiveBalance / b.BeaconConfig().WhistleBlowerRewardQuotient proposerReward := b.getSlashingProposerReward(whistleBlowerReward) if err := IncreaseBalance(b, proposerInd, proposerReward); err != nil { - return err + return 0, err + } + rewardWhist := whistleBlowerReward - proposerReward + if whistleblowerInd == nil { + proposerReward += rewardWhist } - return IncreaseBalance(b, *whistleblowerInd, whistleBlowerReward-proposerReward) + return proposerReward, IncreaseBalance(b, *whistleblowerInd, whistleBlowerReward-proposerReward) } func (b *CachingBeaconState) InitiateValidatorExit(index uint64) error { diff --git a/cl/phase1/core/state/raw/getters.go b/cl/phase1/core/state/raw/getters.go index a1676714b28..1d234a647a0 100644 --- a/cl/phase1/core/state/raw/getters.go +++ b/cl/phase1/core/state/raw/getters.go @@ -84,6 +84,18 @@ func (b *BeaconState) Eth1DepositIndex() uint64 { return b.eth1DepositIndex } +func (b *BeaconState) ValidatorSet() *solid.ValidatorSet { + return b.validators +} + +func (b *BeaconState) PreviousEpochParticipation() *solid.BitList { + return b.previousEpochParticipation +} + +func (b *BeaconState) CurrentEpochParticipation() *solid.BitList { + return b.currentEpochParticipation +} + func (b *BeaconState) ValidatorLength() int { return b.validators.Length() } diff --git a/cl/phase1/core/state/raw/hashing.go b/cl/phase1/core/state/raw/hashing.go index 72c840fbf35..7be41d67830 100644 --- a/cl/phase1/core/state/raw/hashing.go +++ b/cl/phase1/core/state/raw/hashing.go @@ -23,6 +23,45 @@ func (b *BeaconState) HashSSZ() (out [32]byte, err error) { return } +func (b *BeaconState) CurrentSyncCommitteeBranch() ([][32]byte, error) { + if err := b.computeDirtyLeaves(); err != nil { + return nil, err + } + schema := []interface{}{} + for i := 0; i < len(b.leaves); i += 32 { + schema = append(schema, b.leaves[i:i+32]) + } + return merkle_tree.MerkleProof(5, 22, schema...) +} + +func (b *BeaconState) NextSyncCommitteeBranch() ([][32]byte, error) { + if err := b.computeDirtyLeaves(); err != nil { + return nil, err + } + schema := []interface{}{} + for i := 0; i < len(b.leaves); i += 32 { + schema = append(schema, b.leaves[i:i+32]) + } + return merkle_tree.MerkleProof(5, 23, schema...) +} + +func (b *BeaconState) FinalityRootBranch() ([][32]byte, error) { + if err := b.computeDirtyLeaves(); err != nil { + return nil, err + } + schema := []interface{}{} + for i := 0; i < len(b.leaves); i += 32 { + schema = append(schema, b.leaves[i:i+32]) + } + proof, err := merkle_tree.MerkleProof(5, 20, schema...) + if err != nil { + return nil, err + } + + proof = append([][32]byte{merkle_tree.Uint64Root(b.finalizedCheckpoint.Epoch())}, proof...) 
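+	// MerkleProof targets field 20 (finalized_checkpoint) of the 32-leaf state
+	// header; prepending the epoch leaf extends the branch one level down to
+	// checkpoint.root, whose only merkle sibling is the epoch.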
+ return proof, nil +} + func preparateRootsForHashing(roots []common.Hash) [][32]byte { ret := make([][32]byte, len(roots)) for i := range roots { diff --git a/cl/phase1/core/state/raw/state.go b/cl/phase1/core/state/raw/state.go index 3f6533e4e92..f84eade855a 100644 --- a/cl/phase1/core/state/raw/state.go +++ b/cl/phase1/core/state/raw/state.go @@ -2,6 +2,7 @@ package raw import ( "encoding/json" + "strconv" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/cl/clparams" @@ -105,9 +106,9 @@ func (b *BeaconState) init() error { func (b *BeaconState) MarshalJSON() ([]byte, error) { obj := map[string]interface{}{ - "genesis_time": b.genesisTime, + "genesis_time": strconv.FormatInt(int64(b.genesisTime), 10), "genesis_validators_root": b.genesisValidatorsRoot, - "slot": b.slot, + "slot": strconv.FormatInt(int64(b.slot), 10), "fork": b.fork, "latest_block_header": b.latestBlockHeader, "block_roots": b.blockRoots, @@ -115,7 +116,7 @@ func (b *BeaconState) MarshalJSON() ([]byte, error) { "historical_roots": b.historicalRoots, "eth1_data": b.eth1Data, "eth1_data_votes": b.eth1DataVotes, - "eth1_deposit_index": b.eth1DepositIndex, + "eth1_deposit_index": strconv.FormatInt(int64(b.eth1DepositIndex), 10), "validators": b.validators, "balances": b.balances, "randao_mixes": b.randaoMixes, @@ -141,8 +142,8 @@ func (b *BeaconState) MarshalJSON() ([]byte, error) { obj["latest_execution_payload_header"] = b.latestExecutionPayloadHeader } if b.version >= clparams.CapellaVersion { - obj["next_withdrawal_index"] = b.nextWithdrawalIndex - obj["next_withdrawal_validator_index"] = b.nextWithdrawalValidatorIndex + obj["next_withdrawal_index"] = strconv.FormatInt(int64(b.nextWithdrawalIndex), 10) + obj["next_withdrawal_validator_index"] = strconv.FormatInt(int64(b.nextWithdrawalValidatorIndex), 10) obj["historical_summaries"] = b.historicalSummaries } return json.Marshal(obj) diff --git a/cl/phase1/core/state/utils_test.go b/cl/phase1/core/state/utils_test.go index 61fca829fed..8a1ac83e6c3 100644 --- a/cl/phase1/core/state/utils_test.go +++ b/cl/phase1/core/state/utils_test.go @@ -14,9 +14,10 @@ import ( func TestValidatorSlashing(t *testing.T) { state := New(&clparams.MainnetBeaconConfig) utils.DecodeSSZSnappy(state, stateEncoded, int(clparams.DenebVersion)) - - require.NoError(t, state.SlashValidator(1, nil)) - require.NoError(t, state.SlashValidator(2, nil)) + _, err := state.SlashValidator(1, nil) + require.NoError(t, err) + _, err = state.SlashValidator(2, nil) + require.NoError(t, err) exit, err := state.BeaconState.ValidatorExitEpoch(1) require.NoError(t, err) diff --git a/cl/phase1/forkchoice/checkpoint_state.go b/cl/phase1/forkchoice/checkpoint_state.go index e5937fc3cc5..612068945dc 100644 --- a/cl/phase1/forkchoice/checkpoint_state.go +++ b/cl/phase1/forkchoice/checkpoint_state.go @@ -69,7 +69,7 @@ func newCheckpointState(beaconConfig *clparams.BeaconChainConfig, anchorPublicKe // Add the post-anchor public keys as surplus for i := len(anchorPublicKeys) / length.Bytes48; i < len(validatorSet); i++ { pos := i - len(anchorPublicKeys)/length.Bytes48 - copy(publicKeys[pos*length.Bytes48:], validatorSet[i].PublicKeyBytes()) + copy(publicKeys[pos*length.Bytes48:(pos+1)*length.Bytes48], validatorSet[i].PublicKeyBytes()) } mixes := solid.NewHashVector(randaoMixesLength) @@ -166,11 +166,11 @@ func (c *checkpointState) isValidIndexedAttestation(att *cltypes.IndexedAttestat pks := [][]byte{} inds.Range(func(_ int, v uint64, _ int) bool { - if v < uint64(len(c.anchorPublicKeys)) { + if v < 
uint64(len(c.anchorPublicKeys))/length.Bytes48 { pks = append(pks, c.anchorPublicKeys[v*length.Bytes48:(v+1)*length.Bytes48]) } else { offset := uint64(len(c.anchorPublicKeys) / length.Bytes48) - pks = append(pks, c.publicKeys[(v-offset)*length.Bytes48:]) + pks = append(pks, c.publicKeys[(v-offset)*length.Bytes48:(v-offset+1)*length.Bytes48]) } return true }) diff --git a/cl/phase1/forkchoice/fork_choice_test.go b/cl/phase1/forkchoice/fork_choice_test.go index 4b5c5d81975..fa3559490d3 100644 --- a/cl/phase1/forkchoice/fork_choice_test.go +++ b/cl/phase1/forkchoice/fork_choice_test.go @@ -3,13 +3,16 @@ package forkchoice_test import ( "context" _ "embed" + "fmt" "testing" + "github.com/ledgerwatch/erigon/cl/antiquary/tests" "github.com/ledgerwatch/erigon/cl/cltypes/solid" "github.com/ledgerwatch/erigon/cl/phase1/core/state" "github.com/ledgerwatch/erigon/cl/phase1/forkchoice" "github.com/ledgerwatch/erigon/cl/phase1/forkchoice/fork_graph" "github.com/ledgerwatch/erigon/cl/pool" + "github.com/ledgerwatch/erigon/cl/transition" "github.com/spf13/afero" libcommon "github.com/ledgerwatch/erigon-lib/common" @@ -89,7 +92,7 @@ func TestForkChoiceBasic(t *testing.T) { require.Equal(t, headSlot, uint64(3)) require.Equal(t, headRoot, libcommon.HexToHash("0x744cc484f6503462f0f3a5981d956bf4fcb3e57ab8687ed006467e05049ee033")) // lastly do attestation - require.NoError(t, store.OnAttestation(testAttestation, false)) + require.NoError(t, store.OnAttestation(testAttestation, false, false)) // Try processing a voluntary exit err = store.OnVoluntaryExit(&cltypes.SignedVoluntaryExit{ VoluntaryExit: &cltypes.VoluntaryExit{ @@ -107,3 +110,42 @@ func TestForkChoiceBasic(t *testing.T) { require.NoError(t, err) require.Equal(t, len(pool.VoluntaryExistsPool.Raw()), 1) } + +func TestForkChoiceChainBellatrix(t *testing.T) { + blocks, anchorState, _ := tests.GetBellatrixRandom() + + intermediaryState, err := anchorState.Copy() + require.NoError(t, err) + + intermediaryBlockRoot := blocks[0].Block.ParentRoot + for i := 0; i < 35; i++ { + require.NoError(t, transition.TransitionState(intermediaryState, blocks[i], nil, false)) + intermediaryBlockRoot, err = blocks[i].Block.HashSSZ() + require.NoError(t, err) + } + // Initialize forkchoice store + pool := pool.NewOperationsPool(&clparams.MainnetBeaconConfig) + store, err := forkchoice.NewForkChoiceStore(context.Background(), anchorState, nil, nil, pool, fork_graph.NewForkGraphDisk(anchorState, afero.NewMemMapFs())) + store.OnTick(2000) + require.NoError(t, err) + for _, block := range blocks { + require.NoError(t, store.OnBlock(block, false, true)) + } + root1, err := blocks[20].Block.HashSSZ() + require.NoError(t, err) + + rewards, ok := store.BlockRewards(libcommon.Hash(root1)) + require.True(t, ok) + require.Equal(t, rewards.Attestations, uint64(0x511ad)) + // test randao mix + mixes := solid.NewHashVector(int(clparams.MainnetBeaconConfig.EpochsPerHistoricalVector)) + require.True(t, store.RandaoMixes(intermediaryBlockRoot, mixes)) + for i := 0; i < mixes.Length(); i++ { + require.Equal(t, mixes.Get(i), intermediaryState.RandaoMixes().Get(i), fmt.Sprintf("mixes mismatch at index %d, have: %x, expected: %x", i, mixes.Get(i), intermediaryState.RandaoMixes().Get(i))) + } + currentIntermediarySyncCommittee, nextIntermediarySyncCommittee, ok := store.GetSyncCommittees(intermediaryBlockRoot) + require.True(t, ok) + + require.Equal(t, intermediaryState.CurrentSyncCommittee(), currentIntermediarySyncCommittee) + require.Equal(t, intermediaryState.NextSyncCommittee(), 
nextIntermediarySyncCommittee) +} diff --git a/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go b/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go index 927230fcdaa..1030cba9014 100644 --- a/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go +++ b/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go @@ -12,6 +12,7 @@ import ( "github.com/ledgerwatch/erigon/cl/cltypes/solid" "github.com/ledgerwatch/erigon/cl/phase1/core/state" "github.com/ledgerwatch/erigon/cl/transition" + "github.com/ledgerwatch/erigon/cl/transition/impl/eth2" "github.com/ledgerwatch/log/v3" "github.com/spf13/afero" "golang.org/x/exp/slices" @@ -82,6 +83,8 @@ type forkGraphDisk struct { // for each block root we also keep track of te equivalent current justified and finalized checkpoints for faster head retrieval. currentJustifiedCheckpoints map[libcommon.Hash]solid.Checkpoint finalizedCheckpoints map[libcommon.Hash]solid.Checkpoint + // keep track of rewards too + blockRewards map[libcommon.Hash]*eth2.BlockRewardsCollector // for each block root we keep track of the sync committees for head retrieval. syncCommittees map[libcommon.Hash]syncCommittees @@ -89,7 +92,7 @@ type forkGraphDisk struct { beaconCfg *clparams.BeaconChainConfig genesisTime uint64 // highest block seen - highestSeen, anchorSlot uint64 + highestSeen, lowestAvaiableSlot, anchorSlot uint64 // reusable buffers sszBuffer bytes.Buffer @@ -127,10 +130,12 @@ func NewForkGraphDisk(anchorState *state.CachingBeaconState, aferoFs afero.Fs) F // checkpoints trackers currentJustifiedCheckpoints: make(map[libcommon.Hash]solid.Checkpoint), finalizedCheckpoints: make(map[libcommon.Hash]solid.Checkpoint), + blockRewards: make(map[libcommon.Hash]*eth2.BlockRewardsCollector), // configuration - beaconCfg: anchorState.BeaconConfig(), - genesisTime: anchorState.GenesisTime(), - anchorSlot: anchorState.Slot(), + beaconCfg: anchorState.BeaconConfig(), + genesisTime: anchorState.GenesisTime(), + anchorSlot: anchorState.Slot(), + lowestAvaiableSlot: anchorState.Slot(), } f.dumpBeaconStateOnDisk(anchorState, anchorRoot) return f @@ -173,8 +178,9 @@ func (f *forkGraphDisk) AddChainSegment(signedBlock *cltypes.SignedBeaconBlock, return nil, MissingSegment, nil } + blockRewardsCollector := ð2.BlockRewardsCollector{} // Execute the state - if invalidBlockErr := transition.TransitionState(newState, signedBlock, fullValidation); invalidBlockErr != nil { + if invalidBlockErr := transition.TransitionState(newState, signedBlock, blockRewardsCollector, fullValidation); invalidBlockErr != nil { // Add block to list of invalid blocks log.Debug("Invalid beacon block", "reason", invalidBlockErr) f.badBlocks[blockRoot] = struct{}{} @@ -188,6 +194,8 @@ func (f *forkGraphDisk) AddChainSegment(signedBlock *cltypes.SignedBeaconBlock, return nil, InvalidBlock, invalidBlockErr } + + f.blockRewards[blockRoot] = blockRewardsCollector f.syncCommittees[blockRoot] = syncCommittees{ currentSyncCommittee: newState.CurrentSyncCommittee().Copy(), nextSyncCommittee: newState.NextSyncCommittee().Copy(), @@ -316,7 +324,7 @@ func (f *forkGraphDisk) GetStateAtSlot(slot uint64, alwaysCopy bool) (*state.Cac // Traverse the blocks from top to bottom. 
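+	// Replay passes a nil BlockRewardsCollector: rewards are recorded once,
+	// when AddChainSegment first executes a block, not when states are rebuilt.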
for _, block := range blocksInTheWay { - if err := transition.TransitionState(copyReferencedState, block, false); err != nil { + if err := transition.TransitionState(copyReferencedState, block, nil, false); err != nil { return nil, err } } @@ -365,7 +373,7 @@ func (f *forkGraphDisk) GetState(blockRoot libcommon.Hash, alwaysCopy bool) (*st // Traverse the blocks from top to bottom. for i := len(blocksInTheWay) - 1; i >= 0; i-- { - if err := transition.TransitionState(copyReferencedState, blocksInTheWay[i], false); err != nil { + if err := transition.TransitionState(copyReferencedState, blocksInTheWay[i], nil, false); err != nil { return nil, err } } @@ -395,6 +403,7 @@ func (f *forkGraphDisk) Prune(pruneSlot uint64) (err error) { } oldRoots = append(oldRoots, hash) } + f.lowestAvaiableSlot = pruneSlot + 1 for _, root := range oldRoots { delete(f.badBlocks, root) delete(f.blocks, root) @@ -403,6 +412,7 @@ func (f *forkGraphDisk) Prune(pruneSlot uint64) (err error) { delete(f.headers, root) delete(f.saveStates, root) delete(f.syncCommittees, root) + delete(f.blockRewards, root) f.fs.Remove(getBeaconStateFilename(root)) f.fs.Remove(getBeaconStateCacheFilename(root)) } @@ -417,3 +427,12 @@ func (f *forkGraphDisk) GetSyncCommittees(blockRoot libcommon.Hash) (*solid.Sync } return obj.currentSyncCommittee, obj.nextSyncCommittee, true } + +func (f *forkGraphDisk) GetBlockRewards(blockRoot libcommon.Hash) (*eth2.BlockRewardsCollector, bool) { + obj, has := f.blockRewards[blockRoot] + return obj, has +} + +func (f *forkGraphDisk) LowestAvaiableSlot() uint64 { + return f.lowestAvaiableSlot +} diff --git a/cl/phase1/forkchoice/fork_graph/interface.go b/cl/phase1/forkchoice/fork_graph/interface.go index dcd2e345307..23d9e106040 100644 --- a/cl/phase1/forkchoice/fork_graph/interface.go +++ b/cl/phase1/forkchoice/fork_graph/interface.go @@ -5,6 +5,7 @@ import ( "github.com/ledgerwatch/erigon/cl/cltypes" "github.com/ledgerwatch/erigon/cl/cltypes/solid" "github.com/ledgerwatch/erigon/cl/phase1/core/state" + "github.com/ledgerwatch/erigon/cl/transition/impl/eth2" ) /* @@ -27,6 +28,8 @@ type ForkGraph interface { MarkHeaderAsInvalid(blockRoot libcommon.Hash) AnchorSlot() uint64 Prune(uint64) error + GetBlockRewards(blockRoot libcommon.Hash) (*eth2.BlockRewardsCollector, bool) + LowestAvaiableSlot() uint64 // extra methods for validator api GetStateAtSlot(slot uint64, alwaysCopy bool) (*state.CachingBeaconState, error) diff --git a/cl/phase1/forkchoice/forkchoice.go b/cl/phase1/forkchoice/forkchoice.go index 160d53ecd27..689c8240d3a 100644 --- a/cl/phase1/forkchoice/forkchoice.go +++ b/cl/phase1/forkchoice/forkchoice.go @@ -2,7 +2,9 @@ package forkchoice import ( "context" + "sort" "sync" + "sync/atomic" "github.com/ledgerwatch/erigon/cl/clparams" "github.com/ledgerwatch/erigon/cl/cltypes/solid" @@ -12,6 +14,7 @@ import ( "github.com/ledgerwatch/erigon/cl/phase1/execution_client" "github.com/ledgerwatch/erigon/cl/phase1/forkchoice/fork_graph" "github.com/ledgerwatch/erigon/cl/pool" + "github.com/ledgerwatch/erigon/cl/transition/impl/eth2" "golang.org/x/exp/slices" lru "github.com/hashicorp/golang-lru/v2" @@ -19,6 +22,31 @@ import ( "github.com/ledgerwatch/erigon-lib/common/length" ) +// Schema +/* +{ + "slot": "1", + "block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", + "parent_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", + "justified_epoch": "1", + "finalized_epoch": "1", + "weight": "1", + "validity": "valid", + "execution_block_hash": 
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", + "extra_data": {} + } +*/ +type ForkNode struct { + Slot uint64 `json:"slot,string"` + BlockRoot libcommon.Hash `json:"block_root"` + ParentRoot libcommon.Hash `json:"parent_root"` + JustifiedEpoch uint64 `json:"justified_epoch,string"` + FinalizedEpoch uint64 `json:"finalized_epoch,string"` + Weight uint64 `json:"weight,string"` + Validity string `json:"validity"` + ExecutionBlock libcommon.Hash `json:"execution_block_hash"` +} + type checkpointComparable string const ( @@ -26,6 +54,11 @@ const ( allowedCachedStates = 8 ) +type randaoDelta struct { + epoch uint64 + delta libcommon.Hash +} + type finalityCheckpoints struct { finalizedCheckpoint solid.Checkpoint currentJustifiedCheckpoint solid.Checkpoint @@ -47,23 +80,35 @@ type ForkChoiceStore struct { unrealizedJustifiedCheckpoint solid.Checkpoint unrealizedFinalizedCheckpoint solid.Checkpoint proposerBoostRoot libcommon.Hash - headHash libcommon.Hash - headSlot uint64 - genesisTime uint64 - childrens map[libcommon.Hash]childrens + // attestations that are not yet processed + attestationSet sync.Map + // head data + headHash libcommon.Hash + headSlot uint64 + genesisTime uint64 + weights map[libcommon.Hash]uint64 + headSet map[libcommon.Hash]struct{} + // childrens + childrens map[libcommon.Hash]childrens // Use go map because this is actually an unordered set - equivocatingIndicies map[uint64]struct{} + equivocatingIndicies []byte forkGraph fork_graph.ForkGraph // I use the cache due to the convenient auto-cleanup feauture. checkpointStates map[checkpointComparable]*checkpointState // We keep ssz snappy of it as the full beacon state is full of rendundant data. - latestMessages map[uint64]*LatestMessage + latestMessages []LatestMessage anchorPublicKeys []byte // We keep track of them so that we can forkchoice with EL. eth2Roots *lru.Cache[libcommon.Hash, libcommon.Hash] // ETH2 root -> ETH1 hash // preverifid sizes and other data collection preverifiedSizes *lru.Cache[libcommon.Hash, preverifiedAppendListsSizes] finalityCheckpoints *lru.Cache[libcommon.Hash, finalityCheckpoints] + totalActiveBalances *lru.Cache[libcommon.Hash, uint64] + // Randao mixes + randaoMixesLists *lru.Cache[libcommon.Hash, solid.HashListSSZ] // limited randao mixes full list (only 16 elements) + randaoDeltas *lru.Cache[libcommon.Hash, randaoDelta] // small entry can be lots of elements. 
+ // participation tracking + participation *lru.Cache[uint64, *solid.BitList] // epoch -> [partecipation] mu sync.Mutex // EL @@ -73,6 +118,8 @@ type ForkChoiceStore struct { // operations pool operationsPool pool.OperationsPool beaconCfg *clparams.BeaconChainConfig + + synced atomic.Bool } type LatestMessage struct { @@ -101,6 +148,16 @@ func NewForkChoiceStore(ctx context.Context, anchorState *state2.CachingBeaconSt return nil, err } + randaoMixesLists, err := lru.New[libcommon.Hash, solid.HashListSSZ](allowedCachedStates) + if err != nil { + return nil, err + } + + randaoDeltas, err := lru.New[libcommon.Hash, randaoDelta](checkpointsPerCache) + if err != nil { + return nil, err + } + finalityCheckpoints, err := lru.New[libcommon.Hash, finalityCheckpoints](checkpointsPerCache) if err != nil { return nil, err @@ -125,6 +182,24 @@ func NewForkChoiceStore(ctx context.Context, anchorState *state2.CachingBeaconSt historicalSummariesLength: anchorState.HistoricalSummariesLength(), }) + totalActiveBalances, err := lru.New[libcommon.Hash, uint64](checkpointsPerCache * 10) + if err != nil { + return nil, err + } + + participation, err := lru.New[uint64, *solid.BitList](16) + if err != nil { + return nil, err + } + + participation.Add(state.Epoch(anchorState.BeaconState), anchorState.CurrentEpochParticipation().Copy()) + + totalActiveBalances.Add(anchorRoot, anchorState.GetTotalActiveBalance()) + r := solid.NewHashVector(int(anchorState.BeaconConfig().EpochsPerHistoricalVector)) + anchorState.RandaoMixes().CopyTo(r) + randaoMixesLists.Add(anchorRoot, r) + headSet := make(map[libcommon.Hash]struct{}) + headSet[anchorRoot] = struct{}{} return &ForkChoiceStore{ ctx: ctx, highestSeen: anchorState.Slot(), @@ -134,8 +209,8 @@ func NewForkChoiceStore(ctx context.Context, anchorState *state2.CachingBeaconSt unrealizedJustifiedCheckpoint: anchorCheckpoint.Copy(), unrealizedFinalizedCheckpoint: anchorCheckpoint.Copy(), forkGraph: forkGraph, - equivocatingIndicies: map[uint64]struct{}{}, - latestMessages: map[uint64]*LatestMessage{}, + equivocatingIndicies: make([]byte, anchorState.ValidatorLength(), anchorState.ValidatorLength()*2), + latestMessages: make([]LatestMessage, anchorState.ValidatorLength(), anchorState.ValidatorLength()*2), checkpointStates: make(map[checkpointComparable]*checkpointState), eth2Roots: eth2Roots, engine: engine, @@ -147,6 +222,12 @@ func NewForkChoiceStore(ctx context.Context, anchorState *state2.CachingBeaconSt childrens: make(map[libcommon.Hash]childrens), preverifiedSizes: preverifiedSizes, finalityCheckpoints: finalityCheckpoints, + totalActiveBalances: totalActiveBalances, + randaoMixesLists: randaoMixesLists, + randaoDeltas: randaoDeltas, + headSet: headSet, + weights: make(map[libcommon.Hash]uint64), + participation: participation, }, nil } @@ -226,7 +307,7 @@ func (f *ForkChoiceStore) FinalizedCheckpoint() solid.Checkpoint { func (f *ForkChoiceStore) FinalizedSlot() uint64 { f.mu.Lock() defer f.mu.Unlock() - return f.computeStartSlotAtEpoch(f.finalizedCheckpoint.Epoch()) + return f.computeStartSlotAtEpoch(f.finalizedCheckpoint.Epoch()) + (f.beaconCfg.SlotsPerEpoch - 1) } // FinalizedCheckpoint returns justified checkpoint @@ -302,3 +383,106 @@ func (f *ForkChoiceStore) GetSyncCommittees(blockRoot libcommon.Hash) (*solid.Sy defer f.mu.Unlock() return f.forkGraph.GetSyncCommittees(blockRoot) } + +func (f *ForkChoiceStore) BlockRewards(root libcommon.Hash) (*eth2.BlockRewardsCollector, bool) { + f.mu.Lock() + defer f.mu.Unlock() + return f.forkGraph.GetBlockRewards(root) +} 
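The ForkNode type declared earlier in this file tags its numeric fields with `,string`, so the fork-choice endpoint emits them as quoted decimals, matching the schema comment above the struct. A minimal round-trip over a trimmed, hypothetical stand-in for the struct:

package main

import (
	"encoding/json"
	"fmt"
)

// forkNode is a trimmed stand-in for ForkNode; the real struct also carries
// the parent root, justified/finalized epochs, validity and execution hash.
type forkNode struct {
	Slot   uint64 `json:"slot,string"`
	Weight uint64 `json:"weight,string"`
}

func main() {
	out, _ := json.Marshal(forkNode{Slot: 1, Weight: 42})
	fmt.Println(string(out)) // {"slot":"1","weight":"42"}
}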
+ +func (f *ForkChoiceStore) TotalActiveBalance(root libcommon.Hash) (uint64, bool) { + return f.totalActiveBalances.Get(root) +} + +func (f *ForkChoiceStore) LowestAvaiableSlot() uint64 { + f.mu.Lock() + defer f.mu.Unlock() + return f.forkGraph.LowestAvaiableSlot() +} + +func (f *ForkChoiceStore) RandaoMixes(blockRoot libcommon.Hash, out solid.HashListSSZ) bool { + f.mu.Lock() + defer f.mu.Unlock() + relevantDeltas := map[uint64]randaoDelta{} + currentBlockRoot := blockRoot + var currentSlot uint64 + for { + h, ok := f.forkGraph.GetHeader(currentBlockRoot) + if !ok { + return false + } + currentSlot = h.Slot + if f.randaoMixesLists.Contains(currentBlockRoot) { + break + } + randaoDelta, ok := f.randaoDeltas.Get(currentBlockRoot) + if !ok { + return false + } + currentBlockRoot = h.ParentRoot + if _, ok := relevantDeltas[currentSlot/f.beaconCfg.SlotsPerEpoch]; !ok { + relevantDeltas[currentSlot/f.beaconCfg.SlotsPerEpoch] = randaoDelta + } + } + randaoMixes, ok := f.randaoMixesLists.Get(currentBlockRoot) + if !ok { + return false + } + randaoMixes.CopyTo(out) + for epoch, delta := range relevantDeltas { + out.Set(int(epoch%f.beaconCfg.EpochsPerHistoricalVector), delta.delta) + } + return true +} + +func (f *ForkChoiceStore) Partecipation(epoch uint64) (*solid.BitList, bool) { + return f.participation.Get(epoch) +} + +func (f *ForkChoiceStore) ForkNodes() []ForkNode { + f.mu.Lock() + defer f.mu.Unlock() + forkNodes := make([]ForkNode, 0, len(f.weights)) + for blockRoot, weight := range f.weights { + header, has := f.forkGraph.GetHeader(blockRoot) + if !has { + continue + } + justifiedCheckpoint, has := f.forkGraph.GetCurrentJustifiedCheckpoint(blockRoot) + if !has { + continue + } + finalizedCheckpoint, has := f.forkGraph.GetFinalizedCheckpoint(blockRoot) + if !has { + continue + } + blockHash, _ := f.eth2Roots.Get(blockRoot) + + forkNodes = append(forkNodes, ForkNode{ + Weight: weight, + BlockRoot: blockRoot, + ParentRoot: header.ParentRoot, + JustifiedEpoch: justifiedCheckpoint.Epoch(), + FinalizedEpoch: finalizedCheckpoint.Epoch(), + Slot: header.Slot, + Validity: "valid", + ExecutionBlock: blockHash, + }) + } + sort.Slice(forkNodes, func(i, j int) bool { + return forkNodes[i].Slot < forkNodes[j].Slot + }) + return forkNodes +} + +func (f *ForkChoiceStore) Synced() bool { + f.mu.Lock() + defer f.mu.Unlock() + return f.synced.Load() +} + +func (f *ForkChoiceStore) SetSynced(s bool) { + f.mu.Lock() + defer f.mu.Unlock() + f.synced.Store(s) +} diff --git a/cl/phase1/forkchoice/forkchoice_mock.go b/cl/phase1/forkchoice/forkchoice_mock.go new file mode 100644 index 00000000000..665b0f12621 --- /dev/null +++ b/cl/phase1/forkchoice/forkchoice_mock.go @@ -0,0 +1,235 @@ +package forkchoice + +import ( + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/cl/phase1/core/state" + "github.com/ledgerwatch/erigon/cl/phase1/execution_client" + "github.com/ledgerwatch/erigon/cl/pool" + "github.com/ledgerwatch/erigon/cl/transition/impl/eth2" +) + +// type ForkChoiceStorage interface { +// ForkChoiceStorageWriter +// ForkChoiceStorageReader +// } + +// type ForkChoiceStorageReader interface { +// Ancestor(root common.Hash, slot uint64) common.Hash +// AnchorSlot() uint64 +// Engine() execution_client.ExecutionEngine +// FinalizedCheckpoint() solid.Checkpoint +// FinalizedSlot() uint64 +// GetEth1Hash(eth2Root common.Hash) common.Hash +// GetHead() (common.Hash, uint64, error) +// 
HighestSeen() uint64
+// JustifiedCheckpoint() solid.Checkpoint
+// JustifiedSlot() uint64
+// ProposerBoostRoot() common.Hash
+// GetStateAtBlockRoot(blockRoot libcommon.Hash, alwaysCopy bool) (*state.CachingBeaconState, error)
+// GetFinalityCheckpoints(blockRoot libcommon.Hash) (bool, solid.Checkpoint, solid.Checkpoint, solid.Checkpoint)
+// GetSyncCommittees(blockRoot libcommon.Hash) (*solid.SyncCommittee, *solid.SyncCommittee, bool)
+// Slot() uint64
+// Time() uint64
+
+// GetStateAtSlot(slot uint64, alwaysCopy bool) (*state.CachingBeaconState, error)
+// GetStateAtStateRoot(root libcommon.Hash, alwaysCopy bool) (*state.CachingBeaconState, error)
+// }
+
+// type ForkChoiceStorageWriter interface {
+//	OnAttestation(attestation *solid.Attestation, fromBlock bool) error
+//	OnAttesterSlashing(attesterSlashing *cltypes.AttesterSlashing, test bool) error
+//	OnBlock(block *cltypes.SignedBeaconBlock, newPayload bool, fullValidation bool) error
+//	OnTick(time uint64)
+// }
+
+// Make mocks with maps and simple setters and getters, panic on methods from ForkChoiceStorageWriter
+
+type ForkChoiceStorageMock struct {
+	Ancestors              map[uint64]common.Hash
+	AnchorSlotVal          uint64
+	FinalizedCheckpointVal solid.Checkpoint
+	FinalizedSlotVal       uint64
+	HeadVal                common.Hash
+	HeadSlotVal            uint64
+	HighestSeenVal         uint64
+	JustifiedCheckpointVal solid.Checkpoint
+	JustifiedSlotVal       uint64
+	ProposerBoostRootVal   common.Hash
+	SlotVal                uint64
+	TimeVal                uint64
+
+	ParticipationVal *solid.BitList
+
+	StateAtBlockRootVal       map[common.Hash]*state.CachingBeaconState
+	StateAtSlotVal            map[uint64]*state.CachingBeaconState
+	GetSyncCommitteesVal      map[common.Hash][2]*solid.SyncCommittee
+	GetFinalityCheckpointsVal map[common.Hash][3]solid.Checkpoint
+	WeightsMock               []ForkNode
+
+	Pool pool.OperationsPool
+}
+
+func NewForkChoiceStorageMock() *ForkChoiceStorageMock {
+	return &ForkChoiceStorageMock{
+		Ancestors:                 make(map[uint64]common.Hash),
+		AnchorSlotVal:             0,
+		FinalizedCheckpointVal:    solid.Checkpoint{},
+		FinalizedSlotVal:          0,
+		HeadVal:                   common.Hash{},
+		HighestSeenVal:            0,
+		JustifiedCheckpointVal:    solid.Checkpoint{},
+		JustifiedSlotVal:          0,
+		ProposerBoostRootVal:      common.Hash{},
+		SlotVal:                   0,
+		TimeVal:                   0,
+		StateAtBlockRootVal:       make(map[common.Hash]*state.CachingBeaconState),
+		StateAtSlotVal:            make(map[uint64]*state.CachingBeaconState),
+		GetSyncCommitteesVal:      make(map[common.Hash][2]*solid.SyncCommittee),
+		GetFinalityCheckpointsVal: make(map[common.Hash][3]solid.Checkpoint),
+	}
+}
+
+func (f *ForkChoiceStorageMock) Ancestor(root common.Hash, slot uint64) common.Hash {
+	return f.Ancestors[slot]
+}
+
+func (f *ForkChoiceStorageMock) AnchorSlot() uint64 {
+	return f.AnchorSlotVal
+}
+
+func (f *ForkChoiceStorageMock) Engine() execution_client.ExecutionEngine {
+	panic("implement me")
+}
+
+func (f *ForkChoiceStorageMock) FinalizedCheckpoint() solid.Checkpoint {
+	return f.FinalizedCheckpointVal
+}
+
+func (f *ForkChoiceStorageMock) FinalizedSlot() uint64 {
+	return f.FinalizedSlotVal
+}
+
+func (f *ForkChoiceStorageMock) GetEth1Hash(eth2Root common.Hash) common.Hash {
+	panic("implement me")
+}
+
+func (f *ForkChoiceStorageMock) GetHead() (common.Hash, uint64, error) {
+	return f.HeadVal, f.HeadSlotVal, nil
+}
+
+func (f *ForkChoiceStorageMock) HighestSeen() uint64 {
+	return f.HighestSeenVal
+}
+
+func (f *ForkChoiceStorageMock) JustifiedCheckpoint() solid.Checkpoint {
+	return f.JustifiedCheckpointVal
+}
+
+func (f *ForkChoiceStorageMock) JustifiedSlot() uint64 {
+	return f.JustifiedSlotVal
+}
+
+func (f *ForkChoiceStorageMock) ProposerBoostRoot() common.Hash {
+	return f.ProposerBoostRootVal
+}
+
+func (f *ForkChoiceStorageMock) GetStateAtBlockRoot(blockRoot common.Hash, alwaysCopy bool) (*state.CachingBeaconState, error) {
+	return f.StateAtBlockRootVal[blockRoot], nil
+}
+
+func (f *ForkChoiceStorageMock) GetFinalityCheckpoints(blockRoot common.Hash) (bool, solid.Checkpoint, solid.Checkpoint, solid.Checkpoint) {
+	oneNil := f.GetFinalityCheckpointsVal[blockRoot][0] != nil && f.GetFinalityCheckpointsVal[blockRoot][1] != nil && f.GetFinalityCheckpointsVal[blockRoot][2] != nil
+	return oneNil, f.GetFinalityCheckpointsVal[blockRoot][0], f.GetFinalityCheckpointsVal[blockRoot][1], f.GetFinalityCheckpointsVal[blockRoot][2]
+}
+
+func (f *ForkChoiceStorageMock) GetSyncCommittees(blockRoot common.Hash) (*solid.SyncCommittee, *solid.SyncCommittee, bool) {
+	return f.GetSyncCommitteesVal[blockRoot][0], f.GetSyncCommitteesVal[blockRoot][1], f.GetSyncCommitteesVal[blockRoot][0] != nil && f.GetSyncCommitteesVal[blockRoot][1] != nil
+}
+
+func (f *ForkChoiceStorageMock) GetStateAtSlot(slot uint64, alwaysCopy bool) (*state.CachingBeaconState, error) {
+	return f.StateAtSlotVal[slot], nil
+}
+
+func (f *ForkChoiceStorageMock) Slot() uint64 {
+	return f.SlotVal
+}
+
+func (f *ForkChoiceStorageMock) Time() uint64 {
+	return f.TimeVal
+}
+
+func (f *ForkChoiceStorageMock) OnAttestation(attestation *solid.Attestation, fromBlock, insert bool) error {
+	f.Pool.AttestationsPool.Insert(attestation.Signature(), attestation)
+	return nil
+}
+
+func (f *ForkChoiceStorageMock) OnAttesterSlashing(attesterSlashing *cltypes.AttesterSlashing, test bool) error {
+	f.Pool.AttesterSlashingsPool.Insert(pool.ComputeKeyForAttesterSlashing(attesterSlashing), attesterSlashing)
+	return nil
+}
+
+func (f *ForkChoiceStorageMock) OnBlock(block *cltypes.SignedBeaconBlock, newPayload bool, fullValidation bool) error {
+	panic("implement me")
+}
+
+func (f *ForkChoiceStorageMock) OnTick(time uint64) {
+	panic("implement me")
+}
+
+func (f *ForkChoiceStorageMock) GetStateAtStateRoot(root common.Hash, alwaysCopy bool) (*state.CachingBeaconState, error) {
+	panic("implement me")
+}
+
+func (f *ForkChoiceStorageMock) BlockRewards(root common.Hash) (*eth2.BlockRewardsCollector, bool) {
+	panic("implement me")
+}
+
+func (f *ForkChoiceStorageMock) TotalActiveBalance(root common.Hash) (uint64, bool) {
+	panic("implement me")
+}
+
+func (f *ForkChoiceStorageMock) RandaoMixes(blockRoot common.Hash, out solid.HashListSSZ) bool {
+	return false
+}
+
+func (f *ForkChoiceStorageMock) LowestAvaiableSlot() uint64 {
+	return f.FinalizedSlotVal
+}
+
+func (f *ForkChoiceStorageMock) Partecipation(epoch uint64) (*solid.BitList, bool) {
+	return f.ParticipationVal, f.ParticipationVal != nil
+}
+
+func (f *ForkChoiceStorageMock) OnVoluntaryExit(signedVoluntaryExit *cltypes.SignedVoluntaryExit, test bool) error {
+	f.Pool.VoluntaryExistsPool.Insert(signedVoluntaryExit.VoluntaryExit.ValidatorIndex, signedVoluntaryExit)
+	return nil
+}
+
+func (f *ForkChoiceStorageMock) OnProposerSlashing(proposerSlashing *cltypes.ProposerSlashing, test bool) error {
+	f.Pool.ProposerSlashingsPool.Insert(pool.ComputeKeyForProposerSlashing(proposerSlashing), proposerSlashing)
+	return nil
+}
+
+func (f *ForkChoiceStorageMock) OnBlsToExecutionChange(signedChange *cltypes.SignedBLSToExecutionChange, test bool) error {
+	f.Pool.BLSToExecutionChangesPool.Insert(signedChange.Signature, signedChange)
+	return nil
+}
+
+func (f *ForkChoiceStorageMock) ForkNodes() []ForkNode {
+	return f.WeightsMock
+}
+
+func (f *ForkChoiceStorageMock) OnAggregateAndProof(aggregateAndProof *cltypes.SignedAggregateAndProof, test bool) error {
+	f.Pool.AttestationsPool.Insert(aggregateAndProof.Message.Aggregate.Signature(), aggregateAndProof.Message.Aggregate)
+	return nil
+}
+
+func (f *ForkChoiceStorageMock) Synced() bool {
+	return true
+}
+
+func (f *ForkChoiceStorageMock) SetSynced(synced bool) {
+	panic("implement me")
+}
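
For readers new to the mock above: its exported *Val fields are plain state that the reader methods echo back, so a unit test can stub fork choice without a real store. A hypothetical test sketch (not part of this patch), assuming the mock lives in the forkchoice package like the surrounding files, with import paths taken from elsewhere in the diff:

package forkchoice_test

import (
	"testing"

	"github.com/ledgerwatch/erigon-lib/common"
	"github.com/ledgerwatch/erigon/cl/phase1/forkchoice"
	"github.com/stretchr/testify/require"
)

// TestMockedHead is invented for illustration: code under test only sees the
// ForkChoiceStorage interface, so the mock's fields fully drive its inputs.
func TestMockedHead(t *testing.T) {
	mock := forkchoice.NewForkChoiceStorageMock()
	mock.HeadVal = common.HexToHash("0x01")
	mock.HeadSlotVal = 42

	root, slot, err := mock.GetHead()
	require.NoError(t, err)
	require.Equal(t, mock.HeadVal, root)
	require.Equal(t, uint64(42), slot)
}
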
diff --git a/cl/phase1/forkchoice/get_head.go b/cl/phase1/forkchoice/get_head.go
index e1300c2c022..56165f4a2bc 100644
--- a/cl/phase1/forkchoice/get_head.go
+++ b/cl/phase1/forkchoice/get_head.go
@@ -16,6 +16,23 @@ func (f *ForkChoiceStore) GetHead() (libcommon.Hash, uint64, error) {
 	return f.getHead()
 }
 
+// accountWeights updates the weights of the validators, given the votes and a head leaf.
+func (f *ForkChoiceStore) accountWeights(votes, weights map[libcommon.Hash]uint64, justifiedRoot, leaf libcommon.Hash) {
+	curr := leaf
+	accumulated := uint64(0)
+	for curr != justifiedRoot {
+		accumulated += votes[curr]
+		votes[curr] = 0 // make sure we don't double count
+		weights[curr] += accumulated
+		header, has := f.forkGraph.GetHeader(curr)
+		if !has {
+			return
+		}
+		curr = header.ParentRoot
+	}
+	return
+}
+
 func (f *ForkChoiceStore) getHead() (libcommon.Hash, uint64, error) {
 	if f.headHash != (libcommon.Hash{}) {
 		return f.headHash, f.headSlot, nil
@@ -28,8 +45,33 @@ func (f *ForkChoiceStore) getHead() (libcommon.Hash, uint64, error) {
 	if err != nil {
 		return libcommon.Hash{}, 0, err
 	}
-	// Filter all validators deemed as bad
-	filteredIndicies := f.filterValidatorSetForAttestationScores(justificationState, justificationState.epoch)
+	// Do a simple scan to determine the fork votes.
+	votes := make(map[libcommon.Hash]uint64)
+	for validatorIndex, message := range f.latestMessages {
+		if message == (LatestMessage{}) {
+			continue
+		}
+		if !readFromBitset(justificationState.actives, validatorIndex) || readFromBitset(justificationState.slasheds, validatorIndex) {
+			continue
+		}
+		if _, hasLatestMessage := f.getLatestMessage(uint64(validatorIndex)); !hasLatestMessage {
+			continue
+		}
+		if f.isUnequivocating(uint64(validatorIndex)) {
+			continue
+		}
+		votes[message.Root] += justificationState.balances[validatorIndex]
+	}
+	if f.proposerBoostRoot != (libcommon.Hash{}) {
+		boost := justificationState.activeBalance / justificationState.beaconConfig.SlotsPerEpoch
+		votes[f.proposerBoostRoot] += (boost * justificationState.beaconConfig.ProposerScoreBoost) / 100
+	}
+	// Account for weights on each head fork
+	f.weights = make(map[libcommon.Hash]uint64)
+	for head := range f.headSet {
+		f.accountWeights(votes, f.weights, f.justifiedCheckpoint.BlockRoot(), head)
+	}
+
 	for {
 		// Filter out current head children.
 		unfilteredChildren := f.children(f.headHash)
@@ -62,9 +104,9 @@ func (f *ForkChoiceStore) getHead() (libcommon.Hash, uint64, error) {
 		// After sorting is done determine best fit.
 		f.headHash = children[0]
-		maxWeight := f.getWeight(children[0], filteredIndicies, justificationState)
+		maxWeight := f.weights[children[0]]
 		for i := 1; i < len(children); i++ {
-			weight := f.getWeight(children[i], filteredIndicies, justificationState)
+			weight := f.weights[children[i]]
 			// Lexicographical order is king.
 			if weight >= maxWeight {
 				f.headHash = children[i]
@@ -81,10 +123,10 @@ func (f *ForkChoiceStore) filterValidatorSetForAttestationScores(c *checkpointSt
 		if !readFromBitset(c.actives, validatorIndex) || readFromBitset(c.slasheds, validatorIndex) {
 			continue
 		}
-		if _, hasLatestMessage := f.latestMessages[uint64(validatorIndex)]; !hasLatestMessage {
+		if _, hasLatestMessage := f.getLatestMessage(uint64(validatorIndex)); !hasLatestMessage {
 			continue
 		}
-		if _, isUnequivocating := f.equivocatingIndicies[uint64(validatorIndex)]; isUnequivocating {
+		if f.isUnequivocating(uint64(validatorIndex)) {
 			continue
 		}
 		filtered = append(filtered, uint64(validatorIndex))
diff --git a/cl/phase1/forkchoice/interface.go b/cl/phase1/forkchoice/interface.go
index c08a20950d4..955bd188d4a 100644
--- a/cl/phase1/forkchoice/interface.go
+++ b/cl/phase1/forkchoice/interface.go
@@ -7,6 +7,7 @@ import (
 	"github.com/ledgerwatch/erigon/cl/cltypes/solid"
 	"github.com/ledgerwatch/erigon/cl/phase1/core/state"
 	"github.com/ledgerwatch/erigon/cl/phase1/execution_client"
+	"github.com/ledgerwatch/erigon/cl/transition/impl/eth2"
 )
 
 type ForkChoiceStorage interface {
@@ -20,6 +21,7 @@ type ForkChoiceStorageReader interface {
 	Engine() execution_client.ExecutionEngine
 	FinalizedCheckpoint() solid.Checkpoint
 	FinalizedSlot() uint64
+	LowestAvaiableSlot() uint64
 	GetEth1Hash(eth2Root common.Hash) common.Hash
 	GetHead() (common.Hash, uint64, error)
 	HighestSeen() uint64
@@ -31,14 +33,25 @@ type ForkChoiceStorageReader interface {
 	GetSyncCommittees(blockRoot libcommon.Hash) (*solid.SyncCommittee, *solid.SyncCommittee, bool)
 	Slot() uint64
 	Time() uint64
+	Partecipation(epoch uint64) (*solid.BitList, bool)
+	RandaoMixes(blockRoot libcommon.Hash, out solid.HashListSSZ) bool
+	BlockRewards(root libcommon.Hash) (*eth2.BlockRewardsCollector, bool)
+	TotalActiveBalance(root libcommon.Hash) (uint64, bool)
 
 	GetStateAtSlot(slot uint64, alwaysCopy bool) (*state.CachingBeaconState, error)
 	GetStateAtStateRoot(root libcommon.Hash, alwaysCopy bool) (*state.CachingBeaconState, error)
+	ForkNodes() []ForkNode
+	Synced() bool
 }
 
 type ForkChoiceStorageWriter interface {
-	OnAttestation(attestation *solid.Attestation, fromBlock bool) error
+	OnAggregateAndProof(aggregateAndProof *cltypes.SignedAggregateAndProof, test bool) error
+	OnAttestation(attestation *solid.Attestation, fromBlock, insert bool) error
 	OnAttesterSlashing(attesterSlashing *cltypes.AttesterSlashing, test bool) error
+	OnVoluntaryExit(signedVoluntaryExit *cltypes.SignedVoluntaryExit, test bool) error
+	OnProposerSlashing(proposerSlashing *cltypes.ProposerSlashing, test bool) error
+	OnBlsToExecutionChange(signedChange *cltypes.SignedBLSToExecutionChange, test bool) error
 	OnBlock(block *cltypes.SignedBeaconBlock, newPayload bool, fullValidation bool) error
 	OnTick(time uint64)
+	SetSynced(synced bool)
 }
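
The get_head.go hunks above replace per-child getWeight scoring with one pass: count each validator's latest vote, then let accountWeights fold those votes into every ancestor between a head leaf and the justified root. A self-contained sketch of that accumulation, with a plain parent map standing in for the fork graph's GetHeader lookup (all names here are illustrative, not the diff's types):

package main

import "fmt"

type hash string

// accountWeights walks from a leaf back to the justified root, folding each
// block's direct votes into an accumulator and adding the running total to
// every ancestor on the way. Votes are zeroed so two leaves sharing a prefix
// never count the same ballot twice.
func accountWeights(votes, weights map[hash]uint64, parent map[hash]hash, justifiedRoot, leaf hash) {
	curr := leaf
	accumulated := uint64(0)
	for curr != justifiedRoot {
		accumulated += votes[curr]
		votes[curr] = 0 // avoid double counting across heads
		weights[curr] += accumulated
		p, ok := parent[curr]
		if !ok {
			return
		}
		curr = p
	}
}

func main() {
	// justified <- a <- b (head 1), and a <- c (head 2)
	parent := map[hash]hash{"a": "justified", "b": "a", "c": "a"}
	votes := map[hash]uint64{"a": 10, "b": 5, "c": 7}
	weights := map[hash]uint64{}
	for _, head := range []hash{"b", "c"} {
		accountWeights(votes, weights, parent, "justified", head)
	}
	fmt.Println(weights) // map[a:22 b:5 c:7]
}
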
diff --git a/cl/phase1/forkchoice/on_attestation.go b/cl/phase1/forkchoice/on_attestation.go
index ed4b0ce674c..3c59c4c2eb8 100644
--- a/cl/phase1/forkchoice/on_attestation.go
+++ b/cl/phase1/forkchoice/on_attestation.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"time"
 
+	"github.com/ledgerwatch/erigon/cl/cltypes"
 	"github.com/ledgerwatch/erigon/cl/cltypes/solid"
 	"github.com/ledgerwatch/erigon/cl/phase1/cache"
 	"github.com/ledgerwatch/erigon/cl/phase1/core/state"
@@ -12,8 +13,13 @@ import (
 	libcommon "github.com/ledgerwatch/erigon-lib/common"
 )
 
+const maxAttestationJobLifetime = 30 * time.Minute
+
 // OnAttestation processes incoming attestations.
-func (f *ForkChoiceStore) OnAttestation(attestation *solid.Attestation, fromBlock bool) error {
+func (f *ForkChoiceStore) OnAttestation(attestation *solid.Attestation, fromBlock bool, insert bool) error {
+	if !f.synced.Load() {
+		return nil
+	}
 	f.mu.Lock()
 	defer f.mu.Unlock()
 	f.headHash = libcommon.Hash{}
@@ -61,44 +67,150 @@ func (f *ForkChoiceStore) OnAttestation(attestation *solid.Attestation, fromBloc
 	cache.StoreAttestation(&data, attestation.AggregationBits(), attestationIndicies)
 	// Lastly update latest messages.
 	f.processAttestingIndicies(attestation, attestationIndicies)
+	if !fromBlock && insert {
+		// Add to the pool when verified.
+		f.operationsPool.AttestationsPool.Insert(attestation.Signature(), attestation)
+	}
 	return nil
 }
 
+func (f *ForkChoiceStore) OnAggregateAndProof(aggregateAndProof *cltypes.SignedAggregateAndProof, test bool) error {
+	if !f.synced.Load() {
+		return nil
+	}
+	slot := aggregateAndProof.Message.Aggregate.AttestantionData().Slot()
+	selectionProof := aggregateAndProof.Message.SelectionProof
+	committeeIndex := aggregateAndProof.Message.Aggregate.AttestantionData().ValidatorIndex()
+	epoch := state.GetEpochAtSlot(f.beaconCfg, slot)
+
+	if err := f.validateOnAttestation(aggregateAndProof.Message.Aggregate, false); err != nil {
+		return err
+	}
+
+	target := aggregateAndProof.Message.Aggregate.AttestantionData().Target()
+	targetState, err := f.getCheckpointState(target)
+	if err != nil {
+		return nil
+	}
+
+	activeIndicies := targetState.getActiveIndicies(epoch)
+	activeIndiciesLength := uint64(len(activeIndicies))
+
+	count := targetState.committeeCount(epoch, activeIndiciesLength) * f.beaconCfg.SlotsPerEpoch
+	start := (activeIndiciesLength * committeeIndex) / count
+	end := (activeIndiciesLength * (committeeIndex + 1)) / count
+	committeeLength := end - start
+	if !state.IsAggregator(f.beaconCfg, committeeLength, slot, committeeIndex, selectionProof) {
+		log.Warn("invalid aggregate and proof")
+		return fmt.Errorf("invalid aggregate and proof")
+	}
+	return f.OnAttestation(aggregateAndProof.Message.Aggregate, false, false)
+}
+
+type attestationJob struct {
+	attestation *solid.Attestation
+	insert      bool
+	when        time.Time
+}
+
 // scheduleAttestationForLaterProcessing schedules an attestation for later processing
-func (f *ForkChoiceStore) scheduleAttestationForLaterProcessing(attestation *solid.Attestation, fromBlock bool) {
+func (f *ForkChoiceStore) scheduleAttestationForLaterProcessing(attestation *solid.Attestation, insert bool) {
+	root, err := attestation.HashSSZ()
+	if err != nil {
+		log.Error("failed to hash attestation", "err", err)
+		return
+	}
+	f.attestationSet.Store(root, &attestationJob{
+		attestation: attestation,
+		insert:      insert,
+		when:        time.Now(),
+	})
+}
+
+func (f *ForkChoiceStore) StartAttestationsRTT() {
 	go func() {
-		logInterval := time.NewTicker(50 * time.Millisecond)
+		interval := time.NewTicker(500 * time.Millisecond)
 		for {
 			select {
 			case <-f.ctx.Done():
 				return
-			case <-logInterval.C:
-				if f.Slot() < attestation.AttestantionData().Slot()+1 {
-					continue
-				}
-				if err := f.OnAttestation(attestation, false); err != nil {
-					log.Trace("could not process scheduled attestation", "reason", err)
-				}
-				return
+			case <-interval.C:
+				f.attestationSet.Range(func(key, value interface{}) bool {
+					job := value.(*attestationJob)
+					if time.Since(job.when) > maxAttestationJobLifetime {
+						f.attestationSet.Delete(key)
+						return true
+					}
+					if f.Slot() >= job.attestation.AttestantionData().Slot()+1 {
+						if err := f.OnAttestation(job.attestation, false, job.insert); err != nil {
+							log.Warn("failed to process attestation", "err", err)
+						}
+						f.attestationSet.Delete(key)
+					}
+					return true
+				})
 			}
 		}
 	}()
 }
 
+func (f *ForkChoiceStore) setLatestMessage(index uint64, message LatestMessage) {
+	if index >= uint64(len(f.latestMessages)) {
+		if index >= uint64(cap(f.latestMessages)) {
+			tmp := make([]LatestMessage, index+1, (index+1)*2) // cap must be >= length, or make panics when index == 0
+			copy(tmp, f.latestMessages)
+			f.latestMessages = tmp
+		}
+		f.latestMessages = f.latestMessages[:index+1]
+	}
+	f.latestMessages[index] = message
+}
+
+func (f *ForkChoiceStore) getLatestMessage(validatorIndex uint64) (LatestMessage, bool) {
+	if validatorIndex >= uint64(len(f.latestMessages)) || f.latestMessages[validatorIndex] == (LatestMessage{}) {
+		return LatestMessage{}, false
+	}
+	return f.latestMessages[validatorIndex], true
+}
+
+func (f *ForkChoiceStore) isUnequivocating(validatorIndex uint64) bool {
+	// f.equivocatingIndicies is a bitlist
+	index := int(validatorIndex) / 8
+	if index >= len(f.equivocatingIndicies) {
+		return false
+	}
+	subIndex := int(validatorIndex) % 8
+	return f.equivocatingIndicies[index]&(1<<uint(subIndex)) != 0
+}
+
+func (f *ForkChoiceStore) setUnequivocating(validatorIndex uint64) {
+	index := int(validatorIndex) / 8
+	if index >= len(f.equivocatingIndicies) {
+		if index >= cap(f.equivocatingIndicies) {
+			tmp := make([]byte, index+1, (index+1)*2)
+			copy(tmp, f.equivocatingIndicies)
+			f.equivocatingIndicies = tmp
+		}
+		f.equivocatingIndicies = f.equivocatingIndicies[:index+1]
+	}
+	subIndex := int(validatorIndex) % 8
+	f.equivocatingIndicies[index] |= 1 << uint(subIndex)
+}
+
 func (f *ForkChoiceStore) processAttestingIndicies(attestation *solid.Attestation, indicies []uint64) {
 	beaconBlockRoot := attestation.AttestantionData().BeaconBlockRoot()
 	target := attestation.AttestantionData().Target()
 
 	for _, index := range indicies {
-		if _, ok := f.equivocatingIndicies[index]; ok {
+		if f.isUnequivocating(index) {
 			continue
 		}
-		validatorMessage, has := f.latestMessages[index]
+		validatorMessage, has := f.getLatestMessage(index)
 		if !has || target.Epoch() > validatorMessage.Epoch {
-			f.latestMessages[index] = &LatestMessage{
+			f.setLatestMessage(index, LatestMessage{
 				Epoch: target.Epoch(),
 				Root:  beaconBlockRoot,
-			}
+			})
 		}
 	}
 }
diff --git a/cl/phase1/forkchoice/on_attester_slashing.go b/cl/phase1/forkchoice/on_attester_slashing.go
index 1c4ea9d5dc3..4305ed58d08 100644
--- a/cl/phase1/forkchoice/on_attester_slashing.go
+++ b/cl/phase1/forkchoice/on_attester_slashing.go
@@ -86,7 +86,7 @@ func (f *ForkChoiceStore) OnAttesterSlashing(attesterSlashing *cltypes.AttesterS
 	defer f.mu.Unlock()
 	var anySlashed bool
 	for _, index := range solid.IntersectionOfSortedSets(attestation1.AttestingIndices, attestation2.AttestingIndices) {
-		f.equivocatingIndicies[index] = struct{}{}
+		f.setUnequivocating(index)
 		if !anySlashed {
 			v, err := s.ValidatorForValidatorIndex(int(index))
 			if err != nil {
diff --git a/cl/phase1/forkchoice/on_block.go b/cl/phase1/forkchoice/on_block.go
index 5cd183c8a24..627e16ac26f 100644
--- a/cl/phase1/forkchoice/on_block.go
+++ b/cl/phase1/forkchoice/on_block.go
@@ -8,7 +8,9 @@ import (
 	"github.com/ledgerwatch/log/v3"
 
 	"github.com/ledgerwatch/erigon/cl/cltypes"
+	"github.com/ledgerwatch/erigon/cl/cltypes/solid"
 	"github.com/ledgerwatch/erigon/cl/freezer"
+	"github.com/ledgerwatch/erigon/cl/phase1/core/state"
 	"github.com/ledgerwatch/erigon/cl/phase1/forkchoice/fork_graph"
 	"github.com/ledgerwatch/erigon/cl/transition/impl/eth2/statechange"
 )
@@ -64,6 +66,9 @@ func (f *ForkChoiceStore) OnBlock(block *cltypes.SignedBeaconBlock, newPayload,
 	if block.Block.Slot > f.highestSeen {
 		f.highestSeen = block.Block.Slot
 	}
+	// Remove the parent from the head set
+	delete(f.headSet, block.Block.ParentRoot)
+	f.headSet[blockRoot] = struct{}{}
 	// Add proposer score boost if the block is timely
 	timeIntoSlot := (f.time - f.genesisTime) % lastProcessedState.BeaconConfig().SecondsPerSlot
 	isBeforeAttestingInterval := timeIntoSlot < f.beaconCfg.SecondsPerSlot/f.beaconCfg.IntervalsPerSlot
@@ -74,7 +79,17 @@ func (f *ForkChoiceStore) OnBlock(block *cltypes.SignedBeaconBlock, newPayload,
 		if err := freezer.PutObjectSSZIntoFreezer("beaconState", "caplin_core", lastProcessedState.Slot(), lastProcessedState, f.recorder); err != nil {
 			return err
 		}
+		// Update randao mixes
+		r := solid.NewHashVector(int(f.beaconCfg.EpochsPerHistoricalVector))
+		lastProcessedState.RandaoMixes().CopyTo(r)
+		f.randaoMixesLists.Add(blockRoot, r)
+	} else {
+		f.randaoDeltas.Add(blockRoot, randaoDelta{
+			epoch: state.Epoch(lastProcessedState),
+			delta: lastProcessedState.GetRandaoMixes(state.Epoch(lastProcessedState)),
+		})
 	}
+	f.participation.Add(state.Epoch(lastProcessedState), lastProcessedState.CurrentEpochParticipation().Copy())
 	f.preverifiedSizes.Add(blockRoot, preverifiedAppendListsSizes{
 		validatorLength:       uint64(lastProcessedState.ValidatorLength()),
 		historicalRootsLength: lastProcessedState.HistoricalRootsLength(),
@@ -85,6 +100,7 @@ func (f *ForkChoiceStore) OnBlock(block *cltypes.SignedBeaconBlock, newPayload,
 		currentJustifiedCheckpoint:  lastProcessedState.CurrentJustifiedCheckpoint().Copy(),
 		previousJustifiedCheckpoint: lastProcessedState.PreviousJustifiedCheckpoint().Copy(),
 	})
+	f.totalActiveBalances.Add(blockRoot, lastProcessedState.GetTotalActiveBalance())
 	// Update checkpoints
 	f.updateCheckpoints(lastProcessedState.CurrentJustifiedCheckpoint().Copy(), lastProcessedState.FinalizedCheckpoint().Copy())
 	// First thing save previous values of the checkpoints (avoid memory copy of all states and ensure easy revert)
diff --git a/cl/phase1/forkchoice/on_operations.go b/cl/phase1/forkchoice/on_operations.go
index 8679fb6a905..74399523134 100644
--- a/cl/phase1/forkchoice/on_operations.go
+++ b/cl/phase1/forkchoice/on_operations.go
@@ -6,6 +6,7 @@ import (
 	"fmt"
 
 	"github.com/Giulio2002/bls"
+	"github.com/ledgerwatch/erigon/cl/clparams"
 	"github.com/ledgerwatch/erigon/cl/cltypes"
 	"github.com/ledgerwatch/erigon/cl/fork"
 	"github.com/ledgerwatch/erigon/cl/phase1/core/state"
@@ -49,10 +50,18 @@ func (f *ForkChoiceStore) OnVoluntaryExit(signedVoluntaryExit *cltypes.SignedVol
 	pk := val.PublicKey()
 	f.mu.Unlock()
 
-	domain, err := s.GetDomain(s.BeaconConfig().DomainVoluntaryExit, voluntaryExit.Epoch)
+	domainType := f.beaconCfg.DomainVoluntaryExit
+	var domain []byte
+
+	if s.Version() < clparams.DenebVersion {
+		domain, err = s.GetDomain(domainType, voluntaryExit.Epoch)
+	} else if s.Version() >= clparams.DenebVersion {
+		domain, err = fork.ComputeDomain(domainType[:], utils.Uint32ToBytes4(s.BeaconConfig().CapellaForkVersion), s.GenesisValidatorsRoot())
+	}
 	if err != nil {
 		return err
 	}
+
 	signingRoot, err := fork.ComputeSigningRoot(voluntaryExit, domain)
 	if err != nil {
 		return err
diff --git a/cl/phase1/forkchoice/utils.go b/cl/phase1/forkchoice/utils.go
index b3eaca58da7..f13aee3dac4 100644
--- a/cl/phase1/forkchoice/utils.go
+++ b/cl/phase1/forkchoice/utils.go
@@ -41,6 +41,7 @@ func (f *ForkChoiceStore) onNewFinalized(newFinalized solid.Checkpoint) {
 	for k, children := range f.childrens {
 		if children.parentSlot <= newFinalized.Epoch()*f.beaconCfg.SlotsPerEpoch {
 			delete(f.childrens, k)
+			delete(f.headSet, k)
 			continue
 		}
 	}
diff --git a/cl/phase1/network/gossip_manager.go b/cl/phase1/network/gossip_manager.go
index abc33f3d6c4..88c0841dea8 100644
--- a/cl/phase1/network/gossip_manager.go
+++ b/cl/phase1/network/gossip_manager.go
@@ -3,10 +3,12 @@ package network
 import (
 	"context"
 	"fmt"
-	"github.com/ledgerwatch/erigon-lib/common"
 	"sync"
 
+	"github.com/ledgerwatch/erigon-lib/common"
+
 	"github.com/ledgerwatch/erigon/cl/freezer"
+	"github.com/ledgerwatch/erigon/cl/gossip"
 	"github.com/ledgerwatch/erigon/cl/phase1/forkchoice"
 	"github.com/ledgerwatch/erigon/cl/sentinel/peers"
 
@@ -96,8 +98,8 @@ func (g *GossipManager) onRecv(ctx context.Context, data *sentinel.GossipData, l
 	// If the deserialization fails, an error is logged and the loop returns to the next iteration.
 	// If the deserialization is successful, the object is set to the deserialized value and the loop returns to the next iteration.
 	var object ssz.Unmarshaler
-	switch data.Type {
-	case sentinel.GossipType_BeaconBlockGossipType:
+	switch data.Name {
+	case gossip.TopicNameBeaconBlock:
 		object = cltypes.NewSignedBeaconBlock(g.beaconConfig)
 		if err := object.DecodeSSZ(common.CopyBytes(data.Data), int(version)); err != nil {
 			g.sentinel.BanPeer(ctx, data.Peer)
@@ -142,28 +144,32 @@ func (g *GossipManager) onRecv(ctx context.Context, data *sentinel.GossipData, l
 		}
 		g.mu.RUnlock()
-	case sentinel.GossipType_VoluntaryExitGossipType:
+	case gossip.TopicNameVoluntaryExit:
 		if err := operationsContract[*cltypes.SignedVoluntaryExit](ctx, g, l, data, int(version), "voluntary exit", g.forkChoice.OnVoluntaryExit); err != nil {
 			return err
 		}
-	case sentinel.GossipType_ProposerSlashingGossipType:
+	case gossip.TopicNameProposerSlashing:
 		if err := operationsContract[*cltypes.ProposerSlashing](ctx, g, l, data, int(version), "proposer slashing", g.forkChoice.OnProposerSlashing); err != nil {
 			return err
 		}
-	case sentinel.GossipType_AttesterSlashingGossipType:
+	case gossip.TopicNameAttesterSlashing:
 		if err := operationsContract[*cltypes.AttesterSlashing](ctx, g, l, data, int(version), "attester slashing", g.forkChoice.OnAttesterSlashing); err != nil {
 			return err
 		}
-	case sentinel.GossipType_BlsToExecutionChangeGossipType:
+	case gossip.TopicNameBlsToExecutionChange:
 		if err := operationsContract[*cltypes.SignedBLSToExecutionChange](ctx, g, l, data, int(version), "bls to execution change", g.forkChoice.OnBlsToExecutionChange); err != nil {
 			return err
 		}
+	case gossip.TopicNameBeaconAggregateAndProof:
+		if err := operationsContract[*cltypes.SignedAggregateAndProof](ctx, g, l, data, int(version), "aggregate and proof", g.forkChoice.OnAggregateAndProof); err != nil {
+			return err
+		}
 	}
 	return nil
 }
 
 func (g *GossipManager) Start(ctx context.Context) {
-	subscription, err := g.sentinel.SubscribeGossip(ctx, &sentinel.EmptyMessage{})
+	subscription, err := g.sentinel.SubscribeGossip(ctx, &sentinel.SubscriptionData{})
 	if err != nil {
 		return
 	}
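
The on_attestation.go hunks above also turn equivocatingIndicies from a map into a packed byte slice with one bit per validator, which is far cheaper at beacon-chain validator counts. A standalone sketch of the same test/set arithmetic over a bare []byte (function names here are invented for the example):

package main

import "fmt"

// isSet reports whether the bit for validatorIndex is on.
func isSet(bits []byte, validatorIndex uint64) bool {
	index := int(validatorIndex) / 8
	if index >= len(bits) {
		return false
	}
	return bits[index]&(1<<uint(validatorIndex%8)) != 0
}

// set grows the bitlist as needed and turns the bit on.
func set(bits []byte, validatorIndex uint64) []byte {
	index := int(validatorIndex) / 8
	for index >= len(bits) {
		bits = append(bits, 0)
	}
	bits[index] |= 1 << uint(validatorIndex%8)
	return bits
}

func main() {
	var bits []byte
	bits = set(bits, 9)
	fmt.Println(isSet(bits, 9), isSet(bits, 8), len(bits)) // true false 2
}
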
diff --git a/cl/phase1/stages/clstages.go b/cl/phase1/stages/clstages.go
index bbf1da29c8b..5f5e18c80e2 100644
--- a/cl/phase1/stages/clstages.go
+++ b/cl/phase1/stages/clstages.go
@@ -14,6 +14,7 @@ import (
 	"github.com/ledgerwatch/erigon/cl/clparams"
 	"github.com/ledgerwatch/erigon/cl/clstages"
 	"github.com/ledgerwatch/erigon/cl/cltypes"
+	"github.com/ledgerwatch/erigon/cl/cltypes/solid"
 	"github.com/ledgerwatch/erigon/cl/persistence"
 	"github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies"
 	"github.com/ledgerwatch/erigon/cl/persistence/db_config"
@@ -98,7 +99,6 @@ func ClStagesCfg(
 type StageName = string
 
 const (
-	WaitForPeers  StageName = "WaitForPeers"
 	CatchUpEpochs StageName = "CatchUpEpochs"
 	CatchUpBlocks StageName = "CatchUpBlocks"
 	ForkChoice    StageName = "ForkChoice"
@@ -113,9 +113,6 @@ const (
 )
 
 func MetaCatchingUp(args Args) StageName {
-	if args.peers < minPeersForDownload {
-		return WaitForPeers
-	}
 	if !args.hasDownloaded {
 		return DownloadHistoricalBlocks
 	}
@@ -218,39 +215,6 @@ func ConsensusClStages(ctx context.Context,
 			return
 		},
 		Stages: map[string]clstages.Stage[*Cfg, Args]{
-			WaitForPeers: {
-				Description: `wait for enough peers. This is also a safe stage to go to when unsure of what stage to use`,
-				TransitionFunc: func(cfg *Cfg, args Args, err error) string {
-					if x := MetaCatchingUp(args); x != "" {
-						return x
-					}
-					return CatchUpBlocks
-				},
-				ActionFunc: func(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error {
-					peersCount, err := cfg.rpc.Peers()
-					if err != nil {
-						return nil
-					}
-					waitWhenNotEnoughPeers := 3 * time.Second
-					for {
-						if peersCount >= minPeersForDownload {
-							break
-						}
-						select {
-						case <-ctx.Done():
-							return ctx.Err()
-						default:
-						}
-						logger.Info("[Caplin] Waiting For Peers", "have", peersCount, "needed", minPeersForDownload, "retryIn", waitWhenNotEnoughPeers)
-						time.Sleep(waitWhenNotEnoughPeers)
-						peersCount, err = cfg.rpc.Peers()
-						if err != nil {
-							peersCount = 0
-						}
-					}
-					return nil
-				},
-			},
 			DownloadHistoricalBlocks: {
 				Description: "Download historical blocks",
 				TransitionFunc: func(cfg *Cfg, args Args, err error) string {
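
The CatchUpBlocks hunks that follow race the gossip and RPC block sources and take whichever responds first. A minimal sketch of that fan-out/fan-in shape, with generic fetch functions standing in for the diff's BlockSource.GetRange (note that, as in the patch, the losing goroutine is simply abandoned and bounded only by the stage's timeout):

package main

import (
	"context"
	"fmt"
	"time"
)

type fetch func(ctx context.Context) ([]string, error)

// firstOf runs every source concurrently; the first successful answer wins,
// and an error is only returned if it arrives before any data does.
func firstOf(ctx context.Context, sources []fetch) ([]string, error) {
	respCh := make(chan []string)
	errCh := make(chan error)
	for _, src := range sources {
		go func(src fetch) {
			blocks, err := src(ctx)
			if err != nil {
				errCh <- err
				return
			}
			respCh <- blocks
		}(src)
	}
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case err := <-errCh:
		return nil, err
	case blocks := <-respCh:
		return blocks, nil
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fast := func(ctx context.Context) ([]string, error) { return []string{"block-1"}, nil }
	slow := func(ctx context.Context) ([]string, error) { time.Sleep(time.Hour); return nil, nil }
	blocks, err := firstOf(ctx, []fetch{slow, fast})
	fmt.Println(blocks, err) // [block-1] <nil>
}
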
@@ -369,7 +333,7 @@ func ConsensusClStages(ctx context.Context,
 					)
 					respCh := make(chan *peers.PeeredObject[[]*cltypes.SignedBeaconBlock])
 					errCh := make(chan error)
-					sources := []persistence.BlockSource{gossipSource}
+					sources := []persistence.BlockSource{gossipSource, rpcSource}
 
 					// if we are more than one block behind, we request the rpc source as well
 					if totalRequest > 2 {
@@ -379,36 +343,81 @@ func ConsensusClStages(ctx context.Context,
 					ctx, cn := context.WithTimeout(ctx, 15*time.Second)
 					defer cn()
 
-					tx, err := cfg.indiciesDB.BeginRw(ctx)
-					if err != nil {
-						return err
-					}
-					defer tx.Rollback()
 
 					// we go ask all the sources and see who gets back to us first. whoever does is the winner!!
 					for _, v := range sources {
 						sourceFunc := v.GetRange
-						go func() {
-							blocks, err := sourceFunc(ctx, tx, args.seenSlot+1, totalRequest)
+						go func(source persistence.BlockSource) {
+							if _, ok := source.(*persistence.BeaconRpcSource); ok {
+								time.Sleep(2 * time.Second)
+								var blocks *peers.PeeredObject[[]*cltypes.SignedBeaconBlock]
+							Loop:
+								for {
+									var err error
+									from := args.seenSlot - 2
+									currentSlot := utils.GetCurrentSlot(cfg.genesisCfg.GenesisTime, cfg.beaconCfg.SecondsPerSlot)
+									count := (currentSlot - from) + 2
+									if currentSlot <= cfg.forkChoice.HighestSeen() {
+										time.Sleep(100 * time.Millisecond)
+										continue
+									}
+									blocks, err = sourceFunc(ctx, nil, from, count)
+									if err != nil {
+										errCh <- err
+										return
+									}
+									for _, block := range blocks.Data {
+										if block.Block.Slot >= currentSlot {
+											break Loop
+										}
+									}
+								}
+								respCh <- blocks
+								return
+							}
+							blocks, err := sourceFunc(ctx, nil, args.seenSlot+1, totalRequest)
 							if err != nil {
 								errCh <- err
 								return
 							}
 							respCh <- blocks
-						}()
+						}(v)
 					}
 
+					tx, err := cfg.indiciesDB.BeginRw(ctx)
+					if err != nil {
+						return err
+					}
+					defer tx.Rollback()
+
 					logTimer := time.NewTicker(30 * time.Second)
 					defer logTimer.Stop()
-					select {
-					case err := <-errCh:
-						return err
-					case blocks := <-respCh:
-						for _, block := range blocks.Data {
-							if err := processBlock(tx, block, true, true); err != nil {
-								return err
+				MainLoop:
+					for {
+						select {
+						case <-ctx.Done():
+							return errors.New("timeout waiting for blocks")
+						case err := <-errCh:
+							return err
+						case blocks := <-respCh:
+							for _, block := range blocks.Data {
+								if err := processBlock(tx, block, true, true); err != nil {
+									log.Error("bad blocks segment received", "err", err)
+									cfg.rpc.BanPeer(blocks.Peer)
+									continue MainLoop
+								}
+								block.Block.Body.Attestations.Range(func(idx int, a *solid.Attestation, total int) bool {
+									if err = cfg.forkChoice.OnAttestation(a, true, false); err != nil {
+										log.Debug("bad attestation received", "err", err)
+									}
+									return true
+								})
+
+								if block.Block.Slot >= args.targetSlot {
+									break MainLoop
+								}
 							}
+						case <-logTimer.C:
+							logger.Info("[Caplin] Progress", "progress", cfg.forkChoice.HighestSeen(), "from", args.seenSlot, "to", args.targetSlot)
 						}
-					case <-logTimer.C:
-						logger.Info("[Caplin] Progress", "progress", cfg.forkChoice.HighestSeen(), "from", args.seenEpoch, "to", args.targetSlot)
 					}
 					return tx.Commit()
 				},
@@ -424,17 +433,6 @@ func ConsensusClStages(ctx context.Context,
 				},
 				ActionFunc: func(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error {
-					// TODO: we need to get the last run block in order to process attestations here
-					////////block.Block.Body.Attestations.Range(func(idx int, a *solid.Attestation, total int) bool {
-					////////	if err = g.forkChoice.OnAttestation(a, true); err != nil {
-					////////		return false
-					////////	}
-					////////	return true
-					////////})
-					////////if err != nil {
-					////////	return err
-					////////}
-
 					// Now check the head
 					headRoot, headSlot, err := cfg.forkChoice.GetHead()
 					if err != nil {
@@ -509,6 +507,7 @@ func ConsensusClStages(ctx context.Context,
 					if err != nil {
 						return err
 					}
+					cfg.forkChoice.SetSynced(true)
 					if err := cfg.syncedData.OnHeadState(headState); err != nil {
 						return err
 					}
@@ -620,7 +619,10 @@ func ConsensusClStages(ctx context.Context,
 			SleepForSlot: {
 				Description: `sleep until the next slot`,
 				TransitionFunc: func(cfg *Cfg, args Args, err error) string {
-					return WaitForPeers
+					if x := MetaCatchingUp(args); x != "" {
+						return x
+					}
+					return ListenForForks
 				},
 				ActionFunc: func(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error {
 					nextSlot := args.seenSlot + 1
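
SleepForSlot and the RPC polling loop above both lean on wall-clock slot arithmetic. A small sketch of what a helper like the diff's utils.GetCurrentSlot has to compute, plus the sleep duration a "sleep until the next slot" stage needs (the helper names here are illustrative, not erigon's API):

package main

import (
	"fmt"
	"time"
)

// currentSlot maps wall-clock time onto a slot number.
func currentSlot(genesisTime, secondsPerSlot uint64) uint64 {
	now := uint64(time.Now().Unix())
	if now < genesisTime {
		return 0
	}
	return (now - genesisTime) / secondsPerSlot
}

// durationUntilNextSlot is how long a sleep-for-slot stage has to wait.
func durationUntilNextSlot(genesisTime, secondsPerSlot uint64) time.Duration {
	now := uint64(time.Now().Unix())
	next := genesisTime + (currentSlot(genesisTime, secondsPerSlot)+1)*secondsPerSlot
	return time.Duration(next-now) * time.Second
}

func main() {
	// Mainnet-style parameters: 12-second slots, genesis 100s ago.
	genesis := uint64(time.Now().Unix()) - 100
	fmt.Println(currentSlot(genesis, 12))           // 8
	fmt.Println(durationUntilNextSlot(genesis, 12)) // at most 12s
}
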
diff --git a/cl/phase1/stages/stage_history_download.go b/cl/phase1/stages/stage_history_download.go
index 19a43a0f548..c8fc88baa79 100644
--- a/cl/phase1/stages/stage_history_download.go
+++ b/cl/phase1/stages/stage_history_download.go
@@ -246,6 +246,9 @@ func SpawnStageHistoryDownload(cfg StageHistoryReconstructionCfg, ctx context.Co
 		if err := executionPayload.DecodeSSZ(v[:len(v)-1-32], int(version)); err != nil {
 			return fmt.Errorf("error decoding execution payload during collection: %s", err)
 		}
+		if executionPayload.BlockNumber%10000 == 0 {
+			cfg.logger.Info("Inserting execution payload", "blockNumber", executionPayload.BlockNumber)
+		}
 		body := executionPayload.Body()
 		header, err := executionPayload.RlpHeader(&parentRoot)
 
diff --git a/cl/pool/operation_pool.go b/cl/pool/operation_pool.go
index 44962135584..348680559c5 100644
--- a/cl/pool/operation_pool.go
+++ b/cl/pool/operation_pool.go
@@ -1,13 +1,19 @@
 package pool
 
 import (
+	"time"
+
 	"github.com/ledgerwatch/erigon/cl/phase1/core/state/lru"
 )
 
+const lifeSpan = 30 * time.Minute
+
 var operationsMultiplier = 20 // Cap the amount of cached element to max_operations_per_block * operations_multiplier
 
 type OperationPool[K comparable, T any] struct {
-	pool *lru.Cache[K, T] // Map the Signature to the underlying object
+	pool         *lru.Cache[K, T] // Map the Signature to the underlying object
+	recentlySeen map[K]time.Time
+	lastPruned   time.Time
 }
 
 func NewOperationPool[K comparable, T any](maxOperationsPerBlock int, matricName string) *OperationPool[K, T] {
@@ -15,11 +21,30 @@ func NewOperationPool[K comparable, T any](maxOperationsPerBlock int, matricName
 	if err != nil {
 		panic(err)
 	}
-	return &OperationPool[K, T]{pool: pool}
+	return &OperationPool[K, T]{
+		pool:         pool,
+		recentlySeen: make(map[K]time.Time),
+	}
 }
 
 func (o *OperationPool[K, T]) Insert(k K, operation T) {
+	if _, ok := o.recentlySeen[k]; ok {
+		return
+	}
 	o.pool.Add(k, operation)
+	o.recentlySeen[k] = time.Now()
+	if time.Since(o.lastPruned) > lifeSpan {
+		deleteList := make([]K, 0, len(o.recentlySeen))
+		for k, t := range o.recentlySeen {
+			if time.Since(t) > lifeSpan {
+				deleteList = append(deleteList, k)
+			}
+		}
+		for _, k := range deleteList {
+			delete(o.recentlySeen, k)
+		}
+		o.lastPruned = time.Now()
+	}
 }
 
 func (o *OperationPool[K, T]) DeleteIfExist(k K) (removed bool) {
diff --git a/cl/rpc/rpc.go b/cl/rpc/rpc.go
index 0ada88e8115..338edaac382 100644
--- a/cl/rpc/rpc.go
+++ b/cl/rpc/rpc.go
@@ -186,7 +186,7 @@ func (b *BeaconRpcP2P) PropagateBlock(block *cltypes.SignedBeaconBlock) error {
 	}
 	_, err = b.sentinel.PublishGossip(b.ctx, &sentinel.GossipData{
 		Data: encoded,
-		Type: sentinel.GossipType_BeaconBlockGossipType,
+		Name: "beacon_block",
 	})
 	return err
 }
diff --git a/cl/sentinel/gossip.go b/cl/sentinel/gossip.go
index cf1602e2d71..7067d79bf96 100644
--- a/cl/sentinel/gossip.go
+++ b/cl/sentinel/gossip.go
@@ -19,7 +19,9 @@ import (
 	"sync"
 	"time"
 
+	"github.com/ledgerwatch/erigon-lib/common"
 	"github.com/ledgerwatch/erigon/cl/fork"
+	"github.com/ledgerwatch/erigon/cl/gossip"
 	"github.com/ledgerwatch/log/v3"
 	pubsub "github.com/libp2p/go-libp2p-pubsub"
 	"github.com/libp2p/go-libp2p/core/peer"
@@ -35,65 +37,55 @@ var (
 
 const SSZSnappyCodec = "ssz_snappy"
 
-type TopicName string
-
-const (
-	BeaconBlockTopic             TopicName = "beacon_block"
-	BeaconAggregateAndProofTopic TopicName = "beacon_aggregate_and_proof"
-	VoluntaryExitTopic           TopicName = "voluntary_exit"
-	ProposerSlashingTopic        TopicName = "proposer_slashing"
-	AttesterSlashingTopic        TopicName = "attester_slashing"
-	BlsToExecutionChangeTopic    TopicName = "bls_to_execution_change"
-	BlobSidecarTopic             TopicName = "blob_sidecar_%d" // This topic needs an index
-)
-
 type GossipTopic struct {
-	Name     TopicName
+	Name     string
 	CodecStr string
 }
 
 var BeaconBlockSsz = GossipTopic{
-	Name:     BeaconBlockTopic,
+	Name:     gossip.TopicNameBeaconBlock,
 	CodecStr: SSZSnappyCodec,
 }
 
 var BeaconAggregateAndProofSsz = GossipTopic{
-	Name:     BeaconAggregateAndProofTopic,
+	Name:     gossip.TopicNameBeaconAggregateAndProof,
 	CodecStr: SSZSnappyCodec,
 }
 
 var VoluntaryExitSsz = GossipTopic{
-	Name:     VoluntaryExitTopic,
+	Name:     gossip.TopicNameVoluntaryExit,
 	CodecStr: SSZSnappyCodec,
 }
 
 var ProposerSlashingSsz = GossipTopic{
-	Name:     ProposerSlashingTopic,
+	Name:     gossip.TopicNameProposerSlashing,
 	CodecStr: SSZSnappyCodec,
 }
 
 var AttesterSlashingSsz = GossipTopic{
-	Name:     AttesterSlashingTopic,
+	Name:     gossip.TopicNameAttesterSlashing,
 	CodecStr: SSZSnappyCodec,
 }
 
 var BlsToExecutionChangeSsz = GossipTopic{
-	Name:     BlsToExecutionChangeTopic,
+	Name:     gossip.TopicNameBlsToExecutionChange,
 	CodecStr: SSZSnappyCodec,
 }
 
 type GossipManager struct {
-	ch            chan *pubsub.Message
+	ch            chan *GossipMessage
 	subscriptions map[string]*GossipSubscription
 	mu            sync.RWMutex
 }
 
+const maxIncomingGossipMessages = 5092
+
 // construct a new gossip manager that will handle packets with the given handlerfunc
 func NewGossipManager(
 	ctx context.Context,
 ) *GossipManager {
 	g := &GossipManager{
-		ch:            make(chan *pubsub.Message, 1),
+		ch:            make(chan *GossipMessage, maxIncomingGossipMessages),
 		subscriptions: map[string]*GossipSubscription{},
 	}
 	return g
@@ -102,14 +94,14 @@ func NewGossipManager(
 func GossipSidecarTopics(maxBlobs uint64) (ret []GossipTopic) {
 	for i := uint64(0); i < maxBlobs; i++ {
 		ret = append(ret, GossipTopic{
-			Name:     TopicName(fmt.Sprintf(string(BlobSidecarTopic), i)),
+			Name:     gossip.TopicNameBlobSidecar(int(i)),
 			CodecStr: SSZSnappyCodec,
 		})
 	}
 	return
 }
 
-func (s *GossipManager) Recv() <-chan *pubsub.Message {
+func (s *GossipManager) Recv() <-chan *GossipMessage {
 	return s.ch
 }
 
@@ -201,7 +193,7 @@ func (s *Sentinel) SubscribeGossip(topic GossipTopic, opts ...pubsub.TopicOpt) (
 	if err != nil {
 		return nil, fmt.Errorf("failed to join topic %s, err=%w", path, err)
 	}
-	topicScoreParams := s.topicScoreParams(string(topic.Name))
+	topicScoreParams := s.topicScoreParams(topic.Name)
 	if topicScoreParams != nil {
 		sub.topic.SetScoreParams(topicScoreParams)
 	}
@@ -222,7 +214,7 @@ func (s *Sentinel) Unsubscribe(topic GossipTopic, opts ...pubsub.TopicOpt) (err
 
 func (s *Sentinel) topicScoreParams(topic string) *pubsub.TopicScoreParams {
 	switch {
-	case strings.Contains(topic, string(BeaconBlockTopic)):
+	case strings.Contains(topic, gossip.TopicNameBeaconBlock):
 		return s.defaultBlockTopicParams()
 	/*case strings.Contains(topic, GossipAggregateAndProofMessage):
 	return defaultAggregateTopicParams(activeValidators), nil
@@ -283,7 +275,7 @@ func (g *GossipManager) Close() {
 type GossipSubscription struct {
 	gossip_topic GossipTopic
 	host         peer.ID
-	ch           chan *pubsub.Message
+	ch           chan *GossipMessage
 	ctx          context.Context
 
 	topic *pubsub.Topic
@@ -330,9 +322,15 @@ func (s *GossipSubscription) Close() {
 	}
 }
 
+type GossipMessage struct {
+	From      peer.ID
+	TopicName string
+	Data      []byte
+}
+
 // this is a helper to begin running the gossip subscription.
// function should not be used outside of the constructor for gossip subscription -func (s *GossipSubscription) run(ctx context.Context, sub *pubsub.Subscription, topic string) { +func (s *GossipSubscription) run(ctx context.Context, sub *pubsub.Subscription, topicName string) { defer func() { if r := recover(); r != nil { log.Error("[Sentinel Gossip] Message Handler Crashed", "err", r) @@ -350,13 +348,17 @@ func (s *GossipSubscription) run(ctx context.Context, sub *pubsub.Subscription, if errors.Is(err, context.Canceled) { return } - log.Warn("[Sentinel] fail to decode gossip packet", "err", err, "topic", topic) + log.Warn("[Sentinel] fail to decode gossip packet", "err", err, "topicName", topicName) return } if msg.GetFrom() == s.host { continue } - s.ch <- msg + s.ch <- &GossipMessage{ + From: msg.GetFrom(), + TopicName: topicName, + Data: common.Copy(msg.Data), + } } } } diff --git a/cl/sentinel/handlers/handlers.go b/cl/sentinel/handlers/handlers.go index 87be5480c40..a97bfb57cd0 100644 --- a/cl/sentinel/handlers/handlers.go +++ b/cl/sentinel/handlers/handlers.go @@ -16,7 +16,7 @@ package handlers import ( "context" "errors" - "fmt" + "math" "strings" "sync" "time" @@ -47,7 +47,7 @@ type RateLimits struct { } const punishmentPeriod = time.Minute -const defaultRateLimit = 5000 +const defaultRateLimit = math.MaxInt const defaultBlockHandlerRateLimit = 200 var rateLimits = RateLimits{ @@ -163,7 +163,6 @@ func (c *ConsensusHandlers) wrapStreamHandler(name string, fn func(s network.Str err = fn(s) if err != nil { l["err"] = err - fmt.Println("err", err) log.Trace("[pubsubhandler] stream handler", l) // TODO: maybe we should log this _ = s.Reset() diff --git a/cl/sentinel/sentinel.go b/cl/sentinel/sentinel.go index ad2cbbf6db8..f5b36861699 100644 --- a/cl/sentinel/sentinel.go +++ b/cl/sentinel/sentinel.go @@ -92,7 +92,7 @@ func (s *Sentinel) createLocalNode( udpPort, tcpPort int, tmpDir string, ) (*enode.LocalNode, error) { - db, err := enode.OpenDB(s.ctx, "", tmpDir) + db, err := enode.OpenDB(s.ctx, "", tmpDir, s.logger) if err != nil { return nil, fmt.Errorf("could not open node's peer database: %w", err) } @@ -262,7 +262,7 @@ func (s *Sentinel) ReqRespHandler() http.Handler { return s.httpApi } -func (s *Sentinel) RecvGossip() <-chan *pubsub.Message { +func (s *Sentinel) RecvGossip() <-chan *GossipMessage { return s.subManager.Recv() } diff --git a/cl/sentinel/sentinel_gossip_test.go b/cl/sentinel/sentinel_gossip_test.go index 448c3513555..5ef8b2082e8 100644 --- a/cl/sentinel/sentinel_gossip_test.go +++ b/cl/sentinel/sentinel_gossip_test.go @@ -13,77 +13,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestSentinelGossipAverage(t *testing.T) { - t.Skip("TODO: fix me") - listenAddrHost := "127.0.0.1" - - ctx := context.Background() - db, _, f, _, _ := loadChain(t) - raw := persistence.NewAferoRawBlockSaver(f, &clparams.MainnetBeaconConfig) - genesisConfig, networkConfig, beaconConfig := clparams.GetConfigsByNetwork(clparams.MainnetNetwork) - sentinel1, err := New(ctx, &SentinelConfig{ - NetworkConfig: networkConfig, - BeaconConfig: beaconConfig, - GenesisConfig: genesisConfig, - IpAddr: listenAddrHost, - Port: 7070, - EnableBlocks: true, - }, raw, db, log.New()) - require.NoError(t, err) - defer sentinel1.Stop() - - require.NoError(t, sentinel1.Start()) - h := sentinel1.host - - sentinel2, err := New(ctx, &SentinelConfig{ - NetworkConfig: networkConfig, - BeaconConfig: beaconConfig, - GenesisConfig: genesisConfig, - IpAddr: listenAddrHost, - Port: 7077, - EnableBlocks: true, - 
TCPPort: 9123, - }, raw, db, log.New()) - require.NoError(t, err) - defer sentinel2.Stop() - - require.NoError(t, sentinel2.Start()) - h2 := sentinel2.host - - sub1, err := sentinel1.SubscribeGossip(BeaconBlockSsz) - require.NoError(t, err) - defer sub1.Close() - - require.NoError(t, sub1.Listen()) - - sub2, err := sentinel2.SubscribeGossip(BeaconBlockSsz) - require.NoError(t, err) - defer sub2.Close() - require.NoError(t, sub2.Listen()) - - err = h.Connect(ctx, peer.AddrInfo{ - ID: h2.ID(), - Addrs: h2.Addrs(), - }) - require.NoError(t, err) - - ch := sentinel2.RecvGossip() - msg := []byte("hello") - go func() { - // delay to make sure that the connection is established - time.Sleep(5 * time.Second) - sub1.Publish(msg) - }() - - select { - case ans := <-ch: - require.Equal(t, len(msg), len(ans.Message.Data)) - require.Equal(t, msg, ans.Data) - case <-ctx.Done(): - t.Fatal("timeout") - } -} - func TestSentinelGossipOnHardFork(t *testing.T) { listenAddrHost := "127.0.0.1" @@ -144,38 +73,33 @@ func TestSentinelGossipOnHardFork(t *testing.T) { Addrs: h2.Addrs(), }) require.NoError(t, err) + time.Sleep(5 * time.Second) ch := sentinel2.RecvGossip() msg := []byte("hello") go func() { // delay to make sure that the connection is established - time.Sleep(time.Second) sub1.Publish(msg) }() previousTopic := "" - select { - case ans := <-ch: - require.Equal(t, len(msg), len(ans.Message.Data)) - previousTopic = *ans.Topic - case <-ctx.Done(): - t.Fatal("timeout") - } + ans := <-ch + require.Equal(t, ans.Data, msg) + previousTopic = ans.TopicName bcfg.AltairForkEpoch = clparams.MainnetBeaconConfig.AltairForkEpoch bcfg.InitializeForkSchedule() + time.Sleep(5 * time.Second) + msg = []byte("hello1") go func() { // delay to make sure that the connection is established - time.Sleep(time.Second) - sub1 = sentinel1.subManager.GetMatchingSubscription(string(BeaconBlockSsz.Name)) + sub1 = sentinel1.subManager.GetMatchingSubscription(BeaconBlockSsz.Name) sub1.Publish(msg) }() - select { - case ans := <-ch: - require.NotEqual(t, previousTopic, *ans.Topic) - case <-ctx.Done(): - t.Fatal("timeout") - } + ans = <-ch + require.Equal(t, ans.Data, msg) + require.NotEqual(t, previousTopic, ans.TopicName) + } diff --git a/cl/sentinel/sentinel_requests_test.go b/cl/sentinel/sentinel_requests_test.go index 34a58ab917a..1686c97e872 100644 --- a/cl/sentinel/sentinel_requests_test.go +++ b/cl/sentinel/sentinel_requests_test.go @@ -35,11 +35,11 @@ func loadChain(t *testing.T) (db kv.RwDB, blocks []*cltypes.SignedBeaconBlock, f blocks, preState, postState = tests.GetPhase0Random() db = memdb.NewTestDB(t) var reader *tests.MockBlockReader - reader, f = tests.LoadChain(blocks, db, t) + reader, f = tests.LoadChain(blocks, postState, db, t) ctx := context.Background() vt := state_accessors.NewStaticValidatorTable() - a := antiquary.NewAntiquary(ctx, preState, vt, &clparams.MainnetBeaconConfig, datadir.New("/tmp"), nil, db, nil, reader, nil, log.New(), true, f) + a := antiquary.NewAntiquary(ctx, preState, vt, &clparams.MainnetBeaconConfig, datadir.New("/tmp"), nil, db, nil, reader, nil, log.New(), true, true, f) require.NoError(t, a.IncrementBeaconState(ctx, blocks[len(blocks)-1].Block.Slot+33)) return } @@ -259,6 +259,7 @@ func TestSentinelBlocksByRoots(t *testing.T) { } func TestSentinelStatusRequest(t *testing.T) { + t.Skip("TODO: fix me") listenAddrHost := "127.0.0.1" ctx := context.Background() diff --git a/cl/sentinel/service/notifiers.go b/cl/sentinel/service/notifiers.go index 8af9b991a73..7297dc81802 100644 --- 
a/cl/sentinel/service/notifiers.go +++ b/cl/sentinel/service/notifiers.go @@ -4,7 +4,7 @@ import ( "fmt" "sync" - "github.com/ledgerwatch/erigon-lib/gointerfaces/sentinel" + "github.com/ledgerwatch/erigon/cl/gossip" ) const ( @@ -12,10 +12,9 @@ const ( ) type gossipObject struct { - data []byte // gossip data - t sentinel.GossipType // determine which gossip message we are notifying of - pid string // pid is the peer id of the sender - blobIndex *uint32 // index of the blob + data []byte // gossip data + t string // determine which gossip message we are notifying of + pid string // pid is the peer id of the sender } type gossipNotifier struct { @@ -30,7 +29,7 @@ func newGossipNotifier() *gossipNotifier { } } -func (g *gossipNotifier) notify(t sentinel.GossipType, data []byte, pid string) { +func (g *gossipNotifier) notify(t string, data []byte, pid string) { g.mu.Lock() defer g.mu.Unlock() @@ -43,18 +42,15 @@ func (g *gossipNotifier) notify(t sentinel.GossipType, data []byte, pid string) } } -func (g *gossipNotifier) notifyBlob(t sentinel.GossipType, data []byte, pid string, blobIndex int) { +func (g *gossipNotifier) notifyBlob(data []byte, pid string, blobIndex int) { g.mu.Lock() defer g.mu.Unlock() - index := new(uint32) - *index = uint32(blobIndex) for _, ch := range g.notifiers { ch <- gossipObject{ - data: data, - t: t, - pid: pid, - blobIndex: index, + data: data, + t: gossip.TopicNameBlobSidecar(blobIndex), + pid: pid, } } } diff --git a/cl/sentinel/service/service.go b/cl/sentinel/service/service.go index 1c72d1fe4c6..218c54f1848 100644 --- a/cl/sentinel/service/service.go +++ b/cl/sentinel/service/service.go @@ -3,16 +3,17 @@ package service import ( "bytes" "context" - "errors" "fmt" "io" "net/http" + "path" "strconv" "strings" "sync" "time" "github.com/ledgerwatch/erigon-lib/diagnostics" + "github.com/ledgerwatch/erigon/cl/gossip" "github.com/ledgerwatch/erigon/cl/sentinel" "github.com/ledgerwatch/erigon/cl/sentinel/httpreqresp" @@ -21,10 +22,11 @@ import ( "github.com/ledgerwatch/erigon/cl/cltypes" "github.com/ledgerwatch/erigon/cl/utils" "github.com/ledgerwatch/log/v3" - pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/peer" ) +var _ sentinelrpc.SentinelServer = (*SentinelServer)(nil) + type SentinelServer struct { sentinelrpc.UnimplementedSentinelServer @@ -51,7 +53,7 @@ func NewSentinelServer(ctx context.Context, sentinel *sentinel.Sentinel, logger // extractBlobSideCarIndex takes a topic and extract the blob sidecar func extractBlobSideCarIndex(topic string) int { // compute the index prefixless - startIndex := strings.Index(topic, string(sentinel.BlobSidecarTopic)) + len(sentinel.BlobSidecarTopic) + startIndex := strings.Index(topic, gossip.TopicNamePrefixBlobSidecar) + len(gossip.TopicNamePrefixBlobSidecar) endIndex := strings.Index(topic[:startIndex], "/") blobIndex, err := strconv.Atoi(topic[startIndex:endIndex]) if err != nil { @@ -78,28 +80,30 @@ func (s *SentinelServer) PublishGossip(_ context.Context, msg *sentinelrpc.Gossi // Snappify payload before sending it to gossip compressedData := utils.CompressSnappy(msg.Data) - s.trackPeerStatistics(msg.GetPeer().Pid, false, msg.Type.String(), "unknown", len(compressedData)) + s.trackPeerStatistics(msg.GetPeer().Pid, false, msg.Name, "unknown", len(compressedData)) var subscription *sentinel.GossipSubscription - switch msg.Type { - case sentinelrpc.GossipType_BeaconBlockGossipType: - subscription = manager.GetMatchingSubscription(string(sentinel.BeaconBlockTopic)) - case 
sentinelrpc.GossipType_AggregateAndProofGossipType: - subscription = manager.GetMatchingSubscription(string(sentinel.BeaconAggregateAndProofTopic)) - case sentinelrpc.GossipType_VoluntaryExitGossipType: - subscription = manager.GetMatchingSubscription(string(sentinel.VoluntaryExitTopic)) - case sentinelrpc.GossipType_ProposerSlashingGossipType: - subscription = manager.GetMatchingSubscription(string(sentinel.ProposerSlashingTopic)) - case sentinelrpc.GossipType_AttesterSlashingGossipType: - subscription = manager.GetMatchingSubscription(string(sentinel.AttesterSlashingTopic)) - case sentinelrpc.GossipType_BlobSidecarType: - if msg.BlobIndex == nil { - return &sentinelrpc.EmptyMessage{}, errors.New("cannot publish sidecar blob with no index") - } - subscription = manager.GetMatchingSubscription(fmt.Sprintf(string(sentinel.BlobSidecarTopic), *msg.BlobIndex)) + // TODO: this is still wrong... we should build a subscription here to match exactly, meaning that downstream consumers should be + // in charge of keeping track of fork id. + switch msg.Name { + case gossip.TopicNameBeaconBlock: + subscription = manager.GetMatchingSubscription(msg.Name) + case gossip.TopicNameBeaconAggregateAndProof: + subscription = manager.GetMatchingSubscription(msg.Name) + case gossip.TopicNameVoluntaryExit: + subscription = manager.GetMatchingSubscription(msg.Name) + case gossip.TopicNameProposerSlashing: + subscription = manager.GetMatchingSubscription(msg.Name) + case gossip.TopicNameAttesterSlashing: + subscription = manager.GetMatchingSubscription(msg.Name) default: - return &sentinelrpc.EmptyMessage{}, nil + switch { + case gossip.IsTopicBlobSidecar(msg.Name): + subscription = manager.GetMatchingSubscription(msg.Name) + default: + return &sentinelrpc.EmptyMessage{}, nil + } } if subscription == nil { return &sentinelrpc.EmptyMessage{}, nil @@ -107,7 +111,7 @@ func (s *SentinelServer) PublishGossip(_ context.Context, msg *sentinelrpc.Gossi return &sentinelrpc.EmptyMessage{}, subscription.Publish(compressedData) } -func (s *SentinelServer) SubscribeGossip(_ *sentinelrpc.EmptyMessage, stream sentinelrpc.Sentinel_SubscribeGossipServer) error { +func (s *SentinelServer) SubscribeGossip(data *sentinelrpc.SubscriptionData, stream sentinelrpc.Sentinel_SubscribeGossipServer) error { // first of all subscribe ch, subId, err := s.gossipNotifier.addSubscriber() if err != nil { @@ -121,13 +125,15 @@ func (s *SentinelServer) SubscribeGossip(_ *sentinelrpc.EmptyMessage, stream sen case <-stream.Context().Done(): return nil case packet := <-ch: + if !s.gossipMatchSubscription(packet, data) { + continue + } if err := stream.Send(&sentinelrpc.GossipData{ Data: packet.data, - Type: packet.t, + Name: packet.t, Peer: &sentinelrpc.Peer{ Pid: packet.pid, }, - BlobIndex: packet.blobIndex, }); err != nil { s.logger.Warn("[Sentinel] Could not relay gossip packet", "reason", err) } @@ -135,6 +141,17 @@ func (s *SentinelServer) SubscribeGossip(_ *sentinelrpc.EmptyMessage, stream sen } } +func (s *SentinelServer) gossipMatchSubscription(obj gossipObject, data *sentinelrpc.SubscriptionData) bool { + if data.Filter != nil { + filter := data.GetFilter() + matched, err := path.Match(obj.t, filter) + if err != nil || !matched { + return false + } + } + return true +} + func (s *SentinelServer) withTimeoutCtx(pctx context.Context, dur time.Duration) (ctx context.Context, cn func()) { if dur > 0 { ctx, cn = context.WithTimeout(pctx, 8*time.Second) @@ -273,43 +290,43 @@ func (s *SentinelServer) ListenToGossip() { } } -func (s *SentinelServer) 
handleGossipPacket(pkt *pubsub.Message) error { +func (s *SentinelServer) handleGossipPacket(pkt *sentinel.GossipMessage) error { var err error - s.logger.Trace("[Sentinel Gossip] Received Packet", "topic", pkt.Topic) - - data := pkt.GetData() + s.logger.Trace("[Sentinel Gossip] Received Packet", "topic", pkt.TopicName) + data := pkt.Data + topic := pkt.TopicName // If we use snappy codec then decompress it accordingly. - if strings.Contains(*pkt.Topic, sentinel.SSZSnappyCodec) { + if strings.Contains(topic, sentinel.SSZSnappyCodec) { data, err = utils.DecompressSnappy(data) if err != nil { return err } } - textPid, err := pkt.ReceivedFrom.MarshalText() + textPid, err := pkt.From.MarshalText() if err != nil { return err } - msgType, msgCap := parseTopic(pkt.GetTopic()) + msgType, msgCap := parseTopic(topic) s.trackPeerStatistics(string(textPid), true, msgType, msgCap, len(data)) // Check to which gossip it belongs to. - if strings.Contains(*pkt.Topic, string(sentinel.BeaconBlockTopic)) { - s.gossipNotifier.notify(sentinelrpc.GossipType_BeaconBlockGossipType, data, string(textPid)) - } else if strings.Contains(*pkt.Topic, string(sentinel.BeaconAggregateAndProofTopic)) { - s.gossipNotifier.notify(sentinelrpc.GossipType_AggregateAndProofGossipType, data, string(textPid)) - } else if strings.Contains(*pkt.Topic, string(sentinel.VoluntaryExitTopic)) { - s.gossipNotifier.notify(sentinelrpc.GossipType_VoluntaryExitGossipType, data, string(textPid)) - } else if strings.Contains(*pkt.Topic, string(sentinel.ProposerSlashingTopic)) { - s.gossipNotifier.notify(sentinelrpc.GossipType_ProposerSlashingGossipType, data, string(textPid)) - } else if strings.Contains(*pkt.Topic, string(sentinel.AttesterSlashingTopic)) { - s.gossipNotifier.notify(sentinelrpc.GossipType_AttesterSlashingGossipType, data, string(textPid)) - } else if strings.Contains(*pkt.Topic, string(sentinel.BlsToExecutionChangeTopic)) { - s.gossipNotifier.notify(sentinelrpc.GossipType_BlsToExecutionChangeGossipType, data, string(textPid)) - } else if strings.Contains(*pkt.Topic, string(sentinel.BlobSidecarTopic)) { + if strings.Contains(topic, string(gossip.TopicNameBeaconBlock)) { + s.gossipNotifier.notify(gossip.TopicNameBeaconBlock, data, string(textPid)) + } else if strings.Contains(topic, string(gossip.TopicNameBeaconAggregateAndProof)) { + s.gossipNotifier.notify(gossip.TopicNameBeaconAggregateAndProof, data, string(textPid)) + } else if strings.Contains(topic, string(gossip.TopicNameVoluntaryExit)) { + s.gossipNotifier.notify(gossip.TopicNameVoluntaryExit, data, string(textPid)) + } else if strings.Contains(topic, string(gossip.TopicNameProposerSlashing)) { + s.gossipNotifier.notify(gossip.TopicNameProposerSlashing, data, string(textPid)) + } else if strings.Contains(topic, string(gossip.TopicNameAttesterSlashing)) { + s.gossipNotifier.notify(gossip.TopicNameAttesterSlashing, data, string(textPid)) + } else if strings.Contains(topic, string(gossip.TopicNameBlsToExecutionChange)) { + s.gossipNotifier.notify(gossip.TopicNameBlsToExecutionChange, data, string(textPid)) + } else if gossip.IsTopicBlobSidecar(topic) { // extract the index - s.gossipNotifier.notifyBlob(sentinelrpc.GossipType_BlobSidecarType, data, string(textPid), extractBlobSideCarIndex(*pkt.Topic)) + s.gossipNotifier.notifyBlob(data, string(textPid), extractBlobSideCarIndex(topic)) } return nil } diff --git a/cl/sentinel/service/start.go b/cl/sentinel/service/start.go index 50ad1b38cf1..f84de009455 100644 --- a/cl/sentinel/service/start.go +++ 
b/cl/sentinel/service/start.go @@ -31,7 +31,7 @@ func createSentinel(cfg *sentinel.SentinelConfig, db persistence.RawBeaconBlockC } gossipTopics := []sentinel.GossipTopic{ sentinel.BeaconBlockSsz, - //sentinel.BeaconAggregateAndProofSsz, + sentinel.BeaconAggregateAndProofSsz, sentinel.VoluntaryExitSsz, sentinel.ProposerSlashingSsz, sentinel.AttesterSlashingSsz, diff --git a/cl/spectest/Makefile b/cl/spectest/Makefile index 42877b2a330..23c6cf4c17e 100644 --- a/cl/spectest/Makefile +++ b/cl/spectest/Makefile @@ -3,15 +3,14 @@ tests: GIT_LFS_SKIP_SMUDGE=1 git clone https://github.com/ethereum/consensus-spec-tests - cd consensus-spec-tests && git checkout 99549a414c10baa9e69abcb08eb256fc1a8d54f6 && git lfs pull --exclude=tests/general,tests/minimal && cd .. + cd consensus-spec-tests && git checkout 080c96fbbf3be58e75947debfeb9ba3b2b7c9748 && git lfs pull --exclude=tests/general,tests/minimal && cd .. mv consensus-spec-tests/tests . rm -rf consensus-spec-tests rm -rf tests/minimal # not needed for now rm -rf tests/mainnet/eip6110 - # will not implement until i see it on a testnet - rm -rf tests/mainnet/deneb - + # FIXME: Add fork choice coverage for deneb + rm -rf tests/mainnet/deneb/fork_choice clean: rm -rf tests diff --git a/cl/spectest/consensus_tests/appendix.go b/cl/spectest/consensus_tests/appendix.go index 788ac359a65..b547533edb2 100644 --- a/cl/spectest/consensus_tests/appendix.go +++ b/cl/spectest/consensus_tests/appendix.go @@ -11,6 +11,7 @@ import ( var TestFormats = spectest.Appendix{} func init() { + TestFormats.Add("bls"). With("aggregate_verify", &BlsAggregateVerify{}). With("aggregate", spectest.UnimplementedHandler). @@ -47,7 +48,7 @@ func init() { TestFormats.Add("kzg"). With("", spectest.UnimplementedHandler) TestFormats.Add("light_client"). - With("", spectest.UnimplementedHandler) + WithFn("single_merkle_proof", LightClientBeaconBlockBodyExecutionMerkleProof) TestFormats.Add("operations"). WithFn("attestation", operationAttestationHandler). WithFn("attester_slashing", operationAttesterSlashingHandler). @@ -106,11 +107,11 @@ func addSszTests() { //With("HistoricalBatch", getSSZStaticConsensusTest(&cltypes.HistoricalBatch{})). With("HistoricalSummary", getSSZStaticConsensusTest(&cltypes.HistoricalSummary{})). With("IndexedAttestation", getSSZStaticConsensusTest(&cltypes.IndexedAttestation{})). - // With("LightClientBootstrap", getSSZStaticConsensusTest(&cltypes.LightClientBootstrap{})). Unimplemented - // With("LightClientFinalityUpdate", getSSZStaticConsensusTest(&cltypes.LightClientFinalityUpdate{})). Unimplemented - // With("LightClientHeader", getSSZStaticConsensusTest(&cltypes.LightClientHeader{})). Unimplemented - // With("LightClientOptimisticUpdate", getSSZStaticConsensusTest(&cltypes.LightClientOptimisticUpdate{})). Unimplemented - // With("LightClientUpdate", getSSZStaticConsensusTest(&cltypes.LightClientUpdate{})). Unimplemented + With("LightClientBootstrap", getSSZStaticConsensusTest(&cltypes.LightClientBootstrap{})). + With("LightClientFinalityUpdate", getSSZStaticConsensusTest(&cltypes.LightClientFinalityUpdate{})). + With("LightClientHeader", getSSZStaticConsensusTest(&cltypes.LightClientHeader{})). + With("LightClientOptimisticUpdate", getSSZStaticConsensusTest(&cltypes.LightClientOptimisticUpdate{})). + With("LightClientUpdate", getSSZStaticConsensusTest(&cltypes.LightClientUpdate{})). With("PendingAttestation", getSSZStaticConsensusTest(&solid.PendingAttestation{})). // With("PowBlock", getSSZStaticConsensusTest(&cltypes.PowBlock{})). 
Unimplemented With("ProposerSlashing", getSSZStaticConsensusTest(&cltypes.ProposerSlashing{})). diff --git a/cl/spectest/consensus_tests/epoch_processing.go b/cl/spectest/consensus_tests/epoch_processing.go index 390c2ae4289..8fea8841bbd 100644 --- a/cl/spectest/consensus_tests/epoch_processing.go +++ b/cl/spectest/consensus_tests/epoch_processing.go @@ -1,12 +1,14 @@ package consensus_tests import ( - "github.com/ledgerwatch/erigon/spectest" "io/fs" "os" "testing" + "github.com/ledgerwatch/erigon/spectest" + "github.com/ledgerwatch/erigon/cl/abstract" + "github.com/ledgerwatch/erigon/cl/clparams" "github.com/ledgerwatch/erigon/cl/phase1/core/state" "github.com/ledgerwatch/erigon/cl/transition/impl/eth2/statechange" @@ -66,7 +68,11 @@ var historicalRootsUpdateTest = NewEpochProcessing(func(s abstract.BeaconState) }) var inactivityUpdateTest = NewEpochProcessing(func(s abstract.BeaconState) error { - return statechange.ProcessInactivityScores(s, state.EligibleValidatorsIndicies(s), statechange.GetUnslashedIndiciesSet(s)) + var unslashedIndiciesSet [][]bool + if s.Version() >= clparams.AltairVersion { + unslashedIndiciesSet = statechange.GetUnslashedIndiciesSet(s.BeaconConfig(), state.PreviousEpoch(s), s.ValidatorSet(), s.PreviousEpochParticipation()) + } + return statechange.ProcessInactivityScores(s, state.EligibleValidatorsIndicies(s), unslashedIndiciesSet) }) var justificationFinalizationTest = NewEpochProcessing(func(s abstract.BeaconState) error { @@ -91,7 +97,11 @@ var registryUpdatesTest = NewEpochProcessing(func(s abstract.BeaconState) error }) var rewardsAndPenaltiesTest = NewEpochProcessing(func(s abstract.BeaconState) error { - return statechange.ProcessRewardsAndPenalties(s, state.EligibleValidatorsIndicies(s), statechange.GetUnslashedIndiciesSet(s)) + var unslashedIndiciesSet [][]bool + if s.Version() >= clparams.AltairVersion { + unslashedIndiciesSet = statechange.GetUnslashedIndiciesSet(s.BeaconConfig(), state.PreviousEpoch(s), s.ValidatorSet(), s.PreviousEpochParticipation()) + } + return statechange.ProcessRewardsAndPenalties(s, state.EligibleValidatorsIndicies(s), unslashedIndiciesSet) }) var slashingsTest = NewEpochProcessing(func(s abstract.BeaconState) error { diff --git a/cl/spectest/consensus_tests/fork_choice.go b/cl/spectest/consensus_tests/fork_choice.go index ca39a83c042..4eff9f50c11 100644 --- a/cl/spectest/consensus_tests/fork_choice.go +++ b/cl/spectest/consensus_tests/fork_choice.go @@ -3,10 +3,11 @@ package consensus_tests import ( "context" "fmt" - "github.com/ledgerwatch/erigon/spectest" "io/fs" "testing" + "github.com/ledgerwatch/erigon/spectest" + "github.com/ledgerwatch/erigon/cl/abstract" "github.com/ledgerwatch/erigon/cl/clparams" "github.com/ledgerwatch/erigon/cl/cltypes/solid" @@ -157,6 +158,7 @@ func (b *ForkChoice) Run(t *testing.T, root fs.FS, c spectest.TestCase) (err err forkStore, err := forkchoice.NewForkChoiceStore(context.Background(), anchorState, nil, nil, pool.NewOperationsPool(&clparams.MainnetBeaconConfig), fork_graph.NewForkGraphDisk(anchorState, afero.NewMemMapFs())) require.NoError(t, err) + forkStore.SetSynced(true) var steps []ForkChoiceStep err = spectest.ReadYml(root, "steps.yaml", &steps) @@ -194,7 +196,7 @@ func (b *ForkChoice) Run(t *testing.T, root fs.FS, c spectest.TestCase) (err err att := &solid.Attestation{} err := spectest.ReadSsz(root, c.Version(), step.GetAttestation()+".ssz_snappy", att) require.NoError(t, err, stepstr) - err = forkStore.OnAttestation(att, false) + err = forkStore.OnAttestation(att, false, false) if 
step.GetValid() { require.NoError(t, err, stepstr) } else { diff --git a/cl/spectest/consensus_tests/light_client.go b/cl/spectest/consensus_tests/light_client.go new file mode 100644 index 00000000000..370d7fe7948 --- /dev/null +++ b/cl/spectest/consensus_tests/light_client.go @@ -0,0 +1,59 @@ +package consensus_tests + +import ( + "io/fs" + "testing" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/phase1/core/state" + "github.com/ledgerwatch/erigon/spectest" + "github.com/stretchr/testify/require" +) + +type LcBranch struct { + Branch []string `yaml:"branch"` +} + +var LightClientBeaconBlockBodyExecutionMerkleProof = spectest.HandlerFunc(func(t *testing.T, root fs.FS, c spectest.TestCase) (err error) { + var proof [][32]byte + switch c.CaseName { + case "execution_merkle_proof": + beaconBody := cltypes.NewBeaconBody(&clparams.MainnetBeaconConfig) + require.NoError(t, spectest.ReadSsz(root, c.Version(), spectest.ObjectSSZ, beaconBody)) + proof, err = beaconBody.ExecutionPayloadMerkleProof() + require.NoError(t, err) + case "current_sync_committee_merkle_proof": + state := state.New(&clparams.MainnetBeaconConfig) + require.NoError(t, spectest.ReadSsz(root, c.Version(), spectest.ObjectSSZ, state)) + proof, err = state.CurrentSyncCommitteeBranch() + require.NoError(t, err) + case "next_sync_committee_merkle_proof": + state := state.New(&clparams.MainnetBeaconConfig) + require.NoError(t, spectest.ReadSsz(root, c.Version(), spectest.ObjectSSZ, state)) + proof, err = state.NextSyncCommitteeBranch() + require.NoError(t, err) + case "finality_root_merkle_proof": + state := state.New(&clparams.MainnetBeaconConfig) + require.NoError(t, spectest.ReadSsz(root, c.Version(), spectest.ObjectSSZ, state)) + + proof, err = state.FinalityRootBranch() + require.NoError(t, err) + default: + t.Skip("skipping: ", c.CaseName) + } + + // read proof.yaml + proofYaml := LcBranch{} + err = spectest.ReadYml(root, "proof.yaml", &proofYaml) + require.NoError(t, err) + + branch := make([][32]byte, len(proofYaml.Branch)) + for i, b := range proofYaml.Branch { + branch[i] = libcommon.HexToHash(b) + } + + require.Equal(t, branch, proof) + return nil +}) diff --git a/cl/transition/compat.go b/cl/transition/compat.go index 94a70a958d6..bb5d6f06185 100644 --- a/cl/transition/compat.go +++ b/cl/transition/compat.go @@ -13,7 +13,7 @@ var _ machine2.Interface = (*eth2.Impl)(nil) var DefaultMachine = ð2.Impl{} var ValidatingMachine = ð2.Impl{FullValidation: true} -func TransitionState(s abstract.BeaconState, block *cltypes.SignedBeaconBlock, fullValidation bool) error { - cvm := ð2.Impl{FullValidation: fullValidation} +func TransitionState(s abstract.BeaconState, block *cltypes.SignedBeaconBlock, blockRewardsCollector *eth2.BlockRewardsCollector, fullValidation bool) error { + cvm := ð2.Impl{FullValidation: fullValidation, BlockRewardsCollector: blockRewardsCollector} return machine2.TransitionState(cvm, s, block) } diff --git a/cl/transition/impl/eth2/block_processing_test.go b/cl/transition/impl/eth2/block_processing_test.go index 7838ce55ae2..4a5ac87a878 100644 --- a/cl/transition/impl/eth2/block_processing_test.go +++ b/cl/transition/impl/eth2/block_processing_test.go @@ -24,5 +24,5 @@ func TestBlockProcessing(t *testing.T) { require.NoError(t, utils.DecodeSSZSnappy(s, capellaState, int(clparams.CapellaVersion))) block := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig) 
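`TransitionState` now threads an optional `*eth2.BlockRewardsCollector` through to the state-transition machine; callers that don't need the reward breakdown pass `nil`, as in the updated test just below. A minimal sketch of a caller that does collect rewards (hypothetical helper, not part of this change; assumes the `cl/transition` and `cl/transition/impl/eth2` packages from this diff are imported):

```go
// replayWithRewards replays a block against the given state and returns the
// proposer's total reward in Gwei, summed over the collector's components.
func replayWithRewards(s abstract.BeaconState, block *cltypes.SignedBeaconBlock) (uint64, error) {
	rewards := &eth2.BlockRewardsCollector{}
	if err := transition.TransitionState(s, block, rewards, false); err != nil {
		return 0, err
	}
	return rewards.Attestations + rewards.AttesterSlashings +
		rewards.ProposerSlashings + rewards.SyncAggregate, nil
}
```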
require.NoError(t, utils.DecodeSSZSnappy(block, capellaBlock, int(clparams.CapellaVersion)))
-	require.NoError(t, transition.TransitionState(s, block, true)) // All checks already made in transition state
+	require.NoError(t, transition.TransitionState(s, block, nil, true)) // All checks already made in transition state
 }
diff --git a/cl/transition/impl/eth2/impl.go b/cl/transition/impl/eth2/impl.go
index dcd233bca0c..9fc7490c94e 100644
--- a/cl/transition/impl/eth2/impl.go
+++ b/cl/transition/impl/eth2/impl.go
@@ -6,6 +6,14 @@ type Impl = impl
 
 var _ machine.Interface = (*impl)(nil)
 
+type BlockRewardsCollector struct {
+	Attestations      uint64
+	AttesterSlashings uint64
+	ProposerSlashings uint64
+	SyncAggregate     uint64
+}
+
 type impl struct {
-	FullValidation bool
+	FullValidation        bool
+	BlockRewardsCollector *BlockRewardsCollector
 }
diff --git a/cl/transition/impl/eth2/operations.go b/cl/transition/impl/eth2/operations.go
index 13335129d4b..fd570038fe5 100644
--- a/cl/transition/impl/eth2/operations.go
+++ b/cl/transition/impl/eth2/operations.go
@@ -71,8 +71,11 @@ func (I *impl) ProcessProposerSlashing(s abstract.BeaconState, propSlashing *clt
 	}
 
 	// Set whistleblower index to 0 so current proposer gets reward.
-	s.SlashValidator(h1.ProposerIndex, nil)
-	return nil
+	pr, err := s.SlashValidator(h1.ProposerIndex, nil)
+	if I.BlockRewardsCollector != nil {
+		I.BlockRewardsCollector.ProposerSlashings += pr
+	}
+	return err
 }
 
 func (I *impl) ProcessAttesterSlashing(s abstract.BeaconState, attSlashing *cltypes.AttesterSlashing) error {
@@ -109,9 +112,12 @@ func (I *impl) ProcessAttesterSlashing(s abstract.BeaconState, attSlashing *clty
 			return err
 		}
 		if validator.IsSlashable(currentEpoch) {
-			err := s.SlashValidator(ind, nil)
+			pr, err := s.SlashValidator(ind, nil)
 			if err != nil {
-				return fmt.Errorf("unable to slash validator: %d", ind)
+				return fmt.Errorf("unable to slash validator: %d: %s", ind, err)
+			}
+			if I.BlockRewardsCollector != nil {
+				I.BlockRewardsCollector.AttesterSlashings += pr
 			}
 			slashedAny = true
 		}
@@ -211,7 +217,12 @@ func (I *impl) ProcessVoluntaryExit(s abstract.BeaconState, signedVoluntaryExit
 	// We can skip it in some instances if we want to optimistically sync up.
 	if I.FullValidation {
-		domain, err := s.GetDomain(s.BeaconConfig().DomainVoluntaryExit, voluntaryExit.Epoch)
+		var domain []byte
+		if s.Version() < clparams.DenebVersion {
+			domain, err = s.GetDomain(s.BeaconConfig().DomainVoluntaryExit, voluntaryExit.Epoch)
+		} else if s.Version() >= clparams.DenebVersion {
+			domain, err = fork.ComputeDomain(s.BeaconConfig().DomainVoluntaryExit[:], utils.Uint32ToBytes4(s.BeaconConfig().CapellaForkVersion), s.GenesisValidatorsRoot())
+		}
 		if err != nil {
 			return err
 		}
@@ -305,7 +316,7 @@ func (I *impl) ProcessExecutionPayload(s abstract.BeaconState, payload *cltypes.
 }
 
 func (I *impl) ProcessSyncAggregate(s abstract.BeaconState, sync *cltypes.SyncAggregate) error {
-	votedKeys, err := processSyncAggregate(s, sync)
+	votedKeys, err := I.processSyncAggregate(s, sync)
 	if err != nil {
 		return err
 	}
@@ -335,7 +346,7 @@ func (I *impl) ProcessSyncAggregate(s abstract.BeaconState, sync *cltypes.SyncAg
 // processSyncAggregate applies all the logic in the spec function `process_sync_aggregate` except
 // verifying the BLS signatures. It returns the modified beacon state and the list of validators'
 // public keys that voted, for future signature verification.
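The `earnedProposerReward` that `processSyncAggregate` now records into `BlockRewardsCollector.SyncAggregate` follows the Altair `process_sync_aggregate` arithmetic. A self-contained sketch of that math, assuming the mainnet spec constants (illustrative only, not this PR's implementation):

```go
// syncAggregateRewards mirrors the spec's per-participant and per-proposer
// reward computation; the constants are the mainnet Altair values.
func syncAggregateRewards(totalActiveBalance, baseRewardPerIncrement uint64) (participant, proposer uint64) {
	const (
		effectiveBalanceIncrement = 1_000_000_000 // Gwei
		syncRewardWeight          = 2
		weightDenominator         = 64
		slotsPerEpoch             = 32
		syncCommitteeSize         = 512
		proposerWeight            = 8
	)
	totalActiveIncrements := totalActiveBalance / effectiveBalanceIncrement
	totalBaseRewards := baseRewardPerIncrement * totalActiveIncrements
	maxParticipantRewards := totalBaseRewards * syncRewardWeight / weightDenominator / slotsPerEpoch
	participant = maxParticipantRewards / syncCommitteeSize
	// The proposer earns PROPOSER_WEIGHT/(WEIGHT_DENOMINATOR-PROPOSER_WEIGHT)
	// of each participant reward, summed over the set committee bits.
	proposer = participant * proposerWeight / (weightDenominator - proposerWeight)
	return participant, proposer
}
```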
-func processSyncAggregate(s abstract.BeaconState, sync *cltypes.SyncAggregate) ([][]byte, error) {
+func (I *impl) processSyncAggregate(s abstract.BeaconState, sync *cltypes.SyncAggregate) ([][]byte, error) {
 	currentSyncCommittee := s.CurrentSyncCommittee()
 
 	if currentSyncCommittee == nil {
@@ -382,6 +393,9 @@ func processSyncAggregate(s abstract.BeaconState, sync *cltypes.SyncAggregate) (
 		}
 	}
 
+	if I.BlockRewardsCollector != nil {
+		I.BlockRewardsCollector.SyncAggregate = earnedProposerReward
+	}
 	return votedKeys, state.IncreaseBalance(s, proposerIndex, earnedProposerReward)
 }
 
@@ -478,7 +492,7 @@ func (I *impl) ProcessAttestations(s abstract.BeaconState, attestations *solid.L
 	c := h.Tag("attestation_step", "process")
 	var err error
 	if err := solid.RangeErr[*solid.Attestation](attestations, func(i int, a *solid.Attestation, _ int) error {
-		if attestingIndiciesSet[i], err = processAttestation(s, a, baseRewardPerIncrement); err != nil {
+		if attestingIndiciesSet[i], err = I.processAttestation(s, a, baseRewardPerIncrement); err != nil {
 			return err
 		}
 		return nil
@@ -505,7 +519,7 @@ func (I *impl) ProcessAttestations(s abstract.BeaconState, attestations *solid.L
 	return nil
 }
 
-func processAttestationPostAltair(s abstract.BeaconState, attestation *solid.Attestation, baseRewardPerIncrement uint64) ([]uint64, error) {
+func (I *impl) processAttestationPostAltair(s abstract.BeaconState, attestation *solid.Attestation, baseRewardPerIncrement uint64) ([]uint64, error) {
 	data := attestation.AttestantionData()
 	currentEpoch := state.Epoch(s)
 	stateSlot := s.Slot()
@@ -560,11 +574,14 @@ func processAttestationPostAltair(s abstract.BeaconState, attestation *solid.Att
 	c.PutSince()
 	proposerRewardDenominator := (beaconConfig.WeightDenominator - beaconConfig.ProposerWeight) * beaconConfig.WeightDenominator / beaconConfig.ProposerWeight
 	reward := proposerRewardNumerator / proposerRewardDenominator
+	if I.BlockRewardsCollector != nil {
+		I.BlockRewardsCollector.Attestations += reward
+	}
 	return attestingIndicies, state.IncreaseBalance(s, proposer, reward)
 }
 
 // processAttestationPhase0 implements the rules for phase0 processing.
-func processAttestationPhase0(s abstract.BeaconState, attestation *solid.Attestation) ([]uint64, error) {
+func (I *impl) processAttestationPhase0(s abstract.BeaconState, attestation *solid.Attestation) ([]uint64, error) {
 	data := attestation.AttestantionData()
 	committee, err := s.GetBeaconCommitee(data.Slot(), data.ValidatorIndex())
 	if err != nil {
@@ -675,7 +692,7 @@ func processAttestationPhase0(s abstract.BeaconState, attestation *solid.Attesta
 }
 
 // ProcessAttestation takes an attestation and processes it.
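For reference, the post-Altair `proposerRewardDenominator` computed above is constant for a given chain config; with the mainnet values the arithmetic works out as follows (illustrative only, not part of the diff):

```go
// WEIGHT_DENOMINATOR = 64 and PROPOSER_WEIGHT = 8 on mainnet, so:
//   proposerRewardDenominator = (64 - 8) * 64 / 8 = 448
// and each processed attestation credits the proposer with
// proposerRewardNumerator / 448 Gwei - the amount that now also
// accumulates into BlockRewardsCollector.Attestations.
const proposerRewardDenominator = (64 - 8) * 64 / 8 // = 448
```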
-func processAttestation(s abstract.BeaconState, attestation *solid.Attestation, baseRewardPerIncrement uint64) ([]uint64, error) {
+func (I *impl) processAttestation(s abstract.BeaconState, attestation *solid.Attestation, baseRewardPerIncrement uint64) ([]uint64, error) {
 	data := attestation.AttestantionData()
 	currentEpoch := state.Epoch(s)
 	previousEpoch := state.PreviousEpoch(s)
@@ -685,7 +702,10 @@ func processAttestation(s abstract.BeaconState, attestation *solid.Attestation,
 	if (data.Target().Epoch() != currentEpoch && data.Target().Epoch() != previousEpoch) || data.Target().Epoch() != state.GetEpochAtSlot(s.BeaconConfig(), data.Slot()) {
 		return nil, errors.New("ProcessAttestation: attestation with invalid epoch")
 	}
-	if data.Slot()+beaconConfig.MinAttestationInclusionDelay > stateSlot || stateSlot > data.Slot()+beaconConfig.SlotsPerEpoch {
+	if s.Version() < clparams.DenebVersion && ((data.Slot()+beaconConfig.MinAttestationInclusionDelay > stateSlot) || (stateSlot > data.Slot()+beaconConfig.SlotsPerEpoch)) {
+		return nil, errors.New("ProcessAttestation: attestation slot not in range")
+	}
+	if s.Version() >= clparams.DenebVersion && data.Slot()+beaconConfig.MinAttestationInclusionDelay > stateSlot {
 		return nil, errors.New("ProcessAttestation: attestation slot not in range")
 	}
 	if data.ValidatorIndex() >= s.CommitteeCount(data.Target().Epoch()) {
@@ -693,9 +713,9 @@ func processAttestation(s abstract.BeaconState, attestation *solid.Attestation,
 	}
 	// check if we need to use rules for phase0 or post-altair.
 	if s.Version() == clparams.Phase0Version {
-		return processAttestationPhase0(s, attestation)
+		return I.processAttestationPhase0(s, attestation)
 	}
-	return processAttestationPostAltair(s, attestation, baseRewardPerIncrement)
+	return I.processAttestationPostAltair(s, attestation, baseRewardPerIncrement)
 }
 
 func verifyAttestations(s abstract.BeaconState, attestations *solid.ListSSZ[*solid.Attestation], attestingIndicies [][]uint64) (bool, error) {
@@ -743,28 +763,26 @@ func batchVerifyAttestations(s abstract.BeaconState, indexedAttestations []*clty
 }
 
 func (I *impl) ProcessBlockHeader(s abstract.BeaconState, block *cltypes.BeaconBlock) error {
-	if I.FullValidation {
-		if block.Slot != s.Slot() {
-			return fmt.Errorf("state slot: %d, not equal to block slot: %d", s.Slot(), block.Slot)
-		}
-		if block.Slot <= s.LatestBlockHeader().Slot {
-			return fmt.Errorf("slock slot: %d, not greater than latest block slot: %d", block.Slot, s.LatestBlockHeader().Slot)
-		}
-		propInd, err := s.GetBeaconProposerIndex()
-		if err != nil {
-			return fmt.Errorf("error in GetBeaconProposerIndex: %v", err)
-		}
-		if block.ProposerIndex != propInd {
-			return fmt.Errorf("block proposer index: %d, does not match beacon proposer index: %d", block.ProposerIndex, propInd)
-		}
-		blockHeader := s.LatestBlockHeader()
-		latestRoot, err := (&blockHeader).HashSSZ()
-		if err != nil {
-			return fmt.Errorf("unable to hash tree root of latest block header: %v", err)
-		}
-		if block.ParentRoot != latestRoot {
-			return fmt.Errorf("block parent root: %x, does not match latest block root: %x", block.ParentRoot, latestRoot)
-		}
+	if block.Slot != s.Slot() {
+		return fmt.Errorf("state slot: %d, not equal to block slot: %d", s.Slot(), block.Slot)
+	}
+	if block.Slot <= s.LatestBlockHeader().Slot {
+		return fmt.Errorf("block slot: %d, not greater than latest block slot: %d", block.Slot, s.LatestBlockHeader().Slot)
+	}
+	propInd, err := s.GetBeaconProposerIndex()
+	if err != nil {
+		return fmt.Errorf("error in GetBeaconProposerIndex: %v", err)
+	}
+	if block.ProposerIndex != propInd {
+		return fmt.Errorf("block proposer index: %d, does not match beacon proposer index: %d", block.ProposerIndex, propInd)
+	}
+	blockHeader := s.LatestBlockHeader()
+	latestRoot, err := (&blockHeader).HashSSZ()
+	if err != nil {
+		return fmt.Errorf("unable to hash tree root of latest block header: %v", err)
+	}
+	if block.ParentRoot != latestRoot {
+		return fmt.Errorf("block parent root: %x, does not match latest block root: %x", block.ParentRoot, latestRoot)
 	}
 
 	bodyRoot, err := block.Body.HashSSZ()
diff --git a/cl/transition/impl/eth2/statechange/process_epoch.go b/cl/transition/impl/eth2/statechange/process_epoch.go
index 304a218ebc7..f42c5a1263d 100644
--- a/cl/transition/impl/eth2/statechange/process_epoch.go
+++ b/cl/transition/impl/eth2/statechange/process_epoch.go
@@ -7,20 +7,16 @@ import (
 	"github.com/ledgerwatch/erigon/cl/phase1/core/state"
 )
 
-func GetUnslashedIndiciesSet(s abstract.BeaconState) [][]bool {
-	if s.Version() == clparams.Phase0Version {
-		return nil
-	}
-	weights := s.BeaconConfig().ParticipationWeights()
+func GetUnslashedIndiciesSet(cfg *clparams.BeaconChainConfig, previousEpoch uint64, validatorSet *solid.ValidatorSet, previousEpochParticipation *solid.BitList) [][]bool {
+	weights := cfg.ParticipationWeights()
 	flagsUnslashedIndiciesSet := make([][]bool, len(weights))
 	for i := range weights {
-		flagsUnslashedIndiciesSet[i] = make([]bool, s.ValidatorLength())
+		flagsUnslashedIndiciesSet[i] = make([]bool, validatorSet.Length())
 	}
-	previousEpoch := state.PreviousEpoch(s)
-	s.ForEachValidator(func(validator solid.Validator, validatorIndex, total int) bool {
+	validatorSet.Range(func(validatorIndex int, validator solid.Validator, total int) bool {
 		for i := range weights {
-			flagsUnslashedIndiciesSet[i][validatorIndex] = state.IsUnslashedParticipatingIndex(s, previousEpoch, uint64(validatorIndex), i)
+			flagsUnslashedIndiciesSet[i][validatorIndex] = state.IsUnslashedParticipatingIndex(validatorSet, previousEpochParticipation, previousEpoch, uint64(validatorIndex), i)
 		}
 		return true
 	})
@@ -31,8 +27,10 @@ func GetUnslashedIndiciesSet(s abstract.BeaconState) [][]bool {
 func ProcessEpoch(s abstract.BeaconState) error {
 	eligibleValidators := state.EligibleValidatorsIndicies(s)
 	// start := time.Now()
-
-	unslashedIndiciesSet := GetUnslashedIndiciesSet(s)
+	var unslashedIndiciesSet [][]bool
+	if s.Version() >= clparams.AltairVersion {
+		unslashedIndiciesSet = GetUnslashedIndiciesSet(s.BeaconConfig(), state.PreviousEpoch(s), s.ValidatorSet(), s.PreviousEpochParticipation())
+	}
 	if err := ProcessJustificationBitsAndFinality(s, unslashedIndiciesSet); err != nil {
 		return err
 	}
diff --git a/cl/transition/impl/eth2/statechange/process_epoch_test.go b/cl/transition/impl/eth2/statechange/process_epoch_test.go
index 18c7377afff..98970ce9985 100644
--- a/cl/transition/impl/eth2/statechange/process_epoch_test.go
+++ b/cl/transition/impl/eth2/statechange/process_epoch_test.go
@@ -91,7 +91,11 @@ var startingSlashingsResetState []byte
 
 func TestProcessRewardsAndPenalties(t *testing.T) {
 	runEpochTransitionConsensusTest(t, startingRewardsPenaltyState, expectedRewardsPenaltyState, func(s abstract.BeaconState) error {
-		return ProcessRewardsAndPenalties(s, state.EligibleValidatorsIndicies(s), GetUnslashedIndiciesSet(s))
+		var unslashedIndiciesSet [][]bool
+		if s.Version() >= clparams.AltairVersion {
+			unslashedIndiciesSet = GetUnslashedIndiciesSet(s.BeaconConfig(), state.PreviousEpoch(s), s.ValidatorSet(), s.PreviousEpochParticipation())
+		}
+		return
ProcessRewardsAndPenalties(s, state.EligibleValidatorsIndicies(s), unslashedIndiciesSet) }) } @@ -161,6 +165,11 @@ var startingInactivityScoresState []byte func TestInactivityScores(t *testing.T) { runEpochTransitionConsensusTest(t, startingInactivityScoresState, expectedInactivityScoresState, func(s abstract.BeaconState) error { - return ProcessInactivityScores(s, state.EligibleValidatorsIndicies(s), GetUnslashedIndiciesSet(s)) + var unslashedIndiciesSet [][]bool + if s.Version() >= clparams.AltairVersion { + unslashedIndiciesSet = GetUnslashedIndiciesSet(s.BeaconConfig(), state.PreviousEpoch(s), s.ValidatorSet(), s.PreviousEpochParticipation()) + } + + return ProcessInactivityScores(s, state.EligibleValidatorsIndicies(s), unslashedIndiciesSet) }) } diff --git a/cl/transition/impl/eth2/statechange/process_registry_updates.go b/cl/transition/impl/eth2/statechange/process_registry_updates.go index c22c442266b..ee56a2f0ed2 100644 --- a/cl/transition/impl/eth2/statechange/process_registry_updates.go +++ b/cl/transition/impl/eth2/statechange/process_registry_updates.go @@ -62,7 +62,7 @@ func ProcessRegistryUpdates(s abstract.BeaconState) error { } return activationQueue[i].validatorIndex < activationQueue[j].validatorIndex }) - activationQueueLength := s.GetValidatorChurnLimit() + activationQueueLength := s.GetValidatorActivationChurnLimit() if len(activationQueue) > int(activationQueueLength) { activationQueue = activationQueue[:activationQueueLength] } diff --git a/cmd/bootnode/main.go b/cmd/bootnode/main.go index eedde266ad4..d3ea50e2e6f 100644 --- a/cmd/bootnode/main.go +++ b/cmd/bootnode/main.go @@ -120,7 +120,7 @@ func main() { ctx, cancel := common.RootContext() defer cancel() - db, err := enode.OpenDB(ctx, "" /* path */, "" /* tmpDir */) + db, err := enode.OpenDB(ctx, "" /* path */, "" /* tmpDir */, logger) if err != nil { panic(err) } diff --git a/cmd/capcli/cli.go b/cmd/capcli/cli.go index e0636217661..f012add350d 100644 --- a/cmd/capcli/cli.go +++ b/cmd/capcli/cli.go @@ -400,7 +400,9 @@ func (c *Chain) Run(ctx *Context) error { log.Info("Started chain download", "chain", c.Chain) dirs := datadir.New(c.Datadir) - csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs.Snap, log.Root()) + snapshotVersion := snapcfg.KnownCfg(c.Chain, 0).Version + + csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs.Snap, snapshotVersion, log.Root()) rawDB, _ := persistence.AferoRawBeaconBlockChainFromOsPath(beaconConfig, dirs.CaplinHistory) beaconDB, db, err := caplin1.OpenCaplinDatabase(ctx, db_config.DatabaseConfiguration{PruneDepth: math.MaxUint64}, beaconConfig, rawDB, dirs.CaplinIndexing, nil, false) @@ -437,7 +439,7 @@ func (c *Chain) Run(ctx *Context) error { } downloader := network.NewBackwardBeaconDownloader(ctx, beacon, db) - cfg := stages.StageHistoryReconstruction(downloader, antiquary.NewAntiquary(ctx, nil, nil, nil, dirs, nil, nil, nil, nil, nil, nil, false, nil), csn, beaconDB, db, nil, genesisConfig, beaconConfig, true, true, bRoot, bs.Slot(), "/tmp", 300*time.Millisecond, log.Root()) + cfg := stages.StageHistoryReconstruction(downloader, antiquary.NewAntiquary(ctx, nil, nil, nil, dirs, nil, nil, nil, nil, nil, nil, false, false, nil), csn, beaconDB, db, nil, genesisConfig, beaconConfig, true, true, bRoot, bs.Slot(), "/tmp", 300*time.Millisecond, log.Root()) return stages.SpawnStageHistoryDownload(cfg, ctx, log.Root()) } @@ -592,7 +594,9 @@ func (c *DumpSnapshots) Run(ctx *Context) error { return }) - return 
freezeblocks.DumpBeaconBlocks(ctx, db, beaconDB, 0, to, snaptype.Erigon2MergeLimit, dirs.Tmp, dirs.Snap, estimate.CompressSnapshot.Workers(), log.LvlInfo, log.Root()) + snapshotVersion := snapcfg.KnownCfg(c.Chain, 0).Version + + return freezeblocks.DumpBeaconBlocks(ctx, db, beaconDB, snapshotVersion, 0, to, snaptype.Erigon2MergeLimit, dirs.Tmp, dirs.Snap, estimate.CompressSnapshot.Workers(), log.LvlInfo, log.Root()) } type CheckSnapshots struct { @@ -630,8 +634,9 @@ func (c *CheckSnapshots) Run(ctx *Context) error { } to = (to / snaptype.Erigon2MergeLimit) * snaptype.Erigon2MergeLimit + snapshotVersion := snapcfg.KnownCfg(c.Chain, 0).Version - csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs.Snap, log.Root()) + csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs.Snap, snapshotVersion, log.Root()) if err := csn.ReopenFolder(); err != nil { return err } @@ -712,7 +717,9 @@ func (c *LoopSnapshots) Run(ctx *Context) error { to = (to / snaptype.Erigon2MergeLimit) * snaptype.Erigon2MergeLimit - csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs.Snap, log.Root()) + snapshotVersion := snapcfg.KnownCfg(c.Chain, 0).Version + + csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs.Snap, snapshotVersion, log.Root()) if err := csn.ReopenFolder(); err != nil { return err } @@ -782,7 +789,14 @@ func (d *DownloadSnapshots) Run(ctx *Context) error { if err != nil { return fmt.Errorf("new server: %w", err) } - return snapshotsync.WaitForDownloader("CapCliDownloader", ctx, false, snapshotsync.OnlyCaplin, s, tx, freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.NewSnapCfg(false, false, false), dirs.Snap, log.Root()), freezeblocks.NewBorRoSnapshots(ethconfig.NewSnapCfg(false, false, false), dirs.Snap, log.Root())), params.ChainConfigByChainName(d.Chain), direct.NewDownloaderClient(bittorrentServer)) + + snapshotVersion := snapcfg.KnownCfg(d.Chain, 0).Version + + return snapshotsync.WaitForDownloader(ctx, "CapCliDownloader", false, snapshotsync.OnlyCaplin, s, tx, + freezeblocks.NewBlockReader( + freezeblocks.NewRoSnapshots(ethconfig.NewSnapCfg(false, false, false), dirs.Snap, snapshotVersion, log.Root()), + freezeblocks.NewBorRoSnapshots(ethconfig.NewSnapCfg(false, false, false), dirs.Snap, snapshotVersion, log.Root())), + params.ChainConfigByChainName(d.Chain), direct.NewDownloaderClient(bittorrentServer), []string{}) } type RetrieveHistoricalState struct { @@ -811,7 +825,9 @@ func (r *RetrieveHistoricalState) Run(ctx *Context) error { return err } defer tx.Rollback() - allSnapshots := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{}, dirs.Snap, log.Root()) + snapshotVersion := snapcfg.KnownCfg(r.Chain, 0).Version + + allSnapshots := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{}, dirs.Snap, snapshotVersion, log.Root()) if err := allSnapshots.ReopenFolder(); err != nil { return err } @@ -822,7 +838,7 @@ func (r *RetrieveHistoricalState) Run(ctx *Context) error { var bor *freezeblocks.BorRoSnapshots blockReader := freezeblocks.NewBlockReader(allSnapshots, bor) eth1Getter := getters.NewExecutionSnapshotReader(ctx, beaconConfig, blockReader, db) - csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs.Snap, log.Root()) + csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs.Snap, snapshotVersion, log.Root()) if err := csn.ReopenFolder(); err != nil { return err } @@ -838,6 +854,16 @@ 
func (r *RetrieveHistoricalState) Run(ctx *Context) error { if err != nil { return err } + endTime := time.Since(start) + hRoot, err := haveState.HashSSZ() + if err != nil { + return err + } + log.Info("Got state", "slot", haveState.Slot(), "root", libcommon.Hash(hRoot), "elapsed", endTime) + + if err := haveState.InitBeaconState(); err != nil { + return err + } v := haveState.Version() // encode and decode the state @@ -849,12 +875,10 @@ func (r *RetrieveHistoricalState) Run(ctx *Context) error { if err := haveState.DecodeSSZ(enc, int(v)); err != nil { return err } - endTime := time.Since(start) - hRoot, err := haveState.HashSSZ() + hRoot, err = haveState.HashSSZ() if err != nil { return err } - log.Info("Got state", "slot", haveState.Slot(), "root", libcommon.Hash(hRoot), "elapsed", endTime) if r.CompareFile == "" { return nil } diff --git a/cmd/caplin-regression/regression/tester.go b/cmd/caplin-regression/regression/tester.go index badaebe5413..9734b21894f 100644 --- a/cmd/caplin-regression/regression/tester.go +++ b/cmd/caplin-regression/regression/tester.go @@ -88,7 +88,7 @@ func TestRegressionWithValidation(store *forkchoice.ForkChoiceStore, block *clty return err } block.Block.Body.Attestations.Range(func(index int, value *solid2.Attestation, length int) bool { - store.OnAttestation(value, true) + store.OnAttestation(value, true, true) return true }) return nil diff --git a/cmd/caplin/caplin1/run.go b/cmd/caplin/caplin1/run.go index 47d1bd123d5..a2b27138178 100644 --- a/cmd/caplin/caplin1/run.go +++ b/cmd/caplin/caplin1/run.go @@ -18,6 +18,7 @@ import ( "github.com/ledgerwatch/erigon/cl/freezer" freezer2 "github.com/ledgerwatch/erigon/cl/freezer" "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" "github.com/ledgerwatch/erigon/cl/persistence" @@ -87,7 +88,7 @@ func OpenCaplinDatabase(ctx context.Context, func RunCaplinPhase1(ctx context.Context, sentinel sentinel.SentinelClient, engine execution_client.ExecutionEngine, beaconConfig *clparams.BeaconChainConfig, genesisConfig *clparams.GenesisConfig, state *state.CachingBeaconState, - caplinFreezer freezer.Freezer, dirs datadir.Dirs, cfg beacon_router_configuration.RouterConfiguration, eth1Getter snapshot_format.ExecutionBlockReaderByNumber, + caplinFreezer freezer.Freezer, dirs datadir.Dirs, snapshotVersion uint8, cfg beacon_router_configuration.RouterConfiguration, eth1Getter snapshot_format.ExecutionBlockReaderByNumber, snDownloader proto_downloader.DownloaderClient, backfilling bool, states bool, historyDB persistence.BeaconChainDatabase, indexDB kv.RwDB) error { rawDB, af := persistence.AferoRawBeaconBlockChainFromOsPath(beaconConfig, dirs.CaplinHistory) @@ -98,7 +99,7 @@ func RunCaplinPhase1(ctx context.Context, sentinel sentinel.SentinelClient, engi logger := log.New("app", "caplin") - csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs.Snap, logger) + csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs.Snap, snapshotVersion, logger) rcsn := freezeblocks.NewBeaconSnapshotReader(csn, eth1Getter, historyDB, beaconConfig) if caplinFreezer != nil { @@ -197,7 +198,7 @@ func RunCaplinPhase1(ctx context.Context, sentinel sentinel.SentinelClient, engi if err != nil { return err } - antiq := antiquary.NewAntiquary(ctx, genesisState, vTables, beaconConfig, dirs, snDownloader, indexDB, csn, rcsn, historyDB, logger, states, af) + antiq := antiquary.NewAntiquary(ctx, 
genesisState, vTables, beaconConfig, dirs, snDownloader, indexDB, csn, rcsn, historyDB, logger, states, backfilling, af) // Create the antiquary go func() { if err := antiq.Loop(); err != nil { @@ -212,7 +213,7 @@ func RunCaplinPhase1(ctx context.Context, sentinel sentinel.SentinelClient, engi statesReader := historical_states_reader.NewHistoricalStatesReader(beaconConfig, rcsn, vTables, af, genesisState) syncedDataManager := synced_data.NewSyncedDataManager(cfg.Active, beaconConfig) if cfg.Active { - apiHandler := handler.NewApiHandler(genesisConfig, beaconConfig, rawDB, indexDB, forkChoice, pool, rcsn, syncedDataManager, statesReader) + apiHandler := handler.NewApiHandler(genesisConfig, beaconConfig, rawDB, indexDB, forkChoice, pool, rcsn, syncedDataManager, statesReader, sentinel, params.GitTag) headApiHandler := &validatorapi.ValidatorApiHandler{ FC: forkChoice, BeaconChainCfg: beaconConfig, @@ -225,11 +226,13 @@ func RunCaplinPhase1(ctx context.Context, sentinel sentinel.SentinelClient, engi log.Info("Beacon API started", "addr", cfg.Address) } + forkChoice.StartAttestationsRTT() + stageCfg := stages.ClStagesCfg(beaconRpc, antiq, genesisConfig, beaconConfig, state, engine, gossipManager, forkChoice, historyDB, indexDB, csn, dirs.Tmp, dbConfig, backfilling, syncedDataManager) sync := stages.ConsensusClStages(ctx, stageCfg) logger.Info("[Caplin] starting clstages loop") - err = sync.StartWithStage(ctx, "WaitForPeers", logger, stageCfg) + err = sync.StartWithStage(ctx, "DownloadHistoricalBlocks", logger, stageCfg) logger.Info("[Caplin] exiting clstages loop") if err != nil { return err diff --git a/cmd/caplin/caplincli/config.go b/cmd/caplin/caplincli/config.go index 46238b7c3dc..87b05181349 100644 --- a/cmd/caplin/caplincli/config.go +++ b/cmd/caplin/caplincli/config.go @@ -40,6 +40,10 @@ type CaplinCliCfg struct { EngineAPIPort int `json:"engine_api_port"` JwtSecret []byte + AllowedMethods []string `json:"allowed_methods"` + AllowedOrigins []string `json:"allowed_origins"` + AllowCredentials bool `json:"allow_credentials"` + InitalState *state.CachingBeaconState Dirs datadir.Dirs } @@ -72,6 +76,9 @@ func SetupCaplinCli(ctx *cli.Context) (cfg *CaplinCliCfg, err error) { cfg.BeaconApiReadTimeout = time.Duration(ctx.Uint64(caplinflags.BeaconApiReadTimeout.Name)) * time.Second cfg.BeaconApiWriteTimeout = time.Duration(ctx.Uint(caplinflags.BeaconApiWriteTimeout.Name)) * time.Second cfg.BeaconAddr = fmt.Sprintf("%s:%d", ctx.String(caplinflags.BeaconApiAddr.Name), ctx.Int(caplinflags.BeaconApiPort.Name)) + cfg.AllowCredentials = ctx.Bool(utils.BeaconApiAllowCredentialsFlag.Name) + cfg.AllowedMethods = ctx.StringSlice(utils.BeaconApiAllowMethodsFlag.Name) + cfg.AllowedOrigins = ctx.StringSlice(utils.BeaconApiAllowOriginsFlag.Name) cfg.BeaconProtocol = "tcp" cfg.RecordMode = ctx.Bool(caplinflags.RecordModeFlag.Name) cfg.RecordDir = ctx.String(caplinflags.RecordModeDir.Name) diff --git a/cmd/caplin/caplinflags/flags.go b/cmd/caplin/caplinflags/flags.go index 2d3d9361f78..c7929462588 100644 --- a/cmd/caplin/caplinflags/flags.go +++ b/cmd/caplin/caplinflags/flags.go @@ -23,6 +23,9 @@ var CliFlags = []cli.Flag{ &EngineApiPortFlag, &JwtSecret, &utils.DataDirFlag, + &utils.BeaconApiAllowCredentialsFlag, + &utils.BeaconApiAllowMethodsFlag, + &utils.BeaconApiAllowOriginsFlag, } var ( @@ -56,7 +59,6 @@ var ( Usage: "sets the port to listen for beacon api requests", Value: 5555, } - BeaconDBModeFlag = cli.StringFlag{ Name: "beacon-db-mode", Usage: "level of storing on beacon chain, minimal(only 500k 
blocks stored), full (all blocks stored), light (no blocks stored)", diff --git a/cmd/caplin/main.go b/cmd/caplin/main.go index 59a580d66c7..9fea81304a9 100644 --- a/cmd/caplin/main.go +++ b/cmd/caplin/main.go @@ -16,6 +16,7 @@ import ( "fmt" "os" + "github.com/ledgerwatch/erigon-lib/chain/snapcfg" "github.com/ledgerwatch/erigon/cl/beacon/beacon_router_configuration" "github.com/ledgerwatch/erigon/cl/cltypes" "github.com/ledgerwatch/erigon/cl/fork" @@ -128,12 +129,17 @@ func runCaplinNode(cliCtx *cli.Context) error { return err } - return caplin1.RunCaplinPhase1(ctx, sentinel, executionEngine, cfg.BeaconCfg, cfg.GenesisCfg, state, caplinFreezer, cfg.Dirs, beacon_router_configuration.RouterConfiguration{ - Protocol: cfg.BeaconProtocol, - Address: cfg.BeaconAddr, - ReadTimeTimeout: cfg.BeaconApiReadTimeout, - WriteTimeout: cfg.BeaconApiWriteTimeout, - IdleTimeout: cfg.BeaconApiWriteTimeout, - Active: !cfg.NoBeaconApi, + snapshotVersion := snapcfg.KnownCfg(cliCtx.String(utils.ChainFlag.Name), 0).Version + + return caplin1.RunCaplinPhase1(ctx, sentinel, executionEngine, cfg.BeaconCfg, cfg.GenesisCfg, state, caplinFreezer, cfg.Dirs, snapshotVersion, beacon_router_configuration.RouterConfiguration{ + Protocol: cfg.BeaconProtocol, + Address: cfg.BeaconAddr, + ReadTimeTimeout: cfg.BeaconApiReadTimeout, + WriteTimeout: cfg.BeaconApiWriteTimeout, + IdleTimeout: cfg.BeaconApiWriteTimeout, + Active: !cfg.NoBeaconApi, + AllowedOrigins: cfg.AllowedOrigins, + AllowedMethods: cfg.AllowedMethods, + AllowCredentials: cfg.AllowCredentials, }, nil, nil, false, false, historyDB, indiciesDB) } diff --git a/cmd/devnet/README.md b/cmd/devnet/README.md index a364d567ea0..15969924c06 100644 --- a/cmd/devnet/README.md +++ b/cmd/devnet/README.md @@ -9,7 +9,7 @@ The devnet code performs 3 main functions: * It allows for the specification of a series of scenarios which will be run against the nodes on that internal network * It can optionally run a `support` connection which allows the nodes on the network to be connected to the Erigon diagnostic system -The specification of both nodes and scenarios for the devenet is done by specifying configuration objects. These objects are currently build in code using go `structs` but are capable of being read as configuration. +The specification of both nodes and scenarios for the devnet is done by specifying configuration objects. These objects are currently built in code using go `structs` but are capable of being read as configuration. ## Devnet runtime start-up @@ -85,7 +85,7 @@ func init() { ) } ``` -Each step method will be called with a `context.Context` as its initial argument. This context provides access to the underlying devnet - so the sptep handler can use it for processing. +Each step method will be called with a `context.Context` as its initial argument. This context provides access to the underlying devnet - so the step handler can use it for processing. 
```go func PingErigonRpc(ctx context.Context) error { diff --git a/cmd/devnet/args/node_args.go b/cmd/devnet/args/node_args.go index 50c73c0e96f..1bbf714562c 100644 --- a/cmd/devnet/args/node_args.go +++ b/cmd/devnet/args/node_args.go @@ -9,14 +9,13 @@ import ( "path/filepath" "strconv" + "github.com/ledgerwatch/erigon-lib/chain/networkname" + "github.com/ledgerwatch/erigon/cmd/devnet/accounts" + "github.com/ledgerwatch/erigon/cmd/devnet/requests" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/p2p/enode" "github.com/ledgerwatch/erigon/params" - - "github.com/ledgerwatch/erigon-lib/chain/networkname" - "github.com/ledgerwatch/erigon/cmd/devnet/accounts" - "github.com/ledgerwatch/erigon/cmd/devnet/requests" ) type NodeArgs struct { @@ -42,7 +41,7 @@ type NodeArgs struct { HttpCorsDomain string `arg:"--http.corsdomain" json:"http.corsdomain"` AuthRpcPort int `arg:"--authrpc.port" default:"8551" json:"authrpc.port"` AuthRpcVHosts string `arg:"--authrpc.vhosts" json:"authrpc.vhosts"` - WSPort int `arg:"-" default:"8546" json:"-"` // flag not defined + WSPort int `arg:"--ws.port" default:"8546" json:"ws.port"` GRPCPort int `arg:"-" default:"8547" json:"-"` // flag not defined TCPPort int `arg:"-" default:"8548" json:"-"` // flag not defined Metrics bool `arg:"--metrics" flag:"" default:"false" json:"metrics"` @@ -50,7 +49,7 @@ type NodeArgs struct { MetricsAddr string `arg:"--metrics.addr" json:"metrics.addr,omitempty"` StaticPeers string `arg:"--staticpeers" json:"staticpeers,omitempty"` WithoutHeimdall bool `arg:"--bor.withoutheimdall" flag:"" default:"false" json:"bor.withoutheimdall,omitempty"` - HeimdallGrpcAddr string `arg:"--bor.heimdallgRPC" json:"bor.heimdallgRPC,omitempty"` + HeimdallURL string `arg:"--bor.heimdall" json:"bor.heimdall,omitempty"` WithHeimdallMilestones bool `arg:"--bor.milestone" json:"bor.milestone"` VMDebug bool `arg:"--vmdebug" flag:"" default:"false" json:"dmdebug"` @@ -136,6 +135,7 @@ type BlockProducer struct { NodeArgs Mine bool `arg:"--mine" flag:"true"` Etherbase string `arg:"--miner.etherbase"` + GasLimit int `arg:"--miner.gaslimit"` DevPeriod int `arg:"--dev.period"` BorPeriod int `arg:"--bor.period"` BorMinBlockSize int `arg:"--bor.minblocksize"` @@ -182,18 +182,18 @@ func (n *BlockProducer) IsBlockProducer() bool { return true } -type NonBlockProducer struct { +type BlockConsumer struct { NodeArgs HttpApi string `arg:"--http.api" default:"admin,eth,debug,net,trace,web3,erigon,txpool" json:"http.api"` TorrentPort string `arg:"--torrent.port" default:"42070" json:"torrent.port"` NoDiscover string `arg:"--nodiscover" flag:"" default:"true" json:"nodiscover"` } -func (n *NonBlockProducer) IsBlockProducer() bool { +func (n *BlockConsumer) IsBlockProducer() bool { return false } -func (n *NonBlockProducer) Account() *accounts.Account { +func (n *BlockConsumer) Account() *accounts.Account { return nil } diff --git a/cmd/devnet/args/node_args_test.go b/cmd/devnet/args/node_args_test.go index a67370b19ea..ec9a717cc95 100644 --- a/cmd/devnet/args/node_args_test.go +++ b/cmd/devnet/args/node_args_test.go @@ -36,7 +36,7 @@ func TestNodeArgs(t *testing.T) { t.Fatal(asMap, "not found") } - nodeArgs, _ = args.AsArgs(args.NonBlockProducer{ + nodeArgs, _ = args.AsArgs(args.BlockConsumer{ NodeArgs: args.NodeArgs{ DataDir: filepath.Join("data", fmt.Sprintf("%d", 2)), StaticPeers: "enode", @@ -159,6 +159,7 @@ func producingNodeArgs(dataDir string, nodeNumber int) []string { p2pProtocol, _ := 
parameterFromArgument("--p2p.protocol", "68") downloaderArg, _ := parameterFromArgument("--no-downloader", "true") httpPortArg, _ := parameterFromArgument("--http.port", "8545") + wsPortArg, _ := parameterFromArgument("--ws.port", "8546") authrpcPortArg, _ := parameterFromArgument("--authrpc.port", "8551") natArg, _ := parameterFromArgument("--nat", "none") accountSlotsArg, _ := parameterFromArgument("--txpool.accountslots", "16") @@ -170,6 +171,7 @@ func producingNodeArgs(dataDir string, nodeNumber int) []string { chainType, privateApiAddr, httpPortArg, + wsPortArg, authrpcPortArg, mine, httpApi, @@ -196,6 +198,7 @@ func nonProducingNodeArgs(dataDir string, nodeNumber int, enode string) []string p2pProtocol, _ := parameterFromArgument("--p2p.protocol", "68") downloaderArg, _ := parameterFromArgument("--no-downloader", "true") httpPortArg, _ := parameterFromArgument("--http.port", "8545") + wsPortArg, _ := parameterFromArgument("--ws.port", "8546") httpApi, _ := parameterFromArgument(httpApiArg, "admin,eth,debug,net,trace,web3,erigon,txpool") authrpcPortArg, _ := parameterFromArgument("--authrpc.port", "8551") natArg, _ := parameterFromArgument("--nat", "none") @@ -208,6 +211,7 @@ func nonProducingNodeArgs(dataDir string, nodeNumber int, enode string) []string chainType, privateApiAddr, httpPortArg, + wsPortArg, authrpcPortArg, httpApi, ws, diff --git a/cmd/devnet/devnet/context.go b/cmd/devnet/devnet/context.go index 54d9faccbc7..7322d054ca3 100644 --- a/cmd/devnet/devnet/context.go +++ b/cmd/devnet/devnet/context.go @@ -139,10 +139,12 @@ func CurrentNetwork(ctx context.Context) *Network { return cn.network } - if current := CurrentNode(ctx); current != nil { - if n, ok := current.(*devnetNode); ok { - return n.network - } + if cn, ok := ctx.Value(ckNode).(*cnode); ok && cn.node != nil { + return cn.node.(*devnetNode).network + } + + if devnet, ok := ctx.Value(ckDevnet).(Devnet); ok { + return devnet.SelectNetwork(ctx, 0) } return nil diff --git a/cmd/devnet/devnet/network.go b/cmd/devnet/devnet/network.go index 29eee727cdf..372aca838d2 100644 --- a/cmd/devnet/devnet/network.go +++ b/cmd/devnet/devnet/network.go @@ -34,7 +34,7 @@ type Network struct { Snapshots bool Nodes []Node Services []Service - Alloc types.GenesisAlloc + Genesis *types.Genesis BorStateSyncDelay time.Duration BorPeriod time.Duration BorMinBlockSize int @@ -140,12 +140,16 @@ func (nw *Network) createNode(nodeArgs Node) (Node, error) { } if n.IsBlockProducer() { - if nw.Alloc == nil { - nw.Alloc = types.GenesisAlloc{ + if nw.Genesis == nil { + nw.Genesis = &types.Genesis{} + } + + if nw.Genesis.Alloc == nil { + nw.Genesis.Alloc = types.GenesisAlloc{ n.Account().Address: types.GenesisAccount{Balance: blockProducerFunds}, } } else { - nw.Alloc[n.Account().Address] = types.GenesisAccount{Balance: blockProducerFunds} + nw.Genesis.Alloc[n.Account().Address] = types.GenesisAccount{Balance: blockProducerFunds} } } diff --git a/cmd/devnet/devnet/node.go b/cmd/devnet/devnet/node.go index 4c372721a03..33f716aa3f3 100644 --- a/cmd/devnet/devnet/node.go +++ b/cmd/devnet/devnet/node.go @@ -8,6 +8,9 @@ import ( "sync" "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/log/v3" + "github.com/urfave/cli/v2" + "github.com/ledgerwatch/erigon/cmd/devnet/accounts" "github.com/ledgerwatch/erigon/cmd/devnet/args" "github.com/ledgerwatch/erigon/cmd/devnet/requests" @@ -17,8 +20,6 @@ import ( "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/debug" enode "github.com/ledgerwatch/erigon/turbo/node" - 
"github.com/ledgerwatch/log/v3" - "github.com/urfave/cli/v2" ) type Node interface { @@ -166,12 +167,19 @@ func (n *devnetNode) run(ctx *cli.Context) error { n.nodeCfg.MdbxGrowthStep = 32 * datasize.MB n.nodeCfg.MdbxDBSizeLimit = 512 * datasize.MB - for addr, account := range n.network.Alloc { - n.ethCfg.Genesis.Alloc[addr] = account + if n.network.Genesis != nil { + for addr, account := range n.network.Genesis.Alloc { + n.ethCfg.Genesis.Alloc[addr] = account + } + + if n.network.Genesis.GasLimit != 0 { + n.ethCfg.Genesis.GasLimit = n.network.Genesis.GasLimit + } } if n.network.BorStateSyncDelay > 0 { - n.ethCfg.Bor.StateSyncConfirmationDelay = map[string]uint64{"0": uint64(n.network.BorStateSyncDelay.Seconds())} + stateSyncConfirmationDelay := map[string]uint64{"0": uint64(n.network.BorStateSyncDelay.Seconds())} + logger.Warn("TODO: custom BorStateSyncDelay is not applied to BorConfig.StateSyncConfirmationDelay", "delay", stateSyncConfirmationDelay) } n.ethNode, err = enode.New(ctx.Context, n.nodeCfg, n.ethCfg, logger) diff --git a/cmd/devnet/main.go b/cmd/devnet/main.go index 69f66e7a795..b8f5ff4ae3a 100644 --- a/cmd/devnet/main.go +++ b/cmd/devnet/main.go @@ -6,12 +6,13 @@ import ( "os/signal" "path/filepath" dbg "runtime/debug" + "strconv" "strings" "syscall" "time" - "github.com/ledgerwatch/erigon/cmd/devnet/services" - "github.com/ledgerwatch/erigon/cmd/devnet/services/polygon" + "github.com/ledgerwatch/log/v3" + "github.com/urfave/cli/v2" "github.com/ledgerwatch/erigon-lib/chain/networkname" "github.com/ledgerwatch/erigon-lib/common/metrics" @@ -21,17 +22,16 @@ import ( _ "github.com/ledgerwatch/erigon/cmd/devnet/contracts/steps" "github.com/ledgerwatch/erigon/cmd/devnet/devnet" "github.com/ledgerwatch/erigon/cmd/devnet/devnetutils" + "github.com/ledgerwatch/erigon/cmd/devnet/networks" "github.com/ledgerwatch/erigon/cmd/devnet/requests" "github.com/ledgerwatch/erigon/cmd/devnet/scenarios" - "github.com/ledgerwatch/erigon/cmd/devnet/tests" - "github.com/ledgerwatch/log/v3" - + "github.com/ledgerwatch/erigon/cmd/devnet/services" + "github.com/ledgerwatch/erigon/cmd/devnet/services/polygon" "github.com/ledgerwatch/erigon/cmd/utils/flags" "github.com/ledgerwatch/erigon/params" erigon_app "github.com/ledgerwatch/erigon/turbo/app" "github.com/ledgerwatch/erigon/turbo/debug" "github.com/ledgerwatch/erigon/turbo/logging" - "github.com/urfave/cli/v2" ) var ( @@ -76,10 +76,10 @@ var ( Usage: "Run with a devnet local Heimdall service", } - HeimdallGrpcAddressFlag = cli.StringFlag{ - Name: "bor.heimdallgRPC", - Usage: "Address of Heimdall gRPC service", - Value: polygon.HeimdallGrpcAddressDefault, + HeimdallURLFlag = cli.StringFlag{ + Name: "bor.heimdall", + Usage: "URL of Heimdall service", + Value: polygon.HeimdallURLDefault, } BorSprintSizeFlag = cli.IntFlag{ @@ -119,6 +119,24 @@ var ( Usage: "internal flag", } + txCountFlag = cli.IntFlag{ + Name: "txcount", + Usage: "Transaction count, (scenario dependent - may be total or reoccurring)", + Value: 100, + } + + BlockProducersFlag = cli.UintFlag{ + Name: "block-producers", + Usage: "The number of block producers to instantiate in the network", + Value: 1, + } + + GasLimitFlag = cli.Uint64Flag{ + Name: "gaslimit", + Usage: "Target gas limit for mined blocks", + Value: 0, + } + WaitFlag = cli.BoolFlag{ Name: "wait", Usage: "Wait until interrupted after all scenarios have run", @@ -147,7 +165,7 @@ func main() { &BaseRpcPortFlag, &WithoutHeimdallFlag, &LocalHeimdallFlag, - &HeimdallGrpcAddressFlag, + &HeimdallURLFlag, &BorSprintSizeFlag, 
&MetricsEnabledFlag, &MetricsNodeFlag, @@ -156,9 +174,12 @@ func main() { &insecureFlag, &metricsURLsFlag, &WaitFlag, + &txCountFlag, + &BlockProducersFlag, &logging.LogVerbosityFlag, &logging.LogConsoleVerbosityFlag, &logging.LogDirVerbosityFlag, + &GasLimitFlag, } if err := app.Run(os.Args); err != nil { @@ -175,7 +196,7 @@ func setupLogger(ctx *cli.Context) (log.Logger, error) { return nil, err } - logger := logging.SetupLoggerCtx("devnet", ctx, false /* rootLogger */) + logger := logging.SetupLoggerCtx("devnet", ctx, log.LvlInfo, log.LvlInfo, false /* rootLogger */) // Make root logger fail log.Root().SetHandler(PanicHandler{}) @@ -241,7 +262,8 @@ func mainContext(ctx *cli.Context) error { go connectDiagnosticsIfEnabled(ctx, logger) enabledScenarios := strings.Split(ctx.String(ScenariosFlag.Name), ",") - if err = allScenarios(runCtx).Run(runCtx, enabledScenarios...); err != nil { + + if err = allScenarios(ctx, runCtx).Run(runCtx, enabledScenarios...); err != nil { return err } @@ -256,7 +278,7 @@ func mainContext(ctx *cli.Context) error { return nil } -func allScenarios(runCtx devnet.Context) scenarios.Scenarios { +func allScenarios(cliCtx *cli.Context, runCtx devnet.Context) scenarios.Scenarios { // unsubscribe from all the subscriptions made defer services.UnsubscribeAll() @@ -313,6 +335,11 @@ func allScenarios(runCtx devnet.Context) scenarios.Scenarios { //{Text: "BatchProcessTransfers", Args: []any{"child-funder", 1, 10, 2, 2}}, }, }, + "block-production": { + Steps: []*scenarios.Step{ + {Text: "SendTxLoad", Args: []any{recipientAddress, accounts.DevAddress, sendValue, cliCtx.Uint(txCountFlag.Name)}}, + }, + }, } } @@ -321,21 +348,75 @@ func initDevnet(ctx *cli.Context, logger log.Logger) (devnet.Devnet, error) { chainName := ctx.String(ChainFlag.Name) baseRpcHost := ctx.String(BaseRpcHostFlag.Name) baseRpcPort := ctx.Int(BaseRpcPortFlag.Name) + producerCount := int(ctx.Uint(BlockProducersFlag.Name)) + gasLimit := ctx.Uint64(GasLimitFlag.Name) + + var dirLogLevel log.Lvl = log.LvlTrace + var consoleLogLevel log.Lvl = log.LvlCrit + + if ctx.IsSet(logging.LogVerbosityFlag.Name) { + lvlVal := ctx.String(logging.LogVerbosityFlag.Name) + + i, err := strconv.Atoi(lvlVal) + + lvl := log.Lvl(i) + + if err != nil { + lvl, err = log.LvlFromString(lvlVal) + } + + if err == nil { + consoleLogLevel = lvl + dirLogLevel = lvl + } + } else { + if ctx.IsSet(logging.LogConsoleVerbosityFlag.Name) { + lvlVal := ctx.String(logging.LogConsoleVerbosityFlag.Name) + + i, err := strconv.Atoi(lvlVal) + + lvl := log.Lvl(i) + + if err != nil { + lvl, err = log.LvlFromString(lvlVal) + } + + if err == nil { + consoleLogLevel = lvl + } + } + + if ctx.IsSet(logging.LogDirVerbosityFlag.Name) { + lvlVal := ctx.String(logging.LogDirVerbosityFlag.Name) + + i, err := strconv.Atoi(lvlVal) + + lvl := log.Lvl(i) + + if err != nil { + lvl, err = log.LvlFromString(lvlVal) + } + + if err == nil { + dirLogLevel = lvl + } + } + } switch chainName { case networkname.BorDevnetChainName: if ctx.Bool(WithoutHeimdallFlag.Name) { - return tests.NewBorDevnetWithoutHeimdall(dataDir, baseRpcHost, baseRpcPort, logger), nil + return networks.NewBorDevnetWithoutHeimdall(dataDir, baseRpcHost, baseRpcPort, gasLimit, logger, consoleLogLevel, dirLogLevel), nil } else if ctx.Bool(LocalHeimdallFlag.Name) { - heimdallGrpcAddr := ctx.String(HeimdallGrpcAddressFlag.Name) + heimdallURL := ctx.String(HeimdallURLFlag.Name) sprintSize := uint64(ctx.Int(BorSprintSizeFlag.Name)) - return tests.NewBorDevnetWithLocalHeimdall(dataDir, baseRpcHost, 
baseRpcPort, heimdallGrpcAddr, sprintSize, logger), nil + return networks.NewBorDevnetWithLocalHeimdall(dataDir, baseRpcHost, baseRpcPort, heimdallURL, sprintSize, producerCount, gasLimit, logger, consoleLogLevel, dirLogLevel), nil } else { - return tests.NewBorDevnetWithRemoteHeimdall(dataDir, baseRpcHost, baseRpcPort, logger), nil + return networks.NewBorDevnetWithRemoteHeimdall(dataDir, baseRpcHost, baseRpcPort, producerCount, gasLimit, logger, consoleLogLevel, dirLogLevel), nil } case networkname.DevChainName: - return tests.NewDevDevnet(dataDir, baseRpcHost, baseRpcPort, logger), nil + return networks.NewDevDevnet(dataDir, baseRpcHost, baseRpcPort, producerCount, gasLimit, logger, consoleLogLevel, dirLogLevel), nil default: return nil, fmt.Errorf("unknown network: '%s'", chainName) diff --git a/cmd/devnet/tests/devnet_bor.go b/cmd/devnet/networks/devnet_bor.go similarity index 63% rename from cmd/devnet/tests/devnet_bor.go rename to cmd/devnet/networks/devnet_bor.go index 003c662742b..ce81a45a060 100644 --- a/cmd/devnet/tests/devnet_bor.go +++ b/cmd/devnet/networks/devnet_bor.go @@ -1,8 +1,11 @@ -package tests +package networks import ( + "strconv" "time" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain/networkname" "github.com/ledgerwatch/erigon/cmd/devnet/accounts" "github.com/ledgerwatch/erigon/cmd/devnet/args" @@ -12,14 +15,17 @@ import ( "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon/polygon/bor/borcfg" ) func NewBorDevnetWithoutHeimdall( dataDir string, baseRpcHost string, baseRpcPort int, + gasLimit uint64, logger log.Logger, + consoleLogLevel log.Lvl, + dirLogLevel log.Lvl, ) devnet.Devnet { faucetSource := accounts.NewAccount("faucet-source") @@ -32,8 +38,11 @@ func NewBorDevnetWithoutHeimdall( BaseRPCHost: baseRpcHost, BaseRPCPort: baseRpcPort, //Snapshots: true, - Alloc: types.GenesisAlloc{ - faucetSource.Address: {Balance: accounts.EtherAmount(200_000)}, + Genesis: &types.Genesis{ + Alloc: types.GenesisAlloc{ + faucetSource.Address: {Balance: accounts.EtherAmount(200_000)}, + }, + GasLimit: gasLimit, }, Services: []devnet.Service{ account_services.NewFaucet(networkname.BorDevnetChainName, faucetSource), @@ -41,16 +50,16 @@ func NewBorDevnetWithoutHeimdall( Nodes: []devnet.Node{ &args.BlockProducer{ NodeArgs: args.NodeArgs{ - ConsoleVerbosity: "0", - DirVerbosity: "5", + ConsoleVerbosity: strconv.Itoa(int(consoleLogLevel)), + DirVerbosity: strconv.Itoa(int(dirLogLevel)), WithoutHeimdall: true, }, AccountSlots: 200, }, - &args.NonBlockProducer{ + &args.BlockConsumer{ NodeArgs: args.NodeArgs{ - ConsoleVerbosity: "0", - DirVerbosity: "5", + ConsoleVerbosity: strconv.Itoa(int(consoleLogLevel)), + DirVerbosity: strconv.Itoa(int(dirLogLevel)), WithoutHeimdall: true, }, }, @@ -65,10 +74,14 @@ func NewBorDevnetWithHeimdall( baseRpcHost string, baseRpcPort int, heimdall *polygon.Heimdall, - heimdallGrpcAddr string, + heimdallURL string, checkpointOwner *accounts.Account, + producerCount int, + gasLimit uint64, withMilestones bool, logger log.Logger, + consoleLogLevel log.Lvl, + dirLogLevel log.Lvl, ) devnet.Devnet { faucetSource := accounts.NewAccount("faucet-source") @@ -77,6 +90,23 @@ func NewBorDevnetWithHeimdall( services = append(services, heimdall) } + var nodes []devnet.Node + + if producerCount == 0 { + producerCount++ + } + + for i := 0; i < producerCount; i++ { + nodes = append(nodes, 
&args.BlockProducer{ + NodeArgs: args.NodeArgs{ + ConsoleVerbosity: strconv.Itoa(int(consoleLogLevel)), + DirVerbosity: strconv.Itoa(int(dirLogLevel)), + HeimdallURL: heimdallURL, + }, + AccountSlots: 20000, + }) + } + borNetwork := devnet.Network{ DataDir: dataDir, Chain: networkname.BorDevnetChainName, @@ -88,42 +118,20 @@ func NewBorDevnetWithHeimdall( BorStateSyncDelay: 5 * time.Second, BorWithMilestones: &withMilestones, Services: append(services, account_services.NewFaucet(networkname.BorDevnetChainName, faucetSource)), - Alloc: types.GenesisAlloc{ - faucetSource.Address: {Balance: accounts.EtherAmount(200_000)}, - }, - Nodes: []devnet.Node{ - &args.BlockProducer{ - NodeArgs: args.NodeArgs{ - ConsoleVerbosity: "0", - DirVerbosity: "5", - HeimdallGrpcAddr: heimdallGrpcAddr, - }, - AccountSlots: 200, + Genesis: &types.Genesis{ + Alloc: types.GenesisAlloc{ + faucetSource.Address: {Balance: accounts.EtherAmount(200_000)}, }, - &args.BlockProducer{ - NodeArgs: args.NodeArgs{ - ConsoleVerbosity: "0", - DirVerbosity: "5", - HeimdallGrpcAddr: heimdallGrpcAddr, - }, - AccountSlots: 200, - }, - /*&args.BlockProducer{ - Node: args.Node{ - ConsoleVerbosity: "0", - DirVerbosity: "5", - HeimdallGrpcAddr: heimdallGrpcAddr, - }, - AccountSlots: 200, - },*/ - &args.NonBlockProducer{ + GasLimit: gasLimit, + }, + Nodes: append(nodes, + &args.BlockConsumer{ NodeArgs: args.NodeArgs{ - ConsoleVerbosity: "0", - DirVerbosity: "5", - HeimdallGrpcAddr: heimdallGrpcAddr, + ConsoleVerbosity: strconv.Itoa(int(consoleLogLevel)), + DirVerbosity: strconv.Itoa(int(dirLogLevel)), + HeimdallURL: heimdallURL, }, - }, - }, + }), } devNetwork := devnet.Network{ @@ -135,25 +143,27 @@ func NewBorDevnetWithHeimdall( BaseRPCHost: baseRpcHost, BaseRPCPort: baseRpcPort + 1000, Services: append(services, account_services.NewFaucet(networkname.DevChainName, faucetSource)), - Alloc: types.GenesisAlloc{ - faucetSource.Address: {Balance: accounts.EtherAmount(200_000)}, - checkpointOwner.Address: {Balance: accounts.EtherAmount(10_000)}, + Genesis: &types.Genesis{ + Alloc: types.GenesisAlloc{ + faucetSource.Address: {Balance: accounts.EtherAmount(200_000)}, + checkpointOwner.Address: {Balance: accounts.EtherAmount(10_000)}, + }, }, Nodes: []devnet.Node{ &args.BlockProducer{ NodeArgs: args.NodeArgs{ - ConsoleVerbosity: "0", - DirVerbosity: "5", + ConsoleVerbosity: strconv.Itoa(int(consoleLogLevel)), + DirVerbosity: strconv.Itoa(int(dirLogLevel)), VMDebug: true, HttpCorsDomain: "*", }, DevPeriod: 5, AccountSlots: 200, }, - &args.NonBlockProducer{ + &args.BlockConsumer{ NodeArgs: args.NodeArgs{ - ConsoleVerbosity: "0", - DirVerbosity: "3", + ConsoleVerbosity: strconv.Itoa(int(consoleLogLevel)), + DirVerbosity: strconv.Itoa(int(dirLogLevel)), }, }, }, @@ -169,9 +179,13 @@ func NewBorDevnetWithRemoteHeimdall( dataDir string, baseRpcHost string, baseRpcPort int, + producerCount int, + gasLimit uint64, logger log.Logger, + consoleLogLevel log.Lvl, + dirLogLevel log.Lvl, ) devnet.Devnet { - heimdallGrpcAddr := "" + heimdallURL := "" checkpointOwner := accounts.NewAccount("checkpoint-owner") withMilestones := utils.WithHeimdallMilestones.Value return NewBorDevnetWithHeimdall( @@ -179,30 +193,39 @@ func NewBorDevnetWithRemoteHeimdall( baseRpcHost, baseRpcPort, nil, - heimdallGrpcAddr, + heimdallURL, checkpointOwner, + producerCount, + gasLimit, withMilestones, - logger) + logger, + consoleLogLevel, + dirLogLevel) } func NewBorDevnetWithLocalHeimdall( dataDir string, baseRpcHost string, baseRpcPort int, - heimdallGrpcAddr string, + heimdallURL 
string, sprintSize uint64, + producerCount int, + gasLimit uint64, logger log.Logger, + consoleLogLevel log.Lvl, + dirLogLevel log.Lvl, ) devnet.Devnet { config := *params.BorDevnetChainConfig + borConfig := config.Bor.(*borcfg.BorConfig) if sprintSize > 0 { - config.Bor.Sprint = map[string]uint64{"0": sprintSize} + borConfig.Sprint = map[string]uint64{"0": sprintSize} } checkpointOwner := accounts.NewAccount("checkpoint-owner") heimdall := polygon.NewHeimdall( &config, - heimdallGrpcAddr, + heimdallURL, &polygon.CheckpointConfig{ CheckpointBufferTime: 60 * time.Second, CheckpointAccount: checkpointOwner, @@ -214,9 +237,11 @@ func NewBorDevnetWithLocalHeimdall( baseRpcHost, baseRpcPort, heimdall, - heimdallGrpcAddr, + heimdallURL, checkpointOwner, + producerCount, + gasLimit, // milestones are not supported yet on the local heimdall false, - logger) + logger, consoleLogLevel, dirLogLevel) } diff --git a/cmd/devnet/tests/devnet_dev.go b/cmd/devnet/networks/devnet_dev.go similarity index 62% rename from cmd/devnet/tests/devnet_dev.go rename to cmd/devnet/networks/devnet_dev.go index f4aeed1d0f7..fa8e399f8bb 100644 --- a/cmd/devnet/tests/devnet_dev.go +++ b/cmd/devnet/networks/devnet_dev.go @@ -1,6 +1,8 @@ -package tests +package networks import ( + "strconv" + "github.com/ledgerwatch/erigon-lib/chain/networkname" "github.com/ledgerwatch/erigon/cmd/devnet/accounts" "github.com/ledgerwatch/erigon/cmd/devnet/args" @@ -14,10 +16,30 @@ func NewDevDevnet( dataDir string, baseRpcHost string, baseRpcPort int, + producerCount int, + gasLimit uint64, logger log.Logger, + consoleLogLevel log.Lvl, + dirLogLevel log.Lvl, ) devnet.Devnet { faucetSource := accounts.NewAccount("faucet-source") + var nodes []devnet.Node + + if producerCount == 0 { + producerCount++ + } + + for i := 0; i < producerCount; i++ { + nodes = append(nodes, &args.BlockProducer{ + NodeArgs: args.NodeArgs{ + ConsoleVerbosity: strconv.Itoa(int(consoleLogLevel)), + DirVerbosity: strconv.Itoa(int(dirLogLevel)), + }, + AccountSlots: 200, + }) + } + network := devnet.Network{ DataDir: dataDir, Chain: networkname.DevChainName, @@ -25,28 +47,23 @@ func NewDevDevnet( BasePrivateApiAddr: "localhost:10090", BaseRPCHost: baseRpcHost, BaseRPCPort: baseRpcPort, - Alloc: types.GenesisAlloc{ - faucetSource.Address: {Balance: accounts.EtherAmount(200_000)}, + Genesis: &types.Genesis{ + Alloc: types.GenesisAlloc{ + faucetSource.Address: {Balance: accounts.EtherAmount(200_000)}, + }, + GasLimit: gasLimit, }, Services: []devnet.Service{ account_services.NewFaucet(networkname.DevChainName, faucetSource), }, MaxNumberOfEmptyBlockChecks: 30, - Nodes: []devnet.Node{ - &args.BlockProducer{ + Nodes: append(nodes, + &args.BlockConsumer{ NodeArgs: args.NodeArgs{ ConsoleVerbosity: "0", DirVerbosity: "5", }, - AccountSlots: 200, - }, - &args.NonBlockProducer{ - NodeArgs: args.NodeArgs{ - ConsoleVerbosity: "0", - DirVerbosity: "5", - }, - }, - }, + }), } return devnet.Devnet{&network} diff --git a/cmd/devnet/requests/request_generator.go b/cmd/devnet/requests/request_generator.go index 1c1e04628d1..1598cba5ce7 100644 --- a/cmd/devnet/requests/request_generator.go +++ b/cmd/devnet/requests/request_generator.go @@ -184,34 +184,50 @@ func (req *requestGenerator) rpcCall(ctx context.Context, result interface{}, me }) } -const connectionTimeout = time.Second * 5 +const requestTimeout = time.Second * 20 +const connectionTimeout = time.Millisecond * 500 func isConnectionError(err error) bool { var opErr *net.OpError - if errors.As(err, &opErr) { + switch { + case 
errors.As(err, &opErr): return opErr.Op == "dial" + + case errors.Is(err, context.DeadlineExceeded): + return true } + return false } func retryConnects(ctx context.Context, op func(context.Context) error) error { - ctx, cancel := context.WithTimeout(ctx, connectionTimeout) + ctx, cancel := context.WithTimeout(ctx, requestTimeout) defer cancel() - return retry(ctx, op, isConnectionError, time.Millisecond*200, nil) + return retry(ctx, op, isConnectionError, time.Second*1, nil) } func retry(ctx context.Context, op func(context.Context) error, isRecoverableError func(error) bool, delay time.Duration, lastErr error) error { - err := op(ctx) + opctx, cancel := context.WithTimeout(ctx, connectionTimeout) + defer cancel() + + err := op(opctx) + if err == nil { return nil } - if errors.Is(err, context.DeadlineExceeded) && lastErr != nil { - return lastErr - } + if !isRecoverableError(err) { return err } + if errors.Is(err, context.DeadlineExceeded) { + if lastErr != nil { + return lastErr + } + + err = nil + } + delayTimer := time.NewTimer(delay) select { case <-delayTimer.C: diff --git a/cmd/devnet/services/accounts/faucet.go b/cmd/devnet/services/accounts/faucet.go index 5a0b88b6dd4..ff4e1b82d1c 100644 --- a/cmd/devnet/services/accounts/faucet.go +++ b/cmd/devnet/services/accounts/faucet.go @@ -60,7 +60,7 @@ func (d *deployer) deploy(ctx context.Context, node devnet.Node) { d.faucet.deployer = nil d.faucet.transactOpts = nil - logger.Error("failed to deploy faucet", "chain", d.faucet.chainName, "err", err) + logger.Error("failed while waiting to deploy faucet", "chain", d.faucet.chainName, "err", err) return } @@ -92,7 +92,7 @@ func (d *deployer) deploy(ctx context.Context, node devnet.Node) { d.faucet.deployer = nil d.faucet.transactOpts = nil - logger.Error("failed to deploy faucet", "chain", d.faucet.chainName, "err", err) + logger.Error("failed while waiting to receive faucet funds", "chain", d.faucet.chainName, "err", err) return } @@ -111,7 +111,7 @@ func NewFaucet(chainName string, source *accounts.Account) *Faucet { } } -func (f *Faucet) Start(context context.Context) error { +func (f *Faucet) Start(_ context.Context) error { return nil } @@ -157,7 +157,7 @@ func (f *Faucet) Send(ctx context.Context, destination *accounts.Account, eth fl } if f.transactOpts == nil { - return nil, libcommon.Hash{}, fmt.Errorf("Faucet not initialized") + return nil, libcommon.Hash{}, fmt.Errorf("faucet not initialized") } node := devnet.SelectNode(ctx) @@ -208,7 +208,7 @@ func (f *Faucet) Receive(ctx context.Context, source *accounts.Account, eth floa return transactOpts.Value, trn.Hash(), nil } -func (f *Faucet) NodeCreated(ctx context.Context, node devnet.Node) { +func (f *Faucet) NodeCreated(_ context.Context, _ devnet.Node) { } func (f *Faucet) NodeStarted(ctx context.Context, node devnet.Node) { diff --git a/cmd/devnet/services/polygon/checkpoint.go b/cmd/devnet/services/polygon/checkpoint.go index 5a39652cdf3..903919ecb49 100644 --- a/cmd/devnet/services/polygon/checkpoint.go +++ b/cmd/devnet/services/polygon/checkpoint.go @@ -19,9 +19,9 @@ import ( "github.com/ledgerwatch/erigon/cmd/devnet/contracts" "github.com/ledgerwatch/erigon/cmd/devnet/devnet" "github.com/ledgerwatch/erigon/cmd/devnet/requests" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/polygon/heimdall" ) type CheckpointBlock struct { @@ -176,7 +176,7 @@ func (h *Heimdall) handleChildHeader(ctx 
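The reworked retry above separates two timeouts: requestTimeout bounds the whole operation while connectionTimeout bounds each individual attempt, and a per-attempt context.DeadlineExceeded is treated as recoverable so the call is retried until the outer deadline fires. A minimal sketch of that two-level timeout pattern, with illustrative durations and an illustrative op signature rather than the devnet's exact ones:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retry runs op with a short per-attempt timeout inside an overall
// deadline carried by ctx; recoverable errors trigger another attempt.
func retry(ctx context.Context, attemptTimeout, delay time.Duration,
	op func(context.Context) error, recoverable func(error) bool) error {
	for {
		opCtx, cancel := context.WithTimeout(ctx, attemptTimeout)
		err := op(opCtx)
		cancel()

		if err == nil || !recoverable(err) {
			return err
		}

		select {
		case <-time.After(delay):
			// delay elapsed, try again
		case <-ctx.Done():
			return ctx.Err() // the overall deadline or cancellation wins
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	attempts := 0
	err := retry(ctx, 100*time.Millisecond, 200*time.Millisecond,
		func(context.Context) error {
			attempts++
			if attempts < 3 {
				return context.DeadlineExceeded // simulated slow attempt
			}
			return nil
		},
		func(err error) bool { return errors.Is(err, context.DeadlineExceeded) })

	fmt.Println("attempts:", attempts, "err:", err) // attempts: 3 err: <nil>
}
```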
context.Context, header *types.Header) return err } - h.pendingCheckpoint = &checkpoint.Checkpoint{ + h.pendingCheckpoint = &heimdall.Checkpoint{ Timestamp: timeStamp, StartBlock: big.NewInt(int64(expectedCheckpointState.newStart)), EndBlock: big.NewInt(int64(expectedCheckpointState.newEnd)), diff --git a/cmd/devnet/services/polygon/heimdall.go b/cmd/devnet/services/polygon/heimdall.go index 11529c93c14..80253bf276f 100644 --- a/cmd/devnet/services/polygon/heimdall.go +++ b/cmd/devnet/services/polygon/heimdall.go @@ -2,12 +2,20 @@ package polygon import ( "context" + "encoding/json" + "errors" "fmt" "math/big" + "net" + "net/http" + "strconv" "strings" "sync" "time" + "github.com/go-chi/chi/v5" + "github.com/ledgerwatch/log/v3" + ethereum "github.com/ledgerwatch/erigon" "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" @@ -16,12 +24,9 @@ import ( "github.com/ledgerwatch/erigon/cmd/devnet/blocks" "github.com/ledgerwatch/erigon/cmd/devnet/contracts" "github.com/ledgerwatch/erigon/cmd/devnet/devnet" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" - "github.com/ledgerwatch/erigon/consensus/bor/heimdallgrpc" - "github.com/ledgerwatch/erigon/consensus/bor/valset" - "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon/polygon/bor/borcfg" + "github.com/ledgerwatch/erigon/polygon/bor/valset" + "github.com/ledgerwatch/erigon/polygon/heimdall" ) type BridgeEvent string @@ -52,7 +57,7 @@ const ( DefaultCheckpointBufferTime time.Duration = 1000 * time.Second ) -const HeimdallGrpcAddressDefault = "localhost:8540" +const HeimdallURLDefault = "http://localhost:1317" type CheckpointConfig struct { RootChainTxConfirmations uint64 @@ -67,13 +72,14 @@ type CheckpointConfig struct { type Heimdall struct { sync.Mutex chainConfig *chain.Config - grpcAddr string + borConfig *borcfg.BorConfig + listenAddr string validatorSet *valset.ValidatorSet - pendingCheckpoint *checkpoint.Checkpoint + pendingCheckpoint *heimdall.Checkpoint latestCheckpoint *CheckpointAck ackWaiter *sync.Cond - currentSpan *span.HeimdallSpan - spans map[uint64]*span.HeimdallSpan + currentSpan *heimdall.HeimdallSpan + spans map[uint64]*heimdall.HeimdallSpan logger log.Logger cancelFunc context.CancelFunc syncSenderAddress libcommon.Address @@ -90,15 +96,16 @@ type Heimdall struct { func NewHeimdall( chainConfig *chain.Config, - grpcAddr string, + serverURL string, checkpointConfig *CheckpointConfig, logger log.Logger, ) *Heimdall { heimdall := &Heimdall{ chainConfig: chainConfig, - grpcAddr: grpcAddr, + borConfig: chainConfig.Bor.(*borcfg.BorConfig), + listenAddr: serverURL[7:], checkpointConfig: *checkpointConfig, - spans: map[uint64]*span.HeimdallSpan{}, + spans: map[uint64]*heimdall.HeimdallSpan{}, pendingSyncRecords: map[syncRecordKey]*EventRecordWithBlock{}, logger: logger} @@ -135,7 +142,7 @@ func NewHeimdall( return heimdall } -func (h *Heimdall) Span(ctx context.Context, spanID uint64) (*span.HeimdallSpan, error) { +func (h *Heimdall) Span(ctx context.Context, spanID uint64) (*heimdall.HeimdallSpan, error) { h.Lock() defer h.Unlock() @@ -144,7 +151,7 @@ func (h *Heimdall) Span(ctx context.Context, spanID uint64) (*span.HeimdallSpan, return span, nil } - var nextSpan = span.Span{ + var nextSpan = heimdall.Span{ ID: spanID, } @@ -158,7 +165,7 @@ func (h *Heimdall) Span(ctx context.Context, spanID uint64) (*span.HeimdallSpan, 
nextSpan.StartBlock = h.currentSpan.EndBlock + 1 } - nextSpan.EndBlock = nextSpan.StartBlock + (100 * h.chainConfig.Bor.CalculateSprint(nextSpan.StartBlock)) - 1 + nextSpan.EndBlock = nextSpan.StartBlock + (100 * h.borConfig.CalculateSprintLength(nextSpan.StartBlock)) - 1 // TODO we should use a subset here - see: https://wiki.polygon.technology/docs/pos/bor/ @@ -168,7 +175,7 @@ func (h *Heimdall) Span(ctx context.Context, spanID uint64) (*span.HeimdallSpan, selectedProducers[i] = *v } - h.currentSpan = &span.HeimdallSpan{ + h.currentSpan = &heimdall.HeimdallSpan{ Span: nextSpan, ValidatorSet: *h.validatorSet, SelectedProducers: selectedProducers, @@ -182,10 +189,10 @@ func (h *Heimdall) Span(ctx context.Context, spanID uint64) (*span.HeimdallSpan, func (h *Heimdall) currentSprintLength() int { if h.currentSpan != nil { - return int(h.chainConfig.Bor.CalculateSprint(h.currentSpan.StartBlock)) + return int(h.borConfig.CalculateSprintLength(h.currentSpan.StartBlock)) } - return int(h.chainConfig.Bor.CalculateSprint(256)) + return int(h.borConfig.CalculateSprintLength(256)) } func (h *Heimdall) getSpanOverrideHeight() uint64 { @@ -194,7 +201,7 @@ func (h *Heimdall) getSpanOverrideHeight() uint64 { //MumbaiChain: 10205000 } -func (h *Heimdall) FetchCheckpoint(ctx context.Context, number int64) (*checkpoint.Checkpoint, error) { +func (h *Heimdall) FetchCheckpoint(ctx context.Context, number int64) (*heimdall.Checkpoint, error) { return nil, fmt.Errorf("TODO") } @@ -202,7 +209,7 @@ func (h *Heimdall) FetchCheckpointCount(ctx context.Context) (int64, error) { return 0, fmt.Errorf("TODO") } -func (h *Heimdall) FetchMilestone(ctx context.Context, number int64) (*milestone.Milestone, error) { +func (h *Heimdall) FetchMilestone(ctx context.Context, number int64) (*heimdall.Milestone, error) { return nil, fmt.Errorf("TODO") } @@ -292,7 +299,7 @@ func (h *Heimdall) NodeStarted(ctx context.Context, node devnet.Node) { h.Unlock() h.unsubscribe() h.Lock() - h.logger.Error("Failed to deploy state sender", "err", err) + h.logger.Error("Failed to create transact opts for deploying state sender", "err", err) return } @@ -352,7 +359,7 @@ func (h *Heimdall) addValidator(validatorAddress libcommon.Address, votingPower VotingPower: votingPower, ProposerPriority: proposerPriority, }, - }, h.logger) + }) } else { h.validatorSet.UpdateWithChangeSet([]*valset.Validator{ { @@ -361,7 +368,7 @@ func (h *Heimdall) addValidator(validatorAddress libcommon.Address, votingPower VotingPower: votingPower, ProposerPriority: proposerPriority, }, - }, h.logger) + }) } } @@ -377,7 +384,154 @@ func (h *Heimdall) Start(ctx context.Context) error { // if this is a restart h.unsubscribe() - return heimdallgrpc.StartHeimdallServer(ctx, h, h.grpcAddr, h.logger) + server := &http.Server{Addr: h.listenAddr, Handler: makeHeimdallRouter(ctx, h)} + return startHTTPServer(ctx, server, "devnet Heimdall service", h.logger) +} + +func makeHeimdallRouter(ctx context.Context, client heimdall.HeimdallClient) *chi.Mux { + router := chi.NewRouter() + + writeResponse := func(w http.ResponseWriter, result any, err error) { + if err != nil { + http.Error(w, http.StatusText(500), 500) + return + } + + var resultEnvelope struct { + Height string `json:"height"` + Result any `json:"result"` + } + resultEnvelope.Height = "0" + resultEnvelope.Result = result + + response, err := json.Marshal(resultEnvelope) + if err != nil { + http.Error(w, http.StatusText(500), 500) + return + } + + _, _ = w.Write(response) + } + + wrapResult := func(result any) 
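The writeResponse helper above wraps every payload in the envelope shape Heimdall's REST API returns: the value sits under a result key next to a height field. A standalone illustration of that envelope; the Span type and its JSON tags here are stand-ins for the real heimdall types:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in for a heimdall result payload.
type Span struct {
	ID         uint64 `json:"span_id"`
	StartBlock uint64 `json:"start_block"`
	EndBlock   uint64 `json:"end_block"`
}

// resultEnvelope mirrors the response shape produced by writeResponse above.
type resultEnvelope struct {
	Height string `json:"height"`
	Result any    `json:"result"`
}

func main() {
	env := resultEnvelope{
		Height: "0",
		Result: Span{ID: 1, StartBlock: 1000, EndBlock: 2000},
	}
	out, err := json.Marshal(env)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// {"height":"0","result":{"span_id":1,"start_block":1000,"end_block":2000}}
}
```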
map[string]any { + return map[string]any{ + "result": result, + } + } + + router.Get("/clerk/event-record/list", func(w http.ResponseWriter, r *http.Request) { + fromIdStr := r.URL.Query().Get("from-id") + fromId, err := strconv.ParseUint(fromIdStr, 10, 64) + if err != nil { + http.Error(w, http.StatusText(400), 400) + return + } + + toTimeStr := r.URL.Query().Get("to-time") + toTime, err := strconv.ParseInt(toTimeStr, 10, 64) + if err != nil { + http.Error(w, http.StatusText(400), 400) + return + } + + result, err := client.StateSyncEvents(ctx, fromId, toTime) + writeResponse(w, result, err) + }) + + router.Get("/bor/span/{id}", func(w http.ResponseWriter, r *http.Request) { + idStr := chi.URLParam(r, "id") + id, err := strconv.ParseUint(idStr, 10, 64) + if err != nil { + http.Error(w, http.StatusText(400), 400) + return + } + result, err := client.Span(ctx, id) + writeResponse(w, result, err) + }) + + router.Get("/checkpoints/{number}", func(w http.ResponseWriter, r *http.Request) { + numberStr := chi.URLParam(r, "number") + number, err := strconv.ParseInt(numberStr, 10, 64) + if err != nil { + http.Error(w, http.StatusText(400), 400) + return + } + result, err := client.FetchCheckpoint(ctx, number) + writeResponse(w, result, err) + }) + + router.Get("/checkpoints/latest", func(w http.ResponseWriter, r *http.Request) { + result, err := client.FetchCheckpoint(ctx, -1) + writeResponse(w, result, err) + }) + + router.Get("/checkpoints/count", func(w http.ResponseWriter, r *http.Request) { + result, err := client.FetchCheckpointCount(ctx) + writeResponse(w, wrapResult(result), err) + }) + + router.Get("/milestone/{number}", func(w http.ResponseWriter, r *http.Request) { + numberStr := chi.URLParam(r, "number") + number, err := strconv.ParseInt(numberStr, 10, 64) + if err != nil { + http.Error(w, http.StatusText(400), 400) + return + } + result, err := client.FetchMilestone(ctx, number) + writeResponse(w, result, err) + }) + + router.Get("/milestone/latest", func(w http.ResponseWriter, r *http.Request) { + result, err := client.FetchMilestone(ctx, -1) + writeResponse(w, result, err) + }) + + router.Get("/milestone/count", func(w http.ResponseWriter, r *http.Request) { + result, err := client.FetchMilestoneCount(ctx) + writeResponse(w, heimdall.MilestoneCount{Count: result}, err) + }) + + router.Get("/milestone/noAck/{id}", func(w http.ResponseWriter, r *http.Request) { + id := chi.URLParam(r, "id") + err := client.FetchNoAckMilestone(ctx, id) + result := err == nil + writeResponse(w, wrapResult(result), err) + }) + + router.Get("/milestone/lastNoAck", func(w http.ResponseWriter, r *http.Request) { + result, err := client.FetchLastNoAckMilestone(ctx) + writeResponse(w, wrapResult(result), err) + }) + + router.Get("/milestone/ID/{id}", func(w http.ResponseWriter, r *http.Request) { + id := chi.URLParam(r, "id") + err := client.FetchMilestoneID(ctx, id) + result := err == nil + writeResponse(w, wrapResult(result), err) + }) + + return router +} + +func startHTTPServer(ctx context.Context, server *http.Server, serverName string, logger log.Logger) error { + listener, err := net.Listen("tcp", server.Addr) + if err != nil { + return err + } + + go func() { + err := server.Serve(listener) + if (err != nil) && !errors.Is(err, http.ErrServerClosed) { + logger.Error("server.Serve error", "serverName", serverName, "err", err) + } + }() + + go func() { + <-ctx.Done() + _ = server.Close() + }() + + return nil } func (h *Heimdall) Stop() { diff --git a/cmd/devnet/services/polygon/heimdall_test.go 
b/cmd/devnet/services/polygon/heimdall_test.go new file mode 100644 index 00000000000..0cb3e58ab94 --- /dev/null +++ b/cmd/devnet/services/polygon/heimdall_test.go @@ -0,0 +1,61 @@ +package polygon + +import ( + "context" + "math/big" + "net/http" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "github.com/ledgerwatch/erigon/polygon/heimdall" +) + +func TestHeimdallServer(t *testing.T) { + t.Skip() + + ctx := context.Background() + ctrl := gomock.NewController(t) + client := heimdall.NewMockHeimdallClient(ctrl) + + events := []*heimdall.EventRecordWithTime{ + { + EventRecord: heimdall.EventRecord{ + ID: 1, + ChainID: "80001", + }, + Time: time.Now(), + }, + { + EventRecord: heimdall.EventRecord{ + ID: 2, + ChainID: "80001", + }, + Time: time.Now(), + }, + } + client.EXPECT().StateSyncEvents(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(events, nil) + + span := &heimdall.HeimdallSpan{ + Span: heimdall.Span{ + ID: 1, + StartBlock: 1000, + EndBlock: 2000, + }, + ChainID: "80001", + } + client.EXPECT().Span(gomock.Any(), gomock.Any()).AnyTimes().Return(span, nil) + + checkpoint1 := &heimdall.Checkpoint{ + StartBlock: big.NewInt(1000), + EndBlock: big.NewInt(1999), + BorChainID: "80001", + } + client.EXPECT().FetchCheckpoint(gomock.Any(), gomock.Any()).AnyTimes().Return(checkpoint1, nil) + client.EXPECT().FetchCheckpointCount(gomock.Any()).AnyTimes().Return(int64(1), nil) + + err := http.ListenAndServe(HeimdallURLDefault[7:], makeHeimdallRouter(ctx, client)) + require.Nil(t, err) +} diff --git a/cmd/devnet/services/polygon/proofgenerator_test.go b/cmd/devnet/services/polygon/proofgenerator_test.go index b835c1a0024..8c9bd9f985f 100644 --- a/cmd/devnet/services/polygon/proofgenerator_test.go +++ b/cmd/devnet/services/polygon/proofgenerator_test.go @@ -11,6 +11,9 @@ import ( "testing" "github.com/holiman/uint256" + "github.com/ledgerwatch/log/v3" + "github.com/pion/randutil" + "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" libcommon "github.com/ledgerwatch/erigon-lib/common" @@ -21,10 +24,6 @@ import ( "github.com/ledgerwatch/erigon/cmd/devnet/blocks" "github.com/ledgerwatch/erigon/cmd/devnet/requests" "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/consensus/bor" - "github.com/ledgerwatch/erigon/consensus/bor/contract" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" - "github.com/ledgerwatch/erigon/consensus/bor/valset" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" @@ -32,14 +31,15 @@ import ( "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/polygon/bor" + "github.com/ledgerwatch/erigon/polygon/bor/valset" + "github.com/ledgerwatch/erigon/polygon/heimdall" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/jsonrpc" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/stages/mock" "github.com/ledgerwatch/erigon/turbo/transactions" - "github.com/ledgerwatch/log/v3" - "github.com/pion/randutil" ) type requestGenerator struct { @@ -75,9 +75,9 @@ func newRequestGenerator(sentry *mock.MockSentry, chain *core.ChainPack) (*reque sentry: sentry, bor: bor.NewRo(params.BorDevnetChainConfig, db, reader, &spanner{ - span.NewChainSpanner(contract.ValidatorSet(), params.BorDevnetChainConfig, false, 
log.Root()), + bor.NewChainSpanner(bor.GenesisContractValidatorSetABI(), params.BorDevnetChainConfig, false, log.Root()), libcommon.Address{}, - span.Span{}}, + heimdall.Span{}}, genesisContract{}, log.Root()), txBlockMap: map[libcommon.Hash]*types.Block{}, }, nil @@ -274,16 +274,16 @@ func (g genesisContract) LastStateId(syscall consensus.SystemCall) (*big.Int, er } type spanner struct { - *span.ChainSpanner + *bor.ChainSpanner validatorAddress libcommon.Address - currentSpan span.Span + currentSpan heimdall.Span } -func (c spanner) GetCurrentSpan(_ consensus.SystemCall) (*span.Span, error) { +func (c spanner) GetCurrentSpan(_ consensus.SystemCall) (*heimdall.Span, error) { return &c.currentSpan, nil } -func (c *spanner) CommitSpan(heimdallSpan span.HeimdallSpan, syscall consensus.SystemCall) error { +func (c *spanner) CommitSpan(heimdallSpan heimdall.HeimdallSpan, syscall consensus.SystemCall) error { c.currentSpan = heimdallSpan.Span return nil } diff --git a/cmd/devnet/services/polygon/statesync.go b/cmd/devnet/services/polygon/statesync.go index 0429f5085db..6c79c92c1f2 100644 --- a/cmd/devnet/services/polygon/statesync.go +++ b/cmd/devnet/services/polygon/statesync.go @@ -9,7 +9,7 @@ import ( "github.com/ledgerwatch/erigon/accounts/abi/bind" "github.com/ledgerwatch/erigon/cmd/devnet/contracts" - "github.com/ledgerwatch/erigon/consensus/bor/clerk" + "github.com/ledgerwatch/erigon/polygon/heimdall" ) // Maximum allowed event record data size @@ -19,7 +19,7 @@ const LegacyMaxStateSyncSize = 100000 const MaxStateSyncSize = 30000 type EventRecordWithBlock struct { - clerk.EventRecordWithTime + heimdall.EventRecordWithTime BlockNumber uint64 } @@ -42,7 +42,7 @@ func (h *Heimdall) startStateSyncSubscription() { } } -func (h *Heimdall) StateSyncEvents(ctx context.Context, fromID uint64, to int64, limit int) (uint64, []*clerk.EventRecordWithTime, error) { +func (h *Heimdall) StateSyncEvents(ctx context.Context, fromID uint64, to int64) ([]*heimdall.EventRecordWithTime, error) { h.Lock() defer h.Unlock() @@ -72,19 +72,14 @@ func (h *Heimdall) StateSyncEvents(ctx context.Context, fromID uint64, to int64, if len(events) == 0 { h.logger.Info("Processed sync request", "from", fromID, "to", time.Unix(to, 0), "min-time", minEventTime, "pending", len(h.pendingSyncRecords), "filtered", len(events)) - return 0, nil, nil + return nil, nil } sort.Slice(events, func(i, j int) bool { return events[i].ID < events[j].ID }) - if len(events) > limit { - events = events[0 : limit-1] - } - - eventsWithTime := make([]*clerk.EventRecordWithTime, len(events)) - + eventsWithTime := make([]*heimdall.EventRecordWithTime, len(events)) for i, event := range events { eventsWithTime[i] = &event.EventRecordWithTime } @@ -98,7 +93,7 @@ func (h *Heimdall) StateSyncEvents(ctx context.Context, fromID uint64, to int64, "pending", len(h.pendingSyncRecords), "filtered", len(events), "sent", fmt.Sprintf("%d-%d", events[0].ID, events[len(events)-1].ID)) - return events[len(events)-1].BlockNumber, eventsWithTime, nil + return eventsWithTime, nil } // handleStateSyncEvent - handle state sync event from rootchain @@ -144,8 +139,8 @@ func (h *Heimdall) handleStateSynced(event *contracts.TestStateSenderStateSynced } h.pendingSyncRecords[syncRecordKey{event.Raw.TxHash, uint64(event.Raw.Index)}] = &EventRecordWithBlock{ - EventRecordWithTime: clerk.EventRecordWithTime{ - EventRecord: clerk.EventRecord{ + EventRecordWithTime: heimdall.EventRecordWithTime{ + EventRecord: heimdall.EventRecord{ ID: event.Id.Uint64(), Contract: 
event.ContractAddress, Data: event.Data, diff --git a/cmd/devnet/tests/bor/devnet_test.go b/cmd/devnet/tests/bor_devnet_test.go similarity index 91% rename from cmd/devnet/tests/bor/devnet_test.go rename to cmd/devnet/tests/bor_devnet_test.go index ad43f982c28..30cb8839d38 100644 --- a/cmd/devnet/tests/bor/devnet_test.go +++ b/cmd/devnet/tests/bor_devnet_test.go @@ -1,24 +1,22 @@ //go:build integration -package bor +package tests import ( "context" "testing" + "github.com/stretchr/testify/require" + "github.com/ledgerwatch/erigon-lib/chain/networkname" accounts_steps "github.com/ledgerwatch/erigon/cmd/devnet/accounts/steps" contracts_steps "github.com/ledgerwatch/erigon/cmd/devnet/contracts/steps" "github.com/ledgerwatch/erigon/cmd/devnet/requests" "github.com/ledgerwatch/erigon/cmd/devnet/services" - "github.com/ledgerwatch/erigon/cmd/devnet/tests" - "github.com/stretchr/testify/require" ) func TestStateSync(t *testing.T) { - t.Skip("FIXME: hangs in GenerateSyncEvents without any visible progress") - - runCtx, err := tests.ContextStart(t, networkname.BorDevnetChainName) + runCtx, err := ContextStart(t, networkname.BorDevnetChainName) require.Nil(t, err) var ctx context.Context = runCtx @@ -57,7 +55,7 @@ func TestStateSync(t *testing.T) { func TestChildChainExit(t *testing.T) { t.Skip("FIXME: step CreateAccountWithFunds fails: Failed to get transfer tx: failed to search reserves for hashes: no block heads subscription") - runCtx, err := tests.ContextStart(t, networkname.BorDevnetChainName) + runCtx, err := ContextStart(t, networkname.BorDevnetChainName) require.Nil(t, err) var ctx context.Context = runCtx diff --git a/cmd/devnet/tests/context.go b/cmd/devnet/tests/context.go index 7a1a27f645b..554bf99d000 100644 --- a/cmd/devnet/tests/context.go +++ b/cmd/devnet/tests/context.go @@ -4,35 +4,38 @@ import ( "fmt" "os" "runtime" + "strconv" "testing" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain/networkname" "github.com/ledgerwatch/erigon/cmd/devnet/devnet" + "github.com/ledgerwatch/erigon/cmd/devnet/networks" "github.com/ledgerwatch/erigon/cmd/devnet/services" "github.com/ledgerwatch/erigon/cmd/devnet/services/polygon" "github.com/ledgerwatch/erigon/turbo/debug" - "github.com/ledgerwatch/log/v3" ) -func initDevnet(chainName string, dataDir string, logger log.Logger) (devnet.Devnet, error) { +func initDevnet(chainName string, dataDir string, producerCount int, gasLimit uint64, logger log.Logger, consoleLogLevel log.Lvl, dirLogLevel log.Lvl) (devnet.Devnet, error) { const baseRpcHost = "localhost" - const baseRpcPort = 8545 + const baseRpcPort = 9545 switch chainName { case networkname.BorDevnetChainName: - heimdallGrpcAddr := polygon.HeimdallGrpcAddressDefault + heimdallURL := polygon.HeimdallURLDefault const sprintSize uint64 = 0 - return NewBorDevnetWithLocalHeimdall(dataDir, baseRpcHost, baseRpcPort, heimdallGrpcAddr, sprintSize, logger), nil + return networks.NewBorDevnetWithLocalHeimdall(dataDir, baseRpcHost, baseRpcPort, heimdallURL, sprintSize, producerCount, gasLimit, logger, consoleLogLevel, dirLogLevel), nil case networkname.DevChainName: - return NewDevDevnet(dataDir, baseRpcHost, baseRpcPort, logger), nil + return networks.NewDevDevnet(dataDir, baseRpcHost, baseRpcPort, producerCount, gasLimit, logger, consoleLogLevel, dirLogLevel), nil case "": envChainName, _ := os.LookupEnv("DEVNET_CHAIN") if envChainName == "" { envChainName = networkname.DevChainName } - return initDevnet(envChainName, dataDir, logger) + return initDevnet(envChainName, dataDir, 
producerCount, gasLimit, logger, consoleLogLevel, dirLogLevel) default: return nil, fmt.Errorf("unknown network: '%s'", chainName) @@ -40,6 +43,7 @@ func initDevnet(chainName string, dataDir string, logger log.Logger) (devnet.Dev } func ContextStart(t *testing.T, chainName string) (devnet.Context, error) { + //goland:noinspection GoBoolExpressions if runtime.GOOS == "windows" { t.Skip("FIXME: TempDir RemoveAll cleanup error: remove dev-0\\clique\\db\\clique\\mdbx.dat: The process cannot access the file because it is being used by another process") } @@ -48,8 +52,19 @@ func ContextStart(t *testing.T, chainName string) (devnet.Context, error) { logger := log.New() dataDir := t.TempDir() + envProducerCount, _ := os.LookupEnv("PRODUCER_COUNT") + if envProducerCount == "" { + envProducerCount = "1" + } + + producerCount, _ := strconv.ParseUint(envProducerCount, 10, 64) + + // TODO get log levels from env + var dirLogLevel log.Lvl = log.LvlTrace + var consoleLogLevel log.Lvl = log.LvlCrit + var network devnet.Devnet - network, err := initDevnet(chainName, dataDir, logger) + network, err := initDevnet(chainName, dataDir, int(producerCount), 0, logger, consoleLogLevel, dirLogLevel) if err != nil { return nil, fmt.Errorf("ContextStart initDevnet failed: %w", err) } diff --git a/cmd/devnet/tests/generic/devnet_test.go b/cmd/devnet/tests/generic_devnet_test.go similarity index 85% rename from cmd/devnet/tests/generic/devnet_test.go rename to cmd/devnet/tests/generic_devnet_test.go index 8f0f944ab85..8deddfb1fe8 100644 --- a/cmd/devnet/tests/generic/devnet_test.go +++ b/cmd/devnet/tests/generic_devnet_test.go @@ -1,20 +1,20 @@ //go:build integration -package generic +package tests import ( "context" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/ledgerwatch/erigon/cmd/devnet/accounts" "github.com/ledgerwatch/erigon/cmd/devnet/admin" "github.com/ledgerwatch/erigon/cmd/devnet/contracts/steps" "github.com/ledgerwatch/erigon/cmd/devnet/requests" "github.com/ledgerwatch/erigon/cmd/devnet/services" - "github.com/ledgerwatch/erigon/cmd/devnet/tests" "github.com/ledgerwatch/erigon/cmd/devnet/transactions" - "github.com/stretchr/testify/require" ) func testDynamicTx(t *testing.T, ctx context.Context) { @@ -39,21 +39,19 @@ func testDynamicTx(t *testing.T, ctx context.Context) { } func TestDynamicTxNode0(t *testing.T) { - runCtx, err := tests.ContextStart(t, "") + runCtx, err := ContextStart(t, "") require.Nil(t, err) testDynamicTx(t, runCtx.WithCurrentNetwork(0).WithCurrentNode(0)) } func TestDynamicTxAnyNode(t *testing.T) { - runCtx, err := tests.ContextStart(t, "") + runCtx, err := ContextStart(t, "") require.Nil(t, err) testDynamicTx(t, runCtx.WithCurrentNetwork(0)) } func TestCallContract(t *testing.T) { - t.Skip("FIXME: DeployAndCallLogSubscriber step fails: Log result is incorrect expected txIndex: 1, actual txIndex 2") - - runCtx, err := tests.ContextStart(t, "") + runCtx, err := ContextStart(t, "") require.Nil(t, err) ctx := runCtx.WithCurrentNetwork(0) diff --git a/cmd/devnet/transactions/tx.go b/cmd/devnet/transactions/tx.go index f56775094e9..3a241171f6f 100644 --- a/cmd/devnet/transactions/tx.go +++ b/cmd/devnet/transactions/tx.go @@ -27,6 +27,7 @@ func init() { scenarios.StepHandler(CheckTxPoolContent), scenarios.StepHandler(SendTxWithDynamicFee), scenarios.StepHandler(AwaitBlocks), + scenarios.StepHandler(SendTxLoad), ) } @@ -93,7 +94,7 @@ func SendTxWithDynamicFee(ctx context.Context, to, from string, amount uint64) ( // get the latest nonce for the next transaction logger 
:= devnet.Logger(ctx) - lowerThanBaseFeeTxs, higherThanBaseFeeTxs, err := CreateManyEIP1559TransactionsRefWithBaseFee2(ctx, to, from) + lowerThanBaseFeeTxs, higherThanBaseFeeTxs, err := CreateManyEIP1559TransactionsRefWithBaseFee2(ctx, to, from, 200) if err != nil { logger.Error("failed CreateManyEIP1559TransactionsRefWithBaseFee", "error", err) return nil, err @@ -112,7 +113,7 @@ func SendTxWithDynamicFee(ctx context.Context, to, from string, amount uint64) ( return nil, err } - CheckTxPoolContent(ctx, 100, 0, 100) + CheckTxPoolContent(ctx, len(higherThanBaseFeeHashlist), 0, len(lowerThanBaseFeeHashlist)) CheckTxPoolContent(ctx, -1, -1, -1) @@ -125,6 +126,48 @@ func SendTxWithDynamicFee(ctx context.Context, to, from string, amount uint64) ( return append(lowerThanBaseFeeHashlist, higherThanBaseFeeHashlist...), nil } +func SendTxLoad(ctx context.Context, to, from string, amount uint64, txPerSec uint) error { + logger := devnet.Logger(ctx) + + batchCount := txPerSec / 4 + + if batchCount < 1 { + batchCount = 1 + } + + ms250 := 250 * time.Millisecond + + for { + start := time.Now() + + tx, err := CreateManyEIP1559TransactionsHigherThanBaseFee(ctx, to, from, int(batchCount)) + + if err != nil { + logger.Error("failed Create Txs", "error", err) + return err + } + + _, err = SendManyTransactions(ctx, tx) + + if err != nil { + logger.Error("failed SendManyTransactions(higherThanBaseFeeTxs)", "error", err) + return err + } + + select { + case <-ctx.Done(): + return nil + default: + } + + duration := time.Since(start) + + if duration < ms250 { + time.Sleep(ms250 - duration) + } + } +} + func AwaitBlocks(ctx context.Context, sleepTime time.Duration) error { logger := devnet.Logger(ctx) @@ -154,7 +197,6 @@ func AwaitBlocks(ctx context.Context, sleepTime time.Duration) error { } const gasPrice = 912_345_678 -const gasAmount = 875_000_000 func CreateManyEIP1559TransactionsRefWithBaseFee(ctx context.Context, to, from string, logger log.Logger) ([]types.Transaction, []types.Transaction, error) { toAddress := libcommon.HexToAddress(to) @@ -177,7 +219,7 @@ func CreateManyEIP1559TransactionsRefWithBaseFee(ctx context.Context, to, from s return lowerBaseFeeTransactions, higherBaseFeeTransactions, nil } -func CreateManyEIP1559TransactionsRefWithBaseFee2(ctx context.Context, to, from string) ([]types.Transaction, []types.Transaction, error) { +func CreateManyEIP1559TransactionsRefWithBaseFee2(ctx context.Context, to, from string, count int) ([]types.Transaction, []types.Transaction, error) { toAddress := libcommon.HexToAddress(to) fromAddress := libcommon.HexToAddress(from) @@ -188,7 +230,10 @@ func CreateManyEIP1559TransactionsRefWithBaseFee2(ctx context.Context, to, from devnet.Logger(ctx).Info("BaseFeePerGas2", "val", baseFeePerGas) - lowerBaseFeeTransactions, higherBaseFeeTransactions, err := signEIP1559TxsLowerAndHigherThanBaseFee2(ctx, 100, 100, baseFeePerGas, toAddress, fromAddress) + lower := count - devnetutils.RandomInt(count) + higher := count - lower + + lowerBaseFeeTransactions, higherBaseFeeTransactions, err := signEIP1559TxsLowerAndHigherThanBaseFee2(ctx, lower, higher, baseFeePerGas, toAddress, fromAddress) if err != nil { return nil, nil, fmt.Errorf("failed signEIP1559TxsLowerAndHigherThanBaseFee2: %v", err) @@ -197,6 +242,33 @@ func CreateManyEIP1559TransactionsRefWithBaseFee2(ctx context.Context, to, from return lowerBaseFeeTransactions, higherBaseFeeTransactions, nil } +func CreateManyEIP1559TransactionsHigherThanBaseFee(ctx context.Context, to, from string, count int) 
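SendTxLoad above approximates a transactions-per-second target by sending txPerSec/4 transactions per batch and padding every iteration out to 250ms, i.e. roughly four batches per second. A stripped-down sketch of that pacing loop; sendBatch is a placeholder for the create-and-send steps:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// runLoad sends batches of work at roughly txPerSec items per second:
// txPerSec/4 items per batch, one batch per 250ms tick.
func runLoad(ctx context.Context, txPerSec uint, sendBatch func(n int) error) error {
	batchCount := txPerSec / 4
	if batchCount < 1 {
		batchCount = 1
	}

	const tick = 250 * time.Millisecond

	for {
		start := time.Now()

		if err := sendBatch(int(batchCount)); err != nil {
			return err
		}

		select {
		case <-ctx.Done():
			return nil // mirror SendTxLoad: a cancelled context is a clean stop
		default:
		}

		// Pad the iteration out to the 250ms tick to hold the rate.
		if d := time.Since(start); d < tick {
			time.Sleep(tick - d)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	total := 0
	_ = runLoad(ctx, 100, func(n int) error {
		total += n
		return nil
	})
	fmt.Println("sent ~", total, "txs in 1s") // roughly 100 with txPerSec=100
}
```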
([]types.Transaction, error) { + toAddress := libcommon.HexToAddress(to) + fromAddress := libcommon.HexToAddress(from) + + baseFeePerGas, err := blocks.BaseFeeFromBlock(ctx) + + if err != nil { + return nil, fmt.Errorf("failed BaseFeeFromBlock: %v", err) + } + + baseFeePerGas = baseFeePerGas * 2 + + devnet.Logger(ctx).Info("BaseFeePerGas2", "val", baseFeePerGas) + + node := devnet.SelectNode(ctx) + + res, err := node.GetTransactionCount(fromAddress, rpc.PendingBlock) + + if err != nil { + return nil, fmt.Errorf("failed to get transaction count for address 0x%x: %v", fromAddress, err) + } + + nonce := res.Uint64() + + return signEIP1559TxsHigherThanBaseFee(ctx, count, baseFeePerGas, &nonce, toAddress, fromAddress) +} + // createNonContractTx returns a signed transaction and the recipient address func CreateTransaction(node devnet.Node, to, from string, value uint64) (types.Transaction, libcommon.Address, error) { toAccount := accounts.GetAccount(to) @@ -207,7 +279,7 @@ func CreateTransaction(node devnet.Node, to, from string, value uint64) (types.T if strings.HasPrefix(to, "0x") { toAddress = libcommon.HexToAddress(from) } else { - return nil, libcommon.Address{}, fmt.Errorf("Unknown to account: %s", to) + return nil, libcommon.Address{}, fmt.Errorf("unknown to account: %s", to) } } else { toAddress = toAccount.Address @@ -216,7 +288,7 @@ func CreateTransaction(node devnet.Node, to, from string, value uint64) (types.T fromAccount := accounts.GetAccount(from) if fromAccount == nil { - return nil, libcommon.Address{}, fmt.Errorf("Unknown from account: %s", from) + return nil, libcommon.Address{}, fmt.Errorf("unknown from account: %s", from) } res, err := node.GetTransactionCount(fromAccount.Address, rpc.PendingBlock) @@ -292,7 +364,7 @@ func signEIP1559TxsLowerThanBaseFee(ctx context.Context, n int, baseFeePerGas ui transaction := types.NewEIP1559Transaction(chainId, *nonce, toAddress, uint256.NewInt(value), uint64(210_000), uint256.NewInt(gasPrice), new(uint256.Int), uint256.NewInt(gasFeeCap), nil) - devnet.Logger(ctx).Info("LOWER", "transaction", i, "nonce", transaction.Nonce, "value", transaction.Value, "feecap", transaction.FeeCap) + devnet.Logger(ctx).Trace("LOWER", "transaction", i, "nonce", transaction.Nonce, "value", transaction.Value, "feecap", transaction.FeeCap) signedTransaction, err := types.SignTx(transaction, signer, accounts.SigKey(fromAddress)) @@ -333,7 +405,7 @@ func signEIP1559TxsHigherThanBaseFee(ctx context.Context, n int, baseFeePerGas u transaction := types.NewEIP1559Transaction(chainId, *nonce, toAddress, uint256.NewInt(value), uint64(210_000), uint256.NewInt(gasPrice), new(uint256.Int), uint256.NewInt(gasFeeCap), nil) - devnet.Logger(ctx).Info("HIGHER", "transaction", i, "nonce", transaction.Nonce, "value", transaction.Value, "feecap", transaction.FeeCap) + devnet.Logger(ctx).Trace("HIGHER", "transaction", i, "nonce", transaction.Nonce, "value", transaction.Value, "feecap", transaction.FeeCap) signerKey := accounts.SigKey(fromAddress) if signerKey == nil { @@ -355,7 +427,7 @@ func signEIP1559TxsHigherThanBaseFee(ctx context.Context, n int, baseFeePerGas u func SendManyTransactions(ctx context.Context, signedTransactions []types.Transaction) ([]libcommon.Hash, error) { logger := devnet.Logger(ctx) - logger.Info("Sending multiple transactions to the txpool...") + logger.Info(fmt.Sprintf("Sending %d transactions to the txpool...", len(signedTransactions))) hashes := make([]libcommon.Hash, len(signedTransactions)) for idx, tx := range signedTransactions { diff --git 
a/cmd/downloader/main.go b/cmd/downloader/main.go index 9f132f63399..f7251501bde 100644 --- a/cmd/downloader/main.go +++ b/cmd/downloader/main.go @@ -7,7 +7,6 @@ import ( "net" "os" "path/filepath" - "runtime" "strings" "time" @@ -20,7 +19,7 @@ import ( "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/common/dir" "github.com/ledgerwatch/erigon-lib/downloader" - downloadercfg2 "github.com/ledgerwatch/erigon-lib/downloader/downloadercfg" + "github.com/ledgerwatch/erigon-lib/downloader/downloadercfg" "github.com/ledgerwatch/erigon-lib/downloader/snaptype" proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader" "github.com/ledgerwatch/erigon-lib/kv" @@ -60,8 +59,10 @@ var ( datadirCli, chain string filePath string forceRebuild bool - forceVerify bool - forceVerifyFiles []string + verify bool + verifyFailfast bool + _verifyFiles string + verifyFiles []string downloaderApiAddr string natSetting string torrentVerbosity int @@ -96,8 +97,9 @@ func init() { rootCmd.Flags().BoolVar(&disableIPV6, "downloader.disable.ipv6", utils.DisableIPV6.Value, utils.DisableIPV6.Usage) rootCmd.Flags().BoolVar(&disableIPV4, "downloader.disable.ipv4", utils.DisableIPV4.Value, utils.DisableIPV6.Usage) rootCmd.Flags().BoolVar(&seedbox, "seedbox", false, "Turns downloader into independent (doesn't need Erigon) software which discover/download/seed new files - useful for Erigon network, and can work on very cheap hardware. It will: 1) download .torrent from webseed 2) download new files after upgrade 3) we planing add discovery of new files soon") - rootCmd.PersistentFlags().BoolVar(&forceVerify, "verify", false, "Verify files. All by default, or passed by --verify.files") - rootCmd.PersistentFlags().StringArrayVar(&forceVerifyFiles, "verify.files", nil, "Limit list of files to verify") + rootCmd.PersistentFlags().BoolVar(&verify, "verify", false, utils.DownloaderVerifyFlag.Usage) + rootCmd.PersistentFlags().StringVar(&_verifyFiles, "verify.files", "", "Limit list of files to verify") + rootCmd.PersistentFlags().BoolVar(&verifyFailfast, "verify.failfast", false, "Stop on first found error. Report it and exit") withDataDir(createTorrent) withFile(createTorrent) @@ -106,6 +108,9 @@ func init() { rootCmd.AddCommand(torrentCat) rootCmd.AddCommand(torrentMagnet) + withDataDir(manifestCmd) + rootCmd.AddCommand(manifestCmd) + withDataDir(printTorrentHashes) printTorrentHashes.PersistentFlags().BoolVar(&forceRebuild, "rebuild", false, "Force re-create .torrent files") printTorrentHashes.Flags().StringVar(&targetFile, "targetfile", "", "write output to file") @@ -161,7 +166,7 @@ func Downloader(ctx context.Context, logger log.Logger) error { if err := checkChainName(ctx, dirs, chain); err != nil { return err } - torrentLogLevel, _, err := downloadercfg2.Int2LogLevel(torrentVerbosity) + torrentLogLevel, _, err := downloadercfg.Int2LogLevel(torrentVerbosity) if err != nil { return err } @@ -183,12 +188,12 @@ func Downloader(ctx context.Context, logger log.Logger) error { if known, ok := snapcfg.KnownWebseeds[chain]; ok { webseedsList = append(webseedsList, known...) 
} - cfg, err := downloadercfg2.New(dirs, version, torrentLogLevel, downloadRate, uploadRate, torrentPort, torrentConnsPerFile, torrentDownloadSlots, staticPeers, webseedsList, chain) + cfg, err := downloadercfg.New(dirs, version, torrentLogLevel, downloadRate, uploadRate, torrentPort, torrentConnsPerFile, torrentDownloadSlots, staticPeers, webseedsList, chain) if err != nil { return err } - cfg.ClientConfig.PieceHashersPerTorrent = 32 * runtime.NumCPU() + cfg.ClientConfig.PieceHashersPerTorrent = 32 cfg.ClientConfig.DisableIPv6 = disableIPV6 cfg.ClientConfig.DisableIPv4 = disableIPV4 @@ -198,6 +203,8 @@ func Downloader(ctx context.Context, logger log.Logger) error { } downloadernat.DoNat(natif, cfg.ClientConfig, logger) + cfg.AddTorrentsFromDisk = true // always true unless using uploader - which wants control of torrent files + d, err := downloader.New(ctx, cfg, dirs, logger, log.LvlInfo, seedbox) if err != nil { return err @@ -205,20 +212,12 @@ func Downloader(ctx context.Context, logger log.Logger) error { defer d.Close() logger.Info("[snapshots] Start bittorrent server", "my_peer_id", fmt.Sprintf("%x", d.TorrentClient().PeerID())) - if forceVerify { // remove and create .torrent files (will re-read all snapshots) - if err = d.VerifyData(ctx, forceVerifyFiles); err != nil { - return err - } - logger.Info("[snapshots] Verify done") - return nil - } - - d.MainLoopInBackground(false) - if err := addPreConfiguredHashes(ctx, d); err != nil { return err } + d.MainLoopInBackground(false) + bittorrentServer, err := downloader.NewGrpcServer(d) if err != nil { return fmt.Errorf("new server: %w", err) @@ -230,6 +229,15 @@ func Downloader(ctx context.Context, logger log.Logger) error { } defer grpcServer.GracefulStop() + if len(_verifyFiles) > 0 { + verifyFiles = strings.Split(_verifyFiles, ",") + } + if verify || verifyFailfast || len(verifyFiles) > 0 { // verify data files (will re-read all snapshots) + if err = d.VerifyData(ctx, verifyFiles, verifyFailfast); err != nil { + return err + } + } + <-ctx.Done() return nil } @@ -260,23 +268,18 @@ var printTorrentHashes = &cobra.Command{ }, } -var torrentVerify = &cobra.Command{ - Use: "torrent_verify", - Example: "go run ./cmd/downloader torrent_verify ", +var manifestCmd = &cobra.Command{ + Use: "manifest", + Example: "go run ./cmd/downloader manifest --datadir ", RunE: func(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return fmt.Errorf("please pass .torrent file path by first argument") - } - fPath := args[0] - mi, err := metainfo.LoadFromFile(fPath) - if err != nil { - return fmt.Errorf("LoadFromFile: %w, file=%s", err, fPath) + logger := debug.SetupCobra(cmd, "downloader") + if err := manifest(cmd.Context(), logger); err != nil { + log.Error(err.Error()) } - - fmt.Printf("%s\n", mi.HashInfoBytes()) return nil }, } + var torrentCat = &cobra.Command{ Use: "torrent_cat", Example: "go run ./cmd/downloader torrent_cat ", @@ -289,8 +292,13 @@ var torrentCat = &cobra.Command{ if err != nil { return fmt.Errorf("LoadFromFile: %w, file=%s", err, fPath) } - - fmt.Printf("%s\n", mi.HashInfoBytes()) + fmt.Printf("InfoHash = '%x'\n", mi.HashInfoBytes()) + mi.InfoBytes = nil + bytes, err := toml.Marshal(mi) + if err != nil { + return err + } + fmt.Printf("%s\n", string(bytes)) return nil }, } @@ -311,6 +319,53 @@ var torrentMagnet = &cobra.Command{ }, } +func manifest(ctx context.Context, logger log.Logger) error { + dirs := datadir.New(datadirCli) + extList := []string{ + ".torrent", + ".seg", ".idx", // e2 + ".kv", 
".kvi", ".bt", ".kvei", // e3 domain + ".v", ".vi", //e3 hist + ".ef", ".efi", //e3 idx + ".txt", //salt.txt + } + l, _ := dir.ListFiles(dirs.Snap, extList...) + for _, fPath := range l { + _, fName := filepath.Split(fPath) + fmt.Printf("%s\n", fName) + } + l, _ = dir.ListFiles(dirs.SnapDomain, extList...) + for _, fPath := range l { + _, fName := filepath.Split(fPath) + fmt.Printf("domain/%s\n", fName) + } + l, _ = dir.ListFiles(dirs.SnapHistory, extList...) + for _, fPath := range l { + _, fName := filepath.Split(fPath) + if strings.Contains(fName, "commitment") { + continue + } + fmt.Printf("history/%s\n", fName) + } + l, _ = dir.ListFiles(dirs.SnapIdx, extList...) + for _, fPath := range l { + _, fName := filepath.Split(fPath) + if strings.Contains(fName, "commitment") { + continue + } + fmt.Printf("idx/%s\n", fName) + } + l, _ = dir.ListFiles(dirs.SnapAccessors, extList...) + for _, fPath := range l { + _, fName := filepath.Split(fPath) + if strings.Contains(fName, "commitment") { + continue + } + fmt.Printf("accessors/%s\n", fName) + } + return nil +} + func doPrintTorrentHashes(ctx context.Context, logger log.Logger) error { dirs := datadir.New(datadirCli) if err := datadir.ApplyMigrations(dirs); err != nil { @@ -340,12 +395,13 @@ func doPrintTorrentHashes(ctx context.Context, logger log.Logger) error { if err != nil { return err } + for _, t := range torrents { // we don't release commitment history in this time. let's skip it here. - if strings.HasPrefix(t.DisplayName, "history/commitment") { + if strings.Contains(t.DisplayName, "history") && strings.Contains(t.DisplayName, "commitment") { continue } - if strings.HasPrefix(t.DisplayName, "idx/commitment") { + if strings.Contains(t.DisplayName, "idx") && strings.Contains(t.DisplayName, "commitment") { continue } res[t.DisplayName] = t.InfoHash.String() @@ -435,7 +491,7 @@ func StartGrpc(snServer *downloader.GrpcServer, addr string, creds *credentials. // Add pre-configured func addPreConfiguredHashes(ctx context.Context, d *downloader.Downloader) error { - for _, it := range snapcfg.KnownCfg(chain).Preverified { + for _, it := range snapcfg.KnownCfg(chain, 0).Preverified { if err := d.AddMagnetLink(ctx, snaptype.Hex2InfoHash(it.Hash), it.Name); err != nil { return err } diff --git a/cmd/evm/README.md b/cmd/evm/README.md index 2fd52134c0f..6dc57eaf408 100644 --- a/cmd/evm/README.md +++ b/cmd/evm/README.md @@ -214,7 +214,7 @@ exitcode:3 OK The chain configuration to be used for a transition is specified via the `--state.fork` CLI flag. A list of possible values and configurations can be -found in [`tests/init.go`](tests/init.go). +found in [`tests/init.go`](../../tests/init.go). 
#### Examples ##### Basic usage diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index cde453423ba..6f88d47e85e 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -154,7 +154,7 @@ func runCmd(ctx *cli.Context) error { defer db.Close() if ctx.String(GenesisFlag.Name) != "" { gen := readGenesis(ctx.String(GenesisFlag.Name)) - core.MustCommitGenesis(gen, db, "") + core.MustCommitGenesis(gen, db, "", log.Root()) genesisConfig = gen chainConfig = gen.Config } else { diff --git a/cmd/hack/hack.go b/cmd/hack/hack.go index 42aa5932bdf..b6585f90f42 100644 --- a/cmd/hack/hack.go +++ b/cmd/hack/hack.go @@ -8,7 +8,6 @@ import ( "encoding/json" "flag" "fmt" - "github.com/ledgerwatch/erigon-lib/kv/dbutils" "math/big" "net/http" _ "net/http/pprof" //nolint:gosec @@ -19,6 +18,8 @@ import ( "strings" "time" + "github.com/ledgerwatch/erigon-lib/kv/dbutils" + "github.com/RoaringBitmap/roaring/roaring64" "github.com/holiman/uint256" "github.com/ledgerwatch/log/v3" @@ -59,15 +60,16 @@ import ( ) var ( - action = flag.String("action", "", "action to execute") - cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`") - block = flag.Int("block", 1, "specifies a block number for operation") - blockTotal = flag.Int("blocktotal", 1, "specifies a total amount of blocks to process (will offset from head block if <= 0)") - account = flag.String("account", "0x", "specifies account to investigate") - name = flag.String("name", "", "name to add to the file names") - chaindata = flag.String("chaindata", "chaindata", "path to the chaindata database file") - bucket = flag.String("bucket", "", "bucket in the database") - hash = flag.String("hash", "0x00", "image for preimage or state root for testBlockHashes action") + action = flag.String("action", "", "action to execute") + cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`") + block = flag.Int("block", 1, "specifies a block number for operation") + blockTotal = flag.Int("blocktotal", 1, "specifies a total amount of blocks to process (will offset from head block if <= 0)") + account = flag.String("account", "0x", "specifies account to investigate") + name = flag.String("name", "", "name to add to the file names") + chaindata = flag.String("chaindata", "chaindata", "path to the chaindata database file") + bucket = flag.String("bucket", "", "bucket in the database") + hash = flag.String("hash", "0x00", "image for preimage or state root for testBlockHashes action") + snapshotVersion = flag.Uint("snapshots.version", 1, "specifies the snapshot file version") ) func dbSlice(chaindata string, bucket string, prefix []byte) { @@ -91,10 +93,10 @@ } // Searches 1000 blocks from the given one to try to find the one with the given state root hash -func testBlockHashes(chaindata string, block int, stateRoot libcommon.Hash) { +func testBlockHashes(chaindata string, snapshotVersion uint8, block int, stateRoot libcommon.Hash) { ethDb := mdbx.MustOpen(chaindata) defer ethDb.Close() - br, _ := blocksIO(ethDb) + br, _ := blocksIO(ethDb, snapshotVersion) tool.Check(ethDb.View(context.Background(), func(tx kv.Tx) error { blocksToSearch := 10000000 for i := uint64(block); i < uint64(block+blocksToSearch); i++ { @@ -130,7 +132,7 @@ func printCurrentBlockNumber(chaindata string) { }) } -func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) { +func blocksIO(db kv.RoDB, snapshotVersion uint8) (services.FullBlockReader, *blockio.BlockWriter) { var histV3 bool if err := 
db.View(context.Background(), func(tx kv.Tx) error { histV3, _ = kvcfg.HistoryV3.Enabled(tx) @@ -138,15 +140,15 @@ func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) { }); err != nil { panic(err) } - br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()), nil /* BorSnapshots */) + br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", snapshotVersion, log.New()), nil /* BorSnapshots */) bw := blockio.NewBlockWriter(histV3) return br, bw } -func printTxHashes(chaindata string, block uint64) error { +func printTxHashes(chaindata string, snapshotVersion uint8, block uint64) error { db := mdbx.MustOpen(chaindata) defer db.Close() - br, _ := blocksIO(db) + br, _ := blocksIO(db, snapshotVersion) if err := db.View(context.Background(), func(tx kv.Tx) error { for b := block; b < block+1; b++ { block, _ := br.BlockByNumber(context.Background(), tx, b) @@ -458,10 +460,10 @@ func getBlockTotal(tx kv.Tx, blockFrom uint64, blockTotalOrOffset int64) uint64 return 1 } -func extractHashes(chaindata string, blockStep uint64, blockTotalOrOffset int64, name string) error { +func extractHashes(chaindata string, snapshotVersion uint8, blockStep uint64, blockTotalOrOffset int64, name string) error { db := mdbx.MustOpen(chaindata) defer db.Close() - br, _ := blocksIO(db) + br, _ := blocksIO(db, snapshotVersion) f, err := os.Create(fmt.Sprintf("preverified_hashes_%s.go", name)) if err != nil { @@ -533,12 +535,12 @@ func extractHeaders(chaindata string, block uint64, blockTotalOrOffset int64) er return nil } -func extractBodies(datadir string) error { +func extractBodies(datadir string, snapshotVersion uint8) error { snaps := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{ Enabled: true, KeepBlocks: true, Produce: false, - }, filepath.Join(datadir, "snapshots"), log.New()) + }, filepath.Join(datadir, "snapshots"), snapshotVersion, log.New()) snaps.ReopenFolder() /* method Iterate was removed, need re-implement @@ -577,7 +579,7 @@ func extractBodies(datadir string) error { */ db := mdbx.MustOpen(filepath.Join(datadir, "chaindata")) defer db.Close() - br, _ := blocksIO(db) + br, _ := blocksIO(db, snapshotVersion) tx, err := db.BeginRo(context.Background()) if err != nil { @@ -1023,7 +1025,7 @@ func scanReceipts3(chaindata string, block uint64) error { return nil } -func scanReceipts2(chaindata string) error { +func scanReceipts2(chaindata string, snapshotVersion uint8) error { f, err := os.Create("receipts.txt") if err != nil { return err } @@ -1037,7 +1039,7 @@ func scanReceipts2(chaindata string) error { if err != nil { return err } - br, _ := blocksIO(dbdb) + br, _ := blocksIO(dbdb, snapshotVersion) defer tx.Rollback() blockNum, err := historyv2.AvailableFrom(tx) @@ -1386,7 +1388,7 @@ func main() { flow.TestGenCfg() case "testBlockHashes": - testBlockHashes(*chaindata, *block, libcommon.HexToHash(*hash)) + testBlockHashes(*chaindata, uint8(*snapshotVersion), *block, libcommon.HexToHash(*hash)) case "readAccount": if err := readAccount(*chaindata, libcommon.HexToAddress(*account)); err != nil { @@ -1424,7 +1426,7 @@ func main() { err = extractHeaders(*chaindata, uint64(*block), int64(*blockTotal)) case "extractHashes": - err = extractHashes(*chaindata, uint64(*block), int64(*blockTotal), *name) + err = extractHashes(*chaindata, uint8(*snapshotVersion), uint64(*block), int64(*blockTotal), *name) case "defrag": err = hackdb.Defrag() @@ -1433,13 +1435,13 @@ func main() { err 
= hackdb.TextInfo(*chaindata, &strings.Builder{}) case "extractBodies": - err = extractBodies(*chaindata) + err = extractBodies(*chaindata, uint8(*snapshotVersion)) case "repairCurrent": repairCurrent() case "printTxHashes": - printTxHashes(*chaindata, uint64(*block)) + printTxHashes(*chaindata, uint8(*snapshotVersion), uint64(*block)) case "snapSizes": err = snapSizes(*chaindata) @@ -1466,7 +1468,7 @@ func main() { err = scanTxs(*chaindata) case "scanReceipts2": - err = scanReceipts2(*chaindata) + err = scanReceipts2(*chaindata, uint8(*snapshotVersion)) case "scanReceipts3": err = scanReceipts3(*chaindata, uint64(*block)) diff --git a/cmd/integration/Readme.md b/cmd/integration/Readme.md index 389248b1e5c..5d97a69368c 100644 --- a/cmd/integration/Readme.md +++ b/cmd/integration/Readme.md @@ -88,7 +88,7 @@ make all 4. Build integration: cd erigon; make integration 5. Run: ./build/bin/integration mdbx_to_mdbx --chaindata /existing/erigon/path/chaindata/ --chaindata.to /path/to/copy-to/chaindata/ 6. cp -R /existing/erigon/path/snapshots /path/to/copy-to/snapshots -7. start erigon in new datadir as usualy +7. start erigon in new datadir as usual ``` ## Clear bad blocks markers table in the case some block was marked as invalid after some error diff --git a/cmd/integration/commands/flags.go b/cmd/integration/commands/flags.go index 22e583d0fff..70036f55425 100644 --- a/cmd/integration/commands/flags.go +++ b/cmd/integration/commands/flags.go @@ -22,7 +22,6 @@ var ( migration string integrityFast, integritySlow bool file string - HeimdallgRPCAddress string HeimdallURL string txtrace bool // Whether to trace the execution (should only be used together with `block`) pruneFlag string @@ -40,6 +39,7 @@ var ( _forceSetHistoryV3 bool workers, reconWorkers uint64 + snapshotVersion uint8 = 1 ) func must(err error) { @@ -170,3 +170,7 @@ func withCommitment(cmd *cobra.Command) { cmd.Flags().StringVar(&commitmentTrie, "commitment.trie", "hex", "hex - use Hex Patricia Hashed Trie for commitments, bin - use of binary patricia trie") cmd.Flags().IntVar(&commitmentFreq, "commitment.freq", 1000000, "how many blocks to skip between calculating commitment") } + +func withSnapshotVersion(cmd *cobra.Command) { + cmd.Flags().Uint8Var(&snapshotVersion, "snapshots.version", 1, "specifies the snapshot file version") +} diff --git a/cmd/integration/commands/reset_state.go b/cmd/integration/commands/reset_state.go index dec456eb865..0f995b2cdec 100644 --- a/cmd/integration/commands/reset_state.go +++ b/cmd/integration/commands/reset_state.go @@ -9,6 +9,7 @@ import ( "text/tabwriter" "github.com/ledgerwatch/erigon/turbo/backup" + "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" @@ -30,14 +31,14 @@ var cmdResetState = &cobra.Command{ Short: "Reset StateStages (5,6,7,8,9,10) and buckets", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return } ctx, _ := common.RootContext() defer db.Close() - sn, borSn, agg := allSnapshots(ctx, db, logger) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -49,7 +50,7 @@ var cmdResetState = &cobra.Command{ return } - if err = reset2.ResetState(db, ctx, chain, ""); err != nil { + if err = 
reset2.ResetState(db, ctx, chain, "", log.Root()); err != nil { if !errors.Is(err, context.Canceled) { logger.Error(err.Error()) } @@ -73,7 +74,7 @@ var cmdClearBadBlocks = &cobra.Command{ RunE: func(cmd *cobra.Command, args []string) error { logger := debug.SetupCobra(cmd, "integration") ctx, _ := common.RootContext() - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return err @@ -90,10 +91,11 @@ func init() { withConfig(cmdResetState) withDataDir(cmdResetState) withChain(cmdResetState) - + withSnapshotVersion(cmdResetState) rootCmd.AddCommand(cmdResetState) withDataDir(cmdClearBadBlocks) + withSnapshotVersion(cmdClearBadBlocks) rootCmd.AddCommand(cmdClearBadBlocks) } diff --git a/cmd/integration/commands/root.go b/cmd/integration/commands/root.go index 95120c4f822..e90e38b2222 100644 --- a/cmd/integration/commands/root.go +++ b/cmd/integration/commands/root.go @@ -72,7 +72,7 @@ func dbCfg(label kv.Label, path string) kv2.MdbxOpts { return opts } -func openDB(opts kv2.MdbxOpts, applyMigrations bool, logger log.Logger) (kv.RwDB, error) { +func openDB(opts kv2.MdbxOpts, applyMigrations bool, snapshotVersion uint8, logger log.Logger) (kv.RwDB, error) { db := opts.MustOpen() if applyMigrations { migrator := migrations.NewMigrator(opts.GetLabel()) @@ -105,7 +105,7 @@ func openDB(opts kv2.MdbxOpts, applyMigrations bool, logger log.Logger) (kv.RwDB return nil, err } if h3 { - _, _, agg := allSnapshots(context.Background(), db, logger) + _, _, agg := allSnapshots(context.Background(), db, snapshotVersion, logger) tdb, err := temporal.New(db, agg, systemcontracts.SystemContractCodeLookup[chain]) if err != nil { return nil, err diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index c4dea1d3a55..18959df6224 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -13,18 +13,19 @@ import ( "github.com/c2h5oh/datasize" "github.com/erigontech/mdbx-go/mdbx" lru "github.com/hashicorp/golang-lru/arc/v2" - "github.com/ledgerwatch/erigon/consensus/bor" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall" - "github.com/ledgerwatch/erigon/consensus/bor/heimdallgrpc" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/secp256k1" + "github.com/spf13/cobra" + "golang.org/x/exp/slices" + + "github.com/ledgerwatch/erigon/polygon/heimdall" + "github.com/ledgerwatch/erigon/core/rawdb/blockio" "github.com/ledgerwatch/erigon/node/nodecfg" "github.com/ledgerwatch/erigon/p2p/sentry/sentry_multi_client" + "github.com/ledgerwatch/erigon/polygon/bor" "github.com/ledgerwatch/erigon/turbo/builder" "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" - "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/secp256k1" - "github.com/spf13/cobra" - "golang.org/x/exp/slices" chain2 "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/commitment" @@ -37,6 +38,8 @@ import ( "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" libstate "github.com/ledgerwatch/erigon-lib/state" + "github.com/ledgerwatch/erigon-lib/wrap" + "github.com/ledgerwatch/erigon/cmd/hack/tool/fromdb" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/core" @@ -65,7 +68,7 @@ var cmdStageSnapshots = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := 
openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -86,7 +89,7 @@ var cmdStageHeaders = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -107,7 +110,7 @@ var cmdStageBorHeimdall = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -128,7 +131,7 @@ var cmdStageBodies = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -149,7 +152,7 @@ var cmdStageSenders = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -170,7 +173,7 @@ var cmdStageExec = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -193,7 +196,7 @@ var cmdStageTrie = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -214,7 +217,7 @@ var cmdStageHashState = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -235,7 +238,7 @@ var cmdStageHistory = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -256,7 +259,7 @@ var cmdLogIndex = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -277,7 +280,7 
@@ var cmdCallTraces = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -298,7 +301,7 @@ var cmdStageTxLookup = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -318,7 +321,7 @@ var cmdPrintStages = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), false, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), false, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -339,7 +342,7 @@ var cmdPrintMigrations = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), false, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), false, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -359,7 +362,7 @@ var cmdRemoveMigration = &cobra.Command{ Short: "", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), false, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), false, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -381,7 +384,7 @@ var cmdRunMigrations = &cobra.Command{ logger := debug.SetupCobra(cmd, "integration") //non-accede and exclusive mode - to apply create new tables if need. 
cfg := dbCfg(kv.ChainDB, chaindata).Flags(func(u uint) uint { return u &^ mdbx.Accede }).Exclusive() - db, err := openDB(cfg, true, logger) + db, err := openDB(cfg, true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -396,7 +399,7 @@ var cmdSetPrune = &cobra.Command{ Short: "Override existing --prune flag value (if you know what you are doing)", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -416,13 +419,13 @@ var cmdSetSnap = &cobra.Command{ Short: "Override existing --snapshots flag value (if you know what you are doing)", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return } defer db.Close() - sn, borSn, agg := allSnapshots(cmd.Context(), db, logger) + sn, borSn, agg := allSnapshots(cmd.Context(), db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -452,7 +455,7 @@ var cmdForceSetHistoryV3 = &cobra.Command{ Short: "Override existing --history.v3 flag value (if you know what you are doing)", Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -474,6 +477,7 @@ func init() { withDataDir(cmdPrintStages) withChain(cmdPrintStages) withHeimdall(cmdPrintStages) + withSnapshotVersion(cmdPrintStages) rootCmd.AddCommand(cmdPrintStages) withConfig(cmdStageSenders) @@ -484,11 +488,13 @@ func init() { withDataDir(cmdStageSenders) withChain(cmdStageSenders) withHeimdall(cmdStageSenders) + withSnapshotVersion(cmdStageSenders) rootCmd.AddCommand(cmdStageSenders) withConfig(cmdStageSnapshots) withDataDir(cmdStageSnapshots) withReset(cmdStageSnapshots) + withSnapshotVersion(cmdStageSnapshots) rootCmd.AddCommand(cmdStageSnapshots) withConfig(cmdStageHeaders) @@ -498,6 +504,7 @@ func init() { withReset(cmdStageHeaders) withChain(cmdStageHeaders) withHeimdall(cmdStageHeaders) + withSnapshotVersion(cmdStageHeaders) rootCmd.AddCommand(cmdStageHeaders) withConfig(cmdStageBorHeimdall) @@ -505,6 +512,7 @@ func init() { withReset(cmdStageBorHeimdall) withChain(cmdStageBorHeimdall) withHeimdall(cmdStageBorHeimdall) + withSnapshotVersion(cmdStageBorHeimdall) rootCmd.AddCommand(cmdStageBorHeimdall) withConfig(cmdStageBodies) @@ -512,6 +520,7 @@ func init() { withUnwind(cmdStageBodies) withChain(cmdStageBodies) withHeimdall(cmdStageBodies) + withSnapshotVersion(cmdStageBodies) rootCmd.AddCommand(cmdStageBodies) withConfig(cmdStageExec) @@ -526,6 +535,7 @@ func init() { withChain(cmdStageExec) withHeimdall(cmdStageExec) withWorkers(cmdStageExec) + withSnapshotVersion(cmdStageExec) rootCmd.AddCommand(cmdStageExec) withConfig(cmdStageHashState) @@ -537,6 +547,7 @@ func init() { withBatchSize(cmdStageHashState) withChain(cmdStageHashState) withHeimdall(cmdStageHashState) + withSnapshotVersion(cmdStageHashState) rootCmd.AddCommand(cmdStageHashState) withConfig(cmdStageTrie) @@ -548,6 +559,7 @@ 
func init() { withIntegrityChecks(cmdStageTrie) withChain(cmdStageTrie) withHeimdall(cmdStageTrie) + withSnapshotVersion(cmdStageTrie) rootCmd.AddCommand(cmdStageTrie) withConfig(cmdStageHistory) @@ -558,6 +570,7 @@ func init() { withPruneTo(cmdStageHistory) withChain(cmdStageHistory) withHeimdall(cmdStageHistory) + withSnapshotVersion(cmdStageHistory) rootCmd.AddCommand(cmdStageHistory) withConfig(cmdLogIndex) @@ -568,6 +581,7 @@ func init() { withPruneTo(cmdLogIndex) withChain(cmdLogIndex) withHeimdall(cmdLogIndex) + withSnapshotVersion(cmdLogIndex) rootCmd.AddCommand(cmdLogIndex) withConfig(cmdCallTraces) @@ -578,6 +592,7 @@ func init() { withPruneTo(cmdCallTraces) withChain(cmdCallTraces) withHeimdall(cmdCallTraces) + withSnapshotVersion(cmdCallTraces) rootCmd.AddCommand(cmdCallTraces) withConfig(cmdStageTxLookup) @@ -588,10 +603,12 @@ func init() { withPruneTo(cmdStageTxLookup) withChain(cmdStageTxLookup) withHeimdall(cmdStageTxLookup) + withSnapshotVersion(cmdStageTxLookup) rootCmd.AddCommand(cmdStageTxLookup) withConfig(cmdPrintMigrations) withDataDir(cmdPrintMigrations) + withSnapshotVersion(cmdPrintMigrations) rootCmd.AddCommand(cmdPrintMigrations) withConfig(cmdRemoveMigration) @@ -599,23 +616,27 @@ func init() { withMigration(cmdRemoveMigration) withChain(cmdRemoveMigration) withHeimdall(cmdRemoveMigration) + withSnapshotVersion(cmdRemoveMigration) rootCmd.AddCommand(cmdRemoveMigration) withConfig(cmdRunMigrations) withDataDir(cmdRunMigrations) withChain(cmdRunMigrations) withHeimdall(cmdRunMigrations) + withSnapshotVersion(cmdRunMigrations) rootCmd.AddCommand(cmdRunMigrations) withConfig(cmdSetSnap) withDataDir2(cmdSetSnap) withChain(cmdSetSnap) + withSnapshotVersion(cmdSetSnap) cmdSetSnap.Flags().Bool("snapshots", false, "") must(cmdSetSnap.MarkFlagRequired("snapshots")) rootCmd.AddCommand(cmdSetSnap) withConfig(cmdForceSetHistoryV3) withDataDir2(cmdForceSetHistoryV3) + withSnapshotVersion(cmdForceSetHistoryV3) cmdForceSetHistoryV3.Flags().BoolVar(&_forceSetHistoryV3, "history.v3", false, "") must(cmdForceSetHistoryV3.MarkFlagRequired("history.v3")) rootCmd.AddCommand(cmdForceSetHistoryV3) @@ -623,6 +644,7 @@ func init() { withConfig(cmdSetPrune) withDataDir(cmdSetPrune) withChain(cmdSetPrune) + withSnapshotVersion(cmdSetPrune) cmdSetPrune.Flags().StringVar(&pruneFlag, "prune", "hrtc", "") cmdSetPrune.Flags().Uint64Var(&pruneH, "prune.h.older", 0, "") cmdSetPrune.Flags().Uint64Var(&pruneR, "prune.r.older", 0, "") @@ -658,7 +680,7 @@ func stageHeaders(db kv.RwDB, ctx context.Context, logger log.Logger) error { return err } - sn, borSn, agg := allSnapshots(ctx, db, logger) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -756,7 +778,7 @@ func stageBorHeimdall(db kv.RwDB, ctx context.Context, logger log.Logger) error } func stageBodies(db kv.RwDB, ctx context.Context, logger log.Logger) error { - sn, borSn, agg := allSnapshots(ctx, db, logger) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -773,7 +795,7 @@ func stageBodies(db kv.RwDB, ctx context.Context, logger log.Logger) error { } u := sync.NewUnwindState(stages.Bodies, s.BlockNumber-unwind, s.BlockNumber) - cfg := stagedsync.StageBodiesCfg(db, nil, nil, nil, nil, 0, *chainConfig, br, historyV3, bw) + cfg := stagedsync.StageBodiesCfg(db, nil, nil, nil, nil, 0, *chainConfig, br, historyV3, bw, nil) if err := stagedsync.UnwindBodiesStage(u, tx, cfg, ctx); err != nil { return 
err } @@ -796,7 +818,7 @@ func stageBodies(db kv.RwDB, ctx context.Context, logger log.Logger) error { func stageSenders(db kv.RwDB, ctx context.Context, logger log.Logger) error { tmpdir := datadir.New(datadirCli).Tmp chainConfig := fromdb.ChainConfig(db) - sn, borSn, agg := allSnapshots(ctx, db, logger) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -863,7 +885,7 @@ func stageSenders(db kv.RwDB, ctx context.Context, logger log.Logger) error { return err } - cfg := stagedsync.StageSendersCfg(db, chainConfig, false, tmpdir, pm, br, nil) + cfg := stagedsync.StageSendersCfg(db, chainConfig, false, tmpdir, pm, br, nil, nil) if unwind > 0 { u := sync.NewUnwindState(stages.Senders, s.BlockNumber-unwind, s.BlockNumber) if err = stagedsync.UnwindSendersStage(u, tx, cfg, ctx); err != nil { @@ -894,7 +916,7 @@ func stageExec(db kv.RwDB, ctx context.Context, logger log.Logger) error { engine, vmConfig, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger) must(sync.SetCurrentStage(stages.Execution)) - sn, borSn, agg := allSnapshots(ctx, db, logger) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -903,7 +925,7 @@ func stageExec(db kv.RwDB, ctx context.Context, logger log.Logger) error { return reset2.WarmupExec(ctx, db) } if reset { - return reset2.ResetExec(ctx, db, chain, "") + return reset2.ResetExec(ctx, db, chain, "", logger) } if txtrace { @@ -945,10 +967,11 @@ func stageExec(db kv.RwDB, ctx context.Context, logger log.Logger) error { } defer tx.Rollback() } + txc := wrap.TxContainer{Tx: tx} if unwind > 0 { u := sync.NewUnwindState(stages.Execution, s.BlockNumber-unwind, s.BlockNumber) - err := stagedsync.UnwindExecutionStage(u, s, tx, ctx, cfg, true, logger) + err := stagedsync.UnwindExecutionStage(u, s, txc, ctx, cfg, true, logger) if err != nil { return err } @@ -967,7 +990,7 @@ func stageExec(db kv.RwDB, ctx context.Context, logger log.Logger) error { return nil } - err := stagedsync.SpawnExecuteBlocksStage(s, sync, tx, block, ctx, cfg, true /* initialCycle */, logger) + err := stagedsync.SpawnExecuteBlocksStage(s, sync, txc, block, ctx, cfg, true /* initialCycle */, logger) if err != nil { return err } @@ -976,7 +999,7 @@ func stageExec(db kv.RwDB, ctx context.Context, logger log.Logger) error { func stageTrie(db kv.RwDB, ctx context.Context, logger log.Logger) error { dirs, pm, historyV3 := datadir.New(datadirCli), fromdb.PruneMode(db), kvcfg.HistoryV3.FromDB(db) - sn, borSn, agg := allSnapshots(ctx, db, logger) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -1034,7 +1057,7 @@ func stageTrie(db kv.RwDB, ctx context.Context, logger log.Logger) error { func stageHashState(db kv.RwDB, ctx context.Context, logger log.Logger) error { dirs, pm, historyV3 := datadir.New(datadirCli), fromdb.PruneMode(db), kvcfg.HistoryV3.FromDB(db) - sn, borSn, agg := allSnapshots(ctx, db, logger) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -1212,7 +1235,7 @@ func stageHistory(db kv.RwDB, ctx context.Context, logger log.Logger) error { if historyV3 { return fmt.Errorf("this stage is disable in --history.v3=true") } - sn, borSn, agg := allSnapshots(ctx, db, logger) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -1288,7 
+1311,7 @@ func stageTxLookup(db kv.RwDB, ctx context.Context, logger log.Logger) error { _, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger) chainConfig := fromdb.ChainConfig(db) must(sync.SetCurrentStage(stages.TxLookup)) - sn, borSn, agg := allSnapshots(ctx, db, logger) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -1338,7 +1361,7 @@ func stageTxLookup(db kv.RwDB, ctx context.Context, logger log.Logger) error { } func printAllStages(db kv.RoDB, ctx context.Context, logger log.Logger) error { - sn, borSn, agg := allSnapshots(ctx, db, logger) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -1374,7 +1397,7 @@ var _allSnapshotsSingleton *freezeblocks.RoSnapshots var _allBorSnapshotsSingleton *freezeblocks.BorRoSnapshots var _aggSingleton *libstate.AggregatorV3 -func allSnapshots(ctx context.Context, db kv.RoDB, logger log.Logger) (*freezeblocks.RoSnapshots, *freezeblocks.BorRoSnapshots, *libstate.AggregatorV3) { +func allSnapshots(ctx context.Context, db kv.RoDB, version uint8, logger log.Logger) (*freezeblocks.RoSnapshots, *freezeblocks.BorRoSnapshots, *libstate.AggregatorV3) { openSnapshotOnce.Do(func() { var useSnapshots bool _ = db.View(context.Background(), func(tx kv.Tx) error { @@ -1385,8 +1408,8 @@ func allSnapshots(ctx context.Context, db kv.RoDB, logger log.Logger) (*freezebl dir.MustExist(dirs.SnapHistory) snapCfg := ethconfig.NewSnapCfg(useSnapshots, true, true) - _allSnapshotsSingleton = freezeblocks.NewRoSnapshots(snapCfg, dirs.Snap, logger) - _allBorSnapshotsSingleton = freezeblocks.NewBorRoSnapshots(snapCfg, dirs.Snap, logger) + _allSnapshotsSingleton = freezeblocks.NewRoSnapshots(snapCfg, dirs.Snap, version, logger) + _allBorSnapshotsSingleton = freezeblocks.NewBorRoSnapshots(snapCfg, dirs.Snap, version, logger) var err error _aggSingleton, err = libstate.NewAggregatorV3(ctx, dirs.SnapHistory, dirs.Tmp, ethconfig.HistoryV3AggregationStep, db, logger) @@ -1402,11 +1425,11 @@ func allSnapshots(ctx context.Context, db kv.RoDB, logger log.Logger) (*freezebl if err := _allSnapshotsSingleton.ReopenFolder(); err != nil { panic(err) } - _allSnapshotsSingleton.LogStat() + _allSnapshotsSingleton.LogStat("all") if err := _allBorSnapshotsSingleton.ReopenFolder(); err != nil { panic(err) } - _allBorSnapshotsSingleton.LogStat() + _allBorSnapshotsSingleton.LogStat("all") db.View(context.Background(), func(tx kv.Tx) error { _aggSingleton.LogStats(tx, func(endTxNumMinimax uint64) uint64 { _, histBlockNumProgress, _ := rawdbv3.TxNums.FindBlockNum(tx, endTxNumMinimax) @@ -1425,7 +1448,7 @@ var _blockWriterSingleton *blockio.BlockWriter func blocksIO(db kv.RoDB, logger log.Logger) (services.FullBlockReader, *blockio.BlockWriter) { openBlockReaderOnce.Do(func() { - sn, borSn, _ := allSnapshots(context.Background(), db, logger) + sn, borSn, _ := allSnapshots(context.Background(), db, snapshotVersion, logger) histV3 := kvcfg.HistoryV3.FromDB(db) _blockReaderSingleton = freezeblocks.NewBlockReader(sn, borSn) _blockWriterSingleton = blockio.NewBlockWriter(histV3) @@ -1447,7 +1470,7 @@ func allDomains(ctx context.Context, db kv.RoDB, stepSize uint64, mode libstate.
dir.MustExist(dirs.SnapHistory) snapCfg := ethconfig.NewSnapCfg(useSnapshots, true, true) - _allSnapshotsSingleton = freezeblocks.NewRoSnapshots(snapCfg, dirs.Snap, logger) + _allSnapshotsSingleton = freezeblocks.NewRoSnapshots(snapCfg, dirs.Snap, snapshotVersion, logger) var err error _aggDomainSingleton, err = libstate.NewAggregator(filepath.Join(dirs.DataDir, "state"), dirs.Tmp, stepSize, mode, trie, logger) @@ -1462,7 +1485,7 @@ func allDomains(ctx context.Context, db kv.RoDB, stepSize uint64, mode libstate. if err := _allSnapshotsSingleton.ReopenFolder(); err != nil { panic(err) } - _allSnapshotsSingleton.LogStat() + _allSnapshotsSingleton.LogStat("all:singleton") //db.View(context.Background(), func(tx kv.Tx) error { // _aggSingleton.LogStats(tx, func(endTxNumMinimax uint64) uint64 { // _, histBlockNumProgress, _ := rawdbv3.TxNums.FindBlockNum(tx, endTxNumMinimax) @@ -1539,7 +1562,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig, cfg.Miner = *miningConfig } cfg.Dirs = datadir.New(datadirCli) - allSn, _, agg := allSnapshots(ctx, db, logger) + allSn, _, agg := allSnapshots(ctx, db, snapshotVersion, logger) cfg.Snapshot = allSn.Cfg() blockReader, blockWriter := blocksIO(db, logger) @@ -1583,7 +1606,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig, } stages := stages2.NewDefaultStages(context.Background(), db, snapDb, p2p.Config{}, &cfg, sentryControlServer, notifications, nil, blockReader, blockRetire, agg, nil, nil, heimdallClient, recents, signatures, logger) - sync := stagedsync.New(stages, stagedsync.DefaultUnwindOrder, stagedsync.DefaultPruneOrder, logger) + sync := stagedsync.New(cfg.Sync, stages, stagedsync.DefaultUnwindOrder, stagedsync.DefaultPruneOrder, logger) miner := stagedsync.NewMiningState(&cfg.Miner) miningCancel := make(chan struct{}) @@ -1593,9 +1616,10 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig, }() miningSync := stagedsync.New( + cfg.Sync, stagedsync.MiningStages(ctx, stagedsync.StageMiningCreateBlockCfg(db, miner, *chainConfig, engine, nil, nil, dirs.Tmp, blockReader), - stagedsync.StageBorHeimdallCfg(db, snapDb, miner, *chainConfig, heimdallClient, blockReader, nil, nil, recents, signatures), + stagedsync.StageBorHeimdallCfg(db, snapDb, miner, *chainConfig, heimdallClient, blockReader, nil, nil, nil, recents, signatures), stagedsync.StageMiningExecCfg(db, miner, events, *chainConfig, engine, &vm.Config{}, dirs.Tmp, nil, 0, nil, nil, blockReader), stagedsync.StageHashStateCfg(db, dirs, historyV3), stagedsync.StageTrieCfg(db, false, true, false, dirs.Tmp, blockReader, nil, historyV3, agg), @@ -1645,7 +1669,7 @@ func overrideStorageMode(db kv.RwDB, logger log.Logger) error { }) } -func initConsensusEngine(ctx context.Context, cc *chain2.Config, dir string, db kv.RwDB, blockReader services.FullBlockReader, logger log.Logger) (engine consensus.Engine, heimdallClient heimdall.IHeimdallClient) { +func initConsensusEngine(ctx context.Context, cc *chain2.Config, dir string, db kv.RwDB, blockReader services.FullBlockReader, logger log.Logger) (engine consensus.Engine, heimdallClient heimdall.HeimdallClient) { config := ethconfig.Defaults var consensusConfig interface{} @@ -1654,14 +1678,10 @@ func initConsensusEngine(ctx context.Context, cc *chain2.Config, dir string, db } else if cc.Aura != nil { consensusConfig = &config.Aura } else if cc.Bor != nil { - consensusConfig = &config.Bor + consensusConfig = cc.Bor config.HeimdallURL = HeimdallURL if 
!config.WithoutHeimdall { - if config.HeimdallgRPCAddress != "" { - heimdallClient = heimdallgrpc.NewHeimdallGRPCClient(config.HeimdallgRPCAddress, logger) - } else { - heimdallClient = heimdall.NewHeimdallClient(config.HeimdallURL, logger) - } + heimdallClient = heimdall.NewHeimdallClient(config.HeimdallURL, logger) } } else { consensusConfig = &config.Ethash diff --git a/cmd/integration/commands/state_domains.go b/cmd/integration/commands/state_domains.go index fabde89f2f4..cacbb6238de 100644 --- a/cmd/integration/commands/state_domains.go +++ b/cmd/integration/commands/state_domains.go @@ -5,12 +5,13 @@ import ( "encoding/hex" "errors" "fmt" - "github.com/ledgerwatch/erigon-lib/metrics" "path/filepath" "runtime" "strings" "time" + "github.com/ledgerwatch/erigon-lib/metrics" + "github.com/holiman/uint256" "github.com/ledgerwatch/log/v3" "github.com/spf13/cobra" @@ -92,7 +93,7 @@ var readDomains = &cobra.Command{ } dirs := datadir.New(datadirCli) - chainDb, err := openDB(dbCfg(kv.ChainDB, dirs.Chaindata), true, logger) + chainDb, err := openDB(dbCfg(kv.ChainDB, dirs.Chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return diff --git a/cmd/integration/commands/state_stages.go b/cmd/integration/commands/state_stages.go index 3401cf669de..ab9955dfca2 100644 --- a/cmd/integration/commands/state_stages.go +++ b/cmd/integration/commands/state_stages.go @@ -6,11 +6,13 @@ import ( "encoding/json" "errors" "fmt" - "github.com/ledgerwatch/erigon-lib/kv/dbutils" "os" "sort" "time" + "github.com/ledgerwatch/erigon-lib/kv/dbutils" + "github.com/ledgerwatch/erigon-lib/wrap" + "github.com/c2h5oh/datasize" chain2 "github.com/ledgerwatch/erigon-lib/chain" common2 "github.com/ledgerwatch/erigon-lib/common" @@ -63,7 +65,7 @@ Examples: erigoncli.ApplyFlagsForEthConfigCobra(cmd.Flags(), ethConfig) miningConfig := params.MiningConfig{} utils.SetupMinerCobra(cmd, &miningConfig) - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -93,7 +95,7 @@ var loopIhCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") ctx, _ := common2.RootContext() - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -117,7 +119,7 @@ var loopExecCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { logger := debug.SetupCobra(cmd, "integration") ctx, _ := common2.RootContext() - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, snapshotVersion, logger) if err != nil { logger.Error("Opening DB", "error", err) return @@ -147,6 +149,7 @@ func init() { withChain(stateStages) withHeimdall(stateStages) withWorkers(stateStages) + withSnapshotVersion(stateStages) rootCmd.AddCommand(stateStages) withConfig(loopIhCmd) @@ -155,6 +158,7 @@ func init() { withUnwind(loopIhCmd) withChain(loopIhCmd) withHeimdall(loopIhCmd) + withSnapshotVersion(loopIhCmd) rootCmd.AddCommand(loopIhCmd) withConfig(loopExecCmd) @@ -164,6 +168,7 @@ func init() { withChain(loopExecCmd) withHeimdall(loopExecCmd) withWorkers(loopExecCmd) + withSnapshotVersion(loopExecCmd) rootCmd.AddCommand(loopExecCmd) } @@ -173,7 +178,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig 
params.MiningConfig, ctx context. return err } - sn, borSn, agg := allSnapshots(ctx, db, logger1) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger1) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -224,9 +229,9 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. execCfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, changeSetHook, chainConfig, engine, vmConfig, changesAcc, false, false, historyV3, dirs, br, nil, genesis, syncCfg, agg, nil) - execUntilFunc := func(execToBlock uint64) func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx, logger log.Logger) error { - return func(firstCycle bool, badBlockUnwind bool, s *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx, logger log.Logger) error { - if err := stagedsync.SpawnExecuteBlocksStage(s, unwinder, tx, execToBlock, ctx, execCfg, firstCycle, logger); err != nil { + execUntilFunc := func(execToBlock uint64) func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return func(firstCycle bool, badBlockUnwind bool, s *stagedsync.StageState, unwinder stagedsync.Unwinder, txc wrap.TxContainer, logger log.Logger) error { + if err := stagedsync.SpawnExecuteBlocksStage(s, unwinder, txc, execToBlock, ctx, execCfg, firstCycle, logger); err != nil { return fmt.Errorf("spawnExecuteBlocksStage: %w", err) } return nil @@ -313,7 +318,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. stateStages.MockExecFunc(stages.Execution, execUntilFunc(execToBlock)) _ = stateStages.SetCurrentStage(stages.Execution) - if err := stateStages.Run(db, tx, false /* firstCycle */); err != nil { + if _, err := stateStages.Run(db, wrap.TxContainer{Tx: tx}, false /* firstCycle */); err != nil { return err } @@ -348,8 +353,8 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. if miner.MiningConfig.Enabled && nextBlock != nil && nextBlock.Coinbase() != (common2.Address{}) { miner.MiningConfig.Etherbase = nextBlock.Coinbase() miner.MiningConfig.ExtraData = nextBlock.Extra() - miningStages.MockExecFunc(stages.MiningCreateBlock, func(firstCycle bool, badBlockUnwind bool, s *stagedsync.StageState, u stagedsync.Unwinder, tx kv.RwTx, logger log.Logger) error { - err = stagedsync.SpawnMiningCreateBlockStage(s, tx, + miningStages.MockExecFunc(stages.MiningCreateBlock, func(firstCycle bool, badBlockUnwind bool, s *stagedsync.StageState, u stagedsync.Unwinder, txc wrap.TxContainer, logger log.Logger) error { + err = stagedsync.SpawnMiningCreateBlockStage(s, txc.Tx, stagedsync.StageMiningCreateBlockCfg(db, miner, *chainConfig, engine, nil, nil, dirs.Tmp, br), quit, logger) if err != nil { @@ -371,7 +376,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. 
//}) _ = miningStages.SetCurrentStage(stages.MiningCreateBlock) - if err := miningStages.Run(db, tx, false /* firstCycle */); err != nil { + if _, err := miningStages.Run(db, wrap.TxContainer{Tx: tx}, false /* firstCycle */); err != nil { return err } tx.Rollback() @@ -450,7 +455,7 @@ func checkMinedBlock(b1, b2 *types.Block, chainConfig *chain2.Config) { } func loopIh(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) error { - sn, borSn, agg := allSnapshots(ctx, db, logger) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -464,7 +469,7 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) e } defer tx.Rollback() sync.DisableStages(stages.Snapshots, stages.Headers, stages.BlockHashes, stages.Bodies, stages.Senders, stages.Execution, stages.AccountHistoryIndex, stages.StorageHistoryIndex, stages.TxLookup, stages.Finish) - if err = sync.Run(db, tx, false /* firstCycle */); err != nil { + if _, err = sync.Run(db, wrap.TxContainer{Tx: tx}, false /* firstCycle */); err != nil { return err } execStage := stage(sync, tx, nil, stages.HashState) @@ -488,7 +493,7 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) e sync.DisableStages(stages.IntermediateHashes) _ = sync.SetCurrentStage(stages.HashState) - if err = sync.Run(db, tx, false /* firstCycle */); err != nil { + if _, err = sync.Run(db, wrap.TxContainer{Tx: tx}, false /* firstCycle */); err != nil { return err } must(tx.Commit()) @@ -508,7 +513,7 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) e _ = sync.SetCurrentStage(stages.IntermediateHashes) t := time.Now() - if err = sync.Run(db, tx, false /* firstCycle */); err != nil { + if _, err = sync.Run(db, wrap.TxContainer{Tx: tx}, false /* firstCycle */); err != nil { return err } logger.Warn("loop", "time", time.Since(t).String()) @@ -524,7 +529,7 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) e func loopExec(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) error { chainConfig := fromdb.ChainConfig(db) dirs, pm := datadir.New(datadirCli), fromdb.PruneMode(db) - sn, borSn, agg := allSnapshots(ctx, db, logger) + sn, borSn, agg := allSnapshots(ctx, db, snapshotVersion, logger) defer sn.Close() defer borSn.Close() defer agg.Close() @@ -563,8 +568,8 @@ func loopExec(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) /*badBlockHalt=*/ false, historyV3, dirs, br, nil, genesis, syncCfg, agg, nil) // set block limit of execute stage - sync.MockExecFunc(stages.Execution, func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx, logger log.Logger) error { - if err = stagedsync.SpawnExecuteBlocksStage(stageState, sync, tx, to, ctx, cfg, initialCycle, logger); err != nil { + sync.MockExecFunc(stages.Execution, func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, txc wrap.TxContainer, logger log.Logger) error { + if err = stagedsync.SpawnExecuteBlocksStage(stageState, sync, txc, to, ctx, cfg, initialCycle, logger); err != nil { return fmt.Errorf("spawnExecuteBlocksStage: %w", err) } return nil @@ -579,7 +584,7 @@ func loopExec(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) _ = sync.SetCurrentStage(stages.Execution) t := time.Now() - if err = sync.Run(db, tx, initialCycle); err != nil { + if _, err = sync.Run(db, 
wrap.TxContainer{Tx: tx}, initialCycle); err != nil { return err } logger.Info("[Integration] ", "loop time", time.Since(t)) diff --git a/cmd/observer/observer/server.go b/cmd/observer/observer/server.go index 99c2cb4bbc2..7eb2bfab476 100644 --- a/cmd/observer/observer/server.go +++ b/cmd/observer/observer/server.go @@ -85,7 +85,7 @@ func NewServer(ctx context.Context, flags CommandFlags, logger log.Logger) (*Ser } func makeLocalNode(ctx context.Context, nodeDBPath string, privateKey *ecdsa.PrivateKey, chain string, logger log.Logger) (*enode.LocalNode, error) { - db, err := enode.OpenDB(ctx, nodeDBPath, "") + db, err := enode.OpenDB(ctx, nodeDBPath, "", logger) if err != nil { return nil, err } diff --git a/cmd/p2psim/main.go b/cmd/p2psim/main.go index 3f567847bbd..e7202866500 100644 --- a/cmd/p2psim/main.go +++ b/cmd/p2psim/main.go @@ -39,12 +39,14 @@ import ( "context" "encoding/json" "fmt" - "github.com/ledgerwatch/erigon-lib/common" "io" "os" "strings" "text/tabwriter" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon/turbo/logging" "github.com/urfave/cli/v2" @@ -70,7 +72,7 @@ func main() { }, } app.Before = func(ctx *cli.Context) error { - logger := logging.SetupLoggerCtx("p2psim", ctx, false /* rootLogger */) + logger := logging.SetupLoggerCtx("p2psim", ctx, log.LvlInfo, log.LvlInfo, false /* rootLogger */) client = simulations.NewClient(ctx.String("api"), logger) return nil } diff --git a/cmd/rpcdaemon/README.md b/cmd/rpcdaemon/README.md index 7a275d2ac5b..79833756911 100644 --- a/cmd/rpcdaemon/README.md +++ b/cmd/rpcdaemon/README.md @@ -1,21 +1,21 @@ - [Introduction](#introduction) - [Getting Started](#getting-started) - * [Running locally](#running-locally) - * [Running remotely](#running-remotely) - * [Healthcheck](#healthcheck) - * [Testing](#testing) + - [Running locally](#running-locally) + - [Running remotely](#running-remotely) + - [Healthcheck](#healthcheck) + - [Testing](#testing) - [FAQ](#faq) - * [Relations between prune options and rpc methods](#relations-between-prune-options-and-rpc-method) - * [RPC Implementation Status](#rpc-implementation-status) - * [Securing the communication between RPC daemon and Erigon instance via TLS and authentication](#securing-the-communication-between-rpc-daemon-and-erigon-instance-via-tls-and-authentication) - * [Ethstats](#ethstats) - * [Allowing only specific methods (Allowlist)](#allowing-only-specific-methods--allowlist-) - * [Trace transactions progress](#trace-transactions-progress) - * [Clients getting timeout, but server load is low](#clients-getting-timeout--but-server-load-is-low) - * [Server load too high](#server-load-too-high) - * [Faster Batch requests](#faster-batch-requests) + - [Relations between prune options and rpc methods](#relations-between-prune-options-and-rpc-method) + - [RPC Implementation Status](#rpc-implementation-status) + - [Securing the communication between RPC daemon and Erigon instance via TLS and authentication](#securing-the-communication-between-rpc-daemon-and-erigon-instance-via-tls-and-authentication) + - [Ethstats](#ethstats) + - [Allowing only specific methods (Allowlist)](#allowing-only-specific-methods--allowlist-) + - [Trace transactions progress](#trace-transactions-progress) + - [Clients getting timeout, but server load is low](#clients-getting-timeout--but-server-load-is-low) + - [Server load too high](#server-load-too-high) + - [Faster Batch requests](#faster-batch-requests) - [For Developers](#for-developers) - * [Code 
generation](#code-generation) + - [Code generation](#code-generation) ## Introduction @@ -72,7 +72,7 @@ it may scale well for some workloads that are heavy on the current state queries ### Healthcheck -There are 2 options for running healtchecks, POST request, or GET request with custom headers. Both options are available +There are 2 options for running healthchecks, POST request, or GET request with custom headers. Both options are available at the `/health` endpoint. #### POST request @@ -99,7 +99,7 @@ Not adding a check disables that. `eth` namespace to be listed in `http.api`. Example request -```http POST http://localhost:8545/health --raw '{"min_peer_count": 3, "known_block": "0x1F"}'``` +`http POST http://localhost:8545/health --raw '{"min_peer_count": 3, "known_block": "0x1F"}'` Example response ``` @@ -114,19 +114,21 @@ Example response If the healthcheck is successful it will return a 200 status code. -If the healthcheck fails for any reason a status 500 will be returned. This is true if one of the criteria requested +If the healthcheck fails for any reason, a status 500 will be returned. This is true if one of the criteria requested fails its check. -You can set any number of values on the `X-ERIGON-HEALTHCHECK` header. Ones that are not included are skipped in the +You can set any number of values on the `X-ERIGON-HEALTHCHECK` header. Ones that are not included are skipped in the checks. Available Options: + - `synced` - will check if the node has completed syncing - `min_peer_count` - will check that the node has at least `` many peers - `check_block` - will check that the node is at least ahead of the `` specified - `max_seconds_behind` - will check that the node is no more than `` behind from its latest block Example Request + ``` curl --location --request GET 'http://localhost:8545/health' \ --header 'X-ERIGON-HEALTHCHECK: min_peer_count1' \ @@ -135,6 +137,7 @@ curl --location --request GET 'http://localhost:8545/health' \ ``` Example Response + ``` { "check_block":"DISABLED", @@ -194,7 +197,6 @@ If the `--http.url` flag is set, then `--http.addr` and `--http.port` with both note that this is NOT geth-style IPC. for that, read the next section, IPC endpoint(geth-compatible) - ### HTTPS, HTTP2, and H2C Erigon supports HTTPS, HTTP2, and H2C out of the box. H2C is served by the default HTTP handler. @@ -207,7 +209,6 @@ The HTTPS server will inherit all other configuration parameters from http, for If the `--https.url` flag is set, then `--https.addr` and `--https.port` with both be ignored. - ### IPC endpoint (geth compatible) erigon supports the geth-style unix socket IPC. you can enable this with `--socket.enabled` flag, @@ -225,7 +226,7 @@ Label "remote" means: `--private.api.addr` flag is required. The following table shows the current implementation status of Erigon's RPC daemon. | Command | Avail | Notes | -| ------------------------------------------ |---------|--------------------------------------| +| ------------------------------------------ | ------- | ------------------------------------ | | admin_nodeInfo | Yes | | | admin_peers | Yes | | | admin_addPeer | Yes | | @@ -280,7 +281,7 @@ The following table shows the current implementation status of Erigon's RPC daem | eth_getFilterChanges | Yes | | | eth_uninstallFilter | Yes | | | eth_getLogs | Yes | | -| interned spe | | | +| interned spe | | | | eth_accounts | No | deprecated | | eth_sendRawTransaction | Yes | `remote`.
| | eth_sendTransaction | - | not yet implemented | @@ -337,6 +338,7 @@ The following table shows the current implementation status of Erigon's RPC daem | trace_transaction | Yes | | | | | | | txpool_content | Yes | `remote` | +| txpool_contentFrom | Yes | `remote` | | txpool_status | Yes | `remote` | | | | | | eth_getCompilers | No | deprecated | @@ -371,10 +373,10 @@ The following table shows the current implementation status of Erigon's RPC daem ### GraphQL -| Command | Avail | Notes | -|--------------------------------------------|---------|--------------------------------------| -| GetBlockDetails | Yes | | -| GetChainID | Yes | | +| Command | Avail | Notes | +| --------------- | ----- | ----- | +| GetBlockDetails | Yes | | +| GetChainID | Yes | | This table is constantly updated. Please visit again. @@ -530,10 +532,7 @@ with `rpc.accessList` flag. ```json { - "allow": [ - "net_version", - "web3_eth_getBlockByHash" - ] + "allow": ["net_version", "web3_eth_getBlockByHash"] } ``` @@ -568,7 +567,7 @@ Currently batch requests are spawn multiple goroutines and process all sub-reque huge batch to other users - added flag `--rpc.batch.concurrency` (default: 2). Increase it to process large batches faster. -Known Issue: if at least 1 request is "streamable" (has parameter of type *jsoniter.Stream) - then whole batch will +Known Issue: if at least 1 request is "streamable" (has parameter of type \*jsoniter.Stream) - then the whole batch will be processed sequentially (on 1 goroutine). ## For Developers diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go index a5c23b2ede3..297f7e8f8d7 100644 --- a/cmd/rpcdaemon/cli/config.go +++ b/cmd/rpcdaemon/cli/config.go @@ -5,6 +5,7 @@ import ( "crypto/rand" "errors" "fmt" + "math/big" "net" "net/http" "net/url" @@ -13,29 +14,19 @@ import ( "strings" "time" + "github.com/ledgerwatch/log/v3" + "github.com/spf13/cobra" + "golang.org/x/sync/semaphore" + "google.golang.org/grpc" + grpcHealth "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" + "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/erigon-lib/chain/snapcfg" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/common/dir" "github.com/ledgerwatch/erigon-lib/common/hexutility" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" - "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" - libstate "github.com/ledgerwatch/erigon-lib/state" - "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/consensus/bor" - "github.com/ledgerwatch/erigon/consensus/bor/contract" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" - "github.com/ledgerwatch/erigon/consensus/ethash" - "github.com/ledgerwatch/erigon/core/state/temporal" - "github.com/ledgerwatch/erigon/core/systemcontracts" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/eth/ethconfig" - "github.com/ledgerwatch/erigon/rpc/rpccfg" - "github.com/ledgerwatch/erigon/turbo/debug" - "github.com/ledgerwatch/erigon/turbo/logging" - "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" - "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" - "github.com/ledgerwatch/erigon-lib/direct" "github.com/ledgerwatch/erigon-lib/gointerfaces" "github.com/ledgerwatch/erigon-lib/gointerfaces/grpcutil" @@ -43,16 +34,12 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" +
"github.com/ledgerwatch/erigon-lib/kv/kvcfg" kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" + "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" "github.com/ledgerwatch/erigon-lib/kv/remotedb" "github.com/ledgerwatch/erigon-lib/kv/remotedbserver" - "github.com/ledgerwatch/log/v3" - "github.com/spf13/cobra" - "golang.org/x/sync/semaphore" - "google.golang.org/grpc" - grpcHealth "google.golang.org/grpc/health" - "google.golang.org/grpc/health/grpc_health_v1" - + libstate "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/cli/httpcfg" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/graphql" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/health" @@ -61,12 +48,26 @@ import ( "github.com/ledgerwatch/erigon/cmd/utils/flags" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/paths" + "github.com/ledgerwatch/erigon/consensus" + "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/state/temporal" + "github.com/ledgerwatch/erigon/core/systemcontracts" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/node" "github.com/ledgerwatch/erigon/node/nodecfg" + "github.com/ledgerwatch/erigon/polygon/bor" + "github.com/ledgerwatch/erigon/polygon/bor/borcfg" "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/rpc/rpccfg" + "github.com/ledgerwatch/erigon/turbo/debug" + "github.com/ledgerwatch/erigon/turbo/logging" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" // Force-load native and js packages, to trigger registration _ "github.com/ledgerwatch/erigon/eth/tracers/js" @@ -208,7 +209,7 @@ func subscribeToStateChangesLoop(ctx context.Context, client StateChangesClient, time.Sleep(3 * time.Second) continue } - log.Warn("[txpool.handleStateChanges]", "err", err) + log.Warn("[rpcdaemon subscribeToStateChanges]", "err", err) } } }() @@ -376,15 +377,17 @@ func RemoteServices(ctx context.Context, cfg *httpcfg.HttpCfg, logger log.Logger logger.Info("Use --snapshots=false") } + snapshotVersion := snapcfg.KnownCfg(cc.ChainName, 0).Version + // Configure sapshots - allSnapshots = freezeblocks.NewRoSnapshots(cfg.Snap, cfg.Dirs.Snap, logger) - allBorSnapshots = freezeblocks.NewBorRoSnapshots(cfg.Snap, cfg.Dirs.Snap, logger) + allSnapshots = freezeblocks.NewRoSnapshots(cfg.Snap, cfg.Dirs.Snap, snapshotVersion, logger) + allBorSnapshots = freezeblocks.NewBorRoSnapshots(cfg.Snap, cfg.Dirs.Snap, snapshotVersion, logger) // To povide good UX - immediatly can read snapshots after RPCDaemon start, even if Erigon is down // Erigon does store list of snapshots in db: means RPCDaemon can read this list now, but read by `remoteKvClient.Snapshots` after establish grpc connection allSnapshots.OptimisticReopenWithDB(db) allBorSnapshots.OptimisticalyReopenWithDB(db) - allSnapshots.LogStat() - allBorSnapshots.LogStat() + allSnapshots.LogStat("remote") + allBorSnapshots.LogStat("remote") if agg, err = libstate.NewAggregatorV3(ctx, cfg.Dirs.SnapHistory, cfg.Dirs.Tmp, ethconfig.HistoryV3AggregationStep, db, logger); err != nil { return nil, nil, nil, nil, nil, nil, nil, ff, nil, fmt.Errorf("create aggregator: %w", err) @@ -408,12 +411,12 @@ func RemoteServices(ctx context.Context, cfg *httpcfg.HttpCfg, logger 
log.Logger if err := allSnapshots.ReopenList(reply.BlocksFiles, true); err != nil { logger.Error("[snapshots] reopen", "err", err) } else { - allSnapshots.LogStat() + allSnapshots.LogStat("reopen") } if err := allBorSnapshots.ReopenList(reply.BlocksFiles, true); err != nil { logger.Error("[bor snapshots] reopen", "err", err) } else { - allSnapshots.LogStat() + allBorSnapshots.LogStat("reopen") } _ = reply.HistoryFiles @@ -503,9 +506,11 @@ func RemoteServices(ctx context.Context, cfg *httpcfg.HttpCfg, logger log.Logger } // Skip the compatibility check, until we have a schema in erigon-lib + borConfig := cc.Bor.(*borcfg.BorConfig) + engine = bor.NewRo(cc, borKv, blockReader, - span.NewChainSpanner(contract.ValidatorSet(), cc, true, logger), - contract.NewGenesisContractsClient(cc, cc.Bor.ValidatorContract, cc.Bor.StateReceiverContract, logger), logger) + bor.NewChainSpanner(bor.GenesisContractValidatorSetABI(), cc, true, logger), + bor.NewGenesisContractsClient(cc, borConfig.ValidatorContract, borConfig.StateReceiverContract, logger), logger) default: engine = ethash.NewFaker() @@ -781,7 +786,7 @@ func isWebsocket(r *http.Request) bool { strings.Contains(strings.ToLower(r.Header.Get("Connection")), "upgrade") } -// obtainJWTSecret loads the jwt-secret, either from the provided config, +// ObtainJWTSecret loads the jwt-secret, either from the provided config, // or from the default location. If neither of those are present, it generates // a new secret and stores to the default location. func ObtainJWTSecret(cfg *httpcfg.HttpCfg, logger log.Logger) ([]byte, error) { @@ -873,8 +878,10 @@ func createEngineListener(cfg *httpcfg.HttpCfg, engineApi []rpc.API, logger log. return engineListener, engineSrv, engineAddr.String(), nil } +var remoteConsensusEngineNotReadyErr = errors.New("remote consensus engine not ready") + type remoteConsensusEngine struct { - engine consensus.EngineReader + engine consensus.Engine } func (e *remoteConsensusEngine) HasEngine() bool { @@ -885,6 +892,18 @@ func (e *remoteConsensusEngine) Engine() consensus.EngineReader { return e.engine } +func (e *remoteConsensusEngine) validateEngineReady() error { + if !e.HasEngine() { + return remoteConsensusEngineNotReadyErr + } + + return nil +} + +// init - reasoning behind init is that we would like to initialise the remote consensus engine either post rpcdaemon +// service startup or in a background goroutine, so that we do not depend on the liveness of other services when +// starting up rpcdaemon and do not block startup (avoiding "cascade outage" scenario). In this case the DB dependency +// can be a remote DB service running on another machine. 
func (e *remoteConsensusEngine) init(db kv.RoDB, blockReader services.FullBlockReader, remoteKV remote.KVClient, logger log.Logger) bool { var cc *chain.Config @@ -911,9 +930,11 @@ func (e *remoteConsensusEngine) init(db kv.RoDB, blockReader services.FullBlockR return false } + borConfig := cc.Bor.(*borcfg.BorConfig) + e.engine = bor.NewRo(cc, borKv, blockReader, - span.NewChainSpanner(contract.ValidatorSet(), cc, true, logger), - contract.NewGenesisContractsClient(cc, cc.Bor.ValidatorContract, cc.Bor.StateReceiverContract, logger), logger) + bor.NewChainSpanner(bor.GenesisContractValidatorSetABI(), cc, true, logger), + bor.NewGenesisContractsClient(cc, borConfig.ValidatorContract, borConfig.StateReceiverContract, logger), logger) } else { e.engine = ethash.NewFaker() } @@ -922,41 +943,89 @@ func (e *remoteConsensusEngine) init(db kv.RoDB, blockReader services.FullBlockR } func (e *remoteConsensusEngine) Author(header *types.Header) (libcommon.Address, error) { - if e.engine != nil { - return e.engine.Author(header) + if err := e.validateEngineReady(); err != nil { + return libcommon.Address{}, err } - return libcommon.Address{}, fmt.Errorf("remote consensus engine not iinitialized") + return e.engine.Author(header) } func (e *remoteConsensusEngine) IsServiceTransaction(sender libcommon.Address, syscall consensus.SystemCall) bool { - if e.engine != nil { - return e.engine.IsServiceTransaction(sender, syscall) + if err := e.validateEngineReady(); err != nil { + panic(err) } - return false + return e.engine.IsServiceTransaction(sender, syscall) } func (e *remoteConsensusEngine) Type() chain.ConsensusName { - if e.engine != nil { - return e.engine.Type() + if err := e.validateEngineReady(); err != nil { + panic(err) } - return "" + return e.engine.Type() } func (e *remoteConsensusEngine) CalculateRewards(config *chain.Config, header *types.Header, uncles []*types.Header, syscall consensus.SystemCall) ([]consensus.Reward, error) { - if e.engine != nil { - return e.engine.CalculateRewards(config, header, uncles, syscall) + if err := e.validateEngineReady(); err != nil { + return nil, err } - return nil, fmt.Errorf("remote consensus engine not iinitialized") + return e.engine.CalculateRewards(config, header, uncles, syscall) } func (e *remoteConsensusEngine) Close() error { - if e.engine != nil { - return e.engine.Close() + if err := e.validateEngineReady(); err != nil { + return err } - return nil + return e.engine.Close() +} + +func (e *remoteConsensusEngine) Initialize(config *chain.Config, chain consensus.ChainHeaderReader, header *types.Header, state *state.IntraBlockState, syscall consensus.SysCallCustom, logger log.Logger) { + if err := e.validateEngineReady(); err != nil { + panic(err) + } + + e.engine.Initialize(config, chain, header, state, syscall, logger) +} + +func (e *remoteConsensusEngine) VerifyHeader(_ consensus.ChainHeaderReader, _ *types.Header, _ bool) error { + panic("remoteConsensusEngine.VerifyHeader not supported") +} + +func (e *remoteConsensusEngine) VerifyUncles(_ consensus.ChainReader, _ *types.Header, _ []*types.Header) error { + panic("remoteConsensusEngine.VerifyUncles not supported") +} + +func (e *remoteConsensusEngine) Prepare(_ consensus.ChainHeaderReader, _ *types.Header, _ *state.IntraBlockState) error { + panic("remoteConsensusEngine.Prepare not supported") +} + +func (e *remoteConsensusEngine) Finalize(_ *chain.Config, _ *types.Header, _ *state.IntraBlockState, _ types.Transactions, _ []*types.Header, _ types.Receipts, _ []*types.Withdrawal, _ 
consensus.ChainReader, _ consensus.SystemCall, _ log.Logger) (types.Transactions, types.Receipts, error) { + panic("remoteConsensusEngine.Finalize not supported") +} + +func (e *remoteConsensusEngine) FinalizeAndAssemble(_ *chain.Config, _ *types.Header, _ *state.IntraBlockState, _ types.Transactions, _ []*types.Header, _ types.Receipts, _ []*types.Withdrawal, _ consensus.ChainReader, _ consensus.SystemCall, _ consensus.Call, _ log.Logger) (*types.Block, types.Transactions, types.Receipts, error) { + panic("remoteConsensusEngine.FinalizeAndAssemble not supported") +} + +func (e *remoteConsensusEngine) Seal(_ consensus.ChainHeaderReader, _ *types.Block, _ chan<- *types.Block, _ <-chan struct{}) error { + panic("remoteConsensusEngine.Seal not supported") +} + +func (e *remoteConsensusEngine) SealHash(_ *types.Header) libcommon.Hash { + panic("remoteConsensusEngine.SealHash not supported") +} + +func (e *remoteConsensusEngine) CalcDifficulty(_ consensus.ChainHeaderReader, _ uint64, _ uint64, _ *big.Int, _ uint64, _ libcommon.Hash, _ libcommon.Hash, _ uint64) *big.Int { + panic("remoteConsensusEngine.CalcDifficulty not supported") +} + +func (e *remoteConsensusEngine) GenerateSeal(_ consensus.ChainHeaderReader, _ *types.Header, _ *types.Header, _ consensus.Call) []byte { + panic("remoteConsensusEngine.GenerateSeal not supported") +} + +func (e *remoteConsensusEngine) APIs(_ consensus.ChainHeaderReader) []rpc.API { + panic("remoteConsensusEngine.APIs not supported") } diff --git a/cmd/silkworm_api/snapshot_idx.go b/cmd/silkworm_api/snapshot_idx.go index 4265ef19471..8f728ddf06f 100644 --- a/cmd/silkworm_api/snapshot_idx.go +++ b/cmd/silkworm_api/snapshot_idx.go @@ -6,6 +6,7 @@ import ( "path/filepath" "time" + "github.com/ledgerwatch/erigon-lib/chain/snapcfg" "github.com/ledgerwatch/erigon-lib/common/background" "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/downloader/snaptype" @@ -37,7 +38,7 @@ func main() { }, }, Action: func(cCtx *cli.Context) error { - return buildIndex(cCtx, cCtx.String("datadir"), cCtx.StringSlice("snapshot_path")) + return buildIndex(cCtx, cCtx.String("datadir"), cCtx.StringSlice("snapshot_path"), 0) }, } @@ -55,7 +56,7 @@ func FindIf(segments []snaptype.FileInfo, predicate func(snaptype.FileInfo) bool return snaptype.FileInfo{}, false // Return zero value and false if not found } -func buildIndex(cliCtx *cli.Context, dataDir string, snapshotPaths []string) error { +func buildIndex(cliCtx *cli.Context, dataDir string, snapshotPaths []string, minBlock uint64) error { logger, _, err := debug.Setup(cliCtx, true /* rootLogger */) if err != nil { return err @@ -75,7 +76,7 @@ func buildIndex(cliCtx *cli.Context, dataDir string, snapshotPaths []string) err chainConfig := fromdb.ChainConfig(chainDB) - segments, _, err := freezeblocks.Segments(dirs.Snap) + segments, _, err := freezeblocks.Segments(dirs.Snap, snapcfg.KnownCfg(chainConfig.ChainName, 0).Version, minBlock) if err != nil { return err } @@ -97,7 +98,7 @@ func buildIndex(cliCtx *cli.Context, dataDir string, snapshotPaths []string) err jobProgress := &background.Progress{} ps.Add(jobProgress) defer ps.Delete(jobProgress) - return freezeblocks.HeadersIdx(ctx, chainConfig, segment.Path, segment.From, dirs.Tmp, jobProgress, logLevel, logger) + return freezeblocks.HeadersIdx(ctx, segment.Path, segment.Version, segment.From, dirs.Tmp, jobProgress, logLevel, logger) }) case snaptype.Bodies: g.Go(func() error { @@ -112,7 +113,7 @@ func buildIndex(cliCtx *cli.Context, dataDir string, 
snapshotPaths []string) err ps.Add(jobProgress) defer ps.Delete(jobProgress) dir, _ := filepath.Split(segment.Path) - return freezeblocks.TransactionsIdx(ctx, chainConfig, segment.From, segment.To, dir, dirs.Tmp, jobProgress, logLevel, logger) + return freezeblocks.TransactionsIdx(ctx, chainConfig, segment.Version, segment.From, segment.To, dir, dirs.Tmp, jobProgress, logLevel, logger) }) } } diff --git a/cmd/snapshots/README.md b/cmd/snapshots/README.md new file mode 100644 index 00000000000..06544214de4 --- /dev/null +++ b/cmd/snapshots/README.md @@ -0,0 +1,79 @@ +# Snapshots - tool for managing remote snapshots + +In the root of the `Erigon` project, use this command to build the commands: + +```shell +make snapshots +``` + +It can then be run using the following command: + +```shell +./build/bin/snapshots sub-command options... +``` + +Snapshots supports the following sub-commands: + +## cmp - compare snapshots + +This command takes the following form: + +```shell + snapshots cmp <location> <location> +``` + +This will cause the .seg files from each location to be copied to the local machine, indexed, and then have their rlp contents compared. + +Optionally a `<start block>` and an `<end block>` may be specified to limit the scope of the operation. + +It is also possible to set the `--types` flag to limit the type of segment file being downloaded and compared. The currently supported types are `header` and `body`. + +## copy - copy snapshots + +This command can be used to copy segment files from one location to another. + +This command takes the following form: + +```shell + snapshots copy <source> <destination> +``` + +Optionally a `<start block>` and an `<end block>` may be specified to limit the scope of the operation. + +## verify - verify snapshots + +-- TBD + +## manifest - manage the manifest file in the root of remote snapshot locations + +The `manifest` command supports the following actions: + +| Action | Description | |--------|-------------| | list | list manifest from storage location | | update | update the manifest to match the files available at its storage location | | verify | verify that manifest matches the files available at its storage location | + +All actions take a `<location>` argument which specifies the remote location that contains the manifest. + +Optionally a `<start block>` and an `<end block>` may be specified to limit the scope of the operation. + +## torrent - manage snapshot torrent files + +The `torrent` command supports the following actions: + +| Action | Description | |--------|-------------| | list | list torrents available at the specified storage location | | hashes | list the hashes (in toml format) at the specified storage location | | update | re-create the torrents for the contents available at its storage location | | verify | verify that manifest contents are available at its storage location | + +All actions take a `<location>` argument which specifies the remote location that contains the torrents.
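+ +For example, to list the hashes for a mainnet snapshot bucket limited to a block range (the `r2` remote name below is illustrative - substitute any rclone remote configured for a bucket following the `erigon-v<n>-snapshots-<chain>` naming convention used by the locator parser): + +```shell + snapshots torrent hashes r2:erigon-v2-snapshots-mainnet 500000 1000000 +```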
+ +Optionally a `<start block>` and an `<end block>` may be specified to limit the scope of the operation. + + + + + diff --git a/cmd/snapshots/cmp/cmp.go b/cmd/snapshots/cmp/cmp.go new file mode 100644 index 00000000000..2ba6e0fde47 --- /dev/null +++ b/cmd/snapshots/cmp/cmp.go @@ -0,0 +1,788 @@ +package cmp + +import ( + "bytes" + "context" + "fmt" + "io/fs" + "os" + "path/filepath" + "strconv" + "sync/atomic" + "time" + + "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/downloader" + "github.com/ledgerwatch/erigon-lib/downloader/snaptype" + "github.com/ledgerwatch/erigon/cmd/snapshots/flags" + "github.com/ledgerwatch/erigon/cmd/snapshots/sync" + "github.com/ledgerwatch/erigon/cmd/utils" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/turbo/logging" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" + "github.com/ledgerwatch/log/v3" + "github.com/urfave/cli/v2" + "golang.org/x/sync/errgroup" +) + +var Command = cli.Command{ + Action: cmp, + Name: "cmp", + Usage: "Compare snapshot segments", + ArgsUsage: "<location> <location> [<start block> [<end block>]]", + Flags: []cli.Flag{ + &flags.SegTypes, + &utils.DataDirFlag, + &logging.LogVerbosityFlag, + &logging.LogConsoleVerbosityFlag, + &logging.LogDirVerbosityFlag, + &utils.WebSeedsFlag, + &utils.NATFlag, + &utils.DisableIPV6, + &utils.DisableIPV4, + &utils.TorrentDownloadRateFlag, + &utils.TorrentUploadRateFlag, + &utils.TorrentVerbosityFlag, + &utils.TorrentPortFlag, + &utils.TorrentMaxPeersFlag, + &utils.TorrentConnsPerFileFlag, + }, + Description: ``, +} + +func cmp(cliCtx *cli.Context) error { + + logger := sync.Logger(cliCtx.Context) + + var loc1, loc2 *sync.Locator + + var rcCli *downloader.RCloneClient + var torrentCli *sync.TorrentClient + + dataDir := cliCtx.String(utils.DataDirFlag.Name) + var tempDir string + + if len(dataDir) == 0 { + dataDir, err := os.MkdirTemp("", "snapshot-cpy-") + if err != nil { + return err + } + tempDir = dataDir + defer os.RemoveAll(dataDir) + } else { + tempDir = filepath.Join(dataDir, "temp") + + if err := os.MkdirAll(tempDir, 0755); err != nil { + return err + } + } + + cliCtx.Context = sync.WithTempDir(cliCtx.Context, tempDir) + + var err error + + checkRemote := func(src string) error { + if rcCli == nil { + rcCli, err = downloader.NewRCloneClient(logger) + + if err != nil { + return err + } + } + + return sync.CheckRemote(rcCli, src) + } + + var chain string + + pos := 0 + + if cliCtx.Args().Len() > pos { + val := cliCtx.Args().Get(pos) + + if loc1, err = sync.ParseLocator(val); err != nil { + return err + } + + switch loc1.LType { + case sync.RemoteFs: + if err = checkRemote(loc1.Src); err != nil { + return err + } + + chain = loc1.Chain + } + } + + pos++ + + if cliCtx.Args().Len() > pos { + val := cliCtx.Args().Get(pos) + + if loc2, err = sync.ParseLocator(val); err != nil { + return err + } + + switch loc2.LType { + case sync.RemoteFs: + if err = checkRemote(loc2.Src); err != nil { + return err + } + + chain = loc2.Chain + } + + pos++ + } + + if loc1.LType == sync.TorrentFs || loc2.LType == sync.TorrentFs { + torrentCli, err = sync.NewTorrentClient(cliCtx, chain) + if err != nil { + return fmt.Errorf("can't create torrent: %w", err) + } + } + + typeValues := cliCtx.StringSlice(flags.SegTypes.Name) + snapTypes := make([]snaptype.Type, 0, len(typeValues)) + + for _, val := range typeValues { + segType, ok := snaptype.ParseFileType(val) + + if !ok { + return fmt.Errorf("unknown file type: %s", val) + } + + snapTypes = append(snapTypes, segType) + } + + var firstBlock, lastBlock uint64 + + // the optional block range follows the two locator arguments + if cliCtx.Args().Len() > pos { + if firstBlock, err = strconv.ParseUint(cliCtx.Args().Get(pos), 10, 64); err != nil { + return err + } + + pos++ + } + + if cliCtx.Args().Len() > pos { + if lastBlock, err = strconv.ParseUint(cliCtx.Args().Get(pos), 10, 64); err != nil { + return err + } + } + + var session1 sync.DownloadSession + var session2 sync.DownloadSession + + if rcCli != nil { + if loc1.LType == sync.RemoteFs { + session1, err = rcCli.NewSession(cliCtx.Context, filepath.Join(tempDir, "l1"), loc1.Src+":"+loc1.Root) + + if err != nil { + return err + } + } + + if loc2.LType == sync.RemoteFs { + session2, err = rcCli.NewSession(cliCtx.Context, filepath.Join(tempDir, "l2"), loc2.Src+":"+loc2.Root) + + if err != nil { + return err + } + } + } + + if torrentCli != nil { + if loc1.LType == sync.TorrentFs { + session1 = sync.NewTorrentSession(torrentCli, chain) + } + + if loc2.LType == sync.TorrentFs { + session2 = sync.NewTorrentSession(torrentCli, chain) + } + } + + if session1 == nil { + return fmt.Errorf("no first session established") + } + + if session2 == nil { + return fmt.Errorf("no second session established") + } + + logger.Info(fmt.Sprintf("Starting compare: %s==%s", loc1.String(), loc2.String()), "first", firstBlock, "last", lastBlock, "types", snapTypes, "dir", tempDir) + + logger.Info("Reading s1 dir", "remoteFs", session1.RemoteFsRoot(), "label", session1.Label()) + files, err := sync.DownloadManifest(cliCtx.Context, session1) + + if err != nil { + files, err = session1.ReadRemoteDir(cliCtx.Context, true) + } + + if err != nil { + return err + } + + h1ents, b1ents := splitEntries(files, loc1.Version, firstBlock, lastBlock) + + logger.Info("Reading s2 dir", "remoteFs", session2.RemoteFsRoot(), "label", session2.Label()) + files, err = sync.DownloadManifest(cliCtx.Context, session2) + + if err != nil { + files, err = session2.ReadRemoteDir(cliCtx.Context, true) + } + + if err != nil { + return err + } + + h2ents, b2ents := splitEntries(files, loc2.Version, firstBlock, lastBlock) + + c := comparitor{ + chain: chain, + loc1: loc1, + loc2: loc2, + session1: session1, + session2: session2, + } + + var funcs []func(ctx context.Context) (time.Duration, time.Duration, time.Duration, error) + + bodyWorkers := 4 + headerWorkers := 4 + + if len(snapTypes) == 0 { + funcs = append(funcs, func(ctx context.Context) (time.Duration, time.Duration, time.Duration, error) { + return c.compareHeaders(ctx, h1ents, h2ents, headerWorkers, logger) + }, func(ctx context.Context) (time.Duration, time.Duration, time.Duration, error) { + return c.compareBodies(ctx, b1ents, b2ents, bodyWorkers, logger) + }) + } else { + for _, snapType := range snapTypes { + if snapType == snaptype.Headers { + funcs = append(funcs, func(ctx context.Context) (time.Duration, time.Duration, time.Duration, error) { + return c.compareHeaders(ctx, h1ents, h2ents, headerWorkers, logger) + }) + } + + if snapType == snaptype.Bodies { + funcs = append(funcs, func(ctx context.Context) (time.Duration, time.Duration, time.Duration, error) { + return c.compareBodies(ctx, b1ents, b2ents, bodyWorkers, logger) + }) + } + } + } + + if len(funcs) > 0 { + startTime := time.Now() + + var downloadTime uint64 + var indexTime uint64 + var compareTime uint64 + + g, ctx := errgroup.WithContext(cliCtx.Context) + g.SetLimit(len(funcs)) + + for _, f := range funcs { + func(ctx context.Context, f func(ctx context.Context) (time.Duration, time.Duration, 
time.Duration, error)) { + g.Go(func() error { + dt, it, ct, err := f(ctx) + + atomic.AddUint64(&downloadTime, uint64(dt)) + atomic.AddUint64(&indexTime, uint64(it)) + atomic.AddUint64(&compareTime, uint64(ct)) + + return err + }) + }(ctx, f) + } + + err = g.Wait() + + if err == nil { + logger.Info(fmt.Sprintf("Finished compare: %s==%s", loc1.String(), loc2.String()), "elapsed", time.Since(startTime), + "downloading", time.Duration(downloadTime), "indexing", time.Duration(indexTime), "comparing", time.Duration(compareTime)) + } else { + logger.Info(fmt.Sprintf("Failed compare: %s==%s", loc1.String(), loc2.String()), "err", err, "elapsed", time.Since(startTime), + "downloading", time.Duration(downloadTime), "indexing", time.Duration(indexTime), "comparing", time.Duration(compareTime)) + } + + } + return nil +} + +type BodyEntry struct { + From, To uint64 + Body, Transactions fs.DirEntry +} + +func splitEntries(files []fs.DirEntry, version uint8, firstBlock, lastBlock uint64) (hents []fs.DirEntry, bents []*BodyEntry) { + for _, ent := range files { + if info, err := ent.Info(); err == nil { + if snapInfo, ok := info.Sys().(downloader.SnapInfo); ok && snapInfo.Version() > 0 { + if version == snapInfo.Version() && + (firstBlock == 0 || snapInfo.From() >= firstBlock) && + (lastBlock == 0 || snapInfo.From() < lastBlock) { + + if snapInfo.Type() == snaptype.Headers { + hents = append(hents, ent) + } + + if snapInfo.Type() == snaptype.Bodies { + found := false + + for _, bent := range bents { + if snapInfo.From() == bent.From && + snapInfo.To() == bent.To { + bent.Body = ent + found = true + } + } + + if !found { + bents = append(bents, &BodyEntry{snapInfo.From(), snapInfo.To(), ent, nil}) + } + } + + if snapInfo.Type() == snaptype.Transactions { + found := false + + for _, bent := range bents { + if snapInfo.From() == bent.From && + snapInfo.To() == bent.To { + bent.Transactions = ent + found = true + + } + } + + if !found { + bents = append(bents, &BodyEntry{snapInfo.From(), snapInfo.To(), nil, ent}) + } + } + } + } + } + } + + return hents, bents +} + +type comparitor struct { + chain string + loc1, loc2 *sync.Locator + session1 sync.DownloadSession + session2 sync.DownloadSession +} + +func (c comparitor) chainConfig() *chain.Config { + return params.ChainConfigByChainName(c.chain) +} + +func (c comparitor) compareHeaders(ctx context.Context, f1ents []fs.DirEntry, f2ents []fs.DirEntry, workers int, logger log.Logger) (time.Duration, time.Duration, time.Duration, error) { + var downloadTime uint64 + var compareTime uint64 + + g, ctx := errgroup.WithContext(ctx) + g.SetLimit(workers) + + for i1, ent1 := range f1ents { + var snapInfo1 downloader.SnapInfo + + if info, err := ent1.Info(); err == nil { + snapInfo1, _ = info.Sys().(downloader.SnapInfo) + } + + if snapInfo1 == nil { + continue + } + + for i2, ent2 := range f2ents { + + var snapInfo2 downloader.SnapInfo + + ent2Info, err := ent2.Info() + + if err == nil { + snapInfo2, _ = ent2Info.Sys().(downloader.SnapInfo) + } + + if snapInfo2 == nil || + snapInfo1.Type() != snapInfo2.Type() || + snapInfo1.From() != snapInfo2.From() || + snapInfo1.To() != snapInfo2.To() { + continue + } + + i1, i2, ent1, ent2 := i1, i2, ent1, ent2 + + g.Go(func() error { + g, gctx := errgroup.WithContext(ctx) + g.SetLimit(2) + + g.Go(func() error { + logger.Info(fmt.Sprintf("Downloading %s", ent1.Name()), "entry", fmt.Sprint(i1+1, "/", len(f1ents))) + startTime := time.Now() + defer func() { + atomic.AddUint64(&downloadTime, uint64(time.Since(startTime))) + }() + + 
err := c.session1.Download(gctx, ent1.Name()) + + if err != nil { + return err + } + + return nil + }) + + g.Go(func() error { + startTime := time.Now() + defer func() { + atomic.AddUint64(&downloadTime, uint64(time.Since(startTime))) + }() + + logger.Info(fmt.Sprintf("Downloading %s", ent2.Name()), "entry", fmt.Sprint(i2+1, "/", len(f2ents)), "size", datasize.ByteSize(ent2Info.Size())) + err := c.session2.Download(gctx, ent2.Name()) + + if err != nil { + return err + } + + return nil + }) + + if err := g.Wait(); err != nil { + return err + } + + f1snaps := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{ + Enabled: true, + Produce: false, + NoDownloader: true, + }, c.session1.LocalFsRoot(), c.loc1.Version, logger) + + f1snaps.ReopenList([]string{ent1.Name()}, false) + + f2snaps := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{ + Enabled: true, + Produce: false, + NoDownloader: true, + }, c.session2.LocalFsRoot(), c.loc2.Version, logger) + + f2snaps.ReopenList([]string{ent2.Name()}, false) + + err = func() error { + logger.Info(fmt.Sprintf("Comparing %s %s", ent1.Name(), ent2.Name())) + startTime := time.Now() + + defer func() { + atomic.AddUint64(&compareTime, uint64(time.Since(startTime))) + }() + + blockReader1 := freezeblocks.NewBlockReader(f1snaps, nil) + blockReader2 := freezeblocks.NewBlockReader(f2snaps, nil) + + g, gctx = errgroup.WithContext(ctx) + g.SetLimit(2) + + h2chan := make(chan *types.Header) + + g.Go(func() error { + blockReader2.HeadersRange(gctx, func(h2 *types.Header) error { + select { + case h2chan <- h2: + return nil + case <-gctx.Done(): + return gctx.Err() + } + }) + + close(h2chan) + return nil + }) + + g.Go(func() error { + err := blockReader1.HeadersRange(gctx, func(h1 *types.Header) error { + select { + case h2 := <-h2chan: + if h2 == nil { + return fmt.Errorf("header %d unknown", h1.Number.Uint64()) + } + + if h1.Number.Uint64() != h2.Number.Uint64() { + return fmt.Errorf("mismatched headers: expected %d, Got: %d", h1.Number.Uint64(), h2.Number.Uint64()) + } + + var h1buf, h2buf bytes.Buffer + + h1.EncodeRLP(&h1buf) + h2.EncodeRLP(&h2buf) + + if !bytes.Equal(h1buf.Bytes(), h2buf.Bytes()) { + return fmt.Errorf("%d: headers do not match", h1.Number.Uint64()) + } + + return nil + case <-gctx.Done(): + return gctx.Err() + } + }) + + return err + }) + + return g.Wait() + }() + + files := f1snaps.OpenFiles() + f1snaps.Close() + + files = append(files, f2snaps.OpenFiles()...) 
+ f2snaps.Close() + + for _, file := range files { + os.Remove(file) + } + + return err + }) + } + } + + err := g.Wait() + + return time.Duration(downloadTime), 0, time.Duration(compareTime), err +} + +func (c comparitor) compareBodies(ctx context.Context, f1ents []*BodyEntry, f2ents []*BodyEntry, workers int, logger log.Logger) (time.Duration, time.Duration, time.Duration, error) { + var downloadTime uint64 + var indexTime uint64 + var compareTime uint64 + + g, ctx := errgroup.WithContext(ctx) + g.SetLimit(workers) + + for i1, ent1 := range f1ents { + for i2, ent2 := range f2ents { + if ent1.From != ent2.From || + ent1.To != ent2.To { + continue + } + + i1, i2, ent1, ent2 := i1, i2, ent1, ent2 + + g.Go(func() error { + g, ctx := errgroup.WithContext(ctx) + g.SetLimit(4) + + b1err := make(chan error, 1) + + g.Go(func() error { + + err := func() error { + startTime := time.Now() + + defer func() { + atomic.AddUint64(&downloadTime, uint64(time.Since(startTime))) + }() + + logger.Info(fmt.Sprintf("Downloading %s", ent1.Body.Name()), "entry", fmt.Sprint(i1+1, "/", len(f1ents))) + return c.session1.Download(ctx, ent1.Body.Name()) + }() + + b1err <- err + + if err != nil { + return fmt.Errorf("can't download %s: %w", ent1.Body.Name(), err) + } + + startTime := time.Now() + + defer func() { + atomic.AddUint64(&indexTime, uint64(time.Since(startTime))) + }() + + logger.Info(fmt.Sprintf("Indexing %s", ent1.Body.Name())) + return freezeblocks.BodiesIdx(ctx, + filepath.Join(c.session1.LocalFsRoot(), ent1.Body.Name()), ent1.From, c.session1.LocalFsRoot(), nil, log.LvlDebug, logger) + }) + + g.Go(func() error { + err := func() error { + startTime := time.Now() + + defer func() { + atomic.AddUint64(&downloadTime, uint64(time.Since(startTime))) + }() + logger.Info(fmt.Sprintf("Downloading %s", ent1.Transactions.Name()), "entry", fmt.Sprint(i1+1, "/", len(f1ents))) + return c.session1.Download(ctx, ent1.Transactions.Name()) + }() + + if err != nil { + return fmt.Errorf("can't download %s: %w", ent1.Transactions.Name(), err) + } + + select { + case <-ctx.Done(): + return ctx.Err() + case err = <-b1err: + if err != nil { + return fmt.Errorf("can't create transaction index: no bodies: %w", err) + } + } + + startTime := time.Now() + + defer func() { + atomic.AddUint64(&indexTime, uint64(time.Since(startTime))) + }() + + logger.Info(fmt.Sprintf("Indexing %s", ent1.Transactions.Name())) + return freezeblocks.TransactionsIdx(ctx, c.chainConfig(), c.loc1.Version, ent1.From, ent1.To, + c.session1.LocalFsRoot(), c.session1.LocalFsRoot(), nil, log.LvlDebug, logger) + }) + + b2err := make(chan error, 1) + + g.Go(func() error { + err := func() error { + startTime := time.Now() + + defer func() { + atomic.AddUint64(&downloadTime, uint64(time.Since(startTime))) + }() + + logger.Info(fmt.Sprintf("Downloading %s", ent2.Body.Name()), "entry", fmt.Sprint(i2+1, "/", len(f2ents))) + return c.session2.Download(ctx, ent2.Body.Name()) + }() + + b2err <- err + + if err != nil { + return fmt.Errorf("can't download %s: %w", ent2.Body.Name(), err) + } + + startTime := time.Now() + + defer func() { + atomic.AddUint64(&indexTime, uint64(time.Since(startTime))) + }() + + logger.Info(fmt.Sprintf("Indexing %s", ent2.Body.Name())) + return freezeblocks.BodiesIdx(ctx, + filepath.Join(c.session2.LocalFsRoot(), ent2.Body.Name()), ent2.From, c.session1.LocalFsRoot(), nil, log.LvlDebug, logger) + }) + + g.Go(func() error { + err := func() error { + startTime := time.Now() + + defer func() { + atomic.AddUint64(&downloadTime, 
uint64(time.Since(startTime))) + }() + logger.Info(fmt.Sprintf("Downloading %s", ent2.Transactions.Name()), "entry", fmt.Sprint(i2+1, "/", len(f2ents))) + return c.session2.Download(ctx, ent2.Transactions.Name()) + }() + + if err != nil { + return fmt.Errorf("can't download %s: %w", ent2.Transactions.Name(), err) + } + + select { + case <-ctx.Done(): + return ctx.Err() + case err = <-b2err: + if err != nil { + return fmt.Errorf("can't create transaction index: no bodies: %w", err) + } + } + + startTime := time.Now() + + defer func() { + atomic.AddUint64(&indexTime, uint64(time.Since(startTime))) + }() + + logger.Info(fmt.Sprintf("Indexing %s", ent2.Transactions.Name())) + return freezeblocks.TransactionsIdx(ctx, c.chainConfig(), c.loc2.Version, ent2.From, ent2.To, + c.session2.LocalFsRoot(), c.session2.LocalFsRoot(), nil, log.LvlDebug, logger) + }) + + if err := g.Wait(); err != nil { + return err + } + + f1snaps := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{ + Enabled: true, + Produce: false, + NoDownloader: true, + }, c.session1.LocalFsRoot(), c.loc1.Version, logger) + + f1snaps.ReopenList([]string{ent1.Body.Name(), ent1.Transactions.Name()}, false) + + f2snaps := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{ + Enabled: true, + Produce: false, + NoDownloader: true, + }, c.session2.LocalFsRoot(), c.loc2.Version, logger) + + f2snaps.ReopenList([]string{ent2.Body.Name(), ent2.Transactions.Name()}, false) + + err := func() error { + logger.Info(fmt.Sprintf("Comparing %s %s", ent1.Body.Name(), ent2.Body.Name())) + + startTime := time.Now() + + defer func() { + atomic.AddUint64(&compareTime, uint64(time.Since(startTime))) + }() + + blockReader1 := freezeblocks.NewBlockReader(f1snaps, nil) + blockReader2 := freezeblocks.NewBlockReader(f2snaps, nil) + + return func() error { + for i := ent1.From; i < ent1.To; i++ { + body1, err := blockReader1.BodyWithTransactions(ctx, nil, common.Hash{}, i) + + if err != nil { + return fmt.Errorf("%d: can't get body 1: %w", i, err) + } + + body2, err := blockReader2.BodyWithTransactions(ctx, nil, common.Hash{}, i) + + if err != nil { + return fmt.Errorf("%d: can't get body 2: %w", i, err) + } + + var b1buf, b2buf bytes.Buffer + + body1.EncodeRLP(&b1buf) + body2.EncodeRLP(&b2buf) + + if !bytes.Equal(b1buf.Bytes(), b2buf.Bytes()) { + return fmt.Errorf("%d: bodies do not match", i) + } + } + + return nil + }() + }() + + files := f1snaps.OpenFiles() + f1snaps.Close() + + files = append(files, f2snaps.OpenFiles()...) 
+ f2snaps.Close() + + for _, file := range files { + os.Remove(file) + } + + return err + }) + } + } + + err := g.Wait() + + return time.Duration(downloadTime), time.Duration(indexTime), time.Duration(compareTime), err +} diff --git a/cmd/snapshots/copy/copy.go b/cmd/snapshots/copy/copy.go new file mode 100644 index 00000000000..4faebc1c6bc --- /dev/null +++ b/cmd/snapshots/copy/copy.go @@ -0,0 +1,333 @@ +package copy + +import ( + "context" + "fmt" + "io/fs" + "path/filepath" + "strconv" + "strings" + + "github.com/ledgerwatch/erigon-lib/downloader" + "github.com/ledgerwatch/erigon-lib/downloader/snaptype" + "github.com/ledgerwatch/erigon/cmd/snapshots/flags" + "github.com/ledgerwatch/erigon/cmd/snapshots/sync" + "github.com/ledgerwatch/erigon/cmd/utils" + "github.com/ledgerwatch/erigon/turbo/logging" + "github.com/urfave/cli/v2" +) + +var ( + TorrentsFlag = cli.BoolFlag{ + Name: "torrents", + Usage: `Include torrent files in copy`, + Required: false, + } + + HashesFlag = cli.BoolFlag{ + Name: "hashes", + Usage: `Include hash .toml in copy`, + Required: false, + } + + ManifestFlag = cli.BoolFlag{ + Name: "manifest", + Usage: `Include manifest .txt in copy`, + Required: false, + } + + VersionFlag = cli.IntFlag{ + Name: "version", + Usage: `File versions to copy`, + Required: false, + Value: 0, + } +) + +var Command = cli.Command{ + Action: copy, + Name: "copy", + Usage: "copy snapshot segments", + ArgsUsage: "<source> <destination>", + Flags: []cli.Flag{ + &VersionFlag, + &flags.SegTypes, + &TorrentsFlag, + &HashesFlag, + &ManifestFlag, + &utils.DataDirFlag, + &logging.LogVerbosityFlag, + &logging.LogConsoleVerbosityFlag, + &logging.LogDirVerbosityFlag, + &utils.WebSeedsFlag, + &utils.NATFlag, + &utils.DisableIPV6, + &utils.DisableIPV4, + &utils.TorrentDownloadRateFlag, + &utils.TorrentUploadRateFlag, + &utils.TorrentVerbosityFlag, + &utils.TorrentPortFlag, + &utils.TorrentMaxPeersFlag, + &utils.TorrentConnsPerFileFlag, + }, + Description: ``, +} + +func copy(cliCtx *cli.Context) error { + logger := sync.Logger(cliCtx.Context) + + logger.Info("Starting copy") + + var src, dst *sync.Locator + var err error + + var rcCli *downloader.RCloneClient + var torrentCli *sync.TorrentClient + + pos := 0 + + if cliCtx.Args().Len() > pos { + val := cliCtx.Args().Get(pos) + + if src, err = sync.ParseLocator(val); err != nil { + return err + } + } + + pos++ + + if cliCtx.Args().Len() > pos { + val := cliCtx.Args().Get(pos) + + // the second positional argument is the destination locator + if dst, err = sync.ParseLocator(val); err != nil { + return err + } + + pos++ + } + + // guard against missing arguments before the locators are dereferenced + if src == nil || dst == nil { + return fmt.Errorf("both a source and a destination location are required") + } + + switch dst.LType { + case sync.TorrentFs: + return fmt.Errorf("can't copy to torrent - need intermediate local fs") + + case sync.RemoteFs: + if rcCli == nil { + rcCli, err = downloader.NewRCloneClient(logger) + + if err != nil { + return err + } + } + + // validate the destination remote + if err = sync.CheckRemote(rcCli, dst.Src); err != nil { + return err + } + } + + switch src.LType { + case sync.TorrentFs: + torrentCli, err = sync.NewTorrentClient(cliCtx, dst.Chain) + if err != nil { + return fmt.Errorf("can't create torrent: %w", err) + } + + case sync.RemoteFs: + if rcCli == nil { + rcCli, err = downloader.NewRCloneClient(logger) + + if err != nil { + return err + } + } + + if err = sync.CheckRemote(rcCli, src.Src); err != nil { + return err + } + } + + typeValues := cliCtx.StringSlice(flags.SegTypes.Name) + snapTypes := make([]snaptype.Type, 0, len(typeValues)) + + for _, val := range typeValues { + segType, ok := snaptype.ParseFileType(val) + + if !ok { + return fmt.Errorf("unknown file type: %s", val) + } + + snapTypes = append(snapTypes, 
segType) + } + + torrents := cliCtx.Bool(TorrentsFlag.Name) + hashes := cliCtx.Bool(HashesFlag.Name) + manifest := cliCtx.Bool(ManifestFlag.Name) + + var firstBlock, lastBlock uint64 + + version := cliCtx.Int(VersionFlag.Name) + + if version != 0 { + dst.Version = uint8(version) + } + + if cliCtx.Args().Len() > pos { + if firstBlock, err = strconv.ParseUint(cliCtx.Args().Get(pos), 10, 64); err != nil { + return err + } + + pos++ + } + + if cliCtx.Args().Len() > pos { + if lastBlock, err = strconv.ParseUint(cliCtx.Args().Get(pos), 10, 64); err != nil { + return err + } + } + + switch src.LType { + case sync.LocalFs: + switch dst.LType { + case sync.LocalFs: + return localToLocal(src, dst, firstBlock, lastBlock, snapTypes, torrents, hashes, manifest) + case sync.RemoteFs: + return localToRemote(rcCli, src, dst, firstBlock, lastBlock, snapTypes, torrents, hashes, manifest) + default: + return fmt.Errorf("unhandled torrent destination: %s", dst) + } + + case sync.RemoteFs: + switch dst.LType { + case sync.LocalFs: + return remoteToLocal(cliCtx.Context, rcCli, src, dst, firstBlock, lastBlock, snapTypes, torrents, hashes, manifest) + case sync.RemoteFs: + return remoteToRemote(rcCli, src, dst, firstBlock, lastBlock, snapTypes, torrents, hashes, manifest) + default: + return fmt.Errorf("unhandled torrent destination: %s", dst) + } + + case sync.TorrentFs: + switch dst.LType { + case sync.LocalFs: + return torrentToLocal(torrentCli, src, dst, firstBlock, lastBlock, snapTypes, torrents, hashes, manifest) + case sync.RemoteFs: + return torrentToRemote(torrentCli, rcCli, src, dst, firstBlock, lastBlock, snapTypes, torrents, hashes, manifest) + default: + return fmt.Errorf("unhandled torrent destination: %s", dst) + } + + } + return nil +} + +func torrentToLocal(torrentCli *sync.TorrentClient, src *sync.Locator, dst *sync.Locator, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { + return fmt.Errorf("TODO") +} + +func torrentToRemote(torrentCli *sync.TorrentClient, rcCli *downloader.RCloneClient, src *sync.Locator, dst *sync.Locator, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { + return fmt.Errorf("TODO") +} + +func localToRemote(rcCli *downloader.RCloneClient, src *sync.Locator, dst *sync.Locator, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { + return fmt.Errorf("TODO") +} + +func localToLocal(src *sync.Locator, dst *sync.Locator, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { + return fmt.Errorf("TODO") +} + +func remoteToLocal(ctx context.Context, rcCli *downloader.RCloneClient, src *sync.Locator, dst *sync.Locator, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { + logger := sync.Logger(ctx) + + if rcCli == nil { + return fmt.Errorf("no remote downloader") + } + + session, err := rcCli.NewSession(ctx, dst.Root, src.Src+":"+src.Root) + + if err != nil { + return err + } + + logger.Info("Reading src dir", "remoteFs", session.RemoteFsRoot(), "label", session.Label()) + fileEntries, err := session.ReadRemoteDir(ctx, true) + + if err != nil { + return err + } + + files := selectFiles(fileEntries, dst.Version, from, to, snapTypes, torrents, hashes, manifest) + + logger.Info(fmt.Sprintf("Downloading %s", files)) + + return session.Download(ctx, files...) 
+} + +func remoteToRemote(rcCli *downloader.RCloneClient, src *sync.Locator, dst *sync.Locator, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { + return fmt.Errorf("TODO") +} + +type sinf struct { + snaptype.FileInfo +} + +func (i sinf) Version() uint8 { + return i.FileInfo.Version +} + +func (i sinf) From() uint64 { + return i.FileInfo.From +} + +func (i sinf) To() uint64 { + return i.FileInfo.To +} + +func (i sinf) Type() snaptype.Type { + return i.FileInfo.T +} + +func selectFiles(entries []fs.DirEntry, version uint8, firstBlock, lastBlock uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) []string { + var files []string + + for _, ent := range entries { + if info, err := ent.Info(); err == nil { + snapInfo, _ := info.Sys().(downloader.SnapInfo) + + if torrents { + if ext := filepath.Ext(info.Name()); ext == ".torrent" { + fileName := strings.TrimSuffix(info.Name(), ".torrent") + + if fileInfo, ok := snaptype.ParseFileName("", fileName); ok { + snapInfo = sinf{fileInfo} + } + } + } + + switch { + case snapInfo != nil && snapInfo.Type() != snaptype.Unknown: + if (version == 0 || version == snapInfo.Version()) && + (firstBlock == 0 || snapInfo.From() >= firstBlock) && + (lastBlock == 0 || snapInfo.From() < lastBlock) { + + if len(snapTypes) == 0 { + files = append(files, info.Name()) + } else { + for _, snapType := range snapTypes { + if snapType == snapInfo.Type() { + files = append(files, info.Name()) + break + } + } + } + } + + // manifest and hash entries are placeholders - selection for them is not implemented yet + case manifest: + + case hashes: + + } + } + } + + return files +} diff --git a/cmd/snapshots/flags/flags.go b/cmd/snapshots/flags/flags.go new file mode 100644 index 00000000000..b905ffa1cc0 --- /dev/null +++ b/cmd/snapshots/flags/flags.go @@ -0,0 +1,11 @@ +package flags + +import "github.com/urfave/cli/v2" + +var ( + SegTypes = cli.StringSliceFlag{ + Name: "types", + Usage: `Segment types to compare, e.g. headers,bodies,transactions`, + Required: false, + } +) diff --git a/cmd/snapshots/main.go b/cmd/snapshots/main.go new file mode 100644 index 00000000000..47e2f447616 --- /dev/null +++ b/cmd/snapshots/main.go @@ -0,0 +1,112 @@ +package main + +import ( + "context" + "fmt" + "os" + "os/signal" + "path/filepath" + "syscall" + + "github.com/ledgerwatch/erigon/cmd/snapshots/cmp" + "github.com/ledgerwatch/erigon/cmd/snapshots/copy" + "github.com/ledgerwatch/erigon/cmd/snapshots/manifest" + "github.com/ledgerwatch/erigon/cmd/snapshots/sync" + "github.com/ledgerwatch/erigon/cmd/snapshots/torrents" + "github.com/ledgerwatch/erigon/cmd/snapshots/verify" + "github.com/ledgerwatch/erigon/cmd/utils" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/turbo/debug" + "github.com/ledgerwatch/erigon/turbo/logging" + "github.com/ledgerwatch/log/v3" + "github.com/urfave/cli/v2" +) + +func main() { + logging.LogVerbosityFlag.Value = log.LvlError.String() + logging.LogConsoleVerbosityFlag.Value = log.LvlError.String() + + app := cli.NewApp() + app.Name = "snapshots" + app.Version = params.VersionWithCommit(params.GitCommit) + + app.Commands = []*cli.Command{ + &cmp.Command, + &copy.Command, + &verify.Command, + &torrents.Command, + &manifest.Command, + } + + app.Flags = []cli.Flag{} + + app.UsageText = app.Name + ` [command] [flags]` + + app.Action = func(context *cli.Context) error { + if context.Args().Present() { + var goodNames []string + for _, c := range app.VisibleCommands() { + goodNames = append(goodNames, c.Name) + } + _, _ = fmt.Fprintf(os.Stderr, "Command '%s' not found. 
Available commands: %s\n", context.Args().First(), goodNames) + cli.ShowAppHelpAndExit(context, 1) + } + + return nil + } + + for _, command := range app.Commands { + command.Before = func(ctx *cli.Context) error { + debug.RaiseFdLimit() + + logger, err := setupLogger(ctx) + + if err != nil { + return err + } + + var cancel context.CancelFunc + + ctx.Context, cancel = context.WithCancel(sync.WithLogger(ctx.Context, logger)) + + go handleTerminationSignals(cancel, logger) + + return nil + } + } + + if err := app.Run(os.Args); err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func setupLogger(ctx *cli.Context) (log.Logger, error) { + dataDir := ctx.String(utils.DataDirFlag.Name) + + if len(dataDir) > 0 { + logsDir := filepath.Join(dataDir, "logs") + + if err := os.MkdirAll(logsDir, 0755); err != nil { + return nil, err + } + } + + logger := logging.SetupLoggerCtx("snapshots-"+ctx.Command.Name, ctx, log.LvlError, log.LvlInfo, false) + + return logger, nil +} + +func handleTerminationSignals(stopFunc func(), logger log.Logger) { + signalCh := make(chan os.Signal, 1) + signal.Notify(signalCh, syscall.SIGTERM, syscall.SIGINT) + + switch s := <-signalCh; s { + case syscall.SIGTERM: + logger.Info("Stopping") + stopFunc() + case syscall.SIGINT: + logger.Info("Terminating") + os.Exit(-int(syscall.SIGINT)) + } +} diff --git a/cmd/snapshots/manifest/manifest.go b/cmd/snapshots/manifest/manifest.go new file mode 100644 index 00000000000..54e803fb0c2 --- /dev/null +++ b/cmd/snapshots/manifest/manifest.go @@ -0,0 +1,365 @@ +package manifest + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io/fs" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/ledgerwatch/erigon-lib/downloader" + "github.com/ledgerwatch/erigon-lib/downloader/snaptype" + "github.com/ledgerwatch/erigon/cmd/snapshots/sync" + "github.com/ledgerwatch/erigon/cmd/utils" + "github.com/ledgerwatch/erigon/turbo/logging" + "github.com/urfave/cli/v2" +) + +var ( + VersionFlag = cli.IntFlag{ + Name: "version", + Usage: `Manifest file versions`, + Required: false, + Value: 0, + } +) + +var Command = cli.Command{ + Action: func(cliCtx *cli.Context) error { + return manifest(cliCtx, "list") + }, + Name: "manifest", + Usage: "manifest utilities", + Subcommands: []*cli.Command{ + { + Action: func(cliCtx *cli.Context) error { + return manifest(cliCtx, "list") + }, + Name: "list", + Usage: "list manifest from storage location", + ArgsUsage: "", + }, + { + Action: func(cliCtx *cli.Context) error { + return manifest(cliCtx, "update") + }, + Name: "update", + Usage: "update the manifest to match the files available at its storage location", + ArgsUsage: "", + }, + { + Action: func(cliCtx *cli.Context) error { + return manifest(cliCtx, "verify") + }, + Name: "verify", + Usage: "verify that manifest matches the files available at its storage location", + ArgsUsage: "", + }, + }, + Flags: []cli.Flag{ + &VersionFlag, + &utils.DataDirFlag, + &logging.LogVerbosityFlag, + &logging.LogConsoleVerbosityFlag, + &logging.LogDirVerbosityFlag, + }, + Description: ``, +} + +func manifest(cliCtx *cli.Context, command string) error { + logger := sync.Logger(cliCtx.Context) + + var src *sync.Locator + var err error + + var rcCli *downloader.RCloneClient + + pos := 0 + + if cliCtx.Args().Len() == 0 { + return fmt.Errorf("missing manifest location") + } + + arg := cliCtx.Args().Get(pos) + + if src, err = sync.ParseLocator(arg); err != nil { + return err + } + + switch src.LType { + case sync.RemoteFs: + if rcCli == nil { + 
rcCli, err = downloader.NewRCloneClient(logger) + + if err != nil { + return err + } + } + + if err = sync.CheckRemote(rcCli, src.Src); err != nil { + return err + } + } + + var srcSession *downloader.RCloneSession + + tempDir, err := os.MkdirTemp("", "snapshot-manifest-") + + if err != nil { + return err + } + + defer os.RemoveAll(tempDir) + + if rcCli != nil { + if src != nil && src.LType == sync.RemoteFs { + srcSession, err = rcCli.NewSession(cliCtx.Context, tempDir, src.Src+":"+src.Root) + + if err != nil { + return err + } + } + } + + if src != nil && srcSession == nil { + return fmt.Errorf("no src session established") + } + + logger.Debug("Starting manifest " + command) + + var version *uint8 + + if val := cliCtx.Int(VersionFlag.Name); val != 0 { + v := uint8(val) + version = &v + } + + switch command { + case "update": + return updateManifest(cliCtx.Context, tempDir, srcSession, version) + case "verify": + return verifyManifest(cliCtx.Context, srcSession, version, os.Stdout) + default: + return listManifest(cliCtx.Context, srcSession, os.Stdout) + } +} + +func listManifest(ctx context.Context, srcSession *downloader.RCloneSession, out *os.File) error { + entries, err := DownloadManifest(ctx, srcSession) + + if err != nil { + return err + } + + for _, fi := range entries { + fmt.Fprintln(out, fi.Name()) + } + + return nil +} + +func updateManifest(ctx context.Context, tmpDir string, srcSession *downloader.RCloneSession, version *uint8) error { + entities, err := srcSession.ReadRemoteDir(ctx, true) + + if err != nil { + return err + } + + manifestFile := "manifest.txt" + + fileMap := map[string]string{} + torrentMap := map[string]string{} + + for _, fi := range entities { + var file string + var files map[string]string + + if filepath.Ext(fi.Name()) == ".torrent" { + file = strings.TrimSuffix(fi.Name(), ".torrent") + files = torrentMap + } else { + file = fi.Name() + files = fileMap + } + + info, ok := snaptype.ParseFileName("", file) + + if !ok || (version != nil && *version != info.Version) { + continue + } + + files[file] = fi.Name() + } + + var files []string + + for file := range fileMap { + if torrent, ok := torrentMap[file]; ok { + files = append(files, file, torrent) + } + } + + sort.Strings(files) + + manifestEntries := bytes.Buffer{} + + for _, file := range files { + fmt.Fprintln(&manifestEntries, file) + } + + _ = os.WriteFile(filepath.Join(tmpDir, manifestFile), manifestEntries.Bytes(), 0644) + defer os.Remove(filepath.Join(tmpDir, manifestFile)) + + return srcSession.Upload(ctx, manifestFile) +} + +func verifyManifest(ctx context.Context, srcSession *downloader.RCloneSession, version *uint8, out *os.File) error { + manifestEntries, err := DownloadManifest(ctx, srcSession) + + if err != nil { + return fmt.Errorf("verification failed: can't read manifest: %w", err) + } + + manifestFiles := map[string]struct{}{} + + for _, fi := range manifestEntries { + var file string + + if filepath.Ext(fi.Name()) == ".torrent" { + file = strings.TrimSuffix(fi.Name(), ".torrent") + } else { + file = fi.Name() + } + + info, ok := snaptype.ParseFileName("", file) + + if !ok || (version != nil && *version != info.Version) { + continue + } + + manifestFiles[fi.Name()] = struct{}{} + } + + dirEntries, err := srcSession.ReadRemoteDir(ctx, true) + + if err != nil { + return fmt.Errorf("verification failed: can't read dir: %w", err) + } + + dirFiles := map[string]struct{}{} + + for _, fi := range dirEntries { + + var file string + + if filepath.Ext(fi.Name()) == ".torrent" { + file = 
strings.TrimSuffix(fi.Name(), ".torrent") + } else { + file = fi.Name() + } + + info, ok := snaptype.ParseFileName("", file) + + if !ok || (version != nil && *version != info.Version) { + continue + } + + if _, ok := manifestFiles[fi.Name()]; ok { + delete(manifestFiles, fi.Name()) + } else { + dirFiles[fi.Name()] = struct{}{} + } + } + + var missing string + var extra string + + if len(manifestFiles) != 0 { + // allocate with zero length and reserved capacity, otherwise append would leave empty leading entries + files := make([]string, 0, len(manifestFiles)) + + for file := range manifestFiles { + files = append(files, file) + } + + missing = fmt.Sprintf(": manifest files not in src: %s", files) + } + + if len(dirFiles) != 0 { + files := make([]string, 0, len(dirFiles)) + + for file := range dirFiles { + files = append(files, file) + } + + extra = fmt.Sprintf(": src files not in manifest: %s", files) + } + + if len(missing) > 0 || len(extra) != 0 { + return fmt.Errorf("manifest does not match src contents%s%s", missing, extra) + } + return nil +} + +type dirEntry struct { + name string +} + +func (e dirEntry) Name() string { + return e.name +} + +func (e dirEntry) IsDir() bool { + return false +} + +func (e dirEntry) Type() fs.FileMode { + return e.Mode() +} + +func (e dirEntry) Size() int64 { + return -1 +} + +func (e dirEntry) Mode() fs.FileMode { + return fs.ModeIrregular +} + +func (e dirEntry) ModTime() time.Time { + return time.Time{} +} + +func (e dirEntry) Sys() any { + return nil +} + +func (e dirEntry) Info() (fs.FileInfo, error) { + return e, nil +} + +func DownloadManifest(ctx context.Context, session *downloader.RCloneSession) ([]fs.DirEntry, error) { + + reader, err := session.Cat(ctx, "manifest.txt") + + if err != nil { + return nil, err + } + + var entries []fs.DirEntry + + scanner := bufio.NewScanner(reader) + + for scanner.Scan() { + entries = append(entries, dirEntry{scanner.Text()}) + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + return entries, nil +} diff --git a/cmd/snapshots/sync/context.go b/cmd/snapshots/sync/context.go new file mode 100644 index 00000000000..fce2de1215c --- /dev/null +++ b/cmd/snapshots/sync/context.go @@ -0,0 +1,38 @@ +package sync + +import ( + "context" + + "github.com/ledgerwatch/log/v3" +) + +type ctxKey int + +const ( + ckLogger ctxKey = iota + ckTempDir +) + +func WithLogger(ctx context.Context, logger log.Logger) context.Context { + return context.WithValue(ctx, ckLogger, logger) +} + +func Logger(ctx context.Context) log.Logger { + if logger, ok := ctx.Value(ckLogger).(log.Logger); ok { + return logger + } + + return log.Root() +} + +func WithTempDir(ctx context.Context, tempDir string) context.Context { + return context.WithValue(ctx, ckTempDir, tempDir) +} + +func TempDir(ctx context.Context) string { + if tempDir, ok := ctx.Value(ckTempDir).(string); ok { + return tempDir + } + + return "" +} diff --git a/cmd/snapshots/sync/sync.go b/cmd/snapshots/sync/sync.go new file mode 100644 index 00000000000..c01626f0678 --- /dev/null +++ b/cmd/snapshots/sync/sync.go @@ -0,0 +1,444 @@ +package sync + +import ( + "bufio" + "context" + "fmt" + "io/fs" + "os" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "time" + + "github.com/anacrolix/torrent" + "github.com/anacrolix/torrent/metainfo" + "github.com/anacrolix/torrent/storage" + "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/erigon-lib/chain/snapcfg" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/datadir" + "github.com/ledgerwatch/erigon-lib/downloader" + 
"github.com/ledgerwatch/erigon-lib/downloader/downloadercfg" + "github.com/ledgerwatch/erigon-lib/downloader/snaptype" + "github.com/ledgerwatch/erigon/cmd/downloader/downloadernat" + "github.com/ledgerwatch/erigon/cmd/utils" + "github.com/ledgerwatch/erigon/p2p/nat" + "github.com/ledgerwatch/erigon/params" + "github.com/urfave/cli/v2" + "golang.org/x/exp/slices" + "golang.org/x/sync/errgroup" +) + +type LType int + +const ( + TorrentFs LType = iota + LocalFs + RemoteFs +) + +type Locator struct { + LType LType + Src string + Root string + Version uint8 + Chain string +} + +func (l Locator) String() string { + var val string + + switch l.LType { + case TorrentFs: + val = "torrent" + case LocalFs: + val = l.Root + case RemoteFs: + val = l.Src + ":" + l.Root + } + + if l.Version > 0 { + val += fmt.Sprint(":v", l.Version) + } + + return val +} + +var locatorExp, _ = regexp.Compile(`^(?:(\w+)\:)?([^\:]*)(?:\:(v\d+))?`) +var srcExp, _ = regexp.Compile(`^erigon-v\d+-snapshots-(.*)$`) + +func ParseLocator(value string) (*Locator, error) { + if matches := locatorExp.FindStringSubmatch(value); len(matches) > 0 { + var loc Locator + + switch { + case matches[1] == "torrent": + loc.LType = TorrentFs + + if len(matches[2]) > 0 { + version, err := strconv.ParseUint(matches[2][1:], 10, 8) + if err != nil { + return nil, fmt.Errorf("can't parse version: %s: %w", matches[3], err) + } + + loc.Version = uint8(version) + } + + case len(matches[1]) > 0: + loc.LType = RemoteFs + loc.Src = matches[1] + loc.Root = matches[2] + + if matches := srcExp.FindStringSubmatch(loc.Root); len(matches) > 1 { + loc.Chain = matches[1] + } + + if len(matches[3]) > 0 { + version, err := strconv.ParseUint(matches[3][1:], 10, 8) + if err != nil { + return nil, fmt.Errorf("can't parse version: %s: %w", matches[3], err) + } + + loc.Version = uint8(version) + } + + default: + loc.LType = LocalFs + loc.Root = downloader.Clean(matches[2]) + } + + return &loc, nil + } + + if path, err := filepath.Abs(value); err == nil { + return &Locator{ + LType: LocalFs, + Root: path, + }, nil + } + + return nil, fmt.Errorf("Invalid locator syntax") +} + +type TorrentClient struct { + *torrent.Client + cfg *torrent.ClientConfig +} + +func NewTorrentClient(cliCtx *cli.Context, chain string) (*TorrentClient, error) { + logger := Logger(cliCtx.Context) + tempDir := TempDir(cliCtx.Context) + + torrentDir := filepath.Join(tempDir, "torrents", chain) + + dirs := datadir.New(torrentDir) + + webseedsList := common.CliString2Array(cliCtx.String(utils.WebSeedsFlag.Name)) + + if known, ok := snapcfg.KnownWebseeds[chain]; ok { + webseedsList = append(webseedsList, known...) 
+ } + + var downloadRate, uploadRate datasize.ByteSize + + if err := downloadRate.UnmarshalText([]byte(cliCtx.String(utils.TorrentDownloadRateFlag.Name))); err != nil { + return nil, err + } + + if err := uploadRate.UnmarshalText([]byte(cliCtx.String(utils.TorrentUploadRateFlag.Name))); err != nil { + return nil, err + } + + logLevel, _, err := downloadercfg.Int2LogLevel(cliCtx.Int(utils.TorrentVerbosityFlag.Name)) + + if err != nil { + return nil, err + } + + version := "erigon: " + params.VersionWithCommit(params.GitCommit) + + cfg, err := downloadercfg.New(dirs, version, logLevel, downloadRate, uploadRate, + cliCtx.Int(utils.TorrentPortFlag.Name), + cliCtx.Int(utils.TorrentConnsPerFileFlag.Name), 0, nil, webseedsList, chain) + + if err != nil { + return nil, err + } + + err = os.RemoveAll(torrentDir) + + if err != nil { + return nil, fmt.Errorf("can't clean torrent dir: %w", err) + } + + if err := os.MkdirAll(torrentDir, 0755); err != nil { + return nil, err + } + + cfg.ClientConfig.DataDir = torrentDir + + cfg.ClientConfig.PieceHashersPerTorrent = 32 * runtime.NumCPU() + cfg.ClientConfig.DisableIPv6 = cliCtx.Bool(utils.DisableIPV6.Name) + cfg.ClientConfig.DisableIPv4 = cliCtx.Bool(utils.DisableIPV4.Name) + + natif, err := nat.Parse(utils.NATFlag.Value) + + if err != nil { + return nil, fmt.Errorf("invalid nat option %s: %w", utils.NATFlag.Value, err) + } + + downloadernat.DoNat(natif, cfg.ClientConfig, logger) + + cfg.ClientConfig.DefaultStorage = storage.NewMMap(torrentDir) + + cli, err := torrent.NewClient(cfg.ClientConfig) + + if err != nil { + return nil, fmt.Errorf("can't create torrent client: %w", err) + } + + return &TorrentClient{cli, cfg.ClientConfig}, nil +} + +type torrentSession struct { + cli *TorrentClient + items map[string]snapcfg.PreverifiedItem +} + +type fileInfo struct { + info snapcfg.PreverifiedItem +} + +func (fi *fileInfo) Name() string { + return fi.info.Name +} + +func (fi *fileInfo) Size() int64 { + return 0 +} + +func (fi *fileInfo) Mode() fs.FileMode { + return fs.ModeIrregular +} + +func (fi *fileInfo) ModTime() time.Time { + return time.Time{} +} + +func (fi *fileInfo) IsDir() bool { + return false +} + +type torrentInfo struct { + snapInfo *snaptype.FileInfo + hash string +} + +func (i *torrentInfo) Version() uint8 { + if i.snapInfo != nil { + return i.snapInfo.Version + } + + return 0 +} + +func (i *torrentInfo) From() uint64 { + if i.snapInfo != nil { + return i.snapInfo.From + } + + return 0 +} + +func (i *torrentInfo) To() uint64 { + if i.snapInfo != nil { + return i.snapInfo.To + } + + return 0 +} + +func (i *torrentInfo) Type() snaptype.Type { + if i.snapInfo != nil { + return i.snapInfo.T + } + + return 0 +} + +func (i *torrentInfo) Hash() string { + return i.hash +} + +func (fi *fileInfo) Sys() any { + info := torrentInfo{hash: fi.info.Hash} + if snapInfo, ok := snaptype.ParseFileName("", fi.Name()); ok { + info.snapInfo = &snapInfo + } + + return &info +} + +type dirEntry struct { + info *fileInfo +} + +func (e dirEntry) Name() string { + return e.info.Name() +} + +func (e dirEntry) IsDir() bool { + return e.info.IsDir() +} + +func (e dirEntry) Type() fs.FileMode { + return fs.ModeIrregular +} + +func (e dirEntry) Info() (fs.FileInfo, error) { + return e.info, nil +} + +func (s *torrentSession) ReadRemoteDir(ctx context.Context, refresh bool) ([]fs.DirEntry, error) { + var entries = make([]fs.DirEntry, 0, len(s.items)) + + for _, info := range s.items { + entries = append(entries, &dirEntry{&fileInfo{info}}) + } + + slices.SortFunc(entries, 
func(a, b fs.DirEntry) int { + return strings.Compare(a.Name(), b.Name()) + }) + + return entries, nil +} + +func (s *torrentSession) LocalFsRoot() string { + return s.cli.cfg.DataDir +} + +func (s *torrentSession) RemoteFsRoot() string { + return "" +} + +func (s *torrentSession) Download(ctx context.Context, files ...string) error { + g, ctx := errgroup.WithContext(ctx) + g.SetLimit(len(files)) + + for _, f := range files { + file := f + + g.Go(func() error { + it, ok := s.items[file] + + if !ok { + return fs.ErrNotExist + } + + t, err := func() (*torrent.Torrent, error) { + infoHash := snaptype.Hex2InfoHash(it.Hash) + + for _, t := range s.cli.Torrents() { + if t.Name() == file { + return t, nil + } + } + + mi := &metainfo.MetaInfo{AnnounceList: downloader.Trackers} + magnet := mi.Magnet(&infoHash, &metainfo.Info{Name: file}) + spec, err := torrent.TorrentSpecFromMagnetUri(magnet.String()) + + if err != nil { + return nil, err + } + + spec.DisallowDataDownload = true + + t, _, err := s.cli.AddTorrentSpec(spec) + if err != nil { + return nil, err + } + + return t, nil + }() + + if err != nil { + return err + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-t.GotInfo(): + } + + if !t.Complete.Bool() { + t.AllowDataDownload() + t.DownloadAll() + select { + case <-ctx.Done(): + return ctx.Err() + case <-t.Complete.On(): + } + } + + closed := t.Closed() + t.Drop() + <-closed + + return nil + }) + } + + return g.Wait() +} + +func (s *torrentSession) Label() string { + return "torrents" +} + +func NewTorrentSession(cli *TorrentClient, chain string) *torrentSession { + session := &torrentSession{cli, map[string]snapcfg.PreverifiedItem{}} + for _, it := range snapcfg.KnownCfg(chain, 0).Preverified { + session.items[it.Name] = it + } + + return session +} + +func DownloadManifest(ctx context.Context, session DownloadSession) ([]fs.DirEntry, error) { + if session, ok := session.(*downloader.RCloneSession); ok { + reader, err := session.Cat(ctx, "manifest.txt") + + if err != nil { + return nil, err + } + + var entries []fs.DirEntry + + scanner := bufio.NewScanner(reader) + + for scanner.Scan() { + entries = append(entries, dirEntry{&fileInfo{snapcfg.PreverifiedItem{Name: scanner.Text()}}}) + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + return entries, nil + } + + return nil, fmt.Errorf("not implemented for %T", session) +} + +type DownloadSession interface { + Download(ctx context.Context, files ...string) error + ReadRemoteDir(ctx context.Context, refresh bool) ([]fs.DirEntry, error) + LocalFsRoot() string + RemoteFsRoot() string + Label() string +} diff --git a/cmd/snapshots/sync/util.go b/cmd/snapshots/sync/util.go new file mode 100644 index 00000000000..a0a69547bd6 --- /dev/null +++ b/cmd/snapshots/sync/util.go @@ -0,0 +1,32 @@ +package sync + +import ( + "context" + "fmt" + + "github.com/ledgerwatch/erigon-lib/downloader" +) + +func CheckRemote(rcCli *downloader.RCloneClient, src string) error { + + remotes, err := rcCli.ListRemotes(context.Background()) + + if err != nil { + return err + } + + hasRemote := false + + for _, remote := range remotes { + if src == remote { + hasRemote = true + break + } + } + + if !hasRemote { + return fmt.Errorf("unknown remote: %s", src) + } + + return nil +} diff --git a/cmd/snapshots/torrents/torrents.go b/cmd/snapshots/torrents/torrents.go new file mode 100644 index 00000000000..01f01ab6e14 --- /dev/null +++ b/cmd/snapshots/torrents/torrents.go @@ -0,0 +1,504 @@ +package torrents + +import ( + "context" + "fmt" + "os" 
+ "path/filepath" + "strconv" + "strings" + gosync "sync" + "time" + + "golang.org/x/exp/slices" + + "github.com/ledgerwatch/log/v3" + + "github.com/anacrolix/torrent/metainfo" + "github.com/ledgerwatch/erigon-lib/downloader" + "github.com/ledgerwatch/erigon-lib/downloader/snaptype" + "github.com/ledgerwatch/erigon/cmd/snapshots/manifest" + "github.com/ledgerwatch/erigon/cmd/snapshots/sync" + "github.com/ledgerwatch/erigon/cmd/utils" + "github.com/ledgerwatch/erigon/turbo/logging" + "github.com/urfave/cli/v2" + "golang.org/x/sync/errgroup" +) + +var Command = cli.Command{ + Action: func(cliCtx *cli.Context) error { + return torrents(cliCtx, "list") + }, + Name: "torrent", + Usage: "torrent utilities", + Subcommands: []*cli.Command{ + { + Action: func(cliCtx *cli.Context) error { + return torrents(cliCtx, "list") + }, + Name: "list", + Usage: "list torrents available at the specified storage location", + ArgsUsage: "", + }, + { + Action: func(cliCtx *cli.Context) error { + return torrents(cliCtx, "hashes") + }, + Name: "hashes", + Usage: "list the hashes (in toml format) at the specified storage location", + ArgsUsage: " ", + }, + { + Action: func(cliCtx *cli.Context) error { + return torrents(cliCtx, "update") + }, + Name: "update", + Usage: "update re-create the torrents for the contents available at its storage location", + ArgsUsage: " ", + }, + { + Action: func(cliCtx *cli.Context) error { + return torrents(cliCtx, "verify") + }, + Name: "verify", + Usage: "verify that manifest contents are available at its storage location", + ArgsUsage: " ", + }, + }, + Flags: []cli.Flag{ + &utils.DataDirFlag, + &logging.LogVerbosityFlag, + &logging.LogConsoleVerbosityFlag, + &logging.LogDirVerbosityFlag, + }, + Description: ``, +} + +func torrents(cliCtx *cli.Context, command string) error { + logger := sync.Logger(cliCtx.Context) + + var src *sync.Locator + var err error + + var firstBlock, lastBlock uint64 + + pos := 0 + + if src, err = sync.ParseLocator(cliCtx.Args().Get(pos)); err != nil { + return err + } + + pos++ + + if cliCtx.Args().Len() > pos { + if src, err = sync.ParseLocator(cliCtx.Args().Get(pos)); err != nil { + return err + } + + if err != nil { + return err + } + } + + pos++ + + if cliCtx.Args().Len() > pos { + firstBlock, err = strconv.ParseUint(cliCtx.Args().Get(pos), 10, 64) + if err != nil { + return err + } + } + + pos++ + + if cliCtx.Args().Len() > pos { + lastBlock, err = strconv.ParseUint(cliCtx.Args().Get(pos), 10, 64) + + if err != nil { + return err + } + } + + if src == nil { + return fmt.Errorf("missing data source") + } + + var rcCli *downloader.RCloneClient + + switch src.LType { + case sync.RemoteFs: + if rcCli == nil { + rcCli, err = downloader.NewRCloneClient(logger) + + if err != nil { + return err + } + } + + if err = sync.CheckRemote(rcCli, src.Src); err != nil { + return err + } + } + + var srcSession *downloader.RCloneSession + + dataDir := cliCtx.String(utils.DataDirFlag.Name) + var tempDir string + + if len(dataDir) == 0 { + dataDir, err := os.MkdirTemp("", "snapshot-torrents-") + if err != nil { + return err + } + tempDir = dataDir + defer os.RemoveAll(dataDir) + } else { + tempDir = filepath.Join(dataDir, "temp") + + if err := os.MkdirAll(tempDir, 0755); err != nil { + return err + } + } + + if rcCli != nil { + if src != nil && src.LType == sync.RemoteFs { + srcSession, err = rcCli.NewSession(cliCtx.Context, filepath.Join(tempDir, "src"), src.Src+":"+src.Root) + + if err != nil { + return err + } + } + } + + if src != nil && srcSession == nil { + return 
fmt.Errorf("no src session established") + } + + logger.Debug("Starting torrents " + command) + + switch command { + case "hashes": + return torrentHashes(cliCtx.Context, srcSession, firstBlock, lastBlock) + case "update": + startTime := time.Now() + + logger.Info(fmt.Sprintf("Starting update: %s", src.String()), "first", firstBlock, "last", lastBlock, "dir", tempDir) + + err := updateTorrents(cliCtx.Context, srcSession, firstBlock, lastBlock, logger) + + if err == nil { + logger.Info(fmt.Sprintf("Finished update: %s", src.String()), "elapsed", time.Since(startTime)) + } else { + logger.Info(fmt.Sprintf("Aborted update: %s", src.String()), "err", err) + } + + return err + + case "verify": + startTime := time.Now() + + logger.Info(fmt.Sprintf("Starting verify: %s", src.String()), "first", firstBlock, "last", lastBlock, "dir", tempDir) + + err := verifyTorrents(cliCtx.Context, srcSession, firstBlock, lastBlock, logger) + + if err == nil { + logger.Info(fmt.Sprintf("Verified: %s", src.String()), "elapsed", time.Since(startTime)) + } else { + logger.Info(fmt.Sprintf("Verification failed: %s", src.String()), "err", err) + } + + return err + } + + return listTorrents(cliCtx.Context, srcSession, os.Stdout, firstBlock, lastBlock) +} + +func listTorrents(ctx context.Context, srcSession *downloader.RCloneSession, out *os.File, from uint64, to uint64) error { + entries, err := manifest.DownloadManifest(ctx, srcSession) + + if err != nil { + entries, err = srcSession.ReadRemoteDir(ctx, true) + } + + if err != nil { + return err + } + + for _, fi := range entries { + if filepath.Ext(fi.Name()) == ".torrent" { + if from > 0 || to > 0 { + info, _ := snaptype.ParseFileName("", strings.TrimSuffix(fi.Name(), ".torrent")) + + if from > 0 && info.From < from { + continue + } + + if to > 0 && info.From > to { + continue + } + } + + fmt.Fprintln(out, fi.Name()) + } + } + + return nil +} + +func torrentHashes(ctx context.Context, srcSession *downloader.RCloneSession, from uint64, to uint64) error { + entries, err := manifest.DownloadManifest(ctx, srcSession) + + if err != nil { + return err + } + + type hashInfo struct { + name, hash string + } + + var hashes []hashInfo + var hashesMutex gosync.Mutex + + g, gctx := errgroup.WithContext(ctx) + g.SetLimit(16) + + for _, fi := range entries { + if filepath.Ext(fi.Name()) == ".torrent" { + if from > 0 || to > 0 { + info, _ := snaptype.ParseFileName("", strings.TrimSuffix(fi.Name(), ".torrent")) + + if from > 0 && info.From < from { + continue + } + + if to > 0 && info.From > to { + continue + } + } + + file := fi.Name() + + g.Go(func() error { + var mi *metainfo.MetaInfo + + errs := 0 + + for { + reader, err := srcSession.Cat(gctx, file) + + if err != nil { + return fmt.Errorf("can't read remote torrent: %s: %w", file, err) + } + + mi, err = metainfo.Load(reader) + + if err != nil { + errs++ + + if errs == 4 { + return fmt.Errorf("can't parse remote torrent: %s: %w", file, err) + } + + continue + } + + break + } + + info, err := mi.UnmarshalInfo() + + if err != nil { + return fmt.Errorf("can't unmarshal torrent info: %s: %w", file, err) + } + + hashesMutex.Lock() + defer hashesMutex.Unlock() + hashes = append(hashes, hashInfo{info.Name, mi.HashInfoBytes().String()}) + + return nil + }) + } + } + + if err := g.Wait(); err != nil { + return err + } + + slices.SortFunc(hashes, func(a, b hashInfo) int { + return strings.Compare(a.name, b.name) + }) + + for _, hi := range hashes { + fmt.Printf("'%s' = '%s'\n", hi.name, hi.hash) + } + + return nil +} + +func 
updateTorrents(ctx context.Context, srcSession *downloader.RCloneSession, from uint64, to uint64, logger log.Logger) error { + entries, err := manifest.DownloadManifest(ctx, srcSession) + + if err != nil { + return err + } + + g, gctx := errgroup.WithContext(ctx) + g.SetLimit(16) + + torrentFiles := downloader.NewAtomicTorrentFiles(srcSession.LocalFsRoot()) + + for _, fi := range entries { + if filepath.Ext(fi.Name()) == ".torrent" { + file := strings.TrimSuffix(fi.Name(), ".torrent") + + g.Go(func() error { + if from > 0 || to > 0 { + info, _ := snaptype.ParseFileName("", file) + + if from > 0 && info.From < from { + return nil + } + + if to > 0 && info.From > to { + return nil + } + } + + logger.Info(fmt.Sprintf("Updating %s", file+".torrent")) + + err := srcSession.Download(gctx, file) + + if err != nil { + return err + } + + defer os.Remove(filepath.Join(srcSession.LocalFsRoot(), file)) + + err = downloader.BuildTorrentIfNeed(gctx, file, srcSession.LocalFsRoot(), torrentFiles) + + if err != nil { + return err + } + + defer os.Remove(filepath.Join(srcSession.LocalFsRoot(), file+".torrent")) + + return srcSession.Upload(gctx, file+".torrent") + }) + } + } + + return g.Wait() +} + +func verifyTorrents(ctx context.Context, srcSession *downloader.RCloneSession, from uint64, to uint64, logger log.Logger) error { + entries, err := manifest.DownloadManifest(ctx, srcSession) + + if err != nil { + return err + } + + g, gctx := errgroup.WithContext(ctx) + g.SetLimit(16) + + torrentFiles := downloader.NewAtomicTorrentFiles(srcSession.LocalFsRoot()) + + for _, fi := range entries { + if filepath.Ext(fi.Name()) == ".torrent" { + file := strings.TrimSuffix(fi.Name(), ".torrent") + + g.Go(func() error { + if from > 0 || to > 0 { + info, _ := snaptype.ParseFileName("", file) + + if from > 0 && info.From < from { + return nil + } + + if to > 0 && info.From > to { + return nil + } + } + + logger.Info(fmt.Sprintf("Validating %s", file+".torrent")) + + var mi *metainfo.MetaInfo + + errs := 0 + + for { + reader, err := srcSession.Cat(gctx, file+".torrent") + + if err != nil { + return fmt.Errorf("can't read remote torrent: %s: %w", file+".torrent", err) + } + + mi, err = metainfo.Load(reader) + + if err != nil { + errs++ + + if errs == 4 { + return fmt.Errorf("can't parse remote torrent: %s: %w", file+".torrent", err) + } + + continue + } + + break + } + + info, err := mi.UnmarshalInfo() + + if err != nil { + return fmt.Errorf("can't unmarshal torrent info: %s: %w", file+".torrent", err) + } + + if info.Name != file { + return fmt.Errorf("torrent name does not match file: %s", file) + } + + err = srcSession.Download(gctx, file) + + if err != nil { + return err + } + + defer os.Remove(filepath.Join(srcSession.LocalFsRoot(), file)) + + err = downloader.BuildTorrentIfNeed(gctx, file, srcSession.LocalFsRoot(), torrentFiles) + + if err != nil { + return err + } + + torrentPath := filepath.Join(srcSession.LocalFsRoot(), file+".torrent") + + defer os.Remove(torrentPath) + + lmi, err := metainfo.LoadFromFile(torrentPath) + + if err != nil { + return fmt.Errorf("can't load local torrent from: %s: %w", torrentPath, err) + } + + if lmi.HashInfoBytes() != mi.HashInfoBytes() { + return fmt.Errorf("computed local hash does not match torrent: %s: expected: %s, got: %s", file+".torrent", lmi.HashInfoBytes(), mi.HashInfoBytes()) + } + + localInfo, err := lmi.UnmarshalInfo() + + if err != nil { + return fmt.Errorf("can't unmarshal local torrent info: %s: %w", torrentPath, err) + } + + if localInfo.Name != info.Name { + 
return fmt.Errorf("computed local name does not match torrent: %s: expected: %s, got: %s", file+".torrent", localInfo.Name, info.Name) + } + + return nil + }) + } + } + + return g.Wait() +} diff --git a/cmd/snapshots/verify/verify.go b/cmd/snapshots/verify/verify.go new file mode 100644 index 00000000000..bb0fbc83b70 --- /dev/null +++ b/cmd/snapshots/verify/verify.go @@ -0,0 +1,249 @@ +package verify + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + + "github.com/ledgerwatch/erigon-lib/downloader" + "github.com/ledgerwatch/erigon-lib/downloader/snaptype" + "github.com/ledgerwatch/erigon/cmd/snapshots/flags" + "github.com/ledgerwatch/erigon/cmd/snapshots/sync" + "github.com/ledgerwatch/erigon/cmd/utils" + "github.com/urfave/cli/v2" +) + +var ( + SrcFlag = cli.StringFlag{ + Name: "src", + Usage: `Source location for verification files (torrent,hash,manifest)`, + Required: false, + } + DstFlag = cli.StringFlag{ + Name: "dst", + Usage: `Destination location containiong copies to be verified`, + Required: true, + } + ChainFlag = cli.StringFlag{ + Name: "chain", + Usage: `The chain being validated, required if not included src or dst naming`, + Required: false, + } + TorrentsFlag = cli.BoolFlag{ + Name: "torrents", + Usage: `Verify against torrent files`, + Required: false, + } + + HashesFlag = cli.BoolFlag{ + Name: "hashes", + Usage: `Verify against hash .toml contents`, + Required: false, + } + + ManifestFlag = cli.BoolFlag{ + Name: "manifest", + Usage: `Verify against manifest .txt contents`, + Required: false, + } +) + +var Command = cli.Command{ + Action: verify, + Name: "verify", + Usage: "verify snapshot segments against hashes and torrents", + ArgsUsage: " ", + Flags: []cli.Flag{ + &SrcFlag, + &DstFlag, + &ChainFlag, + &flags.SegTypes, + &TorrentsFlag, + &HashesFlag, + &ManifestFlag, + &utils.WebSeedsFlag, + &utils.NATFlag, + &utils.DisableIPV6, + &utils.DisableIPV4, + &utils.TorrentDownloadRateFlag, + &utils.TorrentUploadRateFlag, + &utils.TorrentVerbosityFlag, + &utils.TorrentPortFlag, + &utils.TorrentMaxPeersFlag, + &utils.TorrentConnsPerFileFlag, + }, + Description: ``, +} + +func verify(cliCtx *cli.Context) error { + logger := sync.Logger(cliCtx.Context) + + logger.Info("Starting verify") + + var src, dst *sync.Locator + var err error + + var rcCli *downloader.RCloneClient + var torrentCli *sync.TorrentClient + + if src, err = sync.ParseLocator(cliCtx.String(SrcFlag.Name)); err != nil { + return err + } + + if dst, err = sync.ParseLocator(cliCtx.String(DstFlag.Name)); err != nil { + return err + } + + chain := cliCtx.String(ChainFlag.Name) + + switch dst.LType { + case sync.TorrentFs: + torrentCli, err = sync.NewTorrentClient(cliCtx, dst.Chain) + if err != nil { + return fmt.Errorf("can't create torrent: %w", err) + } + + case sync.RemoteFs: + if rcCli == nil { + rcCli, err = downloader.NewRCloneClient(logger) + + if err != nil { + return err + } + } + + if err = sync.CheckRemote(rcCli, src.Src); err != nil { + return err + } + + if len(chain) == 0 { + chain = dst.Chain + } + } + + switch src.LType { + case sync.TorrentFs: + if torrentCli == nil { + torrentCli, err = sync.NewTorrentClient(cliCtx, dst.Chain) + if err != nil { + return fmt.Errorf("can't create torrent: %w", err) + } + } + + case sync.RemoteFs: + if rcCli == nil { + rcCli, err = downloader.NewRCloneClient(logger) + + if err != nil { + return err + } + } + + if err = sync.CheckRemote(rcCli, src.Src); err != nil { + return err + } + + if len(chain) == 0 { + chain = src.Chain + } + } + + typeValues := 
+	typeValues := cliCtx.StringSlice(flags.SegTypes.Name) +	snapTypes := make([]snaptype.Type, 0, len(typeValues)) + +	for _, val := range typeValues { +		segType, ok := snaptype.ParseFileType(val) + +		if !ok { +			return fmt.Errorf("unknown file type: %s", val) +		} + +		snapTypes = append(snapTypes, segType) +	} + +	torrents := cliCtx.Bool(TorrentsFlag.Name) +	hashes := cliCtx.Bool(HashesFlag.Name) +	manifest := cliCtx.Bool(ManifestFlag.Name) + +	var firstBlock, lastBlock uint64 + +	if cliCtx.Args().Len() > 0 { +		if firstBlock, err = strconv.ParseUint(cliCtx.Args().Get(0), 10, 64); err != nil { +			return err +		} +	} + +	if cliCtx.Args().Len() > 1 { +		if lastBlock, err = strconv.ParseUint(cliCtx.Args().Get(1), 10, 64); err != nil { +			return err +		} +	} + +	var srcSession sync.DownloadSession +	var dstSession sync.DownloadSession + +	dataDir := cliCtx.String(utils.DataDirFlag.Name) +	var tempDir string + +	if len(dataDir) == 0 { +		dataDir, err := os.MkdirTemp("", "snapshot-verify-") +		if err != nil { +			return err +		} +		tempDir = dataDir +		defer os.RemoveAll(dataDir) +	} else { +		tempDir = filepath.Join(dataDir, "temp") + +		if err := os.MkdirAll(tempDir, 0755); err != nil { +			return err +		} +	} + +	if rcCli != nil { +		if src != nil && src.LType == sync.RemoteFs { +			srcSession, err = rcCli.NewSession(cliCtx.Context, filepath.Join(tempDir, "src"), src.Src+":"+src.Root) + +			if err != nil { +				return err +			} +		} + +		if dst.LType == sync.RemoteFs { +			dstSession, err = rcCli.NewSession(cliCtx.Context, filepath.Join(tempDir, "dst"), dst.Src+":"+dst.Root) + +			if err != nil { +				return err +			} +		} +	} + +	if torrentCli != nil { +		if src != nil && src.LType == sync.TorrentFs { +			srcSession = sync.NewTorrentSession(torrentCli, chain) +		} + +		if dst.LType == sync.TorrentFs { +			dstSession = sync.NewTorrentSession(torrentCli, chain) +		} +	} + +	if src != nil && srcSession == nil { +		return fmt.Errorf("no src session established") +	} + +	if dstSession == nil { +		return fmt.Errorf("no dst session established") +	} + +	if srcSession == nil { +		srcSession = dstSession +	} + +	return verifySnapshots(srcSession, dstSession, firstBlock, lastBlock, snapTypes, torrents, hashes, manifest) +} + +func verifySnapshots(srcSession sync.DownloadSession, dstSession sync.DownloadSession, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { +	return fmt.Errorf("TODO") +}
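The verifySnapshots stub above will eventually need the same from/to block-window filter that appears four times in the torrents command earlier in this diff. A minimal helper sketch under that assumption, reusing the snaptype calls already shown above (the name inRange is hypothetical and not part of this change):

    package torrents

    import (
    	"strings"

    	"github.com/ledgerwatch/erigon-lib/downloader/snaptype"
    )

    // inRange reports whether an entry's starting block falls inside the optional
    // [from, to] window, where 0 means "no bound"; it mirrors the checks repeated
    // in listTorrents, torrentHashes, updateTorrents and verifyTorrents.
    func inRange(name string, from, to uint64) bool {
    	info, _ := snaptype.ParseFileName("", strings.TrimSuffix(name, ".torrent"))
    	if from > 0 && info.From < from {
    		return false
    	}
    	if to > 0 && info.From > to {
    		return false
    	}
    	return true
    }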
diff --git a/cmd/state/commands/check_change_sets.go b/cmd/state/commands/check_change_sets.go index 85308fdcd3a..3b844f1e5d7 100644 --- a/cmd/state/commands/check_change_sets.go +++ b/cmd/state/commands/check_change_sets.go @@ -46,6 +46,7 @@ func init() { withBlock(checkChangeSetsCmd) withDataDir(checkChangeSetsCmd) withSnapshotBlocks(checkChangeSetsCmd) +	withSnapshotVersion(checkChangeSetsCmd) checkChangeSetsCmd.Flags().StringVar(&historyfile, "historyfile", "", "path to the file where the changesets and history are expected to be. If omitted, the same as /erigon/chaindata") checkChangeSetsCmd.Flags().BoolVar(&nocheck, "nocheck", false, "set to turn off the changeset checking and only execute transaction (for performance testing)") rootCmd.AddCommand(checkChangeSetsCmd) @@ -56,13 +57,13 @@ var checkChangeSetsCmd = &cobra.Command{ Short: "Re-executes historical transactions in read-only mode and checks that their outputs match the database ChangeSets", RunE: func(cmd *cobra.Command, args []string) error { logger := debug.SetupCobra(cmd, "check_change_sets") -		return CheckChangeSets(cmd.Context(), genesis, block, chaindata, historyfile, nocheck, logger) +		return CheckChangeSets(cmd.Context(), genesis, snapshotVersion, block, chaindata, historyfile, nocheck, logger) }, } // CheckChangeSets re-executes historical transactions in read-only mode // and checks that their outputs match the database ChangeSets. -func CheckChangeSets(ctx context.Context, genesis *types.Genesis, blockNum uint64, chaindata string, historyfile string, nocheck bool, logger log.Logger) error { +func CheckChangeSets(ctx context.Context, genesis *types.Genesis, snapshotVersion uint8, blockNum uint64, chaindata string, historyfile string, nocheck bool, logger log.Logger) error { if len(historyfile) == 0 { historyfile = chaindata } @@ -81,7 +82,7 @@ func CheckChangeSets(ctx context.Context, genesis *types.Genesis, blockNum uint6 if err != nil { return err } -	allSnapshots := freezeblocks.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadirCli, "snapshots"), logger) +	allSnapshots := freezeblocks.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadirCli, "snapshots"), snapshotVersion, logger) defer allSnapshots.Close() if err := allSnapshots.ReopenFolder(); err != nil { return fmt.Errorf("reopen snapshot segments: %w", err) @@ -287,7 +288,7 @@ func initConsensusEngine(ctx context.Context, cc *chain2.Config, snapshots *free } else if cc.Aura != nil { consensusConfig = &config.Aura } else if cc.Bor != nil { -		consensusConfig = &config.Bor +		consensusConfig = cc.Bor } else { consensusConfig = &config.Ethash } diff --git a/cmd/state/commands/global_flags_vars.go b/cmd/state/commands/global_flags_vars.go index dd81e19aee6..a45471410b7 100644 --- a/cmd/state/commands/global_flags_vars.go +++ b/cmd/state/commands/global_flags_vars.go @@ -19,6 +19,7 @@ var ( snapshotsCli bool chain string logdir string +	snapshotVersion uint8 ) func must(err error) { @@ -39,6 +40,10 @@ func withDataDir(cmd *cobra.Command) { must(cmd.MarkFlagDirname("chaindata")) } +func withSnapshotVersion(cmd *cobra.Command) { +	cmd.Flags().Uint8Var(&snapshotVersion, "snapshots.version", 1, "specifies the snapshot file version") +} + func withStatsfile(cmd *cobra.Command) { cmd.Flags().StringVar(&statsfile, "statsfile", "stateless.csv", "path where to write the stats file") must(cmd.MarkFlagFilename("statsfile", "csv")) diff --git a/cmd/state/commands/opcode_tracer.go b/cmd/state/commands/opcode_tracer.go index e1b08226701..92c73d4ec4a 100644 --- a/cmd/state/commands/opcode_tracer.go +++ b/cmd/state/commands/opcode_tracer.go @@ -45,6 +45,7 @@ var ( func init() { withBlock(opcodeTracerCmd) withDataDir(opcodeTracerCmd) +	withSnapshotVersion(opcodeTracerCmd) opcodeTracerCmd.Flags().Uint64Var(&numBlocks, "numBlocks", 1, "number of blocks to run the operation on") opcodeTracerCmd.Flags().BoolVar(&saveOpcodes, "saveOpcodes", false, "set to save the opcodes") opcodeTracerCmd.Flags().BoolVar(&saveBBlocks, "saveBBlocks", false, "set to save the basic blocks") @@
-57,7 +58,7 @@ var opcodeTracerCmd = &cobra.Command{ Short: "Re-executes historical transactions in read-only mode and traces them at the opcode level", RunE: func(cmd *cobra.Command, args []string) error { logger := log.New("opcode-tracer", genesis.Config.ChainID) -		return OpcodeTracer(genesis, block, chaindata, numBlocks, saveOpcodes, saveBBlocks, logger) +		return OpcodeTracer(genesis, snapshotVersion, block, chaindata, numBlocks, saveOpcodes, saveBBlocks, logger) }, } @@ -396,7 +397,7 @@ type segPrefix struct { // OpcodeTracer re-executes historical transactions in read-only mode // and traces them at the opcode level -func OpcodeTracer(genesis *types.Genesis, blockNum uint64, chaindata string, numBlocks uint64, +func OpcodeTracer(genesis *types.Genesis, snapshotVersion uint8, blockNum uint64, chaindata string, numBlocks uint64, saveOpcodes bool, saveBblocks bool, logger log.Logger) error { blockNumOrig := blockNum @@ -429,7 +430,7 @@ func OpcodeTracer(genesis *types.Genesis, blockNum uint64, chaindata string, num } return nil }) -	blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()), nil /* BorSnapshots */) +	blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", snapshotVersion, log.New()), nil /* BorSnapshots */) chainConfig := genesis.Config vmConfig := vm.Config{Tracer: ot, Debug: true} diff --git a/cmd/state/commands/state_root.go b/cmd/state/commands/state_root.go index 8945289cff3..18e32915fe6 100644 --- a/cmd/state/commands/state_root.go +++ b/cmd/state/commands/state_root.go @@ -35,6 +35,7 @@ import ( func init() { withBlock(stateRootCmd) withDataDir(stateRootCmd) +	withSnapshotVersion(stateRootCmd) rootCmd.AddCommand(stateRootCmd) } @@ -43,11 +44,11 @@ var stateRootCmd = &cobra.Command{ Short: "Experimental command to re-execute blocks from beginning and compute state root", RunE: func(cmd *cobra.Command, args []string) error { logger := debug.SetupCobra(cmd, "stateroot") -		return StateRoot(cmd.Context(), genesis, block, datadirCli, logger) +		return StateRoot(cmd.Context(), genesis, snapshotVersion, block, datadirCli, logger) }, } -func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) { +func blocksIO(db kv.RoDB, snapshotVersion uint8) (services.FullBlockReader, *blockio.BlockWriter) { var histV3 bool if err := db.View(context.Background(), func(tx kv.Tx) error { histV3, _ = kvcfg.HistoryV3.Enabled(tx) @@ -55,12 +56,12 @@ func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) { }); err != nil { panic(err) } -	br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()), nil /* BorSnapshots */) +	br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", snapshotVersion, log.New()), nil /* BorSnapshots */) bw := blockio.NewBlockWriter(histV3) return br, bw } -func StateRoot(ctx context.Context, genesis *types.Genesis, blockNum uint64, datadir string, logger log.Logger) error { +func StateRoot(ctx context.Context, genesis *types.Genesis, snapshotVersion uint8, blockNum uint64, datadir string, logger log.Logger) error { sigs := make(chan os.Signal, 1) interruptCh := make(chan bool, 1) signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) @@ -93,7 +94,7 @@ func StateRoot(ctx context.Context, genesis *types.Genesis, blockNum uint64, dat return err2 } defer db.Close() -	blockReader, _ := blocksIO(db) +
blockReader, _ := blocksIO(db, snapshotVersion) chainConfig := genesis.Config vmConfig := vm.Config{} @@ -108,7 +109,7 @@ func StateRoot(ctx context.Context, genesis *types.Genesis, blockNum uint64, dat if rwTx, err = db.BeginRw(ctx); err != nil { return err } - _, genesisIbs, err4 := core.GenesisToBlock(genesis, "") + _, genesisIbs, err4 := core.GenesisToBlock(genesis, "", logger) if err4 != nil { return err4 } diff --git a/cmd/state/commands/verify_txlookup.go b/cmd/state/commands/verify_txlookup.go index 8dd27671015..3b5c4707c22 100644 --- a/cmd/state/commands/verify_txlookup.go +++ b/cmd/state/commands/verify_txlookup.go @@ -8,6 +8,7 @@ import ( func init() { withDataDir(verifyTxLookupCmd) + withSnapshotVersion(verifyTxLookupCmd) rootCmd.AddCommand(verifyTxLookupCmd) } @@ -16,6 +17,6 @@ var verifyTxLookupCmd = &cobra.Command{ Short: "Generate tx lookup index", RunE: func(cmd *cobra.Command, args []string) error { logger := debug.SetupCobra(cmd, "verify_txlookup") - return verify.ValidateTxLookups(chaindata, logger) + return verify.ValidateTxLookups(chaindata, snapshotVersion, logger) }, } diff --git a/cmd/state/exec3/state.go b/cmd/state/exec3/state.go index 7c4a38486a6..79f699237d0 100644 --- a/cmd/state/exec3/state.go +++ b/cmd/state/exec3/state.go @@ -152,7 +152,7 @@ func (rw *Worker) RunTxTaskNoLock(txTask *exec22.TxTask) { if txTask.BlockNum == 0 { // Genesis block // fmt.Printf("txNum=%d, blockNum=%d, Genesis\n", txTask.TxNum, txTask.BlockNum) - _, ibs, err = core.GenesisToBlock(rw.genesis, "") + _, ibs, err = core.GenesisToBlock(rw.genesis, "", logger) if err != nil { panic(err) } diff --git a/cmd/state/exec3/state_recon.go b/cmd/state/exec3/state_recon.go index 5af74a6054d..98740461d48 100644 --- a/cmd/state/exec3/state_recon.go +++ b/cmd/state/exec3/state_recon.go @@ -301,7 +301,7 @@ func (rw *ReconWorker) runTxTask(txTask *exec22.TxTask) error { if txTask.BlockNum == 0 && txTask.TxIndex == -1 { //fmt.Printf("txNum=%d, blockNum=%d, Genesis\n", txTask.TxNum, txTask.BlockNum) // Genesis block - _, ibs, err = core.GenesisToBlock(rw.genesis, "") + _, ibs, err = core.GenesisToBlock(rw.genesis, "", logger) if err != nil { return err } diff --git a/cmd/state/verify/verify_txlookup.go b/cmd/state/verify/verify_txlookup.go index 3a7351d11b8..625ef1fc717 100644 --- a/cmd/state/verify/verify_txlookup.go +++ b/cmd/state/verify/verify_txlookup.go @@ -20,7 +20,7 @@ import ( "github.com/ledgerwatch/log/v3" ) -func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) { +func blocksIO(db kv.RoDB, snapshotVersion uint8) (services.FullBlockReader, *blockio.BlockWriter) { var histV3 bool if err := db.View(context.Background(), func(tx kv.Tx) error { histV3, _ = kvcfg.HistoryV3.Enabled(tx) @@ -28,14 +28,14 @@ func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) { }); err != nil { panic(err) } - br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()), nil /* BorSnapshots */) + br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", snapshotVersion, log.New()), nil /* BorSnapshots */) bw := blockio.NewBlockWriter(histV3) return br, bw } -func ValidateTxLookups(chaindata string, logger log.Logger) error { +func ValidateTxLookups(chaindata string, snapshotVersion uint8, logger log.Logger) error { db := mdbx.MustOpen(chaindata) - br, _ := blocksIO(db) + br, _ := blocksIO(db, snapshotVersion) tx, err := db.BeginRo(context.Background()) if err != nil { 
return err diff --git a/cmd/tooling/cli.go b/cmd/tooling/cli.go index 1fc0ae9a558..a30a30a4ad8 100644 --- a/cmd/tooling/cli.go +++ b/cmd/tooling/cli.go @@ -12,6 +12,7 @@ import ( "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" "golang.org/x/net/context" + "github.com/ledgerwatch/erigon-lib/chain/snapcfg" "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/downloader/snaptype" "github.com/ledgerwatch/erigon/cl/persistence" @@ -78,7 +79,10 @@ func (c *BucketCaplinAutomation) Run(ctx *Context) error { tickerTriggerer := time.NewTicker(c.UploadPeriod) defer tickerTriggerer.Stop() // do the checking at first run - if err := checkSnapshots(ctx, beaconConfig, dirs); err != nil { + + snapshotVersion := snapcfg.KnownCfg(c.Chain, 0).Version + + if err := checkSnapshots(ctx, beaconConfig, dirs, snapshotVersion); err != nil { return err } log.Info("Uploading snapshots to R2 bucket") @@ -93,7 +97,9 @@ func (c *BucketCaplinAutomation) Run(ctx *Context) error { select { case <-tickerTriggerer.C: log.Info("Checking snapshots") - if err := checkSnapshots(ctx, beaconConfig, dirs); err != nil { + snapshotVersion := snapcfg.KnownCfg(c.Chain, 0).Version + + if err := checkSnapshots(ctx, beaconConfig, dirs, snapshotVersion); err != nil { return err } log.Info("Finishing snapshots") @@ -111,7 +117,7 @@ func (c *BucketCaplinAutomation) Run(ctx *Context) error { } } -func checkSnapshots(ctx context.Context, beaconConfig *clparams.BeaconChainConfig, dirs datadir.Dirs) error { +func checkSnapshots(ctx context.Context, beaconConfig *clparams.BeaconChainConfig, dirs datadir.Dirs, snapshotVersion uint8) error { rawDB, _ := persistence.AferoRawBeaconBlockChainFromOsPath(beaconConfig, dirs.CaplinHistory) _, db, err := caplin1.OpenCaplinDatabase(ctx, db_config.DatabaseConfiguration{PruneDepth: math.MaxUint64}, beaconConfig, rawDB, dirs.CaplinIndexing, nil, false) if err != nil { @@ -132,7 +138,7 @@ func checkSnapshots(ctx context.Context, beaconConfig *clparams.BeaconChainConfi to = (to / snaptype.Erigon2MergeLimit) * snaptype.Erigon2MergeLimit - csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs.Snap, log.Root()) + csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs.Snap, snapshotVersion, log.Root()) if err := csn.ReopenFolder(); err != nil { return err } diff --git a/cmd/txpool/main.go b/cmd/txpool/main.go index d915a18b32b..905ba3ef774 100644 --- a/cmd/txpool/main.go +++ b/cmd/txpool/main.go @@ -24,6 +24,7 @@ import ( "github.com/ledgerwatch/erigon-lib/types" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcdaemontest" common2 "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/consensus/misc" "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/log/v3" "github.com/spf13/cobra" @@ -163,19 +164,13 @@ func doTxpool(ctx context.Context, logger log.Logger) error { newTxs := make(chan types.Announcements, 1024) defer close(newTxs) txPoolDB, txPool, fetch, send, txpoolGrpcServer, err := txpooluitl.AllComponents(ctx, cfg, - kvcache.New(cacheConfig), newTxs, coreDB, sentryClients, kvClient, logger) + kvcache.New(cacheConfig), newTxs, coreDB, sentryClients, kvClient, misc.Eip1559FeeCalculator, logger) if err != nil { return err } fetch.ConnectCore() fetch.ConnectSentries() - /* - var ethashApi *ethash.API - sif casted, ok := backend.engine.(*ethash.Ethash); ok { - ethashApi = casted.APIs(nil)[1].Service.(*ethash.API) - } - */ miningGrpcServer := 
privateapi.NewMiningServer(ctx, &rpcdaemontest.IsMiningMock{}, nil, logger) grpcServer, err := txpool.StartGrpc(txpoolGrpcServer, miningGrpcServer, txpoolApiAddr, nil, logger) @@ -184,7 +179,7 @@ func doTxpool(ctx context.Context, logger log.Logger) error { } notifyMiner := func() {} -	txpool.MainLoop(ctx, txPoolDB, coreDB, txPool, newTxs, send, txpoolGrpcServer.NewSlotsStreams, notifyMiner) +	txpool.MainLoop(ctx, txPoolDB, txPool, newTxs, send, txpoolGrpcServer.NewSlotsStreams, notifyMiner) grpcServer.GracefulStop() return nil diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 0d487e1c841..67db9fda68e 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -152,6 +152,7 @@ var ( TxPoolDisableFlag = cli.BoolFlag{ Name: "txpool.disable", Usage: "Experimental external pool and block producer, see ./cmd/txpool/readme.md for more info. Disabling internal txpool and block producer.", +		Value: false, } TxPoolGossipDisableFlag = cli.BoolFlag{ Name: "txpool.gossip.disable", @@ -763,18 +764,18 @@ var ( Usage: "Enabling grpc health check", } -	HeimdallURLFlag = cli.StringFlag{ -		Name:  "bor.heimdall", -		Usage: "URL of Heimdall service", -		Value: "http://localhost:1317", -	} - WebSeedsFlag = cli.StringFlag{ Name: "webseed", Usage: "Comma-separated URLs, holding metadata about network-support infrastructure (like S3 buckets with snapshots, bootnodes, etc...)", Value: "", } +	HeimdallURLFlag = cli.StringFlag{ +		Name:  "bor.heimdall", +		Usage: "URL of Heimdall service", +		Value: "http://localhost:1317", +	} + // WithoutHeimdallFlag no heimdall (for testing purposes) WithoutHeimdallFlag = cli.BoolFlag{ Name: "bor.withoutheimdall", @@ -797,18 +798,12 @@ var ( Value: true, } -	// HeimdallgRPCAddressFlag flag for heimdall gRPC address -	HeimdallgRPCAddressFlag = cli.StringFlag{ -		Name:  "bor.heimdallgRPC", -		Usage: "Address of Heimdall gRPC service", -		Value: "", -	} - ConfigFlag = cli.StringFlag{ Name: "config", Usage: "Sets erigon flags from YAML/TOML file", Value: "", } + LightClientDiscoveryAddrFlag = cli.StringFlag{ Name: "lightclient.discovery.addr", Usage: "Address for lightclient DISCV5 protocol", @@ -824,6 +819,7 @@ var ( Usage: "TCP Port for lightclient DISCV5 protocol", Value: 4001, } + SentinelAddrFlag = cli.StringFlag{ Name: "sentinel.addr", Usage: "Address for sentinel", @@ -934,6 +930,21 @@ var ( Usage: "enables archival node in caplin (Experimental, does not work)", Value: false, } +	BeaconApiAllowCredentialsFlag = cli.BoolFlag{ +		Name:  "beacon.api.cors.allow-credentials", +		Usage: "set the CORS allow-credentials value", +		Value: false, +	} +	BeaconApiAllowMethodsFlag = cli.StringSliceFlag{ +		Name:  "beacon.api.cors.allow-methods", +		Usage: "set the CORS allowed methods", +		Value: cli.NewStringSlice("GET", "POST", "PUT", "DELETE", "OPTIONS"), +	} +	BeaconApiAllowOriginsFlag = cli.StringSliceFlag{ +		Name:  "beacon.api.cors.allow-origins", +		Usage: "set the CORS allowed origins", +		Value: cli.NewStringSlice(), +	} ) var MetricFlags = []cli.Flag{&MetricsEnabledFlag, &MetricsHTTPFlag, &MetricsPortFlag} @@ -1354,7 +1365,7 @@ func setGPOCobra(f *pflag.FlagSet, cfg *gaspricecfg.Config) { func setTxPool(ctx *cli.Context, fullCfg *ethconfig.Config) { cfg := &fullCfg.DeprecatedTxPool -	if ctx.IsSet(TxPoolDisableFlag.Name) { +	if ctx.IsSet(TxPoolDisableFlag.Name) || TxPoolDisableFlag.Value { cfg.Disable = true } if ctx.IsSet(TxPoolLocalsFlag.Name) { @@ -1500,7 +1511,6 @@ func setClique(ctx *cli.Context, cfg *params.ConsensusSnapshotConfig, datadir st func setBorConfig(ctx *cli.Context, cfg
*ethconfig.Config) { cfg.HeimdallURL = ctx.String(HeimdallURLFlag.Name) cfg.WithoutHeimdall = ctx.Bool(WithoutHeimdallFlag.Name) - cfg.HeimdallgRPCAddress = ctx.String(HeimdallgRPCAddressFlag.Name) cfg.WithHeimdallMilestones = ctx.Bool(WithHeimdallMilestones.Name) } @@ -1561,6 +1571,9 @@ func setBeaconAPI(ctx *cli.Context, cfg *ethconfig.Config) { cfg.BeaconRouter.ReadTimeTimeout = time.Duration(ctx.Uint64(BeaconApiReadTimeoutFlag.Name)) * time.Second cfg.BeaconRouter.WriteTimeout = time.Duration(ctx.Uint64(BeaconApiWriteTimeoutFlag.Name)) * time.Second cfg.BeaconRouter.IdleTimeout = time.Duration(ctx.Uint64(BeaconApiIdleTimeoutFlag.Name)) * time.Second + cfg.BeaconRouter.AllowedMethods = ctx.StringSlice(BeaconApiAllowMethodsFlag.Name) + cfg.BeaconRouter.AllowedOrigins = ctx.StringSlice(BeaconApiAllowOriginsFlag.Name) + cfg.BeaconRouter.AllowCredentials = ctx.Bool(BeaconApiAllowCredentialsFlag.Name) } func setCaplin(ctx *cli.Context, cfg *ethconfig.Config) { diff --git a/consensus/aura/aura_test.go b/consensus/aura/aura_test.go index 1772905f68a..dd9a94fee8b 100644 --- a/consensus/aura/aura_test.go +++ b/consensus/aura/aura_test.go @@ -17,6 +17,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/turbo/stages/mock" "github.com/ledgerwatch/erigon/turbo/trie" + "github.com/ledgerwatch/log/v3" ) // Check that the first block of Gnosis Chain, which doesn't have any transactions, @@ -24,7 +25,7 @@ import ( func TestEmptyBlock(t *testing.T) { require := require.New(t) genesis := core.GnosisGenesisBlock() - genesisBlock, _, err := core.GenesisToBlock(genesis, "") + genesisBlock, _, err := core.GenesisToBlock(genesis, "", log.Root()) require.NoError(err) genesis.Config.TerminalTotalDifficultyPassed = false diff --git a/consensus/bor/abi/interface.go b/consensus/bor/abi/interface.go deleted file mode 100644 index bb05bf0b23f..00000000000 --- a/consensus/bor/abi/interface.go +++ /dev/null @@ -1,6 +0,0 @@ -package abi - -type ABI interface { - Pack(name string, args ...interface{}) ([]byte, error) - UnpackIntoInterface(v interface{}, name string, data []byte) error -} diff --git a/consensus/bor/contract/client.go b/consensus/bor/contract/client.go deleted file mode 100644 index 09f2ba5a340..00000000000 --- a/consensus/bor/contract/client.go +++ /dev/null @@ -1,82 +0,0 @@ -package contract - -import ( - "math/big" - "strings" - - "github.com/ledgerwatch/erigon-lib/chain" - libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon/accounts/abi" - "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/rlp" - "github.com/ledgerwatch/log/v3" -) - -var ( - vABI, _ = abi.JSON(strings.NewReader(ValidatorsetABI)) - sABI, _ = abi.JSON(strings.NewReader(StateReceiverABI)) -) - -func ValidatorSet() abi.ABI { - return vABI -} - -func StateReceiver() abi.ABI { - return sABI -} - -type GenesisContractsClient struct { - validatorSetABI abi.ABI - stateReceiverABI abi.ABI - ValidatorContract libcommon.Address - StateReceiverContract libcommon.Address - chainConfig *chain.Config - logger log.Logger -} - -const ( - ValidatorsetABI = 
`[{"constant":true,"inputs":[],"name":"SPRINT","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"SYSTEM_ADDRESS","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"CHAIN","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"FIRST_END_BLOCK","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"","type":"uint256"},{"internalType":"uint256","name":"","type":"uint256"}],"name":"producers","outputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"uint256","name":"power","type":"uint256"},{"internalType":"address","name":"signer","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"ROUND_TYPE","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"BOR_ID","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"spanNumbers","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"VOTE_TYPE","outputs":[{"internalType":"uint8","name":"","type":"uint8"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"","type":"uint256"},{"internalType":"uint256","name":"","type":"uint256"}],"name":"validators","outputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"uint256","name":"power","type":"uint256"},{"internalType":"address","name":"signer","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"spans","outputs":[{"internalType":"uint256","name":"number","type":"uint256"},{"internalType":"uint256","name":"startBlock","type":"uint256"},{"internalType":"uint256","name":"endBlock","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"inputs":[],"payable":false,"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":true,"internalType":"uint256","name":"startBlock","type":"uint256"},{"indexed":true,"internalType":"uint256","name":"endBlock","type":"uint256"}],"name":"NewSpan","type":"event"},{"constant":true,"inputs":[],"name":"currentSprint","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"}],"name":"getSpan","outputs":[{"internalType":"uint256","name":"number","type":"uint256"},{"internalType":"uint256","name":"startBlock","type":"uint256"},{"internalType":"uint256","name":"endBlock","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"
getCurrentSpan","outputs":[{"internalType":"uint256","name":"number","type":"uint256"},{"internalType":"uint256","name":"startBlock","type":"uint256"},{"internalType":"uint256","name":"endBlock","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getNextSpan","outputs":[{"internalType":"uint256","name":"number","type":"uint256"},{"internalType":"uint256","name":"startBlock","type":"uint256"},{"internalType":"uint256","name":"endBlock","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"number","type":"uint256"}],"name":"getSpanByBlock","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"currentSpanNumber","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"}],"name":"getValidatorsTotalStakeBySpan","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"}],"name":"getProducersTotalStakeBySpan","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"},{"internalType":"address","name":"signer","type":"address"}],"name":"getValidatorBySigner","outputs":[{"components":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"uint256","name":"power","type":"uint256"},{"internalType":"address","name":"signer","type":"address"}],"internalType":"struct 
BorValidatorSet.Validator","name":"result","type":"tuple"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"},{"internalType":"address","name":"signer","type":"address"}],"name":"isValidator","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"},{"internalType":"address","name":"signer","type":"address"}],"name":"isProducer","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"address","name":"signer","type":"address"}],"name":"isCurrentValidator","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"address","name":"signer","type":"address"}],"name":"isCurrentProducer","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"number","type":"uint256"}],"name":"getBorValidators","outputs":[{"internalType":"address[]","name":"","type":"address[]"},{"internalType":"uint256[]","name":"","type":"uint256[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getInitialValidators","outputs":[{"internalType":"address[]","name":"","type":"address[]"},{"internalType":"uint256[]","name":"","type":"uint256[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getValidators","outputs":[{"internalType":"address[]","name":"","type":"address[]"},{"internalType":"uint256[]","name":"","type":"uint256[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"uint256","name":"newSpan","type":"uint256"},{"internalType":"uint256","name":"startBlock","type":"uint256"},{"internalType":"uint256","name":"endBlock","type":"uint256"},{"internalType":"bytes","name":"validatorBytes","type":"bytes"},{"internalType":"bytes","name":"producerBytes","type":"bytes"}],"name":"commitSpan","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"},{"internalType":"bytes32","name":"dataHash","type":"bytes32"},{"internalType":"bytes","name":"sigs","type":"bytes"}],"name":"getStakePowerBySigs","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"bytes32","name":"rootHash","type":"bytes32"},{"internalType":"bytes32","name":"leaf","type":"bytes32"},{"internalType":"bytes","name":"proof","type":"bytes"}],"name":"checkMembership","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"pure","type":"function"},{"constant":true,"inputs":[{"internalType":"bytes32","name":"d","type":"bytes32"}],"name":"leafNode","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"payable":false,"stateMutability":"pure","type":"function"},{"constant":true,"inputs":[{"internalType":"bytes32","name":"left","type":"bytes32"},{"internalType":"bytes32","name":"right","type":"bytes32"}],"name":"innerNode","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}
],"payable":false,"stateMutability":"pure","type":"function"}]` - StateReceiverABI = `[{"constant":true,"inputs":[],"name":"SYSTEM_ADDRESS","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"lastStateId","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"uint256","name":"syncTime","type":"uint256"},{"internalType":"bytes","name":"recordBytes","type":"bytes"}],"name":"commitState","outputs":[{"internalType":"bool","name":"success","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]` -) - -func NewGenesisContractsClient( - chainConfig *chain.Config, - validatorContract, - stateReceiverContract string, - logger log.Logger, -) *GenesisContractsClient { - return &GenesisContractsClient{ - validatorSetABI: ValidatorSet(), - stateReceiverABI: StateReceiver(), - ValidatorContract: libcommon.HexToAddress(validatorContract), - StateReceiverContract: libcommon.HexToAddress(stateReceiverContract), - chainConfig: chainConfig, - logger: logger, - } -} - -func (gc *GenesisContractsClient) CommitState(event rlp.RawValue, syscall consensus.SystemCall) error { - _, err := syscall(gc.StateReceiverContract, event) - return err -} - -func (gc *GenesisContractsClient) LastStateId(syscall consensus.SystemCall) (*big.Int, error) { - const method = "lastStateId" - - data, err := gc.stateReceiverABI.Pack(method) - if err != nil { - gc.logger.Error("[bor] Unable to pack tx for LastStateId", "err", err) - return nil, err - } - - result, err := syscall(gc.StateReceiverContract, data) - if err != nil { - return nil, err - } - - var ret = new(*big.Int) - if err := gc.stateReceiverABI.UnpackIntoInterface(ret, method, result); err != nil { - return nil, err - } - return *ret, nil -} diff --git a/consensus/bor/finality/generics/generics.go b/consensus/bor/finality/generics/generics.go deleted file mode 100644 index d54b26fbbda..00000000000 --- a/consensus/bor/finality/generics/generics.go +++ /dev/null @@ -1,22 +0,0 @@ -package generics - -import ( - "sync/atomic" - - libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon/core/types" -) - -func Empty[T any]() (t T) { - return -} - -// BorMilestoneRewind is used as a flag/variable -// Flag: if equals 0, no rewind according to bor whitelisting service -// Variable: if not equals 0, rewind chain back to BorMilestoneRewind -var BorMilestoneRewind atomic.Pointer[uint64] - -type Response struct { - Headers []*types.Header - Hashes []libcommon.Hash -} diff --git a/consensus/bor/genesis_contract.go b/consensus/bor/genesis_contract.go deleted file mode 100644 index 7a232733bf2..00000000000 --- a/consensus/bor/genesis_contract.go +++ /dev/null @@ -1,14 +0,0 @@ -package bor - -import ( - "math/big" - - "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/rlp" -) - -//go:generate mockgen -destination=./mock/genesis_contract_mock.go -package=mock . 
GenesisContract -type GenesisContract interface { - CommitState(event rlp.RawValue, syscall consensus.SystemCall) error - LastStateId(syscall consensus.SystemCall) (*big.Int, error) -} diff --git a/consensus/bor/heimdall/heimdall.go b/consensus/bor/heimdall/heimdall.go deleted file mode 100644 index 6d81f1aac2b..00000000000 --- a/consensus/bor/heimdall/heimdall.go +++ /dev/null @@ -1,42 +0,0 @@ -package heimdall - -import ( - "context" - - "github.com/ledgerwatch/erigon/consensus/bor/clerk" - "github.com/ledgerwatch/erigon/consensus/bor/finality/generics" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" -) - -func MilestoneRewindPending() bool { - return generics.BorMilestoneRewind.Load() != nil && *generics.BorMilestoneRewind.Load() != 0 -} - -//go:generate mockgen -destination=./mock/heimdall_client_mock.go -package=mock . IHeimdallClient -type IHeimdallClient interface { - StateSyncEvents(ctx context.Context, fromID uint64, to int64) ([]*clerk.EventRecordWithTime, error) - Span(ctx context.Context, spanID uint64) (*span.HeimdallSpan, error) - FetchCheckpoint(ctx context.Context, number int64) (*checkpoint.Checkpoint, error) - FetchCheckpointCount(ctx context.Context) (int64, error) - FetchMilestone(ctx context.Context, number int64) (*milestone.Milestone, error) - FetchMilestoneCount(ctx context.Context) (int64, error) - FetchNoAckMilestone(ctx context.Context, milestoneID string) error //Fetch the bool value whether milestone corresponding to the given id failed in the Heimdall - FetchLastNoAckMilestone(ctx context.Context) (string, error) //Fetch latest failed milestone id - FetchMilestoneID(ctx context.Context, milestoneID string) error //Fetch the bool value whether milestone corresponding to the given id is in process in Heimdall - Close() -} - -type HeimdallServer interface { - StateSyncEvents(ctx context.Context, fromID uint64, to int64, limit int) (uint64, []*clerk.EventRecordWithTime, error) - Span(ctx context.Context, spanID uint64) (*span.HeimdallSpan, error) - FetchCheckpoint(ctx context.Context, number int64) (*checkpoint.Checkpoint, error) - FetchCheckpointCount(ctx context.Context) (int64, error) - FetchMilestone(ctx context.Context, number int64) (*milestone.Milestone, error) - FetchMilestoneCount(ctx context.Context) (int64, error) - FetchNoAckMilestone(ctx context.Context, milestoneID string) error - FetchLastNoAckMilestone(ctx context.Context) (string, error) - FetchMilestoneID(ctx context.Context, milestoneID string) error - Close() -} diff --git a/consensus/bor/heimdall/mock/heimdall_client_mock.go b/consensus/bor/heimdall/mock/heimdall_client_mock.go deleted file mode 100644 index e7d29b17ee6..00000000000 --- a/consensus/bor/heimdall/mock/heimdall_client_mock.go +++ /dev/null @@ -1,184 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ledgerwatch/erigon/consensus/bor/heimdall (interfaces: IHeimdallClient) - -// Package mock is a generated GoMock package. 
-package mock - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - clerk "github.com/ledgerwatch/erigon/consensus/bor/clerk" - checkpoint "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint" - milestone "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone" - span "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" -) - -// MockIHeimdallClient is a mock of IHeimdallClient interface. -type MockIHeimdallClient struct { - ctrl *gomock.Controller - recorder *MockIHeimdallClientMockRecorder -} - -// MockIHeimdallClientMockRecorder is the mock recorder for MockIHeimdallClient. -type MockIHeimdallClientMockRecorder struct { - mock *MockIHeimdallClient -} - -// NewMockIHeimdallClient creates a new mock instance. -func NewMockIHeimdallClient(ctrl *gomock.Controller) *MockIHeimdallClient { - mock := &MockIHeimdallClient{ctrl: ctrl} - mock.recorder = &MockIHeimdallClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockIHeimdallClient) EXPECT() *MockIHeimdallClientMockRecorder { - return m.recorder -} - -// Close mocks base method. -func (m *MockIHeimdallClient) Close() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Close") -} - -// Close indicates an expected call of Close. -func (mr *MockIHeimdallClientMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockIHeimdallClient)(nil).Close)) -} - -// FetchCheckpoint mocks base method. -func (m *MockIHeimdallClient) FetchCheckpoint(arg0 context.Context, arg1 int64) (*checkpoint.Checkpoint, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchCheckpoint", arg0, arg1) - ret0, _ := ret[0].(*checkpoint.Checkpoint) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FetchCheckpoint indicates an expected call of FetchCheckpoint. -func (mr *MockIHeimdallClientMockRecorder) FetchCheckpoint(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchCheckpoint", reflect.TypeOf((*MockIHeimdallClient)(nil).FetchCheckpoint), arg0, arg1) -} - -// FetchCheckpointCount mocks base method. -func (m *MockIHeimdallClient) FetchCheckpointCount(arg0 context.Context) (int64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchCheckpointCount", arg0) - ret0, _ := ret[0].(int64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FetchCheckpointCount indicates an expected call of FetchCheckpointCount. -func (mr *MockIHeimdallClientMockRecorder) FetchCheckpointCount(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchCheckpointCount", reflect.TypeOf((*MockIHeimdallClient)(nil).FetchCheckpointCount), arg0) -} - -// FetchLastNoAckMilestone mocks base method. -func (m *MockIHeimdallClient) FetchLastNoAckMilestone(arg0 context.Context) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchLastNoAckMilestone", arg0) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FetchLastNoAckMilestone indicates an expected call of FetchLastNoAckMilestone. 
-func (mr *MockIHeimdallClientMockRecorder) FetchLastNoAckMilestone(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchLastNoAckMilestone", reflect.TypeOf((*MockIHeimdallClient)(nil).FetchLastNoAckMilestone), arg0) -} - -// FetchMilestone mocks base method. -func (m *MockIHeimdallClient) FetchMilestone(arg0 context.Context, arg1 int64) (*milestone.Milestone, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchMilestone", arg0, arg1) - ret0, _ := ret[0].(*milestone.Milestone) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FetchMilestone indicates an expected call of FetchMilestone. -func (mr *MockIHeimdallClientMockRecorder) FetchMilestone(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchMilestone", reflect.TypeOf((*MockIHeimdallClient)(nil).FetchMilestone), arg0, arg1) -} - -// FetchMilestoneCount mocks base method. -func (m *MockIHeimdallClient) FetchMilestoneCount(arg0 context.Context) (int64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchMilestoneCount", arg0) - ret0, _ := ret[0].(int64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FetchMilestoneCount indicates an expected call of FetchMilestoneCount. -func (mr *MockIHeimdallClientMockRecorder) FetchMilestoneCount(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchMilestoneCount", reflect.TypeOf((*MockIHeimdallClient)(nil).FetchMilestoneCount), arg0) -} - -// FetchMilestoneID mocks base method. -func (m *MockIHeimdallClient) FetchMilestoneID(arg0 context.Context, arg1 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchMilestoneID", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// FetchMilestoneID indicates an expected call of FetchMilestoneID. -func (mr *MockIHeimdallClientMockRecorder) FetchMilestoneID(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchMilestoneID", reflect.TypeOf((*MockIHeimdallClient)(nil).FetchMilestoneID), arg0, arg1) -} - -// FetchNoAckMilestone mocks base method. -func (m *MockIHeimdallClient) FetchNoAckMilestone(arg0 context.Context, arg1 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchNoAckMilestone", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// FetchNoAckMilestone indicates an expected call of FetchNoAckMilestone. -func (mr *MockIHeimdallClientMockRecorder) FetchNoAckMilestone(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchNoAckMilestone", reflect.TypeOf((*MockIHeimdallClient)(nil).FetchNoAckMilestone), arg0, arg1) -} - -// Span mocks base method. -func (m *MockIHeimdallClient) Span(arg0 context.Context, arg1 uint64) (*span.HeimdallSpan, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Span", arg0, arg1) - ret0, _ := ret[0].(*span.HeimdallSpan) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Span indicates an expected call of Span. -func (mr *MockIHeimdallClientMockRecorder) Span(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Span", reflect.TypeOf((*MockIHeimdallClient)(nil).Span), arg0, arg1) -} - -// StateSyncEvents mocks base method. 
-func (m *MockIHeimdallClient) StateSyncEvents(arg0 context.Context, arg1 uint64, arg2 int64) ([]*clerk.EventRecordWithTime, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StateSyncEvents", arg0, arg1, arg2) - ret0, _ := ret[0].([]*clerk.EventRecordWithTime) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// StateSyncEvents indicates an expected call of StateSyncEvents. -func (mr *MockIHeimdallClientMockRecorder) StateSyncEvents(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSyncEvents", reflect.TypeOf((*MockIHeimdallClient)(nil).StateSyncEvents), arg0, arg1, arg2) -} diff --git a/consensus/bor/heimdallgrpc/checkpoint.go b/consensus/bor/heimdallgrpc/checkpoint.go deleted file mode 100644 index 17a86ae9e38..00000000000 --- a/consensus/bor/heimdallgrpc/checkpoint.go +++ /dev/null @@ -1,50 +0,0 @@ -package heimdallgrpc - -import ( - "context" - "math/big" - - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint" - - proto "github.com/maticnetwork/polyproto/heimdall" - protoutils "github.com/maticnetwork/polyproto/utils" -) - -func (h *HeimdallGRPCClient) FetchCheckpointCount(ctx context.Context) (int64, error) { - h.logger.Info("Fetching checkpoint count") - - res, err := h.client.FetchCheckpointCount(ctx, nil) - if err != nil { - return 0, err - } - - h.logger.Info("Fetched checkpoint count") - - return res.Result.Result, nil -} - -func (h *HeimdallGRPCClient) FetchCheckpoint(ctx context.Context, number int64) (*checkpoint.Checkpoint, error) { - req := &proto.FetchCheckpointRequest{ - ID: number, - } - - h.logger.Info("Fetching checkpoint", "number", number) - - res, err := h.client.FetchCheckpoint(ctx, req) - if err != nil { - return nil, err - } - - h.logger.Info("Fetched checkpoint", "number", number) - - checkpoint := &checkpoint.Checkpoint{ - StartBlock: new(big.Int).SetUint64(res.Result.StartBlock), - EndBlock: new(big.Int).SetUint64(res.Result.EndBlock), - RootHash: protoutils.ConvertH256ToHash(res.Result.RootHash), - Proposer: protoutils.ConvertH160toAddress(res.Result.Proposer), - BorChainID: res.Result.BorChainID, - Timestamp: uint64(res.Result.Timestamp.GetSeconds()), - } - - return checkpoint, nil -} diff --git a/consensus/bor/heimdallgrpc/client.go b/consensus/bor/heimdallgrpc/client.go deleted file mode 100644 index 6c5799d0415..00000000000 --- a/consensus/bor/heimdallgrpc/client.go +++ /dev/null @@ -1,53 +0,0 @@ -package heimdallgrpc - -import ( - "time" - - grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" - "github.com/ledgerwatch/log/v3" - proto "github.com/maticnetwork/polyproto/heimdall" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials/insecure" -) - -const ( - stateFetchLimit = 50 -) - -type HeimdallGRPCClient struct { - conn *grpc.ClientConn - client proto.HeimdallClient - logger log.Logger -} - -func NewHeimdallGRPCClient(address string, logger log.Logger) *HeimdallGRPCClient { - opts := []grpc_retry.CallOption{ - grpc_retry.WithMax(10000), - grpc_retry.WithBackoff(grpc_retry.BackoffLinear(5 * time.Second)), - grpc_retry.WithCodes(codes.Internal, codes.Unavailable, codes.Aborted, codes.NotFound), - } - - conn, err := grpc.Dial(address, - grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(opts...)), - grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(opts...)), - grpc.WithTransportCredentials(insecure.NewCredentials()), - ) - if err != nil { - logger.Crit("Failed to connect to 
Heimdall gRPC", "error", err) - } - - logger.Info("Connected to Heimdall gRPC server", "address", address) - - return &HeimdallGRPCClient{ - conn: conn, - client: proto.NewHeimdallClient(conn), - logger: logger, - } -} - -func (h *HeimdallGRPCClient) Close() { - h.logger.Debug("Shutdown detected, Closing Heimdall gRPC client") - h.conn.Close() -} diff --git a/consensus/bor/heimdallgrpc/milestone.go b/consensus/bor/heimdallgrpc/milestone.go deleted file mode 100644 index a42bab955c5..00000000000 --- a/consensus/bor/heimdallgrpc/milestone.go +++ /dev/null @@ -1,103 +0,0 @@ -package heimdallgrpc - -import ( - "context" - "fmt" - "math/big" - - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone" - - proto "github.com/maticnetwork/polyproto/heimdall" - protoutils "github.com/maticnetwork/polyproto/utils" -) - -func (h *HeimdallGRPCClient) FetchMilestoneCount(ctx context.Context) (int64, error) { - h.logger.Info("Fetching milestone count") - - res, err := h.client.FetchMilestoneCount(ctx, nil) - if err != nil { - return 0, err - } - - h.logger.Info("Fetched milestone count") - - return res.Result.Count, nil -} - -func (h *HeimdallGRPCClient) FetchMilestone(ctx context.Context, number int64) (*milestone.Milestone, error) { - h.logger.Info("Fetching milestone") - - // TODO: use number - res, err := h.client.FetchMilestone(ctx, nil) - if err != nil { - return nil, err - } - - h.logger.Info("Fetched milestone") - - milestone := &milestone.Milestone{ - StartBlock: new(big.Int).SetUint64(res.Result.StartBlock), - EndBlock: new(big.Int).SetUint64(res.Result.EndBlock), - Hash: protoutils.ConvertH256ToHash(res.Result.RootHash), - Proposer: protoutils.ConvertH160toAddress(res.Result.Proposer), - BorChainID: res.Result.BorChainID, - Timestamp: uint64(res.Result.Timestamp.GetSeconds()), - } - - return milestone, nil -} - -func (h *HeimdallGRPCClient) FetchLastNoAckMilestone(ctx context.Context) (string, error) { - h.logger.Info("Fetching latest no ack milestone Id") - - res, err := h.client.FetchLastNoAckMilestone(ctx, nil) - if err != nil { - return "", err - } - - h.logger.Info("Fetched last no-ack milestone") - - return res.Result.Result, nil -} - -func (h *HeimdallGRPCClient) FetchNoAckMilestone(ctx context.Context, milestoneID string) error { - req := &proto.FetchMilestoneNoAckRequest{ - MilestoneID: milestoneID, - } - - h.logger.Info("Fetching no ack milestone", "milestoneID", milestoneID) - - res, err := h.client.FetchNoAckMilestone(ctx, req) - if err != nil { - return err - } - - if !res.Result.Result { - return fmt.Errorf("Not in rejected list: milestoneID %q", milestoneID) - } - - h.logger.Info("Fetched no ack milestone", "milestoneID", milestoneID) - - return nil -} - -func (h *HeimdallGRPCClient) FetchMilestoneID(ctx context.Context, milestoneID string) error { - req := &proto.FetchMilestoneIDRequest{ - MilestoneID: milestoneID, - } - - h.logger.Info("Fetching milestone id", "milestoneID", milestoneID) - - res, err := h.client.FetchMilestoneID(ctx, req) - if err != nil { - return err - } - - if !res.Result.Result { - return fmt.Errorf("This milestoneID %q does not exist", milestoneID) - } - - h.logger.Info("Fetched milestone id", "milestoneID", milestoneID) - - return nil -} diff --git a/consensus/bor/heimdallgrpc/server.go b/consensus/bor/heimdallgrpc/server.go deleted file mode 100644 index 8139c33ddac..00000000000 --- a/consensus/bor/heimdallgrpc/server.go +++ /dev/null @@ -1,249 +0,0 @@ -package heimdallgrpc - -import ( - "context" - "fmt" - "net" - "time" - - 
"github.com/ledgerwatch/erigon-lib/gointerfaces" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall" - "github.com/ledgerwatch/log/v3" - proto "github.com/maticnetwork/polyproto/heimdall" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/types/known/emptypb" - "google.golang.org/protobuf/types/known/timestamppb" -) - -type HeimdallGRPCServer struct { - proto.UnimplementedHeimdallServer - heimdall heimdall.HeimdallServer - logger log.Logger -} - -func (h *HeimdallGRPCServer) Span(ctx context.Context, in *proto.SpanRequest) (*proto.SpanResponse, error) { - result, err := h.heimdall.Span(ctx, in.ID) - - if err != nil { - h.logger.Error("[bor.heimdall] Error while fetching span") - return nil, err - } - - validators := make([]*proto.Validator, len(result.ValidatorSet.Validators)) - - for i, validator := range result.ValidatorSet.Validators { - h160 := gointerfaces.ConvertAddressToH160(validator.Address) - validators[i] = &proto.Validator{ - ID: validator.ID, - Address: &proto.H160{ - Hi: &proto.H128{ - Hi: h160.Hi.Hi, - Lo: h160.Hi.Lo, - }, - Lo: h160.Lo, - }, - VotingPower: validator.VotingPower, - ProposerPriority: validator.ProposerPriority, - } - } - - var proposer *proto.Validator - - if vsp := result.ValidatorSet.Proposer; vsp != nil { - proposerH160 := gointerfaces.ConvertAddressToH160(vsp.Address) - proposer = &proto.Validator{ - ID: vsp.ID, - Address: &proto.H160{ - Hi: &proto.H128{ - Hi: proposerH160.Hi.Hi, - Lo: proposerH160.Hi.Lo, - }, - Lo: proposerH160.Lo, - }, - VotingPower: vsp.VotingPower, - ProposerPriority: vsp.ProposerPriority, - } - } - - producers := make([]*proto.Validator, len(result.SelectedProducers)) - - for i, producer := range result.SelectedProducers { - h160 := gointerfaces.ConvertAddressToH160(producer.Address) - producers[i] = &proto.Validator{ - ID: producer.ID, - Address: &proto.H160{ - Hi: &proto.H128{ - Hi: h160.Hi.Hi, - Lo: h160.Hi.Lo, - }, - Lo: h160.Lo, - }, - VotingPower: producer.VotingPower, - ProposerPriority: producer.ProposerPriority, - } - } - - resp := &proto.SpanResponse{ - Result: &proto.Span{ - ID: result.ID, - StartBlock: result.StartBlock, - EndBlock: result.EndBlock, - ValidatorSet: &proto.ValidatorSet{ - Validators: validators, - Proposer: proposer, - }, - SelectedProducers: producers, - ChainID: result.ChainID, - }, - } - - return resp, nil -} - -func (h *HeimdallGRPCServer) FetchCheckpointCount(ctx context.Context, in *emptypb.Empty) (*proto.FetchCheckpointCountResponse, error) { - count, err := h.heimdall.FetchCheckpointCount(ctx) - - if err != nil { - h.logger.Error("[bor.heimdall] Error while fetching checkpoint count") - return nil, err - } - - resp := &proto.FetchCheckpointCountResponse{} - resp.Height = fmt.Sprint(count) - - return resp, nil -} - -func (h *HeimdallGRPCServer) FetchCheckpoint(ctx context.Context, in *proto.FetchCheckpointRequest) (*proto.FetchCheckpointResponse, error) { - - _ /*checkpoint*/, err := h.heimdall.FetchCheckpoint(ctx, in.ID) - - if err != nil { - h.logger.Error("[bor.heimdall] Error while fetching checkpoint") - return nil, err - } - - /* TODO - - var hash [32]byte - - copy(hash[:], checkPoint.RootHash.Bytes()) - - var address [20]byte - - copy(address[:], checkPoint.Proposer.Bytes()) - */ - - resp := &proto.FetchCheckpointResponse{} - - /* TODO - resp.Height = fmt.Sprint(result.Height) - resp.Result = &proto.Checkpoint{ - StartBlock: checkPoint.StartBlock, - EndBlock: checkPoint.EndBlock, - RootHash: 
protoutils.ConvertHashToH256(hash), - Proposer: protoutils.ConvertAddressToH160(address), - Timestamp: timestamppb.New(time.Unix(int64(checkPoint.TimeStamp), 0)), - BorChainID: checkPoint.BorChainID, - } - */ - return resp, nil -} - -func (h *HeimdallGRPCServer) StateSyncEvents(req *proto.StateSyncEventsRequest, reply proto.Heimdall_StateSyncEventsServer) error { - fromId := req.FromID - - for { - height, events, err := h.heimdall.StateSyncEvents(context.Background(), fromId, int64(req.ToTime), int(req.Limit)) - - if err != nil { - h.logger.Error("[bor.heimdall] Error while fetching event records", "error", err) - return status.Errorf(codes.Internal, err.Error()) - } - - eventRecords := make([]*proto.EventRecord, len(events)) - - for i, event := range events { - eventRecords[i] = &proto.EventRecord{ - ID: event.ID, - Contract: event.Contract.Hex(), - Data: event.Data.String(), - TxHash: event.TxHash.Hex(), - LogIndex: event.LogIndex, - ChainID: event.ChainID, - Time: timestamppb.New(event.Time), - } - } - - if len(eventRecords) == 0 { - break - } - - err = reply.Send(&proto.StateSyncEventsResponse{ - Height: fmt.Sprint(height), - Result: eventRecords, - }) - - if err != nil { - h.logger.Error("[bor.heimdall] Error while sending event record", "error", err) - return status.Errorf(codes.Internal, err.Error()) - } - - if len(eventRecords) < int(req.Limit) { - break - } - - fromId += req.Limit - } - - return nil -} - -// StartHeimdallServer creates a heimdall GRPC server - which is implemented via the passed in client -// interface. It is intended for use in testing where more than a single test validator is required rather -// than to replace the maticnetwork implementation -func StartHeimdallServer(shutDownCtx context.Context, heimdall heimdall.HeimdallServer, addr string, logger log.Logger) error { - grpcServer := grpc.NewServer(withLoggingUnaryInterceptor(logger)) - proto.RegisterHeimdallServer(grpcServer, - &HeimdallGRPCServer{ - heimdall: heimdall, - logger: logger, - }) - - lis, err := net.Listen("tcp", addr) - if err != nil { - return err - } - - go func() { - if err := grpcServer.Serve(lis); err != nil { - logger.Error("[bor.heimdall] failed to serve grpc server", "err", err) - } - - <-shutDownCtx.Done() - grpcServer.Stop() - lis.Close() - logger.Info("[bor.heimdall] GRPC Server stopped", "addr", addr) - }() - - logger.Info("[bor.heimdall] GRPC Server started", "addr", addr) - - return nil -} - -func withLoggingUnaryInterceptor(logger log.Logger) grpc.ServerOption { - return grpc.UnaryInterceptor(func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - start := time.Now() - - h, err := handler(ctx, req) - if err != nil { - err = status.Errorf(codes.Internal, err.Error()) - } - - logger.Debug("[bor.heimdall] Request", "method", info.FullMethod, "duration", time.Since(start), "error", err) - - return h, err - }) -} diff --git a/consensus/bor/heimdallgrpc/span.go b/consensus/bor/heimdallgrpc/span.go deleted file mode 100644 index 7bc6ddb8f78..00000000000 --- a/consensus/bor/heimdallgrpc/span.go +++ /dev/null @@ -1,62 +0,0 @@ -package heimdallgrpc - -import ( - "context" - - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" - "github.com/ledgerwatch/erigon/consensus/bor/valset" - - proto "github.com/maticnetwork/polyproto/heimdall" - protoutils "github.com/maticnetwork/polyproto/utils" -) - -func (h *HeimdallGRPCClient) Span(ctx context.Context, spanID uint64) (*span.HeimdallSpan, error) { - req := 
&proto.SpanRequest{ - ID: spanID, - } - - h.logger.Info("Fetching span", "spanID", spanID) - - res, err := h.client.Span(ctx, req) - if err != nil { - return nil, err - } - - h.logger.Info("Fetched span", "spanID", spanID) - - return parseSpan(res.Result), nil -} - -func parseSpan(protoSpan *proto.Span) *span.HeimdallSpan { - resp := &span.HeimdallSpan{ - Span: span.Span{ - ID: protoSpan.ID, - StartBlock: protoSpan.StartBlock, - EndBlock: protoSpan.EndBlock, - }, - ValidatorSet: valset.ValidatorSet{}, - SelectedProducers: []valset.Validator{}, - ChainID: protoSpan.ChainID, - } - - for _, validator := range protoSpan.ValidatorSet.Validators { - resp.ValidatorSet.Validators = append(resp.ValidatorSet.Validators, parseValidator(validator)) - } - - resp.ValidatorSet.Proposer = parseValidator(protoSpan.ValidatorSet.Proposer) - - for _, validator := range protoSpan.SelectedProducers { - resp.SelectedProducers = append(resp.SelectedProducers, *parseValidator(validator)) - } - - return resp -} - -func parseValidator(validator *proto.Validator) *valset.Validator { - return &valset.Validator{ - ID: validator.ID, - Address: protoutils.ConvertH160toAddress(validator.Address), - VotingPower: validator.VotingPower, - ProposerPriority: validator.ProposerPriority, - } -} diff --git a/consensus/bor/heimdallgrpc/state_sync.go b/consensus/bor/heimdallgrpc/state_sync.go deleted file mode 100644 index e1b49e67d93..00000000000 --- a/consensus/bor/heimdallgrpc/state_sync.go +++ /dev/null @@ -1,58 +0,0 @@ -package heimdallgrpc - -import ( - "context" - "errors" - "io" - - libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon/consensus/bor/clerk" - proto "github.com/maticnetwork/polyproto/heimdall" -) - -func (h *HeimdallGRPCClient) StateSyncEvents(ctx context.Context, fromID uint64, to int64) ([]*clerk.EventRecordWithTime, error) { - eventRecords := make([]*clerk.EventRecordWithTime, 0) - - req := &proto.StateSyncEventsRequest{ - FromID: fromID, - ToTime: uint64(to), - Limit: uint64(stateFetchLimit), - } - - var ( - res proto.Heimdall_StateSyncEventsClient - events *proto.StateSyncEventsResponse - err error - ) - - res, err = h.client.StateSyncEvents(ctx, req) - if err != nil { - return nil, err - } - - for { - events, err = res.Recv() - if errors.Is(err, io.EOF) { - return eventRecords, nil - } - - if err != nil { - return nil, err - } - - for _, event := range events.Result { - eventRecord := &clerk.EventRecordWithTime{ - EventRecord: clerk.EventRecord{ - ID: event.ID, - Contract: libcommon.HexToAddress(event.Contract), - Data: libcommon.Hex2Bytes(event.Data[2:]), - TxHash: libcommon.HexToHash(event.TxHash), - LogIndex: event.LogIndex, - ChainID: event.ChainID, - }, - Time: event.Time.AsTime(), - } - eventRecords = append(eventRecords, eventRecord) - } - } -} diff --git a/consensus/bor/mock/genesis_contract_mock.go b/consensus/bor/mock/genesis_contract_mock.go deleted file mode 100644 index 9ad12ae63d4..00000000000 --- a/consensus/bor/mock/genesis_contract_mock.go +++ /dev/null @@ -1,66 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ledgerwatch/erigon/consensus/bor (interfaces: GenesisContract) - -// Package mock is a generated GoMock package. -package mock - -import ( - big "math/big" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - consensus "github.com/ledgerwatch/erigon/consensus" - rlp "github.com/ledgerwatch/erigon/rlp" -) - -// MockGenesisContract is a mock of GenesisContract interface. 
-type MockGenesisContract struct { - ctrl *gomock.Controller - recorder *MockGenesisContractMockRecorder -} - -// MockGenesisContractMockRecorder is the mock recorder for MockGenesisContract. -type MockGenesisContractMockRecorder struct { - mock *MockGenesisContract -} - -// NewMockGenesisContract creates a new mock instance. -func NewMockGenesisContract(ctrl *gomock.Controller) *MockGenesisContract { - mock := &MockGenesisContract{ctrl: ctrl} - mock.recorder = &MockGenesisContractMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockGenesisContract) EXPECT() *MockGenesisContractMockRecorder { - return m.recorder -} - -// CommitState mocks base method. -func (m *MockGenesisContract) CommitState(arg0 rlp.RawValue, arg1 consensus.SystemCall) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CommitState", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// CommitState indicates an expected call of CommitState. -func (mr *MockGenesisContractMockRecorder) CommitState(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitState", reflect.TypeOf((*MockGenesisContract)(nil).CommitState), arg0, arg1) -} - -// LastStateId mocks base method. -func (m *MockGenesisContract) LastStateId(arg0 consensus.SystemCall) (*big.Int, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LastStateId", arg0) - ret0, _ := ret[0].(*big.Int) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// LastStateId indicates an expected call of LastStateId. -func (mr *MockGenesisContractMockRecorder) LastStateId(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastStateId", reflect.TypeOf((*MockGenesisContract)(nil).LastStateId), arg0) -} diff --git a/consensus/bor/span_id.go b/consensus/bor/span_id.go deleted file mode 100644 index 699e7fef98c..00000000000 --- a/consensus/bor/span_id.go +++ /dev/null @@ -1,20 +0,0 @@ -package bor - -const ( - spanLength = 6400 // Number of blocks in a span - zerothSpanEnd = 255 // End block of 0th span -) - -func SpanIDAt(number uint64) uint64 { - if number > zerothSpanEnd { - return 1 + (number-zerothSpanEnd-1)/spanLength - } - return 0 -} - -func SpanEndBlockNum(spanID uint64) uint64 { - if spanID > 0 { - return spanID*spanLength + zerothSpanEnd - } - return zerothSpanEnd -} diff --git a/consensus/bor/spanner.go b/consensus/bor/spanner.go deleted file mode 100644 index 77769ea835e..00000000000 --- a/consensus/bor/spanner.go +++ /dev/null @@ -1,16 +0,0 @@ -package bor - -import ( - libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" - "github.com/ledgerwatch/erigon/consensus/bor/valset" -) - -//go:generate mockgen -destination=./mock/spanner_mock.go -package=mock . 
Spanner -type Spanner interface { - GetCurrentSpan(syscall consensus.SystemCall) (*span.Span, error) - GetCurrentValidators(spanId uint64, signer libcommon.Address, chain consensus.ChainHeaderReader) ([]*valset.Validator, error) - GetCurrentProducers(spanId uint64, signer libcommon.Address, chain consensus.ChainHeaderReader) ([]*valset.Validator, error) - CommitSpan(heimdallSpan span.HeimdallSpan, syscall consensus.SystemCall) error -} diff --git a/consensus/chain_reader.go b/consensus/chain_reader.go index f79de40c4cc..c7d81953b7c 100644 --- a/consensus/chain_reader.go +++ b/consensus/chain_reader.go @@ -82,7 +82,7 @@ func (cr ChainReaderImpl) FrozenBlocks() uint64 { func (cr ChainReaderImpl) BorSpan(spanId uint64) []byte { spanBytes, err := cr.BlockReader.Span(context.Background(), cr.Db, spanId) if err != nil { - log.Error("BorSpan failed", "err", err) + log.Error("[consensus] BorSpan failed", "err", err) } return spanBytes } diff --git a/consensus/clique/snapshot_test.go b/consensus/clique/snapshot_test.go index 0f487177bfc..de00f536d79 100644 --- a/consensus/clique/snapshot_test.go +++ b/consensus/clique/snapshot_test.go @@ -23,6 +23,8 @@ import ( "sort" "testing" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/length" @@ -35,7 +37,7 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/stages/mock" - "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon/turbo/testlog" ) // testerAccountPool is a pool to maintain currently active tester accounts, @@ -392,6 +394,7 @@ func TestClique(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { + logger := testlog.Logger(t, log.LvlInfo) // Create the account pool and generate the initial set of signers accounts := newTesterAccountPool() @@ -509,7 +512,13 @@ func TestClique(t *testing.T) { var snap *clique.Snapshot if err := m.DB.View(context.Background(), func(tx kv.Tx) error { - snap, err = engine.Snapshot(stagedsync.ChainReader{Cfg: config, Db: tx, BlockReader: m.BlockReader}, head.NumberU64(), head.Hash(), nil) + chainReader := stagedsync.ChainReader{ + Cfg: config, + Db: tx, + BlockReader: m.BlockReader, + Logger: logger, + } + snap, err = engine.Snapshot(chainReader, head.NumberU64(), head.Hash(), nil) if err != nil { return err } diff --git a/consensus/misc/eip1559.go b/consensus/misc/eip1559.go index 82e68e33bf8..7c3ba36ee8e 100644 --- a/consensus/misc/eip1559.go +++ b/consensus/misc/eip1559.go @@ -22,8 +22,12 @@ import ( "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/polygon/bor/borcfg" "github.com/ledgerwatch/erigon/common/math" + "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/params" ) @@ -58,6 +62,42 @@ func VerifyEip1559Header(config *chain.Config, parent, header *types.Header, ski return nil } +var Eip1559FeeCalculator eip1559Calculator + +type eip1559Calculator struct{} + +func (f eip1559Calculator) CurrentFees(chainConfig *chain.Config, db kv.Getter) (baseFee uint64, blobFee uint64, minBlobGasPrice uint64, err error) { + hash := rawdb.ReadHeadHeaderHash(db) + + if hash == (libcommon.Hash{}) { + return 0, 0, 0, fmt.Errorf("can't get head header hash") + } + + currentHeader, err 
:= rawdb.ReadHeaderByHash(db, hash) + + if err != nil { + return 0, 0, 0, err + } + + if chainConfig != nil { + if currentHeader.BaseFee != nil { + baseFee = CalcBaseFee(chainConfig, currentHeader, currentHeader.Time).Uint64() + } + + if currentHeader.ExcessBlobGas != nil { + excessBlobGas := CalcExcessBlobGas(chainConfig, currentHeader) + b, err := GetBlobGasPrice(chainConfig, excessBlobGas) + if err == nil { + blobFee = b.Uint64() + } + } + } + + minBlobGasPrice = chainConfig.GetMinBlobGasPrice() + + return baseFee, blobFee, minBlobGasPrice, nil +} + // CalcBaseFee calculates the basefee of the header. func CalcBaseFee(config *chain.Config, parent *types.Header, time uint64) *big.Int { // If the current block is pre bedrock, return 0. @@ -106,7 +146,7 @@ func CalcBaseFee(config *chain.Config, parent *types.Header, time uint64) *big.I func getBaseFeeChangeDenominator(config *chain.Config, number, time uint64) uint64 { // If we're running bor based chain post delhi hardfork, return the new value - if config.Bor != nil && config.Bor.IsDelhi(number) { + if borConfig, ok := config.Bor.(*borcfg.BorConfig); ok && borConfig.IsDelhi(number) { return params.BaseFeeChangeDenominatorPostDelhi } diff --git a/core/allocs/amoy.json b/core/allocs/amoy.json index df3d74e4526..5cdc8e52465 100644 --- a/core/allocs/amoy.json +++ b/core/allocs/amoy.json @@ -14,4 +14,4 @@ "6aB3d36C46ecFb9B9c0bD51CB1c3da5A2C81cea6": { "balance": "0x3635c9adc5dea00000" } -} \ No newline at end of file +} diff --git a/core/block_validator_test.go b/core/block_validator_test.go index 34ab7a2c21d..e57d40d3477 100644 --- a/core/block_validator_test.go +++ b/core/block_validator_test.go @@ -20,13 +20,17 @@ import ( "context" "testing" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/stages/mock" + "github.com/ledgerwatch/erigon/turbo/testlog" ) // Tests that simple header verification works, for both good and bad blocks. 
@@ -37,6 +41,7 @@ func TestHeaderVerification(t *testing.T) { gspec = &types.Genesis{Config: params.TestChainConfig} engine = ethash.NewFaker() ) + logger := testlog.Logger(t, log.LvlInfo) checkStateRoot := true m := mock.MockWithGenesisEngine(t, gspec, engine, false, checkStateRoot) @@ -48,13 +53,19 @@ func TestHeaderVerification(t *testing.T) { for i := 0; i < chain.Length(); i++ { if err := m.DB.View(context.Background(), func(tx kv.Tx) error { for j, valid := range []bool{true, false} { + chainReader := stagedsync.ChainReader{ + Cfg: *params.TestChainConfig, + Db: tx, + BlockReader: m.BlockReader, + Logger: logger, + } + var engine consensus.Engine if valid { - engine := ethash.NewFaker() - err = engine.VerifyHeader(stagedsync.ChainReader{Cfg: *params.TestChainConfig, Db: tx, BlockReader: m.BlockReader}, chain.Headers[i], true) + engine = ethash.NewFaker() } else { - engine := ethash.NewFakeFailer(chain.Headers[i].Number.Uint64()) - err = engine.VerifyHeader(stagedsync.ChainReader{Cfg: *params.TestChainConfig, Db: tx, BlockReader: m.BlockReader}, chain.Headers[i], true) + engine = ethash.NewFakeFailer(chain.Headers[i].Number.Uint64()) } + err = engine.VerifyHeader(chainReader, chain.Headers[i], true) if (err == nil) != valid { t.Errorf("test %d.%d: validity mismatch: have %v, want %v", i, j, err, valid) } @@ -79,6 +90,7 @@ func TestHeaderWithSealVerification(t *testing.T) { gspec = &types.Genesis{Config: params.TestChainAuraConfig} engine = ethash.NewFaker() ) + logger := testlog.Logger(t, log.LvlInfo) checkStateRoot := true m := mock.MockWithGenesisEngine(t, gspec, engine, false, checkStateRoot) @@ -91,13 +103,19 @@ func TestHeaderWithSealVerification(t *testing.T) { for i := 0; i < chain.Length(); i++ { if err := m.DB.View(context.Background(), func(tx kv.Tx) error { for j, valid := range []bool{true, false} { + chainReader := stagedsync.ChainReader{ + Cfg: *params.TestChainAuraConfig, + Db: tx, + BlockReader: m.BlockReader, + Logger: logger, + } + var engine consensus.Engine if valid { - engine := ethash.NewFaker() - err = engine.VerifyHeader(stagedsync.ChainReader{Cfg: *params.TestChainAuraConfig, Db: tx, BlockReader: m.BlockReader}, chain.Headers[i], true) + engine = ethash.NewFaker() } else { - engine := ethash.NewFakeFailer(chain.Headers[i].Number.Uint64()) - err = engine.VerifyHeader(stagedsync.ChainReader{Cfg: *params.TestChainAuraConfig, Db: tx, BlockReader: m.BlockReader}, chain.Headers[i], true) + engine = ethash.NewFakeFailer(chain.Headers[i].Number.Uint64()) } + err = engine.VerifyHeader(chainReader, chain.Headers[i], true) if (err == nil) != valid { t.Errorf("test %d.%d: validity mismatch: have %v, want %v", i, j, err, valid) } diff --git a/core/forkid/forkid.go b/core/forkid/forkid.go index 433c9221b18..7e5202657cd 100644 --- a/core/forkid/forkid.go +++ b/core/forkid/forkid.go @@ -244,8 +244,13 @@ func GatherForks(config *chain.Config, genesisTime uint64) (heightForks []uint64 heightForks = append(heightForks, *config.Aura.PosdaoTransition) } - if config.Bor != nil && config.Bor.AgraBlock != nil { - heightForks = append(heightForks, config.Bor.AgraBlock.Uint64()) + if config.Bor != nil { + if config.Bor.GetAgraBlock() != nil { + heightForks = append(heightForks, config.Bor.GetAgraBlock().Uint64()) + } + if config.Bor.GetNapoliBlock() != nil { + heightForks = append(heightForks, config.Bor.GetNapoliBlock().Uint64()) + } } // Sort the fork block numbers & times to permit chronological XOR diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go index 
310beaa739a..179c2dd0e90 100644 --- a/core/forkid/forkid_test.go +++ b/core/forkid/forkid_test.go @@ -84,16 +84,18 @@ func TestCreation(t *testing.T) { params.GoerliChainConfig, params.GoerliGenesisHash, []testcase{ - {0, 1548854791, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 1561651}}, // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople and first Petersburg block - {1561650, 1572443570, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 1561651}}, // Last Petersburg block - {1561651, 1572443585, ID{Hash: checksumToBytes(0xc25efa5c), Next: 4460644}}, // First Istanbul block - {4460643, 1616045376, ID{Hash: checksumToBytes(0xc25efa5c), Next: 4460644}}, // Last Istanbul block - {4460644, 1616045391, ID{Hash: checksumToBytes(0x757a1c47), Next: 5062605}}, // First Berlin block - {5062604, 1625109564, ID{Hash: checksumToBytes(0x757a1c47), Next: 5062605}}, // Last Berlin block - {5062605, 1625109579, ID{Hash: checksumToBytes(0xB8C6299D), Next: 1678832736}}, // First London block - {8656122, 1678832724, ID{Hash: checksumToBytes(0xB8C6299D), Next: 1678832736}}, // Last pre-Shanghai block - {8656123, 1678832784, ID{Hash: checksumToBytes(0xf9843abf), Next: 0}}, // First Shanghai block - {9900000, 1700000000, ID{Hash: checksumToBytes(0xf9843abf), Next: 0}}, // Future Shanghai block (mock) + {0, 1548854791, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 1561651}}, // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople and first Petersburg block + {1561650, 1572443570, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 1561651}}, // Last Petersburg block + {1561651, 1572443585, ID{Hash: checksumToBytes(0xc25efa5c), Next: 4460644}}, // First Istanbul block + {4460643, 1616045376, ID{Hash: checksumToBytes(0xc25efa5c), Next: 4460644}}, // Last Istanbul block + {4460644, 1616045391, ID{Hash: checksumToBytes(0x757a1c47), Next: 5062605}}, // First Berlin block + {5062604, 1625109564, ID{Hash: checksumToBytes(0x757a1c47), Next: 5062605}}, // Last Berlin block + {5062605, 1625109579, ID{Hash: checksumToBytes(0xB8C6299D), Next: 1678832736}}, // First London block + {8656122, 1678832724, ID{Hash: checksumToBytes(0xB8C6299D), Next: 1678832736}}, // Last pre-Shanghai block + {8656123, 1678832784, ID{Hash: checksumToBytes(0xf9843abf), Next: 1705473120}}, // First Shanghai block + {10388175, 1705473108, ID{Hash: checksumToBytes(0xf9843abf), Next: 1705473120}}, // Last Shanghai block + {10388176, 1705473120, ID{Hash: checksumToBytes(0x70cc14e2), Next: 0}}, // First Cancun block + {12000000, 1800000000, ID{Hash: checksumToBytes(0x70cc14e2), Next: 0}}, // Future Cancun block (mock) }, }, // Sepolia test cases @@ -105,8 +107,10 @@ func TestCreation(t *testing.T) { {1735370, 1661130096, ID{Hash: checksumToBytes(0xfe3366e7), Next: 1735371}}, // Last pre-MergeNetsplit block {1735371, 1661130108, ID{Hash: checksumToBytes(0xb96cbd13), Next: 1677557088}}, // First MergeNetsplit block {2990907, 1677557076, ID{Hash: checksumToBytes(0xb96cbd13), Next: 1677557088}}, // Last pre-Shanghai block - {2990908, 1677557088, ID{Hash: checksumToBytes(0xf7f9bc08), Next: 0}}, // First Shanghai block - {5000000, 1700000000, ID{Hash: checksumToBytes(0xf7f9bc08), Next: 0}}, // Future Shanghai block (mock) + {2990908, 1677557088, ID{Hash: checksumToBytes(0xf7f9bc08), Next: 1706655072}}, // First Shanghai block + {5198775, 1706655060, ID{Hash: checksumToBytes(0xf7f9bc08), Next: 1706655072}}, // Last Shanghai block (approx) + {5198776, 1706655072, ID{Hash: checksumToBytes(0x88cf81d9), Next: 0}}, // 
First Cancun block (approx) + {8000000, 1800000000, ID{Hash: checksumToBytes(0x88cf81d9), Next: 0}}, // Future Cancun block (mock) }, }, // Gnosis test cases @@ -128,8 +132,8 @@ func TestCreation(t *testing.T) { {19039999, 1636753575, ID{Hash: checksumToBytes(0x069a83d9), Next: 19040000}}, // Last Berlin block {19040000, 1636753580, ID{Hash: checksumToBytes(0x018479d3), Next: 1690889660}}, // First London block {21735000, 1650443255, ID{Hash: checksumToBytes(0x018479d3), Next: 1690889660}}, // First GIP-31 block - {29272666, 1690889655, ID{Hash: checksumToBytes(0x018479d3), Next: 1690889660}}, // Last pre-Shanghai block (approx) - {29272667, 1690889660, ID{Hash: checksumToBytes(0x2efe91ba), Next: 0}}, // First Shanghai block (approx) + {29242931, 1690889650, ID{Hash: checksumToBytes(0x018479d3), Next: 1690889660}}, // Last pre-Shanghai block + {29242932, 1690889660, ID{Hash: checksumToBytes(0x2efe91ba), Next: 0}}, // First Shanghai block }, }, // Chiado test cases @@ -139,7 +143,10 @@ func TestCreation(t *testing.T) { []testcase{ {0, 0, ID{Hash: checksumToBytes(0x50d39d7b), Next: 1684934220}}, {4100418, 1684934215, ID{Hash: checksumToBytes(0x50d39d7b), Next: 1684934220}}, // Last pre-Shanghai block - {4100419, 1684934220, ID{Hash: checksumToBytes(0xa15a4252), Next: 0}}, // First Shanghai block + {4100419, 1684934220, ID{Hash: checksumToBytes(0xa15a4252), Next: 1706724940}}, // First Shanghai block + {8102175, 1706724935, ID{Hash: checksumToBytes(0xa15a4252), Next: 1706724940}}, // Last Shanghai block (approx) + {8102176, 1706724940, ID{Hash: checksumToBytes(0x5fbc16bc), Next: 0}}, // First Cancun block (approx) + {10000000, 1800000000, ID{Hash: checksumToBytes(0x5fbc16bc), Next: 0}}, // Future Cancun block (mock) }, }, // Mumbai test cases diff --git a/core/genesis_test.go b/core/genesis_test.go index d47284b3ac3..dd1b7489893 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -53,12 +53,12 @@ func TestGenesisBlockRoots(t *testing.T) { require := require.New(t) var err error - block, _, _ := core.GenesisToBlock(core.MainnetGenesisBlock(), "") + block, _, _ := core.GenesisToBlock(core.MainnetGenesisBlock(), "", log.Root()) if block.Hash() != params.MainnetGenesisHash { t.Errorf("wrong mainnet genesis hash, got %v, want %v", block.Hash(), params.MainnetGenesisHash) } - block, _, err = core.GenesisToBlock(core.GnosisGenesisBlock(), "") + block, _, err = core.GenesisToBlock(core.GnosisGenesisBlock(), "", log.Root()) require.NoError(err) if block.Root() != params.GnosisGenesisStateRoot { t.Errorf("wrong Gnosis Chain genesis state root, got %v, want %v", block.Root(), params.GnosisGenesisStateRoot) @@ -67,7 +67,7 @@ func TestGenesisBlockRoots(t *testing.T) { t.Errorf("wrong Gnosis Chain genesis hash, got %v, want %v", block.Hash(), params.GnosisGenesisHash) } - block, _, err = core.GenesisToBlock(core.ChiadoGenesisBlock(), "") + block, _, err = core.GenesisToBlock(core.ChiadoGenesisBlock(), "", log.Root()) require.NoError(err) if block.Root() != params.ChiadoGenesisStateRoot { t.Errorf("wrong Chiado genesis state root, got %v, want %v", block.Root(), params.ChiadoGenesisStateRoot) diff --git a/core/genesis_write.go b/core/genesis_write.go index d0680a371f7..d24e36517f0 100644 --- a/core/genesis_write.go +++ b/core/genesis_write.go @@ -139,7 +139,7 @@ func WriteGenesisBlock(tx kv.RwTx, genesis *types.Genesis, overrideCancunTime, o custom = false } applyOverrides(genesis.Config) - block, _, err1 := write(tx, genesis, tmpDir) + block, _, err1 := write(tx, genesis, tmpDir, logger) if 
err1 != nil { return genesis.Config, nil, err1 } @@ -151,7 +151,7 @@ func WriteGenesisBlock(tx kv.RwTx, genesis *types.Genesis, overrideCancunTime, o // Check whether the genesis block is already written. if genesis != nil { - block, _, err1 := GenesisToBlock(genesis, tmpDir) + block, _, err1 := GenesisToBlock(genesis, tmpDir, logger) if err1 != nil { return genesis.Config, nil, err1 } @@ -208,8 +208,8 @@ func WriteGenesisBlock(tx kv.RwTx, genesis *types.Genesis, overrideCancunTime, o return newCfg, storedBlock, nil } -func WriteGenesisState(g *types.Genesis, tx kv.RwTx, tmpDir string) (*types.Block, *state.IntraBlockState, error) { - block, statedb, err := GenesisToBlock(g, tmpDir) +func WriteGenesisState(g *types.Genesis, tx kv.RwTx, tmpDir string, logger log.Logger) (*types.Block, *state.IntraBlockState, error) { + block, statedb, err := GenesisToBlock(g, tmpDir, logger) if err != nil { return nil, nil, err } @@ -257,13 +257,13 @@ func WriteGenesisState(g *types.Genesis, tx kv.RwTx, tmpDir string) (*types.Bloc } return block, statedb, nil } -func MustCommitGenesis(g *types.Genesis, db kv.RwDB, tmpDir string) *types.Block { +func MustCommitGenesis(g *types.Genesis, db kv.RwDB, tmpDir string, logger log.Logger) *types.Block { tx, err := db.BeginRw(context.Background()) if err != nil { panic(err) } defer tx.Rollback() - block, _, err := write(tx, g, tmpDir) + block, _, err := write(tx, g, tmpDir, logger) if err != nil { panic(err) } @@ -276,8 +276,8 @@ func MustCommitGenesis(g *types.Genesis, db kv.RwDB, tmpDir string) *types.Block // Write writes the block and state of a genesis specification to the database. // The block is committed as the canonical head block. -func write(tx kv.RwTx, g *types.Genesis, tmpDir string) (*types.Block, *state.IntraBlockState, error) { - block, statedb, err2 := WriteGenesisState(g, tx, tmpDir) +func write(tx kv.RwTx, g *types.Genesis, tmpDir string, logger log.Logger) (*types.Block, *state.IntraBlockState, error) { + block, statedb, err2 := WriteGenesisState(g, tx, tmpDir, logger) if err2 != nil { return block, statedb, err2 } @@ -344,9 +344,9 @@ func write(tx kv.RwTx, g *types.Genesis, tmpDir string) (*types.Block, *state.In } // GenesisBlockForTesting creates and writes a block in which addr has the given wei balance. 
-func GenesisBlockForTesting(db kv.RwDB, addr libcommon.Address, balance *big.Int, tmpDir string) *types.Block { +func GenesisBlockForTesting(db kv.RwDB, addr libcommon.Address, balance *big.Int, tmpDir string, logger log.Logger) *types.Block { g := types.Genesis{Alloc: types.GenesisAlloc{addr: {Balance: balance}}, Config: params.TestChainConfig} - block := MustCommitGenesis(&g, db, tmpDir) + block := MustCommitGenesis(&g, db, tmpDir, logger) return block } @@ -355,14 +355,14 @@ type GenAccount struct { Balance *big.Int } -func GenesisWithAccounts(db kv.RwDB, accs []GenAccount, tmpDir string) *types.Block { +func GenesisWithAccounts(db kv.RwDB, accs []GenAccount, tmpDir string, logger log.Logger) *types.Block { g := types.Genesis{Config: params.TestChainConfig} allocs := make(map[libcommon.Address]types.GenesisAccount) for _, acc := range accs { allocs[acc.Addr] = types.GenesisAccount{Balance: acc.Balance} } g.Alloc = allocs - block := MustCommitGenesis(&g, db, tmpDir) + block := MustCommitGenesis(&g, db, tmpDir, logger) return block } @@ -557,7 +557,7 @@ func DeveloperGenesisBlock(period uint64, faucet libcommon.Address) *types.Genes // ToBlock creates the genesis block and writes state of a genesis specification // to the given database (or discards it if nil). -func GenesisToBlock(g *types.Genesis, tmpDir string) (*types.Block, *state.IntraBlockState, error) { +func GenesisToBlock(g *types.Genesis, tmpDir string, logger log.Logger) (*types.Block, *state.IntraBlockState, error) { _ = g.Alloc //nil-check head := &types.Header{ @@ -624,7 +624,7 @@ func GenesisToBlock(g *types.Genesis, tmpDir string) (*types.Block, *state.Intra // TODO(yperbasis): use memdb.MemoryMutation instead defer wg.Done() - genesisTmpDB := mdbx.NewMDBX(log.New()).InMem(tmpDir).MapSize(2 * datasize.GB).GrowthStep(1 * datasize.MB).MustOpen() + genesisTmpDB := mdbx.NewMDBX(logger).InMem(tmpDir).MapSize(2 * datasize.GB).GrowthStep(1 * datasize.MB).MustOpen() defer genesisTmpDB.Close() var tx kv.RwTx if tx, err = genesisTmpDB.BeginRw(context.Background()); err != nil { diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go index d93691c723d..73292258960 100644 --- a/core/rawdb/accessors_metadata.go +++ b/core/rawdb/accessors_metadata.go @@ -19,6 +19,7 @@ package rawdb import ( "encoding/json" "fmt" + "github.com/ledgerwatch/erigon/polygon/bor/borcfg" "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" @@ -34,10 +35,20 @@ func ReadChainConfig(db kv.Getter, hash libcommon.Hash) (*chain.Config, error) { if len(data) == 0 { return nil, nil } + var config chain.Config if err := json.Unmarshal(data, &config); err != nil { return nil, fmt.Errorf("invalid chain config JSON: %x, %w", hash, err) } + + if config.BorJSON != nil { + borConfig := &borcfg.BorConfig{} + if err := json.Unmarshal(config.BorJSON, borConfig); err != nil { + return nil, fmt.Errorf("invalid chain config 'bor' JSON: %x, %w", hash, err) + } + config.Bor = borConfig + } + return &config, nil } @@ -46,10 +57,20 @@ func WriteChainConfig(db kv.Putter, hash libcommon.Hash, cfg *chain.Config) erro if cfg == nil { return nil } + + if cfg.Bor != nil { + borJSON, err := json.Marshal(cfg.Bor) + if err != nil { + return fmt.Errorf("failed to JSON encode chain config 'bor': %w", err) + } + cfg.BorJSON = borJSON + } + data, err := json.Marshal(cfg) if err != nil { return fmt.Errorf("failed to JSON encode chain config: %w", err) } + if err := db.Put(kv.ConfigTable, hash[:], data); err != nil { return 
fmt.Errorf("failed to store chain config: %w", err) } diff --git a/core/rawdb/rawdbreset/reset_stages.go b/core/rawdb/rawdbreset/reset_stages.go index cb5e238b8c8..b791e0e84cf 100644 --- a/core/rawdb/rawdbreset/reset_stages.go +++ b/core/rawdb/rawdbreset/reset_stages.go @@ -20,7 +20,7 @@ import ( "github.com/ledgerwatch/log/v3" ) -func ResetState(db kv.RwDB, ctx context.Context, chain string, tmpDir string) error { +func ResetState(db kv.RwDB, ctx context.Context, chain string, tmpDir string, logger log.Logger) error { // don't reset senders here if err := Reset(ctx, db, stages.HashState); err != nil { return err @@ -44,7 +44,7 @@ func ResetState(db kv.RwDB, ctx context.Context, chain string, tmpDir string) er return err } - if err := ResetExec(ctx, db, chain, tmpDir); err != nil { + if err := ResetExec(ctx, db, chain, tmpDir, logger); err != nil { return err } return nil @@ -130,7 +130,7 @@ func WarmupExec(ctx context.Context, db kv.RwDB) (err error) { return } -func ResetExec(ctx context.Context, db kv.RwDB, chain string, tmpDir string) (err error) { +func ResetExec(ctx context.Context, db kv.RwDB, chain string, tmpDir string, logger log.Logger) (err error) { historyV3 := kvcfg.HistoryV3.FromDB(db) if historyV3 { stateHistoryBuckets = append(stateHistoryBuckets, stateHistoryV3Buckets...) @@ -156,7 +156,7 @@ func ResetExec(ctx context.Context, db kv.RwDB, chain string, tmpDir string) (er } if !historyV3 { genesis := core.GenesisBlockByChainName(chain) - if _, _, err := core.WriteGenesisState(genesis, tx, tmpDir); err != nil { + if _, _, err := core.WriteGenesisState(genesis, tx, tmpDir, logger); err != nil { return err } } diff --git a/core/rlp_test.go b/core/rlp_test.go index 65b98dbff43..0d03b0cdd75 100644 --- a/core/rlp_test.go +++ b/core/rlp_test.go @@ -25,6 +25,7 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon/core/state/temporal" + "github.com/ledgerwatch/log/v3" "golang.org/x/crypto/sha3" "github.com/ledgerwatch/erigon/common/u256" @@ -35,7 +36,7 @@ import ( "github.com/ledgerwatch/erigon/rlp" ) -func getBlock(tb testing.TB, transactions int, uncles int, dataSize int, tmpDir string) *types.Block { +func getBlock(tb testing.TB, transactions int, uncles int, dataSize int, tmpDir string, logger log.Logger) *types.Block { _, db, _ := temporal.NewTestDB(tb, datadir.New(tmpDir), nil) var ( aa = libcommon.HexToAddress("0x000000000000000000000000000000000000aaaa") @@ -49,7 +50,7 @@ func getBlock(tb testing.TB, transactions int, uncles int, dataSize int, tmpDir Config: params.TestChainConfig, Alloc: types.GenesisAlloc{address: {Balance: funds}}, } - genesis = MustCommitGenesis(gspec, db, tmpDir) + genesis = MustCommitGenesis(gspec, db, tmpDir, logger) ) // We need to generate as many blocks +1 as uncles @@ -91,7 +92,7 @@ func TestRlpIterator(t *testing.T) { func testRlpIterator(t *testing.T, txs, uncles, datasize int) { desc := fmt.Sprintf("%d txs [%d datasize] and %d uncles", txs, datasize, uncles) - bodyRlp, _ := rlp.EncodeToBytes(getBlock(t, txs, uncles, datasize, "").Body()) + bodyRlp, _ := rlp.EncodeToBytes(getBlock(t, txs, uncles, datasize, "", log.Root()).Body()) it, err := rlp.NewListIterator(bodyRlp) if err != nil { t.Fatal(err) @@ -150,7 +151,7 @@ func BenchmarkHashing(b *testing.B) { blockRlp []byte ) { - block := getBlock(b, 200, 2, 50, "") + block := getBlock(b, 200, 2, 50, "", log.Root()) bodyRlp, _ = rlp.EncodeToBytes(block.Body()) blockRlp, _ = rlp.EncodeToBytes(block) } diff 
--git a/core/state/rw_v3.go b/core/state/rw_v3.go index 6ed8f08400c..b8bb03c664e 100644 --- a/core/state/rw_v3.go +++ b/core/state/rw_v3.go @@ -521,7 +521,7 @@ func recoverCodeHashPlain(acc *accounts.Account, db kv.Tx, key []byte) { } } -func (rs *StateV3) Unwind(ctx context.Context, tx kv.RwTx, txUnwindTo uint64, agg *libstate.AggregatorV3, accumulator *shards.Accumulator) error { +func (rs *StateV3) Unwind(ctx context.Context, tx kv.RwTx, blockUnwindTo, txUnwindTo uint64, agg *libstate.AggregatorV3, accumulator *shards.Accumulator) error { agg.SetTx(tx) var currentInc uint64 handle := func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { diff --git a/core/system_contract_lookup.go b/core/system_contract_lookup.go index 2905904a2d3..dc0805a1940 100644 --- a/core/system_contract_lookup.go +++ b/core/system_contract_lookup.go @@ -7,6 +7,7 @@ import ( "github.com/ledgerwatch/erigon-lib/chain/networkname" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/hexutility" + "github.com/ledgerwatch/erigon/polygon/bor/borcfg" "github.com/ledgerwatch/erigon/core/systemcontracts" "github.com/ledgerwatch/erigon/core/types" @@ -23,7 +24,8 @@ func init() { allocToCodeRecords(genesisBlock.Alloc, byChain, 0) // Process upgrades chainConfig := params.ChainConfigByChainName(chainName) - for blockNumStr, genesisAlloc := range chainConfig.Bor.BlockAlloc { + borConfig := chainConfig.Bor.(*borcfg.BorConfig) + for blockNumStr, genesisAlloc := range borConfig.BlockAlloc { blockNum, err := strconv.ParseUint(blockNumStr, 10, 64) if err != nil { panic(fmt.Errorf("failed to parse block number in BlockAlloc: %s", err.Error())) diff --git a/core/types/blob_tx.go b/core/types/blob_tx.go index d2bede77c40..32547e054bf 100644 --- a/core/types/blob_tx.go +++ b/core/types/blob_tx.go @@ -1,6 +1,7 @@ package types import ( + "errors" "fmt" "io" "math/big" @@ -47,15 +48,50 @@ func (stx BlobTx) GetBlobGas() uint64 { } func (stx BlobTx) AsMessage(s Signer, baseFee *big.Int, rules *chain.Rules) (Message, error) { - msg, err := stx.DynamicFeeTransaction.AsMessage(s, baseFee, rules) - if err != nil { - return Message{}, err + msg := Message{ + nonce: stx.Nonce, + gasLimit: stx.Gas, + gasPrice: *stx.FeeCap, + tip: *stx.Tip, + feeCap: *stx.FeeCap, + to: stx.To, + amount: *stx.Value, + data: stx.Data, + accessList: stx.AccessList, + checkNonce: true, + } + if !rules.IsCancun { + return msg, errors.New("BlobTx transactions require Cancun") + } + if baseFee != nil { + overflow := msg.gasPrice.SetFromBig(baseFee) + if overflow { + return msg, fmt.Errorf("gasPrice higher than 2^256-1") + } } + msg.gasPrice.Add(&msg.gasPrice, stx.Tip) + if msg.gasPrice.Gt(stx.FeeCap) { + msg.gasPrice.Set(stx.FeeCap) + } + var err error + msg.from, err = stx.Sender(s) msg.maxFeePerBlobGas = *stx.MaxFeePerBlobGas msg.blobHashes = stx.BlobVersionedHashes return msg, err } +func (stx *BlobTx) Sender(signer Signer) (libcommon.Address, error) { + if sc := stx.from.Load(); sc != nil { + return sc.(libcommon.Address), nil + } + addr, err := signer.Sender(stx) + if err != nil { + return libcommon.Address{}, err + } + stx.from.Store(addr) + return addr, nil +} + func (stx BlobTx) Hash() libcommon.Hash { if hash := stx.hash.Load(); hash != nil { return *hash.(*libcommon.Hash) diff --git a/core/types/block_test.go b/core/types/block_test.go index 4e2a8303d3f..9db421134ac 100644 --- a/core/types/block_test.go +++ b/core/types/block_test.go @@ -42,7 +42,7 @@ import ( // the following 2 functions are 
replica for the test // This is a replica of `bor.GetValidatorBytes` function -// This was needed because currently, `IsParallelUniverse` will always return false. +// This was needed because currently, `IsNapoli` will always return false. func GetValidatorBytesTest(h *Header) []byte { if len(h.Extra) < ExtraVanityLength+ExtraSealLength { log.Error("length of extra is less than vanity and seal") diff --git a/core/vm/contracts.go b/core/vm/contracts.go index ccc55074486..b71f949ef1f 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -20,13 +20,13 @@ import ( "crypto/sha256" "encoding/binary" "errors" - "github.com/ledgerwatch/erigon-lib/crypto/blake2b" "math/big" "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/crypto/blake2b" libkzg "github.com/ledgerwatch/erigon-lib/crypto/kzg" "github.com/ledgerwatch/erigon/common" @@ -34,7 +34,7 @@ import ( "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/crypto/bls12381" "github.com/ledgerwatch/erigon/crypto/bn256" - + "github.com/ledgerwatch/erigon/crypto/secp256r1" "github.com/ledgerwatch/erigon/params" //lint:ignore SA1019 Needed for precompile @@ -112,6 +112,19 @@ var PrecompiledContractsCancun = map[libcommon.Address]PrecompiledContract{ libcommon.BytesToAddress([]byte{0x0a}): &pointEvaluation{}, } +var PrecompiledContractsNapoli = map[libcommon.Address]PrecompiledContract{ + libcommon.BytesToAddress([]byte{0x01}): &ecrecover{}, + libcommon.BytesToAddress([]byte{0x02}): &sha256hash{}, + libcommon.BytesToAddress([]byte{0x03}): &ripemd160hash{}, + libcommon.BytesToAddress([]byte{0x04}): &dataCopy{}, + libcommon.BytesToAddress([]byte{0x05}): &bigModExp{eip2565: true}, + libcommon.BytesToAddress([]byte{0x06}): &bn256AddIstanbul{}, + libcommon.BytesToAddress([]byte{0x07}): &bn256ScalarMulIstanbul{}, + libcommon.BytesToAddress([]byte{0x08}): &bn256PairingIstanbul{}, + libcommon.BytesToAddress([]byte{0x09}): &blake2F{}, + libcommon.BytesToAddress([]byte{0x01, 0x00}): &p256Verify{}, +} + // PrecompiledContractsBLS contains the set of pre-compiled Ethereum // contracts specified in EIP-2537. These are exported for testing purposes. var PrecompiledContractsBLS = map[libcommon.Address]PrecompiledContract{ @@ -127,6 +140,7 @@ var PrecompiledContractsBLS = map[libcommon.Address]PrecompiledContract{ } var ( + PrecompiledAddressesNapoli []libcommon.Address PrecompiledAddressesCancun []libcommon.Address PrecompiledAddressesBerlin []libcommon.Address PrecompiledAddressesIstanbul []libcommon.Address @@ -150,11 +164,16 @@ func init() { for k := range PrecompiledContractsCancun { PrecompiledAddressesCancun = append(PrecompiledAddressesCancun, k) } + for k := range PrecompiledContractsNapoli { + PrecompiledAddressesNapoli = append(PrecompiledAddressesNapoli, k) + } } // ActivePrecompiles returns the precompiles enabled with the current configuration. 
 func ActivePrecompiles(rules *chain.Rules) []libcommon.Address {
 	switch {
+	case rules.IsNapoli:
+		return PrecompiledAddressesNapoli
 	case rules.IsCancun:
 		return PrecompiledAddressesCancun
 	case rules.IsBerlin:
@@ -1098,3 +1117,37 @@ func (c *pointEvaluation) RequiredGas(input []byte) uint64 {
 func (c *pointEvaluation) Run(input []byte) ([]byte, error) {
 	return libkzg.PointEvaluationPrecompile(input)
 }
+
+// P256VERIFY (secp256r1 signature verification)
+// implemented as a native contract
+type p256Verify struct{}
+
+// RequiredGas returns the gas required to execute the precompiled contract
+func (c *p256Verify) RequiredGas(input []byte) uint64 {
+	return params.P256VerifyGas
+}
+
+// Run executes the precompiled contract with the given 160 bytes of input, returning the output
+func (c *p256Verify) Run(input []byte) ([]byte, error) {
+	// Required input length is 160 bytes
+	const p256VerifyInputLength = 160
+	// Check the input length
+	if len(input) != p256VerifyInputLength {
+		// Input length is invalid
+		return nil, nil
+	}
+
+	// Extract the hash, r, s, x, y from the input
+	hash := input[0:32]
+	r, s := new(big.Int).SetBytes(input[32:64]), new(big.Int).SetBytes(input[64:96])
+	x, y := new(big.Int).SetBytes(input[96:128]), new(big.Int).SetBytes(input[128:160])
+
+	// Verify the secp256r1 signature
+	if secp256r1.Verify(hash, r, s, x, y) {
+		// Signature is valid
+		return common.LeftPadBytes(big1.Bytes(), 32), nil
+	} else {
+		// Signature is invalid
+		return nil, nil
+	}
+}
diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go
index a9ca23ef1b6..d9b05437a71 100644
--- a/core/vm/contracts_test.go
+++ b/core/vm/contracts_test.go
@@ -48,26 +48,27 @@ type precompiledFailureTest struct {
 // allPrecompiles does not map to the actual set of precompiles, as it also contains
 // repriced versions of precompiles at certain slots
 var allPrecompiles = map[libcommon.Address]PrecompiledContract{
-	libcommon.BytesToAddress([]byte{1}):    &ecrecover{},
-	libcommon.BytesToAddress([]byte{2}):    &sha256hash{},
-	libcommon.BytesToAddress([]byte{3}):    &ripemd160hash{},
-	libcommon.BytesToAddress([]byte{4}):    &dataCopy{},
-	libcommon.BytesToAddress([]byte{5}):    &bigModExp{eip2565: false},
-	libcommon.BytesToAddress([]byte{0xf5}): &bigModExp{eip2565: true},
-	libcommon.BytesToAddress([]byte{6}):    &bn256AddIstanbul{},
-	libcommon.BytesToAddress([]byte{7}):    &bn256ScalarMulIstanbul{},
-	libcommon.BytesToAddress([]byte{8}):    &bn256PairingIstanbul{},
-	libcommon.BytesToAddress([]byte{9}):    &blake2F{},
-	libcommon.BytesToAddress([]byte{10}):   &bls12381G1Add{},
-	libcommon.BytesToAddress([]byte{11}):   &bls12381G1Mul{},
-	libcommon.BytesToAddress([]byte{12}):   &bls12381G1MultiExp{},
-	libcommon.BytesToAddress([]byte{13}):   &bls12381G2Add{},
-	libcommon.BytesToAddress([]byte{14}):   &bls12381G2Mul{},
-	libcommon.BytesToAddress([]byte{15}):   &bls12381G2MultiExp{},
-	libcommon.BytesToAddress([]byte{16}):   &bls12381Pairing{},
-	libcommon.BytesToAddress([]byte{17}):   &bls12381MapG1{},
-	libcommon.BytesToAddress([]byte{18}):   &bls12381MapG2{},
-	libcommon.BytesToAddress([]byte{20}):   &pointEvaluation{},
+	libcommon.BytesToAddress([]byte{1}):          &ecrecover{},
+	libcommon.BytesToAddress([]byte{2}):          &sha256hash{},
+	libcommon.BytesToAddress([]byte{3}):          &ripemd160hash{},
+	libcommon.BytesToAddress([]byte{4}):          &dataCopy{},
+	libcommon.BytesToAddress([]byte{5}):          &bigModExp{eip2565: false},
+	libcommon.BytesToAddress([]byte{0xf5}):       &bigModExp{eip2565: true},
+	libcommon.BytesToAddress([]byte{6}):          &bn256AddIstanbul{},
+	
libcommon.BytesToAddress([]byte{7}): &bn256ScalarMulIstanbul{}, + libcommon.BytesToAddress([]byte{8}): &bn256PairingIstanbul{}, + libcommon.BytesToAddress([]byte{9}): &blake2F{}, + libcommon.BytesToAddress([]byte{10}): &bls12381G1Add{}, + libcommon.BytesToAddress([]byte{11}): &bls12381G1Mul{}, + libcommon.BytesToAddress([]byte{12}): &bls12381G1MultiExp{}, + libcommon.BytesToAddress([]byte{13}): &bls12381G2Add{}, + libcommon.BytesToAddress([]byte{14}): &bls12381G2Mul{}, + libcommon.BytesToAddress([]byte{15}): &bls12381G2MultiExp{}, + libcommon.BytesToAddress([]byte{16}): &bls12381Pairing{}, + libcommon.BytesToAddress([]byte{17}): &bls12381MapG1{}, + libcommon.BytesToAddress([]byte{18}): &bls12381MapG2{}, + libcommon.BytesToAddress([]byte{20}): &pointEvaluation{}, + libcommon.BytesToAddress([]byte{0x01, 0x00}): &p256Verify{}, } // EIP-152 test vectors @@ -400,3 +401,19 @@ func BenchmarkPrecompiledBLS12381G2MultiExpWorstCase(b *testing.B) { } benchmarkPrecompiled(b, "0f", testcase) } + +// Benchmarks the sample inputs from the P256VERIFY precompile. +func BenchmarkPrecompiledP256Verify(b *testing.B) { + testcase := precompiledTest{ + Input: "4cee90eb86eaa050036147a12d49004b6b9c72bd725d39d4785011fe190f0b4da73bd4903f0ce3b639bbbf6e8e80d16931ff4bcf5993d58468e8fb19086e8cac36dbcd03009df8c59286b162af3bd7fcc0450c9aa81be5d10d312af6c66b1d604aebd3099c618202fcfe16ae7770b0c49ab5eadf74b754204a3bb6060e44eff37618b065f9832de4ca6ca971a7a1adc826d0f7c00181a5fb2ddf79ae00b4e10e", + Expected: "0000000000000000000000000000000000000000000000000000000000000001", + Name: "p256Verify", + } + benchmarkPrecompiled(b, "100", testcase) +} + +func TestPrecompiledP256Verify(t *testing.T) { + t.Parallel() + + testJson("p256Verify", "100", t) +} diff --git a/core/vm/evm.go b/core/vm/evm.go index dc5dce586c8..56ffc3bc2fa 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -37,6 +37,8 @@ var emptyCodeHash = crypto.Keccak256Hash(nil) func (evm *EVM) precompile(addr libcommon.Address) (PrecompiledContract, bool) { var precompiles map[libcommon.Address]PrecompiledContract switch { + case evm.chainRules.IsNapoli: + precompiles = PrecompiledContractsNapoli case evm.chainRules.IsCancun: precompiles = PrecompiledContractsCancun case evm.chainRules.IsBerlin: diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 161068229bf..e2cf325c105 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -128,6 +128,8 @@ func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter { jt = &pragueInstructionSet case evm.ChainRules().IsCancun: jt = &cancunInstructionSet + case evm.ChainRules().IsNapoli: + jt = &napoliInstructionSet case evm.ChainRules().IsShanghai: jt = &shanghaiInstructionSet case evm.ChainRules().IsLondon: diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index 047c9f53845..806ae494133 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -61,6 +61,7 @@ var ( berlinInstructionSet = newBerlinInstructionSet() londonInstructionSet = newLondonInstructionSet() shanghaiInstructionSet = newShanghaiInstructionSet() + napoliInstructionSet = newNapoliInstructionSet() cancunInstructionSet = newCancunInstructionSet() pragueInstructionSet = newPragueInstructionSet() ) @@ -99,12 +100,18 @@ func newPragueInstructionSet() JumpTable { // constantinople, istanbul, petersburg, berlin, london, paris, shanghai, // and cancun instructions. 
 func newCancunInstructionSet() JumpTable {
+	instructionSet := newNapoliInstructionSet()
+	enable4844(&instructionSet) // BLOBHASH opcode
+	enable7516(&instructionSet) // BLOBBASEFEE opcode
+	validateAndFillMaxStack(&instructionSet)
+	return instructionSet
+}
+
+func newNapoliInstructionSet() JumpTable {
 	instructionSet := newShanghaiInstructionSet()
 	enable1153(&instructionSet) // Transient storage opcodes
-	enable4844(&instructionSet) // BLOBHASH opcode
 	enable5656(&instructionSet) // MCOPY opcode
 	enable6780(&instructionSet) // SELFDESTRUCT only in same transaction
-	enable7516(&instructionSet) // BLOBBASEFEE opcode
 	validateAndFillMaxStack(&instructionSet)
 	return instructionSet
 }
diff --git a/core/vm/testdata/precompiles/p256Verify.json b/core/vm/testdata/precompiles/p256Verify.json
new file mode 100644
index 00000000000..54723147a51
--- /dev/null
+++ b/core/vm/testdata/precompiles/p256Verify.json
@@ -0,0 +1,37 @@
+[
+  {
+    "Input": "4cee90eb86eaa050036147a12d49004b6b9c72bd725d39d4785011fe190f0b4da73bd4903f0ce3b639bbbf6e8e80d16931ff4bcf5993d58468e8fb19086e8cac36dbcd03009df8c59286b162af3bd7fcc0450c9aa81be5d10d312af6c66b1d604aebd3099c618202fcfe16ae7770b0c49ab5eadf74b754204a3bb6060e44eff37618b065f9832de4ca6ca971a7a1adc826d0f7c00181a5fb2ddf79ae00b4e10e",
+    "Expected": "0000000000000000000000000000000000000000000000000000000000000001",
+    "Gas": 3450,
+    "Name": "CallP256Verify",
+    "NoBenchmark": false
+  },
+  {
+    "Input": "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9414de3726ee4d237b410c1d85ebcb05553dc578561d9f7942b7250795beb9b9027b657067322fc00ab35263fde0acabf998cd9fcf1282df9555f85dba7bdbbe2dc90f74c9e210bc3e0c60aeaa03729c9e6acde4a048ee58fd2e466c1e7b0374e606b8c22ad2985df7d792ff344f03ce94a079da801006b13640bc5af7932a7b9",
+    "Expected": "0000000000000000000000000000000000000000000000000000000000000001",
+    "Gas": 3450,
+    "Name": "CallP256Verify",
+    "NoBenchmark": false
+  },
+  {
+    "Input": "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9b35d6a4f7f6fc5620c97d4287696f5174b3d37fa537b74b5fc26997ba79c725d62fe5e5fe6da76eec924e822c5ef853ede6c17069a9e9133a38f87d61599f68e7d5f3c812a255436846ee84a262b79ec4d0783afccf2433deabdca9ecf62bef5ff24e90988c7f139d378549c3a8bc6c94e6a1c911c1e02e6f48ed65aaf3d296e",
+    "Expected": "0000000000000000000000000000000000000000000000000000000000000001",
+    "Gas": 3450,
+    "Name": "CallP256Verify",
+    "NoBenchmark": false
+  },
+  {
+    "Input": "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9c29c3df6ce3431b6f030b1b68b1589508ad9d1a149830489c638653aa4b08af93f6e86a9a7643403b6f5c593410d9f7234a8cd27309bce90447073ce17476850615ff147863bc8652be1e369444f90bbc5f9df05a26362e609f73ab1f1839fe3cd34fd2ae672c110671d49115825fc56b5148321aabe5ba39f2b46f71149cff9",
+    "Expected": "",
+    "Gas": 3450,
+    "Name": "CallP256Verify",
+    "NoBenchmark": false
+  },
+  {
+    "Input": "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9",
+    "Expected": "",
+    "Gas": 3450,
+    "Name": "CallP256Verify",
+    "NoBenchmark": false
+  }
+]
\ No newline at end of file
diff --git a/crypto/secp256r1/publickey.go b/crypto/secp256r1/publickey.go
new file mode 100644
index 00000000000..9b84044efa0
--- /dev/null
+++ b/crypto/secp256r1/publickey.go
@@ -0,0 +1,26 @@
+package secp256r1
+
+import (
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"math/big"
+)
+
+// Generates the appropriate public key format from the given coordinates
+func newPublicKey(x, y *big.Int) *ecdsa.PublicKey {
+	// Check if the given coordinates are valid
+	if x == nil || y == nil || !elliptic.P256().IsOnCurve(x, y) {
+		return 
nil + } + + // Check if the given coordinates are the reference point (infinity) + if x.Sign() == 0 && y.Sign() == 0 { + return nil + } + + return &ecdsa.PublicKey{ + Curve: elliptic.P256(), + X: x, + Y: y, + } +} diff --git a/crypto/secp256r1/verifier.go b/crypto/secp256r1/verifier.go new file mode 100644 index 00000000000..ccc0786610b --- /dev/null +++ b/crypto/secp256r1/verifier.go @@ -0,0 +1,21 @@ +package secp256r1 + +import ( + "crypto/ecdsa" + "math/big" +) + +// Verifies the given signature (r, s) for the given hash and public key (x, y). +func Verify(hash []byte, r, s, x, y *big.Int) bool { + // Create the public key format + publicKey := newPublicKey(x, y) + + // Check if they are invalid public key coordinates + if publicKey == nil { + return false + } + + // Verify the signature with the public key, + // then return true if it's valid, false otherwise + return ecdsa.Verify(publicKey, hash, r, s) +} diff --git a/diagnostics/diagnostic.go b/diagnostics/diagnostic.go index 69470f7a0d9..dcd332d9929 100644 --- a/diagnostics/diagnostic.go +++ b/diagnostics/diagnostic.go @@ -16,11 +16,11 @@ type DiagnosticClient struct { metricsMux *http.ServeMux node *node.ErigonNode - snapshotDownload diaglib.SnapshotDownloadStatistics + syncStats diaglib.SyncStatistics } func NewDiagnosticClient(ctx *cli.Context, metricsMux *http.ServeMux, node *node.ErigonNode) *DiagnosticClient { - return &DiagnosticClient{ctx: ctx, metricsMux: metricsMux, node: node, snapshotDownload: diaglib.SnapshotDownloadStatistics{}} + return &DiagnosticClient{ctx: ctx, metricsMux: metricsMux, node: node, syncStats: diaglib.SyncStatistics{}} } func (d *DiagnosticClient) Setup() { @@ -28,6 +28,8 @@ func (d *DiagnosticClient) Setup() { d.runSegmentDownloadingListener() d.runSegmentIndexingListener() d.runSegmentIndexingFinishedListener() + d.runCurrentSyncStageListener() + d.runSyncStagesListListener() } func (d *DiagnosticClient) runSnapshotListener() { @@ -44,18 +46,18 @@ func (d *DiagnosticClient) runSnapshotListener() { cancel() return case info := <-ch: - d.snapshotDownload.Downloaded = info.Downloaded - d.snapshotDownload.Total = info.Total - d.snapshotDownload.TotalTime = info.TotalTime - d.snapshotDownload.DownloadRate = info.DownloadRate - d.snapshotDownload.UploadRate = info.UploadRate - d.snapshotDownload.Peers = info.Peers - d.snapshotDownload.Files = info.Files - d.snapshotDownload.Connections = info.Connections - d.snapshotDownload.Alloc = info.Alloc - d.snapshotDownload.Sys = info.Sys - d.snapshotDownload.DownloadFinished = info.DownloadFinished - d.snapshotDownload.TorrentMetadataReady = info.TorrentMetadataReady + d.syncStats.SnapshotDownload.Downloaded = info.Downloaded + d.syncStats.SnapshotDownload.Total = info.Total + d.syncStats.SnapshotDownload.TotalTime = info.TotalTime + d.syncStats.SnapshotDownload.DownloadRate = info.DownloadRate + d.syncStats.SnapshotDownload.UploadRate = info.UploadRate + d.syncStats.SnapshotDownload.Peers = info.Peers + d.syncStats.SnapshotDownload.Files = info.Files + d.syncStats.SnapshotDownload.Connections = info.Connections + d.syncStats.SnapshotDownload.Alloc = info.Alloc + d.syncStats.SnapshotDownload.Sys = info.Sys + d.syncStats.SnapshotDownload.DownloadFinished = info.DownloadFinished + d.syncStats.SnapshotDownload.TorrentMetadataReady = info.TorrentMetadataReady if info.DownloadFinished { return @@ -66,8 +68,8 @@ func (d *DiagnosticClient) runSnapshotListener() { }() } -func (d *DiagnosticClient) SnapshotDownload() diaglib.SnapshotDownloadStatistics { - return 
d.snapshotDownload +func (d *DiagnosticClient) SyncStatistics() diaglib.SyncStatistics { + return d.syncStats } func (d *DiagnosticClient) runSegmentDownloadingListener() { @@ -84,11 +86,11 @@ func (d *DiagnosticClient) runSegmentDownloadingListener() { cancel() return case info := <-ch: - if d.snapshotDownload.SegmentsDownloading == nil { - d.snapshotDownload.SegmentsDownloading = map[string]diaglib.SegmentDownloadStatistics{} + if d.syncStats.SnapshotDownload.SegmentsDownloading == nil { + d.syncStats.SnapshotDownload.SegmentsDownloading = map[string]diaglib.SegmentDownloadStatistics{} } - d.snapshotDownload.SegmentsDownloading[info.Name] = info + d.syncStats.SnapshotDownload.SegmentsDownloading[info.Name] = info } } }() @@ -129,15 +131,15 @@ func (d *DiagnosticClient) runSegmentIndexingFinishedListener() { return case info := <-ch: found := false - for i := range d.snapshotDownload.SegmentIndexing.Segments { - if d.snapshotDownload.SegmentIndexing.Segments[i].SegmentName == info.SegmentName { + for i := range d.syncStats.SnapshotIndexing.Segments { + if d.syncStats.SnapshotIndexing.Segments[i].SegmentName == info.SegmentName { found = true - d.snapshotDownload.SegmentIndexing.Segments[i].Percent = 100 + d.syncStats.SnapshotIndexing.Segments[i].Percent = 100 } } if !found { - d.snapshotDownload.SegmentIndexing.Segments = append(d.snapshotDownload.SegmentIndexing.Segments, diaglib.SnapshotSegmentIndexingStatistics{ + d.syncStats.SnapshotIndexing.Segments = append(d.syncStats.SnapshotIndexing.Segments, diaglib.SnapshotSegmentIndexingStatistics{ SegmentName: info.SegmentName, Percent: 100, Alloc: 0, @@ -150,26 +152,70 @@ func (d *DiagnosticClient) runSegmentIndexingFinishedListener() { } func (d *DiagnosticClient) addOrUpdateSegmentIndexingState(upd diaglib.SnapshotIndexingStatistics) { - if d.snapshotDownload.SegmentIndexing.Segments == nil { - d.snapshotDownload.SegmentIndexing.Segments = []diaglib.SnapshotSegmentIndexingStatistics{} + if d.syncStats.SnapshotIndexing.Segments == nil { + d.syncStats.SnapshotIndexing.Segments = []diaglib.SnapshotSegmentIndexingStatistics{} } for i := range upd.Segments { found := false - for j := range d.snapshotDownload.SegmentIndexing.Segments { - if d.snapshotDownload.SegmentIndexing.Segments[j].SegmentName == upd.Segments[i].SegmentName { - d.snapshotDownload.SegmentIndexing.Segments[j].Percent = upd.Segments[i].Percent - d.snapshotDownload.SegmentIndexing.Segments[j].Alloc = upd.Segments[i].Alloc - d.snapshotDownload.SegmentIndexing.Segments[j].Sys = upd.Segments[i].Sys + for j := range d.syncStats.SnapshotIndexing.Segments { + if d.syncStats.SnapshotIndexing.Segments[j].SegmentName == upd.Segments[i].SegmentName { + d.syncStats.SnapshotIndexing.Segments[j].Percent = upd.Segments[i].Percent + d.syncStats.SnapshotIndexing.Segments[j].Alloc = upd.Segments[i].Alloc + d.syncStats.SnapshotIndexing.Segments[j].Sys = upd.Segments[i].Sys found = true break } } if !found { - d.snapshotDownload.SegmentIndexing.Segments = append(d.snapshotDownload.SegmentIndexing.Segments, upd.Segments[i]) + d.syncStats.SnapshotIndexing.Segments = append(d.syncStats.SnapshotIndexing.Segments, upd.Segments[i]) } } - d.snapshotDownload.SegmentIndexing.TimeElapsed = upd.TimeElapsed + d.syncStats.SnapshotIndexing.TimeElapsed = upd.TimeElapsed +} + +func (d *DiagnosticClient) runSyncStagesListListener() { + go func() { + ctx, ch, cancel := diaglib.Context[diaglib.SyncStagesList](context.Background(), 1) + defer cancel() + + rootCtx, _ := common.RootContext() + + 
diaglib.StartProviders(ctx, diaglib.TypeOf(diaglib.SyncStagesList{}), log.Root()) + for { + select { + case <-rootCtx.Done(): + cancel() + return + case info := <-ch: + d.syncStats.SyncStages.StagesList = info.Stages + return + } + } + }() +} + +func (d *DiagnosticClient) runCurrentSyncStageListener() { + go func() { + ctx, ch, cancel := diaglib.Context[diaglib.CurrentSyncStage](context.Background(), 1) + defer cancel() + + rootCtx, _ := common.RootContext() + + diaglib.StartProviders(ctx, diaglib.TypeOf(diaglib.CurrentSyncStage{}), log.Root()) + for { + select { + case <-rootCtx.Done(): + cancel() + return + case info := <-ch: + d.syncStats.SyncStages.CurrentStage = info.Stage + if int(d.syncStats.SyncStages.CurrentStage) >= len(d.syncStats.SyncStages.StagesList) { + return + } + } + } + }() } diff --git a/diagnostics/snapshot_sync.go b/diagnostics/snapshot_sync.go index 66bb2a8a392..6e99b8ba4c1 100644 --- a/diagnostics/snapshot_sync.go +++ b/diagnostics/snapshot_sync.go @@ -14,5 +14,5 @@ func SetupStagesAccess(metricsMux *http.ServeMux, diag *DiagnosticClient) { } func writeStages(w http.ResponseWriter, diag *DiagnosticClient) { - json.NewEncoder(w).Encode(diag.SnapshotDownload()) + json.NewEncoder(w).Encode(diag.SyncStatistics()) } diff --git a/docs/programmers_guide/witness_format.md b/docs/programmers_guide/witness_format.md index 8454e808d66..fe0909f6898 100644 --- a/docs/programmers_guide/witness_format.md +++ b/docs/programmers_guide/witness_format.md @@ -91,6 +91,6 @@ encoded as `[ 0x05 CBOR(key|[]byte)... flags /CBOR(nonce).../ /CBOR(balance).../ *flags* is a bitset encoded in a single bit (see [`witness_operators_test.go`](../../trie/witness_operators_test.go) to see flags in action). * bit 0 defines if **code** is present; if set to 1 it assumes that either `OpCode` or `OpHash` already put something on the stack; -* bit 1 defines if **storage** is present; if set to 1, the operators preceeding `OpAccountLeaf` will reconstruct a storage trie; +* bit 1 defines if **storage** is present; if set to 1, the operators preceding `OpAccountLeaf` will reconstruct a storage trie; * bit 2 defines if **nonce** is not 0; if set to 0, *nonce* field is not encoded; * bit 3 defines if **balance** is not 0; if set to 0, *balance* field is not encoded; diff --git a/docs/readthedocs/source/etl.rst b/docs/readthedocs/source/etl.rst index 3fbbc2e5c0a..4e12ddbcdf2 100644 --- a/docs/readthedocs/source/etl.rst +++ b/docs/readthedocs/source/etl.rst @@ -110,7 +110,7 @@ To avoid that, the ETL framework allows storing progress by setting `OnLoadCommi Then we can use this data to know the progress the ETL transformation made. -You can also specify `ExtractStartKey` and `ExtractEndKey` to limit the nubmer +You can also specify `ExtractStartKey` and `ExtractEndKey` to limit the number of items transformed. diff --git a/docs/readthedocs/source/rpc/index.rst b/docs/readthedocs/source/rpc/index.rst index 8114154fba7..78c2c520476 100644 --- a/docs/readthedocs/source/rpc/index.rst +++ b/docs/readthedocs/source/rpc/index.rst @@ -1842,7 +1842,7 @@ Submit the mining hashrate to the blockchain. 
   * - Type
     - Description
   * - ``BOOLEAN``
-     - ``true`` if submitting went through succesfully, ``false`` otherwise
+     - ``true`` if submitting went through successfully, ``false`` otherwise

--------------

diff --git a/docs/readthedocs/source/stagedsync.rst b/docs/readthedocs/source/stagedsync.rst
index 537ce3afef3..fef78f79c08 100644
--- a/docs/readthedocs/source/stagedsync.rst
+++ b/docs/readthedocs/source/stagedsync.rst
@@ -42,7 +42,7 @@ the process method takes the headers retrieve thanks to the fetcher and does the
 * Save block headers in database.
 
-This process repeates until we reach the maximun height. once it is reached the stage finish.
+This process repeats until we reach the maximum height. Once it is reached, the stage finishes.
 
 Changes in DB:
 
diff --git a/docs/readthedocs/source/types.rst b/docs/readthedocs/source/types.rst
index f5f88ea8832..2af774f3b4d 100644
--- a/docs/readthedocs/source/types.rst
+++ b/docs/readthedocs/source/types.rst
@@ -22,7 +22,7 @@ Address represents the 20 byte address of an Ethereum account.
     type Hash [32]byte
 
 Hash represents the 32 byte Keccak256 hash of arbitrary data.
-Address repressents the 20 byte standard Ethereum Address
+Address represents the 20 byte standard Ethereum Address
 
 Both Hash and Address are bytes arrays.
 
@@ -60,7 +60,7 @@ Transaction
 
     type Transactions []*Transaction
 
-repressent an Ethereum Transaction.
+represents an Ethereum Transaction.
 
 **from**
 
@@ -218,7 +218,7 @@ Block
         ReceivedFrom interface{}
     }
 
-repressent a block of the chain.
+represents a block of the chain.
 
 **header**
 
diff --git a/erigon-lib/chain/chain_config.go b/erigon-lib/chain/chain_config.go
index d0be8c3514f..e6d674b3e88 100644
--- a/erigon-lib/chain/chain_config.go
+++ b/erigon-lib/chain/chain_config.go
@@ -17,9 +17,9 @@ package chain
 
 import (
+	"encoding/json"
 	"fmt"
 	"math/big"
-	"sort"
 	"strconv"
 
 	"github.com/ledgerwatch/erigon-lib/common"
@@ -83,11 +83,22 @@ type Config struct {
 	BurntContract map[string]common.Address `json:"burntContract,omitempty"`
 
 	// Various consensus engines
-	Ethash *EthashConfig `json:"ethash,omitempty"`
-	Clique *CliqueConfig `json:"clique,omitempty"`
-	Aura   *AuRaConfig   `json:"aura,omitempty"`
-	Bor    *BorConfig    `json:"bor,omitempty"`
+	Ethash   *EthashConfig   `json:"ethash,omitempty"`
+	Clique   *CliqueConfig   `json:"clique,omitempty"`
+	Aura     *AuRaConfig     `json:"aura,omitempty"`
+	Optimism *OptimismConfig `json:"optimism,omitempty"`
+
+	Bor     BorConfig       `json:"-"`
+	BorJSON json.RawMessage `json:"bor,omitempty"`
+}
+
+type BorConfig interface {
+	fmt.Stringer
+	IsAgra(num uint64) bool
+	GetAgraBlock() *big.Int
+	IsNapoli(num uint64) bool
+	GetNapoliBlock() *big.Int
 }
 
 func (c *Config) String() string {
@@ -214,10 +225,12 @@ func (c *Config) IsShanghai(time uint64) bool {
 // Also Agra is activated based on the block number rather than the timestamp.
 // Refer to https://forum.polygon.technology/t/pip-28-agra-hardfork
 func (c *Config) IsAgra(num uint64) bool {
-	if c == nil || c.Bor == nil {
-		return false
-	}
-	return isForked(c.Bor.AgraBlock, num)
+	return (c != nil) && (c.Bor != nil) && c.Bor.IsAgra(num)
+}
+
+// Refer to https://forum.polygon.technology/t/pip-33-napoli-upgrade
+func (c *Config) IsNapoli(num uint64) bool {
+	return (c != nil) && (c.Bor != nil) && c.Bor.IsNapoli(num)
 }
 
 // IsCancun returns whether time is either equal to the Cancun fork time or greater.
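Since Bor fork scheduling now lives behind the BorConfig interface, activation checks go through Config.IsAgra/Config.IsNapoli (and the Rules.IsNapoli flag added further down) rather than reading Bor.AgraBlock directly. A minimal sketch of how the new flag could pair with the Napoli jump table from the core/vm hunk above; pickInstructionSet is hypothetical and not part of this diff:

	// Illustrative only: select a jump table from the precomputed fork flags.
	// Napoli (PIP-33) is the Cancun opcode set minus BLOBHASH/BLOBBASEFEE.
	func pickInstructionSet(rules *chain.Rules) JumpTable {
		switch {
		case rules.IsCancun:
			return newCancunInstructionSet()
		case rules.IsNapoli:
			return newNapoliInstructionSet()
		default:
			return newShanghaiInstructionSet()
		}
	}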
@@ -293,28 +306,28 @@ func (c *Config) ElasticityMultiplier(defaultParam int) uint64 { } func (c *Config) GetMinBlobGasPrice() uint64 { - if c.MinBlobGasPrice != nil { + if c != nil && c.MinBlobGasPrice != nil { return *c.MinBlobGasPrice } return 1 // MIN_BLOB_GASPRICE (EIP-4844) } func (c *Config) GetMaxBlobGasPerBlock() uint64 { - if c.MaxBlobGasPerBlock != nil { + if c != nil && c.MaxBlobGasPerBlock != nil { return *c.MaxBlobGasPerBlock } return 786432 // MAX_BLOB_GAS_PER_BLOCK (EIP-4844) } func (c *Config) GetTargetBlobGasPerBlock() uint64 { - if c.TargetBlobGasPerBlock != nil { + if c != nil && c.TargetBlobGasPerBlock != nil { return *c.TargetBlobGasPerBlock } return 393216 // TARGET_BLOB_GAS_PER_BLOCK (EIP-4844) } func (c *Config) GetBlobGasPriceUpdateFraction() uint64 { - if c.BlobGasPriceUpdateFraction != nil { + if c != nil && c.BlobGasPriceUpdateFraction != nil { return *c.BlobGasPriceUpdateFraction } return 3338477 // BLOB_GASPRICE_UPDATE_FRACTION (EIP-4844) @@ -529,132 +542,6 @@ func (o *OptimismConfig) String() string { return "optimism" } -// BorConfig is the consensus engine configs for Matic bor based sealing. -type BorConfig struct { - Period map[string]uint64 `json:"period"` // Number of seconds between blocks to enforce - ProducerDelay map[string]uint64 `json:"producerDelay"` // Number of seconds delay between two producer interval - Sprint map[string]uint64 `json:"sprint"` // Epoch length to proposer - BackupMultiplier map[string]uint64 `json:"backupMultiplier"` // Backup multiplier to determine the wiggle time - ValidatorContract string `json:"validatorContract"` // Validator set contract - StateReceiverContract string `json:"stateReceiverContract"` // State receiver contract - - OverrideStateSyncRecords map[string]int `json:"overrideStateSyncRecords"` // override state records count - BlockAlloc map[string]interface{} `json:"blockAlloc"` - - JaipurBlock *big.Int `json:"jaipurBlock"` // Jaipur switch block (nil = no fork, 0 = already on jaipur) - DelhiBlock *big.Int `json:"delhiBlock"` // Delhi switch block (nil = no fork, 0 = already on delhi) - IndoreBlock *big.Int `json:"indoreBlock"` // Indore switch block (nil = no fork, 0 = already on indore) - AgraBlock *big.Int `json:"agraBlock"` // Agra switch block (nil = no fork, 0 = already in agra) - StateSyncConfirmationDelay map[string]uint64 `json:"stateSyncConfirmationDelay"` // StateSync Confirmation Delay, in seconds, to calculate `to` - - ParallelUniverseBlock *big.Int `json:"parallelUniverseBlock"` // TODO: update all occurrence, change name and finalize number (hardfork for block-stm related changes) - - sprints sprints -} - -// String implements the stringer interface, returning the consensus engine details. 
-func (b *BorConfig) String() string { - return "bor" -} - -func (c *BorConfig) CalculateProducerDelay(number uint64) uint64 { - return borKeyValueConfigHelper(c.ProducerDelay, number) -} - -func (c *BorConfig) CalculateSprint(number uint64) uint64 { - if c.sprints == nil { - c.sprints = asSprints(c.Sprint) - } - - for i := 0; i < len(c.sprints)-1; i++ { - if number >= c.sprints[i].from && number < c.sprints[i+1].from { - return c.sprints[i].size - } - } - - return c.sprints[len(c.sprints)-1].size -} - -func (c *BorConfig) CalculateSprintCount(from, to uint64) int { - switch { - case from > to: - return 0 - case from < to: - to-- - } - - if c.sprints == nil { - c.sprints = asSprints(c.Sprint) - } - - count := uint64(0) - startCalc := from - - zeroth := func(boundary uint64, size uint64) uint64 { - if boundary%size == 0 { - return 1 - } - - return 0 - } - - for i := 0; i < len(c.sprints)-1; i++ { - if startCalc >= c.sprints[i].from && startCalc < c.sprints[i+1].from { - if to >= c.sprints[i].from && to < c.sprints[i+1].from { - if startCalc == to { - return int(count + zeroth(startCalc, c.sprints[i].size)) - } - return int(count + zeroth(startCalc, c.sprints[i].size) + (to-startCalc)/c.sprints[i].size) - } else { - endCalc := c.sprints[i+1].from - 1 - count += zeroth(startCalc, c.sprints[i].size) + (endCalc-startCalc)/c.sprints[i].size - startCalc = endCalc + 1 - } - } - } - - if startCalc == to { - return int(count + zeroth(startCalc, c.sprints[len(c.sprints)-1].size)) - } - - return int(count + zeroth(startCalc, c.sprints[len(c.sprints)-1].size) + (to-startCalc)/c.sprints[len(c.sprints)-1].size) -} - -func (c *BorConfig) CalculateBackupMultiplier(number uint64) uint64 { - return borKeyValueConfigHelper(c.BackupMultiplier, number) -} - -func (c *BorConfig) CalculatePeriod(number uint64) uint64 { - return borKeyValueConfigHelper(c.Period, number) -} - -func (c *BorConfig) IsJaipur(number uint64) bool { - return isForked(c.JaipurBlock, number) -} - -func (c *BorConfig) IsDelhi(number uint64) bool { - return isForked(c.DelhiBlock, number) -} - -func (c *BorConfig) IsIndore(number uint64) bool { - return isForked(c.IndoreBlock, number) -} - -// TODO: modify this function once the block number is finalized -func (c *BorConfig) IsParallelUniverse(number uint64) bool { - if c.ParallelUniverseBlock != nil { - if c.ParallelUniverseBlock.Cmp(big.NewInt(0)) == 0 { - return false - } - } - - return isForked(c.ParallelUniverseBlock, number) -} - -func (c *BorConfig) CalculateStateSyncDelay(number uint64) uint64 { - return borKeyValueConfigHelper(c.StateSyncConfirmationDelay, number) -} - func borKeyValueConfigHelper[T uint64 | common.Address](field map[string]T, number uint64) T { fieldUint := make(map[uint64]T) for k, v := range field { @@ -676,51 +563,20 @@ func borKeyValueConfigHelper[T uint64 | common.Address](field map[string]T, numb return fieldUint[keys[len(keys)-1]] } -type sprint struct { - from, size uint64 -} - -type sprints []sprint - -func (s sprints) Len() int { - return len(s) -} - -func (s sprints) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s sprints) Less(i, j int) bool { - return s[i].from < s[j].from -} - -func asSprints(configSprints map[string]uint64) sprints { - sprints := make(sprints, len(configSprints)) - - i := 0 - for key, value := range configSprints { - sprints[i].from, _ = strconv.ParseUint(key, 10, 64) - sprints[i].size = value - i++ - } - - sort.Sort(sprints) - - return sprints -} - // Rules is syntactic sugar over Config. 
It can be used for functions // that do not have or require information about the block. // // Rules is a one time interface meaning that it shouldn't be used in between transition // phases. type Rules struct { - ChainID *big.Int - IsHomestead, IsTangerineWhistle, IsSpuriousDragon bool - IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool - IsBerlin, IsLondon, IsShanghai, IsCancun, IsPrague bool - IsAura bool - IsBedrock, IsOptimismRegolith bool + ChainID *big.Int + IsHomestead, IsTangerineWhistle, IsSpuriousDragon bool + IsByzantium, IsConstantinople, IsPetersburg bool + IsIstanbul, IsBerlin, IsLondon, IsShanghai bool + IsCancun, IsNapoli bool + IsPrague bool + IsAura bool + IsBedrock, IsOptimismRegolith bool } // Rules ensures c's ChainID is not nil and returns a new Rules instance @@ -743,6 +599,7 @@ func (c *Config) Rules(num uint64, time uint64) *Rules { IsLondon: c.IsLondon(num), IsShanghai: c.IsShanghai(time) || c.IsAgra(num), IsCancun: c.IsCancun(time), + IsNapoli: c.IsNapoli(num), IsPrague: c.IsPrague(time), IsBedrock: c.IsBedrock(num), IsOptimismRegolith: c.IsOptimismRegolith(time), diff --git a/erigon-lib/chain/networkname/network_name.go b/erigon-lib/chain/networkname/network_name.go index a2251a9f803..ec14bcd01ee 100644 --- a/erigon-lib/chain/networkname/network_name.go +++ b/erigon-lib/chain/networkname/network_name.go @@ -1,18 +1,19 @@ package networkname const ( - MainnetChainName = "mainnet" - HoleskyChainName = "holesky" - SepoliaChainName = "sepolia" - GoerliChainName = "goerli" - DevChainName = "dev" - MumbaiChainName = "mumbai" - AmoyChainName = "amoy" - BorMainnetChainName = "bor-mainnet" - BorDevnetChainName = "bor-devnet" - GnosisChainName = "gnosis" - BorE2ETestChain2ValName = "bor-e2e-test-2Val" - ChiadoChainName = "chiado" + MainnetChainName = "mainnet" + HoleskyChainName = "holesky" + SepoliaChainName = "sepolia" + GoerliChainName = "goerli" + DevChainName = "dev" + MumbaiChainName = "mumbai" + AmoyChainName = "amoy" + BorMainnetChainName = "bor-mainnet" + BorDevnetChainName = "bor-devnet" + GnosisChainName = "gnosis" + BorE2ETestChain2ValName = "bor-e2e-test-2Val" + ChiadoChainName = "chiado" + OptimismMainnetChainName = "optimism-mainnet" OptimismGoerliChainName = "optimism-goerli" BobaSepoliaChainName = "boba-sepolia" @@ -29,6 +30,7 @@ var All = []string{ BorDevnetChainName, GnosisChainName, ChiadoChainName, + OptimismMainnetChainName, OptimismGoerliChainName, BobaSepoliaChainName, diff --git a/erigon-lib/chain/snapcfg/util.go b/erigon-lib/chain/snapcfg/util.go index 8f5ecf2f3ae..db1e42d3276 100644 --- a/erigon-lib/chain/snapcfg/util.go +++ b/erigon-lib/chain/snapcfg/util.go @@ -49,23 +49,54 @@ func doSort(in preverified) Preverified { } var ( - MainnetChainSnapshotCfg = newCfg(Mainnet) - // HoleskyChainSnapshotCfg = newCfg(Holesky, HoleskyHistory) - SepoliaChainSnapshotCfg = newCfg(Sepolia) - GoerliChainSnapshotCfg = newCfg(Goerli) - MumbaiChainSnapshotCfg = newCfg(Mumbai) - AmoyChainSnapshotCfg = newCfg(Amoy) - BorMainnetChainSnapshotCfg = newCfg(BorMainnet) - GnosisChainSnapshotCfg = newCfg(Gnosis) - ChiadoChainSnapshotCfg = newCfg(Chiado) + isDefaultVersion bool = true + snapshotVersion uint8 = 1 ) -func newCfg(preverified Preverified) *Cfg { - return &Cfg{ExpectBlocks: maxBlockNum(preverified), Preverified: preverified} +func SnapshotVersion(version uint8) { + snapshotVersion = version + isDefaultVersion = false } -func maxBlockNum(preverified Preverified) uint64 { +func newCfg(preverified Preverified, version uint8) *Cfg { + + if version == 0 
{ + version = snapshotVersion + + var pv Preverified + + for _, p := range preverified { + if v, _, ok := strings.Cut(p.Name, "-"); ok && strings.HasPrefix(v, "v") { + if v, err := strconv.ParseUint(v[1:], 10, 8); err == nil && uint64(version) == v { + pv = append(pv, p) + } + } + } + + // don't do this check if the SnapshotVersion has been explicitly set + if len(pv) == 0 && isDefaultVersion { + version = maxVersion(preverified) + + for _, p := range preverified { + if v, _, ok := strings.Cut(p.Name, "-"); ok && strings.HasPrefix(v, "v") { + if v, err := strconv.ParseUint(v[1:], 10, 8); err == nil && uint64(version) == v { + pv = append(pv, p) + } + } + } + } + + preverified = pv + } + + maxBlockNum, version := cfgInfo(preverified, version) + return &Cfg{ExpectBlocks: maxBlockNum, Preverified: preverified, Version: version} +} + +func cfgInfo(preverified Preverified, defaultVersion uint8) (uint64, uint8) { max := uint64(0) + version := defaultVersion + for _, p := range preverified { _, fileName := filepath.Split(p.Name) ext := filepath.Ext(fileName) @@ -84,37 +115,61 @@ func maxBlockNum(preverified Preverified) uint64 { if max < to { max = to } + + if vp := parts[0]; strings.HasPrefix(vp, "v") { + if v, err := strconv.ParseUint(vp[1:], 10, 8); err == nil { + version = uint8(v) + } + } } if max == 0 { // to prevent underflow - return 0 + return 0, version } - return max*1_000 - 1 + return max*1_000 - 1, version } type Cfg struct { ExpectBlocks uint64 + Version uint8 Preverified Preverified } -var KnownCfgs = map[string]*Cfg{ - networkname.MainnetChainName: MainnetChainSnapshotCfg, +var knownPreverified = map[string]Preverified{ + networkname.MainnetChainName: Mainnet, // networkname.HoleskyChainName: HoleskyChainSnapshotCfg, - networkname.SepoliaChainName: SepoliaChainSnapshotCfg, - networkname.GoerliChainName: GoerliChainSnapshotCfg, - networkname.MumbaiChainName: MumbaiChainSnapshotCfg, - networkname.AmoyChainName: AmoyChainSnapshotCfg, - networkname.BorMainnetChainName: BorMainnetChainSnapshotCfg, - networkname.GnosisChainName: GnosisChainSnapshotCfg, - networkname.ChiadoChainName: ChiadoChainSnapshotCfg, + networkname.SepoliaChainName: Sepolia, + networkname.GoerliChainName: Goerli, + networkname.MumbaiChainName: Mumbai, + networkname.AmoyChainName: Amoy, + networkname.BorMainnetChainName: BorMainnet, + networkname.GnosisChainName: Gnosis, + networkname.ChiadoChainName: Chiado, } // KnownCfg return list of preverified hashes for given network, but apply whiteList filter if it's not empty -func KnownCfg(networkName string) *Cfg { - c, ok := KnownCfgs[networkName] +func KnownCfg(networkName string, version uint8) *Cfg { + c, ok := knownPreverified[networkName] if !ok { - return newCfg(Preverified{}) + return newCfg(Preverified{}, version) + } + return newCfg(c, version) +} + +func maxVersion(pv Preverified) uint8 { + var max uint8 + + for _, p := range pv { + if v, _, ok := strings.Cut(p.Name, "-"); ok && strings.HasPrefix(v, "v") { + if v, err := strconv.ParseUint(v[1:], 10, 8); err == nil { + version := uint8(v) + if max < version { + max = version + } + } + } } - return newCfg(c.Preverified) + + return max } var KnownWebseeds = map[string][]string{ diff --git a/erigon-lib/common/collections.go b/erigon-lib/common/collections.go new file mode 100644 index 00000000000..1e5a13856d2 --- /dev/null +++ b/erigon-lib/common/collections.go @@ -0,0 +1,15 @@ +package common + +func SliceReverse[T any](s []T) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} 
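+// Usage sketch (illustrative, not part of the change itself): the reversal is
+// done in place, so callers that still need the original order must copy the
+// slice first:
+//
+//	s := []int{1, 2, 3}
+//	SliceReverse(s) // s is now [3, 2, 1]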
+
+func SliceMap[T any, U any](s []T, mapFunc func(T) U) []U {
+	out := make([]U, 0, len(s))
+	for _, x := range s {
+		out = append(out, mapFunc(x))
+	}
+	return out
+}
diff --git a/erigon-lib/common/dbg/experiments.go b/erigon-lib/common/dbg/experiments.go
index 1c6ac65021c..e9df7ace44e 100644
--- a/erigon-lib/common/dbg/experiments.go
+++ b/erigon-lib/common/dbg/experiments.go
@@ -278,8 +278,24 @@ func StopAfterReconst() bool {
 		v, _ := os.LookupEnv("STOP_AFTER_RECONSTITUTE")
 		if v == "true" {
 			stopAfterReconst = true
-			log.Info("[Experiment]", "STOP_AFTER_RECONSTITUTE", writeMap)
+			log.Info("[Experiment]", "STOP_AFTER_RECONSTITUTE", stopAfterReconst)
 		}
 	})
 	return stopAfterReconst
 }
+
+var (
+	snapshotVersion     uint8
+	snapshotVersionOnce sync.Once
+)
+
+func SnapshotVersion() uint8 {
+	snapshotVersionOnce.Do(func() {
+		v, _ := os.LookupEnv("SNAPSHOT_VERSION")
+		if i, _ := strconv.ParseUint(v, 10, 8); i > 0 {
+			snapshotVersion = uint8(i)
+			log.Info("[Experiment]", "SNAPSHOT_VERSION", snapshotVersion)
+		}
+	})
+	return snapshotVersion
+}
diff --git a/erigon-lib/common/dir/rw_dir.go b/erigon-lib/common/dir/rw_dir.go
index 0bbf76d8f5f..2d0e7066493 100644
--- a/erigon-lib/common/dir/rw_dir.go
+++ b/erigon-lib/common/dir/rw_dir.go
@@ -49,6 +49,17 @@ func FileExist(path string) bool {
 	return true
 }
 
+// FileNonZero reports whether path is an existing regular file with a size greater than zero.
+func FileNonZero(path string) bool {
+	fi, err := os.Stat(path)
+	if err != nil { // any Stat error (not only os.IsNotExist) leaves fi nil, so bail out before dereferencing it
+		return false
+	}
+	if !fi.Mode().IsRegular() {
+		return false
+	}
+	return fi.Size() > 0
+}
+
 // nolint
 func WriteFileWithFsync(name string, data []byte, perm os.FileMode) error {
 	f, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
diff --git a/erigon-lib/compress/decompress.go b/erigon-lib/compress/decompress.go
index 52e6bad505c..7f058628691 100644
--- a/erigon-lib/compress/decompress.go
+++ b/erigon-lib/compress/decompress.go
@@ -347,6 +347,10 @@ func (d *Decompressor) ModTime() time.Time {
 	return d.modTime
 }
 
+func (d *Decompressor) IsOpen() bool {
+	return d != nil && d.f != nil
+}
+
 func (d *Decompressor) Close() {
 	if d.f != nil {
 		if err := mmap.Munmap(d.mmapHandle1, d.mmapHandle2); err != nil {
diff --git a/erigon-lib/diagnostics/entities.go b/erigon-lib/diagnostics/entities.go
index 747697a5c09..fe8656249de 100644
--- a/erigon-lib/diagnostics/entities.go
+++ b/erigon-lib/diagnostics/entities.go
@@ -29,6 +29,12 @@ type PeerStatistics struct {
 	TypeBytesOut map[string]uint64
 }
 
+type SyncStatistics struct {
+	SyncStages       SyncStages                 `json:"syncStages"`
+	SnapshotDownload SnapshotDownloadStatistics `json:"snapshotDownload"`
+	SnapshotIndexing SnapshotIndexingStatistics `json:"snapshotIndexing"`
+}
+
 type SnapshotDownloadStatistics struct {
 	Downloaded uint64 `json:"downloaded"`
 	Total      uint64 `json:"total"`
@@ -42,7 +48,6 @@ type SnapshotDownloadStatistics struct {
 	Sys                  uint64                               `json:"sys"`
 	DownloadFinished     bool                                 `json:"downloadFinished"`
 	SegmentsDownloading  map[string]SegmentDownloadStatistics `json:"segmentsDownloading"`
-	SegmentIndexing      SnapshotIndexingStatistics           `json:"segmentsIndexing"`
 	TorrentMetadataReady int32                                `json:"torrentMetadataReady"`
 }
 
@@ -72,6 +77,19 @@ type SnapshotSegmentIndexingFinishedUpdate struct {
 	SegmentName string `json:"segmentName"`
 }
 
+type SyncStagesList struct {
+	Stages []string `json:"stages"`
+}
+
+type CurrentSyncStage struct {
+	Stage uint `json:"stage"`
+}
+
+type SyncStages struct {
+	StagesList   []string `json:"stagesList"`
+	CurrentStage uint     `json:"currentStage"`
+}
+
 func (ti SnapshotDownloadStatistics) Type() Type {
 	return TypeOf(ti)
 }
@@ -87,3
+105,11 @@ func (ti SnapshotIndexingStatistics) Type() Type { func (ti SnapshotSegmentIndexingFinishedUpdate) Type() Type { return TypeOf(ti) } + +func (ti SyncStagesList) Type() Type { + return TypeOf(ti) +} + +func (ti CurrentSyncStage) Type() Type { + return TypeOf(ti) +} diff --git a/erigon-lib/direct/sentinel_client.go b/erigon-lib/direct/sentinel_client.go index f421f4332bf..4a196bddb33 100644 --- a/erigon-lib/direct/sentinel_client.go +++ b/erigon-lib/direct/sentinel_client.go @@ -64,7 +64,7 @@ func (s *SentinelClientDirect) PublishGossip(ctx context.Context, in *sentinel.G // Subscribe gossip part. the only complex section of this bullshit -func (s *SentinelClientDirect) SubscribeGossip(ctx context.Context, in *sentinel.EmptyMessage, opts ...grpc.CallOption) (sentinel.Sentinel_SubscribeGossipClient, error) { +func (s *SentinelClientDirect) SubscribeGossip(ctx context.Context, in *sentinel.SubscriptionData, opts ...grpc.CallOption) (sentinel.Sentinel_SubscribeGossipClient, error) { ch := make(chan *gossipReply, 16384) streamServer := &SentinelSubscribeGossipS{ch: ch, ctx: ctx} go func() { diff --git a/erigon-lib/downloader/downloader.go b/erigon-lib/downloader/downloader.go index ec1c0f03a97..8a42ed2c21f 100644 --- a/erigon-lib/downloader/downloader.go +++ b/erigon-lib/downloader/downloader.go @@ -33,6 +33,11 @@ import ( "github.com/anacrolix/torrent/metainfo" "github.com/anacrolix/torrent/storage" "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/log/v3" + "golang.org/x/exp/slices" + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" + "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/common/dbg" @@ -42,10 +47,6 @@ import ( "github.com/ledgerwatch/erigon-lib/downloader/snaptype" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/mdbx" - "github.com/ledgerwatch/log/v3" - "golang.org/x/exp/slices" - "golang.org/x/sync/errgroup" - "golang.org/x/sync/semaphore" ) // Downloader - component which downloading historical files. 
Can use BitTorrent, or other protocols @@ -119,11 +120,13 @@ func New(ctx context.Context, cfg *downloadercfg.Cfg, dirs datadir.Dirs, logger d.webseeds.torrentFiles = d.torrentFiles d.ctx, d.stopMainLoop = context.WithCancel(ctx) - if err := d.BuildTorrentFilesIfNeed(d.ctx); err != nil { - return nil, err - } - if err := d.addTorrentFilesFromDisk(false); err != nil { - return nil, err + if cfg.AddTorrentsFromDisk { + if err := d.BuildTorrentFilesIfNeed(d.ctx); err != nil { + return nil, err + } + if err := d.addTorrentFilesFromDisk(false); err != nil { + return nil, err + } } // CornerCase: no peers -> no anoncments to trackers -> no magnetlink resolution (but magnetlink has filename) @@ -485,54 +488,35 @@ func getPeersRatesForlogs(peersOfThisFile []*torrent.PeerConn, fName string) ([] return rates, averageRate } -func VerifyFile(ctx context.Context, t *torrent.Torrent, completePieces *atomic.Uint64) error { - select { - case <-ctx.Done(): - return ctx.Err() - case <-t.GotInfo(): - } - - g := &errgroup.Group{} - for i := 0; i < t.NumPieces(); i++ { - i := i - g.Go(func() error { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - t.Piece(i).VerifyData() - completePieces.Add(1) - return nil - }) - //<-t.Complete.On() - } - return g.Wait() -} - -func (d *Downloader) VerifyData(ctx context.Context, onlyFiles []string) error { +func (d *Downloader) VerifyData(ctx context.Context, whiteList []string, failFast bool) error { total := 0 - _torrents := d.torrentClient.Torrents() - torrents := make([]*torrent.Torrent, 0, len(_torrents)) - for _, t := range torrents { + allTorrents := d.torrentClient.Torrents() + toVerify := make([]*torrent.Torrent, 0, len(allTorrents)) + for _, t := range allTorrents { select { case <-t.GotInfo(): - if len(onlyFiles) > 0 && !slices.Contains(onlyFiles, t.Name()) { - continue - } - torrents = append(torrents, t) - total += t.NumPieces() case <-ctx.Done(): return ctx.Err() } + + if len(whiteList) > 0 { + name := t.Name() + exactOrPartialMatch := slices.ContainsFunc(whiteList, func(s string) bool { + return name == s || strings.HasSuffix(name, s) || strings.HasPrefix(name, s) + }) + if !exactOrPartialMatch { + continue + } + } + toVerify = append(toVerify, t) + total += t.NumPieces() } + d.logger.Info("[snapshots] Verify start") + defer d.logger.Info("[snapshots] Verify done", "files", len(toVerify), "whiteList", whiteList) completedPieces := &atomic.Uint64{} { - d.logger.Info("[snapshots] Verify start") - defer d.logger.Info("[snapshots] Verify done") logEvery := time.NewTicker(20 * time.Second) defer logEvery.Stop() d.wg.Add(1) @@ -553,11 +537,13 @@ func (d *Downloader) VerifyData(ctx context.Context, onlyFiles []string) error { // torrent lib internally limiting amount of hashers per file // set limit here just to make load predictable, not to control Disk/CPU consumption g.SetLimit(runtime.GOMAXPROCS(-1) * 4) - - for _, t := range torrents { + for _, t := range toVerify { t := t g.Go(func() error { - return VerifyFile(ctx, t, completedPieces) + if failFast { + return VerifyFileFailFast(ctx, t, d.SnapDir(), completedPieces) + } + return ScheduleVerifyFile(ctx, t, completedPieces) }) } @@ -664,15 +650,19 @@ func seedableFiles(dirs datadir.Dirs) ([]string, error) { if err != nil { return nil, fmt.Errorf("seedableSegmentFiles: %w", err) } - l, err := seedableSnapshotsBySubDir(dirs.Snap, "history") + l1, err := seedableSnapshotsBySubDir(dirs.Snap, "idx") + if err != nil { + return nil, err + } + l2, err := seedableSnapshotsBySubDir(dirs.Snap, "history") 
if err != nil { return nil, err } - l2, err := seedableSnapshotsBySubDir(dirs.Snap, "warm") + l3, err := seedableSnapshotsBySubDir(dirs.Snap, "domain") if err != nil { return nil, err } - files = append(append(files, l...), l2...) + files = append(append(append(files, l1...), l2...), l3...) return files, nil } func (d *Downloader) addTorrentFilesFromDisk(quiet bool) error { diff --git a/erigon-lib/downloader/downloader_grpc_server.go b/erigon-lib/downloader/downloader_grpc_server.go index 5d4e763e8fe..33410793475 100644 --- a/erigon-lib/downloader/downloader_grpc_server.go +++ b/erigon-lib/downloader/downloader_grpc_server.go @@ -24,11 +24,12 @@ import ( "time" "github.com/anacrolix/torrent/metainfo" + "github.com/ledgerwatch/log/v3" + "google.golang.org/protobuf/types/known/emptypb" + "github.com/ledgerwatch/erigon-lib/gointerfaces" proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader" prototypes "github.com/ledgerwatch/erigon-lib/gointerfaces/types" - "github.com/ledgerwatch/log/v3" - "google.golang.org/protobuf/types/known/emptypb" ) var ( @@ -83,6 +84,7 @@ func (s *GrpcServer) Add(ctx context.Context, request *proto_downloader.AddReque return nil, err } } + return &emptypb.Empty{}, nil } @@ -114,7 +116,7 @@ func (s *GrpcServer) Delete(ctx context.Context, request *proto_downloader.Delet } func (s *GrpcServer) Verify(ctx context.Context, request *proto_downloader.VerifyRequest) (*emptypb.Empty, error) { - err := s.d.VerifyData(ctx, nil) + err := s.d.VerifyData(ctx, nil, false) if err != nil { return nil, err } diff --git a/erigon-lib/downloader/downloadercfg/downloadercfg.go b/erigon-lib/downloader/downloadercfg/downloadercfg.go index dae659c6207..6a466c2fea5 100644 --- a/erigon-lib/downloader/downloadercfg/downloadercfg.go +++ b/erigon-lib/downloader/downloadercfg/downloadercfg.go @@ -17,9 +17,9 @@ package downloadercfg import ( - "io/ioutil" "net" "net/url" + "os" "path/filepath" "runtime" "strings" @@ -54,6 +54,7 @@ type Cfg struct { WebSeedS3Tokens []string ExpectedTorrentFilesHashes snapcfg.Preverified DownloadTorrentFilesFromWebseed bool + AddTorrentsFromDisk bool ChainName string Dirs datadir.Dirs @@ -188,17 +189,17 @@ func New(dirs datadir.Dirs, version string, verbosity lg.Level, downloadRate, up webseedFileProviders = append(webseedFileProviders, localCfgFile) } //TODO: if don't pass "downloaded files list here" (which we store in db) - synced erigon will download new .torrent files. And erigon can't work with "unfinished" files. 
- snapCfg := snapcfg.KnownCfg(chainName) + snapCfg := snapcfg.KnownCfg(chainName, 0) return &Cfg{Dirs: dirs, ChainName: chainName, ClientConfig: torrentConfig, DownloadSlots: downloadSlots, WebSeedUrls: webseedHttpProviders, WebSeedFiles: webseedFileProviders, WebSeedS3Tokens: webseedS3Providers, - DownloadTorrentFilesFromWebseed: true, ExpectedTorrentFilesHashes: snapCfg.Preverified, + DownloadTorrentFilesFromWebseed: true, AddTorrentsFromDisk: true, ExpectedTorrentFilesHashes: snapCfg.Preverified, }, nil } func getIpv6Enabled() bool { if runtime.GOOS == "linux" { - file, err := ioutil.ReadFile("/sys/module/ipv6/parameters/disable") + file, err := os.ReadFile("/sys/module/ipv6/parameters/disable") if err != nil { log.Warn("could not read /sys/module/ipv6/parameters/disable for ipv6 detection") return false diff --git a/erigon-lib/downloader/path.go b/erigon-lib/downloader/path.go index 06ba51865b4..195c6d05ced 100644 --- a/erigon-lib/downloader/path.go +++ b/erigon-lib/downloader/path.go @@ -171,7 +171,7 @@ func Clean(path string) string { return FromSlash(out.string()) } -func unixIsLocal(path string) bool { +func unixIsLocal(path string) bool { //nolint if IsAbs(path) || path == "" { return false } diff --git a/erigon-lib/downloader/path_windows.go b/erigon-lib/downloader/path_windows.go index f5f4a01d905..687e81429ca 100644 --- a/erigon-lib/downloader/path_windows.go +++ b/erigon-lib/downloader/path_windows.go @@ -175,51 +175,6 @@ func HasPrefix(p, prefix string) bool { return strings.HasPrefix(strings.ToLower(p), strings.ToLower(prefix)) } -func splitList(path string) []string { - // The same implementation is used in LookPath in os/exec; - // consider changing os/exec when changing this. - - if path == "" { - return []string{} - } - - // Split path, respecting but preserving quotes. - list := []string{} - start := 0 - quo := false - for i := 0; i < len(path); i++ { - switch c := path[i]; { - case c == '"': - quo = !quo - case c == ListSeparator && !quo: - list = append(list, path[start:i]) - start = i + 1 - } - } - list = append(list, path[start:]) - - // Remove quotes. - for i, s := range list { - list[i] = strings.ReplaceAll(s, `"`, ``) - } - - return list -} - -func abs(path string) (string, error) { - if path == "" { - // syscall.FullPath returns an error on empty path, because it's not a valid path. - // To implement Abs behavior of returning working directory on empty string input, - // special-case empty path by changing it to "." path. See golang.org/issue/24441. - path = "." - } - fullPath, err := syscall.FullPath(path) - if err != nil { - return "", err - } - return Clean(fullPath), nil -} - func join(elem []string) string { var b strings.Builder var lastChar byte @@ -260,47 +215,3 @@ func join(elem []string) string { } return Clean(b.String()) } - -// joinNonEmpty is like join, but it assumes that the first element is non-empty. -func joinNonEmpty(elem []string) string { - if len(elem[0]) == 2 && elem[0][1] == ':' { - // First element is drive letter without terminating slash. - // Keep path relative to current directory on that drive. - // Skip empty elements. - i := 1 - for ; i < len(elem); i++ { - if elem[i] != "" { - break - } - } - return Clean(elem[0] + strings.Join(elem[i:], string(Separator))) - } - // The following logic prevents Join from inadvertently creating a - // UNC path on Windows. Unless the first element is a UNC path, Join - // shouldn't create a UNC path. See golang.org/issue/9167. 
- p := Clean(strings.Join(elem, string(Separator))) - if !isUNC(p) { - return p - } - // p == UNC only allowed when the first element is a UNC path. - head := Clean(elem[0]) - if isUNC(head) { - return p - } - // head + tail == UNC, but joining two non-UNC paths should not result - // in a UNC path. Undo creation of UNC path. - tail := Clean(strings.Join(elem[1:], string(Separator))) - if head[len(head)-1] == Separator { - return head + tail - } - return head + string(Separator) + tail -} - -// isUNC reports whether path is a UNC path. -func isUNC(path string) bool { - return len(path) > 1 && isSlash(path[0]) && isSlash(path[1]) -} - -func sameWord(a, b string) bool { - return strings.EqualFold(a, b) -} diff --git a/erigon-lib/downloader/rclone.go b/erigon-lib/downloader/rclone.go new file mode 100644 index 00000000000..4f43eaba6fd --- /dev/null +++ b/erigon-lib/downloader/rclone.go @@ -0,0 +1,783 @@ +package downloader + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/fs" + "net" + "net/http" + "os" + "os/exec" + "os/signal" + "path/filepath" + "strconv" + "strings" + "sync" + "sync/atomic" + "syscall" + "time" + + "golang.org/x/exp/slices" + + "github.com/ledgerwatch/erigon-lib/common/dbg" + "github.com/ledgerwatch/erigon-lib/downloader/snaptype" + "github.com/ledgerwatch/log/v3" + "github.com/spaolacci/murmur3" + "golang.org/x/sync/errgroup" +) + +type rcloneInfo struct { + sync.Mutex + file string + snapInfo *snaptype.FileInfo + remoteInfo remoteInfo + localInfo fs.FileInfo +} + +func (i *rcloneInfo) Version() uint8 { + if i.snapInfo != nil { + return i.snapInfo.Version + } + + return 0 +} + +func (i *rcloneInfo) From() uint64 { + if i.snapInfo != nil { + return i.snapInfo.From + } + + return 0 +} + +func (i *rcloneInfo) To() uint64 { + if i.snapInfo != nil { + return i.snapInfo.To + } + + return 0 +} + +func (i *rcloneInfo) Type() snaptype.Type { + if i.snapInfo != nil { + return i.snapInfo.T + } + + return snaptype.Unknown +} + +type RCloneClient struct { + rclone *exec.Cmd + rcloneUrl string + rcloneSession *http.Client + logger log.Logger +} + +func (c *RCloneClient) start(logger log.Logger) error { + c.logger = logger + + rclone, _ := exec.LookPath("rclone") + + if len(rclone) == 0 { + logger.Warn("[rclone] Uploading disabled: rclone not found in PATH") + return fmt.Errorf("rclone not found in PATH") + } + + if p, err := freePort(); err == nil { + ctx, cancel := context.WithCancel(context.Background()) + + addr := fmt.Sprintf("127.0.0.1:%d", p) + c.rclone = exec.CommandContext(ctx, rclone, "rcd", "--rc-addr", addr, "--rc-no-auth") + c.rcloneUrl = "http://" + addr + c.rcloneSession = &http.Client{} // no timeout - we're doing sync calls + + if err := c.rclone.Start(); err != nil { + cancel() + logger.Warn("[rclone] Uploading disabled: rclone didn't start", "err", err) + return fmt.Errorf("rclone didn't start: %w", err) + } else { + logger.Info("[rclone] rclone started", "addr", addr) + } + + go func() { + signalCh := make(chan os.Signal, 1) + signal.Notify(signalCh, syscall.SIGTERM, syscall.SIGINT) + + switch s := <-signalCh; s { + case syscall.SIGTERM, syscall.SIGINT: + cancel() + } + }() + } + + return nil +} + +func (c *RCloneClient) ListRemotes(ctx context.Context) ([]string, error) { + result, err := c.cmd(ctx, "config/listremotes", nil) + + if err != nil { + return nil, err + } + + remotes := struct { + Remotes []string `json:"remotes"` + }{} + + err = json.Unmarshal(result, &remotes) + + if err != nil { + return nil, err + } + + return 
remotes.Remotes, nil +} + +func (u *RCloneClient) sync(ctx context.Context, request *rcloneRequest) error { + _, err := u.cmd(ctx, "sync/sync", request) + return err +} + +/* +return retryConnects(ctx, func(ctx context.Context) error { + return client.CallContext(ctx, result, string(method), args...) +}) +} +*/ + +func isConnectionError(err error) bool { + var opErr *net.OpError + if errors.As(err, &opErr) { + return opErr.Op == "dial" + } + return false +} + +const connectionTimeout = time.Second * 5 + +func retry(ctx context.Context, op func(context.Context) error, isRecoverableError func(error) bool, delay time.Duration, lastErr error) error { + err := op(ctx) + if err == nil { + return nil + } + if errors.Is(err, context.DeadlineExceeded) && lastErr != nil { + return lastErr + } + if !isRecoverableError(err) { + return err + } + + delayTimer := time.NewTimer(delay) + select { + case <-delayTimer.C: + return retry(ctx, op, isRecoverableError, delay, err) + case <-ctx.Done(): + if errors.Is(ctx.Err(), context.DeadlineExceeded) { + return err + } + return ctx.Err() + } +} + +func (u *RCloneClient) cmd(ctx context.Context, path string, args interface{}) ([]byte, error) { + requestBody, err := json.Marshal(args) + + if err != nil { + return nil, err + } + + request, err := http.NewRequestWithContext(ctx, http.MethodPost, + u.rcloneUrl+"/"+path, bytes.NewBuffer(requestBody)) + + if err != nil { + return nil, err + } + + request.Header.Set("Content-Type", "application/json") + + ctx, cancel := context.WithTimeout(ctx, connectionTimeout) + defer cancel() + + var response *http.Response + + err = retry(ctx, func(ctx context.Context) error { + response, err = u.rcloneSession.Do(request) //nolint:bodyclose + return err + }, isConnectionError, time.Millisecond*200, nil) + + if err != nil { + return nil, err + } + + defer response.Body.Close() + + if response.StatusCode != http.StatusOK { + responseBody := struct { + Error string `json:"error"` + }{} + + if err := json.NewDecoder(response.Body).Decode(&responseBody); err == nil && len(responseBody.Error) > 0 { + u.logger.Warn("[rclone] cmd failed", "path", path, "status", response.Status, "err", responseBody.Error) + return nil, fmt.Errorf("cmd: %s failed: %s: %s", path, response.Status, responseBody.Error) + } else { + u.logger.Warn("[rclone] cmd failed", "path", path, "status", response.Status) + return nil, fmt.Errorf("cmd: %s failed: %s", path, response.Status) + } + } + + return io.ReadAll(response.Body) +} + +type RCloneSession struct { + *RCloneClient + sync.Mutex + files map[string]*rcloneInfo + oplock sync.Mutex + remoteFs string + localFs string + syncQueue chan syncRequest + syncScheduled atomic.Bool + activeSyncCount atomic.Int32 + cancel context.CancelFunc +} + +var rcClient RCloneClient +var rcClientStart sync.Once + +func NewRCloneClient(logger log.Logger) (*RCloneClient, error) { + var err error + + rcClientStart.Do(func() { + err = rcClient.start(logger) + }) + + if err != nil { + return nil, err + } + + return &rcClient, nil +} + +func freePort() (port int, err error) { + if a, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0"); err != nil { + return 0, err + } else { + if l, err := net.ListenTCP("tcp", a); err != nil { + return 0, err + } else { + defer l.Close() + return l.Addr().(*net.TCPAddr).Port, nil + } + } +} + +func (c *RCloneClient) NewSession(ctx context.Context, localFs string, remoteFs string) (*RCloneSession, error) { + ctx, cancel := context.WithCancel(ctx) + + session := &RCloneSession{ + RCloneClient: c, + files: 
map[string]*rcloneInfo{}, + remoteFs: remoteFs, + localFs: localFs, + cancel: cancel, + syncQueue: make(chan syncRequest, 100), + } + + go func() { + if _, err := session.ReadRemoteDir(ctx, true); err == nil { + session.syncFiles(ctx) + } + }() + + return session, nil +} + +func (c *RCloneSession) RemoteFsRoot() string { + return c.remoteFs +} + +func (c *RCloneSession) LocalFsRoot() string { + return c.localFs +} + +func (c *RCloneSession) Stop() { + c.cancel() +} + +type syncRequest struct { + ctx context.Context + info map[string]*rcloneInfo + cerr chan error + request *rcloneRequest + retryTime time.Duration +} + +func (c *RCloneSession) Upload(ctx context.Context, files ...string) error { + c.Lock() + + reqInfo := map[string]*rcloneInfo{} + + for _, file := range files { + info, ok := c.files[file] + + if !ok || info.localInfo == nil { + localInfo, err := os.Stat(filepath.Join(c.localFs, file)) + + if err != nil { + c.Unlock() + return fmt.Errorf("can't upload: %s: %w", file, err) + } + + if !localInfo.Mode().IsRegular() || localInfo.Size() == 0 { + c.Unlock() + return fmt.Errorf("can't upload: %s: %s", file, "file is not uploadable") + } + + if ok { + info.localInfo = localInfo + } else { + info := &rcloneInfo{ + file: file, + localInfo: localInfo, + } + + if snapInfo, ok := snaptype.ParseFileName(c.localFs, file); ok { + info.snapInfo = &snapInfo + } + + c.files[file] = info + } + } else { + reqInfo[file] = info + } + } + + c.Unlock() + + cerr := make(chan error, 1) + + c.syncQueue <- syncRequest{ctx, reqInfo, cerr, + &rcloneRequest{ + Group: c.Label(), + SrcFs: c.localFs, + DstFs: c.remoteFs, + Filter: rcloneFilter{ + IncludeRule: files, + }}, 0} + + return <-cerr +} + +func (c *RCloneSession) Download(ctx context.Context, files ...string) error { + c.Lock() + + if len(c.files) == 0 { + c.Unlock() + _, err := c.ReadRemoteDir(ctx, false) + if err != nil { + return fmt.Errorf("can't download: %s: %w", files, err) + } + c.Lock() + } + + reqInfo := map[string]*rcloneInfo{} + + for _, file := range files { + info, ok := c.files[file] + + if !ok || info.remoteInfo.Size == 0 { + c.Unlock() + return fmt.Errorf("can't download: %s: %w", file, os.ErrNotExist) + } + + reqInfo[file] = info + } + + c.Unlock() + + cerr := make(chan error, 1) + + c.syncQueue <- syncRequest{ctx, reqInfo, cerr, + &rcloneRequest{ + SrcFs: c.remoteFs, + DstFs: c.localFs, + Filter: rcloneFilter{ + IncludeRule: files, + }}, 0} + + return <-cerr +} + +func (c *RCloneSession) Cat(ctx context.Context, file string) (io.Reader, error) { + rclone, err := exec.LookPath("rclone") + + if err != nil { + return nil, err + } + + cmd := exec.CommandContext(ctx, rclone, "cat", c.remoteFs+"/"+file) + + stdout, err := cmd.StdoutPipe() + + if err != nil { + return nil, err + } + + if err := cmd.Start(); err != nil { + return nil, err + } + + return stdout, nil +} + +func (c *RCloneSession) ReadLocalDir(ctx context.Context) ([]fs.DirEntry, error) { + return os.ReadDir(c.localFs) +} + +func (c *RCloneSession) Label() string { + return strconv.FormatUint(murmur3.Sum64([]byte(c.localFs+"<->"+c.remoteFs)), 36) +} + +type remoteInfo struct { + Name string + Size uint64 + ModTime time.Time +} + +type SnapInfo interface { + Version() uint8 + From() uint64 + To() uint64 + Type() snaptype.Type +} + +type fileInfo struct { + *rcloneInfo +} + +func (fi *fileInfo) Name() string { + return fi.file +} + +func (fi *fileInfo) Size() int64 { + return int64(fi.remoteInfo.Size) +} + +func (fi *fileInfo) Mode() fs.FileMode { + return fs.ModeIrregular +} + 
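+// fileInfo and dirEntry adapt remote rclone listings to fs.FileInfo and
+// fs.DirEntry, so callers can treat remote entries like a local directory
+// listing. Illustrative sketch only (session is a hypothetical *RCloneSession):
+//
+//	entries, _ := session.ReadRemoteDir(ctx, false)
+//	for _, e := range entries {
+//		if fi, err := e.Info(); err == nil {
+//			fmt.Println(fi.Name(), fi.Size())
+//		}
+//	}
+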
+func (fi *fileInfo) ModTime() time.Time { + return fi.remoteInfo.ModTime +} + +func (fi *fileInfo) IsDir() bool { + return false +} + +func (fi *fileInfo) Sys() any { + return fi.rcloneInfo +} + +type dirEntry struct { + info *fileInfo +} + +func (e dirEntry) Name() string { + return e.info.Name() +} + +func (e dirEntry) IsDir() bool { + return e.info.IsDir() +} + +func (e dirEntry) Type() fs.FileMode { + return e.info.Mode() +} + +func (e dirEntry) Info() (fs.FileInfo, error) { + return e.info, nil +} + +var ErrAccessDenied = errors.New("access denied") + +func (c *RCloneSession) ReadRemoteDir(ctx context.Context, refresh bool) ([]fs.DirEntry, error) { + if len(c.remoteFs) == 0 { + return nil, fmt.Errorf("remote fs undefined") + } + + c.oplock.Lock() + defer c.oplock.Unlock() + + c.Lock() + fileCount := len(c.files) + c.Unlock() + + if fileCount == 0 || refresh { + listBody, err := json.Marshal(struct { + Fs string `json:"fs"` + Remote string `json:"remote"` + }{ + Fs: c.remoteFs, + Remote: "", + }) + + if err != nil { + return nil, fmt.Errorf("can't marshal list request: %w", err) + } + + listRequest, err := http.NewRequestWithContext(ctx, http.MethodPost, + c.rcloneUrl+"/operations/list", bytes.NewBuffer(listBody)) + + if err != nil { + return nil, fmt.Errorf("can't create list request: %w", err) + } + + listRequest.Header.Set("Content-Type", "application/json") + + var response *http.Response + + for i := 0; i < 10; i++ { + response, err = c.rcloneSession.Do(listRequest) //nolint:bodyclose + if err == nil { + break + } + time.Sleep(2 * time.Second) + } + + if err != nil { + return nil, fmt.Errorf("can't get remote list: %w", err) + } + + defer response.Body.Close() + + if response.StatusCode != http.StatusOK { + body, _ := io.ReadAll(response.Body) + e := struct { + Error string `json:"error"` + }{} + + if err := json.Unmarshal(body, &e); err == nil { + if strings.Contains(e.Error, "AccessDenied") { + return nil, fmt.Errorf("can't get remote list: %w", ErrAccessDenied) + } + } + + return nil, fmt.Errorf("can't get remote list: %s: %s", response.Status, string(body)) + } + + responseBody := struct { + List []remoteInfo `json:"list"` + }{} + + if err := json.NewDecoder(response.Body).Decode(&responseBody); err != nil { + return nil, fmt.Errorf("can't decode remote list: %w", err) + } + + for _, fi := range responseBody.List { + localInfo, _ := os.Stat(filepath.Join(c.localFs, fi.Name)) + + c.Lock() + if rcinfo, ok := c.files[fi.Name]; ok { + rcinfo.localInfo = localInfo + rcinfo.remoteInfo = fi + + if snapInfo, ok := snaptype.ParseFileName(c.localFs, fi.Name); ok { + rcinfo.snapInfo = &snapInfo + } else { + rcinfo.snapInfo = nil + } + + } else { + info := &rcloneInfo{ + file: fi.Name, + localInfo: localInfo, + remoteInfo: fi, + } + + if snapInfo, ok := snaptype.ParseFileName(c.localFs, fi.Name); ok { + info.snapInfo = &snapInfo + } + + c.files[fi.Name] = info + } + c.Unlock() + } + } + + var entries = make([]fs.DirEntry, 0, len(c.files)) + + for _, info := range c.files { + if info.remoteInfo.Size > 0 { + entries = append(entries, &dirEntry{&fileInfo{info}}) + } + } + + slices.SortFunc(entries, func(a, b fs.DirEntry) int { + return strings.Compare(a.Name(), b.Name()) + }) + + return entries, nil +} + +type rcloneFilter struct { + IncludeRule []string `json:"IncludeRule"` +} + +type rcloneRequest struct { + Async bool `json:"_async,omitempty"` + Config map[string]interface{} `json:"_config,omitempty"` + Group string `json:"group"` + SrcFs string `json:"srcFs"` + DstFs string 
`json:"dstFs"` + Filter rcloneFilter `json:"_filter"` +} + +func (c *RCloneSession) syncFiles(ctx context.Context) { + if !c.syncScheduled.CompareAndSwap(false, true) { + return + } + + g, gctx := errgroup.WithContext(ctx) + g.SetLimit(16) + + minRetryTime := 30 * time.Second + maxRetryTime := 300 * time.Second + + retry := func(request syncRequest) { + switch { + case request.retryTime == 0: + request.retryTime = minRetryTime + case request.retryTime < maxRetryTime: + request.retryTime += request.retryTime + default: + request.retryTime = maxRetryTime + } + + retryTimer := time.NewTicker(request.retryTime) + + select { + case <-request.ctx.Done(): + request.cerr <- request.ctx.Err() + return + case <-retryTimer.C: + } + + c.Lock() + syncQueue := c.syncQueue + c.Unlock() + + if syncQueue != nil { + syncQueue <- request + } else { + request.cerr <- fmt.Errorf("no sync queue available") + } + } + + go func() { + logEvery := time.NewTicker(20 * time.Second) + defer logEvery.Stop() + + select { + case <-gctx.Done(): + if syncCount := int(c.activeSyncCount.Load()) + len(c.syncQueue); syncCount > 0 { + log.Info("[rclone] Synced files", "processed", fmt.Sprintf("%d/%d", c.activeSyncCount.Load(), syncCount)) + } + + c.Lock() + syncQueue := c.syncQueue + c.syncQueue = nil + c.Unlock() + + if syncQueue != nil { + close(syncQueue) + } + + return + case <-logEvery.C: + if syncCount := int(c.activeSyncCount.Load()) + len(c.syncQueue); syncCount > 0 { + log.Info("[rclone] Syncing files", "progress", fmt.Sprintf("%d/%d", c.activeSyncCount.Load(), syncCount)) + } + } + }() + + go func() { + for req := range c.syncQueue { + + if gctx.Err() != nil { + req.cerr <- gctx.Err() + continue + } + + func(req syncRequest) { + g.Go(func() error { + c.activeSyncCount.Add(1) + + defer func() { + c.activeSyncCount.Add(-1) + if r := recover(); r != nil { + log.Error("[rclone] snapshot sync failed", "err", r, "stack", dbg.Stack()) + + if gctx.Err() != nil { + req.cerr <- gctx.Err() + } + + var err error + var ok bool + + if err, ok = r.(error); ok { + req.cerr <- fmt.Errorf("snapshot sync failed: %w", err) + } else { + req.cerr <- fmt.Errorf("snapshot sync failed: %s", r) + } + + return + } + }() + + if req.ctx.Err() != nil { + req.cerr <- req.ctx.Err() + return nil //nolint:nilerr + } + + if err := c.sync(gctx, req.request); err != nil { + + if gctx.Err() != nil { + req.cerr <- gctx.Err() + } else { + go retry(req) + } + + return nil //nolint:nilerr + } + + for _, info := range req.info { + localInfo, _ := os.Stat(filepath.Join(c.localFs, info.file)) + + info.Lock() + info.localInfo = localInfo + info.remoteInfo = remoteInfo{ + Name: info.file, + Size: uint64(localInfo.Size()), + ModTime: localInfo.ModTime(), + } + info.Unlock() + } + + req.cerr <- nil + return nil + }) + }(req) + } + + c.syncScheduled.Store(false) + + if err := g.Wait(); err != nil { + c.logger.Debug("[rclone] uploading failed", "err", err) + } + }() +} diff --git a/erigon-lib/downloader/rclone_test.go b/erigon-lib/downloader/rclone_test.go new file mode 100644 index 00000000000..9e58dc333a7 --- /dev/null +++ b/erigon-lib/downloader/rclone_test.go @@ -0,0 +1,100 @@ +package downloader_test + +import ( + "context" + "errors" + "io" + "os" + "os/exec" + "testing" + + "github.com/ledgerwatch/erigon-lib/downloader" + "github.com/ledgerwatch/log/v3" +) + +func hasRClone() bool { + rclone, _ := exec.LookPath("rclone") + + if len(rclone) == 0 { + return false + } + + return true +} + +func TestDownload(t *testing.T) { + t.Skip() + if !hasRClone() { + 
t.Skip("rclone not available") + } + + ctx := context.Background() + + tmpDir := t.TempDir() + remoteDir := "r2:erigon-v2-snapshots-bor-mainnet" + + cli, err := downloader.NewRCloneClient(log.Root()) + + if err != nil { + t.Fatal(err) + } + + rcc, err := cli.NewSession(ctx, tmpDir, remoteDir) + + if err != nil { + t.Fatal(err) + } + + dir, err := rcc.ReadRemoteDir(ctx, true) + + if err != nil { + if errors.Is(err, downloader.ErrAccessDenied) { + t.Skip("rclone dir not accessible") + } + + t.Fatal(err) + } + + for _, entry := range dir { + if len(entry.Name()) == 0 { + t.Fatal("unexpected nil file name") + } + //fmt.Println(entry.Name()) + } + + err = rcc.Download(ctx, "manifest.txt") + + if err != nil { + t.Fatal(err) + } + + h0, err := os.ReadFile("manifest.txt") + + if err != nil { + t.Fatal(err) + } + + if len(h0) == 0 { + t.Fatal("unexpected nil file") + } + //fmt.Print(string(h0)) + + reader, err := rcc.Cat(ctx, "manifest.txt") + + if err != nil { + t.Fatal(err) + } + + h1, err := io.ReadAll(reader) + + if err != nil { + t.Fatal(err) + } + + if string(h0) != string(h1) { + t.Fatal("Download and Cat contents mismatched") + } + //fmt.Print(string(h1)) + + rcc.Stop() +} diff --git a/erigon-lib/downloader/snaptype/files.go b/erigon-lib/downloader/snaptype/files.go index 99adb35042e..274c91bd35f 100644 --- a/erigon-lib/downloader/snaptype/files.go +++ b/erigon-lib/downloader/snaptype/files.go @@ -35,17 +35,15 @@ import ( type Type int const ( + Unknown Type = -1 Headers Type = iota Bodies Transactions BorEvents BorSpans - NumberOfTypes BeaconBlocks ) -var BorSnapshotTypes = []Type{BorEvents, BorSpans} - func (ft Type) String() string { switch ft { case Headers: @@ -80,7 +78,7 @@ func ParseFileType(s string) (Type, bool) { case "beaconblocks": return BeaconBlocks, true default: - return NumberOfTypes, false + return Unknown, false } } @@ -94,16 +92,25 @@ func (it IdxType) String() string { return string(it) } var BlockSnapshotTypes = []Type{Headers, Bodies, Transactions} +var BorSnapshotTypes = []Type{BorEvents, BorSpans} + var ( ErrInvalidFileName = fmt.Errorf("invalid compressed file name") ) -func FileName(from, to uint64, fileType string) string { - return fmt.Sprintf("v1-%06d-%06d-%s", from/1_000, to/1_000, fileType) +func FileName(version uint8, from, to uint64, fileType string) string { + return fmt.Sprintf("v%d-%06d-%06d-%s", version, from/1_000, to/1_000, fileType) +} + +func SegmentFileName(version uint8, from, to uint64, t Type) string { + return FileName(version, from, to, t.String()) + ".seg" +} +func DatFileName(version uint8, from, to uint64, fType string) string { + return FileName(version, from, to, fType) + ".dat" +} +func IdxFileName(version uint8, from, to uint64, fType string) string { + return FileName(version, from, to, fType) + ".idx" } -func SegmentFileName(from, to uint64, t Type) string { return FileName(from, to, t.String()) + ".seg" } -func DatFileName(from, to uint64, fType string) string { return FileName(from, to, fType) + ".dat" } -func IdxFileName(from, to uint64, fType string) string { return FileName(from, to, fType) + ".idx" } func FilterExt(in []FileInfo, expectExt string) (out []FileInfo) { for _, f := range in { @@ -114,8 +121,8 @@ func FilterExt(in []FileInfo, expectExt string) (out []FileInfo) { } return out } -func FilesWithExt(dir, expectExt string) ([]FileInfo, error) { - files, err := ParseDir(dir) +func FilesWithExt(dir string, version uint8, expectExt string) ([]FileInfo, error) { + files, err := ParseDir(dir, version) if err != nil { return 
nil, err } @@ -139,8 +146,16 @@ func ParseFileName(dir, fileName string) (res FileInfo, ok bool) { if len(parts) < 4 { return res, ok } - version := parts[0] - _ = version + + var version uint8 + if len(parts[0]) > 1 && parts[0][0] == 'v' { + v, err := strconv.ParseUint(parts[0][1:], 10, 64) + if err != nil { + return + } + version = uint8(v) + } + from, err := strconv.ParseUint(parts[1], 10, 64) if err != nil { return @@ -153,7 +168,8 @@ func ParseFileName(dir, fileName string) (res FileInfo, ok bool) { if !ok { return res, ok } - return FileInfo{From: from * 1_000, To: to * 1_000, Path: filepath.Join(dir, fileName), T: ft, Ext: ext}, ok + + return FileInfo{Version: version, From: from * 1_000, To: to * 1_000, Path: filepath.Join(dir, fileName), T: ft, Ext: ext}, ok } const Erigon3SeedableSteps = 32 @@ -164,6 +180,7 @@ const Erigon3SeedableSteps = 32 // - avoiding having too much files: // more files(shards) - means "more metadata", "more lookups for non-indexed queries", "more dictionaries", "more bittorrent connections", ... // less files - means small files will be removed after merge (no peers for this files). +const Erigon2OldMergeLimit = 500_000 const Erigon2MergeLimit = 100_000 const Erigon2MinSegmentSize = 1_000 @@ -178,13 +195,19 @@ type FileInfo struct { } func (f FileInfo) TorrentFileExists() bool { return dir.FileExist(f.Path + ".torrent") } -func (f FileInfo) Seedable() bool { return f.To-f.From == Erigon2MergeLimit } -func (f FileInfo) NeedTorrentFile() bool { return f.Seedable() && !f.TorrentFileExists() } -func (f FileInfo) Name() string { return filepath.Base(f.Path) } +func (f FileInfo) Seedable() bool { + return f.To-f.From == Erigon2MergeLimit || f.To-f.From == Erigon2OldMergeLimit +} +func (f FileInfo) NeedTorrentFile() bool { return f.Seedable() && !f.TorrentFileExists() } +func (f FileInfo) Name() string { return filepath.Base(f.Path) } -func IdxFiles(dir string) (res []FileInfo, err error) { return FilesWithExt(dir, ".idx") } -func Segments(dir string) (res []FileInfo, err error) { return FilesWithExt(dir, ".seg") } -func TmpFiles(dir string) (res []string, err error) { +func IdxFiles(dir string, version uint8) (res []FileInfo, err error) { + return FilesWithExt(dir, version, ".idx") +} +func Segments(dir string, version uint8) (res []FileInfo, err error) { + return FilesWithExt(dir, version, ".seg") +} +func TmpFiles(dir string, version uint8) (res []string, err error) { files, err := os.ReadDir(dir) if err != nil { if errors.Is(err, os.ErrNotExist) { @@ -192,20 +215,24 @@ func TmpFiles(dir string) (res []string, err error) { } return nil, err } + + v := fmt.Sprint("v", version) + for _, f := range files { - if f.IsDir() || len(f.Name()) < 3 { + if f.IsDir() || len(f.Name()) < 3 || !strings.HasPrefix(f.Name(), v) { continue } if filepath.Ext(f.Name()) != ".tmp" { continue } + res = append(res, filepath.Join(dir, f.Name())) } return res, nil } // ParseDir - reading dir ( -func ParseDir(dir string) (res []FileInfo, err error) { +func ParseDir(dir string, version uint8) (res []FileInfo, err error) { files, err := os.ReadDir(dir) if err != nil { if errors.Is(err, os.ErrNotExist) { @@ -213,12 +240,15 @@ func ParseDir(dir string) (res []FileInfo, err error) { } return nil, err } + + v := fmt.Sprint("v", version) + for _, f := range files { fileInfo, err := f.Info() if err != nil { return nil, err } - if f.IsDir() || fileInfo.Size() == 0 || len(f.Name()) < 3 { + if f.IsDir() || fileInfo.Size() == 0 || len(f.Name()) < 3 || !strings.HasPrefix(f.Name(), v) { continue } diff 
--git a/erigon-lib/downloader/torrent_files.go b/erigon-lib/downloader/torrent_files.go index 1e27c8e0e40..51d1c8ddd1a 100644 --- a/erigon-lib/downloader/torrent_files.go +++ b/erigon-lib/downloader/torrent_files.go @@ -71,7 +71,7 @@ func (tf *TorrentFiles) CreateTorrentFromMetaInfo(fPath string, mi *metainfo.Met return tf.createTorrentFromMetaInfo(fPath, mi) } func (tf *TorrentFiles) createTorrentFromMetaInfo(fPath string, mi *metainfo.MetaInfo) error { - file, err := os.Create(fPath) + file, err := os.Create(fPath + ".tmp") if err != nil { return err } @@ -79,7 +79,15 @@ func (tf *TorrentFiles) createTorrentFromMetaInfo(fPath string, mi *metainfo.Met if err := mi.Write(file); err != nil { return err } - file.Sync() + if err := file.Sync(); err != nil { + return err + } + if err := file.Close(); err != nil { + return err + } + if err := os.Rename(fPath+".tmp", fPath); err != nil { + return err + } return nil } diff --git a/erigon-lib/downloader/util.go b/erigon-lib/downloader/util.go index cd5bc26d8dd..b7ee2525756 100644 --- a/erigon-lib/downloader/util.go +++ b/erigon-lib/downloader/util.go @@ -17,8 +17,12 @@ package downloader import ( + "bytes" "context" + "crypto/sha1" "fmt" + "io" + "os" "path/filepath" "regexp" "runtime" @@ -29,17 +33,21 @@ import ( "github.com/anacrolix/torrent" "github.com/anacrolix/torrent/bencode" "github.com/anacrolix/torrent/metainfo" + "github.com/anacrolix/torrent/mmap_span" + "github.com/anacrolix/torrent/storage" + "github.com/edsrzf/mmap-go" + "github.com/ledgerwatch/log/v3" + "golang.org/x/sync/errgroup" + common2 "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/datadir" dir2 "github.com/ledgerwatch/erigon-lib/common/dir" "github.com/ledgerwatch/erigon-lib/downloader/downloadercfg" "github.com/ledgerwatch/erigon-lib/downloader/snaptype" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/log/v3" - "golang.org/x/sync/errgroup" ) -// udpOrHttpTrackers - torrent library spawning several goroutines and producing many requests for each tracker. So we limit amout of trackers by 7 +// udpOrHttpTrackers - torrent library spawning several goroutines and producing many requests for each tracker. So we limit amout of trackers by 8 var udpOrHttpTrackers = []string{ "udp://tracker.opentrackr.org:1337/announce", "udp://9.rarbg.com:2810/announce", @@ -155,6 +163,7 @@ func BuildTorrentIfNeed(ctx context.Context, fName, root string, torrentFiles *T if torrentFiles.Exists(fName) { return nil } + fPath := filepath.Join(root, fName) if !dir2.FileExist(fPath) { return nil @@ -359,3 +368,81 @@ func readPeerID(db kv.RoDB) (peerID []byte, err error) { func IsLocal(path string) bool { return isLocal(path) } + +func ScheduleVerifyFile(ctx context.Context, t *torrent.Torrent, completePieces *atomic.Uint64) error { + for i := 0; i < t.NumPieces(); i++ { + t.Piece(i).VerifyData() + + completePieces.Add(1) + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + } + return nil +} + +func VerifyFileFailFast(ctx context.Context, t *torrent.Torrent, root string, completePieces *atomic.Uint64) error { + span := new(mmap_span.MMapSpan) + defer span.Close() + info := t.Info() + for _, file := range info.UpvertedFiles() { + filename := filepath.Join(append([]string{root, info.Name}, file.Path...)...) 
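+		// (note, inferred from this hunk) each file backing the torrent is
+		// mmap'd read-only and appended to a single mmap_span, so pieces that
+		// straddle file boundaries can be hashed without copying; unlike
+		// ScheduleVerifyFile above, the first length or hash mismatch below
+		// aborts the whole verification, hence "fail fast"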
+		mm, err := mmapFile(filename)
+		if err != nil {
+			return err
+		}
+		if int64(len(mm.Bytes())) != file.Length {
+			return fmt.Errorf("file %q has wrong length", filename)
+		}
+		span.Append(mm)
+	}
+	span.InitIndex()
+
+	hasher := sha1.New()
+	for i := 0; i < info.NumPieces(); i++ {
+		p := info.Piece(i)
+		hasher.Reset()
+		_, err := io.Copy(hasher, io.NewSectionReader(span, p.Offset(), p.Length()))
+		if err != nil {
+			return err
+		}
+		good := bytes.Equal(hasher.Sum(nil), p.Hash().Bytes())
+		if !good {
+			return fmt.Errorf("hash mismatch at piece %d, file: %s", i, t.Name())
+		}
+
+		completePieces.Add(1)
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+	}
+	return nil
+}
+
+func mmapFile(name string) (mm storage.FileMapping, err error) {
+	f, err := os.Open(name)
+	if err != nil {
+		return
+	}
+	defer func() {
+		if err != nil {
+			f.Close()
+		}
+	}()
+	fi, err := f.Stat()
+	if err != nil {
+		return
+	}
+	if fi.Size() == 0 {
+		return
+	}
+	reg, err := mmap.MapRegion(f, -1, mmap.RDONLY, mmap.COPY, 0)
+	if err != nil {
+		return
+	}
+	return storage.WrapFileMapping(reg, f), nil
+}
diff --git a/erigon-lib/downloader/webseed.go b/erigon-lib/downloader/webseed.go
index 2c42b29b62b..f6433103356 100644
--- a/erigon-lib/downloader/webseed.go
+++ b/erigon-lib/downloader/webseed.go
@@ -169,7 +169,7 @@ func (d *WebSeeds) callS3Provider(ctx context.Context, token string) (snaptype.W
 	//v1:bucketName:accID:accessKeyID:accessKeySecret
 	l := strings.Split(token, ":")
 	if len(l) != 5 {
-		return nil, fmt.Errorf("token has invalid format, exepcing 'v1:tokenInBase64'")
+		return nil, fmt.Errorf("[snapshots] webseed token has invalid format: expecting 5 parts, found %d", len(l))
 	}
 	version, bucketName, accountId, accessKeyId, accessKeySecret := strings.TrimSpace(l[0]), strings.TrimSpace(l[1]), strings.TrimSpace(l[2]), strings.TrimSpace(l[3]), strings.TrimSpace(l[4])
 	if version != "v1" {
diff --git a/erigon-lib/go.mod b/erigon-lib/go.mod
index 312fba162e5..4287d04edef 100644
--- a/erigon-lib/go.mod
+++ b/erigon-lib/go.mod
@@ -4,8 +4,8 @@ go 1.20
 
 require (
 	github.com/erigontech/mdbx-go v0.27.21
-	github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20231223133303-67e341eff759
-	github.com/ledgerwatch/interfaces v0.0.0-20231209102305-b17e86fbe07d
+	github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240115083615-b5feeb63e191
+	github.com/ledgerwatch/interfaces v0.0.0-20240105174738-fe57049f198c
 	github.com/ledgerwatch/log/v3 v3.9.0
 	github.com/ledgerwatch/secp256k1 v1.0.0
 )
@@ -27,6 +27,7 @@ require (
 	github.com/edsrzf/mmap-go v1.1.0
 	github.com/go-stack/stack v1.8.1
 	github.com/gofrs/flock v0.8.1
+	github.com/golang/mock v1.6.0
 	github.com/google/btree v1.1.2
 	github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
 	github.com/hashicorp/golang-lru/v2 v2.0.6
@@ -34,20 +35,20 @@ require (
 	github.com/matryer/moq v0.3.3
 	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
 	github.com/pelletier/go-toml/v2 v2.1.0
-	github.com/prometheus/client_golang v1.17.0
+	github.com/prometheus/client_golang v1.18.0
 	github.com/prometheus/client_model v0.5.0
 	github.com/quasilyte/go-ruleguard/dsl v0.3.22
 	github.com/spaolacci/murmur3 v1.1.0
 	github.com/stretchr/testify v1.8.4
 	github.com/tidwall/btree v1.6.0
-	golang.org/x/crypto v0.16.0
+	golang.org/x/crypto v0.17.0
 	golang.org/x/exp v0.0.0-20230905200255-921286631fa9
-	golang.org/x/sync v0.5.0
-	golang.org/x/sys v0.15.0
+	golang.org/x/sync v0.6.0
+	golang.org/x/sys v0.16.0
 	golang.org/x/time v0.5.0
-	google.golang.org/grpc v1.59.0
+	google.golang.org/grpc v1.60.1
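+	// note: grpc v1.60.1 is paired with the protobuf v1.32.0 bump below; the
+	// *.pb.go files in this PR carry matching protoc-gen-go v1.32.0 / protoc
+	// v4.24.2 headers, so these versions move together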
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 - google.golang.org/protobuf v1.31.0 + google.golang.org/protobuf v1.32.0 ) require ( @@ -103,7 +104,7 @@ require ( github.com/huandu/xstrings v1.4.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.19 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/mschoch/smat v0.2.0 // indirect github.com/opencontainers/runtime-spec v1.0.2 // indirect @@ -127,8 +128,8 @@ require ( github.com/pion/webrtc/v3 v3.1.42 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.11.1 // indirect + github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 // indirect github.com/sirupsen/logrus v1.9.0 // indirect @@ -139,7 +140,7 @@ require ( golang.org/x/net v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.16.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect modernc.org/libc v1.24.1 // indirect modernc.org/mathutil v1.6.0 // indirect @@ -149,6 +150,6 @@ require ( zombiezen.com/go/sqlite v0.13.1 // indirect ) -replace github.com/ledgerwatch/interfaces => github.com/bobanetwork/v3-erigon-interfaces v0.0.0-20231016141625-3f56f9c7c5ce +replace github.com/ledgerwatch/interfaces => github.com/bobanetwork/v3-erigon-interfaces v0.0.0-20240124153147-63af2e42e1b1 // replace github.com/ledgerwatch/interfaces => ../erigon-interfaces diff --git a/erigon-lib/go.sum b/erigon-lib/go.sum index fbf5282e836..5d90a5f49d4 100644 --- a/erigon-lib/go.sum +++ b/erigon-lib/go.sum @@ -127,8 +127,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo= github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= -github.com/bobanetwork/v3-erigon-interfaces v0.0.0-20231016141625-3f56f9c7c5ce h1:9F9EgLzZJYpH8KRtmHo4j3oDi/NSL7W115Lee38CeHg= -github.com/bobanetwork/v3-erigon-interfaces v0.0.0-20231016141625-3f56f9c7c5ce/go.mod h1:ugQv1QllJzBny3cKZKxUrSnykkjkBgm27eQM6dnGAcc= +github.com/bobanetwork/v3-erigon-interfaces v0.0.0-20240124153147-63af2e42e1b1 h1:tdQB0HiXYQiCjrS+ijAPL1zFH7d13c4/BSj72KlKEGY= +github.com/bobanetwork/v3-erigon-interfaces v0.0.0-20240124153147-63af2e42e1b1/go.mod h1:ugQv1QllJzBny3cKZKxUrSnykkjkBgm27eQM6dnGAcc= github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo= github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo= github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 h1:GKTyiRCL6zVf5wWaqKnf+7Qs6GbEPfd4iMOitWzXJx8= @@ -217,6 +217,8 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/groupcache 
v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -293,8 +295,8 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20231223133303-67e341eff759 h1:ov31f3HPnYycT15Lhg3k9Q4Dx+qpQFCQWAcoxjoiGvM= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20231223133303-67e341eff759/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240115083615-b5feeb63e191 h1:X/mHEyh0xEuhixj6hKCNQl04NuNDToYWJ08vr66e6L0= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240115083615-b5feeb63e191/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/log/v3 v3.9.0 h1:iDwrXe0PVwBC68Dd94YSsHbMgQ3ufsgjzXtFNFVZFRk= github.com/ledgerwatch/log/v3 v3.9.0/go.mod h1:EiAY6upmI/6LkNhOVxb4eVsmsP11HZCnZ3PlJMjYiqE= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= @@ -307,8 +309,8 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= @@ -395,8 +397,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod 
h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -407,15 +409,15 @@ github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/quasilyte/go-ruleguard/dsl v0.3.22 h1:wd8zkOhSNr+I+8Qeciml08ivDt1pSXe60+5DqOpCjPE= github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -464,6 +466,7 @@ github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPyS github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= @@ -488,8 +491,8 @@ golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220516162934-403b01795ae8/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= 
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= -golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= @@ -500,6 +503,7 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= @@ -520,6 +524,7 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201201195509-5d6afe98e0b7/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211201190559-0a0e4e1bb54c/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -542,9 +547,10 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -565,7 +571,9 @@ golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -580,8 +588,8 @@ golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -611,6 +619,7 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= @@ -626,8 +635,8 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -635,8 +644,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= +google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -647,8 +656,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/erigon-lib/gointerfaces/downloader/downloader.pb.go b/erigon-lib/gointerfaces/downloader/downloader.pb.go index a829ec07a8a..8870001c401 100644 --- a/erigon-lib/gointerfaces/downloader/downloader.pb.go +++ b/erigon-lib/gointerfaces/downloader/downloader.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.23.3 +// protoc-gen-go v1.32.0 +// protoc v4.24.2 // source: downloader/downloader.proto package downloader diff --git a/erigon-lib/gointerfaces/downloader/downloader_grpc.pb.go b/erigon-lib/gointerfaces/downloader/downloader_grpc.pb.go index b531ac84d36..369c9b494c4 100644 --- a/erigon-lib/gointerfaces/downloader/downloader_grpc.pb.go +++ b/erigon-lib/gointerfaces/downloader/downloader_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v4.23.3 +// - protoc v4.24.2 // source: downloader/downloader.proto package downloader @@ -31,7 +31,7 @@ const ( // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type DownloaderClient interface { - // Erigon "download once" - means restart/upgrade will not download files (and will be fast) + // Erigon "download once" - means restart/upgrade/downgrade will not download files (and will be fast) // After "download once" - Erigon will produce and seed new files // Downloader will able: seed new files (already existing on FS), download uncomplete parts of existing files (if Verify found some bad parts) ProhibitNewDownloads(ctx context.Context, in *ProhibitNewDownloadsRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) @@ -101,7 +101,7 @@ func (c *downloaderClient) Stats(ctx context.Context, in *StatsRequest, opts ... // All implementations must embed UnimplementedDownloaderServer // for forward compatibility type DownloaderServer interface { - // Erigon "download once" - means restart/upgrade will not download files (and will be fast) + // Erigon "download once" - means restart/upgrade/downgrade will not download files (and will be fast) // After "download once" - Erigon will produce and seed new files // Downloader will able: seed new files (already existing on FS), download uncomplete parts of existing files (if Verify found some bad parts) ProhibitNewDownloads(context.Context, *ProhibitNewDownloadsRequest) (*emptypb.Empty, error) diff --git a/erigon-lib/gointerfaces/execution/execution.pb.go b/erigon-lib/gointerfaces/execution/execution.pb.go index 839a7fd5347..24ef0cbe9b3 100644 --- a/erigon-lib/gointerfaces/execution/execution.pb.go +++ b/erigon-lib/gointerfaces/execution/execution.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.23.3 +// protoc-gen-go v1.32.0 +// protoc v4.24.2 // source: execution/execution.proto package execution diff --git a/erigon-lib/gointerfaces/execution/execution_grpc.pb.go b/erigon-lib/gointerfaces/execution/execution_grpc.pb.go index f47c35e1cb1..065a305e8fc 100644 --- a/erigon-lib/gointerfaces/execution/execution_grpc.pb.go +++ b/erigon-lib/gointerfaces/execution/execution_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v4.23.3 +// - protoc v4.24.2 // source: execution/execution.proto package execution diff --git a/erigon-lib/gointerfaces/remote/ethbackend.pb.go b/erigon-lib/gointerfaces/remote/ethbackend.pb.go index 78ba974b68a..684abb61c33 100644 --- a/erigon-lib/gointerfaces/remote/ethbackend.pb.go +++ b/erigon-lib/gointerfaces/remote/ethbackend.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.23.3 +// protoc-gen-go v1.32.0 +// protoc v4.24.2 // source: remote/ethbackend.proto package remote diff --git a/erigon-lib/gointerfaces/remote/ethbackend_grpc.pb.go b/erigon-lib/gointerfaces/remote/ethbackend_grpc.pb.go index 8e986e082db..4a410a32b86 100644 --- a/erigon-lib/gointerfaces/remote/ethbackend_grpc.pb.go +++ b/erigon-lib/gointerfaces/remote/ethbackend_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v4.23.3 +// - protoc v4.24.2 // source: remote/ethbackend.proto package remote diff --git a/erigon-lib/gointerfaces/remote/kv.pb.go b/erigon-lib/gointerfaces/remote/kv.pb.go index 05ba9ec5bd4..d1a45b6c44a 100644 --- a/erigon-lib/gointerfaces/remote/kv.pb.go +++ b/erigon-lib/gointerfaces/remote/kv.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.23.3 +// protoc-gen-go v1.32.0 +// protoc v4.24.2 // source: remote/kv.proto package remote diff --git a/erigon-lib/gointerfaces/remote/kv_grpc.pb.go b/erigon-lib/gointerfaces/remote/kv_grpc.pb.go index eb32cbf395c..d0305cb0fb4 100644 --- a/erigon-lib/gointerfaces/remote/kv_grpc.pb.go +++ b/erigon-lib/gointerfaces/remote/kv_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v4.23.3 +// - protoc v4.24.2 // source: remote/kv.proto package remote diff --git a/erigon-lib/gointerfaces/sentinel/sentinel.pb.go b/erigon-lib/gointerfaces/sentinel/sentinel.pb.go index f82d1bb3f54..b51920d0574 100644 --- a/erigon-lib/gointerfaces/sentinel/sentinel.pb.go +++ b/erigon-lib/gointerfaces/sentinel/sentinel.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.23.3 +// protoc-gen-go v1.32.0 +// protoc v4.24.2 // source: p2psentinel/sentinel.proto package sentinel @@ -21,91 +21,69 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -type GossipType int32 - -const ( - // Global gossip topics. - GossipType_BeaconBlockGossipType GossipType = 0 - GossipType_AggregateAndProofGossipType GossipType = 1 - GossipType_VoluntaryExitGossipType GossipType = 2 - GossipType_ProposerSlashingGossipType GossipType = 3 - GossipType_AttesterSlashingGossipType GossipType = 4 - GossipType_BlobSidecarType GossipType = 5 - GossipType_BlsToExecutionChangeGossipType GossipType = 6 -) - -// Enum value maps for GossipType. 
-var ( - GossipType_name = map[int32]string{ - 0: "BeaconBlockGossipType", - 1: "AggregateAndProofGossipType", - 2: "VoluntaryExitGossipType", - 3: "ProposerSlashingGossipType", - 4: "AttesterSlashingGossipType", - 5: "BlobSidecarType", - 6: "BlsToExecutionChangeGossipType", - } - GossipType_value = map[string]int32{ - "BeaconBlockGossipType": 0, - "AggregateAndProofGossipType": 1, - "VoluntaryExitGossipType": 2, - "ProposerSlashingGossipType": 3, - "AttesterSlashingGossipType": 4, - "BlobSidecarType": 5, - "BlsToExecutionChangeGossipType": 6, - } -) - -func (x GossipType) Enum() *GossipType { - p := new(GossipType) - *p = x - return p +type EmptyMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (x GossipType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +func (x *EmptyMessage) Reset() { + *x = EmptyMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2psentinel_sentinel_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (GossipType) Descriptor() protoreflect.EnumDescriptor { - return file_p2psentinel_sentinel_proto_enumTypes[0].Descriptor() +func (x *EmptyMessage) String() string { + return protoimpl.X.MessageStringOf(x) } -func (GossipType) Type() protoreflect.EnumType { - return &file_p2psentinel_sentinel_proto_enumTypes[0] -} +func (*EmptyMessage) ProtoMessage() {} -func (x GossipType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) +func (x *EmptyMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2psentinel_sentinel_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -// Deprecated: Use GossipType.Descriptor instead. -func (GossipType) EnumDescriptor() ([]byte, []int) { +// Deprecated: Use EmptyMessage.ProtoReflect.Descriptor instead. +func (*EmptyMessage) Descriptor() ([]byte, []int) { return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{0} } -type EmptyMessage struct { +type SubscriptionData struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + Filter *string `protobuf:"bytes,1,opt,name=filter,proto3,oneof" json:"filter,omitempty"` } -func (x *EmptyMessage) Reset() { - *x = EmptyMessage{} +func (x *SubscriptionData) Reset() { + *x = SubscriptionData{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentinel_sentinel_proto_msgTypes[0] + mi := &file_p2psentinel_sentinel_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *EmptyMessage) String() string { +func (x *SubscriptionData) String() string { return protoimpl.X.MessageStringOf(x) } -func (*EmptyMessage) ProtoMessage() {} +func (*SubscriptionData) ProtoMessage() {} -func (x *EmptyMessage) ProtoReflect() protoreflect.Message { - mi := &file_p2psentinel_sentinel_proto_msgTypes[0] +func (x *SubscriptionData) ProtoReflect() protoreflect.Message { + mi := &file_p2psentinel_sentinel_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -116,9 +94,16 @@ func (x *EmptyMessage) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use EmptyMessage.ProtoReflect.Descriptor instead. 
-func (*EmptyMessage) Descriptor() ([]byte, []int) { - return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{0} +// Deprecated: Use SubscriptionData.ProtoReflect.Descriptor instead. +func (*SubscriptionData) Descriptor() ([]byte, []int) { + return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{1} +} + +func (x *SubscriptionData) GetFilter() string { + if x != nil && x.Filter != nil { + return *x.Filter + } + return "" } type Peer struct { @@ -132,7 +117,7 @@ type Peer struct { func (x *Peer) Reset() { *x = Peer{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentinel_sentinel_proto_msgTypes[1] + mi := &file_p2psentinel_sentinel_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -145,7 +130,7 @@ func (x *Peer) String() string { func (*Peer) ProtoMessage() {} func (x *Peer) ProtoReflect() protoreflect.Message { - mi := &file_p2psentinel_sentinel_proto_msgTypes[1] + mi := &file_p2psentinel_sentinel_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -158,7 +143,7 @@ func (x *Peer) ProtoReflect() protoreflect.Message { // Deprecated: Use Peer.ProtoReflect.Descriptor instead. func (*Peer) Descriptor() ([]byte, []int) { - return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{1} + return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{2} } func (x *Peer) GetPid() string { @@ -173,16 +158,15 @@ type GossipData struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` // SSZ encoded data - Type GossipType `protobuf:"varint,2,opt,name=type,proto3,enum=sentinel.GossipType" json:"type,omitempty"` - Peer *Peer `protobuf:"bytes,3,opt,name=peer,proto3,oneof" json:"peer,omitempty"` - BlobIndex *uint32 `protobuf:"varint,4,opt,name=blob_index,json=blobIndex,proto3,oneof" json:"blob_index,omitempty"` // Blob identifier for EIP4844 + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` // SSZ encoded data + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Peer *Peer `protobuf:"bytes,3,opt,name=peer,proto3,oneof" json:"peer,omitempty"` } func (x *GossipData) Reset() { *x = GossipData{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentinel_sentinel_proto_msgTypes[2] + mi := &file_p2psentinel_sentinel_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -195,7 +179,7 @@ func (x *GossipData) String() string { func (*GossipData) ProtoMessage() {} func (x *GossipData) ProtoReflect() protoreflect.Message { - mi := &file_p2psentinel_sentinel_proto_msgTypes[2] + mi := &file_p2psentinel_sentinel_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -208,7 +192,7 @@ func (x *GossipData) ProtoReflect() protoreflect.Message { // Deprecated: Use GossipData.ProtoReflect.Descriptor instead. 
func (*GossipData) Descriptor() ([]byte, []int) { - return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{2} + return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{3} } func (x *GossipData) GetData() []byte { @@ -218,11 +202,11 @@ func (x *GossipData) GetData() []byte { return nil } -func (x *GossipData) GetType() GossipType { +func (x *GossipData) GetName() string { if x != nil { - return x.Type + return x.Name } - return GossipType_BeaconBlockGossipType + return "" } func (x *GossipData) GetPeer() *Peer { @@ -232,13 +216,6 @@ func (x *GossipData) GetPeer() *Peer { return nil } -func (x *GossipData) GetBlobIndex() uint32 { - if x != nil && x.BlobIndex != nil { - return *x.BlobIndex - } - return 0 -} - type Status struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -254,7 +231,7 @@ type Status struct { func (x *Status) Reset() { *x = Status{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentinel_sentinel_proto_msgTypes[3] + mi := &file_p2psentinel_sentinel_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -267,7 +244,7 @@ func (x *Status) String() string { func (*Status) ProtoMessage() {} func (x *Status) ProtoReflect() protoreflect.Message { - mi := &file_p2psentinel_sentinel_proto_msgTypes[3] + mi := &file_p2psentinel_sentinel_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -280,7 +257,7 @@ func (x *Status) ProtoReflect() protoreflect.Message { // Deprecated: Use Status.ProtoReflect.Descriptor instead. func (*Status) Descriptor() ([]byte, []int) { - return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{3} + return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{4} } func (x *Status) GetForkDigest() uint32 { @@ -329,7 +306,7 @@ type PeerCount struct { func (x *PeerCount) Reset() { *x = PeerCount{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentinel_sentinel_proto_msgTypes[4] + mi := &file_p2psentinel_sentinel_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -342,7 +319,7 @@ func (x *PeerCount) String() string { func (*PeerCount) ProtoMessage() {} func (x *PeerCount) ProtoReflect() protoreflect.Message { - mi := &file_p2psentinel_sentinel_proto_msgTypes[4] + mi := &file_p2psentinel_sentinel_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -355,7 +332,7 @@ func (x *PeerCount) ProtoReflect() protoreflect.Message { // Deprecated: Use PeerCount.ProtoReflect.Descriptor instead. 
func (*PeerCount) Descriptor() ([]byte, []int) { - return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{4} + return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{5} } func (x *PeerCount) GetAmount() uint64 { @@ -377,7 +354,7 @@ type RequestData struct { func (x *RequestData) Reset() { *x = RequestData{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentinel_sentinel_proto_msgTypes[5] + mi := &file_p2psentinel_sentinel_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -390,7 +367,7 @@ func (x *RequestData) String() string { func (*RequestData) ProtoMessage() {} func (x *RequestData) ProtoReflect() protoreflect.Message { - mi := &file_p2psentinel_sentinel_proto_msgTypes[5] + mi := &file_p2psentinel_sentinel_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -403,7 +380,7 @@ func (x *RequestData) ProtoReflect() protoreflect.Message { // Deprecated: Use RequestData.ProtoReflect.Descriptor instead. func (*RequestData) Descriptor() ([]byte, []int) { - return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{5} + return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{6} } func (x *RequestData) GetData() []byte { @@ -433,7 +410,7 @@ type ResponseData struct { func (x *ResponseData) Reset() { *x = ResponseData{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentinel_sentinel_proto_msgTypes[6] + mi := &file_p2psentinel_sentinel_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -446,7 +423,7 @@ func (x *ResponseData) String() string { func (*ResponseData) ProtoMessage() {} func (x *ResponseData) ProtoReflect() protoreflect.Message { - mi := &file_p2psentinel_sentinel_proto_msgTypes[6] + mi := &file_p2psentinel_sentinel_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -459,7 +436,7 @@ func (x *ResponseData) ProtoReflect() protoreflect.Message { // Deprecated: Use ResponseData.ProtoReflect.Descriptor instead. 
func (*ResponseData) Descriptor() ([]byte, []int) { - return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{6} + return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{7} } func (x *ResponseData) GetData() []byte { @@ -490,94 +467,79 @@ var file_p2psentinel_sentinel_proto_rawDesc = []byte{ 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x1a, 0x11, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x0e, 0x0a, 0x0c, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x18, 0x0a, 0x04, 0x50, 0x65, 0x65, - 0x72, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x70, 0x69, 0x64, 0x22, 0xaf, 0x01, 0x0a, 0x0a, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x44, 0x61, - 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, - 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x27, 0x0a, 0x04, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, - 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, - 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, - 0x62, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x01, 0x52, - 0x09, 0x62, 0x6c, 0x6f, 0x62, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x88, 0x01, 0x01, 0x42, 0x07, 0x0a, - 0x05, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x5f, - 0x69, 0x6e, 0x64, 0x65, 0x78, 0x22, 0xcd, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x6f, 0x72, 0x6b, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x66, 0x6f, 0x72, 0x6b, 0x44, 0x69, 0x67, 0x65, 0x73, - 0x74, 0x12, 0x32, 0x0a, 0x0e, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x72, - 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0d, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, - 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, - 0x65, 0x64, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, - 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x28, - 0x0a, 0x09, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x08, - 0x68, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x68, 0x65, 0x61, 0x64, - 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x68, 0x65, 0x61, - 0x64, 0x53, 0x6c, 0x6f, 0x74, 0x22, 0x23, 0x0a, 0x09, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x37, 0x0a, 0x0b, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, - 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, - 0x70, 0x69, 0x63, 0x22, 0x5c, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, - 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x22, 0x0a, - 0x04, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x73, 0x65, - 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x52, 0x04, 0x70, 0x65, 0x65, - 0x72, 0x2a, 0xde, 0x01, 0x0a, 0x0a, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x19, 0x0a, 0x15, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x47, - 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x00, 0x12, 0x1f, 0x0a, 0x1b, 0x41, - 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, - 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x01, 0x12, 0x1b, 0x0a, 0x17, - 0x56, 0x6f, 0x6c, 0x75, 0x6e, 0x74, 0x61, 0x72, 0x79, 0x45, 0x78, 0x69, 0x74, 0x47, 0x6f, 0x73, - 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x72, 0x6f, - 0x70, 0x6f, 0x73, 0x65, 0x72, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x47, 0x6f, 0x73, - 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x03, 0x12, 0x1e, 0x0a, 0x1a, 0x41, 0x74, 0x74, - 0x65, 0x73, 0x74, 0x65, 0x72, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x47, 0x6f, 0x73, - 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x42, 0x6c, 0x6f, - 0x62, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x54, 0x79, 0x70, 0x65, 0x10, 0x05, 0x12, 0x22, - 0x0a, 0x1e, 0x42, 0x6c, 0x73, 0x54, 0x6f, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, - 0x10, 0x06, 0x32, 0x90, 0x04, 0x0a, 0x08, 0x53, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x12, - 0x41, 0x0a, 0x0f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x47, 0x6f, 0x73, 0x73, - 0x69, 0x70, 0x12, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x14, 0x2e, 0x73, 0x65, 0x6e, - 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x44, 0x61, 0x74, 0x61, - 0x30, 0x01, 0x12, 0x3c, 0x0a, 0x0b, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x15, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, - 0x6e, 0x65, 0x6c, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x61, 0x74, 0x61, - 0x12, 0x35, 0x0a, 0x09, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x10, 0x2e, - 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, - 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x50, 0x65, - 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 
0x73, 0x61, 0x67, 0x65, 0x1a, 0x13, 0x2e, 0x73, 0x65, - 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x12, 0x31, 0x0a, 0x07, 0x42, 0x61, 0x6e, 0x50, 0x65, 0x65, 0x72, 0x12, 0x0e, 0x2e, 0x73, 0x65, - 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x16, 0x2e, 0x73, 0x65, - 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x12, 0x33, 0x0a, 0x09, 0x55, 0x6e, 0x62, 0x61, 0x6e, 0x50, 0x65, 0x65, 0x72, + 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x3a, 0x0a, 0x10, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1b, 0x0a, + 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x18, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x12, 0x10, 0x0a, + 0x03, 0x70, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x69, 0x64, 0x22, + 0x66, 0x0a, 0x0a, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, + 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, + 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x04, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, + 0x65, 0x65, 0x72, 0x48, 0x00, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x88, 0x01, 0x01, 0x42, 0x07, + 0x0a, 0x05, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x22, 0xcd, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x6f, 0x72, 0x6b, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x66, 0x6f, 0x72, 0x6b, 0x44, 0x69, 0x67, + 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x0e, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, + 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0d, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, + 0x7a, 0x65, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x66, 0x69, 0x6e, 0x61, 0x6c, + 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0e, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x45, 0x70, 0x6f, 0x63, 0x68, + 0x12, 0x28, 0x0a, 0x09, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, + 0x52, 0x08, 0x68, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x68, 0x65, + 0x61, 0x64, 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x68, + 0x65, 0x61, 0x64, 0x53, 0x6c, 0x6f, 0x74, 0x22, 0x23, 0x0a, 0x09, 0x50, 0x65, 0x65, 0x72, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x37, 0x0a, 0x0b, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, + 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x05, + 0x74, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x5c, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, + 0x22, 0x0a, 0x04, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, + 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x52, 0x04, 0x70, + 0x65, 0x65, 0x72, 0x32, 0x94, 0x04, 0x0a, 0x08, 0x53, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, + 0x12, 0x45, 0x0a, 0x0f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x47, 0x6f, 0x73, + 0x73, 0x69, 0x70, 0x12, 0x1a, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x53, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x1a, + 0x14, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x47, 0x6f, 0x73, 0x73, 0x69, + 0x70, 0x44, 0x61, 0x74, 0x61, 0x30, 0x01, 0x12, 0x3c, 0x0a, 0x0b, 0x53, 0x65, 0x6e, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, + 0x6c, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x1a, 0x16, 0x2e, + 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x35, 0x0a, 0x09, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x10, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, + 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, + 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x1a, 0x13, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x07, 0x42, 0x61, 0x6e, 0x50, 0x65, 0x65, 0x72, 0x12, 0x0e, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x0c, 0x50, 0x65, 0x6e, 0x61, - 0x6c, 0x69, 0x7a, 0x65, 0x50, 0x65, 0x65, 0x72, 0x12, 0x0e, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, - 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, - 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x12, 0x34, 0x0a, 0x0a, 0x52, 0x65, 0x77, 0x61, 0x72, 0x64, 0x50, 0x65, 0x65, 0x72, 0x12, 0x0e, - 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x16, - 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3d, 0x0a, 0x0d, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, - 0x68, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x14, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, - 0x65, 0x6c, 0x2e, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x44, 0x61, 0x74, 0x61, 0x1a, 0x16, 0x2e, + 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x33, 0x0a, 0x09, 0x55, 0x6e, 0x62, 0x61, 
+ 0x6e, 0x50, 0x65, 0x65, 0x72, 0x12, 0x0e, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, + 0x2e, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, + 0x0c, 0x50, 0x65, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x50, 0x65, 0x65, 0x72, 0x12, 0x0e, 0x2e, + 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x15, 0x5a, 0x13, 0x2e, 0x2f, 0x73, 0x65, 0x6e, 0x74, 0x69, - 0x6e, 0x65, 0x6c, 0x3b, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x52, 0x65, 0x77, 0x61, 0x72, 0x64, 0x50, + 0x65, 0x65, 0x72, 0x12, 0x0e, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, + 0x65, 0x65, 0x72, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3d, 0x0a, 0x0d, 0x50, + 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x14, 0x2e, 0x73, + 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x44, 0x61, + 0x74, 0x61, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x15, 0x5a, 0x13, 0x2e, 0x2f, + 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x3b, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, + 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -592,48 +554,46 @@ func file_p2psentinel_sentinel_proto_rawDescGZIP() []byte { return file_p2psentinel_sentinel_proto_rawDescData } -var file_p2psentinel_sentinel_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_p2psentinel_sentinel_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_p2psentinel_sentinel_proto_msgTypes = make([]protoimpl.MessageInfo, 8) var file_p2psentinel_sentinel_proto_goTypes = []interface{}{ - (GossipType)(0), // 0: sentinel.GossipType - (*EmptyMessage)(nil), // 1: sentinel.EmptyMessage - (*Peer)(nil), // 2: sentinel.Peer - (*GossipData)(nil), // 3: sentinel.GossipData - (*Status)(nil), // 4: sentinel.Status - (*PeerCount)(nil), // 5: sentinel.PeerCount - (*RequestData)(nil), // 6: sentinel.RequestData - (*ResponseData)(nil), // 7: sentinel.ResponseData - (*types.H256)(nil), // 8: types.H256 + (*EmptyMessage)(nil), // 0: sentinel.EmptyMessage + (*SubscriptionData)(nil), // 1: sentinel.SubscriptionData + (*Peer)(nil), // 2: sentinel.Peer + (*GossipData)(nil), // 3: sentinel.GossipData + (*Status)(nil), // 4: sentinel.Status + (*PeerCount)(nil), // 5: sentinel.PeerCount + (*RequestData)(nil), // 6: sentinel.RequestData + (*ResponseData)(nil), // 7: sentinel.ResponseData + (*types.H256)(nil), // 8: types.H256 } var file_p2psentinel_sentinel_proto_depIdxs = []int32{ - 0, // 0: sentinel.GossipData.type:type_name -> sentinel.GossipType - 2, // 1: sentinel.GossipData.peer:type_name -> sentinel.Peer - 8, // 2: sentinel.Status.finalized_root:type_name -> types.H256 - 8, // 3: sentinel.Status.head_root:type_name -> types.H256 - 2, // 4: sentinel.ResponseData.peer:type_name -> sentinel.Peer - 1, // 5: sentinel.Sentinel.SubscribeGossip:input_type -> sentinel.EmptyMessage - 6, // 6: sentinel.Sentinel.SendRequest:input_type -> 
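Most of this hunk is protoc's regenerated raw descriptor, but the decoded change is small: the `GossipType` enum is dropped, and a new `SubscriptionData` message with a proto3 `optional string filter` field (note the `_filter` oneof wrapper bytes) is added. Following protoc-gen-go conventions, the generated type should look roughly like this sketch (the usual `state`/`sizeCache`/`unknownFields` bookkeeping fields are elided):

```go
// Sketch of the regenerated message, per protoc-gen-go conventions.
type SubscriptionData struct {
	// proto3 `optional` fields are emitted as pointers so that "unset"
	// can be distinguished from the empty string.
	Filter *string `protobuf:"bytes,1,opt,name=filter,proto3,oneof" json:"filter,omitempty"`
}

func (x *SubscriptionData) GetFilter() string {
	if x != nil && x.Filter != nil {
		return *x.Filter
	}
	return ""
}
```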
sentinel.RequestData - 4, // 7: sentinel.Sentinel.SetStatus:input_type -> sentinel.Status - 1, // 8: sentinel.Sentinel.GetPeers:input_type -> sentinel.EmptyMessage - 2, // 9: sentinel.Sentinel.BanPeer:input_type -> sentinel.Peer - 2, // 10: sentinel.Sentinel.UnbanPeer:input_type -> sentinel.Peer - 2, // 11: sentinel.Sentinel.PenalizePeer:input_type -> sentinel.Peer - 2, // 12: sentinel.Sentinel.RewardPeer:input_type -> sentinel.Peer - 3, // 13: sentinel.Sentinel.PublishGossip:input_type -> sentinel.GossipData - 3, // 14: sentinel.Sentinel.SubscribeGossip:output_type -> sentinel.GossipData - 7, // 15: sentinel.Sentinel.SendRequest:output_type -> sentinel.ResponseData - 1, // 16: sentinel.Sentinel.SetStatus:output_type -> sentinel.EmptyMessage - 5, // 17: sentinel.Sentinel.GetPeers:output_type -> sentinel.PeerCount - 1, // 18: sentinel.Sentinel.BanPeer:output_type -> sentinel.EmptyMessage - 1, // 19: sentinel.Sentinel.UnbanPeer:output_type -> sentinel.EmptyMessage - 1, // 20: sentinel.Sentinel.PenalizePeer:output_type -> sentinel.EmptyMessage - 1, // 21: sentinel.Sentinel.RewardPeer:output_type -> sentinel.EmptyMessage - 1, // 22: sentinel.Sentinel.PublishGossip:output_type -> sentinel.EmptyMessage - 14, // [14:23] is the sub-list for method output_type - 5, // [5:14] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name + 2, // 0: sentinel.GossipData.peer:type_name -> sentinel.Peer + 8, // 1: sentinel.Status.finalized_root:type_name -> types.H256 + 8, // 2: sentinel.Status.head_root:type_name -> types.H256 + 2, // 3: sentinel.ResponseData.peer:type_name -> sentinel.Peer + 1, // 4: sentinel.Sentinel.SubscribeGossip:input_type -> sentinel.SubscriptionData + 6, // 5: sentinel.Sentinel.SendRequest:input_type -> sentinel.RequestData + 4, // 6: sentinel.Sentinel.SetStatus:input_type -> sentinel.Status + 0, // 7: sentinel.Sentinel.GetPeers:input_type -> sentinel.EmptyMessage + 2, // 8: sentinel.Sentinel.BanPeer:input_type -> sentinel.Peer + 2, // 9: sentinel.Sentinel.UnbanPeer:input_type -> sentinel.Peer + 2, // 10: sentinel.Sentinel.PenalizePeer:input_type -> sentinel.Peer + 2, // 11: sentinel.Sentinel.RewardPeer:input_type -> sentinel.Peer + 3, // 12: sentinel.Sentinel.PublishGossip:input_type -> sentinel.GossipData + 3, // 13: sentinel.Sentinel.SubscribeGossip:output_type -> sentinel.GossipData + 7, // 14: sentinel.Sentinel.SendRequest:output_type -> sentinel.ResponseData + 0, // 15: sentinel.Sentinel.SetStatus:output_type -> sentinel.EmptyMessage + 5, // 16: sentinel.Sentinel.GetPeers:output_type -> sentinel.PeerCount + 0, // 17: sentinel.Sentinel.BanPeer:output_type -> sentinel.EmptyMessage + 0, // 18: sentinel.Sentinel.UnbanPeer:output_type -> sentinel.EmptyMessage + 0, // 19: sentinel.Sentinel.PenalizePeer:output_type -> sentinel.EmptyMessage + 0, // 20: sentinel.Sentinel.RewardPeer:output_type -> sentinel.EmptyMessage + 0, // 21: sentinel.Sentinel.PublishGossip:output_type -> sentinel.EmptyMessage + 13, // [13:22] is the sub-list for method output_type + 4, // [4:13] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name } func init() { file_p2psentinel_sentinel_proto_init() } @@ -655,7 +615,7 @@ func file_p2psentinel_sentinel_proto_init() { } } file_p2psentinel_sentinel_proto_msgTypes[1].Exporter = func(v 
interface{}, i int) interface{} { - switch v := v.(*Peer); i { + switch v := v.(*SubscriptionData); i { case 0: return &v.state case 1: @@ -667,7 +627,7 @@ func file_p2psentinel_sentinel_proto_init() { } } file_p2psentinel_sentinel_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GossipData); i { + switch v := v.(*Peer); i { case 0: return &v.state case 1: @@ -679,7 +639,7 @@ func file_p2psentinel_sentinel_proto_init() { } } file_p2psentinel_sentinel_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Status); i { + switch v := v.(*GossipData); i { case 0: return &v.state case 1: @@ -691,7 +651,7 @@ func file_p2psentinel_sentinel_proto_init() { } } file_p2psentinel_sentinel_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerCount); i { + switch v := v.(*Status); i { case 0: return &v.state case 1: @@ -703,7 +663,7 @@ func file_p2psentinel_sentinel_proto_init() { } } file_p2psentinel_sentinel_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RequestData); i { + switch v := v.(*PeerCount); i { case 0: return &v.state case 1: @@ -715,6 +675,18 @@ func file_p2psentinel_sentinel_proto_init() { } } file_p2psentinel_sentinel_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2psentinel_sentinel_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ResponseData); i { case 0: return &v.state @@ -727,20 +699,20 @@ func file_p2psentinel_sentinel_proto_init() { } } } - file_p2psentinel_sentinel_proto_msgTypes[2].OneofWrappers = []interface{}{} + file_p2psentinel_sentinel_proto_msgTypes[1].OneofWrappers = []interface{}{} + file_p2psentinel_sentinel_proto_msgTypes[3].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_p2psentinel_sentinel_proto_rawDesc, - NumEnums: 1, - NumMessages: 7, + NumEnums: 0, + NumMessages: 8, NumExtensions: 0, NumServices: 1, }, GoTypes: file_p2psentinel_sentinel_proto_goTypes, DependencyIndexes: file_p2psentinel_sentinel_proto_depIdxs, - EnumInfos: file_p2psentinel_sentinel_proto_enumTypes, MessageInfos: file_p2psentinel_sentinel_proto_msgTypes, }.Build() File_p2psentinel_sentinel_proto = out.File diff --git a/erigon-lib/gointerfaces/sentinel/sentinel_grpc.pb.go b/erigon-lib/gointerfaces/sentinel/sentinel_grpc.pb.go index 13052e192f8..34a47f5e43e 100644 --- a/erigon-lib/gointerfaces/sentinel/sentinel_grpc.pb.go +++ b/erigon-lib/gointerfaces/sentinel/sentinel_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v4.23.3 +// - protoc v4.24.2 // source: p2psentinel/sentinel.proto package sentinel @@ -34,7 +34,7 @@ const ( // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
type SentinelClient interface { - SubscribeGossip(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (Sentinel_SubscribeGossipClient, error) + SubscribeGossip(ctx context.Context, in *SubscriptionData, opts ...grpc.CallOption) (Sentinel_SubscribeGossipClient, error) SendRequest(ctx context.Context, in *RequestData, opts ...grpc.CallOption) (*ResponseData, error) SetStatus(ctx context.Context, in *Status, opts ...grpc.CallOption) (*EmptyMessage, error) GetPeers(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (*PeerCount, error) @@ -53,7 +53,7 @@ func NewSentinelClient(cc grpc.ClientConnInterface) SentinelClient { return &sentinelClient{cc} } -func (c *sentinelClient) SubscribeGossip(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (Sentinel_SubscribeGossipClient, error) { +func (c *sentinelClient) SubscribeGossip(ctx context.Context, in *SubscriptionData, opts ...grpc.CallOption) (Sentinel_SubscribeGossipClient, error) { stream, err := c.cc.NewStream(ctx, &Sentinel_ServiceDesc.Streams[0], Sentinel_SubscribeGossip_FullMethodName, opts...) if err != nil { return nil, err @@ -161,7 +161,7 @@ func (c *sentinelClient) PublishGossip(ctx context.Context, in *GossipData, opts // All implementations must embed UnimplementedSentinelServer // for forward compatibility type SentinelServer interface { - SubscribeGossip(*EmptyMessage, Sentinel_SubscribeGossipServer) error + SubscribeGossip(*SubscriptionData, Sentinel_SubscribeGossipServer) error SendRequest(context.Context, *RequestData) (*ResponseData, error) SetStatus(context.Context, *Status) (*EmptyMessage, error) GetPeers(context.Context, *EmptyMessage) (*PeerCount, error) @@ -177,7 +177,7 @@ type SentinelServer interface { type UnimplementedSentinelServer struct { } -func (UnimplementedSentinelServer) SubscribeGossip(*EmptyMessage, Sentinel_SubscribeGossipServer) error { +func (UnimplementedSentinelServer) SubscribeGossip(*SubscriptionData, Sentinel_SubscribeGossipServer) error { return status.Errorf(codes.Unimplemented, "method SubscribeGossip not implemented") } func (UnimplementedSentinelServer) SendRequest(context.Context, *RequestData) (*ResponseData, error) { @@ -218,7 +218,7 @@ func RegisterSentinelServer(s grpc.ServiceRegistrar, srv SentinelServer) { } func _Sentinel_SubscribeGossip_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(EmptyMessage) + m := new(SubscriptionData) if err := stream.RecvMsg(m); err != nil { return err } diff --git a/erigon-lib/gointerfaces/sentry/sentry.pb.go b/erigon-lib/gointerfaces/sentry/sentry.pb.go index fa266f2c1d1..c577830dfb6 100644 --- a/erigon-lib/gointerfaces/sentry/sentry.pb.go +++ b/erigon-lib/gointerfaces/sentry/sentry.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.23.3 +// protoc-gen-go v1.32.0 +// protoc v4.24.2 // source: p2psentry/sentry.proto package sentry diff --git a/erigon-lib/gointerfaces/sentry/sentry_grpc.pb.go b/erigon-lib/gointerfaces/sentry/sentry_grpc.pb.go index 7802cf4fd7b..1a9d1959b5c 100644 --- a/erigon-lib/gointerfaces/sentry/sentry_grpc.pb.go +++ b/erigon-lib/gointerfaces/sentry/sentry_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
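Because `SubscribeGossip` now takes `*SubscriptionData` instead of `*EmptyMessage`, every caller needs updating. A minimal client-side sketch, assuming an already-dialed `SentinelClient`; the diff does not define the filter's server-side semantics, so the value passed here is purely illustrative:

```go
package example

import (
	"context"
	"io"

	"github.com/ledgerwatch/erigon-lib/gointerfaces/sentinel"
)

// subscribeWithFilter streams gossip through the updated RPC. A nil filter
// keeps the old "subscribe to everything" behaviour; a non-nil value is the
// new per-subscription narrowing knob.
func subscribeWithFilter(ctx context.Context, client sentinel.SentinelClient, filter *string) error {
	stream, err := client.SubscribeGossip(ctx, &sentinel.SubscriptionData{Filter: filter})
	if err != nil {
		return err
	}
	for {
		msg, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		_ = msg.GetData() // hand the payload to the caller's handler
	}
}
```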
// versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v4.23.3 +// - protoc v4.24.2 // source: p2psentry/sentry.proto package sentry diff --git a/erigon-lib/gointerfaces/txpool/mining.pb.go b/erigon-lib/gointerfaces/txpool/mining.pb.go index 827e8da6e02..a8993b510d4 100644 --- a/erigon-lib/gointerfaces/txpool/mining.pb.go +++ b/erigon-lib/gointerfaces/txpool/mining.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.23.3 +// protoc-gen-go v1.32.0 +// protoc v4.24.2 // source: txpool/mining.proto package txpool diff --git a/erigon-lib/gointerfaces/txpool/mining_grpc.pb.go b/erigon-lib/gointerfaces/txpool/mining_grpc.pb.go index 32132e5d88b..c8855bfb6e3 100644 --- a/erigon-lib/gointerfaces/txpool/mining_grpc.pb.go +++ b/erigon-lib/gointerfaces/txpool/mining_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v4.23.3 +// - protoc v4.24.2 // source: txpool/mining.proto package txpool diff --git a/erigon-lib/gointerfaces/txpool/txpool.pb.go b/erigon-lib/gointerfaces/txpool/txpool.pb.go index 49693c16b05..3034cfcbdf8 100644 --- a/erigon-lib/gointerfaces/txpool/txpool.pb.go +++ b/erigon-lib/gointerfaces/txpool/txpool.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.23.3 +// protoc-gen-go v1.32.0 +// protoc v4.24.2 // source: txpool/txpool.proto package txpool diff --git a/erigon-lib/gointerfaces/txpool/txpool_grpc.pb.go b/erigon-lib/gointerfaces/txpool/txpool_grpc.pb.go index a1ae12fc0a1..d8c6da0d0a6 100644 --- a/erigon-lib/gointerfaces/txpool/txpool_grpc.pb.go +++ b/erigon-lib/gointerfaces/txpool/txpool_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v4.23.3 +// - protoc v4.24.2 // source: txpool/txpool.proto package txpool diff --git a/erigon-lib/gointerfaces/types/types.pb.go b/erigon-lib/gointerfaces/types/types.pb.go index b81ea0b4db4..56db8678d37 100644 --- a/erigon-lib/gointerfaces/types/types.pb.go +++ b/erigon-lib/gointerfaces/types/types.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.31.0 -// protoc v4.23.3 +// protoc-gen-go v1.32.0 +// protoc v4.24.2 // source: types/types.proto package types diff --git a/erigon-lib/kv/mdbx/kv_abstract_test.go b/erigon-lib/kv/mdbx/kv_abstract_test.go index a504eac6407..70befa9e513 100644 --- a/erigon-lib/kv/mdbx/kv_abstract_test.go +++ b/erigon-lib/kv/mdbx/kv_abstract_test.go @@ -23,6 +23,12 @@ import ( "runtime" "testing" + "github.com/ledgerwatch/log/v3" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" + "github.com/ledgerwatch/erigon-lib/gointerfaces" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon-lib/kv" @@ -30,11 +36,6 @@ import ( "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon-lib/kv/remotedb" "github.com/ledgerwatch/erigon-lib/kv/remotedbserver" - "github.com/ledgerwatch/log/v3" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - "google.golang.org/grpc/test/bufconn" ) func TestSequence(t *testing.T) { @@ -169,7 +170,7 @@ func TestRemoteKvVersion(t *testing.T) { conn := bufconn.Listen(1024 * 1024) grpcServer := grpc.NewServer() go func() { - remote.RegisterKVServer(grpcServer, remotedbserver.NewKvServer(ctx, writeDB, nil, nil, logger)) + remote.RegisterKVServer(grpcServer, remotedbserver.NewKvServer(ctx, writeDB, nil, nil, nil, logger)) if err := grpcServer.Serve(conn); err != nil { log.Error("private RPC server fail", "err", err) } @@ -210,7 +211,7 @@ func TestRemoteKvRange(t *testing.T) { ctx, writeDB := context.Background(), memdb.NewTestDB(t) grpcServer, conn := grpc.NewServer(), bufconn.Listen(1024*1024) go func() { - kvServer := remotedbserver.NewKvServer(ctx, writeDB, nil, nil, logger) + kvServer := remotedbserver.NewKvServer(ctx, writeDB, nil, nil, nil, logger) remote.RegisterKVServer(grpcServer, kvServer) if err := grpcServer.Serve(conn); err != nil { log.Error("private RPC server fail", "err", err) @@ -344,7 +345,7 @@ func setupDatabases(t *testing.T, logger log.Logger, f mdbx.TableCfgFunc) (write grpcServer := grpc.NewServer() f2 := func() { - remote.RegisterKVServer(grpcServer, remotedbserver.NewKvServer(ctx, writeDBs[1], nil, nil, logger)) + remote.RegisterKVServer(grpcServer, remotedbserver.NewKvServer(ctx, writeDBs[1], nil, nil, nil, logger)) if err := grpcServer.Serve(conn); err != nil { logger.Error("private RPC server fail", "err", err) } diff --git a/erigon-lib/kv/mdbx/kv_mdbx.go b/erigon-lib/kv/mdbx/kv_mdbx.go index 40068780722..58ff2f4a00b 100644 --- a/erigon-lib/kv/mdbx/kv_mdbx.go +++ b/erigon-lib/kv/mdbx/kv_mdbx.go @@ -35,7 +35,6 @@ import ( "github.com/erigontech/mdbx-go/mdbx" stack2 "github.com/go-stack/stack" "github.com/ledgerwatch/log/v3" - "github.com/pbnjay/memory" "golang.org/x/exp/maps" "golang.org/x/sync/semaphore" @@ -44,6 +43,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/iter" "github.com/ledgerwatch/erigon-lib/kv/order" + "github.com/ledgerwatch/erigon-lib/mmap" ) const NonExistingDBI kv.DBI = 999_999_999 @@ -84,10 +84,6 @@ func NewMDBX(log log.Logger) MdbxOpts { log: log, pageSize: kv.DefaultPageSize(), - // default is (TOTAL_RAM+AVAILABLE_RAM)/42/pageSize - // but for reproducibility of benchmarks - please don't rely on Available RAM - dirtySpace: 2 * (memory.TotalMemory() / 42), - mapSize: DefaultMapSize, growthStep: DefaultGrowthStep, mergeThreshold: 3 * 8192, @@ -280,7 +276,7 @@ func (opts 
MdbxOpts) Open(ctx context.Context) (kv.RwDB, error) { return nil, err } - if opts.flags&mdbx.Accede == 0 { + if !opts.HasFlag(mdbx.Accede) { if err = env.SetGeometry(-1, -1, int(opts.mapSize), int(opts.growthStep), opts.shrinkThreshold, int(opts.pageSize)); err != nil { return nil, err } @@ -289,32 +285,9 @@ func (opts MdbxOpts) Open(ctx context.Context) (kv.RwDB, error) { } } - err = env.Open(opts.path, opts.flags, 0664) - if err != nil { - if err != nil { - return nil, fmt.Errorf("%w, label: %s, trace: %s", err, opts.label.String(), stack2.Trace().String()) - } - } - - // mdbx will not change pageSize if db already exists. means need read real value after env.open() - in, err := env.Info(nil) - if err != nil { - if err != nil { - return nil, fmt.Errorf("%w, label: %s, trace: %s", err, opts.label.String(), stack2.Trace().String()) - } - } - - opts.pageSize = uint64(in.PageSize) - opts.mapSize = datasize.ByteSize(in.MapSize) - if opts.label == kv.ChainDB { - opts.log.Info("[db] open", "lable", opts.label, "sizeLimit", opts.mapSize, "pageSize", opts.pageSize) - } else { - opts.log.Debug("[db] open", "lable", opts.label, "sizeLimit", opts.mapSize, "pageSize", opts.pageSize) - } - // erigon using big transactions // increase "page measured" options. need do it after env.Open() because default are depend on pageSize known only after env.Open() - if !opts.HasFlag(mdbx.Accede) && !opts.HasFlag(mdbx.Readonly) { + if !opts.HasFlag(mdbx.Readonly) { // 1/8 is good for transactions with a lot of modifications - to reduce invalidation size. // But Erigon app now using Batch and etl.Collectors to avoid writing to DB frequently changing data. // It means most of our writes are: APPEND or "single UPSERT per key during transaction" @@ -326,20 +299,47 @@ func (opts MdbxOpts) Open(ctx context.Context) (kv.RwDB, error) { if err != nil { return nil, err } - if err = env.SetOption(mdbx.OptTxnDpInitial, txnDpInitial*2); err != nil { - return nil, err - } - dpReserveLimit, err := env.GetOption(mdbx.OptDpReverseLimit) - if err != nil { - return nil, err + if opts.label == kv.ChainDB { + if err = env.SetOption(mdbx.OptTxnDpInitial, txnDpInitial*2); err != nil { + return nil, err + } + dpReserveLimit, err := env.GetOption(mdbx.OptDpReverseLimit) + if err != nil { + return nil, err + } + if err = env.SetOption(mdbx.OptDpReverseLimit, dpReserveLimit*2); err != nil { + return nil, err + } } - if err = env.SetOption(mdbx.OptDpReverseLimit, dpReserveLimit*2); err != nil { - return nil, err + + // before env.Open() we don't know real pageSize. but will be implemented soon: https://gitflic.ru/project/erthink/libmdbx/issue/15 + // but we want call all `SetOption` before env.Open(), because: + // - after they will require rwtx-lock, which is not acceptable in ACCEDEE mode. 
+ pageSize := opts.pageSize + if pageSize == 0 { + pageSize = kv.DefaultPageSize() } - if err = env.SetOption(mdbx.OptTxnDpLimit, opts.dirtySpace/opts.pageSize); err != nil { + var dirtySpace uint64 + if opts.dirtySpace > 0 { + dirtySpace = opts.dirtySpace + } else { + dirtySpace = mmap.TotalMemory() / 42 // it's default of mdbx, but our package also supports cgroups and GOMEMLIMIT + // clamp to max size + const dirtySpaceMaxChainDB = uint64(1 * datasize.GB) + const dirtySpaceMaxDefault = uint64(128 * datasize.MB) + + if opts.label == kv.ChainDB && dirtySpace > dirtySpaceMaxChainDB { + dirtySpace = dirtySpaceMaxChainDB + } else if opts.label != kv.ChainDB && dirtySpace > dirtySpaceMaxDefault { + dirtySpace = dirtySpaceMaxDefault + } + } + //can't use real pagesize here - it will be known only after env.Open() + if err = env.SetOption(mdbx.OptTxnDpLimit, dirtySpace/pageSize); err != nil { return nil, err } + // must be in the range from 12.5% (almost empty) to 50% (half empty) // which corresponds to the range from 8192 and to 32768 in units respectively if err = env.SetOption(mdbx.OptMergeThreshold16dot16Percent, opts.mergeThreshold); err != nil { @@ -347,6 +347,25 @@ func (opts MdbxOpts) Open(ctx context.Context) (kv.RwDB, error) { } } + err = env.Open(opts.path, opts.flags, 0664) + if err != nil { + return nil, fmt.Errorf("%w, label: %s, trace: %s", err, opts.label.String(), stack2.Trace().String()) + } + + // mdbx will not change pageSize if db already exists. means need read real value after env.open() + in, err := env.Info(nil) + if err != nil { + return nil, fmt.Errorf("%w, label: %s, trace: %s", err, opts.label.String(), stack2.Trace().String()) + } + + opts.pageSize = uint64(in.PageSize) + opts.mapSize = datasize.ByteSize(in.MapSize) + if opts.label == kv.ChainDB { + opts.log.Info("[db] open", "lable", opts.label, "sizeLimit", opts.mapSize, "pageSize", opts.pageSize) + } else { + opts.log.Debug("[db] open", "lable", opts.label, "sizeLimit", opts.mapSize, "pageSize", opts.pageSize) + } + dirtyPagesLimit, err := env.GetOption(mdbx.OptTxnDpLimit) if err != nil { return nil, err @@ -366,15 +385,20 @@ func (opts MdbxOpts) Open(ctx context.Context) (kv.RwDB, error) { targetSemCount := int64(runtime.GOMAXPROCS(-1) * 16) opts.roTxsLimiter = semaphore.NewWeighted(targetSemCount) // 1 less than max to allow unlocking to happen } + + txsCountMutex := &sync.Mutex{} + db := &MdbxKV{ opts: opts, env: env, log: opts.log, - wg: &sync.WaitGroup{}, buckets: kv.TableCfg{}, txSize: dirtyPagesLimit * opts.pageSize, roTxsLimiter: opts.roTxsLimiter, + txsCountMutex: txsCountMutex, + txsAllDoneOnCloseCond: sync.NewCond(txsCountMutex), + leakDetector: dbg.NewLeakDetector("db."+opts.label.String(), dbg.SlowTx()), } @@ -438,7 +462,6 @@ func (opts MdbxOpts) MustOpen() kv.RwDB { type MdbxKV struct { log log.Logger env *mdbx.Env - wg *sync.WaitGroup buckets kv.TableCfg roTxsLimiter *semaphore.Weighted // does limit amount of concurrent Ro transactions - in most casess runtime.NumCPU() is good value for this channel capacity - this channel can be shared with other components (like Decompressor) opts MdbxOpts @@ -446,6 +469,10 @@ type MdbxKV struct { closed atomic.Bool path string + txsCount uint + txsCountMutex *sync.Mutex + txsAllDoneOnCloseCond *sync.Cond + leakDetector *dbg.LeakDetector } @@ -488,13 +515,53 @@ func (db *MdbxKV) openDBIs(buckets []string) error { }) } +func (db *MdbxKV) trackTxBegin() bool { + db.txsCountMutex.Lock() + defer db.txsCountMutex.Unlock() + + isOpen := !db.closed.Load() + if isOpen 
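The dirty-space default moves out of `NewMDBX` (where it was `2 * (memory.TotalMemory() / 42)` via the now-removed `pbnjay/memory` dependency) into `Open`, and is now clamped per database label. Restated as a standalone function for clarity, with the constants copied from the hunk above and `totalMemory` standing in for `mmap.TotalMemory()`:

```go
// defaultDirtySpace mirrors the new default: total RAM / 42 (mdbx's own
// ratio), clamped to 1 GB for the main chain DB and 128 MB for auxiliary DBs.
func defaultDirtySpace(isChainDB bool, totalMemory uint64) uint64 {
	const (
		maxChainDB = uint64(1) << 30   // 1 GB
		maxDefault = uint64(128) << 20 // 128 MB
	)
	dirty := totalMemory / 42
	if isChainDB && dirty > maxChainDB {
		return maxChainDB
	}
	if !isChainDB && dirty > maxDefault {
		return maxDefault
	}
	return dirty
}
```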
{ + db.txsCount++ + } + return isOpen +} + +func (db *MdbxKV) hasTxsAllDoneAndClosed() bool { + return (db.txsCount == 0) && db.closed.Load() +} + +func (db *MdbxKV) trackTxEnd() { + db.txsCountMutex.Lock() + defer db.txsCountMutex.Unlock() + + if db.txsCount > 0 { + db.txsCount-- + } else { + panic("MdbxKV: unmatched trackTxEnd") + } + + if db.hasTxsAllDoneAndClosed() { + db.txsAllDoneOnCloseCond.Signal() + } +} + +func (db *MdbxKV) waitTxsAllDoneOnClose() { + db.txsCountMutex.Lock() + defer db.txsCountMutex.Unlock() + + for !db.hasTxsAllDoneAndClosed() { + db.txsAllDoneOnCloseCond.Wait() + } +} + // Close closes db // All transactions must be closed before closing the database. func (db *MdbxKV) Close() { if ok := db.closed.CompareAndSwap(false, true); !ok { return } - db.wg.Wait() + db.waitTxsAllDoneOnClose() + db.env.Close() db.env = nil @@ -507,10 +574,6 @@ func (db *MdbxKV) Close() { } func (db *MdbxKV) BeginRo(ctx context.Context) (txn kv.Tx, err error) { - if db.closed.Load() { - return nil, fmt.Errorf("db closed") - } - // don't try to acquire if the context is already done select { case <-ctx.Done(): @@ -519,8 +582,13 @@ func (db *MdbxKV) BeginRo(ctx context.Context) (txn kv.Tx, err error) { // otherwise carry on } + if !db.trackTxBegin() { + return nil, fmt.Errorf("db closed") + } + // will return nil err if context is cancelled (may appear to acquire the semaphore) if semErr := db.roTxsLimiter.Acquire(ctx, 1); semErr != nil { + db.trackTxEnd() return nil, semErr } @@ -529,6 +597,7 @@ func (db *MdbxKV) BeginRo(ctx context.Context) (txn kv.Tx, err error) { // on error, or if there is whatever reason that we don't return a tx, // we need to free up the limiter slot, otherwise it could lead to deadlocks db.roTxsLimiter.Release(1) + db.trackTxEnd() } }() @@ -536,7 +605,7 @@ func (db *MdbxKV) BeginRo(ctx context.Context) (txn kv.Tx, err error) { if err != nil { return nil, fmt.Errorf("%w, label: %s, trace: %s", err, db.opts.label.String(), stack2.Trace().String()) } - db.wg.Add(1) + return &MdbxTx{ ctx: ctx, db: db, @@ -560,16 +629,18 @@ func (db *MdbxKV) beginRw(ctx context.Context, flags uint) (txn kv.RwTx, err err default: } - if db.closed.Load() { + if !db.trackTxBegin() { return nil, fmt.Errorf("db closed") } + runtime.LockOSThread() tx, err := db.env.BeginTxn(nil, flags) if err != nil { runtime.UnlockOSThread() // unlock only in case of error. 
normal flow is "defer .Rollback()" + db.trackTxEnd() return nil, fmt.Errorf("%w, lable: %s, trace: %s", err, db.opts.label.String(), stack2.Trace().String()) } - db.wg.Add(1) + return &MdbxTx{ db: db, tx: tx, @@ -811,7 +882,7 @@ func (tx *MdbxTx) Commit() error { } defer func() { tx.tx = nil - tx.db.wg.Done() + tx.db.trackTxEnd() if tx.readOnly { tx.db.roTxsLimiter.Release(1) } else { @@ -862,7 +933,7 @@ func (tx *MdbxTx) Rollback() { } defer func() { tx.tx = nil - tx.db.wg.Done() + tx.db.trackTxEnd() if tx.readOnly { tx.db.roTxsLimiter.Release(1) } else { diff --git a/erigon-lib/kv/mdbx/kv_mdbx_test.go b/erigon-lib/kv/mdbx/kv_mdbx_test.go index e79a852dae2..66506ef720f 100644 --- a/erigon-lib/kv/mdbx/kv_mdbx_test.go +++ b/erigon-lib/kv/mdbx/kv_mdbx_test.go @@ -18,14 +18,17 @@ package mdbx import ( "context" + "sync/atomic" "testing" + "time" "github.com/c2h5oh/datasize" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/order" ) func BaseCase(t *testing.T) (kv.RwDB, kv.RwTx, kv.RwCursorDupSort) { @@ -773,3 +776,124 @@ func TestAutoConversionSeekBothRange(t *testing.T) { require.NoError(t, err) assert.Nil(t, v) } + +func TestBeginRoAfterClose(t *testing.T) { + db := NewMDBX(log.New()).InMem(t.TempDir()).MustOpen() + db.Close() + _, err := db.BeginRo(context.Background()) + require.ErrorContains(t, err, "closed") +} + +func TestBeginRwAfterClose(t *testing.T) { + db := NewMDBX(log.New()).InMem(t.TempDir()).MustOpen() + db.Close() + _, err := db.BeginRw(context.Background()) + require.ErrorContains(t, err, "closed") +} + +func TestBeginRoWithDoneContext(t *testing.T) { + db := NewMDBX(log.New()).InMem(t.TempDir()).MustOpen() + defer db.Close() + ctx, cancel := context.WithCancel(context.Background()) + cancel() + _, err := db.BeginRo(ctx) + require.ErrorIs(t, err, context.Canceled) +} + +func TestBeginRwWithDoneContext(t *testing.T) { + db := NewMDBX(log.New()).InMem(t.TempDir()).MustOpen() + defer db.Close() + ctx, cancel := context.WithCancel(context.Background()) + cancel() + _, err := db.BeginRw(ctx) + require.ErrorIs(t, err, context.Canceled) +} + +func testCloseWaitsAfterTxBegin( + t *testing.T, + count int, + txBeginFunc func(kv.RwDB) (kv.StatelessReadTx, error), + txEndFunc func(kv.StatelessReadTx) error, +) { + t.Helper() + db := NewMDBX(log.New()).InMem(t.TempDir()).MustOpen() + var txs []kv.StatelessReadTx + for i := 0; i < count; i++ { + tx, err := txBeginFunc(db) + require.Nil(t, err) + txs = append(txs, tx) + } + + isClosed := &atomic.Bool{} + closeDone := make(chan struct{}) + + go func() { + db.Close() + isClosed.Store(true) + close(closeDone) + }() + + for _, tx := range txs { + // arbitrary delay to give db.Close() a chance to exit prematurely + time.Sleep(time.Millisecond * 20) + assert.False(t, isClosed.Load()) + + err := txEndFunc(tx) + require.Nil(t, err) + } + + <-closeDone + assert.True(t, isClosed.Load()) +} + +func TestCloseWaitsAfterTxBegin(t *testing.T) { + ctx := context.Background() + t.Run("BeginRoAndCommit", func(t *testing.T) { + testCloseWaitsAfterTxBegin( + t, + 1, + func(db kv.RwDB) (kv.StatelessReadTx, error) { return db.BeginRo(ctx) }, + func(tx kv.StatelessReadTx) error { return tx.Commit() }, + ) + }) + t.Run("BeginRoAndCommit3", func(t *testing.T) { + testCloseWaitsAfterTxBegin( + t, + 3, + func(db kv.RwDB) (kv.StatelessReadTx, 
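The `sync.WaitGroup` that previously guarded `Close` is replaced by an explicit transaction counter protected by a mutex plus a `sync.Cond`. This closes the window where `wg.Add(1)` in `Begin*` could interleave with `wg.Wait()` in `Close` (a documented misuse of WaitGroup), and it makes the "refuse new transactions" decision atomic with the closed check. The pattern in isolation, as a minimal MDBX-independent sketch:

```go
package example

import "sync"

// txTracker restates the counting scheme used by MdbxKV: Begin increments
// the counter only while the DB is open, End decrements it, and Close blocks
// until the counter drains to zero.
type txTracker struct {
	mu     sync.Mutex
	cond   *sync.Cond
	count  int
	closed bool
}

func newTxTracker() *txTracker {
	t := &txTracker{}
	t.cond = sync.NewCond(&t.mu)
	return t
}

func (t *txTracker) Begin() bool {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.closed {
		return false // mirrors the "db closed" error path
	}
	t.count++
	return true
}

func (t *txTracker) End() {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.count--
	if t.closed && t.count == 0 {
		t.cond.Signal() // wake the closer once the last tx finishes
	}
}

func (t *txTracker) Close() {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.closed = true
	for t.count > 0 {
		t.cond.Wait()
	}
}
```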
error) { return db.BeginRo(ctx) }, + func(tx kv.StatelessReadTx) error { return tx.Commit() }, + ) + }) + t.Run("BeginRoAndRollback", func(t *testing.T) { + testCloseWaitsAfterTxBegin( + t, + 1, + func(db kv.RwDB) (kv.StatelessReadTx, error) { return db.BeginRo(ctx) }, + func(tx kv.StatelessReadTx) error { tx.Rollback(); return nil }, + ) + }) + t.Run("BeginRoAndRollback3", func(t *testing.T) { + testCloseWaitsAfterTxBegin( + t, + 3, + func(db kv.RwDB) (kv.StatelessReadTx, error) { return db.BeginRo(ctx) }, + func(tx kv.StatelessReadTx) error { tx.Rollback(); return nil }, + ) + }) + t.Run("BeginRwAndCommit", func(t *testing.T) { + testCloseWaitsAfterTxBegin( + t, + 1, + func(db kv.RwDB) (kv.StatelessReadTx, error) { return db.BeginRw(ctx) }, + func(tx kv.StatelessReadTx) error { return tx.Commit() }, + ) + }) + t.Run("BeginRwAndRollback", func(t *testing.T) { + testCloseWaitsAfterTxBegin( + t, + 1, + func(db kv.RwDB) (kv.StatelessReadTx, error) { return db.BeginRw(ctx) }, + func(tx kv.StatelessReadTx) error { tx.Rollback(); return nil }, + ) + }) +} diff --git a/erigon-lib/kv/membatchwithdb/memory_mutation.go b/erigon-lib/kv/membatchwithdb/memory_mutation.go index c00f93cb2c6..173e44b232a 100644 --- a/erigon-lib/kv/membatchwithdb/memory_mutation.go +++ b/erigon-lib/kv/membatchwithdb/memory_mutation.go @@ -32,6 +32,7 @@ type MemoryMutation struct { memTx kv.RwTx memDb kv.RwDB deletedEntries map[string]map[string]struct{} + deletedDups map[string]map[string]map[string]struct{} clearedTables map[string]struct{} db kv.Tx statelessCursors map[string]kv.RwCursor @@ -45,8 +46,8 @@ type MemoryMutation struct { // defer batch.Close() // ... some calculations on `batch` // batch.Commit() -func NewMemoryBatch(tx kv.Tx, tmpDir string) *MemoryMutation { - tmpDB := mdbx.NewMDBX(log.New()).InMem(tmpDir).GrowthStep(64 * datasize.MB).MapSize(512 * datasize.GB).MustOpen() +func NewMemoryBatch(tx kv.Tx, tmpDir string, logger log.Logger) *MemoryMutation { + tmpDB := mdbx.NewMDBX(logger).InMem(tmpDir).GrowthStep(64 * datasize.MB).MapSize(512 * datasize.GB).MustOpen() memTx, err := tmpDB.BeginRw(context.Background()) if err != nil { panic(err) @@ -60,6 +61,7 @@ func NewMemoryBatch(tx kv.Tx, tmpDir string) *MemoryMutation { memDb: tmpDB, memTx: memTx, deletedEntries: make(map[string]map[string]struct{}), + deletedDups: map[string]map[string]map[string]struct{}{}, clearedTables: make(map[string]struct{}), } } @@ -70,6 +72,7 @@ func NewMemoryBatchWithCustomDB(tx kv.Tx, db kv.RwDB, uTx kv.RwTx, tmpDir string memDb: db, memTx: uTx, deletedEntries: make(map[string]map[string]struct{}), + deletedDups: map[string]map[string]map[string]struct{}{}, clearedTables: make(map[string]struct{}), } } @@ -93,6 +96,19 @@ func (m *MemoryMutation) isEntryDeleted(table string, key []byte) bool { return ok } +func (m *MemoryMutation) isDupDeleted(table string, key []byte, val []byte) bool { + t, ok := m.deletedDups[table] + if !ok { + return ok + } + k, ok := t[string(key)] + if !ok { + return ok + } + _, ok = k[string(val)] + return ok +} + func (m *MemoryMutation) DBSize() (uint64, error) { panic("not implemented") } @@ -243,10 +259,141 @@ func (m *MemoryMutation) RangeAscend(table string, fromPrefix, toPrefix []byte, panic("please implement me") } func (m *MemoryMutation) RangeDescend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error) { - panic("please implement me") + s := &rangeIter{orderAscend: false, limit: int64(limit)} + var err error + if s.iterDb, err = m.db.RangeDescend(table, fromPrefix, toPrefix, 
limit); err != nil { + return s, err + } + if s.iterMem, err = m.memTx.RangeDescend(table, fromPrefix, toPrefix, limit); err != nil { + return s, err + } + return s.init() +} + +type rangeIter struct { + iterDb, iterMem iter.KV + hasNextDb, hasNextMem bool + nextKdb, nextVdb, nextKmem, nextVmem []byte + orderAscend bool + limit int64 +} + +func (s *rangeIter) init() (*rangeIter, error) { + s.hasNextDb = s.iterDb.HasNext() + s.hasNextMem = s.iterMem.HasNext() + var err error + if s.hasNextDb { + if s.nextKdb, s.nextVdb, err = s.iterDb.Next(); err != nil { + return s, err + } + } + if s.hasNextMem { + if s.nextKmem, s.nextVmem, err = s.iterMem.Next(); err != nil { + return s, err + } + } + return s, nil +} + +func (s *rangeIter) HasNext() bool { + if s.limit == 0 { + return false + } + return s.hasNextDb || s.hasNextMem +} +func (s *rangeIter) Next() (k, v []byte, err error) { + s.limit-- + c := bytes.Compare(s.nextKdb, s.nextKmem) + if !s.hasNextMem || c == -1 && s.orderAscend || c == 1 && !s.orderAscend || c == 0 { + if s.hasNextDb { + k = s.nextKdb + v = s.nextVdb + s.hasNextDb = s.iterDb.HasNext() + if s.nextKdb, s.nextVdb, err = s.iterDb.Next(); err != nil { + return nil, nil, err + } + } + } + if !s.hasNextDb || c == 1 && s.orderAscend || c == -1 && !s.orderAscend || c == 0 { + if s.hasNextMem { + k = s.nextKmem + v = s.nextVmem + s.hasNextMem = s.iterMem.HasNext() + if s.nextKmem, s.nextVmem, err = s.iterMem.Next(); err != nil { + return nil, nil, err + } + } + } + return } + func (m *MemoryMutation) RangeDupSort(table string, key []byte, fromPrefix, toPrefix []byte, asc order.By, limit int) (iter.KV, error) { - panic("please implement me") + s := &rangeDupSortIter{key: key, orderAscend: bool(asc), limit: int64(limit)} + var err error + if s.iterDb, err = m.db.RangeDupSort(table, key, fromPrefix, toPrefix, asc, limit); err != nil { + return s, err + } + if s.iterMem, err = m.memTx.RangeDupSort(table, key, fromPrefix, toPrefix, asc, limit); err != nil { + return s, err + } + return s.init() +} + +type rangeDupSortIter struct { + iterDb, iterMem iter.KV + hasNextDb, hasNextMem bool + key []byte + nextVdb, nextVmem []byte + orderAscend bool + limit int64 +} + +func (s *rangeDupSortIter) init() (*rangeDupSortIter, error) { + s.hasNextDb = s.iterDb.HasNext() + s.hasNextMem = s.iterMem.HasNext() + var err error + if s.hasNextDb { + if _, s.nextVdb, err = s.iterDb.Next(); err != nil { + return s, err + } + } + if s.hasNextMem { + if _, s.nextVmem, err = s.iterMem.Next(); err != nil { + return s, err + } + } + return s, nil +} + +func (s *rangeDupSortIter) HasNext() bool { + if s.limit == 0 { + return false + } + return s.hasNextDb || s.hasNextMem +} +func (s *rangeDupSortIter) Next() (k, v []byte, err error) { + s.limit-- + k = s.key + c := bytes.Compare(s.nextVdb, s.nextVmem) + if !s.hasNextMem || c == -1 && s.orderAscend || c == 1 && !s.orderAscend || c == 0 { + if s.hasNextDb { + v = s.nextVdb + s.hasNextDb = s.iterDb.HasNext() + if _, s.nextVdb, err = s.iterDb.Next(); err != nil { + return nil, nil, err + } + } + } + if !s.hasNextDb || c == 1 && s.orderAscend || c == -1 && !s.orderAscend || c == 0 { + if s.hasNextMem { + v = s.nextVmem + s.hasNextMem = s.iterMem.HasNext() + if _, s.nextVmem, err = s.iterMem.Next(); err != nil { + return nil, nil, err + } + } + } + return } func (m *MemoryMutation) ForPrefix(bucket string, prefix []byte, walker func(k, v []byte) error) error { @@ -271,13 +418,29 @@ func (m *MemoryMutation) ForPrefix(bucket string, prefix []byte, walker func(k, } func (m 
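`RangeDescend` and `RangeDupSort` are implemented here as a two-way merge between the underlying DB iterator and the in-memory overlay iterator: each `Next` emits the smaller key (larger, when descending), advancing whichever side produced it, and on equal keys advances both sides so the overlay value shadows the DB one. The same invariant over plain slices, as a toy ascending sketch (not the production code):

```go
// mergeAscend merges two key-sorted streams of (key, value) pairs,
// preferring the in-memory value when keys collide.
func mergeAscend(db, mem [][2]string) [][2]string {
	var out [][2]string
	i, j := 0, 0
	for i < len(db) || j < len(mem) {
		switch {
		case j == len(mem) || (i < len(db) && db[i][0] < mem[j][0]):
			out = append(out, db[i])
			i++
		case i == len(db) || mem[j][0] < db[i][0]:
			out = append(out, mem[j])
			j++
		default: // equal keys: the overlay entry shadows the DB entry
			out = append(out, mem[j])
			i++
			j++
		}
	}
	return out
}
```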
*MemoryMutation) Delete(table string, k []byte) error { - if _, ok := m.deletedEntries[table]; !ok { - m.deletedEntries[table] = make(map[string]struct{}) + t, ok := m.deletedEntries[table] + if !ok { + t = make(map[string]struct{}) + m.deletedEntries[table] = t } - m.deletedEntries[table][string(k)] = struct{}{} + t[string(k)] = struct{}{} return m.memTx.Delete(table, k) } +func (m *MemoryMutation) deleteDup(table string, k, v []byte) { + t, ok := m.deletedDups[table] + if !ok { + t = map[string]map[string]struct{}{} + m.deletedDups[table] = t + } + km, ok := t[string(k)] + if !ok { + km = map[string]struct{}{} + t[string(k)] = km + } + km[string(v)] = struct{}{} +} + func (m *MemoryMutation) Commit() error { m.statelessCursors = nil return nil @@ -467,7 +630,7 @@ func (m *MemoryMutation) MemTx() kv.RwTx { // Cursor creates a new cursor (the real fun begins here) func (m *MemoryMutation) makeCursor(bucket string) (kv.RwCursorDupSort, error) { - c := &memoryMutationCursor{} + c := &memoryMutationCursor{pureDupSort: isTablePurelyDupsort(bucket)} // We can filter duplicates in dup sorted table c.table = bucket diff --git a/erigon-lib/kv/membatchwithdb/memory_mutation_cursor.go b/erigon-lib/kv/membatchwithdb/memory_mutation_cursor.go index c21b9e4015b..0fefa48dac3 100644 --- a/erigon-lib/kv/membatchwithdb/memory_mutation_cursor.go +++ b/erigon-lib/kv/membatchwithdb/memory_mutation_cursor.go @@ -47,6 +47,7 @@ type memoryMutationCursor struct { currentDbEntry cursorEntry currentMemEntry cursorEntry isPrevFromDb bool + pureDupSort bool } func (m *memoryMutationCursor) isTableCleared() bool { @@ -337,8 +338,13 @@ func (m *memoryMutationCursor) Delete(k []byte) error { } func (m *memoryMutationCursor) DeleteCurrent() error { - panic("DeleteCurrent Not implemented") + if !m.pureDupSort { + return m.mutation.Delete(m.table, m.currentPair.key) + } + m.mutation.deleteDup(m.table, m.currentPair.key, m.currentPair.value) + return nil } + func (m *memoryMutationCursor) DeleteExact(_, _ []byte) error { panic("DeleteExact Not implemented") } @@ -502,5 +508,34 @@ func (m *memoryMutationCursor) CountDuplicates() (uint64, error) { } func (m *memoryMutationCursor) SeekBothExact(key, value []byte) ([]byte, []byte, error) { - panic("SeekBothExact Not implemented") + memKey, memValue, err := m.memCursor.SeekBothExact(key, value) + if err != nil || m.isTableCleared() { + return memKey, memValue, err + } + + if memKey != nil { + m.currentMemEntry.key = memKey + m.currentMemEntry.value = memValue + m.currentDbEntry.key = key + m.currentDbEntry.value, err = m.cursor.SeekBothRange(key, value) + m.isPrevFromDb = false + m.currentPair = cursorEntry{memKey, memValue} + return memKey, memValue, err + } + + dbKey, dbValue, err := m.cursor.SeekBothExact(key, value) + if err != nil { + return nil, nil, err + } + + if dbKey != nil && !m.mutation.isDupDeleted(m.table, key, value) { + m.currentDbEntry.key = dbKey + m.currentDbEntry.value = dbValue + m.currentMemEntry.key = key + m.currentMemEntry.value, err = m.memCursor.SeekBothRange(key, value) + m.isPrevFromDb = true + m.currentPair = cursorEntry{dbKey, dbValue} + return dbKey, dbValue, err + } + return nil, nil, nil } diff --git a/erigon-lib/kv/membatchwithdb/memory_mutation_test.go b/erigon-lib/kv/membatchwithdb/memory_mutation_test.go index 6bbc7d00da6..4ad18d8a1f8 100644 --- a/erigon-lib/kv/membatchwithdb/memory_mutation_test.go +++ b/erigon-lib/kv/membatchwithdb/memory_mutation_test.go @@ -21,6 +21,7 @@ import ( "github.com/stretchr/testify/require" 
"github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/log/v3" ) func initializeDbNonDupSort(rwTx kv.RwTx) { @@ -35,7 +36,7 @@ func TestPutAppendHas(t *testing.T) { initializeDbNonDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) require.NoError(t, batch.Append(kv.HashedAccounts, []byte("AAAA"), []byte("value1.5"))) require.Error(t, batch.Append(kv.HashedAccounts, []byte("AAAA"), []byte("value1.3"))) require.NoError(t, batch.Put(kv.HashedAccounts, []byte("AAAA"), []byte("value1.3"))) @@ -64,7 +65,7 @@ func TestLastMiningDB(t *testing.T) { initializeDbNonDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4")) batch.Put(kv.HashedAccounts, []byte("BCAA"), []byte("value5")) @@ -88,7 +89,7 @@ func TestLastMiningMem(t *testing.T) { initializeDbNonDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4")) batch.Put(kv.HashedAccounts, []byte("DCAA"), []byte("value5")) @@ -111,7 +112,7 @@ func TestDeleteMining(t *testing.T) { _, rwTx := memdb.NewTestTx(t) initializeDbNonDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4")) batch.Put(kv.HashedAccounts, []byte("DCAA"), []byte("value5")) batch.Put(kv.HashedAccounts, []byte("FCAA"), []byte("value5")) @@ -137,7 +138,7 @@ func TestFlush(t *testing.T) { _, rwTx := memdb.NewTestTx(t) initializeDbNonDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4")) batch.Put(kv.HashedAccounts, []byte("AAAA"), []byte("value5")) batch.Put(kv.HashedAccounts, []byte("FCAA"), []byte("value5")) @@ -158,7 +159,7 @@ func TestForEach(t *testing.T) { initializeDbNonDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) batch.Put(kv.HashedAccounts, []byte("FCAA"), []byte("value5")) require.NoError(t, batch.Flush(rwTx)) @@ -200,7 +201,7 @@ func TestForPrefix(t *testing.T) { initializeDbNonDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) var keys1 []string var values1 []string @@ -239,7 +240,7 @@ func TestForAmount(t *testing.T) { initializeDbNonDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) defer batch.Close() var keys []string @@ -272,7 +273,7 @@ func TestGetOneAfterClearBucket(t *testing.T) { initializeDbNonDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) defer batch.Close() err := batch.ClearBucket(kv.HashedAccounts) @@ -295,7 +296,7 @@ func TestSeekExactAfterClearBucket(t *testing.T) { initializeDbNonDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) defer batch.Close() err := batch.ClearBucket(kv.HashedAccounts) @@ -331,7 +332,7 @@ func TestFirstAfterClearBucket(t *testing.T) { initializeDbNonDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) defer batch.Close() err := batch.ClearBucket(kv.HashedAccounts) @@ -359,7 +360,7 @@ func TestIncReadSequence(t *testing.T) { initializeDbNonDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) defer batch.Close() _, err := 
batch.IncrementSequence(kv.HashedAccounts, uint64(12)) @@ -382,7 +383,7 @@ func TestNext(t *testing.T) { initializeDbDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) defer batch.Close() batch.Put(kv.AccountChangeSet, []byte("key1"), []byte("value1.2")) @@ -426,7 +427,7 @@ func TestNextNoDup(t *testing.T) { initializeDbDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) defer batch.Close() batch.Put(kv.AccountChangeSet, []byte("key2"), []byte("value2.1")) @@ -453,7 +454,7 @@ func TestDeleteCurrentDuplicates(t *testing.T) { initializeDbDupSort(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) defer batch.Close() cursor, err := batch.RwCursorDupSort(kv.AccountChangeSet) @@ -488,7 +489,7 @@ func TestSeekBothRange(t *testing.T) { rwTx.Put(kv.AccountChangeSet, []byte("key1"), []byte("value1.1")) rwTx.Put(kv.AccountChangeSet, []byte("key3"), []byte("value3.3")) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) defer batch.Close() cursor, err := batch.RwCursorDupSort(kv.AccountChangeSet) @@ -522,7 +523,7 @@ func TestAutoConversion(t *testing.T) { initializeDbAutoConversion(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) defer batch.Close() c, err := batch.RwCursor(kv.PlainState) @@ -578,7 +579,7 @@ func TestAutoConversionDelete(t *testing.T) { initializeDbAutoConversion(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) defer batch.Close() c, err := batch.RwCursor(kv.PlainState) @@ -615,7 +616,7 @@ func TestAutoConversionSeekBothRange(t *testing.T) { initializeDbAutoConversion(rwTx) - batch := NewMemoryBatch(rwTx, "") + batch := NewMemoryBatch(rwTx, "", log.Root()) defer batch.Close() c, err := batch.RwCursorDupSort(kv.PlainState) diff --git a/erigon-lib/kv/remotedbserver/mock/snapshots_mock.go b/erigon-lib/kv/remotedbserver/mock/snapshots_mock.go new file mode 100644 index 00000000000..538b5aa4323 --- /dev/null +++ b/erigon-lib/kv/remotedbserver/mock/snapshots_mock.go @@ -0,0 +1,48 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ledgerwatch/erigon-lib/kv/remotedbserver (interfaces: Snapshots) + +// Package mock is a generated GoMock package. +package mock + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockSnapshots is a mock of Snapshots interface. +type MockSnapshots struct { + ctrl *gomock.Controller + recorder *MockSnapshotsMockRecorder +} + +// MockSnapshotsMockRecorder is the mock recorder for MockSnapshots. +type MockSnapshotsMockRecorder struct { + mock *MockSnapshots +} + +// NewMockSnapshots creates a new mock instance. +func NewMockSnapshots(ctrl *gomock.Controller) *MockSnapshots { + mock := &MockSnapshots{ctrl: ctrl} + mock.recorder = &MockSnapshotsMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSnapshots) EXPECT() *MockSnapshotsMockRecorder { + return m.recorder +} + +// Files mocks base method. +func (m *MockSnapshots) Files() []string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Files") + ret0, _ := ret[0].([]string) + return ret0 +} + +// Files indicates an expected call of Files. 
+func (mr *MockSnapshotsMockRecorder) Files() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Files", reflect.TypeOf((*MockSnapshots)(nil).Files)) +} diff --git a/erigon-lib/kv/remotedbserver/remotedbserver.go b/erigon-lib/kv/remotedbserver/remotedbserver.go index 07191e55fe6..f276674cb78 100644 --- a/erigon-lib/kv/remotedbserver/remotedbserver.go +++ b/erigon-lib/kv/remotedbserver/remotedbserver.go @@ -71,8 +71,9 @@ type KvServer struct { kv kv.RoDB stateChangeStreams *StateChangePubSub - blockSnapshots Snapsthots - historySnapshots Snapsthots + blockSnapshots Snapshots + borSnapshots Snapshots + historySnapshots Snapshots ctx context.Context //v3 fields @@ -90,18 +91,24 @@ type threadSafeTx struct { sync.Mutex } -type Snapsthots interface { +//go:generate mockgen -destination=./mock/snapshots_mock.go -package=mock . Snapshots +type Snapshots interface { Files() []string } -func NewKvServer(ctx context.Context, db kv.RoDB, snapshots Snapsthots, historySnapshots Snapsthots, logger log.Logger) *KvServer { +func NewKvServer(ctx context.Context, db kv.RoDB, snapshots Snapshots, borSnapshots Snapshots, historySnapshots Snapshots, logger log.Logger) *KvServer { return &KvServer{ - trace: false, - rangeStep: 1024, - kv: db, stateChangeStreams: newStateChangeStreams(), ctx: ctx, - blockSnapshots: snapshots, historySnapshots: historySnapshots, - txs: map[uint64]*threadSafeTx{}, txsMapLock: &sync.RWMutex{}, - logger: logger, + trace: false, + rangeStep: 1024, + kv: db, + stateChangeStreams: newStateChangeStreams(), + ctx: ctx, + blockSnapshots: snapshots, + borSnapshots: borSnapshots, + historySnapshots: historySnapshots, + txs: map[uint64]*threadSafeTx{}, + txsMapLock: &sync.RWMutex{}, + logger: logger, } } @@ -430,7 +437,7 @@ func bytesCopy(b []byte) []byte { return copiedBytes } -func (s *KvServer) StateChanges(req *remote.StateChangeRequest, server remote.KV_StateChangesServer) error { +func (s *KvServer) StateChanges(_ *remote.StateChangeRequest, server remote.KV_StateChangesServer) error { ch, remove := s.stateChangeStreams.Sub() defer remove() for { @@ -447,16 +454,21 @@ func (s *KvServer) StateChanges(req *remote.StateChangeRequest, server remote.KV } } -func (s *KvServer) SendStateChanges(ctx context.Context, sc *remote.StateChangeBatch) { +func (s *KvServer) SendStateChanges(_ context.Context, sc *remote.StateChangeBatch) { s.stateChangeStreams.Pub(sc) } -func (s *KvServer) Snapshots(ctx context.Context, _ *remote.SnapshotsRequest) (*remote.SnapshotsReply, error) { +func (s *KvServer) Snapshots(_ context.Context, _ *remote.SnapshotsRequest) (*remote.SnapshotsReply, error) { if s.blockSnapshots == nil || reflect.ValueOf(s.blockSnapshots).IsNil() { // nolint return &remote.SnapshotsReply{BlocksFiles: []string{}, HistoryFiles: []string{}}, nil } - return &remote.SnapshotsReply{BlocksFiles: s.blockSnapshots.Files(), HistoryFiles: s.historySnapshots.Files()}, nil + blockFiles := s.blockSnapshots.Files() + if s.borSnapshots != nil { + blockFiles = append(blockFiles, s.borSnapshots.Files()...) 
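`NewKvServer` gains a `borSnapshots` slot, and `Snapshots` folds the Bor segment files into `BlocksFiles` when that slot is non-nil. A wiring sketch mirroring the new six-argument signature (the function name is illustrative; `nil` is accepted for the Bor slot, as the updated tests below show):

```go
package example

import (
	"context"

	"github.com/ledgerwatch/log/v3"

	"github.com/ledgerwatch/erigon-lib/gointerfaces/remote"
	"github.com/ledgerwatch/erigon-lib/kv"
	"github.com/ledgerwatch/erigon-lib/kv/remotedbserver"
)

// listBlockFiles shows the six-argument constructor; with a nil borSnaps
// only the regular block segments are reported.
func listBlockFiles(ctx context.Context, db kv.RoDB,
	blockSnaps, borSnaps, histSnaps remotedbserver.Snapshots) ([]string, error) {
	srv := remotedbserver.NewKvServer(ctx, db, blockSnaps, borSnaps, histSnaps, log.New())
	reply, err := srv.Snapshots(ctx, &remote.SnapshotsRequest{})
	if err != nil {
		return nil, err
	}
	return reply.BlocksFiles, nil
}
```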
+ } + + return &remote.SnapshotsReply{BlocksFiles: blockFiles, HistoryFiles: s.historySnapshots.Files()}, nil } type StateChangePubSub struct { @@ -507,8 +519,11 @@ func (s *StateChangePubSub) remove(id uint) { delete(s.chans, id) } +// // Temporal methods -func (s *KvServer) DomainGet(ctx context.Context, req *remote.DomainGetReq) (reply *remote.DomainGetReply, err error) { +// + +func (s *KvServer) DomainGet(_ context.Context, req *remote.DomainGetReq) (reply *remote.DomainGetReply, err error) { reply = &remote.DomainGetReply{} if err := s.with(req.TxId, func(tx kv.Tx) error { ttx, ok := tx.(kv.TemporalTx) @@ -532,7 +547,7 @@ func (s *KvServer) DomainGet(ctx context.Context, req *remote.DomainGetReq) (rep } return reply, nil } -func (s *KvServer) HistoryGet(ctx context.Context, req *remote.HistoryGetReq) (reply *remote.HistoryGetReply, err error) { +func (s *KvServer) HistoryGet(_ context.Context, req *remote.HistoryGetReq) (reply *remote.HistoryGetReply, err error) { reply = &remote.HistoryGetReply{} if err := s.with(req.TxId, func(tx kv.Tx) error { ttx, ok := tx.(kv.TemporalTx) @@ -552,7 +567,7 @@ func (s *KvServer) HistoryGet(ctx context.Context, req *remote.HistoryGetReq) (r const PageSizeLimit = 4 * 4096 -func (s *KvServer) IndexRange(ctx context.Context, req *remote.IndexRangeReq) (*remote.IndexRangeReply, error) { +func (s *KvServer) IndexRange(_ context.Context, req *remote.IndexRangeReq) (*remote.IndexRangeReply, error) { reply := &remote.IndexRangeReply{} from, limit := int(req.FromTs), int(req.Limit) if req.PageToken != "" { @@ -600,7 +615,7 @@ func (s *KvServer) IndexRange(ctx context.Context, req *remote.IndexRangeReq) (* return reply, nil } -func (s *KvServer) Range(ctx context.Context, req *remote.RangeReq) (*remote.Pairs, error) { +func (s *KvServer) Range(_ context.Context, req *remote.RangeReq) (*remote.Pairs, error) { from, limit := req.FromPrefix, int(req.Limit) if req.PageToken != "" { var pagination remote.ParisPagination diff --git a/erigon-lib/kv/remotedbserver/server_test.go b/erigon-lib/kv/remotedbserver/remotedbserver_test.go similarity index 54% rename from erigon-lib/kv/remotedbserver/server_test.go rename to erigon-lib/kv/remotedbserver/remotedbserver_test.go index fec193f0389..4e233638862 100644 --- a/erigon-lib/kv/remotedbserver/server_test.go +++ b/erigon-lib/kv/remotedbserver/remotedbserver_test.go @@ -21,14 +21,18 @@ import ( "runtime" "testing" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/golang/mock/gomock" "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" + + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/ledgerwatch/erigon-lib/kv/remotedbserver/mock" ) func TestKvServer_renew(t *testing.T) { + //goland:noinspection GoBoolExpressions if runtime.GOOS == "windows" { t.Skip("fix me on win please") } @@ -44,7 +48,7 @@ func TestKvServer_renew(t *testing.T) { return nil })) - s := NewKvServer(ctx, db, nil, nil, log.New()) + s := NewKvServer(ctx, db, nil, nil, nil, log.New()) g, ctx := errgroup.WithContext(ctx) testCase := func() error { id, err := s.begin(ctx) @@ -95,3 +99,44 @@ func TestKvServer_renew(t *testing.T) { } require.NoError(g.Wait()) } + +func TestKVServerSnapshotsReturnsSnapshots(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + blockSnapshots := mock.NewMockSnapshots(ctrl) + blockSnapshots.EXPECT().Files().Return([]string{"headers.seg", 
"bodies.seg"}).Times(1) + historySnapshots := mock.NewMockSnapshots(ctrl) + historySnapshots.EXPECT().Files().Return([]string{"history"}).Times(1) + + s := NewKvServer(ctx, nil, blockSnapshots, nil, historySnapshots, log.New()) + reply, err := s.Snapshots(ctx, nil) + require.NoError(t, err) + require.Equal(t, []string{"headers.seg", "bodies.seg"}, reply.BlocksFiles) + require.Equal(t, []string{"history"}, reply.HistoryFiles) +} + +func TestKVServerSnapshotsReturnsBorSnapshots(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + blockSnapshots := mock.NewMockSnapshots(ctrl) + blockSnapshots.EXPECT().Files().Return([]string{"headers.seg", "bodies.seg"}).Times(1) + borSnapshots := mock.NewMockSnapshots(ctrl) + borSnapshots.EXPECT().Files().Return([]string{"borevents.seg", "borspans.seg"}).Times(1) + historySnapshots := mock.NewMockSnapshots(ctrl) + historySnapshots.EXPECT().Files().Return([]string{"history"}).Times(1) + + s := NewKvServer(ctx, nil, blockSnapshots, borSnapshots, historySnapshots, log.New()) + reply, err := s.Snapshots(ctx, nil) + require.NoError(t, err) + require.Equal(t, []string{"headers.seg", "bodies.seg", "borevents.seg", "borspans.seg"}, reply.BlocksFiles) + require.Equal(t, []string{"history"}, reply.HistoryFiles) +} + +func TestKVServerSnapshotsReturnsEmptyIfNoBlockSnapshots(t *testing.T) { + ctx := context.Background() + s := NewKvServer(ctx, nil, nil, nil, nil, log.New()) + reply, err := s.Snapshots(ctx, nil) + require.NoError(t, err) + require.Empty(t, reply.BlocksFiles) + require.Empty(t, reply.HistoryFiles) +} diff --git a/erigon-lib/kv/tables.go b/erigon-lib/kv/tables.go index 6ccdc45ef03..75435eef207 100644 --- a/erigon-lib/kv/tables.go +++ b/erigon-lib/kv/tables.go @@ -469,15 +469,18 @@ const ( ValidatorBalance = "ValidatorBalance" StaticValidators = "StaticValidators" StateEvents = "StateEvents" + ActiveValidatorIndicies = "ActiveValidatorIndicies" // External data - StateRoot = "StateRoot" - BlockRoot = "BlockRoot" - MinimalBeaconState = "MinimalBeaconState" + StateRoot = "StateRoot" + BlockRoot = "BlockRoot" + // Differentiate data stored per-slot vs per-epoch + SlotData = "SlotData" + EpochData = "EpochData" + // State fields InactivityScores = "InactivityScores" PreviousEpochParticipation = "PreviousEpochParticipation" CurrentEpochParticipation = "CurrentEpochParticipation" - Checkpoints = "Checkpoints" NextSyncCommittee = "NextSyncCommittee" CurrentSyncCommittee = "CurrentSyncCommittee" HistoricalRoots = "HistoricalRoots" @@ -663,14 +666,14 @@ var ChaindataTables = []string{ // Other stuff (related to state reconstitution) BlockRoot, StateRoot, - MinimalBeaconState, + SlotData, + EpochData, RandaoMixes, Proposers, StatesProcessingProgress, PreviousEpochParticipation, CurrentEpochParticipation, InactivityScores, - Checkpoints, NextSyncCommittee, CurrentSyncCommittee, HistoricalRoots, @@ -679,6 +682,7 @@ var ChaindataTables = []string{ PreviousEpochAttestations, Eth1DataVotes, IntraRandaoMixes, + ActiveValidatorIndicies, } const ( diff --git a/erigon-lib/metrics/parsing.go b/erigon-lib/metrics/parsing.go index 34e23ccccb2..2ae74952467 100644 --- a/erigon-lib/metrics/parsing.go +++ b/erigon-lib/metrics/parsing.go @@ -12,25 +12,24 @@ func parseMetric(s string) (string, prometheus.Labels, error) { if len(s) == 0 { return "", nil, fmt.Errorf("metric cannot be empty") } - n := strings.IndexByte(s, '{') - if n < 0 { + + ident, rest, ok := strings.Cut(s, "{") + if !ok { if err := validateIdent(s); err != nil { return "", nil, err } - 
return s, nil, nil
 	}
-	ident := s[:n]
-	s = s[n+1:]
+
 	if err := validateIdent(ident); err != nil {
 		return "", nil, err
 	}
-	if len(s) == 0 || s[len(s)-1] != '}' {
+
+	if len(rest) == 0 || rest[len(rest)-1] != '}' {
 		return "", nil, fmt.Errorf("missing closing curly brace at the end of %q", ident)
 	}
-	tags, err := parseTags(s[:len(s)-1])
-
+	tags, err := parseTags(rest[:len(rest)-1])
 	if err != nil {
 		return "", nil, err
 	}
diff --git a/erigon-lib/mmap/mmap_windows.go b/erigon-lib/mmap/mmap_windows.go
index b343ebb4024..0ce85db9ec2 100644
--- a/erigon-lib/mmap/mmap_windows.go
+++ b/erigon-lib/mmap/mmap_windows.go
@@ -41,7 +41,7 @@ func Mmap(f *os.File, size int) ([]byte, *[MaxMapSize]byte, error) {
 	}

 	// Close mapping handle.
-	if err := windows.CloseHandle(windows.Handle(h)); err != nil {
+	if err := windows.CloseHandle(h); err != nil {
 		return nil, nil, os.NewSyscallError("CloseHandle", err)
 	}

diff --git a/erigon-lib/recsplit/index.go b/erigon-lib/recsplit/index.go
index 277db2d5fdf..c10fa0205d4 100644
--- a/erigon-lib/recsplit/index.go
+++ b/erigon-lib/recsplit/index.go
@@ -178,6 +178,7 @@ func (idx *Index) ModTime() time.Time { return idx.modTime }
 func (idx *Index) BaseDataID() uint64 { return idx.baseDataID }
 func (idx *Index) FilePath() string { return idx.filePath }
 func (idx *Index) FileName() string { return idx.fileName }
+func (idx *Index) IsOpen() bool { return idx != nil && idx.f != nil }

 func (idx *Index) Close() {
 	if idx == nil {
diff --git a/erigon-lib/recsplit/recsplit.go b/erigon-lib/recsplit/recsplit.go
index a019ca9b3f9..be44bd20f48 100644
--- a/erigon-lib/recsplit/recsplit.go
+++ b/erigon-lib/recsplit/recsplit.go
@@ -161,7 +161,11 @@ func NewRecSplit(args RecSplitArgs, logger log.Logger) (*RecSplit, error) {
 	rs.baseDataID = args.BaseDataID
 	rs.etlBufLimit = args.EtlBufLimit
 	if rs.etlBufLimit == 0 {
-		rs.etlBufLimit = etl.BufferOptimalSize
+		// reduce RAM pressure, because:
+		// - indexing is done in the background or in many workers (building many indices in parallel)
+		// - `recsplit` has 2 etl collectors
+		// - `recsplit` building is cpu-intensive and the bottleneck is not in etl loading
+		rs.etlBufLimit = etl.BufferOptimalSize / 8
 	}
 	rs.bucketCollector = etl.NewCollector(RecSplitLogPrefix+" "+fname, rs.tmpDir, etl.NewSortableBuffer(rs.etlBufLimit), logger)
 	rs.bucketCollector.LogLvl(log.LvlDebug)
@@ -556,6 +560,9 @@ func (rs *RecSplit) Build(ctx context.Context) error {
 	if rs.indexF, err = os.Create(rs.tmpFilePath); err != nil {
 		return fmt.Errorf("create index file %s: %w", rs.indexFile, err)
 	}
+
+	rs.logger.Debug("[index] created", "file", rs.tmpFilePath, "fs", rs.indexF)
+
 	defer rs.indexF.Close()
 	rs.indexW = bufio.NewWriterSize(rs.indexF, etl.BufIOSize)
 	// Write minimal app-specific dataID in this index file
@@ -680,9 +687,12 @@ func (rs *RecSplit) Build(ctx context.Context) error {
 	if err = rs.indexF.Close(); err != nil {
 		return err
 	}
+
 	if err = os.Rename(rs.tmpFilePath, rs.indexFile); err != nil {
+		rs.logger.Warn("[index] rename", "file", rs.tmpFilePath, "err", err)
 		return err
 	}
+
 	return nil
 }
diff --git a/erigon-lib/sse/README.md b/erigon-lib/sse/README.md
deleted file mode 100644
index 6cd1b2090c5..00000000000
--- a/erigon-lib/sse/README.md
+++ /dev/null
@@ -1,8 +0,0 @@
-## sse
-
-sse implement server side events also known as eventsource
-
-see the specification here: https://html.spec.whatwg.org/multipage/server-sent-events.html
-
-
-
diff --git a/erigon-lib/sse/conn.go b/erigon-lib/sse/conn.go
deleted file mode 100644
index e6a39224ea7..00000000000
--- a/erigon-lib/sse/conn.go
+++ /dev/null @@ -1,40 +0,0 @@ -package sse - -import ( - "bufio" - "net/http" - "strings" -) - -// EventSink tracks a event source connection between a client and a server -type EventSink struct { - wr http.ResponseWriter - r *http.Request - bw *bufio.Writer - enc *Encoder - - LastEventId string -} - -func Upgrade(wr http.ResponseWriter, r *http.Request) (*EventSink, error) { - if !strings.EqualFold(r.Header.Get("Content-Type"), "text/event-stream") { - return nil, ErrInvalidContentType - } - o := &EventSink{ - wr: wr, - r: r, - bw: bufio.NewWriter(wr), - } - o.LastEventId = r.Header.Get("Last-Event-ID") - wr.Header().Add("Content-Type", "text/event-stream") - o.enc = NewEncoder(o.bw) - return o, nil -} - -func (e *EventSink) Encode(p *Packet) error { - err := e.enc.Encode(p) - if err != nil { - return err - } - return e.bw.Flush() -} diff --git a/erigon-lib/sse/encoder.go b/erigon-lib/sse/encoder.go deleted file mode 100644 index f1924f10531..00000000000 --- a/erigon-lib/sse/encoder.go +++ /dev/null @@ -1,82 +0,0 @@ -package sse - -import "io" - -// Packet represents an event to send -// the order in this struct is the order that they will be sent. -type Packet struct { - - // as a special case, an empty value of event will not write an event header - Event string - - // additional headers to be added. - // using the reserved headers event, header, data, id is undefined behavior - // note that this is the canonical way to send the "retry" header - Header map[string]string - - // the io.Reader to source the data from - Data io.Reader - - // whether or not to send an id, and if so, what id to send - // a nil id means to not send an id. - // empty string means to simply send the string "id\n" - // otherwise, the id is sent as is - // id is always sent at the end of the packet - ID *string -} - -func ID(x string) *string { - return &x -} - -// Encoder works at a higher level than the encoder. -// it works on the packet level. 
-type Encoder struct { - wr *Writer - - firstWriteDone bool -} - -func NewEncoder(w io.Writer) *Encoder { - wr := NewWriter(w) - return &Encoder{ - wr: wr, - } -} - -func (e *Encoder) Encode(p *Packet) error { - if e.firstWriteDone { - err := e.wr.Next() - if err != nil { - return err - } - } - e.firstWriteDone = true - if len(p.Event) > 0 { - if err := e.wr.Header("event", p.Event); err != nil { - return err - } - } - if p.Header != nil { - for k, v := range p.Header { - if err := e.wr.Header(k, v); err != nil { - return err - } - } - } - if p.Data != nil { - if err := e.wr.WriteData(p.Data); err != nil { - return err - } - } - err := e.wr.Flush() - if err != nil { - return err - } - if p.ID != nil { - if err := e.wr.Header("id", *p.ID); err != nil { - return err - } - } - return nil -} diff --git a/erigon-lib/sse/encoder_test.go b/erigon-lib/sse/encoder_test.go deleted file mode 100644 index 9415f86403b..00000000000 --- a/erigon-lib/sse/encoder_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package sse - -import ( - "bytes" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestEncoderSimple(t *testing.T) { - type testCase struct { - xs []*Packet - w string - } - cases := []testCase{{ - []*Packet{ - {Event: "hello", Data: strings.NewReader("some data")}, - {Data: strings.NewReader("some other data with no event header")}, - }, - "event: hello\ndata: some data\n\ndata: some other data with no event header\n", - }, - { - []*Packet{ - {Event: "hello", Data: strings.NewReader("some \n funky\r\n data\r")}, - {Data: strings.NewReader("some other data with an id"), ID: ID("dogs")}, - }, - "event: hello\ndata: some \ndata: funky\r\ndata: data\r\ndata: some other data with an id\nid: dogs\n", - }, - } - for _, v := range cases { - buf := &bytes.Buffer{} - enc := NewEncoder(buf) - for _, p := range v.xs { - require.NoError(t, enc.Encode(p)) - } - assert.EqualValues(t, v.w, buf.String()) - } -} diff --git a/erigon-lib/sse/errors.go b/erigon-lib/sse/errors.go deleted file mode 100644 index 8bf380295fb..00000000000 --- a/erigon-lib/sse/errors.go +++ /dev/null @@ -1,8 +0,0 @@ -package sse - -import "errors" - -var ( - ErrInvalidUTF8Bytes = errors.New("invalid utf8 bytes") - ErrInvalidContentType = errors.New("invalid content type") -) diff --git a/erigon-lib/sse/writer.go b/erigon-lib/sse/writer.go deleted file mode 100644 index a261d93dbc9..00000000000 --- a/erigon-lib/sse/writer.go +++ /dev/null @@ -1,170 +0,0 @@ -package sse - -import ( - "io" - "unicode/utf8" - //"github.com/segmentio/asm/utf8" -- can switch to this library in the future if needed -) - -type Option func(*Options) - -func OptionValidateUtf8(enable bool) Option { - return func(o *Options) { - o.validateUTF8 = true - } -} - -type Options struct { - validateUTF8 bool -} - -func (e *Options) ValidateUTF8() bool { - return e.validateUTF8 -} - -type writeState struct { - inMessage bool - trailingCarriage bool -} - -// writer is not thread safe. 
it is meant for internal usage -type Writer struct { - raw io.Writer - - es writeState - - w io.Writer - - o Options -} - -func NewWriter(w io.Writer, opts ...Option) *Writer { - o := &Options{} - for _, v := range opts { - v(o) - } - return &Writer{ - raw: w, - w: w, - o: *o, - } -} - -func (e *Writer) writeByte(x byte) error { - _, err := e.w.Write([]byte{x}) - return err -} -func (e *Writer) writeString(s string) (int, error) { - return e.w.Write([]byte(s)) -} - -func (e *Writer) Flush() error { - if e.es.inMessage { - // we are in a message, so write a newline to terminate it, as the user did not - err := e.writeByte('\n') - if err != nil { - return err - } - e.es.inMessage = false - } - // and reset the trailingCarriage state as well - e.es.trailingCarriage = false - return nil -} - -// next should be called at the end of an event. it will call Flush and then write a newline -func (e *Writer) Next() error { - - if err := e.Flush(); err != nil { - return err - } - // we write a newline, indicating now that this is a new event - if err := e.writeByte('\n'); err != nil { - return err - } - return nil -} - -// Event will start writing an event with the name topic to the stream -func (e *Writer) Header(name string, topic string) error { - if topic == "" { - return nil - } - if e.o.ValidateUTF8() { - if !utf8.ValidString(topic) { - return ErrInvalidUTF8Bytes - } - } - if len(topic) > 0 { - if _, err := e.writeString(name + ": "); err != nil { - return err - } - // write the supplied topic - if _, err := e.writeString(topic); err != nil { - return err - } - } - if err := e.writeByte('\n'); err != nil { - return err - } - - return nil -} - -// a convenient wrapper for writing data from io.Reader so that one can easily replay events. -func (e *Writer) WriteData(r io.Reader) (err error) { - if _, err = io.Copy(e, r); err != nil { - return err - } - return -} - -// Write underlying write method for piping data. be careful using this! -func (e *Writer) Write(xs []byte) (n int, err error) { - if e.o.ValidateUTF8() && !utf8.Valid(xs) { - return 0, ErrInvalidUTF8Bytes - } - for _, x := range xs { - // now, see if there was a trailing carriage left over from the last write - // only check and write the data if we are do not have a trailing carriage - if !e.es.trailingCarriage { - e.checkMessage() - } - if e.es.trailingCarriage { - // if there is, see if the character is a newline - if x != '\n' { - // its not a newline, so the trailing carriage was a valid end of message. 
write a new data field - e.es.inMessage = false - e.checkMessage() - } - // in the case that the character is a newline - // we will just write the newline and inMessage=false will be set in the case below - - // in both cases, the trailing carriage is dealt with - e.es.trailingCarriage = false - } - // write the byte no matter what - err = e.writeByte(x) - if err != nil { - return - } - // if success, note that we wrote another byte - n++ - if x == '\n' { - // end message if it's a newline always - e.es.inMessage = false - } else if x == '\r' { - // if x is a carriage return, mark it as trailing carriage - e.es.trailingCarriage = true - e.es.inMessage = false - } - } - return -} - -func (e *Writer) checkMessage() { - if !e.es.inMessage { - e.es.inMessage = true - e.writeString("data: ") - } -} diff --git a/erigon-lib/sse/writer_test.go b/erigon-lib/sse/writer_test.go deleted file mode 100644 index d25c18cad2f..00000000000 --- a/erigon-lib/sse/writer_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package sse - -import ( - "bytes" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestEncoderWrite(t *testing.T) { - type testCase struct { - e string - i string - w string - } - cases := []testCase{{ - "", - "foo bar\nbar foo\nwowwwwza\n", - `data: foo bar -data: bar foo -data: wowwwwza -`}, { - "hello", - "there\nfriend", - `event: hello -data: there -data: friend -`}, - } - - for _, v := range cases { - buf := &bytes.Buffer{} - enc := NewWriter(buf) - err := enc.Header("event", v.e) - require.NoError(t, err) - _, err = enc.Write([]byte(v.i)) - require.NoError(t, err) - require.NoError(t, enc.Flush()) - assert.EqualValues(t, buf.String(), v.w) - } -} - -func TestEncoderWriteData(t *testing.T) { - type testCase struct { - e string - i string - w string - } - cases := []testCase{{ - "", - "foo bar\nbar foo\nwowwwwza\n", - `data: foo bar -data: bar foo -data: wowwwwza -`}, { - "hello", - "there\nfriend", - `event: hello -data: there -data: friend -`}, - } - - for _, v := range cases { - buf := &bytes.Buffer{} - enc := NewWriter(buf) - err := enc.Header("event", v.e) - require.NoError(t, err) - err = enc.WriteData(strings.NewReader(v.i)) - require.NoError(t, err) - require.NoError(t, enc.Flush()) - assert.EqualValues(t, v.w, buf.String()) - } -} diff --git a/erigon-lib/state/history.go b/erigon-lib/state/history.go index 23fad025128..ba6206d5637 100644 --- a/erigon-lib/state/history.go +++ b/erigon-lib/state/history.go @@ -406,13 +406,12 @@ func iterateForVi(historyItem, iiItem *filesItem, p *background.Progress, compre func buildVi(ctx context.Context, historyItem, iiItem *filesItem, historyIdxPath, tmpdir string, count int, p *background.Progress, compressVals bool, logger log.Logger) error { rs, err := recsplit.NewRecSplit(recsplit.RecSplitArgs{ - KeyCount: count, - Enums: false, - BucketSize: 2000, - LeafSize: 8, - TmpDir: tmpdir, - IndexFile: historyIdxPath, - EtlBufLimit: etl.BufferOptimalSize / 2, + KeyCount: count, + Enums: false, + BucketSize: 2000, + LeafSize: 8, + TmpDir: tmpdir, + IndexFile: historyIdxPath, }, logger) if err != nil { return fmt.Errorf("create recsplit: %w", err) diff --git a/erigon-lib/txpool/fetch.go b/erigon-lib/txpool/fetch.go index e5097a7f706..a4ca77bff1a 100644 --- a/erigon-lib/txpool/fetch.go +++ b/erigon-lib/txpool/fetch.go @@ -476,7 +476,7 @@ func (f *Fetch) handleStateChanges(ctx context.Context, client StateChangesClien } func (f *Fetch) handleStateChangesRequest(ctx context.Context, req 
*remote.StateChangeBatch) error { - var unwindTxs, minedTxs types2.TxSlots + var unwindTxs, unwindBlobTxs, minedTxs types2.TxSlots for _, change := range req.ChangeBatch { if change.Direction == remote.Direction_FORWARD { minedTxs.Resize(uint(len(change.Txs))) @@ -500,18 +500,7 @@ func (f *Fetch) handleStateChangesRequest(ctx context.Context, req *remote.State return err } if utx.Type == types2.BlobTxType { - var knownBlobTxn *metaTx - //TODO: don't check `KnownBlobTxn()` here - because each call require `txpool.mutex.lock()`. Better add all hashes here and do check inside `OnNewBlock` - if err := f.db.View(ctx, func(tx kv.Tx) error { - knownBlobTxn, err = f.pool.GetKnownBlobTxn(tx, utx.IDHash[:]) - return err - }); err != nil { - return err - } - // Get the blob tx from cache; ignore altogether if it isn't there - if knownBlobTxn != nil { - unwindTxs.Append(knownBlobTxn.Tx, sender, false) - } + unwindBlobTxs.Append(utx, sender, false) } else { unwindTxs.Append(utx, sender, false) } @@ -525,7 +514,7 @@ func (f *Fetch) handleStateChangesRequest(ctx context.Context, req *remote.State } if err := f.db.View(ctx, func(tx kv.Tx) error { - return f.pool.OnNewBlock(ctx, req, unwindTxs, minedTxs, tx) + return f.pool.OnNewBlock(ctx, req, unwindTxs, unwindBlobTxs, minedTxs, tx) }); err != nil && !errors.Is(err, context.Canceled) { return err } diff --git a/erigon-lib/txpool/mocks_test.go b/erigon-lib/txpool/mocks_test.go index 22e8e8121af..502b4a69002 100644 --- a/erigon-lib/txpool/mocks_test.go +++ b/erigon-lib/txpool/mocks_test.go @@ -34,16 +34,13 @@ var _ Pool = &PoolMock{} // FilterKnownIdHashesFunc: func(tx kv.Tx, hashes types2.Hashes) (types2.Hashes, error) { // panic("mock out the FilterKnownIdHashes method") // }, -// GetKnownBlobTxnFunc: func(tx kv.Tx, hash []byte) (*metaTx, error) { -// panic("mock out the GetKnownBlobTxn method") -// }, // GetRlpFunc: func(tx kv.Tx, hash []byte) ([]byte, error) { // panic("mock out the GetRlp method") // }, // IdHashKnownFunc: func(tx kv.Tx, hash []byte) (bool, error) { // panic("mock out the IdHashKnown method") // }, -// OnNewBlockFunc: func(ctx context.Context, stateChanges *remote.StateChangeBatch, unwindTxs types2.TxSlots, minedTxs types2.TxSlots, tx kv.Tx) error { +// OnNewBlockFunc: func(ctx context.Context, stateChanges *remote.StateChangeBatch, unwindTxs types2.TxSlots, unwindBlobTxs types2.TxSlots, minedTxs types2.TxSlots, tx kv.Tx) error { // panic("mock out the OnNewBlock method") // }, // StartedFunc: func() bool { @@ -71,9 +68,6 @@ type PoolMock struct { // FilterKnownIdHashesFunc mocks the FilterKnownIdHashes method. FilterKnownIdHashesFunc func(tx kv.Tx, hashes types2.Hashes) (types2.Hashes, error) - // GetKnownBlobTxnFunc mocks the GetKnownBlobTxn method. - GetKnownBlobTxnFunc func(tx kv.Tx, hash []byte) (*metaTx, error) - // GetRlpFunc mocks the GetRlp method. GetRlpFunc func(tx kv.Tx, hash []byte) ([]byte, error) @@ -81,7 +75,7 @@ type PoolMock struct { IdHashKnownFunc func(tx kv.Tx, hash []byte) (bool, error) // OnNewBlockFunc mocks the OnNewBlock method. - OnNewBlockFunc func(ctx context.Context, stateChanges *remote.StateChangeBatch, unwindTxs types2.TxSlots, minedTxs types2.TxSlots, tx kv.Tx) error + OnNewBlockFunc func(ctx context.Context, stateChanges *remote.StateChangeBatch, unwindTxs types2.TxSlots, unwindBlobTxs types2.TxSlots, minedTxs types2.TxSlots, tx kv.Tx) error // StartedFunc mocks the Started method. StartedFunc func() bool @@ -119,13 +113,6 @@ type PoolMock struct { // Hashes is the hashes argument value. 
Hashes types2.Hashes } - // GetKnownBlobTxn holds details about calls to the GetKnownBlobTxn method. - GetKnownBlobTxn []struct { - // Tx is the tx argument value. - Tx kv.Tx - // Hash is the hash argument value. - Hash []byte - } // GetRlp holds details about calls to the GetRlp method. GetRlp []struct { // Tx is the tx argument value. @@ -148,6 +135,8 @@ type PoolMock struct { StateChanges *remote.StateChangeBatch // UnwindTxs is the unwindTxs argument value. UnwindTxs types2.TxSlots + // UnwindBlobTxs is the unwindBlobTxs argument value. + UnwindBlobTxs types2.TxSlots // MinedTxs is the minedTxs argument value. MinedTxs types2.TxSlots // Tx is the tx argument value. @@ -166,7 +155,6 @@ type PoolMock struct { lockAddNewGoodPeer sync.RWMutex lockAddRemoteTxs sync.RWMutex lockFilterKnownIdHashes sync.RWMutex - lockGetKnownBlobTxn sync.RWMutex lockGetRlp sync.RWMutex lockIdHashKnown sync.RWMutex lockOnNewBlock sync.RWMutex @@ -326,46 +314,6 @@ func (mock *PoolMock) FilterKnownIdHashesCalls() []struct { return calls } -// GetKnownBlobTxn calls GetKnownBlobTxnFunc. -func (mock *PoolMock) GetKnownBlobTxn(tx kv.Tx, hash []byte) (*metaTx, error) { - callInfo := struct { - Tx kv.Tx - Hash []byte - }{ - Tx: tx, - Hash: hash, - } - mock.lockGetKnownBlobTxn.Lock() - mock.calls.GetKnownBlobTxn = append(mock.calls.GetKnownBlobTxn, callInfo) - mock.lockGetKnownBlobTxn.Unlock() - if mock.GetKnownBlobTxnFunc == nil { - var ( - metaTxMoqParamOut *metaTx - errOut error - ) - return metaTxMoqParamOut, errOut - } - return mock.GetKnownBlobTxnFunc(tx, hash) -} - -// GetKnownBlobTxnCalls gets all the calls that were made to GetKnownBlobTxn. -// Check the length with: -// -// len(mockedPool.GetKnownBlobTxnCalls()) -func (mock *PoolMock) GetKnownBlobTxnCalls() []struct { - Tx kv.Tx - Hash []byte -} { - var calls []struct { - Tx kv.Tx - Hash []byte - } - mock.lockGetKnownBlobTxn.RLock() - calls = mock.calls.GetKnownBlobTxn - mock.lockGetKnownBlobTxn.RUnlock() - return calls -} - // GetRlp calls GetRlpFunc. func (mock *PoolMock) GetRlp(tx kv.Tx, hash []byte) ([]byte, error) { callInfo := struct { @@ -447,19 +395,21 @@ func (mock *PoolMock) IdHashKnownCalls() []struct { } // OnNewBlock calls OnNewBlockFunc. -func (mock *PoolMock) OnNewBlock(ctx context.Context, stateChanges *remote.StateChangeBatch, unwindTxs types2.TxSlots, minedTxs types2.TxSlots, tx kv.Tx) error { +func (mock *PoolMock) OnNewBlock(ctx context.Context, stateChanges *remote.StateChangeBatch, unwindTxs types2.TxSlots, unwindBlobTxs types2.TxSlots, minedTxs types2.TxSlots, tx kv.Tx) error { callInfo := struct { - Ctx context.Context - StateChanges *remote.StateChangeBatch - UnwindTxs types2.TxSlots - MinedTxs types2.TxSlots - Tx kv.Tx + Ctx context.Context + StateChanges *remote.StateChangeBatch + UnwindTxs types2.TxSlots + UnwindBlobTxs types2.TxSlots + MinedTxs types2.TxSlots + Tx kv.Tx }{ - Ctx: ctx, - StateChanges: stateChanges, - UnwindTxs: unwindTxs, - MinedTxs: minedTxs, - Tx: tx, + Ctx: ctx, + StateChanges: stateChanges, + UnwindTxs: unwindTxs, + UnwindBlobTxs: unwindBlobTxs, + MinedTxs: minedTxs, + Tx: tx, } mock.lockOnNewBlock.Lock() mock.calls.OnNewBlock = append(mock.calls.OnNewBlock, callInfo) @@ -470,7 +420,7 @@ func (mock *PoolMock) OnNewBlock(ctx context.Context, stateChanges *remote.State ) return errOut } - return mock.OnNewBlockFunc(ctx, stateChanges, unwindTxs, minedTxs, tx) + return mock.OnNewBlockFunc(ctx, stateChanges, unwindTxs, unwindBlobTxs, minedTxs, tx) } // OnNewBlockCalls gets all the calls that were made to OnNewBlock. 
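The OnNewBlock changes above reflect the interface rework visible in fetch.go: the removed GetKnownBlobTxn path resolved each unwound blob tx individually, taking the pool mutex once per call, while the new unwindBlobTxs argument lets OnNewBlock resolve the whole batch under the single lock it already holds (exactly what the removed TODO asked for). A minimal, self-contained sketch of that per-item vs. batched locking trade-off; the blobCache type and its fields are illustrative, not the pool's real structures:

package main

import (
	"fmt"
	"sync"
)

// blobCache stands in for the pool's mined-blob lookup, guarded by one mutex.
type blobCache struct {
	mu    sync.Mutex
	byKey map[string]string // tx hash -> cached payload (illustrative)
}

// resolvePerItem mirrors the removed fetch.go path: one lock acquisition
// (and, in the real code, one db.View) per unwound blob tx.
func (c *blobCache) resolvePerItem(hashes []string) []string {
	var out []string
	for _, h := range hashes {
		c.mu.Lock()
		if tx, ok := c.byKey[h]; ok {
			out = append(out, tx)
		}
		c.mu.Unlock()
	}
	return out
}

// resolveBatch mirrors the new flow: the fetcher only collects hashes into
// unwindBlobTxs, and the pool resolves them all in one pass under the lock
// it already holds for the rest of the block processing.
func (c *blobCache) resolveBatch(hashes []string) []string {
	c.mu.Lock()
	defer c.mu.Unlock()
	var out []string
	for _, h := range hashes {
		if tx, ok := c.byKey[h]; ok {
			out = append(out, tx)
		}
	}
	return out
}

func main() {
	c := &blobCache{byKey: map[string]string{"0xaa": "blob-tx-1"}}
	fmt.Println(c.resolvePerItem([]string{"0xaa", "0xbb"})) // [blob-tx-1]
	fmt.Println(c.resolveBatch([]string{"0xaa", "0xbb"}))   // [blob-tx-1]
}

Besides saving lock round-trips, batching keeps the lookup and the re-append to unwindTxs atomic with respect to the rest of OnNewBlock.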
@@ -478,18 +428,20 @@ func (mock *PoolMock) OnNewBlock(ctx context.Context, stateChanges *remote.State // // len(mockedPool.OnNewBlockCalls()) func (mock *PoolMock) OnNewBlockCalls() []struct { - Ctx context.Context - StateChanges *remote.StateChangeBatch - UnwindTxs types2.TxSlots - MinedTxs types2.TxSlots - Tx kv.Tx + Ctx context.Context + StateChanges *remote.StateChangeBatch + UnwindTxs types2.TxSlots + UnwindBlobTxs types2.TxSlots + MinedTxs types2.TxSlots + Tx kv.Tx } { var calls []struct { - Ctx context.Context - StateChanges *remote.StateChangeBatch - UnwindTxs types2.TxSlots - MinedTxs types2.TxSlots - Tx kv.Tx + Ctx context.Context + StateChanges *remote.StateChangeBatch + UnwindTxs types2.TxSlots + UnwindBlobTxs types2.TxSlots + MinedTxs types2.TxSlots + Tx kv.Tx } mock.lockOnNewBlock.RLock() calls = mock.calls.OnNewBlock diff --git a/erigon-lib/txpool/pool.go b/erigon-lib/txpool/pool.go index d97a22e8118..45495262b9d 100644 --- a/erigon-lib/txpool/pool.go +++ b/erigon-lib/txpool/pool.go @@ -74,6 +74,8 @@ var ( basefeeSubCounter = metrics.GetOrCreateGauge(`txpool_basefee`) ) +var TraceAll = false + // Pool is interface for the transaction pool // This interface exists for the convenience of testing, and not yet because // there are multiple implementations @@ -83,13 +85,12 @@ type Pool interface { // Handle 3 main events - new remote txs from p2p, new local txs from RPC, new blocks from execution layer AddRemoteTxs(ctx context.Context, newTxs types.TxSlots) AddLocalTxs(ctx context.Context, newTxs types.TxSlots, tx kv.Tx) ([]txpoolcfg.DiscardReason, error) - OnNewBlock(ctx context.Context, stateChanges *remote.StateChangeBatch, unwindTxs, minedTxs types.TxSlots, tx kv.Tx) error + OnNewBlock(ctx context.Context, stateChanges *remote.StateChangeBatch, unwindTxs, unwindBlobTxs, minedTxs types.TxSlots, tx kv.Tx) error // IdHashKnown check whether transaction with given Id hash is known to the pool IdHashKnown(tx kv.Tx, hash []byte) (bool, error) FilterKnownIdHashes(tx kv.Tx, hashes types.Hashes) (unknownHashes types.Hashes, err error) Started() bool GetRlp(tx kv.Tx, hash []byte) ([]byte, error) - GetKnownBlobTxn(tx kv.Tx, hash []byte) (*metaTx, error) AddNewGoodPeer(peerID types.PeerID) } @@ -126,7 +127,6 @@ type metaTx struct { timestamp uint64 // when it was added to pool subPool SubPoolMarker currentSubPool SubPoolType - alreadyYielded bool minedBlockNum uint64 } @@ -211,6 +211,7 @@ type TxPool struct { cfg txpoolcfg.Config chainID uint256.Int lastSeenBlock atomic.Uint64 + lastSeenCond *sync.Cond lastFinalizedBlock atomic.Uint64 started atomic.Bool pendingBaseFee atomic.Uint64 @@ -223,16 +224,17 @@ type TxPool struct { cancunTime *uint64 isPostCancun atomic.Bool maxBlobsPerBlock uint64 + feeCalculator FeeCalculator logger log.Logger } -// disables adding remote transactions -type TxPoolDropRemote struct { - *TxPool +type FeeCalculator interface { + CurrentFees(chainConfig *chain.Config, db kv.Getter) (baseFee uint64, blobFee uint64, minBlobGasPrice uint64, err error) } func New(newTxs chan types.Announcements, coreDB kv.RoDB, cfg txpoolcfg.Config, cache kvcache.Cache, - chainID uint256.Int, shanghaiTime, agraBlock, cancunTime *big.Int, maxBlobsPerBlock uint64, logger log.Logger, + chainID uint256.Int, shanghaiTime, agraBlock, cancunTime *big.Int, maxBlobsPerBlock uint64, + feeCalculator FeeCalculator, logger log.Logger, ) (*TxPool, error) { localsHistory, err := simplelru.NewLRU[string, struct{}](10_000, nil) if err != nil { @@ -254,8 +256,11 @@ func New(newTxs chan 
types.Announcements, coreDB kv.RoDB, cfg txpoolcfg.Config,
 		tracedSenders[common.BytesToAddress([]byte(sender))] = struct{}{}
 	}

+	lock := &sync.Mutex{}
+
 	res := &TxPool{
-		lock: &sync.Mutex{},
+		lock: lock,
+		lastSeenCond: sync.NewCond(lock),
 		byHash: map[string]*metaTx{},
 		isLocalLRU: localsHistory,
 		discardReasonsLRU: discardHistory,
@@ -275,6 +280,7 @@ func New(newTxs chan types.Announcements, coreDB kv.RoDB, cfg txpoolcfg.Config,
 		minedBlobTxsByBlock: map[uint64][]*metaTx{},
 		minedBlobTxsByHash: map[string]*metaTx{},
 		maxBlobsPerBlock: maxBlobsPerBlock,
+		feeCalculator: feeCalculator,
 		logger: logger,
 	}

@@ -303,46 +309,88 @@ func New(newTxs chan types.Announcements, coreDB kv.RoDB, cfg txpoolcfg.Config,
 	return res, nil
 }

-func NewTxPoolDropRemote(txPool *TxPool) *TxPoolDropRemote {
-	return &TxPoolDropRemote{TxPool: txPool}
+// SetInitialBlockGasLimit is a hack to allow the txpool to function before the
+// op-node makes the call to create the first block, setting the block gas
+// limit, and triggering the processing of the transactions in the transaction
+// pool.
+func (p *TxPool) SetInitialBlockGasLimit(limit uint64) {
+	p.blockGasLimit.Store(limit)
 }

-func (p *TxPool) OnNewBlock(ctx context.Context, stateChanges *remote.StateChangeBatch, unwindTxs, minedTxs types.TxSlots, tx kv.Tx) error {
-	if err := minedTxs.Valid(); err != nil {
-		return err
+func (p *TxPool) Start(ctx context.Context, db kv.RwDB) error {
+	if p.started.Load() {
+		return nil
 	}
+	return db.View(ctx, func(tx kv.Tx) error {
+		coreDb, _ := p.coreDBWithCache()
+		coreTx, err := coreDb.BeginRo(ctx)
+
+		if err != nil {
+			return err
+		}
+
+		defer coreTx.Rollback()
+
+		if err := p.fromDB(ctx, tx, coreTx); err != nil {
+			return fmt.Errorf("loading pool from DB: %w", err)
+		}
+
+		if p.started.CompareAndSwap(false, true) {
+			p.logger.Info("[txpool] Started")
+		}
+
+		return nil
+	})
+}
+
+func (p *TxPool) OnNewBlock(ctx context.Context, stateChanges *remote.StateChangeBatch, unwindTxs, unwindBlobTxs, minedTxs types.TxSlots, tx kv.Tx) error {
 	defer newBlockTimer.ObserveDuration(time.Now())
 	//t := time.Now()

 	coreDB, cache := p.coreDBWithCache()
 	cache.OnNewBlock(stateChanges)
 	coreTx, err := coreDB.BeginRo(ctx)
+
 	if err != nil {
 		return err
 	}
+
 	defer coreTx.Rollback()
-	p.lastSeenBlock.Store(stateChanges.ChangeBatch[len(stateChanges.ChangeBatch)-1].BlockHeight)
-	if !p.started.Load() {
-		if err := p.fromDBWithLock(ctx, tx, coreTx); err != nil {
-			return fmt.Errorf("OnNewBlock: loading txs from DB: %w", err)
-		}
+
+	block := stateChanges.ChangeBatch[len(stateChanges.ChangeBatch)-1].BlockHeight
+	baseFee := stateChanges.PendingBlockBaseFee
+	available := len(p.pending.best.ms)
+
+	defer func() {
+		p.logger.Debug("[txpool] New block", "block", block, "unwound", len(unwindTxs.Txs), "mined", len(minedTxs.Txs), "baseFee", baseFee, "pending-pre", available, "pending", p.pending.Len(), "baseFeePool", p.baseFee.Len(), "queued", p.queued.Len(), "err", err)
+	}()
+
+	if err = minedTxs.Valid(); err != nil {
+		return err
 	}
+
 	cacheView, err := cache.View(ctx, coreTx)
+
 	if err != nil {
 		return err
 	}

 	p.lock.Lock()
-	defer p.lock.Unlock()
+	defer func() {
+		if err == nil {
+			p.lastSeenBlock.Store(block)
+			p.lastSeenCond.Broadcast()
+		}
+
+		p.lock.Unlock()
+	}()

 	if assert.Enable {
 		if _, err := kvcache.AssertCheckValues(ctx, coreTx, cache); err != nil {
 			p.logger.Error("AssertCheckValues", "err", err, "stack", stack.Trace().String())
 		}
 	}

-	baseFee := stateChanges.PendingBlockBaseFee
 	pendingBaseFee, baseFeeChanged := p.setBaseFee(baseFee)
 	// Update pendingBase for
all pool queues and slices @@ -359,10 +407,24 @@ func (p *TxPool) OnNewBlock(ctx context.Context, stateChanges *remote.StateChang p.setBlobFee(pendingBlobFee) p.blockGasLimit.Store(stateChanges.BlockGasLimit) - if err := p.senders.onNewBlock(stateChanges, unwindTxs, minedTxs, p.logger); err != nil { + + for i, txn := range unwindBlobTxs.Txs { + if txn.Type == types.BlobTxType { + knownBlobTxn, err := p.getCachedBlobTxnLocked(coreTx, txn.IDHash[:]) + if err != nil { + return err + } + if knownBlobTxn != nil { + unwindTxs.Append(knownBlobTxn.Tx, unwindBlobTxs.Senders.At(i), false) + } + } + } + if err = p.senders.onNewBlock(stateChanges, unwindTxs, minedTxs, p.logger); err != nil { return err } + _, unwindTxs, err = p.validateTxs(&unwindTxs, cacheView) + if err != nil { return err } @@ -380,33 +442,31 @@ func (p *TxPool) OnNewBlock(ctx context.Context, stateChanges *remote.StateChang } } - if err := p.processMinedFinalizedBlobs(coreTx, minedTxs.Txs, stateChanges.FinalizedBlock); err != nil { + if err = p.processMinedFinalizedBlobs(coreTx, minedTxs.Txs, stateChanges.FinalizedBlock); err != nil { return err } - if err := removeMined(p.all, minedTxs.Txs, p.pending, p.baseFee, p.queued, p.discardLocked, p.logger); err != nil { + + if err = p.removeMined(p.all, minedTxs.Txs); err != nil { return err } - //p.logger.Debug("[txpool] new block", "unwinded", len(unwindTxs.txs), "mined", len(minedTxs.txs), "baseFee", baseFee, "blockHeight", blockHeight) + var announcements types.Announcements + + announcements, err = p.addTxsOnNewBlock(block, cacheView, stateChanges, p.senders, unwindTxs, /* newTxs */ + pendingBaseFee, stateChanges.BlockGasLimit, p.logger) - announcements, err := addTxsOnNewBlock(p.lastSeenBlock.Load(), cacheView, stateChanges, p.senders, unwindTxs, /* newTxs */ - pendingBaseFee, stateChanges.BlockGasLimit, - p.pending, p.baseFee, p.queued, p.all, p.byHash, p.addLocked, p.discardLocked, p.logger) if err != nil { return err } + p.pending.EnforceWorstInvariants() p.baseFee.EnforceInvariants() p.queued.EnforceInvariants() - promote(p.pending, p.baseFee, p.queued, pendingBaseFee, pendingBlobFee, p.discardLocked, &announcements, p.logger) + p.promote(pendingBaseFee, pendingBlobFee, &announcements, p.logger) p.pending.EnforceBestInvariants() p.promoted.Reset() p.promoted.AppendOther(announcements) - if p.started.CompareAndSwap(false, true) { - p.logger.Info("[txpool] Started") - } - if p.promoted.Len() > 0 { select { case p.newPendingTxs <- p.promoted.Copy(): @@ -414,12 +474,11 @@ func (p *TxPool) OnNewBlock(ctx context.Context, stateChanges *remote.StateChang } } - //p.logger.Info("[txpool] new block", "number", p.lastSeenBlock.Load(), "pendngBaseFee", pendingBaseFee, "in", time.Since(t)) return nil } func (p *TxPool) processRemoteTxs(ctx context.Context) error { - if !p.started.Load() { + if !p.Started() { return fmt.Errorf("txpool not started yet") } @@ -454,8 +513,8 @@ func (p *TxPool) processRemoteTxs(ctx context.Context) error { return err } - announcements, _, err := addTxs(p.lastSeenBlock.Load(), cacheView, p.senders, newTxs, - p.pendingBaseFee.Load(), p.pendingBlobFee.Load(), p.blockGasLimit.Load(), p.pending, p.baseFee, p.queued, p.all, p.byHash, p.addLocked, p.discardLocked, true, p.logger) + announcements, _, err := p.addTxs(p.lastSeenBlock.Load(), cacheView, p.senders, newTxs, + p.pendingBaseFee.Load(), p.pendingBlobFee.Load(), p.blockGasLimit.Load(), true, p.logger) if err != nil { return err } @@ -578,10 +637,8 @@ func (p *TxPool) getUnprocessedTxn(hashS string) 
(*types.TxSlot, bool) { return nil, false } -func (p *TxPool) GetKnownBlobTxn(tx kv.Tx, hash []byte) (*metaTx, error) { +func (p *TxPool) getCachedBlobTxnLocked(tx kv.Tx, hash []byte) (*metaTx, error) { hashS := string(hash) - p.lock.Lock() - defer p.lock.Unlock() if mt, ok := p.minedBlobTxsByHash[hashS]; ok { return mt, nil } @@ -619,20 +676,29 @@ func (p *TxPool) IsLocal(idHash []byte) bool { func (p *TxPool) AddNewGoodPeer(peerID types.PeerID) { p.recentlyConnectedPeers.AddPeer(peerID) } func (p *TxPool) Started() bool { return p.started.Load() } -func (p *TxPool) best(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableGas, availableBlobGas uint64, toSkip mapset.Set[[32]byte]) (bool, int, error) { - // First wait for the corresponding block to arrive - if p.lastSeenBlock.Load() < onTopOf { - return false, 0, nil // Too early +func (p *TxPool) best(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableGas, availableBlobGas uint64, yielded mapset.Set[[32]byte]) (bool, int, error) { + p.lock.Lock() + defer p.lock.Unlock() + + for last := p.lastSeenBlock.Load(); last < onTopOf; last = p.lastSeenBlock.Load() { + p.logger.Debug("[txpool] Waiting for block", "expecting", onTopOf, "lastSeen", last, "txRequested", n, "pending", p.pending.Len(), "baseFee", p.baseFee.Len(), "queued", p.queued.Len()) + p.lastSeenCond.Wait() } - isShanghai := p.isShanghai() || p.isAgra() best := p.pending.best + isShanghai := p.isShanghai() || p.isAgra() + txs.Resize(uint(cmp.Min(int(n), len(best.ms)))) var toRemove []*metaTx count := 0 + i := 0 - for i := 0; count < int(n) && i < len(best.ms); i++ { + defer func() { + p.logger.Debug("[txpool] Processing best request", "last", onTopOf, "txRequested", n, "txAvailable", len(best.ms), "txProcessed", i, "txReturned", count) + }() + + for ; count < int(n) && i < len(best.ms); i++ { // if we wouldn't have enough gas for a standard transaction then quit out early if availableGas < fixedgas.TxGas { break @@ -640,7 +706,7 @@ func (p *TxPool) best(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableG mt := best.ms[i] - if toSkip.Contains(mt.Tx.IDHash) { + if yielded.Contains(mt.Tx.IDHash) { continue } @@ -678,39 +744,26 @@ func (p *TxPool) best(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableG txs.Txs[count] = rlpTx copy(txs.Senders.At(count), sender.Bytes()) txs.IsLocal[count] = isLocal - toSkip.Add(mt.Tx.IDHash) // TODO: Is this unnecessary + yielded.Add(mt.Tx.IDHash) count++ } txs.Resize(uint(count)) if len(toRemove) > 0 { for _, mt := range toRemove { - p.pending.Remove(mt) + p.pending.Remove(mt, "best", p.logger) } } return true, count, nil } -func (p *TxPool) ResetYieldedStatus() { - p.lock.Lock() - defer p.lock.Unlock() - best := p.pending.best - for i := 0; i < len(best.ms); i++ { - best.ms[i].alreadyYielded = false - } -} - func (p *TxPool) YieldBest(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableGas, availableBlobGas uint64, toSkip mapset.Set[[32]byte]) (bool, int, error) { - p.lock.Lock() - defer p.lock.Unlock() return p.best(n, txs, tx, onTopOf, availableGas, availableBlobGas, toSkip) } func (p *TxPool) PeekBest(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableGas, availableBlobGas uint64) (bool, error) { set := mapset.NewThreadUnsafeSet[[32]byte]() - p.lock.Lock() - defer p.lock.Unlock() - onTime, _, err := p.best(n, txs, tx, onTopOf, availableGas, availableBlobGas, set) + onTime, _, err := p.YieldBest(n, txs, tx, onTopOf, availableGas, availableBlobGas, set) return onTime, err } @@ -819,13 +872,13 @@ func (p *TxPool) 
validateTx(txn *types.TxSlot, isLocal bool, stateCache kvcache. } if !isLocal && uint64(p.all.count(txn.SenderID)) > p.cfg.AccountSlots { if txn.Traced { - log.Info(fmt.Sprintf("TX TRACING: validateTx marked as spamming idHash=%x slots=%d, limit=%d", txn.IDHash, p.all.count(txn.SenderID), p.cfg.AccountSlots)) + p.logger.Info(fmt.Sprintf("TX TRACING: validateTx marked as spamming idHash=%x slots=%d, limit=%d", txn.IDHash, p.all.count(txn.SenderID), p.cfg.AccountSlots)) } return txpoolcfg.Spammer } if !isLocal && p.all.blobCount(txn.SenderID) > p.cfg.BlobSlots { if txn.Traced { - log.Info(fmt.Sprintf("TX TRACING: validateTx marked as spamming (too many blobs) idHash=%x slots=%d, limit=%d", txn.IDHash, p.all.count(txn.SenderID), p.cfg.AccountSlots)) + p.logger.Info(fmt.Sprintf("TX TRACING: validateTx marked as spamming (too many blobs) idHash=%x slots=%d, limit=%d", txn.IDHash, p.all.count(txn.SenderID), p.cfg.AccountSlots)) } return txpoolcfg.Spammer } @@ -908,7 +961,7 @@ func (p *TxPool) isShanghai() bool { } func (p *TxPool) isAgra() bool { - // once this flag has been set for the first time we no longer need to check the timestamp + // once this flag has been set for the first time we no longer need to check the block set := p.isPostAgra.Load() if set { return true @@ -1047,7 +1100,19 @@ func (p *TxPool) punishSpammer(spammer uint64) { count-- return count > 0 }) + for _, mt := range txsToDelete { + switch mt.currentSubPool { + case PendingSubPool: + p.pending.Remove(mt, "punishSpammer", p.logger) + case BaseFeeSubPool: + p.baseFee.Remove(mt, "punishSpammer", p.logger) + case QueuedSubPool: + p.queued.Remove(mt, "punishSpammer", p.logger) + default: + //already removed + } + p.discardLocked(mt, txpoolcfg.Spammer) // can't call it while iterating by all } } @@ -1084,15 +1149,6 @@ func (p *TxPool) AddLocalTxs(ctx context.Context, newTransactions types.TxSlots, p.lock.Lock() defer p.lock.Unlock() - if !p.Started() { - if err := p.fromDB(ctx, tx, coreTx); err != nil { - return nil, fmt.Errorf("AddLocalTxs: loading txs from DB: %w", err) - } - if p.started.CompareAndSwap(false, true) { - p.logger.Info("[txpool] Started") - } - } - if err = p.senders.registerNewSenders(&newTransactions, p.logger); err != nil { return nil, err } @@ -1102,8 +1158,8 @@ func (p *TxPool) AddLocalTxs(ctx context.Context, newTransactions types.TxSlots, return nil, err } - announcements, addReasons, err := addTxs(p.lastSeenBlock.Load(), cacheView, p.senders, newTxs, - p.pendingBaseFee.Load(), p.pendingBlobFee.Load(), p.blockGasLimit.Load(), p.pending, p.baseFee, p.queued, p.all, p.byHash, p.addLocked, p.discardLocked, true, p.logger) + announcements, addReasons, err := p.addTxs(p.lastSeenBlock.Load(), cacheView, p.senders, newTxs, + p.pendingBaseFee.Load(), p.pendingBlobFee.Load(), p.blockGasLimit.Load(), true, p.logger) if err == nil { for i, reason := range addReasons { if reason != txpoolcfg.NotSet { @@ -1140,16 +1196,8 @@ func (p *TxPool) coreDBWithCache() (kv.RoDB, kvcache.Cache) { return p._chainDB, p._stateCache } -func (p *TxPoolDropRemote) AddRemoteTxs(ctx context.Context, newTxs types.TxSlots) { - // disable adding remote transactions - // consume remote tx from fetch -} - -func addTxs(blockNum uint64, cacheView kvcache.CacheView, senders *sendersBatch, - newTxs types.TxSlots, pendingBaseFee, pendingBlobFee, blockGasLimit uint64, - pending *PendingPool, baseFee, queued *SubPool, - byNonce *BySenderAndNonce, byHash map[string]*metaTx, add func(*metaTx, *types.Announcements) txpoolcfg.DiscardReason, discard 
func(*metaTx, txpoolcfg.DiscardReason), collect bool, - logger log.Logger) (types.Announcements, []txpoolcfg.DiscardReason, error) { +func (p *TxPool) addTxs(blockNum uint64, cacheView kvcache.CacheView, senders *sendersBatch, + newTxs types.TxSlots, pendingBaseFee, pendingBlobFee, blockGasLimit uint64, collect bool, logger log.Logger) (types.Announcements, []txpoolcfg.DiscardReason, error) { if assert.Enable { for _, txn := range newTxs.Txs { if txn.SenderID == 0 { @@ -1157,6 +1205,7 @@ func addTxs(blockNum uint64, cacheView kvcache.CacheView, senders *sendersBatch, } } } + // This can be thought of a reverse operation from the one described before. // When a block that was deemed "the best" of its height, is no longer deemed "the best", the // transactions contained in it, are now viable for inclusion in other blocks, and therefore should @@ -1170,7 +1219,7 @@ func addTxs(blockNum uint64, cacheView kvcache.CacheView, senders *sendersBatch, discardReasons := make([]txpoolcfg.DiscardReason, len(newTxs.Txs)) announcements := types.Announcements{} for i, txn := range newTxs.Txs { - if found, ok := byHash[string(txn.IDHash[:])]; ok { + if found, ok := p.byHash[string(txn.IDHash[:])]; ok { discardReasons[i] = txpoolcfg.DuplicateHash // In case if the transition is stuck, "poke" it to rebroadcast if collect && newTxs.IsLocal[i] && (found.currentSubPool == PendingSubPool || found.currentSubPool == BaseFeeSubPool) { @@ -1179,7 +1228,7 @@ func addTxs(blockNum uint64, cacheView kvcache.CacheView, senders *sendersBatch, continue } mt := newMetaTx(txn, newTxs.IsLocal[i], blockNum) - if reason := add(mt, &announcements); reason != txpoolcfg.NotSet { + if reason := p.addLocked(mt, &announcements); reason != txpoolcfg.NotSet { discardReasons[i] = reason continue } @@ -1195,22 +1244,18 @@ func addTxs(blockNum uint64, cacheView kvcache.CacheView, senders *sendersBatch, if err != nil { return announcements, discardReasons, err } - onSenderStateChange(senderID, nonce, balance, byNonce, - blockGasLimit, pending, baseFee, queued, discard, logger) + p.onSenderStateChange(senderID, nonce, balance, blockGasLimit, logger) } - promote(pending, baseFee, queued, pendingBaseFee, pendingBlobFee, discard, &announcements, logger) - pending.EnforceBestInvariants() + p.promote(pendingBaseFee, pendingBlobFee, &announcements, logger) + p.pending.EnforceBestInvariants() return announcements, discardReasons, nil } // TODO: Looks like a copy of the above -func addTxsOnNewBlock(blockNum uint64, cacheView kvcache.CacheView, stateChanges *remote.StateChangeBatch, - senders *sendersBatch, newTxs types.TxSlots, pendingBaseFee uint64, blockGasLimit uint64, - pending *PendingPool, baseFee, queued *SubPool, - byNonce *BySenderAndNonce, byHash map[string]*metaTx, add func(*metaTx, *types.Announcements) txpoolcfg.DiscardReason, discard func(*metaTx, txpoolcfg.DiscardReason), - logger log.Logger) (types.Announcements, error) { +func (p *TxPool) addTxsOnNewBlock(blockNum uint64, cacheView kvcache.CacheView, stateChanges *remote.StateChangeBatch, + senders *sendersBatch, newTxs types.TxSlots, pendingBaseFee uint64, blockGasLimit uint64, logger log.Logger) (types.Announcements, error) { if assert.Enable { for _, txn := range newTxs.Txs { if txn.SenderID == 0 { @@ -1230,12 +1275,12 @@ func addTxsOnNewBlock(blockNum uint64, cacheView kvcache.CacheView, stateChanges sendersWithChangedState := map[uint64]struct{}{} announcements := types.Announcements{} for i, txn := range newTxs.Txs { - if _, ok := byHash[string(txn.IDHash[:])]; ok { + if _, 
ok := p.byHash[string(txn.IDHash[:])]; ok { continue } mt := newMetaTx(txn, newTxs.IsLocal[i], blockNum) - if reason := add(mt, &announcements); reason != txpoolcfg.NotSet { - discard(mt, reason) + if reason := p.addLocked(mt, &announcements); reason != txpoolcfg.NotSet { + p.discardLocked(mt, reason) continue } sendersWithChangedState[mt.Tx.SenderID] = struct{}{} @@ -1263,8 +1308,7 @@ func addTxsOnNewBlock(blockNum uint64, cacheView kvcache.CacheView, stateChanges if err != nil { return announcements, err } - onSenderStateChange(senderID, nonce, balance, byNonce, - blockGasLimit, pending, baseFee, queued, discard, logger) + p.onSenderStateChange(senderID, nonce, balance, blockGasLimit, logger) } return announcements, nil @@ -1331,11 +1375,11 @@ func (p *TxPool) addLocked(mt *metaTx, announcements *types.Announcements) txpoo switch found.currentSubPool { case PendingSubPool: - p.pending.Remove(found) + p.pending.Remove(found, "add", p.logger) case BaseFeeSubPool: - p.baseFee.Remove(found) + p.baseFee.Remove(found, "add", p.logger) case QueuedSubPool: - p.queued.Remove(found) + p.queued.Remove(found, "add", p.logger) default: //already removed } @@ -1351,7 +1395,7 @@ func (p *TxPool) addLocked(mt *metaTx, announcements *types.Announcements) txpoo hashStr := string(mt.Tx.IDHash[:]) p.byHash[hashStr] = mt - if replaced := p.all.replaceOrInsert(mt); replaced != nil { + if replaced := p.all.replaceOrInsert(mt, p.logger); replaced != nil { if assert.Enable { panic("must never happen") } @@ -1361,7 +1405,7 @@ func (p *TxPool) addLocked(mt *metaTx, announcements *types.Announcements) txpoo p.isLocalLRU.Add(hashStr, struct{}{}) } // All transactions are first added to the queued pool and then immediately promoted from there if required - p.queued.Add(mt, p.logger) + p.queued.Add(mt, "addLocked", p.logger) // Remove from mined cache as we are now "resurrecting" it to a sub-pool p.deleteMinedBlobTxn(hashStr) return txpoolcfg.NotSet @@ -1373,7 +1417,7 @@ func (p *TxPool) discardLocked(mt *metaTx, reason txpoolcfg.DiscardReason) { hashStr := string(mt.Tx.IDHash[:]) delete(p.byHash, hashStr) p.deletedTxs = append(p.deletedTxs, mt) - p.all.delete(mt) + p.all.delete(mt, reason, p.logger) p.discardReasonsLRU.Add(hashStr, reason) } @@ -1437,7 +1481,7 @@ func (p *TxPool) NonceFromAddress(addr [20]byte) (nonce uint64, inPool bool) { // modify state_balance and state_nonce, potentially remove some elements (if transaction with some nonce is // included into a block), and finally, walk over the transaction records and update SubPool fields depending on // the actual presence of nonce gaps and what the balance is. 
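The removeMined rewrite in the next hunk keeps the algorithm this comment describes: reduce the mined txs to one highest mined nonce per sender, then walk that sender's txs in ascending nonce order and discard everything at or below the watermark. A self-contained sketch of the same idea, with a sorted slice standing in for BySenderAndNonce and hypothetical names (slot, dropMined):

package main

import (
	"fmt"
	"sort"
)

// slot is a stand-in for the pool's metaTx; names here are hypothetical.
type slot struct {
	senderID uint64
	nonce    uint64
}

// dropMined removes, for every sender that had a tx mined, all queued txs
// whose nonce is at or below that sender's highest mined nonce -- the same
// per-sender reduction removeMined performs before walking BySenderAndNonce.
func dropMined(queued, mined []slot) []slot {
	// Reduce mined txs to one high-water nonce per sender.
	watermark := map[uint64]uint64{}
	for _, tx := range mined {
		if cur, ok := watermark[tx.senderID]; !ok || tx.nonce > cur {
			watermark[tx.senderID] = tx.nonce
		}
	}
	// Walk in ascending nonce order, like ascend(), which lets the real code
	// stop at the first tx whose nonce exceeds the watermark.
	sort.Slice(queued, func(i, j int) bool { return queued[i].nonce < queued[j].nonce })
	kept := queued[:0]
	for _, tx := range queued {
		if mark, ok := watermark[tx.senderID]; ok && tx.nonce <= mark {
			continue // mined or nonce-superseded: discarded with reason Mined
		}
		kept = append(kept, tx)
	}
	return kept
}

func main() {
	queued := []slot{{1, 5}, {1, 6}, {1, 7}, {2, 3}}
	mined := []slot{{1, 6}}
	fmt.Println(dropMined(queued, mined)) // [{2 3} {1 7}]
}

The real code additionally moves each removed tx out of its current sub-pool (pending, baseFee, or queued) before discarding, which is what the new per-pool counters in the hunk are tallying.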
-func removeMined(byNonce *BySenderAndNonce, minedTxs []*types.TxSlot, pending *PendingPool, baseFee, queued *SubPool, discard func(*metaTx, txpoolcfg.DiscardReason), logger log.Logger) error { +func (p *TxPool) removeMined(byNonce *BySenderAndNonce, minedTxs []*types.TxSlot) error { noncesToRemove := map[uint64]uint64{} for _, txn := range minedTxs { nonce, ok := noncesToRemove[txn.SenderID] @@ -1447,39 +1491,56 @@ func removeMined(byNonce *BySenderAndNonce, minedTxs []*types.TxSlot, pending *P } var toDel []*metaTx // can't delete items while iterate them + + discarded := 0 + pendingRemoved := 0 + baseFeeRemoved := 0 + queuedRemoved := 0 + for senderID, nonce := range noncesToRemove { - //if sender.all.Len() > 0 { - //logger.Debug("[txpool] removing mined", "senderID", tx.senderID, "sender.all.len()", sender.all.Len()) - //} - // delete mined transactions from everywhere byNonce.ascend(senderID, func(mt *metaTx) bool { - //logger.Debug("[txpool] removing mined, cmp nonces", "tx.nonce", it.metaTx.Tx.nonce, "sender.nonce", sender.nonce) if mt.Tx.Nonce > nonce { + if mt.Tx.Traced { + p.logger.Debug("[txpool] removing mined, cmp nonces", "tx.nonce", mt.Tx.Nonce, "sender.nonce", nonce) + } + return false } + if mt.Tx.Traced { - logger.Info(fmt.Sprintf("TX TRACING: removeMined idHash=%x senderId=%d, currentSubPool=%s", mt.Tx.IDHash, mt.Tx.SenderID, mt.currentSubPool)) + p.logger.Info("TX TRACING: removeMined", "idHash", fmt.Sprintf("%x", mt.Tx.IDHash), "senderId", mt.Tx.SenderID, "nonce", mt.Tx.Nonce, "currentSubPool", mt.currentSubPool) } + toDel = append(toDel, mt) // del from sub-pool switch mt.currentSubPool { case PendingSubPool: - pending.Remove(mt) + pendingRemoved++ + p.pending.Remove(mt, "remove-mined", p.logger) case BaseFeeSubPool: - baseFee.Remove(mt) + baseFeeRemoved++ + p.baseFee.Remove(mt, "remove-mined", p.logger) case QueuedSubPool: - queued.Remove(mt) + queuedRemoved++ + p.queued.Remove(mt, "remove-mined", p.logger) default: //already removed } return true }) + discarded += len(toDel) + for _, mt := range toDel { - discard(mt, txpoolcfg.Mined) + p.discardLocked(mt, txpoolcfg.Mined) } toDel = toDel[:0] } + + if discarded > 0 { + p.logger.Debug("Discarded transactions", "count", discarded, "pending", pendingRemoved, "baseFee", baseFeeRemoved, "queued", queuedRemoved) + } + return nil } @@ -1487,17 +1548,14 @@ func removeMined(byNonce *BySenderAndNonce, minedTxs []*types.TxSlot, pending *P // which sub pool they will need to go to. 
Since this depends on other transactions from the same sender by with lower // nonces, and also affect other transactions from the same sender with higher nonce, it loops through all transactions // for a given senderID -func onSenderStateChange(senderID uint64, senderNonce uint64, senderBalance uint256.Int, byNonce *BySenderAndNonce, - blockGasLimit uint64, pending *PendingPool, baseFee, queued *SubPool, discard func(*metaTx, txpoolcfg.DiscardReason), logger log.Logger) { +func (p *TxPool) onSenderStateChange(senderID uint64, senderNonce uint64, senderBalance uint256.Int, blockGasLimit uint64, logger log.Logger) { noGapsNonce := senderNonce cumulativeRequiredBalance := uint256.NewInt(0) minFeeCap := uint256.NewInt(0).SetAllOne() minTip := uint64(math.MaxUint64) var toDel []*metaTx // can't delete items while iterate them - byNonce.ascend(senderID, func(mt *metaTx) bool { - if mt.Tx.Traced { - logger.Info(fmt.Sprintf("TX TRACING: onSenderStateChange loop iteration idHash=%x senderID=%d, senderNonce=%d, txn.nonce=%d, currentSubPool=%s", mt.Tx.IDHash, senderID, senderNonce, mt.Tx.Nonce, mt.currentSubPool)) - } + + p.all.ascend(senderID, func(mt *metaTx) bool { deleteAndContinueReasonLog := "" if senderNonce > mt.Tx.Nonce { deleteAndContinueReasonLog = "low nonce" @@ -1506,16 +1564,16 @@ func onSenderStateChange(senderID uint64, senderNonce uint64, senderBalance uint } if deleteAndContinueReasonLog != "" { if mt.Tx.Traced { - logger.Info(fmt.Sprintf("TX TRACING: removing due to %s for idHash=%x senderID=%d, senderNonce=%d, txn.nonce=%d, currentSubPool=%s", deleteAndContinueReasonLog, mt.Tx.IDHash, senderID, senderNonce, mt.Tx.Nonce, mt.currentSubPool)) + logger.Info("TX TRACING: onSenderStateChange loop iteration remove", "idHash", fmt.Sprintf("%x", mt.Tx.IDHash), "senderID", senderID, "senderNonce", senderNonce, "txn.nonce", mt.Tx.Nonce, "currentSubPool", mt.currentSubPool, "reason", deleteAndContinueReasonLog) } // del from sub-pool switch mt.currentSubPool { case PendingSubPool: - pending.Remove(mt) + p.pending.Remove(mt, deleteAndContinueReasonLog, p.logger) case BaseFeeSubPool: - baseFee.Remove(mt) + p.baseFee.Remove(mt, deleteAndContinueReasonLog, p.logger) case QueuedSubPool: - queued.Remove(mt) + p.queued.Remove(mt, deleteAndContinueReasonLog, p.logger) default: //already removed } @@ -1573,60 +1631,62 @@ func onSenderStateChange(senderID uint64, senderNonce uint64, senderBalance uint } if mt.Tx.Traced { - logger.Info(fmt.Sprintf("TX TRACING: onSenderStateChange loop iteration idHash=%x senderId=%d subPool=%b", mt.Tx.IDHash, mt.Tx.SenderID, mt.subPool)) + logger.Info("TX TRACING: onSenderStateChange loop iteration update", "idHash", fmt.Sprintf("%x", mt.Tx.IDHash), "senderId", mt.Tx.SenderID, "nonce", mt.Tx.Nonce, "subPool", mt.currentSubPool) } // Some fields of mt might have changed, need to fix the invariants in the subpool best and worst queues switch mt.currentSubPool { case PendingSubPool: - pending.Updated(mt) + p.pending.Updated(mt) case BaseFeeSubPool: - baseFee.Updated(mt) + p.baseFee.Updated(mt) case QueuedSubPool: - queued.Updated(mt) + p.queued.Updated(mt) } return true }) + for _, mt := range toDel { - discard(mt, txpoolcfg.NonceTooLow) + p.discardLocked(mt, txpoolcfg.NonceTooLow) } + + logger.Trace("[txpool] onSenderStateChange", "sender", senderID, "count", p.all.count(senderID), "pending", p.pending.Len(), "baseFee", p.baseFee.Len(), "queued", p.queued.Len()) } // promote reasserts invariants of the subpool and returns the list of transactions that ended up // being 
promoted to the pending or basefee pool, for re-broadcasting -func promote(pending *PendingPool, baseFee, queued *SubPool, pendingBaseFee uint64, pendingBlobFee uint64, discard func(*metaTx, txpoolcfg.DiscardReason), announcements *types.Announcements, - logger log.Logger) { +func (p *TxPool) promote(pendingBaseFee uint64, pendingBlobFee uint64, announcements *types.Announcements, logger log.Logger) { // Demote worst transactions that do not qualify for pending sub pool anymore, to other sub pools, or discard - for worst := pending.Worst(); pending.Len() > 0 && (worst.subPool < BaseFeePoolBits || worst.minFeeCap.LtUint64(pendingBaseFee) || (worst.Tx.Type == types.BlobTxType && worst.Tx.BlobFeeCap.LtUint64(pendingBlobFee))); worst = pending.Worst() { + for worst := p.pending.Worst(); p.pending.Len() > 0 && (worst.subPool < BaseFeePoolBits || worst.minFeeCap.LtUint64(pendingBaseFee) || (worst.Tx.Type == types.BlobTxType && worst.Tx.BlobFeeCap.LtUint64(pendingBlobFee))); worst = p.pending.Worst() { if worst.subPool >= BaseFeePoolBits { - tx := pending.PopWorst() + tx := p.pending.PopWorst() announcements.Append(tx.Tx.Type, tx.Tx.Size, tx.Tx.IDHash[:]) - baseFee.Add(tx, logger) + p.baseFee.Add(tx, "demote-pending", logger) } else { - queued.Add(pending.PopWorst(), logger) + p.queued.Add(p.pending.PopWorst(), "demote-pending", logger) } } // Promote best transactions from base fee pool to pending pool while they qualify - for best := baseFee.Best(); baseFee.Len() > 0 && best.subPool >= BaseFeePoolBits && best.minFeeCap.CmpUint64(pendingBaseFee) >= 0 && (best.Tx.Type != types.BlobTxType || best.Tx.BlobFeeCap.CmpUint64(pendingBlobFee) >= 0); best = baseFee.Best() { - tx := baseFee.PopBest() + for best := p.baseFee.Best(); p.baseFee.Len() > 0 && best.subPool >= BaseFeePoolBits && best.minFeeCap.CmpUint64(pendingBaseFee) >= 0 && (best.Tx.Type != types.BlobTxType || best.Tx.BlobFeeCap.CmpUint64(pendingBlobFee) >= 0); best = p.baseFee.Best() { + tx := p.baseFee.PopBest() announcements.Append(tx.Tx.Type, tx.Tx.Size, tx.Tx.IDHash[:]) - pending.Add(tx, logger) + p.pending.Add(tx, logger) } // Demote worst transactions that do not qualify for base fee pool anymore, to queued sub pool, or discard - for worst := baseFee.Worst(); baseFee.Len() > 0 && worst.subPool < BaseFeePoolBits; worst = baseFee.Worst() { - queued.Add(baseFee.PopWorst(), logger) + for worst := p.baseFee.Worst(); p.baseFee.Len() > 0 && worst.subPool < BaseFeePoolBits; worst = p.baseFee.Worst() { + p.queued.Add(p.baseFee.PopWorst(), "demote-base", logger) } // Promote best transactions from the queued pool to either pending or base fee pool, while they qualify - for best := queued.Best(); queued.Len() > 0 && best.subPool >= BaseFeePoolBits; best = queued.Best() { + for best := p.queued.Best(); p.queued.Len() > 0 && best.subPool >= BaseFeePoolBits; best = p.queued.Best() { if best.minFeeCap.Cmp(uint256.NewInt(pendingBaseFee)) >= 0 { - tx := queued.PopBest() + tx := p.queued.PopBest() announcements.Append(tx.Tx.Type, tx.Tx.Size, tx.Tx.IDHash[:]) - pending.Add(tx, logger) + p.pending.Add(tx, logger) } else { - baseFee.Add(queued.PopBest(), logger) + p.baseFee.Add(p.queued.PopBest(), "promote-queued", logger) } } @@ -1634,18 +1694,18 @@ func promote(pending *PendingPool, baseFee, queued *SubPool, pendingBaseFee uint // // Discard worst transactions from pending pool until it is within capacity limit - for pending.Len() > pending.limit { - discard(pending.PopWorst(), txpoolcfg.PendingPoolOverflow) + for p.pending.Len() > p.pending.limit { + 
p.discardLocked(p.pending.PopWorst(), txpoolcfg.PendingPoolOverflow) } // Discard worst transactions from the base fee sub pool until it is within capacity limits - for baseFee.Len() > baseFee.limit { - discard(baseFee.PopWorst(), txpoolcfg.BaseFeePoolOverflow) + for p.baseFee.Len() > p.baseFee.limit { + p.discardLocked(p.baseFee.PopWorst(), txpoolcfg.BaseFeePoolOverflow) } // Discard worst transactions from the queued sub pool until it is within its capacity limits - for _ = queued.Worst(); queued.Len() > queued.limit; _ = queued.Worst() { - discard(queued.PopWorst(), txpoolcfg.QueuedPoolOverflow) + for _ = p.queued.Worst(); p.queued.Len() > p.queued.limit; _ = p.queued.Worst() { + p.discardLocked(p.queued.PopWorst(), txpoolcfg.QueuedPoolOverflow) } } @@ -1662,7 +1722,7 @@ const txMaxBroadcastSize = 4 * 1024 // // promote/demote transactions // reorgs -func MainLoop(ctx context.Context, db kv.RwDB, coreDB kv.RoDB, p *TxPool, newTxs chan types.Announcements, send *Send, newSlotsStreams *NewSlotsStreams, notifyMiningAboutNewSlots func()) { +func MainLoop(ctx context.Context, db kv.RwDB, p *TxPool, newTxs chan types.Announcements, send *Send, newSlotsStreams *NewSlotsStreams, notifyMiningAboutNewSlots func()) { syncToNewPeersEvery := time.NewTicker(p.cfg.SyncToNewPeersEvery) defer syncToNewPeersEvery.Stop() processRemoteTxsEvery := time.NewTicker(p.cfg.ProcessRemoteTxsEvery) @@ -1672,6 +1732,13 @@ func MainLoop(ctx context.Context, db kv.RwDB, coreDB kv.RoDB, p *TxPool, newTxs logEvery := time.NewTicker(p.cfg.LogEvery) defer logEvery.Stop() + err := p.Start(ctx, db) + + if err != nil { + p.logger.Error("[txpool] Failed to start", "err", err) + return + } + for { select { case <-ctx.Done(): @@ -1739,7 +1806,7 @@ func MainLoop(ctx context.Context, db kv.RwDB, coreDB kv.RoDB, p *TxPool, newTxs var remoteTxSizes []uint32 var remoteTxHashes types.Hashes var remoteTxRlps [][]byte - var broadCastedHashes types.Hashes + var broadcastHashes types.Hashes slotsRlp := make([][]byte, 0, announcements.Len()) if err := db.View(ctx, func(tx kv.Tx) error { @@ -1763,7 +1830,7 @@ func MainLoop(ctx context.Context, db kv.RwDB, coreDB kv.RoDB, p *TxPool, newTxs // "Nodes MUST NOT automatically broadcast blob transactions to their peers" - EIP-4844 if t != types.BlobTxType { localTxRlps = append(localTxRlps, slotRlp) - broadCastedHashes = append(broadCastedHashes, hash...) + broadcastHashes = append(broadcastHashes, hash...)
} } else { remoteTxTypes = append(remoteTxTypes, t) @@ -1790,12 +1857,12 @@ func MainLoop(ctx context.Context, db kv.RwDB, coreDB kv.RoDB, p *TxPool, newTxs const localTxsBroadcastMaxPeers uint64 = 10 txSentTo := send.BroadcastPooledTxs(localTxRlps, localTxsBroadcastMaxPeers) for i, peer := range txSentTo { - p.logger.Info("Local tx broadcasted", "txHash", hex.EncodeToString(broadCastedHashes.At(i)), "to peer", peer) + p.logger.Trace("Local tx broadcast", "txHash", hex.EncodeToString(broadcastHashes.At(i)), "to peer", peer) } hashSentTo := send.AnnouncePooledTxs(localTxTypes, localTxSizes, localTxHashes, localTxsBroadcastMaxPeers*2) for i := 0; i < localTxHashes.Len(); i++ { hash := localTxHashes.At(i) - p.logger.Info("Local tx announced", "txHash", hex.EncodeToString(hash), "to peer", hashSentTo[i], "baseFee", p.pendingBaseFee.Load()) + p.logger.Trace("Local tx announced", "txHash", hex.EncodeToString(hash), "to peer", hashSentTo[i], "baseFee", p.pendingBaseFee.Load()) } // broadcast remote transactions @@ -1859,6 +1926,7 @@ func (p *TxPool) flush(ctx context.Context, db kv.RwDB) (written uint64, err err } return written, nil } + func (p *TxPool) flushLocked(tx kv.RwTx) (err error) { for i, mt := range p.deletedTxs { id := mt.Tx.SenderID @@ -1942,20 +2010,33 @@ func (p *TxPool) flushLocked(tx kv.RwTx) (err error) { return nil } -func (p *TxPool) fromDBWithLock(ctx context.Context, tx kv.Tx, coreTx kv.Tx) error { - p.lock.Lock() - defer p.lock.Unlock() - return p.fromDB(ctx, tx, coreTx) -} func (p *TxPool) fromDB(ctx context.Context, tx kv.Tx, coreTx kv.Tx) error { if p.lastSeenBlock.Load() == 0 { lastSeenBlock, err := LastSeenBlock(tx) if err != nil { return err } + p.lastSeenBlock.Store(lastSeenBlock) } + // this is necessary as otherwise best - which waits for sync events + // may wait forever if blocks have been processed before the txpool + // starts with an empty db + lastSeenProgress, err := getExecutionProgress(coreTx) + + if err != nil { + return err + } + + if p.lastSeenBlock.Load() < lastSeenProgress { + // TODO we need to process the blocks since the + // last seen to make sure that the tx pool is in + // sync with the processed blocks + + p.lastSeenBlock.Store(lastSeenProgress) + } + cacheView, err := p._stateCache.View(ctx, coreTx) if err != nil { return err } @@ -2013,8 +2094,15 @@ func (p *TxPool) fromDB(ctx context.Context, tx kv.Tx, coreTx kv.Tx) error { i++ } - var pendingBaseFee uint64 - { + var pendingBaseFee, pendingBlobFee, minBlobGasPrice uint64 + + if p.feeCalculator != nil { + if chainConfig, _ := ChainConfig(tx); chainConfig != nil { + pendingBaseFee, pendingBlobFee, minBlobGasPrice, _ = p.feeCalculator.CurrentFees(chainConfig, coreTx) + } + } + + if pendingBaseFee == 0 { v, err := tx.GetOne(kv.PoolInfo, PoolPendingBaseFeeKey) if err != nil { return err } @@ -2023,8 +2111,8 @@ func (p *TxPool) fromDB(ctx context.Context, tx kv.Tx, coreTx kv.Tx) error { pendingBaseFee = binary.BigEndian.Uint64(v) } } - var pendingBlobFee uint64 = 1 // MIN_BLOB_GAS_PRICE A/EIP-4844 - { + + if pendingBlobFee == 0 { v, err := tx.GetOne(kv.PoolInfo, PoolPendingBlobFeeKey) if err != nil { return err } @@ -2034,18 +2122,40 @@ func (p *TxPool) fromDB(ctx context.Context, tx kv.Tx, coreTx kv.Tx) error { } } + if pendingBlobFee == 0 { + pendingBlobFee = minBlobGasPrice + } + err = p.senders.registerNewSenders(&txs, p.logger) if err != nil { return err } - if _, _, err := addTxs(p.lastSeenBlock.Load(), cacheView, p.senders, txs, - pendingBaseFee, pendingBlobFee, math.MaxUint64 /* blockGasLimit
*/, p.pending, p.baseFee, p.queued, p.all, p.byHash, p.addLocked, p.discardLocked, false, p.logger); err != nil { + if _, _, err := p.addTxs(p.lastSeenBlock.Load(), cacheView, p.senders, txs, + pendingBaseFee, pendingBlobFee, math.MaxUint64 /* blockGasLimit */, false, p.logger); err != nil { return err } p.pendingBaseFee.Store(pendingBaseFee) p.pendingBlobFee.Store(pendingBlobFee) return nil } + +func getExecutionProgress(db kv.Getter) (uint64, error) { + data, err := db.GetOne(kv.SyncStageProgress, []byte("Execution")) + if err != nil { + return 0, err + } + + if len(data) == 0 { + return 0, nil + } + + if len(data) < 8 { + return 0, fmt.Errorf("value must be at least 8 bytes, got %d", len(data)) + } + + return binary.BigEndian.Uint64(data[:8]), nil +} + func LastSeenBlock(tx kv.Getter) (uint64, error) { v, err := tx.GetOne(kv.PoolInfo, PoolLastSeenBlockKey) if err != nil { @@ -2108,7 +2218,7 @@ func (p *TxPool) printDebug(prefix string) { } } func (p *TxPool) logStats() { - if !p.started.Load() { + if !p.Started() { //p.logger.Info("[txpool] Not started yet, waiting for new blocks...") return } @@ -2217,6 +2327,11 @@ func (sc *sendersBatch) getID(addr common.Address) (uint64, bool) { } func (sc *sendersBatch) getOrCreateID(addr common.Address, logger log.Logger) (uint64, bool) { _, traced := sc.tracedSenders[addr] + + if !traced { + traced = TraceAll + } + id, ok := sc.senderIDs[addr] if !ok { sc.senderID++ @@ -2300,11 +2415,13 @@ func (b *BySenderAndNonce) nonce(senderID uint64) (nonce uint64, ok bool) { }) return nonce, ok } + func (b *BySenderAndNonce) ascendAll(f func(*metaTx) bool) { b.tree.Ascend(func(mt *metaTx) bool { return f(mt) }) } + func (b *BySenderAndNonce) ascend(senderID uint64, f func(*metaTx) bool) { s := b.search s.Tx.SenderID = senderID @@ -2316,6 +2433,7 @@ func (b *BySenderAndNonce) ascend(senderID uint64, f func(*metaTx) bool) { return f(mt) }) } + func (b *BySenderAndNonce) descend(senderID uint64, f func(*metaTx) bool) { s := b.search s.Tx.SenderID = senderID @@ -2327,12 +2445,15 @@ func (b *BySenderAndNonce) descend(senderID uint64, f func(*metaTx) bool) { return f(mt) }) } + func (b *BySenderAndNonce) count(senderID uint64) int { return b.senderIDTxnCount[senderID] } + func (b *BySenderAndNonce) blobCount(senderID uint64) uint64 { return b.senderIDBlobCount[senderID] } + func (b *BySenderAndNonce) hasTxs(senderID uint64) bool { has := false b.ascend(senderID, func(*metaTx) bool { @@ -2341,6 +2462,7 @@ func (b *BySenderAndNonce) hasTxs(senderID uint64) bool { }) return has } + func (b *BySenderAndNonce) get(senderID, txNonce uint64) *metaTx { s := b.search s.Tx.SenderID = senderID @@ -2355,8 +2477,13 @@ func (b *BySenderAndNonce) get(senderID, txNonce uint64) *metaTx { func (b *BySenderAndNonce) has(mt *metaTx) bool { return b.tree.Has(mt) } -func (b *BySenderAndNonce) delete(mt *metaTx) { + +func (b *BySenderAndNonce) delete(mt *metaTx, reason txpoolcfg.DiscardReason, logger log.Logger) { if _, ok := b.tree.Delete(mt); ok { + if mt.Tx.Traced { + logger.Info("TX TRACING: Deleted tx by nonce", "idHash", fmt.Sprintf("%x", mt.Tx.IDHash), "sender", mt.Tx.SenderID, "nonce", mt.Tx.Nonce, "reason", reason) + } + senderID := mt.Tx.SenderID count := b.senderIDTxnCount[senderID] if count > 1 { @@ -2376,11 +2503,21 @@ func (b *BySenderAndNonce) delete(mt *metaTx) { } } } -func (b *BySenderAndNonce) replaceOrInsert(mt *metaTx) *metaTx { + +func (b *BySenderAndNonce) replaceOrInsert(mt *metaTx, logger log.Logger) *metaTx { it, ok := b.tree.ReplaceOrInsert(mt) + if ok { + if 
mt.Tx.Traced { + logger.Info("TX TRACING: Replaced tx by nonce", "idHash", fmt.Sprintf("%x", mt.Tx.IDHash), "sender", mt.Tx.SenderID, "nonce", mt.Tx.Nonce) + } return it } + + if mt.Tx.Traced { + logger.Info("TX TRACING: Inserted tx by nonce", "idHash", fmt.Sprintf("%x", mt.Tx.IDHash), "sender", mt.Tx.SenderID, "nonce", mt.Tx.Nonce) + } + b.senderIDTxnCount[mt.Tx.SenderID]++ if mt.Tx.Type == types.BlobTxType && mt.Tx.Blobs != nil { b.senderIDBlobCount[mt.Tx.SenderID] += uint64(len(mt.Tx.Blobs)) @@ -2459,7 +2596,10 @@ func (p *PendingPool) Updated(mt *metaTx) { } func (p *PendingPool) Len() int { return len(p.best.ms) } -func (p *PendingPool) Remove(i *metaTx) { +func (p *PendingPool) Remove(i *metaTx, reason string, logger log.Logger) { + if i.Tx.Traced { + logger.Info(fmt.Sprintf("TX TRACING: removed from subpool %s", p.t), "idHash", fmt.Sprintf("%x", i.Tx.IDHash), "sender", i.Tx.SenderID, "nonce", i.Tx.Nonce, "reason", reason) + } if i.worstIndex >= 0 { heap.Remove(p.worst, i.worstIndex) } @@ -2471,7 +2611,7 @@ func (p *PendingPool) Remove(i *metaTx) { func (p *PendingPool) Add(i *metaTx, logger log.Logger) { if i.Tx.Traced { - logger.Info(fmt.Sprintf("TX TRACING: moved to subpool %s, IdHash=%x, sender=%d", p.t, i.Tx.IDHash, i.Tx.SenderID)) + logger.Info(fmt.Sprintf("TX TRACING: added to subpool %s, IdHash=%x, sender=%d, nonce=%d", p.t, i.Tx.IDHash, i.Tx.SenderID, i.Tx.Nonce)) } i.currentSubPool = p.t heap.Push(p.worst, i) @@ -2524,16 +2664,19 @@ func (p *SubPool) PopWorst() *metaTx { //nolint return i } func (p *SubPool) Len() int { return p.best.Len() } -func (p *SubPool) Add(i *metaTx, logger log.Logger) { +func (p *SubPool) Add(i *metaTx, reason string, logger log.Logger) { if i.Tx.Traced { - logger.Info(fmt.Sprintf("TX TRACING: moved to subpool %s, IdHash=%x, sender=%d", p.t, i.Tx.IDHash, i.Tx.SenderID)) + logger.Info(fmt.Sprintf("TX TRACING: added to subpool %s", p.t), "idHash", fmt.Sprintf("%x", i.Tx.IDHash), "sender", i.Tx.SenderID, "nonce", i.Tx.Nonce, "reason", reason) } i.currentSubPool = p.t heap.Push(p.best, i) heap.Push(p.worst, i) } -func (p *SubPool) Remove(i *metaTx) { +func (p *SubPool) Remove(i *metaTx, reason string, logger log.Logger) { + if i.Tx.Traced { + logger.Info(fmt.Sprintf("TX TRACING: removed from subpool %s", p.t), "idHash", fmt.Sprintf("%x", i.Tx.IDHash), "sender", i.Tx.SenderID, "nonce", i.Tx.Nonce, "reason", reason) + } heap.Remove(p.best, i.bestIndex) heap.Remove(p.worst, i.worstIndex) i.currentSubPool = 0 diff --git a/erigon-lib/txpool/pool_fuzz_test.go b/erigon-lib/txpool/pool_fuzz_test.go index e81d316910a..54b1beb0238 100644 --- a/erigon-lib/txpool/pool_fuzz_test.go +++ b/erigon-lib/txpool/pool_fuzz_test.go @@ -314,8 +314,12 @@ func FuzzOnNewBlocks(f *testing.F) { cfg := txpoolcfg.DefaultConfig sendersCache := kvcache.New(kvcache.DefaultCoherentConfig) - pool, err := New(ch, coreDB, cfg, sendersCache, *u256.N1, nil, nil, nil, fixedgas.DefaultMaxBlobsPerBlock, log.New()) + pool, err := New(ch, coreDB, cfg, sendersCache, *u256.N1, nil, nil, nil, fixedgas.DefaultMaxBlobsPerBlock, nil, log.New()) assert.NoError(err) + + err = pool.Start(ctx, db) + assert.NoError(err) + pool.senders.senderIDs = senderIDs for addr, id := range senderIDs { pool.senders.senderID2Addr[id] = addr @@ -481,7 +485,7 @@ func FuzzOnNewBlocks(f *testing.F) { } // go to first fork txs1, txs2, p2pReceived, txs3 := splitDataset(txs) - err = pool.OnNewBlock(ctx, change, txs1, types.TxSlots{}, tx) + err = pool.OnNewBlock(ctx, change, txs1, types.TxSlots{}, types.TxSlots{}, tx) 
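The pool hunks above all lean on one invariant: BySenderAndNonce keeps every transaction ordered by (senderID, nonce), so ascend can walk a single sender's transactions in nonce order while replaceOrInsert and delete keep the per-sender slot and blob counters in step. A minimal, self-contained sketch of that contract, with a sorted slice standing in for the btree and illustrative names rather than the pool's real types:

package main

import (
	"fmt"
	"sort"
)

type key struct {
	senderID uint64
	nonce    uint64
}

// index mimics the (senderID, nonce) ordering the pool's btree provides,
// plus the per-sender counter maintained on insert.
type index struct {
	keys  []key
	count map[uint64]int
}

func (ix *index) insert(k key) {
	i := sort.Search(len(ix.keys), func(j int) bool {
		o := ix.keys[j]
		return o.senderID > k.senderID || (o.senderID == k.senderID && o.nonce >= k.nonce)
	})
	ix.keys = append(ix.keys, key{})
	copy(ix.keys[i+1:], ix.keys[i:])
	ix.keys[i] = k
	ix.count[k.senderID]++
}

// ascend visits one sender's transactions in increasing nonce order and
// stops early when f returns false - the same contract as the pool's ascend.
func (ix *index) ascend(senderID uint64, f func(key) bool) {
	for _, k := range ix.keys {
		if k.senderID > senderID {
			return // slice is sorted: nothing for this sender beyond here
		}
		if k.senderID == senderID && !f(k) {
			return
		}
	}
}

func main() {
	ix := &index{count: map[uint64]int{}}
	ix.insert(key{senderID: 7, nonce: 2})
	ix.insert(key{senderID: 7, nonce: 0})
	ix.insert(key{senderID: 9, nonce: 1})
	ix.ascend(7, func(k key) bool { fmt.Println("sender 7, nonce", k.nonce); return true })
	fmt.Println("sender 7 holds", ix.count[7], "txs")
}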
assert.NoError(err) check(txs1, types.TxSlots{}, "fork1") checkNotify(txs1, types.TxSlots{}, "fork1") @@ -494,7 +498,7 @@ func FuzzOnNewBlocks(f *testing.F) { {BlockHeight: 1, BlockHash: h0}, }, } - err = pool.OnNewBlock(ctx, change, types.TxSlots{}, txs2, tx) + err = pool.OnNewBlock(ctx, change, types.TxSlots{}, types.TxSlots{}, txs2, tx) assert.NoError(err) check(types.TxSlots{}, txs2, "fork1 mined") checkNotify(types.TxSlots{}, txs2, "fork1 mined") @@ -507,7 +511,7 @@ func FuzzOnNewBlocks(f *testing.F) { {BlockHeight: 0, BlockHash: h0, Direction: remote.Direction_UNWIND}, }, } - err = pool.OnNewBlock(ctx, change, txs2, types.TxSlots{}, tx) + err = pool.OnNewBlock(ctx, change, txs2, types.TxSlots{}, types.TxSlots{}, tx) assert.NoError(err) check(txs2, types.TxSlots{}, "fork2") checkNotify(txs2, types.TxSlots{}, "fork2") @@ -519,7 +523,7 @@ func FuzzOnNewBlocks(f *testing.F) { {BlockHeight: 1, BlockHash: h22}, }, } - err = pool.OnNewBlock(ctx, change, types.TxSlots{}, txs3, tx) + err = pool.OnNewBlock(ctx, change, types.TxSlots{}, types.TxSlots{}, txs3, tx) assert.NoError(err) check(types.TxSlots{}, txs3, "fork2 mined") checkNotify(types.TxSlots{}, txs3, "fork2 mined") @@ -536,8 +540,9 @@ func FuzzOnNewBlocks(f *testing.F) { check(p2pReceived, types.TxSlots{}, "after_flush") checkNotify(p2pReceived, types.TxSlots{}, "after_flush") - p2, err := New(ch, coreDB, txpoolcfg.DefaultConfig, sendersCache, *u256.N1, nil, nil, nil, fixedgas.DefaultMaxBlobsPerBlock, log.New()) + p2, err := New(ch, coreDB, txpoolcfg.DefaultConfig, sendersCache, *u256.N1, nil, nil, nil, fixedgas.DefaultMaxBlobsPerBlock, nil, log.New()) assert.NoError(err) + p2.senders = pool.senders // senders are not persisted err = coreDB.View(ctx, func(coreTx kv.Tx) error { return p2.fromDB(ctx, tx, coreTx) }) require.NoError(err) diff --git a/erigon-lib/txpool/pool_test.go b/erigon-lib/txpool/pool_test.go index 5ac404c1d09..98dc31c2893 100644 --- a/erigon-lib/txpool/pool_test.go +++ b/erigon-lib/txpool/pool_test.go @@ -53,7 +53,7 @@ func TestNonceFromAddress(t *testing.T) { cfg := txpoolcfg.DefaultConfig sendersCache := kvcache.New(kvcache.DefaultCoherentConfig) - pool, err := New(ch, coreDB, cfg, sendersCache, *u256.N1, nil, nil, nil, fixedgas.DefaultMaxBlobsPerBlock, log.New()) + pool, err := New(ch, coreDB, cfg, sendersCache, *u256.N1, nil, nil, nil, fixedgas.DefaultMaxBlobsPerBlock, nil, log.New()) assert.NoError(err) require.True(pool != nil) ctx := context.Background() @@ -81,7 +81,7 @@ func TestNonceFromAddress(t *testing.T) { tx, err := db.BeginRw(ctx) require.NoError(err) defer tx.Rollback() - err = pool.OnNewBlock(ctx, change, types.TxSlots{}, types.TxSlots{}, tx) + err = pool.OnNewBlock(ctx, change, types.TxSlots{}, types.TxSlots{}, types.TxSlots{}, tx) assert.NoError(err) { @@ -173,7 +173,7 @@ func TestReplaceWithHigherFee(t *testing.T) { cfg := txpoolcfg.DefaultConfig sendersCache := kvcache.New(kvcache.DefaultCoherentConfig) - pool, err := New(ch, coreDB, cfg, sendersCache, *u256.N1, nil, nil, nil, fixedgas.DefaultMaxBlobsPerBlock, log.New()) + pool, err := New(ch, coreDB, cfg, sendersCache, *u256.N1, nil, nil, nil, fixedgas.DefaultMaxBlobsPerBlock, nil, log.New()) assert.NoError(err) require.NotEqual(nil, pool) ctx := context.Background() @@ -201,7 +201,7 @@ func TestReplaceWithHigherFee(t *testing.T) { tx, err := db.BeginRw(ctx) require.NoError(err) defer tx.Rollback() - err = pool.OnNewBlock(ctx, change, types.TxSlots{}, types.TxSlots{}, tx) + err = pool.OnNewBlock(ctx, change, types.TxSlots{}, types.TxSlots{}, 
types.TxSlots{}, tx) assert.NoError(err) { @@ -290,7 +290,7 @@ func TestReverseNonces(t *testing.T) { cfg := txpoolcfg.DefaultConfig sendersCache := kvcache.New(kvcache.DefaultCoherentConfig) - pool, err := New(ch, coreDB, cfg, sendersCache, *u256.N1, nil, nil, nil, fixedgas.DefaultMaxBlobsPerBlock, log.New()) + pool, err := New(ch, coreDB, cfg, sendersCache, *u256.N1, nil, nil, nil, fixedgas.DefaultMaxBlobsPerBlock, nil, log.New()) assert.NoError(err) require.True(pool != nil) ctx := context.Background() @@ -318,7 +318,7 @@ func TestReverseNonces(t *testing.T) { tx, err := db.BeginRw(ctx) require.NoError(err) defer tx.Rollback() - err = pool.OnNewBlock(ctx, change, types.TxSlots{}, types.TxSlots{}, tx) + err = pool.OnNewBlock(ctx, change, types.TxSlots{}, types.TxSlots{}, types.TxSlots{}, tx) assert.NoError(err) // 1. Send high fee transaction with nonce gap { @@ -417,7 +417,7 @@ func TestTxPoke(t *testing.T) { cfg := txpoolcfg.DefaultConfig sendersCache := kvcache.New(kvcache.DefaultCoherentConfig) - pool, err := New(ch, coreDB, cfg, sendersCache, *u256.N1, nil, nil, nil, fixedgas.DefaultMaxBlobsPerBlock, log.New()) + pool, err := New(ch, coreDB, cfg, sendersCache, *u256.N1, nil, nil, nil, fixedgas.DefaultMaxBlobsPerBlock, nil, log.New()) assert.NoError(err) require.True(pool != nil) ctx := context.Background() @@ -445,7 +445,7 @@ func TestTxPoke(t *testing.T) { tx, err := db.BeginRw(ctx) require.NoError(err) defer tx.Rollback() - err = pool.OnNewBlock(ctx, change, types.TxSlots{}, types.TxSlots{}, tx) + err = pool.OnNewBlock(ctx, change, types.TxSlots{}, types.TxSlots{}, types.TxSlots{}, tx) assert.NoError(err) var idHash types.Hashes @@ -682,7 +682,7 @@ func TestShanghaiValidateTx(t *testing.T) { } cache := &kvcache.DummyCache{} - pool, err := New(ch, coreDB, cfg, cache, *u256.N1, shanghaiTime, nil /* agraBlock */, nil /* cancunTime */, fixedgas.DefaultMaxBlobsPerBlock, logger) + pool, err := New(ch, coreDB, cfg, cache, *u256.N1, shanghaiTime, nil /* agraBlock */, nil /* cancunTime */, fixedgas.DefaultMaxBlobsPerBlock, nil, logger) asrt.NoError(err) ctx := context.Background() tx, err := coreDB.BeginRw(ctx) @@ -728,7 +728,7 @@ func TestBlobTxReplacement(t *testing.T) { db, coreDB := memdb.NewTestPoolDB(t), memdb.NewTestDB(t) cfg := txpoolcfg.DefaultConfig sendersCache := kvcache.New(kvcache.DefaultCoherentConfig) - pool, err := New(ch, coreDB, cfg, sendersCache, *u256.N1, common.Big0, nil, common.Big0, fixedgas.DefaultMaxBlobsPerBlock, log.New()) + pool, err := New(ch, coreDB, cfg, sendersCache, *u256.N1, common.Big0, nil, common.Big0, fixedgas.DefaultMaxBlobsPerBlock, nil, log.New()) assert.NoError(err) require.True(pool != nil) ctx := context.Background() @@ -757,8 +757,7 @@ func TestBlobTxReplacement(t *testing.T) { tx, err := db.BeginRw(ctx) require.NoError(err) defer tx.Rollback() - - err = pool.OnNewBlock(ctx, change, types.TxSlots{}, types.TxSlots{}, tx) + err = pool.OnNewBlock(ctx, change, types.TxSlots{}, types.TxSlots{}, types.TxSlots{}, tx) assert.NoError(err) tip, feeCap, blobFeeCap := uint256.NewInt(100_000), uint256.NewInt(200_000), uint256.NewInt(200_000) @@ -951,11 +950,15 @@ func TestDropRemoteAtNoGossip(t *testing.T) { logger := log.New() sendersCache := kvcache.New(kvcache.DefaultCoherentConfig) - txPool, err := New(ch, coreDB, cfg, sendersCache, *u256.N1, big.NewInt(0), big.NewInt(0), nil, fixedgas.DefaultMaxBlobsPerBlock, logger) + txPool, err := New(ch, coreDB, cfg, sendersCache, *u256.N1, big.NewInt(0), big.NewInt(0), nil, fixedgas.DefaultMaxBlobsPerBlock, nil, 
logger) assert.NoError(err) require.True(txPool != nil) ctx := context.Background() + + err = txPool.Start(ctx, db) + assert.NoError(err) + var stateVersionID uint64 = 0 pendingBaseFee := uint64(1_000_000) // start blocks from 0, set empty hash - then kvcache will also work on this @@ -980,7 +983,7 @@ func TestDropRemoteAtNoGossip(t *testing.T) { tx, err := db.BeginRw(ctx) require.NoError(err) defer tx.Rollback() - err = txPool.OnNewBlock(ctx, change, types.TxSlots{}, types.TxSlots{}, tx) + err = txPool.OnNewBlock(ctx, change, types.TxSlots{}, types.TxSlots{}, types.TxSlots{}, tx) assert.NoError(err) // 1. Try Local Tx { diff --git a/erigon-lib/txpool/txpoolcfg/txpoolcfg.go b/erigon-lib/txpool/txpoolcfg/txpoolcfg.go index 8f613d9fe73..ca98782ff30 100644 --- a/erigon-lib/txpool/txpoolcfg/txpoolcfg.go +++ b/erigon-lib/txpool/txpoolcfg/txpoolcfg.go @@ -29,17 +29,18 @@ import ( ) type Config struct { - DBDir string - TracedSenders []string // List of senders for which tx pool should print out debugging info - PendingSubPoolLimit int - BaseFeeSubPoolLimit int - QueuedSubPoolLimit int - MinFeeCap uint64 - AccountSlots uint64 // Number of executable transaction slots guaranteed per account - BlobSlots uint64 // Total number of blobs (not txs) allowed per account - PriceBump uint64 // Price bump percentage to replace an already existing transaction - BlobPriceBump uint64 //Price bump percentage to replace an existing 4844 blob tx (type-3) - OverrideCancunTime *big.Int + DBDir string + TracedSenders []string // List of senders for which tx pool should print out debugging info + PendingSubPoolLimit int + BaseFeeSubPoolLimit int + QueuedSubPoolLimit int + MinFeeCap uint64 + AccountSlots uint64 // Number of executable transaction slots guaranteed per account + BlobSlots uint64 // Total number of blobs (not txs) allowed per account + PriceBump uint64 // Price bump percentage to replace an already existing transaction + BlobPriceBump uint64 //Price bump percentage to replace an existing 4844 blob tx (type-3) + OverrideCancunTime *big.Int + OverrideOptimismCanyonTime *big.Int // regular batch tasks processing diff --git a/erigon-lib/txpool/txpooluitl/all_components.go b/erigon-lib/txpool/txpooluitl/all_components.go index 8ab31eca862..ecf884bbe39 100644 --- a/erigon-lib/txpool/txpooluitl/all_components.go +++ b/erigon-lib/txpool/txpooluitl/all_components.go @@ -101,8 +101,8 @@ func SaveChainConfigIfNeed(ctx context.Context, coreDB kv.RoDB, txPoolDB kv.RwDB } func AllComponents(ctx context.Context, cfg txpoolcfg.Config, cache kvcache.Cache, newTxs chan types.Announcements, chainDB kv.RoDB, - sentryClients []direct.SentryClient, stateChangesClient txpool.StateChangesClient, logger log.Logger) (kv.RwDB, *txpool.TxPool, *txpool.Fetch, *txpool.Send, *txpool.GrpcServer, error) { - opts := mdbx.NewMDBX(log.New()).Label(kv.TxPoolDB).Path(cfg.DBDir). + sentryClients []direct.SentryClient, stateChangesClient txpool.StateChangesClient, feeCalculator txpool.FeeCalculator, logger log.Logger) (kv.RwDB, *txpool.TxPool, *txpool.Fetch, *txpool.Send, *txpool.GrpcServer, error) { + opts := mdbx.NewMDBX(logger).Label(kv.TxPoolDB).Path(cfg.DBDir). WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg { return kv.TxpoolTablesCfg }). WriteMergeThreshold(3 * 8192). PageSize(uint64(16 * datasize.KB)). 
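The test updates above show the pool's new lifecycle: New now threads through an optional fee calculator (nil in these tests) and callers invoke Start before the first OnNewBlock, which is also what MainLoop now does internally. A hedged sketch of that assumed contract, with stand-in types rather than the real erigon-lib/txpool signatures:

package main

import (
	"context"
	"fmt"
)

// FeeCalculator stands in for the optional dependency the constructor now
// accepts; a nil value means "fall back to fees persisted in the pool DB".
type FeeCalculator interface {
	CurrentFees() (pendingBaseFee, pendingBlobFee, minBlobGasPrice uint64, err error)
}

type pool struct {
	feeCalc FeeCalculator
	started bool
}

func newPool(feeCalc FeeCalculator) *pool { return &pool{feeCalc: feeCalc} }

// start mirrors the Start calls the tests now make before OnNewBlock:
// restore persisted state once, then mark the pool usable.
func (p *pool) start(ctx context.Context) error {
	p.started = true // the real pool reloads txs and fees from its DB here
	return nil
}

func (p *pool) onNewBlock() error {
	if !p.started {
		return fmt.Errorf("pool not started")
	}
	return nil
}

func main() {
	p := newPool(nil) // nil fee calculator: use whatever the pool DB has stored
	if err := p.start(context.Background()); err != nil {
		panic(err)
	}
	fmt.Println("onNewBlock:", p.onNewBlock())
}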
@@ -137,14 +137,14 @@ func AllComponents(ctx context.Context, cfg txpoolcfg.Config, cache kvcache.Cach shanghaiTime := chainConfig.ShanghaiTime var agraBlock *big.Int if chainConfig.Bor != nil { - agraBlock = chainConfig.Bor.AgraBlock + agraBlock = chainConfig.Bor.GetAgraBlock() } cancunTime := chainConfig.CancunTime if cfg.OverrideCancunTime != nil { cancunTime = cfg.OverrideCancunTime } - txPool, err := txpool.New(newTxs, chainDB, cfg, cache, *chainID, shanghaiTime, agraBlock, cancunTime, maxBlobsPerBlock, logger) + txPool, err := txpool.New(newTxs, chainDB, cfg, cache, *chainID, shanghaiTime, agraBlock, cancunTime, maxBlobsPerBlock, feeCalculator, logger) if err != nil { return nil, nil, nil, nil, nil, err } diff --git a/erigon-lib/wrap/e3_wrapper.go b/erigon-lib/wrap/e3_wrapper.go new file mode 100644 index 00000000000..71f7f0e5f16 --- /dev/null +++ b/erigon-lib/wrap/e3_wrapper.go @@ -0,0 +1,10 @@ +package wrap + +import ( + "github.com/ledgerwatch/erigon-lib/kv" +) + +type TxContainer struct { + Tx kv.RwTx + Ttx kv.TemporalTx +} diff --git a/eth/backend.go b/eth/backend.go index 8704aed0f32..8a4ee085fe6 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -29,39 +29,10 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" lru "github.com/hashicorp/golang-lru/arc/v2" - "github.com/ledgerwatch/erigon-lib/chain/networkname" - "github.com/ledgerwatch/erigon-lib/diagnostics" - "github.com/ledgerwatch/erigon-lib/downloader/downloadergrpc" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" - "github.com/ledgerwatch/erigon/cl/clparams" - "github.com/ledgerwatch/erigon/cl/cltypes" - "github.com/ledgerwatch/erigon/cl/fork" - "github.com/ledgerwatch/erigon/cl/persistence" - "github.com/ledgerwatch/erigon/cl/persistence/db_config" - "github.com/ledgerwatch/erigon/cl/persistence/format/snapshot_format/getters" - clcore "github.com/ledgerwatch/erigon/cl/phase1/core" - "github.com/ledgerwatch/erigon/cl/phase1/execution_client" - "github.com/ledgerwatch/erigon/cl/sentinel" - "github.com/ledgerwatch/erigon/cl/sentinel/service" - - "github.com/ledgerwatch/erigon/core/rawdb/blockio" - "github.com/ledgerwatch/erigon/ethdb/prune" - "github.com/ledgerwatch/erigon/p2p/sentry" - "github.com/ledgerwatch/erigon/p2p/sentry/sentry_multi_client" - "github.com/ledgerwatch/erigon/turbo/builder" - "github.com/ledgerwatch/erigon/turbo/engineapi" - "github.com/ledgerwatch/erigon/turbo/engineapi/engine_block_downloader" - "github.com/ledgerwatch/erigon/turbo/engineapi/engine_helpers" - "github.com/ledgerwatch/erigon/turbo/execution/eth1" - "github.com/ledgerwatch/erigon/turbo/execution/eth1/eth1_chain_reader.go" - "github.com/ledgerwatch/erigon/turbo/jsonrpc" - "github.com/ledgerwatch/erigon/turbo/silkworm" - "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" - "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" - "github.com/holiman/uint256" "github.com/ledgerwatch/log/v3" "golang.org/x/exp/slices" @@ -70,41 +41,52 @@ import ( "google.golang.org/protobuf/types/known/emptypb" "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/erigon-lib/chain/networkname" + "github.com/ledgerwatch/erigon-lib/chain/snapcfg" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/datadir" + "github.com/ledgerwatch/erigon-lib/diagnostics" "github.com/ledgerwatch/erigon-lib/direct" - downloader3 "github.com/ledgerwatch/erigon-lib/downloader" + "github.com/ledgerwatch/erigon-lib/downloader" "github.com/ledgerwatch/erigon-lib/downloader/downloadercfg" + 
"github.com/ledgerwatch/erigon-lib/downloader/downloadergrpc" proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader" "github.com/ledgerwatch/erigon-lib/gointerfaces/grpcutil" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" + rpcsentinel "github.com/ledgerwatch/erigon-lib/gointerfaces/sentinel" proto_sentry "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" txpool_proto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" prototypes "github.com/ledgerwatch/erigon-lib/gointerfaces/types" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/remotedbserver" libstate "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon-lib/txpool" "github.com/ledgerwatch/erigon-lib/txpool/txpooluitl" types2 "github.com/ledgerwatch/erigon-lib/types" - + "github.com/ledgerwatch/erigon-lib/wrap" + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/fork" + "github.com/ledgerwatch/erigon/cl/persistence" + "github.com/ledgerwatch/erigon/cl/persistence/db_config" + "github.com/ledgerwatch/erigon/cl/persistence/format/snapshot_format/getters" + clcore "github.com/ledgerwatch/erigon/cl/phase1/core" + "github.com/ledgerwatch/erigon/cl/phase1/execution_client" + "github.com/ledgerwatch/erigon/cl/sentinel" + "github.com/ledgerwatch/erigon/cl/sentinel/service" "github.com/ledgerwatch/erigon/cmd/caplin/caplin1" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/cli" "github.com/ledgerwatch/erigon/common/debug" - - rpcsentinel "github.com/ledgerwatch/erigon-lib/gointerfaces/sentinel" "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/consensus/bor" - "github.com/ledgerwatch/erigon/consensus/bor/finality/flags" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall" - "github.com/ledgerwatch/erigon/consensus/bor/heimdallgrpc" "github.com/ledgerwatch/erigon/consensus/clique" "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/consensus/merge" "github.com/ledgerwatch/erigon/consensus/misc" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/rawdb/blockio" "github.com/ledgerwatch/erigon/core/state/temporal" "github.com/ledgerwatch/erigon/core/systemcontracts" "github.com/ledgerwatch/erigon/core/types" @@ -117,14 +99,31 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/ethdb/privateapi" + "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/ethstats" "github.com/ledgerwatch/erigon/node" "github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/p2p/enode" + "github.com/ledgerwatch/erigon/p2p/sentry" + "github.com/ledgerwatch/erigon/p2p/sentry/sentry_multi_client" "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/polygon/bor" + "github.com/ledgerwatch/erigon/polygon/bor/finality/flags" + "github.com/ledgerwatch/erigon/polygon/bor/valset" + "github.com/ledgerwatch/erigon/polygon/heimdall" "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/builder" + "github.com/ledgerwatch/erigon/turbo/engineapi" + "github.com/ledgerwatch/erigon/turbo/engineapi/engine_block_downloader" + "github.com/ledgerwatch/erigon/turbo/engineapi/engine_helpers" + "github.com/ledgerwatch/erigon/turbo/execution/eth1" + 
"github.com/ledgerwatch/erigon/turbo/execution/eth1/eth1_chain_reader.go" + "github.com/ledgerwatch/erigon/turbo/jsonrpc" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/shards" + "github.com/ledgerwatch/erigon/turbo/silkworm" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" stages2 "github.com/ledgerwatch/erigon/turbo/stages" "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" ) @@ -195,7 +194,7 @@ type Ethereum struct { txPoolGrpcServer txpool_proto.TxpoolServer notifyMiningAboutNewTxs chan struct{} forkValidator *engine_helpers.ForkValidator - downloader *downloader3.Downloader + downloader *downloader.Downloader agg *libstate.AggregatorV3 blockSnapshots *freezeblocks.RoSnapshots @@ -329,10 +328,11 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger log.Warn("Optimism CanyonTime has not been set") } } + snapshotVersion := snapcfg.KnownCfg(chainConfig.ChainName, 0).Version // Check if we have an already initialized chain and fall back to // that if so. Otherwise we need to generate a new genesis spec. - blockReader, blockWriter, allSnapshots, agg, err := setUpBlockReader(ctx, chainKv, config.Dirs, config.Snapshot, config.HistoryV3, chainConfig.Bor != nil, logger) + blockReader, blockWriter, allSnapshots, allBorSnapshots, agg, err := setUpBlockReader(ctx, chainKv, config.Dirs, snapshotVersion, config.Snapshot, config.HistoryV3, chainConfig.Bor != nil, logger) if err != nil { return nil, err } @@ -350,7 +350,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger return nil, err } - kvRPC := remotedbserver.NewKvServer(ctx, backend.chainDB, allSnapshots, agg, logger) + kvRPC := remotedbserver.NewKvServer(ctx, backend.chainDB, allSnapshots, allBorSnapshots, agg, logger) backend.notifications.StateChangesConsumer = kvRPC backend.kvRPC = kvRPC @@ -517,18 +517,14 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger } else if chainConfig.Aura != nil { consensusConfig = &config.Aura } else if chainConfig.Bor != nil { - consensusConfig = &config.Bor + consensusConfig = chainConfig.Bor } else { consensusConfig = &config.Ethash } - var heimdallClient heimdall.IHeimdallClient + var heimdallClient heimdall.HeimdallClient if chainConfig.Bor != nil { if !config.WithoutHeimdall { - if config.HeimdallgRPCAddress != "" { - heimdallClient = heimdallgrpc.NewHeimdallGRPCClient(config.HeimdallgRPCAddress, logger) - } else { - heimdallClient = heimdall.NewHeimdallClient(config.HeimdallURL, logger) - } + heimdallClient = heimdall.NewHeimdallClient(config.HeimdallURL, logger) } flags.Milestone = config.WithHeimdallMilestones @@ -536,20 +532,20 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger backend.engine = ethconsensusconfig.CreateConsensusEngine(ctx, stack.Config(), chainConfig, consensusConfig, config.Miner.Notify, config.Miner.Noverify, heimdallClient, config.WithoutHeimdall, blockReader, false /* readonly */, logger) - inMemoryExecution := func(batch kv.RwTx, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody, + inMemoryExecution := func(txc wrap.TxContainer, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody, notifications *shards.Notifications) error { terseLogger := log.New() 
terseLogger.SetHandler(log.LvlFilterHandler(log.LvlWarn, log.StderrHandler)) // Needs its own notifications to not update RPC daemon and txpool about pending blocks stateSync := stages2.NewInMemoryExecution(backend.sentryCtx, backend.chainDB, config, backend.sentriesClient, dirs, notifications, blockReader, blockWriter, backend.agg, backend.silkworm, terseLogger) - chainReader := stagedsync.NewChainReaderImpl(chainConfig, batch, blockReader, logger) + chainReader := stagedsync.NewChainReaderImpl(chainConfig, txc.Tx, blockReader, logger) // We start the mining step - if err := stages2.StateStep(ctx, chainReader, backend.engine, batch, backend.blockWriter, stateSync, backend.sentriesClient.Bd, header, body, unwindPoint, headersChain, bodiesChain, config.HistoryV3); err != nil { + if err := stages2.StateStep(ctx, chainReader, backend.engine, txc, backend.blockWriter, stateSync, backend.sentriesClient.Bd, header, body, unwindPoint, headersChain, bodiesChain, config.HistoryV3); err != nil { logger.Warn("Could not validate block", "err", err) return err } - progress, err := stages.GetStageProgress(batch, stages.IntermediateHashes) + progress, err := stages.GetStageProgress(txc.Tx, stages.IntermediateHashes) if err != nil { return err } @@ -632,8 +628,11 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger backend.newTxs = make(chan types2.Announcements, 1024) //defer close(newTxs) backend.txPoolDB, backend.txPool, backend.txPoolFetch, backend.txPoolSend, backend.txPoolGrpcServer, err = txpooluitl.AllComponents( - ctx, config.TxPool, kvcache.NewDummy(), backend.newTxs, backend.chainDB, backend.sentriesClient.Sentries(), stateDiffClient, logger, + ctx, config.TxPool, kvcache.NewDummy(), backend.newTxs, backend.chainDB, backend.sentriesClient.Sentries(), stateDiffClient, misc.Eip1559FeeCalculator, logger, ) + // TODO(jky) this is a bit of a hack, and should probably be passed as a + // parameter through the AllComponents call above + backend.txPool.SetInitialBlockGasLimit(config.Miner.GasLimit) if err != nil { return nil, err } @@ -646,7 +645,6 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger miner := stagedsync.NewMiningState(&config.Miner) backend.pendingBlocks = miner.PendingResultCh - backend.minedBlocks = miner.MiningResultCh var ( snapDb kv.RwDB @@ -660,9 +658,10 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger } // proof-of-work mining mining := stagedsync.New( + config.Sync, stagedsync.MiningStages(backend.sentryCtx, stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miner, *backend.chainConfig, backend.engine, backend.txPoolDB, nil, tmpdir, backend.blockReader), - stagedsync.StageBorHeimdallCfg(backend.chainDB, snapDb, miner, *backend.chainConfig, heimdallClient, backend.blockReader, nil, nil, recents, signatures), + stagedsync.StageBorHeimdallCfg(backend.chainDB, snapDb, miner, *backend.chainConfig, heimdallClient, backend.blockReader, nil, nil, nil, recents, signatures), stagedsync.StageMiningExecCfg(backend.chainDB, miner, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, nil, 0, backend.txPool, backend.txPoolDB, blockReader), stagedsync.StageHashStateCfg(backend.chainDB, dirs, config.HistoryV3), stagedsync.StageTrieCfg(backend.chainDB, false, true, true, tmpdir, blockReader, nil, config.HistoryV3, backend.agg), @@ -680,9 +679,10 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger miningStatePos := 
stagedsync.NewProposingState(&config.Miner) miningStatePos.MiningConfig.Etherbase = param.SuggestedFeeRecipient proposingSync := stagedsync.New( + config.Sync, stagedsync.MiningStages(backend.sentryCtx, stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miningStatePos, *backend.chainConfig, backend.engine, backend.txPoolDB, param, tmpdir, backend.blockReader), - stagedsync.StageBorHeimdallCfg(backend.chainDB, snapDb, miningStatePos, *backend.chainConfig, heimdallClient, backend.blockReader, nil, nil, recents, signatures), + stagedsync.StageBorHeimdallCfg(backend.chainDB, snapDb, miningStatePos, *backend.chainConfig, heimdallClient, backend.blockReader, nil, nil, nil, recents, signatures), stagedsync.StageMiningExecCfg(backend.chainDB, miningStatePos, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, interrupt, param.PayloadId, backend.txPool, backend.txPoolDB, blockReader), stagedsync.StageHashStateCfg(backend.chainDB, dirs, config.HistoryV3), stagedsync.StageTrieCfg(backend.chainDB, false, true, true, tmpdir, blockReader, nil, config.HistoryV3, backend.agg), @@ -691,7 +691,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger logger) // We start the mining step log.Debug("Starting assembleBlockPOS mining step", "payloadId", param.PayloadId) - if err := stages2.MiningStep(ctx, backend.chainDB, proposingSync, tmpdir); err != nil { + if err := stages2.MiningStep(ctx, backend.chainDB, proposingSync, tmpdir, logger); err != nil { return nil, err } log.Debug("Finished assembleBlockPOS mining step", "payloadId", param.PayloadId) @@ -737,23 +737,6 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger // 1) Hive tests require us to do so and starting it from eth_sendRawTransaction is not viable as we do not have enough data // to initialize it properly. // 2) we cannot propose for block 1 regardless.
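The startup goroutine deleted in the next hunk seeded the state-change accumulator with the next block's base fee (via misc.CalcBaseFee) and blob fee before any block arrived; with the pool now recovering fees itself in fromDB, that warm-up goes away. For reference, a simplified sketch of the EIP-1559 base-fee step such a calculation performs, assuming mainnet constants and ignoring fork-specific adjustments:

package main

import "fmt"

const elasticity = 2  // EIP-1559 elasticity multiplier
const denominator = 8 // base fee max change denominator

// nextBaseFee is a simplified EIP-1559 step: base fee rises when the parent
// block used more than the target gas and falls when it used less, changing
// by at most 1/denominator per block.
func nextBaseFee(parentBaseFee, parentGasUsed, parentGasLimit uint64) uint64 {
	target := parentGasLimit / elasticity
	switch {
	case parentGasUsed == target:
		return parentBaseFee
	case parentGasUsed > target:
		delta := parentBaseFee * (parentGasUsed - target) / target / denominator
		if delta < 1 {
			delta = 1
		}
		return parentBaseFee + delta
	default:
		delta := parentBaseFee * (target - parentGasUsed) / target / denominator
		return parentBaseFee - delta
	}
}

func main() {
	fmt.Println(nextBaseFee(1_000_000_000, 15_000_000, 30_000_000)) // at target: unchanged
	fmt.Println(nextBaseFee(1_000_000_000, 30_000_000, 30_000_000)) // full block: +12.5%
}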
- go func() { - time.Sleep(10 * time.Millisecond) - baseFee := uint64(0) - if currentBlock.BaseFee() != nil { - baseFee = misc.CalcBaseFee(chainConfig, currentBlock.Header(), currentBlock.Time()+1).Uint64() - } - blobFee := chainConfig.GetMinBlobGasPrice() - if currentBlock.Header().ExcessBlobGas != nil { - excessBlobGas := misc.CalcExcessBlobGas(chainConfig, currentBlock.Header()) - b, err := misc.GetBlobGasPrice(chainConfig, excessBlobGas) - if err == nil { - blobFee = b.Uint64() - } - } - backend.notifications.Accumulator.StartChange(currentBlock.NumberU64(), currentBlock.Hash(), nil, false) - backend.notifications.Accumulator.SendAndReset(ctx, backend.notifications.StateChangesConsumer, baseFee, blobFee, currentBlock.GasLimit(), 0) - }() if !config.DeprecatedTxPool.Disable { backend.txPoolFetch.ConnectCore() @@ -763,8 +746,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger newTxsBroadcaster = casted.NewSlotsStreams } go txpool.MainLoop(backend.sentryCtx, - backend.txPoolDB, backend.chainDB, - backend.txPool, backend.newTxs, backend.txPoolSend, newTxsBroadcaster, + backend.txPoolDB, backend.txPool, backend.newTxs, backend.txPoolSend, newTxsBroadcaster, func() { select { case backend.notifyMiningAboutNewTxs <- struct{}{}: @@ -772,6 +754,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger } }) } + go func() { defer debug.LogPanic() for { @@ -810,7 +793,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger } }() - if err := backend.StartMining(context.Background(), backend.chainDB, mining, backend.config.Miner, backend.gasPrice, backend.sentriesClient.Hd.QuitPoWMining, tmpdir); err != nil { + if err := backend.StartMining(context.Background(), backend.chainDB, stateDiffClient, mining, miner, backend.gasPrice, backend.sentriesClient.Hd.QuitPoWMining, tmpdir, logger); err != nil { return nil, err } @@ -820,13 +803,13 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger blockReader, blockRetire, backend.agg, backend.silkworm, backend.forkValidator, heimdallClient, recents, signatures, logger) backend.syncUnwindOrder = stagedsync.DefaultUnwindOrder backend.syncPruneOrder = stagedsync.DefaultPruneOrder - backend.stagedSync = stagedsync.New(backend.syncStages, backend.syncUnwindOrder, backend.syncPruneOrder, logger) + backend.stagedSync = stagedsync.New(config.Sync, backend.syncStages, backend.syncUnwindOrder, backend.syncPruneOrder, logger) hook := stages2.NewHook(backend.sentryCtx, backend.chainDB, backend.notifications, backend.stagedSync, backend.blockReader, backend.chainConfig, backend.logger, backend.sentriesClient.UpdateHead) checkStateRoot := true - pipelineStages := stages2.NewPipelineStages(ctx, chainKv, config, backend.sentriesClient, backend.notifications, backend.downloaderClient, blockReader, blockRetire, backend.agg, backend.silkworm, backend.forkValidator, logger, checkStateRoot) - backend.pipelineStagedSync = stagedsync.New(pipelineStages, stagedsync.PipelineUnwindOrder, stagedsync.PipelinePruneOrder, logger) + pipelineStages := stages2.NewPipelineStages(ctx, chainKv, config, stack.Config().P2P, backend.sentriesClient, backend.notifications, backend.downloaderClient, blockReader, blockRetire, backend.agg, backend.silkworm, backend.forkValidator, logger, checkStateRoot) + backend.pipelineStagedSync = stagedsync.New(config.Sync, pipelineStages, stagedsync.PipelineUnwindOrder, stagedsync.PipelinePruneOrder, logger) backend.eth1ExecutionServer = 
eth1.NewEthereumExecutionModule(blockReader, chainKv, backend.pipelineStagedSync, backend.forkValidator, chainConfig, assembleBlockPOS, hook, backend.notifications.Accumulator, backend.notifications.StateChangesConsumer, logger, backend.engine, config.HistoryV3) executionRpc := direct.NewExecutionClientDirect(backend.eth1ExecutionServer) engineBackendRPC := engineapi.NewEngineServer( @@ -907,7 +890,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger go func() { eth1Getter := getters.NewExecutionSnapshotReader(ctx, beaconCfg, blockReader, backend.chainDB) - if err := caplin1.RunCaplinPhase1(ctx, client, engine, beaconCfg, genesisCfg, state, nil, dirs, config.BeaconRouter, eth1Getter, backend.downloaderClient, config.CaplinConfig.Backfilling, config.CaplinConfig.Archive, historyDB, indiciesDB); err != nil { + if err := caplin1.RunCaplinPhase1(ctx, client, engine, beaconCfg, genesisCfg, state, nil, dirs, snapshotVersion, config.BeaconRouter, eth1Getter, backend.downloaderClient, config.CaplinConfig.Backfilling, config.CaplinConfig.Archive, historyDB, indiciesDB); err != nil { logger.Error("could not start caplin", "err", err) } ctxCancel() @@ -924,7 +907,9 @@ func (s *Ethereum) Init(stack *node.Node, config *ethconfig.Config) error { chainKv := s.chainDB var err error - s.sentriesClient.Hd.StartPoSDownloader(s.sentryCtx, s.sentriesClient.SendHeaderRequest, s.sentriesClient.Penalize) + if config.Genesis.Config.Bor == nil { + s.sentriesClient.Hd.StartPoSDownloader(s.sentryCtx, s.sentriesClient.SendHeaderRequest, s.sentriesClient.Penalize) + } emptyBadHash := config.BadBlockHash == libcommon.Hash{} if !emptyBadHash { @@ -977,7 +962,9 @@ func (s *Ethereum) Init(stack *node.Node, config *ethconfig.Config) error { }() } - go s.engineBackendRPC.Start(&httpRpcCfg, s.chainDB, s.blockReader, ff, stateCache, s.agg, s.engine, ethRpcClient, txPoolRpcClient, miningRpcClient, s.seqRPCService, s.historicalRPCService) + if config.Genesis.Config.Bor == nil { + go s.engineBackendRPC.Start(&httpRpcCfg, s.chainDB, s.blockReader, ff, stateCache, s.agg, s.engine, ethRpcClient, txPoolRpcClient, miningRpcClient, s.seqRPCService, s.historicalRPCService) + } // Register the backend on the node stack.RegisterLifecycle(s) @@ -1040,7 +1027,7 @@ func (s *Ethereum) shouldPreserve(block *types.Block) bool { //nolint // StartMining starts the miner with the given number of CPU threads. If mining // is already running, this method adjusts the number of threads allowed to be used // and updates the minimum price required by the transaction pool.
-func (s *Ethereum) StartMining(ctx context.Context, db kv.RwDB, mining *stagedsync.Sync, cfg params.MiningConfig, gasPrice *uint256.Int, quitCh chan struct{}, tmpDir string) error { +func (s *Ethereum) StartMining(ctx context.Context, db kv.RwDB, stateDiffClient *direct.StateDiffClientDirect, mining *stagedsync.Sync, miner stagedsync.MiningState, gasPrice *uint256.Int, quitCh chan struct{}, tmpDir string, logger log.Logger) error { var borcfg *bor.Bor if b, ok := s.engine.(*bor.Bor); ok { @@ -1054,7 +1041,7 @@ func (s *Ethereum) StartMining(ctx context.Context, db kv.RwDB, mining *stagedsy } //if borcfg == nil { - if !cfg.Enabled { + if !miner.MiningConfig.Enabled { return nil } //} @@ -1067,15 +1054,28 @@ func (s *Ethereum) StartMining(ctx context.Context, db kv.RwDB, mining *stagedsy } if borcfg != nil { - if cfg.Enabled { - if cfg.SigKey == nil { + if miner.MiningConfig.Enabled { + if miner.MiningConfig.SigKey == nil { s.logger.Error("Etherbase account unavailable locally", "err", err) return fmt.Errorf("signer missing: %w", err) } borcfg.Authorize(eb, func(_ libcommon.Address, mimeType string, message []byte) ([]byte, error) { - return crypto.Sign(crypto.Keccak256(message), cfg.SigKey) + return crypto.Sign(crypto.Keccak256(message), miner.MiningConfig.SigKey) }) + + if !s.config.WithoutHeimdall { + err := stagedsync.FetchSpanZeroForMiningIfNeeded( + ctx, + s.chainDB, + s.blockReader, + borcfg.HeimdallClient, + logger, + ) + if err != nil { + return err + } + } } else { // for the bor dev network without heimdall we need the authorizer to be set otherwise there is no // validator defined in the bor validator set and non mining nodes will reject all blocks @@ -1083,7 +1083,7 @@ func (s *Ethereum) StartMining(ctx context.Context, db kv.RwDB, mining *stagedsy if s.chainConfig.ChainName == networkname.BorDevnetChainName && s.config.WithoutHeimdall { borcfg.Authorize(eb, func(addr libcommon.Address, _ string, _ []byte) ([]byte, error) { - return nil, &bor.UnauthorizedSignerError{Number: 0, Signer: addr.Bytes()} + return nil, &valset.UnauthorizedSignerError{Number: 0, Signer: addr.Bytes()} }) } @@ -1100,58 +1100,86 @@ func (s *Ethereum) StartMining(ctx context.Context, db kv.RwDB, mining *stagedsy } } if clq != nil { - if cfg.SigKey == nil { + if miner.MiningConfig.SigKey == nil { s.logger.Error("Etherbase account unavailable locally", "err", err) return fmt.Errorf("signer missing: %w", err) } clq.Authorize(eb, func(_ libcommon.Address, mimeType string, message []byte) ([]byte, error) { - return crypto.Sign(crypto.Keccak256(message), cfg.SigKey) + return crypto.Sign(crypto.Keccak256(message), miner.MiningConfig.SigKey) }) } + streamCtx, streamCancel := context.WithCancel(ctx) + stream, err := stateDiffClient.StateChanges(streamCtx, &remote.StateChangeRequest{WithStorage: false, WithTransactions: true}, grpc.WaitForReady(true)) + + if err != nil { + streamCancel() + return err + } + + stateChangeCh := make(chan *remote.StateChange) + + go func() { + for req, err := stream.Recv(); ; req, err = stream.Recv() { + if err == nil { + for _, change := range req.ChangeBatch { + stateChangeCh <- change + } + } + } + }() + go func() { defer debug.LogPanic() defer close(s.waitForMiningStop) + defer streamCancel() - mineEvery := time.NewTicker(cfg.Recommit) + mineEvery := time.NewTicker(miner.MiningConfig.Recommit) defer mineEvery.Stop() - // Listen on a new head subscription. This allows us to maintain the block time by - // triggering mining after the block is passed through all stages. 
- newHeadCh, closeNewHeadCh := s.notifications.Events.AddHeaderSubscription() - defer closeNewHeadCh() - s.logger.Info("Starting to mine", "etherbase", eb) - var works bool + var working bool + var waiting atomic.Bool + hasWork := true // Start mining immediately errc := make(chan error, 1) + workCtx, workCancel := context.WithCancel(ctx) + defer workCancel() + for { // Only reset if some work was done previously as we'd like to rely // on the `miner.recommit` as backup. if hasWork { - mineEvery.Reset(cfg.Recommit) + mineEvery.Reset(miner.MiningConfig.Recommit) } - // Only check for case if you're already mining (i.e. works = true) and + // Only check for case if you're already mining (i.e. working = true) and // waiting for error or you don't have any work yet (i.e. hasWork = false). - if works || !hasWork { + if working || !hasWork { select { - case <-newHeadCh: + case stateChanges := <-stateChangeCh: + block := stateChanges.BlockHeight + s.logger.Debug("Start mining based on previous block", "block", block) + // TODO - can do mining clean up here as we have previous + // block info in the state channel hasWork = true + case <-s.notifyMiningAboutNewTxs: // Skip mining based on new tx notif for bor consensus hasWork = s.chainConfig.Bor == nil if hasWork { - s.logger.Debug("Start mining new block based on txpool notif") + s.logger.Debug("Start mining based on txpool notif") } case <-mineEvery.C: - s.logger.Debug("Start mining new block based on miner.recommit") - hasWork = true + if !(working || waiting.Load()) { + s.logger.Debug("Start mining based on miner.recommit", "duration", miner.MiningConfig.Recommit) + } + hasWork = !(working || waiting.Load()) case err := <-errc: - works = false + working = false hasWork = false if errors.Is(err, libcommon.ErrStopped) { return @@ -1164,11 +1192,36 @@ func (s *Ethereum) StartMining(ctx context.Context, db kv.RwDB, mining *stagedsy } } - if !works && hasWork { - works = true + if !working && hasWork { + working = true hasWork = false - mineEvery.Reset(cfg.Recommit) - go func() { errc <- stages2.MiningStep(ctx, db, mining, tmpDir) }() + mineEvery.Reset(miner.MiningConfig.Recommit) + go func() { + err := stages2.MiningStep(ctx, db, mining, tmpDir, logger) + + waiting.Store(true) + defer waiting.Store(false) + + errc <- err + + if err != nil { + return + } + + for { + select { + case block := <-miner.MiningResultCh: + if block != nil { + s.logger.Debug("Mined block", "block", block.Number()) + s.minedBlocks <- block + } + return + case <-workCtx.Done(): + errc <- workCtx.Err() + return + } + } + }() } } }() @@ -1232,13 +1285,17 @@ func (s *Ethereum) setUpSnapDownloader(ctx context.Context, downloaderCfg *downl s.downloaderClient, err = downloadergrpc.NewClient(ctx, s.config.Snapshot.DownloaderAddr) } else { // start embedded Downloader + if uploadFs := s.config.Sync.UploadLocation; len(uploadFs) > 0 { + downloaderCfg.AddTorrentsFromDisk = false + } + discover := true - s.downloader, err = downloader3.New(ctx, downloaderCfg, s.config.Dirs, s.logger, log.LvlDebug, discover) + s.downloader, err = downloader.New(ctx, downloaderCfg, s.config.Dirs, s.logger, log.LvlDebug, discover) if err != nil { return err } s.downloader.MainLoopInBackground(true) - bittorrentServer, err := downloader3.NewGrpcServer(s.downloader) + bittorrentServer, err := downloader.NewGrpcServer(s.downloader) if err != nil { return fmt.Errorf("new server: %w", err) } @@ -1263,14 +1320,21 @@ func (s *Ethereum) setUpSnapDownloader(ctx context.Context, downloaderCfg *downl return err } 
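The reworked StartMining loop above swaps the header subscription for the remote state-change stream and tracks two flags: working, set while a mining step executes, and waiting, set while the goroutine still waits on MiningResultCh, with recommit ticks suppressed while either is up. A condensed, runnable sketch of that scheduling shape; the channels and step function here are stand-ins, not the real Erigon APIs:

package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"
)

// miningLoop condenses the scheduling above: work is retriggered by state
// changes, txpool notifications, or the recommit ticker, and the ticker is
// ignored while a step is running (working) or its result is still being
// collected (waiting).
func miningLoop(ctx context.Context, stateCh, txCh <-chan struct{}, step func() error) {
	recommit := time.NewTicker(50 * time.Millisecond)
	defer recommit.Stop()

	var working bool
	var waiting atomic.Bool
	hasWork := true // start mining immediately
	errc := make(chan error, 1)

	for {
		if working || !hasWork {
			select {
			case <-stateCh:
				hasWork = true // previous block executed: build on top of it
			case <-txCh:
				hasWork = true // new pool content may produce a better block
			case <-recommit.C:
				hasWork = !(working || waiting.Load())
			case err := <-errc:
				working, hasWork = false, false
				if err != nil {
					fmt.Println("mining step failed:", err)
				}
			case <-ctx.Done():
				return
			}
		}
		if !working && hasWork {
			working, hasWork = true, false
			go func() {
				waiting.Store(true) // the real loop keeps this set while awaiting MiningResultCh
				defer waiting.Store(false)
				errc <- step()
			}()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()
	miningLoop(ctx, make(chan struct{}), make(chan struct{}), func() error {
		fmt.Println("built a block")
		return nil
	})
}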
-func setUpBlockReader(ctx context.Context, db kv.RwDB, dirs datadir.Dirs, snConfig ethconfig.BlocksFreezing, histV3 bool, isBor bool, logger log.Logger) (services.FullBlockReader, *blockio.BlockWriter, *freezeblocks.RoSnapshots, *libstate.AggregatorV3, error) { - allSnapshots := freezeblocks.NewRoSnapshots(snConfig, dirs.Snap, logger) +func setUpBlockReader(ctx context.Context, db kv.RwDB, dirs datadir.Dirs, snapshotVersion uint8, snConfig ethconfig.BlocksFreezing, histV3 bool, isBor bool, logger log.Logger) (services.FullBlockReader, *blockio.BlockWriter, *freezeblocks.RoSnapshots, *freezeblocks.BorRoSnapshots, *libstate.AggregatorV3, error) { + allSnapshots := freezeblocks.NewRoSnapshots(snConfig, dirs.Snap, snapshotVersion, logger) + var allBorSnapshots *freezeblocks.BorRoSnapshots if isBor { - allBorSnapshots = freezeblocks.NewBorRoSnapshots(snConfig, dirs.Snap, logger) + allBorSnapshots = freezeblocks.NewBorRoSnapshots(snConfig, dirs.Snap, snapshotVersion, logger) } + var err error - if !snConfig.NoDownloader { + if snConfig.NoDownloader { + allSnapshots.ReopenFolder() + if isBor { + allBorSnapshots.ReopenFolder() + } + } else { allSnapshots.OptimisticalyReopenWithDB(db) if isBor { allBorSnapshots.OptimisticalyReopenWithDB(db) @@ -1281,12 +1345,12 @@ func setUpBlockReader(ctx context.Context, db kv.RwDB, dirs datadir.Dirs, snConf agg, err := libstate.NewAggregatorV3(ctx, dirs.SnapHistory, dirs.Tmp, ethconfig.HistoryV3AggregationStep, db, logger) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, nil, err } if err = agg.OpenFolder(); err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, nil, err } - return blockReader, blockWriter, allSnapshots, agg, nil + return blockReader, blockWriter, allSnapshots, allBorSnapshots, agg, nil } func (s *Ethereum) Peers(ctx context.Context) (*remote.PeersReply, error) { diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index c588fa4f818..ad3245a71e0 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -27,13 +27,13 @@ import ( "time" "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/chain/networkname" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/downloader/downloadercfg" "github.com/ledgerwatch/erigon-lib/txpool/txpoolcfg" - "github.com/ledgerwatch/erigon/cl/beacon/beacon_router_configuration" "github.com/ledgerwatch/erigon/cl/clparams" "github.com/ledgerwatch/erigon/consensus/ethash/ethashcfg" @@ -42,6 +42,7 @@ import ( "github.com/ledgerwatch/erigon/eth/gasprice/gaspricecfg" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/rpc" ) // AggregationStep number of transactions in smallest static file @@ -77,6 +78,7 @@ var Defaults = Config{ ReconWorkerCount: estimate.ReconstituteState.Workers(), BodyCacheLimit: 256 * 1024 * 1024, BodyDownloadTimeoutSeconds: 2, + PruneLimit: 100, }, Ethash: ethashcfg.Config{ CachesInMem: 2, @@ -207,7 +209,6 @@ type Config struct { Clique params.ConsensusSnapshotConfig Aura chain.AuRaConfig - Bor chain.BorConfig // Transaction pool options DeprecatedTxPool DeprecatedTxPoolConfig @@ -228,9 +229,6 @@ type Config struct { // New DB and Snapshots format of history allows: parallel blocks execution, get state as of given transaction without executing whole block.", HistoryV3 bool - // gRPC Address to connect to Heimdall node - HeimdallgRPCAddress
string - // URL to connect to Heimdall node HeimdallURL string @@ -273,6 +271,13 @@ type Sync struct { BodyCacheLimit datasize.ByteSize BodyDownloadTimeoutSeconds int // TODO: change to duration + PruneLimit int // the maximum number of records to delete from the DB during pruning + BreakAfterStage string + LoopBlockLimit uint + + UploadLocation string + UploadFrom rpc.BlockNumber + FrozenBlockLimit uint64 } // Chains where snapshots are enabled by default diff --git a/eth/ethconsensusconfig/config.go b/eth/ethconsensusconfig/config.go index 72ff681393e..8050c5161ba 100644 --- a/eth/ethconsensusconfig/config.go +++ b/eth/ethconsensusconfig/config.go @@ -8,14 +8,11 @@ import ( "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/erigon/polygon/bor/borcfg" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/aura" - "github.com/ledgerwatch/erigon/consensus/bor" - "github.com/ledgerwatch/erigon/consensus/bor/contract" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" "github.com/ledgerwatch/erigon/consensus/clique" "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/consensus/ethash/ethashcfg" @@ -23,11 +20,13 @@ import ( "github.com/ledgerwatch/erigon/node" "github.com/ledgerwatch/erigon/node/nodecfg" "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/polygon/bor" + "github.com/ledgerwatch/erigon/polygon/heimdall" "github.com/ledgerwatch/erigon/turbo/services" ) func CreateConsensusEngine(ctx context.Context, nodeConfig *nodecfg.Config, chainConfig *chain.Config, config interface{}, notify []string, noVerify bool, - heimdallClient heimdall.IHeimdallClient, withoutHeimdall bool, blockReader services.FullBlockReader, readonly bool, + heimdallClient heimdall.HeimdallClient, withoutHeimdall bool, blockReader services.FullBlockReader, readonly bool, logger log.Logger, ) consensus.Engine { var eng consensus.Engine @@ -95,14 +94,14 @@ func CreateConsensusEngine(ctx context.Context, nodeConfig *nodecfg.Config, chai panic(err) } } - case *chain.BorConfig: + case *borcfg.BorConfig: // If Matic bor consensus is requested, set it up // In order to pass the ethereum transaction tests, we need to set the burn contract which is in the bor config // Then, bor != nil will also be enabled for ethash and clique. Only enable Bor for real if there is a validator contract present.
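Among the Sync knobs added above, PruneLimit (default 100) caps how many records one prune pass may delete, which is assumed to keep pruning incremental rather than letting a large backlog stall the sync loop. A toy illustration of that batching idea, not the actual pruning code:

package main

import "fmt"

// pruneBatch deletes at most limit entries per invocation, the behaviour the
// PruneLimit knob is assumed to cap, so one prune cycle cannot monopolise
// the loop on a huge backlog.
func pruneBatch(keys []uint64, limit int) (deleted, remaining []uint64) {
	if limit > len(keys) {
		limit = len(keys)
	}
	return keys[:limit], keys[limit:]
}

func main() {
	backlog := make([]uint64, 0, 250)
	for i := uint64(0); i < 250; i++ {
		backlog = append(backlog, i)
	}
	for len(backlog) > 0 {
		var done []uint64
		done, backlog = pruneBatch(backlog, 100)
		fmt.Printf("pruned %d, %d left\n", len(done), len(backlog))
	}
}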
- if chainConfig.Bor != nil && chainConfig.Bor.ValidatorContract != "" { - genesisContractsClient := contract.NewGenesisContractsClient(chainConfig, chainConfig.Bor.ValidatorContract, chainConfig.Bor.StateReceiverContract, logger) + if chainConfig.Bor != nil && consensusCfg.ValidatorContract != "" { + genesisContractsClient := bor.NewGenesisContractsClient(chainConfig, consensusCfg.ValidatorContract, consensusCfg.StateReceiverContract, logger) - spanner := span.NewChainSpanner(contract.ValidatorSet(), chainConfig, withoutHeimdall, logger) + spanner := bor.NewChainSpanner(bor.GenesisContractValidatorSetABI(), chainConfig, withoutHeimdall, logger) var err error var db kv.RwDB diff --git a/eth/integrity/e3_history_no_system_txs.go b/eth/integrity/e3_history_no_system_txs.go new file mode 100644 index 00000000000..cfcb305fa9a --- /dev/null +++ b/eth/integrity/e3_history_no_system_txs.go @@ -0,0 +1 @@ +package integrity diff --git a/eth/stagedsync/bor_heimdall_shared.go b/eth/stagedsync/bor_heimdall_shared.go new file mode 100644 index 00000000000..cbb213f775c --- /dev/null +++ b/eth/stagedsync/bor_heimdall_shared.go @@ -0,0 +1,313 @@ +package stagedsync + +import ( + "context" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "math/big" + "strconv" + "strings" + "time" + + "github.com/ledgerwatch/log/v3" + + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/polygon/bor" + "github.com/ledgerwatch/erigon/polygon/heimdall" + "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/erigon/turbo/services" +) + +var ( + ErrHeaderValidatorsLengthMismatch = errors.New("header validators length mismatch") + ErrHeaderValidatorsBytesMismatch = errors.New("header validators bytes mismatch") +) + +// LastSpanID TODO - move to block reader +func LastSpanID(tx kv.RwTx, blockReader services.FullBlockReader) (uint64, bool, error) { + sCursor, err := tx.Cursor(kv.BorSpans) + if err != nil { + return 0, false, err + } + + defer sCursor.Close() + k, _, err := sCursor.Last() + if err != nil { + return 0, false, err + } + + var lastSpanId uint64 + if k != nil { + lastSpanId = binary.BigEndian.Uint64(k) + } + + // TODO tidy this out when moving to block reader + type LastFrozen interface { + LastFrozenSpanID() uint64 + } + + snapshotLastSpanId := blockReader.(LastFrozen).LastFrozenSpanID() + if snapshotLastSpanId > lastSpanId { + return snapshotLastSpanId, true, nil + } + + return lastSpanId, k != nil, nil +} + +// LastStateSyncEventID TODO - move to block reader +func LastStateSyncEventID(tx kv.RwTx, blockReader services.FullBlockReader) (uint64, error) { + cursor, err := tx.Cursor(kv.BorEvents) + if err != nil { + return 0, err + } + + defer cursor.Close() + k, _, err := cursor.Last() + if err != nil { + return 0, err + } + + var lastEventId uint64 + if k != nil { + lastEventId = binary.BigEndian.Uint64(k) + } + + // TODO tidy this out when moving to block reader + type LastFrozen interface { + LastFrozenEventID() uint64 + } + + snapshotLastEventId := blockReader.(LastFrozen).LastFrozenEventID() + if snapshotLastEventId > lastEventId { + return snapshotLastEventId, nil + } + + return lastEventId, nil +} + +func FetchSpanZeroForMiningIfNeeded( + ctx context.Context, + db kv.RwDB, + blockReader services.FullBlockReader, + heimdallClient heimdall.HeimdallClient, + logger log.Logger, +) error { + return db.Update(ctx, func(tx kv.RwTx) error { + _, err := blockReader.Span(ctx, tx, 0) + if err == nil { + return err + } + + // TODO refactor 
to use errors.Is + if !strings.Contains(err.Error(), "not found") { + // unexpected error - surface it instead of silently treating the span as present + return err + } + + _, err = fetchAndWriteHeimdallSpan(ctx, 0, tx, heimdallClient, "FetchSpanZeroForMiningIfNeeded", logger) + return err + }) +} + +func fetchRequiredHeimdallSpansIfNeeded( + ctx context.Context, + toBlockNum uint64, + tx kv.RwTx, + cfg BorHeimdallCfg, + logPrefix string, + logger log.Logger, +) (uint64, error) { + requiredSpanID := bor.SpanIDAt(toBlockNum) + if bor.IsBlockInLastSprintOfSpan(toBlockNum, cfg.borConfig) { + requiredSpanID++ + } + + lastSpanID, exists, err := LastSpanID(tx, cfg.blockReader) + if err != nil { + return 0, err + } + + if exists && requiredSpanID <= lastSpanID { + return lastSpanID, nil + } + + var from uint64 + if lastSpanID > 0 { + from = lastSpanID + 1 + } // else fetch from span 0 + + logger.Info(fmt.Sprintf("[%s] Processing spans...", logPrefix), "from", from, "to", requiredSpanID) + for spanID := from; spanID <= requiredSpanID; spanID++ { + if _, err = fetchAndWriteHeimdallSpan(ctx, spanID, tx, cfg.heimdallClient, logPrefix, logger); err != nil { + return 0, err + } + } + + return requiredSpanID, err +} + +func fetchAndWriteHeimdallSpan( + ctx context.Context, + spanID uint64, + tx kv.RwTx, + heimdallClient heimdall.HeimdallClient, + logPrefix string, + logger log.Logger, +) (uint64, error) { + response, err := heimdallClient.Span(ctx, spanID) + if err != nil { + return 0, err + } + + spanBytes, err := json.Marshal(response) + if err != nil { + return 0, err + } + + var spanIDBytes [8]byte + binary.BigEndian.PutUint64(spanIDBytes[:], spanID) + if err = tx.Put(kv.BorSpans, spanIDBytes[:], spanBytes); err != nil { + return 0, err + } + + logger.Debug(fmt.Sprintf("[%s] Wrote span", logPrefix), "id", spanID) + return spanID, nil +} + +func fetchRequiredHeimdallStateSyncEventsIfNeeded( + ctx context.Context, + header *types.Header, + tx kv.RwTx, + cfg BorHeimdallCfg, + logPrefix string, + logger log.Logger, + lastStateSyncEventIDGetter func() (uint64, error), +) (uint64, int, time.Duration, error) { + lastStateSyncEventID, err := lastStateSyncEventIDGetter() + if err != nil { + return 0, 0, 0, err + } + + headerNum := header.Number.Uint64() + if headerNum%cfg.borConfig.CalculateSprintLength(headerNum) != 0 || headerNum == 0 { + // we fetch events only at beginning of each sprint + return lastStateSyncEventID, 0, 0, nil + } + + return fetchAndWriteHeimdallStateSyncEvents(ctx, header, lastStateSyncEventID, tx, cfg, logPrefix, logger) +} + +func fetchAndWriteHeimdallStateSyncEvents( + ctx context.Context, + header *types.Header, + lastStateSyncEventID uint64, + tx kv.RwTx, + cfg BorHeimdallCfg, + logPrefix string, + logger log.Logger, +) (uint64, int, time.Duration, error) { + fetchStart := time.Now() + config := cfg.borConfig + blockReader := cfg.blockReader + heimdallClient := cfg.heimdallClient + chainID := cfg.chainConfig.ChainID.String() + stateReceiverABI := cfg.stateReceiverABI + // Find out the latest eventId + var ( + from uint64 + to time.Time + ) + + blockNum := header.Number.Uint64() + + if config.IsIndore(blockNum) { + stateSyncDelay := config.CalculateStateSyncDelay(blockNum) + to = time.Unix(int64(header.Time-stateSyncDelay), 0) + } else { + pHeader, err := blockReader.HeaderByNumber(ctx, tx, blockNum-config.CalculateSprintLength(blockNum)) + if err != nil { + return lastStateSyncEventID, 0, time.Since(fetchStart), err + } + to = time.Unix(int64(pHeader.Time), 0) + } + + from = lastStateSyncEventID + 1 + + logger.Debug(
fmt.Sprintf("[%s] Fetching state updates from Heimdall", logPrefix), + "fromID", from, + "to", to.Format(time.RFC3339), + ) + + eventRecords, err := heimdallClient.StateSyncEvents(ctx, from, to.Unix()) + if err != nil { + return lastStateSyncEventID, 0, time.Since(fetchStart), err + } + + if config.OverrideStateSyncRecords != nil { + if val, ok := config.OverrideStateSyncRecords[strconv.FormatUint(blockNum, 10)]; ok { + eventRecords = eventRecords[0:val] + } + } + + const method = "commitState" + wroteIndex := false + for i, eventRecord := range eventRecords { + if eventRecord.ID <= lastStateSyncEventID { + continue + } + + if lastStateSyncEventID+1 != eventRecord.ID || eventRecord.ChainID != chainID || !eventRecord.Time.Before(to) { + return lastStateSyncEventID, i, time.Since(fetchStart), fmt.Errorf( + "invalid event record received blockNum=%d, eventId=%d (exp %d), chainId=%s (exp %s), time=%s (exp to %s)", + blockNum, eventRecord.ID, lastStateSyncEventID+1, eventRecord.ChainID, chainID, eventRecord.Time, to) + } + + eventRecordWithoutTime := eventRecord.BuildEventRecord() + + recordBytes, err := rlp.EncodeToBytes(eventRecordWithoutTime) + if err != nil { + return lastStateSyncEventID, i, time.Since(fetchStart), err + } + + data, err := stateReceiverABI.Pack(method, big.NewInt(eventRecord.Time.Unix()), recordBytes) + if err != nil { + logger.Error(fmt.Sprintf("[%s] Unable to pack tx for commitState", logPrefix), "err", err) + return lastStateSyncEventID, i, time.Since(fetchStart), err + } + + var eventIdBuf [8]byte + binary.BigEndian.PutUint64(eventIdBuf[:], eventRecord.ID) + if err = tx.Put(kv.BorEvents, eventIdBuf[:], data); err != nil { + return lastStateSyncEventID, i, time.Since(fetchStart), err + } + + if !wroteIndex { + var blockNumBuf [8]byte + binary.BigEndian.PutUint64(blockNumBuf[:], blockNum) + binary.BigEndian.PutUint64(eventIdBuf[:], eventRecord.ID) + if err = tx.Put(kv.BorEventNums, blockNumBuf[:], eventIdBuf[:]); err != nil { + return lastStateSyncEventID, i, time.Since(fetchStart), err + } + + wroteIndex = true + } + + lastStateSyncEventID++ + } + + return lastStateSyncEventID, len(eventRecords), time.Since(fetchStart), nil +} diff --git a/eth/stagedsync/chain_reader.go b/eth/stagedsync/chain_reader.go index 862cae5710a..5c2d75c4292 100644 --- a/eth/stagedsync/chain_reader.go +++ b/eth/stagedsync/chain_reader.go @@ -4,23 +4,23 @@ import ( "context" "math/big" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/rlp" - "github.com/ledgerwatch/erigon/turbo/services" - "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/erigon/turbo/services" ) -// Implements consensus.ChainReader +// ChainReader implements consensus.ChainReader type ChainReader struct { - Cfg chain.Config - + Cfg chain.Config Db kv.Getter BlockReader services.FullBlockReader + Logger log.Logger } // Config retrieves the blockchain's chain configuration.
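One recurring detail in the new bor_heimdall_shared.go code above: every table key (span ID, event ID, block number) is written as a fixed-width big-endian uint64, so the key-value store's lexicographic key ordering matches numeric ordering - that is what lets LastSpanID and LastStateSyncEventID find the highest ID with a single cursor.Last() call. A minimal sketch of the round trip, using only the standard library:

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeID turns a numeric ID into the fixed-width big-endian key form
// used for the kv.BorSpans / kv.BorEvents / kv.BorEventNums entries above.
func encodeID(id uint64) [8]byte {
	var key [8]byte
	binary.BigEndian.PutUint64(key[:], id)
	return key
}

// decodeID recovers the ID from a key, as LastSpanID does after cursor.Last().
func decodeID(key []byte) uint64 {
	return binary.BigEndian.Uint64(key)
}

func main() {
	k1, k2 := encodeID(255), encodeID(256)
	// Big-endian keys compare byte-wise in the same order as the numbers
	// (00...00ff < 00...0100), so the last key in a table is the highest ID.
	fmt.Printf("%x < %x\n", k1, k2)
	fmt.Println(decodeID(k2[:])) // 256
}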
@@ -71,7 +71,7 @@ func (cr ChainReader) HasBlock(hash libcommon.Hash, number uint64) bool { func (cr ChainReader) GetTd(hash libcommon.Hash, number uint64) *big.Int { td, err := rawdb.ReadTd(cr.Db, hash, number) if err != nil { - log.Error("ReadTd failed", "err", err) + cr.Logger.Error("ReadTd failed", "err", err) return nil } return td @@ -81,10 +81,16 @@ func (cr ChainReader) FrozenBlocks() uint64 { return cr.BlockReader.FrozenBlocks() } -func (cr ChainReader) BorEventsByBlock(hash libcommon.Hash, number uint64) []rlp.RawValue { - panic("") +func (cr ChainReader) BorEventsByBlock(_ libcommon.Hash, _ uint64) []rlp.RawValue { + panic("bor events by block not implemented") } func (cr ChainReader) BorSpan(spanId uint64) []byte { - panic("") + span, err := cr.BlockReader.Span(context.Background(), cr.Db, spanId) + if err != nil { + cr.Logger.Error("BorSpan failed", "err", err) + return nil + } + + return span } diff --git a/eth/stagedsync/default_stages.go b/eth/stagedsync/default_stages.go index 7afee387c1c..ca5570ba797 100644 --- a/eth/stagedsync/default_stages.go +++ b/eth/stagedsync/default_stages.go @@ -3,11 +3,13 @@ package stagedsync import ( "context" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/wrap" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/log/v3" ) func DefaultStages(ctx context.Context, @@ -30,30 +32,30 @@ func DefaultStages(ctx context.Context, { ID: stages.Snapshots, Description: "Download snapshots", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { if badBlockUnwind { return nil } - return SpawnStageSnapshots(s, ctx, tx, snapshots, firstCycle, logger) + return SpawnStageSnapshots(s, ctx, txc.Tx, snapshots, firstCycle, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { return nil }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { - return SnapshotsPrune(p, firstCycle, snapshots, ctx, tx) + return SnapshotsPrune(p, firstCycle, snapshots, ctx, tx, logger) }, }, { ID: stages.Headers, Description: "Download headers", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { if badBlockUnwind { return nil } - return SpawnStageHeaders(s, u, ctx, tx, headers, firstCycle, test, logger) + return SpawnStageHeaders(s, u, ctx, txc.Tx, headers, firstCycle, test, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return HeadersUnwind(u, s, tx, headers, test) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return HeadersUnwind(u, s, txc.Tx, headers, test) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return nil @@ -62,14 +64,14 @@ func DefaultStages(ctx context.Context, { ID: stages.BorHeimdall, Description: "Download Bor-specific data from 
Heimdall", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { if badBlockUnwind { return nil } - return BorHeimdallForward(s, u, ctx, tx, borHeimdallCfg, false, logger) + return BorHeimdallForward(s, u, ctx, txc.Tx, borHeimdallCfg, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return BorHeimdallUnwind(u, ctx, s, tx, borHeimdallCfg) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return BorHeimdallUnwind(u, ctx, s, txc.Tx, borHeimdallCfg) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return BorHeimdallPrune(p, ctx, tx, borHeimdallCfg) @@ -78,11 +80,11 @@ func DefaultStages(ctx context.Context, { ID: stages.BlockHashes, Description: "Write block hashes", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { - return SpawnBlockHashStage(s, tx, blockHashCfg, ctx, logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnBlockHashStage(s, txc.Tx, blockHashCfg, ctx, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindBlockHashStage(u, tx, blockHashCfg, ctx) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindBlockHashStage(u, txc.Tx, blockHashCfg, ctx) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneBlockHashStage(p, tx, blockHashCfg, ctx) @@ -91,11 +93,11 @@ func DefaultStages(ctx context.Context, { ID: stages.Bodies, Description: "Download block bodies", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { - return BodiesForward(s, u, ctx, tx, bodies, test, firstCycle, logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return BodiesForward(s, u, ctx, txc.Tx, bodies, test, firstCycle, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindBodiesStage(u, tx, bodies, ctx) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindBodiesStage(u, txc.Tx, bodies, ctx) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return nil @@ -104,11 +106,11 @@ func DefaultStages(ctx context.Context, { ID: stages.Senders, Description: "Recover senders from tx signatures", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { - return SpawnRecoverSendersStage(senders, s, u, tx, 0, ctx, logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnRecoverSendersStage(senders, s, u, txc.Tx, 0, ctx, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindSendersStage(u, tx, senders, ctx) + Unwind: func(firstCycle bool, u *UnwindState, s 
*StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindSendersStage(u, txc.Tx, senders, ctx) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneSendersStage(p, tx, senders, ctx) @@ -118,11 +120,11 @@ func DefaultStages(ctx context.Context, ID: stages.Execution, Description: "Execute blocks w/o hash checks", Disabled: dbg.StagesOnlyBlocks, - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { - return SpawnExecuteBlocksStage(s, u, tx, 0, ctx, exec, firstCycle, logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnExecuteBlocksStage(s, u, txc, 0, ctx, exec, firstCycle, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindExecutionStage(u, s, tx, ctx, exec, firstCycle, logger) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindExecutionStage(u, s, txc, ctx, exec, firstCycle, logger) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneExecutionStage(p, tx, exec, ctx, firstCycle) @@ -132,11 +134,11 @@ func DefaultStages(ctx context.Context, ID: stages.HashState, Description: "Hash the key in the state", Disabled: bodies.historyV3 || ethconfig.EnableHistoryV4InTest || dbg.StagesOnlyBlocks, - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { - return SpawnHashStateStage(s, tx, hashState, ctx, logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnHashStateStage(s, txc.Tx, hashState, ctx, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindHashStateStage(u, s, tx, hashState, ctx, logger) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindHashStateStage(u, s, txc.Tx, hashState, ctx, logger) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneHashStateStage(p, tx, hashState, ctx) @@ -146,19 +148,19 @@ func DefaultStages(ctx context.Context, ID: stages.IntermediateHashes, Description: "Generate intermediate hashes and computing state root", Disabled: bodies.historyV3 || ethconfig.EnableHistoryV4InTest || dbg.StagesOnlyBlocks, - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { if exec.chainConfig.IsPrague(0) { - _, err := SpawnVerkleTrie(s, u, tx, trieCfg, ctx, logger) + _, err := SpawnVerkleTrie(s, u, txc.Tx, trieCfg, ctx, logger) return err } - _, err := SpawnIntermediateHashesStage(s, u, tx, trieCfg, ctx, logger) + _, err := SpawnIntermediateHashesStage(s, u, txc.Tx, trieCfg, ctx, logger) return err }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { if exec.chainConfig.IsPrague(0) { - return UnwindVerkleTrie(u, s, tx, trieCfg, ctx, logger) + return 
UnwindVerkleTrie(u, s, txc.Tx, trieCfg, ctx, logger) } - return UnwindIntermediateHashesStage(u, s, tx, trieCfg, ctx, logger) + return UnwindIntermediateHashesStage(u, s, txc.Tx, trieCfg, ctx, logger) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneIntermediateHashesStage(p, tx, trieCfg, ctx) @@ -169,11 +171,11 @@ func DefaultStages(ctx context.Context, Description: "Generate call traces index", DisabledDescription: "Work In Progress", Disabled: bodies.historyV3 || dbg.StagesOnlyBlocks, - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { - return SpawnCallTraces(s, tx, callTraces, ctx, logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnCallTraces(s, txc.Tx, callTraces, ctx, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindCallTraces(u, s, tx, callTraces, ctx, logger) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindCallTraces(u, s, txc.Tx, callTraces, ctx, logger) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneCallTraces(p, tx, callTraces, ctx, logger) @@ -183,11 +185,11 @@ func DefaultStages(ctx context.Context, ID: stages.AccountHistoryIndex, Description: "Generate account history index", Disabled: bodies.historyV3 || dbg.StagesOnlyBlocks, - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { - return SpawnAccountHistoryIndex(s, tx, history, ctx, logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnAccountHistoryIndex(s, txc.Tx, history, ctx, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindAccountHistoryIndex(u, s, tx, history, ctx) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindAccountHistoryIndex(u, s, txc.Tx, history, ctx) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneAccountHistoryIndex(p, tx, history, ctx, logger) @@ -197,11 +199,11 @@ func DefaultStages(ctx context.Context, ID: stages.StorageHistoryIndex, Description: "Generate storage history index", Disabled: bodies.historyV3 || dbg.StagesOnlyBlocks, - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { - return SpawnStorageHistoryIndex(s, tx, history, ctx, logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnStorageHistoryIndex(s, txc.Tx, history, ctx, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindStorageHistoryIndex(u, s, tx, history, ctx) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindStorageHistoryIndex(u, s, txc.Tx, history, ctx) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneStorageHistoryIndex(p, tx, history, ctx, logger) @@ -211,11 +213,11 @@ func 
DefaultStages(ctx context.Context, ID: stages.LogIndex, Description: "Generate receipt logs index", Disabled: bodies.historyV3 || dbg.StagesOnlyBlocks, - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { - return SpawnLogIndex(s, tx, logIndex, ctx, 0, logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnLogIndex(s, txc.Tx, logIndex, ctx, 0, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindLogIndex(u, s, tx, logIndex, ctx) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindLogIndex(u, s, txc.Tx, logIndex, ctx) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneLogIndex(p, tx, logIndex, ctx, logger) @@ -225,11 +227,11 @@ func DefaultStages(ctx context.Context, ID: stages.TxLookup, Description: "Generate tx lookup index", Disabled: dbg.StagesOnlyBlocks, - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { - return SpawnTxLookup(s, tx, 0 /* toBlock */, txLookup, ctx, logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnTxLookup(s, txc.Tx, 0 /* toBlock */, txLookup, ctx, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindTxLookup(u, s, tx, txLookup, ctx, logger) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindTxLookup(u, s, txc.Tx, txLookup, ctx, logger) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneTxLookup(p, tx, txLookup, ctx, firstCycle, logger) @@ -238,11 +240,11 @@ func DefaultStages(ctx context.Context, { ID: stages.Finish, Description: "Final: update current block for the RPC API", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, _ Unwinder, tx kv.RwTx, logger log.Logger) error { - return FinishForward(s, tx, finish, firstCycle) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, _ Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return FinishForward(s, txc.Tx, finish, firstCycle) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindFinish(u, tx, finish, ctx) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindFinish(u, txc.Tx, finish, ctx) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneFinish(p, tx, finish, ctx) @@ -256,40 +258,249 @@ func PipelineStages(ctx context.Context, snapshots SnapshotsCfg, blockHashCfg Bl { ID: stages.Snapshots, Description: "Download snapshots", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + if badBlockUnwind { + return nil + } + return SpawnStageSnapshots(s, ctx, txc.Tx, snapshots, firstCycle, logger) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, 
logger log.Logger) error { + return nil + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return SnapshotsPrune(p, firstCycle, snapshots, ctx, tx, logger) + }, + }, + { + ID: stages.BlockHashes, + Description: "Write block hashes", + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnBlockHashStage(s, txc.Tx, blockHashCfg, ctx, logger) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindBlockHashStage(u, txc.Tx, blockHashCfg, ctx) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return PruneBlockHashStage(p, tx, blockHashCfg, ctx) + }, + }, + { + ID: stages.Senders, + Description: "Recover senders from tx signatures", + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnRecoverSendersStage(senders, s, u, txc.Tx, 0, ctx, logger) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindSendersStage(u, txc.Tx, senders, ctx) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return PruneSendersStage(p, tx, senders, ctx) + }, + }, + { + ID: stages.Execution, + Description: "Execute blocks w/o hash checks", + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnExecuteBlocksStage(s, u, txc, 0, ctx, exec, firstCycle, logger) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindExecutionStage(u, s, txc, ctx, exec, firstCycle, logger) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return PruneExecutionStage(p, tx, exec, ctx, firstCycle) + }, + }, + { + ID: stages.HashState, + Description: "Hash the key in the state", + Disabled: exec.historyV3 && ethconfig.EnableHistoryV4InTest, + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnHashStateStage(s, txc.Tx, hashState, ctx, logger) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindHashStateStage(u, s, txc.Tx, hashState, ctx, logger) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return PruneHashStateStage(p, tx, hashState, ctx) + }, + }, + { + ID: stages.IntermediateHashes, + Description: "Generate intermediate hashes and computing state root", + Disabled: exec.historyV3 && ethconfig.EnableHistoryV4InTest, + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + if exec.chainConfig.IsPrague(0) { + _, err := SpawnVerkleTrie(s, u, txc.Tx, trieCfg, ctx, logger) + return err + } + _, err := SpawnIntermediateHashesStage(s, u, txc.Tx, trieCfg, ctx, logger) + return err + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + if exec.chainConfig.IsPrague(0) { + return UnwindVerkleTrie(u, s, txc.Tx, trieCfg, ctx, logger) + } + return UnwindIntermediateHashesStage(u, s, txc.Tx, trieCfg, ctx, logger) + }, + Prune: func(firstCycle bool, p 
*PruneState, tx kv.RwTx, logger log.Logger) error { + return PruneIntermediateHashesStage(p, tx, trieCfg, ctx) + }, + }, + { + ID: stages.CallTraces, + Description: "Generate call traces index", + DisabledDescription: "Work In Progress", + Disabled: exec.historyV3, + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnCallTraces(s, txc.Tx, callTraces, ctx, logger) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindCallTraces(u, s, txc.Tx, callTraces, ctx, logger) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return PruneCallTraces(p, tx, callTraces, ctx, logger) + }, + }, + { + ID: stages.AccountHistoryIndex, + Description: "Generate account history index", + Disabled: exec.historyV3, + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnAccountHistoryIndex(s, txc.Tx, history, ctx, logger) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindAccountHistoryIndex(u, s, txc.Tx, history, ctx) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return PruneAccountHistoryIndex(p, tx, history, ctx, logger) + }, + }, + { + ID: stages.StorageHistoryIndex, + Description: "Generate storage history index", + Disabled: exec.historyV3, + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnStorageHistoryIndex(s, txc.Tx, history, ctx, logger) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindStorageHistoryIndex(u, s, txc.Tx, history, ctx) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return PruneStorageHistoryIndex(p, tx, history, ctx, logger) + }, + }, + { + ID: stages.LogIndex, + Description: "Generate receipt logs index", + Disabled: exec.historyV3, + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnLogIndex(s, txc.Tx, logIndex, ctx, 0, logger) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindLogIndex(u, s, txc.Tx, logIndex, ctx) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return PruneLogIndex(p, tx, logIndex, ctx, logger) + }, + }, + { + ID: stages.TxLookup, + Description: "Generate tx lookup index", + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnTxLookup(s, txc.Tx, 0 /* toBlock */, txLookup, ctx, logger) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindTxLookup(u, s, txc.Tx, txLookup, ctx, logger) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return PruneTxLookup(p, tx, txLookup, ctx, firstCycle, logger) + }, + }, + { + ID: stages.Finish, + Description: "Final: update current block for the RPC API", + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, _ Unwinder, txc wrap.TxContainer, logger log.Logger) error { 
+ return FinishForward(s, txc.Tx, finish, firstCycle) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindFinish(u, txc.Tx, finish, ctx) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return PruneFinish(p, tx, finish, ctx) + }, + }, + } +} + +// when uploading - potentially from zero we need to include headers and bodies stages otherwise we won't recover the POW portion of the chain +func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers HeadersCfg, blockHashCfg BlockHashesCfg, senders SendersCfg, bodies BodiesCfg, exec ExecuteBlockCfg, hashState HashStateCfg, trieCfg TrieCfg, history HistoryCfg, logIndex LogIndexCfg, callTraces CallTracesCfg, txLookup TxLookupCfg, finish FinishCfg, test bool) []*Stage { + return []*Stage{ + { + ID: stages.Snapshots, + Description: "Download snapshots", + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { if badBlockUnwind { return nil } - return SpawnStageSnapshots(s, ctx, tx, snapshots, firstCycle, logger) + return SpawnStageSnapshots(s, ctx, txc.Tx, snapshots, firstCycle, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { return nil }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { - return SnapshotsPrune(p, firstCycle, snapshots, ctx, tx) + return SnapshotsPrune(p, firstCycle, snapshots, ctx, tx, logger) + }, + }, + { + ID: stages.Headers, + Description: "Download headers", + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + if badBlockUnwind { + return nil + } + return SpawnStageHeaders(s, u, ctx, txc.Tx, headers, firstCycle, test, logger) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return HeadersUnwind(u, s, txc.Tx, headers, test) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return nil }, }, { ID: stages.BlockHashes, Description: "Write block hashes", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { - return SpawnBlockHashStage(s, tx, blockHashCfg, ctx, logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnBlockHashStage(s, txc.Tx, blockHashCfg, ctx, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindBlockHashStage(u, tx, blockHashCfg, ctx) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindBlockHashStage(u, txc.Tx, blockHashCfg, ctx) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneBlockHashStage(p, tx, blockHashCfg, ctx) }, }, + { + ID: stages.Bodies, + Description: "Download block bodies", + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return BodiesForward(s, u, ctx, txc.Tx, bodies, test, firstCycle, logger) + }, + Unwind: func(firstCycle bool, u *UnwindState, s 
*StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindBodiesStage(u, txc.Tx, bodies, ctx) + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { + return nil + }, + }, { ID: stages.Senders, Description: "Recover senders from tx signatures", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { - return SpawnRecoverSendersStage(senders, s, u, tx, 0, ctx, logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnRecoverSendersStage(senders, s, u, txc.Tx, 0, ctx, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindSendersStage(u, tx, senders, ctx) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindSendersStage(u, txc.Tx, senders, ctx) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneSendersStage(p, tx, senders, ctx) @@ -298,11 +509,11 @@ func PipelineStages(ctx context.Context, snapshots SnapshotsCfg, blockHashCfg Bl { ID: stages.Execution, Description: "Execute blocks w/o hash checks", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { - return SpawnExecuteBlocksStage(s, u, tx, 0, ctx, exec, firstCycle, logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnExecuteBlocksStage(s, u, txc, 0, ctx, exec, firstCycle, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindExecutionStage(u, s, tx, ctx, exec, firstCycle, logger) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindExecutionStage(u, s, txc, ctx, exec, firstCycle, logger) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneExecutionStage(p, tx, exec, ctx, firstCycle) @@ -312,11 +523,11 @@ func PipelineStages(ctx context.Context, snapshots SnapshotsCfg, blockHashCfg Bl ID: stages.HashState, Description: "Hash the key in the state", Disabled: exec.historyV3 && ethconfig.EnableHistoryV4InTest, - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { - return SpawnHashStateStage(s, tx, hashState, ctx, logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnHashStateStage(s, txc.Tx, hashState, ctx, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindHashStateStage(u, s, tx, hashState, ctx, logger) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindHashStateStage(u, s, txc.Tx, hashState, ctx, logger) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneHashStateStage(p, tx, hashState, ctx) @@ -326,19 +537,19 @@ func PipelineStages(ctx context.Context, snapshots SnapshotsCfg, blockHashCfg Bl ID: stages.IntermediateHashes, Description: "Generate intermediate hashes and computing state root", Disabled: exec.historyV3 && 
ethconfig.EnableHistoryV4InTest, - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { if exec.chainConfig.IsPrague(0) { - _, err := SpawnVerkleTrie(s, u, tx, trieCfg, ctx, logger) + _, err := SpawnVerkleTrie(s, u, txc.Tx, trieCfg, ctx, logger) return err } - _, err := SpawnIntermediateHashesStage(s, u, tx, trieCfg, ctx, logger) + _, err := SpawnIntermediateHashesStage(s, u, txc.Tx, trieCfg, ctx, logger) return err }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { if exec.chainConfig.IsPrague(0) { - return UnwindVerkleTrie(u, s, tx, trieCfg, ctx, logger) + return UnwindVerkleTrie(u, s, txc.Tx, trieCfg, ctx, logger) } - return UnwindIntermediateHashesStage(u, s, tx, trieCfg, ctx, logger) + return UnwindIntermediateHashesStage(u, s, txc.Tx, trieCfg, ctx, logger) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneIntermediateHashesStage(p, tx, trieCfg, ctx) @@ -349,11 +560,11 @@ func PipelineStages(ctx context.Context, snapshots SnapshotsCfg, blockHashCfg Bl Description: "Generate call traces index", DisabledDescription: "Work In Progress", Disabled: exec.historyV3, - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { - return SpawnCallTraces(s, tx, callTraces, ctx, logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnCallTraces(s, txc.Tx, callTraces, ctx, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindCallTraces(u, s, tx, callTraces, ctx, logger) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindCallTraces(u, s, txc.Tx, callTraces, ctx, logger) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneCallTraces(p, tx, callTraces, ctx, logger) @@ -363,11 +574,11 @@ func PipelineStages(ctx context.Context, snapshots SnapshotsCfg, blockHashCfg Bl ID: stages.AccountHistoryIndex, Description: "Generate account history index", Disabled: exec.historyV3, - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { - return SpawnAccountHistoryIndex(s, tx, history, ctx, logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnAccountHistoryIndex(s, txc.Tx, history, ctx, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindAccountHistoryIndex(u, s, tx, history, ctx) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindAccountHistoryIndex(u, s, txc.Tx, history, ctx) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneAccountHistoryIndex(p, tx, history, ctx, logger) @@ -377,11 +588,11 @@ func PipelineStages(ctx context.Context, snapshots SnapshotsCfg, blockHashCfg Bl ID: stages.StorageHistoryIndex, 
Description: "Generate storage history index", Disabled: exec.historyV3, - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { - return SpawnStorageHistoryIndex(s, tx, history, ctx, logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnStorageHistoryIndex(s, txc.Tx, history, ctx, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindStorageHistoryIndex(u, s, tx, history, ctx) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindStorageHistoryIndex(u, s, txc.Tx, history, ctx) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneStorageHistoryIndex(p, tx, history, ctx, logger) @@ -391,11 +602,11 @@ func PipelineStages(ctx context.Context, snapshots SnapshotsCfg, blockHashCfg Bl ID: stages.LogIndex, Description: "Generate receipt logs index", Disabled: exec.historyV3, - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { - return SpawnLogIndex(s, tx, logIndex, ctx, 0, logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnLogIndex(s, txc.Tx, logIndex, ctx, 0, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindLogIndex(u, s, tx, logIndex, ctx) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindLogIndex(u, s, txc.Tx, logIndex, ctx) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneLogIndex(p, tx, logIndex, ctx, logger) @@ -404,11 +615,11 @@ func PipelineStages(ctx context.Context, snapshots SnapshotsCfg, blockHashCfg Bl { ID: stages.TxLookup, Description: "Generate tx lookup index", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { - return SpawnTxLookup(s, tx, 0 /* toBlock */, txLookup, ctx, logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnTxLookup(s, txc.Tx, 0 /* toBlock */, txLookup, ctx, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindTxLookup(u, s, tx, txLookup, ctx, logger) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindTxLookup(u, s, txc.Tx, txLookup, ctx, logger) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneTxLookup(p, tx, txLookup, ctx, firstCycle, logger) @@ -417,11 +628,11 @@ func PipelineStages(ctx context.Context, snapshots SnapshotsCfg, blockHashCfg Bl { ID: stages.Finish, Description: "Final: update current block for the RPC API", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, _ Unwinder, tx kv.RwTx, logger log.Logger) error { - return FinishForward(s, tx, finish, firstCycle) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, _ Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return FinishForward(s, txc.Tx, finish, firstCycle) }, - Unwind: 
func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindFinish(u, tx, finish, ctx) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindFinish(u, txc.Tx, finish, ctx) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneFinish(p, tx, finish, ctx) @@ -436,72 +647,72 @@ func StateStages(ctx context.Context, headers HeadersCfg, bodies BodiesCfg, bloc { ID: stages.Headers, Description: "Download headers", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { return nil }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return HeadersUnwind(u, s, tx, headers, false) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return HeadersUnwind(u, s, txc.Tx, headers, false) }, }, { ID: stages.Bodies, Description: "Download block bodies", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { return nil }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindBodiesStage(u, tx, bodies, ctx) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindBodiesStage(u, txc.Tx, bodies, ctx) }, }, { ID: stages.BlockHashes, Description: "Write block hashes", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { - return SpawnBlockHashStage(s, tx, blockHashCfg, ctx, logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnBlockHashStage(s, txc.Tx, blockHashCfg, ctx, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindBlockHashStage(u, tx, blockHashCfg, ctx) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindBlockHashStage(u, txc.Tx, blockHashCfg, ctx) }, }, { ID: stages.Senders, Description: "Recover senders from tx signatures", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { - return SpawnRecoverSendersStage(senders, s, u, tx, 0, ctx, logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnRecoverSendersStage(senders, s, u, txc.Tx, 0, ctx, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindSendersStage(u, tx, senders, ctx) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindSendersStage(u, txc.Tx, senders, ctx) }, }, { ID: stages.Execution, Description: "Execute blocks w/o hash checks", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) 
error { - return SpawnExecuteBlocksStage(s, u, tx, 0, ctx, exec, firstCycle, logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnExecuteBlocksStage(s, u, txc, 0, ctx, exec, firstCycle, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindExecutionStage(u, s, tx, ctx, exec, firstCycle, logger) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindExecutionStage(u, s, txc, ctx, exec, firstCycle, logger) }, }, { ID: stages.HashState, Description: "Hash the key in the state", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { - return SpawnHashStateStage(s, tx, hashState, ctx, logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnHashStateStage(s, txc.Tx, hashState, ctx, logger) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindHashStateStage(u, s, tx, hashState, ctx, logger) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindHashStateStage(u, s, txc.Tx, hashState, ctx, logger) }, }, { ID: stages.IntermediateHashes, Description: "Generate intermediate hashes and computing state root", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { - _, err := SpawnIntermediateHashesStage(s, u, tx, trieCfg, ctx, logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + _, err := SpawnIntermediateHashesStage(s, u, txc.Tx, trieCfg, ctx, logger) return err }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { - return UnwindIntermediateHashesStage(u, s, tx, trieCfg, ctx, logger) + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindIntermediateHashesStage(u, s, txc.Tx, trieCfg, ctx, logger) }, }, } diff --git a/eth/stagedsync/exec3.go b/eth/stagedsync/exec3.go index 0db8253e097..bcaaf39a126 100644 --- a/eth/stagedsync/exec3.go +++ b/eth/stagedsync/exec3.go @@ -29,7 +29,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" "github.com/ledgerwatch/erigon-lib/metrics" libstate "github.com/ledgerwatch/erigon-lib/state" - state2 "github.com/ledgerwatch/erigon-lib/state" + "github.com/ledgerwatch/erigon-lib/wrap" "github.com/ledgerwatch/erigon/cmd/state/exec22" "github.com/ledgerwatch/erigon/cmd/state/exec3" "github.com/ledgerwatch/erigon/common/math" @@ -145,7 +145,7 @@ rwloop does: When rwLoop has nothing to do - it does Prune, or flush of WAL to RwTx (agg.rotate+agg.Flush) */ func ExecV3(ctx context.Context, - execStage *StageState, u Unwinder, workerCount int, cfg ExecuteBlockCfg, applyTx kv.RwTx, + execStage *StageState, u Unwinder, workerCount int, cfg ExecuteBlockCfg, txc wrap.TxContainer, parallel bool, logPrefix string, maxBlockNum uint64, logger log.Logger, @@ -157,6 +157,7 @@ func ExecV3(ctx context.Context, agg, engine := cfg.agg, cfg.engine chainConfig, genesis := cfg.chainConfig, cfg.genesis + applyTx := txc.Tx useExternalTx := applyTx != nil if !useExternalTx && !parallel { 
var err error @@ -760,7 +761,7 @@ func blockWithSenders(db kv.RoDB, tx kv.Tx, blockReader services.BlockReader, bl return blockReader.BlockByNumber(context.Background(), tx, blockNum) } -func processResultQueue(in *exec22.QueueWithRetry, rws *exec22.ResultsQueue, outputTxNumIn uint64, rs *state.StateV3, agg *state2.AggregatorV3, applyTx kv.Tx, backPressure chan struct{}, applyWorker *exec3.Worker, canRetry, forceStopAtBlockEnd bool) (outputTxNum uint64, conflicts, triggers int, processedBlockNum uint64, stopedAtBlockEnd bool, err error) { +func processResultQueue(in *exec22.QueueWithRetry, rws *exec22.ResultsQueue, outputTxNumIn uint64, rs *state.StateV3, agg *libstate.AggregatorV3, applyTx kv.Tx, backPressure chan struct{}, applyWorker *exec3.Worker, canRetry, forceStopAtBlockEnd bool) (outputTxNum uint64, conflicts, triggers int, processedBlockNum uint64, stopedAtBlockEnd bool, err error) { rwsIt := rws.Iter() defer rwsIt.Close() @@ -1044,7 +1045,7 @@ func reconstituteStep(last bool, return err } if b == nil { - return fmt.Errorf("could not find block %d\n", bn) + return fmt.Errorf("could not find block %d", bn) } txs := b.Transactions() header := b.HeaderNoCopy() @@ -1334,7 +1335,7 @@ func safeCloseTxTaskCh(ch chan *exec22.TxTask) { func ReconstituteState(ctx context.Context, s *StageState, dirs datadir.Dirs, workerCount int, batchSize datasize.ByteSize, chainDb kv.RwDB, blockReader services.FullBlockReader, - logger log.Logger, agg *state2.AggregatorV3, engine consensus.Engine, + logger log.Logger, agg *libstate.AggregatorV3, engine consensus.Engine, chainConfig *chain.Config, genesis *types.Genesis) (err error) { startTime := time.Now() defer agg.EnableMadvNormal().DisableReadAhead() diff --git a/eth/stagedsync/stage.go b/eth/stagedsync/stage.go index 17607787155..43b248ecdd9 100644 --- a/eth/stagedsync/stage.go +++ b/eth/stagedsync/stage.go @@ -5,18 +5,19 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/wrap" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" ) // ExecFunc is the execution function for the stage to move forward. // * state - is the current state of the stage and contains stage data. // * unwinder - if the stage needs to cause unwinding, `unwinder` methods can be used. -type ExecFunc func(firstCycle bool, badBlockUnwind bool, s *StageState, unwinder Unwinder, tx kv.RwTx, logger log.Logger) error +type ExecFunc func(firstCycle bool, badBlockUnwind bool, s *StageState, unwinder Unwinder, txc wrap.TxContainer, logger log.Logger) error // UnwindFunc is the unwinding logic of the stage. // * unwindState - contains information about the unwind itself. // * stageState - represents the state of this stage at the beginning of unwind. -type UnwindFunc func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error +type UnwindFunc func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error // PruneFunc is the execution function for the stage to prune old data. // * state - is the current state of the stage and contains stage data. 
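The ExecFunc/UnwindFunc change above is the heart of this refactor: stages now receive a wrap.TxContainer rather than a bare kv.RwTx, and unwrap txc.Tx wherever a plain transaction is enough (only the execution stage consumes the whole container). Below is a rough sketch of why a container type pays off - new fields can ride along later without touching every stage signature again. The types here are illustrative stand-ins; wrap.TxContainer's real definition lives in erigon-lib.

package main

import "fmt"

// Tx is an illustrative stand-in for kv.RwTx.
type Tx interface{ Commit() error }

// TxContainer mirrors the idea of wrap.TxContainer: a stage receives the
// container and unwraps only what it needs (txc.Tx in the hunks above).
type TxContainer struct {
	Tx Tx
	// Additional handles can be added here later without changing
	// a single ExecFunc or UnwindFunc signature again.
}

// ExecFunc matches the shape of the new stage callback.
type ExecFunc func(firstCycle bool, txc TxContainer) error

type fakeTx struct{}

func (fakeTx) Commit() error { return nil }

func main() {
	var forward ExecFunc = func(firstCycle bool, txc TxContainer) error {
		// Most stages just unwrap the tx, exactly like txc.Tx above.
		return txc.Tx.Commit()
	}
	fmt.Println(forward(true, TxContainer{Tx: fakeTx{}})) // <nil>
}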
diff --git a/eth/stagedsync/stage_bodies.go b/eth/stagedsync/stage_bodies.go index bf2feb97c9b..c10aaae4438 100644 --- a/eth/stagedsync/stage_bodies.go +++ b/eth/stagedsync/stage_bodies.go @@ -6,14 +6,14 @@ import ( "runtime" "time" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/rawdb/blockio" - "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/erigon/dataflow" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/turbo/adapter" @@ -35,6 +35,7 @@ type BodiesCfg struct { blockReader services.FullBlockReader blockWriter *blockio.BlockWriter historyV3 bool + loopBreakCheck func(int) bool } func StageBodiesCfg(db kv.RwDB, bd *bodydownload.BodyDownload, @@ -43,8 +44,12 @@ func StageBodiesCfg(db kv.RwDB, bd *bodydownload.BodyDownload, chanConfig chain.Config, blockReader services.FullBlockReader, historyV3 bool, - blockWriter *blockio.BlockWriter) BodiesCfg { - return BodiesCfg{db: db, bd: bd, bodyReqSend: bodyReqSend, penalise: penalise, blockPropagator: blockPropagator, timeout: timeout, chanConfig: chanConfig, blockReader: blockReader, historyV3: historyV3, blockWriter: blockWriter} + blockWriter *blockio.BlockWriter, + loopBreakCheck func(int) bool) BodiesCfg { + return BodiesCfg{ + db: db, bd: bd, bodyReqSend: bodyReqSend, penalise: penalise, blockPropagator: blockPropagator, + timeout: timeout, chanConfig: chanConfig, blockReader: blockReader, + historyV3: historyV3, blockWriter: blockWriter, loopBreakCheck: loopBreakCheck} } // BodiesForward progresses Bodies stage in the forward direction @@ -59,6 +64,9 @@ func BodiesForward( logger log.Logger, ) error { var doUpdate bool + + startTime := time.Now() + if s.BlockNumber < cfg.blockReader.FrozenBlocks() { s.BlockNumber = cfg.blockReader.FrozenBlocks() doUpdate = true @@ -126,7 +134,7 @@ func BodiesForward( prevProgress := bodyProgress var noProgressCount uint = 0 // How many time the progress was printed without actual progress var totalDelivered uint64 = 0 - cr := ChainReader{Cfg: cfg.chanConfig, Db: tx, BlockReader: cfg.blockReader} + cr := ChainReader{Cfg: cfg.chanConfig, Db: tx, BlockReader: cfg.blockReader, Logger: logger} loopBody := func() (bool, error) { // loopCount is used here to ensure we don't get caught in a constant loop of making requests @@ -221,6 +229,10 @@ func BodiesForward( } } cfg.bd.AdvanceLow() + + if cfg.loopBreakCheck != nil && cfg.loopBreakCheck(int(i)) { + return true, nil + } } d5 += time.Since(start) @@ -282,7 +294,10 @@ func BodiesForward( return libcommon.ErrStopped } if bodyProgress > s.BlockNumber+16 { - logger.Info(fmt.Sprintf("[%s] Processed", logPrefix), "highest", bodyProgress) + blocks := bodyProgress - s.BlockNumber + secs := time.Since(startTime).Seconds() + logger.Info(fmt.Sprintf("[%s] Processed", logPrefix), "highest", bodyProgress, + "blocks", blocks, "in", secs, "blk/sec", uint64(float64(blocks)/secs)) } return nil } @@ -304,6 +319,7 @@ func logDownloadingBodies(logPrefix string, committed, remaining uint64, totalDe "wasted/sec", libcommon.ByteCount(uint64(wastedSpeed)), "remaining", remaining, "delivered", totalDelivered, + "blk/sec", totalDelivered/uint64(logInterval/time.Second), "cache", libcommon.ByteCount(uint64(bodyCacheSize)), "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), diff 
--git a/eth/stagedsync/stage_bor_heimdall.go b/eth/stagedsync/stage_bor_heimdall.go index 2b7d8b39e13..88199f74774 100644 --- a/eth/stagedsync/stage_bor_heimdall.go +++ b/eth/stagedsync/stage_bor_heimdall.go @@ -6,9 +6,7 @@ import ( "encoding/binary" "encoding/json" "fmt" - "math/big" "sort" - "strconv" "time" lru "github.com/hashicorp/golang-lru/arc/v2" @@ -21,25 +19,23 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/accounts/abi" "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/consensus/bor" - "github.com/ledgerwatch/erigon/consensus/bor/contract" - "github.com/ledgerwatch/erigon/consensus/bor/finality/generics" - "github.com/ledgerwatch/erigon/consensus/bor/finality/whitelist" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" - "github.com/ledgerwatch/erigon/consensus/bor/valset" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/dataflow" "github.com/ledgerwatch/erigon/eth/ethconfig/estimate" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/erigon/polygon/bor" + "github.com/ledgerwatch/erigon/polygon/bor/borcfg" + "github.com/ledgerwatch/erigon/polygon/bor/finality" + "github.com/ledgerwatch/erigon/polygon/bor/finality/whitelist" + "github.com/ledgerwatch/erigon/polygon/bor/valset" + "github.com/ledgerwatch/erigon/polygon/heimdall" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" ) const ( inmemorySnapshots = 128 // Number of recent vote snapshots to keep in memory - inmemorySignatures = 4096 // Number of recent block signatures to keep in memory + InMemorySignatures = 4096 // Number of recent block signatures to keep in memory snapshotPersistInterval = 1024 // Number of blocks after which to persist the vote snapshot to the database extraVanity = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity extraSeal = 65 // Fixed number of extra-data suffix bytes reserved for signer seal @@ -50,11 +46,13 @@ type BorHeimdallCfg struct { snapDb kv.RwDB // Database to store and retrieve snapshot checkpoints miningState MiningState chainConfig chain.Config - heimdallClient heimdall.IHeimdallClient + borConfig *borcfg.BorConfig + heimdallClient heimdall.HeimdallClient blockReader services.FullBlockReader hd *headerdownload.HeaderDownload penalize func(context.Context, []headerdownload.PenaltyItem) stateReceiverABI abi.ABI + loopBreakCheck func(int) bool recents *lru.ARCCache[libcommon.Hash, *bor.Snapshot] signatures *lru.ARCCache[libcommon.Hash, libcommon.Address] } @@ -64,23 +62,31 @@ func StageBorHeimdallCfg( snapDb kv.RwDB, miningState MiningState, chainConfig chain.Config, - heimdallClient heimdall.IHeimdallClient, + heimdallClient heimdall.HeimdallClient, blockReader services.FullBlockReader, hd *headerdownload.HeaderDownload, penalize func(context.Context, []headerdownload.PenaltyItem), + loopBreakCheck func(int) bool, recents *lru.ARCCache[libcommon.Hash, *bor.Snapshot], signatures *lru.ARCCache[libcommon.Hash, libcommon.Address], ) BorHeimdallCfg { + var borConfig *borcfg.BorConfig + if chainConfig.Bor != nil { + borConfig = chainConfig.Bor.(*borcfg.BorConfig) + } + return BorHeimdallCfg{ db: db, snapDb: snapDb, miningState: miningState, chainConfig: chainConfig, + borConfig: borConfig, heimdallClient: heimdallClient, blockReader: blockReader, hd: hd, penalize: penalize, - stateReceiverABI: 
contract.StateReceiver(), + stateReceiverABI: bor.GenesisContractStateReceiverABI(), + loopBreakCheck: loopBreakCheck, recents: recents, signatures: signatures, } @@ -92,12 +98,11 @@ func BorHeimdallForward( ctx context.Context, tx kv.RwTx, cfg BorHeimdallCfg, - mine bool, logger log.Logger, ) (err error) { processStart := time.Now() - if cfg.chainConfig.Bor == nil { + if cfg.borConfig == nil { return } if cfg.heimdallClient == nil { @@ -113,399 +118,172 @@ func BorHeimdallForward( defer tx.Rollback() } - var header *types.Header - var headNumber uint64 - - headNumber, err = stages.GetStageProgress(tx, stages.Headers) - + headNumber, err := stages.GetStageProgress(tx, stages.Headers) if err != nil { return err } - service := whitelist.GetWhitelistingService() - - if generics.BorMilestoneRewind.Load() != nil && *generics.BorMilestoneRewind.Load() != 0 { - unwindPoint := *generics.BorMilestoneRewind.Load() - var reset uint64 = 0 - generics.BorMilestoneRewind.Store(&reset) - - if service != nil && unwindPoint < headNumber { - header, err = cfg.blockReader.HeaderByNumber(ctx, tx, headNumber) - logger.Debug("[BorHeimdall] Verification failed for header", "hash", header.Hash(), "height", headNumber, "err", err) - cfg.penalize(ctx, []headerdownload.PenaltyItem{ - {Penalty: headerdownload.BadBlockPenalty, PeerID: cfg.hd.SourcePeerId(header.Hash())}}) + whitelistService := whitelist.GetWhitelistingService() + if unwindPointPtr := finality.BorMilestoneRewind.Load(); unwindPointPtr != nil && *unwindPointPtr != 0 { + unwindPoint := *unwindPointPtr + if whitelistService != nil && unwindPoint < headNumber { + header, err := cfg.blockReader.HeaderByNumber(ctx, tx, headNumber) + if err != nil { + return err + } + hash := header.Hash() + logger.Debug( + fmt.Sprintf("[%s] Verification failed for header due to milestone rewind", s.LogPrefix()), + "hash", hash, + "height", headNumber, + ) + cfg.penalize(ctx, []headerdownload.PenaltyItem{{ + Penalty: headerdownload.BadBlockPenalty, + PeerID: cfg.hd.SourcePeerId(hash), + }}) dataflow.HeaderDownloadStates.AddChange(headNumber, dataflow.HeaderInvalidated) - s.state.UnwindTo(unwindPoint, ForkReset(header.Hash())) + s.state.UnwindTo(unwindPoint, ForkReset(hash)) + var reset uint64 = 0 + finality.BorMilestoneRewind.Store(&reset) return fmt.Errorf("verification failed for header %d: %x", headNumber, header.Hash()) } } - if mine { - minedHeader := cfg.miningState.MiningBlock.Header - - if minedHeadNumber := minedHeader.Number.Uint64(); minedHeadNumber > headNumber { - // Whitelist service is called to check if the bor chain is - // on the cannonical chain according to milestones - if service != nil { - if !service.IsValidChain(minedHeadNumber, []*types.Header{minedHeader}) { - logger.Debug("[BorHeimdall] Verification failed for mined header", "hash", minedHeader.Hash(), "height", minedHeadNumber, "err", err) - dataflow.HeaderDownloadStates.AddChange(minedHeadNumber, dataflow.HeaderInvalidated) - s.state.UnwindTo(minedHeadNumber-1, ForkReset(minedHeader.Hash())) - return fmt.Errorf("mining on a wrong fork %d:%x", minedHeadNumber, minedHeader.Hash()) - } - } - } else { - return fmt.Errorf("attempting to mine %d, which is behind current head: %d", minedHeadNumber, headNumber) - } - } - - if err != nil { - return fmt.Errorf("getting headers progress: %w", err) - } - if s.BlockNumber == headNumber { return nil } - // Find out the latest event Id - cursor, err := tx.Cursor(kv.BorEvents) - if err != nil { - return err - } - defer cursor.Close() - k, _, err := cursor.Last() - 
if err != nil { - return err - } - - var lastEventId uint64 - if k != nil { - lastEventId = binary.BigEndian.Uint64(k) - } - type LastFrozen interface { - LastFrozenEventID() uint64 - LastFrozenSpanID() uint64 - } - snapshotLastEventId := cfg.blockReader.(LastFrozen).LastFrozenEventID() - if snapshotLastEventId > lastEventId { - lastEventId = snapshotLastEventId - } - sCursor, err := tx.Cursor(kv.BorSpans) - if err != nil { - return err - } - defer sCursor.Close() - k, _, err = sCursor.Last() - if err != nil { - return err - } - var lastSpanId uint64 - if k != nil { - lastSpanId = binary.BigEndian.Uint64(k) - } - snapshotLastSpanId := cfg.blockReader.(LastFrozen).LastFrozenSpanID() - if snapshotLastSpanId > lastSpanId { - lastSpanId = snapshotLastSpanId - } - var nextSpanId uint64 - if lastSpanId > 0 { - nextSpanId = lastSpanId + 1 - } - var endSpanID uint64 - if bor.SpanIDAt(headNumber) > 0 { - endSpanID = bor.SpanIDAt(headNumber+1) + 1 - } - lastBlockNum := s.BlockNumber + if cfg.blockReader.FrozenBorBlocks() > lastBlockNum { lastBlockNum = cfg.blockReader.FrozenBorBlocks() } + recents, err := lru.NewARC[libcommon.Hash, *bor.Snapshot](inmemorySnapshots) + if err != nil { return err } - signatures, err := lru.NewARC[libcommon.Hash, libcommon.Address](inmemorySignatures) + signatures, err := lru.NewARC[libcommon.Hash, libcommon.Address](InMemorySignatures) if err != nil { return err } + chain := NewChainReaderImpl(&cfg.chainConfig, tx, cfg.blockReader, logger) var blockNum uint64 var fetchTime time.Duration var eventRecords int + lastSpanID, err := fetchRequiredHeimdallSpansIfNeeded(ctx, headNumber, tx, cfg, s.LogPrefix(), logger) + if err != nil { + return err + } + + lastStateSyncEventID, err := LastStateSyncEventID(tx, cfg.blockReader) + if err != nil { + return err + } + logTimer := time.NewTicker(logInterval) defer logTimer.Stop() - if endSpanID >= nextSpanId { - logger.Info("["+s.LogPrefix()+"] Processing spans...", "from", nextSpanId, "to", endSpanID) - } - for spanID := nextSpanId; spanID <= endSpanID; spanID++ { - if lastSpanId, err = fetchAndWriteSpans(ctx, spanID, tx, cfg.heimdallClient, s.LogPrefix(), logger); err != nil { - return err - } - } - if !mine { - logger.Info("["+s.LogPrefix()+"] Processing sync events...", "from", lastBlockNum+1, "to", headNumber) - } + logger.Info("["+s.LogPrefix()+"] Processing sync events...", "from", lastBlockNum+1, "to", headNumber) + for blockNum = lastBlockNum + 1; blockNum <= headNumber; blockNum++ { select { default: case <-logTimer.C: - logger.Info("["+s.LogPrefix()+"] StateSync Progress", "progress", blockNum, "lastSpanId", lastSpanId, "lastEventId", lastEventId, "total records", eventRecords, "fetch time", fetchTime, "process time", time.Since(processStart)) + logger.Info("["+s.LogPrefix()+"] StateSync Progress", "progress", blockNum, "lastSpanID", lastSpanID, "lastStateSyncEventID", lastStateSyncEventID, "total records", eventRecords, "fetch time", fetchTime, "process time", time.Since(processStart)) } - if !mine { - header, err = cfg.blockReader.HeaderByNumber(ctx, tx, blockNum) - if err != nil { - return err - } - if header == nil { - return fmt.Errorf("["+s.LogPrefix()+"] header not found: %d", blockNum) - } - - // Whitelist service is called to check if the bor chain is - // on the cannonical chain according to milestones - if service != nil { - if !service.IsValidChain(blockNum, []*types.Header{header}) { - logger.Debug("["+s.LogPrefix()+"] Verification failed for header", "height", blockNum, "hash", header.Hash()) - 
cfg.penalize(ctx, []headerdownload.PenaltyItem{ - {Penalty: headerdownload.BadBlockPenalty, PeerID: cfg.hd.SourcePeerId(header.Hash())}}) - dataflow.HeaderDownloadStates.AddChange(blockNum, dataflow.HeaderInvalidated) - s.state.UnwindTo(blockNum-1, ForkReset(header.Hash())) - return fmt.Errorf("["+s.LogPrefix()+"] verification failed for header %d: %x", blockNum, header.Hash()) - } - } + header, err := cfg.blockReader.HeaderByNumber(ctx, tx, blockNum) + if err != nil { + return err } - - if blockNum > 0 && blockNum%cfg.chainConfig.Bor.CalculateSprint(blockNum) == 0 { - var callTime time.Duration - var records int - if lastEventId, records, callTime, err = fetchAndWriteBorEvents(ctx, cfg.blockReader, cfg.chainConfig.Bor, header, lastEventId, cfg.chainConfig.ChainID.String(), tx, cfg.heimdallClient, cfg.stateReceiverABI, s.LogPrefix(), logger); err != nil { - return err - } - - eventRecords += records - fetchTime += callTime + if header == nil { + return fmt.Errorf("header not found: %d", blockNum) } - var snap *bor.Snapshot + // Whitelist service is called to check if the bor chain is + // on the canonical chain according to milestones + if whitelistService != nil && !whitelistService.IsValidChain(blockNum, []*types.Header{header}) { + logger.Debug("["+s.LogPrefix()+"] Verification failed for header", "height", blockNum, "hash", header.Hash()) + cfg.penalize(ctx, []headerdownload.PenaltyItem{{ + Penalty: headerdownload.BadBlockPenalty, + PeerID: cfg.hd.SourcePeerId(header.Hash()), + }}) + dataflow.HeaderDownloadStates.AddChange(blockNum, dataflow.HeaderInvalidated) + s.state.UnwindTo(blockNum-1, ForkReset(header.Hash())) + return fmt.Errorf("verification failed for header %d: %x", blockNum, header.Hash()) + } - if header != nil { - snap = loadSnapshot(blockNum, header.Hash(), cfg.chainConfig.Bor, recents, signatures, cfg.snapDb, logger) + if blockNum > cfg.blockReader.BorSnapshots().SegmentsMin() { + // SegmentsMin is only set if running as an uploader process (check SnapshotsCfg.snapshotUploader and + // UploadLocationFlag) when we remove snapshots based on FrozenBlockLimit and number of uploaded snapshots + // avoid calling this if-block for blockNums <= SegmentsMin to avoid reinsertion of snapshots + snap := loadSnapshot(blockNum, header.Hash(), cfg.borConfig, recents, signatures, cfg.snapDb, logger) if snap == nil { - snap, err = initValidatorSets(ctx, tx, cfg.blockReader, cfg.chainConfig.Bor, - chain, blockNum, recents, signatures, cfg.snapDb, logger, s.LogPrefix()) + snap, err = initValidatorSets(ctx, tx, cfg.blockReader, cfg.borConfig, + cfg.heimdallClient, chain, blockNum, recents, signatures, cfg.snapDb, logger, s.LogPrefix()) if err != nil { return fmt.Errorf("can't initialise validator sets: %w", err) } } - if err = persistValidatorSets(ctx, snap, u, tx, cfg.blockReader, cfg.chainConfig.Bor, chain, blockNum, header.Hash(), recents, signatures, cfg.snapDb, logger, s.LogPrefix()); err != nil { + if err = persistValidatorSets(ctx, snap, u, tx, cfg.blockReader, cfg.borConfig, chain, blockNum, header.Hash(), recents, signatures, cfg.snapDb, logger, s.LogPrefix()); err != nil { return fmt.Errorf("can't persist validator sets: %w", err) } - - if !mine { - sprintLength := cfg.chainConfig.Bor.CalculateSprint(blockNum) - spanID := bor.SpanIDAt(blockNum) - if (spanID > 0) && ((blockNum+1)%sprintLength == 0) { - if err = checkHeaderExtraData(u, ctx, chain, blockNum, header, cfg.chainConfig.Bor); err != nil { - return err - } - } - } } - } - if err = s.Update(tx, headNumber); err !=
nil { - return err - } - - if !useExternalTx { - if err = tx.Commit(); err != nil { + if err := checkBorHeaderExtraDataIfRequired(chain, header, cfg.borConfig); err != nil { return err } - } - - logger.Info("["+s.LogPrefix()+"] Sync events processed", "progress", blockNum-1, "lastSpanId", lastSpanId, "lastEventId", lastEventId, "total records", eventRecords, "fetch time", fetchTime, "process time", time.Since(processStart)) - - return -} - -func checkHeaderExtraData( - u Unwinder, - ctx context.Context, - chain consensus.ChainHeaderReader, - blockNum uint64, - header *types.Header, - config *chain.BorConfig, -) error { - spanID := bor.SpanIDAt(blockNum + 1) - spanBytes := chain.BorSpan(spanID) - var sp span.HeimdallSpan - if err := json.Unmarshal(spanBytes, &sp); err != nil { - return err - } - producerSet := make([]*valset.Validator, len(sp.SelectedProducers)) - for i := range sp.SelectedProducers { - producerSet[i] = &sp.SelectedProducers[i] - } - - sort.Sort(valset.ValidatorsByAddress(producerSet)) - - headerVals, err := valset.ParseValidators(bor.GetValidatorBytes(header, config)) - if err != nil { - return err - } - - if len(producerSet) != len(headerVals) { - return bor.ErrInvalidSpanValidators - } - - for i, val := range producerSet { - if !bytes.Equal(val.HeaderBytes(), headerVals[i].HeaderBytes()) { - return bor.ErrInvalidSpanValidators - } - } - return nil -} - -func fetchAndWriteBorEvents( - ctx context.Context, - blockReader services.FullBlockReader, - config *chain.BorConfig, - header *types.Header, - lastEventId uint64, - chainID string, - tx kv.RwTx, - heimdallClient heimdall.IHeimdallClient, - stateReceiverABI abi.ABI, - logPrefix string, - logger log.Logger, -) (uint64, int, time.Duration, error) { - fetchStart := time.Now() - - // Find out the latest eventId - var ( - from uint64 - to time.Time - ) - - if header == nil { - return 0, 0, 0, fmt.Errorf("can't fetch events for nil header") - } - blockNum := header.Number.Uint64() - - if config.IsIndore(blockNum) { - stateSyncDelay := config.CalculateStateSyncDelay(blockNum) - to = time.Unix(int64(header.Time-stateSyncDelay), 0) - } else { - pHeader, err := blockReader.HeaderByNumber(ctx, tx, blockNum-config.CalculateSprint(blockNum)) + var callTime time.Duration + var records int + lastStateSyncEventID, records, callTime, err = fetchRequiredHeimdallStateSyncEventsIfNeeded( + ctx, + header, + tx, + cfg, + s.LogPrefix(), + logger, + func() (uint64, error) { + return lastStateSyncEventID, nil + }, + ) if err != nil { - return lastEventId, 0, time.Since(fetchStart), err + return err } - to = time.Unix(int64(pHeader.Time), 0) - } - from = lastEventId + 1 + eventRecords += records + fetchTime += callTime - logger.Debug( - fmt.Sprintf("[%s] Fetching state updates from Heimdall", logPrefix), - "fromID", from, - "to", to.Format(time.RFC3339), - ) - - eventRecords, err := heimdallClient.StateSyncEvents(ctx, from, to.Unix()) - - if err != nil { - return lastEventId, 0, time.Since(fetchStart), err - } - - if config.OverrideStateSyncRecords != nil { - if val, ok := config.OverrideStateSyncRecords[strconv.FormatUint(blockNum, 10)]; ok { - eventRecords = eventRecords[0:val] + if cfg.loopBreakCheck != nil && cfg.loopBreakCheck(int(blockNum-lastBlockNum)) { + break } } - if len(eventRecords) > 0 { - var key, val [8]byte - binary.BigEndian.PutUint64(key[:], blockNum) - binary.BigEndian.PutUint64(val[:], lastEventId+1) + if err = s.Update(tx, headNumber); err != nil { + return err } - const method = "commitState" - wroteIndex := false - for i, 
eventRecord := range eventRecords { - if eventRecord.ID <= lastEventId { - continue - } - if lastEventId+1 != eventRecord.ID || eventRecord.ChainID != chainID || !eventRecord.Time.Before(to) { - return lastEventId, i, time.Since(fetchStart), fmt.Errorf("invalid event record received blockNum=%d, eventId=%d (exp %d), chainId=%s (exp %s), time=%s (exp to %s)", blockNum, eventRecord.ID, lastEventId+1, eventRecord.ChainID, chainID, eventRecord.Time, to) - } - - eventRecordWithoutTime := eventRecord.BuildEventRecord() - - recordBytes, err := rlp.EncodeToBytes(eventRecordWithoutTime) - if err != nil { - return lastEventId, i, time.Since(fetchStart), err - } - - data, err := stateReceiverABI.Pack(method, big.NewInt(eventRecord.Time.Unix()), recordBytes) - if err != nil { - logger.Error(fmt.Sprintf("[%s] Unable to pack tx for commitState", logPrefix), "err", err) - return lastEventId, i, time.Since(fetchStart), err - } - var eventIdBuf [8]byte - binary.BigEndian.PutUint64(eventIdBuf[:], eventRecord.ID) - if err = tx.Put(kv.BorEvents, eventIdBuf[:], data); err != nil { - return lastEventId, i, time.Since(fetchStart), err - } - if !wroteIndex { - var blockNumBuf [8]byte - binary.BigEndian.PutUint64(blockNumBuf[:], blockNum) - binary.BigEndian.PutUint64(eventIdBuf[:], eventRecord.ID) - if err = tx.Put(kv.BorEventNums, blockNumBuf[:], eventIdBuf[:]); err != nil { - return lastEventId, i, time.Since(fetchStart), err - } - wroteIndex = true + if !useExternalTx { + if err = tx.Commit(); err != nil { + return err } - - lastEventId++ } - return lastEventId, len(eventRecords), time.Since(fetchStart), nil -} + logger.Info("["+s.LogPrefix()+"] Sync events processed", "progress", blockNum-1, "lastSpanID", lastSpanID, "lastStateSyncEventID", lastStateSyncEventID, "total records", eventRecords, "fetch time", fetchTime, "process time", time.Since(processStart)) -func fetchAndWriteSpans( - ctx context.Context, - spanId uint64, - tx kv.RwTx, - heimdallClient heimdall.IHeimdallClient, - logPrefix string, - logger log.Logger, -) (uint64, error) { - response, err := heimdallClient.Span(ctx, spanId) - if err != nil { - return 0, err - } - spanBytes, err := json.Marshal(response) - if err != nil { - return 0, err - } - var spanIDBytes [8]byte - binary.BigEndian.PutUint64(spanIDBytes[:], spanId) - if err = tx.Put(kv.BorSpans, spanIDBytes[:], spanBytes); err != nil { - return 0, err - } - logger.Debug(fmt.Sprintf("[%s] Wrote span", logPrefix), "id", spanId) - return spanId, nil + return } -func loadSnapshot(blockNum uint64, hash libcommon.Hash, config *chain.BorConfig, recents *lru.ARCCache[libcommon.Hash, *bor.Snapshot], +func loadSnapshot(blockNum uint64, hash libcommon.Hash, config *borcfg.BorConfig, recents *lru.ARCCache[libcommon.Hash, *bor.Snapshot], signatures *lru.ARCCache[libcommon.Hash, libcommon.Address], snapDb kv.RwDB, logger log.Logger) *bor.Snapshot { @@ -530,7 +308,7 @@ func persistValidatorSets( u Unwinder, tx kv.Tx, blockReader services.FullBlockReader, - config *chain.BorConfig, + config *borcfg.BorConfig, chain consensus.ChainHeaderReader, blockNum uint64, hash libcommon.Hash, @@ -645,9 +423,10 @@ func persistValidatorSets( func initValidatorSets( ctx context.Context, - tx kv.Tx, + tx kv.RwTx, blockReader services.FullBlockReader, - config *chain.BorConfig, + config *borcfg.BorConfig, + heimdallClient heimdall.HeimdallClient, chain consensus.ChainHeaderReader, blockNum uint64, recents *lru.ARCCache[libcommon.Hash, *bor.Snapshot], @@ -673,15 +452,24 @@ func initValidatorSets( // get validators and 
current span zeroSpanBytes, err := blockReader.Span(ctx, tx, 0) + if err != nil { - return nil, err + if _, err := fetchAndWriteHeimdallSpan(ctx, 0, tx, heimdallClient, logPrefix, logger); err != nil { + return nil, err + } + + zeroSpanBytes, err = blockReader.Span(ctx, tx, 0) + + if err != nil { + return nil, err + } } if zeroSpanBytes == nil { return nil, fmt.Errorf("zero span not found") } - var zeroSpan span.HeimdallSpan + var zeroSpan heimdall.HeimdallSpan if err = json.Unmarshal(zeroSpanBytes, &zeroSpan); err != nil { return nil, err } @@ -696,7 +484,7 @@ func initValidatorSets( g.SetLimit(estimate.AlmostAllCPUs()) defer g.Wait() - batchSize := 128 // must be < inmemorySignatures + batchSize := 128 // must be < InMemorySignatures initialHeaders := make([]*types.Header, 0, batchSize) parentHeader := zeroHeader for i := uint64(1); i <= blockNum; i++ { @@ -704,7 +492,7 @@ func initValidatorSets( { // `snap.apply` bottleneck - is recover of signer. // to speedup: recover signer in background goroutines and save in `sigcache` - // `batchSize` < `inmemorySignatures`: means all current batch will fit in cache - and `snap.apply` will find it there. + // `batchSize` < `InMemorySignatures`: means all current batch will fit in cache - and `snap.apply` will find it there. g.Go(func() error { if header == nil { return nil @@ -738,8 +526,56 @@ func initValidatorSets( return snap, nil } +func checkBorHeaderExtraDataIfRequired(chr consensus.ChainHeaderReader, header *types.Header, cfg *borcfg.BorConfig) error { + blockNum := header.Number.Uint64() + sprintLength := cfg.CalculateSprintLength(blockNum) + if (blockNum+1)%sprintLength != 0 { + // not last block of a sprint in a span, so no check needed (we only check last block of a sprint) + return nil + } + + return checkBorHeaderExtraData(chr, header, cfg) +} + +func checkBorHeaderExtraData(chr consensus.ChainHeaderReader, header *types.Header, cfg *borcfg.BorConfig) error { + spanID := bor.SpanIDAt(header.Number.Uint64() + 1) + spanBytes := chr.BorSpan(spanID) + var sp heimdall.HeimdallSpan + if err := json.Unmarshal(spanBytes, &sp); err != nil { + return err + } + + producerSet := make([]*valset.Validator, len(sp.SelectedProducers)) + for i := range sp.SelectedProducers { + producerSet[i] = &sp.SelectedProducers[i] + } + + sort.Sort(valset.ValidatorsByAddress(producerSet)) + + headerVals, err := valset.ParseValidators(bor.GetValidatorBytes(header, cfg)) + if err != nil { + return err + } + + // span 0 at least for mumbai has a header mismatch in + // its first span. Since we control neither the span nor
+ // the headers (they are external data) - we just don't do the + // check as it will halt further processing + if len(producerSet) != len(headerVals) && spanID > 0 { + return ErrHeaderValidatorsLengthMismatch + } + + for i, val := range producerSet { + if !bytes.Equal(val.HeaderBytes(), headerVals[i].HeaderBytes()) { + return ErrHeaderValidatorsBytesMismatch + } + } + + return nil +} + func BorHeimdallUnwind(u *UnwindState, ctx context.Context, s *StageState, tx kv.RwTx, cfg BorHeimdallCfg) (err error) { - if cfg.chainConfig.Bor == nil { + if cfg.borConfig == nil { return } useExternalTx := tx != nil @@ -812,7 +648,7 @@ func BorHeimdallUnwind(u *UnwindState, ctx context.Context, s *StageState, tx kv } func BorHeimdallPrune(s *PruneState, ctx context.Context, tx kv.RwTx, cfg BorHeimdallCfg) (err error) { - if cfg.chainConfig.Bor == nil { + if cfg.borConfig == nil { return } return diff --git a/eth/stagedsync/stage_bor_heimdall_test.go b/eth/stagedsync/stage_bor_heimdall_test.go index 8f54635d77e..e937b38f33b 100644 --- a/eth/stagedsync/stage_bor_heimdall_test.go +++ b/eth/stagedsync/stage_bor_heimdall_test.go @@ -1,26 +1,66 @@ package stagedsync_test import ( + "bytes" "context" + "errors" + "math/big" "testing" + "time" "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/require" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/eth/stagedsync" + "github.com/ledgerwatch/erigon/eth/stagedsync/stagedsynctest" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/eth/stagedsync/test" - "github.com/ledgerwatch/erigon/turbo/testlog" + "github.com/ledgerwatch/erigon/polygon/bor" + "github.com/ledgerwatch/erigon/polygon/bor/valset" ) func TestBorHeimdallForwardPersistsSpans(t *testing.T) { t.Parallel() ctx := context.Background() - logger := testlog.Logger(t, log.LvlInfo) + numBlocks := 4000 + testHarness := stagedsynctest.InitHarness(ctx, t, stagedsynctest.HarnessCfg{ + ChainConfig: stagedsynctest.BorDevnetChainConfigWithNoBlockSealDelays(), + GenerateChainNumBlocks: numBlocks, + LogLvl: log.LvlInfo, + }) + // pretend-update previous stage progress + testHarness.SaveStageProgress(ctx, t, stages.Headers, uint64(numBlocks)) + + // run stage under test + testHarness.RunStageForward(t, stages.BorHeimdall) + + // asserts + spans, err := testHarness.ReadSpansFromDB(ctx) + require.NoError(t, err) + require.Len(t, spans, 2) + require.Equal(t, uint64(0), spans[0].ID) + require.Equal(t, uint64(0), spans[0].StartBlock) + require.Equal(t, uint64(255), spans[0].EndBlock) + require.Equal(t, uint64(1), spans[1].ID) + require.Equal(t, uint64(256), spans[1].StartBlock) + require.Equal(t, uint64(6655), spans[1].EndBlock) +} + +func TestBorHeimdallForwardFetchesNextSpanDuringLastSprintOfCurrentSpan(t *testing.T) { + // heimdall prepares the next span a number of sprints before the end of the current one + // we should be fetching the next span once we reach the last sprint of the current span + // this mimics the behaviour in bor + t.Parallel() + + ctx := context.Background() numBlocks := 6640 - testHarness := test.InitHarness(ctx, t, logger, test.HarnessCfg{ - ChainConfig: test.BorDevnetChainConfigWithNoBlockSealDelays(), + testHarness := stagedsynctest.InitHarness(ctx, t, stagedsynctest.HarnessCfg{ + ChainConfig: stagedsynctest.BorDevnetChainConfigWithNoBlockSealDelays(), GenerateChainNumBlocks: numBlocks, + LogLvl:
log.LvlInfo, }) // pretend-update previous stage progress testHarness.SaveStageProgress(ctx, t, stages.Headers, uint64(numBlocks)) @@ -29,7 +69,7 @@ func TestBorHeimdallForwardPersistsSpans(t *testing.T) { testHarness.RunStageForward(t, stages.BorHeimdall) // asserts - spans, err := testHarness.ReadSpansFromDb(ctx) + spans, err := testHarness.ReadSpansFromDB(ctx) require.NoError(t, err) require.Len(t, spans, 3) require.Equal(t, uint64(0), spans[0].ID) @@ -47,11 +87,11 @@ func TestBorHeimdallForwardPersistsStateSyncEvents(t *testing.T) { t.Parallel() ctx := context.Background() - logger := testlog.Logger(t, log.LvlInfo) numBlocks := 96 - testHarness := test.InitHarness(ctx, t, logger, test.HarnessCfg{ - ChainConfig: test.BorDevnetChainConfigWithNoBlockSealDelays(), + testHarness := stagedsynctest.InitHarness(ctx, t, stagedsynctest.HarnessCfg{ + ChainConfig: stagedsynctest.BorDevnetChainConfigWithNoBlockSealDelays(), GenerateChainNumBlocks: numBlocks, + LogLvl: log.LvlInfo, }) // pretend-update previous stage progress testHarness.SaveStageProgress(ctx, t, stages.Headers, uint64(numBlocks)) @@ -61,11 +101,11 @@ func TestBorHeimdallForwardPersistsStateSyncEvents(t *testing.T) { // asserts // 1 event per sprint expected - events, err := testHarness.ReadStateSyncEventsFromDb(ctx) + events, err := testHarness.ReadStateSyncEventsFromDB(ctx) require.NoError(t, err) require.Len(t, events, 6) - firstEventNumPerBlock, err := testHarness.ReadFirstStateSyncEventNumPerBlockFromDb(ctx) + firstEventNumPerBlock, err := testHarness.ReadFirstStateSyncEventNumPerBlockFromDB(ctx) require.NoError(t, err) require.Len(t, firstEventNumPerBlock, 6) require.Equal(t, uint64(1), firstEventNumPerBlock[16]) @@ -75,3 +115,99 @@ func TestBorHeimdallForwardPersistsStateSyncEvents(t *testing.T) { require.Equal(t, uint64(5), firstEventNumPerBlock[80]) require.Equal(t, uint64(6), firstEventNumPerBlock[96]) } + +func TestBorHeimdallForwardErrHeaderValidatorsLengthMismatch(t *testing.T) { + t.Parallel() + + ctx := context.Background() + numBlocks := 271 + validatorKey1, err := crypto.GenerateKey() + require.NoError(t, err) + validatorKey2, err := crypto.GenerateKey() + require.NoError(t, err) + testHarness := stagedsynctest.InitHarness(ctx, t, stagedsynctest.HarnessCfg{ + ChainConfig: stagedsynctest.BorDevnetChainConfigWithNoBlockSealDelays(), + GenerateChainNumBlocks: numBlocks, + LogLvl: log.LvlInfo, + HeimdallProducersOverride: map[uint64][]valset.Validator{ + 1: { + *valset.NewValidator(crypto.PubkeyToAddress(validatorKey1.PublicKey), 1), + *valset.NewValidator(crypto.PubkeyToAddress(validatorKey2.PublicKey), 1), + }, + }, + }) + // pretend-update previous stage progress + testHarness.SaveStageProgress(ctx, t, stages.Headers, uint64(numBlocks)) + + // run stage under test + testHarness.RunStageForwardWithErrorIs(t, stages.BorHeimdall, stagedsync.ErrHeaderValidatorsLengthMismatch) +} + +func TestBorHeimdallForwardErrHeaderValidatorsBytesMismatch(t *testing.T) { + t.Parallel() + + ctx := context.Background() + numBlocks := 271 + validatorKey1, err := crypto.GenerateKey() + require.NoError(t, err) + testHarness := stagedsynctest.InitHarness(ctx, t, stagedsynctest.HarnessCfg{ + ChainConfig: stagedsynctest.BorDevnetChainConfigWithNoBlockSealDelays(), + GenerateChainNumBlocks: numBlocks, + LogLvl: log.LvlInfo, + HeimdallProducersOverride: map[uint64][]valset.Validator{ + 1: { + *valset.NewValidator(crypto.PubkeyToAddress(validatorKey1.PublicKey), 1), + }, + }, + }) + // pretend-update previous stage progress + 
testHarness.SaveStageProgress(ctx, t, stages.Headers, uint64(numBlocks)) + + // run stage under test + testHarness.RunStageForwardWithErrorIs(t, stages.BorHeimdall, stagedsync.ErrHeaderValidatorsBytesMismatch) +} + +func TestBorHeimdallForwardDetectsUnauthorizedSignerError(t *testing.T) { + t.Parallel() + + ctx := context.Background() + numBlocks := 312 + chainConfig := stagedsynctest.BorDevnetChainConfigWithNoBlockSealDelays() + testHarness := stagedsynctest.InitHarness(ctx, t, stagedsynctest.HarnessCfg{ + ChainConfig: chainConfig, + GenerateChainNumBlocks: numBlocks, + LogLvl: log.LvlInfo, + }) + + // prepare invalid header and insert it in the db + latestHeader, err := testHarness.ReadHeaderByNumber(ctx, uint64(numBlocks)) + require.NoError(t, err) + gasLimit := uint64(15500) + invalidHeader := core.MakeEmptyHeader(latestHeader, chainConfig, uint64(time.Now().Unix()), &gasLimit) + invalidHeader.Number = new(big.Int).Add(latestHeader.Number, big.NewInt(1)) + invalidHeader.Extra = bytes.Repeat([]byte{0x00}, types.ExtraVanityLength+types.ExtraSealLength) + validatorKey1, err := crypto.GenerateKey() + require.NoError(t, err) + sighash, err := crypto.Sign(crypto.Keccak256(bor.BorRLP(invalidHeader, testHarness.BorConfig())), validatorKey1) + require.NoError(t, err) + copy(invalidHeader.Extra[len(invalidHeader.Extra)-types.ExtraSealLength:], sighash) + testHarness.SaveHeader(ctx, t, invalidHeader) + // pretend-update previous stage progress + testHarness.SaveStageProgress(ctx, t, stages.Headers, invalidHeader.Number.Uint64()) + require.Equal(t, uint64(numBlocks+1), testHarness.GetStageProgress(ctx, t, stages.Headers)) + require.Equal(t, uint64(0), testHarness.GetStageProgress(ctx, t, stages.BorHeimdall)) + + // run stage under test + testHarness.RunStageForward(t, stages.BorHeimdall) + + // asserts + require.Equal(t, uint64(numBlocks+1), testHarness.GetStageProgress(ctx, t, stages.BorHeimdall)) + require.Equal(t, invalidHeader.Number.Uint64()-1, testHarness.StateSyncUnwindPoint()) + unwindReason := testHarness.StateSyncUnwindReason() + require.Equal(t, invalidHeader.Hash(), *unwindReason.Block) + var unauthorizedSignerErr *valset.UnauthorizedSignerError + ok := errors.As(unwindReason.Err, &unauthorizedSignerErr) + require.True(t, ok) + require.Equal(t, invalidHeader.Number.Uint64(), unauthorizedSignerErr.Number) + require.Equal(t, crypto.PubkeyToAddress(validatorKey1.PublicKey).Bytes(), unauthorizedSignerErr.Signer) +} diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go index 8a512eba3e2..7bf77db28d3 100644 --- a/eth/stagedsync/stage_execute.go +++ b/eth/stagedsync/stage_execute.go @@ -28,6 +28,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" "github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2" libstate "github.com/ledgerwatch/erigon-lib/state" + "github.com/ledgerwatch/erigon-lib/wrap" "github.com/ledgerwatch/erigon/common/changeset" "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/consensus" @@ -144,12 +145,11 @@ func executeBlock( writeChangesets bool, writeReceipts bool, writeCallTraces bool, - initialCycle bool, stateStream bool, logger log.Logger, ) error { blockNum := block.NumberU64() - stateReader, stateWriter, err := newStateReaderWriter(batch, tx, block, writeChangesets, cfg.accumulator, cfg.blockReader, initialCycle, stateStream) + stateReader, stateWriter, err := newStateReaderWriter(batch, tx, block, writeChangesets, cfg.accumulator, cfg.blockReader, stateStream) if err != nil { return err } @@ -210,16 +210,14 
@@ func newStateReaderWriter( writeChangesets bool, accumulator *shards.Accumulator, br services.FullBlockReader, - initialCycle bool, stateStream bool, ) (state.StateReader, state.WriterWithChangeSets, error) { - var stateReader state.StateReader var stateWriter state.WriterWithChangeSets stateReader = state.NewPlainStateReader(batch) - if !initialCycle && stateStream { + if stateStream { txs, err := br.RawTransactions(context.Background(), tx, block.NumberU64(), block.NumberU64()) if err != nil { return nil, nil, err @@ -239,7 +237,7 @@ func newStateReaderWriter( // ================ Erigon3 ================ -func ExecBlockV3(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool, logger log.Logger) (err error) { +func ExecBlockV3(s *StageState, u Unwinder, txc wrap.TxContainer, toBlock uint64, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool, logger log.Logger) (err error) { workersCount := cfg.syncCfg.ExecWorkerCount //workersCount := 2 if !initialCycle { @@ -248,7 +246,7 @@ func ExecBlockV3(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx cont cfg.agg.SetWorkers(estimate.CompressSnapshot.WorkersQuarter()) if initialCycle { - reconstituteToBlock, found, err := reconstituteBlock(cfg.agg, cfg.db, tx) + reconstituteToBlock, found, err := reconstituteBlock(cfg.agg, cfg.db, txc.Tx) if err != nil { return err } @@ -264,7 +262,7 @@ func ExecBlockV3(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx cont } } - prevStageProgress, err := senderStageProgress(tx, cfg.db) + prevStageProgress, err := senderStageProgress(txc.Tx, cfg.db) if err != nil { return err } @@ -280,8 +278,8 @@ func ExecBlockV3(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx cont if to > s.BlockNumber+16 { logger.Info(fmt.Sprintf("[%s] Blocks execution", logPrefix), "from", s.BlockNumber, "to", to) } - parallel := tx == nil - if err := ExecV3(ctx, s, u, workersCount, cfg, tx, parallel, logPrefix, + parallel := txc.Tx == nil + if err := ExecV3(ctx, s, u, workersCount, cfg, txc, parallel, logPrefix, to, logger, initialCycle); err != nil { return fmt.Errorf("ExecV3: %w", err) } @@ -308,28 +306,28 @@ func reconstituteBlock(agg *libstate.AggregatorV3, db kv.RoDB, tx kv.Tx) (n uint return } -func unwindExec3(u *UnwindState, s *StageState, tx kv.RwTx, ctx context.Context, cfg ExecuteBlockCfg, accumulator *shards.Accumulator, logger log.Logger) (err error) { +func unwindExec3(u *UnwindState, s *StageState, txc wrap.TxContainer, ctx context.Context, cfg ExecuteBlockCfg, accumulator *shards.Accumulator, logger log.Logger) (err error) { cfg.agg.SetLogPrefix(s.LogPrefix()) rs := state.NewStateV3(cfg.dirs.Tmp, logger) // unwind all txs of u.UnwindPoint block. 
1 txn in begin/end of block - system txs - txNum, err := rawdbv3.TxNums.Min(tx, u.UnwindPoint+1) + txNum, err := rawdbv3.TxNums.Min(txc.Tx, u.UnwindPoint+1) if err != nil { return err } - if err := rs.Unwind(ctx, tx, txNum, cfg.agg, accumulator); err != nil { + if err := rs.Unwind(ctx, txc.Tx, u.UnwindPoint, txNum, cfg.agg, accumulator); err != nil { return fmt.Errorf("StateV3.Unwind: %w", err) } - if err := rs.Flush(ctx, tx, s.LogPrefix(), time.NewTicker(30*time.Second)); err != nil { + if err := rs.Flush(ctx, txc.Tx, s.LogPrefix(), time.NewTicker(30*time.Second)); err != nil { return fmt.Errorf("StateV3.Flush: %w", err) } - if err := rawdb.TruncateReceipts(tx, u.UnwindPoint+1); err != nil { + if err := rawdb.TruncateReceipts(txc.Tx, u.UnwindPoint+1); err != nil { return fmt.Errorf("truncate receipts: %w", err) } - if err := rawdb.TruncateBorReceipts(tx, u.UnwindPoint+1); err != nil { + if err := rawdb.TruncateBorReceipts(txc.Tx, u.UnwindPoint+1); err != nil { return fmt.Errorf("truncate bor receipts: %w", err) } - if err := rawdb.DeleteNewerEpochs(tx, u.UnwindPoint+1); err != nil { + if err := rawdb.DeleteNewerEpochs(txc.Tx, u.UnwindPoint+1); err != nil { return fmt.Errorf("delete newer epochs: %w", err) } @@ -358,29 +356,29 @@ func senderStageProgress(tx kv.Tx, db kv.RoDB) (prevStageProgress uint64, err er // ================ Erigon3 End ================ -func SpawnExecuteBlocksStage(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool, logger log.Logger) (err error) { +func SpawnExecuteBlocksStage(s *StageState, u Unwinder, txc wrap.TxContainer, toBlock uint64, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool, logger log.Logger) (err error) { if cfg.historyV3 { - if err = ExecBlockV3(s, u, tx, toBlock, ctx, cfg, initialCycle, logger); err != nil { + if err = ExecBlockV3(s, u, txc, toBlock, ctx, cfg, initialCycle, logger); err != nil { return err } return nil } quit := ctx.Done() - useExternalTx := tx != nil + useExternalTx := txc.Tx != nil if !useExternalTx { - tx, err = cfg.db.BeginRw(context.Background()) + txc.Tx, err = cfg.db.BeginRw(context.Background()) if err != nil { return err } - defer tx.Rollback() + defer txc.Tx.Rollback() } - prevStageProgress, errStart := stages.GetStageProgress(tx, stages.Senders) + prevStageProgress, errStart := stages.GetStageProgress(txc.Tx, stages.Senders) if errStart != nil { return errStart } - nextStageProgress, err := stages.GetStageProgress(tx, stages.HashState) + nextStageProgress, err := stages.GetStageProgress(txc.Tx, stages.HashState) if err != nil { return err } @@ -388,16 +386,20 @@ func SpawnExecuteBlocksStage(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint logPrefix := s.LogPrefix() var to = prevStageProgress + if toBlock > 0 { to = cmp.Min(prevStageProgress, toBlock) } + if to <= s.BlockNumber { return nil } + if to > s.BlockNumber+16 { logger.Info(fmt.Sprintf("[%s] Blocks execution", logPrefix), "from", s.BlockNumber, "to", to) } - stateStream := !initialCycle && cfg.stateStream && to-s.BlockNumber < stateStreamLimit + + stateStream := cfg.stateStream && to-s.BlockNumber < stateStreamLimit // changes are stored through memory buffer logEvery := time.NewTicker(logInterval) @@ -415,7 +417,7 @@ func SpawnExecuteBlocksStage(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint var batch kv.PendingMutations // state is stored through ethdb batches - batch = membatch.NewHashBatch(tx, quit, cfg.dirs.Tmp, logger) + batch = membatch.NewHashBatch(txc.Tx, quit, 
cfg.dirs.Tmp, logger) // avoids stacking defers within the loop defer func() { batch.Close() @@ -443,11 +445,11 @@ Loop: } } - blockHash, err := cfg.blockReader.CanonicalHash(ctx, tx, blockNum) + blockHash, err := cfg.blockReader.CanonicalHash(ctx, txc.Tx, blockNum) if err != nil { return err } - block, _, err := cfg.blockReader.BlockWithSenders(ctx, tx, blockHash, blockNum) + block, _, err := cfg.blockReader.BlockWithSenders(ctx, txc.Tx, blockHash, blockNum) if err != nil { return err } @@ -463,11 +465,11 @@ Loop: writeReceipts := nextStagesExpectData || blockNum > cfg.prune.Receipts.PruneTo(to) writeCallTraces := nextStagesExpectData || blockNum > cfg.prune.CallTraces.PruneTo(to) - _, isMemoryMutation := tx.(*membatchwithdb.MemoryMutation) + _, isMemoryMutation := txc.Tx.(*membatchwithdb.MemoryMutation) if cfg.silkworm != nil && !isMemoryMutation { - blockNum, err = silkworm.ExecuteBlocks(cfg.silkworm, tx, cfg.chainConfig.ChainID, blockNum, to, uint64(cfg.batchSize), writeChangeSets, writeReceipts, writeCallTraces) + blockNum, err = silkworm.ExecuteBlocks(cfg.silkworm, txc.Tx, cfg.chainConfig.ChainID, blockNum, to, uint64(cfg.batchSize), writeChangeSets, writeReceipts, writeCallTraces) } else { - err = executeBlock(block, tx, batch, cfg, *cfg.vmConfig, writeChangeSets, writeReceipts, writeCallTraces, initialCycle, stateStream, logger) + err = executeBlock(block, txc.Tx, batch, cfg, *cfg.vmConfig, writeChangeSets, writeReceipts, writeCallTraces, stateStream, logger) } if err != nil { @@ -507,24 +509,24 @@ Loop: if shouldUpdateProgress { logger.Info("Committed State", "gas reached", currentStateGas, "gasTarget", gasState) currentStateGas = 0 - if err = batch.Flush(ctx, tx); err != nil { + if err = batch.Flush(ctx, txc.Tx); err != nil { return err } - if err = s.Update(tx, stageProgress); err != nil { + if err = s.Update(txc.Tx, stageProgress); err != nil { return err } if !useExternalTx { - if err = tx.Commit(); err != nil { + if err = txc.Tx.Commit(); err != nil { return err } - tx, err = cfg.db.BeginRw(context.Background()) + txc.Tx, err = cfg.db.BeginRw(context.Background()) if err != nil { return err } // TODO: This creates stacked up deferrals - defer tx.Rollback() + defer txc.Tx.Rollback() } - batch = membatch.NewHashBatch(tx, quit, cfg.dirs.Tmp, logger) + batch = membatch.NewHashBatch(txc.Tx, quit, cfg.dirs.Tmp, logger) } gas = gas + block.GasUsed() @@ -534,25 +536,25 @@ Loop: case <-logEvery.C: logBlock, logTx, logTime = logProgress(logPrefix, logBlock, logTime, blockNum, logTx, lastLogTx, gas, float64(currentStateGas)/float64(gasState), batch, logger) gas = 0 - tx.CollectMetrics() + txc.Tx.CollectMetrics() syncMetrics[stages.Execution].SetUint64(blockNum) } } - if err = s.Update(batch, stageProgress); err != nil { + if err = s.Update(txc.Tx, stageProgress); err != nil { return err } - if err = batch.Flush(ctx, tx); err != nil { + if err = batch.Flush(ctx, txc.Tx); err != nil { return fmt.Errorf("batch commit: %w", err) } - _, err = rawdb.IncrementStateVersion(tx) + _, err = rawdb.IncrementStateVersion(txc.Tx) if err != nil { return fmt.Errorf("writing plain state version: %w", err) } if !useExternalTx { - if err = tx.Commit(); err != nil { + if err = txc.Tx.Commit(); err != nil { return err } } @@ -673,50 +675,50 @@ func logProgress(logPrefix string, prevBlock uint64, prevTime time.Time, current return currentBlock, currentTx, currentTime } -func UnwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool, logger 
log.Logger) (err error) { +func UnwindExecutionStage(u *UnwindState, s *StageState, txc wrap.TxContainer, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool, logger log.Logger) (err error) { if u.UnwindPoint >= s.BlockNumber { return nil } - useExternalTx := tx != nil + useExternalTx := txc.Tx != nil if !useExternalTx { - tx, err = cfg.db.BeginRw(context.Background()) + txc.Tx, err = cfg.db.BeginRw(context.Background()) if err != nil { return err } - defer tx.Rollback() + defer txc.Tx.Rollback() } logPrefix := u.LogPrefix() logger.Info(fmt.Sprintf("[%s] Unwind Execution", logPrefix), "from", s.BlockNumber, "to", u.UnwindPoint) - if err = unwindExecutionStage(u, s, tx, ctx, cfg, initialCycle, logger); err != nil { + if err = unwindExecutionStage(u, s, txc, ctx, cfg, initialCycle, logger); err != nil { return err } - if err = u.Done(tx); err != nil { + if err = u.Done(txc.Tx); err != nil { return err } if !useExternalTx { - if err = tx.Commit(); err != nil { + if err = txc.Tx.Commit(); err != nil { return err } } return nil } -func unwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool, logger log.Logger) error { +func unwindExecutionStage(u *UnwindState, s *StageState, txc wrap.TxContainer, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool, logger log.Logger) error { logPrefix := s.LogPrefix() stateBucket := kv.PlainState storageKeyLength := length.Addr + length.Incarnation + length.Hash var accumulator *shards.Accumulator - if !initialCycle && cfg.stateStream && s.BlockNumber-u.UnwindPoint < stateStreamLimit { + if cfg.stateStream && s.BlockNumber-u.UnwindPoint < stateStreamLimit { accumulator = cfg.accumulator - hash, err := cfg.blockReader.CanonicalHash(ctx, tx, u.UnwindPoint) + hash, err := cfg.blockReader.CanonicalHash(ctx, txc.Tx, u.UnwindPoint) if err != nil { return fmt.Errorf("read canonical hash of unwind point: %w", err) } - txs, err := cfg.blockReader.RawTransactions(ctx, tx, u.UnwindPoint, s.BlockNumber) + txs, err := cfg.blockReader.RawTransactions(ctx, txc.Tx, u.UnwindPoint, s.BlockNumber) if err != nil { return err } @@ -724,17 +726,17 @@ func unwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, ctx context } if cfg.historyV3 { - return unwindExec3(u, s, tx, ctx, cfg, accumulator, logger) + return unwindExec3(u, s, txc, ctx, cfg, accumulator, logger) } changes := etl.NewCollector(logPrefix, cfg.dirs.Tmp, etl.NewOldestEntryBuffer(etl.BufferOptimalSize), logger) defer changes.Close() - errRewind := changeset.RewindData(tx, s.BlockNumber, u.UnwindPoint, changes, ctx.Done()) + errRewind := changeset.RewindData(txc.Tx, s.BlockNumber, u.UnwindPoint, changes, ctx.Done()) if errRewind != nil { return fmt.Errorf("getting rewind data: %w", errRewind) } - if err := changes.Load(tx, stateBucket, func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { + if err := changes.Load(txc.Tx, stateBucket, func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { if len(k) == 20 { if len(v) > 0 { var acc accounts.Account @@ -743,19 +745,19 @@ func unwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, ctx context } // Fetch the code hash - recoverCodeHashPlain(&acc, tx, k) + recoverCodeHashPlain(&acc, txc.Tx, k) var address common.Address copy(address[:], k) // cleanup contract code bucket - original, err := state.NewPlainStateReader(tx).ReadAccountData(address) + original, err := state.NewPlainStateReader(txc.Tx).ReadAccountData(address) if err != 
nil { return fmt.Errorf("read account for %x: %w", address, err) } if original != nil { // clean up all the code incarnations original incarnation and the new one for incarnation := original.Incarnation; incarnation > acc.Incarnation && incarnation > 0; incarnation-- { - err = tx.Delete(kv.PlainContractCode, dbutils.PlainGenerateStoragePrefix(address[:], incarnation)) + err = txc.Tx.Delete(kv.PlainContractCode, dbutils.PlainGenerateStoragePrefix(address[:], incarnation)) if err != nil { return fmt.Errorf("writeAccountPlain for %x: %w", address, err) } @@ -807,23 +809,23 @@ func unwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, ctx context return err } - if err := historyv2.Truncate(tx, u.UnwindPoint+1); err != nil { + if err := historyv2.Truncate(txc.Tx, u.UnwindPoint+1); err != nil { return err } - if err := rawdb.TruncateReceipts(tx, u.UnwindPoint+1); err != nil { + if err := rawdb.TruncateReceipts(txc.Tx, u.UnwindPoint+1); err != nil { return fmt.Errorf("truncate receipts: %w", err) } - if err := rawdb.TruncateBorReceipts(tx, u.UnwindPoint+1); err != nil { + if err := rawdb.TruncateBorReceipts(txc.Tx, u.UnwindPoint+1); err != nil { return fmt.Errorf("truncate bor receipts: %w", err) } - if err := rawdb.DeleteNewerEpochs(tx, u.UnwindPoint+1); err != nil { + if err := rawdb.DeleteNewerEpochs(txc.Tx, u.UnwindPoint+1); err != nil { return fmt.Errorf("delete newer epochs: %w", err) } // Truncate CallTraceSet keyStart := hexutility.EncodeTs(u.UnwindPoint + 1) - c, err := tx.RwCursorDupSort(kv.CallTraceSet) + c, err := txc.Tx.RwCursorDupSort(kv.CallTraceSet) if err != nil { return err } @@ -832,7 +834,7 @@ func unwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, ctx context if err != nil { return err } - if err = tx.Delete(kv.CallTraceSet, k); err != nil { + if err = txc.Tx.Delete(kv.CallTraceSet, k); err != nil { return err } } diff --git a/eth/stagedsync/stage_execute_test.go b/eth/stagedsync/stage_execute_test.go index 7ef922a112a..a34f6010b14 100644 --- a/eth/stagedsync/stage_execute_test.go +++ b/eth/stagedsync/stage_execute_test.go @@ -11,6 +11,8 @@ import ( "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/require" + "github.com/ledgerwatch/erigon-lib/wrap" + "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" @@ -42,7 +44,7 @@ func TestExec(t *testing.T) { u := &UnwindState{ID: stages.Execution, UnwindPoint: 25} s := &StageState{ID: stages.Execution, BlockNumber: 50} - err = UnwindExecutionStage(u, s, tx2, ctx, cfg, false, logger) + err = UnwindExecutionStage(u, s, wrap.TxContainer{Tx: tx2}, ctx, cfg, false, logger) require.NoError(err) compareCurrentState(t, newAgg(t, logger), tx1, tx2, kv.PlainState, kv.PlainContractCode, kv.ContractTEVMCode) @@ -58,7 +60,7 @@ func TestExec(t *testing.T) { u := &UnwindState{ID: stages.Execution, UnwindPoint: 25} s := &StageState{ID: stages.Execution, BlockNumber: 50} - err = UnwindExecutionStage(u, s, tx2, ctx, cfg, false, logger) + err = UnwindExecutionStage(u, s, wrap.TxContainer{Tx: tx2}, ctx, cfg, false, logger) require.NoError(err) compareCurrentState(t, newAgg(t, logger), tx1, tx2, kv.PlainState, kv.PlainContractCode) @@ -76,7 +78,7 @@ func TestExec(t *testing.T) { } u := &UnwindState{ID: stages.Execution, UnwindPoint: 25} s := &StageState{ID: stages.Execution, BlockNumber: 50} - err = UnwindExecutionStage(u, s, tx2, ctx, cfg, false, logger) + err = UnwindExecutionStage(u, s, wrap.TxContainer{Tx: tx2}, ctx, cfg, false, logger) 
require.NoError(err) compareCurrentState(t, newAgg(t, logger), tx1, tx2, kv.PlainState, kv.PlainContractCode) @@ -204,7 +206,7 @@ func TestExec22(t *testing.T) { u := &UnwindState{ID: stages.Execution, UnwindPoint: 25} s := &StageState{ID: stages.Execution, BlockNumber: 50} - err = UnwindExecutionStage(u, s, tx2, ctx, cfg, false, logger) + err = UnwindExecutionStage(u, s, wrap.TxContainer{Tx: tx2}, ctx, cfg, false, logger) require.NoError(err) compareCurrentState(t, agg, tx1, tx2, kv.PlainState, kv.PlainContractCode) @@ -228,7 +230,7 @@ func TestExec22(t *testing.T) { u := &UnwindState{ID: stages.Execution, UnwindPoint: 25} s := &StageState{ID: stages.Execution, BlockNumber: 50} - err = UnwindExecutionStage(u, s, tx2, ctx, cfg, false, logger) + err = UnwindExecutionStage(u, s, wrap.TxContainer{Tx: tx2}, ctx, cfg, false, logger) require.NoError(err) tx1.ForEach(kv.PlainState, nil, func(k, v []byte) error { diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index e42b35a7058..8a36be2f64a 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -9,17 +9,19 @@ import ( "time" "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/core/rawdb/blockio" - "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/rawdb/blockio" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/engineapi/engine_helpers" "github.com/ledgerwatch/erigon/turbo/services" @@ -49,7 +51,8 @@ type HeadersCfg struct { forkValidator *engine_helpers.ForkValidator notifications *shards.Notifications - loopBreakCheck func() bool + syncConfig ethconfig.Sync + loopBreakCheck func(int) bool } func StageHeadersCfg( @@ -57,6 +60,7 @@ func StageHeadersCfg( headerDownload *headerdownload.HeaderDownload, bodyDownload *bodydownload.BodyDownload, chainConfig chain.Config, + syncConfig ethconfig.Sync, headerReqSend func(context.Context, *headerdownload.HeaderRequest) ([64]byte, bool), announceNewHashes func(context.Context, []headerdownload.Announce), penalize func(context.Context, []headerdownload.PenaltyItem), @@ -67,12 +71,13 @@ func StageHeadersCfg( tmpdir string, notifications *shards.Notifications, forkValidator *engine_helpers.ForkValidator, - loopBreakCheck func() bool) HeadersCfg { + loopBreakCheck func(int) bool) HeadersCfg { return HeadersCfg{ db: db, hd: headerDownload, bodyDownload: bodyDownload, chainConfig: chainConfig, + syncConfig: syncConfig, headerReqSend: headerReqSend, announceNewHashes: announceNewHashes, penalize: penalize, @@ -127,20 +132,21 @@ func HeadersPOW( useExternalTx bool, logger log.Logger, ) error { - var headerProgress uint64 var err error + startTime := time.Now() + if err = cfg.hd.ReadProgressFromDb(tx); err != nil { return err } cfg.hd.SetPOSSync(false) cfg.hd.SetFetchingNew(true) defer cfg.hd.SetFetchingNew(false) - headerProgress = cfg.hd.Progress() + startProgress := cfg.hd.Progress() logPrefix := s.LogPrefix() // Check if this is called straight after the unwinds, which means we need to create new canonical 
diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go
index e42b35a7058..8a36be2f64a 100644
--- a/eth/stagedsync/stage_headers.go
+++ b/eth/stagedsync/stage_headers.go
@@ -9,17 +9,19 @@ import (
 	"time"
 
 	"github.com/c2h5oh/datasize"
+	"github.com/ledgerwatch/log/v3"
+
 	"github.com/ledgerwatch/erigon-lib/chain"
 	libcommon "github.com/ledgerwatch/erigon-lib/common"
 	"github.com/ledgerwatch/erigon-lib/common/dbg"
 	"github.com/ledgerwatch/erigon-lib/common/hexutility"
 	"github.com/ledgerwatch/erigon-lib/kv"
-	"github.com/ledgerwatch/erigon/core/rawdb/blockio"
-	"github.com/ledgerwatch/log/v3"
-
 	"github.com/ledgerwatch/erigon/common"
 	"github.com/ledgerwatch/erigon/core/rawdb"
+	"github.com/ledgerwatch/erigon/core/rawdb/blockio"
 	"github.com/ledgerwatch/erigon/core/types"
+	"github.com/ledgerwatch/erigon/eth/ethconfig"
+	"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
 	"github.com/ledgerwatch/erigon/rlp"
 	"github.com/ledgerwatch/erigon/turbo/engineapi/engine_helpers"
 	"github.com/ledgerwatch/erigon/turbo/services"
@@ -49,7 +51,8 @@ type HeadersCfg struct {
 	forkValidator *engine_helpers.ForkValidator
 	notifications *shards.Notifications
 
-	loopBreakCheck func() bool
+	syncConfig     ethconfig.Sync
+	loopBreakCheck func(int) bool
 }
 
 func StageHeadersCfg(
@@ -57,6 +60,7 @@ func StageHeadersCfg(
 	headerDownload *headerdownload.HeaderDownload,
 	bodyDownload *bodydownload.BodyDownload,
 	chainConfig chain.Config,
+	syncConfig ethconfig.Sync,
 	headerReqSend func(context.Context, *headerdownload.HeaderRequest) ([64]byte, bool),
 	announceNewHashes func(context.Context, []headerdownload.Announce),
 	penalize func(context.Context, []headerdownload.PenaltyItem),
@@ -67,12 +71,13 @@ func StageHeadersCfg(
 	tmpdir string,
 	notifications *shards.Notifications,
 	forkValidator *engine_helpers.ForkValidator,
-	loopBreakCheck func() bool) HeadersCfg {
+	loopBreakCheck func(int) bool) HeadersCfg {
 	return HeadersCfg{
 		db:                db,
 		hd:                headerDownload,
 		bodyDownload:      bodyDownload,
 		chainConfig:       chainConfig,
+		syncConfig:        syncConfig,
 		headerReqSend:     headerReqSend,
 		announceNewHashes: announceNewHashes,
 		penalize:          penalize,
@@ -127,20 +132,21 @@ func HeadersPOW(
 	useExternalTx bool,
 	logger log.Logger,
 ) error {
-	var headerProgress uint64
 	var err error
 
+	startTime := time.Now()
+
 	if err = cfg.hd.ReadProgressFromDb(tx); err != nil {
 		return err
 	}
 	cfg.hd.SetPOSSync(false)
 	cfg.hd.SetFetchingNew(true)
 	defer cfg.hd.SetFetchingNew(false)
-	headerProgress = cfg.hd.Progress()
+	startProgress := cfg.hd.Progress()
 	logPrefix := s.LogPrefix()
 
 	// Check if this is called straight after the unwinds, which means we need to create new canonical markings
-	hash, err := cfg.blockReader.CanonicalHash(ctx, tx, headerProgress)
+	hash, err := cfg.blockReader.CanonicalHash(ctx, tx, startProgress)
 	if err != nil {
 		return err
 	}
@@ -148,7 +154,7 @@ func HeadersPOW(
 	defer logEvery.Stop()
 	if hash == (libcommon.Hash{}) {
 		headHash := rawdb.ReadHeadHeaderHash(tx)
-		if err = fixCanonicalChain(logPrefix, logEvery, headerProgress, headHash, tx, cfg.blockReader, logger); err != nil {
+		if err = fixCanonicalChain(logPrefix, logEvery, startProgress, headHash, tx, cfg.blockReader, logger); err != nil {
 			return err
 		}
 		if !useExternalTx {
@@ -164,21 +170,28 @@ func HeadersPOW(
 		return nil
 	}
 
-	logger.Info(fmt.Sprintf("[%s] Waiting for headers...", logPrefix), "from", headerProgress)
+	logger.Info(fmt.Sprintf("[%s] Waiting for headers...", logPrefix), "from", startProgress)
 
-	localTd, err := rawdb.ReadTd(tx, hash, headerProgress)
+	localTd, err := rawdb.ReadTd(tx, hash, startProgress)
 	if err != nil {
 		return err
 	}
+	/* TEMP TESTING
 	if localTd == nil {
-		return fmt.Errorf("localTD is nil: %d, %x", headerProgress, hash)
+		return fmt.Errorf("localTD is nil: %d, %x", startProgress, hash)
 	}
-	headerInserter := headerdownload.NewHeaderInserter(logPrefix, localTd, headerProgress, cfg.blockReader)
-	cfg.hd.SetHeaderReader(&ChainReaderImpl{config: &cfg.chainConfig, tx: tx, blockReader: cfg.blockReader})
+	TEMP TESTING */
+	headerInserter := headerdownload.NewHeaderInserter(logPrefix, localTd, startProgress, cfg.blockReader)
+	cfg.hd.SetHeaderReader(&ChainReaderImpl{
+		config:      &cfg.chainConfig,
+		tx:          tx,
+		blockReader: cfg.blockReader,
+		logger:      logger,
+	})
 
 	stopped := false
 	var noProgressCounter uint = 0
-	prevProgress := headerProgress
+	prevProgress := startProgress
 	var wasProgress bool
 	var lastSkeletonTime time.Time
 	var peer [64]byte
@@ -186,14 +199,15 @@ func HeadersPOW(
 Loop:
 	for !stopped {
 
-		transitionedToPoS, err := rawdb.Transitioned(tx, headerProgress, cfg.chainConfig.TerminalTotalDifficulty)
+		transitionedToPoS, err := rawdb.Transitioned(tx, startProgress, cfg.chainConfig.TerminalTotalDifficulty)
 		if err != nil {
 			return err
 		}
 		if transitionedToPoS {
-			if err := s.Update(tx, headerProgress); err != nil {
+			if err := s.Update(tx, startProgress); err != nil {
 				return err
 			}
+			s.state.posTransition = &startProgress
 			break
 		}
 
@@ -240,8 +254,9 @@ Loop:
 			}
 		}
 		// Load headers into the database
-		var inSync bool
-		if inSync, err = cfg.hd.InsertHeaders(headerInserter.NewFeedHeaderFunc(tx, cfg.blockReader), cfg.chainConfig.TerminalTotalDifficulty, logPrefix, logEvery.C, uint64(currentTime.Unix())); err != nil {
+		inSync, err := cfg.hd.InsertHeaders(headerInserter.NewFeedHeaderFunc(tx, cfg.blockReader), cfg.syncConfig.LoopBlockLimit, cfg.chainConfig.TerminalTotalDifficulty, logPrefix, logEvery.C, uint64(currentTime.Unix()))
+
+		if err != nil {
 			return err
 		}
 
@@ -254,7 +269,15 @@ Loop:
 			}
 		}
 
-		if cfg.loopBreakCheck != nil && cfg.loopBreakCheck() {
+		if cfg.syncConfig.LoopBlockLimit > 0 {
+			if bodyProgress, err := stages.GetStageProgress(tx, stages.Bodies); err == nil {
+				if cfg.hd.Progress() > bodyProgress && cfg.hd.Progress()-bodyProgress > uint64(cfg.syncConfig.LoopBlockLimit*2) {
+					break
+				}
+			}
+		}
+
+		if cfg.loopBreakCheck != nil && cfg.loopBreakCheck(int(cfg.hd.Progress()-startProgress)) {
 			break
 		}
 
@@ -323,7 +346,16 @@ Loop:
 		return libcommon.ErrStopped
 	}
 	// We do not print the following line if the stage was interrupted
-	logger.Info(fmt.Sprintf("[%s] Processed", logPrefix), "highest inserted", headerInserter.GetHighest(), "age", common.PrettyAge(time.Unix(int64(headerInserter.GetHighestTimestamp()), 0)))
+
+	if s.state.posTransition != nil {
+		logger.Info(fmt.Sprintf("[%s] Transitioned to POS", logPrefix), "block", *s.state.posTransition)
+	} else {
+		headers := headerInserter.GetHighest() - startProgress
+		secs := time.Since(startTime).Seconds()
+		logger.Info(fmt.Sprintf("[%s] Processed", logPrefix),
+			"highest", headerInserter.GetHighest(), "age", common.PrettyAge(time.Unix(int64(headerInserter.GetHighestTimestamp()), 0)),
+			"headers", headers, "in", secs, "blk/sec", uint64(float64(headers)/secs))
+	}
 	return nil
 }
 
@@ -572,7 +604,7 @@ func (cr ChainReaderImpl) BorEventsByBlock(hash libcommon.Hash, number uint64) [
 func (cr ChainReaderImpl) BorSpan(spanId uint64) []byte {
 	span, err := cr.blockReader.Span(context.Background(), cr.tx, spanId)
 	if err != nil {
-		cr.logger.Error("BorSpan failed", "err", err)
+		cr.logger.Error("[staged sync] BorSpan failed", "err", err)
 		return nil
 	}
 	return span
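With the HeadersCfg changes above, the break-check callback now receives how many blocks the current cycle has processed, and LoopBlockLimit caps how far headers may run ahead of bodies. A sketch of a callback matching the new func(int) bool shape (the limit value is an illustrative assumption, not taken from the patch):

    // stop the headers loop once this sync cycle has processed
    // `limit` blocks; 10_000 is an arbitrary example value
    limit := 10_000
    loopBreakCheck := func(processed int) bool {
        return processed >= limit
    }
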
diff --git a/eth/stagedsync/stage_interhashes_test.go b/eth/stagedsync/stage_interhashes_test.go
index 3bf6c7faac3..107369b1659 100644
--- a/eth/stagedsync/stage_interhashes_test.go
+++ b/eth/stagedsync/stage_interhashes_test.go
@@ -3,9 +3,10 @@ package stagedsync_test
 import (
 	"context"
 	"encoding/binary"
-	"github.com/ledgerwatch/erigon-lib/kv/dbutils"
 	"testing"
 
+	"github.com/ledgerwatch/erigon-lib/kv/dbutils"
+
 	libcommon "github.com/ledgerwatch/erigon-lib/common"
 	"github.com/ledgerwatch/erigon-lib/common/hexutility"
 	"github.com/ledgerwatch/erigon-lib/common/length"
@@ -80,7 +81,7 @@ func TestAccountAndStorageTrie(t *testing.T) {
 	// ----------------------------------------------------------------
 
 	historyV3 := false
-	blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()), freezeblocks.NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()))
+	blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", 1, log.New()), freezeblocks.NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", 1, log.New()))
 	cfg := stagedsync.StageTrieCfg(db, false, true, false, t.TempDir(), blockReader, nil, historyV3, nil)
 	_, err := stagedsync.RegenerateIntermediateHashes("IH", tx, cfg, libcommon.Hash{} /* expectedRootHash */, ctx, log.New())
 	assert.Nil(t, err)
@@ -202,7 +203,7 @@ func TestAccountTrieAroundExtensionNode(t *testing.T) {
 	hash6 := libcommon.HexToHash("0x3100000000000000000000000000000000000000000000000000000000000000")
 	assert.Nil(t, tx.Put(kv.HashedAccounts, hash6[:], encoded))
 
-	blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()), freezeblocks.NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()))
+	blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", 1, log.New()), freezeblocks.NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", 1, log.New()))
 	_, err := stagedsync.RegenerateIntermediateHashes("IH", tx, stagedsync.StageTrieCfg(db, false, true, false, t.TempDir(), blockReader, nil, historyV3, nil), libcommon.Hash{} /* expectedRootHash */, ctx, log.New())
 	assert.Nil(t, err)
@@ -265,7 +266,7 @@ func TestStorageDeletion(t *testing.T) {
 	// Populate account & storage trie DB tables
 	// ----------------------------------------------------------------
 	historyV3 := false
-	blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()), freezeblocks.NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()))
+	blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", 1, log.New()), freezeblocks.NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", 1, log.New()))
 	cfg := stagedsync.StageTrieCfg(db, false, true, false, t.TempDir(), blockReader, nil, historyV3, nil)
 	_, err = stagedsync.RegenerateIntermediateHashes("IH", tx, cfg, libcommon.Hash{} /* expectedRootHash */, ctx, log.New())
 	assert.Nil(t, err)
@@ -384,7 +385,7 @@ func TestHiveTrieRoot(t *testing.T) {
 		common.FromHex("02081bc16d674ec80000")))
 
 	historyV3 := false
-	blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()), freezeblocks.NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()))
+	blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", 1, log.New()), freezeblocks.NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", 1, log.New()))
 	cfg := stagedsync.StageTrieCfg(db, false, true, false, t.TempDir(), blockReader, nil, historyV3, nil)
 	logger := log.New()
 	_, err := stagedsync.RegenerateIntermediateHashes("IH", tx, cfg, libcommon.Hash{} /* expectedRootHash */, ctx, logger)
diff --git a/eth/stagedsync/stage_mining_bor_heimdall.go b/eth/stagedsync/stage_mining_bor_heimdall.go
new file mode 100644
index 00000000000..4a5d21665d4
--- /dev/null
+++ b/eth/stagedsync/stage_mining_bor_heimdall.go
@@ -0,0 +1,89 @@
+package stagedsync
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/ledgerwatch/log/v3"
+
+	"github.com/ledgerwatch/erigon-lib/kv"
+	"github.com/ledgerwatch/erigon/core/types"
+	"github.com/ledgerwatch/erigon/dataflow"
+	"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
+	"github.com/ledgerwatch/erigon/polygon/bor/finality/whitelist"
+)
+
+func MiningBorHeimdallForward(
+	ctx context.Context,
+	cfg BorHeimdallCfg,
+	stageStage *StageState,
+	unwinder Unwinder,
+	tx kv.RwTx,
+	logger log.Logger,
+) error {
+	if cfg.borConfig == nil || cfg.heimdallClient == nil {
+		return nil
+	}
+
+	logPrefix := stageStage.LogPrefix()
+	headerStageProgress, err := stages.GetStageProgress(tx, stages.Headers)
+	if err != nil {
+		return err
+	}
+
+	header := cfg.miningState.MiningBlock.Header
+	headerNum := header.Number.Uint64()
+	if headerNum <= headerStageProgress {
+		return fmt.Errorf("attempting to mine %d, which is behind current head: %d", headerNum, headerStageProgress)
+	}
+
+	// Whitelist service is called to check if the bor chain is on the canonical chain according to milestones
+	whitelistService := whitelist.GetWhitelistingService()
+	if whitelistService != nil && !whitelistService.IsValidChain(headerNum, []*types.Header{header}) {
+		hash := header.Hash()
+		logger.Debug(
+			fmt.Sprintf("[%s] Verification failed for mined header", logPrefix),
+			"hash", hash,
+			"height", headerNum,
+			"err", err,
+		)
+		dataflow.HeaderDownloadStates.AddChange(headerNum, dataflow.HeaderInvalidated)
+		unwinder.UnwindTo(headerNum-1, ForkReset(hash))
+		return fmt.Errorf("mining on a wrong fork %d:%x", headerNum, hash)
+	}
+
+	lastSpanID, err := fetchRequiredHeimdallSpansIfNeeded(ctx, headerNum, tx, cfg, logPrefix, logger)
+	if err != nil {
+		return err
+	}
+
+	lastStateSyncEventID, records, fetchTime, err := fetchRequiredHeimdallStateSyncEventsIfNeeded(
+		ctx,
+		header,
+		tx,
+		cfg,
+		logPrefix,
+		logger,
+		func() (uint64, error) {
+			return LastStateSyncEventID(tx, cfg.blockReader)
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	if err = stageStage.Update(tx, headerNum); err != nil {
+		return err
+	}
+
+	logger.Info(
+		fmt.Sprintf("[%s] Finished processing", logPrefix),
+		"progress", headerNum,
+		"lastSpanID", lastSpanID,
+		"lastStateSyncEventID", lastStateSyncEventID,
+		"stateSyncEventTotalRecords", records,
+		"stateSyncEventFetchTime", fetchTime,
+	)
+
+	return nil
+}
diff --git a/eth/stagedsync/stage_mining_create_block.go b/eth/stagedsync/stage_mining_create_block.go
index e270f22c3ae..8720738a507 100644
--- a/eth/stagedsync/stage_mining_create_block.go
+++ b/eth/stagedsync/stage_mining_create_block.go
@@ -8,11 +8,10 @@ import (
 	"time"
 
 	mapset "github.com/deckarep/golang-set/v2"
-	"github.com/ledgerwatch/erigon-lib/chain"
-	libcommon "github.com/ledgerwatch/erigon-lib/common"
-	"github.com/ledgerwatch/erigon/turbo/services"
 	"github.com/ledgerwatch/log/v3"
 
+	"github.com/ledgerwatch/erigon-lib/chain"
+	libcommon "github.com/ledgerwatch/erigon-lib/common"
 	"github.com/ledgerwatch/erigon-lib/kv"
 	"github.com/ledgerwatch/erigon/common/debug"
 	"github.com/ledgerwatch/erigon/consensus"
@@ -22,6 +21,7 @@ import (
 	"github.com/ledgerwatch/erigon/core/types"
 	"github.com/ledgerwatch/erigon/eth/ethutils"
 	"github.com/ledgerwatch/erigon/params"
+	"github.com/ledgerwatch/erigon/turbo/services"
 )
 
 type MiningBlock struct {
@@ -130,7 +130,7 @@ func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBloc
 	if err != nil {
 		return err
 	}
-	chain := ChainReader{Cfg: cfg.chainConfig, Db: tx, BlockReader: cfg.blockReader}
+	chain := ChainReader{Cfg: cfg.chainConfig, Db: tx, BlockReader: cfg.blockReader, Logger: logger}
 	var GetBlocksFromHash = func(hash libcommon.Hash, n int) (blocks []*types.Block) {
 		number := rawdb.ReadHeaderNumber(tx, hash)
 		if number == nil {
diff --git a/eth/stagedsync/stage_mining_exec.go b/eth/stagedsync/stage_mining_exec.go
index 15d4dd1c6b7..90395618d3c 100644
--- a/eth/stagedsync/stage_mining_exec.go
+++ b/eth/stagedsync/stage_mining_exec.go
@@ -10,15 +10,14 @@ import (
 	mapset "github.com/deckarep/golang-set/v2"
 	"github.com/holiman/uint256"
-	"github.com/ledgerwatch/erigon-lib/kv/membatch"
 	"github.com/ledgerwatch/log/v3"
 	"golang.org/x/net/context"
 
 	"github.com/ledgerwatch/erigon-lib/chain"
 	libcommon "github.com/ledgerwatch/erigon-lib/common"
 	"github.com/ledgerwatch/erigon-lib/kv"
+	"github.com/ledgerwatch/erigon-lib/kv/membatch"
 	types2 "github.com/ledgerwatch/erigon-lib/types"
-
 	"github.com/ledgerwatch/erigon/consensus"
 	"github.com/ledgerwatch/erigon/consensus/misc"
 	"github.com/ledgerwatch/erigon/core"
@@ -43,8 +42,8 @@ type MiningExecCfg struct {
 	tmpdir      string
 	interrupt   *int32
 	payloadId   uint64
-	txPool2     TxPoolForMining
-	txPool2DB   kv.RoDB
+	txPool      TxPoolForMining
+	txPoolDB    kv.RoDB
 }
 
 type TxPoolForMining interface {
@@ -56,7 +55,7 @@ func StageMiningExecCfg(
 	notifier ChainEventNotifier, chainConfig chain.Config,
 	engine consensus.Engine, vmConfig *vm.Config,
 	tmpdir string, interrupt *int32, payloadId uint64,
-	txPool2 TxPoolForMining, txPool2DB kv.RoDB,
+	txPool TxPoolForMining, txPoolDB kv.RoDB,
 	blockReader services.FullBlockReader,
 ) MiningExecCfg {
 	return MiningExecCfg{
@@ -70,8 +69,8 @@ func StageMiningExecCfg(
 		tmpdir:      tmpdir,
 		interrupt:   interrupt,
 		payloadId:   payloadId,
-		txPool2:     txPool2,
-		txPool2DB:   txPool2DB,
+		txPool:      txPool,
+		txPoolDB:    txPoolDB,
 	}
 }
 
@@ -90,7 +89,7 @@ func SpawnMiningExecStage(s *StageState, tx kv.RwTx, cfg MiningExecCfg, quit <-c
 	ibs := state.New(stateReader)
 	stateWriter := state.NewPlainStateWriter(tx, tx, current.Header.Number.Uint64())
-	chainReader := ChainReader{Cfg: cfg.chainConfig, Db: tx, BlockReader: cfg.blockReader}
+	chainReader := ChainReader{Cfg: cfg.chainConfig, Db: tx, BlockReader: cfg.blockReader, Logger: logger}
 	core.InitializeBlockExecution(cfg.engine, chainReader, current.Header, &cfg.chainConfig, ibs, logger)
 
 	// Optimism Canyon
@@ -186,7 +185,7 @@ func SpawnMiningExecStage(s *StageState, tx kv.RwTx, cfg MiningExecCfg, quit <-c
 		}
 	}
 
-	logger.Debug("SpawnMiningExecStage", "block txn", current.Txs.Len(), "payload", cfg.payloadId)
+	logger.Debug("SpawnMiningExecStage", "block", current.Header.Number, "txn", current.Txs.Len(), "payload", cfg.payloadId)
 	if current.Uncles == nil {
 		current.Uncles = []*types.Header{}
 	}
@@ -198,11 +197,12 @@ func SpawnMiningExecStage(s *StageState, tx kv.RwTx, cfg MiningExecCfg, quit <-c
 	}
 
 	var err error
-	_, current.Txs, current.Receipts, err = core.FinalizeBlockExecution(cfg.engine, stateReader, current.Header, current.Txs, current.Uncles, stateWriter, &cfg.chainConfig, ibs, current.Receipts, current.Withdrawals, ChainReaderImpl{config: &cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, true, logger)
+	_, current.Txs, current.Receipts, err = core.FinalizeBlockExecution(cfg.engine, stateReader, current.Header, current.Txs, current.Uncles, stateWriter, &cfg.chainConfig, ibs, current.Receipts, current.Withdrawals, ChainReaderImpl{config: &cfg.chainConfig, tx: tx, blockReader: cfg.blockReader, logger: logger}, true, logger)
 	if err != nil {
 		return err
 	}
-	logger.Debug("FinalizeBlockExecution", "current txn", current.Txs.Len(), "current receipt", current.Receipts.Len(), "payload", cfg.payloadId)
+
+	logger.Debug("FinalizeBlockExecution", "block", current.Header.Number, "txn", current.Txs.Len(), "gas", current.Header.GasUsed, "receipt", current.Receipts.Len(), "payload", cfg.payloadId)
 
 	// hack: pretend that we are real execution stage - next stages will rely on this progress
 	if err := stages.SaveStageProgress(tx, stages.Execution, current.Header.Number.Uint64()); err != nil {
@@ -222,23 +222,20 @@ func getNextTransactions(
 	logger log.Logger,
 ) (types.TransactionsStream, int, error) {
 	txSlots := types2.TxsRlp{}
-	var onTime bool
 	count := 0
-	if err := cfg.txPool2DB.View(context.Background(), func(poolTx kv.Tx) error {
+	if err := cfg.txPoolDB.View(context.Background(), func(poolTx kv.Tx) error {
 		var err error
-		counter := 0
-		for !onTime && counter < 500 {
-			remainingGas := header.GasLimit - header.GasUsed
-			remainingBlobGas := uint64(0)
-			if header.BlobGasUsed != nil {
-				remainingBlobGas = cfg.chainConfig.GetMaxBlobGasPerBlock() - *header.BlobGasUsed
-			}
-			if onTime, count, err = cfg.txPool2.YieldBest(amount, &txSlots, poolTx, executionAt, remainingGas, remainingBlobGas, alreadyYielded); err != nil {
-				return err
-			}
-			time.Sleep(1 * time.Millisecond)
-			counter++
+
+		remainingGas := header.GasLimit - header.GasUsed
+		remainingBlobGas := uint64(0)
+		if header.BlobGasUsed != nil {
+			remainingBlobGas = cfg.chainConfig.GetMaxBlobGasPerBlock() - *header.BlobGasUsed
 		}
+
+		if _, count, err = cfg.txPool.YieldBest(amount, &txSlots, poolTx, executionAt, remainingGas, remainingBlobGas, alreadyYielded); err != nil {
+			return err
+		}
+
 		return nil
 	}); err != nil {
 		return nil, 0, err
@@ -418,7 +415,6 @@ func addTransactionsToMiningBlock(logPrefix string, current *MiningBlock, chainC
 	gasSnap := gasPool.Gas()
 	blobGasSnap := gasPool.BlobGas()
 	snap := ibs.Snapshot()
-	logger.Debug("addTransactionsToMiningBlock", "txn hash", txn.Hash())
 	receipt, _, err := core.ApplyTransaction(&chainConfig, core.GetHashFn(header, getHeader), engine, &coinbase, gasPool, ibs, noop, header, txn, &header.GasUsed, header.BlobGasUsed, *vmConfig)
 	if err != nil {
 		ibs.RevertToSnapshot(snap)
@@ -507,7 +503,7 @@ LOOP:
 			txs.Pop()
 		} else if err == nil {
 			// Everything ok, collect the logs and shift in the next transaction from the same account
-			logger.Debug(fmt.Sprintf("[%s] addTransactionsToMiningBlock Successful", logPrefix), "sender", from, "nonce", txn.GetNonce(), "payload", payloadId)
+			logger.Trace(fmt.Sprintf("[%s] Added transaction", logPrefix), "hash", txn.Hash(), "sender", from, "nonce", txn.GetNonce(), "payload", payloadId)
			coalescedLogs = append(coalescedLogs, logs...)
 			tcount++
 			txs.Shift()
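The getNextTransactions rewrite above drops the old poll loop (up to 500 one-millisecond YieldBest attempts) in favour of computing the gas budgets once and yielding a single time. Condensed, the new flow inside the pool view looks like this (names as in the diff; the first return value, previously onTime, is now discarded):

    remainingGas := header.GasLimit - header.GasUsed
    remainingBlobGas := uint64(0)
    if header.BlobGasUsed != nil {
        remainingBlobGas = cfg.chainConfig.GetMaxBlobGasPerBlock() - *header.BlobGasUsed
    }
    // single yield: no retry, no sleep
    _, count, err = cfg.txPool.YieldBest(amount, &txSlots, poolTx, executionAt, remainingGas, remainingBlobGas, alreadyYielded)
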
diff --git a/eth/stagedsync/stage_mining_finish.go b/eth/stagedsync/stage_mining_finish.go
index 16d90e00667..d3d36dfbab6 100644
--- a/eth/stagedsync/stage_mining_finish.go
+++ b/eth/stagedsync/stage_mining_finish.go
@@ -3,14 +3,14 @@ package stagedsync
 import (
 	"fmt"
 
-	"github.com/ledgerwatch/erigon-lib/chain"
-	"github.com/ledgerwatch/erigon-lib/kv"
-	"github.com/ledgerwatch/erigon/turbo/builder"
-	"github.com/ledgerwatch/erigon/turbo/services"
 	"github.com/ledgerwatch/log/v3"
 
+	"github.com/ledgerwatch/erigon-lib/chain"
+	"github.com/ledgerwatch/erigon-lib/kv"
 	"github.com/ledgerwatch/erigon/consensus"
 	"github.com/ledgerwatch/erigon/core/types"
+	"github.com/ledgerwatch/erigon/turbo/builder"
+	"github.com/ledgerwatch/erigon/turbo/services"
 )
 
 type MiningFinishCfg struct {
@@ -82,10 +82,10 @@ func SpawnMiningFinishStage(s *StageState, tx kv.RwTx, cfg MiningFinishCfg, quit
 
 	if block.Transactions().Len() > 0 {
 		logger.Info(fmt.Sprintf("[%s] block ready for seal", logPrefix),
-			"block_num", block.NumberU64(),
+			"block", block.NumberU64(),
 			"transactions", block.Transactions().Len(),
-			"gas_used", block.GasUsed(),
-			"gas_limit", block.GasLimit(),
+			"gasUsed", block.GasUsed(),
+			"gasLimit", block.GasLimit(),
 			"difficulty", block.Difficulty(),
 		)
 	}
@@ -95,7 +95,7 @@ func SpawnMiningFinishStage(s *StageState, tx kv.RwTx, cfg MiningFinishCfg, quit
 	default:
 		logger.Trace("No in-flight sealing task.")
 	}
-	chain := ChainReader{Cfg: cfg.chainConfig, Db: tx, BlockReader: cfg.blockReader}
+	chain := ChainReader{Cfg: cfg.chainConfig, Db: tx, BlockReader: cfg.blockReader, Logger: logger}
 	if err := cfg.engine.Seal(chain, block, cfg.miningState.MiningResultCh, cfg.sealCancel); err != nil {
 		logger.Warn("Block sealing failed", "err", err)
 	}
diff --git a/eth/stagedsync/stage_senders.go b/eth/stagedsync/stage_senders.go
index 453562c4e20..3de8e13904b 100644
--- a/eth/stagedsync/stage_senders.go
+++ b/eth/stagedsync/stage_senders.go
@@ -44,9 +44,10 @@ type SendersCfg struct {
 	chainConfig     *chain.Config
 	hd              *headerdownload.HeaderDownload
 	blockReader     services.FullBlockReader
+	loopBreakCheck  func(int) bool
 }
 
-func StageSendersCfg(db kv.RwDB, chainCfg *chain.Config, badBlockHalt bool, tmpdir string, prune prune.Mode, blockReader services.FullBlockReader, hd *headerdownload.HeaderDownload) SendersCfg {
+func StageSendersCfg(db kv.RwDB, chainCfg *chain.Config, badBlockHalt bool, tmpdir string, prune prune.Mode, blockReader services.FullBlockReader, hd *headerdownload.HeaderDownload, loopBreakCheck func(int) bool) SendersCfg {
 	const sendersBatchSize = 10000
 	const sendersBlockSize = 4096
 
@@ -62,8 +63,8 @@ func StageSendersCfg(db kv.RwDB, chainCfg *chain.Config, badBlockHalt bool, tmpd
 		chainConfig:    chainCfg,
 		prune:          prune,
 		hd:             hd,
-
-		blockReader: blockReader,
+		blockReader:    blockReader,
+		loopBreakCheck: loopBreakCheck,
 	}
 }
 
@@ -198,6 +199,10 @@ Loop:
 			break
 		}
 
+		if cfg.loopBreakCheck != nil && cfg.loopBreakCheck(int(blockNumber-startFrom)) {
+			break
+		}
+
 		has, err := cfg.blockReader.HasSenders(ctx, tx, blockHash, blockNumber)
 		if err != nil {
 			return err
diff --git a/eth/stagedsync/stage_senders_test.go b/eth/stagedsync/stage_senders_test.go
index 15bda777a11..bda8d5e90f4 100644
--- a/eth/stagedsync/stage_senders_test.go
+++ b/eth/stagedsync/stage_senders_test.go
@@ -128,7 +128,7 @@ func TestSenders(t *testing.T) {
 
 	require.NoError(stages.SaveStageProgress(tx, stages.Bodies, 3))
 
-	cfg := stagedsync.StageSendersCfg(db, params.TestChainConfig, false, "", prune.Mode{}, br, nil)
+	cfg := stagedsync.StageSendersCfg(db, params.TestChainConfig, false, "", prune.Mode{}, br, nil, nil)
 	err = stagedsync.SpawnRecoverSendersStage(cfg, &stagedsync.StageState{ID: stages.Senders}, nil, tx, 3, m.Ctx, log.New())
 	require.NoError(err)
diff --git a/eth/stagedsync/stage_snapshots.go b/eth/stagedsync/stage_snapshots.go
index ca2f9ccb824..4b9cbc37064 100644
--- a/eth/stagedsync/stage_snapshots.go
+++ b/eth/stagedsync/stage_snapshots.go
@@ -1,17 +1,35 @@
 package stagedsync
 
 import (
+	"bufio"
+	"bytes"
 	"context"
 	"encoding/binary"
+	"errors"
 	"fmt"
+	"io/fs"
 	"math/big"
+	"os"
+	"path/filepath"
 	"reflect"
+	"runtime"
+	"sort"
+	"strings"
+	"sync"
+	"sync/atomic"
 	"time"
 
+	"github.com/anacrolix/torrent"
 	"github.com/ledgerwatch/log/v3"
+	"golang.org/x/sync/errgroup"
 
 	"github.com/ledgerwatch/erigon-lib/chain"
+	"github.com/ledgerwatch/erigon-lib/chain/snapcfg"
 	"github.com/ledgerwatch/erigon-lib/common/datadir"
+	"github.com/ledgerwatch/erigon-lib/common/dbg"
+	"github.com/ledgerwatch/erigon-lib/common/dir"
+	"github.com/ledgerwatch/erigon-lib/downloader"
+	"github.com/ledgerwatch/erigon-lib/downloader/snaptype"
 	"github.com/ledgerwatch/erigon-lib/etl"
 	proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader"
 	"github.com/ledgerwatch/erigon-lib/kv"
@@ -21,11 +39,15 @@ import (
 	"github.com/ledgerwatch/erigon/core/rawdb"
 	"github.com/ledgerwatch/erigon/core/types"
+	"github.com/ledgerwatch/erigon/eth/ethconfig"
 	"github.com/ledgerwatch/erigon/eth/ethconfig/estimate"
 	"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
+	"github.com/ledgerwatch/erigon/rpc"
 	"github.com/ledgerwatch/erigon/turbo/services"
+	"github.com/ledgerwatch/erigon/turbo/shards"
 	"github.com/ledgerwatch/erigon/turbo/silkworm"
 	"github.com/ledgerwatch/erigon/turbo/snapshotsync"
+	"github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks"
 )
 
 type SnapshotsCfg struct {
@@ -36,39 +58,79 @@ type SnapshotsCfg struct {
 	blockRetire        services.BlockRetire
 	snapshotDownloader proto_downloader.DownloaderClient
 	blockReader        services.FullBlockReader
-	dbEventNotifier    services.DBEventNotifier
+	notifier           *shards.Notifications
 
-	historyV3 bool
-	caplin    bool
-	agg       *state.AggregatorV3
-	silkworm  *silkworm.Silkworm
+	historyV3        bool
+	caplin           bool
+	agg              *state.AggregatorV3
+	silkworm         *silkworm.Silkworm
+	snapshotUploader *snapshotUploader
+	syncConfig       ethconfig.Sync
 }
 
 func StageSnapshotsCfg(db kv.RwDB,
 	chainConfig chain.Config,
+	syncConfig ethconfig.Sync,
 	dirs datadir.Dirs,
 	blockRetire services.BlockRetire,
 	snapshotDownloader proto_downloader.DownloaderClient,
 	blockReader services.FullBlockReader,
-	dbEventNotifier services.DBEventNotifier,
+	notifier *shards.Notifications,
 	historyV3 bool,
 	agg *state.AggregatorV3,
 	caplin bool,
 	silkworm *silkworm.Silkworm,
 ) SnapshotsCfg {
-	return SnapshotsCfg{
+	cfg := SnapshotsCfg{
 		db:                 db,
 		chainConfig:        chainConfig,
 		dirs:               dirs,
 		blockRetire:        blockRetire,
 		snapshotDownloader: snapshotDownloader,
 		blockReader:        blockReader,
-		dbEventNotifier:    dbEventNotifier,
+		notifier:           notifier,
 		historyV3:          historyV3,
 		caplin:             caplin,
 		agg:                agg,
 		silkworm:           silkworm,
+		syncConfig:         syncConfig,
 	}
+
+	if uploadFs := cfg.syncConfig.UploadLocation; len(uploadFs) > 0 {
+
+		cfg.snapshotUploader = &snapshotUploader{
+			cfg:          &cfg,
+			uploadFs:     uploadFs,
+			version:      snapcfg.KnownCfg(chainConfig.ChainName, 0).Version,
+			torrentFiles: downloader.NewAtomicTorrentFiles(cfg.dirs.Snap),
+		}
+
+		cfg.blockRetire.SetWorkers(estimate.CompressSnapshot.Workers())
+
+		freezingCfg := cfg.blockReader.FreezingCfg()
+
+		if freezingCfg.Enabled && freezingCfg.Produce {
+			u := cfg.snapshotUploader
+
+			if maxSeedable := u.maxSeedableHeader(); u.cfg.syncConfig.FrozenBlockLimit > 0 && maxSeedable > u.cfg.syncConfig.FrozenBlockLimit {
+				blockLimit := maxSeedable - u.minBlockNumber()
+
+				if u.cfg.syncConfig.FrozenBlockLimit < blockLimit {
+					blockLimit = u.cfg.syncConfig.FrozenBlockLimit
+				}
+
+				if snapshots, ok := u.cfg.blockReader.Snapshots().(*freezeblocks.RoSnapshots); ok {
+					snapshots.SetSegmentsMin(maxSeedable - blockLimit)
+				}
+
+				if snapshots, ok := u.cfg.blockReader.BorSnapshots().(*freezeblocks.BorRoSnapshots); ok {
+					snapshots.SetSegmentsMin(maxSeedable - blockLimit)
+				}
+			}
+		}
+	}
+
+	return cfg
 }
 
 func SpawnStageSnapshots(
@@ -99,7 +161,12 @@ func SpawnStageSnapshots(
 		if minProgress == 0 || progress < minProgress {
 			minProgress = progress
 		}
+
+		if stage == stages.SyncStage(cfg.syncConfig.BreakAfterStage) {
+			break
+		}
 	}
+
 	if minProgress > s.BlockNumber {
 		if err = s.Update(tx, minProgress); err != nil {
 			return err
@@ -126,21 +193,60 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R
 		cstate = snapshotsync.AlsoCaplin
 	}
 
-	if err := snapshotsync.WaitForDownloader(s.LogPrefix(), ctx, cfg.historyV3, cstate, cfg.agg, tx, cfg.blockReader, &cfg.chainConfig, cfg.snapshotDownloader); err != nil {
-		return err
+	if cfg.snapshotUploader != nil {
+		u := cfg.snapshotUploader
+
+		u.init(ctx, logger)
+
+		if cfg.syncConfig.UploadFrom != rpc.EarliestBlockNumber {
+			u.downloadLatestSnapshots(ctx, cfg.syncConfig.UploadFrom, u.version)
+		}
+
+		if maxSeedable := u.maxSeedableHeader(); u.cfg.syncConfig.FrozenBlockLimit > 0 && maxSeedable > u.cfg.syncConfig.FrozenBlockLimit {
+			blockLimit := maxSeedable - u.minBlockNumber()
+
+			if u.cfg.syncConfig.FrozenBlockLimit < blockLimit {
+				blockLimit = u.cfg.syncConfig.FrozenBlockLimit
+			}
+
+			if snapshots, ok := u.cfg.blockReader.Snapshots().(*freezeblocks.RoSnapshots); ok {
+				snapshots.SetSegmentsMin(maxSeedable - blockLimit)
+			}
+
+			if snapshots, ok := u.cfg.blockReader.BorSnapshots().(*freezeblocks.BorRoSnapshots); ok {
+				snapshots.SetSegmentsMin(maxSeedable - blockLimit)
+			}
+		}
+
+		if err := cfg.blockReader.Snapshots().ReopenFolder(); err != nil {
+			return err
+		}
+
+		if cfg.chainConfig.Bor != nil {
+			if err := cfg.blockReader.BorSnapshots().ReopenFolder(); err != nil {
+				return err
+			}
+		}
+
+		if cfg.notifier.Events != nil { // can notify right here, even that write txn is not commit
+			cfg.notifier.Events.OnNewSnapshot()
+		}
+	} else {
+		if err := snapshotsync.WaitForDownloader(ctx, s.LogPrefix(), cfg.historyV3, cstate, cfg.agg, tx, cfg.blockReader, &cfg.chainConfig, cfg.snapshotDownloader, s.state.StagesIdsList()); err != nil {
+			return err
+		}
 	}
 
 	// It's ok to notify before tx.Commit(), because RPCDaemon does read list of files by gRPC (not by reading from db)
-	if cfg.dbEventNotifier != nil {
-		cfg.dbEventNotifier.OnNewSnapshot()
+	if cfg.notifier.Events != nil {
+		cfg.notifier.Events.OnNewSnapshot()
 	}
 
-	cfg.blockReader.Snapshots().LogStat()
+	cfg.blockReader.Snapshots().LogStat("download")
 	cfg.agg.LogStats(tx, func(endTxNumMinimax uint64) uint64 {
 		_, histBlockNumProgress, _ := rawdbv3.TxNums.FindBlockNum(tx, endTxNumMinimax)
 		return histBlockNumProgress
 	})
 
-	if err := cfg.blockRetire.BuildMissedIndicesIfNeed(ctx, s.LogPrefix(), cfg.dbEventNotifier, &cfg.chainConfig); err != nil {
+	if err := cfg.blockRetire.BuildMissedIndicesIfNeed(ctx, s.LogPrefix(), cfg.notifier.Events, &cfg.chainConfig); err != nil {
 		return err
 	}
 
@@ -157,8 +263,8 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R
 		if err := cfg.agg.BuildMissedIndices(ctx, indexWorkers); err != nil {
 			return err
 		}
-		if cfg.dbEventNotifier != nil {
-			cfg.dbEventNotifier.OnNewSnapshot()
+		if cfg.notifier.Events != nil {
+			cfg.notifier.Events.OnNewSnapshot()
 		}
 	}
 
@@ -173,6 +279,7 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R
 	if err := FillDBFromSnapshots(s.LogPrefix(), ctx, tx, cfg.dirs, cfg.blockReader, cfg.agg, logger); err != nil {
 		return err
 	}
+
 	return nil
 }
 
@@ -300,7 +407,7 @@ func FillDBFromSnapshots(logPrefix string, ctx context.Context, tx kv.RwTx, dirs
 /* ====== PRUNING ====== */
 // snapshots pruning sections works more as a retiring of blocks
 // retiring blocks means moving block data from db into snapshots
-func SnapshotsPrune(s *PruneState, initialCycle bool, cfg SnapshotsCfg, ctx context.Context, tx kv.RwTx) (err error) {
+func SnapshotsPrune(s *PruneState, initialCycle bool, cfg SnapshotsCfg, ctx context.Context, tx kv.RwTx, logger log.Logger) (err error) {
 	useExternalTx := tx != nil
 	if !useExternalTx {
 		tx, err = cfg.db.BeginRw(ctx)
@@ -311,32 +418,71 @@ func SnapshotsPrune(s *PruneState, initialCycle bool, cfg SnapshotsCfg, ctx cont
 	}
 
 	freezingCfg := cfg.blockReader.FreezingCfg()
+
 	if freezingCfg.Enabled {
-		if err := cfg.blockRetire.PruneAncientBlocks(tx, 100); err != nil {
-			return err
-		}
-	}
-	if freezingCfg.Enabled && freezingCfg.Produce {
-		//TODO: initialSync maybe save files progress here
-		if cfg.blockRetire.HasNewFrozenFiles() || cfg.agg.HasNewFrozenFiles() {
-			if err := rawdb.WriteSnapshots(tx, cfg.blockReader.FrozenFiles(), cfg.agg.Files()); err != nil {
-				return err
+		if freezingCfg.Produce {
+			//TODO: initialSync maybe save files progress here
+			if cfg.blockRetire.HasNewFrozenFiles() || cfg.agg.HasNewFrozenFiles() {
+				if err := rawdb.WriteSnapshots(tx, cfg.blockReader.FrozenFiles(), cfg.agg.Files()); err != nil {
+					return err
+				}
 			}
-		}
-		cfg.blockRetire.RetireBlocksInBackground(ctx, s.ForwardProgress, log.LvlInfo, func(downloadRequest []services.DownloadRequest) error {
-			if cfg.snapshotDownloader == nil || reflect.ValueOf(cfg.snapshotDownloader).IsNil() {
-				return nil
+
+			var minBlockNumber uint64
+
+			if cfg.snapshotUploader != nil {
+				minBlockNumber = cfg.snapshotUploader.minBlockNumber()
 			}
-			return snapshotsync.RequestSnapshotsDownload(ctx, downloadRequest, cfg.snapshotDownloader)
-		}, func(l []string) error {
-			if cfg.snapshotDownloader == nil || reflect.ValueOf(cfg.snapshotDownloader).IsNil() {
+
+			cfg.blockRetire.RetireBlocksInBackground(ctx, minBlockNumber, s.ForwardProgress, log.LvlDebug, func(downloadRequest []services.DownloadRequest) error {
+				if cfg.snapshotDownloader != nil && !reflect.ValueOf(cfg.snapshotDownloader).IsNil() {
+					if err := snapshotsync.RequestSnapshotsDownload(ctx, downloadRequest, cfg.snapshotDownloader); err != nil {
+						return err
+					}
+				}
+
 				return nil
-			}
-			_, err := cfg.snapshotDownloader.Delete(ctx, &proto_downloader.DeleteRequest{Paths: l})
+			}, func(l []string) error {
+				//if cfg.snapshotUploader != nil {
+				// TODO - we need to also remove files from the uploader (100k->500K transition)
+				//}
+
+				if !(cfg.snapshotDownloader == nil || reflect.ValueOf(cfg.snapshotDownloader).IsNil()) {
+					_, err := cfg.snapshotDownloader.Delete(ctx, &proto_downloader.DeleteRequest{Paths: l})
+					return err
+				}
+
+				return nil
+			})
+
+			//cfg.agg.BuildFilesInBackground()
+		}
+
+		if err := cfg.blockRetire.PruneAncientBlocks(tx, cfg.syncConfig.PruneLimit); err != nil {
 			return err
-		})
-		//cfg.agg.BuildFilesInBackground()
+		}
+	}
+
+	if cfg.snapshotUploader != nil {
+		// if we're uploading make sure that the DB does not get too far
+		// ahead of the snapshot production process - otherwise DB will
+		// grow larger than necessary - we may also want to increase the
+		// workers
+		if s.ForwardProgress > cfg.blockReader.FrozenBlocks()+300_000 {
+			func() {
+				checkEvery := time.NewTicker(logInterval)
+				defer checkEvery.Stop()
+
+				for s.ForwardProgress > cfg.blockReader.FrozenBlocks()+300_000 {
+					select {
+					case <-ctx.Done():
+						return
+					case <-checkEvery.C:
+						log.Info(fmt.Sprintf("[%s] Waiting for snapshots...", s.LogPrefix()), "progress", s.ForwardProgress, "frozen", cfg.blockReader.FrozenBlocks(), "gap", s.ForwardProgress-cfg.blockReader.FrozenBlocks())
+					}
+				}
+			}()
+		}
 	}
 
 	if !useExternalTx {
@@ -347,3 +493,779 @@ func SnapshotsPrune(s *PruneState, initialCycle bool, cfg SnapshotsCfg, ctx cont
 
 	return nil
 }
+
+type uploadState struct {
+	sync.Mutex
+	file             string
+	info             *snaptype.FileInfo
+	torrent          *torrent.TorrentSpec
+	buildingTorrent  bool
+	uploads          []string
+	remote           bool
+	hasRemoteTorrent bool
+	//remoteHash       string
+	local     bool
+	localHash string
+}
+
+type snapshotUploader struct {
+	cfg             *SnapshotsCfg
+	files           map[string]*uploadState
+	uploadFs        string
+	rclone          *downloader.RCloneClient
+	uploadSession   *downloader.RCloneSession
+	uploadScheduled atomic.Bool
+	uploading       atomic.Bool
+	manifestMutex   sync.Mutex
+	version         uint8
+	torrentFiles    *downloader.TorrentFiles
+}
+
+func (u *snapshotUploader) init(ctx context.Context, logger log.Logger) {
+	if u.files == nil {
+		freezingCfg := u.cfg.blockReader.FreezingCfg()
+
+		if freezingCfg.Enabled && freezingCfg.Produce {
+			u.files = map[string]*uploadState{}
+			u.start(ctx, logger)
+		}
+	}
+}
+
+func (u *snapshotUploader) maxUploadedHeader() uint64 {
+	var max uint64
+
+	if len(u.files) > 0 {
+		for _, state := range u.files {
+			if state.local && state.remote {
+				if state.info != nil {
+					if state.info.T == snaptype.Headers {
+						if state.info.To > max {
+							max = state.info.To
+						}
+					}
+				} else {
+					if info, ok := snaptype.ParseFileName(u.cfg.dirs.Snap, state.file); ok {
+						if info.T == snaptype.Headers {
+							if info.To > max {
+								max = info.To
+							}
+						}
+						state.info = &info
+					}
+				}
+			}
+		}
+	}
+
+	return max
+}
+
+type dirEntry struct {
+	name string
+}
+
+func (e dirEntry) Name() string {
+	return e.name
+}
+
+func (e dirEntry) IsDir() bool {
+	return false
+}
+
+func (e dirEntry) Type() fs.FileMode {
+	return e.Mode()
+}
+
+func (e dirEntry) Size() int64 {
+	return -1
+}
+
+func (e dirEntry) Mode() fs.FileMode {
+	return fs.ModeIrregular
+}
+
+func (e dirEntry) ModTime() time.Time {
+	return time.Time{}
+}
+
+func (e dirEntry) Sys() any {
+	return nil
+}
+
+func (e dirEntry) Info() (fs.FileInfo, error) {
+	return e, nil
+}
+
+var checkKnownSizes = false
+
+func (u *snapshotUploader) seedable(fi snaptype.FileInfo) bool {
+	if !fi.Seedable() {
+		return false
+	}
+
+	if checkKnownSizes {
+		for _, it := range snapcfg.KnownCfg(u.cfg.chainConfig.ChainName, 1).Preverified {
+			info, _ := snaptype.ParseFileName("", it.Name)
+
+			if fi.From == info.From {
+				return fi.To == info.To
+			}
+
+			if fi.From < info.From {
+				return info.To-info.From == fi.To-fi.From
+			}
+
+			if fi.From < info.To {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
+func (u *snapshotUploader) downloadManifest(ctx context.Context) ([]fs.DirEntry, error) {
+	u.manifestMutex.Lock()
+	defer u.manifestMutex.Unlock()
+
+	reader, err := u.uploadSession.Cat(ctx, "manifest.txt")
+
+	if err != nil {
+		return nil, err
+	}
+
+	var entries []fs.DirEntry
+
+	scanner := bufio.NewScanner(reader)
+
+	for scanner.Scan() {
+		entries = append(entries, dirEntry{scanner.Text()})
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, err
+	}
+
+	return entries, nil
+}
+
+func (u *snapshotUploader) uploadManifest(ctx context.Context, remoteRefresh bool) error {
+	u.manifestMutex.Lock()
+	defer u.manifestMutex.Unlock()
+
+	if remoteRefresh {
+		u.refreshFromRemote(ctx)
+	}
+
+	manifestFile := "manifest.txt"
+
+	fileMap := map[string]string{}
+
+	for file, state := range u.files {
+		if state.remote {
+			if state.hasRemoteTorrent {
+				fileMap[file] = file + ".torrent"
+			} else {
+				fileMap[file] = ""
+			}
+		}
+	}
+
+	files := make([]string, 0, len(fileMap))
+
+	for torrent, file := range fileMap {
+		files = append(files, file)
+
+		if len(torrent) > 0 {
+			files = append(files, torrent)
+		}
+	}
+
+	sort.Strings(files)
+
+	manifestEntries := bytes.Buffer{}
+
+	for _, file := range files {
+		fmt.Fprintln(&manifestEntries, file)
+	}
+
+	_ = os.WriteFile(filepath.Join(u.cfg.dirs.Snap, manifestFile), manifestEntries.Bytes(), 0644)
+	defer os.Remove(filepath.Join(u.cfg.dirs.Snap, manifestFile))
+
+	return u.uploadSession.Upload(ctx, manifestFile)
+}
+
+func (u *snapshotUploader) refreshFromRemote(ctx context.Context) {
+	remoteFiles, err := u.uploadSession.ReadRemoteDir(ctx, true)
+
+	if err != nil {
+		return
+	}
+
+	u.updateRemotes(remoteFiles)
+}
+
+func (u *snapshotUploader) updateRemotes(remoteFiles []fs.DirEntry) {
+	for _, fi := range remoteFiles {
+		var file string
+		var hasTorrent bool
+
+		if hasTorrent = filepath.Ext(fi.Name()) == ".torrent"; hasTorrent {
+			file = strings.TrimSuffix(fi.Name(), ".torrent")
+		} else {
+			file = fi.Name()
+		}
+
+		// if we have found the file & its torrent we don't
+		// need to attempt another sync operation
+		if state, ok := u.files[file]; ok {
+			state.remote = true
+
+			if hasTorrent {
+				state.hasRemoteTorrent = true
+			}
+
+		} else {
+			info, ok := snaptype.ParseFileName(u.cfg.dirs.Snap, fi.Name())
+
+			if !ok || info.Version != u.version {
+				continue
+			}
+
+			u.files[file] = &uploadState{
+				file:             file,
+				info:             &info,
+				local:            dir.FileNonZero(info.Path),
+				hasRemoteTorrent: hasTorrent,
+			}
+		}
+	}
+}
+
+func (u *snapshotUploader) downloadLatestSnapshots(ctx context.Context, blockNumber rpc.BlockNumber, version uint8) error {
+
+	entries, err := u.downloadManifest(ctx)
+
+	if err != nil {
+		entries, err = u.uploadSession.ReadRemoteDir(ctx, true)
+	}
+
+	if err != nil {
+		return err
+	}
+
+	lastSegments := map[snaptype.Type]fs.FileInfo{}
+	torrents := map[string]string{}
+
+	for _, ent := range entries {
+		if info, err := ent.Info(); err == nil {
+
+			if info.Size() > -1 && info.Size() <= 32 {
+				continue
+			}
+
+			snapInfo, ok := info.Sys().(downloader.SnapInfo)
+
+			if ok && snapInfo.Type() != snaptype.Unknown && snapInfo.Version() == version {
+				if last, ok := lastSegments[snapInfo.Type()]; ok {
+					if lastInfo, ok := last.Sys().(downloader.SnapInfo); ok && snapInfo.To() > lastInfo.To() {
+						lastSegments[snapInfo.Type()] = info
+					}
+				} else {
+					lastSegments[snapInfo.Type()] = info
+				}
+			} else {
+				if ext := filepath.Ext(info.Name()); ext == ".torrent" {
+					fileName := strings.TrimSuffix(info.Name(), ".torrent")
+					torrents[fileName] = info.Name()
+				}
+			}
+		}
+	}
+
+	var min uint64
+
+	for _, info := range lastSegments {
+		if lastInfo, ok := info.Sys().(downloader.SnapInfo); ok {
+			if min == 0 || lastInfo.From() < min {
+				min = lastInfo.From()
+			}
+		}
+	}
+
+	for segType, info := range lastSegments {
+		if lastInfo, ok := info.Sys().(downloader.SnapInfo); ok {
+			if lastInfo.From() > min {
+				for _, ent := range entries {
+					if info, err := ent.Info(); err == nil {
+						snapInfo, ok := info.Sys().(downloader.SnapInfo)
+
+						if ok && snapInfo.Type() == segType &&
+							snapInfo.Version() == version &&
+							snapInfo.From() == min {
+							lastSegments[segType] = info
+						}
+					}
+				}
+			}
+		}
+	}
+
+	downloads := make([]string, 0, len(lastSegments))
+
+	for _, info := range lastSegments {
+		downloads = append(downloads, info.Name())
+		if torrent, ok := torrents[info.Name()]; ok {
+			downloads = append(downloads, torrent)
+		}
+	}
+
+	if len(downloads) > 0 {
+		return u.uploadSession.Download(ctx, downloads...)
+	}
+
+	return nil
+}
+
+func (u *snapshotUploader) maxSeedableHeader() uint64 {
+	var max uint64
+
+	if list, err := snaptype.Segments(u.cfg.dirs.Snap, u.version); err == nil {
+		for _, info := range list {
+			if u.seedable(info) && info.T == snaptype.Headers && info.To > max {
+				max = info.To
+			}
+		}
+	}
+
+	return max
+}
+
+func (u *snapshotUploader) minBlockNumber() uint64 {
+	var min uint64
+
+	if list, err := snaptype.Segments(u.cfg.dirs.Snap, u.version); err == nil {
+		for _, info := range list {
+			if u.seedable(info) && min == 0 || info.From < min {
+				min = info.From
+			}
+		}
+	}
+
+	return min
+}
+
+func expandHomeDir(dirpath string) string {
+	home, err := os.UserHomeDir()
+	if err != nil {
+		return dirpath
+	}
+	prefix := fmt.Sprintf("~%c", os.PathSeparator)
+	if strings.HasPrefix(dirpath, prefix) {
+		return filepath.Join(home, dirpath[len(prefix):])
+	} else if dirpath == "~" {
+		return home
+	}
+	return dirpath
+}
+
+func isLocalFs(ctx context.Context, rclient *downloader.RCloneClient, fs string) bool {
+
+	remotes, _ := rclient.ListRemotes(ctx)
+
+	if remote, _, ok := strings.Cut(fs, ":"); ok {
+		for _, r := range remotes {
+			if remote == r {
+				return false
+			}
+		}
+
+		return filepath.VolumeName(fs) == remote
+	}
+
+	return true
+}
+
+func (u *snapshotUploader) start(ctx context.Context, logger log.Logger) {
+	var err error
+
+	u.rclone, err = downloader.NewRCloneClient(logger)
+
+	if err != nil {
+		logger.Warn("[uploader] Uploading disabled: rclone start failed", "err", err)
+		return
+	}
+
+	uploadFs := u.uploadFs
+
+	if isLocalFs(ctx, u.rclone, uploadFs) {
+		uploadFs = expandHomeDir(filepath.Clean(uploadFs))
+
+		uploadFs, err = filepath.Abs(uploadFs)
+
+		if err != nil {
+			logger.Warn("[uploader] Uploading disabled: invalid upload fs", "err", err, "fs", u.uploadFs)
+			return
+		}
+
+		if err := os.MkdirAll(uploadFs, 0755); err != nil {
+			logger.Warn("[uploader] Uploading disabled: can't create upload fs", "err", err, "fs", u.uploadFs)
+			return
+		}
+	}
+
+	u.uploadSession, err = u.rclone.NewSession(ctx, u.cfg.dirs.Snap, uploadFs)
+
+	if err != nil {
+		logger.Warn("[uploader] Uploading disabled: rclone session failed", "err", err)
+		return
+	}
+
+	go func() {
+
+		remoteFiles, _ := u.downloadManifest(ctx)
+		refreshFromRemote := false
+
+		if len(remoteFiles) > 0 {
+			u.updateRemotes(remoteFiles)
+			refreshFromRemote = true
+		} else {
+			u.refreshFromRemote(ctx)
+		}
+
+		go u.uploadManifest(ctx, refreshFromRemote)
+
+		logger.Debug("[snapshot uploader] starting snapshot subscription...")
+		snapshotSubCh, snapshotSubClean := u.cfg.notifier.Events.AddNewSnapshotSubscription()
+		defer snapshotSubClean()
+
+		logger.Info("[snapshot uploader] subscription established")
+
+		defer func() {
+			if err != nil {
+				if !errors.Is(err, context.Canceled) {
+					logger.Warn("[snapshot uploader] subscription closed", "reason", err)
+				}
+			} else {
+				logger.Warn("[snapshot uploader] subscription closed")
+			}
+		}()
+
+		u.scheduleUpload(ctx, logger)
+
+		for {
+			select {
+			case <-ctx.Done():
+				err = ctx.Err()
+				return
+			case <-snapshotSubCh:
+				logger.Info("[snapshot uploader] new snapshot received")
+				u.scheduleUpload(ctx, logger)
+			}
+		}
+	}()
+}
+
+func (u *snapshotUploader) scheduleUpload(ctx context.Context, logger log.Logger) {
+	if !u.uploadScheduled.CompareAndSwap(false, true) {
+		return
+	}
+
+	if u.uploading.CompareAndSwap(false, true) {
+		go func() {
+			defer u.uploading.Store(false)
+			for u.uploadScheduled.Load() {
+				u.uploadScheduled.Store(false)
+				u.upload(ctx, logger)
+			}
+		}()
+	}
+}
+
+func (u *snapshotUploader) removeBefore(before uint64) {
+	list, err := snaptype.Segments(u.cfg.dirs.Snap, u.version)
+
+	if err != nil {
+		return
+	}
+
+	var toReopen []string
+	var borToReopen []string
+
+	var toRemove []string //nolint:prealloc
+
+	for _, f := range list {
+		if f.To > before {
+			switch f.T {
+			case snaptype.BorEvents, snaptype.BorSpans:
+				borToReopen = append(borToReopen, filepath.Base(f.Path))
+			default:
+				toReopen = append(toReopen, filepath.Base(f.Path))
+			}
+
+			continue
+		}
+
+		toRemove = append(toRemove, f.Path)
+	}
+
+	if len(toRemove) > 0 {
+		if snapshots, ok := u.cfg.blockReader.Snapshots().(*freezeblocks.RoSnapshots); ok {
+			snapshots.SetSegmentsMin(before)
+			snapshots.ReopenList(toReopen, true)
+		}
+
+		if snapshots, ok := u.cfg.blockReader.BorSnapshots().(*freezeblocks.BorRoSnapshots); ok {
+			snapshots.ReopenList(borToReopen, true)
+			snapshots.SetSegmentsMin(before)
+		}
+
+		for _, f := range toRemove {
+			_ = os.Remove(f)
+			_ = os.Remove(f + ".torrent")
+			ext := filepath.Ext(f)
+			withoutExt := f[:len(f)-len(ext)]
+			_ = os.Remove(withoutExt + ".idx")
+
+			if strings.HasSuffix(withoutExt, "transactions") {
+				_ = os.Remove(withoutExt + "-to-block.idx")
+			}
+		}
+	}
+}
+
+func (u *snapshotUploader) upload(ctx context.Context, logger log.Logger) {
+	defer func() {
+		if r := recover(); r != nil {
+			log.Error("[snapshot uploader] snapshot upload failed", "err", r, "stack", dbg.Stack())
+		}
+	}()
+
+	retryTime := 30 * time.Second
+	maxRetryTime := 300 * time.Second
+
+	var uploadCount int
+
+	for {
+		var processList []*uploadState
+
+		for _, f := range u.cfg.blockReader.FrozenFiles() {
+			if state, ok := u.files[f]; !ok {
+				if fi, ok := snaptype.ParseFileName(u.cfg.dirs.Snap, f); ok {
+					if u.seedable(fi) {
+						state := &uploadState{
+							file:  f,
+							info:  &fi,
+							local: true,
+						}
+
+						if fi.TorrentFileExists() {
+							state.torrent, _ = u.torrentFiles.LoadByName(f)
+						}
+
+						u.files[f] = state
+						processList = append(processList, state)
+					}
+				}
+			} else {
+				func() {
+					state.Lock()
+					defer state.Unlock()
+
+					state.local = true
+
+					if state.torrent == nil && state.info.TorrentFileExists() {
+						state.torrent, _ = u.torrentFiles.LoadByName(f)
+						if state.torrent != nil {
+							state.localHash = state.torrent.InfoHash.String()
+						}
+					}
+
+					if !state.remote {
+						processList = append(processList, state)
+					}
+				}()
+			}
+		}
+
+		var torrentList []*uploadState
+
+		for _, state := range processList {
+			func() {
+				state.Lock()
+				defer state.Unlock()
+				if !(state.torrent != nil || state.buildingTorrent) {
+					torrentList = append(torrentList, state)
+					state.buildingTorrent = true
+				}
+			}()
+		}
+
+		if len(torrentList) > 0 {
+			g, gctx := errgroup.WithContext(ctx)
+			g.SetLimit(runtime.GOMAXPROCS(-1) * 4)
+			var i atomic.Int32
+
+			go func() {
+				logEvery := time.NewTicker(20 * time.Second)
+				defer logEvery.Stop()
+
+				for int(i.Load()) < len(torrentList) {
+					select {
+					case <-gctx.Done():
+						return
+					case <-logEvery.C:
+						if int(i.Load()) == len(torrentList) {
+							return
+						}
+						log.Info("[snapshot uploader] Creating .torrent files", "progress", fmt.Sprintf("%d/%d", i.Load(), len(torrentList)))
+					}
+				}
+			}()
+
+			for _, s := range torrentList {
+				state := s
+
+				g.Go(func() error {
+					defer i.Add(1)
+
+					err := downloader.BuildTorrentIfNeed(gctx, state.file, u.cfg.dirs.Snap, u.torrentFiles)
+
+					state.Lock()
+					state.buildingTorrent = false
+					state.Unlock()
+
+					if err != nil {
+						return err
+					}
+
+					torrent, err := u.torrentFiles.LoadByName(state.file)
+
+					if err != nil {
+						return err
+					}
+
+					state.Lock()
+					state.torrent = torrent
+					state.Unlock()
+
+					state.localHash = state.torrent.InfoHash.String()
+
+					logger.Info("[snapshot uploader] built torrent", "file", state.file, "hash", state.localHash)
+
+					return nil
+				})
+			}
+
+			if err := g.Wait(); err != nil {
+				logger.Debug(".torrent file creation failed", "err", err)
+			}
+		}
+
+		var f atomic.Int32
+
+		var uploadList []*uploadState
+
+		for _, state := range processList {
+			err := func() error {
+				state.Lock()
+				defer state.Unlock()
+				if !state.remote && state.torrent != nil && len(state.uploads) == 0 && u.rclone != nil {
+					state.uploads = []string{state.file, state.file + ".torrent"}
+					uploadList = append(uploadList, state)
+				}
+
+				return nil
+			}()
+
+			if err != nil {
+				logger.Debug("upload failed", "file", state.file, "err", err)
+			}
+		}
+
+		if len(uploadList) > 0 {
+			log.Info("[snapshot uploader] Starting upload", "count", len(uploadList))
+
+			g, gctx := errgroup.WithContext(ctx)
+			g.SetLimit(16)
+			var i atomic.Int32
+
+			go func() {
+				logEvery := time.NewTicker(20 * time.Second)
+				defer logEvery.Stop()
+
+				for int(i.Load()) < len(processList) {
+					select {
+					case <-gctx.Done():
+						log.Info("[snapshot uploader] Uploaded files", "processed", fmt.Sprintf("%d/%d/%d", i.Load(), len(processList), f.Load()))
+						return
+					case <-logEvery.C:
+						if int(i.Load()+f.Load()) == len(processList) {
+							return
+						}
+						log.Info("[snapshot uploader] Uploading files", "progress", fmt.Sprintf("%d/%d/%d", i.Load(), len(processList), f.Load()))
+					}
+				}
+			}()
+
+			for _, s := range uploadList {
+				state := s
+				func() {
+					state.Lock()
+					defer state.Unlock()
+
+					g.Go(func() error {
+						defer i.Add(1)
+						defer func() {
+							state.Lock()
+							state.uploads = nil
+							state.Unlock()
+						}()
+
+						if err := u.uploadSession.Upload(gctx, state.uploads...); err != nil {
+							f.Add(1)
+							return nil
+						}
+
+						uploadCount++
+
+						state.Lock()
+						state.remote = true
+						state.hasRemoteTorrent = true
+						state.Unlock()
+						return nil
+					})
+				}()
+			}
+
+			if err := g.Wait(); err != nil {
+				logger.Debug("[snapshot uploader] upload failed", "err", err)
+			}
+		}
+
+		if f.Load() == 0 {
+			break
+		}
+
+		time.Sleep(retryTime)
+
+		if retryTime < maxRetryTime {
+			retryTime += retryTime
+		} else {
+			retryTime = maxRetryTime
+		}
+	}
+
+	var err error
+
+	if uploadCount > 0 {
+		err = u.uploadManifest(ctx, false)
+	}
+
+	if err == nil {
+		if maxUploaded := u.maxUploadedHeader(); u.cfg.syncConfig.FrozenBlockLimit > 0 && maxUploaded > u.cfg.syncConfig.FrozenBlockLimit {
+			u.removeBefore(maxUploaded - u.cfg.syncConfig.FrozenBlockLimit)
+		}
+	}
+}
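One detail worth noting in the uploader's retry loop above: the delay doubles while it is below the cap, so it overshoots once before settling (30s, 60s, 120s, 240s, 480s, then 300s thereafter). A self-contained sketch reproducing that schedule:

    // mirrors the uploader's backoff arithmetic exactly
    retryTime := 30 * time.Second
    maxRetryTime := 300 * time.Second
    for i := 0; i < 7; i++ {
        fmt.Println(retryTime) // 30s 1m0s 2m0s 4m0s 8m0s 5m0s 5m0s
        if retryTime < maxRetryTime {
            retryTime += retryTime // doubles; may exceed the cap once
        } else {
            retryTime = maxRetryTime // clamps on the following pass
        }
    }
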
+ } + } + + if f.Load() == 0 { + break + } + + time.Sleep(retryTime) + + if retryTime < maxRetryTime { + retryTime += retryTime + } else { + retryTime = maxRetryTime + } + } + + var err error + + if uploadCount > 0 { + err = u.uploadManifest(ctx, false) + } + + if err == nil { + if maxUploaded := u.maxUploadedHeader(); u.cfg.syncConfig.FrozenBlockLimit > 0 && maxUploaded > u.cfg.syncConfig.FrozenBlockLimit { + u.removeBefore(maxUploaded - u.cfg.syncConfig.FrozenBlockLimit) + } + } +} diff --git a/eth/stagedsync/stage_txlookup.go b/eth/stagedsync/stage_txlookup.go index 3c0223e3393..67ded81459e 100644 --- a/eth/stagedsync/stage_txlookup.go +++ b/eth/stagedsync/stage_txlookup.go @@ -6,14 +6,16 @@ import ( "fmt" "math/big" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/cmp" "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/polygon/bor/borcfg" "github.com/ledgerwatch/erigon/turbo/services" - "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" @@ -24,7 +26,7 @@ type TxLookupCfg struct { db kv.RwDB prune prune.Mode tmpdir string - borConfig *chain.BorConfig + borConfig *borcfg.BorConfig blockReader services.FullBlockReader } @@ -32,9 +34,14 @@ func StageTxLookupCfg( db kv.RwDB, prune prune.Mode, tmpdir string, - borConfig *chain.BorConfig, + borConfigInterface chain.BorConfig, blockReader services.FullBlockReader, ) TxLookupCfg { + var borConfig *borcfg.BorConfig + if borConfigInterface != nil { + borConfig = borConfigInterface.(*borcfg.BorConfig) + } + return TxLookupCfg{ db: db, prune: prune, @@ -152,7 +159,7 @@ func borTxnLookupTransform(logPrefix string, tx kv.RwTx, blockFrom, blockTo uint blockNumBytes := bigNum.SetUint64(blocknum).Bytes() // we add state sync transactions every bor Sprint amount of blocks - if blocknum%cfg.borConfig.CalculateSprint(blocknum) == 0 && rawdb.HasBorReceipts(tx, blocknum) { + if blocknum%cfg.borConfig.CalculateSprintLength(blocknum) == 0 && rawdb.HasBorReceipts(tx, blocknum) { txnHash := types.ComputeBorTxHash(blocknum, blockHash) if err := next(k, txnHash.Bytes(), blockNumBytes); err != nil { return err diff --git a/eth/stagedsync/stagebuilder.go b/eth/stagedsync/stagebuilder.go index 05ed4183ca6..edd2e90049e 100644 --- a/eth/stagedsync/stagebuilder.go +++ b/eth/stagedsync/stagebuilder.go @@ -3,11 +3,13 @@ package stagedsync import ( "context" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/wrap" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/log/v3" ) type ChainEventNotifier interface { @@ -30,23 +32,25 @@ func MiningStages( { ID: stages.MiningCreateBlock, Description: "Mining: construct new block from tx pool", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error { - return SpawnMiningCreateBlockStage(s, tx, createBlockCfg, ctx.Done(), logger) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnMiningCreateBlockStage(s, txc.Tx, createBlockCfg, ctx.Done(), logger) }, - Unwind: func(firstCycle bool, u 
diff --git a/eth/stagedsync/stagebuilder.go b/eth/stagedsync/stagebuilder.go
index 05ed4183ca6..edd2e90049e 100644
--- a/eth/stagedsync/stagebuilder.go
+++ b/eth/stagedsync/stagebuilder.go
@@ -3,11 +3,13 @@ package stagedsync
 import (
 	"context"
 
+	"github.com/ledgerwatch/log/v3"
+
 	"github.com/ledgerwatch/erigon-lib/gointerfaces/remote"
 	"github.com/ledgerwatch/erigon-lib/kv"
+	"github.com/ledgerwatch/erigon-lib/wrap"
 	"github.com/ledgerwatch/erigon/core/types"
 	"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
-	"github.com/ledgerwatch/log/v3"
 )
 
 type ChainEventNotifier interface {
@@ -30,23 +32,25 @@ func MiningStages(
 		{
 			ID:          stages.MiningCreateBlock,
 			Description: "Mining: construct new block from tx pool",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
-				return SpawnMiningCreateBlockStage(s, tx, createBlockCfg, ctx.Done(), logger)
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
+				return SpawnMiningCreateBlockStage(s, txc.Tx, createBlockCfg, ctx.Done(), logger)
 			},
-			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { return nil },
-			Prune:  func(firstCycle bool, u *PruneState, tx kv.RwTx, logger log.Logger) error { return nil },
+			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
+				return nil
+			},
+			Prune: func(firstCycle bool, u *PruneState, tx kv.RwTx, logger log.Logger) error { return nil },
 		},
 		{
-			ID:          stages.BorHeimdall,
+			ID:          stages.MiningBorHeimdall,
 			Description: "Download Bor-specific data from Heimdall",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				if badBlockUnwind {
 					return nil
 				}
-				return BorHeimdallForward(s, u, ctx, tx, borHeimdallCfg, true, logger)
+				return MiningBorHeimdallForward(ctx, borHeimdallCfg, s, u, txc.Tx, logger)
 			},
-			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
-				return BorHeimdallUnwind(u, ctx, s, tx, borHeimdallCfg)
+			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
+				return BorHeimdallUnwind(u, ctx, s, txc.Tx, borHeimdallCfg)
 			},
 			Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
 				return BorHeimdallPrune(p, ctx, tx, borHeimdallCfg)
@@ -55,45 +59,51 @@ func MiningStages(
 		{
 			ID:          stages.MiningExecution,
 			Description: "Mining: execute new block from tx pool",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
-				//fmt.Println("SpawnMiningExecStage")
-				//defer fmt.Println("SpawnMiningExecStage", "DONE")
-				return SpawnMiningExecStage(s, tx, execCfg, ctx.Done(), logger)
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
+				return SpawnMiningExecStage(s, txc.Tx, execCfg, ctx.Done(), logger)
+			},
+			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
+				return nil
 			},
-			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { return nil },
-			Prune:  func(firstCycle bool, u *PruneState, tx kv.RwTx, logger log.Logger) error { return nil },
+			Prune: func(firstCycle bool, u *PruneState, tx kv.RwTx, logger log.Logger) error { return nil },
 		},
 		{
 			ID:          stages.HashState,
 			Description: "Hash the key in the state",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
-				return SpawnHashStateStage(s, tx, hashStateCfg, ctx, logger)
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
+				return SpawnHashStateStage(s, txc.Tx, hashStateCfg, ctx, logger)
 			},
-			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { return nil },
-			Prune:  func(firstCycle bool, u *PruneState, tx kv.RwTx, logger log.Logger) error { return nil },
+			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
+				return nil
+			},
+			Prune: func(firstCycle bool, u *PruneState, tx kv.RwTx, logger log.Logger) error { return nil },
 		},
 		{
 			ID:          stages.IntermediateHashes,
 			Description: "Generate intermediate hashes and computing state root",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
-				stateRoot, err := SpawnIntermediateHashesStage(s, u, tx, trieCfg, ctx, logger)
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
+				stateRoot, err := SpawnIntermediateHashesStage(s, u, txc.Tx, trieCfg, ctx, logger)
 				if err != nil {
 					return err
 				}
 				createBlockCfg.miner.MiningBlock.Header.Root = stateRoot
 				return nil
 			},
-			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { return nil },
-			Prune:  func(firstCycle bool, u *PruneState, tx kv.RwTx, logger log.Logger) error { return nil },
+			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
+				return nil
+			},
+			Prune: func(firstCycle bool, u *PruneState, tx kv.RwTx, logger log.Logger) error { return nil },
 		},
 		{
 			ID:          stages.MiningFinish,
 			Description: "Mining: create and propagate valid block",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
-				return SpawnMiningFinishStage(s, tx, finish, ctx.Done(), logger)
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
+				return SpawnMiningFinishStage(s, txc.Tx, finish, ctx.Done(), logger)
+			},
+			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
+				return nil
 			},
-			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { return nil },
-			Prune:  func(firstCycle bool, u *PruneState, tx kv.RwTx, logger log.Logger) error { return nil },
+			Prune: func(firstCycle bool, u *PruneState, tx kv.RwTx, logger log.Logger) error { return nil },
 		},
 	}
 }
diff --git a/eth/stagedsync/test/chain_configs.go b/eth/stagedsync/stagedsynctest/chain_configs.go
similarity index 76%
rename from eth/stagedsync/test/chain_configs.go
rename to eth/stagedsync/stagedsynctest/chain_configs.go
index db274245e63..9db9429d327 100644
--- a/eth/stagedsync/test/chain_configs.go
+++ b/eth/stagedsync/stagedsynctest/chain_configs.go
@@ -1,14 +1,15 @@
-package test
+package stagedsynctest
 
 import (
 	"github.com/ledgerwatch/erigon-lib/chain"
 	"github.com/ledgerwatch/erigon/params"
+	"github.com/ledgerwatch/erigon/polygon/bor/borcfg"
 )
 
 func BorDevnetChainConfigWithNoBlockSealDelays() *chain.Config {
 	// take care not to mutate global var (shallow copy)
 	chainConfigCopy := *params.BorDevnetChainConfig
-	borConfigCopy := *chainConfigCopy.Bor
+	borConfigCopy := *chainConfigCopy.Bor.(*borcfg.BorConfig)
 	borConfigCopy.Period = map[string]uint64{
 		"0": 0,
 	}
diff --git a/eth/stagedsync/test/harness.go b/eth/stagedsync/stagedsynctest/harness.go
similarity index 65%
rename from eth/stagedsync/test/harness.go
rename to eth/stagedsync/stagedsynctest/harness.go
index ea40c226d93..ea203b65fa0 100644
--- a/eth/stagedsync/test/harness.go
+++ b/eth/stagedsync/stagedsynctest/harness.go
@@ -1,19 +1,21 @@
-package test
+package stagedsynctest
 
 import (
 	"context"
 	"crypto/ecdsa"
 	"encoding/binary"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"math/big"
 	"testing"
 	"time"
 
+	"github.com/ledgerwatch/erigon/polygon/bor/borcfg"
+	"github.com/ledgerwatch/erigon/polygon/heimdall"
+
 	"github.com/golang/mock/gomock"
 	"github.com/holiman/uint256"
-	"github.com/ledgerwatch/erigon/turbo/services"
-	"github.com/ledgerwatch/erigon/turbo/stages/mock"
 	"github.com/ledgerwatch/log/v3"
 	"github.com/stretchr/testify/require"
 
@@ -21,14 +23,8 @@ import (
+23,8 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/ledgerwatch/erigon-lib/wrap" "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/consensus/bor" - "github.com/ledgerwatch/erigon/consensus/bor/clerk" - "github.com/ledgerwatch/erigon/consensus/bor/contract" - heimdallmock "github.com/ledgerwatch/erigon/consensus/bor/heimdall/mock" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" - bormock "github.com/ledgerwatch/erigon/consensus/bor/mock" - "github.com/ledgerwatch/erigon/consensus/bor/valset" consensusmock "github.com/ledgerwatch/erigon/consensus/mock" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" @@ -37,19 +33,25 @@ import ( "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/polygon/bor" + "github.com/ledgerwatch/erigon/polygon/bor/valset" + "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/turbo/stages/mock" + "github.com/ledgerwatch/erigon/turbo/testlog" ) -func InitHarness(ctx context.Context, t *testing.T, logger log.Logger, cfg HarnessCfg) Harness { +func InitHarness(ctx context.Context, t *testing.T, cfg HarnessCfg) Harness { + logger := testlog.Logger(t, cfg.LogLvl) genesisInit := createGenesisInitData(t, cfg.ChainConfig) m := mock.MockWithGenesis(t, genesisInit.genesis, genesisInit.genesisAllocPrivateKey, false) - chainDataDb := m.DB + chainDataDB := m.DB blockReader := m.BlockReader - borConsensusDb := memdb.NewTestDB(t) + borConsensusDB := memdb.NewTestDB(t) ctrl := gomock.NewController(t) - heimdallClient := heimdallmock.NewMockIHeimdallClient(ctrl) + heimdallClient := heimdall.NewMockHeimdallClient(ctrl) bhCfg := stagedsync.StageBorHeimdallCfg( - chainDataDb, - borConsensusDb, + chainDataDB, + borConsensusDB, stagedsync.NewProposingState(ðconfig.Defaults.Miner), *cfg.ChainConfig, heimdallClient, @@ -58,6 +60,7 @@ func InitHarness(ctx context.Context, t *testing.T, logger log.Logger, cfg Harne nil, // penalize nil, // not used nil, // not used + nil, ) stateSyncStages := stagedsync.DefaultStages( ctx, @@ -77,29 +80,31 @@ func InitHarness(ctx context.Context, t *testing.T, logger log.Logger, cfg Harne stagedsync.FinishCfg{}, true, ) - stateSync := stagedsync.New(stateSyncStages, stagedsync.DefaultUnwindOrder, stagedsync.DefaultPruneOrder, logger) + stateSync := stagedsync.New(ethconfig.Defaults.Sync, stateSyncStages, stagedsync.DefaultUnwindOrder, stagedsync.DefaultPruneOrder, logger) validatorKey, err := crypto.GenerateKey() require.NoError(t, err) validatorAddress := crypto.PubkeyToAddress(validatorKey.PublicKey) h := Harness{ - logger: logger, - chainDataDb: chainDataDb, - borConsensusDb: borConsensusDb, - chainConfig: cfg.ChainConfig, - blockReader: blockReader, - stateSyncStages: stateSyncStages, - stateSync: stateSync, - bhCfg: bhCfg, - heimdallClient: heimdallClient, - sealedHeaders: make(map[uint64]*types.Header), - borSpanner: bormock.NewMockSpanner(ctrl), - validatorAddress: validatorAddress, - validatorKey: validatorKey, - genesisInitData: genesisInit, + logger: logger, + chainDataDB: chainDataDB, + borConsensusDB: borConsensusDB, + chainConfig: cfg.ChainConfig, + borConfig: cfg.ChainConfig.Bor.(*borcfg.BorConfig), + blockReader: blockReader, + stateSyncStages: stateSyncStages, + stateSync: stateSync, + bhCfg: bhCfg, + 
heimdallClient: heimdallClient, + heimdallProducersOverride: cfg.GetOrCreateDefaultHeimdallProducersOverride(), + sealedHeaders: make(map[uint64]*types.Header), + borSpanner: bor.NewMockSpanner(ctrl), + validatorAddress: validatorAddress, + validatorKey: validatorKey, + genesisInitData: genesisInit, } if cfg.ChainConfig.Bor != nil { - h.setHeimdallNextMockSpan(logger) + h.setHeimdallNextMockSpan() h.mockBorSpanner() h.mockHeimdallClient() } @@ -117,57 +122,103 @@ type genesisInitData struct { } type HarnessCfg struct { - ChainConfig *chain.Config - GenerateChainNumBlocks int + ChainConfig *chain.Config + GenerateChainNumBlocks int + LogLvl log.Lvl + HeimdallProducersOverride map[uint64][]valset.Validator +} + +func (hc *HarnessCfg) GetOrCreateDefaultHeimdallProducersOverride() map[uint64][]valset.Validator { + if hc.HeimdallProducersOverride == nil { + hc.HeimdallProducersOverride = map[uint64][]valset.Validator{} + } + + return hc.HeimdallProducersOverride } type Harness struct { logger log.Logger - chainDataDb kv.RwDB - borConsensusDb kv.RwDB + chainDataDB kv.RwDB + borConsensusDB kv.RwDB chainConfig *chain.Config + borConfig *borcfg.BorConfig blockReader services.BlockReader stateSyncStages []*stagedsync.Stage stateSync *stagedsync.Sync bhCfg stagedsync.BorHeimdallCfg - heimdallClient *heimdallmock.MockIHeimdallClient - heimdallNextMockSpan *span.HeimdallSpan - heimdallLastEventId uint64 + heimdallClient *heimdall.MockHeimdallClient + heimdallNextMockSpan *heimdall.HeimdallSpan + heimdallLastEventID uint64 heimdallLastEventHeaderNum uint64 + heimdallProducersOverride map[uint64][]valset.Validator // spanID -> selected producers override sealedHeaders map[uint64]*types.Header - borSpanner *bormock.MockSpanner + borSpanner *bor.MockSpanner validatorAddress libcommon.Address validatorKey *ecdsa.PrivateKey genesisInitData *genesisInitData } -func (h *Harness) SaveStageProgress(ctx context.Context, t *testing.T, stageId stages.SyncStage, progress uint64) { - rwTx, err := h.chainDataDb.BeginRw(ctx) +func (h *Harness) Logger() log.Logger { + return h.logger +} + +func (h *Harness) BorConfig() *borcfg.BorConfig { + return h.borConfig +} + +func (h *Harness) SaveStageProgress(ctx context.Context, t *testing.T, stageID stages.SyncStage, progress uint64) { + rwTx, err := h.chainDataDB.BeginRw(ctx) require.NoError(t, err) defer rwTx.Rollback() - err = stages.SaveStageProgress(rwTx, stageId, progress) + err = stages.SaveStageProgress(rwTx, stageID, progress) require.NoError(t, err) err = rwTx.Commit() require.NoError(t, err) } +func (h *Harness) GetStageProgress(ctx context.Context, t *testing.T, stageID stages.SyncStage) uint64 { + roTx, err := h.chainDataDB.BeginRo(ctx) + require.NoError(t, err) + defer roTx.Rollback() + + progress, err := stages.GetStageProgress(roTx, stageID) + require.NoError(t, err) + return progress +} + +func (h *Harness) StateSyncUnwindPoint() uint64 { + return h.stateSync.UnwindPoint() +} + +func (h *Harness) StateSyncUnwindReason() stagedsync.UnwindReason { + return h.stateSync.UnwindReason() +} + func (h *Harness) RunStageForward(t *testing.T, id stages.SyncStage) { + h.RunStageForwardWithErrorIs(t, id, nil) +} + +func (h *Harness) RunStageForwardWithErrorIs(t *testing.T, id stages.SyncStage, wantErr error) { + err := h.RunStageForwardWithReturnError(t, id) + require.ErrorIs(t, err, wantErr) +} + +func (h *Harness) RunStageForwardWithReturnError(t *testing.T, id stages.SyncStage) error { err := h.stateSync.SetCurrentStage(id) require.NoError(t, err) - stage, found := 
h.findStateSyncStageById(id) + stage, found := h.findStateSyncStageByID(id) require.True(t, found) - stageState, err := h.stateSync.StageState(id, nil, h.chainDataDb) + stageState, err := h.stateSync.StageState(id, nil, h.chainDataDB) require.NoError(t, err) - err = stage.Forward(true, false, stageState, h.stateSync, nil, h.logger) - require.NoError(t, err) + return stage.Forward(true, false, stageState, h.stateSync, wrap.TxContainer{}, h.logger) } -func (h *Harness) ReadSpansFromDb(ctx context.Context) (spans []*span.HeimdallSpan, err error) { - err = h.chainDataDb.View(ctx, func(tx kv.Tx) error { +func (h *Harness) ReadSpansFromDB(ctx context.Context) (spans []*heimdall.HeimdallSpan, err error) { + err = h.chainDataDB.View(ctx, func(tx kv.Tx) error { spanIter, err := tx.Range(kv.BorSpans, nil, nil) if err != nil { return err @@ -180,7 +231,7 @@ func (h *Harness) ReadSpansFromDb(ctx context.Context) (spans []*span.HeimdallSp } spanKey := binary.BigEndian.Uint64(keyBytes) - var heimdallSpan span.HeimdallSpan + var heimdallSpan heimdall.HeimdallSpan if err = json.Unmarshal(spanBytes, &heimdallSpan); err != nil { return err } @@ -201,8 +252,8 @@ func (h *Harness) ReadSpansFromDb(ctx context.Context) (spans []*span.HeimdallSp return spans, nil } -func (h *Harness) ReadStateSyncEventsFromDb(ctx context.Context) (eventIds []uint64, err error) { - err = h.chainDataDb.View(ctx, func(tx kv.Tx) error { +func (h *Harness) ReadStateSyncEventsFromDB(ctx context.Context) (eventIDs []uint64, err error) { + err = h.chainDataDB.View(ctx, func(tx kv.Tx) error { eventsIter, err := tx.Range(kv.BorEvents, nil, nil) if err != nil { return err @@ -214,7 +265,7 @@ func (h *Harness) ReadStateSyncEventsFromDb(ctx context.Context) (eventIds []uin return err } - eventIds = append(eventIds, binary.BigEndian.Uint64(keyBytes)) + eventIDs = append(eventIDs, binary.BigEndian.Uint64(keyBytes)) } return nil @@ -223,12 +274,12 @@ func (h *Harness) ReadStateSyncEventsFromDb(ctx context.Context) (eventIds []uin return nil, err } - return eventIds, nil + return eventIDs, nil } -func (h *Harness) ReadFirstStateSyncEventNumPerBlockFromDb(ctx context.Context) (nums map[uint64]uint64, err error) { +func (h *Harness) ReadFirstStateSyncEventNumPerBlockFromDB(ctx context.Context) (nums map[uint64]uint64, err error) { nums = map[uint64]uint64{} - err = h.chainDataDb.View(ctx, func(tx kv.Tx) error { + err = h.chainDataDB.View(ctx, func(tx kv.Tx) error { eventNumsIter, err := tx.Range(kv.BorEventNums, nil, nil) if err != nil { return err @@ -254,6 +305,19 @@ func (h *Harness) ReadFirstStateSyncEventNumPerBlockFromDb(ctx context.Context) return nums, nil } +func (h *Harness) ReadHeaderByNumber(ctx context.Context, number uint64) (header *types.Header, err error) { + err = h.chainDataDB.View(ctx, func(tx kv.Tx) error { + header = rawdb.ReadHeaderByNumber(tx, number) + if header == nil { + return errors.New("header not found by harness") + } + + return nil + }) + + return +} + func createGenesisInitData(t *testing.T, chainConfig *chain.Config) *genesisInitData { t.Helper() accountPrivateKey, err := crypto.GenerateKey() @@ -282,7 +346,7 @@ func createGenesisInitData(t *testing.T, chainConfig *chain.Config) *genesisInit func (h *Harness) generateChain(ctx context.Context, t *testing.T, ctrl *gomock.Controller, cfg HarnessCfg) { consensusEngine := h.consensusEngine(t, cfg) var parentBlock *types.Block - err := h.chainDataDb.View(ctx, func(tx kv.Tx) (err error) { + err := h.chainDataDB.View(ctx, func(tx kv.Tx) (err error) { parentBlock, err = 
h.blockReader.BlockByNumber(ctx, tx, 0) return err }) @@ -294,7 +358,7 @@ func (h *Harness) generateChain(ctx context.Context, t *testing.T, ctrl *gomock. h.chainConfig, parentBlock, consensusEngine, - h.chainDataDb, + h.chainDataDB, cfg.GenerateChainNumBlocks, func(i int, gen *core.BlockGen) { // seal parent block first so that we can Prepare the current header @@ -309,13 +373,13 @@ func (h *Harness) generateChain(ctx context.Context, t *testing.T, ctrl *gomock. } h.logger.Info("Adding 1 mock tx to block", "blockNum", gen.GetHeader().Number) - chainId := uint256.Int{} - overflow := chainId.SetFromBig(h.chainConfig.ChainID) + chainID := uint256.Int{} + overflow := chainID.SetFromBig(h.chainConfig.ChainID) require.False(t, overflow) from := h.genesisInitData.fundedAddresses[0] tx, err := types.SignTx( types.NewEIP1559Transaction( - chainId, + chainID, gen.TxNonce(from), from, // send to itself new(uint256.Int), @@ -356,16 +420,16 @@ func (h *Harness) seal(t *testing.T, chr consensus.ChainHeaderReader, eng consen func (h *Harness) consensusEngine(t *testing.T, cfg HarnessCfg) consensus.Engine { if h.chainConfig.Bor != nil { - genesisContracts := contract.NewGenesisContractsClient( + genesisContracts := bor.NewGenesisContractsClient( h.chainConfig, - h.chainConfig.Bor.ValidatorContract, - h.chainConfig.Bor.StateReceiverContract, + h.borConfig.ValidatorContract, + h.borConfig.StateReceiverContract, h.logger, ) borConsensusEng := bor.New( h.chainConfig, - h.borConsensusDb, + h.borConsensusDB, nil, h.borSpanner, h.heimdallClient, @@ -380,12 +444,16 @@ func (h *Harness) consensusEngine(t *testing.T, cfg HarnessCfg) consensus.Engine return borConsensusEng } - t.Fatal(fmt.Sprintf("unimplmented consensus engine init for cfg %v", cfg.ChainConfig)) + t.Fatalf("unimplmented consensus engine init for cfg %v", cfg.ChainConfig) return nil } +func (h *Harness) SaveHeader(ctx context.Context, t *testing.T, header *types.Header) { + h.saveHeaders(ctx, t, []*types.Header{header}) +} + func (h *Harness) saveHeaders(ctx context.Context, t *testing.T, headers []*types.Header) { - rwTx, err := h.chainDataDb.BeginRw(ctx) + rwTx, err := h.chainDataDB.BeginRw(ctx) require.NoError(t, err) defer rwTx.Rollback() @@ -428,7 +496,7 @@ func (h *Harness) mockChainHeaderReader(ctrl *gomock.Controller) consensus.Chain return mockChainHR } -func (h *Harness) setHeimdallNextMockSpan(logger log.Logger) { +func (h *Harness) setHeimdallNextMockSpan() { validators := []*valset.Validator{ { ID: 1, @@ -438,14 +506,14 @@ func (h *Harness) setHeimdallNextMockSpan(logger log.Logger) { }, } - validatorSet := valset.NewValidatorSet(validators, logger) + validatorSet := valset.NewValidatorSet(validators) selectedProducers := make([]valset.Validator, len(validators)) for i := range validators { selectedProducers[i] = *validators[i] } - h.heimdallNextMockSpan = &span.HeimdallSpan{ - Span: span.Span{ + h.heimdallNextMockSpan = &heimdall.HeimdallSpan{ + Span: heimdall.Span{ ID: 0, StartBlock: 0, EndBlock: 255, @@ -480,10 +548,10 @@ func (h *Harness) mockHeimdallClient() { h.heimdallClient. EXPECT(). Span(gomock.Any(), gomock.Any()). 
- DoAndReturn(func(ctx context.Context, spanID uint64) (*span.HeimdallSpan, error) { + DoAndReturn(func(ctx context.Context, spanID uint64) (*heimdall.HeimdallSpan, error) { res := h.heimdallNextMockSpan - h.heimdallNextMockSpan = &span.HeimdallSpan{ - Span: span.Span{ + h.heimdallNextMockSpan = &heimdall.HeimdallSpan{ + Span: heimdall.Span{ ID: res.ID + 1, StartBlock: res.EndBlock + 1, EndBlock: res.EndBlock + 6400, @@ -492,6 +560,10 @@ func (h *Harness) mockHeimdallClient() { SelectedProducers: res.SelectedProducers, } + if selectedProducers, ok := h.heimdallProducersOverride[res.ID]; ok { + res.SelectedProducers = selectedProducers + } + return res, nil }). AnyTimes() @@ -499,25 +571,25 @@ func (h *Harness) mockHeimdallClient() { h.heimdallClient. EXPECT(). StateSyncEvents(gomock.Any(), gomock.Any(), gomock.Any()). - DoAndReturn(func(_ context.Context, _ uint64, _ int64) ([]*clerk.EventRecordWithTime, error) { - h.heimdallLastEventId++ - h.heimdallLastEventHeaderNum += h.chainConfig.Bor.CalculateSprint(h.heimdallLastEventHeaderNum) - stateSyncDelay := h.chainConfig.Bor.CalculateStateSyncDelay(h.heimdallLastEventHeaderNum) - newEvent := clerk.EventRecordWithTime{ - EventRecord: clerk.EventRecord{ - ID: h.heimdallLastEventId, + DoAndReturn(func(_ context.Context, _ uint64, _ int64) ([]*heimdall.EventRecordWithTime, error) { + h.heimdallLastEventID++ + h.heimdallLastEventHeaderNum += h.borConfig.CalculateSprintLength(h.heimdallLastEventHeaderNum) + stateSyncDelay := h.borConfig.CalculateStateSyncDelay(h.heimdallLastEventHeaderNum) + newEvent := heimdall.EventRecordWithTime{ + EventRecord: heimdall.EventRecord{ + ID: h.heimdallLastEventID, ChainID: h.chainConfig.ChainID.String(), }, Time: time.Unix(int64(h.sealedHeaders[h.heimdallLastEventHeaderNum].Time-stateSyncDelay-1), 0), } // 1 per sprint - return []*clerk.EventRecordWithTime{&newEvent}, nil + return []*heimdall.EventRecordWithTime{&newEvent}, nil }). 
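For orientation, the reworked stagedsynctest harness is driven roughly as follows. InitHarness, HarnessCfg (with its new LogLvl field), RunStageForward and ReadSpansFromDB are all defined in the hunks above; the test name, stage ID and block count below are assumptions, so treat this as a sketch rather than a test that ships with the patch:

func TestHarnessSketch(t *testing.T) {
	ctx := context.Background()
	h := stagedsynctest.InitHarness(ctx, t, stagedsynctest.HarnessCfg{
		ChainConfig:            stagedsynctest.BorDevnetChainConfigWithNoBlockSealDelays(),
		GenerateChainNumBlocks: 8,           // assumption: any small chain length
		LogLvl:                 log.LvlInfo, // the harness now builds its own logger from this
	})

	h.RunStageForward(t, stages.BorHeimdall) // assumption: the state-sync Bor-Heimdall stage ID
	spans, err := h.ReadSpansFromDB(ctx)
	require.NoError(t, err)
	require.NotEmpty(t, spans)
}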
diff --git a/eth/stagedsync/stages/stages.go b/eth/stagedsync/stages/stages.go
index bf3c9fba6ae..c6734f3923e 100644
--- a/eth/stagedsync/stages/stages.go
+++ b/eth/stagedsync/stages/stages.go
@@ -49,6 +49,7 @@ var (
 	Finish SyncStage = "Finish" // Nominal stage after all other stages

 	MiningCreateBlock SyncStage = "MiningCreateBlock"
+	MiningBorHeimdall SyncStage = "MiningBorHeimdall"
 	MiningExecution SyncStage = "MiningExecution"
 	MiningFinish SyncStage = "MiningFinish"
 	// Beacon chain stages
diff --git a/eth/stagedsync/sync.go b/eth/stagedsync/sync.go
index 1112fad19bf..20b067844de 100644
--- a/eth/stagedsync/sync.go
+++ b/eth/stagedsync/sync.go
@@ -5,26 +5,33 @@ import (
 	"fmt"
 	"time"

+	"github.com/ledgerwatch/log/v3"
+
 	libcommon "github.com/ledgerwatch/erigon-lib/common"
 	"github.com/ledgerwatch/erigon-lib/common/dbg"
+	"github.com/ledgerwatch/erigon-lib/diagnostics"
 	"github.com/ledgerwatch/erigon-lib/kv"
-	"github.com/ledgerwatch/log/v3"
+	"github.com/ledgerwatch/erigon-lib/wrap"
+
+	"github.com/ledgerwatch/erigon/eth/ethconfig"
 	"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
 )

 type Sync struct {
+	cfg ethconfig.Sync
 	unwindPoint *uint64 // used to run stages
 	prevUnwindPoint *uint64 // used to get value from outside of staged sync after cycle (for example to notify RPCDaemon)
 	unwindReason UnwindReason
-
-	stages []*Stage
-	unwindOrder []*Stage
-	pruningOrder []*Stage
-	currentStage uint
-	timings []Timing
-	logPrefixes []string
-	logger log.Logger
+	posTransition *uint64
+
+	stages []*Stage
+	unwindOrder []*Stage
+	pruningOrder []*Stage
+	currentStage uint
+	timings []Timing
+	logPrefixes []string
+	logger log.Logger
+	stagesIdsList []string
 }

 type Timing struct {
@@ -34,8 +41,21 @@ type Timing struct {
 	took time.Duration
 }

-func (s *Sync) Len() int { return len(s.stages) }
-func (s *Sync) PrevUnwindPoint() *uint64 { return s.prevUnwindPoint }
+func (s *Sync) Len() int {
+	return len(s.stages)
+}
+
+func (s *Sync) UnwindPoint() uint64 {
+	return *s.unwindPoint
+}
+
+func (s *Sync) UnwindReason() UnwindReason {
+	return s.unwindReason
+}
+
+func (s *Sync) PrevUnwindPoint() *uint64 {
+	return s.prevUnwindPoint
+}

 func (s *Sync) NewUnwindState(id stages.SyncStage, unwindPoint, currentProgress uint64) *UnwindState {
 	return &UnwindState{id, unwindPoint, currentProgress, UnwindReason{nil, nil}, s}
@@ -70,6 +90,11 @@ func (s *Sync) NextStage() {
 		return
 	}
 	s.currentStage++
+
+	isDiagEnabled := diagnostics.TypeOf(diagnostics.CurrentSyncStage{}).Enabled()
+	if isDiagEnabled {
+		diagnostics.Send(diagnostics.CurrentSyncStage{Stage: s.currentStage})
+	}
 }

 // IsBefore returns true if stage1 goes before stage2 in staged sync
@@ -128,17 +153,29 @@ func (s *Sync) LogPrefix() string {
 	return s.logPrefixes[s.currentStage]
 }

+func (s *Sync) StagesIdsList() []string {
+	if s == nil {
+		return []string{}
+	}
+	return s.stagesIdsList
+}
+
 func (s *Sync) SetCurrentStage(id stages.SyncStage) error {
 	for i, stage := range s.stages {
 		if stage.ID == id {
 			s.currentStage = uint(i)
+
+			isDiagEnabled := diagnostics.TypeOf(diagnostics.CurrentSyncStage{}).Enabled()
+			if isDiagEnabled {
+				diagnostics.Send(diagnostics.CurrentSyncStage{Stage: s.currentStage})
+			}
+
 			return nil
 		}
 	}
 	return fmt.Errorf("stage not found with id: %v", id)
 }

-func New(stagesList []*Stage, unwindOrder UnwindOrder, pruneOrder PruneOrder, logger log.Logger) *Sync {
+func New(cfg ethconfig.Sync, stagesList []*Stage, unwindOrder UnwindOrder, pruneOrder PruneOrder, logger log.Logger) *Sync {
 	unwindStages := make([]*Stage, len(stagesList))
 	for i, stageIndex := range unwindOrder {
 		for _, s := range stagesList {
@@ -157,18 +194,23 @@ func New(stagesList []*Stage, unwindOrder UnwindOrder, pruneOrder PruneOrder, lo
 			}
 		}
 	}
+
 	logPrefixes := make([]string, len(stagesList))
+	stagesIdsList := make([]string, len(stagesList))
 	for i := range stagesList {
 		logPrefixes[i] = fmt.Sprintf("%d/%d %s", i+1, len(stagesList), stagesList[i].ID)
+		stagesIdsList[i] = string(stagesList[i].ID)
 	}
 	return &Sync{
-		stages: stagesList,
-		currentStage: 0,
-		unwindOrder: unwindStages,
-		pruningOrder: pruneStages,
-		logPrefixes: logPrefixes,
-		logger: logger,
+		cfg: cfg,
+		stages: stagesList,
+		currentStage: 0,
+		unwindOrder: unwindStages,
+		pruningOrder: pruneStages,
+		logPrefixes: logPrefixes,
+		logger: logger,
+		stagesIdsList: stagesIdsList,
 	}
 }

@@ -196,7 +238,7 @@ func (s *Sync) StageState(stage stages.SyncStage, tx kv.Tx, db kv.RoDB) (*StageS
 	return &StageState{s, stage, blockNum}, nil
 }

-func (s *Sync) RunUnwind(db kv.RwDB, tx kv.RwTx) error {
+func (s *Sync) RunUnwind(db kv.RwDB, txc wrap.TxContainer) error {
 	if s.unwindPoint == nil {
 		return nil
 	}
@@ -204,7 +246,7 @@ func (s *Sync) RunUnwind(db kv.RwDB, tx kv.RwTx) error {
 		if s.unwindOrder[j] == nil || s.unwindOrder[j].Disabled || s.unwindOrder[j].Unwind == nil {
 			continue
 		}
-		if err := s.unwindStage(false, s.unwindOrder[j], db, tx); err != nil {
+		if err := s.unwindStage(false, s.unwindOrder[j], db, txc); err != nil {
 			return err
 		}
 	}
@@ -217,7 +259,7 @@ func (s *Sync) RunUnwind(db kv.RwDB, tx kv.RwTx) error {
 	return nil
 }

-func (s *Sync) RunNoInterrupt(db kv.RwDB, tx kv.RwTx, firstCycle bool) error {
+func (s *Sync) RunNoInterrupt(db kv.RwDB, txc wrap.TxContainer, firstCycle bool) error {
 	s.prevUnwindPoint = nil
 	s.timings = s.timings[:0]
@@ -228,7 +270,7 @@ func (s *Sync) RunNoInterrupt(db kv.RwDB, tx kv.RwTx, firstCycle bool) error {
 			if s.unwindOrder[j] == nil || s.unwindOrder[j].Disabled || s.unwindOrder[j].Unwind == nil {
 				continue
 			}
-			if err := s.unwindStage(firstCycle, s.unwindOrder[j], db, tx); err != nil {
+			if err := s.unwindStage(firstCycle, s.unwindOrder[j], db, txc); err != nil {
 				return err
 			}
 		}
@@ -260,7 +302,7 @@ func (s *Sync) RunNoInterrupt(db kv.RwDB, tx kv.RwTx, firstCycle bool) error {
 			continue
 		}

-		if err := s.runStage(stage, db, tx, firstCycle, badBlockUnwind); err != nil {
+		if err := s.runStage(stage, db, txc, firstCycle, badBlockUnwind); err != nil {
 			return err
 		}

@@ -269,6 +311,11 @@ func (s *Sync) RunNoInterrupt(db kv.RwDB, tx kv.RwTx, firstCycle bool) error {
 			return libcommon.ErrStopped
 		}

+		if string(stage.ID) == s.cfg.BreakAfterStage { // break process loop
+			s.logger.Warn("--sync.loop.break.after caused stage break")
+			break
+		}
+
 		s.NextStage()
 	}

@@ -280,10 +327,12 @@ func (s *Sync) RunNoInterrupt(db kv.RwDB, tx kv.RwTx, firstCycle bool) error {
 	return nil
 }

-func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool) error {
+func (s *Sync) Run(db kv.RwDB, txc wrap.TxContainer, firstCycle bool) (bool, error) {
 	s.prevUnwindPoint = nil
 	s.timings = s.timings[:0]

+	hasMore := false
+
 	for !s.IsDone() {
 		var badBlockUnwind bool
 		if s.unwindPoint != nil {
@@ -291,8 +340,8 @@ func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool) error {
 				if s.unwindOrder[j] == nil || s.unwindOrder[j].Disabled || s.unwindOrder[j].Unwind == nil {
 					continue
 				}
-				if err := s.unwindStage(firstCycle, s.unwindOrder[j], db, tx); err != nil {
-					return err
+				if err := s.unwindStage(firstCycle, s.unwindOrder[j], db, txc); err != nil {
+					return false, err
 				}
 			}
 			s.prevUnwindPoint = s.unwindPoint
@@ -302,7 +351,7 @@ func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool) error {
 			}
 			s.unwindReason = UnwindReason{}
 			if err := s.SetCurrentStage(s.stages[0].ID); err != nil {
-				return err
+				return false, err
 			}
 			// If there were unwinds at the start, a heavier but invalid chain may be present, so
 			// we relax the rules for Stage1
@@ -318,7 +367,7 @@ func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool) error {
 		if string(stage.ID) == dbg.StopBeforeStage() { // stop process for debugging reasons
 			s.logger.Warn("STOP_BEFORE_STAGE env flag forced to stop app")
-			return libcommon.ErrStopped
+			return false, libcommon.ErrStopped
 		}

 		if stage.Disabled || stage.Forward == nil {
@@ -328,24 +377,47 @@ func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool) error {
 			continue
 		}

-		if err := s.runStage(stage, db, tx, firstCycle, badBlockUnwind); err != nil {
-			return err
+		if err := s.runStage(stage, db, txc, firstCycle, badBlockUnwind); err != nil {
+			return false, err
 		}

 		if string(stage.ID) == dbg.StopAfterStage() { // stop process for debugging reasons
 			s.logger.Warn("STOP_AFTER_STAGE env flag forced to stop app")
-			return libcommon.ErrStopped
+			return false, libcommon.ErrStopped
+		}
+
+		if string(stage.ID) == s.cfg.BreakAfterStage { // break process loop
+			s.logger.Warn("--sync.loop.break.after caused stage break")
+			if s.posTransition != nil {
+				ptx := txc.Tx
+
+				if ptx == nil {
+					if tx, err := db.BeginRw(context.Background()); err == nil {
+						ptx = tx
+						defer tx.Rollback()
+					}
+				}
+
+				if ptx != nil {
+					if progress, err := stages.GetStageProgress(ptx, stage.ID); err == nil {
+						hasMore = progress < *s.posTransition
+					}
+				}
+			} else {
+				hasMore = true
+			}
+			break
 		}

 		s.NextStage()
 	}

 	if err := s.SetCurrentStage(s.stages[0].ID); err != nil {
-		return err
+		return false, err
 	}
 	s.currentStage = 0

-	return nil
+	return hasMore, nil
 }

 func (s *Sync) RunPrune(db kv.RwDB, tx kv.RwTx, firstCycle bool) error {
@@ -420,14 +492,14 @@ func PrintTables(db kv.RoDB, tx kv.RwTx) []interface{} {
 	return bucketSizes
 }

-func (s *Sync) runStage(stage *Stage, db kv.RwDB, tx kv.RwTx, firstCycle bool, badBlockUnwind bool) (err error) {
+func (s *Sync) runStage(stage *Stage, db kv.RwDB, txc wrap.TxContainer, firstCycle bool, badBlockUnwind bool) (err error) {
 	start := time.Now()
-	stageState, err := s.StageState(stage.ID, tx, db)
+	stageState, err := s.StageState(stage.ID, txc.Tx, db)
 	if err != nil {
 		return err
 	}

-	if err = stage.Forward(firstCycle, badBlockUnwind, stageState, s, tx, s.logger); err != nil {
+	if err = stage.Forward(firstCycle, badBlockUnwind, stageState, s, txc, s.logger); err != nil {
 		wrappedError := fmt.Errorf("[%s] %w", s.LogPrefix(), err)
 		s.logger.Debug("Error while executing stage", "err", wrappedError)
 		return wrappedError
@@ -444,10 +516,10 @@ func (s *Sync) runStage(stage *Stage, db kv.RwDB, tx kv.RwTx, firstCycle bool, b
 	return nil
 }

-func (s *Sync) unwindStage(firstCycle bool, stage *Stage, db kv.RwDB, tx kv.RwTx) error {
+func (s *Sync) unwindStage(firstCycle bool, stage *Stage, db kv.RwDB, txc wrap.TxContainer) error {
 	start := time.Now()
 	s.logger.Trace("Unwind...", "stage", stage.ID)
-	stageState, err := s.StageState(stage.ID, tx, db)
+	stageState, err := s.StageState(stage.ID, txc.Tx, db)
 	if err != nil {
 		return err
 	}
@@ -463,7 +535,7 @@ func (s *Sync) unwindStage(firstCycle bool, stage *Stage, db kv.RwDB, tx kv.RwTx
 		return err
 	}

-	err = stage.Unwind(firstCycle, unwind, stageState, tx, s.logger)
+	err = stage.Unwind(firstCycle, unwind, stageState, txc, s.logger)
 	if err != nil {
 		return fmt.Errorf("[%s] %w", s.LogPrefix(), err)
 	}
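The sync.go hunks above change the calling convention for the whole loop: New takes an ethconfig.Sync up front, stage transactions travel inside a wrap.TxContainer, and Run returns (hasMore, error), where hasMore is only set when --sync.loop.break.after (cfg.BreakAfterStage) interrupted the loop before the PoS transition point. A hedged caller-side sketch; the helper name and retry loop are illustrative, while the signatures and the TxContainer literal come straight from this patch and its tests:

// hypothetical helper showing the post-patch calling convention
func runToCompletion(stateSync *stagedsync.Sync, db kv.RwDB, tx kv.RwTx) error {
	txc := wrap.TxContainer{Tx: tx} // before this patch: stateSync.Run(db, tx, firstCycle)
	firstCycle := true
	for {
		hasMore, err := stateSync.Run(db, txc, firstCycle)
		if err != nil {
			return err
		}
		if !hasMore { // no early break requested; the cycle is complete
			return nil
		}
		firstCycle = false // re-enter the loop to finish the remaining stages
	}
}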
diff --git a/eth/stagedsync/sync_test.go b/eth/stagedsync/sync_test.go
index 8992c31c5a0..6324d72e060 100644
--- a/eth/stagedsync/sync_test.go
+++ b/eth/stagedsync/sync_test.go
@@ -5,11 +5,12 @@ import (
 	"fmt"
 	"testing"

-	"github.com/ledgerwatch/erigon-lib/kv"
 	"github.com/ledgerwatch/erigon-lib/kv/memdb"
+	"github.com/ledgerwatch/erigon-lib/wrap"
 	"github.com/ledgerwatch/log/v3"
 	"github.com/stretchr/testify/assert"

+	"github.com/ledgerwatch/erigon/eth/ethconfig"
 	"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
 )

@@ -19,7 +20,7 @@ func TestStagesSuccess(t *testing.T) {
 		{
 			ID: stages.Headers,
 			Description: "Downloading headers",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Headers)
 				return nil
 			},
@@ -27,7 +28,7 @@ func TestStagesSuccess(t *testing.T) {
 		{
 			ID: stages.Bodies,
 			Description: "Downloading block bodiess",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Bodies)
 				return nil
 			},
@@ -35,15 +36,15 @@ func TestStagesSuccess(t *testing.T) {
 		{
 			ID: stages.Senders,
 			Description: "Recovering senders from tx signatures",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Senders)
 				return nil
 			},
 		},
 	}
-	state := New(s, nil, nil, log.New())
+	state := New(ethconfig.Defaults.Sync, s, nil, nil, log.New())
 	db, tx := memdb.NewTestTx(t)
-	err := state.Run(db, tx, true /* initialCycle */)
+	_, err := state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
 	assert.NoError(t, err)

 	expectedFlow := []stages.SyncStage{
@@ -58,7 +59,7 @@ func TestDisabledStages(t *testing.T) {
 		{
 			ID: stages.Headers,
 			Description: "Downloading headers",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Headers)
 				return nil
 			},
@@ -66,7 +67,7 @@ func TestDisabledStages(t *testing.T) {
 		{
 			ID: stages.Bodies,
 			Description: "Downloading block bodiess",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Bodies)
 				return nil
 			},
@@ -75,15 +76,15 @@ func TestDisabledStages(t *testing.T) {
 		{
 			ID: stages.Senders,
 			Description: "Recovering senders from tx signatures",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Senders)
 				return nil
 			},
 		},
 	}
-	state := New(s, nil, nil, log.New())
+	state := New(ethconfig.Defaults.Sync, s, nil, nil, log.New())
 	db, tx := memdb.NewTestTx(t)
-	err := state.Run(db, tx, true /* initialCycle */)
+	_, err := state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
 	assert.NoError(t, err)

 	expectedFlow := []stages.SyncStage{
@@ -99,7 +100,7 @@ func TestErroredStage(t *testing.T) {
 		{
 			ID: stages.Headers,
 			Description: "Downloading headers",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Headers)
 				return nil
 			},
@@ -107,7 +108,7 @@ func TestErroredStage(t *testing.T) {
 		{
 			ID: stages.Bodies,
 			Description: "Downloading block bodiess",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Bodies)
 				return expectedErr
 			},
@@ -115,15 +116,15 @@ func TestErroredStage(t *testing.T) {
 		{
 			ID: stages.Senders,
 			Description: "Recovering senders from tx signatures",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Senders)
 				return nil
 			},
 		},
 	}
-	state := New(s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil, log.New())
+	state := New(ethconfig.Defaults.Sync, s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil, log.New())
 	db, tx := memdb.NewTestTx(t)
-	err := state.Run(db, tx, true /* initialCycle */)
+	_, err := state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
 	assert.Equal(t, fmt.Errorf("[2/3 Bodies] %w", expectedErr), err)

 	expectedFlow := []stages.SyncStage{
@@ -139,39 +140,39 @@ func TestUnwindSomeStagesBehindUnwindPoint(t *testing.T) {
 		{
 			ID: stages.Headers,
 			Description: "Downloading headers",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Headers)
 				if s.BlockNumber == 0 {
-					return s.Update(tx, 2000)
+					return s.Update(txc.Tx, 2000)
 				}
 				return nil
 			},
-			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
+			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, unwindOf(stages.Headers))
-				return u.Done(tx)
+				return u.Done(txc.Tx)
 			},
 		},
 		{
 			ID: stages.Bodies,
 			Description: "Downloading block bodiess",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Bodies)
 				if s.BlockNumber == 0 {
-					return s.Update(tx, 1000)
+					return s.Update(txc.Tx, 1000)
 				}
 				return nil
 			},
-			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
+			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, unwindOf(stages.Bodies))
-				return u.Done(tx)
+				return u.Done(txc.Tx)
 			},
 		},
 		{
 			ID: stages.Senders,
 			Description: "Recovering senders from tx signatures",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				if s.BlockNumber == 0 {
-					if err := s.Update(tx, 1700); err != nil {
+					if err := s.Update(txc.Tx, 1700); err != nil {
 						return err
 					}
 				}
@@ -183,30 +184,30 @@ func TestUnwindSomeStagesBehindUnwindPoint(t *testing.T) {
 				}
 				return nil
 			},
-			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
+			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, unwindOf(stages.Senders))
-				return u.Done(tx)
+				return u.Done(txc.Tx)
 			},
 		},
 		{
 			ID: stages.IntermediateHashes,
 			Disabled: true,
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.IntermediateHashes)
 				if s.BlockNumber == 0 {
-					return s.Update(tx, 2000)
+					return s.Update(txc.Tx, 2000)
 				}
 				return nil
 			},
-			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
+			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, unwindOf(stages.IntermediateHashes))
-				return u.Done(tx)
+				return u.Done(txc.Tx)
 			},
 		},
 	}
-	state := New(s, []stages.SyncStage{s[3].ID, s[2].ID, s[1].ID, s[0].ID}, nil, log.New())
+	state := New(ethconfig.Defaults.Sync, s, []stages.SyncStage{s[3].ID, s[2].ID, s[1].ID, s[0].ID}, nil, log.New())
 	db, tx := memdb.NewTestTx(t)
-	err := state.Run(db, tx, true /* initialCycle */)
+	_, err := state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
 	assert.NoError(t, err)

 	expectedFlow := []stages.SyncStage{
@@ -237,69 +238,69 @@ func TestUnwind(t *testing.T) {
 		{
 			ID: stages.Headers,
 			Description: "Downloading headers",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Headers)
 				if s.BlockNumber == 0 {
-					return s.Update(tx, 2000)
+					return s.Update(txc.Tx, 2000)
 				}
 				return nil
 			},
-			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
+			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, unwindOf(stages.Headers))
-				return u.Done(tx)
+				return u.Done(txc.Tx)
 			},
 		},
 		{
 			ID: stages.Bodies,
 			Description: "Downloading block bodiess",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Bodies)
 				if s.BlockNumber == 0 {
-					return s.Update(tx, 2000)
+					return s.Update(txc.Tx, 2000)
 				}
 				return nil
 			},
-			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
+			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, unwindOf(stages.Bodies))
-				return u.Done(tx)
+				return u.Done(txc.Tx)
 			},
 		},
 		{
 			ID: stages.Senders,
 			Description: "Recovering senders from tx signatures",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Senders)
 				if !unwound {
 					unwound = true
 					u.UnwindTo(500, UnwindReason{})
-					return s.Update(tx, 3000)
+					return s.Update(txc.Tx, 3000)
 				}
 				return nil
 			},
-			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
+			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, unwindOf(stages.Senders))
-				return u.Done(tx)
+				return u.Done(txc.Tx)
 			},
 		},
 		{
 			ID: stages.IntermediateHashes,
 			Disabled: true,
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.IntermediateHashes)
 				if s.BlockNumber == 0 {
-					return s.Update(tx, 2000)
+					return s.Update(txc.Tx, 2000)
 				}
 				return nil
 			},
-			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
+			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, unwindOf(stages.IntermediateHashes))
-				return u.Done(tx)
+				return u.Done(txc.Tx)
 			},
 		},
 	}
-	state := New(s, []stages.SyncStage{s[3].ID, s[2].ID, s[1].ID, s[0].ID}, nil, log.New())
+	state := New(ethconfig.Defaults.Sync, s, []stages.SyncStage{s[3].ID, s[2].ID, s[1].ID, s[0].ID}, nil, log.New())
 	db, tx := memdb.NewTestTx(t)
-	err := state.Run(db, tx, true /* initialCycle */)
+	_, err := state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
 	assert.NoError(t, err)

 	expectedFlow := []stages.SyncStage{
@@ -326,7 +327,7 @@ func TestUnwind(t *testing.T) {
 	flow = flow[:0]
 	state.unwindOrder = []*Stage{s[3], s[2], s[1], s[0]}
 	state.UnwindTo(100, UnwindReason{})
-	err = state.Run(db, tx, true /* initialCycle */)
+	_, err = state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
 	assert.NoError(t, err)

 	expectedFlow = []stages.SyncStage{
@@ -345,25 +346,25 @@ func TestUnwindEmptyUnwinder(t *testing.T) {
 		{
 			ID: stages.Headers,
 			Description: "Downloading headers",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Headers)
 				if s.BlockNumber == 0 {
-					return s.Update(tx, 2000)
+					return s.Update(txc.Tx, 2000)
 				}
 				return nil
 			},
-			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
+			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, unwindOf(stages.Headers))
-				return u.Done(tx)
+				return u.Done(txc.Tx)
 			},
 		},
 		{
 			ID: stages.Bodies,
 			Description: "Downloading block bodiess",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Bodies)
 				if s.BlockNumber == 0 {
-					return s.Update(tx, 2000)
+					return s.Update(txc.Tx, 2000)
 				}
 				return nil
 			},
@@ -371,24 +372,24 @@ func TestUnwindEmptyUnwinder(t *testing.T) {
 		{
 			ID: stages.Senders,
 			Description: "Recovering senders from tx signatures",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Senders)
 				if !unwound {
 					unwound = true
 					u.UnwindTo(500, UnwindReason{})
-					return s.Update(tx, 3000)
+					return s.Update(txc.Tx, 3000)
 				}
 				return nil
 			},
-			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
+			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, unwindOf(stages.Senders))
-				return u.Done(tx)
+				return u.Done(txc.Tx)
 			},
 		},
 	}
-	state := New(s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil, log.New())
+	state := New(ethconfig.Defaults.Sync, s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil, log.New())
 	db, tx := memdb.NewTestTx(t)
-	err := state.Run(db, tx, true /* initialCycle */)
+	_, err := state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
 	assert.NoError(t, err)

 	expectedFlow := []stages.SyncStage{
@@ -419,36 +420,36 @@ func TestSyncDoTwice(t *testing.T) {
 		{
 			ID: stages.Headers,
 			Description: "Downloading headers",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Headers)
-				return s.Update(tx, s.BlockNumber+100)
+				return s.Update(txc.Tx, s.BlockNumber+100)
 			},
 		},
 		{
 			ID: stages.Bodies,
 			Description: "Downloading block bodiess",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Bodies)
-				return s.Update(tx, s.BlockNumber+200)
+				return s.Update(txc.Tx, s.BlockNumber+200)
 			},
 		},
 		{
 			ID: stages.Senders,
 			Description: "Recovering senders from tx signatures",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Senders)
-				return s.Update(tx, s.BlockNumber+300)
+				return s.Update(txc.Tx, s.BlockNumber+300)
 			},
 		},
 	}
-	state := New(s, nil, nil, log.New())
+	state := New(ethconfig.Defaults.Sync, s, nil, nil, log.New())
 	db, tx := memdb.NewTestTx(t)
-	err := state.Run(db, tx, true /* initialCycle */)
+	_, err := state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
 	assert.NoError(t, err)

-	state = New(s, nil, nil, log.New())
-	err = state.Run(db, tx, true /* initialCycle */)
+	state = New(ethconfig.Defaults.Sync, s, nil, nil, log.New())
+	_, err = state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
 	assert.NoError(t, err)

 	expectedFlow := []stages.SyncStage{
@@ -477,7 +478,7 @@ func TestStateSyncInterruptRestart(t *testing.T) {
 		{
 			ID: stages.Headers,
 			Description: "Downloading headers",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Headers)
 				return nil
 			},
@@ -485,7 +486,7 @@ func TestStateSyncInterruptRestart(t *testing.T) {
 		{
 			ID: stages.Bodies,
 			Description: "Downloading block bodiess",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Bodies)
 				return expectedErr
 			},
@@ -493,22 +494,22 @@ func TestStateSyncInterruptRestart(t *testing.T) {
 		{
 			ID: stages.Senders,
 			Description: "Recovering senders from tx signatures",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Senders)
 				return nil
 			},
 		},
 	}
-	state := New(s, nil, nil, log.New())
+	state := New(ethconfig.Defaults.Sync, s, nil, nil, log.New())
 	db, tx := memdb.NewTestTx(t)
-	err := state.Run(db, tx, true /* initialCycle */)
+	_, err := state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
 	assert.Equal(t, fmt.Errorf("[2/3 Bodies] %w", expectedErr), err)

 	expectedErr = nil

-	state = New(s, nil, nil, log.New())
-	err = state.Run(db, tx, true /* initialCycle */)
+	state = New(ethconfig.Defaults.Sync, s, nil, nil, log.New())
+	_, err = state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
 	assert.NoError(t, err)

 	expectedFlow := []stages.SyncStage{
@@ -529,59 +530,59 @@ func TestSyncInterruptLongUnwind(t *testing.T) {
 		{
 			ID: stages.Headers,
 			Description: "Downloading headers",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Headers)
 				if s.BlockNumber == 0 {
-					return s.Update(tx, 2000)
+					return s.Update(txc.Tx, 2000)
 				}
 				return nil
 			},
-			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
+			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, unwindOf(stages.Headers))
-				return u.Done(tx)
+				return u.Done(txc.Tx)
 			},
 		},
 		{
 			ID: stages.Bodies,
 			Description: "Downloading block bodiess",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Bodies)
 				if s.BlockNumber == 0 {
-					return s.Update(tx, 2000)
+					return s.Update(txc.Tx, 2000)
 				}
 				return nil
 			},
-			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
+			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, unwindOf(stages.Bodies))
-				return u.Done(tx)
+				return u.Done(txc.Tx)
 			},
 		},
 		{
 			ID: stages.Senders,
 			Description: "Recovering senders from tx signatures",
-			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, stages.Senders)
 				if !unwound {
 					unwound = true
 					u.UnwindTo(500, UnwindReason{})
-					return s.Update(tx, 3000)
+					return s.Update(txc.Tx, 3000)
 				}
 				return nil
 			},
-			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
+			Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
 				flow = append(flow, unwindOf(stages.Senders))
 				if !interrupted {
 					interrupted = true
 					return errInterrupted
 				}
 				assert.Equal(t, 500, int(u.UnwindPoint))
-				return u.Done(tx)
+				return u.Done(txc.Tx)
 			},
 		},
 	}
-	state := New(s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil, log.New())
+	state := New(ethconfig.Defaults.Sync, s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil, log.New())
 	db, tx := memdb.NewTestTx(t)
-	err := state.Run(db, tx, true /* initialCycle */)
+	_, err := state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
 	assert.Error(t, errInterrupted, err)

 	//state = NewState(s)
@@ -589,7 +590,7 @@ func TestSyncInterruptLongUnwind(t *testing.T) {
 	//err = state.LoadUnwindInfo(tx)
 	//assert.NoError(t, err)
 	//state.UnwindTo(500, libcommon.Hash{})
-	err = state.Run(db, tx, true /* initialCycle */)
+	_, err = state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
 	assert.NoError(t, err)

 	expectedFlow := []stages.SyncStage{
diff --git a/fork.yaml b/fork.yaml
index 30154040586..b67aa653f19 100644
--- a/fork.yaml
+++ b/fork.yaml
@@ -4,7 +4,7 @@ footer: |
 # define the footer with markdown
 base:
   name: ledgerwatch/erigon
   url: https://github.com/ledgerwatch/erigon
-  hash: 4186213f225f79844e94140ff9336c992a2b3d62
+  hash: 3040e2576c29512addaf8ce50528609b4ff9a8e0
 fork:
   name: bobanetwork/v3-erigon
   url: https://github.com/bobanetwork/v3-erigon
diff --git a/go.mod b/go.mod
index 7b0b4b60a68..20064dfbea6 100644
--- a/go.mod
+++ b/go.mod
@@ -5,7 +5,6 @@ go 1.20
 require (
 	github.com/erigontech/mdbx-go v0.27.21
 	github.com/erigontech/silkworm-go v0.10.0
-	github.com/ledgerwatch/erigon-lib v1.0.0
 	github.com/ledgerwatch/log/v3 v3.9.0
 	github.com/ledgerwatch/secp256k1 v1.0.0
 )
@@ -14,8 +13,9 @@ require (
 	gfx.cafe/util/go/generic v0.0.0-20230721185457-c559e86c829c
 	github.com/99designs/gqlgen v0.17.40
 	github.com/Giulio2002/bls v0.0.0-20230906201036-c2330c97dc7d
+	github.com/Masterminds/sprig/v3 v3.2.3
 	github.com/RoaringBitmap/roaring v1.2.3
-	github.com/VictoriaMetrics/fastcache v1.12.1
+	github.com/VictoriaMetrics/fastcache v1.12.2
 	github.com/alecthomas/kong v0.8.0
 	github.com/anacrolix/log v0.14.3-0.20230823030427-4b296d71a6b4
 	github.com/anacrolix/sync v0.4.0
@@ -35,15 +35,18 @@ require (
 	github.com/emicklei/dot v1.6.0
 	github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c
 	github.com/gballet/go-verkle v0.0.0-20221121182333-31427a1f2d35
-	github.com/go-chi/chi/v5 v5.0.10
+	github.com/gfx-labs/sse v0.0.0-20231226060816-f747e26a9baa
+	github.com/go-chi/chi/v5 v5.0.11
+	github.com/go-chi/cors v1.2.1
 	github.com/goccy/go-json v0.9.11
 	github.com/gofrs/flock v0.8.1
 	github.com/golang-jwt/jwt/v4 v4.5.0
 	github.com/golang/mock v1.6.0
 	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb
 	github.com/google/btree v1.1.2
+	github.com/google/cel-go v0.18.2
 	github.com/google/gofuzz v1.2.0
-	github.com/gorilla/websocket v1.5.0
+	github.com/gorilla/websocket v1.5.1
 	github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
 	github.com/hashicorp/golang-lru/arc/v2 v2.0.6
 	github.com/hashicorp/golang-lru/v2 v2.0.6
@@ -55,12 +58,12 @@ require (
 	github.com/json-iterator/go v1.1.12
 	github.com/julienschmidt/httprouter v1.3.0
 	github.com/klauspost/compress v1.17.3
+	github.com/ledgerwatch/erigon-lib v1.0.0
 	github.com/libp2p/go-libp2p v0.31.0
 	github.com/libp2p/go-libp2p-mplex v0.9.0
 	github.com/libp2p/go-libp2p-pubsub v0.9.3
 	github.com/maticnetwork/crand v1.0.2
-	github.com/maticnetwork/polyproto v0.0.3-0.20230216113155-340ea926ca53
-	github.com/multiformats/go-multiaddr v0.11.0
+	github.com/multiformats/go-multiaddr v0.12.1
 	github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6
 	github.com/pelletier/go-toml v1.9.5
 	github.com/pelletier/go-toml/v2 v2.1.0
@@ -79,7 +82,7 @@ require (
 	github.com/tidwall/btree v1.6.0
 	github.com/ugorji/go/codec v1.1.13
 	github.com/ugorji/go/codec/codecgen v1.1.13
-	github.com/urfave/cli/v2 v2.25.7
+	github.com/urfave/cli/v2 v2.27.1
 	github.com/valyala/fastjson v1.6.4
 	github.com/vektah/gqlparser/v2 v2.5.10
 	github.com/xsleonard/go-merkle v1.1.0
@@ -87,21 +90,24 @@ require (
 	golang.org/x/crypto v0.17.0
 	golang.org/x/exp v0.0.0-20230905200255-921286631fa9
 	golang.org/x/net v0.19.0
-	golang.org/x/sync v0.5.0
-	golang.org/x/sys v0.15.0
+	golang.org/x/sync v0.6.0
+	golang.org/x/sys v0.16.0
 	golang.org/x/time v0.5.0
-	google.golang.org/grpc v1.59.0
+	google.golang.org/grpc v1.60.1
 	google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0
-	google.golang.org/protobuf v1.31.0
+	google.golang.org/protobuf v1.32.0
 	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.1
-	modernc.org/sqlite v1.27.0
+	modernc.org/sqlite v1.28.0
 	pgregory.net/rapid v1.1.0
+	sigs.k8s.io/yaml v1.4.0
 )

 require (
+	github.com/Masterminds/goutils v1.1.1 // indirect
+	github.com/Masterminds/semver/v3 v3.2.0 // indirect
 	github.com/agnivade/levenshtein v1.1.1 // indirect
 	github.com/ajwerner/btree v0.0.0-20211221152037-f427b3e689c0 // indirect
 	github.com/alecthomas/atomic v0.1.0-alpha2 // indirect
@@ -118,6 +124,7 @@ require (
 	github.com/anacrolix/stm v0.4.1-0.20221221005312-96d17df0e496 // indirect
 	github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96 // indirect
 	github.com/anacrolix/utp v0.1.0 // indirect
+	github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
 	github.com/aws/aws-sdk-go-v2 v1.21.2 // indirect
 	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.14 // indirect
 	github.com/aws/aws-sdk-go-v2/config v1.19.0 // indirect
@@ -173,6 +180,7 @@ require (
 	github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b // indirect
 	github.com/google/uuid v1.3.1 // indirect
 	github.com/ianlancetaylor/cgosymbolizer v0.0.0-20220405231054-a1ae3e4bba26 // indirect
+	github.com/imdario/mergo v0.3.11 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/ipfs/go-cid v0.4.1 // indirect
 	github.com/ipfs/go-log/v2 v2.5.1 // indirect
@@ -183,7 +191,7 @@ require (
 	github.com/kr/pretty v0.3.1 // indirect
 	github.com/kr/text v0.2.0 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
-	github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20231223133303-67e341eff759 // indirect
+	github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240115083615-b5feeb63e191 // indirect
 	github.com/libp2p/go-buffer-pool v0.1.0 // indirect
 	github.com/libp2p/go-cidranger v1.1.0 // indirect
 	github.com/libp2p/go-flow-metrics v0.1.0 // indirect
@@ -198,12 +206,14 @@ require (
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.19 // indirect
 	github.com/mattn/go-runewidth v0.0.13 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
+	github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
 	github.com/miekg/dns v1.1.55 // indirect
 	github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
 	github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
 	github.com/minio/sha256-simd v1.0.1 // indirect
+	github.com/mitchellh/copystructure v1.0.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/mitchellh/reflectwalk v1.0.0 // indirect
 	github.com/mmcloughlin/addchain v0.4.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
@@ -238,23 +248,26 @@ require (
 	github.com/pion/webrtc/v3 v3.1.42 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_golang v1.17.0 // indirect
+	github.com/prometheus/client_golang v1.18.0 // indirect
 	github.com/prometheus/client_model v0.5.0 // indirect
-	github.com/prometheus/common v0.44.0 // indirect
-	github.com/prometheus/procfs v0.11.1 // indirect
+	github.com/prometheus/common v0.45.0 // indirect
+	github.com/prometheus/procfs v0.12.0 // indirect
 	github.com/quic-go/qpack v0.4.0 // indirect
 	github.com/quic-go/qtls-go1-20 v0.3.3 // indirect
-	github.com/quic-go/quic-go v0.38.1 // indirect
+	github.com/quic-go/quic-go v0.38.2 // indirect
 	github.com/quic-go/webtransport-go v0.5.3 // indirect
 	github.com/raulk/go-watchdog v1.3.0 // indirect
 	github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
 	github.com/rivo/uniseg v0.2.0 // indirect
-	github.com/rogpeppe/go-internal v1.10.0 // indirect
+	github.com/rogpeppe/go-internal v1.12.0 // indirect
 	github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
+	github.com/shopspring/decimal v1.2.0 // indirect
 	github.com/sirupsen/logrus v1.9.0 // indirect
 	github.com/sosodev/duration v1.1.0 // indirect
 	github.com/spaolacci/murmur3 v1.1.0 // indirect
+	github.com/spf13/cast v1.3.1 // indirect
+	github.com/stoewer/go-strcase v1.2.0 // indirect
 	github.com/supranational/blst v0.3.11 // indirect
 	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
 	go.etcd.io/bbolt v1.3.6 // indirect
@@ -266,7 +279,9 @@ require (
 	golang.org/x/mod v0.14.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
 	golang.org/x/tools v0.16.0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect
+	gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	lukechampine.com/blake3 v1.2.1 // indirect
 	lukechampine.com/uint128 v1.3.0 // indirect
diff --git a/go.sum b/go.sum
index b0bee5c9319..de4b8846abe 100644
--- a/go.sum
+++ b/go.sum
@@ -55,6 +62,12 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/Giulio2002/bls v0.0.0-20230906201036-c2330c97dc7d h1:fAztVLpjcVcd2al4GL8xYr9Yp7LmXXSTuLqu83U8hKo=
 github.com/Giulio2002/bls v0.0.0-20230906201036-c2330c97dc7d/go.mod h1:nCQrFU6/QsJtLS+SBLWRn9UG2nds1f3hQKfWHCrtUqw=
+github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
+github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
+github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
+github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=
+github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
 github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w=
 github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI=
 github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo=
@@ -62,8 +68,8 @@ github.com/RoaringBitmap/roaring v1.2.3 h1:yqreLINqIrX22ErkKI0vY47/ivtJr6n+kMhVO
 github.com/RoaringBitmap/roaring v1.2.3/go.mod h1:plvDsJQpxOC5bw8LRteu/MLWHsHez/3y6cubLI4/1yE=
 github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
-github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
-github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
+github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
+github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
 github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
 github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
 github.com/ajwerner/btree v0.0.0-20211221152037-f427b3e689c0 h1:byYvvbfSo3+9efR4IeReh77gVs4PnNDR3AMOE9NJ7a0=
@@ -137,6 +143,8 @@ github.com/anacrolix/utp v0.1.0 h1:FOpQOmIwYsnENnz7tAGohA+r6iXpRjrq8ssKSre2Cp4=
 github.com/anacrolix/utp v0.1.0/go.mod h1:MDwc+vsGEq7RMw6lr2GKOEqjWny5hO5OZXRVNaBJ2Dk=
 github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
 github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
+github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
 github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
 github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
@@ -308,6 +316,8 @@ github.com/garslo/gogen v0.0.0-20170307003452-d6ebae628c7c h1:uYNKzPntb8c6DKvP9E
 github.com/garslo/gogen v0.0.0-20170307003452-d6ebae628c7c/go.mod h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8=
 github.com/gballet/go-verkle v0.0.0-20221121182333-31427a1f2d35 h1:I8QswD9gf3VEpr7bpepKKOm7ChxFITIG+oc1I5/S0no=
 github.com/gballet/go-verkle v0.0.0-20221121182333-31427a1f2d35/go.mod h1:DMDd04jjQgdynaAwbEgiRERIGpC8fDjx0+y06an7Psg=
+github.com/gfx-labs/sse v0.0.0-20231226060816-f747e26a9baa h1:b6fBm4SLM8jywQHNmc3ZCl6zQEhEyZl6bp7is4en72M=
+github.com/gfx-labs/sse v0.0.0-20231226060816-f747e26a9baa/go.mod h1:K0FMPjMrIaS1+/SeZeOVkGVjDVERZJW53inQL00FjLE=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
 github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
@@ -316,8 +326,10 @@ github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a/go.mod
 github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
 github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
 github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
-github.com/go-chi/chi/v5 v5.0.10 h1:rLz5avzKpjqxrYwXNfmjkrYYXOyLJd37pz53UFHC6vk=
-github.com/go-chi/chi/v5 v5.0.10/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
+github.com/go-chi/chi/v5 v5.0.11 h1:BnpYbFZ3T3S1WMpD79r7R5ThWX40TaFB7L31Y8xqSwA=
+github.com/go-chi/chi/v5 v5.0.11/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
+github.com/go-chi/cors v1.2.1 h1:xEC8UT3Rlp2QuWNEr4Fs/c2EAGVKBwy/1vHx3bppil4=
+github.com/go-chi/cors v1.2.1/go.mod h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58=
 github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -402,6 +414,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
 github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/cel-go v0.18.2 h1:L0B6sNBSVmt0OyECi8v6VOS74KOc9W/tLiWKfZABvf4=
+github.com/google/cel-go v0.18.2/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -414,6 +428,7 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -438,6 +453,7 @@ github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b h1:h9U78+dx9a4BKdQkBB
 github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
 github.com/google/renameio v0.1.0/go.mod 
h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= @@ -453,8 +469,8 @@ github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9/go.mod h1:wJfORR github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= @@ -475,6 +491,7 @@ github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63 github.com/huandu/xstrings v1.3.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huin/goupnp v1.2.0 h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY= @@ -483,6 +500,8 @@ github.com/ianlancetaylor/cgosymbolizer v0.0.0-20220405231054-a1ae3e4bba26 h1:UT github.com/ianlancetaylor/cgosymbolizer v0.0.0-20220405231054-a1ae3e4bba26/go.mod h1:DvXTE/K/RtHehxU8/GtDs4vFtfw64jJ3PaCnFri8CRg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= @@ -540,8 +559,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= 
-github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20231223133303-67e341eff759 h1:ov31f3HPnYycT15Lhg3k9Q4Dx+qpQFCQWAcoxjoiGvM= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20231223133303-67e341eff759/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240115083615-b5feeb63e191 h1:X/mHEyh0xEuhixj6hKCNQl04NuNDToYWJ08vr66e6L0= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240115083615-b5feeb63e191/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/log/v3 v3.9.0 h1:iDwrXe0PVwBC68Dd94YSsHbMgQ3ufsgjzXtFNFVZFRk= github.com/ledgerwatch/log/v3 v3.9.0/go.mod h1:EiAY6upmI/6LkNhOVxb4eVsmsP11HZCnZ3PlJMjYiqE= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= @@ -579,8 +598,6 @@ github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8 github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= github.com/maticnetwork/crand v1.0.2 h1:Af0tAivC8zrxXDpGWNWVT/0s1fOz8w0eRbahZgURS8I= github.com/maticnetwork/crand v1.0.2/go.mod h1:/NRNL3bj2eYdqpWmoIP5puxndTpi0XRxpj5ZKxfHjyg= -github.com/maticnetwork/polyproto v0.0.3-0.20230216113155-340ea926ca53 h1:PjYV+lghs106JKkrYgOnrsfDLoTc11BxZd4rUa4Rus4= -github.com/maticnetwork/polyproto v0.0.3-0.20230216113155-340ea926ca53/go.mod h1:e1mU2EXSwEpn5jM7GfNwu3AupsV6WAGoPFFfswXOF0o= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= @@ -591,8 +608,8 @@ github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4 github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= @@ -607,8 +624,12 @@ github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8Rv github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= 
+github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= @@ -631,8 +652,8 @@ github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9 github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.11.0 h1:XqGyJ8ufbCE0HmTDwx2kPdsrQ36AGPZNZX6s6xfJH10= -github.com/multiformats/go-multiaddr v0.11.0/go.mod h1:gWUm0QLR4thQ6+ZF6SXUw8YjtwQSPapICM+NmCkxHSM= +github.com/multiformats/go-multiaddr v0.12.1 h1:vm+BA/WZA8QZDp1pF1FWhi5CT3g1tbi5GJmqpb6wnlk= +github.com/multiformats/go-multiaddr v0.12.1/go.mod h1:7mPkiBMmLeFipt+nNSq9pHZUeJSt8lHBgH6yhj0YQzE= github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= @@ -739,8 +760,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -752,16 +773,16 @@ github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7q github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod 
h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/protolambda/ztyp v0.2.2 h1:rVcL3vBu9W/aV646zF6caLS/dyn9BN8NYiuJzicLNyY= github.com/protolambda/ztyp v0.2.2/go.mod h1:9bYgKGqg3wJqT9ac1gI2hnVb0STQq7p/1lapqrqY1dU= github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7 h1:0tVE4tdWQK9ZpYygoV7+vS6QkDvQVySboMVEIxBJmXw= @@ -774,8 +795,8 @@ github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= github.com/quic-go/qtls-go1-20 v0.3.3 h1:17/glZSLI9P9fDAeyCHBFSWSqJcwx1byhLwP5eUIDCM= github.com/quic-go/qtls-go1-20 v0.3.3/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= -github.com/quic-go/quic-go v0.38.1 h1:M36YWA5dEhEeT+slOu/SwMEucbYd0YFidxG3KlGPZaE= -github.com/quic-go/quic-go v0.38.1/go.mod h1:ijnZM7JsFIkp4cRyjxJNIzdSfCLmUMg9wdyhGmg+SN4= +github.com/quic-go/quic-go v0.38.2 h1:VWv/6gxIoB8hROQJhx1JEyiegsUQ+zMN3em3kynTGdg= +github.com/quic-go/quic-go v0.38.2/go.mod h1:ijnZM7JsFIkp4cRyjxJNIzdSfCLmUMg9wdyhGmg+SN4= github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2EptUrfOPWU= github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= @@ -789,8 +810,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 h1:Lt9DzQALzHoDwMBGJ6v8ObDPR0dzr2a6sXTB1Fq7IHs= @@ -804,6 +825,8 @@ github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5P github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.3.1 
h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= @@ -844,10 +867,14 @@ github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0b github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -881,8 +908,8 @@ github.com/ugorji/go/codec v1.1.13/go.mod h1:oNVt3Dq+FO91WNQ/9JnHKQP2QJxTzoN7wCB github.com/ugorji/go/codec/codecgen v1.1.13 h1:rGpZ4Q63VcWA3DMBbIHvg+SQweUkfXBBa/f9X0W+tFg= github.com/ugorji/go/codec/codecgen v1.1.13/go.mod h1:EhCxlc7Crov+HLygD4+hBCitXNrrGKRrRWj+pRsyJGg= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= -github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= +github.com/urfave/cli/v2 v2.27.1 h1:8xSQ6szndafKVRmfyeUMxkNUJQMjL1F2zmsZ+qHpfho= +github.com/urfave/cli/v2 v2.27.1/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= github.com/vektah/gqlparser/v2 v2.5.10 h1:6zSM4azXC9u4Nxy5YmdmGu4uKamfwsdKTwp5zsEealU= @@ -951,7 +978,9 @@ golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220516162934-403b01795ae8/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= 
golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -993,6 +1022,7 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1045,9 +1075,11 @@ golang.org/x/net v0.0.0-20220401154927-543a649e0bdd/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220531201128-c960675eff93/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1075,8 +1107,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1144,16 +1176,23 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1162,6 +1201,7 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= @@ -1309,8 +1349,10 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU= +google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0= +google.golang.org/genproto/googleapis/rpc 
v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -1330,8 +1372,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= +google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -1346,9 +1388,11 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/cenkalti/backoff.v1 v1.1.0 h1:Arh75ttbsvlpVA7WtVpH4u9h6Zl46xuptxqLxPiSo4Y= +gopkg.in/cenkalti/backoff.v1 v1.1.0/go.mod h1:J6Vskwqd+OMVJl8C33mmtxTBs2gyzfv7UDAkHu8BrjI= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1400,8 +1444,8 @@ modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E= modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E= modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/sqlite v1.27.0 h1:MpKAHoyYB7xqcwnUwkuD+npwEa0fojF0B5QRbN+auJ8= -modernc.org/sqlite v1.27.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0= +modernc.org/sqlite v1.28.0 h1:Zx+LyDDmXczNnEQdvPuEfcFVA2ZPyaD7UCZDjef3BHQ= +modernc.org/sqlite v1.28.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0= modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= modernc.org/tcl v1.15.2 
h1:C4ybAYCGJw968e+Me18oW55kD/FexcHbqH2xak1ROSY= @@ -1415,6 +1459,8 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= zombiezen.com/go/sqlite v0.13.1 h1:qDzxyWWmMtSSEH5qxamqBFmqA2BLSSbtODi3ojaE02o= diff --git a/p2p/discover/lookup.go b/p2p/discover/lookup.go index 87ba2c2d55e..5bc2ba47982 100644 --- a/p2p/discover/lookup.go +++ b/p2p/discover/lookup.go @@ -132,12 +132,18 @@ func (it *lookup) startQueries() bool { return it.queries > 0 } +type ctxKey int + +const ( + ckNoSlowdown ctxKey = iota +) + func disableLookupSlowdown(ctx context.Context) context.Context { - return context.WithValue(ctx, "p2p.discover.lookup.noSlowdown", true) + return context.WithValue(ctx, ckNoSlowdown, true) } func isDisabledLookupSlowdown(ctx context.Context) bool { - return ctx.Value("p2p.discover.lookup.noSlowdown") != nil + return ctx.Value(ckNoSlowdown) != nil } func (it *lookup) slowdown() { diff --git a/p2p/discover/table_test.go b/p2p/discover/table_test.go index c9c0153bf78..e2a2354408c 100644 --- a/p2p/discover/table_test.go +++ b/p2p/discover/table_test.go @@ -29,6 +29,7 @@ import ( "github.com/ledgerwatch/erigon/p2p/enode" "github.com/ledgerwatch/erigon/p2p/enr" "github.com/ledgerwatch/erigon/p2p/netutil" + "github.com/ledgerwatch/log/v3" ) func TestTable_pingReplace(t *testing.T) { @@ -49,7 +50,7 @@ func TestTable_pingReplace(t *testing.T) { func testPingReplace(t *testing.T, newNodeIsResponding, lastInBucketIsResponding bool) { transport := newPingRecorder() tmpDir := t.TempDir() - tab, db := newTestTable(transport, tmpDir) + tab, db := newTestTable(transport, tmpDir, log.Root()) defer db.Close() defer tab.close() @@ -118,7 +119,7 @@ func testTableBumpNoDuplicatesRun(t *testing.T, bucketCountGen byte, bumpCountGe if len(bumps) > 0 { tmpDir := t.TempDir() - tab, db := newTestTable(newPingRecorder(), tmpDir) + tab, db := newTestTable(newPingRecorder(), tmpDir, log.Root()) defer db.Close() defer tab.close() @@ -170,7 +171,7 @@ func TestTable_bumpNoDuplicates_examples(t *testing.T) { func TestTable_IPLimit(t *testing.T) { transport := newPingRecorder() tmpDir := t.TempDir() - tab, db := newTestTable(transport, tmpDir) + tab, db := newTestTable(transport, tmpDir, log.Root()) defer db.Close() defer tab.close() @@ -188,7 +189,7 @@ func TestTable_IPLimit(t *testing.T) { func TestTable_BucketIPLimit(t *testing.T) { transport := newPingRecorder() tmpDir := t.TempDir() - tab, db := newTestTable(transport, tmpDir) + tab, db := newTestTable(transport, tmpDir, log.Root()) defer db.Close() defer tab.close() @@ -224,7 +225,7 @@ func testTableFindNodeByIDRun(t *testing.T, nodesCountGen uint16, resultsCountGe // for any node table, Target and N transport := newPingRecorder() tmpDir := t.TempDir() - tab, db := newTestTable(transport, tmpDir) + tab, db := newTestTable(transport, tmpDir, log.Root()) defer db.Close() defer tab.close() @@ -328,7 +329,7 @@ func testTableReadRandomNodesGetAllRun(t *testing.T, 
nodesCountGen uint16, rand buf := make([]*enode.Node, nodesCount) transport := newPingRecorder() tmpDir := t.TempDir() - tab, db := newTestTable(transport, tmpDir) + tab, db := newTestTable(transport, tmpDir, log.Root()) defer db.Close() defer tab.close() <-tab.initDone @@ -392,7 +393,7 @@ func generateNode(rand *rand.Rand) *node { func TestTable_addVerifiedNode(t *testing.T) { tmpDir := t.TempDir() - tab, db := newTestTable(newPingRecorder(), tmpDir) + tab, db := newTestTable(newPingRecorder(), tmpDir, log.Root()) <-tab.initDone defer db.Close() defer tab.close() @@ -425,7 +426,7 @@ func TestTable_addVerifiedNode(t *testing.T) { func TestTable_addSeenNode(t *testing.T) { tmpDir := t.TempDir() - tab, db := newTestTable(newPingRecorder(), tmpDir) + tab, db := newTestTable(newPingRecorder(), tmpDir, log.Root()) <-tab.initDone defer db.Close() defer tab.close() @@ -460,7 +461,7 @@ func TestTable_addSeenNode(t *testing.T) { func TestTable_revalidateSyncRecord(t *testing.T) { transport := newPingRecorder() tmpDir := t.TempDir() - tab, db := newTestTable(transport, tmpDir) + tab, db := newTestTable(transport, tmpDir, log.Root()) <-tab.initDone defer db.Close() defer tab.close() diff --git a/p2p/discover/table_util_test.go b/p2p/discover/table_util_test.go index e4613192884..72fea0258ae 100644 --- a/p2p/discover/table_util_test.go +++ b/p2p/discover/table_util_test.go @@ -43,8 +43,8 @@ func init() { nullNode = enode.SignNull(&r, enode.ID{}) } -func newTestTable(t transport, tmpDir string) (*Table, *enode.DB) { - db, err := enode.OpenDB(context.Background(), "", tmpDir) +func newTestTable(t transport, tmpDir string, logger log.Logger) (*Table, *enode.DB) { + db, err := enode.OpenDB(context.Background(), "", tmpDir, logger) if err != nil { panic(err) } diff --git a/p2p/discover/v4_udp.go b/p2p/discover/v4_udp.go index bc665d0a1c3..deb4427a7a7 100644 --- a/p2p/discover/v4_udp.go +++ b/p2p/discover/v4_udp.go @@ -29,12 +29,13 @@ import ( "time" lru "github.com/hashicorp/golang-lru/v2" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon/common/debug" "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/p2p/discover/v4wire" "github.com/ledgerwatch/erigon/p2p/enode" "github.com/ledgerwatch/erigon/p2p/netutil" - "github.com/ledgerwatch/log/v3" ) // Errors @@ -610,74 +611,30 @@ func (t *UDPv4) loop() { }() case r := <-t.gotreply: - - type matchCandidate struct { - el *list.Element - errc chan error - } - - var matchCandidates []matchCandidate - - mutex.Lock() - for el := plist.Front(); el != nil; el = el.Next() { - p := el.Value.(*replyMatcher) - if p.from == r.from && p.ptype == r.data.Kind() && p.ip.Equal(r.ip) { - candidate := matchCandidate{el, p.errc} - p.errc = make(chan error, 1) - matchCandidates = append(matchCandidates, candidate) - } - } - mutex.Unlock() - - if len(matchCandidates) == 0 { - // if there are no matched candidates try again matching against - // ip & port to handle node key changes + func() { mutex.Lock() - for el := plist.Front(); el != nil; el = el.Next() { - p := el.Value.(*replyMatcher) - if p.ptype == r.data.Kind() && p.ip.Equal(r.ip) && p.port == r.port { - candidate := matchCandidate{el, p.errc} - p.errc = make(chan error, 1) - matchCandidates = append(matchCandidates, candidate) - } - } - mutex.Unlock() - - if len(matchCandidates) == 0 { - r.matched <- false - } - } + defer mutex.Unlock() - go func(r reply) { var matched bool // whether any replyMatcher considered the reply acceptable. 
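// A compact way to read the rewritten matching logic below: a pending
// replyMatcher accepts an incoming reply when the packet kind, source IP and
// source port all agree. Sketched as a predicate (an illustration distilled
// from this hunk, not a helper that exists in the patch; replyMatcher and
// reply are the package types shown here, and matchesReply is a name chosen
// for the sketch):
//
//	func matchesReply(p *replyMatcher, r reply) bool {
//		return p.ptype == r.data.Kind() && p.ip.Equal(r.ip) && p.port == r.port
//	}
//
// The removed code tried the stricter key (from, kind, ip) first and fell
// back to (kind, ip, port) only to handle node key changes; the new single
// pass keeps just the fallback key and evaluates it entirely under the mutex.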
- for _, candidate := range matchCandidates { - p := candidate.el.Value.(*replyMatcher) - ok, requestDone := p.callback(r.data) - matched = matched || ok - p.reply = r.data - - // Remove the matcher if callback indicates that all replies have been received. - if requestDone { - mutex.Lock() - plist.Remove(candidate.el) - mutex.Unlock() - candidate.errc <- nil - listUpdate <- candidate.el - } else { - select { - case err := <-p.errc: - candidate.errc <- err - default: - p.errc = candidate.errc + for el := plist.Front(); el != nil; el = el.Next() { + p := el.Value.(*replyMatcher) + if (p.ptype == r.data.Kind()) && p.ip.Equal(r.ip) && (p.port == r.port) { + ok, requestDone := p.callback(r.data) + matched = matched || ok + p.reply = r.data + // Remove the matcher if callback indicates that all replies have been received. + if requestDone { + p.errc <- nil + plist.Remove(el) + listUpdate <- el } + // Reset the continuous timeout counter (time drift detection) + contTimeouts = 0 } } - r.matched <- matched - }(r) + }() - // Reset the continuous timeout counter (time drift detection) - contTimeouts = 0 case key := <-t.gotkey: go func() { if key, err := v4wire.DecodePubkey(crypto.S256(), key); err == nil { diff --git a/p2p/discover/v4_udp_test.go b/p2p/discover/v4_udp_test.go index 8811e4c41df..923bca651d4 100644 --- a/p2p/discover/v4_udp_test.go +++ b/p2p/discover/v4_udp_test.go @@ -82,7 +82,7 @@ func newUDPTestContext(ctx context.Context, t *testing.T, logger log.Logger) *ud tmpDir := t.TempDir() var err error - test.db, err = enode.OpenDB(ctx, "", tmpDir) + test.db, err = enode.OpenDB(ctx, "", tmpDir, logger) if err != nil { panic(err) } @@ -619,7 +619,7 @@ func startLocalhostV4(ctx context.Context, t *testing.T, cfg Config, logger log. cfg.PrivateKey = newkey() tmpDir := t.TempDir() - db, err := enode.OpenDB(context.Background(), "", tmpDir) + db, err := enode.OpenDB(context.Background(), "", tmpDir, logger) if err != nil { panic(err) } diff --git a/p2p/discover/v5_udp_test.go b/p2p/discover/v5_udp_test.go index 9e0c70f6f07..c4e9c350885 100644 --- a/p2p/discover/v5_udp_test.go +++ b/p2p/discover/v5_udp_test.go @@ -41,7 +41,7 @@ import ( func startLocalhostV5(t *testing.T, cfg Config, logger log.Logger) *UDPv5 { cfg.PrivateKey = newkey() tmpDir := t.TempDir() - db, err := enode.OpenDB(context.Background(), "", tmpDir) + db, err := enode.OpenDB(context.Background(), "", tmpDir, logger) if err != nil { panic(err) } @@ -573,7 +573,7 @@ func newUDPV5TestContext(ctx context.Context, t *testing.T, logger log.Logger) * t.Cleanup(test.close) var err error tmpDir := t.TempDir() - test.db, err = enode.OpenDB(context.Background(), "", tmpDir) + test.db, err = enode.OpenDB(context.Background(), "", tmpDir, logger) if err != nil { panic(err) } @@ -627,7 +627,7 @@ func (test *udpV5Test) getNode(key *ecdsa.PrivateKey, addr *net.UDPAddr, logger ln := test.nodesByID[id] if ln == nil { tmpDir := test.t.TempDir() - db, err := enode.OpenDB(context.Background(), "", tmpDir) + db, err := enode.OpenDB(context.Background(), "", tmpDir, logger) if err != nil { panic(err) } diff --git a/p2p/discover/v5wire/encoding_test.go b/p2p/discover/v5wire/encoding_test.go index 0eee9ed8d64..f28d5278642 100644 --- a/p2p/discover/v5wire/encoding_test.go +++ b/p2p/discover/v5wire/encoding_test.go @@ -537,7 +537,7 @@ func (t *handshakeTest) close() { } func (n *handshakeTestNode) init(key *ecdsa.PrivateKey, ip net.IP, clock mclock.Clock, tmpDir string, logger log.Logger) { - db, err := enode.OpenDB(context.Background(), "", tmpDir) + 
db, err := enode.OpenDB(context.Background(), "", tmpDir) + db, err := enode.OpenDB(context.Background(), "", tmpDir, logger) if err != nil { panic(err) } diff --git a/p2p/dnsdisc/tree.go b/p2p/dnsdisc/tree.go index 8329e6667c2..0c47edf9478 100644 --- a/p2p/dnsdisc/tree.go +++ b/p2p/dnsdisc/tree.go @@ -311,11 +311,11 @@ func parseLink(e string) (*linkEntry, error) { return nil, fmt.Errorf("wrong/missing scheme 'enrtree' in URL") } e = e[len(linkPrefix):] - pos := strings.IndexByte(e, '@') - if pos == -1 { + keystring, domain, ok := strings.Cut(e, "@") + if !ok { return nil, entryError{"link", errNoPubkey} } - keystring, domain := e[:pos], e[pos+1:] + keybytes, err := b32format.DecodeString(keystring) if err != nil { return nil, entryError{"link", errBadPubkey} } diff --git a/p2p/enode/localnode_test.go b/p2p/enode/localnode_test.go index 2046dfd23f1..8e5b51b8e40 100644 --- a/p2p/enode/localnode_test.go +++ b/p2p/enode/localnode_test.go @@ -29,7 +29,7 @@ import ( ) func newLocalNodeForTesting(tmpDir string, logger log.Logger) (*LocalNode, *DB) { - db, err := OpenDB(context.Background(), "", tmpDir) + db, err := OpenDB(context.Background(), "", tmpDir, logger) if err != nil { panic(err) } diff --git a/p2p/enode/nodedb.go b/p2p/enode/nodedb.go index fb4b27e4c85..b955a7a29f5 100644 --- a/p2p/enode/nodedb.go +++ b/p2p/enode/nodedb.go @@ -29,14 +29,13 @@ import ( "time" "github.com/c2h5oh/datasize" + mdbx1 "github.com/erigontech/mdbx-go/mdbx" + "github.com/ledgerwatch/log/v3" + libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/mdbx" - "github.com/ledgerwatch/erigon/rlp" - "github.com/ledgerwatch/log/v3" - - mdbx1 "github.com/erigontech/mdbx-go/mdbx" ) // Keys in the node database. @@ -75,17 +74,18 @@ var zeroIP = make(net.IP, 16) // DB is the node database, storing previously seen nodes and any collected metadata about // them for QoS purposes. type DB struct { - kv kv.RwDB // Interface to the database itself - runner sync.Once // Ensures we can start at most one expirer - quit chan struct{} // Channel to signal the expiring thread to stop + kv kv.RwDB // Interface to the database itself + runner sync.Once // Ensures we can start at most one expirer + + ctx context.Context + ctxCancel func() } // OpenDB opens a node database for storing and retrieving infos about known peers in the // network. If no path is given an in-memory, temporary database is constructed. -func OpenDB(ctx context.Context, path string, tmpDir string) (*DB, error) { - logger := log.New() //TODO: move higher +func OpenDB(ctx context.Context, path string, tmpDir string, logger log.Logger) (*DB, error) { if path == "" { - return newMemoryDB(logger, tmpDir) + return newMemoryDB(ctx, logger, tmpDir) } return newPersistentDB(ctx, logger, path) } @@ -98,22 +98,27 @@ func bucketsConfig(_ kv.TableCfg) kv.TableCfg { } // newMemoryNodeDB creates a new in-memory node database without a persistent backend. -func newMemoryDB(logger log.Logger, tmpDir string) (*DB, error) { - db := &DB{quit: make(chan struct{})} - var err error - db.kv, err = mdbx.NewMDBX(logger).InMem(tmpDir).Label(kv.SentryDB).WithTableCfg(bucketsConfig).MapSize(1 * datasize.GB).Open(context.Background()) +func newMemoryDB(ctx context.Context, logger log.Logger, tmpDir string) (*DB, error) { + db, err := mdbx.NewMDBX(logger). + InMem(tmpDir). + Label(kv.SentryDB). + WithTableCfg(bucketsConfig). + MapSize(1 * datasize.GB).
+ Open(ctx) if err != nil { return nil, err } - return db, nil + + nodeDB := &DB{kv: db} + nodeDB.ctx, nodeDB.ctxCancel = context.WithCancel(ctx) + + return nodeDB, nil } // newPersistentNodeDB creates/opens a persistent node database, // also flushing its contents in case of a version mismatch. func newPersistentDB(ctx context.Context, logger log.Logger, path string) (*DB, error) { - var db kv.RwDB - var err error - db, err = mdbx.NewMDBX(logger). + db, err := mdbx.NewMDBX(logger). Path(path). Label(kv.SentryDB). WithTableCfg(bucketsConfig). @@ -126,13 +131,14 @@ func newPersistentDB(ctx context.Context, logger log.Logger, path string) (*DB, if err != nil { return nil, err } + // The nodes contained in the cache correspond to a certain protocol version. // Flush all nodes if the version doesn't match. currentVer := make([]byte, binary.MaxVarintLen64) currentVer = currentVer[:binary.PutVarint(currentVer, int64(dbVersion))] var blob []byte - if err := db.Update(context.Background(), func(tx kv.RwTx) error { + if err := db.Update(ctx, func(tx kv.RwTx) error { c, err := tx.RwCursor(kv.Inodes) if err != nil { return err @@ -151,6 +157,7 @@ func newPersistentDB(ctx context.Context, logger log.Logger, path string) (*DB, }); err != nil { return nil, err } + if blob != nil && !bytes.Equal(blob, currentVer) { db.Close() if err := os.RemoveAll(path); err != nil { @@ -158,7 +165,11 @@ func newPersistentDB(ctx context.Context, logger log.Logger, path string) (*DB, } return newPersistentDB(ctx, logger, path) } - return &DB{kv: db, quit: make(chan struct{})}, nil + + nodeDB := &DB{kv: db} + nodeDB.ctx, nodeDB.ctxCancel = context.WithCancel(ctx) + + return nodeDB, nil } // nodeKey returns the database key for a node record. @@ -228,7 +239,7 @@ func localItemKey(id ID, field string) []byte { // fetchInt64 retrieves an integer associated with a particular key. func (db *DB) fetchInt64(key []byte) int64 { var val int64 - if err := db.kv.View(context.Background(), func(tx kv.Tx) error { + if err := db.kv.View(db.ctx, func(tx kv.Tx) error { blob, errGet := tx.GetOne(kv.Inodes, key) if errGet != nil { return errGet @@ -250,7 +261,7 @@ func (db *DB) fetchInt64(key []byte) int64 { func (db *DB) storeInt64(key []byte, n int64) error { blob := make([]byte, binary.MaxVarintLen64) blob = blob[:binary.PutVarint(blob, n)] - return db.kv.Update(context.Background(), func(tx kv.RwTx) error { + return db.kv.Update(db.ctx, func(tx kv.RwTx) error { return tx.Put(kv.Inodes, libcommon.CopyBytes(key), blob) }) } @@ -258,7 +269,7 @@ func (db *DB) storeInt64(key []byte, n int64) error { // fetchUint64 retrieves an integer associated with a particular key. func (db *DB) fetchUint64(key []byte) uint64 { var val uint64 - if err := db.kv.View(context.Background(), func(tx kv.Tx) error { + if err := db.kv.View(db.ctx, func(tx kv.Tx) error { blob, errGet := tx.GetOne(kv.Inodes, key) if errGet != nil { return errGet @@ -277,7 +288,7 @@ func (db *DB) fetchUint64(key []byte) uint64 { func (db *DB) storeUint64(key []byte, n uint64) error { blob := make([]byte, binary.MaxVarintLen64) blob = blob[:binary.PutUvarint(blob, n)] - return db.kv.Update(context.Background(), func(tx kv.RwTx) error { + return db.kv.Update(db.ctx, func(tx kv.RwTx) error { return tx.Put(kv.Inodes, libcommon.CopyBytes(key), blob) }) } @@ -285,7 +296,7 @@ func (db *DB) storeUint64(key []byte, n uint64) error { // Node retrieves a node with a given id from the database. 
func (db *DB) Node(id ID) *Node { var blob []byte - if err := db.kv.View(context.Background(), func(tx kv.Tx) error { + if err := db.kv.View(db.ctx, func(tx kv.Tx) error { v, errGet := tx.GetOne(kv.NodeRecords, nodeKey(id)) if errGet != nil { return errGet @@ -323,7 +334,7 @@ func (db *DB) UpdateNode(node *Node) error { if err != nil { return err } - if err := db.kv.Update(context.Background(), func(tx kv.RwTx) error { + if err := db.kv.Update(db.ctx, func(tx kv.RwTx) error { return tx.Put(kv.NodeRecords, nodeKey(node.ID()), blob) }); err != nil { return err @@ -347,11 +358,11 @@ func (db *DB) Resolve(n *Node) *Node { // DeleteNode deletes all information associated with a node. func (db *DB) DeleteNode(id ID) { - deleteRange(db.kv, nodeKey(id)) + db.deleteRange(nodeKey(id)) } -func deleteRange(db kv.RwDB, prefix []byte) { - if err := db.Update(context.Background(), func(tx kv.RwTx) error { +func (db *DB) deleteRange(prefix []byte) { + if err := db.kv.Update(db.ctx, func(tx kv.RwTx) error { for bucket := range bucketsConfig(nil) { if err := deleteRangeInBucket(tx, prefix, bucket); err != nil { return err @@ -399,7 +410,7 @@ func (db *DB) expirer() { select { case <-tick.C: db.expireNodes() - case <-db.quit: + case <-db.ctx.Done(): return } } @@ -413,7 +424,7 @@ func (db *DB) expireNodes() { youngestPong int64 ) var toDelete [][]byte - if err := db.kv.View(context.Background(), func(tx kv.Tx) error { + if err := db.kv.View(db.ctx, func(tx kv.Tx) error { c, err := tx.Cursor(kv.Inodes) if err != nil { return err @@ -456,7 +467,7 @@ func (db *DB) expireNodes() { log.Warn("nodeDB.expireNodes failed", "err", err) } for _, td := range toDelete { - deleteRange(db.kv, td) + db.deleteRange(td) } } @@ -546,7 +557,7 @@ func (db *DB) QuerySeeds(n int, maxAge time.Duration) []*Node { id ID ) - if err := db.kv.View(context.Background(), func(tx kv.Tx) error { + if err := db.kv.View(db.ctx, func(tx kv.Tx) error { c, err := tx.Cursor(kv.NodeRecords) if err != nil { return err @@ -604,14 +615,6 @@ func (db *DB) QuerySeeds(n int, maxAge time.Duration) []*Node { // close flushes and closes the database files. 
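// The quit channel is gone from this lifecycle: the DB now carries a
// cancellable context (the ctx/ctxCancel fields added above), the expirer's
// select watches db.ctx.Done() instead of db.quit, and the kv.View/kv.Update
// helpers run against db.ctx. A minimal sketch of the shutdown path this diff
// sets up, using only names it introduces:
//
//	nodeDB := &DB{kv: db}
//	nodeDB.ctx, nodeDB.ctxCancel = context.WithCancel(ctx)
//	// ... later, on shutdown:
//	nodeDB.ctxCancel() // stops the expirer; replaces libcommon.SafeClose(db.quit)
//	nodeDB.kv.Close()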
func (db *DB) Close() { - select { - case <-db.quit: - return // means closed already - default: - } - if db.quit == nil { - return - } - libcommon.SafeClose(db.quit) + db.ctxCancel() db.kv.Close() } diff --git a/p2p/enode/nodedb_test.go b/p2p/enode/nodedb_test.go index 313f424b947..4e72954ba1d 100644 --- a/p2p/enode/nodedb_test.go +++ b/p2p/enode/nodedb_test.go @@ -25,6 +25,8 @@ import ( "reflect" "testing" "time" + + "github.com/ledgerwatch/log/v3" ) var keytestID = HexID("51232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439") @@ -89,7 +91,7 @@ var nodeDBInt64Tests = []struct { func TestDBInt64(t *testing.T) { tmpDir := t.TempDir() - db, err := OpenDB(context.Background(), "", tmpDir) + db, err := OpenDB(context.Background(), "", tmpDir, log.Root()) if err != nil { panic(err) } @@ -125,7 +127,7 @@ func TestDBFetchStore(t *testing.T) { inst := time.Now() num := 314 - db, err := OpenDB(context.Background(), "", tmpDir) + db, err := OpenDB(context.Background(), "", tmpDir, log.Root()) if err != nil { panic(err) } @@ -268,7 +270,7 @@ func TestDBSeedQuery(t *testing.T) { } func testSeedQuery(tmpDir string) error { - db, err := OpenDB(context.Background(), "", tmpDir) + db, err := OpenDB(context.Background(), "", tmpDir, log.Root()) if err != nil { panic(err) } @@ -318,7 +320,7 @@ func TestDBPersistency(t *testing.T) { ) // Create a persistent database and store some values - db, err := OpenDB(context.Background(), filepath.Join(root, "database"), root) + db, err := OpenDB(context.Background(), filepath.Join(root, "database"), root, log.Root()) if err != nil { t.Fatalf("failed to create persistent database: %v", err) } @@ -329,7 +331,7 @@ func TestDBPersistency(t *testing.T) { db.Close() // ReopenSegments the database and check the value - db, err = OpenDB(context.Background(), filepath.Join(root, "database"), root) + db, err = OpenDB(context.Background(), filepath.Join(root, "database"), root, log.Root()) if err != nil { t.Fatalf("failed to open persistent database: %v", err) } @@ -432,7 +434,7 @@ var nodeDBExpirationNodes = []struct { func TestDBExpiration(t *testing.T) { tmpDir := t.TempDir() - db, err := OpenDB(context.Background(), "", tmpDir) + db, err := OpenDB(context.Background(), "", tmpDir, log.Root()) if err != nil { panic(err) } @@ -479,7 +481,7 @@ func TestDBExpiration(t *testing.T) { // in the database. 
func TestDBExpireV5(t *testing.T) { tmpDir := t.TempDir() - db, err := OpenDB(context.Background(), "", tmpDir) + db, err := OpenDB(context.Background(), "", tmpDir, log.Root()) if err != nil { panic(err) } diff --git a/p2p/sentry/sentry_grpc_server_test.go b/p2p/sentry/sentry_grpc_server_test.go index 93b0c4b8e8d..5cd8739c9a8 100644 --- a/p2p/sentry/sentry_grpc_server_test.go +++ b/p2p/sentry/sentry_grpc_server_test.go @@ -22,6 +22,7 @@ import ( "github.com/ledgerwatch/erigon/core/state/temporal" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/p2p" + "github.com/ledgerwatch/log/v3" ) func testSentryServer(db kv.Getter, genesis *types.Genesis, genesisHash libcommon.Hash) *GrpcServer { @@ -88,8 +89,8 @@ func testForkIDSplit(t *testing.T, protocol uint) { gspecNoFork = &types.Genesis{Config: configNoFork} gspecProFork = &types.Genesis{Config: configProFork} - genesisNoFork = core.MustCommitGenesis(gspecNoFork, dbNoFork, "") - genesisProFork = core.MustCommitGenesis(gspecProFork, dbProFork, "") + genesisNoFork = core.MustCommitGenesis(gspecNoFork, dbNoFork, "", log.Root()) + genesisProFork = core.MustCommitGenesis(gspecProFork, dbProFork, "", log.Root()) ) var s1, s2 *GrpcServer @@ -177,7 +178,7 @@ func TestSentryServerImpl_SetStatusInitPanic(t *testing.T) { configNoFork := &chain.Config{HomesteadBlock: big.NewInt(1), ChainID: big.NewInt(1)} _, dbNoFork, _ := temporal.NewTestDB(t, datadir.New(t.TempDir()), nil) gspecNoFork := &types.Genesis{Config: configNoFork} - genesisNoFork := core.MustCommitGenesis(gspecNoFork, dbNoFork, "") + genesisNoFork := core.MustCommitGenesis(gspecNoFork, dbNoFork, "", log.Root()) ss := &GrpcServer{p2p: &p2p.Config{}} _, err := ss.SetStatus(context.Background(), &proto_sentry.StatusData{ diff --git a/p2p/sentry/simulator/sentry_simulator.go b/p2p/sentry/simulator/sentry_simulator.go new file mode 100644 index 00000000000..51eb8c2de68 --- /dev/null +++ b/p2p/sentry/simulator/sentry_simulator.go @@ -0,0 +1,453 @@ +package simulator + +import ( + "bytes" + "context" + "fmt" + "path/filepath" + + "github.com/ledgerwatch/erigon-lib/chain/snapcfg" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/downloader/snaptype" + "github.com/ledgerwatch/erigon-lib/gointerfaces" + sentry_if "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" + "github.com/ledgerwatch/erigon-lib/gointerfaces/types" + core_types "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/eth/protocols/eth" + "github.com/ledgerwatch/erigon/p2p" + "github.com/ledgerwatch/erigon/p2p/discover/v4wire" + "github.com/ledgerwatch/erigon/p2p/enode" + "github.com/ledgerwatch/erigon/p2p/sentry" + "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" + "github.com/ledgerwatch/log/v3" + "google.golang.org/protobuf/types/known/emptypb" +) + +type server struct { + sentry_if.UnimplementedSentryServer + ctx context.Context + peers map[[64]byte]*p2p.Peer + messageReceivers map[sentry_if.MessageId][]sentry_if.Sentry_MessagesServer + logger log.Logger + knownSnapshots *freezeblocks.RoSnapshots + activeSnapshots *freezeblocks.RoSnapshots + blockReader *freezeblocks.BlockReader + downloader *TorrentClient +} + +func newPeer(name string, caps []p2p.Cap) (*p2p.Peer, error) { + key, err := crypto.GenerateKey() + + if err != nil { + return nil, err + } + + return p2p.NewPeer(enode.PubkeyToIDV4(&key.PublicKey), 
v4wire.EncodePubkey(&key.PublicKey), name, caps, true), nil +} + +func NewSentry(ctx context.Context, chain string, snapshotLocation string, peerCount int, logger log.Logger) (sentry_if.SentryServer, error) { + peers := map[[64]byte]*p2p.Peer{} + + for i := 0; i < peerCount; i++ { + peer, err := newPeer(fmt.Sprint("peer-", i), nil) + + if err != nil { + return nil, err + } + peers[peer.Pubkey()] = peer + } + + cfg := snapcfg.KnownCfg(chain, 0) + + knownSnapshots := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{ + Enabled: true, + Produce: false, + NoDownloader: true, + }, "", cfg.Version, logger) + + files := make([]string, 0, len(cfg.Preverified)) + + for _, item := range cfg.Preverified { + files = append(files, item.Name) + } + + knownSnapshots.InitSegments(files) + + //s.knownSnapshots.ReopenList([]string{ent2.Name()}, false) + activeSnapshots := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{ + Enabled: true, + Produce: false, + NoDownloader: true, + }, snapshotLocation, cfg.Version, logger) + + if err := activeSnapshots.ReopenFolder(); err != nil { + return nil, err + } + + downloader, err := NewTorrentClient(ctx, chain, snapshotLocation, logger) + + if err != nil { + return nil, err + } + + s := &server{ + ctx: ctx, + peers: peers, + messageReceivers: map[sentry_if.MessageId][]sentry_if.Sentry_MessagesServer{}, + knownSnapshots: knownSnapshots, + activeSnapshots: activeSnapshots, + blockReader: freezeblocks.NewBlockReader(activeSnapshots, nil), + logger: logger, + downloader: downloader, + } + + go func() { + <-ctx.Done() + s.Close() + }() + + return s, nil +} + +func (s *server) Close() { + s.downloader.Close() + if closer, ok := s.downloader.cfg.DefaultStorage.(interface{ Close() error }); ok { + closer.Close() + } + s.activeSnapshots.Close() +} + +func (s *server) NodeInfo(context.Context, *emptypb.Empty) (*types.NodeInfoReply, error) { + return nil, fmt.Errorf("TODO") +} + +func (s *server) PeerById(ctx context.Context, in *sentry_if.PeerByIdRequest) (*sentry_if.PeerByIdReply, error) { + peerId := sentry.ConvertH512ToPeerID(in.PeerId) + + peer, ok := s.peers[peerId] + + if !ok { + return nil, fmt.Errorf("unknown peer") + } + + info := peer.Info() + + return &sentry_if.PeerByIdReply{ + Peer: &types.PeerInfo{ + Id: info.ID, + Name: info.Name, + Enode: info.Enode, + Enr: info.ENR, + Caps: info.Caps, + ConnLocalAddr: info.Network.LocalAddress, + ConnRemoteAddr: info.Network.RemoteAddress, + ConnIsInbound: info.Network.Inbound, + ConnIsTrusted: info.Network.Trusted, + ConnIsStatic: info.Network.Static, + }, + }, nil +} + +func (s *server) PeerCount(context.Context, *sentry_if.PeerCountRequest) (*sentry_if.PeerCountReply, error) { + return &sentry_if.PeerCountReply{Count: uint64(len(s.peers))}, nil +} + +func (s *server) PeerEvents(*sentry_if.PeerEventsRequest, sentry_if.Sentry_PeerEventsServer) error { + return fmt.Errorf("TODO") +} + +func (s *server) PeerMinBlock(context.Context, *sentry_if.PeerMinBlockRequest) (*emptypb.Empty, error) { + return nil, fmt.Errorf("TODO") +} + +func (s *server) Peers(context.Context, *emptypb.Empty) (*sentry_if.PeersReply, error) { + reply := &sentry_if.PeersReply{} + + for _, peer := range s.peers { + info := peer.Info() + + reply.Peers = append(reply.Peers, + &types.PeerInfo{ + Id: info.ID, + Name: info.Name, + Enode: info.Enode, + Enr: info.ENR, + Caps: info.Caps, + ConnLocalAddr: info.Network.LocalAddress, + ConnRemoteAddr: info.Network.RemoteAddress, + ConnIsInbound: info.Network.Inbound, + ConnIsTrusted: info.Network.Trusted, + 
ConnIsStatic: info.Network.Static, + }) + } + + return reply, nil +} + +func (s *server) SendMessageById(ctx context.Context, in *sentry_if.SendMessageByIdRequest) (*sentry_if.SentPeers, error) { + peerId := sentry.ConvertH512ToPeerID(in.PeerId) + + if err := s.sendMessageById(ctx, peerId, in.Data); err != nil { + return nil, err + } + + return &sentry_if.SentPeers{ + Peers: []*types.H512{in.PeerId}, + }, nil +} + +func (s *server) sendMessageById(ctx context.Context, peerId [64]byte, messageData *sentry_if.OutboundMessageData) error { + peer, ok := s.peers[peerId] + + if !ok { + return fmt.Errorf("unknown peer") + } + + switch messageData.Id { + case sentry_if.MessageId_GET_BLOCK_HEADERS_65: + packet := &eth.GetBlockHeadersPacket{} + if err := rlp.DecodeBytes(messageData.Data, packet); err != nil { + return fmt.Errorf("failed to decode packet: %w", err) + } + + go s.processGetBlockHeaders(ctx, peer, 0, packet) + + case sentry_if.MessageId_GET_BLOCK_HEADERS_66: + packet := &eth.GetBlockHeadersPacket66{} + if err := rlp.DecodeBytes(messageData.Data, packet); err != nil { + return fmt.Errorf("failed to decode packet: %w", err) + } + + go s.processGetBlockHeaders(ctx, peer, packet.RequestId, packet.GetBlockHeadersPacket) + + default: + return fmt.Errorf("unhandled message id: %s", messageData.Id) + } + + return nil +} + +func (s *server) SendMessageByMinBlock(ctx context.Context, request *sentry_if.SendMessageByMinBlockRequest) (*sentry_if.SentPeers, error) { + return s.UnimplementedSentryServer.SendMessageByMinBlock(ctx, request) +} + +func (s *server) SendMessageToAll(ctx context.Context, data *sentry_if.OutboundMessageData) (*sentry_if.SentPeers, error) { + sentPeers := &sentry_if.SentPeers{} + + for _, peer := range s.peers { + peerKey := peer.Pubkey() + + if err := s.sendMessageById(ctx, peerKey, data); err != nil { + return sentPeers, err + } + + sentPeers.Peers = append(sentPeers.Peers, gointerfaces.ConvertBytesToH512(peerKey[:])) + } + + return sentPeers, nil +} + +func (s *server) SendMessageToRandomPeers(ctx context.Context, request *sentry_if.SendMessageToRandomPeersRequest) (*sentry_if.SentPeers, error) { + sentPeers := &sentry_if.SentPeers{} + + var i uint64 + + for _, peer := range s.peers { + peerKey := peer.Pubkey() + + if err := s.sendMessageById(ctx, peerKey, request.Data); err != nil { + return sentPeers, err + } + + sentPeers.Peers = append(sentPeers.Peers, gointerfaces.ConvertBytesToH512(peerKey[:])) + + i++ + + if i == request.MaxPeers { + break + } + } + + return sentPeers, nil + +} + +func (s *server) Messages(request *sentry_if.MessagesRequest, receiver sentry_if.Sentry_MessagesServer) error { + for _, messageId := range request.Ids { + receivers := s.messageReceivers[messageId] + s.messageReceivers[messageId] = append(receivers, receiver) + } + + <-s.ctx.Done() + + return nil +}
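// Messages above is the server half of the simulator's subscription model:
// each gRPC stream is appended to messageReceivers for every id it asks for,
// and the call then parks on s.ctx.Done() so the stream stays open for the
// lifetime of the simulator. processGetBlockHeaders below looks receivers up
// by id when fanning responses out. A client-side sketch using the in-process
// direct client, as the integration test further down does (error handling
// elided):
//
//	client := direct.NewSentryClientDirect(66, sim)
//	stream, _ := client.Messages(ctx, &sentry_if.MessagesRequest{
//		Ids: []sentry_if.MessageId{sentry_if.MessageId_BLOCK_HEADERS_66},
//	})
//	msg, _ := stream.Recv() // blocks until a BLOCK_HEADERS_66 arrives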
headers", "error", err) + return + } + + for _, receiver := range r65 { + receiver.Send(&sentry_if.InboundMessage{ + Id: sentry_if.MessageId_BLOCK_HEADERS_65, + Data: data.Bytes(), + PeerId: peerId, + }) + } + } + + if len(r66) > 0 { + var data bytes.Buffer + + err := rlp.Encode(&data, ð.BlockHeadersPacket66{ + RequestId: requestId, + BlockHeadersPacket: headers, + }) + + if err != nil { + fmt.Printf("Error (move to logger): %s", err) + return + } + + for _, receiver := range r66 { + receiver.Send(&sentry_if.InboundMessage{ + Id: sentry_if.MessageId_BLOCK_HEADERS_66, + Data: data.Bytes(), + PeerId: peerId, + }) + } + } + } +} + +func (s *server) getHeaders(ctx context.Context, origin eth.HashOrNumber, amount uint64, skip uint64, reverse bool) (eth.BlockHeadersPacket, error) { + + var headers eth.BlockHeadersPacket + + var next uint64 + + nextBlockNum := func(blockNum uint64) uint64 { + inc := uint64(1) + + if skip != 0 { + inc = skip + } + + if reverse { + return blockNum - inc + } else { + return blockNum + inc + } + } + + if origin.Hash != (common.Hash{}) { + header, err := s.getHeaderByHash(ctx, origin.Hash) + + if err != nil { + return nil, err + } + + headers = append(headers, header) + + next = nextBlockNum(header.Number.Uint64()) + } else { + header, err := s.getHeader(ctx, origin.Number) + + if err != nil { + return nil, err + } + + headers = append(headers, header) + + next = nextBlockNum(header.Number.Uint64()) + } + + for len(headers) < int(amount) { + header, err := s.getHeader(ctx, next) + + if err != nil { + return nil, err + } + + headers = append(headers, header) + + next = nextBlockNum(header.Number.Uint64()) + } + + return headers, nil +} + +func (s *server) getHeader(ctx context.Context, blockNum uint64) (*core_types.Header, error) { + header, err := s.blockReader.Header(ctx, nil, common.Hash{}, blockNum) + + if err != nil { + return nil, err + } + + if header == nil { + view := s.knownSnapshots.View() + defer view.Close() + + if seg, ok := view.HeadersSegment(blockNum); ok { + if err := s.downloadHeaders(ctx, seg); err != nil { + return nil, err + } + } + + s.activeSnapshots.ReopenSegments([]snaptype.Type{snaptype.Headers}) + + header, err = s.blockReader.Header(ctx, nil, common.Hash{}, blockNum) + + if err != nil { + return nil, err + } + } + + return header, nil +} + +func (s *server) getHeaderByHash(ctx context.Context, hash common.Hash) (*core_types.Header, error) { + return s.blockReader.HeaderByHash(ctx, nil, hash) +} + +func (s *server) downloadHeaders(ctx context.Context, header *freezeblocks.HeaderSegment) error { + fileName := snaptype.SegmentFileName(s.knownSnapshots.Version(), header.From(), header.To(), snaptype.Headers) + + s.logger.Info(fmt.Sprintf("Downloading %s", fileName)) + + err := s.downloader.Download(ctx, fileName) + + if err != nil { + return fmt.Errorf("can't download %s: %w", fileName, err) + } + + s.logger.Info(fmt.Sprintf("Indexing %s", fileName)) + + return freezeblocks.HeadersIdx(ctx, + filepath.Join(s.downloader.LocalFsRoot(), fileName), s.knownSnapshots.Version(), header.From(), s.downloader.LocalFsRoot(), nil, log.LvlDebug, s.logger) +} diff --git a/p2p/sentry/simulator/simulator_test.go b/p2p/sentry/simulator/simulator_test.go new file mode 100644 index 00000000000..f94815e44e7 --- /dev/null +++ b/p2p/sentry/simulator/simulator_test.go @@ -0,0 +1,205 @@ +//go:build integration + +package simulator_test + +import ( + "bytes" + "context" + "testing" + + "github.com/ledgerwatch/log/v3" + + "github.com/ledgerwatch/erigon-lib/direct" + 
"github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" + sentry_if "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" + "github.com/ledgerwatch/erigon/eth/protocols/eth" + "github.com/ledgerwatch/erigon/p2p/sentry/simulator" + "github.com/ledgerwatch/erigon/rlp" +) + +func TestSimulatorStart(t *testing.T) { + t.Skip("For now, this test is intended for manual runs only as it downloads snapshots and takes too long") + + ctx, cancel := context.WithCancel(context.Background()) + + defer cancel() + + logger := log.New() + logger.SetHandler(log.StdoutHandler) + dataDir := t.TempDir() + + sim, err := simulator.NewSentry(ctx, "mumbai", dataDir, 1, logger) + + if err != nil { + t.Fatal(err) + } + + simClient := direct.NewSentryClientDirect(66, sim) + + peerCount, err := simClient.PeerCount(ctx, &sentry.PeerCountRequest{}) + + if err != nil { + t.Fatal(err) + } + + if peerCount.Count != 1 { + t.Fatal("Invalid response count: expected:", 1, "got:", peerCount.Count) + } + + receiver, err := simClient.Messages(ctx, &sentry.MessagesRequest{ + Ids: []sentry.MessageId{sentry.MessageId_BLOCK_HEADERS_66}, + }) + + if err != nil { + t.Fatal(err) + } + + getHeaders66 := ð.GetBlockHeadersPacket66{ + RequestId: 1, + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + Origin: eth.HashOrNumber{Number: 10}, + Amount: 10, + }, + } + + var data bytes.Buffer + + err = rlp.Encode(&data, getHeaders66) + + if err != nil { + t.Fatal(err) + } + + peers, err := simClient.SendMessageToAll(ctx, &sentry.OutboundMessageData{ + Id: sentry_if.MessageId_GET_BLOCK_HEADERS_66, + Data: data.Bytes(), + }) + + if err != nil { + t.Fatal(err) + } + + if len(peers.Peers) != int(peerCount.Count) { + t.Fatal("Unexpected peer count expected:", peerCount.Count, len(peers.Peers)) + } + + message, err := receiver.Recv() + + if err != nil { + t.Fatal(err) + } + + if message.Id != sentry_if.MessageId_BLOCK_HEADERS_66 { + t.Fatal("unexpected message id expected:", sentry_if.MessageId_BLOCK_HEADERS_66, "got:", message.Id) + } + + var expectedPeer bool + + for _, peer := range peers.Peers { + if message.PeerId.String() == peer.String() { + expectedPeer = true + break + } + } + + if !expectedPeer { + t.Fatal("message received from unexpected peer:", message.PeerId) + } + + packet := ð.BlockHeadersPacket66{} + + if err := rlp.DecodeBytes(message.Data, packet); err != nil { + t.Fatal("failed to decode packet:", err) + } + + if len(packet.BlockHeadersPacket) != 10 { + t.Fatal("unexpected header count: expected:", 10, "got:", len(packet.BlockHeadersPacket)) + } + + blockNum := uint64(10) + + for _, header := range packet.BlockHeadersPacket { + if header.Number.Uint64() != blockNum { + t.Fatal("unexpected block number: expected:", blockNum, "got:", header.Number) + } + + blockNum++ + } + + simClient65 := direct.NewSentryClientDirect(65, sim) + + getHeaders65 := ð.GetBlockHeadersPacket{ + Origin: eth.HashOrNumber{Number: 100}, + Amount: 50, + } + + data.Reset() + + err = rlp.Encode(&data, getHeaders65) + + if err != nil { + t.Fatal(err) + } + + peers65, err := simClient65.SendMessageById(ctx, &sentry_if.SendMessageByIdRequest{ + Data: &sentry.OutboundMessageData{ + Id: sentry_if.MessageId_GET_BLOCK_HEADERS_65, + Data: data.Bytes(), + }, + PeerId: peers.Peers[0], + }) + + if err != nil { + t.Fatal(err) + } + + if len(peers65.Peers) != 1 { + t.Fatal("message sent to unexpected number of peers:", len(peers65.Peers)) + } + + if peers65.Peers[0].String() != peers.Peers[0].String() { + t.Fatal("message sent to unexpected number of peers", peers65.Peers[0]) 
 + + receiver65, err := simClient65.Messages(ctx, &sentry.MessagesRequest{ + Ids: []sentry.MessageId{sentry.MessageId_BLOCK_HEADERS_65}, + }) + + if err != nil { + t.Fatal(err) + } + + message, err = receiver65.Recv() + + if err != nil { + t.Fatal(err) + } + + if message.Id != sentry_if.MessageId_BLOCK_HEADERS_65 { + t.Fatal("unexpected message id expected:", sentry_if.MessageId_BLOCK_HEADERS_65, "got:", message.Id) + } + + if message.PeerId.String() != peers.Peers[0].String() { + t.Fatal("message received from unexpected peer:", message.PeerId) + } + + packet65 := eth.BlockHeadersPacket{} + + if err := rlp.DecodeBytes(message.Data, &packet65); err != nil { + t.Fatal("failed to decode packet:", err) + } + + if len(packet65) != 50 { + t.Fatal("unexpected header count: expected:", 50, "got:", len(packet65)) + } + + blockNum = uint64(100) + + for _, header := range packet65 { + if header.Number.Uint64() != blockNum { + t.Fatal("unexpected block number: expected:", blockNum, "got:", header.Number) + } + + blockNum++ + } +} diff --git a/p2p/sentry/simulator/syncutil.go b/p2p/sentry/simulator/syncutil.go new file mode 100644 index 00000000000..c38877b4fc3 --- /dev/null +++ b/p2p/sentry/simulator/syncutil.go @@ -0,0 +1,195 @@ +package simulator + +import ( + "context" + "fmt" + "io/fs" + "os" + "path/filepath" + "runtime" + + "github.com/anacrolix/torrent" + "github.com/anacrolix/torrent/metainfo" + "github.com/anacrolix/torrent/storage" + "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/erigon-lib/chain/snapcfg" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/datadir" + "github.com/ledgerwatch/erigon-lib/downloader" + "github.com/ledgerwatch/erigon-lib/downloader/downloadercfg" + "github.com/ledgerwatch/erigon-lib/downloader/snaptype" + "github.com/ledgerwatch/erigon/cmd/downloader/downloadernat" + "github.com/ledgerwatch/erigon/cmd/utils" + "github.com/ledgerwatch/erigon/p2p/nat" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/log/v3" + "golang.org/x/sync/errgroup" +) + +// The code in this file is taken from cmd/snapshots - which is yet to be merged +// to devel - once that is done this file can be removed + +type TorrentClient struct { + *torrent.Client + cfg *torrent.ClientConfig + items map[string]snapcfg.PreverifiedItem +} + +func NewTorrentClient(ctx context.Context, chain string, torrentDir string, logger log.Logger) (*TorrentClient, error) { + + relativeDataDir := torrentDir + if torrentDir != "" { + var err error + absdatadir, err := filepath.Abs(torrentDir) + if err != nil { + panic(err) + } + torrentDir = absdatadir + } + + dirs := datadir.Dirs{ + RelativeDataDir: relativeDataDir, + DataDir: torrentDir, + Snap: torrentDir, + } + + webseedsList := common.CliString2Array(utils.WebSeedsFlag.Value) + + if known, ok := snapcfg.KnownWebseeds[chain]; ok { + webseedsList = append(webseedsList, known...)
+ } + + var downloadRate, uploadRate datasize.ByteSize + + if err := downloadRate.UnmarshalText([]byte(utils.TorrentDownloadRateFlag.Value)); err != nil { + return nil, err + } + + if err := uploadRate.UnmarshalText([]byte(utils.TorrentUploadRateFlag.Value)); err != nil { + return nil, err + } + + logLevel, _, err := downloadercfg.Int2LogLevel(utils.TorrentVerbosityFlag.Value) + + if err != nil { + return nil, err + } + + version := "erigon: " + params.VersionWithCommit(params.GitCommit) + + cfg, err := downloadercfg.New(dirs, version, logLevel, downloadRate, uploadRate, + utils.TorrentPortFlag.Value, utils.TorrentConnsPerFileFlag.Value, 0, nil, webseedsList, chain) + + if err != nil { + return nil, err + } + + if err := os.MkdirAll(torrentDir, 0755); err != nil { + return nil, err + } + + cfg.ClientConfig.DataDir = torrentDir + + cfg.ClientConfig.PieceHashersPerTorrent = 32 * runtime.NumCPU() + cfg.ClientConfig.DisableIPv6 = utils.DisableIPV6.Value + cfg.ClientConfig.DisableIPv4 = utils.DisableIPV4.Value + + natif, err := nat.Parse(utils.NATFlag.Value) + + if err != nil { + return nil, fmt.Errorf("invalid nat option %s: %w", utils.NATFlag.Value, err) + } + + downloadernat.DoNat(natif, cfg.ClientConfig, logger) + + cfg.ClientConfig.DefaultStorage = storage.NewMMap(torrentDir) + + cli, err := torrent.NewClient(cfg.ClientConfig) + + if err != nil { + return nil, fmt.Errorf("can't create torrent client: %w", err) + } + + items := map[string]snapcfg.PreverifiedItem{} + for _, it := range snapcfg.KnownCfg(chain, 0).Preverified { + items[it.Name] = it + } + + return &TorrentClient{cli, cfg.ClientConfig, items}, nil +} + +func (s *TorrentClient) LocalFsRoot() string { + return s.cfg.DataDir +} + +func (s *TorrentClient) Download(ctx context.Context, files ...string) error { + g, ctx := errgroup.WithContext(ctx) + g.SetLimit(len(files)) + + for _, f := range files { + file := f + + g.Go(func() error { + it, ok := s.items[file] + + if !ok { + return fs.ErrNotExist + } + + t, err := func() (*torrent.Torrent, error) { + infoHash := snaptype.Hex2InfoHash(it.Hash) + + for _, t := range s.Torrents() { + if t.Name() == file { + return t, nil + } + } + + mi := &metainfo.MetaInfo{AnnounceList: downloader.Trackers} + magnet := mi.Magnet(&infoHash, &metainfo.Info{Name: file}) + spec, err := torrent.TorrentSpecFromMagnetUri(magnet.String()) + + if err != nil { + return nil, err + } + + spec.DisallowDataDownload = true + + t, _, err := s.AddTorrentSpec(spec) + if err != nil { + return nil, err + } + + return t, nil + }() + + if err != nil { + return err + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-t.GotInfo(): + } + + if !t.Complete.Bool() { + t.AllowDataDownload() + t.DownloadAll() + select { + case <-ctx.Done(): + return ctx.Err() + case <-t.Complete.On(): + } + } + + closed := t.Closed() + t.Drop() + <-closed + + return nil + }) + } + + return g.Wait() +} diff --git a/p2p/server.go b/p2p/server.go index 7ba83014a3e..c42f88d0355 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -558,7 +558,7 @@ func (srv *Server) setupLocalNode() error { } sort.Sort(capsByNameAndVersion(srv.ourHandshake.Caps)) // Create the local node - db, err := enode.OpenDB(srv.quitCtx, srv.Config.NodeDatabase, srv.Config.TmpDir) + db, err := enode.OpenDB(srv.quitCtx, srv.Config.NodeDatabase, srv.Config.TmpDir, srv.logger) if err != nil { return err } diff --git a/params/bootnodes.go b/params/bootnodes.go index 5b250aa7791..5e7734c7484 100644 --- a/params/bootnodes.go +++ b/params/bootnodes.go @@ -109,7 +109,10 @@ var 
V5Bootnodes = []string{ "enr:-Ku4QEWzdnVtXc2Q0ZVigfCGggOVB2Vc1ZCPEc6j21NIFLODSJbvNaef1g4PxhPwl_3kax86YPheFUSLXPRs98vvYsoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDZBrP2Jc2VjcDI1NmsxoQM6jr8Rb1ktLEsVcKAPa08wCsKUmvoQ8khiOl_SLozf9IN1ZHCCIyg", } -var AmoyBootnodes = []string{} // Todo: Add BorAmoy bootnodes +var AmoyBootnodes = []string{ + "enode://bce861be777e91b0a5a49d58a51e14f32f201b4c6c2d1fbea6c7a1f14756cbb3f931f3188d6b65de8b07b53ff28d03b6e366d09e56360d2124a9fc5a15a0913d@54.217.171.196:30303", + "enode://4a3dc0081a346d26a73d79dd88216a9402d2292318e2db9947dbc97ea9c4afb2498dc519c0af04420dc13a238c279062da0320181e7c1461216ce4513bfd40bf@13.251.184.185:30303", +} var BorMainnetBootnodes = []string{ "enode://b8f1cc9c5d4403703fbf377116469667d2b1823c0daf16b7250aa576bacf399e42c3930ccfcb02c5df6879565a2b8931335565f0e8d3f8e72385ecf4a4bf160a@3.36.224.80:30303", diff --git a/params/chainspecs/chiado.json b/params/chainspecs/chiado.json index c1f2acf4be8..cc642d266f2 100644 --- a/params/chainspecs/chiado.json +++ b/params/chainspecs/chiado.json @@ -17,6 +17,7 @@ "terminalTotalDifficulty": 231707791542740786049188744689299064356246512, "terminalTotalDifficultyPassed": true, "shanghaiTime": 1684934220, + "cancunTime": 1706724940, "minBlobGasPrice": 1000000000, "maxBlobGasPerBlock": 262144, "targetBlobGasPerBlock": 131072, diff --git a/params/chainspecs/goerli.json b/params/chainspecs/goerli.json index 46603963dc3..4bf04baa184 100644 --- a/params/chainspecs/goerli.json +++ b/params/chainspecs/goerli.json @@ -14,6 +14,7 @@ "terminalTotalDifficulty": 10790000, "terminalTotalDifficultyPassed": true, "shanghaiTime": 1678832736, + "cancunTime": 1705473120, "clique": { "period": 15, "epoch": 30000 diff --git a/params/chainspecs/holesky.json b/params/chainspecs/holesky.json index 852beb80e3d..e582e406008 100644 --- a/params/chainspecs/holesky.json +++ b/params/chainspecs/holesky.json @@ -13,5 +13,6 @@ "mergeForkBlock": 0, "terminalTotalDifficulty": 0, "terminalTotalDifficultyPassed": true, - "shanghaiTime": 1696000704 + "shanghaiTime": 1696000704, + "cancunTime": 1707305664 } \ No newline at end of file diff --git a/params/chainspecs/sepolia.json b/params/chainspecs/sepolia.json index 9c9d42410d9..9fe80a63fbe 100644 --- a/params/chainspecs/sepolia.json +++ b/params/chainspecs/sepolia.json @@ -16,5 +16,6 @@ "terminalTotalDifficultyPassed": true, "mergeNetsplitBlock": 1735371, "shanghaiTime": 1677557088, + "cancunTime": 1706655072, "ethash": {} } diff --git a/params/config.go b/params/config.go index bd5c68ebdde..724b6a4d6a3 100644 --- a/params/config.go +++ b/params/config.go @@ -26,6 +26,7 @@ import ( "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/chain/networkname" libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/polygon/bor/borcfg" "github.com/ledgerwatch/erigon/common/paths" ) @@ -39,12 +40,23 @@ func readChainSpec(filename string) *chain.Config { panic(fmt.Sprintf("Could not open chainspec for %s: %v", filename, err)) } defer f.Close() + decoder := json.NewDecoder(f) spec := &chain.Config{} err = decoder.Decode(&spec) if err != nil { panic(fmt.Sprintf("Could not parse chainspec for %s: %v", filename, err)) } + + if spec.BorJSON != nil { + borConfig := &borcfg.BorConfig{} + err = json.Unmarshal(spec.BorJSON, borConfig) + if err != nil { + panic(fmt.Sprintf("Could not parse 'bor' chainspec for %s: %v", filename, err)) + } + spec.Bor = borConfig + } + return spec } diff --git a/params/protocol_params.go 
b/params/protocol_params.go index 7a3f6621635..a28c2c37dbc 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -175,6 +175,9 @@ const ( // EIP-4844: Shard Blob Transactions PointEvaluationGas uint64 = 50000 + + // PIP-27: secp256r1 elliptic curve signature verifier gas price + P256VerifyGas uint64 = 3450 ) // EIP-4788: Beacon block root in the EVM diff --git a/params/version.go b/params/version.go index 652bb59daf4..8109dd392eb 100644 --- a/params/version.go +++ b/params/version.go @@ -32,7 +32,7 @@ var ( // see https://calver.org const ( VersionMajor = 2 // Major version component of the current release - VersionMinor = 56 // Minor version component of the current release + VersionMinor = 57 // Minor version component of the current release VersionMicro = 0 // Patch version component of the current release VersionModifier = "dev" // Modifier component of the current release VersionKeyCreated = "ErigonVersionCreated" diff --git a/consensus/bor/bor.go b/polygon/bor/bor.go similarity index 86% rename from consensus/bor/bor.go rename to polygon/bor/bor.go index cb86b4a5e12..aef97e200cb 100644 --- a/consensus/bor/bor.go +++ b/polygon/bor/bor.go @@ -26,16 +26,8 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/consensus/bor/finality" - "github.com/ledgerwatch/erigon/consensus/bor/finality/flags" - "github.com/ledgerwatch/erigon/consensus/bor/finality/whitelist" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" - "github.com/ledgerwatch/erigon/consensus/bor/statefull" - "github.com/ledgerwatch/erigon/consensus/bor/valset" "github.com/ledgerwatch/erigon/consensus/misc" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" @@ -45,6 +37,13 @@ import ( "github.com/ledgerwatch/erigon/crypto/cryptopool" "github.com/ledgerwatch/erigon/eth/ethconfig/estimate" "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/polygon/bor/borcfg" + "github.com/ledgerwatch/erigon/polygon/bor/finality" + "github.com/ledgerwatch/erigon/polygon/bor/finality/flags" + "github.com/ledgerwatch/erigon/polygon/bor/finality/whitelist" + "github.com/ledgerwatch/erigon/polygon/bor/statefull" + "github.com/ledgerwatch/erigon/polygon/bor/valset" + "github.com/ledgerwatch/erigon/polygon/heimdall" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/services" @@ -110,9 +109,9 @@ var ( // their extra-data fields. errExtraValidators = errors.New("non-sprint-end block contains extra validator list") - // errInvalidSpanValidators is returned if a block contains an + // errInvalidSprintValidators is returned if a block contains an // invalid list of validators (i.e. non divisible by 40 bytes). - ErrInvalidSpanValidators = errors.New("invalid validator list on sprint end block") + errInvalidSprintValidators = errors.New("invalid validator list on sprint end block") // errInvalidMixDigest is returned if a block's mix digest is non-zero. errInvalidMixDigest = errors.New("non-zero mix digest") @@ -142,7 +141,7 @@ var ( type SignerFn func(signer libcommon.Address, mimeType string, message []byte) ([]byte, error) // ecrecover extracts the Ethereum account address from a signed header. 
-func Ecrecover(header *types.Header, sigcache *lru.ARCCache[libcommon.Hash, libcommon.Address], c *chain.BorConfig) (libcommon.Address, error) { +func Ecrecover(header *types.Header, sigcache *lru.ARCCache[libcommon.Hash, libcommon.Address], c *borcfg.BorConfig) (libcommon.Address, error) { // If the signature's already cached, return that hash := header.Hash() if address, known := sigcache.Get(hash); known { @@ -169,7 +168,7 @@ func Ecrecover(header *types.Header, sigcache *lru.ARCCache[libcommon.Hash, libc } // SealHash returns the hash of a block prior to it being sealed. -func SealHash(header *types.Header, c *chain.BorConfig) (hash libcommon.Hash) { +func SealHash(header *types.Header, c *borcfg.BorConfig) (hash libcommon.Hash) { hasher := cryptopool.NewLegacyKeccak256() defer cryptopool.ReturnToPoolKeccak256(hasher) @@ -179,7 +178,7 @@ func SealHash(header *types.Header, c *chain.BorConfig) (hash libcommon.Hash) { return hash } -func encodeSigHeader(w io.Writer, header *types.Header, c *chain.BorConfig) { +func encodeSigHeader(w io.Writer, header *types.Header, c *borcfg.BorConfig) { enc := []interface{}{ header.ParentHash, header.UncleHash, @@ -210,11 +209,11 @@ func encodeSigHeader(w io.Writer, header *types.Header, c *chain.BorConfig) { } // CalcProducerDelay is the block delay algorithm based on block time, period, producerDelay and turn-ness of a signer -func CalcProducerDelay(number uint64, succession int, c *chain.BorConfig) uint64 { +func CalcProducerDelay(number uint64, succession int, c *borcfg.BorConfig) uint64 { // When the block is the first block of the sprint, it is expected to be delayed by `producerDelay`. // That is to allow time for block propagation in the last sprint delay := c.CalculatePeriod(number) - if number%c.CalculateSprint(number) == 0 { + if number%c.CalculateSprintLength(number) == 0 { delay = c.CalculateProducerDelay(number) } @@ -225,6 +224,48 @@ func CalcProducerDelay(number uint64, succession int, c *chain.BorConfig) uint64 return delay } +func MinNextBlockTime(parent *types.Header, succession int, config *borcfg.BorConfig) uint64 { + return parent.Time + CalcProducerDelay(parent.Number.Uint64()+1, succession, config) +} + +// ValidateHeaderTimeSignerSuccessionNumber - valset.ValidatorSet abstraction for unit tests +type ValidateHeaderTimeSignerSuccessionNumber interface { + GetSignerSuccessionNumber(signer libcommon.Address, number uint64) (int, error) +} + +func ValidateHeaderTime( + header *types.Header, + now time.Time, + parent *types.Header, + validatorSet ValidateHeaderTimeSignerSuccessionNumber, + config *borcfg.BorConfig, + signaturesCache *lru.ARCCache[libcommon.Hash, libcommon.Address], +) error { + if header.Time > uint64(now.Unix()) { + return consensus.ErrFutureBlock + } + + if parent == nil { + return nil + } + + signer, err := Ecrecover(header, signaturesCache, config) + if err != nil { + return err + } + + succession, err := validatorSet.GetSignerSuccessionNumber(signer, header.Number.Uint64()) + if err != nil { + return err + } + + if header.Time < MinNextBlockTime(parent, succession, config) { + return &BlockTooSoonError{header.Number.Uint64(), succession} + } + + return nil +} + // BorRLP returns the rlp bytes which needs to be signed for the bor // sealing. The RLP to sign consists of the entire header apart from the 65 byte signature // contained at the end of the extra data. 
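// ValidateHeaderTime and MinNextBlockTime above factor the timing rule out of
// verifySeal so it can be unit-tested against the small
// ValidateHeaderTimeSignerSuccessionNumber interface. A worked sketch of the
// bound they enforce, assuming period 2s, producer delay 6s and sprint length
// 16 (typical bor parameters; the extra back-off CalcProducerDelay applies to
// out-of-turn signers is not shown in this hunk):
//
//	// block 33, 33 % 16 != 0, in-turn signer (succession 0):
//	//   MinNextBlockTime = parent.Time + 2
//	// block 32, 32 % 16 == 0, so a sprint start:
//	//   MinNextBlockTime = parent.Time + 6
//	// header.Time below the bound fails with &BlockTooSoonError{number, succession}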
@@ -232,7 +273,7 @@ func CalcProducerDelay(number uint64, succession int, c *chain.BorConfig) uint64 // Note, the method requires the extra data to be at least 65 bytes, otherwise it // panics. This is done to avoid accidentally using both forms (signature present // or not), which could be abused to produce different hashes for the same header. -func BorRLP(header *types.Header, c *chain.BorConfig) []byte { +func BorRLP(header *types.Header, c *borcfg.BorConfig) []byte { b := new(bytes.Buffer) encodeSigHeader(b, header, c) @@ -241,9 +282,9 @@ func BorRLP(header *types.Header, c *chain.BorConfig) []byte { // Bor is the matic-bor consensus engine type Bor struct { - chainConfig *chain.Config // Chain config - config *chain.BorConfig // Consensus engine configuration parameters for bor consensus - DB kv.RwDB // Database to store and retrieve snapshot checkpoints + chainConfig *chain.Config // Chain config + config *borcfg.BorConfig // Consensus engine configuration parameters for bor consensus + DB kv.RwDB // Database to store and retrieve snapshot checkpoints blockReader services.FullBlockReader Recents *lru.ARCCache[libcommon.Hash, *Snapshot] // Snapshots for recent block to speed up reorgs @@ -254,8 +295,8 @@ type Bor struct { execCtx context.Context // context of caller execution stage spanner Spanner - GenesisContractsClient GenesisContract - HeimdallClient heimdall.IHeimdallClient + GenesisContractsClient GenesisContracts + HeimdallClient heimdall.HeimdallClient // scope event.SubscriptionScope // The fields below are for testing only @@ -274,110 +315,21 @@ type signer struct { signFn SignerFn // Signer function to authorize hashes with } -type sprint struct { - from, size uint64 -} - -type sprints []sprint - -func (s sprints) Len() int { - return len(s) -} - -func (s sprints) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s sprints) Less(i, j int) bool { - return s[i].from < s[j].from -} - -func asSprints(configSprints map[string]uint64) sprints { - sprints := make(sprints, len(configSprints)) - - i := 0 - for key, value := range configSprints { - sprints[i].from, _ = strconv.ParseUint(key, 10, 64) - sprints[i].size = value - i++ - } - - sort.Sort(sprints) - - return sprints -} - -func CalculateSprintCount(config *chain.BorConfig, from, to uint64) int { - - switch { - case from > to: - return 0 - case from < to: - to-- - } - - sprints := asSprints(config.Sprint) - - count := uint64(0) - startCalc := from - - zeroth := func(boundary uint64, size uint64) uint64 { - if boundary%size == 0 { - return 1 - } - - return 0 - } - - for i := 0; i < len(sprints)-1; i++ { - if startCalc >= sprints[i].from && startCalc < sprints[i+1].from { - if to >= sprints[i].from && to < sprints[i+1].from { - if startCalc == to { - return int(count + zeroth(startCalc, sprints[i].size)) - } - return int(count + zeroth(startCalc, sprints[i].size) + (to-startCalc)/sprints[i].size) - } else { - endCalc := sprints[i+1].from - 1 - count += zeroth(startCalc, sprints[i].size) + (endCalc-startCalc)/sprints[i].size - startCalc = endCalc + 1 - } - } - } - - if startCalc == to { - return int(count + zeroth(startCalc, sprints[len(sprints)-1].size)) - } - - return int(count + zeroth(startCalc, sprints[len(sprints)-1].size) + (to-startCalc)/sprints[len(sprints)-1].size) -} - -func CalculateSprint(config *chain.BorConfig, number uint64) uint64 { - sprints := asSprints(config.Sprint) - - for i := 0; i < len(sprints)-1; i++ { - if number >= sprints[i].from && number < sprints[i+1].from { - return sprints[i].size - } - } 
- - return sprints[len(sprints)-1].size -} - // New creates a Matic Bor consensus engine. func New( chainConfig *chain.Config, db kv.RwDB, blockReader services.FullBlockReader, spanner Spanner, - heimdallClient heimdall.IHeimdallClient, - genesisContracts GenesisContract, + heimdallClient heimdall.HeimdallClient, + genesisContracts GenesisContracts, logger log.Logger, ) *Bor { // get bor config - borConfig := chainConfig.Bor + borConfig := chainConfig.Bor.(*borcfg.BorConfig) // Set any missing consensus parameters to their defaults - if borConfig != nil && borConfig.CalculateSprint(0) == 0 { + if borConfig != nil && borConfig.CalculateSprintLength(0) == 0 { borConfig.Sprint = defaultSprintLength } @@ -404,7 +356,7 @@ func New( libcommon.Address{}, func(_ libcommon.Address, _ string, i []byte) ([]byte, error) { // return an error to prevent panics - return nil, &UnauthorizedSignerError{0, libcommon.Address{}.Bytes()} + return nil, &valset.UnauthorizedSignerError{Number: 0, Signer: libcommon.Address{}.Bytes()} }, }) @@ -440,12 +392,12 @@ func (w rwWrapper) BeginRwNosync(ctx context.Context) (kv.RwTx, error) { // This is used by the rpcdaemon and tests which need read only access to the provided data services func NewRo(chainConfig *chain.Config, db kv.RoDB, blockReader services.FullBlockReader, spanner Spanner, - genesisContracts GenesisContract, logger log.Logger) *Bor { + genesisContracts GenesisContracts, logger log.Logger) *Bor { // get bor config - borConfig := chainConfig.Bor + borConfig := chainConfig.Bor.(*borcfg.BorConfig) // Set any missing consensus parameters to their defaults - if borConfig != nil && borConfig.CalculateSprint(0) == 0 { + if borConfig != nil && borConfig.CalculateSprintLength(0) == 0 { borConfig.Sprint = defaultSprintLength } @@ -470,6 +422,10 @@ func (c *Bor) Type() chain.ConsensusName { return chain.BorConsensus } +func (c *Bor) Config() *borcfg.BorConfig { + return c.config +} + type HeaderProgress interface { Progress() uint64 } @@ -521,7 +477,6 @@ func (c *Bor) verifyHeader(chain consensus.ChainHeaderReader, header *types.Head if header.Number == nil { return errUnknownBlock } - number := header.Number.Uint64() // Don't waste time checking blocks from the future @@ -529,56 +484,29 @@ func (c *Bor) verifyHeader(chain consensus.ChainHeaderReader, header *types.Head return consensus.ErrFutureBlock } - if err := ValidateHeaderExtraField(header.Extra); err != nil { + if err := ValidateHeaderUnusedFields(header); err != nil { return err } - // check extr adata - isSprintEnd := isSprintStart(number+1, c.config.CalculateSprint(number)) - - // Ensure that the extra-data contains a signer list on checkpoint, but none otherwise - signersBytes := len(GetValidatorBytes(header, c.config)) - if !isSprintEnd && signersBytes != 0 { - return errExtraValidators - } - - if isSprintEnd && signersBytes%validatorHeaderBytesLength != 0 { - return ErrInvalidSpanValidators - } - - // Ensure that the mix digest is zero as we don't have fork protection currently - if header.MixDigest != (libcommon.Hash{}) { - return errInvalidMixDigest + if err := ValidateHeaderExtraLength(header.Extra); err != nil { + return err } - - // Ensure that the block doesn't contain any uncles which are meaningless in PoA - if header.UncleHash != uncleHash { - return errInvalidUncleHash + if err := ValidateHeaderSprintValidators(header, c.config); err != nil { + return err } // Ensure that the block's difficulty is meaningful (may not be correct at this point) - if number > 0 { - if header.Difficulty == nil { 
- return errInvalidDifficulty - } - } - - // Verify that the gas limit is <= 2^63-1 - if header.GasLimit > params.MaxGasLimit { - return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, params.MaxGasLimit) - } - - if header.WithdrawalsHash != nil { - return consensus.ErrUnexpectedWithdrawals + if (number > 0) && (header.Difficulty == nil) { + return errInvalidDifficulty } // All basic checks passed, verify cascading fields return c.verifyCascadingFields(chain, header, parents) } -// ValidateHeaderExtraField validates that the extra-data contains both the vanity and signature. +// ValidateHeaderExtraLength validates that the extra-data contains both the vanity and signature. // header.Extra = header.Vanity + header.ProducerBytes (optional) + header.Seal -func ValidateHeaderExtraField(extraBytes []byte) error { +func ValidateHeaderExtraLength(extraBytes []byte) error { if len(extraBytes) < types.ExtraVanityLength { return errMissingVanity } @@ -590,6 +518,41 @@ func ValidateHeaderExtraField(extraBytes []byte) error { return nil } +// ValidateHeaderSprintValidators validates that the extra-data contains a validators list only in the last header of a sprint. +func ValidateHeaderSprintValidators(header *types.Header, config *borcfg.BorConfig) error { + number := header.Number.Uint64() + isSprintEnd := isSprintStart(number+1, config.CalculateSprintLength(number)) + validatorBytes := GetValidatorBytes(header, config) + validatorBytesLen := len(validatorBytes) + + if !isSprintEnd && (validatorBytesLen != 0) { + return errExtraValidators + } + if isSprintEnd && (validatorBytesLen%validatorHeaderBytesLength != 0) { + return errInvalidSprintValidators + } + return nil +} + +// ValidateHeaderUnusedFields validates that unused fields are empty. +func ValidateHeaderUnusedFields(header *types.Header) error { + // Ensure that the mix digest is zero as we don't have fork protection currently + if header.MixDigest != (libcommon.Hash{}) { + return errInvalidMixDigest + } + + // Ensure that the block doesn't contain any uncles which are meaningless in PoA + if header.UncleHash != uncleHash { + return errInvalidUncleHash + } + + if header.WithdrawalsHash != nil { + return consensus.ErrUnexpectedWithdrawals + } + + return misc.VerifyAbsenceOfCancunHeaderFields(header) +} + // verifyCascadingFields verifies all the header fields that are not standalone, // rather depend on a batch of previous headers. The caller may optionally pass // in a batch of parents (ascending order) to avoid looking those up from the @@ -615,12 +578,30 @@ func (c *Bor) verifyCascadingFields(chain consensus.ChainHeaderReader, header *t return consensus.ErrUnknownAncestor } + if parent.Time+c.config.CalculatePeriod(number) > header.Time { + return ErrInvalidTimestamp + } + + return ValidateHeaderGas(header, parent, chain.Config()) +} + +// ValidateHeaderGas validates GasUsed, GasLimit and BaseFee. 
+func ValidateHeaderGas(header *types.Header, parent *types.Header, chainConfig *chain.Config) error { + // Verify that the gas limit is <= 2^63-1 + if header.GasLimit > params.MaxGasLimit { + return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, params.MaxGasLimit) + } + // Verify that the gasUsed is <= gasLimit if header.GasUsed > header.GasLimit { return fmt.Errorf("invalid gasUsed: have %d, gasLimit %d", header.GasUsed, header.GasLimit) } - if !chain.Config().IsLondon(header.Number.Uint64()) { + if parent == nil { + return nil + } + + if !chainConfig.IsLondon(header.Number.Uint64()) { // Verify BaseFee not present before EIP-1559 fork. if header.BaseFee != nil { return fmt.Errorf("invalid baseFee before fork: have %d, want <nil>", header.BaseFee) } @@ -628,14 +609,11 @@ func (c *Bor) verifyCascadingFields(chain consensus.ChainHeaderReader, header *t if err := misc.VerifyGaslimit(parent.GasLimit, header.GasLimit); err != nil { return err } - } else if err := misc.VerifyEip1559Header(chain.Config(), parent, header, false /*skipGasLimit*/); err != nil { + } else if err := misc.VerifyEip1559Header(chainConfig, parent, header, false /*skipGasLimit*/); err != nil { // Verify the header's EIP-1559 attributes. return err } - if parent.Time+c.config.CalculatePeriod(number) > header.Time { - return ErrInvalidTimestamp - } return nil } @@ -852,21 +830,6 @@ func (c *Bor) verifySeal(chain consensus.ChainHeaderReader, header *types.Header if number == 0 { return errUnknownBlock } - // Resolve the authorization key and check against signers - signer, err := Ecrecover(header, c.Signatures, c.config) - if err != nil { - return err - } - - if !snap.ValidatorSet.HasAddress(signer) { - // Check the UnauthorizedSignerError.Error() msg to see why we pass number-1 - return &UnauthorizedSignerError{number - 1, signer.Bytes()} - } - - succession, err := snap.GetSignerSuccessionNumber(signer) - if err != nil { - return err - } var parent *types.Header if len(parents) > 0 { // if parents is nil, len(parents) is zero @@ -875,12 +838,17 @@ func (c *Bor) verifySeal(chain consensus.ChainHeaderReader, header *types.Header parent = chain.GetHeader(header.ParentHash, number-1) } - if parent != nil && header.Time < parent.Time+CalcProducerDelay(number, succession, c.config) { - return &BlockTooSoonError{number, succession} + if err := ValidateHeaderTime(header, time.Now(), parent, snap.ValidatorSet, c.config, c.Signatures); err != nil { + return err } // Ensure that the difficulty corresponds to the turn-ness of the signer if !c.fakeDiff { + signer, err := Ecrecover(header, c.Signatures, c.config) + if err != nil { + return err + } + difficulty := snap.Difficulty(signer) if header.Difficulty.Uint64() != difficulty { return &WrongDifficultyError{number, difficulty, header.Difficulty.Uint64(), signer.Bytes()} @@ -890,10 +858,6 @@ func (c *Bor) verifySeal(chain consensus.ChainHeaderReader, header *types.Header return nil } -func IsBlockOnTime(parent *types.Header, header *types.Header, number uint64, succession int, cfg *chain.BorConfig) bool { - return parent != nil && header.Time < parent.Time+CalcProducerDelay(number, succession, cfg) -} - // Prepare implements consensus.Engine, preparing all the consensus fields of the // header for running the transactions on top.
func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header, state *state.IntraBlockState) error { @@ -923,7 +887,7 @@ func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header, s // client calls `GetCurrentValidators` because it makes a contract call // where it fetches producers internally. As we fetch data from span // in Erigon, use directly the `GetCurrentProducers` function. - if isSprintStart(number+1, c.config.CalculateSprint(number)) { + if isSprintStart(number+1, c.config.CalculateSprintLength(number)) { spanID := SpanIDAt(number + 1) newValidators, err := c.spanner.GetCurrentProducers(spanID, c.authorizedSigner.Load().signer, chain) if err != nil { @@ -933,7 +897,7 @@ func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header, s // sort validator by address sort.Sort(valset.ValidatorsByAddress(newValidators)) - if c.config.IsParallelUniverse(header.Number.Uint64()) { + if c.config.IsNapoli(header.Number.Uint64()) { // PIP-16: Transaction Dependency Data var tempValidatorBytes []byte for _, validator := range newValidators { @@ -957,7 +921,7 @@ func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header, s header.Extra = append(header.Extra, validator.HeaderBytes()...) } } - } else if c.config.IsParallelUniverse(header.Number.Uint64()) { + } else if c.config.IsNapoli(header.Number.Uint64()) { // PIP-16: Transaction Dependency Data blockExtraData := &BlockExtraData{ ValidatorBytes: nil, TxDependency: nil, @@ -985,15 +949,16 @@ func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header, s } var succession int + signer := c.authorizedSigner.Load().signer // if signer is not empty - if signer := c.authorizedSigner.Load().signer; !bytes.Equal(signer.Bytes(), libcommon.Address{}.Bytes()) { - succession, err = snap.GetSignerSuccessionNumber(signer) + if !bytes.Equal(signer.Bytes(), libcommon.Address{}.Bytes()) { + succession, err = snap.ValidatorSet.GetSignerSuccessionNumber(signer, number) if err != nil { return err } } - header.Time = parent.Time + CalcProducerDelay(number, succession, c.config) + header.Time = MinNextBlockTime(parent, succession, c.config) if header.Time < uint64(time.Now().Unix()) { header.Time = uint64(time.Now().Unix()) } @@ -1018,7 +983,7 @@ func (c *Bor) Finalize(config *chain.Config, header *types.Header, state *state. 
return nil, nil, consensus.ErrUnexpectedWithdrawals } - if isSprintStart(headerNumber, c.config.CalculateSprint(headerNumber)) { + if isSprintStart(headerNumber, c.config.CalculateSprintLength(headerNumber)) { cx := statefull.ChainContext{Chain: chain, Bor: c} if c.blockReader != nil { @@ -1084,7 +1049,7 @@ func (c *Bor) FinalizeAndAssemble(chainConfig *chain.Config, header *types.Heade return nil, nil, nil, consensus.ErrUnexpectedWithdrawals } - if isSprintStart(headerNumber, c.config.CalculateSprint(headerNumber)) { + if isSprintStart(headerNumber, c.config.CalculateSprintLength(headerNumber)) { cx := statefull.ChainContext{Chain: chain, Bor: c} if c.blockReader != nil { @@ -1166,13 +1131,7 @@ func (c *Bor) Seal(chain consensus.ChainHeaderReader, block *types.Block, result return err } - // Bail out if we're unauthorized to sign a block - if !snap.ValidatorSet.HasAddress(signer) { - // Check the UnauthorizedSignerError.Error() msg to see why we pass number-1 - return &UnauthorizedSignerError{number - 1, signer.Bytes()} - } - - successionNumber, err := snap.GetSignerSuccessionNumber(signer) + successionNumber, err := snap.ValidatorSet.GetSignerSuccessionNumber(signer, number) if err != nil { return err } @@ -1196,11 +1155,13 @@ func (c *Bor) Seal(chain consensus.ChainHeaderReader, block *types.Block, result select { case <-stop: c.logger.Info("[bor] Stopped sealing operation for block", "number", number) + results <- nil return case <-time.After(delay): if c.headerProgress != nil && c.headerProgress.Progress() >= number { c.logger.Info("Discarding sealing operation for block", "number", number) + results <- nil return } @@ -1258,7 +1219,6 @@ func (c *Bor) IsValidator(header *types.Header) (bool, error) { // IsProposer returns true if this instance is the proposer for this block func (c *Bor) IsProposer(header *types.Header) (bool, error) { number := header.Number.Uint64() - if number == 0 { return false, nil } @@ -1268,14 +1228,8 @@ func (c *Bor) IsProposer(header *types.Header) (bool, error) { return false, err } - currentSigner := c.authorizedSigner.Load() - - if !snap.ValidatorSet.HasAddress(currentSigner.signer) { - return false, nil - } - - successionNumber, err := snap.GetSignerSuccessionNumber(currentSigner.signer) - + signer := c.authorizedSigner.Load().signer + successionNumber, err := snap.ValidatorSet.GetSignerSuccessionNumber(signer, number) return successionNumber == 0, err } @@ -1356,37 +1310,23 @@ func (c *Bor) checkAndCommitSpan( ) error { headerNumber := header.Number.Uint64() - span, err := c.spanner.GetCurrentSpan(syscall) + currentSpan, err := c.spanner.GetCurrentSpan(syscall) if err != nil { return err } - if c.needToCommitSpan(span, headerNumber) { - err := c.fetchAndCommitSpan(span.ID+1, state, header, chain, syscall) - return err - } - - return nil -} - -func (c *Bor) needToCommitSpan(currentSpan *span.Span, headerNumber uint64) bool { - // if span is nil - if currentSpan == nil { - return false - } - // check span is not set initially if currentSpan.EndBlock == 0 { - return true + return c.fetchAndCommitSpan(currentSpan.ID, state, header, chain, syscall) } - sprintLength := c.config.CalculateSprint(headerNumber) // if current block is first block of last sprint in current span + sprintLength := c.config.CalculateSprintLength(headerNumber) if currentSpan.EndBlock > sprintLength && currentSpan.EndBlock-sprintLength+1 == headerNumber { - return true + return c.fetchAndCommitSpan(currentSpan.ID+1, state, header, chain, syscall) } - return false + return nil } func (c 
*Bor) fetchAndCommitSpan( @@ -1396,7 +1336,7 @@ func (c *Bor) fetchAndCommitSpan( chain statefull.ChainContext, syscall consensus.SystemCall, ) error { - var heimdallSpan span.HeimdallSpan + var heimdallSpan heimdall.HeimdallSpan if c.HeimdallClient == nil { // fixme: move to a new mock or fake and remove c.HeimdallClient completely @@ -1450,12 +1390,23 @@ func (c *Bor) GetRootHash(ctx context.Context, tx kv.Tx, start, end uint64) (str if start > end || end > currentHeaderNumber { return "", &valset.InvalidStartEndBlockError{Start: start, End: end, CurrentHeader: currentHeaderNumber} } - blockHeaders := make([]*types.Header, end-start+1) + blockHeaders := make([]*types.Header, length) for number := start; number <= end; number++ { blockHeaders[number-start], _ = c.getHeaderByNumber(ctx, tx, number) } - headers := make([][32]byte, NextPowerOfTwo(length)) + hash, err := ComputeHeadersRootHash(blockHeaders) + if err != nil { + return "", err + } + + hashStr := hex.EncodeToString(hash) + c.rootHashCache.Add(cacheKey, hashStr) + return hashStr, nil +} + +func ComputeHeadersRootHash(blockHeaders []*types.Header) ([]byte, error) { + headers := make([][32]byte, NextPowerOfTwo(uint64(len(blockHeaders)))) for i := 0; i < len(blockHeaders); i++ { blockHeader := blockHeaders[i] header := crypto.Keccak256(AppendBytes32( @@ -1471,13 +1422,10 @@ func (c *Bor) GetRootHash(ctx context.Context, tx kv.Tx, start, end uint64) (str } tree := merkle.NewTreeWithOpts(merkle.TreeOptions{EnableHashSorting: false, DisableHashLeaves: true}) if err := tree.Generate(Convert(headers), sha3.NewLegacyKeccak256()); err != nil { - return "", err + return nil, err } - root := hex.EncodeToString(tree.Root().Hash) - - c.rootHashCache.Add(cacheKey, root) - return root, nil + return tree.Root().Hash, nil } func (c *Bor) getHeaderByNumber(ctx context.Context, tx kv.Tx, number uint64) (*types.Header, error) { @@ -1507,7 +1455,7 @@ func (c *Bor) CommitStates( return nil } -func (c *Bor) SetHeimdallClient(h heimdall.IHeimdallClient) { +func (c *Bor) SetHeimdallClient(h heimdall.HeimdallClient) { c.HeimdallClient = h } @@ -1521,7 +1469,7 @@ func (c *Bor) getNextHeimdallSpanForTest( header *types.Header, chain statefull.ChainContext, syscall consensus.SystemCall, -) (*span.HeimdallSpan, error) { +) (*heimdall.HeimdallSpan, error) { headerNumber := header.Number.Uint64() spanBor, err := c.spanner.GetCurrentSpan(syscall) @@ -1543,14 +1491,14 @@ func (c *Bor) getNextHeimdallSpanForTest( spanBor.StartBlock = spanBor.EndBlock + 1 } - spanBor.EndBlock = spanBor.StartBlock + (100 * c.config.CalculateSprint(headerNumber)) - 1 + spanBor.EndBlock = spanBor.StartBlock + (100 * c.config.CalculateSprintLength(headerNumber)) - 1 selectedProducers := make([]valset.Validator, len(snap.ValidatorSet.Validators)) for i, v := range snap.ValidatorSet.Validators { selectedProducers[i] = *v } - heimdallSpan := &span.HeimdallSpan{ + heimdallSpan := &heimdall.HeimdallSpan{ Span: *spanBor, ValidatorSet: *snap.ValidatorSet, SelectedProducers: selectedProducers, @@ -1592,7 +1540,7 @@ func getUpdatedValidatorSet(oldValidatorSet *valset.ValidatorSet, newVals []*val } } - if err := v.UpdateWithChangeSet(changes, logger); err != nil { + if err := v.UpdateWithChangeSet(changes); err != nil { logger.Error("[bor] Error while updating change set", "error", err) } @@ -1633,10 +1581,10 @@ func GetTxDependency(b *types.Block) [][]uint64 { return blockExtraData.TxDependency } -func GetValidatorBytes(h *types.Header, config *chain.BorConfig) []byte { +func GetValidatorBytes(h 
*types.Header, config *borcfg.BorConfig) []byte { tempExtra := h.Extra - if !config.IsParallelUniverse(h.Number.Uint64()) { + if !config.IsNapoli(h.Number.Uint64()) { return tempExtra[types.ExtraVanityLength : len(tempExtra)-types.ExtraSealLength] } diff --git a/consensus/bor/bor_test.go b/polygon/bor/bor_test.go similarity index 86% rename from consensus/bor/bor_test.go rename to polygon/bor/bor_test.go index 373b3bd10d5..ee3586d734b 100644 --- a/consensus/bor/bor_test.go +++ b/polygon/bor/bor_test.go @@ -7,53 +7,63 @@ import ( "math/big" "testing" + "github.com/ledgerwatch/erigon/polygon/bor/borcfg" + "github.com/ledgerwatch/erigon/polygon/heimdall" + + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/consensus/bor" - "github.com/ledgerwatch/erigon/consensus/bor/clerk" - "github.com/ledgerwatch/erigon/consensus/bor/contract" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" - "github.com/ledgerwatch/erigon/consensus/bor/valset" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/eth/protocols/eth" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/polygon/bor" + "github.com/ledgerwatch/erigon/polygon/bor/valset" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/stages/mock" - "github.com/ledgerwatch/log/v3" ) type test_heimdall struct { - currentSpan *span.HeimdallSpan + currentSpan *heimdall.HeimdallSpan chainConfig *chain.Config + borConfig *borcfg.BorConfig validatorSet *valset.ValidatorSet - spans map[uint64]*span.HeimdallSpan + spans map[uint64]*heimdall.HeimdallSpan } func newTestHeimdall(chainConfig *chain.Config) *test_heimdall { - return &test_heimdall{nil, chainConfig, nil, map[uint64]*span.HeimdallSpan{}} + return &test_heimdall{ + currentSpan: nil, + chainConfig: chainConfig, + borConfig: chainConfig.Bor.(*borcfg.BorConfig), + validatorSet: nil, + spans: map[uint64]*heimdall.HeimdallSpan{}, + } +} + +func (h *test_heimdall) BorConfig() *borcfg.BorConfig { + return h.borConfig } -func (h test_heimdall) StateSyncEvents(ctx context.Context, fromID uint64, to int64) ([]*clerk.EventRecordWithTime, error) { +func (h test_heimdall) StateSyncEvents(ctx context.Context, fromID uint64, to int64) ([]*heimdall.EventRecordWithTime, error) { return nil, nil } -func (h *test_heimdall) Span(ctx context.Context, spanID uint64) (*span.HeimdallSpan, error) { +func (h *test_heimdall) Span(ctx context.Context, spanID uint64) (*heimdall.HeimdallSpan, error) { if span, ok := h.spans[spanID]; ok { h.currentSpan = span return span, nil } - var nextSpan = span.Span{ + var nextSpan = heimdall.Span{ ID: spanID, } @@ -67,7 +77,7 @@ func (h *test_heimdall) Span(ctx context.Context, spanID uint64) (*span.Heimdall nextSpan.StartBlock = h.currentSpan.EndBlock + 1 } - nextSpan.EndBlock = nextSpan.StartBlock + (100 * h.chainConfig.Bor.CalculateSprint(nextSpan.StartBlock)) - 1 + nextSpan.EndBlock = nextSpan.StartBlock + (100 * h.borConfig.CalculateSprintLength(nextSpan.StartBlock)) - 1 // 
TODO we should use a subset here - see: https://wiki.polygon.technology/docs/pos/bor/ @@ -77,7 +87,7 @@ func (h *test_heimdall) Span(ctx context.Context, spanID uint64) (*span.Heimdall selectedProducers[i] = *v } - h.currentSpan = &span.HeimdallSpan{ + h.currentSpan = &heimdall.HeimdallSpan{ Span: nextSpan, ValidatorSet: *h.validatorSet, SelectedProducers: selectedProducers, @@ -91,13 +101,13 @@ func (h *test_heimdall) Span(ctx context.Context, spanID uint64) (*span.Heimdall func (h test_heimdall) currentSprintLength() int { if h.currentSpan != nil { - return int(h.chainConfig.Bor.CalculateSprint(h.currentSpan.StartBlock)) + return int(h.borConfig.CalculateSprintLength(h.currentSpan.StartBlock)) } - return int(h.chainConfig.Bor.CalculateSprint(256)) + return int(h.borConfig.CalculateSprintLength(256)) } -func (h test_heimdall) FetchCheckpoint(ctx context.Context, number int64) (*checkpoint.Checkpoint, error) { +func (h test_heimdall) FetchCheckpoint(ctx context.Context, number int64) (*heimdall.Checkpoint, error) { return nil, fmt.Errorf("TODO") } @@ -105,7 +115,7 @@ func (h test_heimdall) FetchCheckpointCount(ctx context.Context) (int64, error) return 0, fmt.Errorf("TODO") } -func (h test_heimdall) FetchMilestone(ctx context.Context, number int64) (*milestone.Milestone, error) { +func (h test_heimdall) FetchMilestone(ctx context.Context, number int64) (*heimdall.Milestone, error) { return nil, fmt.Errorf("TODO") } @@ -181,16 +191,16 @@ func (r headerReader) BorSpan(spanId uint64) []byte { } type spanner struct { - *span.ChainSpanner + *bor.ChainSpanner validatorAddress common.Address - currentSpan span.Span + currentSpan heimdall.Span } -func (c spanner) GetCurrentSpan(_ consensus.SystemCall) (*span.Span, error) { +func (c spanner) GetCurrentSpan(_ consensus.SystemCall) (*heimdall.Span, error) { return &c.currentSpan, nil } -func (c *spanner) CommitSpan(heimdallSpan span.HeimdallSpan, syscall consensus.SystemCall) error { +func (c *spanner) CommitSpan(heimdallSpan heimdall.HeimdallSpan, syscall consensus.SystemCall) error { c.currentSpan = heimdallSpan.Span return nil } @@ -263,6 +273,8 @@ func (v validator) verifyBlocks(blocks []*types.Block) error { return nil } +type heimdallSpan = heimdall.Span + func newValidator(t *testing.T, heimdall *test_heimdall, blocks map[uint64]*types.Block) validator { logger := log.Root() @@ -272,7 +284,11 @@ func newValidator(t *testing.T, heimdall *test_heimdall, blocks map[uint64]*type heimdall.chainConfig, memdb.New(""), nil, /* blockReader */ - &spanner{span.NewChainSpanner(contract.ValidatorSet(), heimdall.chainConfig, false, logger), validatorAddress, span.Span{}}, + &spanner{ + ChainSpanner: bor.NewChainSpanner(bor.GenesisContractValidatorSetABI(), heimdall.chainConfig, false, logger), + validatorAddress: validatorAddress, + currentSpan: heimdallSpan{}, + }, heimdall, test_genesisContract{}, logger, @@ -291,7 +307,7 @@ func newValidator(t *testing.T, heimdall *test_heimdall, blocks map[uint64]*type VotingPower: 1000, ProposerPriority: 1, }, - }, logger) + }) } else { heimdall.validatorSet.UpdateWithChangeSet([]*valset.Validator{ { @@ -300,7 +316,7 @@ func newValidator(t *testing.T, heimdall *test_heimdall, blocks map[uint64]*type VotingPower: 1000, ProposerPriority: 1, }, - }, logger) + }) } bor.Authorize(validatorAddress, func(_ libcommon.Address, mimeType string, message []byte) ([]byte, error) { @@ -346,11 +362,11 @@ func TestVerifyRun(t *testing.T) { } func TestVerifySprint(t *testing.T) { - //testVerify(t, 10, 4, 
int(params.BorDevnetChainConfig.Bor.CalculateSprint(256))) + //testVerify(t, 10, 4, int(params.BorDevnetChainConfig.Bor.CalculateSprintLength(256))) } func TestVerifySpan(t *testing.T) { - //testVerify(t, 10, 4 /*100**/ *int(params.BorDevnetChainConfig.Bor.CalculateSprint(256))) + //testVerify(t, 10, 4 /*100**/ *int(params.BorDevnetChainConfig.Bor.CalculateSprintLength(256))) } func testVerify(t *testing.T, noValidators int, chainLength int) { @@ -392,7 +408,7 @@ func testVerify(t *testing.T, noValidators int, chainLength int) { if isProposer { if vi != lastProposerIndex { - sprintLen := params.BorDevnetChainConfig.Bor.CalculateSprint(block.NumberU64()) + sprintLen := heimdall.BorConfig().CalculateSprintLength(block.NumberU64()) if block.NumberU64() > 1 && block.NumberU64()%sprintLen != 0 { t.Fatalf("Unexpected sprint boundary at %d for: %d", bi, block.NumberU64()) } diff --git a/polygon/bor/borcfg/bor_config.go b/polygon/bor/borcfg/bor_config.go new file mode 100644 index 00000000000..6c02f069b82 --- /dev/null +++ b/polygon/bor/borcfg/bor_config.go @@ -0,0 +1,193 @@ +package borcfg + +import ( + "math/big" + "sort" + "strconv" + + "github.com/ledgerwatch/erigon-lib/common" +) + +// BorConfig is the consensus engine configs for Matic bor based sealing. +type BorConfig struct { + Period map[string]uint64 `json:"period"` // Number of seconds between blocks to enforce + ProducerDelay map[string]uint64 `json:"producerDelay"` // Number of seconds delay between two producer interval + Sprint map[string]uint64 `json:"sprint"` // Epoch length to proposer + BackupMultiplier map[string]uint64 `json:"backupMultiplier"` // Backup multiplier to determine the wiggle time + ValidatorContract string `json:"validatorContract"` // Validator set contract + StateReceiverContract string `json:"stateReceiverContract"` // State receiver contract + + OverrideStateSyncRecords map[string]int `json:"overrideStateSyncRecords"` // override state records count + BlockAlloc map[string]interface{} `json:"blockAlloc"` + + JaipurBlock *big.Int `json:"jaipurBlock"` // Jaipur switch block (nil = no fork, 0 = already on Jaipur) + DelhiBlock *big.Int `json:"delhiBlock"` // Delhi switch block (nil = no fork, 0 = already on Delhi) + IndoreBlock *big.Int `json:"indoreBlock"` // Indore switch block (nil = no fork, 0 = already on Indore) + AgraBlock *big.Int `json:"agraBlock"` // Agra switch block (nil = no fork, 0 = already on Agra) + NapoliBlock *big.Int `json:"napoliBlock"` // Napoli switch block (nil = no fork, 0 = already on Napoli) + StateSyncConfirmationDelay map[string]uint64 `json:"stateSyncConfirmationDelay"` // StateSync Confirmation Delay, in seconds, to calculate `to` + + sprints sprints +} + +// String implements the stringer interface, returning the consensus engine details. 
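Several of the BorConfig fields above (Period, ProducerDelay, Sprint, BackupMultiplier, StateSyncConfirmationDelay) are schedules keyed by stringified block numbers: a value takes effect at its key and stays in force until the next higher key. The borKeyValueConfigHelper generic later in this file resolves that lookup; the sketch below re-implements the same rule standalone (specialised to uint64), using the {"0": 64, "256": 16} sprint schedule from the test file added further down in this diff.

```go
package main

import (
	"fmt"
	"sort"
	"strconv"
)

// lookup mirrors borKeyValueConfigHelper: each key is a stringified block
// number, and its value applies from that block (inclusive) until the next
// higher key takes over.
func lookup(field map[string]uint64, number uint64) uint64 {
	keys := make([]uint64, 0, len(field))
	values := make(map[uint64]uint64, len(field))
	for k, v := range field {
		key, err := strconv.ParseUint(k, 10, 64)
		if err != nil {
			panic(err) // the original helper also panics on malformed keys
		}
		keys = append(keys, key)
		values[key] = v
	}
	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })

	for i := 0; i < len(keys)-1; i++ {
		if number >= keys[i] && number < keys[i+1] {
			return values[keys[i]]
		}
	}
	return values[keys[len(keys)-1]]
}

func main() {
	// 64-block sprints until block 256, then 16-block sprints.
	sprint := map[string]uint64{"0": 64, "256": 16}
	fmt.Println(lookup(sprint, 0))   // 64
	fmt.Println(lookup(sprint, 255)) // 64: last block under the first config
	fmt.Println(lookup(sprint, 256)) // 16: the new config starts at its key
}
```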
+func (c *BorConfig) String() string { + return "bor" +} + +func (c *BorConfig) CalculateProducerDelay(number uint64) uint64 { + return borKeyValueConfigHelper(c.ProducerDelay, number) +} + +func (c *BorConfig) CalculateSprintLength(number uint64) uint64 { + if c.sprints == nil { + c.sprints = asSprints(c.Sprint) + } + + for i := 0; i < len(c.sprints)-1; i++ { + if number >= c.sprints[i].from && number < c.sprints[i+1].from { + return c.sprints[i].size + } + } + + return c.sprints[len(c.sprints)-1].size +} + +func (c *BorConfig) CalculateSprintNumber(number uint64) uint64 { + if c.sprints == nil { + c.sprints = asSprints(c.Sprint) + } + + // unknown sprint size + if (len(c.sprints) == 0) || (number < c.sprints[0].from) { + return 0 + } + + // remove sprint configs that are not in effect yet + sprints := c.sprints + for number < sprints[len(sprints)-1].from { + sprints = sprints[:len(sprints)-1] + } + + var count uint64 + end := number + for len(sprints) > 0 { + sprint := sprints[len(sprints)-1] + count += (end - sprint.from) / sprint.size + + sprints = sprints[:len(sprints)-1] + end = sprint.from + } + + if c.sprints[0].from > 0 { + count++ + } + return count +} + +func (c *BorConfig) CalculateBackupMultiplier(number uint64) uint64 { + return borKeyValueConfigHelper(c.BackupMultiplier, number) +} + +func (c *BorConfig) CalculatePeriod(number uint64) uint64 { + return borKeyValueConfigHelper(c.Period, number) +} + +// isForked returns whether a fork scheduled at block s is active at the given head block. +func isForked(s *big.Int, head uint64) bool { + if s == nil { + return false + } + return s.Uint64() <= head +} + +func (c *BorConfig) IsJaipur(number uint64) bool { + return isForked(c.JaipurBlock, number) +} + +func (c *BorConfig) IsDelhi(number uint64) bool { + return isForked(c.DelhiBlock, number) +} + +func (c *BorConfig) IsIndore(number uint64) bool { + return isForked(c.IndoreBlock, number) +} + +// IsAgra returns whether num is either equal to the Agra fork block or greater. +// The Agra hard fork is based on the Shanghai hard fork, but it doesn't include withdrawals. +// Also Agra is activated based on the block number rather than the timestamp. 
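All the Is* fork predicates above reduce to isForked, whose three-way contract (nil = never scheduled, zero = active from genesis, otherwise the activation block, inclusive) is easy to get wrong. A self-contained sketch of those semantics; the activation height 100 is purely illustrative:

```go
package main

import (
	"fmt"
	"math/big"
)

// isForked mirrors the helper above: nil means the fork is never scheduled,
// zero means active since genesis, any other value is the activation block.
func isForked(s *big.Int, head uint64) bool {
	if s == nil {
		return false
	}
	return s.Uint64() <= head
}

func main() {
	agra := big.NewInt(100) // hypothetical activation height
	fmt.Println(isForked(nil, 1_000_000)) // false: fork not scheduled at all
	fmt.Println(isForked(agra, 99))       // false: one block before activation
	fmt.Println(isForked(agra, 100))      // true: the activation block itself counts
}
```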
+// Refer to https://forum.polygon.technology/t/pip-28-agra-hardfork +func (c *BorConfig) IsAgra(num uint64) bool { + return isForked(c.AgraBlock, num) +} + +func (c *BorConfig) GetAgraBlock() *big.Int { + return c.AgraBlock +} + +// Refer to https://forum.polygon.technology/t/pip-33-napoli-upgrade +func (c *BorConfig) IsNapoli(num uint64) bool { + return isForked(c.NapoliBlock, num) +} + +func (c *BorConfig) GetNapoliBlock() *big.Int { + return c.NapoliBlock +} + +func (c *BorConfig) CalculateStateSyncDelay(number uint64) uint64 { + return borKeyValueConfigHelper(c.StateSyncConfirmationDelay, number) +} + +func borKeyValueConfigHelper[T uint64 | common.Address](field map[string]T, number uint64) T { + fieldUint := make(map[uint64]T) + for k, v := range field { + keyUint, err := strconv.ParseUint(k, 10, 64) + if err != nil { + panic(err) + } + fieldUint[keyUint] = v + } + + keys := common.SortedKeys(fieldUint) + + for i := 0; i < len(keys)-1; i++ { + if number >= keys[i] && number < keys[i+1] { + return fieldUint[keys[i]] + } + } + + return fieldUint[keys[len(keys)-1]] +} + +type sprint struct { + from, size uint64 +} + +type sprints []sprint + +func (s sprints) Len() int { + return len(s) +} + +func (s sprints) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s sprints) Less(i, j int) bool { + return s[i].from < s[j].from +} + +func asSprints(configSprints map[string]uint64) sprints { + sprints := make(sprints, len(configSprints)) + + i := 0 + for key, value := range configSprints { + sprints[i].from, _ = strconv.ParseUint(key, 10, 64) + sprints[i].size = value + i++ + } + + sort.Sort(sprints) + + return sprints +} diff --git a/polygon/bor/borcfg/bor_config_test.go b/polygon/bor/borcfg/bor_config_test.go new file mode 100644 index 00000000000..26109597b27 --- /dev/null +++ b/polygon/bor/borcfg/bor_config_test.go @@ -0,0 +1,47 @@ +package borcfg + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestCalculateSprintNumber(t *testing.T) { + cfg := BorConfig{ + Sprint: map[string]uint64{ + "0": 64, + "256": 16, + }, + } + + examples := map[uint64]uint64{ + 0: 0, + 1: 0, + 2: 0, + 63: 0, + 64: 1, + 65: 1, + 66: 1, + 127: 1, + 128: 2, + 191: 2, + 192: 3, + 255: 3, + 256: 4, + 257: 4, + 258: 4, + 271: 4, + 272: 5, + 273: 5, + 274: 5, + 287: 5, + 288: 6, + 303: 6, + 304: 7, + 319: 7, + 320: 8, + } + + for blockNumber, expectedSprintNumber := range examples { + assert.Equal(t, expectedSprintNumber, cfg.CalculateSprintNumber(blockNumber), blockNumber) + } +} diff --git a/consensus/bor/errors.go b/polygon/bor/errors.go similarity index 56% rename from consensus/bor/errors.go rename to polygon/bor/errors.go index c70aff344a0..7d57658ff8d 100644 --- a/consensus/bor/errors.go +++ b/polygon/bor/errors.go @@ -2,9 +2,6 @@ package bor import ( "fmt" - "time" - - "github.com/ledgerwatch/erigon/consensus/bor/clerk" ) type MaxCheckpointLengthExceededError struct { @@ -51,34 +48,6 @@ func (e *BlockTooSoonError) Error() string { ) } -// UnauthorizedProposerError is returned if a header is [being] signed by an unauthorized entity. -type UnauthorizedProposerError struct { - Number uint64 - Proposer []byte -} - -func (e *UnauthorizedProposerError) Error() string { - return fmt.Sprintf( - "Proposer 0x%x is not a part of the producer set at block %d", - e.Proposer, - e.Number, - ) -} - -// UnauthorizedSignerError is returned if a header is [being] signed by an unauthorized entity. 
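CalculateSprintNumber counts completed sprints by walking the schedule from the most recent config backwards, as the test table above exercises. The sketch below replays that walk for the same schedule and derives the 271 -> 4 entry by hand; the original's trailing `count++` for a schedule whose first key is non-zero is omitted, since this schedule starts at block 0.

```go
package main

import "fmt"

type sprint struct{ from, size uint64 }

// sprintNumber mirrors CalculateSprintNumber above for the test schedule
// {0: 64, 256: 16}: count whole sprints below `number`, newest config first.
func sprintNumber(number uint64) uint64 {
	sprints := []sprint{{0, 64}, {256, 16}} // sorted by `from`, as asSprints guarantees

	// drop configs that are not in effect yet at `number`
	for number < sprints[len(sprints)-1].from {
		sprints = sprints[:len(sprints)-1]
	}

	var count uint64
	end := number
	for len(sprints) > 0 {
		s := sprints[len(sprints)-1]
		count += (end - s.from) / s.size
		sprints = sprints[:len(sprints)-1]
		end = s.from
	}
	return count
}

func main() {
	// Worked example for block 271:
	//   16-block sprints cover [256, 271]: (271-256)/16 = 0 complete sprints
	//   64-block sprints cover [0, 256):   (256-0)/64   = 4 complete sprints
	// => sprint number 4, matching the test table above.
	fmt.Println(sprintNumber(271)) // 4
	fmt.Println(sprintNumber(272)) // 5: block 272 closes the first 16-block sprint
	fmt.Println(sprintNumber(320)) // 8
}
```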
-type UnauthorizedSignerError struct { - Number uint64 - Signer []byte -} - -func (e *UnauthorizedSignerError) Error() string { - return fmt.Sprintf( - "Signer 0x%x is not a part of the producer set at block %d", - e.Signer, - e.Number, - ) -} - // WrongDifficultyError is returned if the difficulty of a block doesn't match the // turn of the signer. type WrongDifficultyError struct { @@ -97,20 +66,3 @@ func (e *WrongDifficultyError) Error() string { e.Signer, ) } - -type InvalidStateReceivedError struct { - Number uint64 - LastStateID uint64 - To *time.Time - Event *clerk.EventRecordWithTime -} - -func (e *InvalidStateReceivedError) Error() string { - return fmt.Sprintf( - "Received invalid event %v at block %d. Requested events until %s. Last state id was %d", - e.Event, - e.Number, - e.To.Format(time.RFC3339), - e.LastStateID, - ) -} diff --git a/consensus/bor/fake.go b/polygon/bor/fake.go similarity index 100% rename from consensus/bor/fake.go rename to polygon/bor/fake.go diff --git a/consensus/bor/finality/api.go b/polygon/bor/finality/api.go similarity index 95% rename from consensus/bor/finality/api.go rename to polygon/bor/finality/api.go index 288080e570b..5df9ff2ca22 100644 --- a/consensus/bor/finality/api.go +++ b/polygon/bor/finality/api.go @@ -3,9 +3,9 @@ package finality import ( "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/consensus/bor/finality/whitelist" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/polygon/bor/finality/whitelist" ) func GetFinalizedBlockNumber(tx kv.Tx) uint64 { diff --git a/consensus/bor/finality/bor_verifier.go b/polygon/bor/finality/bor_verifier.go similarity index 96% rename from consensus/bor/finality/bor_verifier.go rename to polygon/bor/finality/bor_verifier.go index a8dde9dc1ce..9a6da3203e6 100644 --- a/consensus/bor/finality/bor_verifier.go +++ b/polygon/bor/finality/bor_verifier.go @@ -9,9 +9,8 @@ import ( "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon-lib/metrics" - "github.com/ledgerwatch/erigon/consensus/bor/finality/generics" - "github.com/ledgerwatch/erigon/consensus/bor/finality/whitelist" "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/polygon/bor/finality/whitelist" ) var ( @@ -161,5 +160,5 @@ func rewindBack(head uint64, rewindTo uint64) { // Chain cannot be rewinded from this routine // hence we are using a shared variable - generics.BorMilestoneRewind.Store(&rewindTo) + BorMilestoneRewind.Store(&rewindTo) } diff --git a/consensus/bor/finality/flags/flags.go b/polygon/bor/finality/flags/flags.go similarity index 100% rename from consensus/bor/finality/flags/flags.go rename to polygon/bor/finality/flags/flags.go diff --git a/polygon/bor/finality/generics/generics.go b/polygon/bor/finality/generics/generics.go new file mode 100644 index 00000000000..7185ee893da --- /dev/null +++ b/polygon/bor/finality/generics/generics.go @@ -0,0 +1,15 @@ +package generics + +import ( + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/core/types" +) + +func Empty[T any]() (t T) { + return +} + +type Response struct { + Headers []*types.Header + Hashes []libcommon.Hash +} diff --git a/polygon/bor/finality/milestone_rewind.go b/polygon/bor/finality/milestone_rewind.go new file mode 100644 index 00000000000..772d91d9845 --- /dev/null +++ b/polygon/bor/finality/milestone_rewind.go @@ -0,0 +1,12 @@ +package finality + +import "sync/atomic" 
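Before the milestone_rewind.go file above continues: the new generics package a few hunks up consists of a single zero-value helper. A sketch of how it reads at a call site; fetchHeader is a made-up function for illustration only:

```go
package main

import "fmt"

// Empty mirrors polygon/bor/finality/generics.Empty: a named zero-value
// return, handy for bailing out of a generic function without spelling out
// the zero literal for T.
func Empty[T any]() (t T) {
	return
}

// fetchHeader is hypothetical; it just shows the helper in an error path.
func fetchHeader(ok bool) (uint64, error) {
	if !ok {
		return Empty[uint64](), fmt.Errorf("not found")
	}
	return 42, nil
}

func main() {
	n, err := fetchHeader(false)
	fmt.Println(n, err) // 0 not found
}
```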
+ +// BorMilestoneRewind is used as a flag/variable +// Flag: if equals 0, no rewind according to bor whitelisting service +// Variable: if not equals 0, rewind chain back to BorMilestoneRewind +var BorMilestoneRewind atomic.Pointer[uint64] + +func IsMilestoneRewindPending() bool { + return BorMilestoneRewind.Load() != nil && *BorMilestoneRewind.Load() != 0 +} diff --git a/consensus/bor/finality/rawdb/checkpoint.go b/polygon/bor/finality/rawdb/checkpoint.go similarity index 100% rename from consensus/bor/finality/rawdb/checkpoint.go rename to polygon/bor/finality/rawdb/checkpoint.go diff --git a/consensus/bor/finality/rawdb/milestone.go b/polygon/bor/finality/rawdb/milestone.go similarity index 98% rename from consensus/bor/finality/rawdb/milestone.go rename to polygon/bor/finality/rawdb/milestone.go index d5ac8f49621..db748a42f73 100644 --- a/consensus/bor/finality/rawdb/milestone.go +++ b/polygon/bor/finality/rawdb/milestone.go @@ -7,7 +7,7 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/consensus/bor/finality/generics" + "github.com/ledgerwatch/erigon/polygon/bor/finality/generics" "github.com/ledgerwatch/log/v3" ) diff --git a/consensus/bor/finality/whitelist.go b/polygon/bor/finality/whitelist.go similarity index 90% rename from consensus/bor/finality/whitelist.go rename to polygon/bor/finality/whitelist.go index 76abfcc0d35..97584eb6f09 100644 --- a/consensus/bor/finality/whitelist.go +++ b/polygon/bor/finality/whitelist.go @@ -6,16 +6,18 @@ import ( "fmt" "time" + "github.com/ledgerwatch/log/v3" + + "github.com/ledgerwatch/erigon/polygon/heimdall" + "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/consensus/bor/finality/flags" - "github.com/ledgerwatch/erigon/consensus/bor/finality/whitelist" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall" + "github.com/ledgerwatch/erigon/polygon/bor/finality/flags" + "github.com/ledgerwatch/erigon/polygon/bor/finality/whitelist" "github.com/ledgerwatch/erigon/turbo/services" - "github.com/ledgerwatch/log/v3" ) type config struct { - heimdall heimdall.IHeimdallClient + heimdall heimdall.HeimdallClient borDB kv.RwDB chainDB kv.RwDB blockReader services.BlockReader @@ -28,7 +30,7 @@ type BorAPI interface { GetRootHash(start uint64, end uint64) (string, error) } -func Whitelist(heimdall heimdall.IHeimdallClient, borDB kv.RwDB, chainDB kv.RwDB, blockReader services.BlockReader, logger log.Logger, borAPI BorAPI, closeCh chan struct{}) { +func Whitelist(heimdall heimdall.HeimdallClient, borDB kv.RwDB, chainDB kv.RwDB, blockReader services.BlockReader, logger log.Logger, borAPI BorAPI, closeCh chan struct{}) { if !flags.Milestone { return } @@ -94,7 +96,7 @@ func startNoAckMilestoneByIDService(config *config) { RetryHeimdallHandler(handleNoAckMilestoneByID, config, tickerDuration, noAckMilestoneTimeout, fnName) } -type heimdallHandler func(ctx context.Context, heimdallClient heimdall.IHeimdallClient, config *config) error +type heimdallHandler func(ctx context.Context, heimdallClient heimdall.HeimdallClient, config *config) error func RetryHeimdallHandler(fn heimdallHandler, config *config, tickerDuration time.Duration, timeout time.Duration, fnName string) { retryHeimdallHandler(fn, config, tickerDuration, timeout, fnName) @@ -157,7 +159,7 @@ func retryHeimdallHandler(fn heimdallHandler, config *config, tickerDuration tim } // handleWhitelistCheckpoint handles the checkpoint whitelist mechanism. 
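milestone_rewind.go, completed above, replaces the old generics.BorMilestoneRewind shared variable with a package-level atomic.Pointer. A runnable sketch of the flag's life cycle, mirroring what rewindBack in bor_verifier.go does on the producer side (the single Load in the predicate is a minor simplification of the original's double Load):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// BorMilestoneRewind mimics the flag above: nil or pointing at zero means
// "no rewind pending"; any other target is the block number to rewind to.
var BorMilestoneRewind atomic.Pointer[uint64]

func IsMilestoneRewindPending() bool {
	p := BorMilestoneRewind.Load()
	return p != nil && *p != 0
}

func main() {
	fmt.Println(IsMilestoneRewindPending()) // false: never set

	rewindTo := uint64(12_345)
	BorMilestoneRewind.Store(&rewindTo)     // what rewindBack does
	fmt.Println(IsMilestoneRewindPending()) // true: rewind requested

	var done uint64
	BorMilestoneRewind.Store(&done)         // clearing once the rewind is serviced
	fmt.Println(IsMilestoneRewindPending()) // false again
}
```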
-func handleWhitelistCheckpoint(ctx context.Context, heimdallClient heimdall.IHeimdallClient, config *config) error { +func handleWhitelistCheckpoint(ctx context.Context, heimdallClient heimdall.HeimdallClient, config *config) error { service := whitelist.GetWhitelistingService() // Create a new bor verifier, which will be used to verify checkpoints and milestones @@ -177,7 +179,7 @@ func handleWhitelistCheckpoint(ctx context.Context, heimdallClient heimdall.IHei } // handleMilestone handles the milestone mechanism. -func handleMilestone(ctx context.Context, heimdallClient heimdall.IHeimdallClient, config *config) error { +func handleMilestone(ctx context.Context, heimdallClient heimdall.HeimdallClient, config *config) error { service := whitelist.GetWhitelistingService() // Create a new bor verifier, which will be used to verify checkpoints and milestones @@ -205,7 +207,7 @@ func handleMilestone(ctx context.Context, heimdallClient heimdall.IHeimdallClien return nil } -func handleNoAckMilestone(ctx context.Context, heimdallClient heimdall.IHeimdallClient, config *config) error { +func handleNoAckMilestone(ctx context.Context, heimdallClient heimdall.HeimdallClient, config *config) error { service := whitelist.GetWhitelistingService() milestoneID, err := fetchNoAckMilestone(ctx, heimdallClient, config.logger) @@ -222,7 +224,7 @@ func handleNoAckMilestone(ctx context.Context, heimdallClient heimdall.IHeimdall return nil } -func handleNoAckMilestoneByID(ctx context.Context, heimdallClient heimdall.IHeimdallClient, config *config) error { +func handleNoAckMilestoneByID(ctx context.Context, heimdallClient heimdall.HeimdallClient, config *config) error { service := whitelist.GetWhitelistingService() milestoneIDs := service.GetMilestoneIDsList() diff --git a/consensus/bor/finality/whitelist/checkpoint.go b/polygon/bor/finality/whitelist/checkpoint.go similarity index 94% rename from consensus/bor/finality/whitelist/checkpoint.go rename to polygon/bor/finality/whitelist/checkpoint.go index fc4a1443610..fd33dd656f0 100644 --- a/consensus/bor/finality/whitelist/checkpoint.go +++ b/polygon/bor/finality/whitelist/checkpoint.go @@ -3,8 +3,8 @@ package whitelist import ( "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/metrics" - "github.com/ledgerwatch/erigon/consensus/bor/finality/rawdb" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/polygon/bor/finality/rawdb" ) type checkpoint struct { diff --git a/consensus/bor/finality/whitelist/finality.go b/polygon/bor/finality/whitelist/finality.go similarity index 96% rename from consensus/bor/finality/whitelist/finality.go rename to polygon/bor/finality/whitelist/finality.go index f1abbbf3df6..9469a95c91f 100644 --- a/consensus/bor/finality/whitelist/finality.go +++ b/polygon/bor/finality/whitelist/finality.go @@ -5,8 +5,8 @@ import ( "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/consensus/bor/finality/rawdb" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/polygon/bor/finality/rawdb" "github.com/ledgerwatch/log/v3" ) diff --git a/consensus/bor/finality/whitelist/milestone.go b/polygon/bor/finality/whitelist/milestone.go similarity index 98% rename from consensus/bor/finality/whitelist/milestone.go rename to polygon/bor/finality/whitelist/milestone.go index 0d80ed4b5a7..b4777c13cae 100644 --- a/consensus/bor/finality/whitelist/milestone.go +++ b/polygon/bor/finality/whitelist/milestone.go @@ -5,9 +5,9 @@ 
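The whitelist services above are all built from one shape: a heimdallHandler invoked once immediately and then on a ticker, with each attempt bounded by a timeout. retryHeimdallHandler's body is not part of this diff, so the sketch below is only a guess at that shape inferred from the signature (tickerDuration, timeout, fnName, closeCh); the client and config parameters are dropped to keep it self-contained.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// handler stands in for the heimdallHandler function type above.
type handler func(ctx context.Context) error

// retryOnTicker: run the handler immediately, then on every tick, giving each
// attempt its own timeout, until closeCh is closed. This is an assumed shape,
// not the actual retryHeimdallHandler implementation.
func retryOnTicker(fn handler, tickerDuration, timeout time.Duration, name string, closeCh chan struct{}) {
	ticker := time.NewTicker(tickerDuration)
	defer ticker.Stop()

	run := func() {
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		defer cancel()
		if err := fn(ctx); err != nil {
			fmt.Printf("[bor.heimdall] %s failed: %v\n", name, err)
		}
	}

	run()
	for {
		select {
		case <-ticker.C:
			run()
		case <-closeCh:
			return
		}
	}
}

func main() {
	closeCh := make(chan struct{})
	go func() { time.Sleep(250 * time.Millisecond); close(closeCh) }()

	attempt := 0
	retryOnTicker(func(ctx context.Context) error {
		attempt++
		return fmt.Errorf("heimdall unreachable (attempt %d)", attempt)
	}, 100*time.Millisecond, 50*time.Millisecond, "handleMilestone", closeCh)
}
```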
import ( "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/metrics" - "github.com/ledgerwatch/erigon/consensus/bor/finality/flags" - "github.com/ledgerwatch/erigon/consensus/bor/finality/rawdb" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/polygon/bor/finality/flags" + "github.com/ledgerwatch/erigon/polygon/bor/finality/rawdb" ) type milestone struct { diff --git a/consensus/bor/finality/whitelist/service.go b/polygon/bor/finality/whitelist/service.go similarity index 98% rename from consensus/bor/finality/whitelist/service.go rename to polygon/bor/finality/whitelist/service.go index 7bf7aa89819..14dec13d799 100644 --- a/consensus/bor/finality/whitelist/service.go +++ b/polygon/bor/finality/whitelist/service.go @@ -5,8 +5,8 @@ import ( "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/consensus/bor/finality/rawdb" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/polygon/bor/finality/rawdb" ) var ( diff --git a/consensus/bor/finality/whitelist/service_test.go b/polygon/bor/finality/whitelist/service_test.go similarity index 99% rename from consensus/bor/finality/whitelist/service_test.go rename to polygon/bor/finality/whitelist/service_test.go index 0a45e6fe712..62fe3651dea 100644 --- a/consensus/bor/finality/whitelist/service_test.go +++ b/polygon/bor/finality/whitelist/service_test.go @@ -12,8 +12,8 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/memdb" - "github.com/ledgerwatch/erigon/consensus/bor/finality/rawdb" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/polygon/bor/finality/rawdb" "github.com/stretchr/testify/require" "pgregory.net/rapid" diff --git a/consensus/bor/finality/whitelist_helpers.go b/polygon/bor/finality/whitelist_helpers.go similarity index 92% rename from consensus/bor/finality/whitelist_helpers.go rename to polygon/bor/finality/whitelist_helpers.go index ddeb1e19dfb..f680c0d0fa7 100644 --- a/consensus/bor/finality/whitelist_helpers.go +++ b/polygon/bor/finality/whitelist_helpers.go @@ -4,10 +4,12 @@ import ( "context" "errors" - "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon/consensus/bor/finality/whitelist" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall" "github.com/ledgerwatch/log/v3" + + "github.com/ledgerwatch/erigon/polygon/heimdall" + + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/polygon/bor/finality/whitelist" ) var ( @@ -24,7 +26,7 @@ var ( // fetchWhitelistCheckpoint fetches the latest checkpoint from it's local heimdall // and verifies the data against bor data. -func fetchWhitelistCheckpoint(ctx context.Context, heimdallClient heimdall.IHeimdallClient, verifier *borVerifier, config *config) (uint64, common.Hash, error) { +func fetchWhitelistCheckpoint(ctx context.Context, heimdallClient heimdall.HeimdallClient, verifier *borVerifier, config *config) (uint64, common.Hash, error) { var ( blockNum uint64 blockHash common.Hash @@ -64,7 +66,7 @@ func fetchWhitelistCheckpoint(ctx context.Context, heimdallClient heimdall.IHeim // fetchWhitelistMilestone fetches the latest milestone from it's local heimdall // and verifies the data against bor data. 
-func fetchWhitelistMilestone(ctx context.Context, heimdallClient heimdall.IHeimdallClient, verifier *borVerifier, config *config) (uint64, common.Hash, error) { +func fetchWhitelistMilestone(ctx context.Context, heimdallClient heimdall.HeimdallClient, verifier *borVerifier, config *config) (uint64, common.Hash, error) { var ( num uint64 hash common.Hash @@ -99,7 +101,7 @@ func fetchWhitelistMilestone(ctx context.Context, heimdallClient heimdall.IHeimd return num, hash, nil } -func fetchNoAckMilestone(ctx context.Context, heimdallClient heimdall.IHeimdallClient, logger log.Logger) (string, error) { +func fetchNoAckMilestone(ctx context.Context, heimdallClient heimdall.HeimdallClient, logger log.Logger) (string, error) { var ( milestoneID string ) @@ -118,7 +120,7 @@ func fetchNoAckMilestone(ctx context.Context, heimdallClient heimdall.IHeimdallC return milestoneID, nil } -func fetchNoAckMilestoneByID(ctx context.Context, heimdallClient heimdall.IHeimdallClient, milestoneID string, logger log.Logger) error { +func fetchNoAckMilestoneByID(ctx context.Context, heimdallClient heimdall.HeimdallClient, milestoneID string, logger log.Logger) error { err := heimdallClient.FetchNoAckMilestone(ctx, milestoneID) if errors.Is(err, heimdall.ErrServiceUnavailable) { logger.Debug("[bor.heimdall] Failed to fetch no-ack milestone by ID", "milestoneID", milestoneID, "err", err) diff --git a/polygon/bor/genesis_contracts.go b/polygon/bor/genesis_contracts.go new file mode 100644 index 00000000000..59af92c777c --- /dev/null +++ b/polygon/bor/genesis_contracts.go @@ -0,0 +1,87 @@ +package bor + +import ( + "math/big" + "strings" + + "github.com/ledgerwatch/log/v3" + + "github.com/ledgerwatch/erigon-lib/chain" + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/accounts/abi" + "github.com/ledgerwatch/erigon/consensus" + "github.com/ledgerwatch/erigon/rlp" +) + +const ( + validatorSetABIJSON = 
`[{"constant":true,"inputs":[],"name":"SPRINT","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"SYSTEM_ADDRESS","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"CHAIN","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"FIRST_END_BLOCK","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"","type":"uint256"},{"internalType":"uint256","name":"","type":"uint256"}],"name":"producers","outputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"uint256","name":"power","type":"uint256"},{"internalType":"address","name":"signer","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"ROUND_TYPE","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"BOR_ID","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"spanNumbers","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"VOTE_TYPE","outputs":[{"internalType":"uint8","name":"","type":"uint8"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"","type":"uint256"},{"internalType":"uint256","name":"","type":"uint256"}],"name":"validators","outputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"uint256","name":"power","type":"uint256"},{"internalType":"address","name":"signer","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"spans","outputs":[{"internalType":"uint256","name":"number","type":"uint256"},{"internalType":"uint256","name":"startBlock","type":"uint256"},{"internalType":"uint256","name":"endBlock","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"inputs":[],"payable":false,"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":true,"internalType":"uint256","name":"startBlock","type":"uint256"},{"indexed":true,"internalType":"uint256","name":"endBlock","type":"uint256"}],"name":"NewSpan","type":"event"},{"constant":true,"inputs":[],"name":"currentSprint","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"}],"name":"getSpan","outputs":[{"internalType":"uint256","name":"number","type":"uint256"},{"internalType":"uint256","name":"startBlock","type":"uint256"},{"internalType":"uint256","name":"endBlock","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"
getCurrentSpan","outputs":[{"internalType":"uint256","name":"number","type":"uint256"},{"internalType":"uint256","name":"startBlock","type":"uint256"},{"internalType":"uint256","name":"endBlock","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getNextSpan","outputs":[{"internalType":"uint256","name":"number","type":"uint256"},{"internalType":"uint256","name":"startBlock","type":"uint256"},{"internalType":"uint256","name":"endBlock","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"number","type":"uint256"}],"name":"getSpanByBlock","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"currentSpanNumber","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"}],"name":"getValidatorsTotalStakeBySpan","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"}],"name":"getProducersTotalStakeBySpan","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"},{"internalType":"address","name":"signer","type":"address"}],"name":"getValidatorBySigner","outputs":[{"components":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"uint256","name":"power","type":"uint256"},{"internalType":"address","name":"signer","type":"address"}],"internalType":"struct 
BorValidatorSet.Validator","name":"result","type":"tuple"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"},{"internalType":"address","name":"signer","type":"address"}],"name":"isValidator","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"},{"internalType":"address","name":"signer","type":"address"}],"name":"isProducer","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"address","name":"signer","type":"address"}],"name":"isCurrentValidator","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"address","name":"signer","type":"address"}],"name":"isCurrentProducer","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"number","type":"uint256"}],"name":"getBorValidators","outputs":[{"internalType":"address[]","name":"","type":"address[]"},{"internalType":"uint256[]","name":"","type":"uint256[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getInitialValidators","outputs":[{"internalType":"address[]","name":"","type":"address[]"},{"internalType":"uint256[]","name":"","type":"uint256[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getValidators","outputs":[{"internalType":"address[]","name":"","type":"address[]"},{"internalType":"uint256[]","name":"","type":"uint256[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"uint256","name":"newSpan","type":"uint256"},{"internalType":"uint256","name":"startBlock","type":"uint256"},{"internalType":"uint256","name":"endBlock","type":"uint256"},{"internalType":"bytes","name":"validatorBytes","type":"bytes"},{"internalType":"bytes","name":"producerBytes","type":"bytes"}],"name":"commitSpan","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"internalType":"uint256","name":"span","type":"uint256"},{"internalType":"bytes32","name":"dataHash","type":"bytes32"},{"internalType":"bytes","name":"sigs","type":"bytes"}],"name":"getStakePowerBySigs","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"bytes32","name":"rootHash","type":"bytes32"},{"internalType":"bytes32","name":"leaf","type":"bytes32"},{"internalType":"bytes","name":"proof","type":"bytes"}],"name":"checkMembership","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"pure","type":"function"},{"constant":true,"inputs":[{"internalType":"bytes32","name":"d","type":"bytes32"}],"name":"leafNode","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"payable":false,"stateMutability":"pure","type":"function"},{"constant":true,"inputs":[{"internalType":"bytes32","name":"left","type":"bytes32"},{"internalType":"bytes32","name":"right","type":"bytes32"}],"name":"innerNode","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}
],"payable":false,"stateMutability":"pure","type":"function"}]` + stateReceiverABIJSON = `[{"constant":true,"inputs":[],"name":"SYSTEM_ADDRESS","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"lastStateId","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"uint256","name":"syncTime","type":"uint256"},{"internalType":"bytes","name":"recordBytes","type":"bytes"}],"name":"commitState","outputs":[{"internalType":"bool","name":"success","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]` +) + +var ( + validatorSetABI, _ = abi.JSON(strings.NewReader(validatorSetABIJSON)) + stateReceiverABI, _ = abi.JSON(strings.NewReader(stateReceiverABIJSON)) +) + +func GenesisContractValidatorSetABI() abi.ABI { + return validatorSetABI +} +func GenesisContractStateReceiverABI() abi.ABI { + return stateReceiverABI +} + +type GenesisContracts interface { + CommitState(event rlp.RawValue, syscall consensus.SystemCall) error + LastStateId(syscall consensus.SystemCall) (*big.Int, error) +} + +type GenesisContractsClient struct { + validatorSetABI abi.ABI + stateReceiverABI abi.ABI + ValidatorContract libcommon.Address + StateReceiverContract libcommon.Address + chainConfig *chain.Config + logger log.Logger +} + +func NewGenesisContractsClient( + chainConfig *chain.Config, + validatorContract, + stateReceiverContract string, + logger log.Logger, +) *GenesisContractsClient { + return &GenesisContractsClient{ + validatorSetABI: GenesisContractValidatorSetABI(), + stateReceiverABI: GenesisContractStateReceiverABI(), + ValidatorContract: libcommon.HexToAddress(validatorContract), + StateReceiverContract: libcommon.HexToAddress(stateReceiverContract), + chainConfig: chainConfig, + logger: logger, + } +} + +func (gc *GenesisContractsClient) CommitState(event rlp.RawValue, syscall consensus.SystemCall) error { + _, err := syscall(gc.StateReceiverContract, event) + return err +} + +func (gc *GenesisContractsClient) LastStateId(syscall consensus.SystemCall) (*big.Int, error) { + const method = "lastStateId" + + data, err := gc.stateReceiverABI.Pack(method) + if err != nil { + gc.logger.Error("[bor] Unable to pack tx for LastStateId", "err", err) + return nil, err + } + + result, err := syscall(gc.StateReceiverContract, data) + if err != nil { + return nil, err + } + + var ret = new(*big.Int) + if err := gc.stateReceiverABI.UnpackIntoInterface(ret, method, result); err != nil { + return nil, err + } + return *ret, nil +} diff --git a/consensus/bor/merkle.go b/polygon/bor/merkle.go similarity index 100% rename from consensus/bor/merkle.go rename to polygon/bor/merkle.go diff --git a/consensus/bor/snapshot.go b/polygon/bor/snapshot.go similarity index 60% rename from consensus/bor/snapshot.go rename to polygon/bor/snapshot.go index 836acf36343..a41c2acad1c 100644 --- a/consensus/bor/snapshot.go +++ b/polygon/bor/snapshot.go @@ -4,25 +4,26 @@ import ( "bytes" "context" "encoding/json" + "time" lru "github.com/hashicorp/golang-lru/arc/v2" - "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/consensus/bor/valset" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/log/v3" + 
"github.com/ledgerwatch/erigon/polygon/bor/borcfg" + "github.com/ledgerwatch/erigon/polygon/bor/valset" ) // Snapshot is the state of the authorization voting at a given point in time. type Snapshot struct { - config *chain.BorConfig // Consensus engine parameters to fine tune behavior + config *borcfg.BorConfig // Consensus engine parameters to fine tune behavior sigcache *lru.ARCCache[common.Hash, common.Address] // Cache of recent block signatures to speed up ecrecover - Number uint64 `json:"number"` // Block number where the snapshot was created - Hash common.Hash `json:"hash"` // Block hash where the snapshot was created - ValidatorSet *valset.ValidatorSet `json:"validatorSet"` // Validator set at this moment - Recents map[uint64]common.Address `json:"recents"` // Set of recent signers for spam protections + Number uint64 `json:"number"` // Block number where the snapshot was created + Hash common.Hash `json:"hash"` // Block hash where the snapshot was created + ValidatorSet *valset.ValidatorSet `json:"validatorSet"` // Validator set at this moment } const BorSeparate = "BorSeparate" @@ -38,7 +39,7 @@ const BorSeparate = "BorSeparate" // method does not initialize the set of recent signers, so only ever use if for // the genesis block. func NewSnapshot( - config *chain.BorConfig, + config *borcfg.BorConfig, sigcache *lru.ARCCache[common.Hash, common.Address], number uint64, hash common.Hash, @@ -50,14 +51,13 @@ func NewSnapshot( sigcache: sigcache, Number: number, Hash: hash, - ValidatorSet: valset.NewValidatorSet(validators, logger), - Recents: make(map[uint64]common.Address), + ValidatorSet: valset.NewValidatorSet(validators), } return snap } // loadSnapshot loads an existing snapshot from the database. -func LoadSnapshot(config *chain.BorConfig, sigcache *lru.ARCCache[common.Hash, common.Address], db kv.RwDB, hash common.Hash) (*Snapshot, error) { +func LoadSnapshot(config *borcfg.BorConfig, sigcache *lru.ARCCache[common.Hash, common.Address], db kv.RwDB, hash common.Hash) (*Snapshot, error) { tx, err := db.BeginRo(context.Background()) if err != nil { return nil, err @@ -109,12 +109,7 @@ func (s *Snapshot) copy() *Snapshot { Number: s.Number, Hash: s.Hash, ValidatorSet: s.ValidatorSet.Copy(), - Recents: make(map[uint64]common.Address), - } - for block, signer := range s.Recents { - cpy.Recents[block] = signer } - return cpy } @@ -139,38 +134,17 @@ func (s *Snapshot) Apply(parent *types.Header, headers []*types.Header, logger l for _, header := range headers { // Remove any votes on checkpoint blocks number := header.Number.Uint64() - sprintLen := s.config.CalculateSprint(number) + sprintLen := s.config.CalculateSprintLength(number) - // Delete the oldest signer from the recent list to allow it signing again - if number >= sprintLen { - delete(snap.Recents, number-sprintLen) + if err := ValidateHeaderTime(header, time.Now(), parent, snap.ValidatorSet, s.config, s.sigcache); err != nil { + return snap, err } - // Resolve the authorization key and check against signers - signer, err := Ecrecover(header, s.sigcache, s.config) + signer, err := Ecrecover(header, s.sigcache, s.config) if err != nil { return nil, err } - var validSigner bool - var succession int - - // check if signer is in validator set - if !snap.ValidatorSet.HasAddress(signer) { - return snap, &UnauthorizedSignerError{number, signer.Bytes()} - } - if succession, err = snap.GetSignerSuccessionNumber(signer); err != nil { - return snap, err - } - - // add recents - snap.Recents[number] = signer - - validSigner = true - - 
if parent != nil && header.Time < parent.Time+CalcProducerDelay(number, succession, s.config) { - return snap, &BlockTooSoonError{number, succession} - } difficulty := snap.Difficulty(signer) if header.Difficulty.Uint64() != difficulty { return snap, &WrongDifficultyError{number, difficulty, header.Difficulty.Uint64(), signer.Bytes()} @@ -178,7 +152,7 @@ func (s *Snapshot) Apply(parent *types.Header, headers []*types.Header, logger l // change validator set and change proposer if number > 0 && (number+1)%sprintLen == 0 { - if err := ValidateHeaderExtraField(header.Extra); err != nil { + if err := ValidateHeaderExtraLength(header.Extra); err != nil { return snap, err } validatorBytes := GetValidatorBytes(header, s.config) @@ -186,13 +160,10 @@ func (s *Snapshot) Apply(parent *types.Header, headers []*types.Header, logger l // get validators from headers and use that for new validator set newVals, _ := valset.ParseValidators(validatorBytes) v := getUpdatedValidatorSet(snap.ValidatorSet.Copy(), newVals, logger) - v.IncrementProposerPriority(1, logger) + v.IncrementProposerPriority(1) snap.ValidatorSet = v } - if number > 64 && !validSigner { - return snap, &UnauthorizedSignerError{number, signer.Bytes()} - } parent = header snap.Number = number snap.Hash = header.Hash() @@ -201,30 +172,8 @@ func (s *Snapshot) Apply(parent *types.Header, headers []*types.Header, logger l return snap, nil } -// GetSignerSuccessionNumber returns the relative position of signer in terms of the in-turn proposer func (s *Snapshot) GetSignerSuccessionNumber(signer common.Address) (int, error) { - validators := s.ValidatorSet.Validators - proposer := s.ValidatorSet.GetProposer().Address - proposerIndex, _ := s.ValidatorSet.GetByAddress(proposer) - - if proposerIndex == -1 { - return -1, &UnauthorizedProposerError{s.Number, proposer.Bytes()} - } - - signerIndex, _ := s.ValidatorSet.GetByAddress(signer) - - if signerIndex == -1 { - return -1, &UnauthorizedSignerError{s.Number, signer.Bytes()} - } - - tempIndex := signerIndex - if proposerIndex != tempIndex { - if tempIndex < proposerIndex { - tempIndex = tempIndex + len(validators) - } - } - - return tempIndex - proposerIndex, nil + return s.ValidatorSet.GetSignerSuccessionNumber(signer, s.Number) } // signers retrieves the list of authorized signers in ascending order. 
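The snapshot method bodies being removed here (GetSignerSuccessionNumber above, and Difficulty in the next hunk) encode the same ring arithmetic, now delegated to valset.ValidatorSet: succession is the signer's distance from the in-turn proposer around the validator ring, and difficulty is its mirror image. A standalone sketch of that arithmetic; the removed code's error handling for unknown proposers and signers is left out:

```go
package main

import "fmt"

// successionAndDifficulty reproduces the removed index arithmetic: wrap the
// signer index past the proposer if needed, take the distance, and derive
// difficulty as n minus that distance, so the in-turn proposer
// (succession 0) always seals with the highest difficulty n.
func successionAndDifficulty(n, proposerIndex, signerIndex int) (succession int, difficulty uint64) {
	tempIndex := signerIndex
	if tempIndex < proposerIndex {
		tempIndex += n
	}
	succession = tempIndex - proposerIndex
	difficulty = uint64(n - succession)
	return
}

func main() {
	// 4 validators, proposer at index 2.
	for signer := 0; signer < 4; signer++ {
		s, d := successionAndDifficulty(4, 2, signer)
		fmt.Printf("signer %d: succession %d, difficulty %d\n", signer, s, d)
	}
	// signer 2: succession 0, difficulty 4 (in turn)
	// signer 3: succession 1, difficulty 3
	// signer 0: succession 2, difficulty 2
	// signer 1: succession 3, difficulty 1
}
```

This is also why the Apply loop now rejects a header whose difficulty does not match Difficulty(signer): the two quantities are locked together by construction.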
@@ -244,18 +193,9 @@ func (s *Snapshot) Difficulty(signer common.Address) uint64 { return 1 } - validators := s.ValidatorSet.Validators - proposer := s.ValidatorSet.GetProposer().Address - totalValidators := len(validators) - - proposerIndex, _ := s.ValidatorSet.GetByAddress(proposer) - signerIndex, _ := s.ValidatorSet.GetByAddress(signer) - - // temp index - tempIndex := signerIndex - if tempIndex < proposerIndex { - tempIndex = tempIndex + totalValidators + if d, err := s.ValidatorSet.Difficulty(signer); err == nil { + return d + } else { + return 0 } - - return uint64(totalValidators - (tempIndex - proposerIndex)) } diff --git a/consensus/bor/snapshot_test.go b/polygon/bor/snapshot_test.go similarity index 86% rename from consensus/bor/snapshot_test.go rename to polygon/bor/snapshot_test.go index d3d827ab31b..e39674bca30 100644 --- a/consensus/bor/snapshot_test.go +++ b/polygon/bor/snapshot_test.go @@ -5,12 +5,12 @@ import ( "sort" "testing" - libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon/consensus/bor" - "github.com/ledgerwatch/erigon/consensus/bor/valset" - "github.com/ledgerwatch/log/v3" "github.com/maticnetwork/crand" "github.com/stretchr/testify/require" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/polygon/bor" + "github.com/ledgerwatch/erigon/polygon/bor/valset" ) const ( @@ -21,7 +21,7 @@ func TestGetSignerSuccessionNumber_ProposerIsSigner(t *testing.T) { t.Parallel() validators := buildRandomValidatorSet(numVals) - validatorSet := valset.NewValidatorSet(validators, log.New()) + validatorSet := valset.NewValidatorSet(validators) snap := bor.Snapshot{ ValidatorSet: validatorSet, } @@ -49,7 +49,7 @@ func TestGetSignerSuccessionNumber_SignerIndexIsLarger(t *testing.T) { // give highest ProposerPriority to a particular val, so that they become the proposer validators[proposerIndex].VotingPower = 200 snap := bor.Snapshot{ - ValidatorSet: valset.NewValidatorSet(validators, log.New()), + ValidatorSet: valset.NewValidatorSet(validators), } // choose a signer at an index greater than proposer index @@ -71,7 +71,7 @@ func TestGetSignerSuccessionNumber_SignerIndexIsSmaller(t *testing.T) { // give highest ProposerPriority to a particular val, so that they become the proposer validators[proposerIndex].VotingPower = 200 snap := bor.Snapshot{ - ValidatorSet: valset.NewValidatorSet(validators, log.New()), + ValidatorSet: valset.NewValidatorSet(validators), } // choose a signer at an index greater than proposer index @@ -89,7 +89,7 @@ func TestGetSignerSuccessionNumber_ProposerNotFound(t *testing.T) { validators := buildRandomValidatorSet(numVals) snap := bor.Snapshot{ - ValidatorSet: valset.NewValidatorSet(validators, log.New()), + ValidatorSet: valset.NewValidatorSet(validators), } dummyProposerAddress := randomAddress() @@ -101,7 +101,7 @@ func TestGetSignerSuccessionNumber_ProposerNotFound(t *testing.T) { _, err := snap.GetSignerSuccessionNumber(signer) require.NotNil(t, err) - e, ok := err.(*bor.UnauthorizedProposerError) + e, ok := err.(*valset.UnauthorizedProposerError) require.True(t, ok) require.Equal(t, dummyProposerAddress.Bytes(), e.Proposer) } @@ -111,14 +111,14 @@ func TestGetSignerSuccessionNumber_SignerNotFound(t *testing.T) { validators := buildRandomValidatorSet(numVals) snap := bor.Snapshot{ - ValidatorSet: valset.NewValidatorSet(validators, log.New()), + ValidatorSet: valset.NewValidatorSet(validators), } dummySignerAddress := randomAddress() _, err := 
snap.GetSignerSuccessionNumber(dummySignerAddress) require.NotNil(t, err) - e, ok := err.(*bor.UnauthorizedSignerError) + e, ok := err.(*valset.UnauthorizedSignerError) require.True(t, ok) require.Equal(t, dummySignerAddress.Bytes(), e.Signer) } diff --git a/polygon/bor/span_id.go b/polygon/bor/span_id.go new file mode 100644 index 00000000000..1c9348b6e1b --- /dev/null +++ b/polygon/bor/span_id.go @@ -0,0 +1,35 @@ +package bor + +import ( + "github.com/ledgerwatch/erigon/polygon/bor/borcfg" +) + +const ( + spanLength = 6400 // Number of blocks in a span + zerothSpanEnd = 255 // End block of 0th span +) + +// SpanIDAt returns the corresponding span id for the given block number. +func SpanIDAt(blockNum uint64) uint64 { + if blockNum > zerothSpanEnd { + return 1 + (blockNum-zerothSpanEnd-1)/spanLength + } + return 0 +} + +// SpanEndBlockNum returns the number of the last block in the given span. +func SpanEndBlockNum(spanID uint64) uint64 { + if spanID > 0 { + return spanID*spanLength + zerothSpanEnd + } + return zerothSpanEnd +} + +// IsBlockInLastSprintOfSpan returns true if a block num is within the last sprint of a span and false otherwise. +func IsBlockInLastSprintOfSpan(blockNum uint64, config *borcfg.BorConfig) bool { + spanNum := SpanIDAt(blockNum) + endBlockNum := SpanEndBlockNum(spanNum) + sprintLen := config.CalculateSprintLength(blockNum) + startBlockNum := endBlockNum - sprintLen + 1 + return startBlockNum <= blockNum && blockNum <= endBlockNum +} diff --git a/consensus/bor/span_id_test.go b/polygon/bor/span_id_test.go similarity index 62% rename from consensus/bor/span_id_test.go rename to polygon/bor/span_id_test.go index 62c82d96c58..3281486576a 100644 --- a/consensus/bor/span_id_test.go +++ b/polygon/bor/span_id_test.go @@ -1,7 +1,12 @@ package bor -import "testing" -import "github.com/stretchr/testify/assert" +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/ledgerwatch/erigon/polygon/bor/borcfg" +) func TestSpanIDAt(t *testing.T) { assert.Equal(t, uint64(0), SpanIDAt(0)) @@ -24,3 +29,16 @@ func TestSpanEndBlockNum(t *testing.T) { assert.Equal(t, uint64(13055), SpanEndBlockNum(2)) assert.Equal(t, uint64(43769855), SpanEndBlockNum(6839)) } + +func TestBlockInLastSprintOfSpan(t *testing.T) { + config := &borcfg.BorConfig{ + Sprint: map[string]uint64{ + "0": 16, + }, + } + assert.True(t, IsBlockInLastSprintOfSpan(6640, config)) + assert.True(t, IsBlockInLastSprintOfSpan(6645, config)) + assert.True(t, IsBlockInLastSprintOfSpan(6655, config)) + assert.False(t, IsBlockInLastSprintOfSpan(6639, config)) + assert.False(t, IsBlockInLastSprintOfSpan(6656, config)) +} diff --git a/consensus/bor/heimdall/span/spanner.go b/polygon/bor/spanner.go similarity index 71% rename from consensus/bor/heimdall/span/spanner.go rename to polygon/bor/spanner.go index b3738c4774c..728bc20f229 100644 --- a/consensus/bor/heimdall/span/spanner.go +++ b/polygon/bor/spanner.go @@ -1,37 +1,55 @@ -package span +package bor import ( "encoding/hex" "encoding/json" "math/big" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/consensus/bor/abi" - "github.com/ledgerwatch/erigon/consensus/bor/valset" + "github.com/ledgerwatch/erigon/polygon/bor/borcfg" + "github.com/ledgerwatch/erigon/polygon/bor/valset" + "github.com/ledgerwatch/erigon/polygon/heimdall" "github.com/ledgerwatch/erigon/rlp" - "github.com/ledgerwatch/log/v3" ) 
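Before the spanner.go rewrite continues below, the span id arithmetic introduced in span_id.go above is worth pinning down: span 0 covers blocks [0, 255], and every later span covers exactly 6400 blocks, so span N (N > 0) ends at N*6400 + 255. A sketch that reproduces the values asserted in the tests:

```go
package main

import "fmt"

const (
	spanLength    = 6400 // number of blocks in a span
	zerothSpanEnd = 255  // end block of the 0th span
)

// spanIDAt mirrors SpanIDAt above: blocks 0..255 belong to span 0, after
// which spans are fixed 6400-block windows.
func spanIDAt(blockNum uint64) uint64 {
	if blockNum > zerothSpanEnd {
		return 1 + (blockNum-zerothSpanEnd-1)/spanLength
	}
	return 0
}

// spanEndBlockNum mirrors SpanEndBlockNum above.
func spanEndBlockNum(spanID uint64) uint64 {
	if spanID > 0 {
		return spanID*spanLength + zerothSpanEnd
	}
	return zerothSpanEnd
}

func main() {
	fmt.Println(spanIDAt(255))         // 0: last block of the zeroth span
	fmt.Println(spanIDAt(256))         // 1: first block of span 1
	fmt.Println(spanIDAt(6655))        // 1: 6655 == 1*6400 + 255
	fmt.Println(spanIDAt(6656))        // 2
	fmt.Println(spanEndBlockNum(2))    // 13055, as asserted in the test above
	fmt.Println(spanEndBlockNum(6839)) // 43769855
}
```

With a 16-block sprint, the last sprint of span 1 is blocks 6640..6655, which matches the true/false boundaries checked in TestBlockInLastSprintOfSpan.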
+//go:generate mockgen -destination=./spanner_mock.go -package=bor . Spanner +type Spanner interface { + GetCurrentSpan(syscall consensus.SystemCall) (*heimdall.Span, error) + GetCurrentValidators(spanId uint64, signer libcommon.Address, chain consensus.ChainHeaderReader) ([]*valset.Validator, error) + GetCurrentProducers(spanId uint64, signer libcommon.Address, chain consensus.ChainHeaderReader) ([]*valset.Validator, error) + CommitSpan(heimdallSpan heimdall.HeimdallSpan, syscall consensus.SystemCall) error +} + +type ABI interface { + Pack(name string, args ...interface{}) ([]byte, error) + UnpackIntoInterface(v interface{}, name string, data []byte) error +} + type ChainSpanner struct { - validatorSet abi.ABI + validatorSet ABI chainConfig *chain.Config + borConfig *borcfg.BorConfig logger log.Logger withoutHeimdall bool } -func NewChainSpanner(validatorSet abi.ABI, chainConfig *chain.Config, withoutHeimdall bool, logger log.Logger) *ChainSpanner { +func NewChainSpanner(validatorSet ABI, chainConfig *chain.Config, withoutHeimdall bool, logger log.Logger) *ChainSpanner { + borConfig := chainConfig.Bor.(*borcfg.BorConfig) return &ChainSpanner{ validatorSet: validatorSet, chainConfig: chainConfig, + borConfig: borConfig, logger: logger, withoutHeimdall: withoutHeimdall, } } // GetCurrentSpan get current span from contract -func (c *ChainSpanner) GetCurrentSpan(syscall consensus.SystemCall) (*Span, error) { +func (c *ChainSpanner) GetCurrentSpan(syscall consensus.SystemCall) (*heimdall.Span, error) { // method const method = "getCurrentSpan" @@ -42,7 +60,7 @@ func (c *ChainSpanner) GetCurrentSpan(syscall consensus.SystemCall) (*Span, erro return nil, err } - result, err := syscall(libcommon.HexToAddress(c.chainConfig.Bor.ValidatorContract), data) + result, err := syscall(libcommon.HexToAddress(c.borConfig.ValidatorContract), data) if err != nil { return nil, err } @@ -59,7 +77,7 @@ func (c *ChainSpanner) GetCurrentSpan(syscall consensus.SystemCall) (*Span, erro } // create new span - span := Span{ + span := heimdall.Span{ ID: ret.Number.Uint64(), StartBlock: ret.StartBlock.Uint64(), EndBlock: ret.EndBlock.Uint64(), @@ -75,7 +93,7 @@ func (c *ChainSpanner) GetCurrentValidators(spanId uint64, signer libcommon.Addr } spanBytes := chain.BorSpan(spanId) - var span HeimdallSpan + var span heimdall.HeimdallSpan if err := json.Unmarshal(spanBytes, &span); err != nil { return nil, err } @@ -90,7 +108,7 @@ func (c *ChainSpanner) GetCurrentProducers(spanId uint64, signer libcommon.Addre } spanBytes := chain.BorSpan(spanId) - var span HeimdallSpan + var span heimdall.HeimdallSpan if err := json.Unmarshal(spanBytes, &span); err != nil { return nil, err } @@ -103,7 +121,7 @@ func (c *ChainSpanner) GetCurrentProducers(spanId uint64, signer libcommon.Addre return producers, nil } -func (c *ChainSpanner) CommitSpan(heimdallSpan HeimdallSpan, syscall consensus.SystemCall) error { +func (c *ChainSpanner) CommitSpan(heimdallSpan heimdall.HeimdallSpan, syscall consensus.SystemCall) error { // method const method = "commitSpan" @@ -149,7 +167,7 @@ func (c *ChainSpanner) CommitSpan(heimdallSpan HeimdallSpan, syscall consensus.S return err } - _, err = syscall(libcommon.HexToAddress(c.chainConfig.Bor.ValidatorContract), data) + _, err = syscall(libcommon.HexToAddress(c.borConfig.ValidatorContract), data) return err } diff --git a/consensus/bor/mock/spanner_mock.go b/polygon/bor/spanner_mock.go similarity index 88% rename from consensus/bor/mock/spanner_mock.go rename to polygon/bor/spanner_mock.go index 
70db933edd2..45d65a5178a 100644 --- a/consensus/bor/mock/spanner_mock.go +++ b/polygon/bor/spanner_mock.go @@ -1,8 +1,8 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ledgerwatch/erigon/consensus/bor (interfaces: Spanner) +// Source: github.com/ledgerwatch/erigon/polygon/bor (interfaces: Spanner) -// Package mock is a generated GoMock package. -package mock +// Package bor is a generated GoMock package. +package bor import ( reflect "reflect" @@ -10,8 +10,8 @@ import ( gomock "github.com/golang/mock/gomock" common "github.com/ledgerwatch/erigon-lib/common" consensus "github.com/ledgerwatch/erigon/consensus" - span "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" - valset "github.com/ledgerwatch/erigon/consensus/bor/valset" + valset "github.com/ledgerwatch/erigon/polygon/bor/valset" + heimdall "github.com/ledgerwatch/erigon/polygon/heimdall" ) // MockSpanner is a mock of Spanner interface. @@ -38,7 +38,7 @@ func (m *MockSpanner) EXPECT() *MockSpannerMockRecorder { } // CommitSpan mocks base method. -func (m *MockSpanner) CommitSpan(arg0 span.HeimdallSpan, arg1 consensus.SystemCall) error { +func (m *MockSpanner) CommitSpan(arg0 heimdall.HeimdallSpan, arg1 consensus.SystemCall) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CommitSpan", arg0, arg1) ret0, _ := ret[0].(error) @@ -67,10 +67,10 @@ func (mr *MockSpannerMockRecorder) GetCurrentProducers(arg0, arg1, arg2 interfac } // GetCurrentSpan mocks base method. -func (m *MockSpanner) GetCurrentSpan(arg0 consensus.SystemCall) (*span.Span, error) { +func (m *MockSpanner) GetCurrentSpan(arg0 consensus.SystemCall) (*heimdall.Span, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetCurrentSpan", arg0) - ret0, _ := ret[0].(*span.Span) + ret0, _ := ret[0].(*heimdall.Span) ret1, _ := ret[1].(error) return ret0, ret1 } diff --git a/consensus/bor/heimdall/span/testValidators.go b/polygon/bor/spanner_test_validators.go similarity index 94% rename from consensus/bor/heimdall/span/testValidators.go rename to polygon/bor/spanner_test_validators.go index 29cf1cc2e6a..7cc3d5d3ecc 100644 --- a/consensus/bor/heimdall/span/testValidators.go +++ b/polygon/bor/spanner_test_validators.go @@ -1,9 +1,9 @@ -package span +package bor import ( "github.com/ledgerwatch/erigon-lib/chain/networkname" "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon/consensus/bor/valset" + "github.com/ledgerwatch/erigon/polygon/bor/valset" ) // NetworkNameVals is a map of network name to validator set for tests/devnets diff --git a/consensus/bor/statefull/processor.go b/polygon/bor/statefull/processor.go similarity index 100% rename from consensus/bor/statefull/processor.go rename to polygon/bor/statefull/processor.go diff --git a/consensus/bor/valset/error.go b/polygon/bor/valset/errors.go similarity index 51% rename from consensus/bor/valset/error.go rename to polygon/bor/valset/errors.go index 37add216834..e44e8a268ba 100644 --- a/consensus/bor/valset/error.go +++ b/polygon/bor/valset/errors.go @@ -30,3 +30,31 @@ func (e *InvalidStartEndBlockError) Error() string { e.End, ) } + +// UnauthorizedProposerError is returned if a header is [being] signed by an unauthorized entity. +type UnauthorizedProposerError struct { + Number uint64 + Proposer []byte +} + +func (e *UnauthorizedProposerError) Error() string { + return fmt.Sprintf( + "Proposer 0x%x is not a part of the producer set at block %d", + e.Proposer, + e.Number, + ) +} + +// UnauthorizedSignerError is returned if a header is [being] signed by an unauthorized entity. 
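+// For illustration (not part of the patch), with Signer = []byte{0xab, 0xcd}
+// and Number = 7, the Error() method below yields:
+//   "Signer 0xabcd is not a part of the producer set at block 7"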
+type UnauthorizedSignerError struct { + Number uint64 + Signer []byte +} + +func (e *UnauthorizedSignerError) Error() string { + return fmt.Sprintf( + "Signer 0x%x is not a part of the producer set at block %d", + e.Signer, + e.Number, + ) +} diff --git a/consensus/bor/valset/validator.go b/polygon/bor/valset/validator.go similarity index 100% rename from consensus/bor/valset/validator.go rename to polygon/bor/valset/validator.go diff --git a/consensus/bor/valset/validator_set.go b/polygon/bor/valset/validator_set.go similarity index 91% rename from consensus/bor/valset/validator_set.go rename to polygon/bor/valset/validator_set.go index de2792d5285..8dffbd78f72 100644 --- a/consensus/bor/valset/validator_set.go +++ b/polygon/bor/valset/validator_set.go @@ -11,7 +11,6 @@ import ( "strings" libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/log/v3" ) // MaxTotalVotingPower - the maximum allowed total voting power. @@ -54,16 +53,16 @@ type ValidatorSet struct { // the new ValidatorSet will have an empty list of Validators. // The addresses of validators in `valz` must be unique otherwise the // function panics. -func NewValidatorSet(valz []*Validator, logger log.Logger) *ValidatorSet { +func NewValidatorSet(valz []*Validator) *ValidatorSet { vals := &ValidatorSet{} - err := vals.updateWithChangeSet(valz, false, logger) + err := vals.updateWithChangeSet(valz, false) if err != nil { panic(fmt.Sprintf("cannot create validator set: %s", err)) } if len(valz) > 0 { - vals.IncrementProposerPriority(1, logger) + vals.IncrementProposerPriority(1) } return vals @@ -75,9 +74,9 @@ func (vals *ValidatorSet) IsNilOrEmpty() bool { } // Increment ProposerPriority and update the proposer on a copy, and return it. -func (vals *ValidatorSet) CopyIncrementProposerPriority(times int, logger log.Logger) *ValidatorSet { +func (vals *ValidatorSet) CopyIncrementProposerPriority(times int) *ValidatorSet { validatorCopy := vals.Copy() - validatorCopy.IncrementProposerPriority(times, logger) + validatorCopy.IncrementProposerPriority(times) return validatorCopy } @@ -85,7 +84,7 @@ func (vals *ValidatorSet) CopyIncrementProposerPriority(times int, logger log.Lo // IncrementProposerPriority increments ProposerPriority of each validator and updates the // proposer. Panics if validator set is empty. // `times` must be positive. -func (vals *ValidatorSet) IncrementProposerPriority(times int, logger log.Logger) { +func (vals *ValidatorSet) IncrementProposerPriority(times int) { if vals.IsNilOrEmpty() { panic("empty validator set") } @@ -97,14 +96,14 @@ func (vals *ValidatorSet) IncrementProposerPriority(times int, logger log.Logger // Cap the difference between priorities to be proportional to 2*totalPower by // re-normalizing priorities, i.e., rescale all priorities by multiplying with: // 2*totalVotingPower/(maxPriority - minPriority) - diffMax := PriorityWindowSizeFactor * vals.TotalVotingPower(logger) + diffMax := PriorityWindowSizeFactor * vals.TotalVotingPower() vals.RescalePriorities(diffMax) vals.shiftByAvgProposerPriority() var proposer *Validator // Call IncrementProposerPriority(1) times times. 
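	// (Illustrative aside, not part of the change: with two validators A(power=1)
	// and B(power=2), successive increments pick proposers in a repeating
	// B, A, B pattern. Priorities evolve {A:1,B:2} -> B proposes and is
	// decremented by the total power 3 to {A:1,B:-1}; then {A:2,B:1} -> A
	// proposes -> {A:-1,B:1}; then {A:0,B:3} -> B proposes -> {A:0,B:0}.
	// Each validator's share of proposer slots matches its share of voting power.)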
for i := 0; i < times; i++ { - proposer = vals.incrementProposerPriority(logger) + proposer = vals.incrementProposerPriority() } vals.Proposer = proposer @@ -134,7 +133,7 @@ func (vals *ValidatorSet) RescalePriorities(diffMax int64) { } } -func (vals *ValidatorSet) incrementProposerPriority(logger log.Logger) *Validator { +func (vals *ValidatorSet) incrementProposerPriority() *Validator { for _, val := range vals.Validators { // Check for overflow for sum. newPrio := safeAddClip(val.ProposerPriority, val.VotingPower) @@ -143,7 +142,7 @@ func (vals *ValidatorSet) incrementProposerPriority(logger log.Logger) *Validato // Decrement the validator with most ProposerPriority. mostest := vals.getValWithMostPriority() // Mind the underflow. - mostest.ProposerPriority = safeSubClip(mostest.ProposerPriority, vals.TotalVotingPower(logger)) + mostest.ProposerPriority = safeSubClip(mostest.ProposerPriority, vals.TotalVotingPower()) return mostest } @@ -303,10 +302,8 @@ func (vals *ValidatorSet) UpdateTotalVotingPower() error { // TotalVotingPower returns the sum of the voting powers of all validators. // It recomputes the total voting power if required. -func (vals *ValidatorSet) TotalVotingPower(logger log.Logger) int64 { +func (vals *ValidatorSet) TotalVotingPower() int64 { if vals.totalVotingPower == 0 { - logger.Info("invoking updateTotalVotingPower before returning it") - if err := vals.UpdateTotalVotingPower(); err != nil { // Can/should we do better? panic(err) @@ -427,8 +424,8 @@ func processChanges(origChanges []*Validator) (updates, removals []*Validator, e // 'updates' should be a list of proper validator changes, i.e. they have been verified // by processChanges for duplicates and invalid values. // No changes are made to the validator set 'vals'. -func verifyUpdates(updates []*Validator, vals *ValidatorSet, logger log.Logger) (updatedTotalVotingPower int64, numNewValidators int, err error) { - updatedTotalVotingPower = vals.TotalVotingPower(logger) +func verifyUpdates(updates []*Validator, vals *ValidatorSet) (updatedTotalVotingPower int64, numNewValidators int, err error) { + updatedTotalVotingPower = vals.TotalVotingPower() for _, valUpdate := range updates { address := valUpdate.Address @@ -576,7 +573,7 @@ func (vals *ValidatorSet) applyRemovals(deletes []*Validator) { // If 'allowDeletes' is false then delete operations (identified by validators with voting power 0) // are not allowed and will trigger an error if present in 'changes'. // The 'allowDeletes' flag is set to false by NewValidatorSet() and to true by UpdateWithChangeSet(). -func (vals *ValidatorSet) updateWithChangeSet(changes []*Validator, allowDeletes bool, logger log.Logger) error { +func (vals *ValidatorSet) updateWithChangeSet(changes []*Validator, allowDeletes bool) error { if len(changes) < 1 { return nil } @@ -597,7 +594,7 @@ func (vals *ValidatorSet) updateWithChangeSet(changes []*Validator, allowDeletes } // Verify that applying the 'updates' against 'vals' will not result in error. - updatedTotalVotingPower, numNewValidators, err := verifyUpdates(updates, vals, logger) + updatedTotalVotingPower, numNewValidators, err := verifyUpdates(updates, vals) if err != nil { return err } @@ -618,7 +615,7 @@ func (vals *ValidatorSet) updateWithChangeSet(changes []*Validator, allowDeletes } // Scale and center. 
- vals.RescalePriorities(PriorityWindowSizeFactor * vals.TotalVotingPower(logger)) + vals.RescalePriorities(PriorityWindowSizeFactor * vals.TotalVotingPower()) vals.shiftByAvgProposerPriority() return nil @@ -652,8 +649,42 @@ func (vals *ValidatorSet) UpdateValidatorMap() { // // If an error is detected during verification steps, it is returned and the validator set // is not changed. -func (vals *ValidatorSet) UpdateWithChangeSet(changes []*Validator, logger log.Logger) error { - return vals.updateWithChangeSet(changes, true, logger) +func (vals *ValidatorSet) UpdateWithChangeSet(changes []*Validator) error { + return vals.updateWithChangeSet(changes, true) +} + +// Difficulty returns the difficulty for a particular signer at the current snapshot number +func (vals *ValidatorSet) Difficulty(signer libcommon.Address) (uint64, error) { + indexDiff, err := vals.GetSignerSuccessionNumber(signer, 0) + if err != nil { + return 0, fmt.Errorf("ValidatorSet.Difficulty: %w", err) + } + + return uint64(len(vals.Validators) - indexDiff), nil +} + +// GetSignerSuccessionNumber returns the relative position of signer in terms of the in-turn proposer +func (vals *ValidatorSet) GetSignerSuccessionNumber(signer libcommon.Address, number uint64) (int, error) { + proposer := vals.GetProposer() + if proposer == nil { + return -1, &UnauthorizedProposerError{Number: number, Proposer: []byte{}} + } + + proposerIndex, _ := vals.GetByAddress(proposer.Address) + if proposerIndex < 0 { + return -1, &UnauthorizedProposerError{Number: number, Proposer: proposer.Address.Bytes()} + } + + signerIndex, _ := vals.GetByAddress(signer) + if signerIndex < 0 { + return -1, &UnauthorizedSignerError{Number: number, Signer: signer.Bytes()} + } + + indexDiff := signerIndex - proposerIndex + if indexDiff < 0 { + indexDiff += len(vals.Validators) + } + return indexDiff, nil } //----------------- diff --git a/consensus/bor/heimdall/checkpoint/checkpoint.go b/polygon/heimdall/checkpoint.go similarity index 98% rename from consensus/bor/heimdall/checkpoint/checkpoint.go rename to polygon/heimdall/checkpoint.go index ebced7beef8..267bb7d8b89 100644 --- a/consensus/bor/heimdall/checkpoint/checkpoint.go +++ b/polygon/heimdall/checkpoint.go @@ -1,4 +1,4 @@ -package checkpoint +package heimdall import ( "fmt" diff --git a/consensus/bor/heimdall/client.go b/polygon/heimdall/client.go similarity index 56% rename from consensus/bor/heimdall/client.go rename to polygon/heimdall/client.go index b0edb53c108..26339058077 100644 --- a/consensus/bor/heimdall/client.go +++ b/polygon/heimdall/client.go @@ -13,13 +13,9 @@ import ( "strings" "time" - "github.com/ledgerwatch/erigon-lib/metrics" - - "github.com/ledgerwatch/erigon/consensus/bor/clerk" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" "github.com/ledgerwatch/log/v3" + + "github.com/ledgerwatch/erigon-lib/metrics" ) var ( @@ -35,40 +31,67 @@ var ( const ( stateFetchLimit = 50 apiHeimdallTimeout = 10 * time.Second - retryCall = 5 * time.Second + retryBackOff = time.Second + maxRetries = 5 ) -type StateSyncEventsResponse struct { - Height string `json:"height"` - Result []*clerk.EventRecordWithTime `json:"result"` -} +//go:generate mockgen -destination=./client_mock.go -package=heimdall . 
HeimdallClient +type HeimdallClient interface { + StateSyncEvents(ctx context.Context, fromID uint64, to int64) ([]*EventRecordWithTime, error) + Span(ctx context.Context, spanID uint64) (*HeimdallSpan, error) + FetchCheckpoint(ctx context.Context, number int64) (*Checkpoint, error) + FetchCheckpointCount(ctx context.Context) (int64, error) + FetchMilestone(ctx context.Context, number int64) (*Milestone, error) + FetchMilestoneCount(ctx context.Context) (int64, error) -type SpanResponse struct { - Height string `json:"height"` - Result span.HeimdallSpan `json:"result"` + // FetchNoAckMilestone checks whether the milestone corresponding to the given ID failed in Heimdall + FetchNoAckMilestone(ctx context.Context, milestoneID string) error + + // FetchLastNoAckMilestone fetches the ID of the latest failed milestone + FetchLastNoAckMilestone(ctx context.Context) (string, error) + + // FetchMilestoneID checks whether the milestone corresponding to the given ID is in process in Heimdall + FetchMilestoneID(ctx context.Context, milestoneID string) error + + Close() } -type HeimdallClient struct { - urlString string - client http.Client - closeCh chan struct{} - logger log.Logger +type Client struct { + urlString string + client HttpClient + retryBackOff time.Duration + maxRetries int + closeCh chan struct{} + logger log.Logger } type Request struct { - client http.Client + client HttpClient url *url.URL start time.Time } -func NewHeimdallClient(urlString string, logger log.Logger) *HeimdallClient { - return &HeimdallClient{ - urlString: urlString, - logger: logger, - client: http.Client{ - Timeout: apiHeimdallTimeout, - }, - closeCh: make(chan struct{}), +//go:generate mockgen -destination=./http_client_mock.go -package=heimdall . HttpClient +type HttpClient interface { + Do(req *http.Request) (*http.Response, error) + CloseIdleConnections() +} + +func NewHeimdallClient(urlString string, logger log.Logger) *Client { + httpClient := &http.Client{ + Timeout: apiHeimdallTimeout, + } + return newHeimdallClient(urlString, httpClient, retryBackOff, maxRetries, logger) +} + +func newHeimdallClient(urlString string, httpClient HttpClient, retryBackOff time.Duration, maxRetries int, logger log.Logger) *Client { + return &Client{ + urlString: urlString, + logger: logger, + client: httpClient, + retryBackOff: retryBackOff, + maxRetries: maxRetries, + closeCh: make(chan struct{}), } } @@ -90,21 +113,29 @@ const ( fetchSpanFormat = "bor/span/%d" ) -func (h *HeimdallClient) StateSyncEvents(ctx context.Context, fromID uint64, to int64) ([]*clerk.EventRecordWithTime, error) { - eventRecords := make([]*clerk.EventRecordWithTime, 0) +func (c *Client) StateSyncEvents(ctx context.Context, fromID uint64, to int64) ([]*EventRecordWithTime, error) { + eventRecords := make([]*EventRecordWithTime, 0) for { - url, err := stateSyncURL(h.urlString, fromID, to) + url, err := stateSyncURL(c.urlString, fromID, to) if err != nil { return nil, err } - h.logger.Debug("[bor.heimdall] Fetching state sync events", "queryParams", url.RawQuery) + c.logger.Debug("[bor.heimdall] Fetching state sync events", "queryParams", url.RawQuery) ctx = withRequestType(ctx, stateSyncRequest) - response, err := FetchWithRetry[StateSyncEventsResponse](ctx, h.client, url, h.closeCh, h.logger) + response, err := FetchWithRetry[StateSyncEventsResponse](ctx, c, url) if err != nil { + if errors.Is(err, ErrNoResponse) { + // for more info check https://github.com/maticnetwork/heimdall/pull/993 + c.logger.Warn( + "[bor.heimdall] check 
heimdall logs to see if it is in sync - no response when querying state sync events", + "path", url.Path, + "queryParams", url.RawQuery, + ) + } return nil, err } @@ -129,15 +160,15 @@ func (h *HeimdallClient) StateSyncEvents(ctx context.Context, fromID uint64, to return eventRecords, nil } -func (h *HeimdallClient) Span(ctx context.Context, spanID uint64) (*span.HeimdallSpan, error) { - url, err := spanURL(h.urlString, spanID) +func (c *Client) Span(ctx context.Context, spanID uint64) (*HeimdallSpan, error) { + url, err := spanURL(c.urlString, spanID) if err != nil { return nil, err } ctx = withRequestType(ctx, spanRequest) - response, err := FetchWithRetry[SpanResponse](ctx, h.client, url, h.closeCh, h.logger) + response, err := FetchWithRetry[SpanResponse](ctx, c, url) if err != nil { return nil, err } @@ -146,15 +177,15 @@ func (h *HeimdallClient) Span(ctx context.Context, spanID uint64) (*span.Heimdal } // FetchCheckpoint fetches the checkpoint from heimdall -func (h *HeimdallClient) FetchCheckpoint(ctx context.Context, number int64) (*checkpoint.Checkpoint, error) { - url, err := checkpointURL(h.urlString, number) +func (c *Client) FetchCheckpoint(ctx context.Context, number int64) (*Checkpoint, error) { + url, err := checkpointURL(c.urlString, number) if err != nil { return nil, err } ctx = withRequestType(ctx, checkpointRequest) - response, err := FetchWithRetry[checkpoint.CheckpointResponse](ctx, h.client, url, h.closeCh, h.logger) + response, err := FetchWithRetry[CheckpointResponse](ctx, c, url) if err != nil { return nil, err } @@ -168,8 +199,8 @@ func isInvalidMilestoneIndexError(err error) bool { } // FetchMilestone fetches a milestone from heimdall -func (h *HeimdallClient) FetchMilestone(ctx context.Context, number int64) (*milestone.Milestone, error) { - url, err := milestoneURL(h.urlString, number) +func (c *Client) FetchMilestone(ctx context.Context, number int64) (*Milestone, error) { + url, err := milestoneURL(c.urlString, number) if err != nil { return nil, err } @@ -180,7 +211,7 @@ func (h *HeimdallClient) FetchMilestone(ctx context.Context, number int64) (*mil return !isInvalidMilestoneIndexError(err) } - response, err := FetchWithRetryEx[milestone.MilestoneResponse](ctx, h.client, url, isRecoverableError, h.closeCh, h.logger) + response, err := FetchWithRetryEx[MilestoneResponse](ctx, c, url, isRecoverableError) if err != nil { if isInvalidMilestoneIndexError(err) { return nil, fmt.Errorf("%w: number %d", ErrNotInMilestoneList, number) @@ -192,15 +223,15 @@ func (h *HeimdallClient) FetchMilestone(ctx context.Context, number int64) (*mil } // FetchCheckpointCount fetches the checkpoint count from heimdall -func (h *HeimdallClient) FetchCheckpointCount(ctx context.Context) (int64, error) { - url, err := checkpointCountURL(h.urlString) +func (c *Client) FetchCheckpointCount(ctx context.Context) (int64, error) { + url, err := checkpointCountURL(c.urlString) if err != nil { return 0, err } ctx = withRequestType(ctx, checkpointCountRequest) - response, err := FetchWithRetry[checkpoint.CheckpointCountResponse](ctx, h.client, url, h.closeCh, h.logger) + response, err := FetchWithRetry[CheckpointCountResponse](ctx, c, url) if err != nil { return 0, err } @@ -209,15 +240,15 @@ func (h *HeimdallClient) FetchCheckpointCount(ctx context.Context) (int64, error } // FetchMilestoneCount fetches the milestone count from heimdall -func (h *HeimdallClient) FetchMilestoneCount(ctx context.Context) (int64, error) { - url, err := milestoneCountURL(h.urlString) +func (c *Client) 
FetchMilestoneCount(ctx context.Context) (int64, error) { + url, err := milestoneCountURL(c.urlString) if err != nil { return 0, err } ctx = withRequestType(ctx, milestoneCountRequest) - response, err := FetchWithRetry[milestone.MilestoneCountResponse](ctx, h.client, url, h.closeCh, h.logger) + response, err := FetchWithRetry[MilestoneCountResponse](ctx, c, url) if err != nil { return 0, err } @@ -226,15 +257,15 @@ func (h *HeimdallClient) FetchMilestoneCount(ctx context.Context) (int64, error) } // FetchLastNoAckMilestone fetches the last no-ack-milestone from heimdall -func (h *HeimdallClient) FetchLastNoAckMilestone(ctx context.Context) (string, error) { - url, err := lastNoAckMilestoneURL(h.urlString) +func (c *Client) FetchLastNoAckMilestone(ctx context.Context) (string, error) { + url, err := lastNoAckMilestoneURL(c.urlString) if err != nil { return "", err } ctx = withRequestType(ctx, milestoneLastNoAckRequest) - response, err := FetchWithRetry[milestone.MilestoneLastNoAckResponse](ctx, h.client, url, h.closeCh, h.logger) + response, err := FetchWithRetry[MilestoneLastNoAckResponse](ctx, c, url) if err != nil { return "", err } @@ -243,15 +274,15 @@ func (h *HeimdallClient) FetchLastNoAckMilestone(ctx context.Context) (string, e } // FetchNoAckMilestone fetches the last no-ack-milestone from heimdall -func (h *HeimdallClient) FetchNoAckMilestone(ctx context.Context, milestoneID string) error { - url, err := noAckMilestoneURL(h.urlString, milestoneID) +func (c *Client) FetchNoAckMilestone(ctx context.Context, milestoneID string) error { + url, err := noAckMilestoneURL(c.urlString, milestoneID) if err != nil { return err } ctx = withRequestType(ctx, milestoneNoAckRequest) - response, err := FetchWithRetry[milestone.MilestoneNoAckResponse](ctx, h.client, url, h.closeCh, h.logger) + response, err := FetchWithRetry[MilestoneNoAckResponse](ctx, c, url) if err != nil { return err } @@ -263,17 +294,17 @@ func (h *HeimdallClient) FetchNoAckMilestone(ctx context.Context, milestoneID st return nil } -// FetchMilestoneID fetches the bool result from Heimdal whether the ID corresponding +// FetchMilestoneID fetches the bool result from Heimdall whether the ID corresponding // to the given milestone is in process in Heimdall -func (h *HeimdallClient) FetchMilestoneID(ctx context.Context, milestoneID string) error { - url, err := milestoneIDURL(h.urlString, milestoneID) +func (c *Client) FetchMilestoneID(ctx context.Context, milestoneID string) error { + url, err := milestoneIDURL(c.urlString, milestoneID) if err != nil { return err } ctx = withRequestType(ctx, milestoneIDRequest) - response, err := FetchWithRetry[milestone.MilestoneIDResponse](ctx, h.client, url, h.closeCh, h.logger) + response, err := FetchWithRetry[MilestoneIDResponse](ctx, c, url) if err != nil { return err @@ -287,33 +318,31 @@ func (h *HeimdallClient) FetchMilestoneID(ctx context.Context, milestoneID strin } // FetchWithRetry returns data from heimdall with retry -func FetchWithRetry[T any](ctx context.Context, client http.Client, url *url.URL, closeCh chan struct{}, logger log.Logger) (*T, error) { - return FetchWithRetryEx[T](ctx, client, url, nil, closeCh, logger) +func FetchWithRetry[T any](ctx context.Context, client *Client, url *url.URL) (*T, error) { + return FetchWithRetryEx[T](ctx, client, url, nil) } // FetchWithRetryEx returns data from heimdall with retry -func FetchWithRetryEx[T any](ctx context.Context, client http.Client, url *url.URL, isRecoverableError func(error) bool, closeCh chan struct{}, logger 
log.Logger) (*T, error) { +func FetchWithRetryEx[T any](ctx context.Context, client *Client, url *url.URL, isRecoverableError func(error) bool) (result *T, err error) { attempt := 0 - const logEach = 5 - // create a new ticker for retrying the request - ticker := time.NewTicker(retryCall) + ticker := time.NewTicker(client.retryBackOff) defer ticker.Stop() - for { + for attempt < client.maxRetries { attempt++ - request := &Request{client: client, url: url, start: time.Now()} - result, err := Fetch[T](ctx, request) + request := &Request{client: client.client, url: url, start: time.Now()} + result, err = Fetch[T](ctx, request) if err == nil { return result, nil } // 503 (Service Unavailable) is thrown when an endpoint isn't activated - // yet in heimdall. E.g. when the hardfork hasn't hit yet but heimdall + // yet in heimdall. E.g. when the hard fork hasn't hit yet but heimdall // is upgraded. if errors.Is(err, ErrServiceUnavailable) { - logger.Debug("[bor.heimdall] service unavailable at the moment", "path", url.Path, "attempt", attempt, "err", err) + client.logger.Debug("[bor.heimdall] service unavailable at the moment", "path", url.Path, "queryParams", url.RawQuery, "attempt", attempt, "err", err) return nil, err } @@ -321,21 +350,21 @@ func FetchWithRetryEx[T any](ctx context.Context, client http.Client, url *url.U return nil, err } - if attempt%logEach == 1 { - logger.Warn("[bor.heimdall] an error while fetching", "path", url.Path, "attempt", attempt, "err", err) - } + client.logger.Warn("[bor.heimdall] an error while fetching", "path", url.Path, "queryParams", url.RawQuery, "attempt", attempt, "err", err) select { case <-ctx.Done(): - logger.Debug("[bor.heimdall] request canceled", "reason", ctx.Err(), "path", url.Path, "attempt", attempt) + client.logger.Debug("[bor.heimdall] request canceled", "reason", ctx.Err(), "path", url.Path, "queryParams", url.RawQuery, "attempt", attempt) return nil, ctx.Err() - case <-closeCh: - logger.Debug("[bor.heimdall] shutdown detected, terminating request", "path", url.Path) + case <-client.closeCh: + client.logger.Debug("[bor.heimdall] shutdown detected, terminating request", "path", url.Path, "queryParams", url.RawQuery) return nil, ErrShutdownDetected case <-ticker.C: // retry } } + + return nil, err } // Fetch fetches response from heimdall @@ -355,7 +384,7 @@ func Fetch[T any](ctx context.Context, request *Request) (*T, error) { return nil, err } - if body == nil { + if len(body) == 0 { return nil, ErrNoResponse } @@ -430,7 +459,7 @@ func makeURL(urlString, rawPath, rawQuery string) (*url.URL, error) { } // internal fetch method -func internalFetch(ctx context.Context, client http.Client, u *url.URL) ([]byte, error) { +func internalFetch(ctx context.Context, client HttpClient, u *url.URL) ([]byte, error) { req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil) if err != nil { return nil, err @@ -441,10 +470,12 @@ func internalFetch(ctx context.Context, client http.Client, u *url.URL) ([]byte, return nil, err } - defer res.Body.Close() + defer func() { + _ = res.Body.Close() + }() if res.StatusCode == http.StatusServiceUnavailable { - return nil, fmt.Errorf("%w: url='%s'; status=%d;", ErrServiceUnavailable, u.String(), res.StatusCode) + return nil, fmt.Errorf("%w: url='%s', status=%d", ErrServiceUnavailable, u.String(), res.StatusCode) } // unmarshall data from buffer @@ -460,13 +491,13 @@ func internalFetch(ctx context.Context, client http.Client, u *url.URL) ([]byte, // check status code if res.StatusCode != 200 { - return 
nil, fmt.Errorf("%w: url='%s'; status=%d; body='%s';", ErrNotSuccessfulResponse, u.String(), res.StatusCode, string(body)) + return nil, fmt.Errorf("%w: url='%s', status=%d, body='%s'", ErrNotSuccessfulResponse, u.String(), res.StatusCode, string(body)) } return body, nil } -func internalFetchWithTimeout(ctx context.Context, client http.Client, url *url.URL) ([]byte, error) { +func internalFetchWithTimeout(ctx context.Context, client HttpClient, url *url.URL) ([]byte, error) { ctx, cancel := context.WithTimeout(ctx, apiHeimdallTimeout) defer cancel() @@ -475,7 +506,7 @@ func internalFetchWithTimeout(ctx context.Context, client http.Client, url *url. } // Close sends a signal to stop the running process -func (h *HeimdallClient) Close() { - close(h.closeCh) - h.client.CloseIdleConnections() +func (c *Client) Close() { + close(c.closeCh) + c.client.CloseIdleConnections() } diff --git a/polygon/heimdall/client_mock.go b/polygon/heimdall/client_mock.go new file mode 100644 index 00000000000..352ae1b5f3a --- /dev/null +++ b/polygon/heimdall/client_mock.go @@ -0,0 +1,180 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ledgerwatch/erigon/polygon/heimdall (interfaces: HeimdallClient) + +// Package heimdall is a generated GoMock package. +package heimdall + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockHeimdallClient is a mock of HeimdallClient interface. +type MockHeimdallClient struct { + ctrl *gomock.Controller + recorder *MockHeimdallClientMockRecorder +} + +// MockHeimdallClientMockRecorder is the mock recorder for MockHeimdallClient. +type MockHeimdallClientMockRecorder struct { + mock *MockHeimdallClient +} + +// NewMockHeimdallClient creates a new mock instance. +func NewMockHeimdallClient(ctrl *gomock.Controller) *MockHeimdallClient { + mock := &MockHeimdallClient{ctrl: ctrl} + mock.recorder = &MockHeimdallClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockHeimdallClient) EXPECT() *MockHeimdallClientMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockHeimdallClient) Close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close. +func (mr *MockHeimdallClientMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockHeimdallClient)(nil).Close)) +} + +// FetchCheckpoint mocks base method. +func (m *MockHeimdallClient) FetchCheckpoint(arg0 context.Context, arg1 int64) (*Checkpoint, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchCheckpoint", arg0, arg1) + ret0, _ := ret[0].(*Checkpoint) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchCheckpoint indicates an expected call of FetchCheckpoint. +func (mr *MockHeimdallClientMockRecorder) FetchCheckpoint(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchCheckpoint", reflect.TypeOf((*MockHeimdallClient)(nil).FetchCheckpoint), arg0, arg1) +} + +// FetchCheckpointCount mocks base method. +func (m *MockHeimdallClient) FetchCheckpointCount(arg0 context.Context) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchCheckpointCount", arg0) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchCheckpointCount indicates an expected call of FetchCheckpointCount. 
+func (mr *MockHeimdallClientMockRecorder) FetchCheckpointCount(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchCheckpointCount", reflect.TypeOf((*MockHeimdallClient)(nil).FetchCheckpointCount), arg0) +} + +// FetchLastNoAckMilestone mocks base method. +func (m *MockHeimdallClient) FetchLastNoAckMilestone(arg0 context.Context) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchLastNoAckMilestone", arg0) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchLastNoAckMilestone indicates an expected call of FetchLastNoAckMilestone. +func (mr *MockHeimdallClientMockRecorder) FetchLastNoAckMilestone(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchLastNoAckMilestone", reflect.TypeOf((*MockHeimdallClient)(nil).FetchLastNoAckMilestone), arg0) +} + +// FetchMilestone mocks base method. +func (m *MockHeimdallClient) FetchMilestone(arg0 context.Context, arg1 int64) (*Milestone, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchMilestone", arg0, arg1) + ret0, _ := ret[0].(*Milestone) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchMilestone indicates an expected call of FetchMilestone. +func (mr *MockHeimdallClientMockRecorder) FetchMilestone(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchMilestone", reflect.TypeOf((*MockHeimdallClient)(nil).FetchMilestone), arg0, arg1) +} + +// FetchMilestoneCount mocks base method. +func (m *MockHeimdallClient) FetchMilestoneCount(arg0 context.Context) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchMilestoneCount", arg0) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchMilestoneCount indicates an expected call of FetchMilestoneCount. +func (mr *MockHeimdallClientMockRecorder) FetchMilestoneCount(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchMilestoneCount", reflect.TypeOf((*MockHeimdallClient)(nil).FetchMilestoneCount), arg0) +} + +// FetchMilestoneID mocks base method. +func (m *MockHeimdallClient) FetchMilestoneID(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchMilestoneID", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// FetchMilestoneID indicates an expected call of FetchMilestoneID. +func (mr *MockHeimdallClientMockRecorder) FetchMilestoneID(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchMilestoneID", reflect.TypeOf((*MockHeimdallClient)(nil).FetchMilestoneID), arg0, arg1) +} + +// FetchNoAckMilestone mocks base method. +func (m *MockHeimdallClient) FetchNoAckMilestone(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchNoAckMilestone", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// FetchNoAckMilestone indicates an expected call of FetchNoAckMilestone. +func (mr *MockHeimdallClientMockRecorder) FetchNoAckMilestone(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchNoAckMilestone", reflect.TypeOf((*MockHeimdallClient)(nil).FetchNoAckMilestone), arg0, arg1) +} + +// Span mocks base method. 
+func (m *MockHeimdallClient) Span(arg0 context.Context, arg1 uint64) (*HeimdallSpan, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Span", arg0, arg1) + ret0, _ := ret[0].(*HeimdallSpan) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Span indicates an expected call of Span. +func (mr *MockHeimdallClientMockRecorder) Span(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Span", reflect.TypeOf((*MockHeimdallClient)(nil).Span), arg0, arg1) +} + +// StateSyncEvents mocks base method. +func (m *MockHeimdallClient) StateSyncEvents(arg0 context.Context, arg1 uint64, arg2 int64) ([]*EventRecordWithTime, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSyncEvents", arg0, arg1, arg2) + ret0, _ := ret[0].([]*EventRecordWithTime) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSyncEvents indicates an expected call of StateSyncEvents. +func (mr *MockHeimdallClientMockRecorder) StateSyncEvents(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSyncEvents", reflect.TypeOf((*MockHeimdallClient)(nil).StateSyncEvents), arg0, arg1, arg2) +} diff --git a/polygon/heimdall/client_test.go b/polygon/heimdall/client_test.go new file mode 100644 index 00000000000..19638dab8dc --- /dev/null +++ b/polygon/heimdall/client_test.go @@ -0,0 +1,63 @@ +package heimdall + +import ( + "context" + "io" + "net/http" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/ledgerwatch/log/v3" + "github.com/stretchr/testify/require" + + "github.com/ledgerwatch/erigon/turbo/testlog" +) + +type emptyBodyReadCloser struct{} + +func (ebrc emptyBodyReadCloser) Read(_ []byte) (n int, err error) { + return 0, io.EOF +} + +func (ebrc emptyBodyReadCloser) Close() error { + return nil +} + +func TestHeimdallClientFetchesTerminateUponTooManyErrors(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + httpClient := NewMockHttpClient(ctrl) + httpClient.EXPECT(). + Do(gomock.Any()). + Return(&http.Response{ + StatusCode: 404, + Body: emptyBodyReadCloser{}, + }, nil). + Times(5) + logger := testlog.Logger(t, log.LvlDebug) + heimdallClient := newHeimdallClient("https://dummyheimdal.com", httpClient, 100*time.Millisecond, 5, logger) + + spanRes, err := heimdallClient.Span(ctx, 1534) + require.Nil(t, spanRes) + require.Error(t, err) +} + +func TestHeimdallClientStateSyncEventsReturnsErrNoResponseWhenHttp200WithEmptyBody(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + httpClient := NewMockHttpClient(ctrl) + httpClient.EXPECT(). + Do(gomock.Any()). + Return(&http.Response{ + StatusCode: 200, + Body: emptyBodyReadCloser{}, + }, nil). 
+ Times(2) + logger := testlog.Logger(t, log.LvlDebug) + heimdallClient := newHeimdallClient("https://dummyheimdal.com", httpClient, time.Millisecond, 2, logger) + + spanRes, err := heimdallClient.StateSyncEvents(ctx, 100, time.Now().Unix()) + require.Nil(t, spanRes) + require.ErrorIs(t, err, ErrNoResponse) +} diff --git a/consensus/bor/clerk/clerk.go b/polygon/heimdall/event_record.go similarity index 89% rename from consensus/bor/clerk/clerk.go rename to polygon/heimdall/event_record.go index 39b9aefb8c7..09d9be406de 100644 --- a/consensus/bor/clerk/clerk.go +++ b/polygon/heimdall/event_record.go @@ -1,4 +1,4 @@ -package clerk +package heimdall import ( "fmt" @@ -47,3 +47,8 @@ func (e *EventRecordWithTime) BuildEventRecord() *EventRecord { ChainID: e.ChainID, } } + +type StateSyncEventsResponse struct { + Height string `json:"height"` + Result []*EventRecordWithTime `json:"result"` +} diff --git a/polygon/heimdall/http_client_mock.go b/polygon/heimdall/http_client_mock.go new file mode 100644 index 00000000000..bf1564bb500 --- /dev/null +++ b/polygon/heimdall/http_client_mock.go @@ -0,0 +1,62 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ledgerwatch/erigon/polygon/heimdall (interfaces: HttpClient) + +// Package heimdall is a generated GoMock package. +package heimdall + +import ( + http "net/http" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockHttpClient is a mock of HttpClient interface. +type MockHttpClient struct { + ctrl *gomock.Controller + recorder *MockHttpClientMockRecorder +} + +// MockHttpClientMockRecorder is the mock recorder for MockHttpClient. +type MockHttpClientMockRecorder struct { + mock *MockHttpClient +} + +// NewMockHttpClient creates a new mock instance. +func NewMockHttpClient(ctrl *gomock.Controller) *MockHttpClient { + mock := &MockHttpClient{ctrl: ctrl} + mock.recorder = &MockHttpClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockHttpClient) EXPECT() *MockHttpClientMockRecorder { + return m.recorder +} + +// CloseIdleConnections mocks base method. +func (m *MockHttpClient) CloseIdleConnections() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "CloseIdleConnections") +} + +// CloseIdleConnections indicates an expected call of CloseIdleConnections. +func (mr *MockHttpClientMockRecorder) CloseIdleConnections() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseIdleConnections", reflect.TypeOf((*MockHttpClient)(nil).CloseIdleConnections)) +} + +// Do mocks base method. +func (m *MockHttpClient) Do(arg0 *http.Request) (*http.Response, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Do", arg0) + ret0, _ := ret[0].(*http.Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Do indicates an expected call of Do. 
+func (mr *MockHttpClientMockRecorder) Do(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Do", reflect.TypeOf((*MockHttpClient)(nil).Do), arg0) +} diff --git a/consensus/bor/heimdall/metrics.go b/polygon/heimdall/metrics.go similarity index 100% rename from consensus/bor/heimdall/metrics.go rename to polygon/heimdall/metrics.go diff --git a/consensus/bor/heimdall/milestone/milestone.go b/polygon/heimdall/milestone.go similarity index 98% rename from consensus/bor/heimdall/milestone/milestone.go rename to polygon/heimdall/milestone.go index a849f4461bd..44cfa2e9c1b 100644 --- a/consensus/bor/heimdall/milestone/milestone.go +++ b/polygon/heimdall/milestone.go @@ -1,4 +1,4 @@ -package milestone +package heimdall import ( "math/big" diff --git a/consensus/bor/heimdall/span/span.go b/polygon/heimdall/span.go similarity index 83% rename from consensus/bor/heimdall/span/span.go rename to polygon/heimdall/span.go index 22d3dff2563..862f0573f43 100644 --- a/consensus/bor/heimdall/span/span.go +++ b/polygon/heimdall/span.go @@ -1,8 +1,9 @@ -package span +package heimdall import ( "github.com/google/btree" - "github.com/ledgerwatch/erigon/consensus/bor/valset" + + "github.com/ledgerwatch/erigon/polygon/bor/valset" ) // Span represents a current bor span @@ -28,3 +29,8 @@ func (hs *HeimdallSpan) Less(other btree.Item) bool { } return hs.EndBlock < otherHs.EndBlock } + +type SpanResponse struct { + Height string `json:"height"` + Result HeimdallSpan `json:"result"` +} diff --git a/polygon/sync/canonical_chain_builder.go b/polygon/sync/canonical_chain_builder.go new file mode 100644 index 00000000000..541100c9b7c --- /dev/null +++ b/polygon/sync/canonical_chain_builder.go @@ -0,0 +1,271 @@ +package sync + +import ( + "bytes" + "errors" + "fmt" + "time" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/polygon/bor" +) + +//go:generate mockgen -destination=./canonical_chain_builder_mock.go -package=sync . 
CanonicalChainBuilder +type CanonicalChainBuilder interface { + Reset(root *types.Header) + ContainsHash(hash libcommon.Hash) bool + Tip() *types.Header + HeadersInRange(start uint64, count uint64) []*types.Header + Prune(newRootNum uint64) error + Connect(headers []*types.Header) error +} + +type producerSlotIndex uint64 + +type forkTreeNode struct { + parent *forkTreeNode + children map[producerSlotIndex]*forkTreeNode + + header *types.Header + headerHash libcommon.Hash + + totalDifficulty uint64 +} + +type canonicalChainBuilderImpl struct { + root *forkTreeNode + tip *forkTreeNode + + difficultyCalc DifficultyCalculator + headerValidator HeaderValidator + spansCache *SpansCache +} + +func NewCanonicalChainBuilder( + root *types.Header, + difficultyCalc DifficultyCalculator, + headerValidator HeaderValidator, + spansCache *SpansCache, +) CanonicalChainBuilder { + impl := &canonicalChainBuilderImpl{ + difficultyCalc: difficultyCalc, + headerValidator: headerValidator, + spansCache: spansCache, + } + impl.Reset(root) + return impl +} + +func (impl *canonicalChainBuilderImpl) Reset(root *types.Header) { + impl.root = &forkTreeNode{ + children: make(map[producerSlotIndex]*forkTreeNode), + header: root, + headerHash: root.Hash(), + } + impl.tip = impl.root + if impl.spansCache != nil { + impl.spansCache.Prune(root.Number.Uint64()) + } +} + +// depth-first search +func (impl *canonicalChainBuilderImpl) enumerate(visitFunc func(*forkTreeNode) bool) { + stack := []*forkTreeNode{impl.root} + for len(stack) > 0 { + // pop + node := stack[len(stack)-1] + stack = stack[:len(stack)-1] + + if !visitFunc(node) { + break + } + + for _, child := range node.children { + stack = append(stack, child) + } + } +} + +func (impl *canonicalChainBuilderImpl) nodeByHash(hash libcommon.Hash) *forkTreeNode { + var result *forkTreeNode + impl.enumerate(func(node *forkTreeNode) bool { + if node.headerHash == hash { + result = node + } + return result == nil + }) + return result +} + +func (impl *canonicalChainBuilderImpl) ContainsHash(hash libcommon.Hash) bool { + return impl.nodeByHash(hash) != nil +} + +func (impl *canonicalChainBuilderImpl) Tip() *types.Header { + return impl.tip.header +} + +func (impl *canonicalChainBuilderImpl) Headers() []*types.Header { + var headers []*types.Header + node := impl.tip + for node != nil { + headers = append(headers, node.header) + node = node.parent + } + libcommon.SliceReverse(headers) + return headers +} + +func (impl *canonicalChainBuilderImpl) HeadersInRange(start uint64, count uint64) []*types.Header { + headers := impl.Headers() + if len(headers) == 0 { + return nil + } + if headers[0].Number.Uint64() > start { + return nil + } + if headers[len(headers)-1].Number.Uint64() < start+count-1 { + return nil + } + + offset := start - headers[0].Number.Uint64() + return headers[offset : offset+count] +} + +func (impl *canonicalChainBuilderImpl) Prune(newRootNum uint64) error { + if (newRootNum < impl.root.header.Number.Uint64()) || (newRootNum > impl.Tip().Number.Uint64()) { + return errors.New("canonicalChainBuilderImpl.Prune: newRootNum outside of the canonical chain") + } + + newRoot := impl.tip + for newRoot.header.Number.Uint64() > newRootNum { + newRoot = newRoot.parent + } + impl.root = newRoot + + if impl.spansCache != nil { + impl.spansCache.Prune(newRootNum) + } + return nil +} + +// compareForkTreeNodes compares 2 fork tree nodes. +// It returns a positive number if the chain ending at node1 is "better" than the chain ending at node2. 
+// The better node belongs to the canonical chain, and it has: +// * a greater total difficulty, +// * or a smaller block number, +// * or a lexicographically greater hash. +// See: https://github.com/maticnetwork/bor/blob/master/core/forkchoice.go#L82 +func compareForkTreeNodes(node1 *forkTreeNode, node2 *forkTreeNode) int { + difficultyDiff := int64(node1.totalDifficulty) - int64(node2.totalDifficulty) + if difficultyDiff != 0 { + return int(difficultyDiff) + } + blockNumDiff := node1.header.Number.Cmp(node2.header.Number) + if blockNumDiff != 0 { + return -blockNumDiff + } + return bytes.Compare(node1.headerHash.Bytes(), node2.headerHash.Bytes()) +} + +func (impl *canonicalChainBuilderImpl) updateTipIfNeeded(tipCandidate *forkTreeNode) { + if compareForkTreeNodes(tipCandidate, impl.tip) > 0 { + impl.tip = tipCandidate + } +} + +func (impl *canonicalChainBuilderImpl) Connect(headers []*types.Header) error { + if (len(headers) > 0) && (headers[0].Number != nil) && (headers[0].Number.Cmp(impl.root.header.Number) == 0) { + headers = headers[1:] + } + if len(headers) == 0 { + return nil + } + + parent := impl.nodeByHash(headers[0].ParentHash) + if parent == nil { + return errors.New("canonicalChainBuilderImpl.Connect: can't connect headers") + } + + headersHashes := libcommon.SliceMap(headers, func(header *types.Header) libcommon.Hash { + return header.Hash() + }) + + // check if headers are linked by ParentHash + for i, header := range headers[1:] { + if header.ParentHash != headersHashes[i] { + return errors.New("canonicalChainBuilderImpl.Connect: invalid headers slice ParentHash") + } + } + + // skip existing matching nodes until a new header is found + for len(headers) > 0 { + var matchingNode *forkTreeNode + for _, c := range parent.children { + if c.headerHash == headersHashes[0] { + matchingNode = c + break + } + } + if matchingNode != nil { + parent = matchingNode + headers = headers[1:] + headersHashes = headersHashes[1:] + } else { + break + } + } + + // if all headers are already inserted + if len(headers) == 0 { + return nil + } + + // attach nodes for the new headers + for i, header := range headers { + if (header.Number == nil) || (header.Number.Uint64() != parent.header.Number.Uint64()+1) { + return errors.New("canonicalChainBuilderImpl.Connect: invalid header.Number") + } + + if impl.headerValidator != nil { + if err := impl.headerValidator.ValidateHeader(header, parent.header, time.Now()); err != nil { + return fmt.Errorf("canonicalChainBuilderImpl.Connect: invalid header error %w", err) + } + } + + difficulty, err := impl.difficultyCalc.HeaderDifficulty(header) + if err != nil { + return fmt.Errorf("canonicalChainBuilderImpl.Connect: header difficulty error %w", err) + } + if (header.Difficulty == nil) || (header.Difficulty.Uint64() != difficulty) { + return &bor.WrongDifficultyError{ + Number: header.Number.Uint64(), + Expected: difficulty, + Actual: header.Difficulty.Uint64(), + Signer: []byte{}, + } + } + + slot := producerSlotIndex(difficulty) + if _, ok := parent.children[slot]; ok { + return errors.New("canonicalChainBuilderImpl.Connect: producer slot is already filled by a different header") + } + + node := &forkTreeNode{ + parent: parent, + children: make(map[producerSlotIndex]*forkTreeNode), + + header: header, + headerHash: headersHashes[i], + + totalDifficulty: parent.totalDifficulty + difficulty, + } + + parent.children[slot] = node + parent = node + impl.updateTipIfNeeded(node) + } + + return nil +} diff --git a/polygon/sync/canonical_chain_builder_mock.go 
b/polygon/sync/canonical_chain_builder_mock.go new file mode 100644 index 00000000000..0ee6e49e3c9 --- /dev/null +++ b/polygon/sync/canonical_chain_builder_mock.go @@ -0,0 +1,118 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ledgerwatch/erigon/polygon/sync (interfaces: CanonicalChainBuilder) + +// Package sync is a generated GoMock package. +package sync + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + common "github.com/ledgerwatch/erigon-lib/common" + types "github.com/ledgerwatch/erigon/core/types" +) + +// MockCanonicalChainBuilder is a mock of CanonicalChainBuilder interface. +type MockCanonicalChainBuilder struct { + ctrl *gomock.Controller + recorder *MockCanonicalChainBuilderMockRecorder +} + +// MockCanonicalChainBuilderMockRecorder is the mock recorder for MockCanonicalChainBuilder. +type MockCanonicalChainBuilderMockRecorder struct { + mock *MockCanonicalChainBuilder +} + +// NewMockCanonicalChainBuilder creates a new mock instance. +func NewMockCanonicalChainBuilder(ctrl *gomock.Controller) *MockCanonicalChainBuilder { + mock := &MockCanonicalChainBuilder{ctrl: ctrl} + mock.recorder = &MockCanonicalChainBuilderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockCanonicalChainBuilder) EXPECT() *MockCanonicalChainBuilderMockRecorder { + return m.recorder +} + +// Connect mocks base method. +func (m *MockCanonicalChainBuilder) Connect(arg0 []*types.Header) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Connect", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Connect indicates an expected call of Connect. +func (mr *MockCanonicalChainBuilderMockRecorder) Connect(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connect", reflect.TypeOf((*MockCanonicalChainBuilder)(nil).Connect), arg0) +} + +// ContainsHash mocks base method. +func (m *MockCanonicalChainBuilder) ContainsHash(arg0 common.Hash) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ContainsHash", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// ContainsHash indicates an expected call of ContainsHash. +func (mr *MockCanonicalChainBuilderMockRecorder) ContainsHash(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainsHash", reflect.TypeOf((*MockCanonicalChainBuilder)(nil).ContainsHash), arg0) +} + +// HeadersInRange mocks base method. +func (m *MockCanonicalChainBuilder) HeadersInRange(arg0, arg1 uint64) []*types.Header { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HeadersInRange", arg0, arg1) + ret0, _ := ret[0].([]*types.Header) + return ret0 +} + +// HeadersInRange indicates an expected call of HeadersInRange. +func (mr *MockCanonicalChainBuilderMockRecorder) HeadersInRange(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadersInRange", reflect.TypeOf((*MockCanonicalChainBuilder)(nil).HeadersInRange), arg0, arg1) +} + +// Prune mocks base method. +func (m *MockCanonicalChainBuilder) Prune(arg0 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Prune", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Prune indicates an expected call of Prune. 
+func (mr *MockCanonicalChainBuilderMockRecorder) Prune(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Prune", reflect.TypeOf((*MockCanonicalChainBuilder)(nil).Prune), arg0) +} + +// Reset mocks base method. +func (m *MockCanonicalChainBuilder) Reset(arg0 *types.Header) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Reset", arg0) +} + +// Reset indicates an expected call of Reset. +func (mr *MockCanonicalChainBuilderMockRecorder) Reset(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockCanonicalChainBuilder)(nil).Reset), arg0) +} + +// Tip mocks base method. +func (m *MockCanonicalChainBuilder) Tip() *types.Header { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Tip") + ret0, _ := ret[0].(*types.Header) + return ret0 +} + +// Tip indicates an expected call of Tip. +func (mr *MockCanonicalChainBuilderMockRecorder) Tip() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tip", reflect.TypeOf((*MockCanonicalChainBuilder)(nil).Tip)) +} diff --git a/polygon/sync/canonical_chain_builder_test.go b/polygon/sync/canonical_chain_builder_test.go new file mode 100644 index 00000000000..fec41c509c8 --- /dev/null +++ b/polygon/sync/canonical_chain_builder_test.go @@ -0,0 +1,246 @@ +package sync + +import ( + "bytes" + "errors" + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/ledgerwatch/erigon/core/types" + heimdallspan "github.com/ledgerwatch/erigon/polygon/heimdall" +) + +type testDifficultyCalculator struct { +} + +func (*testDifficultyCalculator) HeaderDifficulty(header *types.Header) (uint64, error) { + if header.Difficulty == nil { + return 0, errors.New("unset header.Difficulty") + } + return header.Difficulty.Uint64(), nil +} + +func (*testDifficultyCalculator) SetSpan(*heimdallspan.HeimdallSpan) {} + +func makeRoot() *types.Header { + return &types.Header{ + Number: big.NewInt(0), + } +} + +func makeCCB(root *types.Header) CanonicalChainBuilder { + difficultyCalc := testDifficultyCalculator{} + builder := NewCanonicalChainBuilder(root, &difficultyCalc, nil, nil) + return builder +} + +type connectCCBTest struct { + t *testing.T + root *types.Header + builder CanonicalChainBuilder + + currentHeaderTime uint64 +} + +func newConnectCCBTest(t *testing.T) (*connectCCBTest, *types.Header) { + root := makeRoot() + builder := makeCCB(root) + test := &connectCCBTest{ + t: t, + root: root, + builder: builder, + } + return test, root +} + +func (test *connectCCBTest) makeHeader(parent *types.Header, difficulty uint64) *types.Header { + test.currentHeaderTime++ + return &types.Header{ + ParentHash: parent.Hash(), + Difficulty: big.NewInt(int64(difficulty)), + Number: big.NewInt(parent.Number.Int64() + 1), + Time: test.currentHeaderTime, + Extra: bytes.Repeat([]byte{0x00}, types.ExtraVanityLength+types.ExtraSealLength), + } +} + +func (test *connectCCBTest) makeHeaders(parent *types.Header, difficulties []uint64) []*types.Header { + count := len(difficulties) + headers := make([]*types.Header, 0, count) + for i := 0; i < count; i++ { + header := test.makeHeader(parent, difficulties[i]) + headers = append(headers, header) + parent = header + } + return headers +} + +func (test *connectCCBTest) testConnect( + headers []*types.Header, + expectedTip *types.Header, + expectedHeaders []*types.Header, +) { + t := test.t + builder := test.builder 
+ + err := builder.Connect(headers) + require.Nil(t, err) + + newTip := builder.Tip() + assert.Equal(t, expectedTip.Hash(), newTip.Hash()) + + require.NotNil(t, newTip.Number) + count := uint64(len(expectedHeaders)) + start := newTip.Number.Uint64() - (count - 1) + + actualHeaders := builder.HeadersInRange(start, count) + require.Equal(t, len(expectedHeaders), len(actualHeaders)) + for i, h := range actualHeaders { + assert.Equal(t, expectedHeaders[i].Hash(), h.Hash()) + } +} + +func TestCCBEmptyState(t *testing.T) { + test, root := newConnectCCBTest(t) + + tip := test.builder.Tip() + assert.Equal(t, root.Hash(), tip.Hash()) + + headers := test.builder.HeadersInRange(0, 1) + require.Equal(t, 1, len(headers)) + assert.Equal(t, root.Hash(), headers[0].Hash()) +} + +func TestCCBConnectEmpty(t *testing.T) { + test, root := newConnectCCBTest(t) + test.testConnect([]*types.Header{}, root, []*types.Header{root}) +} + +// connect 0 to 0 +func TestCCBConnectRoot(t *testing.T) { + test, root := newConnectCCBTest(t) + test.testConnect([]*types.Header{root}, root, []*types.Header{root}) +} + +// connect 1 to 0 +func TestCCBConnectOneToRoot(t *testing.T) { + test, root := newConnectCCBTest(t) + newTip := test.makeHeader(root, 1) + test.testConnect([]*types.Header{newTip}, newTip, []*types.Header{root, newTip}) +} + +// connect 1-2-3 to 0 +func TestCCBConnectSomeToRoot(t *testing.T) { + test, root := newConnectCCBTest(t) + headers := test.makeHeaders(root, []uint64{1, 2, 3}) + test.testConnect(headers, headers[len(headers)-1], append([]*types.Header{root}, headers...)) +} + +// connect any subset of 0-1-2-3 to 0-1-2-3 +func TestCCBConnectOverlapsFull(t *testing.T) { + test, root := newConnectCCBTest(t) + headers := test.makeHeaders(root, []uint64{1, 2, 3}) + require.Nil(t, test.builder.Connect(headers)) + + expectedTip := headers[len(headers)-1] + expectedHeaders := append([]*types.Header{root}, headers...) + + for subsetLen := 1; subsetLen <= len(headers); subsetLen++ { + for i := 0; i+subsetLen-1 < len(expectedHeaders); i++ { + headers := expectedHeaders[i : i+subsetLen] + test.testConnect(headers, expectedTip, expectedHeaders) + } + } +} + +// connect 0-1 to 0 +func TestCCBConnectOverlapPartialOne(t *testing.T) { + test, root := newConnectCCBTest(t) + newTip := test.makeHeader(root, 1) + test.testConnect([]*types.Header{root, newTip}, newTip, []*types.Header{root, newTip}) +} + +// connect 2-3-4-5 to 0-1-2-3 +func TestCCBConnectOverlapPartialSome(t *testing.T) { + test, root := newConnectCCBTest(t) + headers := test.makeHeaders(root, []uint64{1, 2, 3}) + require.Nil(t, test.builder.Connect(headers)) + + overlapHeaders := append(headers[1:], test.makeHeaders(headers[len(headers)-1], []uint64{4, 5})...) + expectedTip := overlapHeaders[len(overlapHeaders)-1] + expectedHeaders := append([]*types.Header{root, headers[0]}, overlapHeaders...) 
+ test.testConnect(overlapHeaders, expectedTip, expectedHeaders) +} + +// connect 2 to 0-1 at 0, then connect 10 to 0-1 +func TestCCBConnectAltMainBecomesFork(t *testing.T) { + test, root := newConnectCCBTest(t) + header1 := test.makeHeader(root, 1) + header2 := test.makeHeader(root, 2) + require.Nil(t, test.builder.Connect([]*types.Header{header1})) + + // the tip changes to header2 + test.testConnect([]*types.Header{header2}, header2, []*types.Header{root, header2}) + + header10 := test.makeHeader(header1, 10) + test.testConnect([]*types.Header{header10}, header10, []*types.Header{root, header1, header10}) +} + +// connect 1 to 0-2 at 0, then connect 10 to 0-1 +func TestCCBConnectAltForkBecomesMain(t *testing.T) { + test, root := newConnectCCBTest(t) + header1 := test.makeHeader(root, 1) + header2 := test.makeHeader(root, 2) + require.Nil(t, test.builder.Connect([]*types.Header{header2})) + + // the tip stays at header2 + test.testConnect([]*types.Header{header1}, header2, []*types.Header{root, header2}) + + header10 := test.makeHeader(header1, 10) + test.testConnect([]*types.Header{header10}, header10, []*types.Header{root, header1, header10}) +} + +// connect 10 and 11 to 1, then 20 and 22 to 2 one by one starting from a [0-1, 0-2] tree +func TestCCBConnectAltForksAtLevel2(t *testing.T) { + test, root := newConnectCCBTest(t) + header1 := test.makeHeader(root, 1) + header10 := test.makeHeader(header1, 10) + header11 := test.makeHeader(header1, 11) + header2 := test.makeHeader(root, 2) + header20 := test.makeHeader(header2, 20) + header22 := test.makeHeader(header2, 22) + require.Nil(t, test.builder.Connect([]*types.Header{header1})) + require.Nil(t, test.builder.Connect([]*types.Header{header2})) + + test.testConnect([]*types.Header{header10}, header10, []*types.Header{root, header1, header10}) + test.testConnect([]*types.Header{header11}, header11, []*types.Header{root, header1, header11}) + test.testConnect([]*types.Header{header20}, header20, []*types.Header{root, header2, header20}) + test.testConnect([]*types.Header{header22}, header22, []*types.Header{root, header2, header22}) +} + +// connect 11 and 10 to 1, then 22 and 20 to 2 one by one starting from a [0-1, 0-2] tree +// then connect 100 to 10, and 200 to 20 +func TestCCBConnectAltForksAtLevel2Reverse(t *testing.T) { + test, root := newConnectCCBTest(t) + header1 := test.makeHeader(root, 1) + header10 := test.makeHeader(header1, 10) + header11 := test.makeHeader(header1, 11) + header2 := test.makeHeader(root, 2) + header20 := test.makeHeader(header2, 20) + header22 := test.makeHeader(header2, 22) + header100 := test.makeHeader(header10, 100) + header200 := test.makeHeader(header20, 200) + require.Nil(t, test.builder.Connect([]*types.Header{header1})) + require.Nil(t, test.builder.Connect([]*types.Header{header2})) + + test.testConnect([]*types.Header{header11}, header11, []*types.Header{root, header1, header11}) + test.testConnect([]*types.Header{header10}, header11, []*types.Header{root, header1, header11}) + test.testConnect([]*types.Header{header22}, header22, []*types.Header{root, header2, header22}) + test.testConnect([]*types.Header{header20}, header22, []*types.Header{root, header2, header22}) + + test.testConnect([]*types.Header{header100}, header100, []*types.Header{root, header1, header10, header100}) + test.testConnect([]*types.Header{header200}, header200, []*types.Header{root, header2, header20, header200}) +} diff --git a/polygon/sync/db.go b/polygon/sync/db.go index 560ab2bc1dd..9fc3ed9bcf6 100644 --- 
a/polygon/sync/db.go +++ b/polygon/sync/db.go @@ -2,7 +2,7 @@ package sync import "github.com/ledgerwatch/erigon/core/types" -//go:generate mockgen -destination=./mock/db_mock.go -package=mock . DB +//go:generate mockgen -destination=./db_mock.go -package=sync . DB type DB interface { WriteHeaders(headers []*types.Header) error } diff --git a/polygon/sync/mock/db_mock.go b/polygon/sync/db_mock.go similarity index 95% rename from polygon/sync/mock/db_mock.go rename to polygon/sync/db_mock.go index 22e6fa6b482..2993c959eff 100644 --- a/polygon/sync/mock/db_mock.go +++ b/polygon/sync/db_mock.go @@ -1,8 +1,8 @@ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ledgerwatch/erigon/polygon/sync (interfaces: DB) -// Package mock is a generated GoMock package. -package mock +// Package sync is a generated GoMock package. +package sync import ( reflect "reflect" diff --git a/polygon/sync/difficulty.go b/polygon/sync/difficulty.go new file mode 100644 index 00000000000..7880ade672f --- /dev/null +++ b/polygon/sync/difficulty.go @@ -0,0 +1,84 @@ +package sync + +import ( + "fmt" + + lru "github.com/hashicorp/golang-lru/arc/v2" + + "github.com/ledgerwatch/erigon/eth/stagedsync" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/polygon/bor" + "github.com/ledgerwatch/erigon/polygon/bor/borcfg" + "github.com/ledgerwatch/erigon/polygon/bor/valset" +) + +type DifficultyCalculator interface { + HeaderDifficulty(header *types.Header) (uint64, error) +} + +type difficultyCalculatorImpl struct { + borConfig *borcfg.BorConfig + spans *SpansCache + validatorSetFactory func(headerNum uint64) validatorSetInterface + signaturesCache *lru.ARCCache[libcommon.Hash, libcommon.Address] +} + +func NewDifficultyCalculator( + borConfig *borcfg.BorConfig, + spans *SpansCache, + validatorSetFactory func(headerNum uint64) validatorSetInterface, + signaturesCache *lru.ARCCache[libcommon.Hash, libcommon.Address], +) DifficultyCalculator { + if signaturesCache == nil { + var err error + signaturesCache, err = lru.NewARC[libcommon.Hash, libcommon.Address](stagedsync.InMemorySignatures) + if err != nil { + panic(err) + } + } + + impl := difficultyCalculatorImpl{ + borConfig: borConfig, + spans: spans, + validatorSetFactory: validatorSetFactory, + signaturesCache: signaturesCache, + } + + if validatorSetFactory == nil { + impl.validatorSetFactory = impl.makeValidatorSet + } + + return &impl +} + +func (impl *difficultyCalculatorImpl) makeValidatorSet(headerNum uint64) validatorSetInterface { + span := impl.spans.SpanAt(headerNum) + if span == nil { + return nil + } + return valset.NewValidatorSet(span.ValidatorSet.Validators) +} + +func (impl *difficultyCalculatorImpl) HeaderDifficulty(header *types.Header) (uint64, error) { + signer, err := bor.Ecrecover(header, impl.signaturesCache, impl.borConfig) + if err != nil { + return 0, err + } + return impl.signerDifficulty(signer, header.Number.Uint64()) +} + +func (impl *difficultyCalculatorImpl) signerDifficulty(signer libcommon.Address, headerNum uint64) (uint64, error) { + validatorSet := impl.validatorSetFactory(headerNum) + if validatorSet == nil { + return 0, fmt.Errorf("difficultyCalculatorImpl.signerDifficulty: no span at %d", headerNum) + } + + sprintNum := impl.borConfig.CalculateSprintNumber(headerNum) + if sprintNum > 0 { + validatorSet.IncrementProposerPriority(int(sprintNum)) + } + + return validatorSet.Difficulty(signer) +} diff --git a/polygon/sync/difficulty_test.go 
b/polygon/sync/difficulty_test.go new file mode 100644 index 00000000000..669b8dbcfc3 --- /dev/null +++ b/polygon/sync/difficulty_test.go @@ -0,0 +1,138 @@ +package sync + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stretchr/testify/require" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/polygon/bor/borcfg" +) + +type testValidatorSetInterface struct { + signers []libcommon.Address + sprintNum int +} + +func (v *testValidatorSetInterface) IncrementProposerPriority(times int) { + v.sprintNum = times +} + +func (v *testValidatorSetInterface) GetSignerSuccessionNumber(signer libcommon.Address, number uint64) (int, error) { + var i int + for (i < len(v.signers)) && (v.signers[i] != signer) { + i++ + } + + sprintOffset := v.sprintNum % len(v.signers) + var delta int + if i >= sprintOffset { + delta = i - sprintOffset + } else { + delta = i + len(v.signers) - sprintOffset + } + + return delta, nil +} + +func (v *testValidatorSetInterface) Difficulty(signer libcommon.Address) (uint64, error) { + delta, err := v.GetSignerSuccessionNumber(signer, 0) + if err != nil { + return 0, nil + } + return uint64(len(v.signers) - delta), nil +} + +func TestSignerDifficulty(t *testing.T) { + borConfig := borcfg.BorConfig{ + Sprint: map[string]uint64{"0": 16}, + } + signers := []libcommon.Address{ + libcommon.HexToAddress("00"), + libcommon.HexToAddress("01"), + libcommon.HexToAddress("02"), + } + validatorSetFactory := func(uint64) validatorSetInterface { return &testValidatorSetInterface{signers: signers} } + calc := NewDifficultyCalculator(&borConfig, nil, validatorSetFactory, nil).(*difficultyCalculatorImpl) + + var d uint64 + + // sprint 0 + d, _ = calc.signerDifficulty(signers[0], 0) + assert.Equal(t, uint64(3), d) + + d, _ = calc.signerDifficulty(signers[0], 1) + assert.Equal(t, uint64(3), d) + + d, _ = calc.signerDifficulty(signers[0], 15) + assert.Equal(t, uint64(3), d) + + d, _ = calc.signerDifficulty(signers[1], 0) + assert.Equal(t, uint64(2), d) + + d, _ = calc.signerDifficulty(signers[1], 1) + assert.Equal(t, uint64(2), d) + + d, _ = calc.signerDifficulty(signers[1], 15) + assert.Equal(t, uint64(2), d) + + d, _ = calc.signerDifficulty(signers[2], 0) + assert.Equal(t, uint64(1), d) + + d, _ = calc.signerDifficulty(signers[2], 1) + assert.Equal(t, uint64(1), d) + + d, _ = calc.signerDifficulty(signers[2], 15) + assert.Equal(t, uint64(1), d) + + // sprint 1 + d, _ = calc.signerDifficulty(signers[1], 16) + assert.Equal(t, uint64(3), d) + + d, _ = calc.signerDifficulty(signers[2], 16) + assert.Equal(t, uint64(2), d) + + d, _ = calc.signerDifficulty(signers[0], 16) + assert.Equal(t, uint64(1), d) + + // sprint 2 + d, _ = calc.signerDifficulty(signers[2], 32) + assert.Equal(t, uint64(3), d) + + d, _ = calc.signerDifficulty(signers[0], 32) + assert.Equal(t, uint64(2), d) + + d, _ = calc.signerDifficulty(signers[1], 32) + assert.Equal(t, uint64(1), d) + + // sprint 3 + d, _ = calc.signerDifficulty(signers[0], 48) + assert.Equal(t, uint64(3), d) + + d, _ = calc.signerDifficulty(signers[1], 48) + assert.Equal(t, uint64(2), d) + + d, _ = calc.signerDifficulty(signers[2], 48) + assert.Equal(t, uint64(1), d) +} + +func TestHeaderDifficultyNoSignature(t *testing.T) { + borConfig := borcfg.BorConfig{} + spans := NewSpansCache() + calc := NewDifficultyCalculator(&borConfig, spans, nil, nil) + + _, err := calc.HeaderDifficulty(new(types.Header)) + require.ErrorContains(t, err, "signature suffix 
missing") +} + +func TestSignerDifficultyNoSpan(t *testing.T) { + borConfig := borcfg.BorConfig{} + spans := NewSpansCache() + calc := NewDifficultyCalculator(&borConfig, spans, nil, nil).(*difficultyCalculatorImpl) + + _, err := calc.signerDifficulty(libcommon.HexToAddress("00"), 0) + require.ErrorContains(t, err, "no span") +} diff --git a/polygon/sync/header_downloader.go b/polygon/sync/header_downloader.go index 76a8f29f33a..2f67e268478 100644 --- a/polygon/sync/header_downloader.go +++ b/polygon/sync/header_downloader.go @@ -13,12 +13,11 @@ import ( "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/polygon/sync/peerinfo" ) const headerDownloaderLogPrefix = "HeaderDownloader" -func NewHeaderDownloader(logger log.Logger, sentry Sentry, db DB, heimdall Heimdall, verify HeaderVerifier) *HeaderDownloader { +func NewHeaderDownloader(logger log.Logger, sentry Sentry, db DB, heimdall Heimdall, verify StatePointHeadersVerifier) *HeaderDownloader { statePointHeadersMemo, err := lru.New[common.Hash, []*types.Header](sentry.MaxPeers()) if err != nil { panic(err) @@ -39,7 +38,7 @@ type HeaderDownloader struct { sentry Sentry db DB heimdall Heimdall - verify HeaderVerifier + verify StatePointHeadersVerifier statePointHeadersMemo *lru.Cache[common.Hash, []*types.Header] // statePoint.rootHash->[headers part of state point] } @@ -194,9 +193,9 @@ func (hd *HeaderDownloader) downloadUsingStatePoints(ctx context.Context, stateP } // choosePeers assumes peers are sorted in ascending order based on block num -func (hd *HeaderDownloader) choosePeers(peers peerinfo.PeersWithBlockNumInfo, statePoints statePoints) peerinfo.PeersWithBlockNumInfo { +func (hd *HeaderDownloader) choosePeers(peers PeersWithBlockNumInfo, statePoints statePoints) PeersWithBlockNumInfo { var peersIdx int - chosenPeers := make(peerinfo.PeersWithBlockNumInfo, 0, len(peers)) + chosenPeers := make(PeersWithBlockNumInfo, 0, len(peers)) for _, statePoint := range statePoints { if peersIdx >= len(peers) { break diff --git a/polygon/sync/header_downloader_test.go b/polygon/sync/header_downloader_test.go index f60ef0c6557..5c430bf4f6a 100644 --- a/polygon/sync/header_downloader_test.go +++ b/polygon/sync/header_downloader_test.go @@ -13,11 +13,8 @@ import ( "github.com/stretchr/testify/require" "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/polygon/sync/mock" - "github.com/ledgerwatch/erigon/polygon/sync/peerinfo" + "github.com/ledgerwatch/erigon/polygon/heimdall" "github.com/ledgerwatch/erigon/turbo/testlog" ) @@ -27,10 +24,10 @@ func newHeaderDownloaderTest(t *testing.T) *headerDownloaderTest { func newHeaderDownloaderTestWithOpts(t *testing.T, opts headerDownloaderTestOpts) *headerDownloaderTest { ctrl := gomock.NewController(t) - heimdall := mock.NewMockHeimdall(ctrl) - sentry := mock.NewMockSentry(ctrl) + heimdall := NewMockHeimdall(ctrl) + sentry := NewMockSentry(ctrl) sentry.EXPECT().MaxPeers().Return(100).Times(1) - db := mock.NewMockDB(ctrl) + db := NewMockDB(ctrl) logger := testlog.Logger(t, log.LvlDebug) headerVerifier := opts.getOrCreateDefaultHeaderVerifier() headerDownloader := NewHeaderDownloader(logger, sentry, db, heimdall, headerVerifier) @@ -43,10 +40,10 @@ func newHeaderDownloaderTestWithOpts(t *testing.T, opts headerDownloaderTestOpts } type 
headerDownloaderTestOpts struct { - headerVerifier HeaderVerifier + headerVerifier StatePointHeadersVerifier } -func (opts headerDownloaderTestOpts) getOrCreateDefaultHeaderVerifier() HeaderVerifier { +func (opts headerDownloaderTestOpts) getOrCreateDefaultHeaderVerifier() StatePointHeadersVerifier { if opts.headerVerifier == nil { return func(_ *statePoint, _ []*types.Header) error { return nil @@ -57,14 +54,14 @@ func (opts headerDownloaderTestOpts) getOrCreateDefaultHeaderVerifier() HeaderVe } type headerDownloaderTest struct { - heimdall *mock.MockHeimdall - sentry *mock.MockSentry - db *mock.MockDB + heimdall *MockHeimdall + sentry *MockSentry + db *MockDB headerDownloader *HeaderDownloader } -func (hdt headerDownloaderTest) fakePeers(count int, blockNums ...*big.Int) peerinfo.PeersWithBlockNumInfo { - peers := make(peerinfo.PeersWithBlockNumInfo, count) +func (hdt headerDownloaderTest) fakePeers(count int, blockNums ...*big.Int) PeersWithBlockNumInfo { + peers := make(PeersWithBlockNumInfo, count) for i := range peers { var blockNum *big.Int if i < len(blockNums) { @@ -73,7 +70,7 @@ func (hdt headerDownloaderTest) fakePeers(count int, blockNums ...*big.Int) peer blockNum = new(big.Int).SetUint64(math.MaxUint64) } - peers[i] = &peerinfo.PeerWithBlockNumInfo{ + peers[i] = &PeerWithBlockNumInfo{ ID: fmt.Sprintf("peer%d", i+1), BlockNum: blockNum, } @@ -82,11 +79,11 @@ func (hdt headerDownloaderTest) fakePeers(count int, blockNums ...*big.Int) peer return peers } -func (hdt headerDownloaderTest) fakeCheckpoints(count int) []*checkpoint.Checkpoint { - checkpoints := make([]*checkpoint.Checkpoint, count) +func (hdt headerDownloaderTest) fakeCheckpoints(count int) []*heimdall.Checkpoint { + checkpoints := make([]*heimdall.Checkpoint, count) for i := range checkpoints { num := i + 1 - checkpoints[i] = &checkpoint.Checkpoint{ + checkpoints[i] = &heimdall.Checkpoint{ StartBlock: big.NewInt(int64(num)), EndBlock: big.NewInt(int64(num)), RootHash: common.BytesToHash([]byte(fmt.Sprintf("0x%d", num))), @@ -96,11 +93,11 @@ func (hdt headerDownloaderTest) fakeCheckpoints(count int) []*checkpoint.Checkpo return checkpoints } -func (hdt headerDownloaderTest) fakeMilestones(count int) []*milestone.Milestone { - milestones := make([]*milestone.Milestone, count) +func (hdt headerDownloaderTest) fakeMilestones(count int) []*heimdall.Milestone { + milestones := make([]*heimdall.Milestone, count) for i := range milestones { num := i + 1 - milestones[i] = &milestone.Milestone{ + milestones[i] = &heimdall.Milestone{ StartBlock: big.NewInt(int64(num)), EndBlock: big.NewInt(int64(num)), Hash: common.BytesToHash([]byte(fmt.Sprintf("0x%d", num))), diff --git a/polygon/sync/header_time_validator.go b/polygon/sync/header_time_validator.go new file mode 100644 index 00000000000..d2da61764cc --- /dev/null +++ b/polygon/sync/header_time_validator.go @@ -0,0 +1,77 @@ +package sync + +import ( + "fmt" + "time" + + lru "github.com/hashicorp/golang-lru/arc/v2" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/stagedsync" + "github.com/ledgerwatch/erigon/polygon/bor" + "github.com/ledgerwatch/erigon/polygon/bor/borcfg" + "github.com/ledgerwatch/erigon/polygon/bor/valset" +) + +type HeaderTimeValidator interface { + ValidateHeaderTime(header *types.Header, now time.Time, parent *types.Header) error +} + +type headerTimeValidatorImpl struct { + borConfig *borcfg.BorConfig + spans *SpansCache + validatorSetFactory func(headerNum uint64) 
validatorSetInterface + signaturesCache *lru.ARCCache[libcommon.Hash, libcommon.Address] +} + +func NewHeaderTimeValidator( + borConfig *borcfg.BorConfig, + spans *SpansCache, + validatorSetFactory func(headerNum uint64) validatorSetInterface, + signaturesCache *lru.ARCCache[libcommon.Hash, libcommon.Address], +) HeaderTimeValidator { + if signaturesCache == nil { + var err error + signaturesCache, err = lru.NewARC[libcommon.Hash, libcommon.Address](stagedsync.InMemorySignatures) + if err != nil { + panic(err) + } + } + + impl := headerTimeValidatorImpl{ + borConfig: borConfig, + spans: spans, + validatorSetFactory: validatorSetFactory, + signaturesCache: signaturesCache, + } + + if validatorSetFactory == nil { + impl.validatorSetFactory = impl.makeValidatorSet + } + + return &impl +} + +func (impl *headerTimeValidatorImpl) makeValidatorSet(headerNum uint64) validatorSetInterface { + span := impl.spans.SpanAt(headerNum) + if span == nil { + return nil + } + return valset.NewValidatorSet(span.ValidatorSet.Validators) +} + +func (impl *headerTimeValidatorImpl) ValidateHeaderTime(header *types.Header, now time.Time, parent *types.Header) error { + headerNum := header.Number.Uint64() + validatorSet := impl.validatorSetFactory(headerNum) + if validatorSet == nil { + return fmt.Errorf("headerTimeValidatorImpl.ValidateHeaderTime: no span at %d", headerNum) + } + + sprintNum := impl.borConfig.CalculateSprintNumber(headerNum) + if sprintNum > 0 { + validatorSet.IncrementProposerPriority(int(sprintNum)) + } + + return bor.ValidateHeaderTime(header, now, parent, validatorSet, impl.borConfig, impl.signaturesCache) +} diff --git a/polygon/sync/header_validator.go b/polygon/sync/header_validator.go new file mode 100644 index 00000000000..81b0cf983e2 --- /dev/null +++ b/polygon/sync/header_validator.go @@ -0,0 +1,57 @@ +package sync + +import ( + "time" + + "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/polygon/bor" + "github.com/ledgerwatch/erigon/polygon/bor/borcfg" +) + +type HeaderValidator interface { + ValidateHeader(header *types.Header, parent *types.Header, now time.Time) error +} + +type headerValidatorImpl struct { + chainConfig *chain.Config + borConfig *borcfg.BorConfig + headerTimeValidator HeaderTimeValidator +} + +func NewHeaderValidator( + chainConfig *chain.Config, + borConfig *borcfg.BorConfig, + headerTimeValidator HeaderTimeValidator, +) HeaderValidator { + return &headerValidatorImpl{ + chainConfig: chainConfig, + borConfig: borConfig, + headerTimeValidator: headerTimeValidator, + } +} + +func (impl *headerValidatorImpl) ValidateHeader(header *types.Header, parent *types.Header, now time.Time) error { + if err := bor.ValidateHeaderUnusedFields(header); err != nil { + return err + } + + if err := bor.ValidateHeaderGas(header, parent, impl.chainConfig); err != nil { + return err + } + + if err := bor.ValidateHeaderExtraLength(header.Extra); err != nil { + return err + } + if err := bor.ValidateHeaderSprintValidators(header, impl.borConfig); err != nil { + return err + } + + if impl.headerTimeValidator != nil { + if err := impl.headerTimeValidator.ValidateHeaderTime(header, now, parent); err != nil { + return err + } + } + + return nil +} diff --git a/polygon/sync/header_verifier.go b/polygon/sync/header_verifier.go deleted file mode 100644 index 6898f384926..00000000000 --- a/polygon/sync/header_verifier.go +++ /dev/null @@ -1,5 +0,0 @@ -package sync - -import "github.com/ledgerwatch/erigon/core/types" - -type 
HeaderVerifier func(statePoint *statePoint, headers []*types.Header) error diff --git a/polygon/sync/heimdall.go b/polygon/sync/heimdall.go index 9dc9161dd06..0491b9c2367 100644 --- a/polygon/sync/heimdall.go +++ b/polygon/sync/heimdall.go @@ -8,33 +8,31 @@ import ( "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/erigon/consensus/bor" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/polygon/bor" + "github.com/ledgerwatch/erigon/polygon/heimdall" ) // Heimdall is a wrapper of Heimdall HTTP API // -//go:generate mockgen -destination=./mock/heimdall_mock.go -package=mock . Heimdall +//go:generate mockgen -destination=./heimdall_mock.go -package=sync . Heimdall type Heimdall interface { - FetchCheckpoints(ctx context.Context, start uint64) ([]*checkpoint.Checkpoint, error) - FetchMilestones(ctx context.Context, start uint64) ([]*milestone.Milestone, error) - FetchSpan(ctx context.Context, start uint64) (*span.HeimdallSpan, error) - OnMilestoneEvent(ctx context.Context, callback func(*milestone.Milestone)) error + FetchCheckpoints(ctx context.Context, start uint64) ([]*heimdall.Checkpoint, error) + FetchMilestones(ctx context.Context, start uint64) ([]*heimdall.Milestone, error) + FetchSpan(ctx context.Context, start uint64) (*heimdall.HeimdallSpan, error) + OnMilestoneEvent(ctx context.Context, callback func(*heimdall.Milestone)) error } // ErrIncompleteMilestoneRange happens when FetchMilestones is called with an old start block because old milestones are evicted var ErrIncompleteMilestoneRange = errors.New("milestone range doesn't contain the start block") type HeimdallImpl struct { - client heimdall.IHeimdallClient + client heimdall.HeimdallClient pollDelay time.Duration logger log.Logger } -func NewHeimdall(client heimdall.IHeimdallClient, logger log.Logger) Heimdall { +func NewHeimdall(client heimdall.HeimdallClient, logger log.Logger) Heimdall { impl := HeimdallImpl{ client: client, pollDelay: time.Second, @@ -54,27 +52,21 @@ func cmpNumToRange(n uint64, min *big.Int, max *big.Int) int { return 0 } -func cmpBlockNumToCheckpointRange(n uint64, c *checkpoint.Checkpoint) int { +func cmpBlockNumToCheckpointRange(n uint64, c *heimdall.Checkpoint) int { return cmpNumToRange(n, c.StartBlock, c.EndBlock) } -func cmpBlockNumToMilestoneRange(n uint64, m *milestone.Milestone) int { +func cmpBlockNumToMilestoneRange(n uint64, m *heimdall.Milestone) int { return cmpNumToRange(n, m.StartBlock, m.EndBlock) } -func reverse[T any](s []T) { - for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { - s[i], s[j] = s[j], s[i] - } -} - -func (impl *HeimdallImpl) FetchCheckpoints(ctx context.Context, start uint64) ([]*checkpoint.Checkpoint, error) { +func (impl *HeimdallImpl) FetchCheckpoints(ctx context.Context, start uint64) ([]*heimdall.Checkpoint, error) { count, err := impl.client.FetchCheckpointCount(ctx) if err != nil { return nil, err } - var checkpoints []*checkpoint.Checkpoint + var checkpoints []*heimdall.Checkpoint for i := count; i >= 1; i-- { c, err := impl.client.FetchCheckpoint(ctx, i) @@ -96,23 +88,23 @@ func (impl *HeimdallImpl) FetchCheckpoints(ctx context.Context, start uint64) ([ } } - reverse(checkpoints) + common.SliceReverse(checkpoints) return checkpoints, nil } -func (impl *HeimdallImpl) 
FetchMilestones(ctx context.Context, start uint64) ([]*milestone.Milestone, error) { +func (impl *HeimdallImpl) FetchMilestones(ctx context.Context, start uint64) ([]*heimdall.Milestone, error) { count, err := impl.client.FetchMilestoneCount(ctx) if err != nil { return nil, err } - var milestones []*milestone.Milestone + var milestones []*heimdall.Milestone for i := count; i >= 1; i-- { m, err := impl.client.FetchMilestone(ctx, i) if err != nil { if errors.Is(err, heimdall.ErrNotInMilestoneList) { - reverse(milestones) + common.SliceReverse(milestones) return milestones, ErrIncompleteMilestoneRange } return nil, err @@ -132,15 +124,15 @@ func (impl *HeimdallImpl) FetchMilestones(ctx context.Context, start uint64) ([] } } - reverse(milestones) + common.SliceReverse(milestones) return milestones, nil } -func (impl *HeimdallImpl) FetchSpan(ctx context.Context, start uint64) (*span.HeimdallSpan, error) { +func (impl *HeimdallImpl) FetchSpan(ctx context.Context, start uint64) (*heimdall.HeimdallSpan, error) { return impl.client.Span(ctx, bor.SpanIDAt(start)) } -func (impl *HeimdallImpl) OnMilestoneEvent(ctx context.Context, callback func(*milestone.Milestone)) error { +func (impl *HeimdallImpl) OnMilestoneEvent(ctx context.Context, callback func(*heimdall.Milestone)) error { currentCount, err := impl.client.FetchMilestoneCount(ctx) if err != nil { return err diff --git a/polygon/sync/mock/heimdall_mock.go b/polygon/sync/heimdall_mock.go similarity index 86% rename from polygon/sync/mock/heimdall_mock.go rename to polygon/sync/heimdall_mock.go index c38947dc559..ca7d1e1fbf6 100644 --- a/polygon/sync/mock/heimdall_mock.go +++ b/polygon/sync/heimdall_mock.go @@ -1,17 +1,16 @@ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ledgerwatch/erigon/polygon/sync (interfaces: Heimdall) -// Package mock is a generated GoMock package. -package mock +// Package sync is a generated GoMock package. +package sync import ( context "context" reflect "reflect" gomock "github.com/golang/mock/gomock" - checkpoint "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint" - milestone "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone" - span "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" + + checkpoint "github.com/ledgerwatch/erigon/polygon/heimdall" ) // MockHeimdall is a mock of Heimdall interface. @@ -53,10 +52,10 @@ func (mr *MockHeimdallMockRecorder) FetchCheckpoints(arg0, arg1 interface{}) *go } // FetchMilestones mocks base method. -func (m *MockHeimdall) FetchMilestones(arg0 context.Context, arg1 uint64) ([]*milestone.Milestone, error) { +func (m *MockHeimdall) FetchMilestones(arg0 context.Context, arg1 uint64) ([]*checkpoint.Milestone, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchMilestones", arg0, arg1) - ret0, _ := ret[0].([]*milestone.Milestone) + ret0, _ := ret[0].([]*checkpoint.Milestone) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -68,10 +67,10 @@ func (mr *MockHeimdallMockRecorder) FetchMilestones(arg0, arg1 interface{}) *gom } // FetchSpan mocks base method. 
-func (m *MockHeimdall) FetchSpan(arg0 context.Context, arg1 uint64) (*span.HeimdallSpan, error) { +func (m *MockHeimdall) FetchSpan(arg0 context.Context, arg1 uint64) (*checkpoint.HeimdallSpan, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchSpan", arg0, arg1) - ret0, _ := ret[0].(*span.HeimdallSpan) + ret0, _ := ret[0].(*checkpoint.HeimdallSpan) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -83,7 +82,7 @@ func (mr *MockHeimdallMockRecorder) FetchSpan(arg0, arg1 interface{}) *gomock.Ca } // OnMilestoneEvent mocks base method. -func (m *MockHeimdall) OnMilestoneEvent(arg0 context.Context, arg1 func(*milestone.Milestone)) error { +func (m *MockHeimdall) OnMilestoneEvent(arg0 context.Context, arg1 func(*checkpoint.Milestone)) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "OnMilestoneEvent", arg0, arg1) ret0, _ := ret[0].(error) diff --git a/polygon/sync/heimdall_test.go b/polygon/sync/heimdall_test.go index 2036feb84d5..2c4f11c075a 100644 --- a/polygon/sync/heimdall_test.go +++ b/polygon/sync/heimdall_test.go @@ -11,14 +11,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - heimdallclient "github.com/ledgerwatch/erigon/consensus/bor/heimdall" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone" - heimdallmock "github.com/ledgerwatch/erigon/consensus/bor/heimdall/mock" + heimdallclient "github.com/ledgerwatch/erigon/polygon/heimdall" ) -func makeCheckpoint(start uint64, len uint) *checkpoint.Checkpoint { - c := checkpoint.Checkpoint{ +func makeCheckpoint(start uint64, len uint) *heimdallclient.Checkpoint { + c := heimdallclient.Checkpoint{ StartBlock: new(big.Int).SetUint64(start), EndBlock: new(big.Int).SetUint64(start + uint64(len) - 1), Timestamp: uint64(time.Now().Unix()), @@ -26,8 +23,8 @@ func makeCheckpoint(start uint64, len uint) *checkpoint.Checkpoint { return &c } -func makeMilestone(start uint64, len uint) *milestone.Milestone { - m := milestone.Milestone{ +func makeMilestone(start uint64, len uint) *heimdallclient.Milestone { + m := heimdallclient.Milestone{ StartBlock: new(big.Int).SetUint64(start), EndBlock: new(big.Int).SetUint64(start + uint64(len) - 1), Timestamp: uint64(time.Now().Unix()), @@ -37,7 +34,7 @@ func makeMilestone(start uint64, len uint) *milestone.Milestone { type heimdallTest struct { ctx context.Context - client *heimdallmock.MockIHeimdallClient + client *heimdallclient.MockHeimdallClient heimdall Heimdall logger log.Logger } @@ -49,7 +46,7 @@ func newHeimdallTest(t *testing.T) heimdallTest { ctrl := gomock.NewController(t) t.Cleanup(ctrl.Finish) - client := heimdallmock.NewMockIHeimdallClient(ctrl) + client := heimdallclient.NewMockHeimdallClient(ctrl) heimdall := NewHeimdall(client, logger) return heimdallTest{ @@ -60,8 +57,8 @@ func newHeimdallTest(t *testing.T) heimdallTest { } } -func (test heimdallTest) setupCheckpoints(count int) []*checkpoint.Checkpoint { - var expectedCheckpoints []*checkpoint.Checkpoint +func (test heimdallTest) setupCheckpoints(count int) []*heimdallclient.Checkpoint { + var expectedCheckpoints []*heimdallclient.Checkpoint for i := 0; i < count; i++ { c := makeCheckpoint(uint64(i*256), 256) expectedCheckpoints = append(expectedCheckpoints, c) @@ -69,15 +66,15 @@ func (test heimdallTest) setupCheckpoints(count int) []*checkpoint.Checkpoint { client := test.client client.EXPECT().FetchCheckpointCount(gomock.Any()).Return(int64(len(expectedCheckpoints)), nil) - 
client.EXPECT().FetchCheckpoint(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, number int64) (*checkpoint.Checkpoint, error) { + client.EXPECT().FetchCheckpoint(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, number int64) (*heimdallclient.Checkpoint, error) { return expectedCheckpoints[number-1], nil }).AnyTimes() return expectedCheckpoints } -func (test heimdallTest) setupMilestones(count int) []*milestone.Milestone { - var expectedMilestones []*milestone.Milestone +func (test heimdallTest) setupMilestones(count int) []*heimdallclient.Milestone { + var expectedMilestones []*heimdallclient.Milestone for i := 0; i < count; i++ { m := makeMilestone(uint64(i*16), 16) expectedMilestones = append(expectedMilestones, m) @@ -85,7 +82,7 @@ func (test heimdallTest) setupMilestones(count int) []*milestone.Milestone { client := test.client client.EXPECT().FetchMilestoneCount(gomock.Any()).Return(int64(len(expectedMilestones)), nil) - client.EXPECT().FetchMilestone(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, number int64) (*milestone.Milestone, error) { + client.EXPECT().FetchMilestone(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, number int64) (*heimdallclient.Milestone, error) { return expectedMilestones[number-1], nil }).AnyTimes() @@ -191,7 +188,7 @@ func TestFetchMilestonesMiddleStart(t *testing.T) { func TestFetchMilestonesStartingBeforeEvictionPoint(t *testing.T) { test := newHeimdallTest(t) - var expectedMilestones []*milestone.Milestone + var expectedMilestones []*heimdallclient.Milestone for i := 0; i < 20; i++ { m := makeMilestone(uint64(i*16), 16) expectedMilestones = append(expectedMilestones, m) @@ -200,7 +197,7 @@ func TestFetchMilestonesStartingBeforeEvictionPoint(t *testing.T) { client := test.client client.EXPECT().FetchMilestoneCount(gomock.Any()).Return(int64(len(expectedMilestones)), nil) - client.EXPECT().FetchMilestone(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, number int64) (*milestone.Milestone, error) { + client.EXPECT().FetchMilestone(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, number int64) (*heimdallclient.Milestone, error) { if int(number) <= len(expectedMilestones)-keptMilestones { return nil, heimdallclient.ErrNotInMilestoneList } @@ -239,8 +236,8 @@ func TestOnMilestoneEvent(t *testing.T) { expectedMilestone := makeMilestone(0, 12) client.EXPECT().FetchMilestone(gomock.Any(), gomock.Any()).Return(expectedMilestone, nil) - eventChan := make(chan *milestone.Milestone) - err := test.heimdall.OnMilestoneEvent(test.ctx, func(m *milestone.Milestone) { + eventChan := make(chan *heimdallclient.Milestone) + err := test.heimdall.OnMilestoneEvent(test.ctx, func(m *heimdallclient.Milestone) { eventChan <- m }) require.Nil(t, err) diff --git a/polygon/sync/peerinfo/peer_with_block_num_info.go b/polygon/sync/peer_with_block_num_info.go similarity index 96% rename from polygon/sync/peerinfo/peer_with_block_num_info.go rename to polygon/sync/peer_with_block_num_info.go index 643aa078deb..1cde2bbf7e4 100644 --- a/polygon/sync/peerinfo/peer_with_block_num_info.go +++ b/polygon/sync/peer_with_block_num_info.go @@ -1,4 +1,4 @@ -package peerinfo +package sync import "math/big" diff --git a/polygon/sync/sentry.go b/polygon/sync/sentry.go index aa8e5198cc6..ffea66b08d7 100644 --- a/polygon/sync/sentry.go +++ b/polygon/sync/sentry.go @@ -5,13 +5,12 @@ import ( "math/big" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/polygon/sync/peerinfo" ) 
-//go:generate mockgen -destination=./mock/sentry_mock.go -package=mock . Sentry +//go:generate mockgen -destination=./sentry_mock.go -package=sync . Sentry type Sentry interface { MaxPeers() int - PeersWithBlockNumInfo() peerinfo.PeersWithBlockNumInfo + PeersWithBlockNumInfo() PeersWithBlockNumInfo DownloadHeaders(ctx context.Context, start *big.Int, end *big.Int, peerID string) ([]*types.Header, error) Penalize(peerID string) } diff --git a/polygon/sync/mock/sentry_mock.go b/polygon/sync/sentry_mock.go similarity index 91% rename from polygon/sync/mock/sentry_mock.go rename to polygon/sync/sentry_mock.go index 09da633586b..7070b629766 100644 --- a/polygon/sync/mock/sentry_mock.go +++ b/polygon/sync/sentry_mock.go @@ -1,8 +1,8 @@ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ledgerwatch/erigon/polygon/sync (interfaces: Sentry) -// Package mock is a generated GoMock package. -package mock +// Package sync is a generated GoMock package. +package sync import ( context "context" @@ -11,7 +11,6 @@ import ( gomock "github.com/golang/mock/gomock" types "github.com/ledgerwatch/erigon/core/types" - peerinfo "github.com/ledgerwatch/erigon/polygon/sync/peerinfo" ) // MockSentry is a mock of Sentry interface. @@ -67,10 +66,10 @@ func (mr *MockSentryMockRecorder) MaxPeers() *gomock.Call { } // PeersWithBlockNumInfo mocks base method. -func (m *MockSentry) PeersWithBlockNumInfo() peerinfo.PeersWithBlockNumInfo { +func (m *MockSentry) PeersWithBlockNumInfo() PeersWithBlockNumInfo { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PeersWithBlockNumInfo") - ret0, _ := ret[0].(peerinfo.PeersWithBlockNumInfo) + ret0, _ := ret[0].(PeersWithBlockNumInfo) return ret0 } diff --git a/polygon/sync/spans_cache.go b/polygon/sync/spans_cache.go new file mode 100644 index 00000000000..6d359849536 --- /dev/null +++ b/polygon/sync/spans_cache.go @@ -0,0 +1,38 @@ +package sync + +import ( + heimdallspan "github.com/ledgerwatch/erigon/polygon/heimdall" +) + +type SpansCache struct { + spans map[uint64]*heimdallspan.HeimdallSpan +} + +func NewSpansCache() *SpansCache { + return &SpansCache{ + spans: make(map[uint64]*heimdallspan.HeimdallSpan), + } +} + +func (cache *SpansCache) Add(span *heimdallspan.HeimdallSpan) { + cache.spans[span.StartBlock] = span +} + +// SpanAt finds a span that contains blockNum. +func (cache *SpansCache) SpanAt(blockNum uint64) *heimdallspan.HeimdallSpan { + for _, span := range cache.spans { + if (span.StartBlock <= blockNum) && (blockNum <= span.EndBlock) { + return span + } + } + return nil +} + +// Prune removes spans that ended before blockNum. 
+func (cache *SpansCache) Prune(blockNum uint64) { + for key, span := range cache.spans { + if span.EndBlock < blockNum { + delete(cache.spans, key) + } + } +} diff --git a/polygon/sync/state_point.go b/polygon/sync/state_point.go index dfd61da3858..c8f9c39971e 100644 --- a/polygon/sync/state_point.go +++ b/polygon/sync/state_point.go @@ -4,11 +4,10 @@ import ( "math/big" "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone" + "github.com/ledgerwatch/erigon/polygon/heimdall" ) -func statePointFromCheckpoint(checkpoint *checkpoint.Checkpoint) *statePoint { +func statePointFromCheckpoint(checkpoint *heimdall.Checkpoint) *statePoint { return &statePoint{ proposer: checkpoint.Proposer, startBlock: new(big.Int).Set(checkpoint.StartBlock), @@ -20,7 +19,7 @@ func statePointFromCheckpoint(checkpoint *checkpoint.Checkpoint) *statePoint { } } -func statePointFromMilestone(milestone *milestone.Milestone) *statePoint { +func statePointFromMilestone(milestone *heimdall.Milestone) *statePoint { return &statePoint{ proposer: milestone.Proposer, startBlock: new(big.Int).Set(milestone.StartBlock), diff --git a/polygon/sync/state_point_headers_verifier.go b/polygon/sync/state_point_headers_verifier.go new file mode 100644 index 00000000000..5ee550c92a5 --- /dev/null +++ b/polygon/sync/state_point_headers_verifier.go @@ -0,0 +1,22 @@ +package sync + +import ( + "bytes" + "fmt" + + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/polygon/bor" +) + +type StatePointHeadersVerifier func(statePoint *statePoint, headers []*types.Header) error + +func VerifyStatePointHeaders(statePoint *statePoint, headers []*types.Header) error { + rootHash, err := bor.ComputeHeadersRootHash(headers) + if err != nil { + return fmt.Errorf("VerifyStatePointHeaders: failed to compute headers root hash %w", err) + } + if !bytes.Equal(rootHash, statePoint.rootHash[:]) { + return fmt.Errorf("VerifyStatePointHeaders: bad headers root hash") + } + return nil +} diff --git a/polygon/sync/state_points.go b/polygon/sync/state_points.go index 5577f24d2f4..650dc80f99e 100644 --- a/polygon/sync/state_points.go +++ b/polygon/sync/state_points.go @@ -1,11 +1,10 @@ package sync import ( - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone" + "github.com/ledgerwatch/erigon/polygon/heimdall" ) -func statePointsFromCheckpoints(checkpoints []*checkpoint.Checkpoint) statePoints { +func statePointsFromCheckpoints(checkpoints []*heimdall.Checkpoint) statePoints { statePoints := make(statePoints, len(checkpoints)) for i, checkpoint := range checkpoints { statePoints[i] = statePointFromCheckpoint(checkpoint) @@ -14,7 +13,7 @@ func statePointsFromCheckpoints(checkpoints []*checkpoint.Checkpoint) statePoint return statePoints } -func statePointsFromMilestones(milestones []*milestone.Milestone) statePoints { +func statePointsFromMilestones(milestones []*heimdall.Milestone) statePoints { statePoints := make(statePoints, len(milestones)) for i, milestone := range milestones { statePoints[i] = statePointFromMilestone(milestone) diff --git a/polygon/sync/validator_set_interface.go b/polygon/sync/validator_set_interface.go new file mode 100644 index 00000000000..a9e5f3a97a9 --- /dev/null +++ b/polygon/sync/validator_set_interface.go @@ -0,0 +1,13 @@ +package sync + +import ( + libcommon "github.com/ledgerwatch/erigon-lib/common" + 
"github.com/ledgerwatch/erigon/polygon/bor" +) + +// valset.ValidatorSet abstraction for unit tests +type validatorSetInterface interface { + bor.ValidateHeaderTimeSignerSuccessionNumber + IncrementProposerPriority(times int) + Difficulty(signer libcommon.Address) (uint64, error) +} diff --git a/spectest/consts.go b/spectest/consts.go index 0de7e99f04c..36be88ac62f 100644 --- a/spectest/consts.go +++ b/spectest/consts.go @@ -1,7 +1,8 @@ package spectest const ( - PreSsz = "pre.ssz_snappy" - PostSsz = "post.ssz_snappy" - MetaYaml = "meta.yaml" + PreSsz = "pre.ssz_snappy" + PostSsz = "post.ssz_snappy" + MetaYaml = "meta.yaml" + ObjectSSZ = "object.ssz_snappy" ) diff --git a/spectest/util.go b/spectest/util.go index b34ae6ced81..220a9a2ee09 100644 --- a/spectest/util.go +++ b/spectest/util.go @@ -2,12 +2,13 @@ package spectest import ( "fmt" + "io/fs" + "os" + clparams2 "github.com/ledgerwatch/erigon/cl/clparams" "github.com/ledgerwatch/erigon/cl/cltypes" "github.com/ledgerwatch/erigon/cl/phase1/core/state" "github.com/ledgerwatch/erigon/cl/utils" - "io/fs" - "os" "gopkg.in/yaml.v3" @@ -80,6 +81,25 @@ func ReadBlock(root fs.FS, version clparams2.StateVersion, index int) (*cltypes. return blk, nil } + +func ReadBlockByPath(root fs.FS, version clparams2.StateVersion, path string) (*cltypes.SignedBeaconBlock, error) { + var blockBytes []byte + var err error + blockBytes, err = fs.ReadFile(root, path) + if os.IsNotExist(err) { + return nil, nil + } + if err != nil { + return nil, err + } + blk := cltypes.NewSignedBeaconBlock(&clparams2.MainnetBeaconConfig) + if err = utils.DecodeSSZSnappy(blk, blockBytes, int(version)); err != nil { + return nil, err + } + + return blk, nil +} + func ReadAnchorBlock(root fs.FS, version clparams2.StateVersion, name string) (*cltypes.BeaconBlock, error) { var blockBytes []byte var err error diff --git a/tests/bor/helper/miner.go b/tests/bor/helper/miner.go index fc420100a16..ab94206a868 100644 --- a/tests/bor/helper/miner.go +++ b/tests/bor/helper/miner.go @@ -4,6 +4,8 @@ import ( "context" "crypto/ecdsa" "encoding/json" + "fmt" + "github.com/ledgerwatch/erigon/polygon/bor/borcfg" "math/big" "os" "time" @@ -28,7 +30,6 @@ import ( // InitGenesis initializes genesis file from json with sprint size and chain name as configurable inputs func InitGenesis(fileLocation string, sprintSize uint64, chainName string) types.Genesis { - // sprint size = 8 in genesis genesisData, err := os.ReadFile(fileLocation) if err != nil { @@ -36,14 +37,23 @@ func InitGenesis(fileLocation string, sprintSize uint64, chainName string) types } genesis := &types.Genesis{} - if err := json.Unmarshal(genesisData, genesis); err != nil { panic(err) } - genesis.Config.Bor.Sprint["0"] = sprintSize genesis.Config.ChainName = chainName + if genesis.Config.BorJSON != nil { + borConfig := &borcfg.BorConfig{} + err = json.Unmarshal(genesis.Config.BorJSON, borConfig) + if err != nil { + panic(fmt.Sprintf("Could not parse 'bor' config for %s: %v", fileLocation, err)) + } + + borConfig.Sprint["0"] = sprintSize + genesis.Config.Bor = borConfig + } + return *genesis } diff --git a/tests/state_test_util.go b/tests/state_test_util.go index 632bdc48752..01d73c57483 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -46,6 +46,7 @@ import ( "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/trie" + "github.com/ledgerwatch/log/v3" ) // StateTest checks transaction processing without block context. 
@@ -182,7 +183,7 @@ func (t *StateTest) RunNoVerify(tx kv.RwTx, subtest StateSubtest, vmconfig vm.Co return nil, libcommon.Hash{}, UnsupportedForkError{subtest.Fork} } vmconfig.ExtraEips = eips - block, _, err := core.GenesisToBlock(t.genesis(config), "") + block, _, err := core.GenesisToBlock(t.genesis(config), "", log.Root()) if err != nil { return nil, libcommon.Hash{}, UnsupportedForkError{subtest.Fork} } diff --git a/tests/testdata b/tests/testdata index 06e276776bc..428f218d7d6 160000 --- a/tests/testdata +++ b/tests/testdata @@ -1 +1 @@ -Subproject commit 06e276776bc87817c38f6efb492bf6f4527fa904 +Subproject commit 428f218d7d6f4a52544e12684afbfe6e2882ffbf diff --git a/turbo/app/README.md b/turbo/app/README.md new file mode 100644 index 00000000000..c7bc4b7f61d --- /dev/null +++ b/turbo/app/README.md @@ -0,0 +1,71 @@ +# Erigon Sub Commands
+
+## Backup
+
+## Import
+
+## Init
+
+## Support
+
+## Snapshots
+
+This sub command can be used for manipulating snapshot files
+
+### Uploader
+
+The `snapshots uploader` command starts a version of erigon customized for uploading snapshot files to
+a remote location.
+
+It breaks the stage execution process after the senders stage and then uses the snapshot stage to send
+uploaded headers, bodies and (in the case of polygon) bor spans and events to snapshot files. Because
+this process avoids execution, it runs significantly faster than a standard erigon configuration.
+
+The uploader uses rclone to send seedable snapshot files (100K or 500K blocks) to a remote storage location specified
+in the rclone config file.
+
+The **uploader** is configured to minimize disk usage by doing the following:
+
+* It removes snapshots once they are loaded
+* It aggressively prunes the database once entities are transferred to snapshots
+
+In addition to this it has the following performance related features:
+
+* Maximises the workers allocated to snapshot processing to improve throughput
+* Can be started from scratch by downloading the latest snapshots from the remote location to seed processing
+
+The following configuration can be used to upload blocks from genesis where:
+
+| Flag | Description |
+|---|---|
+| sync.loop.prune.limit=500000 | Sets the number of records to be pruned from the database to 500,000 per iteration (as opposed to 100) |
+| upload.location=r2:erigon-v2-snapshots-bor-mainnet | Specifies the rclone location to upload snapshots to |
+| upload.from=earliest | Sets the upload start location to the earliest available block, which will be 0 in the case of a fresh installation, or the last block in the chaindata db otherwise |
+| upload.snapshot.limit=1500000 | Tells the uploader to keep a maximum of 1,500,000 blocks in the `snapshots` directory before deleting aged snapshots |
+| snapshot.version=2 | Indicates the version to be appended to snapshot file names when they are created |
+
+
+```shell
+erigon/build/bin/erigon snapshots uploader --datadir=~/snapshots/bor-mainnet --chain=bor-mainnet \
+ --bor.heimdall=https://heimdall-api.polygon.technology --bor.milestone=false --sync.loop.prune.limit=500000 \
+ --upload.location=r2:erigon-v2-snapshots-bor-mainnet --upload.from=earliest --snapshot.version=2 \
+ --upload.snapshot.limit=1500000
+```
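The `upload.location` value above names an rclone remote (`r2:` in the example), which has to be defined in the rclone config file before the uploader can use it. As a rough sketch only (the remote name mirrors the example, but the provider, endpoint and credentials below are hypothetical placeholders, not values taken from this change), an S3-compatible remote for Cloudflare R2 could look like:

```ini
# ~/.config/rclone/rclone.conf -- hypothetical remote backing the "r2:" prefix used above
[r2]
type = s3
provider = Cloudflare
access_key_id = <access-key-id>
secret_access_key = <secret-access-key>
endpoint = https://<account-id>.r2.cloudflarestorage.com
```

With such a remote defined, rclone resolves `r2:erigon-v2-snapshots-bor-mainnet` to the `erigon-v2-snapshots-bor-mainnet` bucket on that remote.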
+
+In order to start from the latest uploaded block when starting with an empty drive, set the `upload.from` flag to `latest`, e.g.
+
+```shell
+--upload.from=latest
+```
+
+The configuration of the uploader implicitly sets the following flag values on start-up:
+
+```shell
+ --sync.loop.break.after=Senders
+ --sync.loop.block.limit=100000
+ --sync.loop.prune.limit=100000
+ --upload.snapshot.limit=1500000
+ --nodownloader=true
+ --http=false
+ --txpool.disable=true
+```
diff --git a/turbo/app/import_cmd.go b/turbo/app/import_cmd.go index 4b23fcf4e76..085b2dbb53c 100644 --- a/turbo/app/import_cmd.go +++ b/turbo/app/import_cmd.go @@ -12,6 +12,7 @@ import ( "syscall" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/wrap" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/log/v3" "github.com/urfave/cli/v2" @@ -221,7 +222,7 @@ func InsertChain(ethereum *eth.Ethereum, chain *core.ChainPack, logger log.Logge blockReader, _ := ethereum.BlockIO() hook := stages.NewHook(ethereum.SentryCtx(), ethereum.ChainDB(), ethereum.Notifications(), ethereum.StagedSync(), blockReader, ethereum.ChainConfig(), logger, sentryControlServer.UpdateHead) - err := stages.StageLoopIteration(ethereum.SentryCtx(), ethereum.ChainDB(), nil, ethereum.StagedSync(), initialCycle, logger, blockReader, hook, false) + err := stages.StageLoopIteration(ethereum.SentryCtx(), ethereum.ChainDB(), wrap.TxContainer{}, ethereum.StagedSync(), initialCycle, logger, blockReader, hook, false) if err != nil { return err } diff --git a/turbo/app/make_app.go b/turbo/app/make_app.go index a591af54265..d4e5f17bbb2 100644 --- a/turbo/app/make_app.go +++ b/turbo/app/make_app.go @@ -51,23 +51,8 @@ func MakeApp(name string, action cli.ActionFunc, cliFlags []cli.Flag) *cli.App { // run default action return action(context) } - app.Flags = append(cliFlags, debug.Flags...) // debug flags are required - app.Flags = append(app.Flags, utils.MetricFlags...) - app.Flags = append(app.Flags, logging.Flags...) - app.Flags = append(app.Flags, &utils.ConfigFlag) - // remove exact duplicate flags, keeping only the first one. this will allow easier composition later down the line - allFlags := app.Flags - newFlags := make([]cli.Flag, 0, len(allFlags)) - seen := map[string]struct{}{} - for _, vv := range allFlags { - v := vv - if _, ok := seen[v.String()]; ok { - continue - } - newFlags = append(newFlags, v) - } - app.Flags = newFlags + app.Flags = appFlags(cliFlags) app.After = func(ctx *cli.Context) error { debug.Exit() @@ -83,6 +68,28 @@ func MakeApp(name string, action cli.ActionFunc, cliFlags []cli.Flag) *cli.App { return app } +func appFlags(cliFlags []cli.Flag) []cli.Flag { + + flags := append(cliFlags, debug.Flags...) // debug flags are required + flags = append(flags, utils.MetricFlags...) + flags = append(flags, logging.Flags...) + flags = append(flags, &utils.ConfigFlag) + + // remove exact duplicate flags, keeping only the first one. this will allow easier composition later down the line + allFlags := flags + newFlags := make([]cli.Flag, 0, len(allFlags)) + seen := map[string]struct{}{} + for _, vv := range allFlags { + v := vv + if _, ok := seen[v.String()]; ok { + continue + } + newFlags = append(newFlags, v) + } + + return newFlags +} + // MigrateFlags makes all global flag values available in the // context. This should be called as early as possible in app.Before.
// diff --git a/turbo/app/snapshots_cmd.go b/turbo/app/snapshots_cmd.go index 482d15bf266..c4419b11444 100644 --- a/turbo/app/snapshots_cmd.go +++ b/turbo/app/snapshots_cmd.go @@ -8,16 +8,20 @@ import ( "errors" "fmt" "io" + "net/http" "os" "path/filepath" "runtime" "time" "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/erigon-lib/chain/snapcfg" "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/common/dir" + "github.com/ledgerwatch/erigon-lib/metrics" "github.com/ledgerwatch/log/v3" "github.com/urfave/cli/v2" + "golang.org/x/sync/semaphore" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/datadir" @@ -33,11 +37,15 @@ import ( "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/rawdb/blockio" + "github.com/ledgerwatch/erigon/diagnostics" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/ethconfig/estimate" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/params" + erigoncli "github.com/ledgerwatch/erigon/turbo/cli" "github.com/ledgerwatch/erigon/turbo/debug" "github.com/ledgerwatch/erigon/turbo/logging" + "github.com/ledgerwatch/erigon/turbo/node" "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" ) @@ -79,8 +87,41 @@ var snapshotCommand = cli.Command{ &SnapshotFromFlag, &SnapshotToFlag, &SnapshotEveryFlag, + &SnapshotVersionFlag, }), }, + { + Name: "uploader", + Action: doUploaderCommand, + Usage: "run erigon in snapshot upload mode (no execution)", + Flags: joinFlags(erigoncli.DefaultFlags, + []cli.Flag{ + &SnapshotVersionFlag, + &erigoncli.UploadLocationFlag, + &erigoncli.UploadFromFlag, + &erigoncli.FrozenBlockLimitFlag, + }), + Before: func(ctx *cli.Context) error { + ctx.Set(erigoncli.SyncLoopBreakAfterFlag.Name, "Senders") + ctx.Set(utils.NoDownloaderFlag.Name, "true") + ctx.Set(utils.HTTPEnabledFlag.Name, "false") + ctx.Set(utils.TxPoolDisableFlag.Name, "true") + + if !ctx.IsSet(erigoncli.SyncLoopBlockLimitFlag.Name) { + ctx.Set(erigoncli.SyncLoopBlockLimitFlag.Name, "100000") + } + + if !ctx.IsSet(erigoncli.FrozenBlockLimitFlag.Name) { + ctx.Set(erigoncli.FrozenBlockLimitFlag.Name, "1500000") + } + + if !ctx.IsSet(erigoncli.SyncLoopPruneLimitFlag.Name) { + ctx.Set(erigoncli.SyncLoopPruneLimitFlag.Name, "100000") + } + + return nil + }, + }, { Name: "uncompress", Action: doUncompress, @@ -116,6 +157,20 @@ var snapshotCommand = cli.Command{ }, }), }, + { + Name: "integrity", + Action: doIntegrity, + Flags: joinFlags([]cli.Flag{ + &utils.DataDirFlag, + }), + }, + //{ + // Name: "bodies_decrement_datafix", + // Action: doBodiesDecrement, + // Flags: joinFlags([]cli.Flag{ + // &utils.DataDirFlag, + // }), + //}, }, } @@ -135,12 +190,50 @@ var ( Usage: "Do operation every N blocks", Value: 1_000, } + SnapshotVersionFlag = cli.IntFlag{ + Name: "snapshot.version", + Usage: "Snapshot files version.", + Value: 1, + } SnapshotRebuildFlag = cli.BoolFlag{ Name: "rebuild", Usage: "Force rebuild", } ) +func doIntegrity(cliCtx *cli.Context) error { + logger, _, err := debug.Setup(cliCtx, true /* root logger */) + if err != nil { + return err + } + + ctx := cliCtx.Context + dirs := datadir.New(cliCtx.String(utils.DataDirFlag.Name)) + chainDB := dbCfg(kv.ChainDB, dirs.Chaindata).MustOpen() + defer chainDB.Close() + + cfg := ethconfig.NewSnapCfg(true, false, true) + chainConfig := fromdb.ChainConfig(chainDB) + blockSnaps, borSnaps, blockRetire, agg, err := openSnaps(ctx, cfg, 
dirs, snapcfg.KnownCfg(chainConfig.ChainName, 0).Version, chainDB, logger) + if err != nil { + return err + } + defer blockSnaps.Close() + defer borSnaps.Close() + defer agg.Close() + + blockReader, _ := blockRetire.IO() + if err := blockReader.(*freezeblocks.BlockReader).IntegrityTxnID(false); err != nil { + return err + } + + //if err := integrity.E3HistoryNoSystemTxs(ctx, chainDB, agg); err != nil { + // return err + //} + + return nil +} + func doDiff(cliCtx *cli.Context) error { defer log.Info("Done") srcF, dstF := cliCtx.String("src"), cliCtx.String("dst") @@ -210,6 +303,7 @@ func doDecompressSpeed(cliCtx *cli.Context) error { }() return nil } + func doRam(cliCtx *cli.Context) error { var logger log.Logger var err error @@ -246,20 +340,19 @@ func doIndicesCommand(cliCtx *cli.Context) error { dirs := datadir.New(cliCtx.String(utils.DataDirFlag.Name)) rebuild := cliCtx.Bool(SnapshotRebuildFlag.Name) - //from := cliCtx.Uint64(SnapshotFromFlag.Name) - - chainDB := mdbx.NewMDBX(logger).Path(dirs.Chaindata).MustOpen() + chainDB := dbCfg(kv.ChainDB, dirs.Chaindata).MustOpen() defer chainDB.Close() dir.MustExist(dirs.SnapHistory) - chainConfig := fromdb.ChainConfig(chainDB) if rebuild { panic("not implemented") } cfg := ethconfig.NewSnapCfg(true, false, true) - blockSnaps, borSnaps, br, agg, err := openSnaps(ctx, cfg, dirs, chainDB, logger) + chainConfig := fromdb.ChainConfig(chainDB) + blockSnaps, borSnaps, br, agg, err := openSnaps(ctx, cfg, dirs, snapcfg.KnownCfg(chainConfig.ChainName, 0).Version, chainDB, logger) + if err != nil { return err } @@ -277,27 +370,30 @@ func doIndicesCommand(cliCtx *cli.Context) error { return nil } -func openSnaps(ctx context.Context, cfg ethconfig.BlocksFreezing, dirs datadir.Dirs, chainDB kv.RwDB, logger log.Logger) ( +func openSnaps(ctx context.Context, cfg ethconfig.BlocksFreezing, dirs datadir.Dirs, version uint8, chainDB kv.RwDB, logger log.Logger) ( blockSnaps *freezeblocks.RoSnapshots, borSnaps *freezeblocks.BorRoSnapshots, br *freezeblocks.BlockRetire, agg *libstate.AggregatorV3, err error, ) { - blockSnaps = freezeblocks.NewRoSnapshots(cfg, dirs.Snap, logger) + blockSnaps = freezeblocks.NewRoSnapshots(cfg, dirs.Snap, version, logger) if err = blockSnaps.ReopenFolder(); err != nil { return } - blockSnaps.LogStat() + blockSnaps.LogStat("open") - borSnaps = freezeblocks.NewBorRoSnapshots(cfg, dirs.Snap, logger) + borSnaps = freezeblocks.NewBorRoSnapshots(cfg, dirs.Snap, version, logger) if err = borSnaps.ReopenFolder(); err != nil { return } - borSnaps.LogStat() - - agg, err = libstate.NewAggregatorV3(ctx, dirs.SnapHistory, dirs.Tmp, ethconfig.HistoryV3AggregationStep, chainDB, logger) - if err != nil { - return - } - agg.SetWorkers(estimate.CompressSnapshot.Workers()) - err = agg.OpenFolder() + borSnaps.LogStat("open") + agg = openAgg(ctx, dirs, chainDB, logger) + err = chainDB.View(ctx, func(tx kv.Tx) error { + ac := agg.MakeContext() + defer ac.Close() + //ac.LogStats(tx, func(endTxNumMinimax uint64) uint64 { + // _, histBlockNumProgress, _ := rawdbv3.TxNums.FindBlockNum(tx, endTxNumMinimax) + // return histBlockNumProgress + //}) + return nil + }) if err != nil { return } @@ -425,11 +521,16 @@ func doRetireCommand(cliCtx *cli.Context) error { from := cliCtx.Uint64(SnapshotFromFlag.Name) to := cliCtx.Uint64(SnapshotToFlag.Name) every := cliCtx.Uint64(SnapshotEveryFlag.Name) - db := mdbx.NewMDBX(logger).Label(kv.ChainDB).Path(dirs.Chaindata).MustOpen() + version := uint8(cliCtx.Int(SnapshotVersionFlag.Name)) + if version != 0 { + 
snapcfg.SnapshotVersion(version) + } + + db := dbCfg(kv.ChainDB, dirs.Chaindata).MustOpen() defer db.Close() cfg := ethconfig.NewSnapCfg(true, false, true) - blockSnaps, borSnaps, br, agg, err := openSnaps(ctx, cfg, dirs, db, logger) + blockSnaps, borSnaps, br, agg, err := openSnaps(ctx, cfg, dirs, version, db, logger) if err != nil { return err } @@ -458,7 +559,7 @@ func doRetireCommand(cliCtx *cli.Context) error { } logger.Info("Params", "from", from, "to", to, "every", every) - if err := br.RetireBlocks(ctx, forwardProgress, log.LvlInfo, nil, nil); err != nil { + if err := br.RetireBlocks(ctx, 0, forwardProgress, log.LvlInfo, nil, nil); err != nil { return err } @@ -554,3 +655,156 @@ func doRetireCommand(cliCtx *cli.Context) error { return nil } + +func doUploaderCommand(cliCtx *cli.Context) error { + var logger log.Logger + var err error + var metricsMux *http.ServeMux + + if logger, metricsMux, err = debug.Setup(cliCtx, true /* root logger */); err != nil { + return err + } + + // initializing the node and providing the current git commit there + + logger.Info("Build info", "git_branch", params.GitBranch, "git_tag", params.GitTag, "git_commit", params.GitCommit) + erigonInfoGauge := metrics.GetOrCreateGauge(fmt.Sprintf(`erigon_info{version="%s",commit="%s"}`, params.Version, params.GitCommit)) + erigonInfoGauge.Set(1) + + if version := uint8(cliCtx.Int(SnapshotVersionFlag.Name)); version != 0 { + snapcfg.SnapshotVersion(version) + } + + nodeCfg := node.NewNodConfigUrfave(cliCtx, logger) + if err := datadir.ApplyMigrations(nodeCfg.Dirs); err != nil { + return err + } + + ethCfg := node.NewEthConfigUrfave(cliCtx, nodeCfg, logger) + + ethNode, err := node.New(cliCtx.Context, nodeCfg, ethCfg, logger) + if err != nil { + log.Error("Erigon startup", "err", err) + return err + } + + if metricsMux != nil { + diagnostics.Setup(cliCtx, metricsMux, ethNode) + } + + err = ethNode.Serve() + if err != nil { + log.Error("error while serving an Erigon node", "err", err) + } + return err +} + +/* +func doBodiesDecrement(cliCtx *cli.Context) error { + logger, _, err := debug.Setup(cliCtx, true) + if err != nil { + return err + } + dirs := datadir.New(cliCtx.String(utils.DataDirFlag.Name)) + ctx := cliCtx.Context + logEvery := time.NewTicker(30 * time.Second) + defer logEvery.Stop() + + list, err := snaptype.Segments(dirs.Snap, 1) + if err != nil { + return err + } + var l []snaptype.FileInfo + for _, f := range list { + if f.T != snaptype.Bodies { + continue + } + if f.From < 14_500_000 { + continue + } + l = append(l, f) + } + migrateSingleBody := func(srcF, dstF string) error { + src, err := compress.NewDecompressor(srcF) + if err != nil { + return err + } + defer src.Close() + dst, err := compress.NewCompressor(ctx, "compress", dstF, dirs.Tmp, compress.MinPatternScore, estimate.CompressSnapshot.Workers(), log.LvlInfo, logger) + if err != nil { + return err + } + defer dst.Close() + + i := 0 + srcG := src.MakeGetter() + var buf []byte + dstBuf := bytes.NewBuffer(nil) + for srcG.HasNext() { + i++ + buf, _ = srcG.Next(buf[:0]) + body := &types.BodyForStorage{} + if err := rlp.Decode(bytes.NewReader(buf), body); err != nil { + return err + } + body.BaseTxId -= 1 + dstBuf.Reset() + if err := rlp.Encode(dstBuf, body); err != nil { + return err + } + + if err := dst.AddWord(dstBuf.Bytes()); err != nil { + return err + } + + select { + case <-logEvery.C: + logger.Info("[bodies] progress", "f", src.FileName(), "progress", fmt.Sprintf("%dK/%dK", i/1_000, src.Count()/1_000)) + default: + } + } + if err := 
dst.Compress(); err != nil {
+			return err
+		}
+		src.Close()
+		dst.Close()
+		os.Rename(srcF, srcF+".back")
+		os.Rename(dstF, srcF)
+		os.Remove(srcF + ".torrent")
+		os.Remove(srcF + ".idx")
+		ext := filepath.Ext(srcF)
+		withoutExt := srcF[:len(srcF)-len(ext)]
+		_ = os.Remove(withoutExt + ".idx")
+		return nil
+	}
+	for _, f := range l {
+		srcF, dstF := f.Path, f.Path+"2"
+		if err := migrateSingleBody(srcF, dstF); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+*/
+
+func dbCfg(label kv.Label, path string) mdbx.MdbxOpts {
+	const ThreadsLimit = 9_000
+	limiterB := semaphore.NewWeighted(ThreadsLimit)
+	opts := mdbx.NewMDBX(log.New()).Path(path).Label(label).RoTxsLimiter(limiterB)
+	// the integration tool doesn't intend to create the db, so the easiest way to open it is to pass the mdbx.Accede flag,
+	// which allows reading all options from the DB instead of overriding them
+	opts = opts.Accede()
+	return opts
+}
+func openAgg(ctx context.Context, dirs datadir.Dirs, chainDB kv.RwDB, logger log.Logger) *libstate.AggregatorV3 {
+	agg, err := libstate.NewAggregatorV3(ctx, dirs.Snap, dirs.Tmp, ethconfig.HistoryV3AggregationStep, chainDB, logger)
+	if err != nil {
+		panic(err)
+	}
+	if err = agg.OpenFolder(); err != nil {
+		panic(err)
+	}
+	agg.SetWorkers(estimate.CompressSnapshot.Workers())
+	return agg
+}
diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go
index aba7b4a0f98..347a72d694e 100644
--- a/turbo/cli/default_flags.go
+++ b/turbo/cli/default_flags.go
@@ -149,7 +149,6 @@ var DefaultFlags = []cli.Flag{
 	&utils.HeimdallURLFlag,
 	&utils.WebSeedsFlag,
 	&utils.WithoutHeimdallFlag,
-	&utils.HeimdallgRPCAddressFlag,
 	&utils.BorBlockPeriodFlag,
 	&utils.BorBlockSizeFlag,
 	&utils.WithHeimdallMilestones,
@@ -174,6 +173,9 @@ var DefaultFlags = []cli.Flag{

 	&utils.BeaconAPIFlag,
 	&utils.BeaconApiAddrFlag,
+	&utils.BeaconApiAllowMethodsFlag,
+	&utils.BeaconApiAllowOriginsFlag,
+	&utils.BeaconApiAllowCredentialsFlag,
 	&utils.BeaconApiPortFlag,
 	&utils.BeaconApiReadTimeoutFlag,
 	&utils.BeaconApiWriteTimeoutFlag,
@@ -187,4 +189,7 @@ var DefaultFlags = []cli.Flag{
 	&utils.RPCSlowFlag,

 	&utils.TxPoolGossipDisableFlag,
+	&SyncLoopBlockLimitFlag,
+	&SyncLoopBreakAfterFlag,
+	&SyncLoopPruneLimitFlag,
 }
diff --git a/turbo/cli/flags.go b/turbo/cli/flags.go
index 715ed3cd539..083fc8ee272 100644
--- a/turbo/cli/flags.go
+++ b/turbo/cli/flags.go
@@ -10,6 +10,7 @@ import (
 	libcommon "github.com/ledgerwatch/erigon-lib/common"

+	"github.com/ledgerwatch/erigon/rpc"
 	"github.com/ledgerwatch/erigon/rpc/rpccfg"

 	"github.com/c2h5oh/datasize"
@@ -148,6 +149,42 @@ var (
 		Value: "",
 	}

+	SyncLoopPruneLimitFlag = cli.UintFlag{
+		Name:  "sync.loop.prune.limit",
+		Usage: "Sets the maximum number of blocks to prune per loop iteration",
+		Value: 100,
+	}
+
+	SyncLoopBreakAfterFlag = cli.StringFlag{
+		Name:  "sync.loop.break.after",
+		Usage: "Sets the last stage of the sync loop to run",
+		Value: "",
+	}
+
+	SyncLoopBlockLimitFlag = cli.UintFlag{
+		Name:  "sync.loop.block.limit",
+		Usage: "Sets the maximum number of blocks to process per loop iteration",
+		Value: 0, // unlimited
+	}
+
+	UploadLocationFlag = cli.StringFlag{
+		Name:  "upload.location",
+		Usage: "Location to upload snapshot segments to",
+		Value: "",
+	}
+
+	UploadFromFlag = cli.StringFlag{
+		Name:  "upload.from",
+		Usage: "Blocks to upload from: number, or 'earliest' (start of the chain), 'latest' (last segment previously uploaded)",
+		Value: "latest",
+	}
+
+	FrozenBlockLimitFlag = cli.UintFlag{
+		Name:  "upload.snapshot.limit",
+		Usage: "Sets the maximum number of snapshot blocks to 
hold on the local disk when uploading", + Value: 1500000, + } + BadBlockFlag = cli.StringFlag{ Name: "bad.block", Usage: "Marks block with given hex string as bad and forces initial reorg before normal staged sync", @@ -255,6 +292,32 @@ func ApplyFlagsForEthConfig(ctx *cli.Context, cfg *ethconfig.Config, logger log. cfg.Sync.LoopThrottle = syncLoopThrottle } + if limit := ctx.Uint(SyncLoopPruneLimitFlag.Name); limit > 0 { + cfg.Sync.PruneLimit = int(limit) + } + + if stage := ctx.String(SyncLoopBreakAfterFlag.Name); len(stage) > 0 { + cfg.Sync.BreakAfterStage = stage + } + + if limit := ctx.Uint(SyncLoopBlockLimitFlag.Name); limit > 0 { + cfg.Sync.LoopBlockLimit = limit + } + + if location := ctx.String(UploadLocationFlag.Name); len(location) > 0 { + cfg.Sync.UploadLocation = location + } + + if blockno := ctx.String(UploadFromFlag.Name); len(blockno) > 0 { + cfg.Sync.UploadFrom = rpc.AsBlockNumber(blockno) + } else { + cfg.Sync.UploadFrom = rpc.LatestBlockNumber + } + + if limit := ctx.Uint(FrozenBlockLimitFlag.Name); limit > 0 { + cfg.Sync.FrozenBlockLimit = uint64(limit) + } + if ctx.String(BadBlockFlag.Name) != "" { bytes, err := hexutil.Decode(ctx.String(BadBlockFlag.Name)) if err != nil { @@ -354,10 +417,15 @@ func setEmbeddedRpcDaemon(ctx *cli.Context, cfg *nodecfg.Config, logger log.Logg } apis := ctx.String(utils.HTTPApiFlag.Name) - logger.Info("starting HTTP APIs", "APIs", apis) c := &httpcfg.HttpCfg{ - Enabled: ctx.Bool(utils.HTTPEnabledFlag.Name), + Enabled: func() bool { + if ctx.IsSet(utils.HTTPEnabledFlag.Name) { + return ctx.Bool(utils.HTTPEnabledFlag.Name) + } + + return true + }(), HttpServerEnabled: ctx.Bool(utils.HTTPServerEnabledFlag.Name), Dirs: cfg.Dirs, @@ -412,6 +480,11 @@ func setEmbeddedRpcDaemon(ctx *cli.Context, cfg *nodecfg.Config, logger log.Logg StateCache: kvcache.DefaultCoherentConfig, RPCSlowLogThreshold: ctx.Duration(utils.RPCSlowFlag.Name), } + + if c.Enabled { + logger.Info("starting HTTP APIs", "port", c.HttpPort, "APIs", apis) + } + if ctx.IsSet(utils.HttpCompressionFlag.Name) { c.HttpCompression = ctx.Bool(utils.HttpCompressionFlag.Name) } else { diff --git a/turbo/debug/flags.go b/turbo/debug/flags.go index 1be6efa51b3..a2bd0f10a94 100644 --- a/turbo/debug/flags.go +++ b/turbo/debug/flags.go @@ -184,7 +184,7 @@ func Setup(ctx *cli.Context, rootLogger bool) (log.Logger, *http.ServeMux, error RaiseFdLimit() - logger := logging.SetupLoggerCtx("erigon", ctx, rootLogger) + logger := logging.SetupLoggerCtx("erigon", ctx, log.LvlInfo, log.LvlInfo, rootLogger) if traceFile := ctx.String(traceFlag.Name); traceFile != "" { if err := Handler.StartGoTrace(traceFile); err != nil { diff --git a/turbo/engineapi/engine_block_downloader/block_downloader.go b/turbo/engineapi/engine_block_downloader/block_downloader.go index 7c08b42ee86..83c0e4dfece 100644 --- a/turbo/engineapi/engine_block_downloader/block_downloader.go +++ b/turbo/engineapi/engine_block_downloader/block_downloader.go @@ -5,7 +5,6 @@ import ( "context" "encoding/binary" "fmt" - "github.com/ledgerwatch/erigon-lib/kv/dbutils" "math/big" "sync" "sync/atomic" @@ -18,6 +17,8 @@ import ( "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/gointerfaces/execution" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/dbutils" + "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/rlp" @@ -101,9 +102,9 @@ func (e *EngineBlockDownloader) scheduleHeadersDownload( } if heightToDownload == 0 { - 
e.logger.Info("[EngineBlockDownloader] Downloading PoS headers...", "height", "unknown", "hash", hashToDownload, "requestId", requestId) + e.logger.Info("[EngineBlockDownloader] Downloading PoS headers...", "hash", hashToDownload, "requestId", requestId) } else { - e.logger.Info("[EngineBlockDownloader] Downloading PoS headers...", "height", heightToDownload, "hash", hashToDownload, "requestId", requestId) + e.logger.Info("[EngineBlockDownloader] Downloading PoS headers...", "hash", hashToDownload, "requestId", requestId, "height", heightToDownload) } e.hd.SetRequestId(requestId) diff --git a/turbo/engineapi/engine_helpers/fork_validator.go b/turbo/engineapi/engine_helpers/fork_validator.go index 9f742279a95..76902dcb0f4 100644 --- a/turbo/engineapi/engine_helpers/fork_validator.go +++ b/turbo/engineapi/engine_helpers/fork_validator.go @@ -17,19 +17,21 @@ import ( "context" "errors" "fmt" - "github.com/ledgerwatch/erigon/cl/phase1/core/state/lru" "sync" + "github.com/ledgerwatch/erigon/cl/phase1/core/state/lru" + libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/membatchwithdb" + "github.com/ledgerwatch/erigon-lib/wrap" + "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/turbo/engineapi/engine_types" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/turbo/shards" @@ -38,7 +40,7 @@ import ( // the maximum point from the current head, past which side forks are not validated anymore. const maxForkDepth = 32 // 32 slots is the duration of an epoch thus there cannot be side forks in PoS deeper than 32 blocks from head. -type validatePayloadFunc func(kv.RwTx, *types.Header, *types.RawBody, uint64, []*types.Header, []*types.RawBody, *shards.Notifications) error +type validatePayloadFunc func(wrap.TxContainer, *types.Header, *types.RawBody, uint64, []*types.Header, []*types.RawBody, *shards.Notifications) error type ForkValidator struct { // current memory batch containing chain head that extend canonical fork. @@ -132,7 +134,7 @@ func (fv *ForkValidator) FlushExtendingFork(tx kv.RwTx, accumulator *shards.Accu // if the payload extends the canonical chain, then we stack it in extendingFork without any unwind. // if the payload is a fork then we unwind to the point where the fork meets the canonical chain, and there we check whether it is valid. // if for any reason none of the actions above can be performed due to lack of information, we accept the payload and avoid validation. 
-func (fv *ForkValidator) ValidatePayload(tx kv.Tx, header *types.Header, body *types.RawBody, extendCanonical bool) (status engine_types.EngineStatus, latestValidHash libcommon.Hash, validationError error, criticalError error) { +func (fv *ForkValidator) ValidatePayload(tx kv.Tx, header *types.Header, body *types.RawBody, extendCanonical bool, logger log.Logger) (status engine_types.EngineStatus, latestValidHash libcommon.Hash, validationError error, criticalError error) { fv.lock.Lock() defer fv.lock.Unlock() if fv.validatePayload == nil { @@ -149,8 +151,10 @@ func (fv *ForkValidator) ValidatePayload(tx kv.Tx, header *types.Header, body *t log.Debug("Execution ForkValidator.ValidatePayload", "extendCanonical", extendCanonical) if extendCanonical { - extendingFork := membatchwithdb.NewMemoryBatch(tx, fv.tmpDir) + extendingFork := membatchwithdb.NewMemoryBatch(tx, fv.tmpDir, logger) defer extendingFork.Close() + var txc wrap.TxContainer + txc.Tx = extendingFork fv.extendingForkNotifications = &shards.Notifications{ Events: shards.NewEvents(), @@ -159,7 +163,7 @@ func (fv *ForkValidator) ValidatePayload(tx kv.Tx, header *types.Header, body *t // Update fork head hash. fv.extendingForkHeadHash = header.Hash() fv.extendingForkNumber = header.Number.Uint64() - status, latestValidHash, validationError, criticalError = fv.validateAndStorePayload(extendingFork, header, body, 0, nil, nil, fv.extendingForkNotifications) + status, latestValidHash, validationError, criticalError = fv.validateAndStorePayload(txc, header, body, 0, nil, nil, fv.extendingForkNotifications) if criticalError != nil { return } @@ -186,7 +190,7 @@ func (fv *ForkValidator) ValidatePayload(tx kv.Tx, header *types.Header, body *t return } - log.Debug("Execution ForkValidator.ValidatePayload", "foundCanonical", foundCanonical, "currentHash", currentHash, "unwindPoint", unwindPoint) + logger.Debug("Execution ForkValidator.ValidatePayload", "foundCanonical", foundCanonical, "currentHash", currentHash, "unwindPoint", unwindPoint) var bodiesChain []*types.RawBody var headersChain []*types.Header @@ -222,19 +226,21 @@ func (fv *ForkValidator) ValidatePayload(tx kv.Tx, header *types.Header, body *t if criticalError != nil { return } - log.Debug("Execution ForkValidator.ValidatePayload", "foundCanonical", foundCanonical, "currentHash", currentHash, "unwindPoint", unwindPoint) + logger.Debug("Execution ForkValidator.ValidatePayload", "foundCanonical", foundCanonical, "currentHash", currentHash, "unwindPoint", unwindPoint) } // Do not set an unwind point if we are already there. if unwindPoint == fv.currentHeight { unwindPoint = 0 } - batch := membatchwithdb.NewMemoryBatch(tx, fv.tmpDir) + batch := membatchwithdb.NewMemoryBatch(tx, fv.tmpDir, logger) defer batch.Rollback() + var txc wrap.TxContainer + txc.Tx = batch notifications := &shards.Notifications{ Events: shards.NewEvents(), Accumulator: shards.NewAccumulator(), } - return fv.validateAndStorePayload(batch, header, body, unwindPoint, headersChain, bodiesChain, notifications) + return fv.validateAndStorePayload(txc, header, body, unwindPoint, headersChain, bodiesChain, notifications) } // Clear wipes out current extending fork data, this method is called after fcu is called, @@ -254,9 +260,9 @@ func (fv *ForkValidator) ClearWithUnwind(accumulator *shards.Accumulator, c shar } // validateAndStorePayload validate and store a payload fork chain if such chain results valid. 
-func (fv *ForkValidator) validateAndStorePayload(tx kv.RwTx, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody, +func (fv *ForkValidator) validateAndStorePayload(txc wrap.TxContainer, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody, notifications *shards.Notifications) (status engine_types.EngineStatus, latestValidHash libcommon.Hash, validationError error, criticalError error) { - if err := fv.validatePayload(tx, header, body, unwindPoint, headersChain, bodiesChain, notifications); err != nil { + if err := fv.validatePayload(txc, header, body, unwindPoint, headersChain, bodiesChain, notifications); err != nil { if errors.Is(err, consensus.ErrInvalidBlock) { validationError = err } else { @@ -268,11 +274,11 @@ func (fv *ForkValidator) validateAndStorePayload(tx kv.RwTx, header *types.Heade latestValidHash = header.Hash() if validationError != nil { var latestValidNumber uint64 - latestValidNumber, criticalError = stages.GetStageProgress(tx, stages.IntermediateHashes) + latestValidNumber, criticalError = stages.GetStageProgress(txc.Tx, stages.IntermediateHashes) if criticalError != nil { return } - latestValidHash, criticalError = rawdb.ReadCanonicalHash(tx, latestValidNumber) + latestValidHash, criticalError = rawdb.ReadCanonicalHash(txc.Tx, latestValidNumber) if criticalError != nil { return } @@ -286,7 +292,7 @@ func (fv *ForkValidator) validateAndStorePayload(tx kv.RwTx, header *types.Heade // If we do not have the body we can recover it from the batch. if body != nil { - if _, criticalError = rawdb.WriteRawBodyIfNotExists(tx, header.Hash(), header.Number.Uint64(), body); criticalError != nil { + if _, criticalError = rawdb.WriteRawBodyIfNotExists(txc.Tx, header.Hash(), header.Number.Uint64(), body); criticalError != nil { return } } diff --git a/turbo/execution/eth1/ethereum_execution.go b/turbo/execution/eth1/ethereum_execution.go index baa1e8e82df..1a367602192 100644 --- a/turbo/execution/eth1/ethereum_execution.go +++ b/turbo/execution/eth1/ethereum_execution.go @@ -9,6 +9,7 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/gointerfaces" "github.com/ledgerwatch/erigon-lib/gointerfaces/execution" + "github.com/ledgerwatch/erigon-lib/wrap" "github.com/ledgerwatch/log/v3" "golang.org/x/sync/semaphore" "google.golang.org/protobuf/types/known/emptypb" @@ -183,7 +184,7 @@ func (e *EthereumExecutionModule) ValidateChain(ctx context.Context, req *execut extendingHash := e.forkValidator.ExtendingForkHeadHash() extendCanonical := extendingHash == libcommon.Hash{} && header.ParentHash == currentHeadHash - status, lvh, validationError, criticalError := e.forkValidator.ValidatePayload(tx, header, body.RawBody(), extendCanonical) + status, lvh, validationError, criticalError := e.forkValidator.ValidatePayload(tx, header, body.RawBody(), extendCanonical, e.logger) if criticalError != nil { return nil, criticalError } @@ -229,18 +230,25 @@ func (e *EthereumExecutionModule) purgeBadChain(ctx context.Context, tx kv.RwTx, func (e *EthereumExecutionModule) Start(ctx context.Context) { e.semaphore.Acquire(ctx, 1) defer e.semaphore.Release(1) - // Run the forkchoice - if err := e.executionPipeline.Run(e.db, nil, true); err != nil { - if !errors.Is(err, context.Canceled) { - e.logger.Error("Could not start execution service", "err", err) + + more := true + + for more { + var err error + + if more, err = 
e.executionPipeline.Run(e.db, wrap.TxContainer{}, true); err != nil { + if !errors.Is(err, context.Canceled) { + e.logger.Error("Could not start execution service", "err", err) + } + continue } - return - } - if err := e.executionPipeline.RunPrune(e.db, nil, true); err != nil { - if !errors.Is(err, context.Canceled) { - e.logger.Error("Could not start execution service", "err", err) + + if err := e.executionPipeline.RunPrune(e.db, nil, true); err != nil { + if !errors.Is(err, context.Canceled) { + e.logger.Error("Could not start execution service", "err", err) + } + continue } - return } } diff --git a/turbo/execution/eth1/forkchoice.go b/turbo/execution/eth1/forkchoice.go index d6f88ec1269..36a431c29f4 100644 --- a/turbo/execution/eth1/forkchoice.go +++ b/turbo/execution/eth1/forkchoice.go @@ -10,6 +10,7 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces/execution" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" + "github.com/ledgerwatch/erigon-lib/wrap" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" @@ -138,18 +139,17 @@ func (e *EthereumExecutionModule) updateForkChoice(ctx context.Context, blockHas return } + // Only Optimism allows unwinding to previously canonical hashes unwindingToCanonical := false if e.config.IsOptimism() { headHash := rawdb.ReadHeadBlockHash(tx) - unwindingToCanonical = blockHash != headHash + unwindingToCanonical = (blockHash != headHash) && (canonicalHash == blockHash) if unwindingToCanonical { - e.logger.Info("Optimism ForkChoice is choosing to unwind to a previously canonical block", "blockHash", blockHash, "blockNumber", fcuHeader.Number.Uint64()) + e.logger.Info("Optimism ForkChoice is choosing to unwind to a previously canonical block", "blockHash", blockHash, "blockNumber", fcuHeader.Number.Uint64(), "headHash", headHash) } } - // Optimism allows unwinding to previously canonical hashes if canonicalHash == blockHash { - // if block hash is part of the canonical chain treat it as no-op. 
writeForkChoiceHashes(tx, blockHash, safeHash, finalizedHash) valid, err := e.verifyForkchoiceHashes(ctx, tx, blockHash, finalizedHash, safeHash) @@ -254,7 +254,7 @@ func (e *EthereumExecutionModule) updateForkChoice(ctx context.Context, blockHas } // Run the unwind - if err := e.executionPipeline.RunUnwind(e.db, tx); err != nil { + if err := e.executionPipeline.RunUnwind(e.db, wrap.TxContainer{Tx: tx}); err != nil { err = fmt.Errorf("updateForkChoice: %w", err) sendForkchoiceErrorWithoutWaiting(outcomeCh, err) return @@ -325,7 +325,7 @@ func (e *EthereumExecutionModule) updateForkChoice(ctx context.Context, blockHas } } // Run the forkchoice - if err := e.executionPipeline.Run(e.db, tx, false); err != nil { + if _, err := e.executionPipeline.Run(e.db, wrap.TxContainer{Tx: tx}, false); err != nil { err = fmt.Errorf("updateForkChoice: %w", err) sendForkchoiceErrorWithoutWaiting(outcomeCh, err) return diff --git a/turbo/execution/eth1/inserters.go b/turbo/execution/eth1/inserters.go index 368a75a3f07..c49a34de86a 100644 --- a/turbo/execution/eth1/inserters.go +++ b/turbo/execution/eth1/inserters.go @@ -29,8 +29,9 @@ func (e *EthereumExecutionModule) InsertBlocks(ctx context.Context, req *executi return nil, fmt.Errorf("ethereumExecutionModule.InsertBlocks: cannot convert headers: %s", err) } body := eth1_utils.ConvertRawBlockBodyFromRpc(block.Body) + height := header.Number.Uint64() // Parent's total difficulty - parentTd, err := rawdb.ReadTd(tx, header.ParentHash, header.Number.Uint64()-1) + parentTd, err := rawdb.ReadTd(tx, header.ParentHash, height-1) if err != nil || parentTd == nil { return nil, fmt.Errorf("parent's total difficulty not found with hash %x and height %d: %v", header.ParentHash, header.Number.Uint64()-1, err) } @@ -38,13 +39,13 @@ func (e *EthereumExecutionModule) InsertBlocks(ctx context.Context, req *executi // Sum TDs. 
td := parentTd.Add(parentTd, header.Difficulty) if err := rawdb.WriteHeader(tx, header); err != nil { - return nil, fmt.Errorf("ethereumExecutionModule.InsertHeaders: could not insert: %s", err) + return nil, fmt.Errorf("ethereumExecutionModule.InsertHeaders: writeHeader: %s", err) } - if err := rawdb.WriteTd(tx, header.Hash(), header.Number.Uint64(), td); err != nil { - return nil, fmt.Errorf("ethereumExecutionModule.InsertHeaders: could not insert: %s", err) + if err := rawdb.WriteTd(tx, header.Hash(), height, td); err != nil { + return nil, fmt.Errorf("ethereumExecutionModule.InsertHeaders: writeTd: %s", err) } - if _, err := rawdb.WriteRawBodyIfNotExists(tx, header.Hash(), header.Number.Uint64(), body); err != nil { - return nil, fmt.Errorf("ethereumExecutionModule.InsertBlocks: could not insert: %s", err) + if _, err := rawdb.WriteRawBodyIfNotExists(tx, header.Hash(), height, body); err != nil { + return nil, fmt.Errorf("ethereumExecutionModule.InsertBlocks: writeBody: %s", err) } } if err := tx.Commit(); err != nil { diff --git a/turbo/jsonrpc/bor_api.go b/turbo/jsonrpc/bor_api.go index 41ee908d3e3..835bd8cc170 100644 --- a/turbo/jsonrpc/bor_api.go +++ b/turbo/jsonrpc/bor_api.go @@ -7,8 +7,8 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/consensus/bor" - "github.com/ledgerwatch/erigon/consensus/bor/valset" + "github.com/ledgerwatch/erigon/polygon/bor" + "github.com/ledgerwatch/erigon/polygon/bor/valset" "github.com/ledgerwatch/erigon/rpc" ) diff --git a/turbo/jsonrpc/bor_helper.go b/turbo/jsonrpc/bor_helper.go index c4a14e1b119..db0ad4ea60b 100644 --- a/turbo/jsonrpc/bor_helper.go +++ b/turbo/jsonrpc/bor_helper.go @@ -7,14 +7,14 @@ import ( "fmt" "sort" - "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/polygon/bor/borcfg" - "github.com/ledgerwatch/erigon/consensus/bor" - "github.com/ledgerwatch/erigon/consensus/bor/valset" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/polygon/bor" + "github.com/ledgerwatch/erigon/polygon/bor/valset" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/rpchelper" ) @@ -89,7 +89,7 @@ func getHeaderByHash(ctx context.Context, api *BorImpl, tx kv.Tx, hash common.Ha } // ecrecover extracts the Ethereum account address from a signed header. -func ecrecover(header *types.Header, c *chain.BorConfig) (common.Address, error) { +func ecrecover(header *types.Header, c *borcfg.BorConfig) (common.Address, error) { // Retrieve the signature from the header extra-data if len(header.Extra) < extraSeal { return common.Address{}, errMissingSignature @@ -117,6 +117,8 @@ func validatorContains(a []*valset.Validator, x *valset.Validator) (*valset.Vali return nil, false } +type ValidatorSet = valset.ValidatorSet + // getUpdatedValidatorSet applies changes to a validator set and returns a new validator set func getUpdatedValidatorSet(oldValidatorSet *ValidatorSet, newVals []*valset.Validator) *ValidatorSet { v := oldValidatorSet @@ -146,8 +148,8 @@ func getUpdatedValidatorSet(oldValidatorSet *ValidatorSet, newVals []*valset.Val // author returns the Ethereum address recovered // from the signature in the header's extra-data section. 
func author(api *BorImpl, tx kv.Tx, header *types.Header) (common.Address, error) { - config, _ := api.chainConfig(tx) - return ecrecover(header, config.Bor) + borEngine, _ := api.bor() + return ecrecover(header, borEngine.Config()) } func rankMapDifficulties(values map[common.Address]uint64) []difficultiesKV { diff --git a/turbo/jsonrpc/bor_snapshot.go b/turbo/jsonrpc/bor_snapshot.go index 7a6ef67f4c8..f59c29951ce 100644 --- a/turbo/jsonrpc/bor_snapshot.go +++ b/turbo/jsonrpc/bor_snapshot.go @@ -6,27 +6,27 @@ import ( "errors" "fmt" - "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon/polygon/bor/borcfg" "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/consensus/bor" - "github.com/ledgerwatch/erigon/consensus/bor/finality/whitelist" - "github.com/ledgerwatch/erigon/consensus/bor/valset" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/polygon/bor" + "github.com/ledgerwatch/erigon/polygon/bor/finality/whitelist" + "github.com/ledgerwatch/erigon/polygon/bor/valset" "github.com/ledgerwatch/erigon/rpc" ) type Snapshot struct { - config *chain.BorConfig // Consensus engine parameters to fine tune behavior + config *borcfg.BorConfig // Consensus engine parameters to fine tune behavior - Number uint64 `json:"number"` // Block number where the snapshot was created - Hash common.Hash `json:"hash"` // Block hash where the snapshot was created - ValidatorSet *ValidatorSet `json:"validatorSet"` // Validator set at this moment - Recents map[uint64]common.Address `json:"recents"` // Set of recent signers for spam protections + Number uint64 `json:"number"` // Block number where the snapshot was created + Hash common.Hash `json:"hash"` // Block hash where the snapshot was created + ValidatorSet *ValidatorSet `json:"validatorSet"` // Validator set at this moment } // GetSnapshot retrieves the state snapshot at a given block. 
@@ -278,19 +278,6 @@ func (api *BorImpl) GetVoteOnHash(ctx context.Context, starBlockNr uint64, endBl return false, fmt.Errorf("hash mismatch: localChainHash %s, milestoneHash %s", localEndBlockHash, hash) } - bor, err := api.bor() - - if err != nil { - return false, errors.New("bor engine not available") - } - - err = bor.HeimdallClient.FetchMilestoneID(ctx, milestoneId) - - if err != nil { - service.UnlockMutex(false, "", endBlockNr, common.Hash{}) - return false, errors.New("milestone ID doesn't exist in Heimdall") - } - service.UnlockMutex(true, milestoneId, endBlockNr, localEndBlock.Hash()) return true, nil @@ -463,37 +450,10 @@ func (s *Snapshot) copy() *Snapshot { Number: s.Number, Hash: s.Hash, ValidatorSet: s.ValidatorSet.Copy(), - Recents: make(map[uint64]common.Address), - } - for block, signer := range s.Recents { - cpy.Recents[block] = signer } - return cpy } -// GetSignerSuccessionNumber returns the relative position of signer in terms of the in-turn proposer -func (s *Snapshot) GetSignerSuccessionNumber(signer common.Address) (int, error) { - validators := s.ValidatorSet.Validators - proposer := s.ValidatorSet.GetProposer().Address - proposerIndex, _ := s.ValidatorSet.GetByAddress(proposer) - if proposerIndex == -1 { - return -1, &bor.UnauthorizedProposerError{Number: s.Number, Proposer: proposer.Bytes()} - } - signerIndex, _ := s.ValidatorSet.GetByAddress(signer) - if signerIndex == -1 { - return -1, &bor.UnauthorizedSignerError{Number: s.Number, Signer: signer.Bytes()} - } - - tempIndex := signerIndex - if proposerIndex != tempIndex { - if tempIndex < proposerIndex { - tempIndex = tempIndex + len(validators) - } - } - return tempIndex - proposerIndex, nil -} - // signers retrieves the list of authorized signers in ascending order. func (s *Snapshot) signers() []common.Address { sigs := make([]common.Address, 0, len(s.ValidatorSet.Validators)) @@ -524,12 +484,7 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) { for _, header := range headers { // Remove any votes on checkpoint blocks number := header.Number.Uint64() - - // Delete the oldest signer from the recent list to allow it signing again - currentSprint := s.config.CalculateSprint(number) - if number >= currentSprint { - delete(snap.Recents, number-currentSprint) - } + currentLen := s.config.CalculateSprintLength(number) // Resolve the authorization key and check against signers signer, err := ecrecover(header, s.config) @@ -538,20 +493,13 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) { } // check if signer is in validator set - if !snap.ValidatorSet.HasAddress(signer.Bytes()) { - return nil, &bor.UnauthorizedSignerError{Number: number, Signer: signer.Bytes()} + if !snap.ValidatorSet.HasAddress(signer) { + return nil, &valset.UnauthorizedSignerError{Number: number, Signer: signer.Bytes()} } - if _, err = snap.GetSignerSuccessionNumber(signer); err != nil { - return nil, err - } - - // add recents - snap.Recents[number] = signer - // change validator set and change proposer - if number > 0 && (number+1)%currentSprint == 0 { - if err := bor.ValidateHeaderExtraField(header.Extra); err != nil { + if number > 0 && (number+1)%currentLen == 0 { + if err := bor.ValidateHeaderExtraLength(header.Extra); err != nil { return nil, err } validatorBytes := header.Extra[extraVanity : len(header.Extra)-extraSeal] @@ -563,6 +511,7 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) { snap.ValidatorSet = v } } + snap.Number += uint64(len(headers)) snap.Hash = 
headers[len(headers)-1].Hash() @@ -628,8 +577,8 @@ func loadSnapshot(api *BorImpl, db kv.Tx, borDb kv.Tx, hash common.Hash) (*Snaps if err := json.Unmarshal(blob, snap); err != nil { return nil, err } - config, _ := api.chainConfig(db) - snap.config = config.Bor + borEngine, _ := api.bor() + snap.config = borEngine.Config() // update total voting power if err := snap.ValidatorSet.UpdateTotalVotingPower(); err != nil { diff --git a/turbo/jsonrpc/daemon.go b/turbo/jsonrpc/daemon.go index f426b4c448b..f0023519af4 100644 --- a/turbo/jsonrpc/daemon.go +++ b/turbo/jsonrpc/daemon.go @@ -7,8 +7,8 @@ import ( libstate "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/cli/httpcfg" "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/consensus/bor" "github.com/ledgerwatch/erigon/consensus/clique" + "github.com/ledgerwatch/erigon/polygon/bor" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/services" diff --git a/turbo/jsonrpc/erigon_system.go b/turbo/jsonrpc/erigon_system.go index eac0b351554..54bcf65d4ac 100644 --- a/turbo/jsonrpc/erigon_system.go +++ b/turbo/jsonrpc/erigon_system.go @@ -7,9 +7,9 @@ import ( "github.com/ledgerwatch/erigon-lib/common" - borfinality "github.com/ledgerwatch/erigon/consensus/bor/finality" - "github.com/ledgerwatch/erigon/consensus/bor/finality/whitelist" "github.com/ledgerwatch/erigon/core/forkid" + borfinality "github.com/ledgerwatch/erigon/polygon/bor/finality" + "github.com/ledgerwatch/erigon/polygon/bor/finality/whitelist" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/rpchelper" ) diff --git a/turbo/jsonrpc/eth_block.go b/turbo/jsonrpc/eth_block.go index 52fb5a8e10a..81981842ca7 100644 --- a/turbo/jsonrpc/eth_block.go +++ b/turbo/jsonrpc/eth_block.go @@ -8,11 +8,13 @@ import ( "github.com/ledgerwatch/erigon-lib/common/hexutil" "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/polygon/bor/borcfg" + + "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/core" @@ -313,7 +315,8 @@ func (api *APIImpl) GetBlockByHash(ctx context.Context, numberOrHash rpc.BlockNu response, err := ethapi.RPCMarshalBlockEx(block, true, fullTx, borTx, borTxHash, additionalFields, receipts) if chainConfig.Bor != nil { - response["miner"], _ = ecrecover(block.Header(), chainConfig.Bor) + borConfig := chainConfig.Bor.(*borcfg.BorConfig) + response["miner"], _ = ecrecover(block.Header(), borConfig) } if err == nil && int64(number) == rpc.PendingBlockNumber.Int64() { diff --git a/turbo/jsonrpc/eth_call.go b/turbo/jsonrpc/eth_call.go index 5e7fd39b03f..46fbc1edfdd 100644 --- a/turbo/jsonrpc/eth_call.go +++ b/turbo/jsonrpc/eth_call.go @@ -404,7 +404,7 @@ func (api *APIImpl) GetProof(ctx context.Context, address libcommon.Address, sto if latestBlock-blockNr > uint64(api.MaxGetProofRewindBlockCount) { return nil, fmt.Errorf("requested block is too old, block must be within %d blocks of the head block number (currently %d)", uint64(api.MaxGetProofRewindBlockCount), latestBlock) } - batch := membatchwithdb.NewMemoryBatch(tx, api.dirs.Tmp) + batch := membatchwithdb.NewMemoryBatch(tx, api.dirs.Tmp, api.logger) defer batch.Rollback() unwindState := &stagedsync.UnwindState{UnwindPoint: blockNr} diff --git 
a/turbo/jsonrpc/eth_subscribe_test.go b/turbo/jsonrpc/eth_subscribe_test.go index 26e67192516..25b252ef67e 100644 --- a/turbo/jsonrpc/eth_subscribe_test.go +++ b/turbo/jsonrpc/eth_subscribe_test.go @@ -8,6 +8,7 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/direct" "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" + "github.com/ledgerwatch/erigon-lib/wrap" "github.com/stretchr/testify/require" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices" @@ -55,7 +56,7 @@ func TestEthSubscribe(t *testing.T) { highestSeenHeader := chain.TopBlock.NumberU64() hook := stages.NewHook(m.Ctx, m.DB, m.Notifications, m.Sync, m.BlockReader, m.ChainConfig, m.Log, m.UpdateHead) - if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, logger, m.BlockReader, hook, false); err != nil { + if err := stages.StageLoopIteration(m.Ctx, m.DB, wrap.TxContainer{}, m.Sync, initialCycle, logger, m.BlockReader, hook, false); err != nil { t.Fatal(err) } diff --git a/turbo/jsonrpc/send_transaction_test.go b/turbo/jsonrpc/send_transaction_test.go index ba705745d34..6f0cb860a47 100644 --- a/turbo/jsonrpc/send_transaction_test.go +++ b/turbo/jsonrpc/send_transaction_test.go @@ -10,6 +10,7 @@ import ( "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/txpool/txpoolcfg" + "github.com/ledgerwatch/erigon-lib/wrap" "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" @@ -70,7 +71,7 @@ func oneBlockStep(mockSentry *mock.MockSentry, require *require.Assertions, t *t mockSentry.ReceiveWg.Wait() // Wait for all messages to be processed before we proceed initialCycle := mock.MockInsertAsInitialCycle - if err := stages.StageLoopIteration(mockSentry.Ctx, mockSentry.DB, nil, mockSentry.Sync, initialCycle, log.New(), mockSentry.BlockReader, nil, false); err != nil { + if err := stages.StageLoopIteration(mockSentry.Ctx, mockSentry.DB, wrap.TxContainer{}, mockSentry.Sync, initialCycle, log.New(), mockSentry.BlockReader, nil, false); err != nil { t.Fatal(err) } } diff --git a/turbo/jsonrpc/txpool_api.go b/turbo/jsonrpc/txpool_api.go index 05663be4a88..96ff0435cad 100644 --- a/turbo/jsonrpc/txpool_api.go +++ b/turbo/jsonrpc/txpool_api.go @@ -3,6 +3,7 @@ package jsonrpc import ( "context" "fmt" + "github.com/ledgerwatch/erigon-lib/common/hexutil" libcommon "github.com/ledgerwatch/erigon-lib/common" @@ -14,9 +15,10 @@ import ( "github.com/ledgerwatch/erigon/core/types" ) -// NetAPI the interface for the net_ RPC commands +// TxPoolAPI the interface for the txpool_ RPC commands type TxPoolAPI interface { Content(ctx context.Context) (map[string]map[string]map[string]*RPCTransaction, error) + ContentFrom(ctx context.Context, addr libcommon.Address) (map[string]map[string]*RPCTransaction, error) } // TxPoolAPIImpl data structure to store things needed for net_ commands @@ -116,6 +118,76 @@ func (api *TxPoolAPIImpl) Content(ctx context.Context) (map[string]map[string]ma return content, nil } +func (api *TxPoolAPIImpl) ContentFrom(ctx context.Context, addr libcommon.Address) (map[string]map[string]*RPCTransaction, error) { + reply, err := api.pool.All(ctx, &proto_txpool.AllRequest{}) + if err != nil { + return nil, err + } + + content := map[string]map[string]*RPCTransaction{ + "pending": make(map[string]*RPCTransaction), + "baseFee": make(map[string]*RPCTransaction), + "queued": make(map[string]*RPCTransaction), + } + + pending := make([]types.Transaction, 
0, 4) + baseFee := make([]types.Transaction, 0, 4) + queued := make([]types.Transaction, 0, 4) + for i := range reply.Txs { + txn, err := types.DecodeWrappedTransaction(reply.Txs[i].RlpTx) + if err != nil { + return nil, fmt.Errorf("decoding transaction from: %x: %w", reply.Txs[i].RlpTx, err) + } + sender := gointerfaces.ConvertH160toAddress(reply.Txs[i].Sender) + if sender != addr { + continue + } + + switch reply.Txs[i].TxnType { + case proto_txpool.AllReply_PENDING: + pending = append(pending, txn) + case proto_txpool.AllReply_BASE_FEE: + baseFee = append(baseFee, txn) + case proto_txpool.AllReply_QUEUED: + queued = append(queued, txn) + } + } + + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + cc, err := api.chainConfig(tx) + if err != nil { + return nil, err + } + + curHeader := rawdb.ReadCurrentHeader(tx) + if curHeader == nil { + return nil, nil + } + // Flatten the pending transactions + dump := make(map[string]*RPCTransaction) + for _, txn := range pending { + dump[fmt.Sprintf("%d", txn.GetNonce())] = newRPCPendingTransaction(txn, curHeader, cc) + } + content["pending"] = dump + // Flatten the baseFee transactions + dump = make(map[string]*RPCTransaction) + for _, txn := range baseFee { + dump[fmt.Sprintf("%d", txn.GetNonce())] = newRPCPendingTransaction(txn, curHeader, cc) + } + content["baseFee"] = dump + // Flatten the queued transactions + dump = make(map[string]*RPCTransaction) + for _, txn := range queued { + dump[fmt.Sprintf("%d", txn.GetNonce())] = newRPCPendingTransaction(txn, curHeader, cc) + } + content["queued"] = dump + return content, nil +} + // Status returns the number of pending and queued transaction in the pool. func (api *TxPoolAPIImpl) Status(ctx context.Context) (map[string]hexutil.Uint, error) { reply, err := api.pool.Status(ctx, &proto_txpool.StatusRequest{}) diff --git a/turbo/jsonrpc/validator_set.go b/turbo/jsonrpc/validator_set.go deleted file mode 100644 index 70c6a5ee5f0..00000000000 --- a/turbo/jsonrpc/validator_set.go +++ /dev/null @@ -1,704 +0,0 @@ -package jsonrpc - -// Tendermint leader selection algorithm - -import ( - "bytes" - "fmt" - "math" - "math/big" - "sort" - "strings" - - libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/log/v3" - - "github.com/ledgerwatch/erigon/consensus/bor/valset" -) - -// MaxTotalVotingPower - the maximum allowed total voting power. -// It needs to be sufficiently small to, in all cases: -// 1. prevent clipping in incrementProposerPriority() -// 2. let (diff+diffMax-1) not overflow in IncrementProposerPriority() -// (Proof of 1 is tricky, left to the reader). -// It could be higher, but this is sufficiently large for our purposes, -// and leaves room for defensive purposes. -// PriorityWindowSizeFactor - is a constant that when multiplied with the total voting power gives -// the maximum allowed distance between validator priorities. - -const ( - MaxTotalVotingPower = int64(math.MaxInt64) / 8 - PriorityWindowSizeFactor = 2 -) - -// ValidatorSet represent a set of *Validator at a given height. -// The validators can be fetched by address or index. -// The index is in order of .Address, so the indices are fixed -// for all rounds of a given blockchain height - ie. the validators -// are sorted by their address. -// On the other hand, the .ProposerPriority of each validator and -// the designated .GetProposer() of a set changes every round, -// upon calling .IncrementProposerPriority(). -// NOTE: Not goroutine-safe. 
-// NOTE: All get/set to validators should copy the value for safety. -type ValidatorSet struct { - // NOTE: persisted via reflect, must be exported. - Validators []*valset.Validator `json:"validators"` - Proposer *valset.Validator `json:"proposer"` - - // cached (unexported) - totalVotingPower int64 -} - -// NewValidatorSet initializes a ValidatorSet by copying over the -// values from `valz`, a list of Validators. If valz is nil or empty, -// the new ValidatorSet will have an empty list of Validators. -// The addresses of validators in `valz` must be unique otherwise the -// function panics. -func NewValidatorSet(valz []*valset.Validator) *ValidatorSet { - vals := &ValidatorSet{} - err := vals.updateWithChangeSet(valz, false) - if err != nil { - panic(fmt.Sprintf("cannot create validator set: %s", err)) - } - if len(valz) > 0 { - vals.IncrementProposerPriority(1) - } - return vals -} - -// Nil or empty validator sets are invalid. -func (vals *ValidatorSet) IsNilOrEmpty() bool { - return vals == nil || len(vals.Validators) == 0 -} - -// Increment ProposerPriority and update the proposer on a copy, and return it. -func (vals *ValidatorSet) CopyIncrementProposerPriority(times int) *ValidatorSet { - copy := vals.Copy() - copy.IncrementProposerPriority(times) - return copy -} - -// IncrementProposerPriority increments ProposerPriority of each validator and updates the -// proposer. Panics if validator set is empty. -// `times` must be positive. -func (vals *ValidatorSet) IncrementProposerPriority(times int) { - if vals.IsNilOrEmpty() { - panic("empty validator set") - } - if times <= 0 { - panic("Cannot call IncrementProposerPriority with non-positive times") - } - - // Cap the difference between priorities to be proportional to 2*totalPower by - // re-normalizing priorities, i.e., rescale all priorities by multiplying with: - // 2*totalVotingPower/(maxPriority - minPriority) - diffMax := PriorityWindowSizeFactor * vals.TotalVotingPower() - vals.RescalePriorities(diffMax) - vals.shiftByAvgProposerPriority() - - var proposer *valset.Validator - // Call IncrementProposerPriority(1) times times. - for i := 0; i < times; i++ { - proposer = vals.incrementProposerPriority() - } - - vals.Proposer = proposer -} - -func (vals *ValidatorSet) RescalePriorities(diffMax int64) { - if vals.IsNilOrEmpty() { - panic("empty validator set") - } - // NOTE: This check is merely a sanity check which could be - // removed if all tests would init. voting power appropriately; - // i.e. diffMax should always be > 0 - if diffMax <= 0 { - return - } - - // Calculating ceil(diff/diffMax): - // Re-normalization is performed by dividing by an integer for simplicity. - // NOTE: This may make debugging priority issues easier as well. - diff := computeMaxMinPriorityDiff(vals) - ratio := (diff + diffMax - 1) / diffMax - if diff > diffMax { - for _, val := range vals.Validators { - val.ProposerPriority = val.ProposerPriority / ratio - } - } -} - -func (vals *ValidatorSet) incrementProposerPriority() *valset.Validator { - for _, val := range vals.Validators { - // Check for overflow for sum. - newPrio := safeAddClip(val.ProposerPriority, val.VotingPower) - val.ProposerPriority = newPrio - } - // Decrement the validator with most ProposerPriority. - mostest := vals.getValWithMostPriority() - // Mind the underflow. - mostest.ProposerPriority = safeSubClip(mostest.ProposerPriority, vals.TotalVotingPower()) - - return mostest -} - -// Should not be called on an empty validator set. 
-func (vals *ValidatorSet) computeAvgProposerPriority() int64 { - n := int64(len(vals.Validators)) - sum := big.NewInt(0) - for _, val := range vals.Validators { - sum.Add(sum, big.NewInt(val.ProposerPriority)) - } - avg := sum.Div(sum, big.NewInt(n)) - if avg.IsInt64() { - return avg.Int64() - } - - // This should never happen: each val.ProposerPriority is in bounds of int64. - panic(fmt.Sprintf("Cannot represent avg ProposerPriority as an int64 %v", avg)) -} - -// Compute the difference between the max and min ProposerPriority of that set. -func computeMaxMinPriorityDiff(vals *ValidatorSet) int64 { - if vals.IsNilOrEmpty() { - panic("empty validator set") - } - max := int64(math.MinInt64) - min := int64(math.MaxInt64) - for _, v := range vals.Validators { - if v.ProposerPriority < min { - min = v.ProposerPriority - } - if v.ProposerPriority > max { - max = v.ProposerPriority - } - } - diff := max - min - if diff < 0 { - return -1 * diff - } else { - return diff - } -} - -func (vals *ValidatorSet) getValWithMostPriority() *valset.Validator { - var res *valset.Validator - for _, val := range vals.Validators { - res = res.Cmp(val) - } - return res -} - -func (vals *ValidatorSet) shiftByAvgProposerPriority() { - if vals.IsNilOrEmpty() { - panic("empty validator set") - } - avgProposerPriority := vals.computeAvgProposerPriority() - for _, val := range vals.Validators { - val.ProposerPriority = safeSubClip(val.ProposerPriority, avgProposerPriority) - } -} - -// Makes a copy of the validator list. -func validatorListCopy(valsList []*valset.Validator) []*valset.Validator { - if valsList == nil { - return nil - } - valsCopy := make([]*valset.Validator, len(valsList)) - for i, val := range valsList { - valsCopy[i] = val.Copy() - } - return valsCopy -} - -// Copy each validator into a new ValidatorSet. -func (vals *ValidatorSet) Copy() *ValidatorSet { - return &ValidatorSet{ - Validators: validatorListCopy(vals.Validators), - Proposer: vals.Proposer, - totalVotingPower: vals.totalVotingPower, - } -} - -// HasAddress returns true if address given is in the validator set, false - -// otherwise. -func (vals *ValidatorSet) HasAddress(address []byte) bool { - idx := sort.Search(len(vals.Validators), func(i int) bool { - return bytes.Compare(address, vals.Validators[i].Address.Bytes()) <= 0 - }) - return idx < len(vals.Validators) && bytes.Equal(vals.Validators[idx].Address.Bytes(), address) -} - -// GetByAddress returns an index of the validator with address and validator -// itself if found. Otherwise, -1 and nil are returned. -func (vals *ValidatorSet) GetByAddress(address libcommon.Address) (index int, val *valset.Validator) { - idx := sort.Search(len(vals.Validators), func(i int) bool { - return bytes.Compare(address.Bytes(), vals.Validators[i].Address.Bytes()) <= 0 - }) - if idx < len(vals.Validators) && bytes.Equal(vals.Validators[idx].Address.Bytes(), address.Bytes()) { - return idx, vals.Validators[idx].Copy() - } - return -1, nil -} - -// GetByIndex returns the validator's address and validator itself by index. -// It returns nil values if index is less than 0 or greater or equal to -// len(ValidatorSet.Validators). -func (vals *ValidatorSet) GetByIndex(index int) (address []byte, val *valset.Validator) { - if index < 0 || index >= len(vals.Validators) { - return nil, nil - } - val = vals.Validators[index] - return val.Address.Bytes(), val.Copy() -} - -// Size returns the length of the validator set. 
-func (vals *ValidatorSet) Size() int { - return len(vals.Validators) -} - -// Force recalculation of the set's total voting power. -func (vals *ValidatorSet) UpdateTotalVotingPower() error { - - sum := int64(0) - for _, val := range vals.Validators { - // mind overflow - sum = safeAddClip(sum, val.VotingPower) - if sum > MaxTotalVotingPower { - return &valset.TotalVotingPowerExceededError{Sum: sum, Validators: vals.Validators} - } - } - vals.totalVotingPower = sum - return nil -} - -// TotalVotingPower returns the sum of the voting powers of all validators. -// It recomputes the total voting power if required. -func (vals *ValidatorSet) TotalVotingPower() int64 { - if vals.totalVotingPower == 0 { - log.Info("invoking updateTotalVotingPower before returning it") - if err := vals.UpdateTotalVotingPower(); err != nil { - // Can/should we do better? - panic(err) - } - } - return vals.totalVotingPower -} - -// GetProposer returns the current proposer. If the validator set is empty, nil -// is returned. -func (vals *ValidatorSet) GetProposer() (proposer *valset.Validator) { - if len(vals.Validators) == 0 { - return nil - } - if vals.Proposer == nil { - vals.Proposer = vals.findProposer() - } - return vals.Proposer.Copy() -} - -func (vals *ValidatorSet) findProposer() *valset.Validator { - var proposer *valset.Validator - for _, val := range vals.Validators { - if proposer == nil || !bytes.Equal(val.Address.Bytes(), proposer.Address.Bytes()) { - proposer = proposer.Cmp(val) - } - } - return proposer -} - -// Hash returns the Merkle root hash build using validators (as leaves) in the -// set. -// func (vals *ValidatorSet) Hash() []byte { -// if len(vals.Validators) == 0 { -// return nil -// } -// bzs := make([][]byte, len(vals.Validators)) -// for i, val := range vals.Validators { -// bzs[i] = val.Bytes() -// } -// return merkle.SimpleHashFromByteSlices(bzs) -// } - -// Iterate will run the given function over the set. -func (vals *ValidatorSet) Iterate(fn func(index int, val *valset.Validator) bool) { - for i, val := range vals.Validators { - stop := fn(i, val.Copy()) - if stop { - break - } - } -} - -// Checks changes against duplicates, splits the changes in updates and removals, sorts them by address. -// -// Returns: -// updates, removals - the sorted lists of updates and removals -// err - non-nil if duplicate entries or entries with negative voting power are seen -// -// No changes are made to 'origChanges'. -func processChanges(origChanges []*valset.Validator) (updates, removals []*valset.Validator, err error) { - // Make a deep copy of the changes and sort by address. - changes := validatorListCopy(origChanges) - sort.Sort(ValidatorsByAddress(changes)) - - removals = make([]*valset.Validator, 0, len(changes)) - updates = make([]*valset.Validator, 0, len(changes)) - var prevAddr libcommon.Address - - // Scan changes by address and append valid validators to updates or removals lists. 
- for _, valUpdate := range changes { - if bytes.Equal(valUpdate.Address.Bytes(), prevAddr.Bytes()) { - err = fmt.Errorf("duplicate entry %v in %v", valUpdate, changes) - return nil, nil, err - } - if valUpdate.VotingPower < 0 { - err = fmt.Errorf("voting power can't be negative: %v", valUpdate) - return nil, nil, err - } - if valUpdate.VotingPower > MaxTotalVotingPower { - err = fmt.Errorf("to prevent clipping/ overflow, voting power can't be higher than %v: %v ", - MaxTotalVotingPower, valUpdate) - return nil, nil, err - } - if valUpdate.VotingPower == 0 { - removals = append(removals, valUpdate) - } else { - updates = append(updates, valUpdate) - } - prevAddr = valUpdate.Address - } - return updates, removals, err -} - -// Verifies a list of updates against a validator set, making sure the allowed -// total voting power would not be exceeded if these updates would be applied to the set. -// -// Returns: -// updatedTotalVotingPower - the new total voting power if these updates would be applied -// numNewValidators - number of new validators -// err - non-nil if the maximum allowed total voting power would be exceeded -// -// 'updates' should be a list of proper validator changes, i.e. they have been verified -// by processChanges for duplicates and invalid values. -// No changes are made to the validator set 'vals'. -func verifyUpdates(updates []*valset.Validator, vals *ValidatorSet) (updatedTotalVotingPower int64, numNewValidators int, err error) { - - updatedTotalVotingPower = vals.TotalVotingPower() - - for _, valUpdate := range updates { - address := valUpdate.Address - _, val := vals.GetByAddress(address) - if val == nil { - // New validator, add its voting power the the total. - updatedTotalVotingPower += valUpdate.VotingPower - numNewValidators++ - } else { - // Updated validator, add the difference in power to the total. - updatedTotalVotingPower += valUpdate.VotingPower - val.VotingPower - } - overflow := updatedTotalVotingPower > MaxTotalVotingPower - if overflow { - err = fmt.Errorf( - "failed to add/update validator %v, total voting power would exceed the max allowed %v", - valUpdate, MaxTotalVotingPower) - return 0, 0, err - } - } - - return updatedTotalVotingPower, numNewValidators, nil -} - -// Computes the proposer priority for the validators not present in the set based on 'updatedTotalVotingPower'. -// Leaves unchanged the priorities of validators that are changed. -// -// 'updates' parameter must be a list of unique validators to be added or updated. -// No changes are made to the validator set 'vals'. -func computeNewPriorities(updates []*valset.Validator, vals *ValidatorSet, updatedTotalVotingPower int64) { - - for _, valUpdate := range updates { - address := valUpdate.Address - _, val := vals.GetByAddress(address) - if val == nil { - // add val - // Set ProposerPriority to -C*totalVotingPower (with C ~= 1.125) to make sure validators can't - // un-bond and then re-bond to reset their (potentially previously negative) ProposerPriority to zero. - // - // Contract: updatedVotingPower < MaxTotalVotingPower to ensure ProposerPriority does - // not exceed the bounds of int64. - // - // Compute ProposerPriority = -1.125*totalVotingPower == -(updatedVotingPower + (updatedVotingPower >> 3)). - valUpdate.ProposerPriority = -(updatedTotalVotingPower + (updatedTotalVotingPower >> 3)) - } else { - valUpdate.ProposerPriority = val.ProposerPriority - } - } - -} - -// Merges the vals' validator list with the updates list. 
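The penalty computeNewPriorities above applies to freshly added validators is pure integer arithmetic: P>>3 is P/8 truncated toward zero, i.e. 0.125*P, so -(P + P>>3) yields roughly -1.125*P with no floating point involved. A quick check of that identity:

package main

import "fmt"

func main() {
	// -(P + P>>3) ~= -1.125*P; the >>3 term truncates toward zero.
	for _, p := range []int64{1000, 8, 10} {
		fmt.Printf("P=%d  -(P + P>>3) = %d\n", p, -(p + p>>3))
	}
	// Output:
	// P=1000  -(P + P>>3) = -1125
	// P=8     -(P + P>>3) = -9
	// P=10    -(P + P>>3) = -11
}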
-// When two elements with same address are seen, the one from updates is selected. -// Expects updates to be a list of updates sorted by address with no duplicates or errors, -// must have been validated with verifyUpdates() and priorities computed with computeNewPriorities(). -func (vals *ValidatorSet) applyUpdates(updates []*valset.Validator) { - - existing := vals.Validators - merged := make([]*valset.Validator, len(existing)+len(updates)) - i := 0 - - for len(existing) > 0 && len(updates) > 0 { - if bytes.Compare(existing[0].Address.Bytes(), updates[0].Address.Bytes()) < 0 { // unchanged validator - merged[i] = existing[0] - existing = existing[1:] - } else { - // Apply add or update. - merged[i] = updates[0] - if bytes.Equal(existing[0].Address.Bytes(), updates[0].Address.Bytes()) { - // bor.Validator is present in both, advance existing. - existing = existing[1:] - } - updates = updates[1:] - } - i++ - } - - // Add the elements which are left. - for j := 0; j < len(existing); j++ { - merged[i] = existing[j] - i++ - } - // OR add updates which are left. - for j := 0; j < len(updates); j++ { - merged[i] = updates[j] - i++ - } - - vals.Validators = merged[:i] -} - -// Checks that the validators to be removed are part of the validator set. -// No changes are made to the validator set 'vals'. -func verifyRemovals(deletes []*valset.Validator, vals *ValidatorSet) error { - - for _, valUpdate := range deletes { - address := valUpdate.Address - _, val := vals.GetByAddress(address) - if val == nil { - return fmt.Errorf("failed to find validator %X to remove", address) - } - } - if len(deletes) > len(vals.Validators) { - panic("more deletes than validators") - } - return nil -} - -// Removes the validators specified in 'deletes' from validator set 'vals'. -// Should not fail as verification has been done before. -func (vals *ValidatorSet) applyRemovals(deletes []*valset.Validator) { - - existing := vals.Validators - - merged := make([]*valset.Validator, len(existing)-len(deletes)) - i := 0 - - // Loop over deletes until we removed all of them. - for len(deletes) > 0 { - if bytes.Equal(existing[0].Address.Bytes(), deletes[0].Address.Bytes()) { - deletes = deletes[1:] - } else { // Leave it in the resulting slice. - merged[i] = existing[0] - i++ - } - existing = existing[1:] - } - - // Add the elements which are left. - for j := 0; j < len(existing); j++ { - merged[i] = existing[j] - i++ - } - - vals.Validators = merged[:i] -} - -// Main function used by UpdateWithChangeSet() and NewValidatorSet(). -// If 'allowDeletes' is false then delete operations (identified by validators with voting power 0) -// are not allowed and will trigger an error if present in 'changes'. -// The 'allowDeletes' flag is set to false by NewValidatorSet() and to true by UpdateWithChangeSet(). -func (vals *ValidatorSet) updateWithChangeSet(changes []*valset.Validator, allowDeletes bool) error { - - if len(changes) < 1 { - return nil - } - - // Check for duplicates within changes, split in 'updates' and 'deletes' lists (sorted). - updates, deletes, err := processChanges(changes) - if err != nil { - return err - } - - if !allowDeletes && len(deletes) != 0 { - return fmt.Errorf("cannot process validators with voting power 0: %v", deletes) - } - - // Verify that applying the 'deletes' against 'vals' will not result in error. - if err := verifyRemovals(deletes, vals); err != nil { - return err - } - - // Verify that applying the 'updates' against 'vals' will not result in error. 
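applyUpdates above is a single-pass merge of two address-sorted slices in which, on an equal key, the entry from updates replaces the existing one. The same two-pointer shape on plain ints, as an illustrative sketch (mergeSorted is not part of the diff):

package main

import "fmt"

// mergeSorted merges two ascending slices; on equal keys the value
// from updates wins, echoing applyUpdates above.
func mergeSorted(existing, updates []int) []int {
	merged := make([]int, 0, len(existing)+len(updates))
	for len(existing) > 0 && len(updates) > 0 {
		switch {
		case existing[0] < updates[0]: // unchanged element
			merged = append(merged, existing[0])
			existing = existing[1:]
		case existing[0] == updates[0]: // present in both: take the update
			merged = append(merged, updates[0])
			existing, updates = existing[1:], updates[1:]
		default: // new element
			merged = append(merged, updates[0])
			updates = updates[1:]
		}
	}
	merged = append(merged, existing...) // leftovers from existing, if any
	return append(merged, updates...)    // or leftovers from updates
}

func main() {
	fmt.Println(mergeSorted([]int{1, 3, 5}, []int{3, 4})) // [1 3 4 5]
}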
- updatedTotalVotingPower, numNewValidators, err := verifyUpdates(updates, vals) - if err != nil { - return err - } - - // Check that the resulting set will not be empty. - if numNewValidators == 0 && len(vals.Validators) == len(deletes) { - return fmt.Errorf("applying the validator changes would result in empty set") - } - - // Compute the priorities for updates. - computeNewPriorities(updates, vals, updatedTotalVotingPower) - - // Apply updates and removals. - vals.applyUpdates(updates) - vals.applyRemovals(deletes) - - if err := vals.UpdateTotalVotingPower(); err != nil { - return err - } - - // Scale and center. - vals.RescalePriorities(PriorityWindowSizeFactor * vals.TotalVotingPower()) - vals.shiftByAvgProposerPriority() - - return nil -} - -// UpdateWithChangeSet attempts to update the validator set with 'changes'. -// It performs the following steps: -// - validates the changes making sure there are no duplicates and splits them in updates and deletes -// - verifies that applying the changes will not result in errors -// - computes the total voting power BEFORE removals to ensure that in the next steps the priorities -// across old and newly added validators are fair -// - computes the priorities of new validators against the final set -// - applies the updates against the validator set -// - applies the removals against the validator set -// - performs scaling and centering of priority values -// -// If an error is detected during verification steps, it is returned and the validator set -// is not changed. -func (vals *ValidatorSet) UpdateWithChangeSet(changes []*valset.Validator) error { - return vals.updateWithChangeSet(changes, true) -} - -//----------------- -// ErrTooMuchChange - -func IsErrTooMuchChange(err error) bool { - switch err.(type) { - case errTooMuchChange: - return true - default: - return false - } -} - -type errTooMuchChange struct { - got int64 - needed int64 -} - -func (e errTooMuchChange) Error() string { - return fmt.Sprintf("Invalid commit -- insufficient old voting power: got %v, needed %v", e.got, e.needed) -} - -//---------------- - -func (vals *ValidatorSet) String() string { - return vals.StringIndented("") -} - -func (vals *ValidatorSet) StringIndented(indent string) string { - if vals == nil { - return "nil-ValidatorSet" - } - var valStrings []string - vals.Iterate(func(index int, val *valset.Validator) bool { - valStrings = append(valStrings, val.String()) - return false - }) - return fmt.Sprintf(`ValidatorSet{ -%s Proposer: %v -%s Validators: -%s %v -%s}`, - indent, vals.GetProposer().String(), - indent, - indent, strings.Join(valStrings, "\n"+indent+" "), - indent) - -} - -//------------------------------------- -// Implements sort for sorting validators by address. - -// Sort validators by address. 
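IsErrTooMuchChange above detects errTooMuchChange with a bare type switch, which stops matching as soon as a caller wraps the error via fmt.Errorf with %w. A sketch of the standard-library errors.As idiom that does survive wrapping (this is an aside, not part of the change):

package main

import (
	"errors"
	"fmt"
)

type errTooMuchChange struct{ got, needed int64 }

func (e errTooMuchChange) Error() string {
	return fmt.Sprintf("insufficient old voting power: got %v, needed %v", e.got, e.needed)
}

func main() {
	// Wrapping with %w hides the concrete type from a type switch,
	// but errors.As unwraps the chain and still finds it.
	err := fmt.Errorf("verify commit: %w", errTooMuchChange{got: 1, needed: 2})

	var e errTooMuchChange
	fmt.Println(errors.As(err, &e)) // true
	fmt.Println(e.needed)           // 2
}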
-type ValidatorsByAddress []*valset.Validator - -func (valz ValidatorsByAddress) Len() int { - return len(valz) -} - -func (valz ValidatorsByAddress) Less(i, j int) bool { - return bytes.Compare(valz[i].Address.Bytes(), valz[j].Address.Bytes()) == -1 -} - -func (valz ValidatorsByAddress) Swap(i, j int) { - valz[i], valz[j] = valz[j], valz[i] -} - -/////////////////////////////////////////////////////////////////////////////// -// safe addition/subtraction - -func safeAdd(a, b int64) (int64, bool) { - if b > 0 && a > math.MaxInt64-b { - return -1, true - } else if b < 0 && a < math.MinInt64-b { - return -1, true - } - return a + b, false -} - -func safeSub(a, b int64) (int64, bool) { - if b > 0 && a < math.MinInt64+b { - return -1, true - } else if b < 0 && a > math.MaxInt64+b { - return -1, true - } - return a - b, false -} - -func safeAddClip(a, b int64) int64 { - c, overflow := safeAdd(a, b) - if overflow { - if b < 0 { - return math.MinInt64 - } - return math.MaxInt64 - } - return c -} - -func safeSubClip(a, b int64) int64 { - c, overflow := safeSub(a, b) - if overflow { - if b > 0 { - return math.MinInt64 - } - return math.MaxInt64 - } - return c -} diff --git a/turbo/logging/logging.go b/turbo/logging/logging.go index 988fa7fb5da..f36b0999b4c 100644 --- a/turbo/logging/logging.go +++ b/turbo/logging/logging.go @@ -3,7 +3,6 @@ package logging import ( "flag" "os" - "path" "path/filepath" "strconv" @@ -21,7 +20,8 @@ import ( // This function which is used in Erigon itself. // Note: urfave and cobra are two CLI frameworks/libraries for the same functionalities // and it would make sense to choose one over another -func SetupLoggerCtx(filePrefix string, ctx *cli.Context, rootHandler bool) log.Logger { +func SetupLoggerCtx(filePrefix string, ctx *cli.Context, + consoleDefaultLevel log.Lvl, dirDefaultLevel log.Lvl, rootHandler bool) log.Logger { var consoleJson = ctx.Bool(LogJsonFlag.Name) || ctx.Bool(LogConsoleJsonFlag.Name) var dirJson = ctx.Bool(LogDirJsonFlag.Name) @@ -30,13 +30,13 @@ func SetupLoggerCtx(filePrefix string, ctx *cli.Context, rootHandler bool) log.L // try verbosity flag consoleLevel, lErr = tryGetLogLevel(ctx.String(LogVerbosityFlag.Name)) if lErr != nil { - consoleLevel = log.LvlInfo + consoleLevel = consoleDefaultLevel } } dirLevel, dErr := tryGetLogLevel(ctx.String(LogDirVerbosityFlag.Name)) if dErr != nil { - dirLevel = log.LvlInfo + dirLevel = dirDefaultLevel } dirPath := "" @@ -202,7 +202,7 @@ func initSeparatedLogging( } lumberjack := &lumberjack.Logger{ - Filename: path.Join(dirPath, filePrefix+".log"), + Filename: filepath.Join(dirPath, filePrefix+".log"), MaxSize: 100, // megabytes MaxBackups: 3, MaxAge: 28, //days diff --git a/turbo/rpchelper/helper.go b/turbo/rpchelper/helper.go index 497eaa9523a..2a6ccc0544b 100644 --- a/turbo/rpchelper/helper.go +++ b/turbo/rpchelper/helper.go @@ -9,13 +9,13 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" - borfinality "github.com/ledgerwatch/erigon/consensus/bor/finality" - "github.com/ledgerwatch/erigon/consensus/bor/finality/whitelist" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/systemcontracts" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + borfinality "github.com/ledgerwatch/erigon/polygon/bor/finality" + "github.com/ledgerwatch/erigon/polygon/bor/finality/whitelist" 
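The path.Join to filepath.Join switch in turbo/logging/logging.go above is a portability fix rather than a cosmetic one: package path always joins with forward slashes, while filepath uses the host separator, which is what matters for an on-disk log file. A tiny illustration (the directory and file names here are made up):

package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	// On Windows the second line prints erigon_logs\erigon.log;
	// the first always prints erigon_logs/erigon.log.
	fmt.Println(path.Join("erigon_logs", "erigon.log"))
	fmt.Println(filepath.Join("erigon_logs", "erigon.log"))
}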
"github.com/ledgerwatch/erigon/rpc" ) diff --git a/turbo/services/interfaces.go b/turbo/services/interfaces.go index 130337fbf3d..0857520ac88 100644 --- a/turbo/services/interfaces.go +++ b/turbo/services/interfaces.go @@ -90,17 +90,19 @@ type FullBlockReader interface { } type BlockSnapshots interface { - LogStat() + LogStat(label string) ReopenFolder() error SegmentsMax() uint64 + SegmentsMin() uint64 } // BlockRetire - freezing blocks: moving old data from DB to snapshot files type BlockRetire interface { PruneAncientBlocks(tx kv.RwTx, limit int) error - RetireBlocksInBackground(ctx context.Context, maxBlockNumInDB uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []DownloadRequest) error, onDelete func(l []string) error) + RetireBlocksInBackground(ctx context.Context, miBlockNum uint64, maxBlockNum uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []DownloadRequest) error, onDelete func(l []string) error) HasNewFrozenFiles() bool BuildMissedIndicesIfNeed(ctx context.Context, logPrefix string, notifier DBEventNotifier, cc *chain.Config) error + SetWorkers(workers int) } /* @@ -124,6 +126,7 @@ type DBEventNotifier interface { } type DownloadRequest struct { + Version uint8 Path string TorrentHash string } diff --git a/turbo/shards/state_change_accumulator.go b/turbo/shards/state_change_accumulator.go index 8ba19686ee5..cf0cc8c563b 100644 --- a/turbo/shards/state_change_accumulator.go +++ b/turbo/shards/state_change_accumulator.go @@ -32,6 +32,7 @@ func (a *Accumulator) Reset(plainStateID uint64) { a.storageChangeIndex = nil a.plainStateID = plainStateID } + func (a *Accumulator) SendAndReset(ctx context.Context, c StateChangeConsumer, pendingBaseFee uint64, pendingBlobFee uint64, blockGasLimit uint64, finalizedBlock uint64) { if a == nil || c == nil || len(a.changes) == 0 { return diff --git a/turbo/snapshotsync/freezeblocks/block_reader.go b/turbo/snapshotsync/freezeblocks/block_reader.go index cb9dfc3f0f4..fc6834937fa 100644 --- a/turbo/snapshotsync/freezeblocks/block_reader.go +++ b/turbo/snapshotsync/freezeblocks/block_reader.go @@ -5,10 +5,11 @@ import ( "context" "encoding/binary" "fmt" - "github.com/ledgerwatch/erigon/consensus/bor" "math" "sort" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/common/length" @@ -16,10 +17,10 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/recsplit" - "github.com/ledgerwatch/erigon/eth/ethconfig" - "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/polygon/bor" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/services" ) @@ -247,7 +248,9 @@ type BlockReader struct { } func NewBlockReader(snapshots services.BlockSnapshots, borSnapshots services.BlockSnapshots) *BlockReader { - return &BlockReader{sn: snapshots.(*RoSnapshots), borSn: borSnapshots.(*BorRoSnapshots)} + borSn, _ := borSnapshots.(*BorRoSnapshots) + sn, _ := snapshots.(*RoSnapshots) + return &BlockReader{sn: sn, borSn: borSn} } func (r *BlockReader) CanPruneTo(currentBlockInDB uint64) uint64 { @@ -262,8 +265,13 @@ func (r *BlockReader) BorSnapshots() services.BlockSnapshots { return nil } -func (r *BlockReader) FrozenBlocks() uint64 { return r.sn.BlocksAvailable() } -func (r *BlockReader) FrozenBorBlocks() uint64 { return 
r.borSn.BlocksAvailable() } +func (r *BlockReader) FrozenBlocks() uint64 { return r.sn.BlocksAvailable() } +func (r *BlockReader) FrozenBorBlocks() uint64 { + if r.borSn != nil { + return r.borSn.BlocksAvailable() + } + return 0 +} func (r *BlockReader) FrozenFiles() []string { files := r.sn.Files() if r.borSn != nil { @@ -279,16 +287,18 @@ func (r *BlockReader) HeadersRange(ctx context.Context, walker func(header *type } func (r *BlockReader) HeaderByNumber(ctx context.Context, tx kv.Getter, blockHeight uint64) (h *types.Header, err error) { - blockHash, err := rawdb.ReadCanonicalHash(tx, blockHeight) - if err != nil { - return nil, err - } - if blockHash == (common.Hash{}) { - return nil, nil - } - h = rawdb.ReadHeader(tx, blockHash, blockHeight) - if h != nil { - return h, nil + if tx != nil { + blockHash, err := rawdb.ReadCanonicalHash(tx, blockHeight) + if err != nil { + return nil, err + } + if blockHash == (common.Hash{}) { + return nil, nil + } + h = rawdb.ReadHeader(tx, blockHash, blockHeight) + if h != nil { + return h, nil + } } view := r.sn.View() @@ -366,9 +376,11 @@ func (r *BlockReader) CanonicalHash(ctx context.Context, tx kv.Getter, blockHeig } func (r *BlockReader) Header(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (h *types.Header, err error) { - h = rawdb.ReadHeader(tx, hash, blockHeight) - if h != nil { - return h, nil + if tx != nil { + h = rawdb.ReadHeader(tx, hash, blockHeight) + if h != nil { + return h, nil + } } view := r.sn.View() @@ -385,13 +397,14 @@ func (r *BlockReader) Header(ctx context.Context, tx kv.Getter, hash common.Hash } func (r *BlockReader) BodyWithTransactions(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (body *types.Body, err error) { - - body, err = rawdb.ReadBodyWithTransactions(tx, hash, blockHeight) - if err != nil { - return nil, err - } - if body != nil { - return body, nil + if tx != nil { + body, err = rawdb.ReadBodyWithTransactions(tx, hash, blockHeight) + if err != nil { + return nil, err + } + if body != nil { + return body, nil + } } view := r.sn.View() @@ -472,7 +485,7 @@ func (r *BlockReader) BlockWithSenders(ctx context.Context, tx kv.Getter, hash c } func (r *BlockReader) blockWithSenders(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64, forceCanonical bool) (block *types.Block, senders []common.Address, err error) { maxBlockNumInFiles := r.sn.BlocksAvailable() - if maxBlockNumInFiles == 0 || blockHeight > maxBlockNumInFiles { + if tx != nil && (maxBlockNumInFiles == 0 || blockHeight > maxBlockNumInFiles) { if forceCanonical { canonicalHash, err := rawdb.ReadCanonicalHash(tx, blockHeight) if err != nil { @@ -490,6 +503,10 @@ func (r *BlockReader) blockWithSenders(ctx context.Context, tx kv.Getter, hash c return block, senders, nil } + if r.sn == nil { + return + } + view := r.sn.View() defer view.Close() seg, ok := view.HeadersSegment(blockHeight) @@ -577,7 +594,7 @@ func (r *BlockReader) headerFromSnapshot(blockHeight uint64, sn *HeaderSegment, func (r *BlockReader) headerFromSnapshotByHash(hash common.Hash, sn *HeaderSegment, buf []byte) (*types.Header, error) { defer func() { if rec := recover(); rec != nil { - panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, sn.ranges.from, sn.ranges.to, dbg.Stack())) + panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, sn.from, sn.to, dbg.Stack())) } }() // avoid crash because Erigon's core does many things @@ -629,7 +646,7 @@ func (r *BlockReader) bodyFromSnapshot(blockHeight uint64, sn 
*BodySegment, buf func (r *BlockReader) bodyForStorageFromSnapshot(blockHeight uint64, sn *BodySegment, buf []byte) (*types.BodyForStorage, []byte, error) { defer func() { if rec := recover(); rec != nil { - panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, sn.ranges.from, sn.ranges.to, dbg.Stack())) + panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, sn.from, sn.to, dbg.Stack())) } }() // avoid crash because Erigon's core does many things @@ -659,7 +676,7 @@ func (r *BlockReader) bodyForStorageFromSnapshot(blockHeight uint64, sn *BodySeg func (r *BlockReader) txsFromSnapshot(baseTxnID uint64, txsAmount uint32, txsSeg *TxnSegment, buf []byte) (txs []types.Transaction, senders []common.Address, err error) { defer func() { if rec := recover(); rec != nil { - panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, txsSeg.ranges.from, txsSeg.ranges.to, dbg.Stack())) + panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, txsSeg.from, txsSeg.to, dbg.Stack())) } }() // avoid crash because Erigon's core does many things @@ -843,7 +860,7 @@ func (r *BlockReader) IterateFrozenBodies(f func(blockNum, baseTxNum, txAmount u var buf []byte g := sn.seg.MakeGetter() - blockNum := sn.ranges.from + blockNum := sn.from var b types.BodyForStorage for g.HasNext() { buf, _ = g.Next(buf[:0]) @@ -858,6 +875,33 @@ func (r *BlockReader) IterateFrozenBodies(f func(blockNum, baseTxNum, txAmount u } return nil } + +func (r *BlockReader) IntegrityTxnID(failFast bool) error { + defer log.Info("[integrity] IntegrityTxnID done") + view := r.sn.View() + defer view.Close() + + var expectedFirstTxnID uint64 + for _, snb := range view.Bodies() { + firstBlockNum := snb.idxBodyNumber.BaseDataID() + sn, _ := view.TxsSegment(firstBlockNum) + b, _, err := r.bodyForStorageFromSnapshot(firstBlockNum, snb, nil) + if err != nil { + return err + } + if b.BaseTxId != expectedFirstTxnID { + err := fmt.Errorf("[integrity] IntegrityTxnID: bn=%d, baseID=%d, cnt=%d, expectedFirstTxnID=%d", firstBlockNum, b.BaseTxId, sn.Seg.Count(), expectedFirstTxnID) + if failFast { + return err + } else { + log.Error(err.Error()) + } + } + expectedFirstTxnID = b.BaseTxId + uint64(sn.Seg.Count()) + } + return nil +} + func (r *BlockReader) BadHeaderNumber(ctx context.Context, tx kv.Getter, hash common.Hash) (blockHeight *uint64, err error) { return rawdb.ReadBadHeaderNumber(tx, hash) } @@ -953,6 +997,10 @@ func (r *BlockReader) EventLookup(ctx context.Context, tx kv.Getter, txnHash com return *n, true, nil } + if r.borSn == nil { + return 0, false, nil + } + view := r.borSn.View() defer view.Close() @@ -1045,10 +1093,10 @@ func (r *BlockReader) EventsByBlock(ctx context.Context, tx kv.Tx, hash common.H result := []rlp.RawValue{} for i := len(segments) - 1; i >= 0; i-- { sn := segments[i] - if sn.ranges.from > blockHeight { + if sn.from > blockHeight { continue } - if sn.ranges.to <= blockHeight { + if sn.to <= blockHeight { continue } if sn.IdxBorTxnHash == nil { @@ -1071,6 +1119,10 @@ func (r *BlockReader) EventsByBlock(ctx context.Context, tx kv.Tx, hash common.H } func (r *BlockReader) LastFrozenEventID() uint64 { + if r.borSn == nil { + return 0 + } + view := r.borSn.View() defer view.Close() segments := view.Events() @@ -1099,6 +1151,10 @@ func (r *BlockReader) LastFrozenEventID() uint64 { } func (r *BlockReader) LastFrozenSpanID() uint64 { + if r.borSn == nil { + return 0 + } + view := r.borSn.View() defer view.Close() segments := view.Spans() @@ -1117,7 +1173,7 @@ func (r *BlockReader) LastFrozenSpanID() uint64 { return 0 } - 
lastSpanID := bor.SpanIDAt(lastSegment.ranges.to) + lastSpanID := bor.SpanIDAt(lastSegment.to) if lastSpanID > 0 { lastSpanID-- } @@ -1138,7 +1194,7 @@ func (r *BlockReader) Span(ctx context.Context, tx kv.Getter, spanId uint64) ([] return nil, err } if v == nil { - return nil, fmt.Errorf("span %d not found (db), frosenBlocks=%d", spanId, maxBlockNumInFiles) + return nil, fmt.Errorf("span %d not found (db), frozenBlocks=%d", spanId, maxBlockNumInFiles) } return common.Copy(v), nil } @@ -1150,11 +1206,11 @@ func (r *BlockReader) Span(ctx context.Context, tx kv.Getter, spanId uint64) ([] if sn.idx == nil { continue } - spanFrom := bor.SpanIDAt(sn.ranges.from) + spanFrom := bor.SpanIDAt(sn.from) if spanId < spanFrom { continue } - spanTo := bor.SpanIDAt(sn.ranges.to) + spanTo := bor.SpanIDAt(sn.to) if spanId >= spanTo { continue } @@ -1190,10 +1246,10 @@ func (r *BlockReader) Integrity(ctx context.Context) error { view := r.sn.View() defer view.Close() for _, seg := range view.Headers() { - if err := r.ensureHeaderNumber(seg.ranges.from, seg); err != nil { + if err := r.ensureHeaderNumber(seg.from, seg); err != nil { return err } - if err := r.ensureHeaderNumber(seg.ranges.to-1, seg); err != nil { + if err := r.ensureHeaderNumber(seg.to-1, seg); err != nil { return err } } diff --git a/turbo/snapshotsync/freezeblocks/block_reader_test.go b/turbo/snapshotsync/freezeblocks/block_reader_test.go index 8f1347df9a7..a408ea2b820 100644 --- a/turbo/snapshotsync/freezeblocks/block_reader_test.go +++ b/turbo/snapshotsync/freezeblocks/block_reader_test.go @@ -24,8 +24,8 @@ func TestBlockReaderLastFrozenSpanIDWhenSegmentFilesArePresent(t *testing.T) { logger := testlog.Logger(t, log.LvlInfo) dir := t.TempDir() createTestBorEventSegmentFile(t, 0, 500_000, 132, dir, logger) - createTestSegmentFile(t, 0, 500_000, snaptype.BorSpans, dir, logger) - borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, logger) + createTestSegmentFile(t, 0, 500_000, snaptype.BorSpans, dir, 1, logger) + borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 1, logger) defer borRoSnapshots.Close() err := borRoSnapshots.ReopenFolder() require.NoError(t, err) @@ -39,7 +39,7 @@ func TestBlockReaderLastFrozenSpanIDWhenSegmentFilesAreNotPresent(t *testing.T) logger := testlog.Logger(t, log.LvlInfo) dir := t.TempDir() - borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, logger) + borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 1, logger) defer borRoSnapshots.Close() err := borRoSnapshots.ReopenFolder() require.NoError(t, err) @@ -56,14 +56,14 @@ func TestBlockReaderLastFrozenSpanIDReturnsLastSegWithIdx(t *testing.T) { createTestBorEventSegmentFile(t, 0, 500_000, 132, dir, logger) createTestBorEventSegmentFile(t, 500_000, 1_000_000, 264, dir, logger) createTestBorEventSegmentFile(t, 1_000_000, 1_500_000, 528, dir, logger) - createTestSegmentFile(t, 0, 500_000, snaptype.BorSpans, dir, logger) - createTestSegmentFile(t, 500_000, 1_000_000, snaptype.BorSpans, dir, logger) - createTestSegmentFile(t, 1_000_000, 1_500_000, snaptype.BorSpans, dir, logger) + createTestSegmentFile(t, 0, 500_000, snaptype.BorSpans, dir, 1, logger) + createTestSegmentFile(t, 500_000, 1_000_000, snaptype.BorSpans, dir, 1, logger) + createTestSegmentFile(t, 1_000_000, 1_500_000, snaptype.BorSpans, dir, 1, logger) // delete idx file for last bor span segment to simulate segment with missing idx file - idxFileToDelete := filepath.Join(dir, 
snaptype.IdxFileName(1_000_000, 1_500_000, snaptype.BorSpans.String())) + idxFileToDelete := filepath.Join(dir, snaptype.IdxFileName(1, 1_000_000, 1_500_000, snaptype.BorSpans.String())) err := os.Remove(idxFileToDelete) require.NoError(t, err) - borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, logger) + borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 1, logger) defer borRoSnapshots.Close() err = borRoSnapshots.ReopenFolder() require.NoError(t, err) @@ -80,20 +80,20 @@ func TestBlockReaderLastFrozenSpanIDReturnsZeroWhenAllSegmentsDoNotHaveIdx(t *te createTestBorEventSegmentFile(t, 0, 500_000, 132, dir, logger) createTestBorEventSegmentFile(t, 500_000, 1_000_000, 264, dir, logger) createTestBorEventSegmentFile(t, 1_000_000, 1_500_000, 528, dir, logger) - createTestSegmentFile(t, 0, 500_000, snaptype.BorSpans, dir, logger) - createTestSegmentFile(t, 500_000, 1_000_000, snaptype.BorSpans, dir, logger) - createTestSegmentFile(t, 1_000_000, 1_500_000, snaptype.BorSpans, dir, logger) + createTestSegmentFile(t, 0, 500_000, snaptype.BorSpans, dir, 1, logger) + createTestSegmentFile(t, 500_000, 1_000_000, snaptype.BorSpans, dir, 1, logger) + createTestSegmentFile(t, 1_000_000, 1_500_000, snaptype.BorSpans, dir, 1, logger) // delete idx file for all bor span segments to simulate segments with missing idx files - idxFileToDelete := filepath.Join(dir, snaptype.IdxFileName(0, 500_000, snaptype.BorSpans.String())) + idxFileToDelete := filepath.Join(dir, snaptype.IdxFileName(1, 1, 500_000, snaptype.BorSpans.String())) err := os.Remove(idxFileToDelete) require.NoError(t, err) - idxFileToDelete = filepath.Join(dir, snaptype.IdxFileName(500_000, 1_000_000, snaptype.BorSpans.String())) + idxFileToDelete = filepath.Join(dir, snaptype.IdxFileName(1, 500_000, 1_000_000, snaptype.BorSpans.String())) err = os.Remove(idxFileToDelete) require.NoError(t, err) - idxFileToDelete = filepath.Join(dir, snaptype.IdxFileName(1_000_000, 1_500_000, snaptype.BorSpans.String())) + idxFileToDelete = filepath.Join(dir, snaptype.IdxFileName(1, 1_000_000, 1_500_000, snaptype.BorSpans.String())) err = os.Remove(idxFileToDelete) require.NoError(t, err) - borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, logger) + borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 1, logger) defer borRoSnapshots.Close() err = borRoSnapshots.ReopenFolder() require.NoError(t, err) @@ -108,8 +108,8 @@ func TestBlockReaderLastFrozenEventIDWhenSegmentFilesArePresent(t *testing.T) { logger := testlog.Logger(t, log.LvlInfo) dir := t.TempDir() createTestBorEventSegmentFile(t, 0, 500_000, 132, dir, logger) - createTestSegmentFile(t, 0, 500_000, snaptype.BorSpans, dir, logger) - borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, logger) + createTestSegmentFile(t, 0, 500_000, snaptype.BorSpans, dir, 1, logger) + borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 1, logger) defer borRoSnapshots.Close() err := borRoSnapshots.ReopenFolder() require.NoError(t, err) @@ -123,7 +123,7 @@ func TestBlockReaderLastFrozenEventIDWhenSegmentFilesAreNotPresent(t *testing.T) logger := testlog.Logger(t, log.LvlInfo) dir := t.TempDir() - borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, logger) + borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 1, logger) defer borRoSnapshots.Close() err := borRoSnapshots.ReopenFolder() 
require.NoError(t, err) @@ -140,14 +140,14 @@ func TestBlockReaderLastFrozenEventIDReturnsLastSegWithIdx(t *testing.T) { createTestBorEventSegmentFile(t, 0, 500_000, 132, dir, logger) createTestBorEventSegmentFile(t, 500_000, 1_000_000, 264, dir, logger) createTestBorEventSegmentFile(t, 1_000_000, 1_500_000, 528, dir, logger) - createTestSegmentFile(t, 0, 500_000, snaptype.BorSpans, dir, logger) - createTestSegmentFile(t, 500_000, 1_000_000, snaptype.BorSpans, dir, logger) - createTestSegmentFile(t, 1_000_000, 1_500_000, snaptype.BorSpans, dir, logger) + createTestSegmentFile(t, 0, 500_000, snaptype.BorSpans, dir, 1, logger) + createTestSegmentFile(t, 500_000, 1_000_000, snaptype.BorSpans, dir, 1, logger) + createTestSegmentFile(t, 1_000_000, 1_500_000, snaptype.BorSpans, dir, 1, logger) // delete idx file for last bor events segment to simulate segment with missing idx file - idxFileToDelete := filepath.Join(dir, snaptype.IdxFileName(1_000_000, 1_500_000, snaptype.BorEvents.String())) + idxFileToDelete := filepath.Join(dir, snaptype.IdxFileName(1, 1_000_000, 1_500_000, snaptype.BorEvents.String())) err := os.Remove(idxFileToDelete) require.NoError(t, err) - borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, logger) + borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 1, logger) defer borRoSnapshots.Close() err = borRoSnapshots.ReopenFolder() require.NoError(t, err) @@ -164,20 +164,20 @@ func TestBlockReaderLastFrozenEventIDReturnsZeroWhenAllSegmentsDoNotHaveIdx(t *t createTestBorEventSegmentFile(t, 0, 500_000, 132, dir, logger) createTestBorEventSegmentFile(t, 500_000, 1_000_000, 264, dir, logger) createTestBorEventSegmentFile(t, 1_000_000, 1_500_000, 528, dir, logger) - createTestSegmentFile(t, 0, 500_000, snaptype.BorSpans, dir, logger) - createTestSegmentFile(t, 500_000, 1_000_000, snaptype.BorSpans, dir, logger) - createTestSegmentFile(t, 1_000_000, 1_500_000, snaptype.BorSpans, dir, logger) + createTestSegmentFile(t, 0, 500_000, snaptype.BorSpans, dir, 1, logger) + createTestSegmentFile(t, 500_000, 1_000_000, snaptype.BorSpans, dir, 1, logger) + createTestSegmentFile(t, 1_000_000, 1_500_000, snaptype.BorSpans, dir, 1, logger) // delete idx files for all bor events segment to simulate segment files with missing idx files - idxFileToDelete := filepath.Join(dir, snaptype.IdxFileName(0, 500_000, snaptype.BorEvents.String())) + idxFileToDelete := filepath.Join(dir, snaptype.IdxFileName(1, 0, 500_000, snaptype.BorEvents.String())) err := os.Remove(idxFileToDelete) require.NoError(t, err) - idxFileToDelete = filepath.Join(dir, snaptype.IdxFileName(500_000, 1_000_000, snaptype.BorEvents.String())) + idxFileToDelete = filepath.Join(dir, snaptype.IdxFileName(1, 500_000, 1_000_000, snaptype.BorEvents.String())) err = os.Remove(idxFileToDelete) require.NoError(t, err) - idxFileToDelete = filepath.Join(dir, snaptype.IdxFileName(1_000_000, 1_500_000, snaptype.BorEvents.String())) + idxFileToDelete = filepath.Join(dir, snaptype.IdxFileName(1, 1_000_000, 1_500_000, snaptype.BorEvents.String())) err = os.Remove(idxFileToDelete) require.NoError(t, err) - borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, logger) + borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 1, logger) defer borRoSnapshots.Close() err = borRoSnapshots.ReopenFolder() require.NoError(t, err) @@ -190,7 +190,7 @@ func createTestBorEventSegmentFile(t *testing.T, from, to, eventId uint64, dir s compressor, err := 
compress.NewCompressor( context.Background(), "test", - filepath.Join(dir, snaptype.SegmentFileName(from, to, snaptype.BorEvents)), + filepath.Join(dir, snaptype.SegmentFileName(1, from, to, snaptype.BorEvents)), dir, 100, 1, @@ -211,7 +211,7 @@ func createTestBorEventSegmentFile(t *testing.T, from, to, eventId uint64, dir s KeyCount: 1, BucketSize: 10, TmpDir: dir, - IndexFile: filepath.Join(dir, snaptype.IdxFileName(from, to, snaptype.BorEvents.String())), + IndexFile: filepath.Join(dir, snaptype.IdxFileName(1, from, to, snaptype.BorEvents.String())), LeafSize: 8, }, logger, diff --git a/turbo/snapshotsync/freezeblocks/block_snapshots.go b/turbo/snapshotsync/freezeblocks/block_snapshots.go index eecf518be76..866a21553d4 100644 --- a/turbo/snapshotsync/freezeblocks/block_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/block_snapshots.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "os" - "path" "path/filepath" "reflect" "runtime" @@ -17,9 +16,11 @@ import ( "sync/atomic" "time" - "github.com/ledgerwatch/erigon/consensus/bor" - "github.com/holiman/uint256" + "github.com/ledgerwatch/log/v3" + "golang.org/x/exp/slices" + "golang.org/x/sync/errgroup" + "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/chain/snapcfg" common2 "github.com/ledgerwatch/erigon-lib/common" @@ -32,7 +33,6 @@ import ( "github.com/ledgerwatch/erigon-lib/compress" "github.com/ledgerwatch/erigon-lib/diagnostics" "github.com/ledgerwatch/erigon-lib/downloader/snaptype" - "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/recsplit" types2 "github.com/ledgerwatch/erigon-lib/types" @@ -46,31 +46,32 @@ import ( "github.com/ledgerwatch/erigon/eth/ethconfig/estimate" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/polygon/bor" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/silkworm" - "github.com/ledgerwatch/log/v3" - "golang.org/x/exp/slices" - "golang.org/x/sync/errgroup" ) type HeaderSegment struct { seg *compress.Decompressor // value: first_byte_of_header_hash + header_rlp idxHeaderHash *recsplit.Index // header_hash -> headers_segment_offset - ranges Range + Range + version uint8 } type BodySegment struct { seg *compress.Decompressor // value: rlp(types.BodyForStorage) idxBodyNumber *recsplit.Index // block_num_u64 -> bodies_segment_offset - ranges Range + Range + version uint8 } type TxnSegment struct { Seg *compress.Decompressor // value: first_byte_of_transaction_hash + sender_address + transaction_rlp IdxTxnHash *recsplit.Index // transaction_hash -> transactions_segment_offset IdxTxnHash2BlockNum *recsplit.Index // transaction_hash -> block_number - ranges Range + Range + version uint8 } func (sn *HeaderSegment) closeIdx() { @@ -89,10 +90,25 @@ func (sn *HeaderSegment) close() { sn.closeSeg() sn.closeIdx() } + +func (sn *HeaderSegment) openFiles() []string { + var files []string + + if sn.seg.IsOpen() { + files = append(files, sn.seg.FilePath()) + } + + if sn.idxHeaderHash != nil { + files = append(files, sn.idxHeaderHash.FilePath()) + } + + return files +} + func (sn *HeaderSegment) reopenSeg(dir string) (err error) { sn.closeSeg() - fileName := snaptype.SegmentFileName(sn.ranges.from, sn.ranges.to, snaptype.Headers) - sn.seg, err = compress.NewDecompressor(path.Join(dir, fileName)) + fileName := snaptype.SegmentFileName(sn.version, sn.from, sn.to, snaptype.Headers) + sn.seg, err = 
compress.NewDecompressor(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, fileName: %s", err, fileName) } @@ -119,8 +135,8 @@ func (sn *HeaderSegment) reopenIdx(dir string) (err error) { if sn.seg == nil { return nil } - fileName := snaptype.IdxFileName(sn.ranges.from, sn.ranges.to, snaptype.Headers.String()) - sn.idxHeaderHash, err = recsplit.OpenIndex(path.Join(dir, fileName)) + fileName := snaptype.IdxFileName(sn.version, sn.from, sn.to, snaptype.Headers.String()) + sn.idxHeaderHash, err = recsplit.OpenIndex(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, fileName: %s", err, fileName) } @@ -145,10 +161,24 @@ func (sn *BodySegment) close() { sn.closeIdx() } +func (sn *BodySegment) openFiles() []string { + var files []string + + if sn.seg.IsOpen() { + files = append(files, sn.seg.FilePath()) + } + + if sn.idxBodyNumber != nil { + files = append(files, sn.idxBodyNumber.FilePath()) + } + + return files +} + func (sn *BodySegment) reopenSeg(dir string) (err error) { sn.closeSeg() - fileName := snaptype.SegmentFileName(sn.ranges.from, sn.ranges.to, snaptype.Bodies) - sn.seg, err = compress.NewDecompressor(path.Join(dir, fileName)) + fileName := snaptype.SegmentFileName(sn.version, sn.from, sn.to, snaptype.Bodies) + sn.seg, err = compress.NewDecompressor(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, fileName: %s", err, fileName) } @@ -176,8 +206,8 @@ func (sn *BodySegment) reopenIdx(dir string) (err error) { if sn.seg == nil { return nil } - fileName := snaptype.IdxFileName(sn.ranges.from, sn.ranges.to, snaptype.Bodies.String()) - sn.idxBodyNumber, err = recsplit.OpenIndex(path.Join(dir, fileName)) + fileName := snaptype.IdxFileName(sn.version, sn.from, sn.to, snaptype.Bodies.String()) + sn.idxBodyNumber, err = recsplit.OpenIndex(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, fileName: %s", err, fileName) } @@ -204,10 +234,29 @@ func (sn *TxnSegment) close() { sn.closeSeg() sn.closeIdx() } + +func (sn *TxnSegment) openFiles() []string { + var files []string + + if sn.Seg.IsOpen() { + files = append(files, sn.Seg.FilePath()) + } + + if sn.IdxTxnHash != nil && sn.IdxTxnHash.IsOpen() { + files = append(files, sn.IdxTxnHash.FilePath()) + } + + if sn.IdxTxnHash2BlockNum != nil && sn.IdxTxnHash2BlockNum.IsOpen() { + files = append(files, sn.IdxTxnHash2BlockNum.FilePath()) + } + + return files +} + func (sn *TxnSegment) reopenSeg(dir string) (err error) { sn.closeSeg() - fileName := snaptype.SegmentFileName(sn.ranges.from, sn.ranges.to, snaptype.Transactions) - sn.Seg, err = compress.NewDecompressor(path.Join(dir, fileName)) + fileName := snaptype.SegmentFileName(sn.version, sn.from, sn.to, snaptype.Transactions) + sn.Seg, err = compress.NewDecompressor(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, fileName: %s", err, fileName) } @@ -218,8 +267,8 @@ func (sn *TxnSegment) reopenIdx(dir string) (err error) { if sn.Seg == nil { return nil } - fileName := snaptype.IdxFileName(sn.ranges.from, sn.ranges.to, snaptype.Transactions.String()) - sn.IdxTxnHash, err = recsplit.OpenIndex(path.Join(dir, fileName)) + fileName := snaptype.IdxFileName(sn.version, sn.from, sn.to, snaptype.Transactions.String()) + sn.IdxTxnHash, err = recsplit.OpenIndex(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, fileName: %s", err, fileName) } @@ -239,8 +288,8 @@ func (sn *TxnSegment) reopenIdx(dir string) (err error) { } */ - fileName = snaptype.IdxFileName(sn.ranges.from, sn.ranges.to, 
snaptype.Transactions2Block.String())
-	sn.IdxTxnHash2BlockNum, err = recsplit.OpenIndex(path.Join(dir, fileName))
+	fileName = snaptype.IdxFileName(sn.version, sn.from, sn.to, snaptype.Transactions2Block.String())
+	sn.IdxTxnHash2BlockNum, err = recsplit.OpenIndex(filepath.Join(dir, fileName))
 	if err != nil {
 		return fmt.Errorf("%w, fileName: %s", err, fileName)
 	}
@@ -289,7 +338,7 @@ func (s *bodySegments) ViewSegment(blockNum uint64, f func(*BodySegment) error)
 	s.lock.RLock()
 	defer s.lock.RUnlock()
 	for _, seg := range s.segments {
-		if !(blockNum >= seg.ranges.from && blockNum < seg.ranges.to) {
+		if !(blockNum >= seg.from && blockNum < seg.to) {
 			continue
 		}
 		return true, f(seg)
@@ -311,7 +360,7 @@ func (s *txnSegments) ViewSegment(blockNum uint64, f func(*TxnSegment) error) (f
 	s.lock.RLock()
 	defer s.lock.RUnlock()
 	for _, seg := range s.segments {
-		if !(blockNum >= seg.ranges.from && blockNum < seg.ranges.to) {
+		if !(blockNum >= seg.from && blockNum < seg.to) {
 			continue
 		}
 		return true, f(seg)
@@ -331,7 +380,11 @@ type RoSnapshots struct {
 	segmentsMax atomic.Uint64 // all types of .seg files are available - up to this number
 	idxMax      atomic.Uint64 // all types of .idx files are available - up to this number
 	cfg         ethconfig.BlocksFreezing
+	version     uint8
 	logger      log.Logger
+
+	// allows for pruning segments - this is the min available segment
+	segmentsMin atomic.Uint64
 }
 
 // NewRoSnapshots - opens all snapshots. But to simplify everything:
@@ -339,21 +392,30 @@ type RoSnapshots struct {
 //   - all snapshots of given blocks range must exist - to make this blocks range available
 //   - gaps are not allowed
 //   - segment have [from:to) semantic
-func NewRoSnapshots(cfg ethconfig.BlocksFreezing, snapDir string, logger log.Logger) *RoSnapshots {
-	return &RoSnapshots{dir: snapDir, cfg: cfg, Headers: &headerSegments{}, Bodies: &bodySegments{}, Txs: &txnSegments{}, logger: logger}
+func NewRoSnapshots(cfg ethconfig.BlocksFreezing, snapDir string, version uint8, logger log.Logger) *RoSnapshots {
+	return &RoSnapshots{dir: snapDir, cfg: cfg, version: version, Headers: &headerSegments{}, Bodies: &bodySegments{}, Txs: &txnSegments{}, logger: logger}
 }
 
+func (s *RoSnapshots) Version() uint8 { return s.version }
 func (s *RoSnapshots) Cfg() ethconfig.BlocksFreezing { return s.cfg }
 func (s *RoSnapshots) Dir() string { return s.dir }
 func (s *RoSnapshots) SegmentsReady() bool { return s.segmentsReady.Load() }
 func (s *RoSnapshots) IndicesReady() bool { return s.indicesReady.Load() }
 func (s *RoSnapshots) IndicesMax() uint64 { return s.idxMax.Load() }
 func (s *RoSnapshots) SegmentsMax() uint64 { return s.segmentsMax.Load() }
-func (s *RoSnapshots) BlocksAvailable() uint64 { return cmp.Min(s.segmentsMax.Load(), s.idxMax.Load()) }
-func (s *RoSnapshots) LogStat() {
+func (s *RoSnapshots) SegmentsMin() uint64 { return s.segmentsMin.Load() }
+func (s *RoSnapshots) SetSegmentsMin(min uint64) { s.segmentsMin.Store(min) }
+func (s *RoSnapshots) BlocksAvailable() uint64 {
+	if s == nil {
+		return 0
+	}
+
+	return cmp.Min(s.segmentsMax.Load(), s.idxMax.Load())
+}
+func (s *RoSnapshots) LogStat(label string) {
 	var m runtime.MemStats
 	dbg.ReadMemStats(&m)
-	s.logger.Info("[snapshots] Blocks Stat",
+	s.logger.Info(fmt.Sprintf("[snapshots:%s] Blocks Stat", label),
 		"blocks", fmt.Sprintf("%dk", (s.BlocksAvailable()+1)/1000),
 		"indices", fmt.Sprintf("%dk", (s.IndicesMax()+1)/1000),
 		"alloc", common2.ByteCount(m.Alloc), "sys", common2.ByteCount(m.Sys))
@@ -445,19 +507,19 @@ func (s *RoSnapshots) idxAvailability() uint64 {
 	if
seg.idxHeaderHash == nil { break } - headers = seg.ranges.to - 1 + headers = seg.to - 1 } for _, seg := range s.Bodies.segments { if seg.idxBodyNumber == nil { break } - bodies = seg.ranges.to - 1 + bodies = seg.to - 1 } for _, seg := range s.Txs.segments { if seg.IdxTxnHash == nil || seg.IdxTxnHash2BlockNum == nil { break } - txs = seg.ranges.to - 1 + txs = seg.to - 1 } return cmp.Min(headers, cmp.Min(bodies, txs)) } @@ -487,7 +549,7 @@ func (s *RoSnapshots) Files() (list []string) { if seg.seg == nil { continue } - if seg.ranges.from > maxBlockNumInFiles { + if seg.from > maxBlockNumInFiles { continue } _, fName := filepath.Split(seg.seg.FilePath()) @@ -497,7 +559,7 @@ func (s *RoSnapshots) Files() (list []string) { if seg.seg == nil { continue } - if seg.ranges.from > maxBlockNumInFiles { + if seg.from > maxBlockNumInFiles { continue } _, fName := filepath.Split(seg.seg.FilePath()) @@ -507,7 +569,7 @@ func (s *RoSnapshots) Files() (list []string) { if seg.Seg == nil { continue } - if seg.ranges.from > maxBlockNumInFiles { + if seg.from > maxBlockNumInFiles { continue } _, fName := filepath.Split(seg.Seg.FilePath()) @@ -517,8 +579,39 @@ func (s *RoSnapshots) Files() (list []string) { return list } +func (s *RoSnapshots) OpenFiles() (list []string) { + s.Headers.lock.RLock() + defer s.Headers.lock.RUnlock() + s.Bodies.lock.RLock() + defer s.Bodies.lock.RUnlock() + s.Txs.lock.RLock() + defer s.Txs.lock.RUnlock() + + for _, header := range s.Headers.segments { + list = append(list, header.openFiles()...) + } + + for _, body := range s.Bodies.segments { + list = append(list, body.openFiles()...) + } + + for _, txs := range s.Txs.segments { + list = append(list, txs.openFiles()...) + } + + return list +} + // ReopenList stops on optimistic=false, continue opening files on optimistic=true func (s *RoSnapshots) ReopenList(fileNames []string, optimistic bool) error { + return s.rebuildSegments(fileNames, true, optimistic) +} + +func (s *RoSnapshots) InitSegments(fileNames []string) error { + return s.rebuildSegments(fileNames, false, true) +} + +func (s *RoSnapshots) rebuildSegments(fileNames []string, open bool, optimistic bool) error { s.Headers.lock.Lock() defer s.Headers.lock.Unlock() s.Bodies.lock.Lock() @@ -552,22 +645,25 @@ Loop: } } if !exists { - sn = &HeaderSegment{ranges: Range{f.From, f.To}} + sn = &HeaderSegment{version: f.Version, Range: Range{f.From, f.To}} } - if err := sn.reopenSeg(s.dir); err != nil { - if errors.Is(err, os.ErrNotExist) { + + if open { + if err := sn.reopenSeg(s.dir); err != nil { + if errors.Is(err, os.ErrNotExist) { + if optimistic { + continue Loop + } else { + break Loop + } + } if optimistic { + s.logger.Warn("[snapshots] open segment", "err", err) continue Loop } else { - break Loop + return err } } - if optimistic { - s.logger.Warn("[snapshots] open segment", "err", err) - continue Loop - } else { - return err - } } if !exists { @@ -575,8 +671,11 @@ Loop: // then make segment available even if index open may fail s.Headers.segments = append(s.Headers.segments, sn) } - if err := sn.reopenIdxIfNeed(s.dir, optimistic); err != nil { - return err + + if open { + if err := sn.reopenIdxIfNeed(s.dir, optimistic); err != nil { + return err + } } case snaptype.Bodies: var sn *BodySegment @@ -592,28 +691,34 @@ Loop: } } if !exists { - sn = &BodySegment{ranges: Range{f.From, f.To}} + sn = &BodySegment{version: f.Version, Range: Range{f.From, f.To}} } - if err := sn.reopenSeg(s.dir); err != nil { - if errors.Is(err, os.ErrNotExist) { + + if open { + if err := 
sn.reopenSeg(s.dir); err != nil { + if errors.Is(err, os.ErrNotExist) { + if optimistic { + continue Loop + } else { + break Loop + } + } if optimistic { + s.logger.Warn("[snapshots] open segment", "err", err) continue Loop } else { - break Loop + return err } } - if optimistic { - s.logger.Warn("[snapshots] open segment", "err", err) - continue Loop - } else { - return err - } } if !exists { s.Bodies.segments = append(s.Bodies.segments, sn) } - if err := sn.reopenIdxIfNeed(s.dir, optimistic); err != nil { - return err + + if open { + if err := sn.reopenIdxIfNeed(s.dir, optimistic); err != nil { + return err + } } case snaptype.Transactions: var sn *TxnSegment @@ -629,28 +734,35 @@ Loop: } } if !exists { - sn = &TxnSegment{ranges: Range{f.From, f.To}} + sn = &TxnSegment{version: f.Version, Range: Range{f.From, f.To}} } - if err := sn.reopenSeg(s.dir); err != nil { - if errors.Is(err, os.ErrNotExist) { + + if open { + if err := sn.reopenSeg(s.dir); err != nil { + if errors.Is(err, os.ErrNotExist) { + if optimistic { + continue Loop + } else { + break Loop + } + } if optimistic { + s.logger.Warn("[snapshots] open segment", "err", err) continue Loop } else { - break Loop + return err } } - if optimistic { - s.logger.Warn("[snapshots] open segment", "err", err) - continue Loop - } else { - return err - } } + if !exists { s.Txs.segments = append(s.Txs.segments, sn) } - if err := sn.reopenIdxIfNeed(s.dir, optimistic); err != nil { - return err + + if open { + if err := sn.reopenIdxIfNeed(s.dir, optimistic); err != nil { + return err + } } default: processed = false @@ -680,7 +792,7 @@ func (s *RoSnapshots) Ranges() (ranges []Range) { defer view.Close() for _, sn := range view.Headers() { - ranges = append(ranges, sn.ranges) + ranges = append(ranges, sn.Range) } return ranges } @@ -688,7 +800,14 @@ func (s *RoSnapshots) Ranges() (ranges []Range) { func (s *RoSnapshots) OptimisticalyReopenFolder() { _ = s.ReopenFolder() } func (s *RoSnapshots) OptimisticalyReopenWithDB(db kv.RoDB) { _ = s.ReopenWithDB(db) } func (s *RoSnapshots) ReopenFolder() error { - files, _, err := Segments(s.dir) + return s.ReopenSegments(snaptype.BlockSnapshotTypes) +} + +func (s *RoSnapshots) ReopenSegments(types []snaptype.Type) error { + files, _, err := segments(s.dir, s.version, 0, func(dir string, in []snaptype.FileInfo) (res []snaptype.FileInfo) { + return typeOfSegmentsMustExist(dir, in, types) + }) + if err != nil { return err } @@ -699,6 +818,7 @@ func (s *RoSnapshots) ReopenFolder() error { } return s.ReopenList(list, false) } + func (s *RoSnapshots) ReopenWithDB(db kv.RoDB) error { if err := db.View(context.Background(), func(tx kv.Tx) error { snList, _, err := rawdb.ReadSnapshots(tx) @@ -809,15 +929,15 @@ func (s *RoSnapshots) PrintDebug() { defer s.Txs.lock.RUnlock() fmt.Println(" == Snapshots, Header") for _, sn := range s.Headers.segments { - fmt.Printf("%d, %t\n", sn.ranges.from, sn.idxHeaderHash == nil) + fmt.Printf("%d, %t\n", sn.from, sn.idxHeaderHash == nil) } fmt.Println(" == Snapshots, Body") for _, sn := range s.Bodies.segments { - fmt.Printf("%d, %t\n", sn.ranges.from, sn.idxBodyNumber == nil) + fmt.Printf("%d, %t\n", sn.from, sn.idxBodyNumber == nil) } fmt.Println(" == Snapshots, Txs") for _, sn := range s.Txs.segments { - fmt.Printf("%d, %t, %t\n", sn.ranges.from, sn.IdxTxnHash == nil, sn.IdxTxnHash2BlockNum == nil) + fmt.Printf("%d, %t, %t\n", sn.from, sn.IdxTxnHash == nil, sn.IdxTxnHash2BlockNum == nil) } } @@ -879,7 +999,7 @@ func buildIdx(ctx context.Context, sn snaptype.FileInfo, chainConfig 
*chain.Conf //log.Info("[snapshots] build idx", "file", fName) switch sn.T { case snaptype.Headers: - if err := HeadersIdx(ctx, chainConfig, sn.Path, sn.From, tmpDir, p, lvl, logger); err != nil { + if err := HeadersIdx(ctx, sn.Path, sn.Version, sn.From, tmpDir, p, lvl, logger); err != nil { return err } case snaptype.Bodies: @@ -888,17 +1008,17 @@ func buildIdx(ctx context.Context, sn snaptype.FileInfo, chainConfig *chain.Conf } case snaptype.Transactions: dir, _ := filepath.Split(sn.Path) - if err := TransactionsIdx(ctx, chainConfig, sn.From, sn.To, dir, tmpDir, p, lvl, logger); err != nil { + if err := TransactionsIdx(ctx, chainConfig, sn.Version, sn.From, sn.To, dir, tmpDir, p, lvl, logger); err != nil { return err } case snaptype.BorEvents: dir, _ := filepath.Split(sn.Path) - if err := BorEventsIdx(ctx, sn.Path, sn.From, sn.To, dir, tmpDir, p, lvl, logger); err != nil { + if err := BorEventsIdx(ctx, sn.Path, sn.Version, sn.From, sn.To, dir, tmpDir, p, lvl, logger); err != nil { return err } case snaptype.BorSpans: dir, _ := filepath.Split(sn.Path) - if err := BorSpansIdx(ctx, sn.Path, sn.From, sn.To, dir, tmpDir, p, lvl, logger); err != nil { + if err := BorSpansIdx(ctx, sn.Path, sn.Version, sn.From, sn.To, dir, tmpDir, p, lvl, logger); err != nil { return err } } @@ -906,11 +1026,11 @@ func buildIdx(ctx context.Context, sn snaptype.FileInfo, chainConfig *chain.Conf return nil } -func BuildMissedIndices(logPrefix string, ctx context.Context, dirs datadir.Dirs, chainConfig *chain.Config, workers int, logger log.Logger) error { +func BuildMissedIndices(logPrefix string, ctx context.Context, dirs datadir.Dirs, version uint8, minIndex uint64, chainConfig *chain.Config, workers int, logger log.Logger) error { dir, tmpDir := dirs.Snap, dirs.Tmp //log.Log(lvl, "[snapshots] Build indices", "from", min) - segments, _, err := Segments(dir) + segments, _, err := Segments(dir, version, minIndex) if err != nil { return err } @@ -973,10 +1093,10 @@ func BuildMissedIndices(logPrefix string, ctx context.Context, dirs datadir.Dirs } } -func BuildBorMissedIndices(logPrefix string, ctx context.Context, dirs datadir.Dirs, chainConfig *chain.Config, workers int, logger log.Logger) error { +func BuildBorMissedIndices(logPrefix string, ctx context.Context, dirs datadir.Dirs, version uint8, minIndex uint64, chainConfig *chain.Config, workers int, logger log.Logger) error { dir, tmpDir := dirs.Snap, dirs.Tmp - segments, _, err := BorSegments(dir) + segments, _, err := BorSegments(dir, version, minIndex) if err != nil { return err } @@ -985,7 +1105,7 @@ func BuildBorMissedIndices(logPrefix string, ctx context.Context, dirs datadir.D g, gCtx := errgroup.WithContext(ctx) g.SetLimit(workers) - for _, t := range []snaptype.Type{snaptype.BorEvents, snaptype.BorSpans} { + for _, t := range snaptype.BorSnapshotTypes { for _, segment := range segments { if segment.T != t { continue @@ -1050,8 +1170,8 @@ func sendDiagnostics(startIndexingTime time.Time, indexPercent map[string]int, a }) } -func noGaps(in []snaptype.FileInfo) (out []snaptype.FileInfo, missingSnapshots []Range) { - var prevTo uint64 +func noGaps(in []snaptype.FileInfo, from uint64) (out []snaptype.FileInfo, missingSnapshots []Range) { + prevTo := from for _, f := range in { if f.To <= prevTo { continue @@ -1066,14 +1186,14 @@ func noGaps(in []snaptype.FileInfo) (out []snaptype.FileInfo, missingSnapshots [ return out, missingSnapshots } -func allTypeOfSegmentsMustExist(dir string, in []snaptype.FileInfo) (res []snaptype.FileInfo) { +func 
typeOfSegmentsMustExist(dir string, in []snaptype.FileInfo, types []snaptype.Type) (res []snaptype.FileInfo) { MainLoop: for _, f := range in { if f.From == f.To { continue } - for _, t := range snaptype.BlockSnapshotTypes { - p := filepath.Join(dir, snaptype.SegmentFileName(f.From, f.To, t)) + for _, t := range types { + p := filepath.Join(dir, snaptype.SegmentFileName(f.Version, f.From, f.To, t)) if !dir2.FileExist(p) { continue MainLoop } @@ -1083,21 +1203,12 @@ MainLoop: return res } +func allTypeOfSegmentsMustExist(dir string, in []snaptype.FileInfo) (res []snaptype.FileInfo) { + return typeOfSegmentsMustExist(dir, in, snaptype.BlockSnapshotTypes) +} + func borSegmentsMustExist(dir string, in []snaptype.FileInfo) (res []snaptype.FileInfo) { -MainLoop: - for _, f := range in { - if f.From == f.To { - continue - } - for _, t := range []snaptype.Type{snaptype.BorEvents, snaptype.BorSpans} { - p := filepath.Join(dir, snaptype.SegmentFileName(f.From, f.To, t)) - if !dir2.FileExist(p) { - continue MainLoop - } - } - res = append(res, f) - } - return res + return typeOfSegmentsMustExist(dir, in, []snaptype.Type{snaptype.BorEvents, snaptype.BorSpans}) } // noOverlaps - keep largest ranges and avoid overlap @@ -1125,8 +1236,8 @@ func noOverlaps(in []snaptype.FileInfo) (res []snaptype.FileInfo) { return res } -func SegmentsCaplin(dir string) (res []snaptype.FileInfo, missingSnapshots []Range, err error) { - list, err := snaptype.Segments(dir) +func SegmentsCaplin(dir string, version uint8, minBlock uint64) (res []snaptype.FileInfo, missingSnapshots []Range, err error) { + list, err := snaptype.Segments(dir, version) if err != nil { return nil, missingSnapshots, err } @@ -1140,15 +1251,19 @@ func SegmentsCaplin(dir string) (res []snaptype.FileInfo, missingSnapshots []Ran } l = append(l, f) } - l, m = noGaps(noOverlaps(l)) + l, m = noGaps(noOverlaps(l), minBlock) res = append(res, l...) missingSnapshots = append(missingSnapshots, m...) } return res, missingSnapshots, nil } -func Segments(dir string) (res []snaptype.FileInfo, missingSnapshots []Range, err error) { - list, err := snaptype.Segments(dir) +func Segments(dir string, version uint8, minBlock uint64) (res []snaptype.FileInfo, missingSnapshots []Range, err error) { + return segments(dir, version, minBlock, allTypeOfSegmentsMustExist) +} + +func segments(dir string, version uint8, minBlock uint64, segmentsTypeCheck func(dir string, in []snaptype.FileInfo) []snaptype.FileInfo) (res []snaptype.FileInfo, missingSnapshots []Range, err error) { + list, err := snaptype.Segments(dir, version) if err != nil { return nil, missingSnapshots, err } @@ -1161,7 +1276,7 @@ func Segments(dir string) (res []snaptype.FileInfo, missingSnapshots []Range, er } l = append(l, f) } - l, m = noGaps(noOverlaps(allTypeOfSegmentsMustExist(dir, l))) + l, m = noGaps(noOverlaps(segmentsTypeCheck(dir, l)), minBlock) res = append(res, l...) missingSnapshots = append(missingSnapshots, m...) } @@ -1173,7 +1288,7 @@ func Segments(dir string) (res []snaptype.FileInfo, missingSnapshots []Range, er } l = append(l, f) } - l, _ = noGaps(noOverlaps(allTypeOfSegmentsMustExist(dir, l))) + l, _ = noGaps(noOverlaps(segmentsTypeCheck(dir, l)), minBlock) res = append(res, l...) } { @@ -1184,7 +1299,7 @@ func Segments(dir string) (res []snaptype.FileInfo, missingSnapshots []Range, er } l = append(l, f) } - l, _ = noGaps(noOverlaps(allTypeOfSegmentsMustExist(dir, l))) + l, _ = noGaps(noOverlaps(segmentsTypeCheck(dir, l)), minBlock) res = append(res, l...) 
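The reworked noGaps above seeds prevTo with the caller's minimum block instead of zero, so history pruned away below that point no longer registers as a missing snapshot. A self-contained sketch of that contract, simplified to stop at the first hole (rng and contiguous are illustrative names, and the real function may keep scanning past a gap):

package main

import "fmt"

type rng struct{ from, to uint64 }

// contiguous keeps ranges while they chain from 'from' without holes
// and reports the first gap it meets.
func contiguous(in []rng, from uint64) (out, missing []rng) {
	prevTo := from
	for _, r := range in {
		if r.to <= prevTo { // already covered (e.g. below the pruning floor)
			continue
		}
		if r.from != prevTo { // hole between prevTo and r.from
			missing = append(missing, rng{prevTo, r.from})
			break
		}
		prevTo = r.to
		out = append(out, r)
	}
	return out, missing
}

func main() {
	segs := []rng{{0, 500_000}, {500_000, 1_000_000}, {1_500_000, 2_000_000}}
	out, missing := contiguous(segs, 0)
	fmt.Println(out)     // [{0 500000} {500000 1000000}]
	fmt.Println(missing) // [{1000000 1500000}]
}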
} @@ -1203,6 +1318,7 @@ func chooseSegmentEnd(from, to, blocksPerFile uint64) uint64 { } type BlockRetire struct { + maxScheduledBlock atomic.Uint64 working atomic.Bool needSaveFilesListInDB atomic.Bool @@ -1222,6 +1338,10 @@ func NewBlockRetire(compressWorkers int, dirs datadir.Dirs, blockReader services return &BlockRetire{workers: compressWorkers, tmpDir: dirs.Tmp, dirs: dirs, blockReader: blockReader, blockWriter: blockWriter, db: db, chainConfig: chainConfig, notifier: notifier, logger: logger} } +func (br *BlockRetire) SetWorkers(workers int) { + br.workers = workers +} + func (br *BlockRetire) IO() (services.FullBlockReader, *blockio.BlockWriter) { return br.blockReader, br.blockWriter } @@ -1286,28 +1406,33 @@ func CanDeleteTo(curBlockNum uint64, blocksInSnapshots uint64) (blockTo uint64) return cmp.Min(hardLimit, blocksInSnapshots+1) } -func (br *BlockRetire) retireBlocks(ctx context.Context, blockFrom, blockTo uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDelete func(l []string) error) error { +func (br *BlockRetire) retireBlocks(ctx context.Context, minBlockNum uint64, maxBlockNum uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDelete func(l []string) error) (bool, error) { notifier, logger, blockReader, tmpDir, db, workers := br.notifier, br.logger, br.blockReader, br.tmpDir, br.db, br.workers - logger.Log(lvl, "[snapshots] Retire Blocks", "range", fmt.Sprintf("%dk-%dk", blockFrom/1000, blockTo/1000)) snapshots := br.snapshots() - firstTxNum := blockReader.(*BlockReader).FirstTxNumNotInSnapshots() - // in future we will do it in background - if err := DumpBlocks(ctx, blockFrom, blockTo, snaptype.Erigon2MergeLimit, tmpDir, snapshots.Dir(), firstTxNum, db, workers, lvl, logger, blockReader); err != nil { - return fmt.Errorf("DumpBlocks: %w", err) - } - if err := snapshots.ReopenFolder(); err != nil { - return fmt.Errorf("reopen: %w", err) - } - snapshots.LogStat() - if notifier != nil && !reflect.ValueOf(notifier).IsNil() { // notify about new snapshots of any size - notifier.OnNewSnapshot() + blockFrom, blockTo, ok := CanRetire(maxBlockNum, minBlockNum) + + if ok { + logger.Log(lvl, "[snapshots] Retire Blocks", "range", fmt.Sprintf("%dk-%dk", blockFrom/1000, blockTo/1000)) + // in future we will do it in background + if err := DumpBlocks(ctx, snapshots.version, blockFrom, blockTo, snaptype.Erigon2MergeLimit, tmpDir, snapshots.Dir(), db, workers, lvl, logger, blockReader); err != nil { + return ok, fmt.Errorf("DumpBlocks: %w", err) + } + if err := snapshots.ReopenFolder(); err != nil { + return ok, fmt.Errorf("reopen: %w", err) + } + snapshots.LogStat("retire") + if notifier != nil && !reflect.ValueOf(notifier).IsNil() { // notify about new snapshots of any size + notifier.OnNewSnapshot() + } } + merger := NewMerger(tmpDir, workers, lvl, db, br.chainConfig, logger) rangesToMerge := merger.FindMergeRanges(snapshots.Ranges(), snapshots.BlocksAvailable()) if len(rangesToMerge) == 0 { - return nil + return ok, nil } + ok = true // have something to merge onMerge := func(r Range) error { if notifier != nil && !reflect.ValueOf(notifier).IsNil() { // notify about new snapshots of any size notifier.OnNewSnapshot() @@ -1325,10 +1450,10 @@ func (br *BlockRetire) retireBlocks(ctx context.Context, blockFrom, blockTo uint } err := merger.Merge(ctx, snapshots, rangesToMerge, snapshots.Dir(), true /* doIndex */, onMerge, onDelete) if err != nil { - return err + return ok, err } - return nil + 
return ok, nil } func (br *BlockRetire) PruneAncientBlocks(tx kv.RwTx, limit int) error { @@ -1340,12 +1465,16 @@ func (br *BlockRetire) PruneAncientBlocks(tx kv.RwTx, limit int) error { return err } canDeleteTo := CanDeleteTo(currentProgress, br.blockReader.FrozenBlocks()) + + br.logger.Info("[snapshots] Prune Blocks", "to", canDeleteTo, "limit", limit) if err := br.blockWriter.PruneBlocks(context.Background(), tx, canDeleteTo, limit); err != nil { return err } includeBor := br.chainConfig.Bor != nil if includeBor { canDeleteTo := CanDeleteTo(currentProgress, br.blockReader.FrozenBorBlocks()) + br.logger.Info("[snapshots] Prune Bor Blocks", "to", canDeleteTo, "limit", limit) + if err := br.blockWriter.PruneBorBlocks(context.Background(), tx, canDeleteTo, limit, bor.SpanIDAt); err != nil { return err } @@ -1353,54 +1482,79 @@ func (br *BlockRetire) PruneAncientBlocks(tx kv.RwTx, limit int) error { return nil } -func (br *BlockRetire) RetireBlocksInBackground(ctx context.Context, forwardProgress uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDeleteSnapshots func(l []string) error) { - ok := br.working.CompareAndSwap(false, true) - if !ok { - // go-routine is still working +func (br *BlockRetire) RetireBlocksInBackground(ctx context.Context, minBlockNum uint64, maxBlockNum uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDeleteSnapshots func(l []string) error) { + if maxBlockNum > br.maxScheduledBlock.Load() { + br.maxScheduledBlock.Store(maxBlockNum) + } + + if !br.working.CompareAndSwap(false, true) { return } + go func() { + defer br.working.Store(false) - if err := br.RetireBlocks(ctx, forwardProgress, lvl, seedNewSnapshots, onDeleteSnapshots); err != nil { - br.logger.Warn("[snapshots] retire blocks", "err", err) + for { + maxBlockNum := br.maxScheduledBlock.Load() + + err := br.RetireBlocks(ctx, minBlockNum, maxBlockNum, lvl, seedNewSnapshots, onDeleteSnapshots) + + if err != nil { + br.logger.Warn("[snapshots] retire blocks", "err", err) + return + } + + if maxBlockNum == br.maxScheduledBlock.Load() { + return + } } }() } -func (br *BlockRetire) RetireBlocks(ctx context.Context, forwardProgress uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDeleteSnapshots func(l []string) error) error { +func (br *BlockRetire) RetireBlocks(ctx context.Context, minBlockNum uint64, maxBlockNum uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDeleteSnapshots func(l []string) error) (err error) { includeBor := br.chainConfig.Bor != nil + if includeBor { // "bor snaps" can be behind "block snaps", it's ok: for example because of `kill -9` in the middle of merge - for br.blockReader.FrozenBorBlocks() < br.blockReader.FrozenBlocks() { - blockFrom, blockTo, ok := CanRetire(forwardProgress, br.blockReader.FrozenBorBlocks()) + if frozen := br.blockReader.FrozenBlocks(); frozen > minBlockNum { + minBlockNum = frozen + } + + for br.blockReader.FrozenBorBlocks() < minBlockNum { + ok, err := br.retireBorBlocks(ctx, minBlockNum, maxBlockNum, lvl, seedNewSnapshots, onDeleteSnapshots) + if err != nil { + return err + } if !ok { break } - if err := br.retireBorBlocks(ctx, blockFrom, blockTo, lvl, seedNewSnapshots, onDeleteSnapshots); err != nil { - return err - } } } + var ok, okBor bool for { - blockFrom, blockTo, ok := CanRetire(forwardProgress, br.blockReader.FrozenBlocks()) - if !ok { - break + if frozen := 
br.blockReader.FrozenBlocks(); frozen > minBlockNum { + minBlockNum = frozen } - if err := br.retireBlocks(ctx, blockFrom, blockTo, lvl, seedNewSnapshots, onDeleteSnapshots); err != nil { + + ok, err = br.retireBlocks(ctx, minBlockNum, maxBlockNum, lvl, seedNewSnapshots, onDeleteSnapshots) + if err != nil { return err } if includeBor { - blockFrom, blockTo, ok = CanRetire(forwardProgress, br.blockReader.FrozenBorBlocks()) - if ok { - if err := br.retireBorBlocks(ctx, blockFrom, blockTo, lvl, seedNewSnapshots, onDeleteSnapshots); err != nil { - return err - } + okBor, err = br.retireBorBlocks(ctx, minBlockNum, maxBlockNum, lvl, seedNewSnapshots, onDeleteSnapshots) + if err != nil { + return err } } + haveMore := ok || okBor + if !haveMore { + break + } } + return nil } @@ -1421,7 +1575,7 @@ func (br *BlockRetire) buildMissedIndicesIfNeed(ctx context.Context, logPrefix s if snapshots.IndicesMax() >= snapshots.SegmentsMax() { return nil } - snapshots.LogStat() + snapshots.LogStat("missed-idx") if !snapshots.Cfg().Produce && snapshots.IndicesMax() == 0 { return fmt.Errorf("please remove --snap.stop, erigon can't work without creating basic indices") } @@ -1434,14 +1588,14 @@ func (br *BlockRetire) buildMissedIndicesIfNeed(ctx context.Context, logPrefix s // wait for Downloader service to download all expected snapshots indexWorkers := estimate.IndexSnapshot.Workers() - if err := BuildMissedIndices(logPrefix, ctx, br.dirs, cc, indexWorkers, br.logger); err != nil { + if err := BuildMissedIndices(logPrefix, ctx, br.dirs, snapshots.Version(), snapshots.SegmentsMin(), cc, indexWorkers, br.logger); err != nil { return fmt.Errorf("BuildMissedIndices: %w", err) } if err := snapshots.ReopenFolder(); err != nil { return err } - snapshots.LogStat() + snapshots.LogStat("missed-idx:reopen") if notifier != nil { notifier.OnNewSnapshot() } @@ -1458,7 +1612,7 @@ func (br *BlockRetire) buildBorMissedIndicesIfNeed(ctx context.Context, logPrefi return nil } - borSnapshots.LogStat() + borSnapshots.LogStat("bor:missed-idx") if !borSnapshots.Cfg().Produce && borSnapshots.IndicesMax() == 0 { return fmt.Errorf("please remove --snap.stop, erigon can't work without creating basic indices") } @@ -1471,93 +1625,97 @@ func (br *BlockRetire) buildBorMissedIndicesIfNeed(ctx context.Context, logPrefi // wait for Downloader service to download all expected snapshots indexWorkers := estimate.IndexSnapshot.Workers() - if err := BuildBorMissedIndices(logPrefix, ctx, br.dirs, cc, indexWorkers, br.logger); err != nil { + if err := BuildBorMissedIndices(logPrefix, ctx, br.dirs, borSnapshots.Version(), borSnapshots.SegmentsMin(), cc, indexWorkers, br.logger); err != nil { return fmt.Errorf("BuildBorMissedIndices: %w", err) } if err := borSnapshots.ReopenFolder(); err != nil { return err } - borSnapshots.LogStat() + borSnapshots.LogStat("bor:missed-idx:reopen") if notifier != nil { notifier.OnNewSnapshot() } return nil } -func DumpBlocks(ctx context.Context, blockFrom, blockTo, blocksPerFile uint64, tmpDir, snapDir string, firstTxNum uint64, chainDB kv.RoDB, workers int, lvl log.Lvl, logger log.Logger, blockReader services.FullBlockReader) error { +func DumpBlocks(ctx context.Context, version uint8, blockFrom, blockTo, blocksPerFile uint64, tmpDir, snapDir string, chainDB kv.RoDB, workers int, lvl log.Lvl, logger log.Logger, blockReader services.FullBlockReader) error { if blocksPerFile == 0 { return nil } chainConfig := fromdb.ChainConfig(chainDB) + firstTxNum := blockReader.(*BlockReader).FirstTxNumNotInSnapshots() for i := 
blockFrom; i < blockTo; i = chooseSegmentEnd(i, blockTo, blocksPerFile) { - if err := dumpBlocksRange(ctx, i, chooseSegmentEnd(i, blockTo, blocksPerFile), tmpDir, snapDir, firstTxNum, chainDB, *chainConfig, workers, lvl, logger, blockReader); err != nil { + lastTxNum, err := dumpBlocksRange(ctx, version, i, chooseSegmentEnd(i, blockTo, blocksPerFile), tmpDir, snapDir, firstTxNum, chainDB, *chainConfig, workers, lvl, logger) + if err != nil { return err } + firstTxNum = lastTxNum + 1 } return nil } -func dumpBlocksRange(ctx context.Context, blockFrom, blockTo uint64, tmpDir, snapDir string, firstTxNum uint64, chainDB kv.RoDB, chainConfig chain.Config, workers int, lvl log.Lvl, logger log.Logger, blockReader services.FullBlockReader) error { +func dumpBlocksRange(ctx context.Context, version uint8, blockFrom, blockTo uint64, tmpDir, snapDir string, firstTxNum uint64, chainDB kv.RoDB, chainConfig chain.Config, workers int, lvl log.Lvl, logger log.Logger) (lastTxNum uint64, err error) { logEvery := time.NewTicker(20 * time.Second) defer logEvery.Stop() { - segName := snaptype.SegmentFileName(blockFrom, blockTo, snaptype.Headers) + segName := snaptype.SegmentFileName(version, blockFrom, blockTo, snaptype.Headers) f, _ := snaptype.ParseFileName(snapDir, segName) sn, err := compress.NewCompressor(ctx, "Snapshot Headers", f.Path, tmpDir, compress.MinPatternScore, workers, log.LvlTrace, logger) if err != nil { - return err + return lastTxNum, err } defer sn.Close() if err := DumpHeaders(ctx, chainDB, blockFrom, blockTo, workers, lvl, logger, func(v []byte) error { return sn.AddWord(v) }); err != nil { - return fmt.Errorf("DumpHeaders: %w", err) + return lastTxNum, fmt.Errorf("DumpHeaders: %w", err) } if err := sn.Compress(); err != nil { - return fmt.Errorf("compress: %w", err) + return lastTxNum, fmt.Errorf("compress: %w", err) } p := &background.Progress{} if err := buildIdx(ctx, f, &chainConfig, tmpDir, p, lvl, logger); err != nil { - return err + return lastTxNum, err } } { - segName := snaptype.SegmentFileName(blockFrom, blockTo, snaptype.Bodies) + segName := snaptype.SegmentFileName(version, blockFrom, blockTo, snaptype.Bodies) f, _ := snaptype.ParseFileName(snapDir, segName) sn, err := compress.NewCompressor(ctx, "Snapshot Bodies", f.Path, tmpDir, compress.MinPatternScore, workers, log.LvlTrace, logger) if err != nil { - return err + return lastTxNum, err } defer sn.Close() - if err := DumpBodies(ctx, chainDB, blockFrom, blockTo, firstTxNum, workers, lvl, logger, func(v []byte) error { + lastTxNum, err = DumpBodies(ctx, chainDB, blockFrom, blockTo, firstTxNum, lvl, logger, func(v []byte) error { return sn.AddWord(v) - }); err != nil { - return fmt.Errorf("DumpBodies: %w", err) + }) + if err != nil { + return lastTxNum, fmt.Errorf("DumpBodies: %w", err) } if err := sn.Compress(); err != nil { - return fmt.Errorf("compress: %w", err) + return lastTxNum, fmt.Errorf("compress: %w", err) } p := &background.Progress{} if err := buildIdx(ctx, f, &chainConfig, tmpDir, p, lvl, logger); err != nil { - return err + return lastTxNum, err } } { - segName := snaptype.SegmentFileName(blockFrom, blockTo, snaptype.Transactions) + segName := snaptype.SegmentFileName(version, blockFrom, blockTo, snaptype.Transactions) f, _ := snaptype.ParseFileName(snapDir, segName) sn, err := compress.NewCompressor(ctx, "Snapshot Txs", f.Path, tmpDir, compress.MinPatternScore, workers, log.LvlTrace, logger) if err != nil { - return fmt.Errorf("NewCompressor: %w, %s", err, f.Path) + return lastTxNum, fmt.Errorf("NewCompressor: 
%w, %s", err, f.Path) } defer sn.Close() @@ -1565,56 +1723,56 @@ func dumpBlocksRange(ctx context.Context, blockFrom, blockTo uint64, tmpDir, sna return sn.AddWord(v) }) if err != nil { - return fmt.Errorf("DumpTxs: %w", err) + return lastTxNum, fmt.Errorf("DumpTxs: %w", err) } if expectedCount != sn.Count() { - return fmt.Errorf("incorrect tx count: %d, expected from db: %d", sn.Count(), expectedCount) + return lastTxNum, fmt.Errorf("incorrect tx count: %d, expected from db: %d", sn.Count(), expectedCount) } snapDir, fileName := filepath.Split(f.Path) ext := filepath.Ext(fileName) logger.Log(lvl, "[snapshots] Compression start", "file", fileName[:len(fileName)-len(ext)], "workers", sn.Workers()) t := time.Now() - _, expectedCount, err = txsAmountBasedOnBodiesSnapshots(snapDir, blockFrom, blockTo) + _, expectedCount, err = txsAmountBasedOnBodiesSnapshots(snapDir, version, blockFrom, blockTo) if err != nil { - return err + return lastTxNum, err } if expectedCount != sn.Count() { - return fmt.Errorf("incorrect tx count: %d, expected from snapshots: %d", sn.Count(), expectedCount) + return lastTxNum, fmt.Errorf("incorrect tx count: %d, expected from snapshots: %d", sn.Count(), expectedCount) } if err := sn.Compress(); err != nil { - return fmt.Errorf("compress: %w", err) + return lastTxNum, fmt.Errorf("compress: %w", err) } logger.Log(lvl, "[snapshots] Compression", "took", time.Since(t), "ratio", sn.Ratio.String(), "file", fileName[:len(fileName)-len(ext)]) p := &background.Progress{} if err := buildIdx(ctx, f, &chainConfig, tmpDir, p, lvl, logger); err != nil { - return err + return lastTxNum, err } } - return nil + return lastTxNum, nil } func hasIdxFile(sn snaptype.FileInfo, logger log.Logger) bool { dir, _ := filepath.Split(sn.Path) - fName := snaptype.IdxFileName(sn.From, sn.To, sn.T.String()) + fName := snaptype.IdxFileName(sn.Version, sn.From, sn.To, sn.T.String()) var result = true switch sn.T { case snaptype.Headers, snaptype.Bodies, snaptype.BorEvents, snaptype.BorSpans, snaptype.BeaconBlocks: - idx, err := recsplit.OpenIndex(path.Join(dir, fName)) + idx, err := recsplit.OpenIndex(filepath.Join(dir, fName)) if err != nil { return false } idx.Close() case snaptype.Transactions: - idx, err := recsplit.OpenIndex(path.Join(dir, fName)) + idx, err := recsplit.OpenIndex(filepath.Join(dir, fName)) if err != nil { return false } idx.Close() - fName = snaptype.IdxFileName(sn.From, sn.To, snaptype.Transactions2Block.String()) - idx, err = recsplit.OpenIndex(path.Join(dir, fName)) + fName = snaptype.IdxFileName(sn.Version, sn.From, sn.To, snaptype.Transactions2Block.String()) + idx, err = recsplit.OpenIndex(filepath.Join(dir, fName)) if err != nil { return false } @@ -1623,6 +1781,12 @@ func hasIdxFile(sn snaptype.FileInfo, logger log.Logger) bool { return result } +var bufPool = sync.Pool{ + New: func() any { + return make([]byte, 16*4096) + }, +} + // DumpTxs - [from, to) // Format: hash[0]_1byte + sender_address_2bytes + txnRlp func DumpTxs(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, chainConfig *chain.Config, workers int, lvl log.Lvl, logger log.Logger, collect func([]byte) error) (expectedCount int, err error) { @@ -1634,12 +1798,12 @@ func DumpTxs(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, chainCo chainID, _ := uint256.FromBig(chainConfig.ChainID) numBuf := make([]byte, 8) - parseCtx := types2.NewTxParseContext(*chainID) - parseCtx.WithSender(false) - slot := types2.TxSlot{} - var sender [20]byte - parse := func(v, valueBuf []byte, senders 
[]common2.Address, j int) ([]byte, error) {
-		if _, err := parseCtx.ParseTransaction(v, 0, &slot, sender[:], false /* hasEnvelope */, false /* wrappedWithBlobs */, nil); err != nil {
+
+	parse := func(ctx *types2.TxParseContext, v, valueBuf []byte, senders []common2.Address, j int) ([]byte, error) {
+		var sender [20]byte
+		slot := types2.TxSlot{}
+
+		if _, err := ctx.ParseTransaction(v, 0, &slot, sender[:], false /* hasEnvelope */, false /* wrappedWithBlobs */, nil); err != nil {
 			return valueBuf, err
 		}
 		if len(senders) > 0 {
@@ -1652,8 +1816,8 @@ func DumpTxs(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, chainCo
 		valueBuf = append(valueBuf, v...)
 		return valueBuf, nil
 	}
-	valueBuf := make([]byte, 16*4096)
-	addSystemTx := func(tx kv.Tx, txId uint64) error {
+
+	addSystemTx := func(ctx *types2.TxParseContext, tx kv.Tx, txId uint64) error {
 		binary.BigEndian.PutUint64(numBuf, txId)
 		tv, err := tx.GetOne(kv.EthTx, numBuf)
 		if err != nil {
@@ -1666,8 +1830,12 @@ func DumpTxs(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, chainCo
 			return nil
 		}
-		parseCtx.WithSender(false)
-		valueBuf, err = parse(tv, valueBuf, nil, 0)
+		ctx.WithSender(false)
+
+		valueBuf := bufPool.Get().([]byte)
+		defer bufPool.Put(valueBuf) //nolint
+
+		valueBuf, err = parse(ctx, tv, valueBuf, nil, 0)
 		if err != nil {
 			return err
 		}
@@ -1712,30 +1880,89 @@ func DumpTxs(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, chainCo
 			return false, err
 		}

-		j := 0
+		workers := estimate.AlmostAllCPUs()
+
+		if workers > 3 {
+			workers = workers / 3 * 2
+		}

-		if err := addSystemTx(tx, body.BaseTxId); err != nil {
+		if workers > int(body.TxAmount-2) {
+			if int(body.TxAmount-2) > 1 {
+				workers = int(body.TxAmount - 2)
+			} else {
+				workers = 1
+			}
+		}
+
+		parsers := errgroup.Group{}
+		parsers.SetLimit(workers)
+
+		valueBufs := make([][]byte, workers)
+		parseCtxs := make([]*types2.TxParseContext, workers)
+
+		for i := 0; i < workers; i++ {
+			valueBuf := bufPool.Get().([]byte)
+			defer bufPool.Put(valueBuf) //nolint
+			valueBufs[i] = valueBuf
+			parseCtxs[i] = types2.NewTxParseContext(*chainID)
+		}
+
+		if err := addSystemTx(parseCtxs[0], tx, body.BaseTxId); err != nil {
 			return false, err
 		}
+
 		binary.BigEndian.PutUint64(numBuf, body.BaseTxId+1)
+
+		collected := -1
+		collectorLock := sync.Mutex{}
+		collections := sync.NewCond(&collectorLock)
+
+		var j int
+
 		if err := tx.ForAmount(kv.EthTx, numBuf, body.TxAmount-2, func(_, tv []byte) error {
-			parseCtx.WithSender(len(senders) == 0)
-			valueBuf, err = parse(tv, valueBuf, senders, j)
-			if err != nil {
-				return fmt.Errorf("%w, block: %d", err, blockNum)
-			}
-			// first tx byte => sender adress => tx rlp
-			if err := collect(valueBuf); err != nil {
-				return err
-			}
+			tx := j
 			j++
+			parsers.Go(func() error {
+				parseCtx := parseCtxs[tx%workers]
+
+				parseCtx.WithSender(len(senders) == 0)
+				parseCtx.WithAllowPreEip2s(blockNum <= chainConfig.HomesteadBlock.Uint64())
+
+				valueBuf, err := parse(parseCtx, tv, valueBufs[tx%workers], senders, tx)
+
+				if err != nil {
+					return fmt.Errorf("%w, block: %d", err, blockNum)
+				}
+
+				collectorLock.Lock()
+				defer collectorLock.Unlock()
+
+				for collected < tx-1 {
+					collections.Wait()
+				}
+
+				// first tx byte => sender address => tx rlp
+				if err := collect(valueBuf); err != nil {
+					return err
+				}
+
+				collected = tx
+				collections.Broadcast()
+
+				return nil
+			})
+
 			return nil
 		}); err != nil {
 			return false, fmt.Errorf("ForAmount: %w", err)
 		}

-		if err := addSystemTx(tx, body.BaseTxId+uint64(body.TxAmount)-1); err != nil {
+		if err := parsers.Wait(); err != nil {
+			return false, fmt.Errorf("ForAmount parser: %w", err)
+		}
+
+		if err := addSystemTx(parseCtxs[0], tx, body.BaseTxId+uint64(body.TxAmount)-1); err != nil {
 			return false, err
 		}
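The reworked DumpTxs above fans parsing out across a bounded errgroup but still has to feed collect in strict input order, which it does by gating each goroutine on a shared counter and sync.Cond. A self-contained sketch of that ordering pattern (toy squaring stands in for transaction parsing; this is an editor's illustration, not the production code):

package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

func main() {
	const jobs, workers = 8, 3

	g := errgroup.Group{}
	g.SetLimit(workers) // at most `workers` jobs in flight, like parsers.SetLimit

	collected := -1 // index of the last job whose result was emitted
	mu := sync.Mutex{}
	cond := sync.NewCond(&mu)

	for i := 0; i < jobs; i++ {
		i := i
		g.Go(func() error {
			result := i * i // parallel work; stands in for ParseTransaction

			mu.Lock()
			defer mu.Unlock()
			for collected < i-1 {
				cond.Wait() // block until every earlier job has been emitted
			}
			fmt.Println("job", i, "->", result) // emission is strictly ordered
			collected = i
			cond.Broadcast()
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		panic(err)
	}
}

Because g.Go blocks once the limit is reached and jobs are submitted in order, the in-flight window is always contiguous, so the lowest-indexed job can always make progress and the wait loop cannot deadlock.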
@@ -1813,7 +2040,7 @@ func DumpHeaders(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, wor
 }

 // DumpBodies - [from, to)
-func DumpBodies(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, firstTxNum uint64, workers int, lvl log.Lvl, logger log.Logger, collect func([]byte) error) error {
+func DumpBodies(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, firstTxNum uint64, lvl log.Lvl, logger log.Logger, collect func([]byte) error) (uint64, error) {
 	logEvery := time.NewTicker(20 * time.Second)
 	defer logEvery.Stop()

@@ -1870,16 +2097,16 @@ func DumpBodies(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, firs
 		}
 		return true, nil
 	}); err != nil {
-		return err
+		return firstTxNum, err
 	}

-	return nil
+	return firstTxNum, nil
 }

 var EmptyTxHash = common2.Hash{}

-func txsAmountBasedOnBodiesSnapshots(snapDir string, blockFrom, blockTo uint64) (firstTxID uint64, expectedCount int, err error) {
-	bodySegmentPath := filepath.Join(snapDir, snaptype.SegmentFileName(blockFrom, blockTo, snaptype.Bodies))
+func txsAmountBasedOnBodiesSnapshots(snapDir string, version uint8, blockFrom, blockTo uint64) (firstTxID uint64, expectedCount int, err error) {
+	bodySegmentPath := filepath.Join(snapDir, snaptype.SegmentFileName(version, blockFrom, blockTo, snaptype.Bodies))
 	bodiesSegment, err := compress.NewDecompressor(bodySegmentPath)
 	if err != nil {
 		return
@@ -1915,25 +2142,25 @@ func txsAmountBasedOnBodiesSnapshots(snapDir string, blockFrom, blockTo uint64)
 	return
 }

-func TransactionsIdx(ctx context.Context, chainConfig *chain.Config, blockFrom, blockTo uint64, snapDir string, tmpDir string, p *background.Progress, lvl log.Lvl, logger log.Logger) (err error) {
+func TransactionsIdx(ctx context.Context, chainConfig *chain.Config, version uint8, blockFrom, blockTo uint64, snapDir string, tmpDir string, p *background.Progress, lvl log.Lvl, logger log.Logger) (err error) {
 	defer func() {
 		if rec := recover(); rec != nil {
 			err = fmt.Errorf("TransactionsIdx: at=%d-%d, %v, %s", blockFrom, blockTo, rec, dbg.Stack())
 		}
 	}()
 	firstBlockNum := blockFrom
-	firstTxID, expectedCount, err := txsAmountBasedOnBodiesSnapshots(snapDir, blockFrom, blockTo)
+	firstTxID, expectedCount, err := txsAmountBasedOnBodiesSnapshots(snapDir, version, blockFrom, blockTo)
 	if err != nil {
 		return err
 	}
-	bodySegmentPath := filepath.Join(snapDir, snaptype.SegmentFileName(blockFrom, blockTo, snaptype.Bodies))
+	bodySegmentPath := filepath.Join(snapDir, snaptype.SegmentFileName(version, blockFrom, blockTo, snaptype.Bodies))
 	bodiesSegment, err := compress.NewDecompressor(bodySegmentPath)
 	if err != nil {
 		return
 	}
 	defer bodiesSegment.Close()

-	segFileName := snaptype.SegmentFileName(blockFrom, blockTo, snaptype.Transactions)
+	segFileName := snaptype.SegmentFileName(version, blockFrom, blockTo, snaptype.Transactions)
 	segmentFilePath := filepath.Join(snapDir, segFileName)
 	d, err := compress.NewDecompressor(segmentFilePath)
 	if err != nil {
@@ -1943,32 +2170,33 @@ func TransactionsIdx(ctx context.Context, chainConfig *chain.Config, blockFrom,
 	if d.Count() != expectedCount {
 		return fmt.Errorf("TransactionsIdx: at=%d-%d, pre index building, expect: %d, got %d", blockFrom, blockTo, expectedCount, d.Count())
 	}
-	p.Name.Store(&segFileName)
-	p.Total.Store(uint64(d.Count() * 2))
+
+	if p != nil {
+		p.Name.Store(&segFileName)
+
p.Total.Store(uint64(d.Count() * 2)) + } txnHashIdx, err := recsplit.NewRecSplit(recsplit.RecSplitArgs{ - KeyCount: d.Count(), - Enums: true, - BucketSize: 2000, - LeafSize: 8, - TmpDir: tmpDir, - IndexFile: filepath.Join(snapDir, snaptype.IdxFileName(blockFrom, blockTo, snaptype.Transactions.String())), - BaseDataID: firstTxID, - EtlBufLimit: etl.BufferOptimalSize / 2, + KeyCount: d.Count(), + Enums: true, + BucketSize: 2000, + LeafSize: 8, + TmpDir: tmpDir, + IndexFile: filepath.Join(snapDir, snaptype.IdxFileName(version, blockFrom, blockTo, snaptype.Transactions.String())), + BaseDataID: firstTxID, }, logger) if err != nil { return err } txnHash2BlockNumIdx, err := recsplit.NewRecSplit(recsplit.RecSplitArgs{ - KeyCount: d.Count(), - Enums: false, - BucketSize: 2000, - LeafSize: 8, - TmpDir: tmpDir, - IndexFile: filepath.Join(snapDir, snaptype.IdxFileName(blockFrom, blockTo, snaptype.Transactions2Block.String())), - BaseDataID: firstBlockNum, - EtlBufLimit: etl.BufferOptimalSize / 2, + KeyCount: d.Count(), + Enums: false, + BucketSize: 2000, + LeafSize: 8, + TmpDir: tmpDir, + IndexFile: filepath.Join(snapDir, snaptype.IdxFileName(version, blockFrom, blockTo, snaptype.Transactions2Block.String())), + BaseDataID: firstBlockNum, }, logger) if err != nil { return err @@ -1998,7 +2226,10 @@ RETRY: } for g.HasNext() { - p.Processed.Add(1) + if p != nil { + p.Processed.Add(1) + } + word, nextPos = g.Next(word[:0]) select { case <-ctx.Done(): @@ -2067,7 +2298,7 @@ RETRY: } // HeadersIdx - headerHash -> offset (analog of kv.HeaderNumber) -func HeadersIdx(ctx context.Context, chainConfig *chain.Config, segmentFilePath string, firstBlockNumInSegment uint64, tmpDir string, p *background.Progress, lvl log.Lvl, logger log.Logger) (err error) { +func HeadersIdx(ctx context.Context, segmentFilePath string, version uint8, firstBlockNumInSegment uint64, tmpDir string, p *background.Progress, lvl log.Lvl, logger log.Logger) (err error) { defer func() { if rec := recover(); rec != nil { _, fName := filepath.Split(segmentFilePath) @@ -2081,15 +2312,20 @@ func HeadersIdx(ctx context.Context, chainConfig *chain.Config, segmentFilePath } defer d.Close() - _, fname := filepath.Split(segmentFilePath) - p.Name.Store(&fname) - p.Total.Store(uint64(d.Count())) + if p != nil { + _, fname := filepath.Split(segmentFilePath) + p.Name.Store(&fname) + p.Total.Store(uint64(d.Count())) + } hasher := crypto.NewKeccakState() defer cryptopool.ReturnToPoolKeccak256(hasher) var h common2.Hash if err := Idx(ctx, d, firstBlockNumInSegment, tmpDir, log.LvlDebug, func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error { - p.Processed.Add(1) + if p != nil { + p.Processed.Add(1) + } + headerRlp := word[1:] hasher.Reset() hasher.Write(headerRlp) @@ -2120,12 +2356,16 @@ func BodiesIdx(ctx context.Context, segmentFilePath string, firstBlockNumInSegme } defer d.Close() - _, fname := filepath.Split(segmentFilePath) - p.Name.Store(&fname) - p.Total.Store(uint64(d.Count())) + if p != nil { + _, fname := filepath.Split(segmentFilePath) + p.Name.Store(&fname) + p.Total.Store(uint64(d.Count())) + } if err := Idx(ctx, d, firstBlockNumInSegment, tmpDir, log.LvlDebug, func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error { - p.Processed.Add(1) + if p != nil { + p.Processed.Add(1) + } n := binary.PutUvarint(num, i) if err := idx.AddKey(num[:n], offset); err != nil { return err @@ -2144,14 +2384,13 @@ func Idx(ctx context.Context, d *compress.Decompressor, firstDataID uint64, tmpD var idxFilePath = 
segmentFileName[0:len(segmentFileName)-len(extension)] + ".idx" rs, err := recsplit.NewRecSplit(recsplit.RecSplitArgs{ - KeyCount: d.Count(), - Enums: true, - BucketSize: 2000, - LeafSize: 8, - TmpDir: tmpDir, - IndexFile: idxFilePath, - BaseDataID: firstDataID, - EtlBufLimit: etl.BufferOptimalSize / 2, + KeyCount: d.Count(), + Enums: true, + BucketSize: 2000, + LeafSize: 8, + TmpDir: tmpDir, + IndexFile: idxFilePath, + BaseDataID: firstDataID, }, logger) if err != nil { return err @@ -2300,7 +2539,7 @@ func (v *View) Bodies() []*BodySegment { return v.s.Bodies.segments } func (v *View) Txs() []*TxnSegment { return v.s.Txs.segments } func (v *View) HeadersSegment(blockNum uint64) (*HeaderSegment, bool) { for _, seg := range v.Headers() { - if !(blockNum >= seg.ranges.from && blockNum < seg.ranges.to) { + if !(blockNum >= seg.from && blockNum < seg.to) { continue } return seg, true @@ -2309,7 +2548,7 @@ func (v *View) HeadersSegment(blockNum uint64) (*HeaderSegment, bool) { } func (v *View) BodiesSegment(blockNum uint64) (*BodySegment, bool) { for _, seg := range v.Bodies() { - if !(blockNum >= seg.ranges.from && blockNum < seg.ranges.to) { + if !(blockNum >= seg.from && blockNum < seg.to) { continue } return seg, true @@ -2318,7 +2557,7 @@ func (v *View) BodiesSegment(blockNum uint64) (*BodySegment, bool) { } func (v *View) TxsSegment(blockNum uint64) (*TxnSegment, bool) { for _, seg := range v.Txs() { - if !(blockNum >= seg.ranges.from && blockNum < seg.ranges.to) { + if !(blockNum >= seg.from && blockNum < seg.to) { continue } return seg, true @@ -2336,10 +2575,10 @@ func (m *Merger) filesByRange(snapshots *RoSnapshots, from, to uint64) (map[snap tSegments := view.Txs() for i, sn := range hSegments { - if sn.ranges.from < from { + if sn.from < from { continue } - if sn.ranges.to > to { + if sn.to > to { break } toMerge[snaptype.Headers] = append(toMerge[snaptype.Headers], hSegments[i].seg.FilePath()) @@ -2364,7 +2603,7 @@ func (m *Merger) Merge(ctx context.Context, snapshots *RoSnapshots, mergeRanges } for _, t := range snaptype.BlockSnapshotTypes { - segName := snaptype.SegmentFileName(r.from, r.to, t) + segName := snaptype.SegmentFileName(snapshots.version, r.from, r.to, t) f, ok := snaptype.ParseFileName(snapDir, segName) if !ok { continue @@ -2383,13 +2622,15 @@ func (m *Merger) Merge(ctx context.Context, snapshots *RoSnapshots, mergeRanges if err := snapshots.ReopenFolder(); err != nil { return fmt.Errorf("ReopenSegments: %w", err) } - snapshots.LogStat() + + snapshots.LogStat("merge") if onMerge != nil { if err := onMerge(r); err != nil { return err } } + for _, t := range snaptype.BlockSnapshotTypes { if len(toMerge[t]) == 0 { continue @@ -2399,10 +2640,10 @@ func (m *Merger) Merge(ctx context.Context, snapshots *RoSnapshots, mergeRanges return err } } - m.removeOldFiles(toMerge[t], snapDir) + m.removeOldFiles(toMerge[t], snapDir, snapshots.Version()) } } - m.logger.Log(m.lvl, "[snapshots] Merge done", "from", mergeRanges[0].from) + m.logger.Log(m.lvl, "[snapshots] Merge done", "from", mergeRanges[0].from, "to", mergeRanges[0].to) return nil } @@ -2455,7 +2696,7 @@ func (m *Merger) merge(ctx context.Context, toMerge []string, targetFile string, return nil } -func (m *Merger) removeOldFiles(toDel []string, snapDir string) { +func (m *Merger) removeOldFiles(toDel []string, snapDir string, version uint8) { for _, f := range toDel { _ = os.Remove(f) _ = os.Remove(f + ".torrent") @@ -2467,7 +2708,7 @@ func (m *Merger) removeOldFiles(toDel []string, snapDir string) { _ = 
os.Remove(withoutExt + "-to-block.idx") } } - tmpFiles, err := snaptype.TmpFiles(snapDir) + tmpFiles, err := snaptype.TmpFiles(snapDir, version) if err != nil { return } diff --git a/turbo/snapshotsync/freezeblocks/block_snapshots_test.go b/turbo/snapshotsync/freezeblocks/block_snapshots_test.go index 76ddb03c7de..2cb17f77d80 100644 --- a/turbo/snapshotsync/freezeblocks/block_snapshots_test.go +++ b/turbo/snapshotsync/freezeblocks/block_snapshots_test.go @@ -19,8 +19,8 @@ import ( "github.com/ledgerwatch/erigon/params" ) -func createTestSegmentFile(t *testing.T, from, to uint64, name snaptype.Type, dir string, logger log.Logger) { - c, err := compress.NewCompressor(context.Background(), "test", filepath.Join(dir, snaptype.SegmentFileName(from, to, name)), dir, 100, 1, log.LvlDebug, logger) +func createTestSegmentFile(t *testing.T, from, to uint64, name snaptype.Type, dir string, version uint8, logger log.Logger) { + c, err := compress.NewCompressor(context.Background(), "test", filepath.Join(dir, snaptype.SegmentFileName(version, from, to, name)), dir, 100, 1, log.LvlDebug, logger) require.NoError(t, err) defer c.Close() c.DisableFsync() @@ -32,7 +32,7 @@ func createTestSegmentFile(t *testing.T, from, to uint64, name snaptype.Type, di KeyCount: 1, BucketSize: 10, TmpDir: dir, - IndexFile: filepath.Join(dir, snaptype.IdxFileName(from, to, name.String())), + IndexFile: filepath.Join(dir, snaptype.IdxFileName(1, from, to, name.String())), LeafSize: 8, }, logger) require.NoError(t, err) @@ -47,7 +47,7 @@ func createTestSegmentFile(t *testing.T, from, to uint64, name snaptype.Type, di KeyCount: 1, BucketSize: 10, TmpDir: dir, - IndexFile: filepath.Join(dir, snaptype.IdxFileName(from, to, snaptype.Transactions2Block.String())), + IndexFile: filepath.Join(dir, snaptype.IdxFileName(1, from, to, snaptype.Transactions2Block.String())), LeafSize: 8, }, logger) require.NoError(t, err) @@ -94,7 +94,7 @@ func TestMergeSnapshots(t *testing.T) { dir, require := t.TempDir(), require.New(t) createFile := func(from, to uint64) { for _, snT := range snaptype.BlockSnapshotTypes { - createTestSegmentFile(t, from, to, snT, dir, logger) + createTestSegmentFile(t, from, to, snT, dir, 1, logger) } } @@ -102,7 +102,7 @@ func TestMergeSnapshots(t *testing.T) { for i := uint64(0); i < N; i++ { createFile(i*10_000, (i+1)*10_000) } - s := NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, logger) + s := NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 1, logger) defer s.Close() require.NoError(s.ReopenFolder()) { @@ -114,7 +114,7 @@ func TestMergeSnapshots(t *testing.T) { require.NoError(err) } - expectedFileName := snaptype.SegmentFileName(100_000, 200_000, snaptype.Transactions) + expectedFileName := snaptype.SegmentFileName(1, 100_000, 200_000, snaptype.Transactions) d, err := compress.NewDecompressor(filepath.Join(dir, expectedFileName)) require.NoError(err) defer d.Close() @@ -130,7 +130,7 @@ func TestMergeSnapshots(t *testing.T) { require.NoError(err) } - expectedFileName = snaptype.SegmentFileName(600_000, 700_000, snaptype.Transactions) + expectedFileName = snaptype.SegmentFileName(1, 600_000, 700_000, snaptype.Transactions) d, err = compress.NewDecompressor(filepath.Join(dir, expectedFileName)) require.NoError(err) defer d.Close() @@ -160,11 +160,11 @@ func TestCanRetire(t *testing.T) { func TestOpenAllSnapshot(t *testing.T) { logger := log.New() dir, require := t.TempDir(), require.New(t) - chainSnapshotCfg := snapcfg.KnownCfg(networkname.MainnetChainName) + chainSnapshotCfg := 
snapcfg.KnownCfg(networkname.MainnetChainName, 0) chainSnapshotCfg.ExpectBlocks = math.MaxUint64 cfg := ethconfig.BlocksFreezing{Enabled: true} - createFile := func(from, to uint64, name snaptype.Type) { createTestSegmentFile(t, from, to, name, dir, logger) } - s := NewRoSnapshots(cfg, dir, logger) + createFile := func(from, to uint64, name snaptype.Type) { createTestSegmentFile(t, from, to, name, dir, 1, logger) } + s := NewRoSnapshots(cfg, dir, 1, logger) defer s.Close() err := s.ReopenFolder() require.NoError(err) @@ -172,14 +172,14 @@ func TestOpenAllSnapshot(t *testing.T) { s.Close() createFile(500_000, 1_000_000, snaptype.Bodies) - s = NewRoSnapshots(cfg, dir, logger) + s = NewRoSnapshots(cfg, dir, 1, logger) defer s.Close() require.Equal(0, len(s.Bodies.segments)) //because, no headers and transactions snapshot files are created s.Close() createFile(500_000, 1_000_000, snaptype.Headers) createFile(500_000, 1_000_000, snaptype.Transactions) - s = NewRoSnapshots(cfg, dir, logger) + s = NewRoSnapshots(cfg, dir, 1, logger) err = s.ReopenFolder() require.NoError(err) require.Equal(0, len(s.Headers.segments)) @@ -188,7 +188,7 @@ func TestOpenAllSnapshot(t *testing.T) { createFile(0, 500_000, snaptype.Bodies) createFile(0, 500_000, snaptype.Headers) createFile(0, 500_000, snaptype.Transactions) - s = NewRoSnapshots(cfg, dir, logger) + s = NewRoSnapshots(cfg, dir, 1, logger) defer s.Close() err = s.ReopenFolder() @@ -200,11 +200,11 @@ func TestOpenAllSnapshot(t *testing.T) { seg, ok := view.TxsSegment(10) require.True(ok) - require.Equal(int(seg.ranges.to), 500_000) + require.Equal(int(seg.to), 500_000) seg, ok = view.TxsSegment(500_000) require.True(ok) - require.Equal(int(seg.ranges.to), 1_000_000) + require.Equal(int(seg.to), 1_000_000) _, ok = view.TxsSegment(1_000_000) require.False(ok) @@ -212,7 +212,7 @@ func TestOpenAllSnapshot(t *testing.T) { // Erigon may create new snapshots by itself - with high bigger than hardcoded ExpectedBlocks // ExpectedBlocks - says only how much block must come from Torrent chainSnapshotCfg.ExpectBlocks = 500_000 - 1 - s = NewRoSnapshots(cfg, dir, logger) + s = NewRoSnapshots(cfg, dir, 1, logger) err = s.ReopenFolder() require.NoError(err) defer s.Close() @@ -222,7 +222,7 @@ func TestOpenAllSnapshot(t *testing.T) { createFile(500_000, 900_000, snaptype.Bodies) createFile(500_000, 900_000, snaptype.Transactions) chainSnapshotCfg.ExpectBlocks = math.MaxUint64 - s = NewRoSnapshots(cfg, dir, logger) + s = NewRoSnapshots(cfg, dir, 1, logger) defer s.Close() err = s.ReopenFolder() require.NoError(err) diff --git a/turbo/snapshotsync/freezeblocks/bor_snapshots.go b/turbo/snapshotsync/freezeblocks/bor_snapshots.go index 72574c7eeeb..af486ef9347 100644 --- a/turbo/snapshotsync/freezeblocks/bor_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/bor_snapshots.go @@ -6,9 +6,7 @@ import ( "encoding/binary" "errors" "fmt" - "github.com/ledgerwatch/erigon/consensus/bor" "os" - "path" "path/filepath" "reflect" "runtime" @@ -16,6 +14,9 @@ import ( "sync/atomic" "time" + "github.com/ledgerwatch/log/v3" + "golang.org/x/exp/slices" + "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/chain/snapcfg" common2 "github.com/ledgerwatch/erigon-lib/common" @@ -32,15 +33,15 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/polygon/bor" "github.com/ledgerwatch/erigon/turbo/services" - "github.com/ledgerwatch/log/v3" - 
"golang.org/x/exp/slices" ) type BorEventSegment struct { seg *compress.Decompressor // value: event_rlp IdxBorTxnHash *recsplit.Index // bor_transaction_hash -> bor_event_segment_offset - ranges Range + Range + version uint8 } func (sn *BorEventSegment) closeIdx() { @@ -61,8 +62,8 @@ func (sn *BorEventSegment) close() { } func (sn *BorEventSegment) reopenSeg(dir string) (err error) { sn.closeSeg() - fileName := snaptype.SegmentFileName(sn.ranges.from, sn.ranges.to, snaptype.BorEvents) - sn.seg, err = compress.NewDecompressor(path.Join(dir, fileName)) + fileName := snaptype.SegmentFileName(sn.version, sn.from, sn.to, snaptype.BorEvents) + sn.seg, err = compress.NewDecompressor(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, fileName: %s", err, fileName) } @@ -74,8 +75,8 @@ func (sn *BorEventSegment) reopenIdx(dir string) (err error) { return nil } - fileName := snaptype.IdxFileName(sn.ranges.from, sn.ranges.to, snaptype.BorEvents.String()) - sn.IdxBorTxnHash, err = recsplit.OpenIndex(path.Join(dir, fileName)) + fileName := snaptype.IdxFileName(sn.version, sn.from, sn.to, snaptype.BorEvents.String()) + sn.IdxBorTxnHash, err = recsplit.OpenIndex(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, fileName: %s", err, fileName) } @@ -105,9 +106,10 @@ type borEventSegments struct { } type BorSpanSegment struct { - seg *compress.Decompressor // value: span_json - idx *recsplit.Index // span_id -> offset - ranges Range + seg *compress.Decompressor // value: span_json + idx *recsplit.Index // span_id -> offset + Range + version uint8 } func (sn *BorSpanSegment) closeIdx() { @@ -128,8 +130,8 @@ func (sn *BorSpanSegment) close() { } func (sn *BorSpanSegment) reopenSeg(dir string) (err error) { sn.closeSeg() - fileName := snaptype.SegmentFileName(sn.ranges.from, sn.ranges.to, snaptype.BorSpans) - sn.seg, err = compress.NewDecompressor(path.Join(dir, fileName)) + fileName := snaptype.SegmentFileName(sn.version, sn.from, sn.to, snaptype.BorSpans) + sn.seg, err = compress.NewDecompressor(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, fileName: %s", err, fileName) } @@ -140,8 +142,8 @@ func (sn *BorSpanSegment) reopenIdx(dir string) (err error) { if sn.seg == nil { return nil } - fileName := snaptype.IdxFileName(sn.ranges.from, sn.ranges.to, snaptype.BorSpans.String()) - sn.idx, err = recsplit.OpenIndex(path.Join(dir, fileName)) + fileName := snaptype.IdxFileName(sn.version, sn.from, sn.to, snaptype.BorSpans.String()) + sn.idx, err = recsplit.OpenIndex(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, fileName: %s", err, fileName) } @@ -170,28 +172,33 @@ type borSpanSegments struct { segments []*BorSpanSegment } -func (br *BlockRetire) retireBorBlocks(ctx context.Context, blockFrom, blockTo uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDelete func(l []string) error) error { +func (br *BlockRetire) retireBorBlocks(ctx context.Context, minBlockNum uint64, maxBlockNum uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDelete func(l []string) error) (bool, error) { chainConfig := fromdb.ChainConfig(br.db) notifier, logger, blockReader, tmpDir, db, workers := br.notifier, br.logger, br.blockReader, br.tmpDir, br.db, br.workers - logger.Log(lvl, "[bor snapshots] Retire Bor Blocks", "range", fmt.Sprintf("%dk-%dk", blockFrom/1000, blockTo/1000)) snapshots := br.borSnapshots() firstTxNum := 
blockReader.(*BlockReader).FirstTxNumNotInSnapshots() - - if err := DumpBorBlocks(ctx, chainConfig, blockFrom, blockTo, snaptype.Erigon2MergeLimit, tmpDir, snapshots.Dir(), firstTxNum, db, workers, lvl, logger, blockReader); err != nil { - return fmt.Errorf("DumpBorBlocks: %w", err) - } - if err := snapshots.ReopenFolder(); err != nil { - return fmt.Errorf("reopen: %w", err) - } - snapshots.LogStat() - if notifier != nil && !reflect.ValueOf(notifier).IsNil() { // notify about new snapshots of any size - notifier.OnNewSnapshot() + blockFrom, blockTo, ok := CanRetire(maxBlockNum, minBlockNum) + if ok { + logger.Log(lvl, "[bor snapshots] Retire Bor Blocks", "range", fmt.Sprintf("%dk-%dk", blockFrom/1000, blockTo/1000)) + if err := DumpBorBlocks(ctx, chainConfig, snapshots.version, blockFrom, blockTo, snaptype.Erigon2MergeLimit, tmpDir, snapshots.Dir(), firstTxNum, db, workers, lvl, logger, blockReader); err != nil { + return ok, fmt.Errorf("DumpBorBlocks: %w", err) + } + if err := snapshots.ReopenFolder(); err != nil { + return ok, fmt.Errorf("reopen: %w", err) + } + snapshots.LogStat("retire") + if notifier != nil && !reflect.ValueOf(notifier).IsNil() { // notify about new snapshots of any size + notifier.OnNewSnapshot() + } } + merger := NewBorMerger(tmpDir, workers, lvl, db, chainConfig, notifier, logger) rangesToMerge := merger.FindMergeRanges(snapshots.Ranges()) + logger.Log(lvl, "[bor snapshots] Retire Bor Blocks", "rangesToMerge", fmt.Sprintf("%s", Ranges(rangesToMerge))) if len(rangesToMerge) == 0 { - return nil + return ok, nil } + ok = true // have something to merge onMerge := func(r Range) error { if notifier != nil && !reflect.ValueOf(notifier).IsNil() { // notify about new snapshots of any size notifier.OnNewSnapshot() @@ -209,30 +216,29 @@ func (br *BlockRetire) retireBorBlocks(ctx context.Context, blockFrom, blockTo u } err := merger.Merge(ctx, snapshots, rangesToMerge, snapshots.Dir(), true /* doIndex */, onMerge, onDelete) if err != nil { - return err + return ok, err } - return nil + return ok, nil } - -func DumpBorBlocks(ctx context.Context, chainConfig *chain.Config, blockFrom, blockTo, blocksPerFile uint64, tmpDir, snapDir string, firstTxNum uint64, chainDB kv.RoDB, workers int, lvl log.Lvl, logger log.Logger, blockReader services.FullBlockReader) error { +func DumpBorBlocks(ctx context.Context, chainConfig *chain.Config, version uint8, blockFrom, blockTo, blocksPerFile uint64, tmpDir, snapDir string, firstTxNum uint64, chainDB kv.RoDB, workers int, lvl log.Lvl, logger log.Logger, blockReader services.FullBlockReader) error { if blocksPerFile == 0 { return nil } for i := blockFrom; i < blockTo; i = chooseSegmentEnd(i, blockTo, blocksPerFile) { - if err := dumpBorBlocksRange(ctx, i, chooseSegmentEnd(i, blockTo, blocksPerFile), tmpDir, snapDir, firstTxNum, chainDB, *chainConfig, workers, lvl, logger, blockReader); err != nil { + if err := dumpBorBlocksRange(ctx, version, i, chooseSegmentEnd(i, blockTo, blocksPerFile), tmpDir, snapDir, firstTxNum, chainDB, *chainConfig, workers, lvl, logger, blockReader); err != nil { return err } } return nil } -func dumpBorBlocksRange(ctx context.Context, blockFrom, blockTo uint64, tmpDir, snapDir string, firstTxNum uint64, chainDB kv.RoDB, chainConfig chain.Config, workers int, lvl log.Lvl, logger log.Logger, blockReader services.FullBlockReader) error { +func dumpBorBlocksRange(ctx context.Context, version uint8, blockFrom, blockTo uint64, tmpDir, snapDir string, firstTxNum uint64, chainDB kv.RoDB, chainConfig chain.Config, workers int, 
lvl log.Lvl, logger log.Logger, blockReader services.FullBlockReader) error { logEvery := time.NewTicker(20 * time.Second) defer logEvery.Stop() { - segName := snaptype.SegmentFileName(blockFrom, blockTo, snaptype.BorEvents) + segName := snaptype.SegmentFileName(version, blockFrom, blockTo, snaptype.BorEvents) f, _ := snaptype.ParseFileName(snapDir, segName) sn, err := compress.NewCompressor(ctx, "Snapshot BorEvents", f.Path, tmpDir, compress.MinPatternScore, workers, log.LvlTrace, logger) @@ -255,7 +261,7 @@ func dumpBorBlocksRange(ctx context.Context, blockFrom, blockTo uint64, tmpDir, } } { - segName := snaptype.SegmentFileName(blockFrom, blockTo, snaptype.BorSpans) + segName := snaptype.SegmentFileName(version, blockFrom, blockTo, snaptype.BorSpans) f, _ := snaptype.ParseFileName(snapDir, segName) sn, err := compress.NewCompressor(ctx, "Snapshot BorSpans", f.Path, tmpDir, compress.MinPatternScore, workers, log.LvlTrace, logger) @@ -401,7 +407,7 @@ func DumpBorSpans(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, wo return nil } -func BorEventsIdx(ctx context.Context, segmentFilePath string, blockFrom, blockTo uint64, snapDir string, tmpDir string, p *background.Progress, lvl log.Lvl, logger log.Logger) (err error) { +func BorEventsIdx(ctx context.Context, segmentFilePath string, version uint8, blockFrom, blockTo uint64, snapDir string, tmpDir string, p *background.Progress, lvl log.Lvl, logger log.Logger) (err error) { defer func() { if rec := recover(); rec != nil { err = fmt.Errorf("BorEventsIdx: at=%d-%d, %v, %s", blockFrom, blockTo, rec, dbg.Stack()) @@ -435,7 +441,7 @@ func BorEventsIdx(ctx context.Context, segmentFilePath string, blockFrom, blockT default: } } - var idxFilePath = filepath.Join(snapDir, snaptype.IdxFileName(blockFrom, blockTo, snaptype.BorEvents.String())) + var idxFilePath = filepath.Join(snapDir, snaptype.IdxFileName(version, blockFrom, blockTo, snaptype.BorEvents.String())) rs, err := recsplit.NewRecSplit(recsplit.RecSplitArgs{ KeyCount: blockCount, @@ -487,7 +493,7 @@ RETRY: return nil } -func BorSpansIdx(ctx context.Context, segmentFilePath string, blockFrom, blockTo uint64, snapDir string, tmpDir string, p *background.Progress, lvl log.Lvl, logger log.Logger) (err error) { +func BorSpansIdx(ctx context.Context, segmentFilePath string, version uint8, blockFrom, blockTo uint64, snapDir string, tmpDir string, p *background.Progress, lvl log.Lvl, logger log.Logger) (err error) { defer func() { if rec := recover(); rec != nil { err = fmt.Errorf("BorSpansIdx: at=%d-%d, %v, %s", blockFrom, blockTo, rec, dbg.Stack()) @@ -500,7 +506,7 @@ func BorSpansIdx(ctx context.Context, segmentFilePath string, blockFrom, blockTo } defer d.Close() g := d.MakeGetter() - var idxFilePath = filepath.Join(snapDir, snaptype.IdxFileName(blockFrom, blockTo, snaptype.BorSpans.String())) + var idxFilePath = filepath.Join(snapDir, snaptype.IdxFileName(version, blockFrom, blockTo, snaptype.BorSpans.String())) baseSpanId := bor.SpanIDAt(blockFrom) @@ -561,6 +567,9 @@ type BorRoSnapshots struct { idxMax atomic.Uint64 // all types of .idx files are available - up to this number cfg ethconfig.BlocksFreezing logger log.Logger + version uint8 + + segmentsMin atomic.Uint64 } // NewBorRoSnapshots - opens all bor snapshots. 
But to simplify everything: @@ -568,30 +577,33 @@ type BorRoSnapshots struct { // - all snapshots of given blocks range must exist - to make this blocks range available // - gaps are not allowed // - segment have [from:to) semantic -func NewBorRoSnapshots(cfg ethconfig.BlocksFreezing, snapDir string, logger log.Logger) *BorRoSnapshots { - return &BorRoSnapshots{dir: snapDir, cfg: cfg, Events: &borEventSegments{}, Spans: &borSpanSegments{}, logger: logger} +func NewBorRoSnapshots(cfg ethconfig.BlocksFreezing, snapDir string, version uint8, logger log.Logger) *BorRoSnapshots { + return &BorRoSnapshots{dir: snapDir, version: version, cfg: cfg, Events: &borEventSegments{}, Spans: &borSpanSegments{}, logger: logger} } +func (s *BorRoSnapshots) Version() uint8 { return s.version } func (s *BorRoSnapshots) Cfg() ethconfig.BlocksFreezing { return s.cfg } func (s *BorRoSnapshots) Dir() string { return s.dir } func (s *BorRoSnapshots) SegmentsReady() bool { return s.segmentsReady.Load() } func (s *BorRoSnapshots) IndicesReady() bool { return s.indicesReady.Load() } func (s *BorRoSnapshots) IndicesMax() uint64 { return s.idxMax.Load() } func (s *BorRoSnapshots) SegmentsMax() uint64 { return s.segmentsMax.Load() } +func (s *BorRoSnapshots) SegmentsMin() uint64 { return s.segmentsMin.Load() } +func (s *BorRoSnapshots) SetSegmentsMin(min uint64) { s.segmentsMin.Store(min) } func (s *BorRoSnapshots) BlocksAvailable() uint64 { return cmp.Min(s.segmentsMax.Load(), s.idxMax.Load()) } -func (s *BorRoSnapshots) LogStat() { +func (s *BorRoSnapshots) LogStat(label string) { var m runtime.MemStats dbg.ReadMemStats(&m) - s.logger.Info("[bor snapshots] Blocks Stat", + s.logger.Info(fmt.Sprintf("[bor snapshots:%s] Blocks Stat", label), "blocks", fmt.Sprintf("%dk", (s.SegmentsMax()+1)/1000), "indices", fmt.Sprintf("%dk", (s.IndicesMax()+1)/1000), "alloc", common2.ByteCount(m.Alloc), "sys", common2.ByteCount(m.Sys)) } -func BorSegments(dir string) (res []snaptype.FileInfo, missingSnapshots []Range, err error) { - list, err := snaptype.Segments(dir) +func BorSegments(dir string, version uint8, min uint64) (res []snaptype.FileInfo, missingSnapshots []Range, err error) { + list, err := snaptype.Segments(dir, version) if err != nil { return nil, missingSnapshots, err } @@ -604,7 +616,7 @@ func BorSegments(dir string) (res []snaptype.FileInfo, missingSnapshots []Range, } l = append(l, f) } - l, m = noGaps(noOverlaps(borSegmentsMustExist(dir, l))) + l, m = noGaps(noOverlaps(borSegmentsMustExist(dir, l)), min) res = append(res, l...) missingSnapshots = append(missingSnapshots, m...) } @@ -616,13 +628,75 @@ func BorSegments(dir string) (res []snaptype.FileInfo, missingSnapshots []Range, } l = append(l, f) } - l, _ = noGaps(noOverlaps(borSegmentsMustExist(dir, l))) + l, _ = noGaps(noOverlaps(borSegmentsMustExist(dir, l)), min) res = append(res, l...) 
 	}
 	return res, missingSnapshots, nil
 }

+// this is one-off code to fix an issue in 2.49.x->2.52.x which missed
+// removal of intermediate segments after a merge operation
+func removeBorOverlaps(dir string, version uint8, active []snaptype.FileInfo, max uint64) {
+	list, err := snaptype.Segments(dir, version)
+
+	if err != nil {
+		return
+	}
+
+	var toDel []string
+	l := make([]snaptype.FileInfo, 0, len(list))
+
+	for _, f := range list {
+		if !(f.T == snaptype.BorSpans || f.T == snaptype.BorEvents) {
+			continue
+		}
+		l = append(l, f)
+	}
+
+	// added overhead to make sure we don't delete in the
+	// current 500k block segment
+	if max > 500_001 {
+		max -= 500_001
+	}
+
+	for _, f := range l {
+		if max < f.From {
+			continue
+		}
+
+		for _, a := range active {
+			if a.T != snaptype.BorSpans {
+				continue
+			}
+
+			if f.From < a.From {
+				continue
+			}
+
+			if f.From == a.From {
+				if f.To < a.To {
+					toDel = append(toDel, f.Path)
+				}
+
+				break
+			}
+
+			if f.From < a.To {
+				toDel = append(toDel, f.Path)
+				break
+			}
+		}
+	}
+
+	for _, f := range toDel {
+		_ = os.Remove(f)
+		ext := filepath.Ext(f)
+		withoutExt := f[:len(f)-len(ext)]
+		_ = os.Remove(withoutExt + ".idx")
+	}
+}
+
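Restating the deletion rule above in isolation: a leftover bor segment is stale when an active (post-merge) span segment either shares its start but extends further, or begins before it and still covers its start. A compact editor's sketch over plain ranges (the 500k-block recency guard and file-type filtering are omitted here):

package main

import "fmt"

type Range struct{ From, To uint64 }

// stale mirrors the inner loop of removeBorOverlaps: candidate f is
// redundant once the kept segment a subsumes its starting point.
func stale(f, a Range) bool {
	if f.From < a.From {
		return false // candidate starts before the kept range: leave it
	}
	if f.From == a.From {
		return f.To < a.To // same start but shorter: a pre-merge leftover
	}
	return f.From < a.To // starts strictly inside the kept range
}

func main() {
	merged := Range{0, 500_000}
	fmt.Println(stale(Range{0, 100_000}, merged))       // true
	fmt.Println(stale(Range{100_000, 200_000}, merged)) // true
	fmt.Println(stale(Range{0, 500_000}, merged))       // false: the merged file itself
	fmt.Println(stale(Range{500_000, 600_000}, merged)) // false: the next segment
}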
 func (s *BorRoSnapshots) EnsureExpectedBlocksAreAvailable(cfg *snapcfg.Cfg) error {
 	if s.BlocksAvailable() < cfg.ExpectBlocks {
 		return fmt.Errorf("app must wait until all expected bor snapshots are available. Expected: %d, Available: %d", cfg.ExpectBlocks, s.BlocksAvailable())
@@ -689,13 +763,13 @@ func (s *BorRoSnapshots) idxAvailability() uint64 {
 		if seg.IdxBorTxnHash == nil {
 			break
 		}
-		events = seg.ranges.to - 1
+		events = seg.to - 1
 	}
 	for _, seg := range s.Spans.segments {
 		if seg.idx == nil {
 			break
 		}
-		spans = seg.ranges.to - 1
+		spans = seg.to - 1
 	}
 	return cmp.Min(events, spans)
 }
@@ -723,7 +797,7 @@ func (s *BorRoSnapshots) Files() (list []string) {
 		if seg.seg == nil {
 			continue
 		}
-		if seg.ranges.from > max {
+		if seg.from > max {
 			continue
 		}
 		_, fName := filepath.Split(seg.seg.FilePath())
@@ -733,7 +807,7 @@ func (s *BorRoSnapshots) Files() (list []string) {
 		if seg.seg == nil {
 			continue
 		}
-		if seg.ranges.from > max {
+		if seg.from > max {
 			continue
 		}
 		_, fName := filepath.Split(seg.seg.FilePath())
@@ -777,7 +851,7 @@ Loop:
 			}
 		}
 		if !exists {
-			sn = &BorEventSegment{ranges: Range{f.From, f.To}}
+			sn = &BorEventSegment{version: f.Version, Range: Range{f.From, f.To}}
 		}
 		if err := sn.reopenSeg(s.dir); err != nil {
 			if errors.Is(err, os.ErrNotExist) {
@@ -817,7 +891,7 @@ Loop:
 			}
 		}
 		if !exists {
-			sn = &BorSpanSegment{ranges: Range{f.From, f.To}}
+			sn = &BorSpanSegment{version: f.Version, Range: Range{f.From, f.To}}
 		}
 		if err := sn.reopenSeg(s.dir); err != nil {
 			if errors.Is(err, os.ErrNotExist) {
@@ -871,7 +945,7 @@ func (s *BorRoSnapshots) Ranges() (ranges []Range) {
 	defer view.Close()

 	for _, sn := range view.Events() {
-		ranges = append(ranges, sn.ranges)
+		ranges = append(ranges, sn.Range)
 	}
 	return ranges
 }
@@ -879,10 +953,15 @@ func (s *BorRoSnapshots) Ranges() (ranges []Range) {
 func (s *BorRoSnapshots) OptimisticalyReopenFolder() { _ = s.ReopenFolder() }
 func (s *BorRoSnapshots) OptimisticalyReopenWithDB(db kv.RoDB) { _ = s.ReopenWithDB(db) }
 func (s *BorRoSnapshots) ReopenFolder() error {
-	files, _, err := BorSegments(s.dir)
+	files, _, err := BorSegments(s.dir, s.version, s.segmentsMin.Load())
 	if err != nil {
 		return err
 	}
+
+	// this is one-off code to fix an issue in 2.49.x->2.52.x which missed
+	// removal of intermediate segments after a merge operation
+	removeBorOverlaps(s.dir, s.version, files, s.BlocksAvailable())
+
 	list := make([]string, 0, len(files))
 	for _, f := range files {
 		_, fName := filepath.Split(f.Path)
@@ -970,11 +1049,11 @@ func (s *BorRoSnapshots) PrintDebug() {
 	defer s.Spans.lock.RUnlock()
 	fmt.Println(" == BorSnapshots, Event")
 	for _, sn := range s.Events.segments {
-		fmt.Printf("%d, %t\n", sn.ranges.from, sn.IdxBorTxnHash == nil)
+		fmt.Printf("%d, %t\n", sn.from, sn.IdxBorTxnHash == nil)
 	}
 	fmt.Println(" == BorSnapshots, Span")
 	for _, sn := range s.Spans.segments {
-		fmt.Printf("%d, %t\n", sn.ranges.from, sn.idx == nil)
+		fmt.Printf("%d, %t\n", sn.from, sn.idx == nil)
 	}
 }

@@ -1002,7 +1081,7 @@ func (v *BorView) Events() []*BorEventSegment { return v.s.Events.segments }
 func (v *BorView) Spans() []*BorSpanSegment { return v.s.Spans.segments }
 func (v *BorView) EventsSegment(blockNum uint64) (*BorEventSegment, bool) {
 	for _, seg := range v.Events() {
-		if !(blockNum >= seg.ranges.from && blockNum < seg.ranges.to) {
+		if !(blockNum >= seg.from && blockNum < seg.to) {
 			continue
 		}
 		return seg, true
@@ -1011,7 +1090,7 @@ func (v *BorView) EventsSegment(blockNum uint64) (*BorEventSegment, bool) {
 }
 func (v *BorView) SpansSegment(blockNum uint64) (*BorSpanSegment, bool) {
 	for _, seg := range v.Spans() {
-		if !(blockNum >= seg.ranges.from && blockNum < seg.ranges.to) {
+		if !(blockNum >= seg.from && blockNum < seg.to) {
 			continue
 		}
 		return seg, true
@@ -1068,10 +1147,10 @@ func (m *BorMerger) filesByRange(snapshots *BorRoSnapshots, from, to uint64) (ma
 	sSegments := view.Spans()

 	for i, sn := range eSegments {
-		if sn.ranges.from < from {
+		if sn.from < from {
 			continue
 		}
-		if sn.ranges.to > to {
+		if sn.to > to {
 			break
 		}
 		toMerge[snaptype.BorEvents] = append(toMerge[snaptype.BorEvents], eSegments[i].seg.FilePath())
@@ -1094,8 +1173,8 @@ func (m *BorMerger) Merge(ctx context.Context, snapshots *BorRoSnapshots, mergeR
 			return err
 		}

-		for _, t := range []snaptype.Type{snaptype.BorEvents, snaptype.BorSpans} {
-			segName := snaptype.SegmentFileName(r.from, r.to, t)
+		for _, t := range snaptype.BorSnapshotTypes {
+			segName := snaptype.SegmentFileName(snapshots.Version(), r.from, r.to, t)
 			f, ok := snaptype.ParseFileName(snapDir, segName)
 			if !ok {
 				continue
@@ -1113,20 +1192,24 @@ func (m *BorMerger) Merge(ctx context.Context, snapshots *BorRoSnapshots, mergeR
 		if err := snapshots.ReopenFolder(); err != nil {
 			return fmt.Errorf("ReopenSegments: %w", err)
 		}
-		snapshots.LogStat()
+		snapshots.LogStat("merge")
 		if err := onMerge(r); err != nil {
 			return err
 		}
-		for _, t := range snaptype.BlockSnapshotTypes {
+
+		for _, t := range snaptype.BorSnapshotTypes {
 			if len(toMerge[t]) == 0 {
 				continue
 			}
+
 			if err := onDelete(toMerge[t]); err != nil {
 				return err
 			}
+
 		}
-		for _, t := range []snaptype.Type{snaptype.BorEvents, snaptype.BorSpans} {
-			m.removeOldFiles(toMerge[t], snapDir)
+		time.Sleep(1 * time.Second) // I'm working on a blocking API - to ensure clients do not use old snapshots - and then delete them
+		for _, t := range snaptype.BorSnapshotTypes {
+			m.removeOldFiles(toMerge[t], snapDir, snapshots.Version())
 		}
 	}
 	m.logger.Log(m.lvl, "[bor snapshots] Merge done", "from", mergeRanges[0].from, "to", mergeRanges[0].to)
@@ -1176,14 +1259,14 @@ func (m *BorMerger) merge(ctx context.Context, toMerge []string, targetFile stri
 	return nil
 }

-func (m *BorMerger) removeOldFiles(toDel []string, snapDir string) {
+func (m *BorMerger) removeOldFiles(toDel []string, snapDir string, version uint8) {
 	for _, f := range toDel {
 		_ = os.Remove(f)
 		ext := filepath.Ext(f)
 		withoutExt := f[:len(f)-len(ext)]
 		_ = os.Remove(withoutExt + ".idx")
 	}
-	tmpFiles, err := snaptype.TmpFiles(snapDir)
+	tmpFiles, err := snaptype.TmpFiles(snapDir, version)
 	if err != nil {
 		return
 	}
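Both mergers now thread the snapshot version through file naming and TmpFiles filtering before touching disk. For orientation, a hypothetical sketch of what a versioned segment name could look like - the v%d prefix and the divide-by-1000 zero-padded bounds are assumptions for illustration, not taken from this diff:

package main

import "fmt"

// segmentFileName is a hypothetical stand-in for snaptype.SegmentFileName;
// the exact format string is assumed, not confirmed by this patch.
func segmentFileName(version uint8, from, to uint64, typ string) string {
	return fmt.Sprintf("v%d-%06d-%06d-%s.seg", version, from/1_000, to/1_000, typ)
}

func main() {
	fmt.Println(segmentFileName(1, 500_000, 1_000_000, "borevents"))
	// Output (under the assumed format): v1-000500-001000-borevents.seg
}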
snaptype.TmpFiles(snapDir) + tmpFiles, err := snaptype.TmpFiles(snapDir, version) if err != nil { return } diff --git a/turbo/snapshotsync/freezeblocks/caplin_snapshots.go b/turbo/snapshotsync/freezeblocks/caplin_snapshots.go index 4661224dd4e..ee7d2e70284 100644 --- a/turbo/snapshotsync/freezeblocks/caplin_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/caplin_snapshots.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "os" - "path" "path/filepath" "sync" "sync/atomic" @@ -33,6 +32,7 @@ type BeaconBlockSegment struct { seg *compress.Decompressor // value: chunked(ssz(SignedBeaconBlocks)) idxSlot *recsplit.Index // slot -> beacon_slot_segment_offset ranges Range + version uint8 } func (sn *BeaconBlockSegment) closeIdx() { @@ -53,8 +53,8 @@ func (sn *BeaconBlockSegment) close() { } func (sn *BeaconBlockSegment) reopenSeg(dir string) (err error) { sn.closeSeg() - fileName := snaptype.SegmentFileName(sn.ranges.from, sn.ranges.to, snaptype.BeaconBlocks) - sn.seg, err = compress.NewDecompressor(path.Join(dir, fileName)) + fileName := snaptype.SegmentFileName(sn.version, sn.ranges.from, sn.ranges.to, snaptype.BeaconBlocks) + sn.seg, err = compress.NewDecompressor(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, fileName: %s", err, fileName) } @@ -82,8 +82,8 @@ func (sn *BeaconBlockSegment) reopenIdx(dir string) (err error) { if sn.seg == nil { return nil } - fileName := snaptype.IdxFileName(sn.ranges.from, sn.ranges.to, snaptype.BeaconBlocks.String()) - sn.idxSlot, err = recsplit.OpenIndex(path.Join(dir, fileName)) + fileName := snaptype.IdxFileName(sn.version, sn.ranges.from, sn.ranges.to, snaptype.BeaconBlocks.String()) + sn.idxSlot, err = recsplit.OpenIndex(filepath.Join(dir, fileName)) if err != nil { return fmt.Errorf("%w, fileName: %s", err, fileName) } @@ -146,6 +146,9 @@ type CaplinSnapshots struct { idxMax atomic.Uint64 // all types of .idx files are available - up to this number cfg ethconfig.BlocksFreezing logger log.Logger + // allows for pruning segments - this is the min available segment + segmentsMin atomic.Uint64 + version uint8 // chain cfg beaconCfg *clparams.BeaconChainConfig } @@ -155,10 +158,11 @@ type CaplinSnapshots struct { // - all snapshots of given blocks range must exist - to make this blocks range available // - gaps are not allowed // - segment have [from:to) semantic -func NewCaplinSnapshots(cfg ethconfig.BlocksFreezing, beaconCfg *clparams.BeaconChainConfig, snapDir string, logger log.Logger) *CaplinSnapshots { - return &CaplinSnapshots{dir: snapDir, cfg: cfg, BeaconBlocks: &beaconBlockSegments{}, logger: logger, beaconCfg: beaconCfg} +func NewCaplinSnapshots(cfg ethconfig.BlocksFreezing, beaconCfg *clparams.BeaconChainConfig, snapDir string, version uint8, logger log.Logger) *CaplinSnapshots { + return &CaplinSnapshots{dir: snapDir, version: version, cfg: cfg, BeaconBlocks: &beaconBlockSegments{}, logger: logger, beaconCfg: beaconCfg} } +func (s *CaplinSnapshots) Version() uint8 { return s.version } func (s *CaplinSnapshots) IndicesMax() uint64 { return s.idxMax.Load() } func (s *CaplinSnapshots) SegmentsMax() uint64 { return s.segmentsMax.Load() } @@ -207,7 +211,7 @@ Loop: } } if !exists { - sn = &BeaconBlockSegment{ranges: Range{f.From, f.To}} + sn = &BeaconBlockSegment{version: s.version, ranges: Range{f.From, f.To}} } if err := sn.reopenSeg(s.dir); err != nil { if errors.Is(err, os.ErrNotExist) { @@ -266,7 +270,7 @@ func (s *CaplinSnapshots) idxAvailability() uint64 { } func (s *CaplinSnapshots) ReopenFolder() error { - files, _, err := 
SegmentsCaplin(s.dir) + files, _, err := SegmentsCaplin(s.dir, s.version, s.segmentsMin.Load()) if err != nil { return err } @@ -338,8 +342,8 @@ func (v *CaplinView) BeaconBlocksSegment(slot uint64) (*BeaconBlockSegment, bool return nil, false } -func dumpBeaconBlocksRange(ctx context.Context, db kv.RoDB, b persistence.BlockSource, fromSlot uint64, toSlot uint64, tmpDir, snapDir string, workers int, lvl log.Lvl, logger log.Logger) error { - segName := snaptype.SegmentFileName(fromSlot, toSlot, snaptype.BeaconBlocks) +func dumpBeaconBlocksRange(ctx context.Context, db kv.RoDB, b persistence.BlockSource, version uint8, fromSlot uint64, toSlot uint64, tmpDir, snapDir string, workers int, lvl log.Lvl, logger log.Logger) error { + segName := snaptype.SegmentFileName(version, fromSlot, toSlot, snaptype.BeaconBlocks) f, _ := snaptype.ParseFileName(snapDir, segName) sn, err := compress.NewCompressor(ctx, "Snapshot BeaconBlocks", f.Path, tmpDir, compress.MinPatternScore, workers, lvl, logger) @@ -397,10 +401,10 @@ func dumpBeaconBlocksRange(ctx context.Context, db kv.RoDB, b persistence.BlockS // Generate .idx file, which is the slot => offset mapping. p := &background.Progress{} - return BeaconBlocksIdx(ctx, f, path.Join(snapDir, segName), fromSlot, toSlot, tmpDir, p, lvl, logger) + return BeaconBlocksIdx(ctx, f, filepath.Join(snapDir, segName), fromSlot, toSlot, tmpDir, p, lvl, logger) } -func DumpBeaconBlocks(ctx context.Context, db kv.RoDB, b persistence.BlockSource, fromSlot, toSlot, blocksPerFile uint64, tmpDir, snapDir string, workers int, lvl log.Lvl, logger log.Logger) error { +func DumpBeaconBlocks(ctx context.Context, db kv.RoDB, b persistence.BlockSource, version uint8, fromSlot, toSlot, blocksPerFile uint64, tmpDir, snapDir string, workers int, lvl log.Lvl, logger log.Logger) error { if blocksPerFile == 0 { return nil } @@ -411,7 +415,7 @@ func DumpBeaconBlocks(ctx context.Context, db kv.RoDB, b persistence.BlockSource } to := chooseSegmentEnd(i, toSlot, blocksPerFile) logger.Log(lvl, "Dumping beacon blocks", "from", i, "to", to) - if err := dumpBeaconBlocksRange(ctx, db, b, i, to, tmpDir, snapDir, workers, lvl, logger); err != nil { + if err := dumpBeaconBlocksRange(ctx, db, b, version, i, to, tmpDir, snapDir, workers, lvl, logger); err != nil { return err } } @@ -424,7 +428,7 @@ func (s *CaplinSnapshots) BuildMissingIndices(ctx context.Context, logger log.Lo // } // wait for Downloader service to download all expected snapshots - segments, _, err := SegmentsCaplin(s.dir) + segments, _, err := SegmentsCaplin(s.dir, s.version, 0) if err != nil { return err } diff --git a/turbo/snapshotsync/freezeblocks/dump_test.go b/turbo/snapshotsync/freezeblocks/dump_test.go index 5136f711534..e056c64d2d7 100644 --- a/turbo/snapshotsync/freezeblocks/dump_test.go +++ b/turbo/snapshotsync/freezeblocks/dump_test.go @@ -1,6 +1,7 @@ package freezeblocks_test import ( + "github.com/ledgerwatch/erigon/polygon/bor/borcfg" "math/big" "testing" @@ -51,9 +52,9 @@ func TestDump(t *testing.T) { } withConfig := func(config chain.Config, sprints map[string]uint64) *chain.Config { - bor := *config.Bor + bor := *config.Bor.(*borcfg.BorConfig) + bor.Sprint = sprints config.Bor = &bor - config.Bor.Sprint = sprints return &config } @@ -181,7 +182,7 @@ func TestDump(t *testing.T) { txsAmount := uint64(0) var baseIdList []uint64 firstTxNum := uint64(0) - err := freezeblocks.DumpBodies(m.Ctx, m.DB, 0, uint64(test.chainSize-3), firstTxNum, 1, log.LvlInfo, log.New(), func(v []byte) error { + _, err := 
freezeblocks.DumpBodies(m.Ctx, m.DB, 0, uint64(test.chainSize-3), firstTxNum, log.LvlInfo, log.New(), func(v []byte) error { i++ body := &types.BodyForStorage{} require.NoError(rlp.DecodeBytes(v, body)) @@ -197,7 +198,7 @@ func TestDump(t *testing.T) { firstTxNum += txsAmount i = 0 baseIdList = baseIdList[:0] - err = freezeblocks.DumpBodies(m.Ctx, m.DB, 2, uint64(2*test.chainSize), firstTxNum, 1, log.LvlInfo, log.New(), func(v []byte) error { + _, err = freezeblocks.DumpBodies(m.Ctx, m.DB, 2, uint64(2*test.chainSize), firstTxNum, log.LvlInfo, log.New(), func(v []byte) error { i++ body := &types.BodyForStorage{} require.NoError(rlp.DecodeBytes(v, body)) @@ -215,7 +216,7 @@ func TestDump(t *testing.T) { i := 0 var baseIdList []uint64 firstTxNum := uint64(1000) - err := freezeblocks.DumpBodies(m.Ctx, m.DB, 2, uint64(test.chainSize), firstTxNum, 1, log.LvlInfo, log.New(), func(v []byte) error { + lastTxNum, err := freezeblocks.DumpBodies(m.Ctx, m.DB, 2, uint64(test.chainSize), firstTxNum, log.LvlInfo, log.New(), func(v []byte) error { i++ body := &types.BodyForStorage{} require.NoError(rlp.DecodeBytes(v, body)) @@ -225,6 +226,8 @@ func TestDump(t *testing.T) { require.NoError(err) require.Equal(test.chainSize-2, i) require.Equal(baseIdRange(int(firstTxNum), 3, test.chainSize-2), baseIdList) + require.Equal(lastTxNum, baseIdList[len(baseIdList)-1]+3) + require.Equal(lastTxNum, firstTxNum+uint64(i*3)) }) t.Run("blocks", func(t *testing.T) { if test.chainSize < 1000 || test.chainSize%1000 != 0 { @@ -236,10 +239,10 @@ func TestDump(t *testing.T) { logger := log.New() tmpDir, snapDir := t.TempDir(), t.TempDir() - snConfig := snapcfg.KnownCfg(networkname.MainnetChainName) + snConfig := snapcfg.KnownCfg(networkname.MainnetChainName, 0) snConfig.ExpectBlocks = math.MaxUint64 - err := freezeblocks.DumpBlocks(m.Ctx, 0, uint64(test.chainSize), uint64(test.chainSize), tmpDir, snapDir, 0, m.DB, 1, log.LvlInfo, logger, m.BlockReader) + err := freezeblocks.DumpBlocks(m.Ctx, 1, 0, uint64(test.chainSize), uint64(test.chainSize), tmpDir, snapDir, m.DB, 1, log.LvlInfo, logger, m.BlockReader) require.NoError(err) }) } diff --git a/turbo/snapshotsync/snapshotsync.go b/turbo/snapshotsync/snapshotsync.go index 0c9d7663db5..7197fc68384 100644 --- a/turbo/snapshotsync/snapshotsync.go +++ b/turbo/snapshotsync/snapshotsync.go @@ -67,7 +67,7 @@ func RequestSnapshotsDownload(ctx context.Context, downloadRequest []services.Do // WaitForDownloader - wait for Downloader service to download all expected snapshots // for MVP we sync with Downloader only once, in future will send new snapshots also -func WaitForDownloader(logPrefix string, ctx context.Context, histV3 bool, caplin CaplinMode, agg *state.AggregatorV3, tx kv.RwTx, blockReader services.FullBlockReader, cc *chain.Config, snapshotDownloader proto_downloader.DownloaderClient) error { +func WaitForDownloader(ctx context.Context, logPrefix string, histV3 bool, caplin CaplinMode, agg *state.AggregatorV3, tx kv.RwTx, blockReader services.FullBlockReader, cc *chain.Config, snapshotDownloader proto_downloader.DownloaderClient, stagesIdsList []string) error { snapshots := blockReader.Snapshots() borSnapshots := blockReader.BorSnapshots() if blockReader.FreezingCfg().NoDownloader { @@ -87,7 +87,9 @@ func WaitForDownloader(logPrefix string, ctx context.Context, histV3 bool, capli // - Erigon "download once": means restart/upgrade/downgrade must not download files (and will be fast) // - After "download once" - Erigon will produce and seed new files - preverifiedBlockSnapshots 
:= snapcfg.KnownCfg(cc.ChainName).Preverified + // send all hashes to the Downloader service + snapCfg := snapcfg.KnownCfg(cc.ChainName, 0) + preverifiedBlockSnapshots := snapCfg.Preverified downloadRequest := make([]services.DownloadRequest, 0, len(preverifiedBlockSnapshots)) // build all download requests @@ -160,6 +162,7 @@ Loop: log.Info(fmt.Sprintf("[%s] download finished", logPrefix), "time", time.Since(downloadStartTime).String()) break Loop } else { + diagnostics.Send(diagnostics.SyncStagesList{Stages: stagesIdsList}) diagnostics.Send(diagnostics.SnapshotDownloadStatistics{ Downloaded: stats.BytesCompleted, Total: stats.BytesTotal, diff --git a/turbo/stages/blockchain_test.go b/turbo/stages/blockchain_test.go index ad193e9ea88..c09fd913c00 100644 --- a/turbo/stages/blockchain_test.go +++ b/turbo/stages/blockchain_test.go @@ -25,6 +25,7 @@ import ( "math/big" "testing" + "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common/hexutil" "github.com/holiman/uint256" @@ -32,8 +33,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/ledgerwatch/erigon-lib/chain" - chain2 "github.com/ledgerwatch/erigon-lib/chain" + libchain "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/kv" @@ -616,7 +616,7 @@ func TestEIP155Transition(t *testing.T) { funds = big.NewInt(1000000000) deleteAddr = libcommon.Address{1} gspec = &types.Genesis{ - Config: &chain.Config{ChainID: big.NewInt(1), TangerineWhistleBlock: big.NewInt(0), SpuriousDragonBlock: big.NewInt(2), HomesteadBlock: new(big.Int)}, + Config: &libchain.Config{ChainID: big.NewInt(1), TangerineWhistleBlock: big.NewInt(0), SpuriousDragonBlock: big.NewInt(2), HomesteadBlock: new(big.Int)}, Alloc: types.GenesisAlloc{address: {Balance: funds}, deleteAddr: {Balance: new(big.Int)}}, } ) @@ -689,7 +689,7 @@ func TestEIP155Transition(t *testing.T) { } // generate an invalid chain id transaction - config := &chain2.Config{ChainID: big.NewInt(2), TangerineWhistleBlock: big.NewInt(0), SpuriousDragonBlock: big.NewInt(2), HomesteadBlock: new(big.Int)} + config := &libchain.Config{ChainID: big.NewInt(2), TangerineWhistleBlock: big.NewInt(0), SpuriousDragonBlock: big.NewInt(2), HomesteadBlock: new(big.Int)} chain, chainErr = core.GenerateChain(config, chain.TopBlock, m.Engine, m.DB, 4, func(i int, block *core.BlockGen) { var ( basicTx = func(signer types.Signer) (types.Transaction, error) { @@ -741,7 +741,7 @@ func doModesTest(t *testing.T, pm prune.Mode) error { funds = big.NewInt(1000000000) deleteAddr = libcommon.Address{1} gspec = &types.Genesis{ - Config: &chain.Config{ChainID: big.NewInt(1), TangerineWhistleBlock: big.NewInt(0), SpuriousDragonBlock: big.NewInt(2), HomesteadBlock: new(big.Int)}, + Config: &libchain.Config{ChainID: big.NewInt(1), TangerineWhistleBlock: big.NewInt(0), SpuriousDragonBlock: big.NewInt(2), HomesteadBlock: new(big.Int)}, Alloc: types.GenesisAlloc{address: {Balance: funds}, deleteAddr: {Balance: new(big.Int)}}, } ) @@ -959,7 +959,7 @@ func TestEIP161AccountRemoval(t *testing.T) { funds = big.NewInt(1000000000) theAddr = libcommon.Address{1} gspec = &types.Genesis{ - Config: &chain.Config{ + Config: &libchain.Config{ ChainID: big.NewInt(1), HomesteadBlock: new(big.Int), TangerineWhistleBlock: new(big.Int), diff --git a/turbo/stages/genesis_test.go b/turbo/stages/genesis_test.go index 67e685eb366..c94b72ee15b 100644 --- 
a/turbo/stages/genesis_test.go +++ b/turbo/stages/genesis_test.go @@ -87,7 +87,7 @@ func TestSetupGenesis(t *testing.T) { { name: "custom block in DB, genesis == nil", fn: func(db kv.RwDB) (*chain.Config, *types.Block, error) { - core.MustCommitGenesis(&customg, db, tmpdir) + core.MustCommitGenesis(&customg, db, tmpdir, logger) return core.CommitGenesisBlock(db, nil, tmpdir, logger) }, wantHash: customghash, @@ -96,7 +96,7 @@ func TestSetupGenesis(t *testing.T) { { name: "custom block in DB, genesis == sepolia", fn: func(db kv.RwDB) (*chain.Config, *types.Block, error) { - core.MustCommitGenesis(&customg, db, tmpdir) + core.MustCommitGenesis(&customg, db, tmpdir, logger) return core.CommitGenesisBlock(db, core.SepoliaGenesisBlock(), tmpdir, logger) }, wantErr: &types.GenesisMismatchError{Stored: customghash, New: params.SepoliaGenesisHash}, @@ -106,7 +106,7 @@ func TestSetupGenesis(t *testing.T) { { name: "custom block in DB, genesis == bor-mainnet", fn: func(db kv.RwDB) (*chain.Config, *types.Block, error) { - core.MustCommitGenesis(&customg, db, tmpdir) + core.MustCommitGenesis(&customg, db, tmpdir, logger) return core.CommitGenesisBlock(db, core.BorMainnetGenesisBlock(), tmpdir, logger) }, wantErr: &types.GenesisMismatchError{Stored: customghash, New: params.BorMainnetGenesisHash}, @@ -116,7 +116,7 @@ func TestSetupGenesis(t *testing.T) { { name: "custom block in DB, genesis == mumbai", fn: func(db kv.RwDB) (*chain.Config, *types.Block, error) { - core.MustCommitGenesis(&customg, db, tmpdir) + core.MustCommitGenesis(&customg, db, tmpdir, logger) return core.CommitGenesisBlock(db, core.MumbaiGenesisBlock(), tmpdir, logger) }, wantErr: &types.GenesisMismatchError{Stored: customghash, New: params.MumbaiGenesisHash}, @@ -126,7 +126,7 @@ func TestSetupGenesis(t *testing.T) { { name: "custom block in DB, genesis == amoy", fn: func(db kv.RwDB) (*chain.Config, *types.Block, error) { - core.MustCommitGenesis(&customg, db, tmpdir) + core.MustCommitGenesis(&customg, db, tmpdir, logger) return core.CommitGenesisBlock(db, core.AmoyGenesisBlock(), tmpdir, logger) }, wantErr: &types.GenesisMismatchError{Stored: customghash, New: params.AmoyGenesisHash}, @@ -136,7 +136,7 @@ func TestSetupGenesis(t *testing.T) { { name: "compatible config in DB", fn: func(db kv.RwDB) (*chain.Config, *types.Block, error) { - core.MustCommitGenesis(&oldcustomg, db, tmpdir) + core.MustCommitGenesis(&oldcustomg, db, tmpdir, logger) return core.CommitGenesisBlock(db, &customg, tmpdir, logger) }, wantHash: customghash, @@ -176,7 +176,7 @@ func TestSetupGenesis(t *testing.T) { t.Run(test.name, func(t *testing.T) { t.Parallel() _, db, _ := temporal.NewTestDB(t, datadir.New(tmpdir), nil) - blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()), freezeblocks.NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New())) + blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", 1, log.New()), freezeblocks.NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", 1, log.New())) config, genesis, err := test.fn(db) // Check the return values. 
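Note on the version plumbing visible in the hunks above: an explicit snapshot version (a uint8) is now threaded through NewRoSnapshots, NewBorRoSnapshots, NewCaplinSnapshots, snapcfg.KnownCfg and snaptype.SegmentFileName, and CaplinSnapshots additionally gains a segmentsMin watermark so segments pruned below it can be skipped when reopening the folder. A minimal sketch of what a version-prefixed segment name looks like under this scheme; the zero-padding and the thousands-of-blocks convention are assumptions based on Erigon's published snapshot file names (e.g. v1-000000-000500-headers.seg), not something this diff shows:

package main

import "fmt"

// segmentFileName imitates the post-change shape of snaptype.SegmentFileName:
// the version is an explicit first argument instead of a hard-coded "v1".
func segmentFileName(version uint8, from, to uint64, typ string) string {
	// Block numbers recorded in thousands, zero-padded to six digits (assumed).
	return fmt.Sprintf("v%d-%06d-%06d-%s.seg", version, from/1_000, to/1_000, typ)
}

func main() {
	fmt.Println(segmentFileName(1, 0, 500_000, "headers")) // v1-000000-000500-headers.seg
}

Making the version a constructor argument (the literal 1 in the test updates above) lets a datadir carry differently versioned snapshot sets side by side instead of baking the version into every call site.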
if !reflect.DeepEqual(err, test.wantErr) { diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index f9b64b074ea..3c6fc8fde54 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -623,12 +623,15 @@ func (hd *HeaderDownload) InsertHeader(hf FeedHeaderFunc, terminalTotalDifficult // InsertHeaders attempts to insert headers into the database, verifying them first // It returns true in the first return value if the system is "in sync" -func (hd *HeaderDownload) InsertHeaders(hf FeedHeaderFunc, terminalTotalDifficulty *big.Int, logPrefix string, logChannel <-chan time.Time, currentTime uint64) (bool, error) { +func (hd *HeaderDownload) InsertHeaders(hf FeedHeaderFunc, headerLimit uint, terminalTotalDifficulty *big.Int, logPrefix string, logChannel <-chan time.Time, currentTime uint64) (bool, error) { var more = true var err error var force bool var blocksToTTD uint64 var blockTime uint64 + + startHeight := hd.highestInDb + for more { if more, force, blocksToTTD, blockTime, err = hd.InsertHeader(hf, terminalTotalDifficulty, logPrefix, logChannel); err != nil { return false, err @@ -636,9 +639,13 @@ func (hd *HeaderDownload) InsertHeaders(hf FeedHeaderFunc, terminalTotalDifficul if force { return true, nil } + + if headerLimit > 0 && hd.highestInDb-startHeight > uint64(headerLimit) { + break + } } if blocksToTTD > 0 { - hd.logger.Info("Estimated to reaching TTD", "blocks", blocksToTTD) + hd.logger.Trace("Estimated to reaching TTD", "blocks", blocksToTTD) } hd.lock.RLock() defer hd.lock.RUnlock() diff --git a/turbo/stages/mock/mock_sentry.go b/turbo/stages/mock/mock_sentry.go index 33dbf572ea6..6feee7c5994 100644 --- a/turbo/stages/mock/mock_sentry.go +++ b/turbo/stages/mock/mock_sentry.go @@ -33,9 +33,8 @@ import ( "github.com/ledgerwatch/erigon-lib/txpool" "github.com/ledgerwatch/erigon-lib/txpool/txpoolcfg" types2 "github.com/ledgerwatch/erigon-lib/types" - + "github.com/ledgerwatch/erigon-lib/wrap" "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/consensus/bor" "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" @@ -51,8 +50,10 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/ethdb/prune" + "github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/p2p/sentry/sentry_multi_client" "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/polygon/bor" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/builder" "github.com/ledgerwatch/erigon/turbo/engineapi/engine_helpers" @@ -258,9 +259,9 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK histV3, db, agg := temporal.NewTestDB(nil, dirs, nil) cfg.HistoryV3 = histV3 - erigonGrpcServeer := remotedbserver.NewKvServer(ctx, db, nil, nil, logger) - allSnapshots := freezeblocks.NewRoSnapshots(ethconfig.Defaults.Snapshot, dirs.Snap, logger) - allBorSnapshots := freezeblocks.NewBorRoSnapshots(ethconfig.Defaults.Snapshot, dirs.Snap, logger) + erigonGrpcServeer := remotedbserver.NewKvServer(ctx, db, nil, nil, nil, logger) + allSnapshots := freezeblocks.NewRoSnapshots(ethconfig.Defaults.Snapshot, dirs.Snap, 1, logger) + allBorSnapshots := freezeblocks.NewBorRoSnapshots(ethconfig.Defaults.Snapshot, dirs.Snap, 1, logger) mock := &MockSentry{ Ctx: ctx, cancel: ctxCancel, DB: db, agg: 
agg, tb: tb, @@ -310,7 +311,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK shanghaiTime := mock.ChainConfig.ShanghaiTime cancunTime := mock.ChainConfig.CancunTime maxBlobsPerBlock := mock.ChainConfig.GetMaxBlobsPerBlock() - mock.TxPool, err = txpool.New(newTxs, mock.DB, poolCfg, kvcache.NewDummy(), *chainID, shanghaiTime, nil /* agraBlock */, cancunTime, maxBlobsPerBlock, logger) + mock.TxPool, err = txpool.New(newTxs, mock.DB, poolCfg, kvcache.NewDummy(), *chainID, shanghaiTime, nil /* agraBlock */, cancunTime, maxBlobsPerBlock, nil, logger) if err != nil { tb.Fatal(err) } @@ -328,7 +329,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK mock.TxPoolFetch.ConnectSentries() mock.StreamWg.Wait() - go txpool.MainLoop(mock.Ctx, mock.txPoolDB, mock.DB, mock.TxPool, newTxs, mock.TxPoolSend, mock.TxPoolGrpcServer.NewSlotsStreams, func() {}) + go txpool.MainLoop(mock.Ctx, mock.txPoolDB, mock.TxPool, newTxs, mock.TxPoolSend, mock.TxPoolGrpcServer.NewSlotsStreams, func() {}) } // Committed genesis will be shared between download and mock sentry @@ -342,20 +343,20 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK } latestBlockBuiltStore := builder.NewLatestBlockBuiltStore() - inMemoryExecution := func(batch kv.RwTx, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody, + inMemoryExecution := func(txc wrap.TxContainer, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody, notifications *shards.Notifications) error { terseLogger := log.New() terseLogger.SetHandler(log.LvlFilterHandler(log.LvlWarn, log.StderrHandler)) // Needs its own notifications to not update RPC daemon and txpool about pending blocks stateSync := stages2.NewInMemoryExecution(mock.Ctx, mock.DB, &cfg, mock.sentriesClient, dirs, notifications, mock.BlockReader, blockWriter, mock.agg, nil, terseLogger) - chainReader := stagedsync.NewChainReaderImpl(mock.ChainConfig, batch, mock.BlockReader, logger) + chainReader := stagedsync.NewChainReaderImpl(mock.ChainConfig, txc.Tx, mock.BlockReader, logger) // We start the mining step - if err := stages2.StateStep(ctx, chainReader, mock.Engine, batch, blockWriter, stateSync, mock.sentriesClient.Bd, header, body, unwindPoint, headersChain, bodiesChain, histV3); err != nil { + if err := stages2.StateStep(ctx, chainReader, mock.Engine, txc, blockWriter, stateSync, mock.sentriesClient.Bd, header, body, unwindPoint, headersChain, bodiesChain, histV3); err != nil { logger.Warn("Could not validate block", "err", err) return err } - progress, err := stages.GetStageProgress(batch, stages.IntermediateHashes) + progress, err := stages.GetStageProgress(txc.Tx, stages.IntermediateHashes) if err != nil { return err } @@ -411,9 +412,10 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK miningStatePos := stagedsync.NewProposingState(&cfg.Miner) miningStatePos.MiningConfig.Etherbase = param.SuggestedFeeRecipient proposingSync := stagedsync.New( + cfg.Sync, stagedsync.MiningStages(mock.Ctx, stagedsync.StageMiningCreateBlockCfg(mock.DB, miningStatePos, *mock.ChainConfig, mock.Engine, mock.txPoolDB, param, tmpdir, mock.BlockReader), - stagedsync.StageBorHeimdallCfg(mock.DB, snapDb, miningStatePos, *mock.ChainConfig, nil, mock.BlockReader, nil, nil, recents, signatures), + stagedsync.StageBorHeimdallCfg(mock.DB, snapDb, miningStatePos, 
*mock.ChainConfig, nil, mock.BlockReader, nil, nil, nil, recents, signatures), stagedsync.StageMiningExecCfg(mock.DB, miningStatePos, mock.Notifications.Events, *mock.ChainConfig, mock.Engine, &vm.Config{}, tmpdir, interrupt, param.PayloadId, mock.TxPool, mock.txPoolDB, mock.BlockReader), stagedsync.StageHashStateCfg(mock.DB, dirs, cfg.HistoryV3), stagedsync.StageTrieCfg(mock.DB, false, true, true, tmpdir, mock.BlockReader, nil, histV3, mock.agg), @@ -421,7 +423,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK ), stagedsync.MiningUnwindOrder, stagedsync.MiningPruneOrder, logger) // We start the mining step - if err := stages2.MiningStep(ctx, mock.DB, proposingSync, tmpdir); err != nil { + if err := stages2.MiningStep(ctx, mock.DB, proposingSync, tmpdir, logger); err != nil { return nil, err } block := <-miningStatePos.MiningResultPOSCh @@ -430,13 +432,14 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK blockRetire := freezeblocks.NewBlockRetire(1, dirs, mock.BlockReader, blockWriter, mock.DB, mock.ChainConfig, mock.Notifications.Events, logger) mock.Sync = stagedsync.New( + cfg.Sync, stagedsync.DefaultStages(mock.Ctx, - stagedsync.StageSnapshotsCfg(mock.DB, *mock.ChainConfig, dirs, blockRetire, snapshotsDownloader, mock.BlockReader, mock.Notifications.Events, mock.HistoryV3, mock.agg, false, nil), - stagedsync.StageHeadersCfg(mock.DB, mock.sentriesClient.Hd, mock.sentriesClient.Bd, *mock.ChainConfig, sendHeaderRequest, propagateNewBlockHashes, penalize, cfg.BatchSize, false, mock.BlockReader, blockWriter, dirs.Tmp, mock.Notifications, engine_helpers.NewForkValidatorMock(1), nil), - stagedsync.StageBorHeimdallCfg(mock.DB, snapDb, stagedsync.MiningState{}, *mock.ChainConfig, nil /* heimdallClient */, mock.BlockReader, nil, nil, recents, signatures), + stagedsync.StageSnapshotsCfg(mock.DB, *mock.ChainConfig, cfg.Sync, dirs, blockRetire, snapshotsDownloader, mock.BlockReader, mock.Notifications, mock.HistoryV3, mock.agg, false, nil), + stagedsync.StageHeadersCfg(mock.DB, mock.sentriesClient.Hd, mock.sentriesClient.Bd, *mock.ChainConfig, cfg.Sync, sendHeaderRequest, propagateNewBlockHashes, penalize, cfg.BatchSize, false, mock.BlockReader, blockWriter, dirs.Tmp, mock.Notifications, engine_helpers.NewForkValidatorMock(1), nil), + stagedsync.StageBorHeimdallCfg(mock.DB, snapDb, stagedsync.MiningState{}, *mock.ChainConfig, nil /* heimdallClient */, mock.BlockReader, nil, nil, nil, recents, signatures), stagedsync.StageBlockHashesCfg(mock.DB, mock.Dirs.Tmp, mock.ChainConfig, blockWriter), - stagedsync.StageBodiesCfg(mock.DB, mock.sentriesClient.Bd, sendBodyRequest, penalize, blockPropagator, cfg.Sync.BodyDownloadTimeoutSeconds, *mock.ChainConfig, mock.BlockReader, cfg.HistoryV3, blockWriter), - stagedsync.StageSendersCfg(mock.DB, mock.ChainConfig, false, dirs.Tmp, prune, mock.BlockReader, mock.sentriesClient.Hd), + stagedsync.StageBodiesCfg(mock.DB, mock.sentriesClient.Bd, sendBodyRequest, penalize, blockPropagator, cfg.Sync.BodyDownloadTimeoutSeconds, *mock.ChainConfig, mock.BlockReader, cfg.HistoryV3, blockWriter, nil), + stagedsync.StageSendersCfg(mock.DB, mock.ChainConfig, false, dirs.Tmp, prune, mock.BlockReader, mock.sentriesClient.Hd, nil), stagedsync.StageExecuteBlocksCfg( mock.DB, prune, @@ -471,9 +474,9 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK ) cfg.Genesis = gspec - pipelineStages := stages2.NewPipelineStages(mock.Ctx, db, &cfg, mock.sentriesClient, mock.Notifications, + 
pipelineStages := stages2.NewPipelineStages(mock.Ctx, db, &cfg, p2p.Config{}, mock.sentriesClient, mock.Notifications, snapshotsDownloader, mock.BlockReader, blockRetire, mock.agg, nil, forkValidator, logger, checkStateRoot) - mock.posStagedSync = stagedsync.New(pipelineStages, stagedsync.PipelineUnwindOrder, stagedsync.PipelinePruneOrder, logger) + mock.posStagedSync = stagedsync.New(cfg.Sync, pipelineStages, stagedsync.PipelineUnwindOrder, stagedsync.PipelinePruneOrder, logger) mock.Eth1ExecutionService = eth1.NewEthereumExecutionModule(mock.BlockReader, mock.DB, mock.posStagedSync, forkValidator, mock.ChainConfig, assembleBlockPOS, nil, mock.Notifications.Accumulator, mock.Notifications.StateChangesConsumer, logger, engine, histV3) @@ -494,9 +497,10 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK mock.PendingBlocks = miner.PendingResultCh mock.MinedBlocks = miner.MiningResultCh mock.MiningSync = stagedsync.New( + cfg.Sync, stagedsync.MiningStages(mock.Ctx, stagedsync.StageMiningCreateBlockCfg(mock.DB, miner, *mock.ChainConfig, mock.Engine, nil, nil, dirs.Tmp, mock.BlockReader), - stagedsync.StageBorHeimdallCfg(mock.DB, snapDb, miner, *mock.ChainConfig, nil /*heimdallClient*/, mock.BlockReader, nil, nil, recents, signatures), + stagedsync.StageBorHeimdallCfg(mock.DB, snapDb, miner, *mock.ChainConfig, nil /*heimdallClient*/, mock.BlockReader, nil, nil, nil, recents, signatures), stagedsync.StageMiningExecCfg(mock.DB, miner, nil, *mock.ChainConfig, mock.Engine, &vm.Config{}, dirs.Tmp, nil, 0, mock.TxPool, nil, mock.BlockReader), stagedsync.StageHashStateCfg(mock.DB, dirs, cfg.HistoryV3), stagedsync.StageTrieCfg(mock.DB, false, true, false, dirs.Tmp, mock.BlockReader, mock.sentriesClient.Hd, cfg.HistoryV3, mock.agg), @@ -604,6 +608,12 @@ func (ms *MockSentry) insertPoWBlocks(chain *core.ChainPack) error { return nil } + for i := 0; i < chain.Length(); i++ { + if err := chain.Blocks[i].HashCheck(); err != nil { + return err + } + } + // Send NewBlock message b, err := rlp.EncodeToBytes(ð.NewBlockPacket{ Block: chain.Blocks[n-1], @@ -660,7 +670,7 @@ func (ms *MockSentry) insertPoWBlocks(chain *core.ChainPack) error { initialCycle := MockInsertAsInitialCycle hook := stages2.NewHook(ms.Ctx, ms.DB, ms.Notifications, ms.Sync, ms.BlockReader, ms.ChainConfig, ms.Log, ms.UpdateHead) - if err = stages2.StageLoopIteration(ms.Ctx, ms.DB, nil, ms.Sync, initialCycle, ms.Log, ms.BlockReader, hook, false); err != nil { + if err = stages2.StageLoopIteration(ms.Ctx, ms.DB, wrap.TxContainer{}, ms.Sync, initialCycle, ms.Log, ms.BlockReader, hook, false); err != nil { return err } if ms.TxPool != nil { diff --git a/turbo/stages/mock/sentry_mock_test.go b/turbo/stages/mock/sentry_mock_test.go index 54b62f1cd3a..c03baff9086 100644 --- a/turbo/stages/mock/sentry_mock_test.go +++ b/turbo/stages/mock/sentry_mock_test.go @@ -7,6 +7,7 @@ import ( "github.com/holiman/uint256" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" + "github.com/ledgerwatch/erigon-lib/wrap" "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/require" @@ -58,7 +59,7 @@ func TestHeaderStep(t *testing.T) { m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceed initialCycle := mock.MockInsertAsInitialCycle - if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, nil, false); err != nil { + if err := stages.StageLoopIteration(m.Ctx, m.DB, wrap.TxContainer{}, m.Sync, initialCycle, 
m.Log, m.BlockReader, nil, false); err != nil { t.Fatal(err) } } @@ -97,7 +98,7 @@ func TestMineBlockWith1Tx(t *testing.T) { m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed initialCycle := mock.MockInsertAsInitialCycle - if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, log.New(), m.BlockReader, nil, false); err != nil { + if err := stages.StageLoopIteration(m.Ctx, m.DB, wrap.TxContainer{}, m.Sync, initialCycle, log.New(), m.BlockReader, nil, false); err != nil { t.Fatal(err) } } @@ -119,7 +120,7 @@ func TestMineBlockWith1Tx(t *testing.T) { } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceed - err = stages.MiningStep(m.Ctx, m.DB, m.MiningSync, "") + err = stages.MiningStep(m.Ctx, m.DB, m.MiningSync, "", log.Root()) require.NoError(err) got := <-m.PendingBlocks @@ -166,7 +167,7 @@ func TestReorg(t *testing.T) { m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed initialCycle := mock.MockInsertAsInitialCycle - if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, nil, false); err != nil { + if err := stages.StageLoopIteration(m.Ctx, m.DB, wrap.TxContainer{}, m.Sync, initialCycle, m.Log, m.BlockReader, nil, false); err != nil { t.Fatal(err) } @@ -219,7 +220,7 @@ func TestReorg(t *testing.T) { m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed initialCycle = false - if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, nil, false); err != nil { + if err := stages.StageLoopIteration(m.Ctx, m.DB, wrap.TxContainer{}, m.Sync, initialCycle, m.Log, m.BlockReader, nil, false); err != nil { t.Fatal(err) } @@ -262,7 +263,7 @@ func TestReorg(t *testing.T) { m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed // This is unwind step - if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, nil, false); err != nil { + if err := stages.StageLoopIteration(m.Ctx, m.DB, wrap.TxContainer{}, m.Sync, initialCycle, m.Log, m.BlockReader, nil, false); err != nil { t.Fatal(err) } @@ -299,7 +300,7 @@ func TestReorg(t *testing.T) { m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed initialCycle = mock.MockInsertAsInitialCycle - if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, nil, false); err != nil { + if err := stages.StageLoopIteration(m.Ctx, m.DB, wrap.TxContainer{}, m.Sync, initialCycle, m.Log, m.BlockReader, nil, false); err != nil { t.Fatal(err) } } @@ -396,7 +397,7 @@ func TestAnchorReplace(t *testing.T) { m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed initialCycle := mock.MockInsertAsInitialCycle - if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, nil, false); err != nil { + if err := stages.StageLoopIteration(m.Ctx, m.DB, wrap.TxContainer{}, m.Sync, initialCycle, m.Log, m.BlockReader, nil, false); err != nil { t.Fatal(err) } } @@ -502,7 +503,7 @@ func TestAnchorReplace2(t *testing.T) { initialCycle := mock.MockInsertAsInitialCycle hook := stages.NewHook(m.Ctx, m.DB, m.Notifications, m.Sync, m.BlockReader, m.ChainConfig, m.Log, m.UpdateHead) - if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, hook, false); err != nil { + if err := stages.StageLoopIteration(m.Ctx, m.DB, wrap.TxContainer{}, m.Sync, initialCycle, m.Log, 
m.BlockReader, hook, false); err != nil { t.Fatal(err) } } diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index 817d236304b..1fe6d0df13b 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -19,11 +19,12 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/membatchwithdb" "github.com/ledgerwatch/erigon-lib/state" + "github.com/ledgerwatch/erigon-lib/wrap" + "github.com/ledgerwatch/erigon/polygon/bor/finality" + + "github.com/ledgerwatch/erigon/polygon/heimdall" "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/consensus/bor" - "github.com/ledgerwatch/erigon/consensus/bor/finality/flags" - "github.com/ledgerwatch/erigon/consensus/bor/heimdall" "github.com/ledgerwatch/erigon/consensus/misc" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/rawdb/blockio" @@ -34,6 +35,8 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/p2p/sentry/sentry_multi_client" + "github.com/ledgerwatch/erigon/polygon/bor" + "github.com/ledgerwatch/erigon/polygon/bor/finality/flags" "github.com/ledgerwatch/erigon/turbo/engineapi/engine_helpers" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/shards" @@ -68,7 +71,7 @@ func StageLoop(ctx context.Context, } // Estimate the current top height seen from the peer - err := StageLoopIteration(ctx, db, nil, sync, initialCycle, logger, blockReader, hook, forcePartialCommit) + err := StageLoopIteration(ctx, db, wrap.TxContainer{}, sync, initialCycle, logger, blockReader, hook, forcePartialCommit) if err != nil { if errors.Is(err, libcommon.ErrStopped) || errors.Is(err, context.Canceled) { @@ -99,15 +102,15 @@ func StageLoop(ctx context.Context, } } -func StageLoopIteration(ctx context.Context, db kv.RwDB, tx kv.RwTx, sync *stagedsync.Sync, initialCycle bool, logger log.Logger, blockReader services.FullBlockReader, hook *Hook, forcePartialCommit bool) (err error) { +func StageLoopIteration(ctx context.Context, db kv.RwDB, txc wrap.TxContainer, sync *stagedsync.Sync, initialCycle bool, logger log.Logger, blockReader services.FullBlockReader, hook *Hook, forcePartialCommit bool) (err error) { defer func() { if rec := recover(); rec != nil { err = fmt.Errorf("%+v, trace: %s", rec, dbg.Stack()) } }() // avoid crash because Erigon's core does many things - externalTx := tx != nil - finishProgressBefore, borProgressBefore, headersProgressBefore, err := stagesHeadersAndFinish(db, tx) + externalTx := txc.Tx != nil + finishProgressBefore, borProgressBefore, headersProgressBefore, err := stagesHeadersAndFinish(db, txc.Tx) if err != nil { return err } @@ -134,19 +137,19 @@ func StageLoopIteration(ctx context.Context, db kv.RwDB, tx kv.RwTx, sync *stage // - Prune(limited time)+Commit(sync). Write to disk happening here. 
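For readers following the kv.RwTx to wrap.TxContainer migration in StageLoopIteration above: the container's zero value now plays the role the nil transaction used to, so txc.Tx != nil is the external-transaction check, and callers such as MiningStep can pass a memory batch through the same parameter. A minimal sketch of the ownership rule, with RwTx and DB reduced to the few methods the sketch needs (these trimmed interfaces are stand-ins, not the real kv types):

package main

import "context"

// RwTx and DB stand in for the kv interfaces; only what this sketch uses.
type RwTx interface {
	Commit() error
	Rollback()
}
type DB interface {
	BeginRwNosync(ctx context.Context) (RwTx, error)
}

// TxContainer mirrors the role wrap.TxContainer plays in this diff: a value
// type whose zero value means "no external transaction was supplied".
type TxContainer struct {
	Tx RwTx
}

// runIteration follows the pattern of StageLoopIteration above: a
// caller-supplied transaction is left alone; otherwise the loop opens its
// own, commits it, and nils the field so later steps reopen as needed.
func runIteration(ctx context.Context, db DB, txc TxContainer) error {
	externalTx := txc.Tx != nil
	if !externalTx {
		tx, err := db.BeginRwNosync(ctx)
		if err != nil {
			return err
		}
		txc.Tx = tx
		defer tx.Rollback() // harmless after Commit in Erigon's kv implementations (assumed)
	}
	// ... run stages against txc.Tx ...
	if !externalTx {
		if err := txc.Tx.Commit(); err != nil {
			return err
		}
		txc.Tx = nil
	}
	return nil
}

func main() {}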
if canRunCycleInOneTransaction && !externalTx { - tx, err = db.BeginRwNosync(ctx) + txc.Tx, err = db.BeginRwNosync(ctx) if err != nil { return err } - defer tx.Rollback() + defer txc.Tx.Rollback() } if hook != nil { - if err = hook.BeforeRun(tx, isSynced); err != nil { + if err = hook.BeforeRun(txc.Tx, isSynced); err != nil { return err } } - err = sync.Run(db, tx, initialCycle) + _, err = sync.Run(db, txc, initialCycle) if err != nil { return err } @@ -154,10 +157,10 @@ func StageLoopIteration(ctx context.Context, db kv.RwDB, tx kv.RwTx, sync *stage var tableSizes []interface{} var commitTime time.Duration if canRunCycleInOneTransaction && !externalTx { - tableSizes = stagedsync.PrintTables(db, tx) // Need to do this before commit to access tx + tableSizes = stagedsync.PrintTables(db, txc.Tx) // Need to do this before commit to access tx commitStart := time.Now() - errTx := tx.Commit() - tx = nil + errTx := txc.Tx.Commit() + txc.Tx = nil if errTx != nil { return errTx } @@ -166,7 +169,7 @@ func StageLoopIteration(ctx context.Context, db kv.RwDB, tx kv.RwTx, sync *stage // -- send notifications START if hook != nil { - if err = hook.AfterRun(tx, finishProgressBefore); err != nil { + if err = hook.AfterRun(txc.Tx, finishProgressBefore); err != nil { return err } } @@ -182,7 +185,7 @@ func StageLoopIteration(ctx context.Context, db kv.RwDB, tx kv.RwTx, sync *stage // -- send notifications END // -- Prune+commit(sync) - if err := stageLoopStepPrune(ctx, db, tx, sync, initialCycle); err != nil { + if err := stageLoopStepPrune(ctx, db, txc.Tx, sync, initialCycle); err != nil { return err } @@ -327,13 +330,14 @@ func (h *Hook) afterRun(tx kv.Tx, finishProgressBefore uint64) error { pendingBlobFee = f.Uint64() } + h.logger.Debug("[hook] Sending state changes", "currentBlock", currentHeader.Number.Uint64(), "finalizedBlock", finalizedBlock) notifications.Accumulator.SendAndReset(h.ctx, notifications.StateChangesConsumer, pendingBaseFee.Uint64(), pendingBlobFee, currentHeader.GasLimit, finalizedBlock) } // -- send notifications END return nil } -func MiningStep(ctx context.Context, kv kv.RwDB, mining *stagedsync.Sync, tmpDir string) (err error) { +func MiningStep(ctx context.Context, kv kv.RwDB, mining *stagedsync.Sync, tmpDir string, logger log.Logger) (err error) { defer func() { if rec := recover(); rec != nil { err = fmt.Errorf("%+v, trace: %s", rec, dbg.Stack()) @@ -346,10 +350,11 @@ func MiningStep(ctx context.Context, kv kv.RwDB, mining *stagedsync.Sync, tmpDir } defer tx.Rollback() - miningBatch := membatchwithdb.NewMemoryBatch(tx, tmpDir) + miningBatch := membatchwithdb.NewMemoryBatch(tx, tmpDir, logger) defer miningBatch.Rollback() + txc := wrap.TxContainer{Tx: miningBatch} - if err = mining.Run(nil, miningBatch, false /* firstCycle */); err != nil { + if _, err = mining.Run(nil, txc, false /* firstCycle */); err != nil { return err } tx.Rollback() @@ -373,18 +378,20 @@ func addAndVerifyBlockStep(batch kv.RwTx, engine consensus.Engine, chainReader c if err := rawdb.WriteHeader(batch, currentHeader); err != nil { return err } + prevHash, err := rawdb.ReadCanonicalHash(batch, currentHeight) + if err != nil { + return err + } if err := rawdb.WriteCanonicalHash(batch, currentHash, currentHeight); err != nil { return err } if err := rawdb.WriteHeadHeaderHash(batch, currentHash); err != nil { return err } - var ok bool - var err error - if ok, err = rawdb.WriteRawBodyIfNotExists(batch, currentHash, currentHeight, currentBody); err != nil { + if _, err := rawdb.WriteRawBodyIfNotExists(batch, 
currentHash, currentHeight, currentBody); err != nil { return err } - if histV3 && ok { + if histV3 && prevHash != currentHash { if err := rawdb.AppendCanonicalTxNums(batch, currentHeight); err != nil { return err } @@ -398,7 +405,7 @@ func addAndVerifyBlockStep(batch kv.RwTx, engine consensus.Engine, chainReader c return nil } -func StateStep(ctx context.Context, chainReader consensus.ChainReader, engine consensus.Engine, batch kv.RwTx, blockWriter *blockio.BlockWriter, stateSync *stagedsync.Sync, Bd *bodydownload.BodyDownload, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody, histV3 bool) (err error) { +func StateStep(ctx context.Context, chainReader consensus.ChainReader, engine consensus.Engine, txc wrap.TxContainer, blockWriter *blockio.BlockWriter, stateSync *stagedsync.Sync, Bd *bodydownload.BodyDownload, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody, histV3 bool) (err error) { defer func() { if rec := recover(); rec != nil { err = fmt.Errorf("%+v, trace: %s", rec, dbg.Stack()) @@ -409,11 +416,11 @@ func StateStep(ctx context.Context, chainReader consensus.ChainReader, engine co if unwindPoint > 0 { // Run it through the unwind stateSync.UnwindTo(unwindPoint, stagedsync.StagedUnwind) - if err = stateSync.RunUnwind(nil, batch); err != nil { + if err = stateSync.RunUnwind(nil, txc); err != nil { return err } } - if err := rawdb.TruncateCanonicalChain(ctx, batch, header.Number.Uint64()+1); err != nil { + if err := rawdb.TruncateCanonicalChain(ctx, txc.Tx, header.Number.Uint64()+1); err != nil { return err } // Once we unwound we can start constructing the chain (assumption: len(headersChain) == len(bodiesChain)) @@ -421,11 +428,11 @@ func StateStep(ctx context.Context, chainReader consensus.ChainReader, engine co currentHeader := headersChain[i] currentBody := bodiesChain[i] - if err := addAndVerifyBlockStep(batch, engine, chainReader, currentHeader, currentBody, histV3); err != nil { + if err := addAndVerifyBlockStep(txc.Tx, engine, chainReader, currentHeader, currentBody, histV3); err != nil { return err } // Run state sync - if err = stateSync.RunNoInterrupt(nil, batch, false /* firstCycle */); err != nil { + if err = stateSync.RunNoInterrupt(nil, txc, false /* firstCycle */); err != nil { return err } } @@ -435,11 +442,11 @@ func StateStep(ctx context.Context, chainReader consensus.ChainReader, engine co return nil } // Prepare memory state for block execution - if err := addAndVerifyBlockStep(batch, engine, chainReader, header, body, histV3); err != nil { + if err := addAndVerifyBlockStep(txc.Tx, engine, chainReader, header, body, histV3); err != nil { return err } // Run state sync - if err = stateSync.RunNoInterrupt(nil, batch, false /* firstCycle */); err != nil { + if err = stateSync.RunNoInterrupt(nil, txc, false /* firstCycle */); err != nil { return err } return nil @@ -465,7 +472,7 @@ func NewDefaultStages(ctx context.Context, agg *state.AggregatorV3, silkworm *silkworm.Silkworm, forkValidator *engine_helpers.ForkValidator, - heimdallClient heimdall.IHeimdallClient, + heimdallClient heimdall.HeimdallClient, recents *lru.ARCCache[libcommon.Hash, *bor.Snapshot], signatures *lru.ARCCache[libcommon.Hash, libcommon.Address], logger log.Logger, @@ -477,19 +484,36 @@ func NewDefaultStages(ctx context.Context, // Hence we run it in the test mode. 
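One plausible reading of the addAndVerifyBlockStep change above: WriteRawBodyIfNotExists reports false whenever the body is already stored, but the canonical hash at that height can still have changed, and AppendCanonicalTxNums must track the canonical chain rather than body insertion, so reading the previous canonical hash before overwriting it gives the correct trigger. A toy model of the two signals diverging (names here are illustrative, not Erigon's API):

package main

import "fmt"

type hash string

type store struct {
	canonical map[uint64]hash   // height -> canonical block hash
	bodies    map[hash]struct{} // bodies already persisted
}

// writeBodyIfNotExists mimics the old gate: true only on fresh insertion.
func (s *store) writeBodyIfNotExists(h hash) bool {
	if _, ok := s.bodies[h]; ok {
		return false
	}
	s.bodies[h] = struct{}{}
	return true
}

// setCanonical mimics the new gate: true when the canonical hash changed.
func (s *store) setCanonical(height uint64, h hash) bool {
	prev := s.canonical[height]
	s.canonical[height] = h
	return prev != h
}

func main() {
	s := &store{canonical: map[uint64]hash{}, bodies: map[hash]struct{}{}}
	s.writeBodyIfNotExists("A") // body A stored during an earlier round
	// later, height 5 becomes canonical with block A:
	inserted := s.writeBodyIfNotExists("A") // false - the old gate would skip
	changed := s.setCanonical(5, "A")       // true  - the new gate fires
	fmt.Println(inserted, changed)          // false true
}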
runInTestMode := cfg.ImportMode - var loopBreakCheck func() bool + var loopBreakCheck func(int) bool if heimdallClient != nil && flags.Milestone { - loopBreakCheck = heimdall.MilestoneRewindPending + loopBreakCheck = func(int) bool { + return finality.IsMilestoneRewindPending() + } + } + + if cfg.Sync.LoopBlockLimit > 0 { + previousBreakCheck := loopBreakCheck + loopBreakCheck = func(loopCount int) bool { + if loopCount > int(cfg.Sync.LoopBlockLimit) { + return true + } + + if previousBreakCheck != nil { + return previousBreakCheck(loopCount) + } + + return false + } } return stagedsync.DefaultStages(ctx, - stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, dirs, blockRetire, snapDownloader, blockReader, notifications.Events, cfg.HistoryV3, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, silkworm), - stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, notifications, forkValidator, loopBreakCheck), - stagedsync.StageBorHeimdallCfg(db, snapDb, stagedsync.MiningState{}, *controlServer.ChainConfig, heimdallClient, blockReader, controlServer.Hd, controlServer.Penalize, recents, signatures), + stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, cfg.HistoryV3, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, silkworm), + stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, notifications, forkValidator, loopBreakCheck), + stagedsync.StageBorHeimdallCfg(db, snapDb, stagedsync.MiningState{}, *controlServer.ChainConfig, heimdallClient, blockReader, controlServer.Hd, controlServer.Penalize, loopBreakCheck, recents, signatures), stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter), - stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, cfg.HistoryV3, blockWriter), - stagedsync.StageSendersCfg(db, controlServer.ChainConfig, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd), + stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, cfg.HistoryV3, blockWriter, loopBreakCheck), + stagedsync.StageSendersCfg(db, controlServer.ChainConfig, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, loopBreakCheck), stagedsync.StageExecuteBlocksCfg( db, cfg.Prune, @@ -523,6 +547,7 @@ func NewDefaultStages(ctx context.Context, func NewPipelineStages(ctx context.Context, db kv.RwDB, cfg *ethconfig.Config, + p2pCfg p2p.Config, controlServer *sentry_multi_client.MultiClient, notifications *shards.Notifications, snapDownloader proto_downloader.DownloaderClient, @@ -541,10 +566,64 @@ func NewPipelineStages(ctx context.Context, // Hence we run it in the test mode. 
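The loopBreakCheck rework above changes the callback from func() bool to func(int) bool and composes checks by capturing previousBreakCheck, so the milestone-rewind signal and the LoopBlockLimit cap can both stop a stage loop. A generalized sketch of the same composition as a small combinator (anyOf is a name invented for this sketch, not an Erigon helper):

package main

import "fmt"

// breakCheck mirrors the loopBreakCheck type above: given the number of
// blocks processed in the current loop, report whether to stop early.
type breakCheck func(int) bool

// anyOf chains checks the way the hunks above chain previousBreakCheck:
// the composite stops as soon as any constituent check asks to stop.
func anyOf(checks ...breakCheck) breakCheck {
	return func(loopCount int) bool {
		for _, c := range checks {
			if c != nil && c(loopCount) {
				return true
			}
		}
		return false
	}
}

func main() {
	limit := 32
	check := anyOf(
		func(n int) bool { return n > limit },                          // LoopBlockLimit-style cap
		func(int) bool { return false /* milestone rewind pending? */ }, // IsMilestoneRewindPending-style signal
	)
	fmt.Println(check(10), check(33)) // false true
}

Chaining through a captured previousBreakCheck, as the diff does, reaches the same result without allocating a slice; the combinator form just makes the composition explicit.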
runInTestMode := cfg.ImportMode - return stagedsync.PipelineStages(ctx, - stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, dirs, blockRetire, snapDownloader, blockReader, notifications.Events, cfg.HistoryV3, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, silkworm), + var loopBreakCheck func(int) bool + + if cfg.Sync.LoopBlockLimit > 0 { + previousBreakCheck := loopBreakCheck + loopBreakCheck = func(loopCount int) bool { + if loopCount > int(cfg.Sync.LoopBlockLimit) { + return true + } + + if previousBreakCheck != nil { + return previousBreakCheck(loopCount) + } + + return false + } + } + + if len(cfg.Sync.UploadLocation) == 0 { + return stagedsync.PipelineStages(ctx, + stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, cfg.HistoryV3, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, silkworm), + stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter), + stagedsync.StageSendersCfg(db, controlServer.ChainConfig, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, loopBreakCheck), + stagedsync.StageExecuteBlocksCfg( + db, + cfg.Prune, + cfg.BatchSize, + nil, + controlServer.ChainConfig, + controlServer.Engine, + &vm.Config{}, + notifications.Accumulator, + cfg.StateStream, + /*stateStream=*/ false, + cfg.HistoryV3, + dirs, + blockReader, + controlServer.Hd, + cfg.Genesis, + cfg.Sync, + agg, + silkwormForExecutionStage(silkworm, cfg), + ), + stagedsync.StageHashStateCfg(db, dirs, cfg.HistoryV3), + stagedsync.StageTrieCfg(db, checkStateRoot, true, false, dirs.Tmp, blockReader, controlServer.Hd, cfg.HistoryV3, agg), + stagedsync.StageHistoryCfg(db, cfg.Prune, dirs.Tmp), + stagedsync.StageLogIndexCfg(db, cfg.Prune, dirs.Tmp), + stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, dirs.Tmp), + stagedsync.StageTxLookupCfg(db, cfg.Prune, dirs.Tmp, controlServer.ChainConfig.Bor, blockReader), + stagedsync.StageFinishCfg(db, dirs.Tmp, forkValidator), + runInTestMode) + } + + return stagedsync.UploaderPipelineStages(ctx, + stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, cfg.HistoryV3, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, silkworm), + stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, notifications, forkValidator, loopBreakCheck), stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter), - stagedsync.StageSendersCfg(db, controlServer.ChainConfig, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd), + stagedsync.StageSendersCfg(db, controlServer.ChainConfig, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, loopBreakCheck), + stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, cfg.HistoryV3, blockWriter, loopBreakCheck), stagedsync.StageExecuteBlocksCfg( db, cfg.Prune, @@ -573,17 +652,19 @@ func NewPipelineStages(ctx context.Context, stagedsync.StageTxLookupCfg(db, cfg.Prune, dirs.Tmp, controlServer.ChainConfig.Bor, blockReader), stagedsync.StageFinishCfg(db, dirs.Tmp, forkValidator), runInTestMode) + } func NewInMemoryExecution(ctx context.Context, db 
kv.RwDB, cfg *ethconfig.Config, controlServer *sentry_multi_client.MultiClient, dirs datadir.Dirs, notifications *shards.Notifications, blockReader services.FullBlockReader, blockWriter *blockio.BlockWriter, agg *state.AggregatorV3, silkworm *silkworm.Silkworm, logger log.Logger) *stagedsync.Sync { return stagedsync.New( + cfg.Sync, stagedsync.StateStages(ctx, - stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, false, blockReader, blockWriter, dirs.Tmp, nil, nil, nil), - stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, cfg.HistoryV3, blockWriter), + stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, false, blockReader, blockWriter, dirs.Tmp, nil, nil, nil), + stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, cfg.HistoryV3, blockWriter, nil), stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter), - stagedsync.StageSendersCfg(db, controlServer.ChainConfig, true, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd), + stagedsync.StageSendersCfg(db, controlServer.ChainConfig, true, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, nil), stagedsync.StageExecuteBlocksCfg( db, cfg.Prune, diff --git a/turbo/transactions/tracing.go b/turbo/transactions/tracing.go index 060a823dd83..96792bd5f4b 100644 --- a/turbo/transactions/tracing.go +++ b/turbo/transactions/tracing.go @@ -17,7 +17,6 @@ import ( ethereum "github.com/ledgerwatch/erigon" "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/consensus/bor/statefull" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" @@ -26,6 +25,7 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/tracers" "github.com/ledgerwatch/erigon/eth/tracers/logger" + "github.com/ledgerwatch/erigon/polygon/bor/statefull" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/services" )
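A final note on the InsertHeaders change in turbo/stages/headerdownload/header_algos.go above: the new headerLimit argument caps how far highestInDb may advance past its value at loop entry, bounding the work done in a single insertion cycle. A minimal sketch of that cap, with the insertion step reduced to a counter (the real loop also tracks the force flag and TTD estimates):

package main

import "fmt"

// insertHeaders sketches the bounded loop: insertOne advances the highest
// inserted height and reports whether more headers are queued.
func insertHeaders(headerLimit uint, highestInDb uint64, insertOne func() (uint64, bool)) uint64 {
	startHeight := highestInDb
	more := true
	for more {
		highestInDb, more = insertOne()
		if headerLimit > 0 && highestInDb-startHeight > uint64(headerLimit) {
			break // cap reached for this cycle; resume on the next one
		}
	}
	return highestInDb
}

func main() {
	h := uint64(100)
	fmt.Println(insertHeaders(16, h, func() (uint64, bool) { h += 4; return h, true })) // 120
}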