diff --git a/packages/lodestar/src/chain/blocks/importBlock.ts b/packages/lodestar/src/chain/blocks/importBlock.ts
index 9c0c06b550e..e19d4feb673 100644
--- a/packages/lodestar/src/chain/blocks/importBlock.ts
+++ b/packages/lodestar/src/chain/blocks/importBlock.ts
@@ -7,15 +7,13 @@ import {
   computeStartSlotAtEpoch,
   getEffectiveBalanceIncrementsZeroInactive,
   computeEpochAtSlot,
-  isBellatrixStateType,
   RootCache,
-  processSlots,
 } from "@chainsafe/lodestar-beacon-state-transition";
 import {IForkChoice, OnBlockPrecachedData, ForkChoiceError, ForkChoiceErrorCode} from "@chainsafe/lodestar-fork-choice";
 import {ILogger} from "@chainsafe/lodestar-utils";
 import {IChainForkConfig} from "@chainsafe/lodestar-config";
 import {IMetrics} from "../../metrics/index.js";
-import {IExecutionEngine, PayloadId} from "../../execution/engine/interface.js";
+import {IExecutionEngine} from "../../execution/engine/interface.js";
 import {IBeaconDb} from "../../db/index.js";
 import {ZERO_HASH_HEX} from "../../constants/index.js";
 import {CheckpointStateCache, StateContextCache, toCheckpointHex} from "../stateCache/index.js";
@@ -24,7 +22,6 @@ import {ChainEventEmitter} from "../emitter.js";
 import {LightClientServer} from "../lightClient/index.js";
 import {SeenAggregatedAttestations} from "../seenCache/seenAggregateAndProof.js";
 import {SeenBlockAttesters} from "../seenCache/seenBlockAttesters.js";
-import {prepareExecutionPayload} from "../factory/block/body.js";
 import {IEth1ForBlockProduction} from "../../eth1/index.js";
 import {BeaconProposerCache} from "../beaconProposerCache.js";
 import {IBeaconClock} from "../clock/index.js";
@@ -225,8 +222,7 @@ export async function importBlock(chain: ImportBlockModules, fullyVerifiedBlock:
 
   // Emit ChainEvent.forkChoiceHead event
   const oldHead = chain.forkChoice.getHead();
-  chain.forkChoice.updateHead();
-  const newHead = chain.forkChoice.getHead();
+  const newHead = chain.forkChoice.updateHead();
   const currFinalizedEpoch = chain.forkChoice.getFinalizedCheckpoint().epoch;
 
   if (newHead.blockRoot !== oldHead.blockRoot) {
@@ -268,34 +264,32 @@ export async function importBlock(chain: ImportBlockModules, fullyVerifiedBlock:
     }
   }
 
-  void maybeIssueNextProposerEngineFcU(chain, postState).then((payloadId) => {
-    // NOTE: forkChoice.fsStore.finalizedCheckpoint MUST only change is response to an onBlock event
-    // Notify execution layer of head and finalized updates only if has already
-    // not been done via payloadId generation. But even if this fcU follows the
-    // payloadId one, there is no harm as the ELs will just ignore it.
-    if (payloadId === null && (newHead.blockRoot !== oldHead.blockRoot || currFinalizedEpoch !== prevFinalizedEpoch)) {
-      /**
-       * On post BELLATRIX_EPOCH but pre TTD, blocks include empty execution payload with a zero block hash.
-       * The consensus clients must not send notifyForkchoiceUpdate before TTD since the execution client will error.
-       * So we must check that:
-       * - `headBlockHash !== null` -> Pre BELLATRIX_EPOCH
-       * - `headBlockHash !== ZERO_HASH` -> Pre TTD
-       */
-      const headBlockHash = chain.forkChoice.getHead().executionPayloadBlockHash ?? ZERO_HASH_HEX;
-      /**
-       * After BELLATRIX_EPOCH and TTD it's okay to send a zero hash block hash for the finalized block. This will happen if
-       * the current finalized block does not contain any execution payload at all (pre MERGE_EPOCH) or if it contains a
-       * zero block hash (pre TTD)
-       */
-      const safeBlockHash = chain.forkChoice.getJustifiedBlock().executionPayloadBlockHash ?? ZERO_HASH_HEX;
-      const finalizedBlockHash = chain.forkChoice.getFinalizedBlock().executionPayloadBlockHash ?? ZERO_HASH_HEX;
-      if (headBlockHash !== ZERO_HASH_HEX) {
-        chain.executionEngine.notifyForkchoiceUpdate(headBlockHash, safeBlockHash, finalizedBlockHash).catch((e) => {
-          chain.logger.error("Error pushing notifyForkchoiceUpdate()", {headBlockHash, finalizedBlockHash}, e);
-        });
-      }
+  // NOTE: forkChoice.fsStore.finalizedCheckpoint MUST only change in response to an onBlock event
+  // Notifying the EL of head and finalized updates as below is usually done within the first 4s of the slot.
+  // If there is an advanced payload generation in the next slot, we'll notify the EL again 4s before the next
+  // slot via PrepareNextSlotScheduler. There is no harm in updating the EL with the same data; it will just ignore it.
+  if (newHead.blockRoot !== oldHead.blockRoot || currFinalizedEpoch !== prevFinalizedEpoch) {
+    /**
+     * On post BELLATRIX_EPOCH but pre TTD, blocks include empty execution payload with a zero block hash.
+     * The consensus clients must not send notifyForkchoiceUpdate before TTD since the execution client will error.
+     * So we must check that:
+     * - `headBlockHash !== null` -> Pre BELLATRIX_EPOCH
+     * - `headBlockHash !== ZERO_HASH` -> Pre TTD
+     */
+    const headBlockHash = chain.forkChoice.getHead().executionPayloadBlockHash ?? ZERO_HASH_HEX;
+    /**
+     * After BELLATRIX_EPOCH and TTD it's okay to send a zero hash block hash for the finalized block. This will happen if
+     * the current finalized block does not contain any execution payload at all (pre MERGE_EPOCH) or if it contains a
+     * zero block hash (pre TTD)
+     */
+    const safeBlockHash = chain.forkChoice.getJustifiedBlock().executionPayloadBlockHash ?? ZERO_HASH_HEX;
+    const finalizedBlockHash = chain.forkChoice.getFinalizedBlock().executionPayloadBlockHash ?? ZERO_HASH_HEX;
+    if (headBlockHash !== ZERO_HASH_HEX) {
+      chain.executionEngine.notifyForkchoiceUpdate(headBlockHash, safeBlockHash, finalizedBlockHash).catch((e) => {
+        chain.logger.error("Error pushing notifyForkchoiceUpdate()", {headBlockHash, finalizedBlockHash}, e);
+      });
     }
-  });
+  }
 
   // Emit ChainEvent.block event
   //
@@ -329,38 +323,6 @@ export async function importBlock(chain: ImportBlockModules, fullyVerifiedBlock:
   });
 }
 
-async function maybeIssueNextProposerEngineFcU(
-  chain: ImportBlockModules,
-  state: CachedBeaconStateAllForks
-): Promise<PayloadId | null> {
-  const prepareSlot = state.slot + 1;
-  const prepareEpoch = computeEpochAtSlot(prepareSlot);
-  // No need to try building block if we are not synced
-  if (prepareSlot !== chain.clock.currentSlot + 1 || prepareEpoch < chain.config.BELLATRIX_FORK_EPOCH) {
-    return null;
-  }
-  const prepareState = processSlots(state, prepareSlot);
-  // TODO wait till third/last interval of the slot to actual send an fcU
-  // so that any head change is accomodated before that. However this could
-  // be optimized if the last block receieved is already head. This will be
-  // especially meaningful for mev boost which might have more delays
-  // because of how protocol is designed
-  if (isBellatrixStateType(prepareState)) {
-    try {
-      const proposerIndex = prepareState.epochCtx.getBeaconProposer(prepareSlot);
-      const feeRecipient = chain.beaconProposerCache.get(proposerIndex);
-      if (feeRecipient) {
-        const safeBlockHash = chain.forkChoice.getJustifiedBlock().executionPayloadBlockHash ?? ZERO_HASH_HEX;
-        const finalizedBlockHash = chain.forkChoice.getFinalizedBlock().executionPayloadBlockHash ?? ZERO_HASH_HEX;
-        return prepareExecutionPayload(chain, safeBlockHash, finalizedBlockHash, prepareState, feeRecipient);
-      }
-    } catch (e) {
-      chain.logger.error("Error on issuing next proposer engine fcU", {}, e as Error);
-    }
-  }
-  return null;
-}
-
 /**
  * Returns the closest state to postState.currentJustifiedCheckpoint in the same fork as postState
  *
diff --git a/packages/lodestar/src/chain/chain.ts b/packages/lodestar/src/chain/chain.ts
index 398602ceb4b..a19b70736d6 100644
--- a/packages/lodestar/src/chain/chain.ts
+++ b/packages/lodestar/src/chain/chain.ts
@@ -48,7 +48,7 @@ import {
 } from "./opPools/index.js";
 import {LightClientServer} from "./lightClient/index.js";
 import {Archiver} from "./archiver/index.js";
-import {PrecomputeNextEpochTransitionScheduler} from "./precomputeNextEpochTransition.js";
+import {PrepareNextSlotScheduler} from "./prepareNextSlot.js";
 import {ReprocessController} from "./reprocess.js";
 import {SeenAggregatedAttestations} from "./seenCache/seenAggregateAndProof.js";
 import {SeenBlockAttesters} from "./seenCache/seenBlockAttesters.js";
@@ -230,7 +230,7 @@ export class BeaconChain implements IBeaconChain {
     this.lightClientServer = lightClientServer;
 
     this.archiver = new Archiver(db, this, logger, signal, opts);
-    new PrecomputeNextEpochTransitionScheduler(this, this.config, metrics, this.logger, signal);
+    new PrepareNextSlotScheduler(this, this.config, metrics, this.logger, signal);
 
     metrics?.opPool.aggregatedAttestationPoolSize.addCollect(() => this.onScrapeMetrics());
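The guard in the importBlock() hunk above is what keeps a node from calling the execution engine before the merge transition. A minimal standalone sketch of that decision, assuming a head summary whose executionPayloadBlockHash is null before BELLATRIX_EPOCH and the zero hash after BELLATRIX_EPOCH but before TTD (the names below are illustrative only, not Lodestar's API):

    // TypeScript sketch, not part of the patch
    const ZERO_HASH_HEX = "0x" + "00".repeat(32);

    type BlockSummary = {executionPayloadBlockHash: string | null};

    function shouldNotifyForkchoiceUpdate(head: BlockSummary): boolean {
      // null -> pre BELLATRIX_EPOCH, zero hash -> post BELLATRIX_EPOCH but pre TTD;
      // in both cases notifyForkchoiceUpdate must not be sent yet.
      const headBlockHash = head.executionPayloadBlockHash ?? ZERO_HASH_HEX;
      return headBlockHash !== ZERO_HASH_HEX;
    }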
diff --git a/packages/lodestar/src/chain/precomputeNextEpochTransition.ts b/packages/lodestar/src/chain/precomputeNextEpochTransition.ts
deleted file mode 100644
index dcccd90a2cf..00000000000
--- a/packages/lodestar/src/chain/precomputeNextEpochTransition.ts
+++ /dev/null
@@ -1,88 +0,0 @@
-import {computeEpochAtSlot} from "@chainsafe/lodestar-beacon-state-transition";
-import {IChainForkConfig} from "@chainsafe/lodestar-config";
-import {SLOTS_PER_EPOCH} from "@chainsafe/lodestar-params";
-import {Slot} from "@chainsafe/lodestar-types";
-import {ILogger, sleep} from "@chainsafe/lodestar-utils";
-import {IMetrics} from "../metrics/index.js";
-import {ChainEvent} from "./emitter.js";
-import {IBeaconChain} from "./interface.js";
-import {RegenCaller} from "./regen/index.js";
-
-/**
- * When node is synced and 1/3 slot before an epoch, we want to prepare for the next epoch
- * transition from our head so that:
- * + validators vote for block head on time through attestation
- * + validators propose blocks on time
- */
-export class PrecomputeNextEpochTransitionScheduler {
-  constructor(
-    private readonly chain: IBeaconChain,
-    private readonly config: IChainForkConfig,
-    private readonly metrics: IMetrics | null,
-    private readonly logger: ILogger,
-    private readonly signal: AbortSignal
-  ) {
-    this.chain.emitter.on(ChainEvent.clockSlot, this.prepareForNextEpoch);
-
-    this.signal.addEventListener(
-      "abort",
-      () => {
-        this.chain.emitter.off(ChainEvent.clockSlot, this.prepareForNextEpoch);
-      },
-      {once: true}
-    );
-  }
-
-  /**
-   * Use clockSlot instead of clockEpoch to schedule the task at more exact time.
-   */
-  prepareForNextEpoch = async (clockSlot: Slot): Promise<void> => {
-    // only interested in last slot of epoch
-    if ((clockSlot + 1) % SLOTS_PER_EPOCH !== 0) {
-      return;
-    }
-
-    // Precalculate epoch transition 2/3 of the way through the last slot of the epoch
-    const msToPrecalculateTime = (this.config.SECONDS_PER_SLOT * 1000 * 2) / 3;
-    await sleep(msToPrecalculateTime, this.signal);
-
-    const {slot: headSlot, blockRoot} = this.chain.forkChoice.getHead();
-    const nextEpoch = computeEpochAtSlot(clockSlot) + 1;
-    // Don't want to pre compute epoch transition at pre genesis
-    if (nextEpoch <= 0) return;
-    // node may be syncing or out of synced
-    if (headSlot < clockSlot) {
-      this.metrics?.precomputeNextEpochTransition.count.inc({result: "skip"}, 1);
-      this.logger.debug("Skipping PrecomputeEpochScheduler - head slot is not current slot", {
-        nextEpoch,
-        headSlot,
-        slot: clockSlot,
-      });
-      return;
-    }
-
-    // we want to make sure headSlot === clockSlot to do early epoch transition
-    const nextSlot = clockSlot + 1;
-    this.logger.verbose("Running PrecomputeEpochScheduler", {nextEpoch, headSlot, nextSlot});
-
-    // this takes 2s - 4s as of Oct 2021, no need to wait for this or the clock drift
-    // assuming there is no reorg, it caches the checkpoint state & helps avoid doing a full state transition in the next slot
-    // + when gossip block comes, we need to validate and run state transition
-    // + if next slot is a skipped slot, it'd help getting target checkpoint state faster to validate attestations
-    this.chain.regen
-      .getBlockSlotState(blockRoot, nextSlot, RegenCaller.precomputeEpoch)
-      .then(() => {
-        this.metrics?.precomputeNextEpochTransition.count.inc({result: "success"}, 1);
-        const previousHits = this.chain.checkpointStateCache.updatePreComputedCheckpoint(blockRoot, nextEpoch);
-        if (previousHits === 0) {
-          this.metrics?.precomputeNextEpochTransition.waste.inc();
-        }
-        this.metrics?.precomputeNextEpochTransition.hits.set(previousHits ?? 0);
-        this.logger.verbose("Completed PrecomputeEpochScheduler", {nextEpoch, headSlot, nextSlot});
-      })
-      .catch((e) => {
-        this.metrics?.precomputeNextEpochTransition.count.inc({result: "error"}, 1);
-        this.logger.error("Failed to precompute epoch transition", nextEpoch, e);
-      });
-  };
-}
diff --git a/packages/lodestar/src/chain/prepareNextSlot.ts b/packages/lodestar/src/chain/prepareNextSlot.ts
new file mode 100644
index 00000000000..55e39c34425
--- /dev/null
+++ b/packages/lodestar/src/chain/prepareNextSlot.ts
@@ -0,0 +1,130 @@
+import {computeEpochAtSlot, isBellatrixStateType} from "@chainsafe/lodestar-beacon-state-transition";
+import {IChainForkConfig} from "@chainsafe/lodestar-config";
+import {ForkSeq, SLOTS_PER_EPOCH} from "@chainsafe/lodestar-params";
+import {Slot} from "@chainsafe/lodestar-types";
+import {ILogger, sleep} from "@chainsafe/lodestar-utils";
+import {GENESIS_EPOCH, ZERO_HASH_HEX} from "../constants/constants.js";
+import {IMetrics} from "../metrics/index.js";
+import {ChainEvent} from "./emitter.js";
+import {prepareExecutionPayload} from "./factory/block/body.js";
+import {IBeaconChain} from "./interface.js";
+import {RegenCaller} from "./regen/index.js";
+
+/* With 12s slot times, this scheduler will run 4s before the start of each slot (`12 / 3 = 4`). */
+const SCHEDULER_LOOKAHEAD_FACTOR = 3;
+
+/* We don't want to do more epoch transitions than this */
+const PREPARE_EPOCH_LIMIT = 1;
+
+/**
+ * At Bellatrix, if we are responsible for proposing in the next slot, we want to prepare the payload
+ * 4s (1/3 slot) before the start of the next slot
+ *
+ * For all forks, when the clock is 1/3 slot before an epoch, we want to prepare for the next epoch
+ * transition from our head so that:
+ * + validators vote for block head on time through attestation
+ * + validators propose blocks on time
+ * + For Bellatrix, to compute proposers of the next epoch so that we can prepare new payloads
+ *
+ */
+export class PrepareNextSlotScheduler {
+  constructor(
+    private readonly chain: IBeaconChain,
+    private readonly config: IChainForkConfig,
+    private readonly metrics: IMetrics | null,
+    private readonly logger: ILogger,
+    private readonly signal: AbortSignal
+  ) {
+    this.chain.emitter.on(ChainEvent.clockSlot, this.prepareForNextSlot);
+
+    this.signal.addEventListener(
+      "abort",
+      () => {
+        this.chain.emitter.off(ChainEvent.clockSlot, this.prepareForNextSlot);
+      },
+      {once: true}
+    );
+  }
+
+  /**
+   * Use clockSlot instead of clockEpoch to schedule the task at more exact time.
+   */
+  prepareForNextSlot = async (clockSlot: Slot): Promise<void> => {
+    const prepareSlot = clockSlot + 1;
+    const prepareEpoch = computeEpochAtSlot(prepareSlot);
+    const isLastEpochSlot = (clockSlot + 1) % SLOTS_PER_EPOCH === 0;
+    if (this.config.getForkSeq(prepareEpoch) < ForkSeq.bellatrix && !isLastEpochSlot) {
+      return;
+    }
+
+    const slotMs = this.config.SECONDS_PER_SLOT * 1000;
+    // At 1/3 slot time before the next slot, we either prepare payload or precompute epoch transition
+    await sleep(slotMs - slotMs / SCHEDULER_LOOKAHEAD_FACTOR, this.signal);
+
+    // calling updateHead() here before we produce a block to reduce reorg possibility
+    const {slot: headSlot, blockRoot: headRoot} = this.chain.forkChoice.updateHead();
+    const nextEpoch = computeEpochAtSlot(clockSlot) + 1;
+    // Do nothing at pre genesis
+    if (nextEpoch <= GENESIS_EPOCH) return;
+
+    const headEpoch = computeEpochAtSlot(headSlot);
+    if (prepareEpoch - headEpoch > PREPARE_EPOCH_LIMIT) {
+      this.metrics?.precomputeNextEpochTransition.count.inc({result: "skip"}, 1);
+      this.logger.debug("Skipping PrepareNextSlotScheduler - head slot is too far behind the current slot", {
+        nextEpoch,
+        headSlot,
+        clockSlot,
+      });
+
+      return;
+    }
+
+    if (prepareEpoch > headEpoch) {
+      this.logger.verbose("Running PrepareNextSlotScheduler epoch transition", {nextEpoch, headSlot, prepareSlot});
+    }
+
+    // No need to wait for this or the clock drift
+    // Pre Bellatrix: we only do precompute state transition for the last slot of epoch
+    // For Bellatrix, we always do the `processSlots()` to prepare payload for the next slot
+    this.chain.regen
+      .getBlockSlotState(headRoot, prepareSlot, RegenCaller.precomputeEpoch)
+      .then((prepareState) => {
+        // assuming there is no reorg, it caches the checkpoint state & helps avoid doing a full state transition in the next slot
+        // + when gossip block comes, we need to validate and run state transition
+        // + if next slot is a skipped slot, it'd help getting target checkpoint state faster to validate attestations
+        if (prepareEpoch > headEpoch) {
+          this.metrics?.precomputeNextEpochTransition.count.inc({result: "success"}, 1);
+          const previousHits = this.chain.checkpointStateCache.updatePreComputedCheckpoint(headRoot, nextEpoch);
+          if (previousHits === 0) {
+            this.metrics?.precomputeNextEpochTransition.waste.inc();
+          }
+          this.metrics?.precomputeNextEpochTransition.hits.set(previousHits ?? 0);
+          this.logger.verbose("Completed PrepareNextSlotScheduler epoch transition", {
+            nextEpoch,
+            headSlot,
+            prepareSlot,
+          });
+        }
+
+        if (isBellatrixStateType(prepareState)) {
+          const proposerIndex = prepareState.epochCtx.getBeaconProposer(prepareSlot);
+          const feeRecipient = this.chain.beaconProposerCache.get(proposerIndex);
+          if (feeRecipient) {
+            const safeBlockHash = this.chain.forkChoice.getJustifiedBlock().executionPayloadBlockHash ?? ZERO_HASH_HEX;
+            const finalizedBlockHash =
+              this.chain.forkChoice.getFinalizedBlock().executionPayloadBlockHash ?? ZERO_HASH_HEX;
+            void prepareExecutionPayload(this.chain, safeBlockHash, finalizedBlockHash, prepareState, feeRecipient);
+            this.logger.verbose("PrepareNextSlotScheduler prepared new payload", {
+              prepareSlot,
+              proposerIndex,
+              feeRecipient,
+            });
+          }
+        }
+      })
+      .catch((e) => {
+        this.metrics?.precomputeNextEpochTransition.count.inc({result: "error"}, 1);
+        this.logger.error("Failed to precompute epoch transition", nextEpoch, e);
+      });
+  };
+}
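For reference, the sleep in prepareForNextSlot() wakes the scheduler at the 2/3 point of the current slot, i.e. one third of a slot before the next slot starts. A small arithmetic sketch under mainnet's 12s slots (the values are illustrative, derived from SECONDS_PER_SLOT and SCHEDULER_LOOKAHEAD_FACTOR above):

    // TypeScript sketch, not part of the patch
    const SECONDS_PER_SLOT = 12;
    const SCHEDULER_LOOKAHEAD_FACTOR = 3;

    const slotMs = SECONDS_PER_SLOT * 1000;
    const wakeUpOffsetMs = slotMs - slotMs / SCHEDULER_LOOKAHEAD_FACTOR; // 8000ms into the slot
    const leadTimeMs = slotMs - wakeUpOffsetMs; // 4000ms before the next slot, i.e. 1/3 of a slot
    console.log({wakeUpOffsetMs, leadTimeMs});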
diff --git a/packages/lodestar/test/unit/api/impl/debug/index.test.ts b/packages/lodestar/test/unit/api/impl/debug/index.test.ts
index 76660dfa72b..e0ded292bdf 100644
--- a/packages/lodestar/test/unit/api/impl/debug/index.test.ts
+++ b/packages/lodestar/test/unit/api/impl/debug/index.test.ts
@@ -13,7 +13,7 @@ import {INetwork, Network} from "../../../../../src/network/index.js";
 import {IBeaconChain} from "../../../../../src/chain/index.js";
 import {generateProtoBlock} from "../../../../utils/block.js";
 import {StubbedBeaconDb} from "../../../../utils/stub/index.js";
-import {generateState} from "../../../../utils/state.js";
+import {generateCachedAltairState, generateState} from "../../../../utils/state.js";
 import {setupApiImplTestServer} from "../index.test.js";
 import {SinonStubFn} from "../../../../utils/types.js";
@@ -60,7 +60,7 @@ describe.skip("api - debug - beacon", function () {
   });
 
   it("getStateV2 - should be able to convert to json", async function () {
-    resolveStateIdStub.resolves(generateState({}, minimalConfig, true));
+    resolveStateIdStub.resolves(generateCachedAltairState({}, minimalConfig));
     const {data: state} = await debugApi.getStateV2("something");
     expect(() => ssz.altair.BeaconState.toJson(state as altair.BeaconState)).to.not.throw();
   });
diff --git a/packages/lodestar/test/unit/chain/precomputeNextEpochTransition.test.ts b/packages/lodestar/test/unit/chain/precomputeNextEpochTransition.test.ts
deleted file mode 100644
index 3a8544613a0..00000000000
--- a/packages/lodestar/test/unit/chain/precomputeNextEpochTransition.test.ts
+++ /dev/null
@@ -1,86 +0,0 @@
-import {expect} from "chai";
-import sinon, {SinonStubbedInstance} from "sinon";
-import {config} from "@chainsafe/lodestar-config/default";
-import {ForkChoice, ProtoBlock} from "@chainsafe/lodestar-fork-choice";
-import {WinstonLogger} from "@chainsafe/lodestar-utils";
-import {SLOTS_PER_EPOCH} from "@chainsafe/lodestar-params";
-import {BeaconChain, ChainEventEmitter} from "../../../src/chain/index.js";
-import {LocalClock} from "../../../src/chain/clock/index.js";
-import {PrecomputeNextEpochTransitionScheduler} from "../../../src/chain/precomputeNextEpochTransition.js";
-import {StateRegenerator} from "../../../src/chain/regen/index.js";
-
-describe("PrecomputeEpochScheduler", () => {
-  const sandbox = sinon.createSandbox();
-  const abortController = new AbortController();
-
-  let preComputeScheduler: PrecomputeNextEpochTransitionScheduler;
-  let forkChoiceStub: SinonStubbedInstance<ForkChoice> & ForkChoice;
-  let regenStub: SinonStubbedInstance<StateRegenerator> & StateRegenerator;
-  let loggerStub: SinonStubbedInstance<WinstonLogger> & WinstonLogger;
-
-  beforeEach(() => {
-    sandbox.useFakeTimers();
-    const chainStub = sandbox.createStubInstance(BeaconChain) as SinonStubbedInstance<BeaconChain> & BeaconChain;
-    const clockStub = sandbox.createStubInstance(LocalClock) as SinonStubbedInstance<LocalClock> & LocalClock;
-    chainStub.clock = clockStub;
-    forkChoiceStub = sandbox.createStubInstance(ForkChoice) as SinonStubbedInstance<ForkChoice> & ForkChoice;
-    chainStub.forkChoice = forkChoiceStub;
-    const emitterStub = sandbox.createStubInstance(ChainEventEmitter) as SinonStubbedInstance<ChainEventEmitter> &
-      ChainEventEmitter;
-    chainStub.emitter = emitterStub;
-    regenStub = sandbox.createStubInstance(StateRegenerator) as SinonStubbedInstance<StateRegenerator> &
-      StateRegenerator;
-    chainStub.regen = regenStub;
-    loggerStub = sandbox.createStubInstance(WinstonLogger) as SinonStubbedInstance<WinstonLogger> & WinstonLogger;
-    preComputeScheduler = new PrecomputeNextEpochTransitionScheduler(
-      chainStub,
-      config,
-      null,
-      loggerStub,
-      abortController.signal
-    );
-  });
-
-  afterEach(() => {
-    sandbox.restore();
-  });
-
-  it("should not run due to not last slot of epoch", async () => {
-    await preComputeScheduler.prepareForNextEpoch(3);
-    expect(forkChoiceStub.getHead.called).to.be.false;
-  });
-
-  it("should skip, headSlot is less than clock slot", async () => {
-    forkChoiceStub.getHead.returns({slot: SLOTS_PER_EPOCH - 2} as ProtoBlock);
-    await Promise.all([
-      preComputeScheduler.prepareForNextEpoch(SLOTS_PER_EPOCH - 1),
-      sandbox.clock.tickAsync((config.SECONDS_PER_SLOT * 1000 * 2) / 3),
-    ]);
-    expect(forkChoiceStub.getHead.called, "expect forkChoice.getHead to be called").to.be.true;
-    expect(regenStub.getBlockSlotState.called, "expect regen.getBlockSlotState not to be called").to.be.false;
-  });
-
-  it("should run regen.getBlockSlotState", async () => {
-    forkChoiceStub.getHead.returns({slot: SLOTS_PER_EPOCH - 1} as ProtoBlock);
-    regenStub.getBlockSlotState.resolves();
-    await Promise.all([
-      preComputeScheduler.prepareForNextEpoch(SLOTS_PER_EPOCH - 1),
-      sandbox.clock.tickAsync((config.SECONDS_PER_SLOT * 1000 * 2) / 3),
-    ]);
-    expect(forkChoiceStub.getHead.called, "expect forkChoice.getHead to be called").to.be.true;
-    expect(regenStub.getBlockSlotState.called, "expect regen.getBlockSlotState to be called").to.be.true;
-  });
-
-  it("should handle regen.getBlockSlotState error", async () => {
-    forkChoiceStub.getHead.returns({slot: SLOTS_PER_EPOCH - 1} as ProtoBlock);
-    regenStub.getBlockSlotState.rejects("Unit test error");
-    expect(loggerStub.error.calledOnce).to.be.false;
-    await Promise.all([
-      preComputeScheduler.prepareForNextEpoch(SLOTS_PER_EPOCH - 1),
-      sandbox.clock.tickAsync((config.SECONDS_PER_SLOT * 1000 * 2) / 3),
-    ]);
-    expect(forkChoiceStub.getHead.called, "expect forkChoice.getHead to be called").to.be.true;
-    expect(regenStub.getBlockSlotState.called, "expect regen.getBlockSlotState to be called").to.be.true;
-    expect(loggerStub.error.calledOnce, "expect log error on rejected regen.getBlockSlotState").to.be.true;
-  });
-});
diff --git a/packages/lodestar/test/unit/chain/prepareNextSlot.test.ts b/packages/lodestar/test/unit/chain/prepareNextSlot.test.ts
new file mode 100644
index 00000000000..3f648373ee7
--- /dev/null
+++ b/packages/lodestar/test/unit/chain/prepareNextSlot.test.ts
@@ -0,0 +1,151 @@
+import {expect} from "chai";
+import sinon, {SinonStubbedInstance} from "sinon";
+import {config} from "@chainsafe/lodestar-config/default";
+import {ForkChoice, ProtoBlock} from "@chainsafe/lodestar-fork-choice";
+import {WinstonLogger} from "@chainsafe/lodestar-utils";
+import {ForkSeq, SLOTS_PER_EPOCH} from "@chainsafe/lodestar-params";
+import {IChainForkConfig} from "@chainsafe/lodestar-config";
+import {BeaconChain, ChainEventEmitter} from "../../../src/chain/index.js";
+import {LocalClock} from "../../../src/chain/clock/index.js";
+import {PrepareNextSlotScheduler} from "../../../src/chain/prepareNextSlot.js";
+import {StateRegenerator} from "../../../src/chain/regen/index.js";
+import {SinonStubFn} from "../../utils/types.js";
+import {generateCachedBellatrixState} from "../../utils/state.js";
+import {BeaconProposerCache} from "../../../src/chain/beaconProposerCache.js";
+import {PayloadIdCache} from "../../../src/execution/engine/payloadIdCache.js";
+import {ExecutionEngineHttp} from "../../../src/execution/engine/http.js";
+import {IExecutionEngine} from "../../../src/execution/engine/interface.js";
+
+describe("PrepareNextSlot scheduler", () => {
+  const sandbox = sinon.createSandbox();
+  const abortController = new AbortController();
+
+  let scheduler: PrepareNextSlotScheduler;
+  let forkChoiceStub: SinonStubbedInstance<ForkChoice> & ForkChoice;
+  let regenStub: SinonStubbedInstance<StateRegenerator> & StateRegenerator;
+  let loggerStub: SinonStubbedInstance<WinstonLogger> & WinstonLogger;
+  let beaconProposerCacheStub: SinonStubbedInstance<BeaconProposerCache> & BeaconProposerCache;
+  let getForkSeqStub: SinonStubFn<typeof config["getForkSeq"]>;
+  let executionEngineStub: SinonStubbedInstance<ExecutionEngineHttp> & ExecutionEngineHttp;
+
+  beforeEach(() => {
+    sandbox.useFakeTimers();
+    const chainStub = sandbox.createStubInstance(BeaconChain) as SinonStubbedInstance<BeaconChain> & BeaconChain;
+    const clockStub = sandbox.createStubInstance(LocalClock) as SinonStubbedInstance<LocalClock> & LocalClock;
+    chainStub.clock = clockStub;
+    forkChoiceStub = sandbox.createStubInstance(ForkChoice) as SinonStubbedInstance<ForkChoice> & ForkChoice;
+    chainStub.forkChoice = forkChoiceStub;
+    const emitterStub = sandbox.createStubInstance(ChainEventEmitter) as SinonStubbedInstance<ChainEventEmitter> &
+      ChainEventEmitter;
+    chainStub.emitter = emitterStub;
+    regenStub = sandbox.createStubInstance(StateRegenerator) as SinonStubbedInstance<StateRegenerator> &
+      StateRegenerator;
+    chainStub.regen = regenStub;
+    loggerStub = sandbox.createStubInstance(WinstonLogger) as SinonStubbedInstance<WinstonLogger> & WinstonLogger;
+    beaconProposerCacheStub = sandbox.createStubInstance(
+      BeaconProposerCache
+    ) as SinonStubbedInstance<BeaconProposerCache> & BeaconProposerCache;
+    ((chainStub as unknown) as {beaconProposerCache: BeaconProposerCache})[
+      "beaconProposerCache"
+    ] = beaconProposerCacheStub;
+    getForkSeqStub = sandbox.stub(config, "getForkSeq");
+    executionEngineStub = sandbox.createStubInstance(ExecutionEngineHttp) as SinonStubbedInstance<ExecutionEngineHttp> &
+      ExecutionEngineHttp;
+    ((chainStub as unknown) as {executionEngine: IExecutionEngine}).executionEngine = executionEngineStub;
+    ((chainStub as unknown) as {config: IChainForkConfig}).config = (config as unknown) as IChainForkConfig;
+    scheduler = new PrepareNextSlotScheduler(chainStub, config, null, loggerStub, abortController.signal);
+  });
+
+  afterEach(() => {
+    sandbox.restore();
+  });
+
+  it("pre bellatrix - should not run due to not last slot of epoch", async () => {
+    getForkSeqStub.returns(ForkSeq.phase0);
+    await scheduler.prepareForNextSlot(3);
+    expect(forkChoiceStub.updateHead.called).to.be.false;
+  });
+
+  it("pre bellatrix - should skip, headSlot is more than 1 epoch to prepare slot", async () => {
+    getForkSeqStub.returns(ForkSeq.phase0);
+    forkChoiceStub.updateHead.returns({slot: SLOTS_PER_EPOCH - 2} as ProtoBlock);
+    await Promise.all([
+      scheduler.prepareForNextSlot(2 * SLOTS_PER_EPOCH - 1),
+      sandbox.clock.tickAsync((config.SECONDS_PER_SLOT * 1000 * 2) / 3),
+    ]);
+    expect(forkChoiceStub.updateHead.called, "expect forkChoice.updateHead to be called").to.be.true;
+    expect(regenStub.getBlockSlotState.called, "expect regen.getBlockSlotState not to be called").to.be.false;
+  });
+
+  it("pre bellatrix - should run regen.getBlockSlotState", async () => {
+    getForkSeqStub.returns(ForkSeq.phase0);
+    forkChoiceStub.updateHead.returns({slot: SLOTS_PER_EPOCH - 1} as ProtoBlock);
+    regenStub.getBlockSlotState.resolves();
+    await Promise.all([
+      scheduler.prepareForNextSlot(SLOTS_PER_EPOCH - 1),
+      sandbox.clock.tickAsync((config.SECONDS_PER_SLOT * 1000 * 2) / 3),
+    ]);
+    expect(forkChoiceStub.updateHead.called, "expect forkChoice.updateHead to be called").to.be.true;
+    expect(regenStub.getBlockSlotState.called, "expect regen.getBlockSlotState to be called").to.be.true;
+  });
+
+  it("pre bellatrix - should handle regen.getBlockSlotState error", async () => {
+    getForkSeqStub.returns(ForkSeq.phase0);
+    forkChoiceStub.updateHead.returns({slot: SLOTS_PER_EPOCH - 1} as ProtoBlock);
+    regenStub.getBlockSlotState.rejects("Unit test error");
+    expect(loggerStub.error.calledOnce).to.be.false;
+    await Promise.all([
+      scheduler.prepareForNextSlot(SLOTS_PER_EPOCH - 1),
+      sandbox.clock.tickAsync((config.SECONDS_PER_SLOT * 1000 * 2) / 3),
+    ]);
+    expect(forkChoiceStub.updateHead.called, "expect forkChoice.updateHead to be called").to.be.true;
+    expect(regenStub.getBlockSlotState.called, "expect regen.getBlockSlotState to be called").to.be.true;
+    expect(loggerStub.error.calledOnce, "expect log error on rejected regen.getBlockSlotState").to.be.true;
+  });
+
+  it("bellatrix - should skip, headSlot is more than 1 epoch to prepare slot", async () => {
+    getForkSeqStub.returns(ForkSeq.bellatrix);
+    forkChoiceStub.updateHead.returns({slot: SLOTS_PER_EPOCH - 2} as ProtoBlock);
+    await Promise.all([
+      scheduler.prepareForNextSlot(2 * SLOTS_PER_EPOCH - 1),
+      sandbox.clock.tickAsync((config.SECONDS_PER_SLOT * 1000 * 2) / 3),
+    ]);
+    expect(forkChoiceStub.updateHead.called, "expect forkChoice.updateHead to be called").to.be.true;
+    expect(regenStub.getBlockSlotState.called, "expect regen.getBlockSlotState not to be called").to.be.false;
+  });
+
+  it("bellatrix - should skip, no block proposer", async () => {
+    getForkSeqStub.returns(ForkSeq.bellatrix);
+    forkChoiceStub.updateHead.returns({slot: SLOTS_PER_EPOCH - 3} as ProtoBlock);
+    const state = generateCachedBellatrixState();
+    regenStub.getBlockSlotState.resolves(state);
+    await Promise.all([
+      scheduler.prepareForNextSlot(SLOTS_PER_EPOCH - 1),
+      sandbox.clock.tickAsync((config.SECONDS_PER_SLOT * 1000 * 2) / 3),
+    ]);
+    expect(forkChoiceStub.updateHead.called, "expect forkChoice.updateHead to be called").to.be.true;
+    expect(regenStub.getBlockSlotState.called, "expect regen.getBlockSlotState to be called").to.be.true;
+  });
+
+  it("bellatrix - should prepare payload", async () => {
+    getForkSeqStub.returns(ForkSeq.bellatrix);
+    forkChoiceStub.updateHead.returns({slot: SLOTS_PER_EPOCH - 3} as ProtoBlock);
+    forkChoiceStub.getJustifiedBlock.returns({} as ProtoBlock);
+    forkChoiceStub.getFinalizedBlock.returns({} as ProtoBlock);
+    const state = generateCachedBellatrixState();
+    regenStub.getBlockSlotState.resolves(state);
+    beaconProposerCacheStub.get.returns("0x fee recipient address");
+    ((executionEngineStub as unknown) as {payloadIdCache: PayloadIdCache}).payloadIdCache = new PayloadIdCache();
+
+    await Promise.all([
+      scheduler.prepareForNextSlot(SLOTS_PER_EPOCH - 2),
+      sandbox.clock.tickAsync((config.SECONDS_PER_SLOT * 1000 * 2) / 3),
+    ]);
+
+    expect(forkChoiceStub.updateHead.called, "expect forkChoice.updateHead to be called").to.be.true;
+    expect(regenStub.getBlockSlotState.called, "expect regen.getBlockSlotState to be called").to.be.true;
+    expect(forkChoiceStub.getJustifiedBlock.called, "expect forkChoice.getJustifiedBlock to be called").to.be.true;
+    expect(forkChoiceStub.getFinalizedBlock.called, "expect forkChoice.getFinalizedBlock to be called").to.be.true;
+    expect(executionEngineStub.notifyForkchoiceUpdate.calledOnce, "expect CL call notifyForkchoiceUpdate").to.be.true;
+  });
+});
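The tests above always pair the scheduler call with sandbox.clock.tickAsync() inside Promise.all, because prepareForNextSlot() sleeps on fake timers. Reduced to a standalone example with plain sinon (the delayedTask helper is made up for illustration):

    // TypeScript sketch, not part of the patch
    import sinon from "sinon";

    async function delayedTask(ms: number): Promise<string> {
      await new Promise((resolve) => setTimeout(resolve, ms));
      return "done";
    }

    void (async () => {
      const clock = sinon.useFakeTimers();
      // Advance the fake clock while the task is pending, otherwise its internal sleep never resolves.
      const [result] = await Promise.all([delayedTask(8000), clock.tickAsync(8000)]);
      console.log(result); // "done"
      clock.restore();
    })();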
diff --git a/packages/lodestar/test/utils/state.ts b/packages/lodestar/test/utils/state.ts
index e78a3b9e02e..96f038a41fb 100644
--- a/packages/lodestar/test/utils/state.ts
+++ b/packages/lodestar/test/utils/state.ts
@@ -12,12 +12,15 @@ import {
   EPOCHS_PER_HISTORICAL_VECTOR,
   EPOCHS_PER_SLASHINGS_VECTOR,
   FAR_FUTURE_EPOCH,
+  ForkSeq,
   MAX_EFFECTIVE_BALANCE,
   SLOTS_PER_HISTORICAL_ROOT,
   SYNC_COMMITTEE_SIZE,
 } from "@chainsafe/lodestar-params";
 import bls from "@chainsafe/bls";
+import {CachedBeaconStateBellatrix} from "@chainsafe/lodestar-beacon-state-transition";
+import {BeaconStateBellatrix} from "@chainsafe/lodestar-beacon-state-transition";
 import {GENESIS_EPOCH, GENESIS_SLOT, ZERO_HASH} from "../../src/constants/index.js";
 import {generateEmptyBlock} from "./block.js";
 import {generateValidator, generateValidators} from "./validator.js";
@@ -39,7 +42,7 @@ type TestBeaconState = Partial<allForks.BeaconState>;
 export function generateState(
   opts: TestBeaconState = {},
   config = minimalConfig,
-  isAltair = false,
+  forkSeq: ForkSeq = ForkSeq.phase0,
   withPubkey = false
 ): BeaconStateAllForks {
   const validatorOpts = {
@@ -106,26 +109,36 @@ export function generateState(
     ...opts,
   };
 
-  if (isAltair) {
-    const defaultAltairState: altair.BeaconState = {
-      ...ssz.altair.BeaconState.defaultValue(),
-      ...defaultState,
-      previousEpochParticipation: [...[0xff, 0xff], ...Array.from({length: numValidators - 2}, () => 0)],
-      currentEpochParticipation: [...[0xff, 0xff], ...Array.from({length: numValidators - 2}, () => 0)],
-      currentSyncCommittee: {
-        pubkeys: Array.from({length: SYNC_COMMITTEE_SIZE}, (_, i) => validators[i % validators.length].pubkey),
-        aggregatePubkey: ssz.BLSPubkey.defaultValue(),
-      },
-      nextSyncCommittee: {
-        pubkeys: Array.from({length: SYNC_COMMITTEE_SIZE}, (_, i) => validators[i % validators.length].pubkey),
-        aggregatePubkey: ssz.BLSPubkey.defaultValue(),
-      },
-    };
+  if (forkSeq === ForkSeq.phase0) {
+    return ssz.phase0.BeaconState.toViewDU(defaultState);
+  }
+  const defaultAltairState: altair.BeaconState = {
+    ...ssz.altair.BeaconState.defaultValue(),
+    ...defaultState,
+    previousEpochParticipation: [...[0xff, 0xff], ...Array.from({length: numValidators - 2}, () => 0)],
+    currentEpochParticipation: [...[0xff, 0xff], ...Array.from({length: numValidators - 2}, () => 0)],
+    currentSyncCommittee: {
+      pubkeys: Array.from({length: SYNC_COMMITTEE_SIZE}, (_, i) => validators[i % validators.length].pubkey),
+      aggregatePubkey: ssz.BLSPubkey.defaultValue(),
+    },
+    nextSyncCommittee: {
+      pubkeys: Array.from({length: SYNC_COMMITTEE_SIZE}, (_, i) => validators[i % validators.length].pubkey),
+      aggregatePubkey: ssz.BLSPubkey.defaultValue(),
+    },
+  };
+
+  if (forkSeq === ForkSeq.altair) {
     return ssz.altair.BeaconState.toViewDU(defaultAltairState);
-  } else {
-    return ssz.phase0.BeaconState.toViewDU(defaultState);
   }
+
+  const payloadHeader = ssz.bellatrix.ExecutionPayloadHeader.defaultValue();
+
+  // Bellatrix
+  return ssz.bellatrix.BeaconState.toViewDU({
+    ...defaultAltairState,
+    latestExecutionPayloadHeader: {...payloadHeader, blockNumber: 2022},
+  });
 }
 
 /**
@@ -136,7 +149,23 @@ export function generateCachedState(
   config = minimalConfig,
   isAltair = false
 ): CachedBeaconStateAllForks {
-  const state = generateState(opts, config, isAltair);
+  const state = generateState(opts, config, isAltair ? ForkSeq.altair : ForkSeq.phase0);
+  return createCachedBeaconState(state, {
+    config: createIBeaconConfig(config, state.genesisValidatorsRoot),
+    // This is a performance test, there's no need to have a global shared cache of keys
+    pubkey2index: new PubkeyIndexMap(),
+    index2pubkey: [],
+  });
+}
+
+/**
+ * This generates state with default pubkey
+ */
+export function generateCachedAltairState(
+  opts: TestBeaconState = {},
+  config = minimalConfig
+): CachedBeaconStateAllForks {
+  const state = generateState(opts, config, ForkSeq.altair);
   return createCachedBeaconState(state, {
     config: createIBeaconConfig(config, state.genesisValidatorsRoot),
     // This is a performance test, there's no need to have a global shared cache of keys
     pubkey2index: new PubkeyIndexMap(),
     index2pubkey: [],
   });
 }
 
+/**
+ * This generates state with default pubkey
+ */
+export function generateCachedBellatrixState(
+  opts: TestBeaconState = {},
+  config = minimalConfig
+): CachedBeaconStateBellatrix {
+  const state = generateState(opts, config, ForkSeq.bellatrix);
+  return createCachedBeaconState(state as BeaconStateBellatrix, {
+    config: createIBeaconConfig(config, state.genesisValidatorsRoot),
+    // This is a performance test, there's no need to have a global shared cache of keys
+    pubkey2index: new PubkeyIndexMap(),
+    index2pubkey: [],
+  });
+}
+
 /**
  * This generates state with real pubkey
 */
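With the reworked helpers, callers pick a fork via ForkSeq instead of the old isAltair boolean. A hedged usage sketch, assuming it runs from inside packages/lodestar/test (the import paths and the minimalConfig alias are assumptions):

    // TypeScript sketch, not part of the patch
    import {ForkSeq} from "@chainsafe/lodestar-params";
    import {config as minimalConfig} from "@chainsafe/lodestar-config/default";
    import {generateState, generateCachedAltairState, generateCachedBellatrixState} from "./utils/state.js";

    const phase0State = generateState(); // forkSeq defaults to ForkSeq.phase0
    const bellatrixState = generateState({}, minimalConfig, ForkSeq.bellatrix);
    const cachedAltair = generateCachedAltairState(); // replaces generateState({}, config, true) plus caching
    const cachedBellatrix = generateCachedBellatrixState(); // typed as CachedBeaconStateBellatrix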