From 332bfc02547f5f4cf01b873c586279627d4415c2 Mon Sep 17 00:00:00 2001
From: dapplion <35266934+dapplion@users.noreply.github.com>
Date: Sun, 8 May 2022 19:14:00 +0200
Subject: [PATCH] Concurrent block verification steps

Basic range sync perf test

Re-use existing CachedBeaconState from anchorState

Verify block segment in parallel

Fix perf test range

Default to pool of size 4

Yield on getBlockSignatureSets

Fix test type

Ensure db is cleaned up after each tmpdb usage

Add INFURA_ETH2_CREDENTIALS to benchmark GA
---
 .../src/allForks/signatureSets/index.ts       |  23 +-
 .../src/altair/block/processSyncCommittee.ts  |  24 +-
 .../src/cache/stateCache.ts                   |   9 +
 packages/beacon-state-transition/src/index.ts |   2 +-
 .../unit/signatureSets/signatureSets.test.ts  |   2 +-
 .../lodestar/src/chain/blocks/importBlock.ts  |   4 +-
 packages/lodestar/src/chain/blocks/index.ts   |  84 +--
 packages/lodestar/src/chain/blocks/types.ts   |   6 +-
 .../lodestar/src/chain/blocks/verifyBlock.ts  | 600 ++++++++++++------
 .../src/chain/bls/multithread/poolSize.ts     |   2 +-
 packages/lodestar/src/chain/chain.ts          |  25 +-
 .../lodestar/src/chain/lightClient/index.ts   |  14 +-
 packages/lodestar/src/chain/options.ts        |   1 +
 .../lodestar/src/node/utils/interop/state.ts  |   3 +
 packages/lodestar/src/sync/constants.ts       |  10 +-
 packages/lodestar/src/sync/range/batch.ts     |   8 +-
 .../beacon/repositories/blockArchive.test.ts  |   8 +-
 .../test/e2e/interop/genesisState.test.ts     |   8 +-
 .../perf/chain/verifyImportBlocks.test.ts     |   1 +
 .../lodestar/test/spec/presets/operations.ts  |   2 +
 .../unit/chain/blocks/verifyBlock.test.ts     |  13 +-
 .../test/unit/sync/range/batch.test.ts        |   3 +-
 packages/lodestar/test/utils/db.ts            |  21 +-
 23 files changed, 531 insertions(+), 342 deletions(-)

diff --git a/packages/beacon-state-transition/src/allForks/signatureSets/index.ts b/packages/beacon-state-transition/src/allForks/signatureSets/index.ts
index 1d6f9048981..5404ae4167c 100644
--- a/packages/beacon-state-transition/src/allForks/signatureSets/index.ts
+++ b/packages/beacon-state-transition/src/allForks/signatureSets/index.ts
@@ -20,20 +20,13 @@ export * from "./voluntaryExits.js";
  * Includes all signatures on the block (except the deposit signatures) for verification.
  * Deposits are not included because they can legally have invalid signatures.
  */
-export function getAllBlockSignatureSets(
+export function getBlockSignatureSets(
   state: CachedBeaconStateAllForks,
-  signedBlock: allForks.SignedBeaconBlock
-): ISignatureSet[] {
-  return [getProposerSignatureSet(state, signedBlock), ...getAllBlockSignatureSetsExceptProposer(state, signedBlock)];
-}
-
-/**
- * Includes all signatures on the block (except the deposit signatures) for verification.
- * Useful since block proposer signature is verified beforehand on gossip validation
- */
-export function getAllBlockSignatureSetsExceptProposer(
-  state: CachedBeaconStateAllForks,
-  signedBlock: allForks.SignedBeaconBlock
+  signedBlock: allForks.SignedBeaconBlock,
+  opts?: {
+    /** Useful since block proposer signature is verified beforehand on gossip validation */
+    skipProposerSignature?: boolean;
+  }
 ): ISignatureSet[] {
   const signatureSets = [
     getRandaoRevealSignatureSet(state, signedBlock.message),
@@ -43,6 +36,10 @@ export function getAllBlockSignatureSetsExceptProposer(
     ...getVoluntaryExitsSignatureSets(state, signedBlock),
   ];
 
+  if (!opts?.skipProposerSignature) {
+    signatureSets.push(getProposerSignatureSet(state, signedBlock));
+  }
+
   // Only after altair fork, validate tSyncCommitteeSignature
   if (computeEpochAtSlot(signedBlock.message.slot) >= state.config.ALTAIR_FORK_EPOCH) {
     const syncCommitteeSignatureSet = getSyncCommitteeSignatureSet(
diff --git a/packages/beacon-state-transition/src/altair/block/processSyncCommittee.ts b/packages/beacon-state-transition/src/altair/block/processSyncCommittee.ts
index cde8dc6d677..752f2ee1ca0 100644
--- a/packages/beacon-state-transition/src/altair/block/processSyncCommittee.ts
+++ b/packages/beacon-state-transition/src/altair/block/processSyncCommittee.ts
@@ -1,14 +1,7 @@
 import {altair, ssz} from "@chainsafe/lodestar-types";
 import {DOMAIN_SYNC_COMMITTEE} from "@chainsafe/lodestar-params";
 import {byteArrayEquals} from "@chainsafe/ssz";
-
-import {
-  computeSigningRoot,
-  getBlockRootAtSlot,
-  ISignatureSet,
-  SignatureSetType,
-  verifySignatureSet,
-} from "../../util/index.js";
+import {computeSigningRoot, ISignatureSet, SignatureSetType, verifySignatureSet} from "../../util/index.js";
 import {CachedBeaconStateAllForks} from "../../types.js";
 import {G2_POINT_AT_INFINITY} from "../../constants/index.js";
 import {getUnparticipantValues} from "../../util/array.js";
@@ -66,13 +59,18 @@ export function getSyncCommitteeSignatureSet(
   // ```
   // However we need to run the function getSyncCommitteeSignatureSet() for all the blocks in a epoch
   // with the same state when verifying blocks in batch on RangeSync. Therefore we use the block.slot.
-  //
-  // This function expects that block.slot <= state.slot, otherwise we can't get the root sign by the sync committee.
-  // process_sync_committee() is run at the end of process_block(). process_block() is run after process_slots()
-  // which in the spec forces state.slot to equal block.slot.
   const previousSlot = Math.max(block.slot, 1) - 1;
 
-  const rootSigned = getBlockRootAtSlot(state, previousSlot);
+  // The spec uses the state to get the root at previousSlot
+  // ```python
+  // get_block_root_at_slot(state, previous_slot)
+  // ```
+  // However we need to run the function getSyncCommitteeSignatureSet() for all the blocks in an epoch
+  // with the same state when verifying blocks in batch on RangeSync.
+  //
+  // On skipped slots state block roots just copy the latest block, so using the parentRoot here is equivalent.
+ // So getSyncCommitteeSignatureSet() can be called with a state in any slot (with the correct shuffling) + const rootSigned = block.parentRoot; if (!participantIndices) { const committeeIndices = state.epochCtx.currentSyncCommitteeIndexed.validatorIndices; diff --git a/packages/beacon-state-transition/src/cache/stateCache.ts b/packages/beacon-state-transition/src/cache/stateCache.ts index 452653f013b..f47d0db3eed 100644 --- a/packages/beacon-state-transition/src/cache/stateCache.ts +++ b/packages/beacon-state-transition/src/cache/stateCache.ts @@ -149,3 +149,12 @@ export function getCachedBeaconState( return cachedState; } + +/** + * Typeguard to check if a state contains a BeaconStateCache + */ +export function isCachedBeaconState( + state: T | (T & BeaconStateCache) +): state is T & BeaconStateCache { + return (state as T & BeaconStateCache).epochCtx !== undefined; +} diff --git a/packages/beacon-state-transition/src/index.ts b/packages/beacon-state-transition/src/index.ts index 08bf71f6111..0d9cd14aab8 100644 --- a/packages/beacon-state-transition/src/index.ts +++ b/packages/beacon-state-transition/src/index.ts @@ -24,7 +24,7 @@ export { } from "./types.js"; // Main state caches -export {createCachedBeaconState, BeaconStateCache} from "./cache/stateCache.js"; +export {createCachedBeaconState, BeaconStateCache, isCachedBeaconState} from "./cache/stateCache.js"; export {EpochContext, EpochContextImmutableData, createEmptyEpochContextImmutableData} from "./cache/epochContext.js"; export {EpochProcess, beforeProcessEpoch} from "./cache/epochProcess.js"; diff --git a/packages/beacon-state-transition/test/unit/signatureSets/signatureSets.test.ts b/packages/beacon-state-transition/test/unit/signatureSets/signatureSets.test.ts index 3c689afc09f..4a71392bb91 100644 --- a/packages/beacon-state-transition/test/unit/signatureSets/signatureSets.test.ts +++ b/packages/beacon-state-transition/test/unit/signatureSets/signatureSets.test.ts @@ -63,7 +63,7 @@ describe("signatureSets", () => { const state = generateCachedState(config, {validators}); - const signatureSets = allForks.getAllBlockSignatureSets(state, signedBlock); + const signatureSets = allForks.getBlockSignatureSets(state, signedBlock); expect(signatureSets.length).to.equal( // block signature 1 + diff --git a/packages/lodestar/src/chain/blocks/importBlock.ts b/packages/lodestar/src/chain/blocks/importBlock.ts index ed6c5ec4711..da71ef08523 100644 --- a/packages/lodestar/src/chain/blocks/importBlock.ts +++ b/packages/lodestar/src/chain/blocks/importBlock.ts @@ -73,7 +73,7 @@ export type ImportBlockModules = { * - Send events after everything is done */ export async function importBlock(chain: ImportBlockModules, fullyVerifiedBlock: FullyVerifiedBlock): Promise { - const {block, postState, parentBlock, skipImportingAttestations, executionStatus} = fullyVerifiedBlock; + const {block, postState, parentBlockSlot, skipImportingAttestations, executionStatus} = fullyVerifiedBlock; const pendingEvents = new PendingEvents(chain.emitter); // - Observe attestations @@ -210,7 +210,7 @@ export async function importBlock(chain: ImportBlockModules, fullyVerifiedBlock: chain.lightClientServer.onImportBlockHead( block.message as altair.BeaconBlock, postState as CachedBeaconStateAltair, - parentBlock + parentBlockSlot ); } catch (e) { chain.logger.error("Error lightClientServer.onImportBlock", {slot: block.message.slot}, e as Error); diff --git a/packages/lodestar/src/chain/blocks/index.ts b/packages/lodestar/src/chain/blocks/index.ts index 
a5ff70f531d..c29eab65496 100644
--- a/packages/lodestar/src/chain/blocks/index.ts
+++ b/packages/lodestar/src/chain/blocks/index.ts
@@ -1,11 +1,10 @@
 /* eslint-disable @typescript-eslint/no-unsafe-member-access, @typescript-eslint/no-unsafe-assignment */
 import {allForks} from "@chainsafe/lodestar-types";
-import {sleep} from "@chainsafe/lodestar-utils";
 import {ChainEvent} from "../emitter.js";
 import {JobItemQueue} from "../../util/queue/index.js";
 import {BlockError, BlockErrorCode, ChainSegmentError} from "../errors/index.js";
 import {BlockProcessOpts} from "../options.js";
-import {verifyBlock, VerifyBlockModules} from "./verifyBlock.js";
+import {verifyBlocks, VerifyBlockModules} from "./verifyBlock.js";
 import {importBlock, ImportBlockModules} from "./importBlock.js";
 import {assertLinearChainSegment} from "./utils/chainSegment.js";
 import {PartiallyVerifiedBlock} from "./types.js";
@@ -44,6 +43,10 @@ export class BlockProcessor {
   }
 }
 
+///////////////////////////
+// TODO: Run these functions with spec tests of many blocks
+///////////////////////////
+
 /**
  * Validate and process a block
  *
@@ -59,26 +62,7 @@ export async function processBlock(
   partiallyVerifiedBlock: PartiallyVerifiedBlock,
   opts: BlockProcessOpts
 ): Promise<void> {
-  try {
-    const fullyVerifiedBlock = await verifyBlock(modules, partiallyVerifiedBlock, opts);
-    await importBlock(modules, fullyVerifiedBlock);
-  } catch (e) {
-    // above functions should only throw BlockError
-    const err = getBlockError(e, partiallyVerifiedBlock.block);
-
-    if (
-      partiallyVerifiedBlock.ignoreIfKnown &&
-      (err.type.code === BlockErrorCode.ALREADY_KNOWN || err.type.code === BlockErrorCode.GENESIS_BLOCK)
-    ) {
-      // Flag ignoreIfKnown causes BlockErrorCodes ALREADY_KNOWN, GENESIS_BLOCK to resolve.
-      // Return before emitting to not cause loud logging.
-      return;
-    }
-
-    modules.emitter.emit(ChainEvent.errorBlock, err);
-
-    throw err;
-  }
+  await processChainSegment(modules, [partiallyVerifiedBlock], opts);
 }
 
 /**
@@ -89,41 +73,37 @@ export async function processChainSegment(
   partiallyVerifiedBlocks: PartiallyVerifiedBlock[],
   opts: BlockProcessOpts
 ): Promise<void> {
-  const blocks = partiallyVerifiedBlocks.map((b) => b.block);
-  assertLinearChainSegment(modules.config, blocks);
-
-  let importedBlocks = 0;
+  if (partiallyVerifiedBlocks.length === 0) {
+    return; // TODO: or throw?
+  } else if (partiallyVerifiedBlocks.length > 1) {
+    assertLinearChainSegment(
+      modules.config,
+      partiallyVerifiedBlocks.map((b) => b.block)
+    );
+  }
 
-  for (const partiallyVerifiedBlock of partiallyVerifiedBlocks) {
-    try {
-      // TODO: Re-use preState
-      const fullyVerifiedBlock = await verifyBlock(modules, partiallyVerifiedBlock, opts);
-      await importBlock(modules, fullyVerifiedBlock);
-      importedBlocks++;
+  // TODO: Does this make sense with current batch verify approach?
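For orientation, a sketch of how callers compose the two refactored entry points. The exact `modules` type and the flag choices here are assumptions for illustration, not part of this patch:

```ts
import {allForks} from "@chainsafe/lodestar-types";

// A gossip block is now just a 1-element segment; both entry points share the
// same verify-then-import pipeline and the same error handling.
async function onGossipBlock(
  modules: VerifyBlockModules & ImportBlockModules,
  block: allForks.SignedBeaconBlock,
  opts: BlockProcessOpts
): Promise<void> {
  // Proposer signature was already verified during gossip validation
  await processBlock(modules, {block, validProposerSignature: true}, opts);
}

// Range sync hands over a whole batch: every block is verified before any is
// imported, and a single failure rejects the entire segment as ChainSegmentError.
async function onRangeSyncBatch(
  modules: VerifyBlockModules & ImportBlockModules,
  blocks: allForks.SignedBeaconBlock[],
  opts: BlockProcessOpts
): Promise<void> {
  await processChainSegment(
    modules,
    blocks.map((block) => ({block, ignoreIfKnown: true, ignoreIfFinalized: true})),
    opts
  );
}
```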
+ // No block is imported until all blocks are verified + const importedBlocks = 0; - // this avoids keeping our node busy processing blocks - await sleep(0); - } catch (e) { - // above functions should only throw BlockError - const err = getBlockError(e, partiallyVerifiedBlock.block); + try { + const fullyVerifiedBlocks = await verifyBlocks(modules, partiallyVerifiedBlocks, opts); - if ( - partiallyVerifiedBlock.ignoreIfKnown && - (err.type.code === BlockErrorCode.ALREADY_KNOWN || err.type.code === BlockErrorCode.GENESIS_BLOCK) - ) { - continue; - } - if (partiallyVerifiedBlock.ignoreIfFinalized && err.type.code == BlockErrorCode.WOULD_REVERT_FINALIZED_SLOT) { - continue; - } + for (const fullyVerifiedBlock of fullyVerifiedBlocks) { + // No need to sleep(0) here since `importBlock` includes a disk write + // TODO: Consider batching importBlock too if it takes significant time + await importBlock(modules, fullyVerifiedBlock); + } + } catch (e) { + // above functions should only throw BlockError + const err = getBlockError(e, partiallyVerifiedBlocks[0].block); - modules.emitter.emit(ChainEvent.errorBlock, err); + modules.emitter.emit(ChainEvent.errorBlock, err); - // Convert to ChainSegmentError to append `importedBlocks` data - const chainSegmentError = new ChainSegmentError(partiallyVerifiedBlock.block, err.type, importedBlocks); - chainSegmentError.stack = err.stack; - throw chainSegmentError; - } + // Convert to ChainSegmentError to append `importedBlocks` data + const chainSegmentError = new ChainSegmentError(partiallyVerifiedBlocks[0].block, err.type, importedBlocks); + chainSegmentError.stack = err.stack; + throw chainSegmentError; } } diff --git a/packages/lodestar/src/chain/blocks/types.ts b/packages/lodestar/src/chain/blocks/types.ts index d48ce4e5d50..68fc822310b 100644 --- a/packages/lodestar/src/chain/blocks/types.ts +++ b/packages/lodestar/src/chain/blocks/types.ts @@ -1,6 +1,6 @@ import {CachedBeaconStateAllForks} from "@chainsafe/lodestar-beacon-state-transition"; -import {IProtoBlock, ExecutionStatus} from "@chainsafe/lodestar-fork-choice"; -import {allForks} from "@chainsafe/lodestar-types"; +import {ExecutionStatus} from "@chainsafe/lodestar-fork-choice"; +import {allForks, Slot} from "@chainsafe/lodestar-types"; export type FullyVerifiedBlockFlags = { /** @@ -50,7 +50,7 @@ export type PartiallyVerifiedBlockFlags = FullyVerifiedBlockFlags & { export type FullyVerifiedBlock = FullyVerifiedBlockFlags & { block: allForks.SignedBeaconBlock; postState: CachedBeaconStateAllForks; - parentBlock: IProtoBlock; + parentBlockSlot: Slot; }; /** diff --git a/packages/lodestar/src/chain/blocks/verifyBlock.ts b/packages/lodestar/src/chain/blocks/verifyBlock.ts index c7eaf5e42da..347d3d858af 100644 --- a/packages/lodestar/src/chain/blocks/verifyBlock.ts +++ b/packages/lodestar/src/chain/blocks/verifyBlock.ts @@ -3,11 +3,12 @@ import { computeStartSlotAtEpoch, allForks, bellatrix, + computeEpochAtSlot, } from "@chainsafe/lodestar-beacon-state-transition"; import {toHexString} from "@chainsafe/ssz"; import {IForkChoice, IProtoBlock, ExecutionStatus, assertValidTerminalPowBlock} from "@chainsafe/lodestar-fork-choice"; import {IChainForkConfig} from "@chainsafe/lodestar-config"; -import {ILogger} from "@chainsafe/lodestar-utils"; +import {ErrorAborted, ILogger, sleep} from "@chainsafe/lodestar-utils"; import {IMetrics} from "../../metrics/index.js"; import {IExecutionEngine} from "../../executionEngine/index.js"; import {BlockError, BlockErrorCode} from "../errors/index.js"; @@ -15,10 +16,10 
@@ import {IBeaconClock} from "../clock/index.js"; import {BlockProcessOpts} from "../options.js"; import {IStateRegenerator, RegenCaller} from "../regen/index.js"; import {IBlsVerifier} from "../bls/index.js"; +import {FullyVerifiedBlock, PartiallyVerifiedBlock} from "./types.js"; import {ExecutePayloadStatus} from "../../executionEngine/interface.js"; import {byteArrayEquals} from "../../util/bytes.js"; import {IEth1ForBlockProduction} from "../../eth1/index.js"; -import {FullyVerifiedBlock, PartiallyVerifiedBlock} from "./types.js"; import {POS_PANDA_MERGE_TRANSITION_BANNER} from "./utils/pandaMergeTransitionBanner.js"; export type VerifyBlockModules = { @@ -37,26 +38,31 @@ export type VerifyBlockModules = { * Fully verify a block to be imported immediately after. Does not produce any side-effects besides adding intermediate * states in the state cache through regen. */ -export async function verifyBlock( +export async function verifyBlocks( chain: VerifyBlockModules, - partiallyVerifiedBlock: PartiallyVerifiedBlock, + partiallyVerifiedBlocks: PartiallyVerifiedBlock[], opts: BlockProcessOpts -): Promise { - const parentBlock = verifyBlockSanityChecks(chain, partiallyVerifiedBlock); +): Promise { + const {parentBlock, relevantPartiallyVerifiedBlocks} = verifyBlocksSanityChecks(chain, partiallyVerifiedBlocks); + + // No relevant blocks, skip verifyBlocksInEpoch() + if (relevantPartiallyVerifiedBlocks.length === 0) { + return []; + } - const {postState, executionStatus} = await verifyBlockStateTransition(chain, partiallyVerifiedBlock, opts); + const {postStates, executionStatuses} = await verifyBlocksInEpoch(chain, relevantPartiallyVerifiedBlocks, opts); - return { + return partiallyVerifiedBlocks.map((partiallyVerifiedBlock, i) => ({ block: partiallyVerifiedBlock.block, - postState, - parentBlock, + postState: postStates[i], + parentBlockSlot: i === 0 ? parentBlock.slot : partiallyVerifiedBlocks[i - 1].block.message.slot, skipImportingAttestations: partiallyVerifiedBlock.skipImportingAttestations, - executionStatus, - }; + executionStatus: executionStatuses[i], + })); } /** - * Verifies som early cheap sanity checks on the block before running the full state transition. + * Verifies some early cheap sanity checks on the block before running the full state transition. 
* * - Parent is known to the fork-choice * - Check skipped slots limit @@ -67,217 +73,276 @@ export async function verifyBlock( * - Not finalized slot * - Not already known */ -export function verifyBlockSanityChecks( +export function verifyBlocksSanityChecks( chain: VerifyBlockModules, - partiallyVerifiedBlock: PartiallyVerifiedBlock -): IProtoBlock { - const {block} = partiallyVerifiedBlock; - const blockSlot = block.message.slot; - - // Not genesis block - if (blockSlot === 0) { - throw new BlockError(block, {code: BlockErrorCode.GENESIS_BLOCK}); + partiallyVerifiedBlocks: PartiallyVerifiedBlock[] +): {parentBlock: IProtoBlock; relevantPartiallyVerifiedBlocks: PartiallyVerifiedBlock[]} { + if (partiallyVerifiedBlocks.length === 0) { + throw Error("Empty partiallyVerifiedBlocks"); } - // Not finalized slot - const finalizedSlot = computeStartSlotAtEpoch(chain.forkChoice.getFinalizedCheckpoint().epoch); - if (blockSlot <= finalizedSlot) { - throw new BlockError(block, {code: BlockErrorCode.WOULD_REVERT_FINALIZED_SLOT, blockSlot, finalizedSlot}); - } + const block0 = partiallyVerifiedBlocks[0].block; - // Parent is known to the fork-choice - const parentRoot = toHexString(block.message.parentRoot); + // block0 parent is known to the fork-choice. + // No need to check the rest of block parents, they are checked in assertLinearChainSegment() + const parentRoot = toHexString(block0.message.parentRoot); const parentBlock = chain.forkChoice.getBlockHex(parentRoot); if (!parentBlock) { - throw new BlockError(block, {code: BlockErrorCode.PARENT_UNKNOWN, parentRoot}); + throw new BlockError(block0, {code: BlockErrorCode.PARENT_UNKNOWN, parentRoot}); } - // Check skipped slots limit - // TODO + const relevantPartiallyVerifiedBlocks = partiallyVerifiedBlocks.filter((partiallyVerifiedBlock) => { + const {block, ignoreIfFinalized, ignoreIfKnown} = partiallyVerifiedBlock; + const blockSlot = block.message.slot; - // Block not in the future, also checks for infinity - const currentSlot = chain.clock.currentSlot; - if (blockSlot > currentSlot) { - throw new BlockError(block, {code: BlockErrorCode.FUTURE_SLOT, blockSlot, currentSlot}); - } + // Not genesis block + // IGNORE if `partiallyVerifiedBlock.ignoreIfKnown` + if (blockSlot === 0) { + if (ignoreIfKnown) return false; + throw new BlockError(block, {code: BlockErrorCode.GENESIS_BLOCK}); + } - // Not already known - const blockHash = toHexString(chain.config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message)); - if (chain.forkChoice.hasBlockHex(blockHash)) { - throw new BlockError(block, {code: BlockErrorCode.ALREADY_KNOWN, root: blockHash}); - } + // Not finalized slot + // IGNORE if `partiallyVerifiedBlock.ignoreIfFinalized` + const finalizedSlot = computeStartSlotAtEpoch(chain.forkChoice.getFinalizedCheckpoint().epoch); + if (blockSlot <= finalizedSlot) { + if (ignoreIfFinalized) return false; + throw new BlockError(block, {code: BlockErrorCode.WOULD_REVERT_FINALIZED_SLOT, blockSlot, finalizedSlot}); + } + + // Check skipped slots limit + // TODO + + // Block not in the future, also checks for infinity + const currentSlot = chain.clock.currentSlot; + if (blockSlot > currentSlot) { + throw new BlockError(block, {code: BlockErrorCode.FUTURE_SLOT, blockSlot, currentSlot}); + } + + // Not already known + // IGNORE if `partiallyVerifiedBlock.ignoreIfKnown` + const blockHash = toHexString( + chain.config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message) + ); + if (chain.forkChoice.hasBlockHex(blockHash)) { + 
if (ignoreIfKnown) return false; + throw new BlockError(block, {code: BlockErrorCode.ALREADY_KNOWN, root: blockHash}); + } + + return true; + }); - return parentBlock; + return {parentBlock, relevantPartiallyVerifiedBlocks}; } /** - * Verifies a block is fully valid running the full state transition. To relieve the main thread signatures are - * verified separately in workers with chain.bls worker pool. + * Verifies 1 or more blocks are fully valid; from a linear sequence of blocks. * - * - Advance state to block's slot - per_slot_processing() - * - STFN - per_block_processing() - * - Check state root matches + * To relieve the main thread signatures are verified separately in workers with chain.bls worker pool. + * In parallel it: + * - Run full state transition in sequence + * - Verify all block's signatures in parallel + * - Submit execution payloads to EL in sequence + * + * If there's an error during one of the steps, the rest are aborted with an AbortController. */ -export async function verifyBlockStateTransition( +export async function verifyBlocksInEpoch( chain: VerifyBlockModules, - partiallyVerifiedBlock: PartiallyVerifiedBlock, + partiallyVerifiedBlocks: PartiallyVerifiedBlock[], opts: BlockProcessOpts -): Promise<{postState: CachedBeaconStateAllForks; executionStatus: ExecutionStatus}> { - const {block, validProposerSignature, validSignatures} = partiallyVerifiedBlock; +): Promise<{postStates: CachedBeaconStateAllForks[]; executionStatuses: ExecutionStatus[]}> { + if (partiallyVerifiedBlocks.length === 0) { + throw Error("Empty partiallyVerifiedBlocks"); + } + + const block0 = partiallyVerifiedBlocks[0].block; + const epoch = computeEpochAtSlot(block0.message.slot); + + // Ensure all blocks are in the same epoch + for (let i = 1; i < partiallyVerifiedBlocks.length; i++) { + const blockSlot = partiallyVerifiedBlocks[i].block.message.slot; + if (epoch !== computeEpochAtSlot(blockSlot)) { + throw Error(`Block ${i} slot ${blockSlot} not in same epoch ${epoch}`); + } + } // TODO: Skip in process chain segment // Retrieve preState from cache (regen) - const preState = await chain.regen.getPreState(block.message, RegenCaller.processBlocksInEpoch).catch((e) => { - throw new BlockError(block, {code: BlockErrorCode.PRESTATE_MISSING, error: e as Error}); + const preState0 = await chain.regen.getPreState(block0.message, RegenCaller.processBlocksInEpoch).catch((e) => { + throw new BlockError(block0, {code: BlockErrorCode.PRESTATE_MISSING, error: e as Error}); }); - const isMergeTransitionBlock = - bellatrix.isBellatrixStateType(preState) && - bellatrix.isBellatrixBlockBodyType(block.message.body) && - bellatrix.isMergeTransitionBlock(preState, block.message.body); - - // STFN - per_slot_processing() + per_block_processing() - // NOTE: `regen.getPreState()` should have dialed forward the state already caching checkpoint states - const useBlsBatchVerify = !opts?.disableBlsBatchVerify; - const postState = allForks.stateTransition( - preState, - block, - { - // false because it's verified below with better error typing - verifyStateRoot: false, - // if block is trusted don't verify proposer or op signature - verifyProposer: !useBlsBatchVerify && !validSignatures && !validProposerSignature, - verifySignatures: !useBlsBatchVerify && !validSignatures, - }, - chain.metrics - ); + // Ensure the state is in the same epoch as block0 + if (epoch !== computeEpochAtSlot(preState0.slot)) { + throw Error(`preState must be dialed to block epoch ${epoch}`); + } - /** Not null if execution is enabled */ - 
const executionPayloadEnabled = - bellatrix.isBellatrixStateType(postState) && - bellatrix.isBellatrixBlockBodyType(block.message.body) && - bellatrix.isExecutionEnabled(postState, block.message.body) - ? block.message.body.executionPayload - : null; + const abortController = new AbortController(); + + try { + const [{postStates}, , {executionStatuses}] = await Promise.all([ + // Run state transition only + // TODO: Ensure it yields to allow flushing to workers and engine API + verifyBlockStateTransitionOnly(chain, preState0, partiallyVerifiedBlocks, abortController.signal, opts), - // Verify signatures after running state transition, so all SyncCommittee signed roots are known at this point. + // All signatures at once + verifyBlocksSignatures(chain, preState0, partiallyVerifiedBlocks), + + // Execution payloads + verifyBlockExecutionPayloads(chain, partiallyVerifiedBlocks, preState0, abortController.signal, opts), + ]); + + return {postStates, executionStatuses}; + } finally { + abortController.abort(); + } +} + +/** + * Verifies 1 or more blocks are fully valid running the full state transition; from a linear sequence of blocks. + * + * - Advance state to block's slot - per_slot_processing() + * - For each block: + * - STFN - per_block_processing() + * - Check state root matches + */ +export async function verifyBlockStateTransitionOnly( + chain: VerifyBlockModules, + preState0: CachedBeaconStateAllForks, + partiallyVerifiedBlocks: PartiallyVerifiedBlock[], + signal: AbortSignal, + opts: BlockProcessOpts +): Promise<{postStates: CachedBeaconStateAllForks[]}> { + const postStates = new Array(partiallyVerifiedBlocks.length); + + for (let i = 0; i < partiallyVerifiedBlocks.length; i++) { + const {block, validProposerSignature, validSignatures} = partiallyVerifiedBlocks[i]; + const preState = i === 0 ? preState0 : postStates[i - 1]; + + // STFN - per_slot_processing() + per_block_processing() + // NOTE: `regen.getPreState()` should have dialed forward the state already caching checkpoint states + const useBlsBatchVerify = !opts?.disableBlsBatchVerify; + const postState = allForks.stateTransition( + preState, + block, + { + // false because it's verified below with better error typing + verifyStateRoot: false, + // if block is trusted don't verify proposer or op signature + verifyProposer: !useBlsBatchVerify && !validSignatures && !validProposerSignature, + verifySignatures: !useBlsBatchVerify && !validSignatures, + }, + chain.metrics + ); + + // Check state root matches + if (!byteArrayEquals(block.message.stateRoot, postState.hashTreeRoot())) { + throw new BlockError(block, { + code: BlockErrorCode.INVALID_STATE_ROOT, + root: postState.hashTreeRoot(), + expectedRoot: block.message.stateRoot, + preState, + postState, + }); + } + + postStates[i] = postState; + + // If blocks are invalid in execution the main promise could resolve before this loop ends. + // In that case stop processing blocks and return early. + if (signal.aborted) { + throw new ErrorAborted("verifyBlockStateTransitionOnly"); + } + + // this avoids keeping our node busy processing blocks + if (i < partiallyVerifiedBlocks.length - 1) { + await sleep(0); + } + } + + return {postStates}; +} + +/** + * Verifies 1 or more block's signatures from a group of blocks in the same epoch. + * getBlockSignatureSets() guarantees to return the correct signingRoots as long as all blocks belong in the same + * epoch as `preState0`. Otherwise the shufflings won't be correct. 
+ * + * Since all data is known in advance all signatures are verified at once in parallel. + */ +export async function verifyBlocksSignatures( + chain: VerifyBlockModules, + preState0: CachedBeaconStateAllForks, + partiallyVerifiedBlocks: PartiallyVerifiedBlock[] +): Promise { + const isValidPromises: Promise[] = []; + + // Verifies signatures after running state transition, so all SyncCommittee signed roots are known at this point. // We must ensure block.slot <= state.slot before running getAllBlockSignatureSets(). // NOTE: If in the future multiple blocks signatures are verified at once, all blocks must be in the same epoch // so the attester and proposer shufflings are correct. - if (useBlsBatchVerify && !validSignatures) { - const signatureSets = validProposerSignature - ? allForks.getAllBlockSignatureSetsExceptProposer(postState, block) - : allForks.getAllBlockSignatureSets(postState, block); - - if ( - signatureSets.length > 0 && - !(await chain.bls.verifySignatureSets(signatureSets, { - verifyOnMainThread: partiallyVerifiedBlock?.blsVerifyOnMainThread, - })) - ) { - throw new BlockError(block, {code: BlockErrorCode.INVALID_SIGNATURE, state: postState}); + for (const partiallyVerifiedBlock of partiallyVerifiedBlocks) { + const {block, validProposerSignature, validSignatures} = partiallyVerifiedBlock; + + // Skip all signature verification + if (validSignatures) { + continue; } - } - let executionStatus: ExecutionStatus; - if (executionPayloadEnabled) { - // TODO: Handle better notifyNewPayload() returning error is syncing - const execResult = await chain.executionEngine.notifyNewPayload(executionPayloadEnabled); - - switch (execResult.status) { - case ExecutePayloadStatus.VALID: - executionStatus = ExecutionStatus.Valid; - chain.forkChoice.validateLatestHash(execResult.latestValidHash, null); - break; // OK - - case ExecutePayloadStatus.INVALID: { - // If the parentRoot is not same as latestValidHash, then the branch from latestValidHash - // to parentRoot needs to be invalidated - const parentHashHex = toHexString(block.message.parentRoot); - chain.forkChoice.validateLatestHash( - execResult.latestValidHash, - parentHashHex !== execResult.latestValidHash ? parentHashHex : null - ); - throw new BlockError(block, { - code: BlockErrorCode.EXECUTION_ENGINE_ERROR, - execStatus: execResult.status, - errorMessage: execResult.validationError ?? "", - }); - } + const signatureSetsBlock = allForks.getBlockSignatureSets(preState0, block, { + skipProposerSignature: validProposerSignature, + }); - // Accepted and Syncing have the same treatment, as final validation of block is pending - case ExecutePayloadStatus.ACCEPTED: - case ExecutePayloadStatus.SYNCING: { - // It's okay to ignore SYNCING status as EL could switch into syncing - // 1. On intial startup/restart - // 2. When some reorg might have occured and EL doesn't has a parent root - // (observed on devnets) - // 3. Because of some unavailable (and potentially invalid) root but there is no way - // of knowing if this is invalid/unavailable. For unavailable block, some proposer - // will (sooner or later) build on the available parent head which will - // eventually win in fork-choice as other validators vote on VALID blocks. 
- // Once EL catches up again and respond VALID, the fork choice will be updated which - // will either validate or prune invalid blocks - // - // When to import such blocks: - // From: https://github.com/ethereum/consensus-specs/pull/2844 - // A block MUST NOT be optimistically imported, unless either of the following - // conditions are met: - // - // 1. Parent of the block has execution - // 2. The justified checkpoint has execution enabled - // 3. The current slot (as per the system clock) is at least - // SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY ahead of the slot of the block being - // imported. - - const parentRoot = toHexString(block.message.parentRoot); - const parentBlock = chain.forkChoice.getBlockHex(parentRoot); - const justifiedBlock = chain.forkChoice.getJustifiedBlock(); - - if ( - !parentBlock || - // Following condition is the !(Not) of the safe import condition - (parentBlock.executionStatus === ExecutionStatus.PreMerge && - justifiedBlock.executionStatus === ExecutionStatus.PreMerge && - block.message.slot + opts.safeSlotsToImportOptimistically > chain.clock.currentSlot) - ) { - throw new BlockError(block, { - code: BlockErrorCode.EXECUTION_ENGINE_ERROR, - execStatus: ExecutePayloadStatus.UNSAFE_OPTIMISTIC_STATUS, - errorMessage: `not safe to import ${execResult.status} payload within ${opts.safeSlotsToImportOptimistically} of currentSlot, status=${execResult.status}`, - }); - } - - executionStatus = ExecutionStatus.Syncing; - break; - } + isValidPromises.push(chain.bls.verifySignatureSets(signatureSetsBlock)); - // If the block has is not valid, or it referenced an invalid terminal block then the - // block is invalid, however it has no bearing on any forkChoice cleanup - // - // There can be other reasons for which EL failed some of the observed ones are - // 1. Connection refused / can't connect to EL port - // 2. EL Internal Error - // 3. Geth sometimes gives invalid merkel root error which means invalid - // but expects it to be handled in CL as of now. But we should log as warning - // and give it as optimistic treatment and expect any other non-geth CL<>EL - // combination to reject the invalid block and propose a block. - // On kintsugi devnet, this has been observed to cause contiguous proposal failures - // as the network is geth dominated, till a non geth node proposes and moves network - // forward - // For network/unreachable errors, an optimization can be added to replay these blocks - // back. But for now, lets assume other mechanisms like unknown parent block of a future - // child block will cause it to replay - - case ExecutePayloadStatus.INVALID_BLOCK_HASH: - case ExecutePayloadStatus.ELERROR: - case ExecutePayloadStatus.UNAVAILABLE: - throw new BlockError(block, { - code: BlockErrorCode.EXECUTION_ENGINE_ERROR, - execStatus: execResult.status, - errorMessage: execResult.validationError, - }); + // getBlockSignatureSets() takes 45ms in benchmarks for 2022Q2 mainnet blocks (100 sigs). When syncing a 32 blocks + // segments it will block the event loop for 1400 ms, which is too much. This sleep will allow the event loop to + // yield, which will cause one block's state transition to run. 
However, the tradeoff is okay and doesn't slow sync
+    if (isValidPromises.length % 8 === 0) {
+      await sleep(0);
+    }
+  }
+
+  // TODO: Submit each block's signature as a separate job to track which blocks are valid
+  if (isValidPromises.length > 0) {
+    const isValid = (await Promise.all(isValidPromises)).every((isValid) => isValid === true);
+    if (!isValid) {
+      throw new BlockError(partiallyVerifiedBlocks[0].block, {
+        code: BlockErrorCode.INVALID_SIGNATURE,
+        state: preState0,
+      });
+    }
+  }
+}
+
+/**
+ * Verifies 1 or more execution payloads from a linear sequence of blocks.
+ *
+ * Since the EL client must be aware of each parent, all payloads must be submitted in sequence.
+ */
+export async function verifyBlockExecutionPayloads(
+  chain: VerifyBlockModules,
+  partiallyVerifiedBlocks: PartiallyVerifiedBlock[],
+  preState0: CachedBeaconStateAllForks,
+  signal: AbortSignal,
+  opts: BlockProcessOpts
+): Promise<{executionStatuses: ExecutionStatus[]}> {
+  const executionStatuses: ExecutionStatus[] = [];
+
+  for (const {block} of partiallyVerifiedBlocks) {
+    // If blocks are invalid in consensus the main promise could resolve before this loop ends.
+    // In that case stop sending blocks to execution engine
+    if (signal.aborted) {
+      throw new ErrorAborted("verifyBlockExecutionPayloads");
+    }
+
+    const {executionStatus} = await verifyBlockExecutionPayload(chain, block, preState0, opts);
+    executionStatuses.push(executionStatus);
+
+    const isMergeTransitionBlock =
+      bellatrix.isBellatrixStateType(preState0) &&
+      bellatrix.isBellatrixBlockBodyType(block.message.body) &&
+      bellatrix.isMergeTransitionBlock(preState0, block.message.body);
 
     // If this is a merge transition block, check to ensure if it references
     // a valid terminal PoW block.
@@ -321,34 +386,149 @@ export async function verifyBlockStateTransition(
         return null;
       }));
 
+      // executionStatus will never == ExecutionStatus.PreMerge if it's the mergeBlock. But gotta make TS happy =D
+      if (executionStatus === ExecutionStatus.PreMerge) {
+        throw Error("Merge block must not have executionStatus == PreMerge");
+      }
+
       assertValidTerminalPowBlock(chain.config, mergeBlock, {executionStatus, powBlock, powBlockParent});
+
+      // Valid execution payload, but may not be in a valid beacon chain block. However, this log only prints the
+      // execution block's data, so even if the wrapping beacon chain block is invalid, this is still the merge block.
+      // However, if the wrapping beacon chain block is invalid this log may happen twice. Note that only blocks valid
+      // to gossip validation arrive here, so the signature and proposer are validated.
+      logOnPowBlock(chain, mergeBlock);
     }
-  } else {
-    // isExecutionEnabled() -> false
-    executionStatus = ExecutionStatus.PreMerge;
   }
 
-  // Check state root matches
-  if (!byteArrayEquals(block.message.stateRoot, postState.hashTreeRoot())) {
-    throw new BlockError(block, {
-      code: BlockErrorCode.INVALID_STATE_ROOT,
-      root: postState.hashTreeRoot(),
-      expectedRoot: block.message.stateRoot,
-      preState,
-      postState,
-    });
-  }
+  return {executionStatuses};
+}
+
+/**
+ * Verifies a single block execution payload by sending it to the EL client (via HTTP).
+ */
+export async function verifyBlockExecutionPayload(
+  chain: VerifyBlockModules,
+  block: allForks.SignedBeaconBlock,
+  preState0: CachedBeaconStateAllForks,
+  opts: BlockProcessOpts
+): Promise<{executionStatus: ExecutionStatus}> {
+  /** Not null if execution is enabled */
+  const executionPayloadEnabled =
+    bellatrix.isBellatrixStateType(preState0) &&
+    bellatrix.isBellatrixBlockBodyType(block.message.body) &&
+    // Safe to use with a state previous to block's preState. isMergeComplete can only transition from false to true.
+    // - If preState0 is after merge block: condition is true, and will always be true
+    // - If preState0 is before merge block: the block could lie but then state transition function will throw above
+    // It is kinda safe to send non-trusted payloads to the execution client because at most it can trigger sync.
+    // TODO: If this becomes a problem, do some basic verification beforehand, like checking the proposer signature.
+    bellatrix.isExecutionEnabled(preState0, block.message.body)
+      ? block.message.body.executionPayload
+      : null;
 
+  if (!executionPayloadEnabled) {
+    // isExecutionEnabled() -> false
+    return {executionStatus: ExecutionStatus.PreMerge};
+  }
 
+  // TODO: Handle better notifyNewPayload() returning an error while the EL is syncing
+  const execResult = await chain.executionEngine.notifyNewPayload(executionPayloadEnabled);
+
+  switch (execResult.status) {
+    case ExecutePayloadStatus.VALID:
+      chain.forkChoice.validateLatestHash(execResult.latestValidHash, null);
+      return {executionStatus: ExecutionStatus.Valid};
+
+    case ExecutePayloadStatus.INVALID: {
+      // If the parentRoot is not same as latestValidHash, then the branch from latestValidHash
+      // to parentRoot needs to be invalidated
+      const parentHashHex = toHexString(block.message.parentRoot);
+      chain.forkChoice.validateLatestHash(
+        execResult.latestValidHash,
+        parentHashHex !== execResult.latestValidHash ? parentHashHex : null
+      );
+      throw new BlockError(block, {
+        code: BlockErrorCode.EXECUTION_ENGINE_ERROR,
+        execStatus: execResult.status,
+        errorMessage: execResult.validationError ?? "",
+      });
+    }
+
+    // Accepted and Syncing have the same treatment, as final validation of block is pending
+    case ExecutePayloadStatus.ACCEPTED:
+    case ExecutePayloadStatus.SYNCING: {
+      // It's okay to ignore SYNCING status as EL could switch into syncing
+      // 1. On initial startup/restart
+      // 2. When some reorg might have occurred and EL doesn't have a parent root
+      //    (observed on devnets)
+      // 3. Because of some unavailable (and potentially invalid) root but there is no way
+      //    of knowing if this is invalid/unavailable. For unavailable block, some proposer
+      //    will (sooner or later) build on the available parent head which will
+      //    eventually win in fork-choice as other validators vote on VALID blocks.
+      //    Once EL catches up again and responds VALID, the fork choice will be updated which
+      //    will either validate or prune invalid blocks
+      //
+      // When to import such blocks:
+      // From: https://github.com/ethereum/consensus-specs/pull/2844
+      // A block MUST NOT be optimistically imported, unless either of the following
+      // conditions are met:
+      //
+      // 1. Parent of the block has execution
+      // 2. The justified checkpoint has execution enabled
      // 3.
The current slot (as per the system clock) is at least
+      //    SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY ahead of the slot of the block being
+      //    imported.
+
+      const parentRoot = toHexString(block.message.parentRoot);
+      const parentBlock = chain.forkChoice.getBlockHex(parentRoot);
+      const justifiedBlock = chain.forkChoice.getJustifiedBlock();
+
+      if (
+        !parentBlock ||
+        // Following condition is the !(Not) of the safe import condition
+        (parentBlock.executionStatus === ExecutionStatus.PreMerge &&
+          justifiedBlock.executionStatus === ExecutionStatus.PreMerge &&
+          block.message.slot + opts.safeSlotsToImportOptimistically > chain.clock.currentSlot)
+      ) {
+        throw new BlockError(block, {
+          code: BlockErrorCode.EXECUTION_ENGINE_ERROR,
+          execStatus: ExecutePayloadStatus.UNSAFE_OPTIMISTIC_STATUS,
+          errorMessage: `not safe to import ${execResult.status} payload within ${opts.safeSlotsToImportOptimistically} of currentSlot, status=${execResult.status}`,
+        });
+      }
+
+      return {executionStatus: ExecutionStatus.Syncing};
+    }
+
+    // If the block is not valid, or it referenced an invalid terminal block then the
+    // block is invalid, however it has no bearing on any forkChoice cleanup
+    //
+    // There can be other reasons for which EL failed some of the observed ones are
+    // 1. Connection refused / can't connect to EL port
+    // 2. EL Internal Error
+    // 3. Geth sometimes gives invalid merkle root error which means invalid
+    //    but expects it to be handled in CL as of now. But we should log as warning
+    //    and give it optimistic treatment and expect any other non-geth CL<>EL
+    //    combination to reject the invalid block and propose a block.
+    //    On kintsugi devnet, this has been observed to cause contiguous proposal failures
+    //    as the network is geth dominated, until a non-geth node proposes and moves network
+    //    forward
+    // For network/unreachable errors, an optimization can be added to replay these blocks
+    // back. But for now, let's assume other mechanisms like unknown parent block of a future
+    // child block will cause it to replay
+
+    case ExecutePayloadStatus.INVALID_BLOCK_HASH:
+    case ExecutePayloadStatus.ELERROR:
+    case ExecutePayloadStatus.UNAVAILABLE:
+      throw new BlockError(block, {
+        code: BlockErrorCode.EXECUTION_ENGINE_ERROR,
+        execStatus: execResult.status,
+        errorMessage: execResult.validationError,
+      });
+  }
 }
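The guard above is written as the negation of the spec's safe-import rule, which is easy to misread. A restatement of the positive form — a sketch only, with illustrative names, assuming the block's parent is already known to fork-choice (the missing-parent case throws unconditionally):

```ts
import {Slot} from "@chainsafe/lodestar-types";

// Restates consensus-specs#2844: an ACCEPTED/SYNCING payload may be
// optimistically imported if ANY of the following conditions holds.
function isOptimisticImportSafe(args: {
  parentHasExecution: boolean; // 1. parent of the block has execution
  justifiedHasExecution: boolean; // 2. justified checkpoint has execution enabled
  blockSlot: Slot;
  currentSlot: Slot;
  safeSlotsToImportOptimistically: number;
}): boolean {
  return (
    args.parentHasExecution ||
    args.justifiedHasExecution ||
    // 3. clock is at least SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY ahead of the block's slot
    args.blockSlot + args.safeSlotsToImportOptimistically <= args.currentSlot
  );
}
```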
 
-function logOnPowBlock(chain: VerifyBlockModules, block: bellatrix.SignedBeaconBlock): void {
-  const mergeBlock = block.message;
+function logOnPowBlock(chain: VerifyBlockModules, mergeBlock: bellatrix.BeaconBlock): void {
   const mergeBlockHash = toHexString(chain.config.getForkTypes(mergeBlock.slot).BeaconBlock.hashTreeRoot(mergeBlock));
   const mergeExecutionHash = toHexString(mergeBlock.body.executionPayload.blockHash);
   const mergePowHash = toHexString(mergeBlock.body.executionPayload.parentHash);
diff --git a/packages/lodestar/src/chain/bls/multithread/poolSize.ts b/packages/lodestar/src/chain/bls/multithread/poolSize.ts
index e32c976c668..c63bc59610a 100644
--- a/packages/lodestar/src/chain/bls/multithread/poolSize.ts
+++ b/packages/lodestar/src/chain/bls/multithread/poolSize.ts
@@ -7,7 +7,7 @@ try {
     defaultPoolSize = (await import("node:os")).cpus().length;
   }
 } catch (e) {
-  defaultPoolSize = 8;
+  defaultPoolSize = 4;
 }
 
 /**
diff --git a/packages/lodestar/src/chain/chain.ts b/packages/lodestar/src/chain/chain.ts
index 408e5ac3e8d..1afe173b2aa 100644
--- a/packages/lodestar/src/chain/chain.ts
+++ b/packages/lodestar/src/chain/chain.ts
@@ -8,6 +8,7 @@ import {
   CachedBeaconStateAllForks,
   computeStartSlotAtEpoch,
   createCachedBeaconState,
+  isCachedBeaconState,
   Index2PubkeyCache,
   PubkeyIndexMap,
 } from "@chainsafe/lodestar-beacon-state-transition";
@@ -147,18 +148,26 @@ export class BeaconChain implements IBeaconChain {
     this.seenAggregatedAttestations = new SeenAggregatedAttestations(metrics);
     this.seenContributionAndProof = new SeenContributionAndProof(metrics);
 
-    // Initialize single global instance of state caches
-    this.pubkey2index = new PubkeyIndexMap();
-    this.index2pubkey = [];
+    // anchorState may already be a CachedBeaconState. If so, don't create the cache again, since deserializing all
+    // pubkeys takes ~30 seconds for 350k keys (mainnet 2022Q2).
+    // When the BeaconStateCache is created in eth1 genesis builder it may be incorrect. Until we can ensure that
+    // it's safe to re-use _ANY_ BeaconStateCache, this option is disabled by default and only used in tests.
+    const cachedState =
+      isCachedBeaconState(anchorState) && opts.skipCreateStateCacheIfAvailable
+        ? anchorState
+        : createCachedBeaconState(anchorState, {
+            config,
+            pubkey2index: new PubkeyIndexMap(),
+            index2pubkey: [],
+          });
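A minimal sketch of what the `isCachedBeaconState` typeguard (added to stateCache.ts earlier in this patch) buys at a call site like this one. Import paths follow the package layout used elsewhere in the diff; the helper itself is illustrative:

```ts
import {
  BeaconStateAllForks,
  BeaconStateCache,
  isCachedBeaconState,
} from "@chainsafe/lodestar-beacon-state-transition";

// Returns the attached EpochContext when the anchor state already has one.
// Inside the if-branch the type is narrowed to `T & BeaconStateCache`.
function tryReuseCache(anchorState: BeaconStateAllForks | (BeaconStateAllForks & BeaconStateCache)) {
  if (isCachedBeaconState(anchorState)) {
    return anchorState.epochCtx; // pubkey2index / index2pubkey already populated
  }
  return null; // caller must run createCachedBeaconState() (~30s for 350k keys)
}
```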
+
+    // Persist single global instance of state caches
+    this.pubkey2index = cachedState.epochCtx.pubkey2index;
+    this.index2pubkey = cachedState.epochCtx.index2pubkey;
 
     this.beaconProposerCache = new BeaconProposerCache(opts);
 
     // Restore state caches
-    const cachedState = createCachedBeaconState(anchorState, {
-      config,
-      pubkey2index: this.pubkey2index,
-      index2pubkey: this.index2pubkey,
-    });
     const {checkpoint} = computeAnchorCheckpoint(config, anchorState);
     stateCache.add(cachedState);
     checkpointStateCache.add(checkpoint, cachedState);
diff --git a/packages/lodestar/src/chain/lightClient/index.ts b/packages/lodestar/src/chain/lightClient/index.ts
index a7e1f53ae55..352af40b344 100644
--- a/packages/lodestar/src/chain/lightClient/index.ts
+++ b/packages/lodestar/src/chain/lightClient/index.ts
@@ -190,11 +190,7 @@ export class LightClientServer {
    * - Persist state witness
    * - Use block's syncAggregate
    */
-  onImportBlockHead(
-    block: altair.BeaconBlock,
-    postState: CachedBeaconStateAltair,
-    parentBlock: {blockRoot: RootHex; slot: Slot}
-  ): void {
+  onImportBlockHead(block: altair.BeaconBlock, postState: CachedBeaconStateAltair, parentBlockSlot: Slot): void {
     // What is the syncAggregate signing?
     // From the beacon-state-transition
     // ```
@@ -210,7 +206,7 @@ export class LightClientServer {
       this.logger.error("Error onSyncAggregate", {}, e);
     });
 
-    this.persistPostBlockImportData(block, postState, parentBlock).catch((e) => {
+    this.persistPostBlockImportData(block, postState, parentBlockSlot).catch((e) => {
       this.logger.error("Error persistPostBlockImportData", {}, e);
     });
   }
@@ -331,7 +327,7 @@ export class LightClientServer {
   private async persistPostBlockImportData(
     block: altair.BeaconBlock,
     postState: CachedBeaconStateAltair,
-    parentBlock: {blockRoot: RootHex; slot: Slot}
+    parentBlockSlot: Slot
   ): Promise<void> {
     const blockSlot = block.slot;
 
@@ -359,11 +355,11 @@ export class LightClientServer {
     }
 
     // Only store next sync committee once per dependant root
-    const parentBlockPeriod = computeSyncPeriodAtSlot(parentBlock.slot);
+    const parentBlockPeriod = computeSyncPeriodAtSlot(parentBlockSlot);
     const period = computeSyncPeriodAtSlot(blockSlot);
     if (parentBlockPeriod < period) {
       // If the parentBlock is in a previous epoch it must be the dependantRoot of this epoch transition
-      const dependantRoot = parentBlock.blockRoot;
+      const dependantRoot = toHexString(block.parentRoot);
       const periodDependantRoots = this.knownSyncCommittee.getOrDefault(period);
       if (!periodDependantRoots.has(dependantRoot)) {
         periodDependantRoots.add(dependantRoot);
diff --git a/packages/lodestar/src/chain/options.ts b/packages/lodestar/src/chain/options.ts
index 5af30b5b233..8aee6ec2432 100644
--- a/packages/lodestar/src/chain/options.ts
+++ b/packages/lodestar/src/chain/options.ts
@@ -12,6 +12,7 @@ export type IChainOptions = BlockProcessOpts &
     persistInvalidSszObjects?: boolean;
     persistInvalidSszObjectsDir?: string;
     defaultFeeRecipient: string;
+    skipCreateStateCacheIfAvailable?: boolean;
   };
 
 export type BlockProcessOpts = {
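For context, this is roughly how the new flag is consumed — the verifyImportBlocks perf test later in this patch enables it when handing an already-cached state to a fresh `BeaconChain`. The constructor module wiring is abbreviated here and `defaultChainOptions` is assumed to be the exported defaults object:

```ts
import {BeaconChain} from "../chain/index.js";
import {defaultChainOptions} from "../chain/options.js";

// `modules` (config, db, logger, anchorState, eth1, executionEngine, ...) is
// assumed to be set up elsewhere; only the option is the point of this sketch.
declare const modules: ConstructorParameters<typeof BeaconChain>[1];

// Because anchorState is already a CachedBeaconState, the chain re-uses its
// EpochContext instead of re-deserializing all pubkeys (~30s on mainnet).
const chain = new BeaconChain({...defaultChainOptions, skipCreateStateCacheIfAvailable: true}, modules);
```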
diff --git a/packages/lodestar/src/node/utils/interop/state.ts b/packages/lodestar/src/node/utils/interop/state.ts
index 6fd1731393f..1bc144353d3 100644
--- a/packages/lodestar/src/node/utils/interop/state.ts
+++ b/packages/lodestar/src/node/utils/interop/state.ts
@@ -31,6 +31,7 @@ export function getInteropState(
   latestPayloadHeader.prevRandao = eth1BlockHash;
   latestPayloadHeader.gasLimit = GENESIS_GAS_LIMIT;
   latestPayloadHeader.baseFeePerGas = GENESIS_BASE_FEE_PER_GAS;
+
   const state = initializeBeaconStateFromEth1(
     config,
     createEmptyEpochContextImmutableData(config, {genesisValidatorsRoot: Buffer.alloc(32, 0)}),
@@ -40,6 +41,8 @@ export function getInteropState(
     fullDepositDataRootList,
     latestPayloadHeader
   );
+  state.genesisTime = genesisTime;
+
   return state;
 }
diff --git a/packages/lodestar/src/sync/constants.ts b/packages/lodestar/src/sync/constants.ts
index 52259eb31f4..e0032b43bd0 100644
--- a/packages/lodestar/src/sync/constants.ts
+++ b/packages/lodestar/src/sync/constants.ts
@@ -10,8 +10,8 @@ export const MAX_BATCH_DOWNLOAD_ATTEMPTS = 5;
 /** Consider batch faulty after downloading and processing this number of times */
 export const MAX_BATCH_PROCESSING_ATTEMPTS = 3;
 
-/** Batch range excludes the first block of the epoch. @see Batch */
-export const BATCH_SLOT_OFFSET = 1;
+/** Batch range aligned to the first block of the epoch. @see Batch */
+export const BATCH_SLOT_OFFSET = 0;
 
 /** First epoch to allow to start gossip */
 export const MIN_EPOCH_TO_START_GOSSIP = -1;
@@ -24,13 +24,13 @@ export const MIN_EPOCH_TO_START_GOSSIP = -1;
  * case the responder will fill the response up to the max request size, assuming they have the
 * bandwidth to do so.
 */
-export const EPOCHS_PER_BATCH = 2;
+export const EPOCHS_PER_BATCH = 1;
 
 /**
  * The maximum number of batches to queue before requesting more.
  * In good network conditions downloading batches is much faster than processing them
- * A number > 5 results in wasted progress when the chain completes syncing
+ * A buffer of more than 10 epochs' worth of batches results in wasted progress when the chain completes syncing
  *
  * TODO: When switching branches usually all batches in AwaitingProcessing are dropped, could it be optimized?
  */
-export const BATCH_BUFFER_SIZE = 5;
+export const BATCH_BUFFER_SIZE = 10;
diff --git a/packages/lodestar/src/sync/range/batch.ts b/packages/lodestar/src/sync/range/batch.ts
index f35d8f357aa..c08a0093c42 100644
--- a/packages/lodestar/src/sync/range/batch.ts
+++ b/packages/lodestar/src/sync/range/batch.ts
@@ -54,14 +54,16 @@ export type BatchMetadata = {
 };
 
 /**
- * Batches are downloaded excluding the first block of the epoch assuming it has already been
- * downloaded.
+ * Batches are downloaded starting at the first block of the epoch.
  *
  * For example:
  *
  * Epoch boundary |                                   |
  *  ... | 30 | 31 | 32 | 33 | 34 | ... | 61 | 62 | 63 | 64 | 65 |
- *        Batch 1       |              Batch 2              |  Batch 3
+ *       Batch 1       |              Batch 2              |  Batch 3
+ *
+ * Note: this diverges from the previous implementation, where the Batch for epoch N included the first slot of
+ * epoch N + 1. The effects of this change have not been tested yet.
*/ export class Batch { readonly startEpoch: Epoch; diff --git a/packages/lodestar/test/e2e/db/api/beacon/repositories/blockArchive.test.ts b/packages/lodestar/test/e2e/db/api/beacon/repositories/blockArchive.test.ts index f7546f0af5b..c70189a5d9d 100644 --- a/packages/lodestar/test/e2e/db/api/beacon/repositories/blockArchive.test.ts +++ b/packages/lodestar/test/e2e/db/api/beacon/repositories/blockArchive.test.ts @@ -1,12 +1,12 @@ import {expect} from "chai"; import {config} from "@chainsafe/lodestar-config/default"; -import {LevelDbController} from "@chainsafe/lodestar-db"; import {fromHexString} from "@chainsafe/ssz"; import {allForks, phase0, ssz} from "@chainsafe/lodestar-types"; import {BeaconDb} from "../../../../../../src/db/index.js"; import {generateSignedBlock} from "../../../../../utils/block.js"; import {testLogger} from "../../../../../utils/logger.js"; import {BlockArchiveBatchPutBinaryItem} from "../../../../../../src/db/repositories/index.js"; +import {startTmpBeaconDb} from "../../../../../utils/db.js"; describe("BlockArchiveRepository", function () { let db: BeaconDb; @@ -35,11 +35,7 @@ describe("BlockArchiveRepository", function () { }); before(async () => { - db = new BeaconDb({ - config, - controller: new LevelDbController({name: ".tmpdb"}, {logger}), - }); - await db.start(); + db = await startTmpBeaconDb(config, logger); }); after(async () => { diff --git a/packages/lodestar/test/e2e/interop/genesisState.test.ts b/packages/lodestar/test/e2e/interop/genesisState.test.ts index e51b6a8f229..1b6d1837aa3 100644 --- a/packages/lodestar/test/e2e/interop/genesisState.test.ts +++ b/packages/lodestar/test/e2e/interop/genesisState.test.ts @@ -1,23 +1,19 @@ import {expect} from "chai"; import {toHexString} from "@chainsafe/ssz"; -import {LevelDbController} from "@chainsafe/lodestar-db"; import {config} from "@chainsafe/lodestar-config/default"; import {ssz} from "@chainsafe/lodestar-types"; import {BeaconDb} from "../../../src/index.js"; import {initDevState} from "../../../src/node/utils/state.js"; import {testLogger} from "../../utils/logger.js"; import {interopDeposits} from "../../../src/node/utils/interop/deposits.js"; +import {startTmpBeaconDb} from "../../utils/db.js"; describe("interop / initDevState", () => { let db: BeaconDb; const logger = testLogger(); before(async () => { - db = new BeaconDb({ - config, - controller: new LevelDbController({name: ".tmpdb"}, {logger}), - }); - await db.start(); + db = await startTmpBeaconDb(config, logger); }); after(async () => { diff --git a/packages/lodestar/test/perf/chain/verifyImportBlocks.test.ts b/packages/lodestar/test/perf/chain/verifyImportBlocks.test.ts index 4e4e2d71bbc..661bc7bfc19 100644 --- a/packages/lodestar/test/perf/chain/verifyImportBlocks.test.ts +++ b/packages/lodestar/test/perf/chain/verifyImportBlocks.test.ts @@ -83,6 +83,7 @@ describe("verify+import blocks - range sync perf test", () => { safeSlotsToImportOptimistically: SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY, disableArchiveOnCheckpoint: true, defaultFeeRecipient: defaultDefaultFeeRecipient, + skipCreateStateCacheIfAvailable: true, }, { config: state.config, diff --git a/packages/lodestar/test/spec/presets/operations.ts b/packages/lodestar/test/spec/presets/operations.ts index f3ac81ba3a3..a0eadaba828 100644 --- a/packages/lodestar/test/spec/presets/operations.ts +++ b/packages/lodestar/test/spec/presets/operations.ts @@ -7,6 +7,7 @@ import { CachedBeaconStateAltair, CachedBeaconStateBellatrix, CachedBeaconStatePhase0, + getBlockRootAtSlot, phase0, } from 
"@chainsafe/lodestar-beacon-state-transition"; import {processExecutionPayload} from "@chainsafe/lodestar-beacon-state-transition/bellatrix"; @@ -30,6 +31,7 @@ const sync_aggregate: BlockProcessFn verifyBlockSanityChecks(modules, {block}), BlockErrorCode.PARENT_UNKNOWN); + expectThrowsLodestarError(() => verifyBlocksSanityChecks(modules, [{block}]), BlockErrorCode.PARENT_UNKNOWN); }); it("GENESIS_BLOCK", function () { block.message.slot = 0; - expectThrowsLodestarError(() => verifyBlockSanityChecks(modules, {block}), BlockErrorCode.GENESIS_BLOCK); + expectThrowsLodestarError(() => verifyBlocksSanityChecks(modules, [{block}]), BlockErrorCode.GENESIS_BLOCK); }); it("ALREADY_KNOWN", function () { forkChoice.hasBlockHex.returns(true); - expectThrowsLodestarError(() => verifyBlockSanityChecks(modules, {block}), BlockErrorCode.ALREADY_KNOWN); + expectThrowsLodestarError(() => verifyBlocksSanityChecks(modules, [{block}]), BlockErrorCode.ALREADY_KNOWN); }); it("WOULD_REVERT_FINALIZED_SLOT", function () { forkChoice.getFinalizedCheckpoint.returns({epoch: 5, root: Buffer.alloc(32), rootHex: ""}); expectThrowsLodestarError( - () => verifyBlockSanityChecks(modules, {block}), + () => verifyBlocksSanityChecks(modules, [{block}]), BlockErrorCode.WOULD_REVERT_FINALIZED_SLOT ); }); it("FUTURE_SLOT", function () { block.message.slot = currentSlot + 1; - expectThrowsLodestarError(() => verifyBlockSanityChecks(modules, {block}), BlockErrorCode.FUTURE_SLOT); + expectThrowsLodestarError(() => verifyBlocksSanityChecks(modules, [{block}]), BlockErrorCode.FUTURE_SLOT); }); }); diff --git a/packages/lodestar/test/unit/sync/range/batch.test.ts b/packages/lodestar/test/unit/sync/range/batch.test.ts index 8fa5d7d17bf..294d00817a1 100644 --- a/packages/lodestar/test/unit/sync/range/batch.test.ts +++ b/packages/lodestar/test/unit/sync/range/batch.test.ts @@ -5,6 +5,7 @@ import {config} from "@chainsafe/lodestar-config/default"; import {generateEmptySignedBlock} from "../../../utils/block.js"; import {expectThrowsLodestarError} from "../../../utils/errors.js"; import {Batch, BatchOpts, BatchStatus, BatchErrorCode, BatchError} from "../../../../src/sync/range/batch.js"; +import {BATCH_SLOT_OFFSET} from "../../../../src/sync/constants.js"; describe("sync / range / batch", () => { const opts: BatchOpts = {epochsPerBatch: 2}; @@ -17,7 +18,7 @@ describe("sync / range / batch", () => { it("Should return correct blockByRangeRequest", () => { const batch = new Batch(startEpoch, config, opts); expect(batch.request).to.deep.equal({ - startSlot: 1, + startSlot: BATCH_SLOT_OFFSET, count: SLOTS_PER_EPOCH * opts.epochsPerBatch, step: 1, }); diff --git a/packages/lodestar/test/utils/db.ts b/packages/lodestar/test/utils/db.ts index 3e90dbd05f7..53bcd353e3a 100644 --- a/packages/lodestar/test/utils/db.ts +++ b/packages/lodestar/test/utils/db.ts @@ -1,4 +1,23 @@ -import {IFilterOptions} from "@chainsafe/lodestar-db"; +import child_process from "node:child_process"; +import {IFilterOptions, LevelDbController} from "@chainsafe/lodestar-db"; +import {IChainForkConfig} from "@chainsafe/lodestar-config"; +import {ILogger} from "@chainsafe/lodestar-utils"; +import {BeaconDb} from "../../src"; + +export const TEMP_DB_LOCATION = ".tmpdb"; + +export async function startTmpBeaconDb(config: IChainForkConfig, logger: ILogger): Promise { + // Clean-up db first + child_process.execSync(`rm -rf ${TEMP_DB_LOCATION}`); + + const db = new BeaconDb({ + config, + controller: new LevelDbController({name: TEMP_DB_LOCATION}, {logger}), + }); + await db.start(); 
+ + return db; +} /** * Helper to filter an array with DB IFilterOptions options
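For completeness, a sketch of how the new helper is consumed by the e2e tests updated above. The hook structure mirrors blockArchive.test.ts; the relative import paths and the `db.stop()` teardown are assumptions based on the surrounding code:

```ts
import {config} from "@chainsafe/lodestar-config/default";
import {BeaconDb} from "../../src/index.js";
import {testLogger} from "../utils/logger.js";
import {startTmpBeaconDb} from "../utils/db.js";

describe("some repository", function () {
  const logger = testLogger();
  let db: BeaconDb;

  before(async () => {
    // Wipes .tmpdb first, so state leaked by a crashed previous run cannot bleed in
    db = await startTmpBeaconDb(config, logger);
  });

  after(async () => {
    await db.stop();
  });

  it("reads and writes", async () => {
    // ... exercise db repositories here
  });
});
```

Note that the helper shells out to `rm -rf`, so it assumes a POSIX environment — consistent with how these e2e tests already run.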