From a5315af60f879f9ce4daf1152caf7b56d0aecc32 Mon Sep 17 00:00:00 2001 From: Jason Frame Date: Wed, 6 Mar 2024 18:11:36 +1000 Subject: [PATCH 01/39] rebase of bonsai archive Signed-off-by: Jason Frame --- .../controller/BesuControllerBuilder.java | 6 + .../BonsaiWorldStateKeyValueStorage.java | 5 + .../bonsai/worldview/BonsaiWorldState.java | 2 +- .../trie/diffbased/common/BonsaiContext.java | 46 ++++ .../common/DiffBasedWorldStateProvider.java | 13 + .../flat/ArchiveCodeStorageStrategy.java | 101 ++++++++ .../storage/flat/ArchiveFlatDbStrategy.java | 232 ++++++++++++++++++ .../common/storage/flat/FlatDbStrategy.java | 10 + .../storage/flat/FlatDbStrategyProvider.java | 14 ++ .../besu/ethereum/worldstate/FlatDbMode.java | 11 +- .../flat/ArchiveFlatDbReaderStrategyTest.java | 18 ++ .../request/AccountRangeDataRequest.java | 17 +- .../request/StorageRangeDataRequest.java | 20 +- 13 files changed, 476 insertions(+), 19 deletions(-) create mode 100644 ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/BonsaiContext.java create mode 100644 ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/ArchiveCodeStorageStrategy.java create mode 100644 ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/ArchiveFlatDbStrategy.java create mode 100644 ethereum/core/src/test/java/org/hyperledger/besu/ethereum/bonsai/storage/flat/ArchiveFlatDbReaderStrategyTest.java diff --git a/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java b/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java index 41a24890ca3..9da2a601f32 100644 --- a/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java +++ b/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java @@ -1106,6 +1106,12 @@ WorldStateArchive createWorldStateArchive( case BONSAI -> { final BonsaiWorldStateKeyValueStorage worldStateKeyValueStorage = worldStateStorageCoordinator.getStrategy(BonsaiWorldStateKeyValueStorage.class); + + + // TODO, better integrate. 
Just for PoC, explicitly set our bonsai context chain head: + worldStateKeyValueStorage.getFlatDbStrategy() + .updateBlockContext(blockchain.getChainHeadHeader()); + yield new BonsaiWorldStateProvider( worldStateKeyValueStorage, blockchain, diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/BonsaiWorldStateKeyValueStorage.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/BonsaiWorldStateKeyValueStorage.java index 9f40d2bd350..dbb4ab1dfe2 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/BonsaiWorldStateKeyValueStorage.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/BonsaiWorldStateKeyValueStorage.java @@ -73,6 +73,11 @@ public BonsaiWorldStateKeyValueStorage( this.flatDbStrategyProvider = flatDbStrategyProvider; } + public BonsaiWorldStateKeyValueStorage getContextSafeCopy() { + return new BonsaiWorldStateKeyValueStorage( + flatDbStrategyProvider.contextSafeClone(), composedWorldStateStorage, trieLogStorage); + } + @Override public DataStorageFormat getDataStorageFormat() { return DataStorageFormat.BONSAI; diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiWorldState.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiWorldState.java index b62805c1fda..d00afda9ee3 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiWorldState.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiWorldState.java @@ -121,7 +121,7 @@ public Optional getCode(@Nonnull final Address address, final Hash codeHa @Override public BonsaiWorldStateKeyValueStorage getWorldStateStorage() { - return (BonsaiWorldStateKeyValueStorage) worldStateKeyValueStorage; + return ((BonsaiWorldStateKeyValueStorage) worldStateKeyValueStorage).getContextSafeCopy(); } @Override diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/BonsaiContext.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/BonsaiContext.java new file mode 100644 index 00000000000..d41ee7472f1 --- /dev/null +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/BonsaiContext.java @@ -0,0 +1,46 @@ +/* + * Copyright Hyperledger Besu Contributors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + * + */ +package org.hyperledger.besu.ethereum.bonsai; + +import org.hyperledger.besu.plugin.data.BlockHeader; + +import java.util.Optional; +import java.util.concurrent.atomic.AtomicReference; + +/** Context which holds information relevant to a bonsai archive storage query. 
*/ +public class BonsaiContext { + + private final AtomicReference blockHeader; + + public BonsaiContext() { + blockHeader = new AtomicReference<>(); + } + + public BonsaiContext copy() { + var newCtx = new BonsaiContext(); + Optional.ofNullable(blockHeader.get()).ifPresent(newCtx::setBlockHeader); + return newCtx; + } + + public BonsaiContext setBlockHeader(final BlockHeader blockHeader) { + this.blockHeader.set(blockHeader); + return this; + } + + public Optional getBlockHeader() { + return Optional.ofNullable(blockHeader.get()); + } +} diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/DiffBasedWorldStateProvider.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/DiffBasedWorldStateProvider.java index 75b370c1cd0..470f6754e94 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/DiffBasedWorldStateProvider.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/DiffBasedWorldStateProvider.java @@ -130,6 +130,19 @@ public Optional getMutable( if (shouldPersistState) { return getMutable(blockHeader.getStateRoot(), blockHeader.getHash()); } else { + // TODO this needs to be better integrated && ensure block is canonical + // HACK for kikori PoC, if we have the trielog for this block, we can assume we have it in + // flatDB + // although, in practice we can only serve canonical chain worldstates and need to fall back + // to state rolling if the requested block is a fork. + if (this.worldStateStorage.getFlatDbStrategy() instanceof ArchiveFlatDbStrategy + && trieLogManager.getTrieLogLayer(blockHeader.getBlockHash()).isPresent()) { + + var contextSafeCopy = worldStateStorage.getContextSafeCopy(); + contextSafeCopy.getFlatDbStrategy().updateBlockContext(blockHeader); + return Optional.of(new BonsaiWorldState(this, contextSafeCopy, evmConfiguration)); + } + final BlockHeader chainHeadBlockHeader = blockchain.getChainHeadHeader(); if (chainHeadBlockHeader.getNumber() - blockHeader.getNumber() >= trieLogManager.getMaxLayersToLoad()) { diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/ArchiveCodeStorageStrategy.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/ArchiveCodeStorageStrategy.java new file mode 100644 index 00000000000..3628a0d19a4 --- /dev/null +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/ArchiveCodeStorageStrategy.java @@ -0,0 +1,101 @@ +/* + * Copyright Hyperledger Besu Contributors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.hyperledger.besu.ethereum.trie.bonsai.storage.flat; + +import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.CODE_STORAGE; +import static org.hyperledger.besu.ethereum.trie.bonsai.storage.flat.ArchiveFlatDbStrategy.DELETED_CODE_VALUE; +import static org.hyperledger.besu.ethereum.trie.bonsai.storage.flat.ArchiveFlatDbStrategy.calculateArchiveKeyWithMinSuffix; + +import org.hyperledger.besu.datatypes.Hash; +import org.hyperledger.besu.ethereum.bonsai.BonsaiContext; +import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage; +import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction; + +import java.util.Optional; + +import org.apache.tuweni.bytes.Bytes; +import org.bouncycastle.util.Arrays; + +public class ArchiveCodeStorageStrategy implements CodeStorageStrategy { + + private final BonsaiContext context; + + public ArchiveCodeStorageStrategy(final BonsaiContext context) { + this.context = context; + } + + /* + * Retrieves the code data for the given code hash and account hash and block context. + */ + @Override + public Optional getFlatCode( + final Hash codeHash, final Hash accountHash, final SegmentedKeyValueStorage storage) { + if (codeHash.equals(Hash.EMPTY)) { + return Optional.of(Bytes.EMPTY); + } else { + + // keyNearest, use MAX_BLOCK_SUFFIX in the absence of a block context: + Bytes keyNearest = + ArchiveFlatDbStrategy.calculateArchiveKeyWithMaxSuffix( + context, accountHash.toArrayUnsafe()); + + // use getNearest() with an account key that is suffixed by the block context + final Optional codeFound = + storage + .getNearestTo(CODE_STORAGE, keyNearest) + // return empty when we find a "deleted value key" + .filter( + found -> + !Arrays.areEqual( + DELETED_CODE_VALUE, found.value().orElse(DELETED_CODE_VALUE))) + // map NearestKey to Bytes-wrapped value + .flatMap(SegmentedKeyValueStorage.NearestKeyValue::wrapBytes) + // check codeHash to sanity check the value and ensure we have the correct nearestKey: + .filter(b -> Hash.hash(b).equals(codeHash)); + + return codeFound; + } + } + + /* + * Puts the code data for the given code hash and account hash and block context. + */ + @Override + public void putFlatCode( + final SegmentedKeyValueStorageTransaction transaction, + final Hash accountHash, + final Hash codeHash, + final Bytes code) { + // key suffixed with block context, or MIN_BLOCK_SUFFIX if we have no context: + byte[] keySuffixed = calculateArchiveKeyWithMinSuffix(context, accountHash.toArrayUnsafe()); + + transaction.put(CODE_STORAGE, keySuffixed, code.toArrayUnsafe()); + } + + /* + * Adds a "deleted key" code entry for the given account hash and block context. 
+ */ + @Override + public void removeFlatCode( + final SegmentedKeyValueStorageTransaction transaction, + final Hash accountHash, + final Hash codeHash) { + // insert a key suffixed with block context, with 'deleted account' value + byte[] keySuffixed = calculateArchiveKeyWithMinSuffix(context, accountHash.toArrayUnsafe()); + + transaction.put(CODE_STORAGE, keySuffixed, DELETED_CODE_VALUE); + } +} diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/ArchiveFlatDbStrategy.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/ArchiveFlatDbStrategy.java new file mode 100644 index 00000000000..10d1698e32a --- /dev/null +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/ArchiveFlatDbStrategy.java @@ -0,0 +1,232 @@ +/* + * Copyright Hyperledger Besu Contributors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + * + */ +package org.hyperledger.besu.ethereum.trie.bonsai.storage.flat; + +import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE; +import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE; + +import org.hyperledger.besu.datatypes.Hash; +import org.hyperledger.besu.datatypes.StorageSlotKey; +import org.hyperledger.besu.ethereum.bonsai.BonsaiContext; +import org.hyperledger.besu.ethereum.trie.NodeLoader; +import org.hyperledger.besu.plugin.data.BlockHeader; +import org.hyperledger.besu.plugin.services.MetricsSystem; +import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage; +import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction; + +import java.util.Optional; +import java.util.function.Supplier; + +import org.apache.tuweni.bytes.Bytes; +import org.bouncycastle.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class ArchiveFlatDbStrategy extends FullFlatDbStrategy { + private final BonsaiContext context; + private static final Logger LOG = LoggerFactory.getLogger(ArchiveFlatDbStrategy.class); + + public ArchiveFlatDbStrategy( + final BonsaiContext context, + final MetricsSystem metricsSystem, + final CodeStorageStrategy codeStorageStrategy) { + super(metricsSystem, codeStorageStrategy); + this.context = context; + } + + static final byte[] MAX_BLOCK_SUFFIX = Bytes.ofUnsignedLong(Long.MAX_VALUE).toArrayUnsafe(); + static final byte[] MIN_BLOCK_SUFFIX = Bytes.ofUnsignedLong(0L).toArrayUnsafe(); + static final byte[] DELETED_ACCOUNT_VALUE = new byte[0]; + public static final byte[] DELETED_CODE_VALUE = new byte[0]; + static final byte[] DELETED_STORAGE_VALUE = new byte[0]; + + @Override + public Optional getFlatAccount( + final Supplier> worldStateRootHashSupplier, + final NodeLoader nodeLoader, + final Hash accountHash, + final SegmentedKeyValueStorage storage) { + getAccountCounter.inc(); + + // keyNearest, use 
MAX_BLOCK_SUFFIX in the absence of a block context: + Bytes keyNearest = calculateArchiveKeyWithMaxSuffix(context, accountHash.toArrayUnsafe()); + + // use getNearest() with an account key that is suffixed by the block context + final Optional accountFound = + storage + .getNearestTo(ACCOUNT_INFO_STATE, keyNearest) + // return empty when we find a "deleted value key" + .filter( + found -> + !Arrays.areEqual( + DELETED_ACCOUNT_VALUE, found.value().orElse(DELETED_ACCOUNT_VALUE))) + // don't return accounts that do not have a matching account hash + .filter(found -> accountHash.commonPrefixLength(found.key()) >= accountHash.size()) + .flatMap(SegmentedKeyValueStorage.NearestKeyValue::wrapBytes); + + if (accountFound.isPresent()) { + getAccountFoundInFlatDatabaseCounter.inc(); + } else { + getAccountNotFoundInFlatDatabaseCounter.inc(); + } + return accountFound; + } + + /* + * Puts the account data for the given account hash and block context. + */ + @Override + public void putFlatAccount( + final SegmentedKeyValueStorageTransaction transaction, + final Hash accountHash, + final Bytes accountValue) { + + // key suffixed with block context, or MIN_BLOCK_SUFFIX if we have no context: + byte[] keySuffixed = calculateArchiveKeyWithMinSuffix(context, accountHash.toArrayUnsafe()); + + transaction.put(ACCOUNT_INFO_STATE, keySuffixed, accountValue.toArrayUnsafe()); + } + + @Override + public void removeFlatAccount( + final SegmentedKeyValueStorageTransaction transaction, final Hash accountHash) { + + // insert a key suffixed with block context, with 'deleted account' value + byte[] keySuffixed = calculateArchiveKeyWithMinSuffix(context, accountHash.toArrayUnsafe()); + + transaction.put(ACCOUNT_INFO_STATE, keySuffixed, DELETED_ACCOUNT_VALUE); + } + + /* + * Retrieves the storage value for the given account hash and storage slot key, using the world state root hash supplier, storage root supplier, and node loader. + */ + @Override + public Optional getFlatStorageValueByStorageSlotKey( + final Supplier> worldStateRootHashSupplier, + final Supplier> storageRootSupplier, + final NodeLoader nodeLoader, + final Hash accountHash, + final StorageSlotKey storageSlotKey, + final SegmentedKeyValueStorage storage) { + getStorageValueCounter.inc(); + + // get natural key from account hash and slot key + byte[] naturalKey = calculateNaturalSlotKey(accountHash, storageSlotKey.getSlotHash()); + // keyNearest, use MAX_BLOCK_SUFFIX in the absence of a block context: + Bytes keyNearest = calculateArchiveKeyWithMaxSuffix(context, naturalKey); + + // use getNearest() with a key that is suffixed by the block context + final Optional storageFound = + storage + .getNearestTo(ACCOUNT_STORAGE_STORAGE, keyNearest) + // return empty when we find a "deleted value key" + .filter( + found -> + !Arrays.areEqual( + DELETED_STORAGE_VALUE, found.value().orElse(DELETED_STORAGE_VALUE))) + // don't return accounts that do not have a matching account hash and slotHash prefix + .filter( + found -> Bytes.of(naturalKey).commonPrefixLength(found.key()) >= naturalKey.length) + // map NearestKey to Bytes-wrapped value + .flatMap(SegmentedKeyValueStorage.NearestKeyValue::wrapBytes); + + if (storageFound.isPresent()) { + getStorageValueFlatDatabaseCounter.inc(); + } else { + getStorageValueNotFoundInFlatDatabaseCounter.inc(); + } + return storageFound; + } + + /* + * Puts the storage value for the given account hash and storage slot key, using the world state root hash supplier, storage root supplier, and node loader. 
+ */ + @Override + public void putFlatAccountStorageValueByStorageSlotHash( + final SegmentedKeyValueStorageTransaction transaction, + final Hash accountHash, + final Hash slotHash, + final Bytes storage) { + + // get natural key from account hash and slot key + byte[] naturalKey = calculateNaturalSlotKey(accountHash, slotHash); + // keyNearest, use MIN_BLOCK_SUFFIX in the absence of a block context: + byte[] keyNearest = calculateArchiveKeyWithMinSuffix(context, naturalKey); + + transaction.put(ACCOUNT_STORAGE_STORAGE, keyNearest, storage.toArrayUnsafe()); + } + + /* + * Removes the storage value for the given account hash and storage slot key, using the world state root hash supplier, storage root supplier, and node loader. + */ + @Override + public void removeFlatAccountStorageValueByStorageSlotHash( + final SegmentedKeyValueStorageTransaction transaction, + final Hash accountHash, + final Hash slotHash) { + + // get natural key from account hash and slot key + byte[] naturalKey = calculateNaturalSlotKey(accountHash, slotHash); + // insert a key suffixed with block context, with 'deleted account' value + byte[] keySuffixed = calculateArchiveKeyWithMinSuffix(context, naturalKey); + + transaction.put(ACCOUNT_STORAGE_STORAGE, keySuffixed, DELETED_STORAGE_VALUE); + } + + public byte[] calculateNaturalSlotKey(final Hash accountHash, final Hash slotHash) { + return Bytes.concatenate(accountHash, slotHash).toArrayUnsafe(); + } + + public static byte[] calculateArchiveKeyWithMinSuffix( + final BonsaiContext context, final byte[] naturalKey) { + return calculateArchiveKeyWithSuffix(context, naturalKey, MIN_BLOCK_SUFFIX); + } + + public static Bytes calculateArchiveKeyWithMaxSuffix( + final BonsaiContext context, final byte[] naturalKey) { + return Bytes.of(calculateArchiveKeyWithSuffix(context, naturalKey, MAX_BLOCK_SUFFIX)); + } + + // TODO JF: move this out of this class so can be used with ArchiveCodeStorageStrategy without + // being static + public static byte[] calculateArchiveKeyWithSuffix( + final BonsaiContext context, final byte[] naturalKey, final byte[] orElseSuffix) { + // TODO: this can be optimized, just for PoC now + return Arrays.concatenate( + naturalKey, + context + .getBlockHeader() + .map(BlockHeader::getNumber) + .map(Bytes::ofUnsignedLong) + .map(Bytes::toArrayUnsafe) + .orElseGet( + () -> { + // TODO: remove or rate limit these warnings + LOG.atWarn().setMessage("Block context not present, using default suffix").log(); + return orElseSuffix; + })); + } + + @Override + public void updateBlockContext(final BlockHeader blockHeader) { + context.setBlockHeader(blockHeader); + } + + @Override + public FlatDbStrategy contextSafeClone() { + return new ArchiveFlatDbStrategy(context.copy(), metricsSystem, codeStorageStrategy); + } +} diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategy.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategy.java index ad631b8da91..6eb13a46e1c 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategy.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategy.java @@ -22,6 +22,7 @@ import org.hyperledger.besu.datatypes.StorageSlotKey; import org.hyperledger.besu.ethereum.trie.NodeLoader; import org.hyperledger.besu.metrics.BesuMetricCategory; +import org.hyperledger.besu.plugin.data.BlockHeader; import 
org.hyperledger.besu.plugin.services.MetricsSystem; import org.hyperledger.besu.plugin.services.metrics.Counter; import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage; @@ -311,4 +312,13 @@ private static NavigableMap toNavigableMap( pairStream.close(); return collected; } + + public void updateBlockContext(final BlockHeader blockHeader) { + // default no-op for strategies that do not care about bonsai context + } + + public FlatDbStrategy contextSafeClone() { + // FlatDBStrategies that care about bonsai context changes should override this + return this; + } } diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java index 3ce51e793a4..337233b6200 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java @@ -18,6 +18,7 @@ import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE; import static org.hyperledger.besu.ethereum.trie.diffbased.common.storage.DiffBasedWorldStateKeyValueStorage.WORLD_ROOT_HASH_KEY; +import org.hyperledger.besu.ethereum.bonsai.BonsaiContext; import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.FullFlatDbStrategy; import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.PartialFlatDbStrategy; import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration; @@ -63,6 +64,11 @@ public void loadFlatDbStrategy(final SegmentedKeyValueStorage composedWorldState : new AccountHashCodeStorageStrategy(); if (flatDbMode == FlatDbMode.FULL) { this.flatDbStrategy = new FullFlatDbStrategy(metricsSystem, codeStorageStrategy); + } else if (flatDbMode == FlatDbMode.ARCHIVE) { + final BonsaiContext context = new BonsaiContext(); + this.flatDbStrategy = + new ArchiveFlatDbStrategy( + context, metricsSystem, new ArchiveCodeStorageStrategy(context)); } else { this.flatDbStrategy = new PartialFlatDbStrategy(metricsSystem, codeStorageStrategy); } @@ -169,4 +175,12 @@ public void downgradeToPartialFlatDbMode( public FlatDbMode getFlatDbMode() { return flatDbMode; } + + public FlatDbStrategyProvider contextSafeClone() { + FlatDbStrategyProvider copy = + new FlatDbStrategyProvider(metricsSystem, dataStorageConfiguration); + copy.flatDbStrategy = flatDbStrategy.contextSafeClone(); + copy.flatDbMode = flatDbMode; + return copy; + } } diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/FlatDbMode.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/FlatDbMode.java index 4415ccb8251..56f93749735 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/FlatDbMode.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/FlatDbMode.java @@ -19,8 +19,8 @@ import org.apache.tuweni.bytes.Bytes; /** - * The FlatDbMode enum represents the different modes of the flat database. It has two modes: - * PARTIAL and FULL. + * The FlatDbMode enum represents the different modes of the flat database. It has three modes: + * PARTIAL, FULL, and ARCHIVE. * *
<p>
- PARTIAL: Not all the leaves are present inside the flat database. The trie serves as a * fallback to retrieve missing data. The PARTIAL mode is primarily used for backward compatibility @@ -30,11 +30,16 @@ *
<p>
- FULL: The flat database contains the complete representation of the world state, and there * is no need for a fallback mechanism. The FULL mode represents a fully synchronized state where * the flat database encompasses all the necessary data. + * + *
<p>
- ARCHIVE: The flat database contains the complete representation of the world state, and + * historical states as well. The ARCHIVE mode is an extension of FULL, which has a complete flat + * database and no need for a fallback mechanism. */ public enum FlatDbMode { NO_FLATTENED(Bytes.EMPTY), PARTIAL(Bytes.of(0x00)), - FULL(Bytes.of(0x01)); + FULL(Bytes.of(0x01)), + ARCHIVE(Bytes.of(0x02)); final Bytes version; diff --git a/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/bonsai/storage/flat/ArchiveFlatDbReaderStrategyTest.java b/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/bonsai/storage/flat/ArchiveFlatDbReaderStrategyTest.java new file mode 100644 index 00000000000..565c1e7c4e9 --- /dev/null +++ b/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/bonsai/storage/flat/ArchiveFlatDbReaderStrategyTest.java @@ -0,0 +1,18 @@ +/* + * Copyright Hyperledger Besu Contributors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + * + */ +package org.hyperledger.besu.ethereum.bonsai.storage.flat; + +public class ArchiveFlatDbReaderStrategyTest {} diff --git a/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/AccountRangeDataRequest.java b/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/AccountRangeDataRequest.java index 21a707e9a63..349c4335ceb 100644 --- a/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/AccountRangeDataRequest.java +++ b/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/AccountRangeDataRequest.java @@ -137,14 +137,17 @@ protected int doPersist( new AtomicReference<>(noop()); // we have a flat DB only with Bonsai - worldStateStorageCoordinator.applyOnMatchingFlatMode( - FlatDbMode.FULL, - bonsaiWorldStateStorageStrategy -> { - flatDatabaseUpdater.set( + if (!worldStateStorage.getFlatDbMode().equals(FlatDbMode.PARTIAL)) { + // we have a flat DB only with Bonsai + flatDatabaseUpdater = (key, value) -> - ((BonsaiWorldStateKeyValueStorage.Updater) updater) - .putAccountInfoState(Hash.wrap(key), value)); - }); + ((BonsaiWorldStateKeyValueStorage.BonsaiUpdater) updater) + .putAccountInfoState(Hash.wrap(key), value); + } else { + worldStateStorageCoordinator.applyOnMatchingFlatMode(FlatDbMode.FULL, bonsaiWorldStateStorageStrategy -> { + flatDatabaseUpdater.set((key, value) -> ((BonsaiWorldStateKeyValueStorage.Updater) updater).putAccountInfoState(Hash.wrap(key), value)); + }); + } stackTrie.commit(flatDatabaseUpdater.get(), nodeUpdater); diff --git a/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/StorageRangeDataRequest.java b/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/StorageRangeDataRequest.java index 4c50926303b..6d3db7332a4 100644 --- a/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/StorageRangeDataRequest.java +++ 
b/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/StorageRangeDataRequest.java @@ -108,15 +108,19 @@ protected int doPersist( new AtomicReference<>(noop()); // we have a flat DB only with Bonsai - worldStateStorageCoordinator.applyOnMatchingFlatMode( - FlatDbMode.FULL, - bonsaiWorldStateStorageStrategy -> { - flatDatabaseUpdater.set( + + if (!worldStateStorage.getFlatDbMode().equals(FlatDbMode.PARTIAL)) { + // we have a flat DB only with Bonsai + flatDatabaseUpdater = (key, value) -> - ((BonsaiWorldStateKeyValueStorage.Updater) updater) - .putStorageValueBySlotHash( - accountHash, Hash.wrap(key), Bytes32.leftPad(RLP.decodeValue(value)))); - }); + ((BonsaiWorldStateKeyValueStorage.Updater) updater) + .putStorageValueBySlotHash( + accountHash, Hash.wrap(key), Bytes32.leftPad(RLP.decodeValue(value))); + } else { + worldStateStorageCoordinator.applyOnMatchingFlatMode(FlatDbMode.FULL, bonsaiWorldStateStorageStrategy -> { + flatDatabaseUpdater.set((key, value) -> ((BonsaiWorldStateKeyValueStorage.Updater) updater).putStorageValueBySlotHash(accountHash, Hash.wrap(key), Bytes32.leftPad(RLP.decodeValue(value)))); + }); + } stackTrie.commit(flatDatabaseUpdater.get(), nodeUpdater); From ec8a7ff20cd8d78f064b78aec7e4a8b8b59d801a Mon Sep 17 00:00:00 2001 From: Jason Frame Date: Fri, 8 Mar 2024 13:54:53 +1000 Subject: [PATCH 02/39] Change BonsaiReferenceTestWorldStateStorage getContextSafeCopy() to use constructor that reuses worldStateStorage so that we don't lose values in the EvmToolSpecTests Signed-off-by: Jason Frame --- .../BonsaiReferenceTestWorldStateStorage.java | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/ethereum/referencetests/src/main/java/org/hyperledger/besu/ethereum/referencetests/BonsaiReferenceTestWorldStateStorage.java b/ethereum/referencetests/src/main/java/org/hyperledger/besu/ethereum/referencetests/BonsaiReferenceTestWorldStateStorage.java index 3d059097810..fdaca383db0 100644 --- a/ethereum/referencetests/src/main/java/org/hyperledger/besu/ethereum/referencetests/BonsaiReferenceTestWorldStateStorage.java +++ b/ethereum/referencetests/src/main/java/org/hyperledger/besu/ethereum/referencetests/BonsaiReferenceTestWorldStateStorage.java @@ -23,6 +23,8 @@ import org.hyperledger.besu.ethereum.trie.diffbased.common.worldview.DiffBasedWorldView; import org.hyperledger.besu.evm.account.AccountStorageEntry; import org.hyperledger.besu.evm.worldstate.WorldState; +import org.hyperledger.besu.plugin.services.storage.KeyValueStorage; +import org.hyperledger.besu.plugin.services.storage.SnappedKeyValueStorage; import java.util.Comparator; import java.util.Map; @@ -37,11 +39,23 @@ import org.apache.tuweni.units.bigints.UInt256; public class BonsaiReferenceTestWorldStateStorage extends BonsaiWorldStateLayerStorage { + private final BonsaiWorldStateKeyValueStorage parent; private final BonsaiPreImageProxy preImageProxy; public BonsaiReferenceTestWorldStateStorage( final BonsaiWorldStateKeyValueStorage parent, final BonsaiPreImageProxy preImageProxy) { super(parent); + this.parent = parent; + this.preImageProxy = preImageProxy; + } + + private BonsaiReferenceTestWorldStateStorage( + final SnappedKeyValueStorage composedWorldStateStorage, + final KeyValueStorage trieLogStorage, + final BonsaiWorldStateKeyValueStorage parent, + final BonsaiPreImageProxy preImageProxy) { + super(composedWorldStateStorage, trieLogStorage, parent); + this.parent = parent; this.preImageProxy = preImageProxy; } @@ -84,4 +98,10 @@ public Stream streamAccounts( 
.filter(acct -> context.updater().getAccount(acct.getAddress().orElse(null)) != null) .sorted(Comparator.comparing(account -> account.getAddress().orElse(Address.ZERO))); } + + @Override + public BonsaiWorldStateKeyValueStorage getContextSafeCopy() { + return new BonsaiReferenceTestWorldStateStorage( + (SnappedKeyValueStorage) composedWorldStateStorage, trieLogStorage, parent, preImageProxy); + } } From b2d91967400e9d3e4da3ead0d70996ea74b33636 Mon Sep 17 00:00:00 2001 From: Jason Frame Date: Fri, 8 Mar 2024 14:27:04 +1000 Subject: [PATCH 03/39] Force archive mode for testing Signed-off-by: Jason Frame --- .../diffbased/common/storage/flat/FlatDbStrategyProvider.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java index 337233b6200..8b40ea419f8 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java @@ -95,9 +95,11 @@ FlatDbMode deriveFlatDbStrategy(final SegmentedKeyValueStorage composedWorldStat // if we do not have a db-supplied config for flatdb, derive it: // default to partial if trie data exists, but the flat config does not, // and default to the storage config otherwise + + // TODO: temporarily hard code ARCHIVE mode for testing var flatDbModeVal = existingTrieData - ? FlatDbMode.PARTIAL.getVersion() + ? FlatDbMode.ARCHIVE.getVersion() : requestedFlatDbMode.getVersion(); // persist this config in the db var setDbModeTx = composedWorldStateStorage.startTransaction(); From fd300b0c668a30189c5abd3277eab32989c5dcb1 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Wed, 17 Jul 2024 14:11:59 +0100 Subject: [PATCH 04/39] Rebase on main Signed-off-by: Matthew Whitehead --- .../controller/BesuControllerBuilder.java | 6 +-- .../bonsai/BonsaiWorldStateProvider.java | 43 +++++++++++++++++++ .../BonsaiWorldStateKeyValueStorage.java | 10 ++--- .../flat/ArchiveCodeStorageStrategy.java | 10 ++--- .../storage/flat/ArchiveFlatDbStrategy.java | 7 +-- .../trie/diffbased/common/BonsaiContext.java | 3 +- .../common/DiffBasedWorldStateProvider.java | 17 ++------ .../storage/flat/FlatDbStrategyProvider.java | 2 + .../flat/ArchiveFlatDbReaderStrategyTest.java | 3 +- .../request/AccountRangeDataRequest.java | 21 +++++---- .../request/StorageRangeDataRequest.java | 25 ++++++----- 11 files changed, 95 insertions(+), 52 deletions(-) rename ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/{common => bonsai}/storage/flat/ArchiveCodeStorageStrategy.java (88%) rename ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/{common => bonsai}/storage/flat/ArchiveFlatDbStrategy.java (96%) diff --git a/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java b/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java index 9da2a601f32..9e75140a7cc 100644 --- a/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java +++ b/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java @@ -1107,10 +1107,10 @@ WorldStateArchive createWorldStateArchive( final BonsaiWorldStateKeyValueStorage worldStateKeyValueStorage = 
worldStateStorageCoordinator.getStrategy(BonsaiWorldStateKeyValueStorage.class); - // TODO, better integrate. Just for PoC, explicitly set our bonsai context chain head: - worldStateKeyValueStorage.getFlatDbStrategy() - .updateBlockContext(blockchain.getChainHeadHeader()); + worldStateKeyValueStorage + .getFlatDbStrategy() + .updateBlockContext(blockchain.getChainHeadHeader()); yield new BonsaiWorldStateProvider( worldStateKeyValueStorage, diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/BonsaiWorldStateProvider.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/BonsaiWorldStateProvider.java index e4b7ea991f3..ca47fac8605 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/BonsaiWorldStateProvider.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/BonsaiWorldStateProvider.java @@ -17,10 +17,13 @@ import org.hyperledger.besu.datatypes.Address; import org.hyperledger.besu.datatypes.Hash; import org.hyperledger.besu.ethereum.chain.Blockchain; +import org.hyperledger.besu.ethereum.core.BlockHeader; +import org.hyperledger.besu.ethereum.core.MutableWorldState; import org.hyperledger.besu.ethereum.rlp.RLP; import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.cache.BonsaiCachedMerkleTrieLoader; import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.cache.BonsaiCachedWorldStorageManager; import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.BonsaiWorldStateKeyValueStorage; +import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.ArchiveFlatDbStrategy; import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.worldview.BonsaiWorldState; import org.hyperledger.besu.ethereum.trie.diffbased.common.DiffBasedWorldStateProvider; import org.hyperledger.besu.ethereum.trie.diffbased.common.trielog.TrieLogManager; @@ -54,6 +57,7 @@ public BonsaiWorldStateProvider( final EvmConfiguration evmConfiguration) { super(worldStateKeyValueStorage, blockchain, maxLayersToLoad, pluginContext); this.bonsaiCachedMerkleTrieLoader = bonsaiCachedMerkleTrieLoader; + this.evmConfiguration = evmConfiguration; provideCachedWorldStorageManager( new BonsaiCachedWorldStorageManager( this, worldStateKeyValueStorage, this::cloneBonsaiWorldStateConfig)); @@ -72,12 +76,51 @@ public BonsaiWorldStateProvider( final EvmConfiguration evmConfiguration) { super(worldStateKeyValueStorage, blockchain, trieLogManager); this.bonsaiCachedMerkleTrieLoader = bonsaiCachedMerkleTrieLoader; + this.evmConfiguration = evmConfiguration; provideCachedWorldStorageManager(bonsaiCachedWorldStorageManager); loadPersistedState( new BonsaiWorldState( this, worldStateKeyValueStorage, evmConfiguration, defaultWorldStateConfig)); } + @Override + public Optional getMutable( + final BlockHeader blockHeader, final boolean shouldPersistState) { + if (shouldPersistState) { + return getMutable(blockHeader.getStateRoot(), blockHeader.getHash()); + } else { + // TODO this needs to be better integrated && ensure block is canonical + // HACK for kikori PoC, if we have the trielog for this block, we can assume we have it in + // flatDB + // although, in practice we can only serve canonical chain worldstates and need to fall back + // to state rolling if the requested block is a fork. 
+ if (this.worldStateKeyValueStorage.getFlatDbStrategy() instanceof ArchiveFlatDbStrategy + && trieLogManager.getTrieLogLayer(blockHeader.getBlockHash()).isPresent()) { + var contextSafeCopy = + ((BonsaiWorldStateKeyValueStorage) worldStateKeyValueStorage).getContextSafeCopy(); + contextSafeCopy.getFlatDbStrategy().updateBlockContext(blockHeader); + return Optional.of( + new BonsaiWorldState( + this, contextSafeCopy, evmConfiguration, this.defaultWorldStateConfig)); + } + + final BlockHeader chainHeadBlockHeader = blockchain.getChainHeadHeader(); + if (chainHeadBlockHeader.getNumber() - blockHeader.getNumber() + >= trieLogManager.getMaxLayersToLoad()) { + LOG.warn( + "Exceeded the limit of historical blocks that can be loaded ({}). If you need to make older historical queries, configure your `--bonsai-historical-block-limit`.", + trieLogManager.getMaxLayersToLoad()); + return Optional.empty(); + } + return cachedWorldStorageManager + .getWorldState(blockHeader.getHash()) + .or(() -> cachedWorldStorageManager.getNearestWorldState(blockHeader)) + .or(() -> cachedWorldStorageManager.getHeadWorldState(blockchain::getBlockHeader)) + .flatMap(worldState -> rollMutableStateToBlockHash(worldState, blockHeader.getHash())) + .map(MutableWorldState::freeze); + } + } + public BonsaiCachedMerkleTrieLoader getCachedMerkleTrieLoader() { return bonsaiCachedMerkleTrieLoader; } diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/BonsaiWorldStateKeyValueStorage.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/BonsaiWorldStateKeyValueStorage.java index dbb4ab1dfe2..7d140b69ec7 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/BonsaiWorldStateKeyValueStorage.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/BonsaiWorldStateKeyValueStorage.java @@ -65,6 +65,11 @@ public BonsaiWorldStateKeyValueStorage( flatDbStrategyProvider.loadFlatDbStrategy(composedWorldStateStorage); } + public BonsaiWorldStateKeyValueStorage getContextSafeCopy() { + return new BonsaiWorldStateKeyValueStorage( + flatDbStrategyProvider.contextSafeClone(), composedWorldStateStorage, trieLogStorage); + } + public BonsaiWorldStateKeyValueStorage( final FlatDbStrategyProvider flatDbStrategyProvider, final SegmentedKeyValueStorage composedWorldStateStorage, @@ -73,11 +78,6 @@ public BonsaiWorldStateKeyValueStorage( this.flatDbStrategyProvider = flatDbStrategyProvider; } - public BonsaiWorldStateKeyValueStorage getContextSafeCopy() { - return new BonsaiWorldStateKeyValueStorage( - flatDbStrategyProvider.contextSafeClone(), composedWorldStateStorage, trieLogStorage); - } - @Override public DataStorageFormat getDataStorageFormat() { return DataStorageFormat.BONSAI; diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/ArchiveCodeStorageStrategy.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveCodeStorageStrategy.java similarity index 88% rename from ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/ArchiveCodeStorageStrategy.java rename to ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveCodeStorageStrategy.java index 3628a0d19a4..a34e7bcf3d6 100644 --- 
a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/ArchiveCodeStorageStrategy.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveCodeStorageStrategy.java @@ -1,5 +1,5 @@ /* - * Copyright Hyperledger Besu Contributors. + * Copyright contributors to Hyperledger Besu. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at @@ -12,15 +12,15 @@ * * SPDX-License-Identifier: Apache-2.0 */ - -package org.hyperledger.besu.ethereum.trie.bonsai.storage.flat; +package org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.CODE_STORAGE; -import static org.hyperledger.besu.ethereum.trie.bonsai.storage.flat.ArchiveFlatDbStrategy.DELETED_CODE_VALUE; -import static org.hyperledger.besu.ethereum.trie.bonsai.storage.flat.ArchiveFlatDbStrategy.calculateArchiveKeyWithMinSuffix; +import static org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.ArchiveFlatDbStrategy.DELETED_CODE_VALUE; +import static org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.ArchiveFlatDbStrategy.calculateArchiveKeyWithMinSuffix; import org.hyperledger.besu.datatypes.Hash; import org.hyperledger.besu.ethereum.bonsai.BonsaiContext; +import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.CodeStorageStrategy; import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage; import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction; diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/ArchiveFlatDbStrategy.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java similarity index 96% rename from ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/ArchiveFlatDbStrategy.java rename to ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java index 10d1698e32a..1f0587feea2 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/ArchiveFlatDbStrategy.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java @@ -1,5 +1,5 @@ /* - * Copyright Hyperledger Besu Contributors. + * Copyright contributors to Hyperledger Besu. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at @@ -11,9 +11,8 @@ * specific language governing permissions and limitations under the License. 
* * SPDX-License-Identifier: Apache-2.0 - * */ -package org.hyperledger.besu.ethereum.trie.bonsai.storage.flat; +package org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE; @@ -22,6 +21,8 @@ import org.hyperledger.besu.datatypes.StorageSlotKey; import org.hyperledger.besu.ethereum.bonsai.BonsaiContext; import org.hyperledger.besu.ethereum.trie.NodeLoader; +import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.CodeStorageStrategy; +import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.FlatDbStrategy; import org.hyperledger.besu.plugin.data.BlockHeader; import org.hyperledger.besu.plugin.services.MetricsSystem; import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage; diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/BonsaiContext.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/BonsaiContext.java index d41ee7472f1..6e9f04fb7a4 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/BonsaiContext.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/BonsaiContext.java @@ -1,5 +1,5 @@ /* - * Copyright Hyperledger Besu Contributors. + * Copyright contributors to Hyperledger Besu. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at @@ -11,7 +11,6 @@ * specific language governing permissions and limitations under the License. 
* * SPDX-License-Identifier: Apache-2.0 - * */ package org.hyperledger.besu.ethereum.bonsai; diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/DiffBasedWorldStateProvider.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/DiffBasedWorldStateProvider.java index 470f6754e94..4334655988f 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/DiffBasedWorldStateProvider.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/DiffBasedWorldStateProvider.java @@ -30,6 +30,7 @@ import org.hyperledger.besu.ethereum.trie.diffbased.common.worldview.accumulator.DiffBasedWorldStateUpdateAccumulator; import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive; import org.hyperledger.besu.ethereum.worldstate.WorldStateStorageCoordinator; +import org.hyperledger.besu.evm.internal.EvmConfiguration; import org.hyperledger.besu.evm.worldstate.WorldState; import org.hyperledger.besu.plugin.BesuContext; import org.hyperledger.besu.plugin.services.trielogs.TrieLog; @@ -53,6 +54,7 @@ public abstract class DiffBasedWorldStateProvider implements WorldStateArchive { protected final TrieLogManager trieLogManager; protected DiffBasedCachedWorldStorageManager cachedWorldStorageManager; protected DiffBasedWorldState persistedState; + protected EvmConfiguration evmConfiguration; protected final DiffBasedWorldStateKeyValueStorage worldStateKeyValueStorage; protected final DiffBasedWorldStateConfig defaultWorldStateConfig; @@ -130,19 +132,6 @@ public Optional getMutable( if (shouldPersistState) { return getMutable(blockHeader.getStateRoot(), blockHeader.getHash()); } else { - // TODO this needs to be better integrated && ensure block is canonical - // HACK for kikori PoC, if we have the trielog for this block, we can assume we have it in - // flatDB - // although, in practice we can only serve canonical chain worldstates and need to fall back - // to state rolling if the requested block is a fork. 
- if (this.worldStateStorage.getFlatDbStrategy() instanceof ArchiveFlatDbStrategy - && trieLogManager.getTrieLogLayer(blockHeader.getBlockHash()).isPresent()) { - - var contextSafeCopy = worldStateStorage.getContextSafeCopy(); - contextSafeCopy.getFlatDbStrategy().updateBlockContext(blockHeader); - return Optional.of(new BonsaiWorldState(this, contextSafeCopy, evmConfiguration)); - } - final BlockHeader chainHeadBlockHeader = blockchain.getChainHeadHeader(); if (chainHeadBlockHeader.getNumber() - blockHeader.getNumber() >= trieLogManager.getMaxLayersToLoad()) { @@ -166,7 +155,7 @@ public synchronized Optional getMutable( return rollMutableStateToBlockHash(persistedState, blockHash); } - Optional rollMutableStateToBlockHash( + protected Optional rollMutableStateToBlockHash( final DiffBasedWorldState mutableState, final Hash blockHash) { if (blockHash.equals(mutableState.blockHash())) { return Optional.of(mutableState); diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java index 8b40ea419f8..dea66687a7b 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java @@ -19,6 +19,8 @@ import static org.hyperledger.besu.ethereum.trie.diffbased.common.storage.DiffBasedWorldStateKeyValueStorage.WORLD_ROOT_HASH_KEY; import org.hyperledger.besu.ethereum.bonsai.BonsaiContext; +import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.ArchiveCodeStorageStrategy; +import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.ArchiveFlatDbStrategy; import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.FullFlatDbStrategy; import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.PartialFlatDbStrategy; import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration; diff --git a/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/bonsai/storage/flat/ArchiveFlatDbReaderStrategyTest.java b/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/bonsai/storage/flat/ArchiveFlatDbReaderStrategyTest.java index 565c1e7c4e9..9d6ca36129b 100644 --- a/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/bonsai/storage/flat/ArchiveFlatDbReaderStrategyTest.java +++ b/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/bonsai/storage/flat/ArchiveFlatDbReaderStrategyTest.java @@ -1,5 +1,5 @@ /* - * Copyright Hyperledger Besu Contributors. + * Copyright contributors to Hyperledger Besu. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at @@ -11,7 +11,6 @@ * specific language governing permissions and limitations under the License. 
* * SPDX-License-Identifier: Apache-2.0 - * */ package org.hyperledger.besu.ethereum.bonsai.storage.flat; diff --git a/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/AccountRangeDataRequest.java b/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/AccountRangeDataRequest.java index 349c4335ceb..a366db4affb 100644 --- a/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/AccountRangeDataRequest.java +++ b/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/AccountRangeDataRequest.java @@ -137,16 +137,21 @@ protected int doPersist( new AtomicReference<>(noop()); // we have a flat DB only with Bonsai - if (!worldStateStorage.getFlatDbMode().equals(FlatDbMode.PARTIAL)) { + if (!worldStateStorageCoordinator.isMatchingFlatMode(FlatDbMode.PARTIAL)) { // we have a flat DB only with Bonsai - flatDatabaseUpdater = - (key, value) -> - ((BonsaiWorldStateKeyValueStorage.BonsaiUpdater) updater) - .putAccountInfoState(Hash.wrap(key), value); + flatDatabaseUpdater.set( + (key, value) -> + ((BonsaiWorldStateKeyValueStorage.Updater) updater) + .putAccountInfoState(Hash.wrap(key), value)); } else { - worldStateStorageCoordinator.applyOnMatchingFlatMode(FlatDbMode.FULL, bonsaiWorldStateStorageStrategy -> { - flatDatabaseUpdater.set((key, value) -> ((BonsaiWorldStateKeyValueStorage.Updater) updater).putAccountInfoState(Hash.wrap(key), value)); - }); + worldStateStorageCoordinator.applyOnMatchingFlatMode( + FlatDbMode.FULL, + bonsaiWorldStateStorageStrategy -> { + flatDatabaseUpdater.set( + (key, value) -> + ((BonsaiWorldStateKeyValueStorage.Updater) updater) + .putAccountInfoState(Hash.wrap(key), value)); + }); } stackTrie.commit(flatDatabaseUpdater.get(), nodeUpdater); diff --git a/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/StorageRangeDataRequest.java b/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/StorageRangeDataRequest.java index 6d3db7332a4..53143bca33f 100644 --- a/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/StorageRangeDataRequest.java +++ b/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/StorageRangeDataRequest.java @@ -108,18 +108,23 @@ protected int doPersist( new AtomicReference<>(noop()); // we have a flat DB only with Bonsai - - if (!worldStateStorage.getFlatDbMode().equals(FlatDbMode.PARTIAL)) { + if (!worldStateStorageCoordinator.isMatchingFlatMode(FlatDbMode.PARTIAL)) { // we have a flat DB only with Bonsai - flatDatabaseUpdater = - (key, value) -> - ((BonsaiWorldStateKeyValueStorage.Updater) updater) - .putStorageValueBySlotHash( - accountHash, Hash.wrap(key), Bytes32.leftPad(RLP.decodeValue(value))); + flatDatabaseUpdater.set( + (key, value) -> + ((BonsaiWorldStateKeyValueStorage.Updater) updater) + .putStorageValueBySlotHash( + accountHash, Hash.wrap(key), Bytes32.leftPad(RLP.decodeValue(value)))); } else { - worldStateStorageCoordinator.applyOnMatchingFlatMode(FlatDbMode.FULL, bonsaiWorldStateStorageStrategy -> { - flatDatabaseUpdater.set((key, value) -> ((BonsaiWorldStateKeyValueStorage.Updater) updater).putStorageValueBySlotHash(accountHash, Hash.wrap(key), Bytes32.leftPad(RLP.decodeValue(value)))); - }); + worldStateStorageCoordinator.applyOnMatchingFlatMode( + FlatDbMode.FULL, + bonsaiWorldStateStorageStrategy -> { + flatDatabaseUpdater.set( + (key, value) -> + ((BonsaiWorldStateKeyValueStorage.Updater) 
updater) + .putStorageValueBySlotHash( + accountHash, Hash.wrap(key), Bytes32.leftPad(RLP.decodeValue(value)))); + }); } stackTrie.commit(flatDatabaseUpdater.get(), nodeUpdater); From b10b6ae9af7184bf0dbce60981819219ffc0fcf7 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Fri, 19 Jul 2024 11:48:03 +0100 Subject: [PATCH 05/39] Create new DiffBasedWorldStateConfig when copying bonsai archive world state, and freeze it Signed-off-by: Matthew Whitehead --- .../bonsai/BonsaiWorldStateProvider.java | 9 ++++++-- .../storage/flat/FlatDbStrategyProvider.java | 22 +++++++++---------- .../common/worldview/DiffBasedWorldState.java | 1 + 3 files changed, 19 insertions(+), 13 deletions(-) diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/BonsaiWorldStateProvider.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/BonsaiWorldStateProvider.java index ca47fac8605..f3218405dde 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/BonsaiWorldStateProvider.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/BonsaiWorldStateProvider.java @@ -99,9 +99,14 @@ public Optional getMutable( var contextSafeCopy = ((BonsaiWorldStateKeyValueStorage) worldStateKeyValueStorage).getContextSafeCopy(); contextSafeCopy.getFlatDbStrategy().updateBlockContext(blockHeader); - return Optional.of( + BonsaiWorldState worldState = new BonsaiWorldState( - this, contextSafeCopy, evmConfiguration, this.defaultWorldStateConfig)); + this, + contextSafeCopy, + evmConfiguration, + new DiffBasedWorldStateConfig(defaultWorldStateConfig)); + worldState.freeze(); + return Optional.of(worldState); } final BlockHeader chainHeadBlockHeader = blockchain.getChainHeadHeader(); diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java index dea66687a7b..731d78fba98 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java @@ -16,7 +16,6 @@ import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.CODE_STORAGE; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE; -import static org.hyperledger.besu.ethereum.trie.diffbased.common.storage.DiffBasedWorldStateKeyValueStorage.WORLD_ROOT_HASH_KEY; import org.hyperledger.besu.ethereum.bonsai.BonsaiContext; import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.ArchiveCodeStorageStrategy; @@ -79,13 +78,14 @@ public void loadFlatDbStrategy(final SegmentedKeyValueStorage composedWorldState @VisibleForTesting FlatDbMode deriveFlatDbStrategy(final SegmentedKeyValueStorage composedWorldStateStorage) { - final FlatDbMode requestedFlatDbMode = - dataStorageConfiguration.getUnstable().getBonsaiFullFlatDbEnabled() - ? FlatDbMode.FULL - : FlatDbMode.PARTIAL; + // final FlatDbMode requestedFlatDbMode = + // dataStorageConfiguration.getUnstable().getBonsaiFullFlatDbEnabled() + // ? 
FlatDbMode.FULL + // : FlatDbMode.PARTIAL; - final var existingTrieData = - composedWorldStateStorage.get(TRIE_BRANCH_STORAGE, WORLD_ROOT_HASH_KEY).isPresent(); + // TODO: commented out for archive testing + // final var existingTrieData = + // composedWorldStateStorage.get(TRIE_BRANCH_STORAGE, WORLD_ROOT_HASH_KEY).isPresent(); var flatDbMode = FlatDbMode.fromVersion( @@ -99,10 +99,10 @@ FlatDbMode deriveFlatDbStrategy(final SegmentedKeyValueStorage composedWorldStat // and default to the storage config otherwise // TODO: temporarily hard code ARCHIVE mode for testing - var flatDbModeVal = - existingTrieData - ? FlatDbMode.ARCHIVE.getVersion() - : requestedFlatDbMode.getVersion(); + var flatDbModeVal = FlatDbMode.ARCHIVE.getVersion(); + // existingTrieData + // ? FlatDbMode.ARCHIVE.getVersion() + // : requestedFlatDbMode.getVersion(); // persist this config in the db var setDbModeTx = composedWorldStateStorage.startTransaction(); setDbModeTx.put( diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/worldview/DiffBasedWorldState.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/worldview/DiffBasedWorldState.java index 76a8fbd6377..e99292a5c4b 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/worldview/DiffBasedWorldState.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/worldview/DiffBasedWorldState.java @@ -157,6 +157,7 @@ public void persist(final BlockHeader blockHeader) { boolean success = false; + this.worldStateKeyValueStorage.getFlatDbStrategy().updateBlockContext(blockHeader); final DiffBasedWorldStateKeyValueStorage.Updater stateUpdater = worldStateKeyValueStorage.updater(); Runnable saveTrieLog = () -> {}; From 6e786f384898851dcd70e043e829d51fb0688dde Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Wed, 31 Jul 2024 10:03:10 +0100 Subject: [PATCH 06/39] Add BONSAI_ARCHIVE storage format Signed-off-by: Matthew Whitehead --- .../org/hyperledger/besu/cli/BesuCommand.java | 2 +- .../besu/cli/options/DataStorageOptions.java | 2 +- .../storage/RevertMetadataSubCommand.java | 1 + .../storage/TrieLogSubCommand.java | 6 ++--- .../controller/BesuControllerBuilder.java | 9 +++++++ .../keyvalue/KeyValueSegmentIdentifier.java | 17 +++++++++---- .../keyvalue/KeyValueStorageProvider.java | 3 +-- .../common/GenesisWorldStateProvider.java | 4 +-- .../WorldStateStorageCoordinator.java | 23 ++++++++++++------ .../eth/sync/fastsync/FastSyncDownloader.java | 5 ++-- .../sync/snapsync/SnapWorldDownloadState.java | 4 +-- .../snapsync/SnapWorldStateDownloader.java | 4 +-- .../services/storage/DataStorageFormat.java | 8 +++++- .../RocksDBKeyValueStorageFactory.java | 8 ++++-- .../BaseVersionedStorageFormat.java | 13 +++++++++- .../configuration/DatabaseMetadata.java | 2 +- .../PrivacyVersionedStorageFormat.java | 9 ++++++- .../services/storage/rocksdb/Utils.java | 1 + trie.txt | Bin 0 -> 85015 bytes 19 files changed, 86 insertions(+), 35 deletions(-) create mode 100644 trie.txt diff --git a/besu/src/main/java/org/hyperledger/besu/cli/BesuCommand.java b/besu/src/main/java/org/hyperledger/besu/cli/BesuCommand.java index c8c0eaf6abd..d50b60dfe72 100644 --- a/besu/src/main/java/org/hyperledger/besu/cli/BesuCommand.java +++ b/besu/src/main/java/org/hyperledger/besu/cli/BesuCommand.java @@ -1936,7 +1936,7 @@ private PrivacyParameters privacyParameters() { throw new ParameterException( commandLine, String.format("%s %s", "Checkpoint sync", 
errorSuffix)); } - if (getDataStorageConfiguration().getDataStorageFormat().equals(DataStorageFormat.BONSAI)) { + if (getDataStorageConfiguration().getDataStorageFormat().isBonsaiFormat()) { throw new ParameterException(commandLine, String.format("%s %s", "Bonsai", errorSuffix)); } diff --git a/besu/src/main/java/org/hyperledger/besu/cli/options/DataStorageOptions.java b/besu/src/main/java/org/hyperledger/besu/cli/options/DataStorageOptions.java index b442f6ee034..10cf42dd6c7 100644 --- a/besu/src/main/java/org/hyperledger/besu/cli/options/DataStorageOptions.java +++ b/besu/src/main/java/org/hyperledger/besu/cli/options/DataStorageOptions.java @@ -47,7 +47,7 @@ public class DataStorageOptions implements CLIOptions @Option( names = {DATA_STORAGE_FORMAT}, description = - "Format to store trie data in. Either FOREST or BONSAI (default: ${DEFAULT-VALUE}).", + "Format to store trie data in. Either FOREST, BONSAI or BONSAI_ARCHIVE (default: ${DEFAULT-VALUE}).", arity = "1") private DataStorageFormat dataStorageFormat = DataStorageFormat.BONSAI; diff --git a/besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/RevertMetadataSubCommand.java b/besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/RevertMetadataSubCommand.java index c1e903e0808..5cfcd9ef492 100644 --- a/besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/RevertMetadataSubCommand.java +++ b/besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/RevertMetadataSubCommand.java @@ -123,6 +123,7 @@ public void run() { switch (dataStorageFormat) { case FOREST -> 1; case BONSAI -> 2; + case BONSAI_ARCHIVE -> 3; }; @JsonSerialize diff --git a/besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/TrieLogSubCommand.java b/besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/TrieLogSubCommand.java index 6fe6d058b3f..c7a362a4cef 100644 --- a/besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/TrieLogSubCommand.java +++ b/besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/TrieLogSubCommand.java @@ -28,8 +28,6 @@ import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.BonsaiWorldStateKeyValueStorage; import org.hyperledger.besu.ethereum.trie.diffbased.common.trielog.TrieLogPruner; import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration; -import org.hyperledger.besu.ethereum.worldstate.ImmutableDataStorageConfiguration; -import org.hyperledger.besu.plugin.services.storage.DataStorageFormat; import java.io.IOException; import java.io.PrintWriter; @@ -323,8 +321,8 @@ private static TrieLogContext getTrieLogContext() { BesuController besuController = createBesuController(); final DataStorageConfiguration config = besuController.getDataStorageConfiguration(); checkArgument( - DataStorageFormat.BONSAI.equals(config.getDataStorageFormat()), - "Subcommand only works with data-storage-format=BONSAI"); + config.getDataStorageFormat().isBonsaiFormat(), + "Subcommand only works with data-storage-format=BONSAI or BONSAI_ARCHIVE"); final StorageProvider storageProvider = besuController.getStorageProvider(); final BonsaiWorldStateKeyValueStorage rootWorldStateStorage = diff --git a/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java b/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java index 9e75140a7cc..9808d9dddec 100644 --- a/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java +++ b/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java @@ 
-1104,6 +1104,15 @@ WorldStateArchive createWorldStateArchive( final BonsaiCachedMerkleTrieLoader bonsaiCachedMerkleTrieLoader) { return switch (dataStorageConfiguration.getDataStorageFormat()) { case BONSAI -> { + yield new BonsaiWorldStateProvider( + worldStateStorageCoordinator.getStrategy(BonsaiWorldStateKeyValueStorage.class), + blockchain, + Optional.of(dataStorageConfiguration.getBonsaiMaxLayersToLoad()), + bonsaiCachedMerkleTrieLoader, + besuComponent.map(BesuComponent::getBesuPluginContext).orElse(null), + evmConfiguration); + } + case BONSAI_ARCHIVE -> { final BonsaiWorldStateKeyValueStorage worldStateKeyValueStorage = worldStateStorageCoordinator.getStrategy(BonsaiWorldStateKeyValueStorage.class); diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/storage/keyvalue/KeyValueSegmentIdentifier.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/storage/keyvalue/KeyValueSegmentIdentifier.java index 32e372cdecf..b340625d5cb 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/storage/keyvalue/KeyValueSegmentIdentifier.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/storage/keyvalue/KeyValueSegmentIdentifier.java @@ -15,6 +15,7 @@ package org.hyperledger.besu.ethereum.storage.keyvalue; import static org.hyperledger.besu.plugin.services.storage.DataStorageFormat.BONSAI; +import static org.hyperledger.besu.plugin.services.storage.DataStorageFormat.BONSAI_ARCHIVE; import static org.hyperledger.besu.plugin.services.storage.DataStorageFormat.FOREST; import org.hyperledger.besu.plugin.services.storage.DataStorageFormat; @@ -30,11 +31,17 @@ public enum KeyValueSegmentIdentifier implements SegmentIdentifier { PRIVATE_TRANSACTIONS(new byte[] {3}), PRIVATE_STATE(new byte[] {4}), PRUNING_STATE(new byte[] {5}, EnumSet.of(FOREST)), - ACCOUNT_INFO_STATE(new byte[] {6}, EnumSet.of(BONSAI), false, true, false), - CODE_STORAGE(new byte[] {7}, EnumSet.of(BONSAI)), - ACCOUNT_STORAGE_STORAGE(new byte[] {8}, EnumSet.of(BONSAI), false, true, false), - TRIE_BRANCH_STORAGE(new byte[] {9}, EnumSet.of(BONSAI), false, true, false), - TRIE_LOG_STORAGE(new byte[] {10}, EnumSet.of(BONSAI), true, false, true), + ACCOUNT_INFO_STATE(new byte[] {6}, EnumSet.of(BONSAI, BONSAI_ARCHIVE), false, true, false), + CODE_STORAGE(new byte[] {7}, EnumSet.of(BONSAI, BONSAI_ARCHIVE)), + ACCOUNT_STORAGE_STORAGE(new byte[] {8}, EnumSet.of(BONSAI, BONSAI_ARCHIVE), false, true, false), + TRIE_BRANCH_STORAGE(new byte[] {9}, EnumSet.of(BONSAI, BONSAI_ARCHIVE), false, true, false), + TRIE_LOG_STORAGE(new byte[] {10}, EnumSet.of(BONSAI, BONSAI_ARCHIVE), true, false, true), + ACCOUNT_FREEZER_STATE( + "ACCOUNT_FREEZER_STATE".getBytes(StandardCharsets.UTF_8), + EnumSet.of(BONSAI_ARCHIVE), + true, + false, + true), VARIABLES(new byte[] {11}), // formerly GOQUORUM_PRIVATE_WORLD_STATE // previously supported GoQuorum private states diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/storage/keyvalue/KeyValueStorageProvider.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/storage/keyvalue/KeyValueStorageProvider.java index 944f668cd68..b39d7810b26 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/storage/keyvalue/KeyValueStorageProvider.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/storage/keyvalue/KeyValueStorageProvider.java @@ -26,7 +26,6 @@ import org.hyperledger.besu.ethereum.worldstate.WorldStatePreimageStorage; import org.hyperledger.besu.ethereum.worldstate.WorldStateStorageCoordinator; 
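// Illustrative usage (sketch): the DataStorageFormat.isBonsaiFormat() helper added in this series
// lets storage-format checks accept both Bonsai variants rather than testing for BONSAI alone:
//
//   if (dataStorageConfiguration.getDataStorageFormat().isBonsaiFormat()) {
//     // shared Bonsai flat-database path, now also taken for BONSAI_ARCHIVE
//   }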
import org.hyperledger.besu.metrics.ObservableMetricsSystem; -import org.hyperledger.besu.plugin.services.storage.DataStorageFormat; import org.hyperledger.besu.plugin.services.storage.KeyValueStorage; import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier; import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage; @@ -82,7 +81,7 @@ public BlockchainStorage createBlockchainStorage( @Override public WorldStateKeyValueStorage createWorldStateStorage( final DataStorageConfiguration dataStorageConfiguration) { - if (dataStorageConfiguration.getDataStorageFormat().equals(DataStorageFormat.BONSAI)) { + if (dataStorageConfiguration.getDataStorageFormat().isBonsaiFormat()) { return new BonsaiWorldStateKeyValueStorage(this, metricsSystem, dataStorageConfiguration); } else { return new ForestWorldStateKeyValueStorage( diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/common/GenesisWorldStateProvider.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/common/GenesisWorldStateProvider.java index 78bcb417e37..61c96282666 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/common/GenesisWorldStateProvider.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/common/GenesisWorldStateProvider.java @@ -28,7 +28,6 @@ import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration; import org.hyperledger.besu.evm.internal.EvmConfiguration; import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem; -import org.hyperledger.besu.plugin.services.storage.DataStorageFormat; import org.hyperledger.besu.services.kvstore.InMemoryKeyValueStorage; import org.hyperledger.besu.services.kvstore.SegmentedInMemoryKeyValueStorage; @@ -44,8 +43,7 @@ public class GenesisWorldStateProvider { */ public static MutableWorldState createGenesisWorldState( final DataStorageConfiguration dataStorageConfiguration) { - if (Objects.requireNonNull(dataStorageConfiguration).getDataStorageFormat() - == DataStorageFormat.BONSAI) { + if (Objects.requireNonNull(dataStorageConfiguration).getDataStorageFormat().isBonsaiFormat()) { return createGenesisBonsaiWorldState(); } else { return createGenesisForestWorldState(); diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/WorldStateStorageCoordinator.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/WorldStateStorageCoordinator.java index 710d5ea15df..41a6fea3743 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/WorldStateStorageCoordinator.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/WorldStateStorageCoordinator.java @@ -19,6 +19,7 @@ import org.hyperledger.besu.ethereum.trie.forest.storage.ForestWorldStateKeyValueStorage; import org.hyperledger.besu.plugin.services.storage.DataStorageFormat; +import java.util.List; import java.util.Optional; import java.util.function.Consumer; import java.util.function.Function; @@ -74,7 +75,7 @@ public STRATEGY getStrategy( } public boolean isMatchingFlatMode(final FlatDbMode flatDbMode) { - if (getDataStorageFormat().equals(DataStorageFormat.BONSAI)) { + if (getDataStorageFormat().isBonsaiFormat()) { final BonsaiWorldStateKeyValueStorage bonsaiWorldStateStorageStrategy = (BonsaiWorldStateKeyValueStorage) worldStateKeyValueStorage(); return bonsaiWorldStateStorageStrategy.getFlatDbMode().equals(flatDbMode); @@ -84,8 +85,8 @@ public boolean isMatchingFlatMode(final FlatDbMode flatDbMode) { public void 
applyOnMatchingFlatMode( final FlatDbMode flatDbMode, final Consumer onStrategy) { - applyOnMatchingStrategy( - DataStorageFormat.BONSAI, + applyOnMatchingStrategies( + List.of(DataStorageFormat.BONSAI, DataStorageFormat.BONSAI_ARCHIVE), worldStateKeyValueStorage -> { final BonsaiWorldStateKeyValueStorage bonsaiWorldStateStorageStrategy = (BonsaiWorldStateKeyValueStorage) worldStateKeyValueStorage(); @@ -96,8 +97,8 @@ public void applyOnMatchingFlatMode( } public void applyWhenFlatModeEnabled(final Consumer onStrategy) { - applyOnMatchingStrategy( - DataStorageFormat.BONSAI, + applyOnMatchingStrategies( + List.of(DataStorageFormat.BONSAI, DataStorageFormat.BONSAI_ARCHIVE), worldStateKeyValueStorage -> { final BonsaiWorldStateKeyValueStorage bonsaiWorldStateStorageStrategy = (BonsaiWorldStateKeyValueStorage) worldStateKeyValueStorage(); @@ -115,10 +116,18 @@ public void applyOnMatchingStrategy( } } + public void applyOnMatchingStrategies( + final List dataStorageFormats, + final Consumer onStrategy) { + if (dataStorageFormats.contains(getDataStorageFormat())) { + onStrategy.accept(worldStateKeyValueStorage()); + } + } + public RESPONSE applyForStrategy( final Function onBonsai, final Function onForest) { - if (getDataStorageFormat().equals(DataStorageFormat.BONSAI)) { + if (getDataStorageFormat().isBonsaiFormat()) { return onBonsai.apply(((BonsaiWorldStateKeyValueStorage) worldStateKeyValueStorage())); } else { return onForest.apply(((ForestWorldStateKeyValueStorage) worldStateKeyValueStorage())); @@ -128,7 +137,7 @@ public RESPONSE applyForStrategy( public void consumeForStrategy( final Consumer onBonsai, final Consumer onForest) { - if (getDataStorageFormat().equals(DataStorageFormat.BONSAI)) { + if (getDataStorageFormat().isBonsaiFormat()) { onBonsai.accept(((BonsaiWorldStateKeyValueStorage) worldStateKeyValueStorage())); } else { onForest.accept(((ForestWorldStateKeyValueStorage) worldStateKeyValueStorage())); diff --git a/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/FastSyncDownloader.java b/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/FastSyncDownloader.java index 0aaeefc6d6e..ad2ebe43a9f 100644 --- a/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/FastSyncDownloader.java +++ b/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/FastSyncDownloader.java @@ -31,6 +31,7 @@ import java.io.IOException; import java.nio.file.Path; import java.time.Duration; +import java.util.List; import java.util.Optional; import java.util.concurrent.CancellationException; import java.util.concurrent.CompletableFuture; @@ -90,8 +91,8 @@ public CompletableFuture start() { } protected CompletableFuture start(final FastSyncState fastSyncState) { - worldStateStorageCoordinator.applyOnMatchingStrategy( - DataStorageFormat.BONSAI, + worldStateStorageCoordinator.applyOnMatchingStrategies( + List.of(DataStorageFormat.BONSAI, DataStorageFormat.BONSAI_ARCHIVE), worldStateKeyValueStorage -> { BonsaiWorldStateKeyValueStorage onBonsai = (BonsaiWorldStateKeyValueStorage) worldStateKeyValueStorage; diff --git a/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldDownloadState.java b/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldDownloadState.java index 41beaafa6a4..aff1d373992 100644 --- a/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldDownloadState.java +++ 
b/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldDownloadState.java @@ -258,8 +258,8 @@ public synchronized void startTrieHeal() { /** Method to reload the healing process of the trie */ public synchronized void reloadTrieHeal() { // Clear the flat database and trie log from the world state storage if needed - worldStateStorageCoordinator.applyOnMatchingStrategy( - DataStorageFormat.BONSAI, + worldStateStorageCoordinator.applyOnMatchingStrategies( + List.of(DataStorageFormat.BONSAI, DataStorageFormat.BONSAI_ARCHIVE), worldStateKeyValueStorage -> { final BonsaiWorldStateKeyValueStorage strategy = worldStateStorageCoordinator.getStrategy(BonsaiWorldStateKeyValueStorage.class); diff --git a/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldStateDownloader.java b/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldStateDownloader.java index fc9d0d8ef10..da0eea49a91 100644 --- a/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldStateDownloader.java +++ b/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldStateDownloader.java @@ -174,8 +174,8 @@ public CompletableFuture run( }); } else if (!snapContext.getAccountsHealingList().isEmpty()) { // restart only the heal step snapSyncState.setHealTrieStatus(true); - worldStateStorageCoordinator.applyOnMatchingStrategy( - DataStorageFormat.BONSAI, + worldStateStorageCoordinator.applyOnMatchingStrategies( + List.of(DataStorageFormat.BONSAI, DataStorageFormat.BONSAI_ARCHIVE), strategy -> { BonsaiWorldStateKeyValueStorage onBonsai = (BonsaiWorldStateKeyValueStorage) strategy; onBonsai.clearFlatDatabase(); diff --git a/plugin-api/src/main/java/org/hyperledger/besu/plugin/services/storage/DataStorageFormat.java b/plugin-api/src/main/java/org/hyperledger/besu/plugin/services/storage/DataStorageFormat.java index 8e056abc49a..b5efbc38127 100644 --- a/plugin-api/src/main/java/org/hyperledger/besu/plugin/services/storage/DataStorageFormat.java +++ b/plugin-api/src/main/java/org/hyperledger/besu/plugin/services/storage/DataStorageFormat.java @@ -19,5 +19,11 @@ public enum DataStorageFormat { /** Original format. Store all tries */ FOREST, /** New format. Store one trie, and trie logs to roll forward and backward */ - BONSAI; + BONSAI, + /** The new option for storing archive data e.g. 
state at any block */ + BONSAI_ARCHIVE; + + public boolean isBonsaiFormat() { + return this == BONSAI || this == BONSAI_ARCHIVE; + } } diff --git a/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/RocksDBKeyValueStorageFactory.java b/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/RocksDBKeyValueStorageFactory.java index d53c9e57fde..4a3a08ebe0a 100644 --- a/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/RocksDBKeyValueStorageFactory.java +++ b/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/RocksDBKeyValueStorageFactory.java @@ -14,6 +14,7 @@ */ package org.hyperledger.besu.plugin.services.storage.rocksdb; +import static org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.BaseVersionedStorageFormat.BONSAI_ARCHIVE_WITH_RECEIPT_COMPACTION; import static org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.BaseVersionedStorageFormat.BONSAI_WITH_RECEIPT_COMPACTION; import static org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.BaseVersionedStorageFormat.BONSAI_WITH_VARIABLES; import static org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.BaseVersionedStorageFormat.FOREST_WITH_RECEIPT_COMPACTION; @@ -57,7 +58,10 @@ public class RocksDBKeyValueStorageFactory implements KeyValueStorageFactory { private static final Logger LOG = LoggerFactory.getLogger(RocksDBKeyValueStorageFactory.class); private static final EnumSet SUPPORTED_VERSIONED_FORMATS = - EnumSet.of(FOREST_WITH_RECEIPT_COMPACTION, BONSAI_WITH_RECEIPT_COMPACTION); + EnumSet.of( + FOREST_WITH_RECEIPT_COMPACTION, + BONSAI_WITH_RECEIPT_COMPACTION, + BONSAI_ARCHIVE_WITH_RECEIPT_COMPACTION); private static final String NAME = "rocksdb"; private final RocksDBMetricsFactory rocksDBMetricsFactory; private DatabaseMetadata databaseMetadata; @@ -160,7 +164,7 @@ public SegmentedKeyValueStorage create( metricsSystem, rocksDBMetricsFactory); } - case BONSAI -> { + case BONSAI, BONSAI_ARCHIVE -> { LOG.debug("BONSAI mode detected, Using OptimisticTransactionDB."); segmentedStorage = new OptimisticRocksDBColumnarKeyValueStorage( diff --git a/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/BaseVersionedStorageFormat.java b/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/BaseVersionedStorageFormat.java index ad3557636d2..52e8cfe4aee 100644 --- a/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/BaseVersionedStorageFormat.java +++ b/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/BaseVersionedStorageFormat.java @@ -44,7 +44,17 @@ public enum BaseVersionedStorageFormat implements VersionedStorageFormat { * Current Bonsai version, with receipts using compaction, in order to make Receipts use less disk * space */ - BONSAI_WITH_RECEIPT_COMPACTION(DataStorageFormat.BONSAI, 3); + BONSAI_WITH_RECEIPT_COMPACTION(DataStorageFormat.BONSAI, 3), + /** + * Current Bonsai version, with blockchain variables in a dedicated column family, in order to + * make BlobDB more effective + */ + BONSAI_ARCHIVE_WITH_VARIABLES(DataStorageFormat.BONSAI_ARCHIVE, 1), + /** + * Current Bonsai version, with receipts using compaction, in order to make Receipts use less disk + * space + */ + BONSAI_ARCHIVE_WITH_RECEIPT_COMPACTION(DataStorageFormat.BONSAI_ARCHIVE, 2); private final 
DataStorageFormat format; private final int version; @@ -65,6 +75,7 @@ public static BaseVersionedStorageFormat defaultForNewDB( return switch (configuration.getDatabaseFormat()) { case FOREST -> FOREST_WITH_RECEIPT_COMPACTION; case BONSAI -> BONSAI_WITH_RECEIPT_COMPACTION; + case BONSAI_ARCHIVE -> BONSAI_ARCHIVE_WITH_RECEIPT_COMPACTION; }; } diff --git a/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/DatabaseMetadata.java b/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/DatabaseMetadata.java index a72db7c322b..0e7a96578fb 100644 --- a/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/DatabaseMetadata.java +++ b/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/DatabaseMetadata.java @@ -239,7 +239,7 @@ public DatabaseMetadata upgradeToPrivacy() { "Unsupported database with format FOREST and version " + versionedStorageFormat.getVersion()); }; - case BONSAI -> + case BONSAI, BONSAI_ARCHIVE -> switch (versionedStorageFormat.getVersion()) { case 1 -> PrivacyVersionedStorageFormat.BONSAI_ORIGINAL; case 2 -> PrivacyVersionedStorageFormat.BONSAI_WITH_VARIABLES; diff --git a/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/PrivacyVersionedStorageFormat.java b/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/PrivacyVersionedStorageFormat.java index ca5988dc75f..fed089b8cea 100644 --- a/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/PrivacyVersionedStorageFormat.java +++ b/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/PrivacyVersionedStorageFormat.java @@ -44,7 +44,13 @@ public enum PrivacyVersionedStorageFormat implements VersionedStorageFormat { * Current Bonsai version, with receipts using compaction, in order to make Receipts use less disk * space */ - BONSAI_WITH_RECEIPT_COMPACTION(BaseVersionedStorageFormat.BONSAI_WITH_RECEIPT_COMPACTION, 2); + BONSAI_WITH_RECEIPT_COMPACTION(BaseVersionedStorageFormat.BONSAI_WITH_RECEIPT_COMPACTION, 2), + /** + * Bonsai archive version, with receipts using compaction, in order to make Receipts use less disk + * space + */ + BONSAI_ARCHIVE_WITH_RECEIPT_COMPACTION( + BaseVersionedStorageFormat.BONSAI_ARCHIVE_WITH_RECEIPT_COMPACTION, 3); private final VersionedStorageFormat baseVersionedStorageFormat; private final OptionalInt privacyVersion; @@ -66,6 +72,7 @@ public static VersionedStorageFormat defaultForNewDB( return switch (configuration.getDatabaseFormat()) { case FOREST -> FOREST_WITH_RECEIPT_COMPACTION; case BONSAI -> BONSAI_WITH_RECEIPT_COMPACTION; + case BONSAI_ARCHIVE -> BONSAI_ARCHIVE_WITH_RECEIPT_COMPACTION; }; } diff --git a/plugins/rocksdb/src/test/java/org/hyperledger/besu/plugin/services/storage/rocksdb/Utils.java b/plugins/rocksdb/src/test/java/org/hyperledger/besu/plugin/services/storage/rocksdb/Utils.java index a5d4f1ba913..6d96045e5a5 100644 --- a/plugins/rocksdb/src/test/java/org/hyperledger/besu/plugin/services/storage/rocksdb/Utils.java +++ b/plugins/rocksdb/src/test/java/org/hyperledger/besu/plugin/services/storage/rocksdb/Utils.java @@ -86,6 +86,7 @@ private static int dataStorageFormatToV1(final DataStorageFormat dataStorageForm return switch (dataStorageFormat) { case FOREST -> 1; case BONSAI -> 2; + case BONSAI_ARCHIVE -> 3; }; } } diff --git 
a/trie.txt b/trie.txt new file mode 100644 index 0000000000000000000000000000000000000000..44f16d426d14826174779f15b0067157e3f450f3 GIT binary patch literal 85015
zBM*>2*3CxQCRVl$%b44QU8VIo)_VeHtsffo-@{U6CJJ6w?;&u6I!_ciVkmLS|4bJr`*>BH?8PbKWFL=_Yxy2o;P9|s z^r*@~^m{bQ0?*4RZsABd*lPlkY!y6itfshee6mkw+Ij`LcFNbqT|3J87S}4?9odI3 z9NB**b!*Pf2=ijvH|5~Gu|Fh3^$y)wVf=ghHmSQ?i+?Zh_!f~*e-8=lZ)n4UeXkgc zL2(VXVgMx|73N^&Ae|C@YsjR?(IKgsU zZfHdGr*}oA&rR`9Y8OezYUl=>z!3zFNV?;r*4TI;a6}MBS|gDNR1i2K(U9T;ywt?* zD-0DRr8m79utr_)(yM-2*uhch((i~Ne?h0a0G6o5;y_~96{g+ za1%I!MJ&OES?>z`C-L04^lavrIJwB*f6H$`AK8c+p6qd?eP_-3@*aB78;t=ZNzWT<0U~cD}Nze%s zyv;Cgo_cCTYg;EfLPB-xhT0RZE9HmjjppiFVYKeHsvaXj)iZ4Wh0&lLS;Q)v9L?32 zi%?yX;%IIOHMfVLBh=zWlG+wRYjemuGlVIXBIQJj6grFyGesLsj^^qu5>zYu&siwi zgB0O$OB%&pQx_6Ag1`}GnXM&o1k|m}uGgbRi%|EGp+#D35gkARH?}Qk?evCjX%2;2 zI=nMgy@q*A(+xdNPMP$Yj@8iZEJcnWa70evh>suryL;ZN&F`#!f)DjSwQJ(Wi;uJs zID)_tDNb<$MOzIlZl>NY!UoCfjNPTU$sz9!RN|_o*iGud z_!8^!If<1=ILLbsO8OX`pQ3}lwA>$L#6jL6q{DWAt@!%!B@5+M`}{(M%$)onPRPGG z2Ood_jp*Y)8!6D=RL&2hx4rc5CPcw7Ue{#vAqaVhwaI^413U}^YeV1&0!Qc`noNPq ztx)*090a=XA{lC?n+3YiCn{>D7=bSS*k9KtKNgvP4}mlr5{ag7)SRnXoAlb7!H`&i zj@3Yk7?!V&B1h=Dqb4#VZs=GIiX1_aBUCLRa72>XU_XataB=hY2 zh`rVqnCu<7;|m9)7=FR3fQ78Mx@W%yE~oqK@X$gOIU-YNrcCS}lCMf^riy)uJ0)wO z;t)Pt;fAadqegl3(7x~~kNN(TuP!F3=CxXK+BmnsF-ZhRsd8F_V`}ng4GzG<3ym`r za92Fa4p49`4uIo!-Hqv38kJ%^I2zTn859(=z=sBSFmqwi?U~AfaP5C~`!02_bOAC_HQ|4zSXoAu|`Wx2D7u z!Bb~CRA@+tp`2+`CXJCMRZagn%cFPSH~RIr=KN#5 Q)pu9;g>X2ELdS>y7lhdH3IG5A literal 0 HcmV?d00001 From 32771d31982346652bde32b748b4c04698793586 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Fri, 16 Aug 2024 16:19:08 +0100 Subject: [PATCH 07/39] New ACCOUNT_FREEZER_STATE DB segment. New BonsaiArchiveFreezer to listen for blocks and move account state to new DB segment Signed-off-by: Matthew Whitehead --- .../storage/TrieLogSubCommand.java | 1 + .../controller/BesuControllerBuilder.java | 31 ++++ .../flat/ArchiveCodeStorageStrategy.java | 2 +- .../storage/flat/ArchiveFlatDbStrategy.java | 33 +++- .../worldview/BonsaiArchiveFreezer.java | 161 ++++++++++++++++++ .../DiffBasedWorldStateKeyValueStorage.java | 71 ++++++++ .../storage/flat/FlatDbStrategyProvider.java | 8 +- 7 files changed, 299 insertions(+), 8 deletions(-) create mode 100644 ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java diff --git a/besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/TrieLogSubCommand.java b/besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/TrieLogSubCommand.java index c7a362a4cef..ecf367e97d8 100644 --- a/besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/TrieLogSubCommand.java +++ b/besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/TrieLogSubCommand.java @@ -39,6 +39,7 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.core.config.Configurator; +import org.hyperledger.besu.ethereum.worldstate.ImmutableDataStorageConfiguration; import org.rocksdb.RocksDBException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java b/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java index 9808d9dddec..b6f5862a6e8 100644 --- a/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java +++ b/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java @@ -86,6 +86,7 @@ import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.BonsaiWorldStateProvider; import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.cache.BonsaiCachedMerkleTrieLoader; import 
org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.BonsaiWorldStateKeyValueStorage; +import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.worldview.BonsaiArchiveFreezer; import org.hyperledger.besu.ethereum.trie.diffbased.common.trielog.TrieLogManager; import org.hyperledger.besu.ethereum.trie.diffbased.common.trielog.TrieLogPruner; import org.hyperledger.besu.ethereum.trie.forest.ForestWorldStateArchive; @@ -753,6 +754,19 @@ public BesuController build() { trieLogManager.subscribe(trieLogPruner); } + // TODO - do we want a flag to turn this on and off? + if (DataStorageFormat.BONSAI_ARCHIVE.equals(dataStorageConfiguration.getDataStorageFormat())) { + final BonsaiWorldStateKeyValueStorage worldStateKeyValueStorage = + worldStateStorageCoordinator.getStrategy(BonsaiWorldStateKeyValueStorage.class); + final BonsaiArchiveFreezer archiveFreezer = + createBonsaiArchiveFreezer( + worldStateKeyValueStorage, + blockchain, + scheduler, + ((BonsaiWorldStateProvider) worldStateArchive).getTrieLogManager()); + blockchain.observeBlockAdded(archiveFreezer); + } + final List closeables = new ArrayList<>(); closeables.add(protocolContext.getWorldStateArchive()); closeables.add(storageProvider); @@ -819,6 +833,23 @@ private TrieLogPruner createTrieLogPruner( return trieLogPruner; } + private BonsaiArchiveFreezer createBonsaiArchiveFreezer( + final WorldStateKeyValueStorage worldStateStorage, + final Blockchain blockchain, + final EthScheduler scheduler, + final TrieLogManager trieLogManager) { + final BonsaiArchiveFreezer archiveFreezer = + new BonsaiArchiveFreezer( + (BonsaiWorldStateKeyValueStorage) worldStateStorage, + blockchain, + scheduler::executeServiceTask, + 10, + trieLogManager); + archiveFreezer.initialize(); + + return archiveFreezer; + } + /** * Create synchronizer synchronizer. 
* diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveCodeStorageStrategy.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveCodeStorageStrategy.java index a34e7bcf3d6..ad4219faae7 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveCodeStorageStrategy.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveCodeStorageStrategy.java @@ -55,7 +55,7 @@ public Optional getFlatCode( // use getNearest() with an account key that is suffixed by the block context final Optional codeFound = storage - .getNearestTo(CODE_STORAGE, keyNearest) + .getNearestBefore(CODE_STORAGE, keyNearest) // return empty when we find a "deleted value key" .filter( found -> diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java index 1f0587feea2..76b6aff90d1 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java @@ -14,6 +14,7 @@ */ package org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat; +import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_FREEZER_STATE; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE; @@ -50,7 +51,7 @@ public ArchiveFlatDbStrategy( static final byte[] MAX_BLOCK_SUFFIX = Bytes.ofUnsignedLong(Long.MAX_VALUE).toArrayUnsafe(); static final byte[] MIN_BLOCK_SUFFIX = Bytes.ofUnsignedLong(0L).toArrayUnsafe(); - static final byte[] DELETED_ACCOUNT_VALUE = new byte[0]; + public static final byte[] DELETED_ACCOUNT_VALUE = new byte[0]; public static final byte[] DELETED_CODE_VALUE = new byte[0]; static final byte[] DELETED_STORAGE_VALUE = new byte[0]; @@ -60,15 +61,15 @@ public Optional getFlatAccount( final NodeLoader nodeLoader, final Hash accountHash, final SegmentedKeyValueStorage storage) { - getAccountCounter.inc(); + getAccountCounter.inc(); // keyNearest, use MAX_BLOCK_SUFFIX in the absence of a block context: Bytes keyNearest = calculateArchiveKeyWithMaxSuffix(context, accountHash.toArrayUnsafe()); // use getNearest() with an account key that is suffixed by the block context final Optional accountFound = storage - .getNearestTo(ACCOUNT_INFO_STATE, keyNearest) + .getNearestBefore(ACCOUNT_INFO_STATE, keyNearest) // return empty when we find a "deleted value key" .filter( found -> @@ -80,10 +81,30 @@ public Optional getFlatAccount( if (accountFound.isPresent()) { getAccountFoundInFlatDatabaseCounter.inc(); + return accountFound; } else { - getAccountNotFoundInFlatDatabaseCounter.inc(); + // Check the frozen state as old state is moved out of the primary DB segment + final Optional frozenAccountFound = + storage + .getNearestBefore(ACCOUNT_FREEZER_STATE, keyNearest) + // return empty when we find a "deleted value key" + .filter( + found -> + !Arrays.areEqual( + DELETED_ACCOUNT_VALUE, found.value().orElse(DELETED_ACCOUNT_VALUE))) + // don't return accounts 
that do not have a matching account hash + .filter(found -> accountHash.commonPrefixLength(found.key()) >= accountHash.size()) + .flatMap(SegmentedKeyValueStorage.NearestKeyValue::wrapBytes); + + if (frozenAccountFound.isPresent()) { + // TODO - different metric for frozen lookups? + getAccountFoundInFlatDatabaseCounter.inc(); + } else { + getAccountNotFoundInFlatDatabaseCounter.inc(); + } + + return frozenAccountFound; } - return accountFound; } /* @@ -132,7 +153,7 @@ public Optional getFlatStorageValueByStorageSlotKey( // use getNearest() with a key that is suffixed by the block context final Optional storageFound = storage - .getNearestTo(ACCOUNT_STORAGE_STORAGE, keyNearest) + .getNearestBefore(ACCOUNT_STORAGE_STORAGE, keyNearest) // return empty when we find a "deleted value key" .filter( found -> diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java new file mode 100644 index 00000000000..bbd0d4c4a0b --- /dev/null +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java @@ -0,0 +1,161 @@ +/* + * Copyright contributors to Hyperledger Besu. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ +package org.hyperledger.besu.ethereum.trie.diffbased.bonsai.worldview; + +import org.hyperledger.besu.datatypes.Hash; +import org.hyperledger.besu.ethereum.chain.BlockAddedEvent; +import org.hyperledger.besu.ethereum.chain.BlockAddedObserver; +import org.hyperledger.besu.ethereum.chain.Blockchain; +import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.DiffBasedWorldStateKeyValueStorage; +import org.hyperledger.besu.ethereum.trie.diffbased.common.trielog.TrieLogManager; +import org.hyperledger.besu.plugin.services.trielogs.TrieLog; + +import java.util.Comparator; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Consumer; + +import com.google.common.collect.ArrayListMultimap; +import com.google.common.collect.Multimap; +import com.google.common.collect.TreeMultimap; +import org.apache.tuweni.bytes.Bytes; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class manages the "freezing" of historic state that is still needed to satisfy queries but + * doesn't need to be in the main DB segment for. Doing so would degrade block-import performance + * over time so we move state beyond a certain age (in blocks) to other DB segments, assuming there + * is a more recent (i.e. changed) version of the state. If state is created once and never changed + * it will remain in the primary DB segment(s). 
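+ *
+ * <p>Worked example (figures are illustrative): with numberOfBlocksToKeepInWarmStorage set to 10
+ * (the value currently wired in by BesuControllerBuilder) and the chain head at block 1000, the
+ * freezer queue is drained for blocks up to and including block 990; for each account changed in
+ * such a block, any superseded older version of its state is moved out of ACCOUNT_INFO_STATE into
+ * the ACCOUNT_FREEZER_STATE segment, while the most recent version stays in the primary segment
+ * that block import continues to use.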
+ */ +public class BonsaiArchiveFreezer implements BlockAddedObserver { + + private static final Logger LOG = LoggerFactory.getLogger(BonsaiArchiveFreezer.class); + + private final DiffBasedWorldStateKeyValueStorage rootWorldStateStorage; + private final Blockchain blockchain; + private final Consumer executeAsync; + private final long numberOfBlocksToKeepInWarmStorage; + private final TrieLogManager trieLogManager; + + private final Multimap blocksToMoveToFreezer = + TreeMultimap.create(Comparator.reverseOrder(), Comparator.naturalOrder()); + + public BonsaiArchiveFreezer( + final DiffBasedWorldStateKeyValueStorage rootWorldStateStorage, + final Blockchain blockchain, + final Consumer executeAsync, + final long numberOfBlocksToKeepInWarmStorage, + final TrieLogManager trieLogManager) { + this.rootWorldStateStorage = rootWorldStateStorage; + this.blockchain = blockchain; + this.executeAsync = executeAsync; + this.numberOfBlocksToKeepInWarmStorage = numberOfBlocksToKeepInWarmStorage; + this.trieLogManager = trieLogManager; + } + + public int initialize() { + // TODO Probably need to freeze old blocks that haven't been frozen already? + return 0; + } + + public synchronized void addToFreezerQueue(final long blockNumber, final Hash blockHash) { + LOG.atDebug() + .setMessage( + "adding block to archive freezer queue for moving to cold storage, blockNumber {}; blockHash {}") + .addArgument(blockNumber) + .addArgument(blockHash) + .log(); + blocksToMoveToFreezer.put(blockNumber, blockHash); + } + + public synchronized int moveBlockStateToFreezer() { + final long retainAboveThisBlock = + blockchain.getChainHeadBlockNumber() - numberOfBlocksToKeepInWarmStorage; + if (rootWorldStateStorage.getFlatDbMode().getVersion() == Bytes.EMPTY) { + throw new IllegalStateException("DB mode version not set"); + } + + AtomicInteger frozenStateCount = new AtomicInteger(); + + LOG.atDebug() + .setMessage( + "Moving cold state to freezer storage (chainHeadNumber: {} - numberOfBlocksToKeepInWarmStorage: {}) = {}") + .addArgument(blockchain::getChainHeadBlockNumber) + .addArgument(numberOfBlocksToKeepInWarmStorage) + .addArgument(retainAboveThisBlock) + .log(); + + final var blocksToMove = + blocksToMoveToFreezer.asMap().entrySet().stream() + .dropWhile((e) -> e.getKey() > retainAboveThisBlock); + // TODO - limit to a configurable number of blocks to move per loop + + final Multimap movedToFreezer = ArrayListMultimap.create(); + + // Determine which world state keys have changed in the last N blocks by looking at the + // trie logs for the blocks. 
Then move the old keys to the freezer segment (if and only if they + // have changed) + blocksToMove.forEach( + (block) -> { + for (Hash blockHash : block.getValue()) { + Optional trieLog = trieLogManager.getTrieLogLayer(blockHash); + if (trieLog.isPresent()) { + trieLog + .get() + .getAccountChanges() + .forEach( + (address, change) -> { + // Move any previous state for this account + frozenStateCount.addAndGet( + rootWorldStateStorage.freezePreviousAccountState( + blockchain.getBlockHeader(blockHash), + blockchain.getBlockHeader(block.getKey() - 1), + address.addressHash())); + // TODO - block number - 1 is a hack until getNearestBefore() is pulled in + }); + } + movedToFreezer.put(block.getKey(), blockHash); + } + }); + + movedToFreezer.keySet().forEach(blocksToMoveToFreezer::removeAll); + + if (frozenStateCount.get() > 0) { + LOG.atInfo() + .setMessage("froze {} state entries for {} blocks") + .addArgument(frozenStateCount.get()) + .addArgument(movedToFreezer::size) + .log(); + } + + return movedToFreezer.size(); + } + + @Override + public void onBlockAdded(final BlockAddedEvent addedBlockContext) { + final Hash blockHash = addedBlockContext.getBlock().getHeader().getBlockHash(); + final Optional blockNumber = + Optional.of(addedBlockContext.getBlock().getHeader().getNumber()); + blockNumber.ifPresent( + blockNum -> + executeAsync.accept( + () -> { + addToFreezerQueue(blockNum, blockHash); + moveBlockStateToFreezer(); + })); + } +} diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java index 50adf7b34fe..6edf2ea763a 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java @@ -14,14 +14,19 @@ */ package org.hyperledger.besu.ethereum.trie.diffbased.common.storage; +import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_FREEZER_STATE; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.CODE_STORAGE; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE; +import static org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.ArchiveFlatDbStrategy.DELETED_ACCOUNT_VALUE; import org.hyperledger.besu.datatypes.Hash; +import org.hyperledger.besu.ethereum.bonsai.BonsaiContext; +import org.hyperledger.besu.ethereum.core.BlockHeader; import org.hyperledger.besu.ethereum.storage.StorageProvider; import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier; +import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.ArchiveFlatDbStrategy; import org.hyperledger.besu.ethereum.trie.diffbased.common.StorageSubscriber; import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.FlatDbStrategy; import org.hyperledger.besu.ethereum.worldstate.FlatDbMode; @@ -38,12 +43,14 @@ import java.util.NavigableMap; import java.util.Optional; import java.util.concurrent.atomic.AtomicBoolean; +import 
java.util.concurrent.atomic.AtomicInteger; import java.util.function.Predicate; import java.util.stream.Stream; import kotlin.Pair; import org.apache.tuweni.bytes.Bytes; import org.apache.tuweni.bytes.Bytes32; +import org.bouncycastle.util.Arrays; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -192,6 +199,70 @@ public boolean pruneTrieLog(final Hash blockHash) { } } + /** + * Move old state from the primary DB segments to "cold" segments that will only be used for + * historic state queries. This prevents performance degradation over time for writes to the + * primary DB segments. + * + * @param currentBlockHeader TODO - should not be needed + * @param previousBlockHeader the block header for the previous block, used to get the "nearest + * before" state + * @param accountHash the account to freeze old state for + * @return the number of account states that were moved to frozen storage + */ + public int freezePreviousAccountState( + final Optional currentBlockHeader, + final Optional previousBlockHeader, + final Hash accountHash) { + AtomicInteger frozenStateCount = new AtomicInteger(); + if (previousBlockHeader.isPresent()) { + try { + // Get the key for this block + final BonsaiContext theContext = new BonsaiContext(); + theContext.setBlockHeader(currentBlockHeader.get()); + + // Get the key for the previous block + final BonsaiContext previousContext = new BonsaiContext(); + previousContext.setBlockHeader(previousBlockHeader.get()); + final Bytes previousKey = + ArchiveFlatDbStrategy.calculateArchiveKeyWithMaxSuffix( + previousContext, accountHash.toArrayUnsafe()); + + composedWorldStateStorage + .getNearestBefore(ACCOUNT_INFO_STATE, previousKey) + .filter( + // Ignore deleted entries + found -> + !Arrays.areEqual( + DELETED_ACCOUNT_VALUE, found.value().orElse(DELETED_ACCOUNT_VALUE))) + // Skip "nearest" entries that are for a different account + .filter(found -> accountHash.commonPrefixLength(found.key()) >= accountHash.size()) + .stream() + .forEach( + (nearestKey) -> { + SegmentedKeyValueStorageTransaction tx = + composedWorldStateStorage.startTransaction(); + tx.remove(ACCOUNT_INFO_STATE, nearestKey.key().toArrayUnsafe()); + tx.put( + ACCOUNT_FREEZER_STATE, + nearestKey.key().toArrayUnsafe(), + nearestKey.value().get()); + tx.commit(); + frozenStateCount.getAndIncrement(); + }); + + LOG.atDebug() + .setMessage("no previous state for account {} found to move to cold storage") + .addArgument(accountHash) + .log(); + } catch (Exception e) { + LOG.error("Error moving account state for account {} to cold storage", accountHash, e); + } + } + + return frozenStateCount.get(); + } + @Override public synchronized void close() throws Exception { // when the storage clears, close diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java index 731d78fba98..3e08f8eb6fd 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java @@ -25,6 +25,7 @@ import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration; import org.hyperledger.besu.ethereum.worldstate.FlatDbMode; import org.hyperledger.besu.plugin.services.MetricsSystem; +import 
org.hyperledger.besu.plugin.services.storage.DataStorageFormat; import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage; import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction; @@ -99,7 +100,12 @@ FlatDbMode deriveFlatDbStrategy(final SegmentedKeyValueStorage composedWorldStat // and default to the storage config otherwise // TODO: temporarily hard code ARCHIVE mode for testing - var flatDbModeVal = FlatDbMode.ARCHIVE.getVersion(); + var flatDbModeVal = + dataStorageConfiguration + .getDataStorageFormat() + .equals(DataStorageFormat.BONSAI_ARCHIVE) + ? FlatDbMode.ARCHIVE.getVersion() + : FlatDbMode.FULL.getVersion(); // existingTrieData // ? FlatDbMode.ARCHIVE.getVersion() // : requestedFlatDbMode.getVersion(); From b1f448d762d9c0fbd9bf9ba4fa07a76847b9222c Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Wed, 21 Aug 2024 14:55:26 +0100 Subject: [PATCH 08/39] Add freezer segment for account storage Signed-off-by: Matthew Whitehead --- .../keyvalue/KeyValueSegmentIdentifier.java | 6 ++ .../storage/flat/ArchiveFlatDbStrategy.java | 2 +- .../worldview/BonsaiArchiveFreezer.java | 92 +++++++++++++------ .../DiffBasedWorldStateKeyValueStorage.java | 72 +++++++++++++-- 4 files changed, 134 insertions(+), 38 deletions(-) diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/storage/keyvalue/KeyValueSegmentIdentifier.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/storage/keyvalue/KeyValueSegmentIdentifier.java index b340625d5cb..6d20bbb52d6 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/storage/keyvalue/KeyValueSegmentIdentifier.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/storage/keyvalue/KeyValueSegmentIdentifier.java @@ -42,6 +42,12 @@ public enum KeyValueSegmentIdentifier implements SegmentIdentifier { true, false, true), + STORAGE_FREEZER_STATE( + "STORAGE_FREEZER_STATE".getBytes(StandardCharsets.UTF_8), + EnumSet.of(BONSAI_ARCHIVE), + true, + false, + true), VARIABLES(new byte[] {11}), // formerly GOQUORUM_PRIVATE_WORLD_STATE // previously supported GoQuorum private states diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java index 76b6aff90d1..3288d9063e2 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java @@ -208,7 +208,7 @@ public void removeFlatAccountStorageValueByStorageSlotHash( transaction.put(ACCOUNT_STORAGE_STORAGE, keySuffixed, DELETED_STORAGE_VALUE); } - public byte[] calculateNaturalSlotKey(final Hash accountHash, final Hash slotHash) { + public static byte[] calculateNaturalSlotKey(final Hash accountHash, final Hash slotHash) { return Bytes.concatenate(accountHash, slotHash).toArrayUnsafe(); } diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java index bbd0d4c4a0b..aeb80c2f049 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java +++ 
b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java @@ -89,7 +89,8 @@ public synchronized int moveBlockStateToFreezer() { throw new IllegalStateException("DB mode version not set"); } - AtomicInteger frozenStateCount = new AtomicInteger(); + AtomicInteger frozenAccountStateCount = new AtomicInteger(); + AtomicInteger frozenStorageStateCount = new AtomicInteger(); LOG.atDebug() .setMessage( @@ -99,7 +100,7 @@ public synchronized int moveBlockStateToFreezer() { .addArgument(retainAboveThisBlock) .log(); - final var blocksToMove = + final var accountsToMove = blocksToMoveToFreezer.asMap().entrySet().stream() .dropWhile((e) -> e.getKey() > retainAboveThisBlock); // TODO - limit to a configurable number of blocks to move per loop @@ -109,35 +110,72 @@ public synchronized int moveBlockStateToFreezer() { // Determine which world state keys have changed in the last N blocks by looking at the // trie logs for the blocks. Then move the old keys to the freezer segment (if and only if they // have changed) - blocksToMove.forEach( - (block) -> { - for (Hash blockHash : block.getValue()) { - Optional trieLog = trieLogManager.getTrieLogLayer(blockHash); - if (trieLog.isPresent()) { - trieLog - .get() - .getAccountChanges() - .forEach( - (address, change) -> { - // Move any previous state for this account - frozenStateCount.addAndGet( - rootWorldStateStorage.freezePreviousAccountState( - blockchain.getBlockHeader(blockHash), - blockchain.getBlockHeader(block.getKey() - 1), - address.addressHash())); - // TODO - block number - 1 is a hack until getNearestBefore() is pulled in - }); - } - movedToFreezer.put(block.getKey(), blockHash); - } - }); + accountsToMove + .parallel() + .forEach( + (block) -> { + for (Hash blockHash : block.getValue()) { + Optional trieLog = trieLogManager.getTrieLogLayer(blockHash); + if (trieLog.isPresent()) { + trieLog + .get() + .getAccountChanges() + .forEach( + (address, change) -> { + // Move any previous state for this account + frozenAccountStateCount.addAndGet( + rootWorldStateStorage.freezePreviousAccountState( + blockchain.getBlockHeader( + blockchain.getBlockHeader(blockHash).get().getParentHash()), + address.addressHash())); + }); + } + movedToFreezer.put(block.getKey(), blockHash); + } + }); + + final var storageToMove = + blocksToMoveToFreezer.asMap().entrySet().stream() + .dropWhile((e) -> e.getKey() > retainAboveThisBlock); + + storageToMove + .parallel() + .forEach( + (block) -> { + for (Hash blockHash : block.getValue()) { + Optional trieLog = trieLogManager.getTrieLogLayer(blockHash); + if (trieLog.isPresent()) { + trieLog + .get() + .getStorageChanges() + .forEach( + (address, storageSlotKey) -> { + storageSlotKey.forEach( + (slotKey, slotValue) -> { + // Move any previous state for this account + frozenStorageStateCount.addAndGet( + rootWorldStateStorage.freezePreviousStorageState( + blockchain.getBlockHeader( + blockchain + .getBlockHeader(blockHash) + .get() + .getParentHash()), + Bytes.concatenate( + address.addressHash(), slotKey.getSlotHash()))); + }); + }); + } + movedToFreezer.put(block.getKey(), blockHash); + } + }); movedToFreezer.keySet().forEach(blocksToMoveToFreezer::removeAll); - if (frozenStateCount.get() > 0) { + if (frozenAccountStateCount.get() > 0 || frozenStorageStateCount.get() > 0) { LOG.atInfo() - .setMessage("froze {} state entries for {} blocks") - .addArgument(frozenStateCount.get()) + .setMessage("froze {} account state entries, {} storage state entries for {} blocks") + 
.addArgument(frozenAccountStateCount.get()) + .addArgument(frozenStorageStateCount.get()) .addArgument(movedToFreezer::size) .log(); } diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java index 6edf2ea763a..0e397ced852 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java @@ -18,6 +18,7 @@ import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.CODE_STORAGE; +import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.STORAGE_FREEZER_STATE; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE; import static org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.ArchiveFlatDbStrategy.DELETED_ACCOUNT_VALUE; @@ -200,27 +201,20 @@ public boolean pruneTrieLog(final Hash blockHash) { } /** - * Move old state from the primary DB segments to "cold" segments that will only be used for - * historic state queries. This prevents performance degradation over time for writes to the + * Move old account state from the primary DB segments to "cold" segments that will only be used + * for historic state queries. This prevents performance degradation over time for writes to the * primary DB segments. * - * @param currentBlockHeader TODO - should not be needed * @param previousBlockHeader the block header for the previous block, used to get the "nearest * before" state * @param accountHash the account to freeze old state for * @return the number of account states that were moved to frozen storage */ public int freezePreviousAccountState( - final Optional currentBlockHeader, - final Optional previousBlockHeader, - final Hash accountHash) { + final Optional previousBlockHeader, final Hash accountHash) { AtomicInteger frozenStateCount = new AtomicInteger(); if (previousBlockHeader.isPresent()) { try { - // Get the key for this block - final BonsaiContext theContext = new BonsaiContext(); - theContext.setBlockHeader(currentBlockHeader.get()); - // Get the key for the previous block final BonsaiContext previousContext = new BonsaiContext(); previousContext.setBlockHeader(previousBlockHeader.get()); @@ -263,6 +257,64 @@ public int freezePreviousAccountState( return frozenStateCount.get(); } + /** + * Move old storage state from the primary DB segments to "cold" segments that will only be used + * for historic state queries. This prevents performance degradation over time for writes to the + * primary DB segments. 
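+ * <p>Note on the lookup below (a descriptive sketch, based on ArchiveFlatDbStrategy): keys in the
+ * archive flat DB segments are the natural key (account hash, or account hash concatenated with
+ * the slot hash) suffixed with a block number, so a getNearestBefore() lookup using the previous
+ * block's suffix should return the most recent entry written at or before that block.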
+ * + * @param previousBlockHeader the block header for the previous block, used to get the "nearest + * before" state + * @param storageSlotKey the storage slot to freeze old state for + * @return the number of storage states that were moved to frozen storage + */ + public int freezePreviousStorageState( + final Optional previousBlockHeader, final Bytes storageSlotKey) { + AtomicInteger frozenStateCount = new AtomicInteger(); + if (previousBlockHeader.isPresent()) { + try { + // Get the key for the previous block + final BonsaiContext previousContext = new BonsaiContext(); + previousContext.setBlockHeader(previousBlockHeader.get()); + final Bytes previousKey = + ArchiveFlatDbStrategy.calculateArchiveKeyWithMaxSuffix( + previousContext, storageSlotKey.toArrayUnsafe()); + + composedWorldStateStorage + .getNearestBefore(ACCOUNT_STORAGE_STORAGE, previousKey) + .filter( + // Ignore deleted entries + found -> + !Arrays.areEqual( + DELETED_ACCOUNT_VALUE, found.value().orElse(DELETED_ACCOUNT_VALUE))) + // Skip "nearest" entries that are for a different account + .filter( + found -> storageSlotKey.commonPrefixLength(found.key()) >= storageSlotKey.size()) + .stream() + .forEach( + (nearestKey) -> { + SegmentedKeyValueStorageTransaction tx = + composedWorldStateStorage.startTransaction(); + tx.remove(ACCOUNT_STORAGE_STORAGE, nearestKey.key().toArrayUnsafe()); + tx.put( + STORAGE_FREEZER_STATE, + nearestKey.key().toArrayUnsafe(), + nearestKey.value().get()); + tx.commit(); + frozenStateCount.getAndIncrement(); + }); + + LOG.atDebug() + .setMessage("no previous state for storage {} found to move to cold storage") + .addArgument(storageSlotKey) + .log(); + } catch (Exception e) { + LOG.error("Error moving storage state for slot {} to cold storage", storageSlotKey, e); + } + } + + return frozenStateCount.get(); + } + @Override public synchronized void close() throws Exception { // when the storage clears, close From 06ba76eb2920b6927df9279c3fe2022e18585ac8 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Wed, 4 Sep 2024 15:04:42 +0100 Subject: [PATCH 09/39] Find frozen storage slots correctly, refactor the DB segment names Signed-off-by: Matthew Whitehead --- .../keyvalue/KeyValueSegmentIdentifier.java | 8 ++--- .../storage/flat/ArchiveFlatDbStrategy.java | 29 +++++++++++++--- .../worldview/BonsaiArchiveFreezer.java | 34 +++++++++++++------ .../DiffBasedWorldStateKeyValueStorage.java | 14 ++++---- 4 files changed, 59 insertions(+), 26 deletions(-) diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/storage/keyvalue/KeyValueSegmentIdentifier.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/storage/keyvalue/KeyValueSegmentIdentifier.java index 6d20bbb52d6..06ba67a6456 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/storage/keyvalue/KeyValueSegmentIdentifier.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/storage/keyvalue/KeyValueSegmentIdentifier.java @@ -36,14 +36,14 @@ public enum KeyValueSegmentIdentifier implements SegmentIdentifier { ACCOUNT_STORAGE_STORAGE(new byte[] {8}, EnumSet.of(BONSAI, BONSAI_ARCHIVE), false, true, false), TRIE_BRANCH_STORAGE(new byte[] {9}, EnumSet.of(BONSAI, BONSAI_ARCHIVE), false, true, false), TRIE_LOG_STORAGE(new byte[] {10}, EnumSet.of(BONSAI, BONSAI_ARCHIVE), true, false, true), - ACCOUNT_FREEZER_STATE( - "ACCOUNT_FREEZER_STATE".getBytes(StandardCharsets.UTF_8), + ACCOUNT_INFO_STATE_FREEZER( + "ACCOUNT_INFO_STATE_FREEZER".getBytes(StandardCharsets.UTF_8), EnumSet.of(BONSAI_ARCHIVE), 
true, false, true), - STORAGE_FREEZER_STATE( - "STORAGE_FREEZER_STATE".getBytes(StandardCharsets.UTF_8), + ACCOUNT_STORAGE_FREEZER( + "ACCOUNT_STORAGE_FREEZER".getBytes(StandardCharsets.UTF_8), EnumSet.of(BONSAI_ARCHIVE), true, false, diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java index 3288d9063e2..ae882080bf3 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java @@ -14,8 +14,9 @@ */ package org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat; -import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_FREEZER_STATE; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE; +import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE_FREEZER; +import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_STORAGE_FREEZER; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE; import org.hyperledger.besu.datatypes.Hash; @@ -86,7 +87,7 @@ public Optional getFlatAccount( // Check the frozen state as old state is moved out of the primary DB segment final Optional frozenAccountFound = storage - .getNearestBefore(ACCOUNT_FREEZER_STATE, keyNearest) + .getNearestBefore(ACCOUNT_INFO_STATE_FREEZER, keyNearest) // return empty when we find a "deleted value key" .filter( found -> @@ -167,10 +168,30 @@ public Optional getFlatStorageValueByStorageSlotKey( if (storageFound.isPresent()) { getStorageValueFlatDatabaseCounter.inc(); + return storageFound; } else { - getStorageValueNotFoundInFlatDatabaseCounter.inc(); + // Check the frozen storage as old state is moved out of the primary DB segment + final Optional frozenStorageFound = + storage + .getNearestBefore(ACCOUNT_STORAGE_FREEZER, keyNearest) + // return empty when we find a "deleted value key" + .filter( + found -> + !Arrays.areEqual( + DELETED_STORAGE_VALUE, found.value().orElse(DELETED_STORAGE_VALUE))) + // don't return accounts that do not have a matching account hash + .filter(found -> accountHash.commonPrefixLength(found.key()) >= accountHash.size()) + .flatMap(SegmentedKeyValueStorage.NearestKeyValue::wrapBytes); + + if (frozenStorageFound.isPresent()) { + // TODO - different metric for frozen lookups? 
+ getStorageValueFlatDatabaseCounter.inc(); + } else { + getStorageValueNotFoundInFlatDatabaseCounter.inc(); + } + + return frozenStorageFound; } - return storageFound; } /* diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java index aeb80c2f049..25ab743ab2c 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java @@ -90,7 +90,7 @@ public synchronized int moveBlockStateToFreezer() { } AtomicInteger frozenAccountStateCount = new AtomicInteger(); - AtomicInteger frozenStorageStateCount = new AtomicInteger(); + AtomicInteger frozenAccountStorageCount = new AtomicInteger(); LOG.atDebug() .setMessage( @@ -105,7 +105,8 @@ public synchronized int moveBlockStateToFreezer() { .dropWhile((e) -> e.getKey() > retainAboveThisBlock); // TODO - limit to a configurable number of blocks to move per loop - final Multimap movedToFreezer = ArrayListMultimap.create(); + final Multimap accountStateFreezerActionsComplete = ArrayListMultimap.create(); + final Multimap accountStorageFreezerActionsComplete = ArrayListMultimap.create(); // Determine which world state keys have changed in the last N blocks by looking at the // trie logs for the blocks. Then move the old keys to the freezer segment (if and only if they @@ -130,7 +131,7 @@ public synchronized int moveBlockStateToFreezer() { address.addressHash())); }); } - movedToFreezer.put(block.getKey(), blockHash); + accountStateFreezerActionsComplete.put(block.getKey(), blockHash); } }); @@ -153,7 +154,7 @@ public synchronized int moveBlockStateToFreezer() { storageSlotKey.forEach( (slotKey, slotValue) -> { // Move any previous state for this account - frozenStorageStateCount.addAndGet( + frozenAccountStorageCount.addAndGet( rootWorldStateStorage.freezePreviousStorageState( blockchain.getBlockHeader( blockchain @@ -165,22 +166,33 @@ public synchronized int moveBlockStateToFreezer() { }); }); } - movedToFreezer.put(block.getKey(), blockHash); + accountStorageFreezerActionsComplete.put(block.getKey(), blockHash); } }); - movedToFreezer.keySet().forEach(blocksToMoveToFreezer::removeAll); + // For us to consider all state and storage changes for a block complete, it must have been + // recorded in both accountState and accountStorage lists. 
If only one finished we need to try + // freezing state/storage for that block again on the next loop + int frozenBlocksCompleted = blocksToMoveToFreezer.size(); + accountStateFreezerActionsComplete + .keySet() + .forEach( + (b) -> { + if (accountStorageFreezerActionsComplete.containsKey(b)) { + blocksToMoveToFreezer.removeAll(b); + } + }); - if (frozenAccountStateCount.get() > 0 || frozenStorageStateCount.get() > 0) { + if (frozenAccountStateCount.get() > 0 || frozenAccountStorageCount.get() > 0) { LOG.atInfo() - .setMessage("froze {} account state entries, {} storage state entries for {} blocks") + .setMessage("froze {} account state entries, {} account storage entries for {} blocks") .addArgument(frozenAccountStateCount.get()) - .addArgument(frozenStorageStateCount.get()) - .addArgument(movedToFreezer::size) + .addArgument(frozenAccountStorageCount.get()) + .addArgument(frozenBlocksCompleted) .log(); } - return movedToFreezer.size(); + return frozenBlocksCompleted; } @Override diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java index 0e397ced852..5ac2382179b 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java @@ -14,11 +14,11 @@ */ package org.hyperledger.besu.ethereum.trie.diffbased.common.storage; -import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_FREEZER_STATE; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE; +import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE_FREEZER; +import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_STORAGE_FREEZER; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.CODE_STORAGE; -import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.STORAGE_FREEZER_STATE; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE; import static org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.ArchiveFlatDbStrategy.DELETED_ACCOUNT_VALUE; @@ -201,9 +201,9 @@ public boolean pruneTrieLog(final Hash blockHash) { } /** - * Move old account state from the primary DB segments to "cold" segments that will only be used - * for historic state queries. This prevents performance degradation over time for writes to the - * primary DB segments. + * Move old account state from the primary DB segments to "freezer" segments that will only be + * used for historic state queries. This prevents performance degradation over time for writes to + * the primary DB segments. 
* * @param previousBlockHeader the block header for the previous block, used to get the "nearest * before" state @@ -238,7 +238,7 @@ public int freezePreviousAccountState( composedWorldStateStorage.startTransaction(); tx.remove(ACCOUNT_INFO_STATE, nearestKey.key().toArrayUnsafe()); tx.put( - ACCOUNT_FREEZER_STATE, + ACCOUNT_INFO_STATE_FREEZER, nearestKey.key().toArrayUnsafe(), nearestKey.value().get()); tx.commit(); @@ -296,7 +296,7 @@ public int freezePreviousStorageState( composedWorldStateStorage.startTransaction(); tx.remove(ACCOUNT_STORAGE_STORAGE, nearestKey.key().toArrayUnsafe()); tx.put( - STORAGE_FREEZER_STATE, + ACCOUNT_STORAGE_FREEZER, nearestKey.key().toArrayUnsafe(), nearestKey.value().get()); tx.commit(); From 1d930f5f78c3cee1b88c730d3e9c983c2fe244e8 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Thu, 5 Sep 2024 17:07:30 +0100 Subject: [PATCH 10/39] Check for blocks to freeze state for on startup. Store the most recent block state has been frozen for Signed-off-by: Matthew Whitehead --- .../controller/BesuControllerBuilder.java | 1 - .../worldview/BonsaiArchiveFreezer.java | 68 ++++++++++++++----- .../DiffBasedWorldStateKeyValueStorage.java | 20 ++++++ 3 files changed, 72 insertions(+), 17 deletions(-) diff --git a/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java b/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java index b6f5862a6e8..054f78aa8bf 100644 --- a/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java +++ b/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java @@ -843,7 +843,6 @@ private BonsaiArchiveFreezer createBonsaiArchiveFreezer( (BonsaiWorldStateKeyValueStorage) worldStateStorage, blockchain, scheduler::executeServiceTask, - 10, trieLogManager); archiveFreezer.initialize(); diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java index 25ab743ab2c..d38d91dccbf 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java @@ -18,6 +18,7 @@ import org.hyperledger.besu.ethereum.chain.BlockAddedEvent; import org.hyperledger.besu.ethereum.chain.BlockAddedObserver; import org.hyperledger.besu.ethereum.chain.Blockchain; +import org.hyperledger.besu.ethereum.core.Block; import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.DiffBasedWorldStateKeyValueStorage; import org.hyperledger.besu.ethereum.trie.diffbased.common.trielog.TrieLogManager; import org.hyperledger.besu.plugin.services.trielogs.TrieLog; @@ -48,7 +49,8 @@ public class BonsaiArchiveFreezer implements BlockAddedObserver { private final DiffBasedWorldStateKeyValueStorage rootWorldStateStorage; private final Blockchain blockchain; private final Consumer executeAsync; - private final long numberOfBlocksToKeepInWarmStorage; + private static final int PRELOAD_LIMIT = 1000; + private static final int DISTANCE_FROM_HEAD_BEFORE_FREEZING_OLD_STATE = 10; private final TrieLogManager trieLogManager; private final Multimap blocksToMoveToFreezer = @@ -58,24 +60,56 @@ public BonsaiArchiveFreezer( final DiffBasedWorldStateKeyValueStorage rootWorldStateStorage, final Blockchain blockchain, final Consumer executeAsync, - final 
long numberOfBlocksToKeepInWarmStorage, final TrieLogManager trieLogManager) { this.rootWorldStateStorage = rootWorldStateStorage; this.blockchain = blockchain; this.executeAsync = executeAsync; - this.numberOfBlocksToKeepInWarmStorage = numberOfBlocksToKeepInWarmStorage; this.trieLogManager = trieLogManager; } - public int initialize() { - // TODO Probably need to freeze old blocks that haven't been frozen already? - return 0; + public void initialize() { + // On startup there will be recent blocks whose state and storage hasn't been archived yet. + // Pre-load them ready for freezing state once enough new blocks have been added to the chain. + Optional frozenBlocksHead = Optional.empty(); + + Optional latestFrozenBlock = rootWorldStateStorage.getLatestArchiveFrozenBlock(); + + if (latestFrozenBlock.isPresent()) { + frozenBlocksHead = latestFrozenBlock; + } else { + // Start from genesis block + if (blockchain.getBlockHashByNumber(0).isPresent()) { + frozenBlocksHead = Optional.of(0L); + } + } + + if (frozenBlocksHead.isPresent()) { + int preLoadedBlocks = 0; + Optional nextBlock = blockchain.getBlockByNumber(frozenBlocksHead.get()); + for (int i = 0; i < PRELOAD_LIMIT; i++) { + if (nextBlock.isPresent()) { + addToFreezerQueue( + nextBlock.get().getHeader().getNumber(), nextBlock.get().getHeader().getHash()); + preLoadedBlocks++; + nextBlock = blockchain.getBlockByNumber(nextBlock.get().getHeader().getNumber() + 1); + } else { + break; + } + } + LOG.atInfo() + .setMessage("Preloaded {} blocks to move their state and storage to the archive freezer") + .addArgument(preLoadedBlocks) + .log(); + } + + // Start processing any backlog on startup - don't wait for a new block to be imported. + moveBlockStateToFreezer(); } public synchronized void addToFreezerQueue(final long blockNumber, final Hash blockHash) { LOG.atDebug() .setMessage( - "adding block to archive freezer queue for moving to cold storage, blockNumber {}; blockHash {}") + "Adding block to archive freezer queue for moving to cold storage, blockNumber {}; blockHash {}") .addArgument(blockNumber) .addArgument(blockHash) .log(); @@ -84,7 +118,8 @@ public synchronized void addToFreezerQueue(final long blockNumber, final Hash bl public synchronized int moveBlockStateToFreezer() { final long retainAboveThisBlock = - blockchain.getChainHeadBlockNumber() - numberOfBlocksToKeepInWarmStorage; + blockchain.getChainHeadBlockNumber() - DISTANCE_FROM_HEAD_BEFORE_FREEZING_OLD_STATE; + if (rootWorldStateStorage.getFlatDbMode().getVersion() == Bytes.EMPTY) { throw new IllegalStateException("DB mode version not set"); } @@ -92,18 +127,17 @@ public synchronized int moveBlockStateToFreezer() { AtomicInteger frozenAccountStateCount = new AtomicInteger(); AtomicInteger frozenAccountStorageCount = new AtomicInteger(); - LOG.atDebug() + LOG.atTrace() .setMessage( "Moving cold state to freezer storage (chainHeadNumber: {} - numberOfBlocksToKeepInWarmStorage: {}) = {}") .addArgument(blockchain::getChainHeadBlockNumber) - .addArgument(numberOfBlocksToKeepInWarmStorage) + .addArgument(DISTANCE_FROM_HEAD_BEFORE_FREEZING_OLD_STATE) .addArgument(retainAboveThisBlock) .log(); final var accountsToMove = blocksToMoveToFreezer.asMap().entrySet().stream() .dropWhile((e) -> e.getKey() > retainAboveThisBlock); - // TODO - limit to a configurable number of blocks to move per loop final Multimap accountStateFreezerActionsComplete = ArrayListMultimap.create(); final Multimap accountStorageFreezerActionsComplete = ArrayListMultimap.create(); @@ -173,26 +207,28 @@ public 
synchronized int moveBlockStateToFreezer() { // For us to consider all state and storage changes for a block complete, it must have been // recorded in both accountState and accountStorage lists. If only one finished we need to try // freezing state/storage for that block again on the next loop - int frozenBlocksCompleted = blocksToMoveToFreezer.size(); + AtomicInteger frozenBlocksCompleted = new AtomicInteger(); accountStateFreezerActionsComplete .keySet() .forEach( (b) -> { if (accountStorageFreezerActionsComplete.containsKey(b)) { + frozenBlocksCompleted.getAndIncrement(); + rootWorldStateStorage.setLatestArchiveFrozenBlock(b); blocksToMoveToFreezer.removeAll(b); } }); if (frozenAccountStateCount.get() > 0 || frozenAccountStorageCount.get() > 0) { - LOG.atInfo() - .setMessage("froze {} account state entries, {} account storage entries for {} blocks") + LOG.atDebug() + .setMessage("Froze {} account state entries, {} account storage entries for {} blocks") .addArgument(frozenAccountStateCount.get()) .addArgument(frozenAccountStorageCount.get()) - .addArgument(frozenBlocksCompleted) + .addArgument(frozenBlocksCompleted.get()) .log(); } - return frozenBlocksCompleted; + return frozenBlocksCompleted.get(); } @Override diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java index 5ac2382179b..3064233fb0c 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java @@ -66,6 +66,10 @@ public abstract class DiffBasedWorldStateKeyValueStorage public static final byte[] WORLD_BLOCK_HASH_KEY = "worldBlockHash".getBytes(StandardCharsets.UTF_8); + // 0x61726368697665426C6F636B7346726F7A656E + public static final byte[] ARCHIVE_BLOCKS_FROZEN = + "archiveBlocksFrozen".getBytes(StandardCharsets.UTF_8); + private final AtomicBoolean shouldClose = new AtomicBoolean(false); protected final AtomicBoolean isClosed = new AtomicBoolean(false); @@ -315,6 +319,22 @@ public int freezePreviousStorageState( return frozenStateCount.get(); } + public Optional getLatestArchiveFrozenBlock() { + return composedWorldStateStorage + .get(ACCOUNT_INFO_STATE_FREEZER, ARCHIVE_BLOCKS_FROZEN) + .map(Bytes::wrap) + .map(Bytes::toLong); + } + + public void setLatestArchiveFrozenBlock(final Long blockNumber) { + SegmentedKeyValueStorageTransaction tx = composedWorldStateStorage.startTransaction(); + tx.put( + ACCOUNT_INFO_STATE_FREEZER, + ARCHIVE_BLOCKS_FROZEN, + Bytes.ofUnsignedLong(blockNumber).toArrayUnsafe()); + tx.commit(); + } + @Override public synchronized void close() throws Exception { // when the storage clears, close From 605d590a0dd71725f530941bfc912bb316b44def Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Fri, 20 Sep 2024 15:53:40 +0100 Subject: [PATCH 11/39] Make sure genesis world state is created in archive mode Signed-off-by: Matthew Whitehead --- .../common/GenesisWorldStateProvider.java | 16 ++++++-- .../storage/flat/FlatDbStrategyProvider.java | 39 ++++++++++++------- .../worldstate/DataStorageConfiguration.java | 6 +++ 3 files changed, 43 insertions(+), 18 deletions(-) diff --git 
a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/common/GenesisWorldStateProvider.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/common/GenesisWorldStateProvider.java index 61c96282666..efe601acbea 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/common/GenesisWorldStateProvider.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/common/GenesisWorldStateProvider.java @@ -28,6 +28,7 @@ import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration; import org.hyperledger.besu.evm.internal.EvmConfiguration; import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem; +import org.hyperledger.besu.plugin.services.storage.DataStorageFormat; import org.hyperledger.besu.services.kvstore.InMemoryKeyValueStorage; import org.hyperledger.besu.services.kvstore.SegmentedInMemoryKeyValueStorage; @@ -43,8 +44,13 @@ public class GenesisWorldStateProvider { */ public static MutableWorldState createGenesisWorldState( final DataStorageConfiguration dataStorageConfiguration) { - if (Objects.requireNonNull(dataStorageConfiguration).getDataStorageFormat().isBonsaiFormat()) { - return createGenesisBonsaiWorldState(); + + if (Objects.requireNonNull(dataStorageConfiguration).getDataStorageFormat() + == DataStorageFormat.BONSAI) { + return createGenesisBonsaiWorldState(false); + } else if (Objects.requireNonNull(dataStorageConfiguration).getDataStorageFormat() + == DataStorageFormat.BONSAI_ARCHIVE) { + return createGenesisBonsaiWorldState(true); } else { return createGenesisForestWorldState(); } @@ -55,7 +61,7 @@ public static MutableWorldState createGenesisWorldState( * * @return a mutable world state for the Genesis block */ - private static MutableWorldState createGenesisBonsaiWorldState() { + private static MutableWorldState createGenesisBonsaiWorldState(final boolean archiveMode) { final BonsaiCachedMerkleTrieLoader bonsaiCachedMerkleTrieLoader = new BonsaiCachedMerkleTrieLoader(new NoOpMetricsSystem()); final BonsaiWorldStateKeyValueStorage bonsaiWorldStateKeyValueStorage = @@ -65,7 +71,9 @@ private static MutableWorldState createGenesisBonsaiWorldState() { new InMemoryKeyValueStorage(), new NoOpMetricsSystem()), new NoOpMetricsSystem(), - DataStorageConfiguration.DEFAULT_BONSAI_CONFIG); + archiveMode + ? 
DataStorageConfiguration.DEFAULT_BONSAI_ARCHIVE_CONFIG + : DataStorageConfiguration.DEFAULT_BONSAI_CONFIG); return new BonsaiWorldState( bonsaiWorldStateKeyValueStorage, bonsaiCachedMerkleTrieLoader, diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java index 3e08f8eb6fd..5640deb3c4f 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java @@ -16,6 +16,7 @@ import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.CODE_STORAGE; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE; +import static org.hyperledger.besu.ethereum.trie.diffbased.common.storage.DiffBasedWorldStateKeyValueStorage.WORLD_ROOT_HASH_KEY; import org.hyperledger.besu.ethereum.bonsai.BonsaiContext; import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.ArchiveCodeStorageStrategy; @@ -78,15 +79,20 @@ public void loadFlatDbStrategy(final SegmentedKeyValueStorage composedWorldState } @VisibleForTesting - FlatDbMode deriveFlatDbStrategy(final SegmentedKeyValueStorage composedWorldStateStorage) { - // final FlatDbMode requestedFlatDbMode = - // dataStorageConfiguration.getUnstable().getBonsaiFullFlatDbEnabled() - // ? FlatDbMode.FULL - // : FlatDbMode.PARTIAL; + synchronized FlatDbMode deriveFlatDbStrategy( + final SegmentedKeyValueStorage composedWorldStateStorage) { + final FlatDbMode requestedFlatDbMode = + dataStorageConfiguration.getUnstable().getBonsaiFullFlatDbEnabled() + ? (dataStorageConfiguration + .getDataStorageFormat() + .equals(DataStorageFormat.BONSAI_ARCHIVE) + ? FlatDbMode.ARCHIVE + : FlatDbMode.FULL) + : FlatDbMode.PARTIAL; // TODO: commented out for archive testing - // final var existingTrieData = - // composedWorldStateStorage.get(TRIE_BRANCH_STORAGE, WORLD_ROOT_HASH_KEY).isPresent(); + final var existingTrieData = + composedWorldStateStorage.get(TRIE_BRANCH_STORAGE, WORLD_ROOT_HASH_KEY).isPresent(); var flatDbMode = FlatDbMode.fromVersion( @@ -100,15 +106,20 @@ FlatDbMode deriveFlatDbStrategy(final SegmentedKeyValueStorage composedWorldStat // and default to the storage config otherwise // TODO: temporarily hard code ARCHIVE mode for testing + /*var flatDbModeVal = + dataStorageConfiguration + .getDataStorageFormat() + .equals(DataStorageFormat.BONSAI_ARCHIVE) + ? FlatDbMode.ARCHIVE.getVersion() + : FlatDbMode.FULL.getVersion();*/ var flatDbModeVal = - dataStorageConfiguration - .getDataStorageFormat() - .equals(DataStorageFormat.BONSAI_ARCHIVE) + existingTrieData ? FlatDbMode.ARCHIVE.getVersion() - : FlatDbMode.FULL.getVersion(); - // existingTrieData - // ? 
FlatDbMode.ARCHIVE.getVersion() - // : requestedFlatDbMode.getVersion(); + : requestedFlatDbMode.getVersion(); + + // MRW TODO - If there is archive data in the freezer segment, we can assume + // archive mode + // persist this config in the db var setDbModeTx = composedWorldStateStorage.startTransaction(); setDbModeTx.put( diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/DataStorageConfiguration.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/DataStorageConfiguration.java index 320e38733d4..48f8cf01737 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/DataStorageConfiguration.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/DataStorageConfiguration.java @@ -41,6 +41,12 @@ public interface DataStorageConfiguration { .bonsaiMaxLayersToLoad(DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD) .build(); + DataStorageConfiguration DEFAULT_BONSAI_ARCHIVE_CONFIG = + ImmutableDataStorageConfiguration.builder() + .dataStorageFormat(DataStorageFormat.BONSAI_ARCHIVE) + .bonsaiMaxLayersToLoad(DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD) + .build(); + DataStorageConfiguration DEFAULT_BONSAI_PARTIAL_DB_CONFIG = ImmutableDataStorageConfiguration.builder() .dataStorageFormat(DataStorageFormat.BONSAI) From c7c2ee5d25e25b52605bac6a85f6893abeb76d7b Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Mon, 23 Sep 2024 13:01:45 +0100 Subject: [PATCH 12/39] Fix incorrect logs Signed-off-by: Matthew Whitehead --- .../DiffBasedWorldStateKeyValueStorage.java | 26 +++++++++++-------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java index 3064233fb0c..b8dace100e4 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java @@ -249,10 +249,12 @@ public int freezePreviousAccountState( frozenStateCount.getAndIncrement(); }); - LOG.atDebug() - .setMessage("no previous state for account {} found to move to cold storage") - .addArgument(accountHash) - .log(); + if (frozenStateCount.get() == 0) { + LOG.atDebug() + .setMessage("no previous state for account {} found to move to cold storage") + .addArgument(accountHash) + .log(); + } } catch (Exception e) { LOG.error("Error moving account state for account {} to cold storage", accountHash, e); } @@ -273,7 +275,7 @@ public int freezePreviousAccountState( */ public int freezePreviousStorageState( final Optional previousBlockHeader, final Bytes storageSlotKey) { - AtomicInteger frozenStateCount = new AtomicInteger(); + AtomicInteger frozenStorageCount = new AtomicInteger(); if (previousBlockHeader.isPresent()) { try { // Get the key for the previous block @@ -304,19 +306,21 @@ public int freezePreviousStorageState( nearestKey.key().toArrayUnsafe(), nearestKey.value().get()); tx.commit(); - frozenStateCount.getAndIncrement(); + frozenStorageCount.getAndIncrement(); }); - LOG.atDebug() - .setMessage("no previous state for storage {} found to move to cold storage") - .addArgument(storageSlotKey) - .log(); + if (frozenStorageCount.get() == 0) { + LOG.atDebug() + .setMessage("no previous state for storage {} 
found to move to cold storage") + .addArgument(storageSlotKey) + .log(); + } } catch (Exception e) { LOG.error("Error moving storage state for slot {} to cold storage", storageSlotKey, e); } } - return frozenStateCount.get(); + return frozenStorageCount.get(); } public Optional getLatestArchiveFrozenBlock() { From 990bd87eb3e4dc35b394826110d92624be952f15 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Thu, 26 Sep 2024 09:45:15 +0100 Subject: [PATCH 13/39] Ensure deleted storage is returned from live DB segment, not old storage from the freezer segment Signed-off-by: Matthew Whitehead --- .../storage/flat/ArchiveFlatDbStrategy.java | 92 +++++++-------- .../DiffBasedWorldStateKeyValueStorage.java | 110 +++++++++--------- 2 files changed, 100 insertions(+), 102 deletions(-) diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java index ae882080bf3..171da04365f 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java @@ -54,7 +54,7 @@ public ArchiveFlatDbStrategy( static final byte[] MIN_BLOCK_SUFFIX = Bytes.ofUnsignedLong(0L).toArrayUnsafe(); public static final byte[] DELETED_ACCOUNT_VALUE = new byte[0]; public static final byte[] DELETED_CODE_VALUE = new byte[0]; - static final byte[] DELETED_STORAGE_VALUE = new byte[0]; + public static final byte[] DELETED_STORAGE_VALUE = new byte[0]; @Override public Optional getFlatAccount( @@ -64,48 +64,43 @@ public Optional getFlatAccount( final SegmentedKeyValueStorage storage) { getAccountCounter.inc(); + Optional accountFound; + // keyNearest, use MAX_BLOCK_SUFFIX in the absence of a block context: Bytes keyNearest = calculateArchiveKeyWithMaxSuffix(context, accountHash.toArrayUnsafe()); - // use getNearest() with an account key that is suffixed by the block context - final Optional accountFound = + // Find the nearest account state for this address and block context + Optional nearestAccount = storage .getNearestBefore(ACCOUNT_INFO_STATE, keyNearest) - // return empty when we find a "deleted value key" - .filter( - found -> - !Arrays.areEqual( - DELETED_ACCOUNT_VALUE, found.value().orElse(DELETED_ACCOUNT_VALUE))) - // don't return accounts that do not have a matching account hash - .filter(found -> accountHash.commonPrefixLength(found.key()) >= accountHash.size()) - .flatMap(SegmentedKeyValueStorage.NearestKeyValue::wrapBytes); + .filter(found -> accountHash.commonPrefixLength(found.key()) >= accountHash.size()); - if (accountFound.isPresent()) { - getAccountFoundInFlatDatabaseCounter.inc(); - return accountFound; - } else { - // Check the frozen state as old state is moved out of the primary DB segment - final Optional frozenAccountFound = + // If there isn't a match look in the freezer DB segment + if (nearestAccount.isEmpty()) { + accountFound = storage .getNearestBefore(ACCOUNT_INFO_STATE_FREEZER, keyNearest) - // return empty when we find a "deleted value key" + .filter(found -> accountHash.commonPrefixLength(found.key()) >= accountHash.size()) + .flatMap(SegmentedKeyValueStorage.NearestKeyValue::wrapBytes); + } else { + accountFound = + nearestAccount .filter( found -> !Arrays.areEqual( DELETED_ACCOUNT_VALUE, 
found.value().orElse(DELETED_ACCOUNT_VALUE))) - // don't return accounts that do not have a matching account hash - .filter(found -> accountHash.commonPrefixLength(found.key()) >= accountHash.size()) + // return empty when we find a "deleted value key" .flatMap(SegmentedKeyValueStorage.NearestKeyValue::wrapBytes); + } - if (frozenAccountFound.isPresent()) { - // TODO - different metric for frozen lookups? - getAccountFoundInFlatDatabaseCounter.inc(); - } else { - getAccountNotFoundInFlatDatabaseCounter.inc(); - } - - return frozenAccountFound; + if (accountFound.isPresent()) { + // TODO - different metric for frozen lookups? + getAccountFoundInFlatDatabaseCounter.inc(); + } else { + getAccountNotFoundInFlatDatabaseCounter.inc(); } + + return accountFound; } /* @@ -144,6 +139,8 @@ public Optional getFlatStorageValueByStorageSlotKey( final Hash accountHash, final StorageSlotKey storageSlotKey, final SegmentedKeyValueStorage storage) { + + Optional storageFound; getStorageValueCounter.inc(); // get natural key from account hash and slot key @@ -151,47 +148,44 @@ public Optional getFlatStorageValueByStorageSlotKey( // keyNearest, use MAX_BLOCK_SUFFIX in the absence of a block context: Bytes keyNearest = calculateArchiveKeyWithMaxSuffix(context, naturalKey); - // use getNearest() with a key that is suffixed by the block context - final Optional storageFound = + // Find the nearest storage for this address, slot key hash, and block context + Optional nearestStorage = storage .getNearestBefore(ACCOUNT_STORAGE_STORAGE, keyNearest) - // return empty when we find a "deleted value key" - .filter( - found -> - !Arrays.areEqual( - DELETED_STORAGE_VALUE, found.value().orElse(DELETED_STORAGE_VALUE))) - // don't return accounts that do not have a matching account hash and slotHash prefix .filter( - found -> Bytes.of(naturalKey).commonPrefixLength(found.key()) >= naturalKey.length) - // map NearestKey to Bytes-wrapped value - .flatMap(SegmentedKeyValueStorage.NearestKeyValue::wrapBytes); + found -> Bytes.of(naturalKey).commonPrefixLength(found.key()) >= naturalKey.length); - if (storageFound.isPresent()) { - getStorageValueFlatDatabaseCounter.inc(); - return storageFound; - } else { + // If there isn't a match look in the freezer DB segment + if (nearestStorage.isEmpty()) { // Check the frozen storage as old state is moved out of the primary DB segment - final Optional frozenStorageFound = + storageFound = storage .getNearestBefore(ACCOUNT_STORAGE_FREEZER, keyNearest) + // don't return accounts that do not have a matching account hash + .filter( + found -> + Bytes.of(naturalKey).commonPrefixLength(found.key()) >= naturalKey.length) + .flatMap(SegmentedKeyValueStorage.NearestKeyValue::wrapBytes); + } else { + storageFound = + nearestStorage // return empty when we find a "deleted value key" .filter( found -> !Arrays.areEqual( DELETED_STORAGE_VALUE, found.value().orElse(DELETED_STORAGE_VALUE))) - // don't return accounts that do not have a matching account hash - .filter(found -> accountHash.commonPrefixLength(found.key()) >= accountHash.size()) + // map NearestKey to Bytes-wrapped value .flatMap(SegmentedKeyValueStorage.NearestKeyValue::wrapBytes); - if (frozenStorageFound.isPresent()) { + if (storageFound.isPresent()) { // TODO - different metric for frozen lookups? 
getStorageValueFlatDatabaseCounter.inc(); } else { getStorageValueNotFoundInFlatDatabaseCounter.inc(); } - - return frozenStorageFound; } + + return storageFound; } /* diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java index b8dace100e4..69a8c92eb2d 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java @@ -20,7 +20,6 @@ import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.CODE_STORAGE; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE; -import static org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.ArchiveFlatDbStrategy.DELETED_ACCOUNT_VALUE; import org.hyperledger.besu.datatypes.Hash; import org.hyperledger.besu.ethereum.bonsai.BonsaiContext; @@ -51,7 +50,6 @@ import kotlin.Pair; import org.apache.tuweni.bytes.Bytes; import org.apache.tuweni.bytes.Bytes32; -import org.bouncycastle.util.Arrays; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -223,31 +221,33 @@ public int freezePreviousAccountState( final BonsaiContext previousContext = new BonsaiContext(); previousContext.setBlockHeader(previousBlockHeader.get()); final Bytes previousKey = - ArchiveFlatDbStrategy.calculateArchiveKeyWithMaxSuffix( - previousContext, accountHash.toArrayUnsafe()); - - composedWorldStateStorage - .getNearestBefore(ACCOUNT_INFO_STATE, previousKey) - .filter( - // Ignore deleted entries - found -> - !Arrays.areEqual( - DELETED_ACCOUNT_VALUE, found.value().orElse(DELETED_ACCOUNT_VALUE))) - // Skip "nearest" entries that are for a different account - .filter(found -> accountHash.commonPrefixLength(found.key()) >= accountHash.size()) - .stream() - .forEach( - (nearestKey) -> { - SegmentedKeyValueStorageTransaction tx = - composedWorldStateStorage.startTransaction(); - tx.remove(ACCOUNT_INFO_STATE, nearestKey.key().toArrayUnsafe()); - tx.put( - ACCOUNT_INFO_STATE_FREEZER, - nearestKey.key().toArrayUnsafe(), - nearestKey.value().get()); - tx.commit(); - frozenStateCount.getAndIncrement(); - }); + Bytes.of( + ArchiveFlatDbStrategy.calculateArchiveKeyWithMinSuffix( + previousContext, accountHash.toArrayUnsafe())); + + Optional nextMatch; + + // Move all entries that match this address hash to the freezer DB segment + while ((nextMatch = + composedWorldStateStorage + .getNearestBefore(ACCOUNT_INFO_STATE, previousKey) + .filter( + found -> accountHash.commonPrefixLength(found.key()) >= accountHash.size())) + .isPresent()) { + nextMatch.stream() + .forEach( + (nearestKey) -> { + SegmentedKeyValueStorageTransaction tx = + composedWorldStateStorage.startTransaction(); + tx.remove(ACCOUNT_INFO_STATE, nearestKey.key().toArrayUnsafe()); + tx.put( + ACCOUNT_INFO_STATE_FREEZER, + nearestKey.key().toArrayUnsafe(), + nearestKey.value().get()); + tx.commit(); + frozenStateCount.getAndIncrement(); + }); + } if (frozenStateCount.get() == 0) { LOG.atDebug() @@ -282,32 +282,36 @@ public int freezePreviousStorageState( final BonsaiContext previousContext = new BonsaiContext(); 
previousContext.setBlockHeader(previousBlockHeader.get()); final Bytes previousKey = - ArchiveFlatDbStrategy.calculateArchiveKeyWithMaxSuffix( - previousContext, storageSlotKey.toArrayUnsafe()); - - composedWorldStateStorage - .getNearestBefore(ACCOUNT_STORAGE_STORAGE, previousKey) - .filter( - // Ignore deleted entries - found -> - !Arrays.areEqual( - DELETED_ACCOUNT_VALUE, found.value().orElse(DELETED_ACCOUNT_VALUE))) - // Skip "nearest" entries that are for a different account - .filter( - found -> storageSlotKey.commonPrefixLength(found.key()) >= storageSlotKey.size()) - .stream() - .forEach( - (nearestKey) -> { - SegmentedKeyValueStorageTransaction tx = - composedWorldStateStorage.startTransaction(); - tx.remove(ACCOUNT_STORAGE_STORAGE, nearestKey.key().toArrayUnsafe()); - tx.put( - ACCOUNT_STORAGE_FREEZER, - nearestKey.key().toArrayUnsafe(), - nearestKey.value().get()); - tx.commit(); - frozenStorageCount.getAndIncrement(); - }); + Bytes.of( + ArchiveFlatDbStrategy.calculateArchiveKeyWithMinSuffix( + previousContext, storageSlotKey.toArrayUnsafe())); + + Optional nextMatch; + + // Move all entries that match the storage hash for this address & slot + // to the freezer DB segment + while ((nextMatch = + composedWorldStateStorage + .getNearestBefore(ACCOUNT_STORAGE_STORAGE, previousKey) + .filter( + found -> + storageSlotKey.commonPrefixLength(found.key()) + >= storageSlotKey.size())) + .isPresent()) { + nextMatch.stream() + .forEach( + (nearestKey) -> { + SegmentedKeyValueStorageTransaction tx = + composedWorldStateStorage.startTransaction(); + tx.remove(ACCOUNT_STORAGE_STORAGE, nearestKey.key().toArrayUnsafe()); + tx.put( + ACCOUNT_STORAGE_FREEZER, + nearestKey.key().toArrayUnsafe(), + nearestKey.value().get()); + tx.commit(); + frozenStorageCount.getAndIncrement(); + }); + } if (frozenStorageCount.get() == 0) { LOG.atDebug() From 8518428365412188a97490c3e89020c0b1560498 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Thu, 26 Sep 2024 16:29:03 +0100 Subject: [PATCH 14/39] Adding tests Signed-off-by: Matthew Whitehead --- .../java/org/hyperledger/besu/cli/BesuCommand.java | 5 ++++- .../org/hyperledger/besu/cli/PrivacyOptionsTest.java | 10 ++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/besu/src/main/java/org/hyperledger/besu/cli/BesuCommand.java b/besu/src/main/java/org/hyperledger/besu/cli/BesuCommand.java index d50b60dfe72..a4fee410acc 100644 --- a/besu/src/main/java/org/hyperledger/besu/cli/BesuCommand.java +++ b/besu/src/main/java/org/hyperledger/besu/cli/BesuCommand.java @@ -1936,9 +1936,12 @@ private PrivacyParameters privacyParameters() { throw new ParameterException( commandLine, String.format("%s %s", "Checkpoint sync", errorSuffix)); } - if (getDataStorageConfiguration().getDataStorageFormat().isBonsaiFormat()) { + if (getDataStorageConfiguration().getDataStorageFormat() == DataStorageFormat.BONSAI) { throw new ParameterException(commandLine, String.format("%s %s", "Bonsai", errorSuffix)); } + if (getDataStorageConfiguration().getDataStorageFormat() == DataStorageFormat.BONSAI_ARCHIVE) { + throw new ParameterException(commandLine, String.format("%s %s", "Bonsai archive", errorSuffix)); + } if (Boolean.TRUE.equals(privacyOptionGroup.isPrivacyMultiTenancyEnabled) && Boolean.FALSE.equals(jsonRpcConfiguration.isAuthenticationEnabled()) diff --git a/besu/src/test/java/org/hyperledger/besu/cli/PrivacyOptionsTest.java b/besu/src/test/java/org/hyperledger/besu/cli/PrivacyOptionsTest.java index af43535a8ff..752f496f35e 100644 --- 
a/besu/src/test/java/org/hyperledger/besu/cli/PrivacyOptionsTest.java +++ b/besu/src/test/java/org/hyperledger/besu/cli/PrivacyOptionsTest.java @@ -215,6 +215,16 @@ public void privacyWithBonsaiExplicitMustError() { assertThat(commandOutput.toString(UTF_8)).isEmpty(); } + @Test + public void privacyWithBonsaiArchiveExplicitMustError() { + // bypass overridden parseCommand method which specifies bonsai + super.parseCommand("--privacy-enabled", "--data-storage-format", "BONSAI_ARCHIVE"); + + assertThat(commandErrorOutput.toString(UTF_8)) + .contains("Bonsai archive cannot be enabled with privacy."); + assertThat(commandOutput.toString(UTF_8)).isEmpty(); + } + @Test public void privacyWithoutPrivacyPublicKeyFails() { parseCommand("--privacy-enabled", "--privacy-url", ENCLAVE_URI); From a1cb77d515ca14b962c0ee544364d2e3844eef66 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Mon, 30 Sep 2024 10:20:26 +0100 Subject: [PATCH 15/39] Refactor DB TX with retries Signed-off-by: Matthew Whitehead --- .../DiffBasedWorldStateKeyValueStorage.java | 42 ++++++++++++++----- 1 file changed, 32 insertions(+), 10 deletions(-) diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java index 69a8c92eb2d..96355adfae9 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java @@ -31,9 +31,11 @@ import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.FlatDbStrategy; import org.hyperledger.besu.ethereum.worldstate.FlatDbMode; import org.hyperledger.besu.ethereum.worldstate.WorldStateKeyValueStorage; +import org.hyperledger.besu.plugin.services.exception.StorageException; import org.hyperledger.besu.plugin.services.storage.DataStorageFormat; import org.hyperledger.besu.plugin.services.storage.KeyValueStorage; import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction; +import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier; import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage; import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction; import org.hyperledger.besu.util.Subscribers; @@ -237,14 +239,11 @@ public int freezePreviousAccountState( nextMatch.stream() .forEach( (nearestKey) -> { - SegmentedKeyValueStorageTransaction tx = - composedWorldStateStorage.startTransaction(); - tx.remove(ACCOUNT_INFO_STATE, nearestKey.key().toArrayUnsafe()); - tx.put( + moveDBEntry( + ACCOUNT_INFO_STATE, ACCOUNT_INFO_STATE_FREEZER, nearestKey.key().toArrayUnsafe(), nearestKey.value().get()); - tx.commit(); frozenStateCount.getAndIncrement(); }); } @@ -301,14 +300,11 @@ public int freezePreviousStorageState( nextMatch.stream() .forEach( (nearestKey) -> { - SegmentedKeyValueStorageTransaction tx = - composedWorldStateStorage.startTransaction(); - tx.remove(ACCOUNT_STORAGE_STORAGE, nearestKey.key().toArrayUnsafe()); - tx.put( + moveDBEntry( + ACCOUNT_STORAGE_STORAGE, ACCOUNT_STORAGE_FREEZER, nearestKey.key().toArrayUnsafe(), nearestKey.value().get()); - tx.commit(); frozenStorageCount.getAndIncrement(); }); } @@ -327,6 +323,32 @@ public int freezePreviousStorageState( return frozenStorageCount.get(); } + 
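+ /*
+ * Moves a single key/value pair from one DB segment to another in its own transaction,
+ * retrying once if RocksDB reports it is busy. Any other storage exception abandons the
+ * move, leaving the entry in its original segment.
+ */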
private void moveDBEntry( + final SegmentIdentifier fromSegment, + final SegmentIdentifier toSegment, + final byte[] key, + final byte[] value) { + boolean retried = false; + while (true) { // Allow for a single DB retry + try { + SegmentedKeyValueStorageTransaction tx = composedWorldStateStorage.startTransaction(); + tx.remove(fromSegment, key); + tx.put(toSegment, key, value); + tx.commit(); + break; + } catch (StorageException se) { + if (se.getMessage().contains("RocksDBException: Busy")) { + if (retried) { + break; + } + retried = true; + } else { + break; + } + } + } + } + public Optional getLatestArchiveFrozenBlock() { return composedWorldStateStorage .get(ACCOUNT_INFO_STATE_FREEZER, ARCHIVE_BLOCKS_FROZEN) From 660d449ce19ede4a92a2b82948fe0d9206f3fa37 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Mon, 30 Sep 2024 13:52:34 +0100 Subject: [PATCH 16/39] Honour code hash or account hash config for storing code. Add tests for DB mode Signed-off-by: Matthew Whitehead --- .../flat/ArchiveCodeStorageStrategy.java | 101 ---------------- .../storage/flat/FlatDbStrategyProvider.java | 17 ++- .../flat/FlatDbStrategyProviderTest.java | 114 ++++++++++++++++++ 3 files changed, 125 insertions(+), 107 deletions(-) delete mode 100644 ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveCodeStorageStrategy.java diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveCodeStorageStrategy.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveCodeStorageStrategy.java deleted file mode 100644 index ad4219faae7..00000000000 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveCodeStorageStrategy.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright contributors to Hyperledger Besu. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
- * - * SPDX-License-Identifier: Apache-2.0 - */ -package org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat; - -import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.CODE_STORAGE; -import static org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.ArchiveFlatDbStrategy.DELETED_CODE_VALUE; -import static org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.ArchiveFlatDbStrategy.calculateArchiveKeyWithMinSuffix; - -import org.hyperledger.besu.datatypes.Hash; -import org.hyperledger.besu.ethereum.bonsai.BonsaiContext; -import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.CodeStorageStrategy; -import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage; -import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction; - -import java.util.Optional; - -import org.apache.tuweni.bytes.Bytes; -import org.bouncycastle.util.Arrays; - -public class ArchiveCodeStorageStrategy implements CodeStorageStrategy { - - private final BonsaiContext context; - - public ArchiveCodeStorageStrategy(final BonsaiContext context) { - this.context = context; - } - - /* - * Retrieves the code data for the given code hash and account hash and block context. - */ - @Override - public Optional getFlatCode( - final Hash codeHash, final Hash accountHash, final SegmentedKeyValueStorage storage) { - if (codeHash.equals(Hash.EMPTY)) { - return Optional.of(Bytes.EMPTY); - } else { - - // keyNearest, use MAX_BLOCK_SUFFIX in the absence of a block context: - Bytes keyNearest = - ArchiveFlatDbStrategy.calculateArchiveKeyWithMaxSuffix( - context, accountHash.toArrayUnsafe()); - - // use getNearest() with an account key that is suffixed by the block context - final Optional codeFound = - storage - .getNearestBefore(CODE_STORAGE, keyNearest) - // return empty when we find a "deleted value key" - .filter( - found -> - !Arrays.areEqual( - DELETED_CODE_VALUE, found.value().orElse(DELETED_CODE_VALUE))) - // map NearestKey to Bytes-wrapped value - .flatMap(SegmentedKeyValueStorage.NearestKeyValue::wrapBytes) - // check codeHash to sanity check the value and ensure we have the correct nearestKey: - .filter(b -> Hash.hash(b).equals(codeHash)); - - return codeFound; - } - } - - /* - * Puts the code data for the given code hash and account hash and block context. - */ - @Override - public void putFlatCode( - final SegmentedKeyValueStorageTransaction transaction, - final Hash accountHash, - final Hash codeHash, - final Bytes code) { - // key suffixed with block context, or MIN_BLOCK_SUFFIX if we have no context: - byte[] keySuffixed = calculateArchiveKeyWithMinSuffix(context, accountHash.toArrayUnsafe()); - - transaction.put(CODE_STORAGE, keySuffixed, code.toArrayUnsafe()); - } - - /* - * Adds a "deleted key" code entry for the given account hash and block context. 
- */ - @Override - public void removeFlatCode( - final SegmentedKeyValueStorageTransaction transaction, - final Hash accountHash, - final Hash codeHash) { - // insert a key suffixed with block context, with 'deleted account' value - byte[] keySuffixed = calculateArchiveKeyWithMinSuffix(context, accountHash.toArrayUnsafe()); - - transaction.put(CODE_STORAGE, keySuffixed, DELETED_CODE_VALUE); - } -} diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java index 5640deb3c4f..dbcae20c07d 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java @@ -19,7 +19,6 @@ import static org.hyperledger.besu.ethereum.trie.diffbased.common.storage.DiffBasedWorldStateKeyValueStorage.WORLD_ROOT_HASH_KEY; import org.hyperledger.besu.ethereum.bonsai.BonsaiContext; -import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.ArchiveCodeStorageStrategy; import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.ArchiveFlatDbStrategy; import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.FullFlatDbStrategy; import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.PartialFlatDbStrategy; @@ -70,8 +69,7 @@ public void loadFlatDbStrategy(final SegmentedKeyValueStorage composedWorldState } else if (flatDbMode == FlatDbMode.ARCHIVE) { final BonsaiContext context = new BonsaiContext(); this.flatDbStrategy = - new ArchiveFlatDbStrategy( - context, metricsSystem, new ArchiveCodeStorageStrategy(context)); + new ArchiveFlatDbStrategy(context, metricsSystem, codeStorageStrategy); } else { this.flatDbStrategy = new PartialFlatDbStrategy(metricsSystem, codeStorageStrategy); } @@ -175,9 +173,16 @@ public FlatDbStrategy getFlatDbStrategy( public void upgradeToFullFlatDbMode(final SegmentedKeyValueStorage composedWorldStateStorage) { final SegmentedKeyValueStorageTransaction transaction = composedWorldStateStorage.startTransaction(); - LOG.info("setting FlatDbStrategy to FULL"); - transaction.put( - TRIE_BRANCH_STORAGE, FLAT_DB_MODE, FlatDbMode.FULL.getVersion().toArrayUnsafe()); + if (dataStorageConfiguration.getDataStorageFormat() == DataStorageFormat.BONSAI) { + LOG.info("setting FlatDbStrategy to FULL"); + transaction.put( + TRIE_BRANCH_STORAGE, FLAT_DB_MODE, FlatDbMode.FULL.getVersion().toArrayUnsafe()); + } else if (dataStorageConfiguration.getDataStorageFormat() + == DataStorageFormat.BONSAI_ARCHIVE) { + LOG.info("setting FlatDbStrategy to ARCHIVE"); + transaction.put( + TRIE_BRANCH_STORAGE, FLAT_DB_MODE, FlatDbMode.ARCHIVE.getVersion().toArrayUnsafe()); + } transaction.commit(); loadFlatDbStrategy(composedWorldStateStorage); // force reload of flat db reader strategy } diff --git a/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProviderTest.java b/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProviderTest.java index b3c709b89f2..12ee5b1ef07 100644 --- a/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProviderTest.java +++ 
b/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProviderTest.java @@ -19,6 +19,7 @@ import org.hyperledger.besu.datatypes.Hash; import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier; +import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.ArchiveFlatDbStrategy; import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.FullFlatDbStrategy; import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.PartialFlatDbStrategy; import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration; @@ -45,6 +46,9 @@ class FlatDbStrategyProviderTest { private final FlatDbStrategyProvider flatDbStrategyProvider = new FlatDbStrategyProvider(new NoOpMetricsSystem(), DataStorageConfiguration.DEFAULT_CONFIG); + private final FlatDbStrategyProvider archiveFlatDbStrategyProvider = + new FlatDbStrategyProvider( + new NoOpMetricsSystem(), DataStorageConfiguration.DEFAULT_BONSAI_ARCHIVE_CONFIG); private final SegmentedKeyValueStorage composedWorldStateStorage = new SegmentedInMemoryKeyValueStorage( List.of( @@ -75,10 +79,25 @@ void upgradesFlatDbStrategyToFullFlatDbMode() { assertThat(flatDbStrategyProvider.flatDbStrategy).isNotNull(); assertThat(flatDbStrategyProvider.getFlatDbStrategy(composedWorldStateStorage)) .isInstanceOf(FullFlatDbStrategy.class); + assertThat(flatDbStrategyProvider.getFlatDbStrategy(composedWorldStateStorage)) + .isNotInstanceOf(ArchiveFlatDbStrategy.class); assertThat(flatDbStrategyProvider.flatDbStrategy.codeStorageStrategy) .isInstanceOf(CodeHashCodeStorageStrategy.class); } + @Test + void upgradesFlatDbStrategyToArchiveFlatDbMode() { + updateFlatDbMode(FlatDbMode.PARTIAL); + + archiveFlatDbStrategyProvider.upgradeToFullFlatDbMode(composedWorldStateStorage); + assertThat(archiveFlatDbStrategyProvider.flatDbMode).isEqualTo(FlatDbMode.ARCHIVE); + assertThat(archiveFlatDbStrategyProvider.flatDbStrategy).isNotNull(); + assertThat(archiveFlatDbStrategyProvider.getFlatDbStrategy(composedWorldStateStorage)) + .isInstanceOf(ArchiveFlatDbStrategy.class); + assertThat(archiveFlatDbStrategyProvider.flatDbStrategy.codeStorageStrategy) + .isInstanceOf(CodeHashCodeStorageStrategy.class); + } + @ParameterizedTest @ValueSource(booleans = {false, true}) void emptyDbCreatesFlatDbStrategyUsingCodeByHashConfig(final boolean codeByHashEnabled) { @@ -104,6 +123,31 @@ void emptyDbCreatesFlatDbStrategyUsingCodeByHashConfig(final boolean codeByHashE .isInstanceOf(expectedCodeStorageClass); } + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void emptyDbCreatesArchiveFlatDbStrategyUsingCodeByHashConfig(final boolean codeByHashEnabled) { + final DataStorageConfiguration dataStorageConfiguration = + ImmutableDataStorageConfiguration.builder() + .dataStorageFormat(DataStorageFormat.BONSAI_ARCHIVE) + .bonsaiMaxLayersToLoad(DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD) + .unstable( + ImmutableDataStorageConfiguration.Unstable.builder() + .bonsaiCodeStoredByCodeHashEnabled(codeByHashEnabled) + .build()) + .build(); + final FlatDbStrategyProvider flatDbStrategyProvider = + new FlatDbStrategyProvider(new NoOpMetricsSystem(), dataStorageConfiguration); + + flatDbStrategyProvider.loadFlatDbStrategy(composedWorldStateStorage); + final Class expectedCodeStorageClass = + codeByHashEnabled + ? 
CodeHashCodeStorageStrategy.class + : AccountHashCodeStorageStrategy.class; + assertThat(flatDbStrategyProvider.flatDbMode).isEqualTo(FlatDbMode.ARCHIVE); + assertThat(flatDbStrategyProvider.flatDbStrategy.codeStorageStrategy) + .isInstanceOf(expectedCodeStorageClass); + } + @ParameterizedTest @ValueSource(booleans = {false, true}) void existingAccountHashDbUsesAccountHash(final boolean codeByHashEnabled) { @@ -134,6 +178,36 @@ void existingAccountHashDbUsesAccountHash(final boolean codeByHashEnabled) { .isInstanceOf(AccountHashCodeStorageStrategy.class); } + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void existingAccountHashArchiveDbUsesAccountHash(final boolean codeByHashEnabled) { + final DataStorageConfiguration dataStorageConfiguration = + ImmutableDataStorageConfiguration.builder() + .dataStorageFormat(DataStorageFormat.BONSAI_ARCHIVE) + .bonsaiMaxLayersToLoad(DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD) + .unstable( + ImmutableDataStorageConfiguration.Unstable.builder() + .bonsaiCodeStoredByCodeHashEnabled(codeByHashEnabled) + .build()) + .build(); + final FlatDbStrategyProvider flatDbStrategyProvider = + new FlatDbStrategyProvider(new NoOpMetricsSystem(), dataStorageConfiguration); + + final SegmentedKeyValueStorageTransaction transaction = + composedWorldStateStorage.startTransaction(); + final AccountHashCodeStorageStrategy accountHashCodeStorageStrategy = + new AccountHashCodeStorageStrategy(); + // key representing account hash just needs to not be the code hash + final Hash accountHash = Hash.wrap(Bytes32.fromHexString("0001")); + accountHashCodeStorageStrategy.putFlatCode(transaction, accountHash, null, Bytes.of(2)); + transaction.commit(); + + flatDbStrategyProvider.loadFlatDbStrategy(composedWorldStateStorage); + assertThat(flatDbStrategyProvider.flatDbMode).isEqualTo(FlatDbMode.ARCHIVE); + assertThat(flatDbStrategyProvider.flatDbStrategy.codeStorageStrategy) + .isInstanceOf(AccountHashCodeStorageStrategy.class); + } + @ParameterizedTest @ValueSource(booleans = {false, true}) void existingCodeHashDbUsesCodeHash(final boolean codeByHashEnabled) { @@ -163,6 +237,35 @@ void existingCodeHashDbUsesCodeHash(final boolean codeByHashEnabled) { .isInstanceOf(CodeHashCodeStorageStrategy.class); } + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void existingCodeHashArchiveDbUsesCodeHash(final boolean codeByHashEnabled) { + final DataStorageConfiguration dataStorageConfiguration = + ImmutableDataStorageConfiguration.builder() + .dataStorageFormat(DataStorageFormat.BONSAI_ARCHIVE) + .bonsaiMaxLayersToLoad(DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD) + .unstable( + ImmutableDataStorageConfiguration.Unstable.builder() + .bonsaiCodeStoredByCodeHashEnabled(codeByHashEnabled) + .build()) + .build(); + final FlatDbStrategyProvider flatDbStrategyProvider = + new FlatDbStrategyProvider(new NoOpMetricsSystem(), dataStorageConfiguration); + + final SegmentedKeyValueStorageTransaction transaction = + composedWorldStateStorage.startTransaction(); + + final CodeHashCodeStorageStrategy codeHashCodeStorageStrategy = + new CodeHashCodeStorageStrategy(); + codeHashCodeStorageStrategy.putFlatCode(transaction, null, Hash.hash(Bytes.of(1)), Bytes.of(1)); + transaction.commit(); + + flatDbStrategyProvider.loadFlatDbStrategy(composedWorldStateStorage); + assertThat(flatDbStrategyProvider.flatDbMode).isEqualTo(FlatDbMode.ARCHIVE); + assertThat(flatDbStrategyProvider.flatDbStrategy.codeStorageStrategy) + .isInstanceOf(CodeHashCodeStorageStrategy.class); + } + @Test void 
downgradesFlatDbStrategyToPartiallyFlatDbMode() { updateFlatDbMode(FlatDbMode.FULL); @@ -174,6 +277,17 @@ void downgradesFlatDbStrategyToPartiallyFlatDbMode() { .isInstanceOf(PartialFlatDbStrategy.class); } + @Test + void downgradesArchiveFlatDbStrategyToPartiallyFlatDbMode() { + updateFlatDbMode(FlatDbMode.ARCHIVE); + + flatDbStrategyProvider.downgradeToPartialFlatDbMode(composedWorldStateStorage); + assertThat(flatDbStrategyProvider.flatDbMode).isEqualTo(FlatDbMode.PARTIAL); + assertThat(flatDbStrategyProvider.flatDbStrategy).isNotNull(); + assertThat(flatDbStrategyProvider.getFlatDbStrategy(composedWorldStateStorage)) + .isInstanceOf(PartialFlatDbStrategy.class); + } + private void updateFlatDbMode(final FlatDbMode flatDbMode) { final SegmentedKeyValueStorageTransaction transaction = composedWorldStateStorage.startTransaction(); From 406312865d43c6ac5d9609f9506e087f7ef5c3b2 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Tue, 1 Oct 2024 15:55:09 +0100 Subject: [PATCH 17/39] Refactor code to move one block's worth of state/storage changes at a time. Add more tests Signed-off-by: Matthew Whitehead --- .../worldview/BonsaiArchiveFreezer.java | 211 ++-- .../DiffBasedWorldStateKeyValueStorage.java | 51 +- .../common/trielog/ArchiveFreezerTests.java | 1042 +++++++++++++++++ 3 files changed, 1198 insertions(+), 106 deletions(-) create mode 100644 ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/trielog/ArchiveFreezerTests.java diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java index d38d91dccbf..7f41fa22127 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java @@ -23,14 +23,13 @@ import org.hyperledger.besu.ethereum.trie.diffbased.common.trielog.TrieLogManager; import org.hyperledger.besu.plugin.services.trielogs.TrieLog; -import java.util.Comparator; +import java.util.Collections; +import java.util.Map; import java.util.Optional; +import java.util.TreeMap; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; -import com.google.common.collect.ArrayListMultimap; -import com.google.common.collect.Multimap; -import com.google.common.collect.TreeMultimap; import org.apache.tuweni.bytes.Bytes; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -49,12 +48,12 @@ public class BonsaiArchiveFreezer implements BlockAddedObserver { private final DiffBasedWorldStateKeyValueStorage rootWorldStateStorage; private final Blockchain blockchain; private final Consumer executeAsync; - private static final int PRELOAD_LIMIT = 1000; + private static final int CATCHUP_LIMIT = 1000; private static final int DISTANCE_FROM_HEAD_BEFORE_FREEZING_OLD_STATE = 10; private final TrieLogManager trieLogManager; - private final Multimap blocksToMoveToFreezer = - TreeMultimap.create(Comparator.reverseOrder(), Comparator.naturalOrder()); + private final Map pendingBlocksToArchive = + Collections.synchronizedMap(new TreeMap<>()); public BonsaiArchiveFreezer( final DiffBasedWorldStateKeyValueStorage rootWorldStateStorage, @@ -67,9 +66,7 @@ public BonsaiArchiveFreezer( this.trieLogManager = trieLogManager; } - public void initialize() { - // On startup there will be 
recent blocks whose state and storage hasn't been archived yet. - // Pre-load them ready for freezing state once enough new blocks have been added to the chain. + private void preloadCatchupBlocks() { Optional frozenBlocksHead = Optional.empty(); Optional latestFrozenBlock = rootWorldStateStorage.getLatestArchiveFrozenBlock(); @@ -86,7 +83,7 @@ public void initialize() { if (frozenBlocksHead.isPresent()) { int preLoadedBlocks = 0; Optional nextBlock = blockchain.getBlockByNumber(frozenBlocksHead.get()); - for (int i = 0; i < PRELOAD_LIMIT; i++) { + for (int i = 0; i < CATCHUP_LIMIT; i++) { if (nextBlock.isPresent()) { addToFreezerQueue( nextBlock.get().getHeader().getNumber(), nextBlock.get().getHeader().getHash()); @@ -97,13 +94,27 @@ public void initialize() { } } LOG.atInfo() - .setMessage("Preloaded {} blocks to move their state and storage to the archive freezer") + .setMessage( + "Preloaded {} blocks from {} to move their state and storage to the archive freezer") .addArgument(preLoadedBlocks) + .addArgument(frozenBlocksHead.get()) .log(); } + } + + public void initialize() { + // On startup there will be recent blocks whose state and storage hasn't been archived yet. + // Pre-load them ready for freezing state once enough new blocks have been added to the chain. + preloadCatchupBlocks(); + + // Keep catching up until we move less to the freezer than the catchup limit + while (moveBlockStateToFreezer() == CATCHUP_LIMIT) { + preloadCatchupBlocks(); + } + } - // Start processing any backlog on startup - don't wait for a new block to be imported. - moveBlockStateToFreezer(); + public int getPendingBlocksCount() { + return pendingBlocksToArchive.size(); } public synchronized void addToFreezerQueue(final long blockNumber, final Hash blockHash) { @@ -113,10 +124,17 @@ public synchronized void addToFreezerQueue(final long blockNumber, final Hash bl .addArgument(blockNumber) .addArgument(blockHash) .log(); - blocksToMoveToFreezer.put(blockNumber, blockHash); + pendingBlocksToArchive.put(blockNumber, blockHash); + } + + private synchronized void removeArchivedFromQueue(final Map archivedBlocks) { + archivedBlocks.keySet().forEach(e -> pendingBlocksToArchive.remove(e)); } - public synchronized int moveBlockStateToFreezer() { + // Move state and storage entries from their primary DB segments to the freezer segments. This is + // intended to maintain good performance for new block imports by keeping the primary DB segments + // to live state only. Returns the number of state and storage entries moved. + public int moveBlockStateToFreezer() { final long retainAboveThisBlock = blockchain.getChainHeadBlockNumber() - DISTANCE_FROM_HEAD_BEFORE_FREEZING_OLD_STATE; @@ -135,100 +153,99 @@ public synchronized int moveBlockStateToFreezer() { .addArgument(retainAboveThisBlock) .log(); - final var accountsToMove = - blocksToMoveToFreezer.asMap().entrySet().stream() - .dropWhile((e) -> e.getKey() > retainAboveThisBlock); + // Typically we will move all storage and state for a single block i.e. when a new block is + // imported, move state for block-N. There are cases where we catch-up and move old state + // for a number of blocks so we may iterate over a number of blocks freezing their state, + // not just a single one. 
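Put differently, each pass archives every queued block that is now at least DISTANCE_FROM_HEAD_BEFORE_FREEZING_OLD_STATE (10) blocks behind the chain head, oldest first. A minimal sketch of that selection using TreeMap.headMap, assuming the same sorted pending map (names illustrative; the patch itself filters the entries into a fresh TreeMap below):

import java.util.NavigableMap;
import java.util.TreeMap;

import org.hyperledger.besu.datatypes.Hash;

// Illustrative sketch: pick the queued blocks that are old enough to archive,
// i.e. blockNumber <= chainHeadBlockNumber - DISTANCE_FROM_HEAD_BEFORE_FREEZING_OLD_STATE.
final class FreezerSelectionExample {
  private static final int DISTANCE_FROM_HEAD_BEFORE_FREEZING_OLD_STATE = 10;

  static NavigableMap<Long, Hash> eligibleForFreezing(
      final TreeMap<Long, Hash> pendingBlocksToArchive, final long chainHeadBlockNumber) {
    final long retainAboveThisBlock =
        chainHeadBlockNumber - DISTANCE_FROM_HEAD_BEFORE_FREEZING_OLD_STATE;
    // headMap(key, true) keeps every entry whose block number is <= retainAboveThisBlock
    return pendingBlocksToArchive.headMap(retainAboveThisBlock, true);
  }
}

Because the pending map is sorted by block number, either form visits blocks oldest first.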
- final Multimap accountStateFreezerActionsComplete = ArrayListMultimap.create(); - final Multimap accountStorageFreezerActionsComplete = ArrayListMultimap.create(); + final Map blocksToFreeze = new TreeMap<>(); + pendingBlocksToArchive.entrySet().stream() + .filter((e) -> e.getKey() <= retainAboveThisBlock) + .forEach( + (e) -> { + blocksToFreeze.put(e.getKey(), e.getValue()); + }); // Determine which world state keys have changed in the last N blocks by looking at the // trie logs for the blocks. Then move the old keys to the freezer segment (if and only if they // have changed) - accountsToMove - .parallel() + blocksToFreeze + .entrySet() .forEach( (block) -> { - for (Hash blockHash : block.getValue()) { - Optional trieLog = trieLogManager.getTrieLogLayer(blockHash); - if (trieLog.isPresent()) { - trieLog - .get() - .getAccountChanges() - .forEach( - (address, change) -> { - // Move any previous state for this account - frozenAccountStateCount.addAndGet( - rootWorldStateStorage.freezePreviousAccountState( - blockchain.getBlockHeader( - blockchain.getBlockHeader(blockHash).get().getParentHash()), - address.addressHash())); - }); - } - accountStateFreezerActionsComplete.put(block.getKey(), blockHash); + if (pendingBlocksToArchive.size() > 0 && pendingBlocksToArchive.size() % 100 == 0) { + // Log progress in case catching up causes there to be a large number of keys + // to move + LOG.atInfo() + .setMessage("state for blocks {} to {} archived") + .addArgument(block.getKey()) + .addArgument(block.getKey() + pendingBlocksToArchive.size()) + .log(); } - }); - - final var storageToMove = - blocksToMoveToFreezer.asMap().entrySet().stream() - .dropWhile((e) -> e.getKey() > retainAboveThisBlock); - - storageToMove - .parallel() - .forEach( - (block) -> { - for (Hash blockHash : block.getValue()) { - Optional trieLog = trieLogManager.getTrieLogLayer(blockHash); - if (trieLog.isPresent()) { - trieLog - .get() - .getStorageChanges() - .forEach( - (address, storageSlotKey) -> { - storageSlotKey.forEach( - (slotKey, slotValue) -> { - // Move any previous state for this account - frozenAccountStorageCount.addAndGet( - rootWorldStateStorage.freezePreviousStorageState( - blockchain.getBlockHeader( - blockchain - .getBlockHeader(blockHash) - .get() - .getParentHash()), - Bytes.concatenate( - address.addressHash(), slotKey.getSlotHash()))); - }); - }); - } - accountStorageFreezerActionsComplete.put(block.getKey(), blockHash); + Hash blockHash = block.getValue(); + LOG.atDebug() + .setMessage("Freezing all account state for block {}") + .addArgument(block.getKey()) + .log(); + Optional trieLog = trieLogManager.getTrieLogLayer(blockHash); + if (trieLog.isPresent()) { + trieLog + .get() + .getAccountChanges() + .forEach( + (address, change) -> { + // Move any previous state for this account + frozenAccountStateCount.addAndGet( + rootWorldStateStorage.freezePreviousAccountState( + blockchain.getBlockHeader( + blockchain.getBlockHeader(blockHash).get().getParentHash()), + address.addressHash())); + }); + LOG.atDebug() + .setMessage("Freezing all storage state for block {}") + .addArgument(block.getKey()) + .log(); + trieLog + .get() + .getStorageChanges() + .forEach( + (address, storageSlotKey) -> { + storageSlotKey.forEach( + (slotKey, slotValue) -> { + // Move any previous state for this account + frozenAccountStorageCount.addAndGet( + rootWorldStateStorage.freezePreviousStorageState( + blockchain.getBlockHeader( + blockchain + .getBlockHeader(blockHash) + .get() + .getParentHash()), + Bytes.concatenate( + 
address.addressHash(), slotKey.getSlotHash()))); + }); + }); } + LOG.atDebug() + .setMessage("All account state and storage frozen for block {}") + .addArgument(block.getKey()) + .log(); + rootWorldStateStorage.setLatestArchiveFrozenBlock(block.getKey()); }); - // For us to consider all state and storage changes for a block complete, it must have been - // recorded in both accountState and accountStorage lists. If only one finished we need to try - // freezing state/storage for that block again on the next loop - AtomicInteger frozenBlocksCompleted = new AtomicInteger(); - accountStateFreezerActionsComplete - .keySet() - .forEach( - (b) -> { - if (accountStorageFreezerActionsComplete.containsKey(b)) { - frozenBlocksCompleted.getAndIncrement(); - rootWorldStateStorage.setLatestArchiveFrozenBlock(b); - blocksToMoveToFreezer.removeAll(b); - } - }); + LOG.atDebug() + .setMessage( + "finished moving cold state to freezer storage for range (chainHeadNumber: {} - numberOfBlocksToKeepInWarmStorage: {}) = {}. Froze {} account state entries, {} account storage entries from {} blocks") + .addArgument(blockchain::getChainHeadBlockNumber) + .addArgument(DISTANCE_FROM_HEAD_BEFORE_FREEZING_OLD_STATE) + .addArgument(retainAboveThisBlock) + .addArgument(frozenAccountStateCount.get()) + .addArgument(frozenAccountStorageCount.get()) + .addArgument(blocksToFreeze.size()) + .log(); - if (frozenAccountStateCount.get() > 0 || frozenAccountStorageCount.get() > 0) { - LOG.atDebug() - .setMessage("Froze {} account state entries, {} account storage entries for {} blocks") - .addArgument(frozenAccountStateCount.get()) - .addArgument(frozenAccountStorageCount.get()) - .addArgument(frozenBlocksCompleted.get()) - .log(); - } + removeArchivedFromQueue(blocksToFreeze); - return frozenBlocksCompleted.get(); + return frozenAccountStateCount.get() + frozenAccountStorageCount.get(); } @Override diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java index 96355adfae9..f1985e8d381 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java @@ -234,7 +234,10 @@ public int freezePreviousAccountState( composedWorldStateStorage .getNearestBefore(ACCOUNT_INFO_STATE, previousKey) .filter( - found -> accountHash.commonPrefixLength(found.key()) >= accountHash.size())) + found -> + found.value().isPresent() + && accountHash.commonPrefixLength(found.key()) + >= accountHash.size())) .isPresent()) { nextMatch.stream() .forEach( @@ -249,8 +252,18 @@ public int freezePreviousAccountState( } if (frozenStateCount.get() == 0) { + // A lot of entries will have no previous history, so use trace to log when no previous + // storage was found + LOG.atTrace() + .setMessage("no previous state found for block {}, address hash {}") + .addArgument(previousBlockHeader.get().getNumber()) + .addArgument(accountHash) + .log(); + } else { LOG.atDebug() - .setMessage("no previous state for account {} found to move to cold storage") + .setMessage("{} storage entries frozen for block {}, address hash {}") + .addArgument(frozenStateCount.get()) + .addArgument(previousBlockHeader.get().getNumber()) .addArgument(accountHash) .log(); } @@ 
-294,12 +307,25 @@ public int freezePreviousStorageState( .getNearestBefore(ACCOUNT_STORAGE_STORAGE, previousKey) .filter( found -> - storageSlotKey.commonPrefixLength(found.key()) - >= storageSlotKey.size())) + found.value().isPresent() + && storageSlotKey.commonPrefixLength(found.key()) + >= storageSlotKey.size())) .isPresent()) { nextMatch.stream() .forEach( (nearestKey) -> { + if (frozenStorageCount.get() > 0 && frozenStorageCount.get() % 100 == 0) { + // Log progress in case catching up causes there to be a large number of keys + // to move + LOG.atDebug() + .setMessage( + "{} storage entries frozen for block {}, slot hash {}, latest key {}") + .addArgument(frozenStorageCount.get()) + .addArgument(previousBlockHeader.get().getNumber()) + .addArgument(storageSlotKey) + .addArgument(nearestKey.key()) + .log(); + } moveDBEntry( ACCOUNT_STORAGE_STORAGE, ACCOUNT_STORAGE_FREEZER, @@ -310,8 +336,18 @@ public int freezePreviousStorageState( } if (frozenStorageCount.get() == 0) { + // A lot of entries will have no previous history, so use trace to log when no previous + // storage was found + LOG.atTrace() + .setMessage("no previous storage found for block {}, slot hash {}") + .addArgument(previousBlockHeader.get().getNumber()) + .addArgument(storageSlotKey) + .log(); + } else { LOG.atDebug() - .setMessage("no previous state for storage {} found to move to cold storage") + .setMessage("{} storage entries frozen for block {}, slot hash {}") + .addArgument(frozenStorageCount.get()) + .addArgument(previousBlockHeader.get().getNumber()) .addArgument(storageSlotKey) .log(); } @@ -337,10 +373,7 @@ private void moveDBEntry( tx.commit(); break; } catch (StorageException se) { - if (se.getMessage().contains("RocksDBException: Busy")) { - if (retried) { - break; - } + if (!retried && se.getMessage().contains("RocksDBException: Busy")) { retried = true; } else { break; diff --git a/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/trielog/ArchiveFreezerTests.java b/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/trielog/ArchiveFreezerTests.java new file mode 100644 index 00000000000..d3fabcb39fb --- /dev/null +++ b/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/trielog/ArchiveFreezerTests.java @@ -0,0 +1,1042 @@ +/* + * Copyright contributors to Hyperledger Besu. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ +package org.hyperledger.besu.ethereum.trie.diffbased.common.trielog; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.when; + +import org.hyperledger.besu.datatypes.Address; +import org.hyperledger.besu.datatypes.Hash; +import org.hyperledger.besu.datatypes.StorageSlotKey; +import org.hyperledger.besu.datatypes.Wei; +import org.hyperledger.besu.ethereum.chain.BlockAddedEvent; +import org.hyperledger.besu.ethereum.chain.Blockchain; +import org.hyperledger.besu.ethereum.core.Block; +import org.hyperledger.besu.ethereum.core.BlockBody; +import org.hyperledger.besu.ethereum.core.BlockHeader; +import org.hyperledger.besu.ethereum.core.Difficulty; +import org.hyperledger.besu.ethereum.core.InMemoryKeyValueStorageProvider; +import org.hyperledger.besu.ethereum.mainnet.MainnetBlockHeaderFunctions; +import org.hyperledger.besu.ethereum.referencetests.BonsaiReferenceTestWorldStateStorage; +import org.hyperledger.besu.ethereum.rlp.BytesValueRLPOutput; +import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier; +import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.BonsaiAccount; +import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.BonsaiPreImageProxy; +import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.BonsaiWorldStateKeyValueStorage; +import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.worldview.BonsaiArchiveFreezer; +import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.worldview.BonsaiWorldState; +import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration; +import org.hyperledger.besu.ethereum.worldstate.FlatDbMode; +import org.hyperledger.besu.ethereum.worldstate.StateTrieAccountValue; +import org.hyperledger.besu.evm.log.LogsBloomFilter; +import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem; +import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction; + +import java.util.Optional; +import java.util.function.Consumer; + +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.core.config.Configurator; +import org.apache.tuweni.bytes.Bytes; +import org.apache.tuweni.units.bigints.UInt256; +import org.bouncycastle.util.Arrays; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; + +public class ArchiveFreezerTests { + + // Number of blocks in the chain. 
This is different to the number of blocks + // we have successfully frozen state for + static final long SHORT_TEST_CHAIN_HEIGHT = 150; + static final long LONG_TEST_CHAIN_HEIGHT = 2000; + + // Address used for account and storage changes + final Address address = Address.fromHexString("0x95cD8499051f7FE6a2F53749eC1e9F4a81cafa13"); + + // Cache blocks that are generated during the test + static LoadingCache> blockNumberCache; + + static LoadingCache> blockHashCache = + CacheBuilder.newBuilder() + .maximumSize(SHORT_TEST_CHAIN_HEIGHT) + .build( + new CacheLoader<>() { + @Override + public Optional load(final Hash blockHash) { + Optional foundBlock; + for (long i = 0; i < SHORT_TEST_CHAIN_HEIGHT; i++) { + if ((foundBlock = blockNumberCache.getUnchecked(i)).isPresent() + && foundBlock.get().getHash().equals(blockHash)) { + return foundBlock; + } + } + return Optional.empty(); + } + }); + + static long currentBlockHeight = 0; + + private BonsaiWorldStateKeyValueStorage worldStateStorage; + private Blockchain blockchain; + private TrieLogManager trieLogManager; + private final Consumer executeAsync = Runnable::run; + private BonsaiWorldState bonsaiWorldState; + + @SuppressWarnings("BannedMethod") + @BeforeEach + public void setup() { + Configurator.setLevel(LogManager.getLogger(ArchiveFreezerTests.class).getName(), Level.TRACE); + worldStateStorage = Mockito.mock(BonsaiWorldStateKeyValueStorage.class); + blockchain = Mockito.mock(Blockchain.class); + trieLogManager = Mockito.mock(TrieLogManager.class); + bonsaiWorldState = Mockito.mock(BonsaiWorldState.class); + } + + private static long getCurrentBlockHeight() { + return currentBlockHeight; + } + + // Utility for generating blocks on the fly, up to a certain chain height at which point no block + // is returned. + // This is recursive and generates all parent blocks for the requested block number on the fly, + // but combined with a cache loader each block number will only be generated once + private static Optional getGeneratedBlock(final long blockNumber, final long chainLength) { + if (blockNumber >= chainLength) { + return Optional.empty(); + } + // Fake block + final BlockHeader header = + new BlockHeader( + blockNumber == 0 + ? Hash.EMPTY + : blockNumberCache.getUnchecked(blockNumber - 1).get().getHash(), + Hash.EMPTY_TRIE_HASH, + Address.ZERO, + Hash.EMPTY_TRIE_HASH, + Hash.EMPTY_TRIE_HASH, + Hash.EMPTY_TRIE_HASH, + LogsBloomFilter.builder().build(), + Difficulty.ONE, + blockNumber, + 0, + 0, + 0, + Bytes.of(0x00), + Wei.ZERO, + Hash.EMPTY, + 0, + null, + null, + null, + null, + null, + new MainnetBlockHeaderFunctions()); + return Optional.of(new Block(header, BlockBody.empty())); + } + + @Test + public void archiveFreezerLimitsInitialArchiveBlocks() { + + blockNumberCache = + CacheBuilder.newBuilder() + .maximumSize(LONG_TEST_CHAIN_HEIGHT) + .build( + new CacheLoader<>() { + @Override + public Optional load(final Long blockNumber) { + return getGeneratedBlock(blockNumber, LONG_TEST_CHAIN_HEIGHT); + } + }); + + when(worldStateStorage.getFlatDbMode()).thenReturn(FlatDbMode.ARCHIVE); + + // If we had previously frozen up to block 100... 
+ when(worldStateStorage.getLatestArchiveFrozenBlock()).thenReturn(Optional.of(100L)); + + // When any block is asked for by the archive freezer, generate it on the fly and return it + // unless it is > block num 2000 + when(blockchain.getBlockByNumber(anyLong())) + .then( + requestedBlockNumber -> + blockNumberCache.getUnchecked(requestedBlockNumber.getArgument(0, Long.class))); + + BonsaiArchiveFreezer archiveFreezer = + new BonsaiArchiveFreezer(worldStateStorage, blockchain, executeAsync, trieLogManager); + archiveFreezer.initialize(); + + // Check that we will only attempt to catch up 1000 blocks worth of state/storage moves + assertThat(archiveFreezer.getPendingBlocksCount()).isEqualTo(1000); + } + + @Test + public void archiveFreezerMoves1AccountStateChangeToFreezerSegment() { + // Set up the block cache + blockNumberCache = + CacheBuilder.newBuilder() + .maximumSize(SHORT_TEST_CHAIN_HEIGHT) + .build( + new CacheLoader<>() { + @Override + public Optional load(final Long blockNumber) { + return getGeneratedBlock(blockNumber, SHORT_TEST_CHAIN_HEIGHT); + } + }); + + when(worldStateStorage.getFlatDbMode()).thenReturn(FlatDbMode.ARCHIVE); + + // If we had previously frozen up to block 100... + when(worldStateStorage.getLatestArchiveFrozenBlock()).thenReturn(Optional.of(100L)); + + // Mock the number of changes the freeze action carries out for each relevant block + when(worldStateStorage.freezePreviousAccountState(any(), any())) + .then( + request -> { + Object objHeader = request.getArgument(0, Optional.class).get(); + if (objHeader instanceof BlockHeader) { + BlockHeader blockHeader = (BlockHeader) objHeader; + if (blockHeader.getNumber() == 101) { + // Mock 1 state change when block 102 is being processed, because state changes in + // block 101 can be frozen NB: the trie log in this test for block 102 isn't + // frozen because no further changes to that account are made + return 1; + } + return 0; + } + return 0; + }); + + // When any block is asked for by the archive freezer, generate it on the fly, cache it, and + // return it unless it + when(blockchain.getBlockByNumber(anyLong())) + .then( + requestedBlockNumber -> + blockNumberCache.getUnchecked(requestedBlockNumber.getArgument(0, Long.class))); + + when(blockchain.getBlockHeader(any())) + .then( + requestedBlockHash -> + blockHashCache + .getUnchecked(requestedBlockHash.getArgument(0, Hash.class)) + .map(Block::getHeader)); + + // Generate some trie logs to return for a specific block + + // Simulate an account change in block 101. This state will be frozen because block 102 updates + // the same account (see below) + TrieLogLayer block101TrieLogs = new TrieLogLayer(); + StateTrieAccountValue oldValue = + new StateTrieAccountValue(12, Wei.fromHexString("0x123"), Hash.EMPTY, Hash.EMPTY); + StateTrieAccountValue newValue = + new StateTrieAccountValue(13, Wei.fromHexString("0x234"), Hash.EMPTY, Hash.EMPTY); + block101TrieLogs.addAccountChange(address, oldValue, newValue); + + // Simulate another change to the same account, this time in block 102. This change won't be + // frozen during the test because it is the current state of the account. 
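These tests stub freezePreviousAccountState, but the reason the block 101 entry becomes movable once block 102 writes the same account is the archive key layout described later in this patch: each flat DB entry is keyed by the account hash plus the block number of the change, so the 101-suffixed entry is no longer the latest one for that account. The in-memory tests further down build such keys by hand ("0x0000000000000096" is block 150). A minimal sketch of that construction, assuming the suffix is the block number as 8 big-endian bytes (helper name illustrative):

import org.apache.tuweni.bytes.Bytes;

import org.hyperledger.besu.datatypes.Hash;

// Illustrative sketch: a bonsai-archive flat DB account key is the account hash
// followed by the block number of the state change as an 8-byte big-endian suffix.
final class ArchiveKeyExample {
  static byte[] accountArchiveKey(final Hash accountHash, final long blockNumber) {
    final Bytes blockSuffix = Bytes.ofUnsignedLong(blockNumber); // 150 -> 0x0000000000000096
    return Bytes.concatenate(accountHash, blockSuffix).toArrayUnsafe();
  }
}

This is why blocks 150, 151 and 152 appear as the suffixes ...96, ...97 and ...98 in the fixtures further down.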
+ TrieLogLayer block102TrieLogs = new TrieLogLayer(); + oldValue = new StateTrieAccountValue(13, Wei.fromHexString("0x234"), Hash.EMPTY, Hash.EMPTY); + newValue = new StateTrieAccountValue(14, Wei.fromHexString("0x345"), Hash.EMPTY, Hash.EMPTY); + block102TrieLogs.addAccountChange(address, oldValue, newValue); + when(trieLogManager.getTrieLogLayer( + Hash.fromHexString( + "0xf0b2ba5849ad812479a44bb1efd97f1f3fdab945ff53a81a4dea55b4db1a972e"))) + .thenReturn(Optional.of(block101TrieLogs)); + when(trieLogManager.getTrieLogLayer( + Hash.fromHexString( + "0x0d22db864d4effa62b640de645bffd44fb5d130578fbea4399f9abf8d7ac7789"))) + .thenReturn(Optional.of(block102TrieLogs)); + + // Initialize the archive freezer + BonsaiArchiveFreezer archiveFreezer = + new BonsaiArchiveFreezer(worldStateStorage, blockchain, executeAsync, trieLogManager); + archiveFreezer.initialize(); + + // Chain height is 150, we've frozen state up to block 100, we should have initialized the next + // 50 blocks to be archived + assertThat(archiveFreezer.getPendingBlocksCount()).isEqualTo(SHORT_TEST_CHAIN_HEIGHT - 100); + + when(blockchain.getChainHeadBlockNumber()) + .then(requestedBlockNumber -> getCurrentBlockHeight()); + + // Process the next 50 blocks. Only 1 account state change should happen during this processing + // since there are only trie logs for blocks 101 and 102 + for (long nextBlock = 101; nextBlock < 150; nextBlock++) { + currentBlockHeight = nextBlock; + if (nextBlock == 112) { + archiveFreezer.addToFreezerQueue( + nextBlock, blockNumberCache.getUnchecked(nextBlock).get().getHash()); + int accountsMoved = archiveFreezer.moveBlockStateToFreezer(); + assertThat(accountsMoved).isEqualTo(1); + } else { + archiveFreezer.addToFreezerQueue( + nextBlock, blockNumberCache.getUnchecked(nextBlock).get().getHash()); + int accountsMoved = archiveFreezer.moveBlockStateToFreezer(); + assertThat(accountsMoved).isEqualTo(0); + } + } + } + + @Test + public void archiveFreezerMoves2StorageChangesToFreezerSegment() { + // Set up the block cache + blockNumberCache = + CacheBuilder.newBuilder() + .maximumSize(SHORT_TEST_CHAIN_HEIGHT) + .build( + new CacheLoader<>() { + @Override + public Optional load(final Long blockNumber) { + return getGeneratedBlock(blockNumber, SHORT_TEST_CHAIN_HEIGHT); + } + }); + + when(worldStateStorage.getFlatDbMode()).thenReturn(FlatDbMode.ARCHIVE); + + // If we had previously frozen up to block 100... + when(worldStateStorage.getLatestArchiveFrozenBlock()).thenReturn(Optional.of(100L)); + + // Mock the number of changes the freeze action carries out for each relevant block + when(worldStateStorage.freezePreviousStorageState(any(), any())) + .then( + request -> { + Object objHeader = request.getArgument(0, Optional.class).get(); + if (objHeader instanceof BlockHeader) { + BlockHeader blockHeader = (BlockHeader) objHeader; + if (blockHeader.getNumber() == 101 || blockHeader.getNumber() == 102) { + // Mock 1 state change when block 102 is being processed, because state changes in + // block 101 can be frozen (and likewise for block 103). 
NB: the trie log in this + // test for block 103 isn't frozen because no further changes to that storage are + // made + return 1; + } + return 0; + } + return 0; + }); + + // When any block is asked for by the archive freezer, generate it on the fly, cache it, and + // return it unless it + when(blockchain.getBlockByNumber(anyLong())) + .then( + requestedBlockNumber -> + blockNumberCache.getUnchecked(requestedBlockNumber.getArgument(0, Long.class))); + + when(blockchain.getBlockHeader(any())) + .then( + requestedBlockHash -> + blockHashCache + .getUnchecked(requestedBlockHash.getArgument(0, Hash.class)) + .map(Block::getHeader)); + + // Generate some trie logs to return for a specific block + + // Simulate a storage change in block 101. This state will be frozen because block 102 updates + // the same storage (see below) + TrieLogLayer block101TrieLogs = new TrieLogLayer(); + UInt256 oldValue = UInt256.ZERO; + UInt256 newValue = UInt256.ONE; + UInt256 slot = UInt256.ONE; + StorageSlotKey storageSlotKey = new StorageSlotKey(slot); + block101TrieLogs.addStorageChange(address, storageSlotKey, oldValue, newValue); + + // Simulate a storage change in block 102. This state will also be frozen because block 102 + // updates the same storage (see below) + TrieLogLayer block102TrieLogs = new TrieLogLayer(); + oldValue = UInt256.ONE; + newValue = UInt256.valueOf(2L); + slot = UInt256.ONE; + storageSlotKey = new StorageSlotKey(slot); + block102TrieLogs.addStorageChange(address, storageSlotKey, oldValue, newValue); + + // Simulate a storage change in block 103. This state will not be frozen because it refers to a + // different slot + TrieLogLayer block103TrieLogs = new TrieLogLayer(); + oldValue = UInt256.ZERO; + newValue = UInt256.ONE; + slot = UInt256.valueOf(2L); + storageSlotKey = new StorageSlotKey(slot); + block103TrieLogs.addStorageChange(address, storageSlotKey, oldValue, newValue); + + when(trieLogManager.getTrieLogLayer( + Hash.fromHexString( + "0xf0b2ba5849ad812479a44bb1efd97f1f3fdab945ff53a81a4dea55b4db1a972e"))) + .thenReturn(Optional.of(block101TrieLogs)); + when(trieLogManager.getTrieLogLayer( + Hash.fromHexString( + "0x0d22db864d4effa62b640de645bffd44fb5d130578fbea4399f9abf8d7ac7789"))) + .thenReturn(Optional.of(block102TrieLogs)); + when(trieLogManager.getTrieLogLayer( + Hash.fromHexString( + "0x96440b533326c26f4611e4c0b123ce732aa7a68e3b275f4a5a2ea9bc4b089c73"))) + .thenReturn(Optional.of(block103TrieLogs)); + + // Initialize the archive freezer + BonsaiArchiveFreezer archiveFreezer = + new BonsaiArchiveFreezer(worldStateStorage, blockchain, executeAsync, trieLogManager); + archiveFreezer.initialize(); + + // Chain height is 150, we've frozen state up to block 100, we should have initialized the next + // 50 blocks to be archived + assertThat(archiveFreezer.getPendingBlocksCount()).isEqualTo(SHORT_TEST_CHAIN_HEIGHT - 100); + + when(blockchain.getChainHeadBlockNumber()) + .then(requestedBlockNumber -> getCurrentBlockHeight()); + + int totalStorageMoved = 0; + // Process the next 50 blocks. 
2 storage changes should be frozen during this time should happen + // during this processing since there are only trie logs for blocks 101 and 102 + for (long nextBlock = 101; nextBlock < 150; nextBlock++) { + currentBlockHeight = nextBlock; + archiveFreezer.addToFreezerQueue( + nextBlock, blockNumberCache.getUnchecked(nextBlock).get().getHash()); + int storageMoved = archiveFreezer.moveBlockStateToFreezer(); + totalStorageMoved += storageMoved; + if (nextBlock == 112 || nextBlock == 113) { + assertThat(storageMoved).isEqualTo(1); + } else { + assertThat(storageMoved).isEqualTo(0); + } + } + + assertThat(totalStorageMoved).isEqualTo(2); + } + + @Test + public void archiveFreezerMoves1AccountAnd2StorageChangesToFreezerSegment() { + // Set up the block cache + blockNumberCache = + CacheBuilder.newBuilder() + .maximumSize(SHORT_TEST_CHAIN_HEIGHT) + .build( + new CacheLoader<>() { + @Override + public Optional load(final Long blockNumber) { + return getGeneratedBlock(blockNumber, SHORT_TEST_CHAIN_HEIGHT); + } + }); + + when(worldStateStorage.getFlatDbMode()).thenReturn(FlatDbMode.ARCHIVE); + + // If we had previously frozen up to block 100... + when(worldStateStorage.getLatestArchiveFrozenBlock()).thenReturn(Optional.of(100L)); + + // Mock the number of changes the freeze action carries out for each relevant block + when(worldStateStorage.freezePreviousStorageState(any(), any())) + .then( + request -> { + Object objHeader = request.getArgument(0, Optional.class).get(); + if (objHeader instanceof BlockHeader) { + BlockHeader blockHeader = (BlockHeader) objHeader; + if (blockHeader.getNumber() == 101 || blockHeader.getNumber() == 102) { + // Mock 1 storage change when block 102 is being processed, because state changes + // in block 101 can be frozen (and likewise for block 103). NB: the trie log in + // this test for block 103 isn't frozen because no further changes to that storage + // are made + return 1; + } + } + return 0; + }); + + // Mock the number of changes the freeze action carries out for each relevant block + when(worldStateStorage.freezePreviousAccountState(any(), any())) + .then( + request -> { + Object objHeader = request.getArgument(0, Optional.class).get(); + if (objHeader instanceof BlockHeader) { + BlockHeader blockHeader = (BlockHeader) objHeader; + if (blockHeader.getNumber() == 101) { + // Mock 1 state change when block 102 is being processed, because state changes in + // block 101 can be frozen + return 1; + } + } + return 0; + }); + + // When any block is asked for by the archive freezer, generate it on the fly, cache it, and + // return it unless it + when(blockchain.getBlockByNumber(anyLong())) + .then( + requestedBlockNumber -> + blockNumberCache.getUnchecked(requestedBlockNumber.getArgument(0, Long.class))); + + when(blockchain.getBlockHeader(any())) + .then( + requestedBlockHash -> + blockHashCache + .getUnchecked(requestedBlockHash.getArgument(0, Hash.class)) + .map(Block::getHeader)); + + // Generate some trie logs to return for a specific block + + Address address = Address.fromHexString("0x95cD8499051f7FE6a2F53749eC1e9F4a81cafa13"); + + // Simulate a storage change AND an account change in block 101. 
This state and storage will be + // frozen because block 102 updates both again (see below) + TrieLogLayer block101TrieLogs = new TrieLogLayer(); + UInt256 oldStorageValue = UInt256.ZERO; + UInt256 newStorageValue = UInt256.ONE; + UInt256 slot = UInt256.ONE; + StorageSlotKey storageSlotKey = new StorageSlotKey(slot); + block101TrieLogs.addStorageChange(address, storageSlotKey, oldStorageValue, newStorageValue); + StateTrieAccountValue oldAccountValue = + new StateTrieAccountValue(12, Wei.fromHexString("0x123"), Hash.EMPTY, Hash.EMPTY); + StateTrieAccountValue newAccountValue = + new StateTrieAccountValue(13, Wei.fromHexString("0x234"), Hash.EMPTY, Hash.EMPTY); + block101TrieLogs.addAccountChange(address, oldAccountValue, newAccountValue); + + // Simulate a storage AND account change in block 102. + TrieLogLayer block102TrieLogs = new TrieLogLayer(); + oldStorageValue = UInt256.ONE; + newStorageValue = UInt256.valueOf(2L); + slot = UInt256.ONE; + storageSlotKey = new StorageSlotKey(slot); + block102TrieLogs.addStorageChange(address, storageSlotKey, oldStorageValue, newStorageValue); + oldAccountValue = + new StateTrieAccountValue(13, Wei.fromHexString("0x234"), Hash.EMPTY, Hash.EMPTY); + newAccountValue = + new StateTrieAccountValue(14, Wei.fromHexString("0x345"), Hash.EMPTY, Hash.EMPTY); + block102TrieLogs.addAccountChange(address, oldAccountValue, newAccountValue); + + // Simulate a storage change in block 103. This state will not be frozen because it refers to a + // different slot + TrieLogLayer block103TrieLogs = new TrieLogLayer(); + oldStorageValue = UInt256.ZERO; + newStorageValue = UInt256.ONE; + slot = UInt256.valueOf(2L); + storageSlotKey = new StorageSlotKey(slot); + block103TrieLogs.addStorageChange(address, storageSlotKey, oldStorageValue, newStorageValue); + + when(trieLogManager.getTrieLogLayer( + Hash.fromHexString( + "0xf0b2ba5849ad812479a44bb1efd97f1f3fdab945ff53a81a4dea55b4db1a972e"))) + .thenReturn(Optional.of(block101TrieLogs)); + when(trieLogManager.getTrieLogLayer( + Hash.fromHexString( + "0x0d22db864d4effa62b640de645bffd44fb5d130578fbea4399f9abf8d7ac7789"))) + .thenReturn(Optional.of(block102TrieLogs)); + when(trieLogManager.getTrieLogLayer( + Hash.fromHexString( + "0x96440b533326c26f4611e4c0b123ce732aa7a68e3b275f4a5a2ea9bc4b089c73"))) + .thenReturn(Optional.of(block103TrieLogs)); + + // Initialize the archive freezer + BonsaiArchiveFreezer archiveFreezer = + new BonsaiArchiveFreezer(worldStateStorage, blockchain, executeAsync, trieLogManager); + archiveFreezer.initialize(); + + // Chain height is 150, we've frozen state up to block 100, we should have initialized the next + // 50 blocks to be archived + assertThat(archiveFreezer.getPendingBlocksCount()).isEqualTo(SHORT_TEST_CHAIN_HEIGHT - 100); + + when(blockchain.getChainHeadBlockNumber()) + .then(requestedBlockNumber -> getCurrentBlockHeight()); + + int totalStorageMoved = 0; + // Process the next 50 blocks. 
2 storage changes should be frozen during this time should happen + // during this processing since there are only trie logs for blocks 101 and 102 + for (long nextBlock = 101; nextBlock < 150; nextBlock++) { + currentBlockHeight = nextBlock; + archiveFreezer.addToFreezerQueue( + nextBlock, blockNumberCache.getUnchecked(nextBlock).get().getHash()); + int storageAndAccountsMoved = archiveFreezer.moveBlockStateToFreezer(); + if (nextBlock == 112) { + assertThat(storageAndAccountsMoved).isEqualTo(2); + } else if (nextBlock == 113) { + assertThat(storageAndAccountsMoved).isEqualTo(1); + } else { + assertThat(storageAndAccountsMoved).isEqualTo(0); + } + totalStorageMoved += storageAndAccountsMoved; + } + + assertThat(totalStorageMoved).isEqualTo(3); + } + + @Test + public void archiveFreezerInMemoryDBFreezesAccountStateCorrectly() { + final BonsaiPreImageProxy preImageProxy = + new BonsaiPreImageProxy.BonsaiReferenceTestPreImageProxy(); + + final BonsaiWorldStateKeyValueStorage bonsaiWorldStateKeyValueStorage = + new BonsaiWorldStateKeyValueStorage( + new InMemoryKeyValueStorageProvider(), + new NoOpMetricsSystem(), + DataStorageConfiguration.DEFAULT_BONSAI_ARCHIVE_CONFIG); + + final BonsaiReferenceTestWorldStateStorage testWorldStateStorage = + new BonsaiReferenceTestWorldStateStorage(bonsaiWorldStateKeyValueStorage, preImageProxy); + + assertThat(testWorldStateStorage.getFlatDbMode()).isEqualTo(FlatDbMode.ARCHIVE); + + // Assume we've archived up to block 150L i.e. we're up to date with the chain head + // (SHORT_TEST_CHAIN_HEIGHT) + testWorldStateStorage.setLatestArchiveFrozenBlock(150L); + + // Set up the block cache + blockNumberCache = + CacheBuilder.newBuilder() + .maximumSize(SHORT_TEST_CHAIN_HEIGHT) + .build( + new CacheLoader<>() { + @Override + public Optional load(final Long blockNumber) { + return getGeneratedBlock(blockNumber, SHORT_TEST_CHAIN_HEIGHT); + } + }); + + // When any block is asked for by the archive freezer, generate it on the fly, cache it, and + // return it unless it + when(blockchain.getBlockByNumber(anyLong())) + .then( + requestedBlockNumber -> + blockNumberCache.getUnchecked(requestedBlockNumber.getArgument(0, Long.class))); + + when(blockchain.getBlockHeader(any())) + .then( + requestedBlockHash -> + blockHashCache + .getUnchecked(requestedBlockHash.getArgument(0, Hash.class)) + .map(Block::getHeader)); + + // Generate some trie logs to return for a specific block + + // For state to be moved from the primary DB segment to the archive DB segment, we need the + // primary DB segment + // to have the account in already + SegmentedKeyValueStorageTransaction tx = + testWorldStateStorage.getComposedWorldStateStorage().startTransaction(); + final BonsaiAccount block150Account = + new BonsaiAccount( + bonsaiWorldState, + address, + address.addressHash(), + 12, + Wei.fromHexString("0x123"), + Hash.EMPTY, + Hash.EMPTY, + false); + final BonsaiAccount block151Account = + new BonsaiAccount( + bonsaiWorldState, + address, + address.addressHash(), + 13, + Wei.fromHexString("0x234"), + Hash.EMPTY, + Hash.EMPTY, + false); + final BonsaiAccount block152Account = + new BonsaiAccount( + bonsaiWorldState, + address, + address.addressHash(), + 14, + Wei.fromHexString("0x345"), + Hash.EMPTY, + Hash.EMPTY, + false); + // The key for a bonsai-archive flat DB account entry is suffixed with the block number where + // that state change took place, hence the "0x0000000000000096" suffix to the address hash below + BytesValueRLPOutput out = new BytesValueRLPOutput(); + 
block150Account.writeTo(out); + tx.put( + KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE, + Arrays.concatenate( + address.addressHash().toArrayUnsafe(), + Bytes.fromHexString("0x0000000000000096").toArrayUnsafe()), + out.encoded().toArrayUnsafe()); + out = new BytesValueRLPOutput(); + block151Account.writeTo(out); + tx.put( + KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE, + Arrays.concatenate( + address.addressHash().toArrayUnsafe(), + Bytes.fromHexString("0x0000000000000097").toArrayUnsafe()), + out.encoded().toArrayUnsafe()); + out = new BytesValueRLPOutput(); + block152Account.writeTo(out); + tx.put( + KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE, + Arrays.concatenate( + address.addressHash().toArrayUnsafe(), + Bytes.fromHexString("0x0000000000000098").toArrayUnsafe()), + out.encoded().toArrayUnsafe()); + tx.commit(); + + // Simulate an account change in block 151. This state will be frozen because block 152 updates + // the same account (see below) + TrieLogLayer block151TrieLogs = new TrieLogLayer(); + StateTrieAccountValue oldValue = + new StateTrieAccountValue(12, Wei.fromHexString("0x123"), Hash.EMPTY, Hash.EMPTY); + StateTrieAccountValue newValue = + new StateTrieAccountValue(13, Wei.fromHexString("0x234"), Hash.EMPTY, Hash.EMPTY); + block151TrieLogs.addAccountChange(address, oldValue, newValue); + + // Simulate another change to the same account, this time in block 152. This change won't be + // frozen during the test because it is the current state of the account. + TrieLogLayer block152TrieLogs = new TrieLogLayer(); + oldValue = new StateTrieAccountValue(13, Wei.fromHexString("0x234"), Hash.EMPTY, Hash.EMPTY); + newValue = new StateTrieAccountValue(14, Wei.fromHexString("0x345"), Hash.EMPTY, Hash.EMPTY); + block152TrieLogs.addAccountChange(address, oldValue, newValue); + when(trieLogManager.getTrieLogLayer( + Hash.fromHexString( + "0x62f948556539c8af8f44dd080bc2366fc361eac68e5623313a42323e48cb3f8e"))) // Block 151 + .thenReturn(Optional.of(block151TrieLogs)); + when(trieLogManager.getTrieLogLayer( + Hash.fromHexString( + "0x8d6a523f547ee224ba533b34034a3056838f2dab3daf0ffbf75713daf18bf885"))) // Block 152 + .thenReturn(Optional.of(block152TrieLogs)); + + // Initialize the archive freezer + BonsaiArchiveFreezer archiveFreezer = + new BonsaiArchiveFreezer(testWorldStateStorage, blockchain, executeAsync, trieLogManager); + archiveFreezer.initialize(); + + // Chain height is 150, we've frozen state up to block 150 + currentBlockHeight = SHORT_TEST_CHAIN_HEIGHT; + when(blockchain.getChainHeadBlockNumber()) + .then(requestedBlockNumber -> getCurrentBlockHeight()); + assertThat(archiveFreezer.getPendingBlocksCount()).isEqualTo(0); + + // Process the next 50 blocks 150-200 and count the archive changes. 
We'll recreate the + // block cache so we can generate blocks beyond 150 + blockNumberCache = + CacheBuilder.newBuilder() + .maximumSize(LONG_TEST_CHAIN_HEIGHT) + .build( + new CacheLoader<>() { + @Override + public Optional load(final Long blockNumber) { + return getGeneratedBlock(blockNumber, LONG_TEST_CHAIN_HEIGHT); + } + }); + + blockHashCache = + CacheBuilder.newBuilder() + .maximumSize(LONG_TEST_CHAIN_HEIGHT) + .build( + new CacheLoader<>() { + @Override + public Optional load(final Hash blockHash) { + Optional foundBlock; + for (long i = 0; i < LONG_TEST_CHAIN_HEIGHT; i++) { + if ((foundBlock = blockNumberCache.getUnchecked(i)).isPresent() + && foundBlock.get().getHash().equals(blockHash)) { + return foundBlock; + } + } + return Optional.empty(); + } + }); + + // By default we freeze state for chainheight - 10 blocks, so importing up to block 210 whould + // cause blocks up to 200 to be archived + for (long nextBlock = 151; nextBlock <= 210; nextBlock++) { + currentBlockHeight = nextBlock; + archiveFreezer.onBlockAdded( + BlockAddedEvent.createForStoredOnly(blockNumberCache.getUnchecked(nextBlock).get())); + } + + // We should have marked up to block 200 as archived + assertThat(testWorldStateStorage.getLatestArchiveFrozenBlock().get()).isEqualTo(200); + + // Only the latest/current state of the account should be in the primary DB segment + assertThat( + testWorldStateStorage.getComposedWorldStateStorage().stream( + KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE) + .count()) + .isEqualTo(1); + + // Both the previous account states should be in the freezer segment, plus the special key that + // records the latest frozen block + assertThat( + testWorldStateStorage.getComposedWorldStateStorage().stream( + KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE_FREEZER) + .count()) + .isEqualTo(3); + + // Check the entries are in the correct segment + assertThat( + testWorldStateStorage + .getComposedWorldStateStorage() + .containsKey( + KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE_FREEZER, + Arrays.concatenate( + address.addressHash().toArrayUnsafe(), + Bytes.fromHexString("0x0000000000000096").toArrayUnsafe()))) + .isTrue(); + assertThat( + testWorldStateStorage + .getComposedWorldStateStorage() + .containsKey( + KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE_FREEZER, + Arrays.concatenate( + address.addressHash().toArrayUnsafe(), + Bytes.fromHexString("0x0000000000000097").toArrayUnsafe()))) + .isTrue(); + assertThat( + testWorldStateStorage + .getComposedWorldStateStorage() + .containsKey( + KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE, + Arrays.concatenate( + address.addressHash().toArrayUnsafe(), + Bytes.fromHexString("0x0000000000000098").toArrayUnsafe()))) + .isTrue(); + } + + @Test + public void archiveFreezerInMemoryDBFreezesStorageStateCorrectly() { + final BonsaiPreImageProxy preImageProxy = + new BonsaiPreImageProxy.BonsaiReferenceTestPreImageProxy(); + + final BonsaiWorldStateKeyValueStorage bonsaiWorldStateKeyValueStorage = + new BonsaiWorldStateKeyValueStorage( + new InMemoryKeyValueStorageProvider(), + new NoOpMetricsSystem(), + DataStorageConfiguration.DEFAULT_BONSAI_ARCHIVE_CONFIG); + + final BonsaiReferenceTestWorldStateStorage testWorldStateStorage = + new BonsaiReferenceTestWorldStateStorage(bonsaiWorldStateKeyValueStorage, preImageProxy); + + assertThat(testWorldStateStorage.getFlatDbMode()).isEqualTo(FlatDbMode.ARCHIVE); + + // Assume we've archived up to block 150L i.e. 
we're up to date with the chain head + // (SHORT_TEST_CHAIN_HEIGHT) + testWorldStateStorage.setLatestArchiveFrozenBlock(150L); + + // Set up the block cache + blockNumberCache = + CacheBuilder.newBuilder() + .maximumSize(SHORT_TEST_CHAIN_HEIGHT) + .build( + new CacheLoader<>() { + @Override + public Optional load(final Long blockNumber) { + return getGeneratedBlock(blockNumber, SHORT_TEST_CHAIN_HEIGHT); + } + }); + + // When any block is asked for by the archive freezer, generate it on the fly, cache it, and + // return it unless it + when(blockchain.getBlockByNumber(anyLong())) + .then( + requestedBlockNumber -> + blockNumberCache.getUnchecked(requestedBlockNumber.getArgument(0, Long.class))); + + when(blockchain.getBlockHeader(any())) + .then( + requestedBlockHash -> + blockHashCache + .getUnchecked(requestedBlockHash.getArgument(0, Hash.class)) + .map(Block::getHeader)); + + // Generate some trie logs to return for a specific block + + // For storage to be moved from the primary DB segment to the archive DB segment, we need the + // primary DB segment to have the storage in already + SegmentedKeyValueStorageTransaction tx = + testWorldStateStorage.getComposedWorldStateStorage().startTransaction(); + StorageSlotKey slotKey = new StorageSlotKey(UInt256.fromHexString("0x1")); + // The key for a bonsai-archive flat DB storage entry is suffixed with the block number where + // that state change took place, hence the "0x0000000000000096" suffix to the address hash below + tx.put( + KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE, + Arrays.concatenate( + address.addressHash().toArrayUnsafe(), + slotKey.getSlotHash().toArrayUnsafe(), + Bytes.fromHexString("0x0000000000000096").toArrayUnsafe()), + Bytes.fromHexString("0x0123").toArrayUnsafe()); + tx.put( + KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE, + Arrays.concatenate( + address.addressHash().toArrayUnsafe(), + slotKey.getSlotHash().toArrayUnsafe(), + Bytes.fromHexString("0x0000000000000097").toArrayUnsafe()), + Bytes.fromHexString("0x0234").toArrayUnsafe()); + tx.put( + KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE, + Arrays.concatenate( + address.addressHash().toArrayUnsafe(), + slotKey.getSlotHash().toArrayUnsafe(), + Bytes.fromHexString("0x0000000000000098").toArrayUnsafe()), + Bytes.fromHexString("0x0345").toArrayUnsafe()); + tx.put( + KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE, + Arrays.concatenate( + address.addressHash().toArrayUnsafe(), + slotKey.getSlotHash().toArrayUnsafe(), + Bytes.fromHexString("0x0000000000000099").toArrayUnsafe()), + Bytes.fromHexString("0x0456").toArrayUnsafe()); + tx.commit(); + + // Simulate a storage change in block 151. This state will be frozen because block 152 updates + // the same storage (see below) + TrieLogLayer block151TrieLogs = new TrieLogLayer(); + UInt256 oldValue = UInt256.fromHexString("0x123"); + UInt256 newValue = UInt256.fromHexString("0x234"); + UInt256 slot = UInt256.ONE; + StorageSlotKey storageSlotKey = new StorageSlotKey(slot); + block151TrieLogs.addStorageChange(address, storageSlotKey, oldValue, newValue); + + // Simulate a storage change in block 152. 
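The storage entries written above extend the same scheme: the key is the account hash, then the slot hash, then the 8-byte block-number suffix, so every historical value of one slot is stored contiguously and in block order. A small sketch under the same caveat as before (JDK only, illustrative names):

```java
import java.nio.ByteBuffer;

/** Illustrative sketch: archive flat-DB storage key = accountHash ++ slotHash ++ 8-byte block number. */
final class ArchiveStorageKeyExample {
  static byte[] archiveStorageKey(
      final byte[] accountHash, final byte[] slotHash, final long blockNumber) {
    final ByteBuffer buf =
        ByteBuffer.allocate(accountHash.length + slotHash.length + Long.BYTES);
    buf.put(accountHash);
    buf.put(slotHash);
    buf.putLong(blockNumber); // big-endian, so all versions of one slot sort by block number
    return buf.array();
  }
}
```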
This state will also be frozen because block 152 + // updates the same storage (see below) + TrieLogLayer block152TrieLogs = new TrieLogLayer(); + oldValue = UInt256.fromHexString("0x234"); + newValue = UInt256.fromHexString("0x345"); + slot = UInt256.ONE; + storageSlotKey = new StorageSlotKey(slot); + block152TrieLogs.addStorageChange(address, storageSlotKey, oldValue, newValue); + + // Simulate a storage change in block 153. This state will not be frozen because it refers to a + // different slot + TrieLogLayer block153TrieLogs = new TrieLogLayer(); + oldValue = UInt256.fromHexString("0x345"); + newValue = UInt256.fromHexString("0x456"); + slot = UInt256.ONE; + storageSlotKey = new StorageSlotKey(slot); + block153TrieLogs.addStorageChange(address, storageSlotKey, oldValue, newValue); + + when(trieLogManager.getTrieLogLayer( + Hash.fromHexString( + "0x62f948556539c8af8f44dd080bc2366fc361eac68e5623313a42323e48cb3f8e"))) // Block 151 + .thenReturn(Optional.of(block151TrieLogs)); + when(trieLogManager.getTrieLogLayer( + Hash.fromHexString( + "0x8d6a523f547ee224ba533b34034a3056838f2dab3daf0ffbf75713daf18bf885"))) // Block 152 + .thenReturn(Optional.of(block152TrieLogs)); + when(trieLogManager.getTrieLogLayer( + Hash.fromHexString( + "0xffce5e5e58cc2737a50076e4dce8c7c715968b98a52942dc2072df4b6941d1ca"))) // Block 153 + .thenReturn(Optional.of(block153TrieLogs)); + + // Initialize the archive freezer + BonsaiArchiveFreezer archiveFreezer = + new BonsaiArchiveFreezer(testWorldStateStorage, blockchain, executeAsync, trieLogManager); + archiveFreezer.initialize(); + + // Chain height is 150, we've frozen state up to block 150 + currentBlockHeight = SHORT_TEST_CHAIN_HEIGHT; + when(blockchain.getChainHeadBlockNumber()) + .then(requestedBlockNumber -> getCurrentBlockHeight()); + assertThat(archiveFreezer.getPendingBlocksCount()).isEqualTo(0); + + // Process the next 50 blocks 150-200 and count the archive changes. 
We'll recreate the + // block cache so we can generate blocks beyond 150 + blockNumberCache = + CacheBuilder.newBuilder() + .maximumSize(LONG_TEST_CHAIN_HEIGHT) + .build( + new CacheLoader<>() { + @Override + public Optional load(final Long blockNumber) { + return getGeneratedBlock(blockNumber, LONG_TEST_CHAIN_HEIGHT); + } + }); + + blockHashCache = + CacheBuilder.newBuilder() + .maximumSize(LONG_TEST_CHAIN_HEIGHT) + .build( + new CacheLoader<>() { + @Override + public Optional load(final Hash blockHash) { + Optional foundBlock; + for (long i = 0; i < LONG_TEST_CHAIN_HEIGHT; i++) { + if ((foundBlock = blockNumberCache.getUnchecked(i)).isPresent() + && foundBlock.get().getHash().equals(blockHash)) { + return foundBlock; + } + } + return Optional.empty(); + } + }); + + // By default we freeze state for chainheight - 10 blocks, so importing up to block 210 whould + // cause blocks up to 200 to be archived + for (long nextBlock = 151; nextBlock <= 210; nextBlock++) { + currentBlockHeight = nextBlock; + archiveFreezer.onBlockAdded( + BlockAddedEvent.createForStoredOnly(blockNumberCache.getUnchecked(nextBlock).get())); + } + + // We should have marked up to block 200 as archived + assertThat(testWorldStateStorage.getLatestArchiveFrozenBlock().get()).isEqualTo(200); + + // Only the latest/current state of the account should be in the primary DB segment + assertThat( + testWorldStateStorage.getComposedWorldStateStorage().stream( + KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE) + .count()) + .isEqualTo(1); + + // All 3 previous storage states should be in the storage freezer + assertThat( + testWorldStateStorage.getComposedWorldStateStorage().stream( + KeyValueSegmentIdentifier.ACCOUNT_STORAGE_FREEZER) + .count()) + .isEqualTo(3); + + // Check the entries are in the correct segment + assertThat( + testWorldStateStorage + .getComposedWorldStateStorage() + .containsKey( + KeyValueSegmentIdentifier.ACCOUNT_STORAGE_FREEZER, + Arrays.concatenate( + address.addressHash().toArrayUnsafe(), + slotKey.getSlotHash().toArrayUnsafe(), + Bytes.fromHexString("0x0000000000000096").toArrayUnsafe()))) + .isTrue(); + assertThat( + testWorldStateStorage + .getComposedWorldStateStorage() + .containsKey( + KeyValueSegmentIdentifier.ACCOUNT_STORAGE_FREEZER, + Arrays.concatenate( + address.addressHash().toArrayUnsafe(), + slotKey.getSlotHash().toArrayUnsafe(), + Bytes.fromHexString("0x0000000000000097").toArrayUnsafe()))) + .isTrue(); + assertThat( + testWorldStateStorage + .getComposedWorldStateStorage() + .containsKey( + KeyValueSegmentIdentifier.ACCOUNT_STORAGE_FREEZER, + Arrays.concatenate( + address.addressHash().toArrayUnsafe(), + slotKey.getSlotHash().toArrayUnsafe(), + Bytes.fromHexString("0x0000000000000098").toArrayUnsafe()))) + .isTrue(); + assertThat( + testWorldStateStorage + .getComposedWorldStateStorage() + .containsKey( + KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE, + Arrays.concatenate( + address.addressHash().toArrayUnsafe(), + slotKey.getSlotHash().toArrayUnsafe(), + Bytes.fromHexString("0x0000000000000099").toArrayUnsafe()))) + .isTrue(); + } +} From 829a98eb32951eeccb996d83f30665b26dfe7241 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Thu, 3 Oct 2024 13:26:31 +0100 Subject: [PATCH 18/39] Refactoring, fix issues, ensure only 1 batch of blocks is archived at a time Signed-off-by: Matthew Whitehead --- .../controller/BesuControllerBuilder.java | 3 +- .../worldview/BonsaiArchiveFreezer.java | 237 ++++++++++-------- .../common/trielog/ArchiveFreezerTests.java | 101 +++++++- 3 files 
changed, 226 insertions(+), 115 deletions(-) diff --git a/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java b/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java index 054f78aa8bf..12fd41015dc 100644 --- a/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java +++ b/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java @@ -844,8 +844,9 @@ private BonsaiArchiveFreezer createBonsaiArchiveFreezer( blockchain, scheduler::executeServiceTask, trieLogManager); - archiveFreezer.initialize(); + long archivedBlocks = archiveFreezer.initialize(); + LOG.info("Bonsai archive initialised, caught up {} blocks", archivedBlocks); return archiveFreezer; } diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java index 7f41fa22127..3a5f81dbcdb 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java @@ -26,8 +26,12 @@ import java.util.Collections; import java.util.Map; import java.util.Optional; +import java.util.SortedMap; import java.util.TreeMap; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; import java.util.function.Consumer; import org.apache.tuweni.bytes.Bytes; @@ -66,13 +70,14 @@ public BonsaiArchiveFreezer( this.trieLogManager = trieLogManager; } - private void preloadCatchupBlocks() { + private int loadNextCatchupBlocks() { Optional frozenBlocksHead = Optional.empty(); Optional latestFrozenBlock = rootWorldStateStorage.getLatestArchiveFrozenBlock(); if (latestFrozenBlock.isPresent()) { - frozenBlocksHead = latestFrozenBlock; + // Start from the next block after the most recently frozen block + frozenBlocksHead = Optional.of(latestFrozenBlock.get() + 1); } else { // Start from genesis block if (blockchain.getBlockHashByNumber(0).isPresent()) { @@ -80,8 +85,8 @@ private void preloadCatchupBlocks() { } } + int preLoadedBlocks = 0; if (frozenBlocksHead.isPresent()) { - int preLoadedBlocks = 0; Optional nextBlock = blockchain.getBlockByNumber(frozenBlocksHead.get()); for (int i = 0; i < CATCHUP_LIMIT; i++) { if (nextBlock.isPresent()) { @@ -100,17 +105,21 @@ private void preloadCatchupBlocks() { .addArgument(frozenBlocksHead.get()) .log(); } + return preLoadedBlocks; } - public void initialize() { + public long initialize() { // On startup there will be recent blocks whose state and storage hasn't been archived yet. - // Pre-load them ready for freezing state once enough new blocks have been added to the chain. - preloadCatchupBlocks(); - - // Keep catching up until we move less to the freezer than the catchup limit - while (moveBlockStateToFreezer() == CATCHUP_LIMIT) { - preloadCatchupBlocks(); + // Pre-load them in blocks of CATCHUP_LIMIT ready for freezing state once enough new blocks have + // been added to the chain. 
+ long totalBlocksCaughtUp = 0; + int catchupBlocksLoaded = CATCHUP_LIMIT; + while (catchupBlocksLoaded >= CATCHUP_LIMIT) { + catchupBlocksLoaded = loadNextCatchupBlocks(); + moveBlockStateToFreezer(); + totalBlocksCaughtUp += catchupBlocksLoaded; } + return totalBlocksCaughtUp; } public int getPendingBlocksCount() { @@ -145,120 +154,144 @@ public int moveBlockStateToFreezer() { AtomicInteger frozenAccountStateCount = new AtomicInteger(); AtomicInteger frozenAccountStorageCount = new AtomicInteger(); - LOG.atTrace() - .setMessage( - "Moving cold state to freezer storage (chainHeadNumber: {} - numberOfBlocksToKeepInWarmStorage: {}) = {}") - .addArgument(blockchain::getChainHeadBlockNumber) - .addArgument(DISTANCE_FROM_HEAD_BEFORE_FREEZING_OLD_STATE) - .addArgument(retainAboveThisBlock) - .log(); - // Typically we will move all storage and state for a single block i.e. when a new block is // imported, move state for block-N. There are cases where we catch-up and move old state // for a number of blocks so we may iterate over a number of blocks freezing their state, // not just a single one. + final SortedMap blocksToFreeze; + synchronized (this) { + blocksToFreeze = new TreeMap<>(); + pendingBlocksToArchive.entrySet().stream() + .filter( + (e) -> blocksToFreeze.size() <= CATCHUP_LIMIT && e.getKey() <= retainAboveThisBlock) + .forEach( + (e) -> { + blocksToFreeze.put(e.getKey(), e.getValue()); + }); + } - final Map blocksToFreeze = new TreeMap<>(); - pendingBlocksToArchive.entrySet().stream() - .filter((e) -> e.getKey() <= retainAboveThisBlock) - .forEach( - (e) -> { - blocksToFreeze.put(e.getKey(), e.getValue()); - }); + if (blocksToFreeze.size() > 0) { + LOG.atInfo() + .setMessage("Moving cold state to freezer storage: {} to {} ") + .addArgument(blocksToFreeze.firstKey()) + .addArgument(blocksToFreeze.lastKey()) + .log(); - // Determine which world state keys have changed in the last N blocks by looking at the - // trie logs for the blocks. Then move the old keys to the freezer segment (if and only if they - // have changed) - blocksToFreeze - .entrySet() - .forEach( - (block) -> { - if (pendingBlocksToArchive.size() > 0 && pendingBlocksToArchive.size() % 100 == 0) { - // Log progress in case catching up causes there to be a large number of keys - // to move - LOG.atInfo() - .setMessage("state for blocks {} to {} archived") + // Vars used only for logging progress + final AtomicLong blocksFrozen = new AtomicLong(0); + final AtomicLong startBlock = new AtomicLong(-1); + + // Determine which world state keys have changed in the last N blocks by looking at the + // trie logs for the blocks. 
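The reworked initialize() above keeps pulling fixed-size batches until a short batch tells it that history is exhausted. The control flow can be sketched generically as below; the two interfaces are stand-ins for the freezer's queue-loading and state-moving steps, not real Besu types:

```java
/** Sketch of the batched catch-up loop: a full batch implies there may be more history behind it,
 *  a short batch means we have reached the retention boundary and can stop. */
final class CatchupLoopExample {
  interface BatchSource { int loadNextBatch(int limit); } // returns how many blocks were queued
  interface Archiver { void archiveQueued(); }

  static long catchUp(final BatchSource source, final Archiver archiver, final int batchLimit) {
    long totalCaughtUp = 0;
    int loaded = batchLimit;
    while (loaded >= batchLimit) {
      loaded = source.loadNextBatch(batchLimit);
      archiver.archiveQueued();
      totalCaughtUp += loaded;
    }
    return totalCaughtUp;
  }
}
```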
Then move the old keys to the freezer segment (if and only if + // they have changed) + blocksToFreeze + .entrySet() + .forEach( + (block) -> { + // For logging progress + if (startBlock.get() < 0) { + startBlock.set(block.getKey()); + } + if (blocksFrozen.get() > 0 && blocksFrozen.get() % 100 == 0) { + // Log progress in case catching up causes there to be a large number of keys + // to move + LOG.atInfo() + .setMessage("archive progress: state for blocks {} to {} archived") + .addArgument(startBlock.get()) + .addArgument(startBlock.get() + blocksFrozen.get()) + .log(); + } + Hash blockHash = block.getValue(); + LOG.atDebug() + .setMessage("Freezing all account state for block {}") .addArgument(block.getKey()) - .addArgument(block.getKey() + pendingBlocksToArchive.size()) .log(); - } - Hash blockHash = block.getValue(); - LOG.atDebug() - .setMessage("Freezing all account state for block {}") - .addArgument(block.getKey()) - .log(); - Optional trieLog = trieLogManager.getTrieLogLayer(blockHash); - if (trieLog.isPresent()) { - trieLog - .get() - .getAccountChanges() - .forEach( - (address, change) -> { - // Move any previous state for this account - frozenAccountStateCount.addAndGet( - rootWorldStateStorage.freezePreviousAccountState( - blockchain.getBlockHeader( - blockchain.getBlockHeader(blockHash).get().getParentHash()), - address.addressHash())); - }); + Optional trieLog = trieLogManager.getTrieLogLayer(blockHash); + if (trieLog.isPresent()) { + trieLog + .get() + .getAccountChanges() + .forEach( + (address, change) -> { + // Move any previous state for this account + frozenAccountStateCount.addAndGet( + rootWorldStateStorage.freezePreviousAccountState( + blockchain.getBlockHeader( + blockchain.getBlockHeader(blockHash).get().getParentHash()), + address.addressHash())); + }); + LOG.atDebug() + .setMessage("Freezing all storage state for block {}") + .addArgument(block.getKey()) + .log(); + trieLog + .get() + .getStorageChanges() + .forEach( + (address, storageSlotKey) -> { + storageSlotKey.forEach( + (slotKey, slotValue) -> { + // Move any previous state for this account + frozenAccountStorageCount.addAndGet( + rootWorldStateStorage.freezePreviousStorageState( + blockchain.getBlockHeader( + blockchain + .getBlockHeader(blockHash) + .get() + .getParentHash()), + Bytes.concatenate( + address.addressHash(), slotKey.getSlotHash()))); + }); + }); + } LOG.atDebug() - .setMessage("Freezing all storage state for block {}") + .setMessage("All account state and storage frozen for block {}") .addArgument(block.getKey()) .log(); - trieLog - .get() - .getStorageChanges() - .forEach( - (address, storageSlotKey) -> { - storageSlotKey.forEach( - (slotKey, slotValue) -> { - // Move any previous state for this account - frozenAccountStorageCount.addAndGet( - rootWorldStateStorage.freezePreviousStorageState( - blockchain.getBlockHeader( - blockchain - .getBlockHeader(blockHash) - .get() - .getParentHash()), - Bytes.concatenate( - address.addressHash(), slotKey.getSlotHash()))); - }); - }); - } - LOG.atDebug() - .setMessage("All account state and storage frozen for block {}") - .addArgument(block.getKey()) - .log(); - rootWorldStateStorage.setLatestArchiveFrozenBlock(block.getKey()); - }); + rootWorldStateStorage.setLatestArchiveFrozenBlock(block.getKey()); - LOG.atDebug() - .setMessage( - "finished moving cold state to freezer storage for range (chainHeadNumber: {} - numberOfBlocksToKeepInWarmStorage: {}) = {}. 
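Seen from the data's point of view, the per-block work above boils down to: for every key the trie log says changed, keep the newest version in the primary segment and move the older versions into the freezer segment. A sketch of that behaviour using two plain maps in place of the DB segments; the key format and names are illustrative only:

```java
import java.util.Map;
import java.util.NavigableMap;

/** Sketch of the freeze step: keep the newest version of a changed key in the primary segment,
 *  move every older version to the freezer segment. Keys are "<naturalKey>:<block>" strings here. */
final class FreezePreviousStateExample {
  static int freezePrevious(
      final NavigableMap<String, String> primary,
      final Map<String, String> freezer,
      final String naturalKey) {
    // All versions of this key sit in one contiguous sub-map thanks to the block-number suffix.
    final NavigableMap<String, String> versions =
        primary.subMap(naturalKey + ":", true, naturalKey + ";", false);
    int moved = 0;
    // Keep the newest version in primary, move everything older into the freezer segment.
    while (versions.size() > 1) {
      final Map.Entry<String, String> oldest = versions.pollFirstEntry();
      freezer.put(oldest.getKey(), oldest.getValue());
      moved++;
    }
    return moved;
  }
}
```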
Froze {} account state entries, {} account storage entries from {} blocks") - .addArgument(blockchain::getChainHeadBlockNumber) - .addArgument(DISTANCE_FROM_HEAD_BEFORE_FREEZING_OLD_STATE) - .addArgument(retainAboveThisBlock) - .addArgument(frozenAccountStateCount.get()) - .addArgument(frozenAccountStorageCount.get()) - .addArgument(blocksToFreeze.size()) - .log(); + // For logging progress + blocksFrozen.getAndIncrement(); + }); - removeArchivedFromQueue(blocksToFreeze); + LOG.atDebug() + .setMessage( + "finished moving cold state for blocks {} to {}. Froze {} account state entries, {} account storage entries") + .addArgument(startBlock.get()) + .addArgument(startBlock.get() + (blocksFrozen.get() - 1)) + .addArgument(frozenAccountStateCount.get()) + .addArgument(frozenAccountStorageCount.get()) + .log(); + + removeArchivedFromQueue(blocksToFreeze); + } return frozenAccountStateCount.get() + frozenAccountStorageCount.get(); } + private final Lock archiveMutex = new ReentrantLock(true); + @Override public void onBlockAdded(final BlockAddedEvent addedBlockContext) { final Hash blockHash = addedBlockContext.getBlock().getHeader().getBlockHash(); final Optional blockNumber = Optional.of(addedBlockContext.getBlock().getHeader().getNumber()); blockNumber.ifPresent( - blockNum -> - executeAsync.accept( - () -> { - addToFreezerQueue(blockNum, blockHash); - moveBlockStateToFreezer(); - })); + blockNum -> { + addToFreezerQueue(blockNum, blockHash); + + // Since moving blocks can be done in batches we only want + // one instance running at a time + executeAsync.accept( + () -> { + if (archiveMutex.tryLock()) { + try { + moveBlockStateToFreezer(); + } finally { + archiveMutex.unlock(); + } + } + }); + }); } } diff --git a/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/trielog/ArchiveFreezerTests.java b/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/trielog/ArchiveFreezerTests.java index d3fabcb39fb..5414bee7b9d 100644 --- a/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/trielog/ArchiveFreezerTests.java +++ b/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/trielog/ArchiveFreezerTests.java @@ -17,6 +17,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.when; import org.hyperledger.besu.datatypes.Address; @@ -47,6 +48,7 @@ import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction; import java.util.Optional; +import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; import com.google.common.cache.CacheBuilder; @@ -66,8 +68,9 @@ public class ArchiveFreezerTests { // Number of blocks in the chain. 
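The new onBlockAdded above always enqueues the block first, then hands off to an async task that only proceeds if it wins a fair ReentrantLock via tryLock, so at most one archiving batch runs at a time and redundant triggers return immediately. The pattern in isolation looks roughly like this; the executor and callbacks are placeholders, not the real freezer wiring:

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

/** Sketch of the "queue the work, then try to become the single drainer" pattern. */
final class SingleRunnerExample {
  private final Lock mutex = new ReentrantLock(true); // fair lock, as in the patch
  private final ExecutorService executor = Executors.newCachedThreadPool();

  void onNewWork(final Runnable enqueue, final Runnable drainQueue) {
    enqueue.run(); // always record the work first
    executor.execute(
        () -> {
          // Only one drain runs at a time; concurrent triggers give up immediately
          // because the drain that is already running will pick up what they enqueued.
          if (mutex.tryLock()) {
            try {
              drainQueue.run();
            } finally {
              mutex.unlock();
            }
          }
        });
  }
}
```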
This is different to the number of blocks // we have successfully frozen state for - static final long SHORT_TEST_CHAIN_HEIGHT = 150; - static final long LONG_TEST_CHAIN_HEIGHT = 2000; + static final long SHORT_TEST_CHAIN_HEIGHT = 151; + static final long LONG_TEST_CHAIN_HEIGHT = + 2001; // We want block 2000 to be returned so set to 2001 // Address used for account and storage changes final Address address = Address.fromHexString("0x95cD8499051f7FE6a2F53749eC1e9F4a81cafa13"); @@ -170,7 +173,27 @@ public Optional load(final Long blockNumber) { when(worldStateStorage.getFlatDbMode()).thenReturn(FlatDbMode.ARCHIVE); // If we had previously frozen up to block 100... - when(worldStateStorage.getLatestArchiveFrozenBlock()).thenReturn(Optional.of(100L)); + final AtomicLong frozenBlocks = new AtomicLong(100L); + + // Mock the DB setter so it updates what the getter returns + doAnswer( + invocation -> { + long thisValue = invocation.getArgument(0, Long.class); + frozenBlocks.set(thisValue); + return null; + }) + .when(worldStateStorage) + .setLatestArchiveFrozenBlock(any(Long.class)); + + // Mock the DB getter + doAnswer( + invocation -> { + return Optional.of(frozenBlocks.get()); + }) + .when(worldStateStorage) + .getLatestArchiveFrozenBlock(); + + when(blockchain.getChainHeadBlockNumber()).thenReturn(2000L); // When any block is asked for by the archive freezer, generate it on the fly and return it // unless it is > block num 2000 @@ -181,10 +204,10 @@ public Optional load(final Long blockNumber) { BonsaiArchiveFreezer archiveFreezer = new BonsaiArchiveFreezer(worldStateStorage, blockchain, executeAsync, trieLogManager); - archiveFreezer.initialize(); + long caughtUpBlocks = archiveFreezer.initialize(); - // Check that we will only attempt to catch up 1000 blocks worth of state/storage moves - assertThat(archiveFreezer.getPendingBlocksCount()).isEqualTo(1000); + // Check that blocks 101 to 1990 (10 before chain head 2000) have been caught up + assertThat(caughtUpBlocks).isEqualTo(1900); } @Test @@ -204,7 +227,25 @@ public Optional load(final Long blockNumber) { when(worldStateStorage.getFlatDbMode()).thenReturn(FlatDbMode.ARCHIVE); // If we had previously frozen up to block 100... 
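The test change above swaps a fixed getLatestArchiveFrozenBlock stub for a small stateful mock: one AtomicLong backs both the setter and the getter, so the freezer observes its own progress across calls. A generic sketch of that Mockito pattern, written against an illustrative interface rather than the real storage class:

```java
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Optional;
import java.util.concurrent.atomic.AtomicLong;

/** Sketch: back a mocked setter/getter pair with shared state so writes are visible to reads. */
final class StatefulMockExample {
  interface FrozenBlockStore {
    void setLatestFrozenBlock(long block);
    Optional<Long> getLatestFrozenBlock();
  }

  static FrozenBlockStore stubbedStore(final long initiallyFrozen) {
    final AtomicLong frozen = new AtomicLong(initiallyFrozen);
    final FrozenBlockStore store = mock(FrozenBlockStore.class);
    // The mocked setter updates the shared AtomicLong...
    doAnswer(
            invocation -> {
              frozen.set(invocation.getArgument(0, Long.class));
              return null;
            })
        .when(store)
        .setLatestFrozenBlock(anyLong());
    // ...and the mocked getter reads it back, so the code under test sees its own progress.
    when(store.getLatestFrozenBlock()).thenAnswer(invocation -> Optional.of(frozen.get()));
    return store;
  }
}
```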
- when(worldStateStorage.getLatestArchiveFrozenBlock()).thenReturn(Optional.of(100L)); + final AtomicLong frozenBlocks = new AtomicLong(100L); + + // Mock the DB setter so it updates what the getter returns + doAnswer( + invocation -> { + long thisValue = invocation.getArgument(0, Long.class); + frozenBlocks.set(thisValue); + return null; + }) + .when(worldStateStorage) + .setLatestArchiveFrozenBlock(any(Long.class)); + + // Mock the DB getter + doAnswer( + invocation -> { + return Optional.of(frozenBlocks.get()); + }) + .when(worldStateStorage) + .getLatestArchiveFrozenBlock(); // Mock the number of changes the freeze action carries out for each relevant block when(worldStateStorage.freezePreviousAccountState(any(), any())) @@ -271,7 +312,7 @@ public Optional load(final Long blockNumber) { // Chain height is 150, we've frozen state up to block 100, we should have initialized the next // 50 blocks to be archived - assertThat(archiveFreezer.getPendingBlocksCount()).isEqualTo(SHORT_TEST_CHAIN_HEIGHT - 100); + assertThat(archiveFreezer.getPendingBlocksCount()).isEqualTo(50); when(blockchain.getChainHeadBlockNumber()) .then(requestedBlockNumber -> getCurrentBlockHeight()); @@ -311,7 +352,25 @@ public Optional load(final Long blockNumber) { when(worldStateStorage.getFlatDbMode()).thenReturn(FlatDbMode.ARCHIVE); // If we had previously frozen up to block 100... - when(worldStateStorage.getLatestArchiveFrozenBlock()).thenReturn(Optional.of(100L)); + final AtomicLong frozenBlocks = new AtomicLong(100L); + + // Mock the DB setter so it updates what the getter returns + doAnswer( + invocation -> { + long thisValue = invocation.getArgument(0, Long.class); + frozenBlocks.set(thisValue); + return null; + }) + .when(worldStateStorage) + .setLatestArchiveFrozenBlock(any(Long.class)); + + // Mock the DB getter + doAnswer( + invocation -> { + return Optional.of(frozenBlocks.get()); + }) + .when(worldStateStorage) + .getLatestArchiveFrozenBlock(); // Mock the number of changes the freeze action carries out for each relevant block when(worldStateStorage.freezePreviousStorageState(any(), any())) @@ -395,7 +454,7 @@ public Optional load(final Long blockNumber) { // Chain height is 150, we've frozen state up to block 100, we should have initialized the next // 50 blocks to be archived - assertThat(archiveFreezer.getPendingBlocksCount()).isEqualTo(SHORT_TEST_CHAIN_HEIGHT - 100); + assertThat(archiveFreezer.getPendingBlocksCount()).isEqualTo(50); when(blockchain.getChainHeadBlockNumber()) .then(requestedBlockNumber -> getCurrentBlockHeight()); @@ -436,7 +495,25 @@ public Optional load(final Long blockNumber) { when(worldStateStorage.getFlatDbMode()).thenReturn(FlatDbMode.ARCHIVE); // If we had previously frozen up to block 100... 
- when(worldStateStorage.getLatestArchiveFrozenBlock()).thenReturn(Optional.of(100L)); + final AtomicLong frozenBlocks = new AtomicLong(100L); + + // Mock the DB setter so it updates what the getter returns + doAnswer( + invocation -> { + long thisValue = invocation.getArgument(0, Long.class); + frozenBlocks.set(thisValue); + return null; + }) + .when(worldStateStorage) + .setLatestArchiveFrozenBlock(any(Long.class)); + + // Mock the DB getter + doAnswer( + invocation -> { + return Optional.of(frozenBlocks.get()); + }) + .when(worldStateStorage) + .getLatestArchiveFrozenBlock(); // Mock the number of changes the freeze action carries out for each relevant block when(worldStateStorage.freezePreviousStorageState(any(), any())) @@ -546,7 +623,7 @@ public Optional load(final Long blockNumber) { // Chain height is 150, we've frozen state up to block 100, we should have initialized the next // 50 blocks to be archived - assertThat(archiveFreezer.getPendingBlocksCount()).isEqualTo(SHORT_TEST_CHAIN_HEIGHT - 100); + assertThat(archiveFreezer.getPendingBlocksCount()).isEqualTo(50); when(blockchain.getChainHeadBlockNumber()) .then(requestedBlockNumber -> getCurrentBlockHeight()); From faa8da1767c53b885a07f7c0be3a9c1a8d64d6b0 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Fri, 4 Oct 2024 09:22:16 +0100 Subject: [PATCH 19/39] Improve archive progress logs Signed-off-by: Matthew Whitehead --- .../worldview/BonsaiArchiveFreezer.java | 41 +++++++++---------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java index 3a5f81dbcdb..6abdcf0e836 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java @@ -59,6 +59,9 @@ public class BonsaiArchiveFreezer implements BlockAddedObserver { private final Map pendingBlocksToArchive = Collections.synchronizedMap(new TreeMap<>()); + // For logging progress. Saves doing a DB read just to record our progress + final AtomicLong latestFrozenBlock = new AtomicLong(0); + public BonsaiArchiveFreezer( final DiffBasedWorldStateKeyValueStorage rootWorldStateStorage, final Blockchain blockchain, @@ -171,16 +174,12 @@ public int moveBlockStateToFreezer() { } if (blocksToFreeze.size() > 0) { - LOG.atInfo() + LOG.atDebug() .setMessage("Moving cold state to freezer storage: {} to {} ") .addArgument(blocksToFreeze.firstKey()) .addArgument(blocksToFreeze.lastKey()) .log(); - // Vars used only for logging progress - final AtomicLong blocksFrozen = new AtomicLong(0); - final AtomicLong startBlock = new AtomicLong(-1); - // Determine which world state keys have changed in the last N blocks by looking at the // trie logs for the blocks. 
Then move the old keys to the freezer segment (if and only if // they have changed) @@ -188,19 +187,6 @@ public int moveBlockStateToFreezer() { .entrySet() .forEach( (block) -> { - // For logging progress - if (startBlock.get() < 0) { - startBlock.set(block.getKey()); - } - if (blocksFrozen.get() > 0 && blocksFrozen.get() % 100 == 0) { - // Log progress in case catching up causes there to be a large number of keys - // to move - LOG.atInfo() - .setMessage("archive progress: state for blocks {} to {} archived") - .addArgument(startBlock.get()) - .addArgument(startBlock.get() + blocksFrozen.get()) - .log(); - } Hash blockHash = block.getValue(); LOG.atDebug() .setMessage("Freezing all account state for block {}") @@ -250,15 +236,26 @@ public int moveBlockStateToFreezer() { .log(); rootWorldStateStorage.setLatestArchiveFrozenBlock(block.getKey()); - // For logging progress - blocksFrozen.getAndIncrement(); + // Update local var for logging progress + latestFrozenBlock.set(block.getKey()); + if (latestFrozenBlock.get() % 100 == 0) { + // Log progress in case catching up causes there to be a large number of keys + // to move + LOG.atInfo() + .setMessage( + "archive progress: state up to block {} archived ({} behind chain head {})") + .addArgument(latestFrozenBlock.get()) + .addArgument(blockchain.getChainHeadBlockNumber() - latestFrozenBlock.get()) + .addArgument(blockchain.getChainHeadBlockNumber()) + .log(); + } }); LOG.atDebug() .setMessage( "finished moving cold state for blocks {} to {}. Froze {} account state entries, {} account storage entries") - .addArgument(startBlock.get()) - .addArgument(startBlock.get() + (blocksFrozen.get() - 1)) + .addArgument(blocksToFreeze.firstKey()) + .addArgument(latestFrozenBlock.get()) .addArgument(frozenAccountStateCount.get()) .addArgument(frozenAccountStorageCount.get()) .log(); From 054301749c604061a6a0a509af84777551f13108 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Fri, 4 Oct 2024 11:48:28 +0100 Subject: [PATCH 20/39] Add archive mode tests to bonsai key/value tests Signed-off-by: Matthew Whitehead --- .../BonsaiWorldStateKeyValueStorageTest.java | 127 +++++++++++++++++- 1 file changed, 122 insertions(+), 5 deletions(-) diff --git a/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/BonsaiWorldStateKeyValueStorageTest.java b/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/BonsaiWorldStateKeyValueStorageTest.java index 02818407b46..d0edddea73b 100644 --- a/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/BonsaiWorldStateKeyValueStorageTest.java +++ b/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/BonsaiWorldStateKeyValueStorageTest.java @@ -29,19 +29,25 @@ import org.hyperledger.besu.datatypes.Address; import org.hyperledger.besu.datatypes.Hash; import org.hyperledger.besu.datatypes.StorageSlotKey; +import org.hyperledger.besu.datatypes.Wei; +import org.hyperledger.besu.ethereum.core.BlockHeader; +import org.hyperledger.besu.ethereum.core.Difficulty; import org.hyperledger.besu.ethereum.core.InMemoryKeyValueStorageProvider; import org.hyperledger.besu.ethereum.core.TrieGenerator; +import org.hyperledger.besu.ethereum.mainnet.MainnetBlockHeaderFunctions; import org.hyperledger.besu.ethereum.rlp.RLP; import org.hyperledger.besu.ethereum.storage.StorageProvider; import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier; import 
org.hyperledger.besu.ethereum.trie.MerkleTrie; import org.hyperledger.besu.ethereum.trie.StorageEntriesCollector; +import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.BonsaiAccount; import org.hyperledger.besu.ethereum.trie.patricia.StoredMerklePatriciaTrie; import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration; import org.hyperledger.besu.ethereum.worldstate.FlatDbMode; import org.hyperledger.besu.ethereum.worldstate.ImmutableDataStorageConfiguration; import org.hyperledger.besu.ethereum.worldstate.StateTrieAccountValue; import org.hyperledger.besu.ethereum.worldstate.WorldStateStorageCoordinator; +import org.hyperledger.besu.evm.log.LogsBloomFilter; import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem; import org.hyperledger.besu.plugin.services.storage.DataStorageFormat; import org.hyperledger.besu.plugin.services.storage.KeyValueStorage; @@ -63,7 +69,8 @@ public class BonsaiWorldStateKeyValueStorageTest { public static Collection flatDbMode() { - return Arrays.asList(new Object[][] {{FlatDbMode.FULL}, {FlatDbMode.PARTIAL}}); + return Arrays.asList( + new Object[][] {{FlatDbMode.FULL}, {FlatDbMode.PARTIAL}, {FlatDbMode.ARCHIVE}}); } public static Collection flatDbModeAndCodeStorageMode() { @@ -71,8 +78,10 @@ public static Collection flatDbModeAndCodeStorageMode() { new Object[][] { {FlatDbMode.FULL, false}, {FlatDbMode.PARTIAL, false}, + {FlatDbMode.ARCHIVE, false}, {FlatDbMode.FULL, true}, - {FlatDbMode.PARTIAL, true} + {FlatDbMode.PARTIAL, true}, + {FlatDbMode.ARCHIVE, true} }); } @@ -84,10 +93,14 @@ public BonsaiWorldStateKeyValueStorage setUp(final FlatDbMode flatDbMode) { public BonsaiWorldStateKeyValueStorage setUp( final FlatDbMode flatDbMode, final boolean useCodeHashStorage) { - storage = emptyStorage(useCodeHashStorage); - if (flatDbMode.equals(FlatDbMode.FULL)) { + if (flatDbMode.equals(FlatDbMode.ARCHIVE)) { + storage = emptyArchiveStorage(useCodeHashStorage); + storage.upgradeToFullFlatDbMode(); + } else if (flatDbMode.equals(FlatDbMode.FULL)) { + storage = emptyStorage(useCodeHashStorage); storage.upgradeToFullFlatDbMode(); } else if (flatDbMode.equals(FlatDbMode.PARTIAL)) { + storage = emptyStorage(useCodeHashStorage); storage.downgradeToPartialFlatDbMode(); } return storage; @@ -392,6 +405,44 @@ void clear_reloadFlatDbStrategy(final FlatDbMode flatDbMode) { assertThat(storage.getAccount(Hash.ZERO)).isEmpty(); } + @ParameterizedTest + @MethodSource("flatDbMode") + void clear_putGetAccountFlatDbStrategy(final FlatDbMode flatDbMode) { + final BonsaiWorldStateKeyValueStorage storage = spy(setUp(flatDbMode)); + + // save world state root hash + final BonsaiWorldStateKeyValueStorage.Updater updater = storage.updater(); + + Address account = Address.fromHexString("0x1cda99fb95e5418ae3bdc3bab5c4efa4a5a58a7c"); + + // RLP encoded account: address = 0x1cda99fb95e5418ae3bdc3bab5c4efa4a5a58a7c, balance = + // 0x0000000000000000000000000000000000000000000000007b5e41a364ea8bfc, nonce = 15768 + updater + .putAccountInfoState( + account.addressHash(), + Bytes.fromHexString( + "0xF84E823D98887B5E41A364EA8BFCA056E81F171BCC55A6FF8345E692C0F86E5B48E01B996CADC001622FB5E363B421A0C5D2460186F7233C927E7DB2DCC703C0E500B653CA82273B7BFAD8045D85A470")) + .commit(); + + assertThat(storage.getAccount(account.addressHash())).isNotEmpty(); + + BonsaiAccount retrievedAccount = + BonsaiAccount.fromRLP( + null, account, storage.getAccount(account.addressHash()).get(), false); + assertThat(retrievedAccount.getBalance()) + .isEqualTo( + Wei.fromHexString( + 
"0x0000000000000000000000000000000000000000000000007b5e41a364ea8bfc")); + assertThat(retrievedAccount.getNonce()).isEqualTo(15768); + + // clear + storage.clear(); + + assertThat(storage.getFlatDbStrategy()).isNotNull(); + + assertThat(storage.getAccount(account.addressHash())).isEmpty(); + } + @ParameterizedTest @MethodSource("flatDbMode") void reconcilesNonConflictingUpdaters(final FlatDbMode flatDbMode) { @@ -422,7 +473,12 @@ void reconcilesNonConflictingUpdaters(final FlatDbMode flatDbMode) { @MethodSource("flatDbMode") void isWorldStateAvailable_defaultIsFalse(final FlatDbMode flatDbMode) { setUp(flatDbMode); - assertThat(emptyStorage().isWorldStateAvailable(UInt256.valueOf(1), Hash.EMPTY)).isFalse(); + if (flatDbMode.equals(FlatDbMode.ARCHIVE)) { + assertThat(emptyArchiveStorage().isWorldStateAvailable(UInt256.valueOf(1), Hash.EMPTY)) + .isFalse(); + } else { + assertThat(emptyStorage().isWorldStateAvailable(UInt256.valueOf(1), Hash.EMPTY)).isFalse(); + } } @ParameterizedTest @@ -466,6 +522,19 @@ private BonsaiWorldStateKeyValueStorage emptyStorage() { DataStorageConfiguration.DEFAULT_BONSAI_CONFIG); } + private BonsaiWorldStateKeyValueStorage emptyArchiveStorage() { + final BonsaiWorldStateKeyValueStorage archiveStorage = + new BonsaiWorldStateKeyValueStorage( + new InMemoryKeyValueStorageProvider(), + new NoOpMetricsSystem(), + DataStorageConfiguration.DEFAULT_BONSAI_ARCHIVE_CONFIG); + archiveStorage + .getFlatDbStrategy() + .updateBlockContext( + getArchiveBlockContext(1)); // Do all archive calls under the context of block 1 + return archiveStorage; + } + private BonsaiWorldStateKeyValueStorage emptyStorage(final boolean useCodeHashStorage) { return new BonsaiWorldStateKeyValueStorage( new InMemoryKeyValueStorageProvider(), @@ -480,6 +549,26 @@ private BonsaiWorldStateKeyValueStorage emptyStorage(final boolean useCodeHashSt .build()); } + private BonsaiWorldStateKeyValueStorage emptyArchiveStorage(final boolean useCodeHashStorage) { + final BonsaiWorldStateKeyValueStorage archiveStorage = + new BonsaiWorldStateKeyValueStorage( + new InMemoryKeyValueStorageProvider(), + new NoOpMetricsSystem(), + ImmutableDataStorageConfiguration.builder() + .dataStorageFormat(DataStorageFormat.BONSAI_ARCHIVE) + .bonsaiMaxLayersToLoad(DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD) + .unstable( + ImmutableDataStorageConfiguration.Unstable.builder() + .bonsaiCodeStoredByCodeHashEnabled(useCodeHashStorage) + .build()) + .build()); + archiveStorage + .getFlatDbStrategy() + .updateBlockContext( + getArchiveBlockContext(1)); // Do all archive calls under the context of block 1 + return archiveStorage; + } + @Test void successfulPruneReturnsTrue() { final KeyValueStorage mockTrieLogStorage = mock(KeyValueStorage.class); @@ -516,4 +605,32 @@ private BonsaiWorldStateKeyValueStorage setupSpyStorage( new NoOpMetricsSystem(), DataStorageConfiguration.DEFAULT_BONSAI_CONFIG); } + + private static BlockHeader getArchiveBlockContext(final long blockNumber) { + final BlockHeader header = + new BlockHeader( + Hash.EMPTY, + Hash.EMPTY_TRIE_HASH, + Address.ZERO, + Hash.EMPTY_TRIE_HASH, + Hash.EMPTY_TRIE_HASH, + Hash.EMPTY_TRIE_HASH, + LogsBloomFilter.builder().build(), + Difficulty.ONE, + blockNumber, + 0, + 0, + 0, + Bytes.of(0x00), + Wei.ZERO, + Hash.EMPTY, + 0, + null, + null, + null, + null, + null, + new MainnetBlockHeaderFunctions()); + return header; + } } From bf3d171ccdb1883a5f6503be39ef2208ec041625 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Fri, 4 Oct 2024 17:11:11 +0100 Subject: [PATCH 21/39] Implement 
snap-serving from a Bonsai Archive node and add tests Signed-off-by: Matthew Whitehead --- .../storage/flat/ArchiveFlatDbStrategy.java | 112 +++++++ .../common/storage/flat/FlatDbStrategy.java | 8 +- .../BonsaiWorldStateKeyValueStorageTest.java | 300 ++++++++++++++++++ .../ethereum/eth/manager/snap/SnapServer.java | 6 +- 4 files changed, 419 insertions(+), 7 deletions(-) diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java index 171da04365f..105643bbde3 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java @@ -31,9 +31,13 @@ import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction; import java.util.Optional; +import java.util.function.Function; import java.util.function.Supplier; +import java.util.stream.Stream; +import kotlin.Pair; import org.apache.tuweni.bytes.Bytes; +import org.apache.tuweni.bytes.Bytes32; import org.bouncycastle.util.Arrays; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -103,6 +107,102 @@ public Optional getFlatAccount( return accountFound; } + @Override + protected Stream> accountsToPairStream( + final SegmentedKeyValueStorage storage, final Bytes startKeyHash, final Bytes32 endKeyHash) { + final Stream> stream = + storage + .streamFromKey( + ACCOUNT_INFO_STATE, + calculateArchiveKeyNoContextMinSuffix(startKeyHash.toArrayUnsafe()), + calculateArchiveKeyNoContextMaxSuffix(endKeyHash.toArrayUnsafe())) + .map(e -> Bytes.of(calculateArchiveKeyNoContextMaxSuffix(trimSuffix(e.getKey())))) + .distinct() + .map( + e -> + new Pair<>( + Bytes32.wrap(trimSuffix(e.toArrayUnsafe())), + Bytes.of( + storage.getNearestBefore(ACCOUNT_INFO_STATE, e).get().value().get()))); + return stream; + } + + @Override + protected Stream> accountsToPairStream( + final SegmentedKeyValueStorage storage, final Bytes startKeyHash) { + final Stream> stream = + storage + .streamFromKey( + ACCOUNT_INFO_STATE, + calculateArchiveKeyNoContextMinSuffix(startKeyHash.toArrayUnsafe())) + .map(e -> Bytes.of(calculateArchiveKeyNoContextMaxSuffix(trimSuffix(e.getKey())))) + .distinct() + .map( + e -> + new Pair( + Bytes32.wrap(trimSuffix(e.toArrayUnsafe())), + Bytes.of( + storage.getNearestBefore(ACCOUNT_INFO_STATE, e).get().value().get()))); + return stream; + } + + @Override + protected Stream> storageToPairStream( + final SegmentedKeyValueStorage storage, + final Hash accountHash, + final Bytes startKeyHash, + final Function valueMapper) { + return storage + .streamFromKey( + ACCOUNT_STORAGE_STORAGE, + calculateArchiveKeyNoContextMinSuffix( + calculateNaturalSlotKey(accountHash, Hash.wrap(Bytes32.wrap(startKeyHash))))) + .map(e -> Bytes.of(calculateArchiveKeyNoContextMaxSuffix(trimSuffix(e.getKey())))) + .distinct() + .map( + key -> + new Pair<>( + Bytes32.wrap(trimSuffix(key.slice(Hash.SIZE).toArrayUnsafe())), + valueMapper.apply( + Bytes.of( + storage + .getNearestBefore(ACCOUNT_STORAGE_STORAGE, key) + .get() + .value() + .get()) + .trimLeadingZeros()))); + } + + @Override + protected Stream> storageToPairStream( + final SegmentedKeyValueStorage storage, + final Hash accountHash, + final Bytes startKeyHash, + final Bytes32 endKeyHash, + final Function valueMapper) 
{ + return storage + .streamFromKey( + ACCOUNT_STORAGE_STORAGE, + calculateArchiveKeyNoContextMinSuffix( + calculateNaturalSlotKey(accountHash, Hash.wrap(Bytes32.wrap(startKeyHash)))), + calculateArchiveKeyNoContextMaxSuffix( + calculateNaturalSlotKey(accountHash, Hash.wrap(endKeyHash)))) + .map(e -> Bytes.of(calculateArchiveKeyNoContextMaxSuffix(trimSuffix(e.getKey())))) + .distinct() + .map( + key -> + new Pair<>( + Bytes32.wrap(trimSuffix(key.slice(Hash.SIZE).toArrayUnsafe())), + valueMapper.apply( + Bytes.of( + storage + .getNearestBefore(ACCOUNT_STORAGE_STORAGE, key) + .get() + .value() + .get()) + .trimLeadingZeros()))); + } + /* * Puts the account data for the given account hash and block context. */ @@ -128,6 +228,10 @@ public void removeFlatAccount( transaction.put(ACCOUNT_INFO_STATE, keySuffixed, DELETED_ACCOUNT_VALUE); } + private byte[] trimSuffix(final byte[] suffixedAddress) { + return Arrays.copyOfRange(suffixedAddress, 0, suffixedAddress.length - 8); + } + /* * Retrieves the storage value for the given account hash and storage slot key, using the world state root hash supplier, storage root supplier, and node loader. */ @@ -232,6 +336,14 @@ public static byte[] calculateArchiveKeyWithMinSuffix( return calculateArchiveKeyWithSuffix(context, naturalKey, MIN_BLOCK_SUFFIX); } + public static byte[] calculateArchiveKeyNoContextMinSuffix(final byte[] naturalKey) { + return Arrays.concatenate(naturalKey, MIN_BLOCK_SUFFIX); + } + + public static byte[] calculateArchiveKeyNoContextMaxSuffix(final byte[] naturalKey) { + return Arrays.concatenate(naturalKey, MAX_BLOCK_SUFFIX); + } + public static Bytes calculateArchiveKeyWithMaxSuffix( final BonsaiContext context, final byte[] naturalKey) { return Bytes.of(calculateArchiveKeyWithSuffix(context, naturalKey, MAX_BLOCK_SUFFIX)); diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategy.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategy.java index 6eb13a46e1c..1988686edf2 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategy.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategy.java @@ -250,7 +250,7 @@ public NavigableMap streamStorageFlatDatabase( .takeWhile(takeWhile)); } - private static Stream> storageToPairStream( + protected Stream> storageToPairStream( final SegmentedKeyValueStorage storage, final Hash accountHash, final Bytes startKeyHash, @@ -267,7 +267,7 @@ private static Stream> storageToPairStream( valueMapper.apply(Bytes.wrap(pair.getValue()).trimLeadingZeros()))); } - private static Stream> storageToPairStream( + protected Stream> storageToPairStream( final SegmentedKeyValueStorage storage, final Hash accountHash, final Bytes startKeyHash, @@ -286,14 +286,14 @@ private static Stream> storageToPairStream( valueMapper.apply(Bytes.wrap(pair.getValue()).trimLeadingZeros()))); } - private static Stream> accountsToPairStream( + protected Stream> accountsToPairStream( final SegmentedKeyValueStorage storage, final Bytes startKeyHash, final Bytes32 endKeyHash) { return storage .streamFromKey(ACCOUNT_INFO_STATE, startKeyHash.toArrayUnsafe(), endKeyHash.toArrayUnsafe()) .map(pair -> new Pair<>(Bytes32.wrap(pair.getKey()), Bytes.wrap(pair.getValue()))); } - private static Stream> accountsToPairStream( + protected Stream> accountsToPairStream( final SegmentedKeyValueStorage storage, 
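The stream overrides above implement "newest version wins" on top of the suffixed keys: every raw hit is rewritten to its maximum-suffix form, duplicates collapse through distinct(), and getNearestBefore then fetches the most recent stored version of each key. The same idea can be modelled on a plain NavigableMap with string keys instead of the real segmented storage; everything here is illustrative:

```java
import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

/** Sketch of the "latest version per key" read used for archive streaming.
 *  Keys are "<naturalKey>:<zero-padded block>" strings purely for illustration. */
final class LatestVersionStreamExample {
  static List<Map.Entry<String, String>> latestVersions(final TreeMap<String, String> db) {
    // 1. Collect the distinct natural keys (strip the block suffix), preserving sorted order.
    final Set<String> naturalKeys = new LinkedHashSet<>();
    db.keySet().forEach(k -> naturalKeys.add(k.substring(0, k.lastIndexOf(':'))));

    // 2. For each natural key, take the nearest entry at or below "<key>:<max block>",
    //    which is the analogue of the getNearestBefore lookup with a max suffix.
    final List<Map.Entry<String, String>> latest = new ArrayList<>();
    for (final String key : naturalKeys) {
      latest.add(db.floorEntry(key + ":" + Long.MAX_VALUE));
    }
    return latest;
  }

  public static void main(final String[] args) {
    final TreeMap<String, String> db = new TreeMap<>();
    db.put("acct1:0000000150", "v150");
    db.put("acct1:0000000152", "v152"); // newest version of acct1
    db.put("acct2:0000000151", "v151");
    latestVersions(db).forEach(e -> System.out.println(e.getKey() + " -> " + e.getValue()));
  }
}
```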
final Bytes startKeyHash) { return storage .streamFromKey(ACCOUNT_INFO_STATE, startKeyHash.toArrayUnsafe()) diff --git a/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/BonsaiWorldStateKeyValueStorageTest.java b/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/BonsaiWorldStateKeyValueStorageTest.java index d0edddea73b..3458525b2fe 100644 --- a/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/BonsaiWorldStateKeyValueStorageTest.java +++ b/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/BonsaiWorldStateKeyValueStorageTest.java @@ -443,6 +443,306 @@ void clear_putGetAccountFlatDbStrategy(final FlatDbMode flatDbMode) { assertThat(storage.getAccount(account.addressHash())).isEmpty(); } + @ParameterizedTest + @MethodSource("flatDbMode") + void clear_streamFlatAccounts(final FlatDbMode flatDbMode) { + final BonsaiWorldStateKeyValueStorage storage = spy(setUp(flatDbMode)); + + // save world state root hash + BonsaiWorldStateKeyValueStorage.Updater updater = storage.updater(); + + // Put 3 accounts + Address account1 = + Address.fromHexString( + "0x1111111111111111111111111111111111111111"); // 3rd entry in DB after hashing + updater.putAccountInfoState(account1.addressHash(), Bytes32.random()).commit(); + updater = storage.updater(); + Address account2 = + Address.fromHexString( + "0x2222222222222222222222222222222222222222"); // 1st entry in the DB after hashing + updater.putAccountInfoState(account2.addressHash(), Bytes32.random()).commit(); + updater = storage.updater(); + Address account3 = + Address.fromHexString( + "0x3333333333333333333333333333333333333333"); // 2nd entry in the DB after hashing + updater.putAccountInfoState(account3.addressHash(), Bytes32.random()).commit(); + + // Streaming the entire range to ensure we get all 3 accounts back + assertThat( + storage + .streamFlatAccounts( + Hash.fromHexString( + "0x0000000000000000000000000000000000000000000000000000000000000000"), + Hash.fromHexString( + "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"), + 1000) + .size()) + .isEqualTo(3); + assertThat( + storage + .streamFlatAccounts( + Hash.fromHexString( + "0x0000000000000000000000000000000000000000000000000000000000000000"), + Hash.fromHexString( + "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"), + 1000) + .firstEntry() + .getKey()) + .isEqualTo(account2.addressHash()); // NB: Account 2 hash is first in the DB + assertThat( + storage + .streamFlatAccounts( + Hash.fromHexString( + "0x0000000000000000000000000000000000000000000000000000000000000000"), + Hash.fromHexString( + "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"), + 1000) + .lastEntry() + .getKey()) + .isEqualTo(account1.addressHash()); // NB: Account 1 hash is 3rd/last in the DB + + // clear + storage.clear(); + + assertThat(storage.getFlatDbStrategy()).isNotNull(); + + assertThat(storage.getAccount(account1.addressHash())).isEmpty(); + assertThat(storage.getAccount(account2.addressHash())).isEmpty(); + assertThat(storage.getAccount(account3.addressHash())).isEmpty(); + } + + @ParameterizedTest + @MethodSource("flatDbMode") + void clear_streamFlatAccountsMultipleStateChanges(final FlatDbMode flatDbMode) { + final BonsaiWorldStateKeyValueStorage storage = spy(setUp(flatDbMode)); + + // save world state root hash + BonsaiWorldStateKeyValueStorage.Updater updater = storage.updater(); + + // Put 3 accounts + 
Address account1 = + Address.fromHexString( + "0x1111111111111111111111111111111111111111"); // 3rd entry in DB after hashing + updater.putAccountInfoState(account1.addressHash(), Bytes32.random()).commit(); + updater = storage.updater(); + Address account2 = + Address.fromHexString( + "0x2222222222222222222222222222222222222222"); // 1st entry in the DB after hashing + updater.putAccountInfoState(account2.addressHash(), Bytes32.random()).commit(); + updater = storage.updater(); + Address account3 = + Address.fromHexString( + "0x3333333333333333333333333333333333333333"); // 2nd entry in the DB after hashing + updater.putAccountInfoState(account3.addressHash(), Bytes32.random()).commit(); + + // Update the middle account several times. For an archive mode DB this will result in N + // additional entries in the DB, but streaming the accounts should only return the most recent + // entry + + // Update the account at block 2 + storage.getFlatDbStrategy().updateBlockContext(getArchiveBlockContext(2)); + updater = storage.updater(); + updater.putAccountInfoState(account3.addressHash(), Bytes32.random()).commit(); + + // Update the account at block 3 + storage.getFlatDbStrategy().updateBlockContext(getArchiveBlockContext(3)); + updater = storage.updater(); + updater.putAccountInfoState(account3.addressHash(), Bytes32.random()).commit(); + + // Update the account at block 4 + storage.getFlatDbStrategy().updateBlockContext(getArchiveBlockContext(4)); + Bytes32 finalStateUpdate = Bytes32.random(); + updater = storage.updater(); + updater.putAccountInfoState(account3.addressHash(), finalStateUpdate).commit(); + + // Streaming the entire range to ensure we only get 3 accounts back + assertThat( + storage + .streamFlatAccounts( + Hash.fromHexString( + "0x0000000000000000000000000000000000000000000000000000000000000000"), + Hash.fromHexString( + "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"), + 1000) + .size()) + .isEqualTo(3); + + // Check that account 2 is the first entry (as per its hash) + assertThat( + storage + .streamFlatAccounts( + Hash.fromHexString( + "0x0000000000000000000000000000000000000000000000000000000000000000"), + Hash.fromHexString( + "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"), + 1000) + .firstEntry() + .getKey()) + .isEqualTo(account2.addressHash()); // NB: Account 2 hash is first in the DB + + // Check that account 1 is the last entry (as per its hash) + assertThat( + storage + .streamFlatAccounts( + Hash.fromHexString( + "0x0000000000000000000000000000000000000000000000000000000000000000"), + Hash.fromHexString( + "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"), + 1000) + .lastEntry() + .getKey()) + .isEqualTo(account1.addressHash()); // NB: Account 1 hash is 3rd/last in the DB + + // Check the state for account 3 is the final state update at block 4, not an earlier state + assertThat( + storage + .streamFlatAccounts( + Hash.fromHexString( + "0x0000000000000000000000000000000000000000000000000000000000000000"), + Hash.fromHexString( + "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"), + 1000) + .get(account3.addressHash())) + .isEqualTo(finalStateUpdate); + + // clear + storage.clear(); + + assertThat(storage.getFlatDbStrategy()).isNotNull(); + + assertThat(storage.getAccount(account1.addressHash())).isEmpty(); + assertThat(storage.getAccount(account2.addressHash())).isEmpty(); + assertThat(storage.getAccount(account3.addressHash())).isEmpty(); + } + + @ParameterizedTest + 
@MethodSource("flatDbMode") + void clear_streamFlatStorageMultipleStateChanges(final FlatDbMode flatDbMode) { + final BonsaiWorldStateKeyValueStorage storage = spy(setUp(flatDbMode)); + + // save world state root hash + BonsaiWorldStateKeyValueStorage.Updater updater = storage.updater(); + + // Put 3 accounts + Address account1 = + Address.fromHexString( + "0x1111111111111111111111111111111111111111"); // 3rd entry in DB after hashing + updater + .putStorageValueBySlotHash( + account1.addressHash(), + new StorageSlotKey(UInt256.ONE).getSlotHash(), + UInt256.fromHexString("0x11")) + .commit(); + + updater = storage.updater(); + Address account2 = + Address.fromHexString( + "0x2222222222222222222222222222222222222222"); // 1st entry in the DB after hashing + updater + .putStorageValueBySlotHash( + account2.addressHash(), + new StorageSlotKey(UInt256.ONE).getSlotHash(), + UInt256.fromHexString("0x22")) + .commit(); + + updater = storage.updater(); + Address account3 = + Address.fromHexString( + "0x3333333333333333333333333333333333333333"); // 2nd entry in the DB after hashing + final StorageSlotKey slot1 = new StorageSlotKey(UInt256.ONE); + updater + .putStorageValueBySlotHash( + account3.addressHash(), + new StorageSlotKey(UInt256.ONE).getSlotHash(), + UInt256.fromHexString("0x33")) + .commit(); + + // Update the middle account several times. For an archive mode DB this will result in N + // additional entries in the DB, but streaming the accounts should only return the most recent + // entry + + // Update the storage at block 2 + storage.getFlatDbStrategy().updateBlockContext(getArchiveBlockContext(2)); + updater = storage.updater(); + updater + .putStorageValueBySlotHash( + account3.addressHash(), slot1.getSlotHash(), UInt256.fromHexString("0x12")) + .commit(); + + // Update the account at block 3 + storage.getFlatDbStrategy().updateBlockContext(getArchiveBlockContext(3)); + updater = storage.updater(); + updater + .putStorageValueBySlotHash( + account3.addressHash(), slot1.getSlotHash(), UInt256.fromHexString("0x13")) + .commit(); + + // Update the account at block 4 + storage.getFlatDbStrategy().updateBlockContext(getArchiveBlockContext(4)); + updater = storage.updater(); + updater + .putStorageValueBySlotHash( + account3.addressHash(), slot1.getSlotHash(), UInt256.fromHexString("0x14")) + .commit(); + + // Check that every account only has 1 entry for slot 1 (even account 3 which updated the same + // slot 4 times) + assertThat( + storage + .streamFlatStorages( + account1.addressHash(), + Hash.fromHexString( + "0x0000000000000000000000000000000000000000000000000000000000000000"), + Hash.fromHexString( + "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"), + 1000) + .size()) + .isEqualTo(1); + + assertThat( + storage + .streamFlatStorages( + account2.addressHash(), + Hash.fromHexString( + "0x0000000000000000000000000000000000000000000000000000000000000000"), + Hash.fromHexString( + "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"), + 1000) + .size()) + .isEqualTo(1); + + assertThat( + storage + .streamFlatStorages( + account3.addressHash(), + Hash.fromHexString( + "0x0000000000000000000000000000000000000000000000000000000000000000"), + Hash.fromHexString( + "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"), + 1000) + .size()) + .isEqualTo(1); + + // Check that the storage state for account 3's storage slot 1 is the latest value that was + // stored + assertThat( + storage + .streamFlatStorages( + account3.addressHash(), + 
Hash.fromHexString( + "0x0000000000000000000000000000000000000000000000000000000000000000"), + Hash.fromHexString( + "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"), + 1000) + .get(slot1.getSlotHash())) + .isEqualTo(Bytes.fromHexString("0x14")); + + // clear + storage.clear(); + + assertThat(storage.getFlatDbStrategy()).isNotNull(); + } + @ParameterizedTest @MethodSource("flatDbMode") void reconcilesNonConflictingUpdaters(final FlatDbMode flatDbMode) { diff --git a/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/manager/snap/SnapServer.java b/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/manager/snap/SnapServer.java index 7de933e1370..472794bf7a3 100644 --- a/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/manager/snap/SnapServer.java +++ b/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/manager/snap/SnapServer.java @@ -39,7 +39,6 @@ import org.hyperledger.besu.ethereum.worldstate.FlatDbMode; import org.hyperledger.besu.ethereum.worldstate.WorldStateStorageCoordinator; import org.hyperledger.besu.plugin.services.BesuEvents; -import org.hyperledger.besu.plugin.services.storage.DataStorageFormat; import java.util.ArrayList; import java.util.Collections; @@ -151,8 +150,9 @@ public synchronized SnapServer start() { if (!isStarted.get() && snapServerEnabled) { // if we are bonsai and full flat, we can provide a worldstate storage: var worldStateKeyValueStorage = worldStateStorageCoordinator.worldStateKeyValueStorage(); - if (worldStateKeyValueStorage.getDataStorageFormat().equals(DataStorageFormat.BONSAI) - && worldStateStorageCoordinator.isMatchingFlatMode(FlatDbMode.FULL)) { + if (worldStateKeyValueStorage.getDataStorageFormat().isBonsaiFormat() + && (worldStateStorageCoordinator.isMatchingFlatMode(FlatDbMode.FULL) + || worldStateStorageCoordinator.isMatchingFlatMode(FlatDbMode.ARCHIVE))) { LOGGER.debug("Starting SnapServer with Bonsai full flat db"); var bonsaiArchive = protocolContext From d2e5a244c7c497dc5bc85c20ea3528967d5107aa Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Mon, 7 Oct 2024 10:55:39 +0100 Subject: [PATCH 22/39] Parameterize snap-server tests, fix bug in storage slot ranges Signed-off-by: Matthew Whitehead --- .../storage/flat/ArchiveFlatDbStrategy.java | 2 + .../eth/manager/snap/SnapServerTest.java | 286 +++++++++++++----- 2 files changed, 204 insertions(+), 84 deletions(-) diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java index 105643bbde3..31ba6b2051c 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java @@ -158,6 +158,7 @@ protected Stream> storageToPairStream( calculateArchiveKeyNoContextMinSuffix( calculateNaturalSlotKey(accountHash, Hash.wrap(Bytes32.wrap(startKeyHash))))) .map(e -> Bytes.of(calculateArchiveKeyNoContextMaxSuffix(trimSuffix(e.getKey())))) + .takeWhile(pair -> pair.slice(0, Hash.SIZE).equals(accountHash)) .distinct() .map( key -> @@ -188,6 +189,7 @@ protected Stream> storageToPairStream( calculateArchiveKeyNoContextMaxSuffix( calculateNaturalSlotKey(accountHash, Hash.wrap(endKeyHash)))) .map(e -> Bytes.of(calculateArchiveKeyNoContextMaxSuffix(trimSuffix(e.getKey())))) 
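+        // Stop as soon as a streamed key no longer starts with this account's hash, otherwise the
+        // slot range for one account could bleed into the next account's storage entries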
+ .takeWhile(pair -> pair.slice(0, Hash.SIZE).equals(accountHash)) .distinct() .map( key -> diff --git a/ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/manager/snap/SnapServerTest.java b/ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/manager/snap/SnapServerTest.java index e168b7e2fe3..9604a7df9cf 100644 --- a/ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/manager/snap/SnapServerTest.java +++ b/ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/manager/snap/SnapServerTest.java @@ -21,8 +21,11 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; +import org.hyperledger.besu.datatypes.Address; import org.hyperledger.besu.datatypes.Hash; import org.hyperledger.besu.datatypes.Wei; +import org.hyperledger.besu.ethereum.core.BlockHeader; +import org.hyperledger.besu.ethereum.core.Difficulty; import org.hyperledger.besu.ethereum.eth.manager.EthMessages; import org.hyperledger.besu.ethereum.eth.messages.snap.AccountRangeMessage; import org.hyperledger.besu.ethereum.eth.messages.snap.ByteCodesMessage; @@ -32,6 +35,7 @@ import org.hyperledger.besu.ethereum.eth.messages.snap.GetTrieNodesMessage; import org.hyperledger.besu.ethereum.eth.messages.snap.StorageRangeMessage; import org.hyperledger.besu.ethereum.eth.messages.snap.TrieNodesMessage; +import org.hyperledger.besu.ethereum.mainnet.MainnetBlockHeaderFunctions; import org.hyperledger.besu.ethereum.proof.WorldStateProofProvider; import org.hyperledger.besu.ethereum.rlp.BytesValueRLPOutput; import org.hyperledger.besu.ethereum.rlp.RLP; @@ -45,6 +49,7 @@ import org.hyperledger.besu.ethereum.worldstate.FlatDbMode; import org.hyperledger.besu.ethereum.worldstate.StateTrieAccountValue; import org.hyperledger.besu.ethereum.worldstate.WorldStateStorageCoordinator; +import org.hyperledger.besu.evm.log.LogsBloomFilter; import org.hyperledger.besu.metrics.ObservableMetricsSystem; import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem; import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage; @@ -52,6 +57,8 @@ import org.hyperledger.besu.services.kvstore.SegmentedInMemoryKeyValueStorage; import java.math.BigInteger; +import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.NavigableMap; @@ -63,12 +70,17 @@ import org.apache.tuweni.bytes.Bytes; import org.apache.tuweni.bytes.Bytes32; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; public class SnapServerTest { static Random rand = new Random(); + // Paramaterized test to exercise BONSAI and BONSAI_ARCHIVE + public static Collection flatDbMode() { + return Arrays.asList(new Object[][] {{FlatDbMode.FULL}, {FlatDbMode.ARCHIVE}}); + } + record SnapTestAccount( Hash addressHash, StateTrieAccountValue accountValue, @@ -79,35 +91,44 @@ Bytes accountRLP() { } } + private static BlockHeader generateBonsaiArchiveContextHeader(final long blockNumber) { + // Fake up a block header + return new BlockHeader( + Hash.EMPTY, + Hash.EMPTY_TRIE_HASH, + Address.ZERO, + Hash.EMPTY_TRIE_HASH, + Hash.EMPTY_TRIE_HASH, + Hash.EMPTY_TRIE_HASH, + LogsBloomFilter.builder().build(), + Difficulty.ONE, + blockNumber, + 0, + 0, + 0, + Bytes.of(0x00), + Wei.ZERO, + Hash.EMPTY, + 0, + null, + null, + null, + null, + null, + new MainnetBlockHeaderFunctions()); + } + static final ObservableMetricsSystem noopMetrics = new 
NoOpMetricsSystem(); - final SegmentedInMemoryKeyValueStorage storage = new SegmentedInMemoryKeyValueStorage(); + SegmentedInMemoryKeyValueStorage storage; // force a full flat db with code stored by code hash: - final BonsaiWorldStateKeyValueStorage inMemoryStorage = - new BonsaiWorldStateKeyValueStorage( - new FlatDbStrategyProvider(noopMetrics, DataStorageConfiguration.DEFAULT_BONSAI_CONFIG) { - @Override - public FlatDbMode getFlatDbMode() { - return FlatDbMode.FULL; - } + BonsaiWorldStateKeyValueStorage inMemoryStorage; - @Override - protected boolean deriveUseCodeStorageByHash( - final SegmentedKeyValueStorage composedWorldStateStorage) { - return true; - } - }, - storage, - new InMemoryKeyValueStorage()); - - final WorldStateStorageCoordinator storageCoordinator = - new WorldStateStorageCoordinator(inMemoryStorage); - final StoredMerklePatriciaTrie storageTrie = - new StoredMerklePatriciaTrie<>( - inMemoryStorage::getAccountStateTrieNode, Function.identity(), Function.identity()); - final WorldStateProofProvider proofProvider = new WorldStateProofProvider(storageCoordinator); - - final Function> spyProvider = + WorldStateStorageCoordinator storageCoordinator; + StoredMerklePatriciaTrie storageTrie; + WorldStateProofProvider proofProvider; + + Function> spyProvider = spy( new Function>() { // explicit non-final class is necessary for Mockito to spy: @@ -117,21 +138,67 @@ public Optional apply(final Hash hash) { } }); - final SnapServer snapServer = - new SnapServer(new EthMessages(), storageCoordinator, spyProvider).start(); - - final SnapTestAccount acct1 = createTestAccount("10"); - final SnapTestAccount acct2 = createTestAccount("20"); - final SnapTestAccount acct3 = createTestContractAccount("30", inMemoryStorage); - final SnapTestAccount acct4 = createTestContractAccount("40", inMemoryStorage); + SnapServer snapServer; + + SnapTestAccount acct1; + SnapTestAccount acct2; + SnapTestAccount acct3; + SnapTestAccount acct4; + + public void setup(final FlatDbMode dbMode) { + storage = new SegmentedInMemoryKeyValueStorage(); + + // force a full flat db with code stored by code hash: + inMemoryStorage = + new BonsaiWorldStateKeyValueStorage( + new FlatDbStrategyProvider( + noopMetrics, + dbMode == FlatDbMode.FULL + ? 
DataStorageConfiguration.DEFAULT_BONSAI_CONFIG + : DataStorageConfiguration.DEFAULT_BONSAI_ARCHIVE_CONFIG) { + @Override + public FlatDbMode getFlatDbMode() { + return dbMode; + } + + @Override + protected boolean deriveUseCodeStorageByHash( + final SegmentedKeyValueStorage composedWorldStateStorage) { + return true; + } + }, + storage, + new InMemoryKeyValueStorage()); + + storageCoordinator = new WorldStateStorageCoordinator(inMemoryStorage); + storageTrie = + new StoredMerklePatriciaTrie<>( + inMemoryStorage::getAccountStateTrieNode, Function.identity(), Function.identity()); + proofProvider = new WorldStateProofProvider(storageCoordinator); + + spyProvider = + spy( + new Function>() { + // explicit non-final class is necessary for Mockito to spy: + @Override + public Optional apply(final Hash hash) { + return Optional.of(inMemoryStorage); + } + }); - @BeforeEach - public void setup() { + snapServer = new SnapServer(new EthMessages(), storageCoordinator, spyProvider).start(); snapServer.start(); + + acct1 = createTestAccount("10"); + acct2 = createTestAccount("20"); + acct3 = createTestContractAccount("30", inMemoryStorage); + acct4 = createTestContractAccount("40", inMemoryStorage); } - @Test - public void assertNoStartNoOp() { + @ParameterizedTest + @MethodSource("flatDbMode") + public void assertNoStartNoOp(final FlatDbMode flatDbMode) { + setup(flatDbMode); // account found at startHash insertTestAccounts(acct4, acct3, acct1, acct2); @@ -166,8 +233,10 @@ public void assertNoStartNoOp() { verify(spyProvider, never()).apply(any()); } - @Test - public void assertEmptyRangeLeftProofOfExclusionAndNextAccount() { + @ParameterizedTest + @MethodSource("flatDbMode") + public void assertEmptyRangeLeftProofOfExclusionAndNextAccount(final FlatDbMode flatDbMode) { + setup(flatDbMode); // for a range request that returns empty, we should return just a proof of exclusion on the // left and the next account after the limit hash insertTestAccounts(acct1, acct4); @@ -184,8 +253,10 @@ public void assertEmptyRangeLeftProofOfExclusionAndNextAccount() { assertThat(assertIsValidAccountRangeProof(acct2.addressHash, rangeData)).isTrue(); } - @Test - public void assertAccountLimitRangeResponse() { + @ParameterizedTest + @MethodSource("flatDbMode") + public void assertAccountLimitRangeResponse(final FlatDbMode flatDbMode) { + setup(flatDbMode); // assert we limit the range response according to size final int acctCount = 2000; final long acctRLPSize = 37; @@ -220,8 +291,10 @@ public void assertAccountLimitRangeResponse() { assertThat(assertIsValidAccountRangeProof(Hash.ZERO, rangeData)).isTrue(); } - @Test - public void assertAccountLimitRangeResponse_atLeastOneAccount() { + @ParameterizedTest + @MethodSource("flatDbMode") + public void assertAccountLimitRangeResponse_atLeastOneAccount(final FlatDbMode flatDbMode) { + setup(flatDbMode); List randomLoad = IntStream.range(1, 4096).boxed().collect(Collectors.toList()); Collections.shuffle(randomLoad); randomLoad.stream() @@ -251,8 +324,10 @@ public void assertAccountLimitRangeResponse_atLeastOneAccount() { assertThat(assertIsValidAccountRangeProof(Hash.ZERO, rangeData)).isTrue(); } - @Test - public void assertLastEmptyRange() { + @ParameterizedTest + @MethodSource("flatDbMode") + public void assertLastEmptyRange(final FlatDbMode flatDbMode) { + setup(flatDbMode); // When our final range request is empty, no next account is possible, // and we should return just a proof of exclusion of the right insertTestAccounts(acct1, acct2); @@ -263,8 +338,10 @@ public void 
assertLastEmptyRange() { assertThat(assertIsValidAccountRangeProof(acct3.addressHash, rangeData)).isTrue(); } - @Test - public void assertAccountFoundAtStartHashProof() { + @ParameterizedTest + @MethodSource("flatDbMode") + public void assertAccountFoundAtStartHashProof(final FlatDbMode flatDbMode) { + setup(flatDbMode); // account found at startHash insertTestAccounts(acct4, acct3, acct1, acct2); var rangeData = @@ -274,8 +351,10 @@ public void assertAccountFoundAtStartHashProof() { assertThat(assertIsValidAccountRangeProof(acct1.addressHash, rangeData)).isTrue(); } - @Test - public void assertCompleteStorageForSingleAccount() { + @ParameterizedTest + @MethodSource("flatDbMode") + public void assertCompleteStorageForSingleAccount(final FlatDbMode flatDbMode) { + setup(flatDbMode); insertTestAccounts(acct1, acct2, acct3, acct4); var rangeData = requestStorageRange(List.of(acct3.addressHash), Hash.ZERO, HASH_LAST); assertThat(rangeData).isNotNull(); @@ -293,8 +372,10 @@ public void assertCompleteStorageForSingleAccount() { .isTrue(); } - @Test - public void assertPartialStorageForSingleAccountEmptyRange() { + @ParameterizedTest + @MethodSource("flatDbMode") + public void assertPartialStorageForSingleAccountEmptyRange(final FlatDbMode flatDbMode) { + setup(flatDbMode); insertTestAccounts(acct3); var rangeData = requestStorageRange( @@ -314,8 +395,10 @@ public void assertPartialStorageForSingleAccountEmptyRange() { .isTrue(); } - @Test - public void assertPartialStorageLimitHashBetweenSlots() { + @ParameterizedTest + @MethodSource("flatDbMode") + public void assertPartialStorageLimitHashBetweenSlots(final FlatDbMode flatDbMode) { + setup(flatDbMode); Bytes accountShortHash = Bytes.fromHexStringLenient("0x40"); Hash accountFullHash = Hash.wrap(Bytes32.leftPad(accountShortHash)); SnapTestAccount testAccount = createTestContractAccount(accountFullHash, 2, inMemoryStorage); @@ -339,8 +422,10 @@ public void assertPartialStorageLimitHashBetweenSlots() { .isTrue(); } - @Test - public void assertLastEmptyPartialStorageForSingleAccount() { + @ParameterizedTest + @MethodSource("flatDbMode") + public void assertLastEmptyPartialStorageForSingleAccount(final FlatDbMode flatDbMode) { + setup(flatDbMode); // When our final range request is empty, no next account is possible, // and we should return just a proof of exclusion of the right @@ -364,8 +449,10 @@ public void assertLastEmptyPartialStorageForSingleAccount() { .isTrue(); } - @Test - public void assertStorageLimitRangeResponse() { + @ParameterizedTest + @MethodSource("flatDbMode") + public void assertStorageLimitRangeResponse(final FlatDbMode flatDbMode) { + setup(flatDbMode); // assert we limit the range response according to bytessize final int storageSlotSize = 69; final int storageSlotCount = 16; @@ -407,8 +494,10 @@ public void assertStorageLimitRangeResponse() { .isTrue(); } - @Test - public void assertStorageLimitRangeResponse_atLeastOneSlot() { + @ParameterizedTest + @MethodSource("flatDbMode") + public void assertStorageLimitRangeResponse_atLeastOneSlot(final FlatDbMode flatDbMode) { + setup(flatDbMode); insertTestAccounts(acct1, acct2, acct3, acct4); final BytesValueRLPOutput tmp = new BytesValueRLPOutput(); @@ -443,8 +532,10 @@ public void assertStorageLimitRangeResponse_atLeastOneSlot() { .isTrue(); } - @Test - public void assertAccountTriePathRequest() { + @ParameterizedTest + @MethodSource("flatDbMode") + public void assertAccountTriePathRequest(final FlatDbMode flatDbMode) { + setup(flatDbMode); insertTestAccounts(acct1, acct2, acct3, 
acct4); var partialPathToAcct2 = CompactEncoding.bytesToPath(acct2.addressHash).slice(0, 1); var partialPathToAcct1 = Bytes.fromHexString("0x01"); // first nibble is 1 @@ -458,8 +549,10 @@ public void assertAccountTriePathRequest() { assertThat(trieNodes.size()).isEqualTo(2); } - @Test - public void assertAccountTrieRequest_invalidEmptyPath() { + @ParameterizedTest + @MethodSource("flatDbMode") + public void assertAccountTrieRequest_invalidEmptyPath(final FlatDbMode flatDbMode) { + setup(flatDbMode); insertTestAccounts(acct1); var partialPathToAcct1 = Bytes.fromHexString("0x01"); // first nibble is 1 var trieNodeRequest = @@ -470,8 +563,10 @@ public void assertAccountTrieRequest_invalidEmptyPath() { assertThat(trieNodes.isEmpty()).isTrue(); } - @Test - public void assertAccountTrieLimitRequest() { + @ParameterizedTest + @MethodSource("flatDbMode") + public void assertAccountTrieLimitRequest(final FlatDbMode flatDbMode) { + setup(flatDbMode); insertTestAccounts(acct1, acct2, acct3, acct4); final int accountNodeSize = 147; final int accountNodeLimit = 3; @@ -506,8 +601,10 @@ public void assertAccountTrieLimitRequest() { assertThat(trieNodes.size()).isEqualTo(accountNodeLimit * 90 / 100); } - @Test - public void assertAccountTrieLimitRequest_atLeastOneTrieNode() { + @ParameterizedTest + @MethodSource("flatDbMode") + public void assertAccountTrieLimitRequest_atLeastOneTrieNode(final FlatDbMode flatDbMode) { + setup(flatDbMode); insertTestAccounts(acct1, acct2, acct3, acct4); var partialPathToAcct1 = Bytes.fromHexString("0x01"); // first nibble is 1 @@ -539,8 +636,10 @@ public void assertAccountTrieLimitRequest_atLeastOneTrieNode() { assertThat(trieNodes.size()).isEqualTo(1); } - @Test - public void assertStorageTriePathRequest() { + @ParameterizedTest + @MethodSource("flatDbMode") + public void assertStorageTriePathRequest(final FlatDbMode flatDbMode) { + setup(flatDbMode); insertTestAccounts(acct1, acct2, acct3, acct4); var pathToSlot11 = CompactEncoding.encode(Bytes.fromHexStringLenient("0x0101")); var pathToSlot12 = CompactEncoding.encode(Bytes.fromHexStringLenient("0x0102")); @@ -559,8 +658,10 @@ public void assertStorageTriePathRequest() { assertThat(trieNodes.get(5)).isEqualTo(Bytes.EMPTY); } - @Test - public void assertStorageTriePathRequest_accountNotPresent() { + @ParameterizedTest + @MethodSource("flatDbMode") + public void assertStorageTriePathRequest_accountNotPresent(final FlatDbMode flatDbMode) { + setup(flatDbMode); insertTestAccounts(acct4); var pathToSlot11 = CompactEncoding.encode(Bytes.fromHexStringLenient("0x0101")); var trieNodeRequest = @@ -575,8 +676,10 @@ public void assertStorageTriePathRequest_accountNotPresent() { assertThat(trieNodes.size()).isEqualTo(0); } - @Test - public void assertStorageTrieShortAccountHashPathRequest() { + @ParameterizedTest + @MethodSource("flatDbMode") + public void assertStorageTrieShortAccountHashPathRequest(final FlatDbMode flatDbMode) { + setup(flatDbMode); Bytes accountShortHash = Bytes.fromHexStringLenient("0x40"); Hash accountFullHash = Hash.wrap(Bytes32.leftPad(accountShortHash)); SnapTestAccount testAccount = createTestContractAccount(accountFullHash, 1, inMemoryStorage); @@ -593,8 +696,10 @@ public void assertStorageTrieShortAccountHashPathRequest() { assertThat(trieNodes.size()).isEqualTo(2); } - @Test - public void assertStorageTrieLimitRequest() { + @ParameterizedTest + @MethodSource("flatDbMode") + public void assertStorageTrieLimitRequest(final FlatDbMode flatDbMode) { + setup(flatDbMode); insertTestAccounts(acct1, acct2, 
acct3, acct4); final int trieNodeSize = 69; final int trieNodeLimit = 3; @@ -626,8 +731,10 @@ public void assertStorageTrieLimitRequest() { assertThat(trieNodes.size()).isEqualTo(3); } - @Test - public void assertStorageTrieLimitRequest_atLeastOneTrieNode() { + @ParameterizedTest + @MethodSource("flatDbMode") + public void assertStorageTrieLimitRequest_atLeastOneTrieNode(final FlatDbMode flatDbMode) { + setup(flatDbMode); insertTestAccounts(acct1, acct2, acct3, acct4); var pathToSlot11 = CompactEncoding.encode(Bytes.fromHexStringLenient("0x0101")); @@ -657,8 +764,10 @@ public void assertStorageTrieLimitRequest_atLeastOneTrieNode() { assertThat(trieNodes.size()).isEqualTo(1); } - @Test - public void assertCodePresent() { + @ParameterizedTest + @MethodSource("flatDbMode") + public void assertCodePresent(final FlatDbMode flatDbMode) { + setup(flatDbMode); insertTestAccounts(acct1, acct2, acct3, acct4); var codeRequest = requestByteCodes( @@ -669,8 +778,10 @@ public void assertCodePresent() { assertThat(codes.codes().size()).isEqualTo(2); } - @Test - public void assertCodeLimitRequest() { + @ParameterizedTest + @MethodSource("flatDbMode") + public void assertCodeLimitRequest(final FlatDbMode flatDbMode) { + setup(flatDbMode); insertTestAccounts(acct1, acct2, acct3, acct4); final int codeSize = 32; final int codeLimit = 2; @@ -695,8 +806,10 @@ public void assertCodeLimitRequest() { assertThat(codes.codes().size()).isEqualTo(codeLimit * 90 / 100); } - @Test - public void assertCodeLimitRequest_atLeastOneByteCode() { + @ParameterizedTest + @MethodSource("flatDbMode") + public void assertCodeLimitRequest_atLeastOneByteCode(final FlatDbMode flatDbMode) { + setup(flatDbMode); insertTestAccounts(acct1, acct2, acct3, acct4); final BytesValueRLPOutput tmp = new BytesValueRLPOutput(); @@ -745,6 +858,11 @@ static SnapTestAccount createTestContractAccount( // mock some storage data var flatdb = storage.getFlatDbStrategy(); + + // Only Bonsai archive cares about this. 
Do everything as if we're at + // block 1 so we know which entry to retrieve from the DB + flatdb.updateBlockContext(generateBonsaiArchiveContextHeader(1)); + var updater = storage.updater(); updater.putCode(Hash.hash(mockCode), mockCode); IntStream.iterate(10, i -> i < 20, i -> i + slotKeyGap) From ee7c2fc112bb0950aca102edc3e67e9e0bcce31e Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Mon, 7 Oct 2024 11:12:58 +0100 Subject: [PATCH 23/39] Remove todo comments Signed-off-by: Matthew Whitehead --- .../storage/flat/FlatDbStrategyProvider.java | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java index dbcae20c07d..04a6ff2722a 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java @@ -88,7 +88,6 @@ synchronized FlatDbMode deriveFlatDbStrategy( : FlatDbMode.FULL) : FlatDbMode.PARTIAL; - // TODO: commented out for archive testing final var existingTrieData = composedWorldStateStorage.get(TRIE_BRANCH_STORAGE, WORLD_ROOT_HASH_KEY).isPresent(); @@ -100,24 +99,13 @@ synchronized FlatDbMode deriveFlatDbStrategy( .orElseGet( () -> { // if we do not have a db-supplied config for flatdb, derive it: - // default to partial if trie data exists, but the flat config does not, - // and default to the storage config otherwise - - // TODO: temporarily hard code ARCHIVE mode for testing - /*var flatDbModeVal = - dataStorageConfiguration - .getDataStorageFormat() - .equals(DataStorageFormat.BONSAI_ARCHIVE) - ? FlatDbMode.ARCHIVE.getVersion() - : FlatDbMode.FULL.getVersion();*/ + // - default to partial if trie data exists but the flat config does not, + // - otherwise go with the requested mode var flatDbModeVal = existingTrieData - ? FlatDbMode.ARCHIVE.getVersion() + ? FlatDbMode.PARTIAL.getVersion() : requestedFlatDbMode.getVersion(); - // MRW TODO - If there is archive data in the freezer segment, we can assume - // archive mode - // persist this config in the db var setDbModeTx = composedWorldStateStorage.startTransaction(); setDbModeTx.put( @@ -126,6 +114,7 @@ synchronized FlatDbMode deriveFlatDbStrategy( return flatDbModeVal; })); + LOG.info("Bonsai flat db mode found {}", flatDbMode); return flatDbMode; From 38cfdd9b00272ee30171cefd8fea47b57cffd207 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Mon, 7 Oct 2024 14:15:22 +0100 Subject: [PATCH 24/39] Make BONSAI_ARCHIVE experimental for the first release. Add metrics. 
Use the term archive, not freezer Signed-off-by: Matthew Whitehead --- .../org/hyperledger/besu/cli/BesuCommand.java | 6 +- .../besu/cli/options/DataStorageOptions.java | 2 +- .../storage/RevertMetadataSubCommand.java | 2 +- .../storage/TrieLogSubCommand.java | 4 +- .../controller/BesuControllerBuilder.java | 28 +- .../besu/cli/PrivacyOptionsTest.java | 4 +- .../keyvalue/KeyValueSegmentIdentifier.java | 24 +- .../common/GenesisWorldStateProvider.java | 2 +- .../storage/flat/ArchiveFlatDbStrategy.java | 51 ++- ...rchiveFreezer.java => BonsaiArchiver.java} | 135 ++++---- .../DiffBasedWorldStateKeyValueStorage.java | 81 +++-- .../storage/flat/FlatDbStrategyProvider.java | 4 +- .../worldstate/DataStorageConfiguration.java | 2 +- .../WorldStateStorageCoordinator.java | 4 +- .../BonsaiWorldStateKeyValueStorageTest.java | 2 +- .../flat/FlatDbStrategyProviderTest.java | 6 +- ...veFreezerTests.java => ArchiverTests.java} | 302 +++++++++--------- .../eth/sync/fastsync/FastSyncDownloader.java | 2 +- .../sync/snapsync/SnapWorldDownloadState.java | 2 +- .../snapsync/SnapWorldStateDownloader.java | 2 +- .../services/storage/DataStorageFormat.java | 6 +- .../RocksDBKeyValueStorageFactory.java | 2 +- .../BaseVersionedStorageFormat.java | 6 +- .../configuration/DatabaseMetadata.java | 2 +- .../PrivacyVersionedStorageFormat.java | 2 +- .../services/storage/rocksdb/Utils.java | 2 +- 26 files changed, 371 insertions(+), 314 deletions(-) rename ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/{BonsaiArchiveFreezer.java => BonsaiArchiver.java} (68%) rename ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/trielog/{ArchiveFreezerTests.java => ArchiverTests.java} (82%) diff --git a/besu/src/main/java/org/hyperledger/besu/cli/BesuCommand.java b/besu/src/main/java/org/hyperledger/besu/cli/BesuCommand.java index a4fee410acc..7a43b211f04 100644 --- a/besu/src/main/java/org/hyperledger/besu/cli/BesuCommand.java +++ b/besu/src/main/java/org/hyperledger/besu/cli/BesuCommand.java @@ -1939,8 +1939,10 @@ private PrivacyParameters privacyParameters() { if (getDataStorageConfiguration().getDataStorageFormat() == DataStorageFormat.BONSAI) { throw new ParameterException(commandLine, String.format("%s %s", "Bonsai", errorSuffix)); } - if (getDataStorageConfiguration().getDataStorageFormat() == DataStorageFormat.BONSAI_ARCHIVE) { - throw new ParameterException(commandLine, String.format("%s %s", "Bonsai archive", errorSuffix)); + if (getDataStorageConfiguration().getDataStorageFormat() + == DataStorageFormat.X_BONSAI_ARCHIVE) { + throw new ParameterException( + commandLine, String.format("%s %s", "Bonsai archive", errorSuffix)); } if (Boolean.TRUE.equals(privacyOptionGroup.isPrivacyMultiTenancyEnabled) diff --git a/besu/src/main/java/org/hyperledger/besu/cli/options/DataStorageOptions.java b/besu/src/main/java/org/hyperledger/besu/cli/options/DataStorageOptions.java index 10cf42dd6c7..d5da71112f8 100644 --- a/besu/src/main/java/org/hyperledger/besu/cli/options/DataStorageOptions.java +++ b/besu/src/main/java/org/hyperledger/besu/cli/options/DataStorageOptions.java @@ -47,7 +47,7 @@ public class DataStorageOptions implements CLIOptions @Option( names = {DATA_STORAGE_FORMAT}, description = - "Format to store trie data in. Either FOREST, BONSAI or BONSAI_ARCHIVE (default: ${DEFAULT-VALUE}).", + "Format to store trie data in. 
Either FOREST, BONSAI or X_BONSAI_ARCHIVE (default: ${DEFAULT-VALUE}).", arity = "1") private DataStorageFormat dataStorageFormat = DataStorageFormat.BONSAI; diff --git a/besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/RevertMetadataSubCommand.java b/besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/RevertMetadataSubCommand.java index 5cfcd9ef492..b3a2b513ec9 100644 --- a/besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/RevertMetadataSubCommand.java +++ b/besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/RevertMetadataSubCommand.java @@ -123,7 +123,7 @@ public void run() { switch (dataStorageFormat) { case FOREST -> 1; case BONSAI -> 2; - case BONSAI_ARCHIVE -> 3; + case X_BONSAI_ARCHIVE -> 3; }; @JsonSerialize diff --git a/besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/TrieLogSubCommand.java b/besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/TrieLogSubCommand.java index ecf367e97d8..ea69690fece 100644 --- a/besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/TrieLogSubCommand.java +++ b/besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/TrieLogSubCommand.java @@ -28,6 +28,7 @@ import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.BonsaiWorldStateKeyValueStorage; import org.hyperledger.besu.ethereum.trie.diffbased.common.trielog.TrieLogPruner; import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration; +import org.hyperledger.besu.ethereum.worldstate.ImmutableDataStorageConfiguration; import java.io.IOException; import java.io.PrintWriter; @@ -39,7 +40,6 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.core.config.Configurator; -import org.hyperledger.besu.ethereum.worldstate.ImmutableDataStorageConfiguration; import org.rocksdb.RocksDBException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -323,7 +323,7 @@ private static TrieLogContext getTrieLogContext() { final DataStorageConfiguration config = besuController.getDataStorageConfiguration(); checkArgument( config.getDataStorageFormat().isBonsaiFormat(), - "Subcommand only works with data-storage-format=BONSAI or BONSAI_ARCHIVE"); + "Subcommand only works with data-storage-format=BONSAI or X_BONSAI_ARCHIVE"); final StorageProvider storageProvider = besuController.getStorageProvider(); final BonsaiWorldStateKeyValueStorage rootWorldStateStorage = diff --git a/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java b/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java index 12fd41015dc..72ff58afe16 100644 --- a/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java +++ b/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java @@ -86,7 +86,7 @@ import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.BonsaiWorldStateProvider; import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.cache.BonsaiCachedMerkleTrieLoader; import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.BonsaiWorldStateKeyValueStorage; -import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.worldview.BonsaiArchiveFreezer; +import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.worldview.BonsaiArchiver; import org.hyperledger.besu.ethereum.trie.diffbased.common.trielog.TrieLogManager; import org.hyperledger.besu.ethereum.trie.diffbased.common.trielog.TrieLogPruner; import org.hyperledger.besu.ethereum.trie.forest.ForestWorldStateArchive; @@ -755,16 +755,17 @@ public BesuController 
build() { } // TODO - do we want a flag to turn this on and off? - if (DataStorageFormat.BONSAI_ARCHIVE.equals(dataStorageConfiguration.getDataStorageFormat())) { + if (DataStorageFormat.X_BONSAI_ARCHIVE.equals( + dataStorageConfiguration.getDataStorageFormat())) { final BonsaiWorldStateKeyValueStorage worldStateKeyValueStorage = worldStateStorageCoordinator.getStrategy(BonsaiWorldStateKeyValueStorage.class); - final BonsaiArchiveFreezer archiveFreezer = - createBonsaiArchiveFreezer( + final BonsaiArchiver archiver = + createBonsaiArchiver( worldStateKeyValueStorage, blockchain, scheduler, ((BonsaiWorldStateProvider) worldStateArchive).getTrieLogManager()); - blockchain.observeBlockAdded(archiveFreezer); + blockchain.observeBlockAdded(archiver); } final List closeables = new ArrayList<>(); @@ -833,21 +834,22 @@ private TrieLogPruner createTrieLogPruner( return trieLogPruner; } - private BonsaiArchiveFreezer createBonsaiArchiveFreezer( + private BonsaiArchiver createBonsaiArchiver( final WorldStateKeyValueStorage worldStateStorage, final Blockchain blockchain, final EthScheduler scheduler, final TrieLogManager trieLogManager) { - final BonsaiArchiveFreezer archiveFreezer = - new BonsaiArchiveFreezer( + final BonsaiArchiver archiver = + new BonsaiArchiver( (BonsaiWorldStateKeyValueStorage) worldStateStorage, blockchain, scheduler::executeServiceTask, - trieLogManager); + trieLogManager, + metricsSystem); - long archivedBlocks = archiveFreezer.initialize(); - LOG.info("Bonsai archive initialised, caught up {} blocks", archivedBlocks); - return archiveFreezer; + long archivedBlocks = archiver.initialize(); + LOG.info("Bonsai archiver initialised, caught up {} blocks", archivedBlocks); + return archiver; } /** @@ -1143,7 +1145,7 @@ yield new BonsaiWorldStateProvider( besuComponent.map(BesuComponent::getBesuPluginContext).orElse(null), evmConfiguration); } - case BONSAI_ARCHIVE -> { + case X_BONSAI_ARCHIVE -> { final BonsaiWorldStateKeyValueStorage worldStateKeyValueStorage = worldStateStorageCoordinator.getStrategy(BonsaiWorldStateKeyValueStorage.class); diff --git a/besu/src/test/java/org/hyperledger/besu/cli/PrivacyOptionsTest.java b/besu/src/test/java/org/hyperledger/besu/cli/PrivacyOptionsTest.java index 752f496f35e..095cc11071c 100644 --- a/besu/src/test/java/org/hyperledger/besu/cli/PrivacyOptionsTest.java +++ b/besu/src/test/java/org/hyperledger/besu/cli/PrivacyOptionsTest.java @@ -218,10 +218,10 @@ public void privacyWithBonsaiExplicitMustError() { @Test public void privacyWithBonsaiArchiveExplicitMustError() { // bypass overridden parseCommand method which specifies bonsai - super.parseCommand("--privacy-enabled", "--data-storage-format", "BONSAI_ARCHIVE"); + super.parseCommand("--privacy-enabled", "--data-storage-format", "X_BONSAI_ARCHIVE"); assertThat(commandErrorOutput.toString(UTF_8)) - .contains("Bonsai archive cannot be enabled with privacy."); + .contains("Bonsai archive cannot be enabled with privacy."); assertThat(commandOutput.toString(UTF_8)).isEmpty(); } diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/storage/keyvalue/KeyValueSegmentIdentifier.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/storage/keyvalue/KeyValueSegmentIdentifier.java index 06ba67a6456..19bea1274ee 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/storage/keyvalue/KeyValueSegmentIdentifier.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/storage/keyvalue/KeyValueSegmentIdentifier.java @@ -15,8 +15,8 @@ package 
org.hyperledger.besu.ethereum.storage.keyvalue; import static org.hyperledger.besu.plugin.services.storage.DataStorageFormat.BONSAI; -import static org.hyperledger.besu.plugin.services.storage.DataStorageFormat.BONSAI_ARCHIVE; import static org.hyperledger.besu.plugin.services.storage.DataStorageFormat.FOREST; +import static org.hyperledger.besu.plugin.services.storage.DataStorageFormat.X_BONSAI_ARCHIVE; import org.hyperledger.besu.plugin.services.storage.DataStorageFormat; import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier; @@ -31,20 +31,20 @@ public enum KeyValueSegmentIdentifier implements SegmentIdentifier { PRIVATE_TRANSACTIONS(new byte[] {3}), PRIVATE_STATE(new byte[] {4}), PRUNING_STATE(new byte[] {5}, EnumSet.of(FOREST)), - ACCOUNT_INFO_STATE(new byte[] {6}, EnumSet.of(BONSAI, BONSAI_ARCHIVE), false, true, false), - CODE_STORAGE(new byte[] {7}, EnumSet.of(BONSAI, BONSAI_ARCHIVE)), - ACCOUNT_STORAGE_STORAGE(new byte[] {8}, EnumSet.of(BONSAI, BONSAI_ARCHIVE), false, true, false), - TRIE_BRANCH_STORAGE(new byte[] {9}, EnumSet.of(BONSAI, BONSAI_ARCHIVE), false, true, false), - TRIE_LOG_STORAGE(new byte[] {10}, EnumSet.of(BONSAI, BONSAI_ARCHIVE), true, false, true), - ACCOUNT_INFO_STATE_FREEZER( - "ACCOUNT_INFO_STATE_FREEZER".getBytes(StandardCharsets.UTF_8), - EnumSet.of(BONSAI_ARCHIVE), + ACCOUNT_INFO_STATE(new byte[] {6}, EnumSet.of(BONSAI, X_BONSAI_ARCHIVE), false, true, false), + CODE_STORAGE(new byte[] {7}, EnumSet.of(BONSAI, X_BONSAI_ARCHIVE)), + ACCOUNT_STORAGE_STORAGE(new byte[] {8}, EnumSet.of(BONSAI, X_BONSAI_ARCHIVE), false, true, false), + TRIE_BRANCH_STORAGE(new byte[] {9}, EnumSet.of(BONSAI, X_BONSAI_ARCHIVE), false, true, false), + TRIE_LOG_STORAGE(new byte[] {10}, EnumSet.of(BONSAI, X_BONSAI_ARCHIVE), true, false, true), + ACCOUNT_INFO_STATE_ARCHIVE( + "ACCOUNT_INFO_STATE_ARCHIVE".getBytes(StandardCharsets.UTF_8), + EnumSet.of(X_BONSAI_ARCHIVE), true, false, true), - ACCOUNT_STORAGE_FREEZER( - "ACCOUNT_STORAGE_FREEZER".getBytes(StandardCharsets.UTF_8), - EnumSet.of(BONSAI_ARCHIVE), + ACCOUNT_STORAGE_ARCHIVE( + "ACCOUNT_STORAGE_ARCHIVE".getBytes(StandardCharsets.UTF_8), + EnumSet.of(X_BONSAI_ARCHIVE), true, false, true), diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/common/GenesisWorldStateProvider.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/common/GenesisWorldStateProvider.java index efe601acbea..c8340ce9f80 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/common/GenesisWorldStateProvider.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/common/GenesisWorldStateProvider.java @@ -49,7 +49,7 @@ public static MutableWorldState createGenesisWorldState( == DataStorageFormat.BONSAI) { return createGenesisBonsaiWorldState(false); } else if (Objects.requireNonNull(dataStorageConfiguration).getDataStorageFormat() - == DataStorageFormat.BONSAI_ARCHIVE) { + == DataStorageFormat.X_BONSAI_ARCHIVE) { return createGenesisBonsaiWorldState(true); } else { return createGenesisForestWorldState(); diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java index 31ba6b2051c..a3c17d5308b 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java +++ 
b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/ArchiveFlatDbStrategy.java @@ -15,8 +15,8 @@ package org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE; -import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE_FREEZER; -import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_STORAGE_FREEZER; +import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE_ARCHIVE; +import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_STORAGE_ARCHIVE; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE; import org.hyperledger.besu.datatypes.Hash; @@ -25,8 +25,10 @@ import org.hyperledger.besu.ethereum.trie.NodeLoader; import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.CodeStorageStrategy; import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.FlatDbStrategy; +import org.hyperledger.besu.metrics.BesuMetricCategory; import org.hyperledger.besu.plugin.data.BlockHeader; import org.hyperledger.besu.plugin.services.MetricsSystem; +import org.hyperledger.besu.plugin.services.metrics.Counter; import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage; import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction; @@ -46,12 +48,27 @@ public class ArchiveFlatDbStrategy extends FullFlatDbStrategy { private final BonsaiContext context; private static final Logger LOG = LoggerFactory.getLogger(ArchiveFlatDbStrategy.class); + protected final Counter getAccountFromArchiveCounter; + protected final Counter getStorageFromArchiveCounter; + public ArchiveFlatDbStrategy( final BonsaiContext context, final MetricsSystem metricsSystem, final CodeStorageStrategy codeStorageStrategy) { super(metricsSystem, codeStorageStrategy); this.context = context; + + getAccountFromArchiveCounter = + metricsSystem.createCounter( + BesuMetricCategory.BLOCKCHAIN, + "get_account_from_archive_counter", + "Total number of calls to get account that were from archived state"); + + getStorageFromArchiveCounter = + metricsSystem.createCounter( + BesuMetricCategory.BLOCKCHAIN, + "get_storage_from_archive_counter", + "Total number of calls to get storage that were from archived state"); } static final byte[] MAX_BLOCK_SUFFIX = Bytes.ofUnsignedLong(Long.MAX_VALUE).toArrayUnsafe(); @@ -79,13 +96,17 @@ public Optional getFlatAccount( .getNearestBefore(ACCOUNT_INFO_STATE, keyNearest) .filter(found -> accountHash.commonPrefixLength(found.key()) >= accountHash.size()); - // If there isn't a match look in the freezer DB segment + // If there isn't a match look in the archive DB segment if (nearestAccount.isEmpty()) { accountFound = storage - .getNearestBefore(ACCOUNT_INFO_STATE_FREEZER, keyNearest) + .getNearestBefore(ACCOUNT_INFO_STATE_ARCHIVE, keyNearest) .filter(found -> accountHash.commonPrefixLength(found.key()) >= accountHash.size()) .flatMap(SegmentedKeyValueStorage.NearestKeyValue::wrapBytes); + + if (accountFound.isPresent()) { + getAccountFromArchiveCounter.inc(); + } } else { accountFound = nearestAccount @@ -95,13 +116,12 @@ public Optional getFlatAccount( DELETED_ACCOUNT_VALUE, found.value().orElse(DELETED_ACCOUNT_VALUE))) // return empty when we find a "deleted value key" 
.flatMap(SegmentedKeyValueStorage.NearestKeyValue::wrapBytes); - } - if (accountFound.isPresent()) { - // TODO - different metric for frozen lookups? - getAccountFoundInFlatDatabaseCounter.inc(); - } else { - getAccountNotFoundInFlatDatabaseCounter.inc(); + if (accountFound.isPresent()) { + getAccountFoundInFlatDatabaseCounter.inc(); + } else { + getAccountNotFoundInFlatDatabaseCounter.inc(); + } } return accountFound; @@ -261,17 +281,21 @@ public Optional getFlatStorageValueByStorageSlotKey( .filter( found -> Bytes.of(naturalKey).commonPrefixLength(found.key()) >= naturalKey.length); - // If there isn't a match look in the freezer DB segment + // If there isn't a match look in the archive DB segment if (nearestStorage.isEmpty()) { - // Check the frozen storage as old state is moved out of the primary DB segment + // Check the archived storage as old state is moved out of the primary DB segment storageFound = storage - .getNearestBefore(ACCOUNT_STORAGE_FREEZER, keyNearest) + .getNearestBefore(ACCOUNT_STORAGE_ARCHIVE, keyNearest) // don't return accounts that do not have a matching account hash .filter( found -> Bytes.of(naturalKey).commonPrefixLength(found.key()) >= naturalKey.length) .flatMap(SegmentedKeyValueStorage.NearestKeyValue::wrapBytes); + + if (storageFound.isPresent()) { + getStorageFromArchiveCounter.inc(); + } } else { storageFound = nearestStorage @@ -284,7 +308,6 @@ public Optional getFlatStorageValueByStorageSlotKey( .flatMap(SegmentedKeyValueStorage.NearestKeyValue::wrapBytes); if (storageFound.isPresent()) { - // TODO - different metric for frozen lookups? getStorageValueFlatDatabaseCounter.inc(); } else { getStorageValueNotFoundInFlatDatabaseCounter.inc(); diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiver.java similarity index 68% rename from ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java rename to ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiver.java index 6abdcf0e836..5e596fa6fbe 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiveFreezer.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiver.java @@ -21,6 +21,9 @@ import org.hyperledger.besu.ethereum.core.Block; import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.DiffBasedWorldStateKeyValueStorage; import org.hyperledger.besu.ethereum.trie.diffbased.common.trielog.TrieLogManager; +import org.hyperledger.besu.metrics.BesuMetricCategory; +import org.hyperledger.besu.plugin.services.MetricsSystem; +import org.hyperledger.besu.plugin.services.metrics.Counter; import org.hyperledger.besu.plugin.services.trielogs.TrieLog; import java.util.Collections; @@ -39,61 +42,71 @@ import org.slf4j.LoggerFactory; /** - * This class manages the "freezing" of historic state that is still needed to satisfy queries but + * This class manages the archiving of historic state that is still needed to satisfy queries but * doesn't need to be in the main DB segment for. Doing so would degrade block-import performance * over time so we move state beyond a certain age (in blocks) to other DB segments, assuming there * is a more recent (i.e. changed) version of the state. 
If state is created once and never changed * it will remain in the primary DB segment(s). */ -public class BonsaiArchiveFreezer implements BlockAddedObserver { +public class BonsaiArchiver implements BlockAddedObserver { - private static final Logger LOG = LoggerFactory.getLogger(BonsaiArchiveFreezer.class); + private static final Logger LOG = LoggerFactory.getLogger(BonsaiArchiver.class); private final DiffBasedWorldStateKeyValueStorage rootWorldStateStorage; private final Blockchain blockchain; private final Consumer executeAsync; private static final int CATCHUP_LIMIT = 1000; - private static final int DISTANCE_FROM_HEAD_BEFORE_FREEZING_OLD_STATE = 10; + private static final int DISTANCE_FROM_HEAD_BEFORE_ARCHIVING_OLD_STATE = 10; private final TrieLogManager trieLogManager; + protected final MetricsSystem metricsSystem; + protected final Counter archivedBlocksCounter; private final Map pendingBlocksToArchive = Collections.synchronizedMap(new TreeMap<>()); // For logging progress. Saves doing a DB read just to record our progress - final AtomicLong latestFrozenBlock = new AtomicLong(0); + final AtomicLong latestArchivedBlock = new AtomicLong(0); - public BonsaiArchiveFreezer( + public BonsaiArchiver( final DiffBasedWorldStateKeyValueStorage rootWorldStateStorage, final Blockchain blockchain, final Consumer executeAsync, - final TrieLogManager trieLogManager) { + final TrieLogManager trieLogManager, + final MetricsSystem metricsSystem) { this.rootWorldStateStorage = rootWorldStateStorage; this.blockchain = blockchain; this.executeAsync = executeAsync; this.trieLogManager = trieLogManager; + this.metricsSystem = metricsSystem; + + archivedBlocksCounter = + metricsSystem.createCounter( + BesuMetricCategory.BLOCKCHAIN, + "archived_blocks_state_total", + "Total number of blocks for which state has been archived"); } private int loadNextCatchupBlocks() { - Optional frozenBlocksHead = Optional.empty(); + Optional archivedBlocksHead = Optional.empty(); - Optional latestFrozenBlock = rootWorldStateStorage.getLatestArchiveFrozenBlock(); + Optional latestArchivedBlock = rootWorldStateStorage.getLatestArchivedBlock(); - if (latestFrozenBlock.isPresent()) { - // Start from the next block after the most recently frozen block - frozenBlocksHead = Optional.of(latestFrozenBlock.get() + 1); + if (latestArchivedBlock.isPresent()) { + // Start from the next block after the most recently archived block + archivedBlocksHead = Optional.of(latestArchivedBlock.get() + 1); } else { // Start from genesis block if (blockchain.getBlockHashByNumber(0).isPresent()) { - frozenBlocksHead = Optional.of(0L); + archivedBlocksHead = Optional.of(0L); } } int preLoadedBlocks = 0; - if (frozenBlocksHead.isPresent()) { - Optional nextBlock = blockchain.getBlockByNumber(frozenBlocksHead.get()); + if (archivedBlocksHead.isPresent()) { + Optional nextBlock = blockchain.getBlockByNumber(archivedBlocksHead.get()); for (int i = 0; i < CATCHUP_LIMIT; i++) { if (nextBlock.isPresent()) { - addToFreezerQueue( + addToArchivingQueue( nextBlock.get().getHeader().getNumber(), nextBlock.get().getHeader().getHash()); preLoadedBlocks++; nextBlock = blockchain.getBlockByNumber(nextBlock.get().getHeader().getNumber() + 1); @@ -102,10 +115,9 @@ private int loadNextCatchupBlocks() { } } LOG.atInfo() - .setMessage( - "Preloaded {} blocks from {} to move their state and storage to the archive freezer") + .setMessage("Preloaded {} blocks from {} to move their state and storage to the archive") .addArgument(preLoadedBlocks) - 
.addArgument(frozenBlocksHead.get()) + .addArgument(archivedBlocksHead.get()) .log(); } return preLoadedBlocks; @@ -113,13 +125,14 @@ private int loadNextCatchupBlocks() { public long initialize() { // On startup there will be recent blocks whose state and storage hasn't been archived yet. - // Pre-load them in blocks of CATCHUP_LIMIT ready for freezing state once enough new blocks have + // Pre-load them in blocks of CATCHUP_LIMIT ready for archiving state once enough new blocks + // have // been added to the chain. long totalBlocksCaughtUp = 0; int catchupBlocksLoaded = CATCHUP_LIMIT; while (catchupBlocksLoaded >= CATCHUP_LIMIT) { catchupBlocksLoaded = loadNextCatchupBlocks(); - moveBlockStateToFreezer(); + moveBlockStateToArchive(); totalBlocksCaughtUp += catchupBlocksLoaded; } return totalBlocksCaughtUp; @@ -129,10 +142,10 @@ public int getPendingBlocksCount() { return pendingBlocksToArchive.size(); } - public synchronized void addToFreezerQueue(final long blockNumber, final Hash blockHash) { + public synchronized void addToArchivingQueue(final long blockNumber, final Hash blockHash) { LOG.atDebug() .setMessage( - "Adding block to archive freezer queue for moving to cold storage, blockNumber {}; blockHash {}") + "Adding block to archiving queue for moving to cold storage, blockNumber {}; blockHash {}") .addArgument(blockNumber) .addArgument(blockHash) .log(); @@ -143,53 +156,54 @@ private synchronized void removeArchivedFromQueue(final Map archived archivedBlocks.keySet().forEach(e -> pendingBlocksToArchive.remove(e)); } - // Move state and storage entries from their primary DB segments to the freezer segments. This is + // Move state and storage entries from their primary DB segments to their archive DB segments. + // This is // intended to maintain good performance for new block imports by keeping the primary DB segments // to live state only. Returns the number of state and storage entries moved. - public int moveBlockStateToFreezer() { + public int moveBlockStateToArchive() { final long retainAboveThisBlock = - blockchain.getChainHeadBlockNumber() - DISTANCE_FROM_HEAD_BEFORE_FREEZING_OLD_STATE; + blockchain.getChainHeadBlockNumber() - DISTANCE_FROM_HEAD_BEFORE_ARCHIVING_OLD_STATE; if (rootWorldStateStorage.getFlatDbMode().getVersion() == Bytes.EMPTY) { throw new IllegalStateException("DB mode version not set"); } - AtomicInteger frozenAccountStateCount = new AtomicInteger(); - AtomicInteger frozenAccountStorageCount = new AtomicInteger(); + AtomicInteger archivedAccountStateCount = new AtomicInteger(); + AtomicInteger archivedAccountStorageCount = new AtomicInteger(); // Typically we will move all storage and state for a single block i.e. when a new block is // imported, move state for block-N. There are cases where we catch-up and move old state - // for a number of blocks so we may iterate over a number of blocks freezing their state, + // for a number of blocks so we may iterate over a number of blocks archiving their state, // not just a single one. 
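+    // Each pass archives at most CATCHUP_LIMIT blocks, and only blocks that are at least
+    // DISTANCE_FROM_HEAD_BEFORE_ARCHIVING_OLD_STATE behind the current chain head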
- final SortedMap blocksToFreeze; + final SortedMap blocksToArchive; synchronized (this) { - blocksToFreeze = new TreeMap<>(); + blocksToArchive = new TreeMap<>(); pendingBlocksToArchive.entrySet().stream() .filter( - (e) -> blocksToFreeze.size() <= CATCHUP_LIMIT && e.getKey() <= retainAboveThisBlock) + (e) -> blocksToArchive.size() <= CATCHUP_LIMIT && e.getKey() <= retainAboveThisBlock) .forEach( (e) -> { - blocksToFreeze.put(e.getKey(), e.getValue()); + blocksToArchive.put(e.getKey(), e.getValue()); }); } - if (blocksToFreeze.size() > 0) { + if (blocksToArchive.size() > 0) { LOG.atDebug() - .setMessage("Moving cold state to freezer storage: {} to {} ") - .addArgument(blocksToFreeze.firstKey()) - .addArgument(blocksToFreeze.lastKey()) + .setMessage("Moving cold state to archive storage: {} to {} ") + .addArgument(blocksToArchive.firstKey()) + .addArgument(blocksToArchive.lastKey()) .log(); // Determine which world state keys have changed in the last N blocks by looking at the - // trie logs for the blocks. Then move the old keys to the freezer segment (if and only if + // trie logs for the blocks. Then move the old keys to the archive segment (if and only if // they have changed) - blocksToFreeze + blocksToArchive .entrySet() .forEach( (block) -> { Hash blockHash = block.getValue(); LOG.atDebug() - .setMessage("Freezing all account state for block {}") + .setMessage("Archiving all account state for block {}") .addArgument(block.getKey()) .log(); Optional trieLog = trieLogManager.getTrieLogLayer(blockHash); @@ -200,14 +214,14 @@ public int moveBlockStateToFreezer() { .forEach( (address, change) -> { // Move any previous state for this account - frozenAccountStateCount.addAndGet( - rootWorldStateStorage.freezePreviousAccountState( + archivedAccountStateCount.addAndGet( + rootWorldStateStorage.archivePreviousAccountState( blockchain.getBlockHeader( blockchain.getBlockHeader(blockHash).get().getParentHash()), address.addressHash())); }); LOG.atDebug() - .setMessage("Freezing all storage state for block {}") + .setMessage("Archiving all storage state for block {}") .addArgument(block.getKey()) .log(); trieLog @@ -218,8 +232,8 @@ public int moveBlockStateToFreezer() { storageSlotKey.forEach( (slotKey, slotValue) -> { // Move any previous state for this account - frozenAccountStorageCount.addAndGet( - rootWorldStateStorage.freezePreviousStorageState( + archivedAccountStorageCount.addAndGet( + rootWorldStateStorage.archivePreviousStorageState( blockchain.getBlockHeader( blockchain .getBlockHeader(blockHash) @@ -231,21 +245,22 @@ public int moveBlockStateToFreezer() { }); } LOG.atDebug() - .setMessage("All account state and storage frozen for block {}") + .setMessage("All account state and storage archived for block {}") .addArgument(block.getKey()) .log(); - rootWorldStateStorage.setLatestArchiveFrozenBlock(block.getKey()); + rootWorldStateStorage.setLatestArchivedBlock(block.getKey()); + archivedBlocksCounter.inc(); // Update local var for logging progress - latestFrozenBlock.set(block.getKey()); - if (latestFrozenBlock.get() % 100 == 0) { + latestArchivedBlock.set(block.getKey()); + if (latestArchivedBlock.get() % 100 == 0) { // Log progress in case catching up causes there to be a large number of keys // to move LOG.atInfo() .setMessage( "archive progress: state up to block {} archived ({} behind chain head {})") - .addArgument(latestFrozenBlock.get()) - .addArgument(blockchain.getChainHeadBlockNumber() - latestFrozenBlock.get()) + .addArgument(latestArchivedBlock.get()) + 
.addArgument(blockchain.getChainHeadBlockNumber() - latestArchivedBlock.get()) .addArgument(blockchain.getChainHeadBlockNumber()) .log(); } @@ -253,17 +268,17 @@ public int moveBlockStateToFreezer() { LOG.atDebug() .setMessage( - "finished moving cold state for blocks {} to {}. Froze {} account state entries, {} account storage entries") - .addArgument(blocksToFreeze.firstKey()) - .addArgument(latestFrozenBlock.get()) - .addArgument(frozenAccountStateCount.get()) - .addArgument(frozenAccountStorageCount.get()) + "finished moving state for blocks {} to {}. Archived {} account state entries, {} account storage entries") + .addArgument(blocksToArchive.firstKey()) + .addArgument(latestArchivedBlock.get()) + .addArgument(archivedAccountStateCount.get()) + .addArgument(archivedAccountStorageCount.get()) .log(); - removeArchivedFromQueue(blocksToFreeze); + removeArchivedFromQueue(blocksToArchive); } - return frozenAccountStateCount.get() + frozenAccountStorageCount.get(); + return archivedAccountStateCount.get() + archivedAccountStorageCount.get(); } private final Lock archiveMutex = new ReentrantLock(true); @@ -275,7 +290,7 @@ public void onBlockAdded(final BlockAddedEvent addedBlockContext) { Optional.of(addedBlockContext.getBlock().getHeader().getNumber()); blockNumber.ifPresent( blockNum -> { - addToFreezerQueue(blockNum, blockHash); + addToArchivingQueue(blockNum, blockHash); // Since moving blocks can be done in batches we only want // one instance running at a time @@ -283,7 +298,7 @@ public void onBlockAdded(final BlockAddedEvent addedBlockContext) { () -> { if (archiveMutex.tryLock()) { try { - moveBlockStateToFreezer(); + moveBlockStateToArchive(); } finally { archiveMutex.unlock(); } diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java index f1985e8d381..e835a3ef3ae 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java @@ -15,8 +15,8 @@ package org.hyperledger.besu.ethereum.trie.diffbased.common.storage; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE; -import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE_FREEZER; -import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_STORAGE_FREEZER; +import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE_ARCHIVE; +import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_STORAGE_ARCHIVE; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.CODE_STORAGE; import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE; @@ -66,9 +66,8 @@ public abstract class DiffBasedWorldStateKeyValueStorage public static final byte[] WORLD_BLOCK_HASH_KEY = "worldBlockHash".getBytes(StandardCharsets.UTF_8); - // 0x61726368697665426C6F636B7346726F7A656E - public static final byte[] ARCHIVE_BLOCKS_FROZEN = - 
"archiveBlocksFrozen".getBytes(StandardCharsets.UTF_8); + // 0x6172636869766564426C6F636B73 + public static final byte[] ARCHIVED_BLOCKS = "archivedBlocks".getBytes(StandardCharsets.UTF_8); private final AtomicBoolean shouldClose = new AtomicBoolean(false); @@ -205,18 +204,18 @@ public boolean pruneTrieLog(final Hash blockHash) { } /** - * Move old account state from the primary DB segments to "freezer" segments that will only be + * Move old account state from the primary DB segment to the archive segment that will only be * used for historic state queries. This prevents performance degradation over time for writes to * the primary DB segments. * * @param previousBlockHeader the block header for the previous block, used to get the "nearest * before" state - * @param accountHash the account to freeze old state for - * @return the number of account states that were moved to frozen storage + * @param accountHash the account to archive old state for + * @return the number of account states that were moved to the archive */ - public int freezePreviousAccountState( + public int archivePreviousAccountState( final Optional previousBlockHeader, final Hash accountHash) { - AtomicInteger frozenStateCount = new AtomicInteger(); + AtomicInteger archivedStateCount = new AtomicInteger(); if (previousBlockHeader.isPresent()) { try { // Get the key for the previous block @@ -229,7 +228,7 @@ public int freezePreviousAccountState( Optional nextMatch; - // Move all entries that match this address hash to the freezer DB segment + // Move all entries that match this address hash to the archive DB segment while ((nextMatch = composedWorldStateStorage .getNearestBefore(ACCOUNT_INFO_STATE, previousKey) @@ -244,14 +243,14 @@ public int freezePreviousAccountState( (nearestKey) -> { moveDBEntry( ACCOUNT_INFO_STATE, - ACCOUNT_INFO_STATE_FREEZER, + ACCOUNT_INFO_STATE_ARCHIVE, nearestKey.key().toArrayUnsafe(), nearestKey.value().get()); - frozenStateCount.getAndIncrement(); + archivedStateCount.getAndIncrement(); }); } - if (frozenStateCount.get() == 0) { + if (archivedStateCount.get() == 0) { // A lot of entries will have no previous history, so use trace to log when no previous // storage was found LOG.atTrace() @@ -261,33 +260,33 @@ public int freezePreviousAccountState( .log(); } else { LOG.atDebug() - .setMessage("{} storage entries frozen for block {}, address hash {}") - .addArgument(frozenStateCount.get()) + .setMessage("{} storage entries archived for block {}, address hash {}") + .addArgument(archivedStateCount.get()) .addArgument(previousBlockHeader.get().getNumber()) .addArgument(accountHash) .log(); } } catch (Exception e) { - LOG.error("Error moving account state for account {} to cold storage", accountHash, e); + LOG.error("Error moving account state for account {} to archived storage", accountHash, e); } } - return frozenStateCount.get(); + return archivedStateCount.get(); } /** - * Move old storage state from the primary DB segments to "cold" segments that will only be used - * for historic state queries. This prevents performance degradation over time for writes to the - * primary DB segments. + * Move old storage state from the primary DB segment to the archive segment that will only be + * used for historic state queries. This prevents performance degradation over time for writes to + * the primary DB segments. 
* * @param previousBlockHeader the block header for the previous block, used to get the "nearest * before" state - * @param storageSlotKey the storage slot to freeze old state for - * @return the number of storage states that were moved to frozen storage + * @param storageSlotKey the storage slot to archive old state for + * @return the number of storage states that were moved to archive storage */ - public int freezePreviousStorageState( + public int archivePreviousStorageState( final Optional previousBlockHeader, final Bytes storageSlotKey) { - AtomicInteger frozenStorageCount = new AtomicInteger(); + AtomicInteger archivedStorageCount = new AtomicInteger(); if (previousBlockHeader.isPresent()) { try { // Get the key for the previous block @@ -301,7 +300,7 @@ public int freezePreviousStorageState( Optional nextMatch; // Move all entries that match the storage hash for this address & slot - // to the freezer DB segment + // to the archive DB segment while ((nextMatch = composedWorldStateStorage .getNearestBefore(ACCOUNT_STORAGE_STORAGE, previousKey) @@ -314,13 +313,13 @@ public int freezePreviousStorageState( nextMatch.stream() .forEach( (nearestKey) -> { - if (frozenStorageCount.get() > 0 && frozenStorageCount.get() % 100 == 0) { + if (archivedStorageCount.get() > 0 && archivedStorageCount.get() % 100 == 0) { // Log progress in case catching up causes there to be a large number of keys // to move LOG.atDebug() .setMessage( - "{} storage entries frozen for block {}, slot hash {}, latest key {}") - .addArgument(frozenStorageCount.get()) + "{} storage entries archived for block {}, slot hash {}, latest key {}") + .addArgument(archivedStorageCount.get()) .addArgument(previousBlockHeader.get().getNumber()) .addArgument(storageSlotKey) .addArgument(nearestKey.key()) @@ -328,14 +327,14 @@ public int freezePreviousStorageState( } moveDBEntry( ACCOUNT_STORAGE_STORAGE, - ACCOUNT_STORAGE_FREEZER, + ACCOUNT_STORAGE_ARCHIVE, nearestKey.key().toArrayUnsafe(), nearestKey.value().get()); - frozenStorageCount.getAndIncrement(); + archivedStorageCount.getAndIncrement(); }); } - if (frozenStorageCount.get() == 0) { + if (archivedStorageCount.get() == 0) { // A lot of entries will have no previous history, so use trace to log when no previous // storage was found LOG.atTrace() @@ -345,18 +344,18 @@ public int freezePreviousStorageState( .log(); } else { LOG.atDebug() - .setMessage("{} storage entries frozen for block {}, slot hash {}") - .addArgument(frozenStorageCount.get()) + .setMessage("{} storage entries archived for block {}, slot hash {}") + .addArgument(archivedStorageCount.get()) .addArgument(previousBlockHeader.get().getNumber()) .addArgument(storageSlotKey) .log(); } } catch (Exception e) { - LOG.error("Error moving storage state for slot {} to cold storage", storageSlotKey, e); + LOG.error("Error moving storage state for slot {} to archived storage", storageSlotKey, e); } } - return frozenStorageCount.get(); + return archivedStorageCount.get(); } private void moveDBEntry( @@ -382,18 +381,18 @@ private void moveDBEntry( } } - public Optional getLatestArchiveFrozenBlock() { + public Optional getLatestArchivedBlock() { return composedWorldStateStorage - .get(ACCOUNT_INFO_STATE_FREEZER, ARCHIVE_BLOCKS_FROZEN) + .get(ACCOUNT_INFO_STATE_ARCHIVE, ARCHIVED_BLOCKS) .map(Bytes::wrap) .map(Bytes::toLong); } - public void setLatestArchiveFrozenBlock(final Long blockNumber) { + public void setLatestArchivedBlock(final Long blockNumber) { SegmentedKeyValueStorageTransaction tx = 
composedWorldStateStorage.startTransaction(); tx.put( - ACCOUNT_INFO_STATE_FREEZER, - ARCHIVE_BLOCKS_FROZEN, + ACCOUNT_INFO_STATE_ARCHIVE, + ARCHIVED_BLOCKS, Bytes.ofUnsignedLong(blockNumber).toArrayUnsafe()); tx.commit(); } diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java index 04a6ff2722a..782b1a78676 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java @@ -83,7 +83,7 @@ synchronized FlatDbMode deriveFlatDbStrategy( dataStorageConfiguration.getUnstable().getBonsaiFullFlatDbEnabled() ? (dataStorageConfiguration .getDataStorageFormat() - .equals(DataStorageFormat.BONSAI_ARCHIVE) + .equals(DataStorageFormat.X_BONSAI_ARCHIVE) ? FlatDbMode.ARCHIVE : FlatDbMode.FULL) : FlatDbMode.PARTIAL; @@ -167,7 +167,7 @@ public void upgradeToFullFlatDbMode(final SegmentedKeyValueStorage composedWorld transaction.put( TRIE_BRANCH_STORAGE, FLAT_DB_MODE, FlatDbMode.FULL.getVersion().toArrayUnsafe()); } else if (dataStorageConfiguration.getDataStorageFormat() - == DataStorageFormat.BONSAI_ARCHIVE) { + == DataStorageFormat.X_BONSAI_ARCHIVE) { LOG.info("setting FlatDbStrategy to ARCHIVE"); transaction.put( TRIE_BRANCH_STORAGE, FLAT_DB_MODE, FlatDbMode.ARCHIVE.getVersion().toArrayUnsafe()); diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/DataStorageConfiguration.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/DataStorageConfiguration.java index 48f8cf01737..67c031918be 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/DataStorageConfiguration.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/DataStorageConfiguration.java @@ -43,7 +43,7 @@ public interface DataStorageConfiguration { DataStorageConfiguration DEFAULT_BONSAI_ARCHIVE_CONFIG = ImmutableDataStorageConfiguration.builder() - .dataStorageFormat(DataStorageFormat.BONSAI_ARCHIVE) + .dataStorageFormat(DataStorageFormat.X_BONSAI_ARCHIVE) .bonsaiMaxLayersToLoad(DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD) .build(); diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/WorldStateStorageCoordinator.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/WorldStateStorageCoordinator.java index 41a6fea3743..ebea4628edc 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/WorldStateStorageCoordinator.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/WorldStateStorageCoordinator.java @@ -86,7 +86,7 @@ public boolean isMatchingFlatMode(final FlatDbMode flatDbMode) { public void applyOnMatchingFlatMode( final FlatDbMode flatDbMode, final Consumer onStrategy) { applyOnMatchingStrategies( - List.of(DataStorageFormat.BONSAI, DataStorageFormat.BONSAI_ARCHIVE), + List.of(DataStorageFormat.BONSAI, DataStorageFormat.X_BONSAI_ARCHIVE), worldStateKeyValueStorage -> { final BonsaiWorldStateKeyValueStorage bonsaiWorldStateStorageStrategy = (BonsaiWorldStateKeyValueStorage) worldStateKeyValueStorage(); @@ -98,7 +98,7 @@ public void applyOnMatchingFlatMode( public void applyWhenFlatModeEnabled(final Consumer onStrategy) { applyOnMatchingStrategies( - 
List.of(DataStorageFormat.BONSAI, DataStorageFormat.BONSAI_ARCHIVE), + List.of(DataStorageFormat.BONSAI, DataStorageFormat.X_BONSAI_ARCHIVE), worldStateKeyValueStorage -> { final BonsaiWorldStateKeyValueStorage bonsaiWorldStateStorageStrategy = (BonsaiWorldStateKeyValueStorage) worldStateKeyValueStorage(); diff --git a/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/BonsaiWorldStateKeyValueStorageTest.java b/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/BonsaiWorldStateKeyValueStorageTest.java index 3458525b2fe..f2a8e6d6bfa 100644 --- a/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/BonsaiWorldStateKeyValueStorageTest.java +++ b/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/BonsaiWorldStateKeyValueStorageTest.java @@ -855,7 +855,7 @@ private BonsaiWorldStateKeyValueStorage emptyArchiveStorage(final boolean useCod new InMemoryKeyValueStorageProvider(), new NoOpMetricsSystem(), ImmutableDataStorageConfiguration.builder() - .dataStorageFormat(DataStorageFormat.BONSAI_ARCHIVE) + .dataStorageFormat(DataStorageFormat.X_BONSAI_ARCHIVE) .bonsaiMaxLayersToLoad(DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD) .unstable( ImmutableDataStorageConfiguration.Unstable.builder() diff --git a/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProviderTest.java b/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProviderTest.java index 12ee5b1ef07..80c8c8b7be0 100644 --- a/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProviderTest.java +++ b/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProviderTest.java @@ -128,7 +128,7 @@ void emptyDbCreatesFlatDbStrategyUsingCodeByHashConfig(final boolean codeByHashE void emptyDbCreatesArchiveFlatDbStrategyUsingCodeByHashConfig(final boolean codeByHashEnabled) { final DataStorageConfiguration dataStorageConfiguration = ImmutableDataStorageConfiguration.builder() - .dataStorageFormat(DataStorageFormat.BONSAI_ARCHIVE) + .dataStorageFormat(DataStorageFormat.X_BONSAI_ARCHIVE) .bonsaiMaxLayersToLoad(DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD) .unstable( ImmutableDataStorageConfiguration.Unstable.builder() @@ -183,7 +183,7 @@ void existingAccountHashDbUsesAccountHash(final boolean codeByHashEnabled) { void existingAccountHashArchiveDbUsesAccountHash(final boolean codeByHashEnabled) { final DataStorageConfiguration dataStorageConfiguration = ImmutableDataStorageConfiguration.builder() - .dataStorageFormat(DataStorageFormat.BONSAI_ARCHIVE) + .dataStorageFormat(DataStorageFormat.X_BONSAI_ARCHIVE) .bonsaiMaxLayersToLoad(DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD) .unstable( ImmutableDataStorageConfiguration.Unstable.builder() @@ -242,7 +242,7 @@ void existingCodeHashDbUsesCodeHash(final boolean codeByHashEnabled) { void existingCodeHashArchiveDbUsesCodeHash(final boolean codeByHashEnabled) { final DataStorageConfiguration dataStorageConfiguration = ImmutableDataStorageConfiguration.builder() - .dataStorageFormat(DataStorageFormat.BONSAI_ARCHIVE) + .dataStorageFormat(DataStorageFormat.X_BONSAI_ARCHIVE) .bonsaiMaxLayersToLoad(DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD) .unstable( ImmutableDataStorageConfiguration.Unstable.builder() diff --git 
a/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/trielog/ArchiveFreezerTests.java b/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/trielog/ArchiverTests.java similarity index 82% rename from ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/trielog/ArchiveFreezerTests.java rename to ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/trielog/ArchiverTests.java index 5414bee7b9d..05456f92645 100644 --- a/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/trielog/ArchiveFreezerTests.java +++ b/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/trielog/ArchiverTests.java @@ -38,7 +38,7 @@ import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.BonsaiAccount; import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.BonsaiPreImageProxy; import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.BonsaiWorldStateKeyValueStorage; -import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.worldview.BonsaiArchiveFreezer; +import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.worldview.BonsaiArchiver; import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.worldview.BonsaiWorldState; import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration; import org.hyperledger.besu.ethereum.worldstate.FlatDbMode; @@ -64,10 +64,10 @@ import org.junit.jupiter.api.Test; import org.mockito.Mockito; -public class ArchiveFreezerTests { +public class ArchiverTests { // Number of blocks in the chain. This is different to the number of blocks - // we have successfully frozen state for + // we have successfully archived state for static final long SHORT_TEST_CHAIN_HEIGHT = 151; static final long LONG_TEST_CHAIN_HEIGHT = 2001; // We want block 2000 to be returned so set to 2001 @@ -107,7 +107,7 @@ public Optional load(final Hash blockHash) { @SuppressWarnings("BannedMethod") @BeforeEach public void setup() { - Configurator.setLevel(LogManager.getLogger(ArchiveFreezerTests.class).getName(), Level.TRACE); + Configurator.setLevel(LogManager.getLogger(ArchiverTests.class).getName(), Level.TRACE); worldStateStorage = Mockito.mock(BonsaiWorldStateKeyValueStorage.class); blockchain = Mockito.mock(Blockchain.class); trieLogManager = Mockito.mock(TrieLogManager.class); @@ -157,7 +157,7 @@ private static Optional getGeneratedBlock(final long blockNumber, final l } @Test - public void archiveFreezerLimitsInitialArchiveBlocks() { + public void archiveLimitsInitialArchiveBlocks() { blockNumberCache = CacheBuilder.newBuilder() @@ -172,46 +172,47 @@ public Optional load(final Long blockNumber) { when(worldStateStorage.getFlatDbMode()).thenReturn(FlatDbMode.ARCHIVE); - // If we had previously frozen up to block 100... - final AtomicLong frozenBlocks = new AtomicLong(100L); + // If we had previously archived up to block 100... 
+ final AtomicLong archivedBlocks = new AtomicLong(100L); // Mock the DB setter so it updates what the getter returns doAnswer( invocation -> { long thisValue = invocation.getArgument(0, Long.class); - frozenBlocks.set(thisValue); + archivedBlocks.set(thisValue); return null; }) .when(worldStateStorage) - .setLatestArchiveFrozenBlock(any(Long.class)); + .setLatestArchivedBlock(any(Long.class)); // Mock the DB getter doAnswer( invocation -> { - return Optional.of(frozenBlocks.get()); + return Optional.of(archivedBlocks.get()); }) .when(worldStateStorage) - .getLatestArchiveFrozenBlock(); + .getLatestArchivedBlock(); when(blockchain.getChainHeadBlockNumber()).thenReturn(2000L); - // When any block is asked for by the archive freezer, generate it on the fly and return it + // When any block is asked for during the test, generate it on the fly and return it // unless it is > block num 2000 when(blockchain.getBlockByNumber(anyLong())) .then( requestedBlockNumber -> blockNumberCache.getUnchecked(requestedBlockNumber.getArgument(0, Long.class))); - BonsaiArchiveFreezer archiveFreezer = - new BonsaiArchiveFreezer(worldStateStorage, blockchain, executeAsync, trieLogManager); - long caughtUpBlocks = archiveFreezer.initialize(); + BonsaiArchiver archiver = + new BonsaiArchiver( + worldStateStorage, blockchain, executeAsync, trieLogManager, new NoOpMetricsSystem()); + long caughtUpBlocks = archiver.initialize(); // Check that blocks 101 to 1990 (10 before chain head 2000) have been caught up assertThat(caughtUpBlocks).isEqualTo(1900); } @Test - public void archiveFreezerMoves1AccountStateChangeToFreezerSegment() { + public void archiverMoves1AccountStateChangeToArchiveSegment() { // Set up the block cache blockNumberCache = CacheBuilder.newBuilder() @@ -226,29 +227,29 @@ public Optional load(final Long blockNumber) { when(worldStateStorage.getFlatDbMode()).thenReturn(FlatDbMode.ARCHIVE); - // If we had previously frozen up to block 100... - final AtomicLong frozenBlocks = new AtomicLong(100L); + // If we had previously archived up to block 100... 
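// --- Illustrative sketch, not part of the patch: the stubbing pattern these tests
// repeat, a mutable AtomicLong backing a mocked setter/getter pair so later reads see
// earlier writes. The CounterSketch interface and method names are hypothetical; the
// Mockito calls mirror the doAnswer/when usage in the surrounding test. ---
interface CounterSketch {
  void set(Long value);
  java.util.Optional<Long> get();
}

static CounterSketch mockBackedCounter(final long initialValue) {
  final java.util.concurrent.atomic.AtomicLong backing =
      new java.util.concurrent.atomic.AtomicLong(initialValue);
  final CounterSketch counter = org.mockito.Mockito.mock(CounterSketch.class);
  org.mockito.Mockito.doAnswer(
          invocation -> {
            backing.set(invocation.getArgument(0, Long.class)); // setter updates the backing value
            return null;
          })
      .when(counter)
      .set(org.mockito.Mockito.any(Long.class));
  org.mockito.Mockito.when(counter.get())
      .thenAnswer(invocation -> java.util.Optional.of(backing.get())); // getter reflects it
  return counter;
}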
+ final AtomicLong archivedBlocks = new AtomicLong(100L); // Mock the DB setter so it updates what the getter returns doAnswer( invocation -> { long thisValue = invocation.getArgument(0, Long.class); - frozenBlocks.set(thisValue); + archivedBlocks.set(thisValue); return null; }) .when(worldStateStorage) - .setLatestArchiveFrozenBlock(any(Long.class)); + .setLatestArchivedBlock(any(Long.class)); // Mock the DB getter doAnswer( invocation -> { - return Optional.of(frozenBlocks.get()); + return Optional.of(archivedBlocks.get()); }) .when(worldStateStorage) - .getLatestArchiveFrozenBlock(); + .getLatestArchivedBlock(); - // Mock the number of changes the freeze action carries out for each relevant block - when(worldStateStorage.freezePreviousAccountState(any(), any())) + // Mock the number of changes the archive action carries out for each relevant block + when(worldStateStorage.archivePreviousAccountState(any(), any())) .then( request -> { Object objHeader = request.getArgument(0, Optional.class).get(); @@ -256,8 +257,8 @@ public Optional load(final Long blockNumber) { BlockHeader blockHeader = (BlockHeader) objHeader; if (blockHeader.getNumber() == 101) { // Mock 1 state change when block 102 is being processed, because state changes in - // block 101 can be frozen NB: the trie log in this test for block 102 isn't - // frozen because no further changes to that account are made + // block 101 can be archived NB: the trie log in this test for block 102 isn't + // archived because no further changes to that account are made return 1; } return 0; @@ -265,8 +266,8 @@ public Optional load(final Long blockNumber) { return 0; }); - // When any block is asked for by the archive freezer, generate it on the fly, cache it, and - // return it unless it + // When any block is asked for by the archiver during the test, generate it on the fly, cache + // it, and return it unless it exceeds the max block for the test when(blockchain.getBlockByNumber(anyLong())) .then( requestedBlockNumber -> @@ -281,8 +282,8 @@ public Optional load(final Long blockNumber) { // Generate some trie logs to return for a specific block - // Simulate an account change in block 101. This state will be frozen because block 102 updates - // the same account (see below) + // Simulate an account change in block 101. This state will be archived because block 102 + // updates the same account (see below) TrieLogLayer block101TrieLogs = new TrieLogLayer(); StateTrieAccountValue oldValue = new StateTrieAccountValue(12, Wei.fromHexString("0x123"), Hash.EMPTY, Hash.EMPTY); @@ -291,7 +292,7 @@ public Optional load(final Long blockNumber) { block101TrieLogs.addAccountChange(address, oldValue, newValue); // Simulate another change to the same account, this time in block 102. This change won't be - // frozen during the test because it is the current state of the account. + // archived during the test because it is the current state of the account. 
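// --- Illustrative sketch, not part of the patch: the rule this test exercises. When a
// block's trie log shows a change to an account, the account's superseded state from an
// earlier block becomes archivable; the entry written by the block itself stays in the
// primary segment because it is still current. Both map parameters are simplified
// hypothetical stand-ins for the trie logs and the primary segment contents. ---
static java.util.Set<String> accountsWithArchivableStateSketch(
    final java.util.Map<Long, java.util.Set<String>> changedAccountsByBlock, // per trie log
    final java.util.Map<String, Long> latestBlockInPrimary, // per account hash
    final long blockNumber) {
  final java.util.Set<String> archivable = new java.util.HashSet<>();
  for (final String account :
      changedAccountsByBlock.getOrDefault(blockNumber, java.util.Set.of())) {
    final Long previous = latestBlockInPrimary.get(account);
    // Accounts with no earlier entry produce no work; brand-new accounts have nothing to move.
    if (previous != null && previous < blockNumber) {
      archivable.add(account);
    }
  }
  return archivable;
}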
TrieLogLayer block102TrieLogs = new TrieLogLayer(); oldValue = new StateTrieAccountValue(13, Wei.fromHexString("0x234"), Hash.EMPTY, Hash.EMPTY); newValue = new StateTrieAccountValue(14, Wei.fromHexString("0x345"), Hash.EMPTY, Hash.EMPTY); @@ -305,14 +306,15 @@ public Optional load(final Long blockNumber) { "0x0d22db864d4effa62b640de645bffd44fb5d130578fbea4399f9abf8d7ac7789"))) .thenReturn(Optional.of(block102TrieLogs)); - // Initialize the archive freezer - BonsaiArchiveFreezer archiveFreezer = - new BonsaiArchiveFreezer(worldStateStorage, blockchain, executeAsync, trieLogManager); - archiveFreezer.initialize(); + // Initialize the archiver + BonsaiArchiver archiver = + new BonsaiArchiver( + worldStateStorage, blockchain, executeAsync, trieLogManager, new NoOpMetricsSystem()); + archiver.initialize(); - // Chain height is 150, we've frozen state up to block 100, we should have initialized the next - // 50 blocks to be archived - assertThat(archiveFreezer.getPendingBlocksCount()).isEqualTo(50); + // Chain height is 150, we've archived state up to block 100, we should have initialized the + // next 50 blocks to be archived + assertThat(archiver.getPendingBlocksCount()).isEqualTo(50); when(blockchain.getChainHeadBlockNumber()) .then(requestedBlockNumber -> getCurrentBlockHeight()); @@ -322,21 +324,21 @@ public Optional load(final Long blockNumber) { for (long nextBlock = 101; nextBlock < 150; nextBlock++) { currentBlockHeight = nextBlock; if (nextBlock == 112) { - archiveFreezer.addToFreezerQueue( + archiver.addToArchivingQueue( nextBlock, blockNumberCache.getUnchecked(nextBlock).get().getHash()); - int accountsMoved = archiveFreezer.moveBlockStateToFreezer(); + int accountsMoved = archiver.moveBlockStateToArchive(); assertThat(accountsMoved).isEqualTo(1); } else { - archiveFreezer.addToFreezerQueue( + archiver.addToArchivingQueue( nextBlock, blockNumberCache.getUnchecked(nextBlock).get().getHash()); - int accountsMoved = archiveFreezer.moveBlockStateToFreezer(); + int accountsMoved = archiver.moveBlockStateToArchive(); assertThat(accountsMoved).isEqualTo(0); } } } @Test - public void archiveFreezerMoves2StorageChangesToFreezerSegment() { + public void archiverMoves2StorageChangesToArchiveSegment() { // Set up the block cache blockNumberCache = CacheBuilder.newBuilder() @@ -351,29 +353,29 @@ public Optional load(final Long blockNumber) { when(worldStateStorage.getFlatDbMode()).thenReturn(FlatDbMode.ARCHIVE); - // If we had previously frozen up to block 100... - final AtomicLong frozenBlocks = new AtomicLong(100L); + // If we had previously archived up to block 100... 
+ final AtomicLong archivedBlocks = new AtomicLong(100L); // Mock the DB setter so it updates what the getter returns doAnswer( invocation -> { long thisValue = invocation.getArgument(0, Long.class); - frozenBlocks.set(thisValue); + archivedBlocks.set(thisValue); return null; }) .when(worldStateStorage) - .setLatestArchiveFrozenBlock(any(Long.class)); + .setLatestArchivedBlock(any(Long.class)); // Mock the DB getter doAnswer( invocation -> { - return Optional.of(frozenBlocks.get()); + return Optional.of(archivedBlocks.get()); }) .when(worldStateStorage) - .getLatestArchiveFrozenBlock(); + .getLatestArchivedBlock(); - // Mock the number of changes the freeze action carries out for each relevant block - when(worldStateStorage.freezePreviousStorageState(any(), any())) + // Mock the number of changes the archive action carries out for each relevant block + when(worldStateStorage.archivePreviousStorageState(any(), any())) .then( request -> { Object objHeader = request.getArgument(0, Optional.class).get(); @@ -381,9 +383,10 @@ public Optional load(final Long blockNumber) { BlockHeader blockHeader = (BlockHeader) objHeader; if (blockHeader.getNumber() == 101 || blockHeader.getNumber() == 102) { // Mock 1 state change when block 102 is being processed, because state changes in - // block 101 can be frozen (and likewise for block 103). NB: the trie log in this - // test for block 103 isn't frozen because no further changes to that storage are - // made + // block 101 can be archived (and likewise for block 103). NB: the trie log in + // this test for block 103 isn't archived because no further changes to that + // storage + // are made return 1; } return 0; @@ -391,7 +394,7 @@ public Optional load(final Long blockNumber) { return 0; }); - // When any block is asked for by the archive freezer, generate it on the fly, cache it, and + // When any block is asked for by the archiver, generate it on the fly, cache it, and // return it unless it when(blockchain.getBlockByNumber(anyLong())) .then( @@ -407,7 +410,7 @@ public Optional load(final Long blockNumber) { // Generate some trie logs to return for a specific block - // Simulate a storage change in block 101. This state will be frozen because block 102 updates + // Simulate a storage change in block 101. This state will be archived because block 102 updates // the same storage (see below) TrieLogLayer block101TrieLogs = new TrieLogLayer(); UInt256 oldValue = UInt256.ZERO; @@ -416,7 +419,7 @@ public Optional load(final Long blockNumber) { StorageSlotKey storageSlotKey = new StorageSlotKey(slot); block101TrieLogs.addStorageChange(address, storageSlotKey, oldValue, newValue); - // Simulate a storage change in block 102. This state will also be frozen because block 102 + // Simulate a storage change in block 102. This state will also be archived because block 102 // updates the same storage (see below) TrieLogLayer block102TrieLogs = new TrieLogLayer(); oldValue = UInt256.ONE; @@ -425,8 +428,8 @@ public Optional load(final Long blockNumber) { storageSlotKey = new StorageSlotKey(slot); block102TrieLogs.addStorageChange(address, storageSlotKey, oldValue, newValue); - // Simulate a storage change in block 103. This state will not be frozen because it refers to a - // different slot + // Simulate a storage change in block 103. 
This state will not be archived because it refers to + // a different slot TrieLogLayer block103TrieLogs = new TrieLogLayer(); oldValue = UInt256.ZERO; newValue = UInt256.ONE; @@ -447,26 +450,27 @@ public Optional load(final Long blockNumber) { "0x96440b533326c26f4611e4c0b123ce732aa7a68e3b275f4a5a2ea9bc4b089c73"))) .thenReturn(Optional.of(block103TrieLogs)); - // Initialize the archive freezer - BonsaiArchiveFreezer archiveFreezer = - new BonsaiArchiveFreezer(worldStateStorage, blockchain, executeAsync, trieLogManager); - archiveFreezer.initialize(); + // Initialize the archiver + BonsaiArchiver archiver = + new BonsaiArchiver( + worldStateStorage, blockchain, executeAsync, trieLogManager, new NoOpMetricsSystem()); + archiver.initialize(); - // Chain height is 150, we've frozen state up to block 100, we should have initialized the next - // 50 blocks to be archived - assertThat(archiveFreezer.getPendingBlocksCount()).isEqualTo(50); + // Chain height is 150, we've archived state up to block 100, we should have initialized the + // next 50 blocks to be archived + assertThat(archiver.getPendingBlocksCount()).isEqualTo(50); when(blockchain.getChainHeadBlockNumber()) .then(requestedBlockNumber -> getCurrentBlockHeight()); int totalStorageMoved = 0; - // Process the next 50 blocks. 2 storage changes should be frozen during this time should happen - // during this processing since there are only trie logs for blocks 101 and 102 + // Process the next 50 blocks. 2 storage changes should be archived during this time should + // happen during this processing since there are only trie logs for blocks 101 and 102 for (long nextBlock = 101; nextBlock < 150; nextBlock++) { currentBlockHeight = nextBlock; - archiveFreezer.addToFreezerQueue( + archiver.addToArchivingQueue( nextBlock, blockNumberCache.getUnchecked(nextBlock).get().getHash()); - int storageMoved = archiveFreezer.moveBlockStateToFreezer(); + int storageMoved = archiver.moveBlockStateToArchive(); totalStorageMoved += storageMoved; if (nextBlock == 112 || nextBlock == 113) { assertThat(storageMoved).isEqualTo(1); @@ -479,7 +483,7 @@ public Optional load(final Long blockNumber) { } @Test - public void archiveFreezerMoves1AccountAnd2StorageChangesToFreezerSegment() { + public void archiverMoves1AccountAnd2StorageChangesToArchiveSegment() { // Set up the block cache blockNumberCache = CacheBuilder.newBuilder() @@ -494,29 +498,29 @@ public Optional load(final Long blockNumber) { when(worldStateStorage.getFlatDbMode()).thenReturn(FlatDbMode.ARCHIVE); - // If we had previously frozen up to block 100... - final AtomicLong frozenBlocks = new AtomicLong(100L); + // If we had previously archived up to block 100... 
+ final AtomicLong archivedBlocks = new AtomicLong(100L); // Mock the DB setter so it updates what the getter returns doAnswer( invocation -> { long thisValue = invocation.getArgument(0, Long.class); - frozenBlocks.set(thisValue); + archivedBlocks.set(thisValue); return null; }) .when(worldStateStorage) - .setLatestArchiveFrozenBlock(any(Long.class)); + .setLatestArchivedBlock(any(Long.class)); // Mock the DB getter doAnswer( invocation -> { - return Optional.of(frozenBlocks.get()); + return Optional.of(archivedBlocks.get()); }) .when(worldStateStorage) - .getLatestArchiveFrozenBlock(); + .getLatestArchivedBlock(); - // Mock the number of changes the freeze action carries out for each relevant block - when(worldStateStorage.freezePreviousStorageState(any(), any())) + // Mock the number of changes the archive action carries out for each relevant block + when(worldStateStorage.archivePreviousStorageState(any(), any())) .then( request -> { Object objHeader = request.getArgument(0, Optional.class).get(); @@ -524,8 +528,9 @@ public Optional load(final Long blockNumber) { BlockHeader blockHeader = (BlockHeader) objHeader; if (blockHeader.getNumber() == 101 || blockHeader.getNumber() == 102) { // Mock 1 storage change when block 102 is being processed, because state changes - // in block 101 can be frozen (and likewise for block 103). NB: the trie log in - // this test for block 103 isn't frozen because no further changes to that storage + // in block 101 can be archived (and likewise for block 103). NB: the trie log in + // this test for block 103 isn't archived because no further changes to that + // storage // are made return 1; } @@ -533,8 +538,8 @@ public Optional load(final Long blockNumber) { return 0; }); - // Mock the number of changes the freeze action carries out for each relevant block - when(worldStateStorage.freezePreviousAccountState(any(), any())) + // Mock the number of changes the archive action carries out for each relevant block + when(worldStateStorage.archivePreviousAccountState(any(), any())) .then( request -> { Object objHeader = request.getArgument(0, Optional.class).get(); @@ -542,14 +547,14 @@ public Optional load(final Long blockNumber) { BlockHeader blockHeader = (BlockHeader) objHeader; if (blockHeader.getNumber() == 101) { // Mock 1 state change when block 102 is being processed, because state changes in - // block 101 can be frozen + // block 101 can be archived return 1; } } return 0; }); - // When any block is asked for by the archive freezer, generate it on the fly, cache it, and + // When any block is asked for by the archiver, generate it on the fly, cache it, and // return it unless it when(blockchain.getBlockByNumber(anyLong())) .then( @@ -568,7 +573,7 @@ public Optional load(final Long blockNumber) { Address address = Address.fromHexString("0x95cD8499051f7FE6a2F53749eC1e9F4a81cafa13"); // Simulate a storage change AND an account change in block 101. This state and storage will be - // frozen because block 102 updates both again (see below) + // archived because block 102 updates both again (see below) TrieLogLayer block101TrieLogs = new TrieLogLayer(); UInt256 oldStorageValue = UInt256.ZERO; UInt256 newStorageValue = UInt256.ONE; @@ -594,8 +599,8 @@ public Optional load(final Long blockNumber) { new StateTrieAccountValue(14, Wei.fromHexString("0x345"), Hash.EMPTY, Hash.EMPTY); block102TrieLogs.addAccountChange(address, oldAccountValue, newAccountValue); - // Simulate a storage change in block 103. 
This state will not be frozen because it refers to a - // different slot + // Simulate a storage change in block 103. This state will not be archived because it refers to + // a different slot TrieLogLayer block103TrieLogs = new TrieLogLayer(); oldStorageValue = UInt256.ZERO; newStorageValue = UInt256.ONE; @@ -616,26 +621,27 @@ public Optional load(final Long blockNumber) { "0x96440b533326c26f4611e4c0b123ce732aa7a68e3b275f4a5a2ea9bc4b089c73"))) .thenReturn(Optional.of(block103TrieLogs)); - // Initialize the archive freezer - BonsaiArchiveFreezer archiveFreezer = - new BonsaiArchiveFreezer(worldStateStorage, blockchain, executeAsync, trieLogManager); - archiveFreezer.initialize(); + // Initialize the archiver + BonsaiArchiver archiver = + new BonsaiArchiver( + worldStateStorage, blockchain, executeAsync, trieLogManager, new NoOpMetricsSystem()); + archiver.initialize(); - // Chain height is 150, we've frozen state up to block 100, we should have initialized the next - // 50 blocks to be archived - assertThat(archiveFreezer.getPendingBlocksCount()).isEqualTo(50); + // Chain height is 150, we've archived state up to block 100, we should have initialized the + // next 50 blocks to be archived + assertThat(archiver.getPendingBlocksCount()).isEqualTo(50); when(blockchain.getChainHeadBlockNumber()) .then(requestedBlockNumber -> getCurrentBlockHeight()); int totalStorageMoved = 0; - // Process the next 50 blocks. 2 storage changes should be frozen during this time should happen - // during this processing since there are only trie logs for blocks 101 and 102 + // Process the next 50 blocks. 2 storage changes should be archived during this time should + // happen during this processing since there are only trie logs for blocks 101 and 102 for (long nextBlock = 101; nextBlock < 150; nextBlock++) { currentBlockHeight = nextBlock; - archiveFreezer.addToFreezerQueue( + archiver.addToArchivingQueue( nextBlock, blockNumberCache.getUnchecked(nextBlock).get().getHash()); - int storageAndAccountsMoved = archiveFreezer.moveBlockStateToFreezer(); + int storageAndAccountsMoved = archiver.moveBlockStateToArchive(); if (nextBlock == 112) { assertThat(storageAndAccountsMoved).isEqualTo(2); } else if (nextBlock == 113) { @@ -650,7 +656,7 @@ public Optional load(final Long blockNumber) { } @Test - public void archiveFreezerInMemoryDBFreezesAccountStateCorrectly() { + public void archiveInMemoryDBArchivesAccountStateCorrectly() { final BonsaiPreImageProxy preImageProxy = new BonsaiPreImageProxy.BonsaiReferenceTestPreImageProxy(); @@ -667,7 +673,7 @@ public void archiveFreezerInMemoryDBFreezesAccountStateCorrectly() { // Assume we've archived up to block 150L i.e. 
we're up to date with the chain head // (SHORT_TEST_CHAIN_HEIGHT) - testWorldStateStorage.setLatestArchiveFrozenBlock(150L); + testWorldStateStorage.setLatestArchivedBlock(150L); // Set up the block cache blockNumberCache = @@ -681,7 +687,7 @@ public Optional load(final Long blockNumber) { } }); - // When any block is asked for by the archive freezer, generate it on the fly, cache it, and + // When any block is asked for by the archiver, generate it on the fly, cache it, and // return it unless it when(blockchain.getBlockByNumber(anyLong())) .then( @@ -698,8 +704,7 @@ public Optional load(final Long blockNumber) { // Generate some trie logs to return for a specific block // For state to be moved from the primary DB segment to the archive DB segment, we need the - // primary DB segment - // to have the account in already + // primary DB segment to have the account in already SegmentedKeyValueStorageTransaction tx = testWorldStateStorage.getComposedWorldStateStorage().startTransaction(); final BonsaiAccount block150Account = @@ -760,8 +765,8 @@ public Optional load(final Long blockNumber) { out.encoded().toArrayUnsafe()); tx.commit(); - // Simulate an account change in block 151. This state will be frozen because block 152 updates - // the same account (see below) + // Simulate an account change in block 151. This state will be archived because block 152 + // updates the same account (see below) TrieLogLayer block151TrieLogs = new TrieLogLayer(); StateTrieAccountValue oldValue = new StateTrieAccountValue(12, Wei.fromHexString("0x123"), Hash.EMPTY, Hash.EMPTY); @@ -770,7 +775,7 @@ public Optional load(final Long blockNumber) { block151TrieLogs.addAccountChange(address, oldValue, newValue); // Simulate another change to the same account, this time in block 152. This change won't be - // frozen during the test because it is the current state of the account. + // archived during the test because it is the current state of the account. TrieLogLayer block152TrieLogs = new TrieLogLayer(); oldValue = new StateTrieAccountValue(13, Wei.fromHexString("0x234"), Hash.EMPTY, Hash.EMPTY); newValue = new StateTrieAccountValue(14, Wei.fromHexString("0x345"), Hash.EMPTY, Hash.EMPTY); @@ -784,16 +789,21 @@ public Optional load(final Long blockNumber) { "0x8d6a523f547ee224ba533b34034a3056838f2dab3daf0ffbf75713daf18bf885"))) // Block 152 .thenReturn(Optional.of(block152TrieLogs)); - // Initialize the archive freezer - BonsaiArchiveFreezer archiveFreezer = - new BonsaiArchiveFreezer(testWorldStateStorage, blockchain, executeAsync, trieLogManager); - archiveFreezer.initialize(); - - // Chain height is 150, we've frozen state up to block 150 + // Initialize the archiver + BonsaiArchiver archiver = + new BonsaiArchiver( + testWorldStateStorage, + blockchain, + executeAsync, + trieLogManager, + new NoOpMetricsSystem()); + archiver.initialize(); + + // Chain height is 150, we've archived state up to block 150 currentBlockHeight = SHORT_TEST_CHAIN_HEIGHT; when(blockchain.getChainHeadBlockNumber()) .then(requestedBlockNumber -> getCurrentBlockHeight()); - assertThat(archiveFreezer.getPendingBlocksCount()).isEqualTo(0); + assertThat(archiver.getPendingBlocksCount()).isEqualTo(0); // Process the next 50 blocks 150-200 and count the archive changes. 
We'll recreate the // block cache so we can generate blocks beyond 150 @@ -826,16 +836,16 @@ public Optional load(final Hash blockHash) { } }); - // By default we freeze state for chainheight - 10 blocks, so importing up to block 210 whould + // By default we archive state for chainheight - 10 blocks, so importing up to block 210 whould // cause blocks up to 200 to be archived for (long nextBlock = 151; nextBlock <= 210; nextBlock++) { currentBlockHeight = nextBlock; - archiveFreezer.onBlockAdded( + archiver.onBlockAdded( BlockAddedEvent.createForStoredOnly(blockNumberCache.getUnchecked(nextBlock).get())); } // We should have marked up to block 200 as archived - assertThat(testWorldStateStorage.getLatestArchiveFrozenBlock().get()).isEqualTo(200); + assertThat(testWorldStateStorage.getLatestArchivedBlock().get()).isEqualTo(200); // Only the latest/current state of the account should be in the primary DB segment assertThat( @@ -844,11 +854,11 @@ public Optional load(final Hash blockHash) { .count()) .isEqualTo(1); - // Both the previous account states should be in the freezer segment, plus the special key that - // records the latest frozen block + // Both the previous account states should be in the archive segment, plus the special key that + // records the latest archived block assertThat( testWorldStateStorage.getComposedWorldStateStorage().stream( - KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE_FREEZER) + KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE_ARCHIVE) .count()) .isEqualTo(3); @@ -857,7 +867,7 @@ public Optional load(final Hash blockHash) { testWorldStateStorage .getComposedWorldStateStorage() .containsKey( - KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE_FREEZER, + KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE_ARCHIVE, Arrays.concatenate( address.addressHash().toArrayUnsafe(), Bytes.fromHexString("0x0000000000000096").toArrayUnsafe()))) @@ -866,7 +876,7 @@ public Optional load(final Hash blockHash) { testWorldStateStorage .getComposedWorldStateStorage() .containsKey( - KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE_FREEZER, + KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE_ARCHIVE, Arrays.concatenate( address.addressHash().toArrayUnsafe(), Bytes.fromHexString("0x0000000000000097").toArrayUnsafe()))) @@ -883,7 +893,7 @@ public Optional load(final Hash blockHash) { } @Test - public void archiveFreezerInMemoryDBFreezesStorageStateCorrectly() { + public void archiverInMemoryDBArchivesStorageStateCorrectly() { final BonsaiPreImageProxy preImageProxy = new BonsaiPreImageProxy.BonsaiReferenceTestPreImageProxy(); @@ -900,7 +910,7 @@ public void archiveFreezerInMemoryDBFreezesStorageStateCorrectly() { // Assume we've archived up to block 150L i.e. we're up to date with the chain head // (SHORT_TEST_CHAIN_HEIGHT) - testWorldStateStorage.setLatestArchiveFrozenBlock(150L); + testWorldStateStorage.setLatestArchivedBlock(150L); // Set up the block cache blockNumberCache = @@ -914,7 +924,7 @@ public Optional load(final Long blockNumber) { } }); - // When any block is asked for by the archive freezer, generate it on the fly, cache it, and + // When any block is asked for by the archiver, generate it on the fly, cache it, and // return it unless it when(blockchain.getBlockByNumber(anyLong())) .then( @@ -967,7 +977,7 @@ public Optional load(final Long blockNumber) { Bytes.fromHexString("0x0456").toArrayUnsafe()); tx.commit(); - // Simulate a storage change in block 151. This state will be frozen because block 152 updates + // Simulate a storage change in block 151. 
This state will be archived because block 152 updates // the same storage (see below) TrieLogLayer block151TrieLogs = new TrieLogLayer(); UInt256 oldValue = UInt256.fromHexString("0x123"); @@ -976,7 +986,7 @@ public Optional load(final Long blockNumber) { StorageSlotKey storageSlotKey = new StorageSlotKey(slot); block151TrieLogs.addStorageChange(address, storageSlotKey, oldValue, newValue); - // Simulate a storage change in block 152. This state will also be frozen because block 152 + // Simulate a storage change in block 152. This state will also be archived because block 152 // updates the same storage (see below) TrieLogLayer block152TrieLogs = new TrieLogLayer(); oldValue = UInt256.fromHexString("0x234"); @@ -985,7 +995,8 @@ public Optional load(final Long blockNumber) { storageSlotKey = new StorageSlotKey(slot); block152TrieLogs.addStorageChange(address, storageSlotKey, oldValue, newValue); - // Simulate a storage change in block 153. This state will not be frozen because it refers to a + // Simulate a storage change in block 153. This state will not be archived because it refers to + // a // different slot TrieLogLayer block153TrieLogs = new TrieLogLayer(); oldValue = UInt256.fromHexString("0x345"); @@ -1007,16 +1018,21 @@ public Optional load(final Long blockNumber) { "0xffce5e5e58cc2737a50076e4dce8c7c715968b98a52942dc2072df4b6941d1ca"))) // Block 153 .thenReturn(Optional.of(block153TrieLogs)); - // Initialize the archive freezer - BonsaiArchiveFreezer archiveFreezer = - new BonsaiArchiveFreezer(testWorldStateStorage, blockchain, executeAsync, trieLogManager); - archiveFreezer.initialize(); - - // Chain height is 150, we've frozen state up to block 150 + // Initialize the archiver + BonsaiArchiver archiver = + new BonsaiArchiver( + testWorldStateStorage, + blockchain, + executeAsync, + trieLogManager, + new NoOpMetricsSystem()); + archiver.initialize(); + + // Chain height is 150, we've archived state up to block 150 currentBlockHeight = SHORT_TEST_CHAIN_HEIGHT; when(blockchain.getChainHeadBlockNumber()) .then(requestedBlockNumber -> getCurrentBlockHeight()); - assertThat(archiveFreezer.getPendingBlocksCount()).isEqualTo(0); + assertThat(archiver.getPendingBlocksCount()).isEqualTo(0); // Process the next 50 blocks 150-200 and count the archive changes. 
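// --- Illustrative sketch, not part of the patch: the composite key shape the storage
// assertions further below rely on, assumed to be accountHash || slotHash || 8-byte
// big-endian block number, so all versions of one slot sort together and the suffix
// orders them by block. The helper name is hypothetical. ---
static byte[] storageArchiveKeySketch(
    final byte[] accountHash, final byte[] slotHash, final long blockNumber) {
  final java.nio.ByteBuffer key =
      java.nio.ByteBuffer.allocate(accountHash.length + slotHash.length + Long.BYTES);
  key.put(accountHash);
  key.put(slotHash);
  key.putLong(blockNumber); // big-endian, e.g. block 150 -> 0x0000000000000096
  return key.array();
}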
We'll recreate the // block cache so we can generate blocks beyond 150 @@ -1049,16 +1065,16 @@ public Optional load(final Hash blockHash) { } }); - // By default we freeze state for chainheight - 10 blocks, so importing up to block 210 whould + // By default we archive state for chainheight - 10 blocks, so importing up to block 210 whould // cause blocks up to 200 to be archived for (long nextBlock = 151; nextBlock <= 210; nextBlock++) { currentBlockHeight = nextBlock; - archiveFreezer.onBlockAdded( + archiver.onBlockAdded( BlockAddedEvent.createForStoredOnly(blockNumberCache.getUnchecked(nextBlock).get())); } // We should have marked up to block 200 as archived - assertThat(testWorldStateStorage.getLatestArchiveFrozenBlock().get()).isEqualTo(200); + assertThat(testWorldStateStorage.getLatestArchivedBlock().get()).isEqualTo(200); // Only the latest/current state of the account should be in the primary DB segment assertThat( @@ -1067,10 +1083,10 @@ public Optional load(final Hash blockHash) { .count()) .isEqualTo(1); - // All 3 previous storage states should be in the storage freezer + // All 3 previous storage states should be in the storage archiver assertThat( testWorldStateStorage.getComposedWorldStateStorage().stream( - KeyValueSegmentIdentifier.ACCOUNT_STORAGE_FREEZER) + KeyValueSegmentIdentifier.ACCOUNT_STORAGE_ARCHIVE) .count()) .isEqualTo(3); @@ -1079,7 +1095,7 @@ public Optional load(final Hash blockHash) { testWorldStateStorage .getComposedWorldStateStorage() .containsKey( - KeyValueSegmentIdentifier.ACCOUNT_STORAGE_FREEZER, + KeyValueSegmentIdentifier.ACCOUNT_STORAGE_ARCHIVE, Arrays.concatenate( address.addressHash().toArrayUnsafe(), slotKey.getSlotHash().toArrayUnsafe(), @@ -1089,7 +1105,7 @@ public Optional load(final Hash blockHash) { testWorldStateStorage .getComposedWorldStateStorage() .containsKey( - KeyValueSegmentIdentifier.ACCOUNT_STORAGE_FREEZER, + KeyValueSegmentIdentifier.ACCOUNT_STORAGE_ARCHIVE, Arrays.concatenate( address.addressHash().toArrayUnsafe(), slotKey.getSlotHash().toArrayUnsafe(), @@ -1099,7 +1115,7 @@ public Optional load(final Hash blockHash) { testWorldStateStorage .getComposedWorldStateStorage() .containsKey( - KeyValueSegmentIdentifier.ACCOUNT_STORAGE_FREEZER, + KeyValueSegmentIdentifier.ACCOUNT_STORAGE_ARCHIVE, Arrays.concatenate( address.addressHash().toArrayUnsafe(), slotKey.getSlotHash().toArrayUnsafe(), diff --git a/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/FastSyncDownloader.java b/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/FastSyncDownloader.java index ad2ebe43a9f..afdd7b6bdf5 100644 --- a/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/FastSyncDownloader.java +++ b/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/FastSyncDownloader.java @@ -92,7 +92,7 @@ public CompletableFuture start() { protected CompletableFuture start(final FastSyncState fastSyncState) { worldStateStorageCoordinator.applyOnMatchingStrategies( - List.of(DataStorageFormat.BONSAI, DataStorageFormat.BONSAI_ARCHIVE), + List.of(DataStorageFormat.BONSAI, DataStorageFormat.X_BONSAI_ARCHIVE), worldStateKeyValueStorage -> { BonsaiWorldStateKeyValueStorage onBonsai = (BonsaiWorldStateKeyValueStorage) worldStateKeyValueStorage; diff --git a/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldDownloadState.java b/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldDownloadState.java index 
aff1d373992..01d4d00a77b 100644 --- a/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldDownloadState.java +++ b/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldDownloadState.java @@ -259,7 +259,7 @@ public synchronized void startTrieHeal() { public synchronized void reloadTrieHeal() { // Clear the flat database and trie log from the world state storage if needed worldStateStorageCoordinator.applyOnMatchingStrategies( - List.of(DataStorageFormat.BONSAI, DataStorageFormat.BONSAI_ARCHIVE), + List.of(DataStorageFormat.BONSAI, DataStorageFormat.X_BONSAI_ARCHIVE), worldStateKeyValueStorage -> { final BonsaiWorldStateKeyValueStorage strategy = worldStateStorageCoordinator.getStrategy(BonsaiWorldStateKeyValueStorage.class); diff --git a/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldStateDownloader.java b/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldStateDownloader.java index da0eea49a91..bf474020bf5 100644 --- a/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldStateDownloader.java +++ b/ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldStateDownloader.java @@ -175,7 +175,7 @@ public CompletableFuture run( } else if (!snapContext.getAccountsHealingList().isEmpty()) { // restart only the heal step snapSyncState.setHealTrieStatus(true); worldStateStorageCoordinator.applyOnMatchingStrategies( - List.of(DataStorageFormat.BONSAI, DataStorageFormat.BONSAI_ARCHIVE), + List.of(DataStorageFormat.BONSAI, DataStorageFormat.X_BONSAI_ARCHIVE), strategy -> { BonsaiWorldStateKeyValueStorage onBonsai = (BonsaiWorldStateKeyValueStorage) strategy; onBonsai.clearFlatDatabase(); diff --git a/plugin-api/src/main/java/org/hyperledger/besu/plugin/services/storage/DataStorageFormat.java b/plugin-api/src/main/java/org/hyperledger/besu/plugin/services/storage/DataStorageFormat.java index b5efbc38127..5eda64bae62 100644 --- a/plugin-api/src/main/java/org/hyperledger/besu/plugin/services/storage/DataStorageFormat.java +++ b/plugin-api/src/main/java/org/hyperledger/besu/plugin/services/storage/DataStorageFormat.java @@ -20,10 +20,10 @@ public enum DataStorageFormat { FOREST, /** New format. Store one trie, and trie logs to roll forward and backward */ BONSAI, - /** The new option for storing archive data e.g. state at any block */ - BONSAI_ARCHIVE; + /** The option for storing archive data e.g. 
state at any block */ + X_BONSAI_ARCHIVE; public boolean isBonsaiFormat() { - return this == BONSAI || this == BONSAI_ARCHIVE; + return this == BONSAI || this == X_BONSAI_ARCHIVE; } } diff --git a/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/RocksDBKeyValueStorageFactory.java b/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/RocksDBKeyValueStorageFactory.java index 4a3a08ebe0a..ee0ec25c863 100644 --- a/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/RocksDBKeyValueStorageFactory.java +++ b/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/RocksDBKeyValueStorageFactory.java @@ -164,7 +164,7 @@ public SegmentedKeyValueStorage create( metricsSystem, rocksDBMetricsFactory); } - case BONSAI, BONSAI_ARCHIVE -> { + case BONSAI, X_BONSAI_ARCHIVE -> { LOG.debug("BONSAI mode detected, Using OptimisticTransactionDB."); segmentedStorage = new OptimisticRocksDBColumnarKeyValueStorage( diff --git a/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/BaseVersionedStorageFormat.java b/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/BaseVersionedStorageFormat.java index 52e8cfe4aee..09cc97a1fbb 100644 --- a/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/BaseVersionedStorageFormat.java +++ b/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/BaseVersionedStorageFormat.java @@ -49,12 +49,12 @@ public enum BaseVersionedStorageFormat implements VersionedStorageFormat { * Current Bonsai version, with blockchain variables in a dedicated column family, in order to * make BlobDB more effective */ - BONSAI_ARCHIVE_WITH_VARIABLES(DataStorageFormat.BONSAI_ARCHIVE, 1), + BONSAI_ARCHIVE_WITH_VARIABLES(DataStorageFormat.X_BONSAI_ARCHIVE, 1), /** * Current Bonsai version, with receipts using compaction, in order to make Receipts use less disk * space */ - BONSAI_ARCHIVE_WITH_RECEIPT_COMPACTION(DataStorageFormat.BONSAI_ARCHIVE, 2); + BONSAI_ARCHIVE_WITH_RECEIPT_COMPACTION(DataStorageFormat.X_BONSAI_ARCHIVE, 2); private final DataStorageFormat format; private final int version; @@ -75,7 +75,7 @@ public static BaseVersionedStorageFormat defaultForNewDB( return switch (configuration.getDatabaseFormat()) { case FOREST -> FOREST_WITH_RECEIPT_COMPACTION; case BONSAI -> BONSAI_WITH_RECEIPT_COMPACTION; - case BONSAI_ARCHIVE -> BONSAI_ARCHIVE_WITH_RECEIPT_COMPACTION; + case X_BONSAI_ARCHIVE -> BONSAI_ARCHIVE_WITH_RECEIPT_COMPACTION; }; } diff --git a/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/DatabaseMetadata.java b/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/DatabaseMetadata.java index 0e7a96578fb..b44412013f5 100644 --- a/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/DatabaseMetadata.java +++ b/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/DatabaseMetadata.java @@ -239,7 +239,7 @@ public DatabaseMetadata upgradeToPrivacy() { "Unsupported database with format FOREST and version " + versionedStorageFormat.getVersion()); }; - case BONSAI, BONSAI_ARCHIVE -> + case BONSAI, X_BONSAI_ARCHIVE -> switch (versionedStorageFormat.getVersion()) { case 1 -> PrivacyVersionedStorageFormat.BONSAI_ORIGINAL; case 2 -> 
PrivacyVersionedStorageFormat.BONSAI_WITH_VARIABLES; diff --git a/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/PrivacyVersionedStorageFormat.java b/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/PrivacyVersionedStorageFormat.java index fed089b8cea..e373cd96f2e 100644 --- a/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/PrivacyVersionedStorageFormat.java +++ b/plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/PrivacyVersionedStorageFormat.java @@ -72,7 +72,7 @@ public static VersionedStorageFormat defaultForNewDB( return switch (configuration.getDatabaseFormat()) { case FOREST -> FOREST_WITH_RECEIPT_COMPACTION; case BONSAI -> BONSAI_WITH_RECEIPT_COMPACTION; - case BONSAI_ARCHIVE -> BONSAI_ARCHIVE_WITH_RECEIPT_COMPACTION; + case X_BONSAI_ARCHIVE -> BONSAI_ARCHIVE_WITH_RECEIPT_COMPACTION; }; } diff --git a/plugins/rocksdb/src/test/java/org/hyperledger/besu/plugin/services/storage/rocksdb/Utils.java b/plugins/rocksdb/src/test/java/org/hyperledger/besu/plugin/services/storage/rocksdb/Utils.java index 6d96045e5a5..e947b2635a1 100644 --- a/plugins/rocksdb/src/test/java/org/hyperledger/besu/plugin/services/storage/rocksdb/Utils.java +++ b/plugins/rocksdb/src/test/java/org/hyperledger/besu/plugin/services/storage/rocksdb/Utils.java @@ -86,7 +86,7 @@ private static int dataStorageFormatToV1(final DataStorageFormat dataStorageForm return switch (dataStorageFormat) { case FOREST -> 1; case BONSAI -> 2; - case BONSAI_ARCHIVE -> 3; + case X_BONSAI_ARCHIVE -> 3; }; } } From 35d60dd25aae2c4b568278fcc68f2604ed07725e Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Tue, 8 Oct 2024 08:43:15 +0100 Subject: [PATCH 25/39] Add changelog entry Signed-off-by: Matthew Whitehead --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5bcbfa51b7f..cbb8fc55b3c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ - Expose chainId in the `BlockchainService` [7702](https://github.com/hyperledger/besu/pull/7702) - Use head block instead of safe block for snap sync [7536](https://github.com/hyperledger/besu/issues/7536) - Add support for `chainId` in `CallParameters` [#7720](https://github.com/hyperledger/besu/pull/7720) +- Experimental Bonsai Archive support [#7475](https://github.com/hyperledger/besu/pull/7475) ### Bug fixes - Fix mounted data path directory permissions for besu user [#7575](https://github.com/hyperledger/besu/pull/7575) From b8facef02afe4d507f0ca3b2c8aee7e46bd8f385 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Tue, 8 Oct 2024 09:07:25 +0100 Subject: [PATCH 26/39] Don't store every block that is waiting to be archived Signed-off-by: Matthew Whitehead --- .../bonsai/worldview/BonsaiArchiver.java | 112 +++++------------- 1 file changed, 28 insertions(+), 84 deletions(-) diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiver.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiver.java index 5e596fa6fbe..08e01dff357 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiver.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiver.java @@ -18,7 +18,6 @@ import org.hyperledger.besu.ethereum.chain.BlockAddedEvent; 
import org.hyperledger.besu.ethereum.chain.BlockAddedObserver; import org.hyperledger.besu.ethereum.chain.Blockchain; -import org.hyperledger.besu.ethereum.core.Block; import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.DiffBasedWorldStateKeyValueStorage; import org.hyperledger.besu.ethereum.trie.diffbased.common.trielog.TrieLogManager; import org.hyperledger.besu.metrics.BesuMetricCategory; @@ -26,8 +25,6 @@ import org.hyperledger.besu.plugin.services.metrics.Counter; import org.hyperledger.besu.plugin.services.trielogs.TrieLog; -import java.util.Collections; -import java.util.Map; import java.util.Optional; import java.util.SortedMap; import java.util.TreeMap; @@ -61,9 +58,6 @@ public class BonsaiArchiver implements BlockAddedObserver { protected final MetricsSystem metricsSystem; protected final Counter archivedBlocksCounter; - private final Map pendingBlocksToArchive = - Collections.synchronizedMap(new TreeMap<>()); - // For logging progress. Saves doing a DB read just to record our progress final AtomicLong latestArchivedBlock = new AtomicLong(0); @@ -86,80 +80,26 @@ public BonsaiArchiver( "Total number of blocks for which state has been archived"); } - private int loadNextCatchupBlocks() { - Optional archivedBlocksHead = Optional.empty(); - - Optional latestArchivedBlock = rootWorldStateStorage.getLatestArchivedBlock(); - - if (latestArchivedBlock.isPresent()) { - // Start from the next block after the most recently archived block - archivedBlocksHead = Optional.of(latestArchivedBlock.get() + 1); - } else { - // Start from genesis block - if (blockchain.getBlockHashByNumber(0).isPresent()) { - archivedBlocksHead = Optional.of(0L); - } - } - - int preLoadedBlocks = 0; - if (archivedBlocksHead.isPresent()) { - Optional nextBlock = blockchain.getBlockByNumber(archivedBlocksHead.get()); - for (int i = 0; i < CATCHUP_LIMIT; i++) { - if (nextBlock.isPresent()) { - addToArchivingQueue( - nextBlock.get().getHeader().getNumber(), nextBlock.get().getHeader().getHash()); - preLoadedBlocks++; - nextBlock = blockchain.getBlockByNumber(nextBlock.get().getHeader().getNumber() + 1); - } else { - break; - } - } - LOG.atInfo() - .setMessage("Preloaded {} blocks from {} to move their state and storage to the archive") - .addArgument(preLoadedBlocks) - .addArgument(archivedBlocksHead.get()) - .log(); - } - return preLoadedBlocks; - } - public long initialize() { // On startup there will be recent blocks whose state and storage hasn't been archived yet. - // Pre-load them in blocks of CATCHUP_LIMIT ready for archiving state once enough new blocks - // have - // been added to the chain. - long totalBlocksCaughtUp = 0; - int catchupBlocksLoaded = CATCHUP_LIMIT; - while (catchupBlocksLoaded >= CATCHUP_LIMIT) { - catchupBlocksLoaded = loadNextCatchupBlocks(); + // Start archiving them straight away to catch up with the chain head. 
+ // long totalBlocksCaughtUp = 0; + latestArchivedBlock.set(rootWorldStateStorage.getLatestArchivedBlock().orElse(0L)); + long startingBlock = latestArchivedBlock.get(); + while (blockchain.getChainHeadBlockNumber() - latestArchivedBlock.get() + > DISTANCE_FROM_HEAD_BEFORE_ARCHIVING_OLD_STATE) { moveBlockStateToArchive(); - totalBlocksCaughtUp += catchupBlocksLoaded; } - return totalBlocksCaughtUp; + return latestArchivedBlock.get() - startingBlock; } - public int getPendingBlocksCount() { - return pendingBlocksToArchive.size(); - } - - public synchronized void addToArchivingQueue(final long blockNumber, final Hash blockHash) { - LOG.atDebug() - .setMessage( - "Adding block to archiving queue for moving to cold storage, blockNumber {}; blockHash {}") - .addArgument(blockNumber) - .addArgument(blockHash) - .log(); - pendingBlocksToArchive.put(blockNumber, blockHash); - } - - private synchronized void removeArchivedFromQueue(final Map archivedBlocks) { - archivedBlocks.keySet().forEach(e -> pendingBlocksToArchive.remove(e)); + public long getPendingBlocksCount() { + return blockchain.getChainHeadBlockNumber() - latestArchivedBlock.get(); } // Move state and storage entries from their primary DB segments to their archive DB segments. - // This is - // intended to maintain good performance for new block imports by keeping the primary DB segments - // to live state only. Returns the number of state and storage entries moved. + // This is intended to maintain good performance for new block imports by keeping the primary + // DB segments to live state only. Returns the number of state and storage entries moved. public int moveBlockStateToArchive() { final long retainAboveThisBlock = blockchain.getChainHeadBlockNumber() - DISTANCE_FROM_HEAD_BEFORE_ARCHIVING_OLD_STATE; @@ -178,18 +118,27 @@ public int moveBlockStateToArchive() { final SortedMap blocksToArchive; synchronized (this) { blocksToArchive = new TreeMap<>(); - pendingBlocksToArchive.entrySet().stream() - .filter( - (e) -> blocksToArchive.size() <= CATCHUP_LIMIT && e.getKey() <= retainAboveThisBlock) - .forEach( - (e) -> { - blocksToArchive.put(e.getKey(), e.getValue()); - }); + + long nextToArchive = latestArchivedBlock.get() + 1; + while (blocksToArchive.size() <= CATCHUP_LIMIT && nextToArchive < retainAboveThisBlock) { + blocksToArchive.put( + nextToArchive, blockchain.getBlockByNumber(nextToArchive).get().getHash()); + + if (!blockchain.blockIsOnCanonicalChain( + blockchain.getBlockHashByNumber(nextToArchive).orElse(Hash.EMPTY))) { + LOG.error( + "Attempted to archive a non-canonical block: {} / {}", + nextToArchive, + blockchain.getBlockByNumber(nextToArchive).get().getHash()); + } + + nextToArchive++; + } } if (blocksToArchive.size() > 0) { LOG.atDebug() - .setMessage("Moving cold state to archive storage: {} to {} ") + .setMessage("Moving state to archive storage: {} to {} ") .addArgument(blocksToArchive.firstKey()) .addArgument(blocksToArchive.lastKey()) .log(); @@ -274,8 +223,6 @@ public int moveBlockStateToArchive() { .addArgument(archivedAccountStateCount.get()) .addArgument(archivedAccountStorageCount.get()) .log(); - - removeArchivedFromQueue(blocksToArchive); } return archivedAccountStateCount.get() + archivedAccountStorageCount.get(); @@ -285,13 +232,10 @@ public int moveBlockStateToArchive() { @Override public void onBlockAdded(final BlockAddedEvent addedBlockContext) { - final Hash blockHash = addedBlockContext.getBlock().getHeader().getBlockHash(); final Optional blockNumber = 
Optional.of(addedBlockContext.getBlock().getHeader().getNumber()); blockNumber.ifPresent( blockNum -> { - addToArchivingQueue(blockNum, blockHash); - // Since moving blocks can be done in batches we only want // one instance running at a time executeAsync.accept( From 75e57cc6d91172f17e99d989947e476edad46aeb Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Tue, 8 Oct 2024 12:53:15 +0100 Subject: [PATCH 27/39] Remove pre-startup catchup. Start on first block import instead Signed-off-by: Matthew Whitehead --- .../besu/controller/BesuControllerBuilder.java | 4 ++-- .../diffbased/bonsai/worldview/BonsaiArchiver.java | 13 +++---------- 2 files changed, 5 insertions(+), 12 deletions(-) diff --git a/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java b/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java index 72ff58afe16..f274177ce46 100644 --- a/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java +++ b/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java @@ -847,8 +847,8 @@ private BonsaiArchiver createBonsaiArchiver( trieLogManager, metricsSystem); - long archivedBlocks = archiver.initialize(); - LOG.info("Bonsai archiver initialised, caught up {} blocks", archivedBlocks); + archiver.initialize(); + LOG.info("Bonsai archiver initialised"); return archiver; } diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiver.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiver.java index 08e01dff357..47e62185da7 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiver.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiver.java @@ -80,17 +80,9 @@ public BonsaiArchiver( "Total number of blocks for which state has been archived"); } - public long initialize() { - // On startup there will be recent blocks whose state and storage hasn't been archived yet. - // Start archiving them straight away to catch up with the chain head. 
- // long totalBlocksCaughtUp = 0; + public void initialize() { + // Read from the DB where we got to previously latestArchivedBlock.set(rootWorldStateStorage.getLatestArchivedBlock().orElse(0L)); - long startingBlock = latestArchivedBlock.get(); - while (blockchain.getChainHeadBlockNumber() - latestArchivedBlock.get() - > DISTANCE_FROM_HEAD_BEFORE_ARCHIVING_OLD_STATE) { - moveBlockStateToArchive(); - } - return latestArchivedBlock.get() - startingBlock; } public long getPendingBlocksCount() { @@ -232,6 +224,7 @@ public int moveBlockStateToArchive() { @Override public void onBlockAdded(final BlockAddedEvent addedBlockContext) { + initialize(); final Optional blockNumber = Optional.of(addedBlockContext.getBlock().getHeader().getNumber()); blockNumber.ifPresent( From a6552df9c80ea77b91b156bca093cf49b910b605 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Tue, 8 Oct 2024 14:36:21 +0100 Subject: [PATCH 28/39] Multiple deletes of the same address in a single block causes Bonsai to fail the block Signed-off-by: Matthew Whitehead --- .../bonsai/worldview/BonsaiWorldState.java | 52 +++++++++++-------- 1 file changed, 31 insertions(+), 21 deletions(-) diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiWorldState.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiWorldState.java index d00afda9ee3..18ad3b8d49d 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiWorldState.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiWorldState.java @@ -57,11 +57,15 @@ import org.apache.tuweni.bytes.Bytes32; import org.apache.tuweni.rlp.RLP; import org.apache.tuweni.units.bigints.UInt256; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class BonsaiWorldState extends DiffBasedWorldState { protected final BonsaiCachedMerkleTrieLoader bonsaiCachedMerkleTrieLoader; + private static final Logger LOG = LoggerFactory.getLogger(BonsaiWorldState.class); + public BonsaiWorldState( final BonsaiWorldStateProvider archive, final BonsaiWorldStateKeyValueStorage worldStateKeyValueStorage, @@ -337,28 +341,34 @@ private void clearStorage( try { final StorageConsumingMap> storageToDelete = worldStateUpdater.getStorageToUpdate().get(address); - Map entriesToDelete = storageTrie.entriesFrom(Bytes32.ZERO, 256); - while (!entriesToDelete.isEmpty()) { - entriesToDelete.forEach( - (k, v) -> { - final StorageSlotKey storageSlotKey = - new StorageSlotKey(Hash.wrap(k), Optional.empty()); - final UInt256 slotValue = UInt256.fromBytes(Bytes32.leftPad(RLP.decodeValue(v))); - maybeStateUpdater.ifPresent( - bonsaiUpdater -> - bonsaiUpdater.removeStorageValueBySlotHash( - address.addressHash(), storageSlotKey.getSlotHash())); - storageToDelete - .computeIfAbsent( - storageSlotKey, key -> new DiffBasedValue<>(slotValue, null, true)) - .setPrior(slotValue); - }); - entriesToDelete.keySet().forEach(storageTrie::remove); - if (entriesToDelete.size() == 256) { - entriesToDelete = storageTrie.entriesFrom(Bytes32.ZERO, 256); - } else { - break; + if (storageToDelete != null) { + Map entriesToDelete = storageTrie.entriesFrom(Bytes32.ZERO, 256); + while (!entriesToDelete.isEmpty()) { + entriesToDelete.forEach( + (k, v) -> { + final StorageSlotKey storageSlotKey = + new StorageSlotKey(Hash.wrap(k), Optional.empty()); + final UInt256 slotValue = UInt256.fromBytes(Bytes32.leftPad(RLP.decodeValue(v))); + 
maybeStateUpdater.ifPresent( + bonsaiUpdater -> + bonsaiUpdater.removeStorageValueBySlotHash( + address.addressHash(), storageSlotKey.getSlotHash())); + storageToDelete + .computeIfAbsent( + storageSlotKey, key -> new DiffBasedValue<>(slotValue, null, true)) + .setPrior(slotValue); + }); + entriesToDelete.keySet().forEach(storageTrie::remove); + if (entriesToDelete.size() == 256) { + entriesToDelete = storageTrie.entriesFrom(Bytes32.ZERO, 256); + } else { + break; + } } + } else { + LOG.warn( + "No storage entries to delete for deleted address {}. Address storage may already have been deleted in this block.", + address); } } catch (MerkleTrieException e) { // need to throw to trigger the heal From 329da00cc11076274c464e52fbc2fe8f8d12a6d8 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Wed, 9 Oct 2024 10:48:34 +0100 Subject: [PATCH 29/39] Metric should be a gauge not a counter Signed-off-by: Matthew Whitehead --- .../diffbased/bonsai/worldview/BonsaiArchiver.java | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiver.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiver.java index 47e62185da7..ed604996ac9 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiver.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/worldview/BonsaiArchiver.java @@ -22,7 +22,6 @@ import org.hyperledger.besu.ethereum.trie.diffbased.common.trielog.TrieLogManager; import org.hyperledger.besu.metrics.BesuMetricCategory; import org.hyperledger.besu.plugin.services.MetricsSystem; -import org.hyperledger.besu.plugin.services.metrics.Counter; import org.hyperledger.besu.plugin.services.trielogs.TrieLog; import java.util.Optional; @@ -56,7 +55,6 @@ public class BonsaiArchiver implements BlockAddedObserver { private static final int DISTANCE_FROM_HEAD_BEFORE_ARCHIVING_OLD_STATE = 10; private final TrieLogManager trieLogManager; protected final MetricsSystem metricsSystem; - protected final Counter archivedBlocksCounter; // For logging progress. 
Saves doing a DB read just to record our progress final AtomicLong latestArchivedBlock = new AtomicLong(0); @@ -73,11 +71,11 @@ public BonsaiArchiver( this.trieLogManager = trieLogManager; this.metricsSystem = metricsSystem; - archivedBlocksCounter = - metricsSystem.createCounter( - BesuMetricCategory.BLOCKCHAIN, - "archived_blocks_state_total", - "Total number of blocks for which state has been archived"); + metricsSystem.createLongGauge( + BesuMetricCategory.BLOCKCHAIN, + "archived_blocks_state_total", + "Total number of blocks for which state has been archived", + () -> latestArchivedBlock.get()); } public void initialize() { @@ -190,7 +188,6 @@ public int moveBlockStateToArchive() { .addArgument(block.getKey()) .log(); rootWorldStateStorage.setLatestArchivedBlock(block.getKey()); - archivedBlocksCounter.inc(); // Update local var for logging progress latestArchivedBlock.set(block.getKey()); From e6364b2381cbd6999e9cc235810b2ad29147391c Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Wed, 9 Oct 2024 11:39:11 +0100 Subject: [PATCH 30/39] Refactor archive tests after code refactor Signed-off-by: Matthew Whitehead --- .../common/trielog/ArchiverTests.java | 72 ++++++++++--------- 1 file changed, 40 insertions(+), 32 deletions(-) diff --git a/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/trielog/ArchiverTests.java b/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/trielog/ArchiverTests.java index 05456f92645..cd03907c637 100644 --- a/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/trielog/ArchiverTests.java +++ b/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/trielog/ArchiverTests.java @@ -68,9 +68,9 @@ public class ArchiverTests { // Number of blocks in the chain. 
This is different to the number of blocks // we have successfully archived state for - static final long SHORT_TEST_CHAIN_HEIGHT = 151; + static final long SHORT_TEST_CHAIN_HEIGHT = 150; static final long LONG_TEST_CHAIN_HEIGHT = - 2001; // We want block 2000 to be returned so set to 2001 + 2000; // We want block 2000 to be returned so set to 2001 // Address used for account and storage changes final Address address = Address.fromHexString("0x95cD8499051f7FE6a2F53749eC1e9F4a81cafa13"); @@ -86,7 +86,7 @@ public class ArchiverTests { @Override public Optional load(final Hash blockHash) { Optional foundBlock; - for (long i = 0; i < SHORT_TEST_CHAIN_HEIGHT; i++) { + for (long i = 0; i <= SHORT_TEST_CHAIN_HEIGHT; i++) { if ((foundBlock = blockNumberCache.getUnchecked(i)).isPresent() && foundBlock.get().getHash().equals(blockHash)) { return foundBlock; @@ -157,7 +157,7 @@ private static Optional getGeneratedBlock(final long blockNumber, final l } @Test - public void archiveLimitsInitialArchiveBlocks() { + public void archiveInitialisesAndSetsPendingBlockCount() { blockNumberCache = CacheBuilder.newBuilder() @@ -205,10 +205,11 @@ public Optional load(final Long blockNumber) { BonsaiArchiver archiver = new BonsaiArchiver( worldStateStorage, blockchain, executeAsync, trieLogManager, new NoOpMetricsSystem()); - long caughtUpBlocks = archiver.initialize(); + archiver.getPendingBlocksCount(); + archiver.initialize(); // Check that blocks 101 to 1990 (10 before chain head 2000) have been caught up - assertThat(caughtUpBlocks).isEqualTo(1900); + assertThat(archiver.getPendingBlocksCount()).isEqualTo(1900); } @Test @@ -312,26 +313,27 @@ public Optional load(final Long blockNumber) { worldStateStorage, blockchain, executeAsync, trieLogManager, new NoOpMetricsSystem()); archiver.initialize(); - // Chain height is 150, we've archived state up to block 100, we should have initialized the - // next 50 blocks to be archived - assertThat(archiver.getPendingBlocksCount()).isEqualTo(50); - + currentBlockHeight = 100L; when(blockchain.getChainHeadBlockNumber()) .then(requestedBlockNumber -> getCurrentBlockHeight()); + // No new blocks yet, no pending blocks to archive + assertThat(archiver.getPendingBlocksCount()).isEqualTo(0); + // Process the next 50 blocks. Only 1 account state change should happen during this processing // since there are only trie logs for blocks 101 and 102 for (long nextBlock = 101; nextBlock < 150; nextBlock++) { currentBlockHeight = nextBlock; - if (nextBlock == 112) { - archiver.addToArchivingQueue( - nextBlock, blockNumberCache.getUnchecked(nextBlock).get().getHash()); + if (nextBlock == 113) { + // archiver.addToArchivingQueue( + // nextBlock, blockNumberCache.getUnchecked(nextBlock).get().getHash()); int accountsMoved = archiver.moveBlockStateToArchive(); assertThat(accountsMoved).isEqualTo(1); } else { - archiver.addToArchivingQueue( - nextBlock, blockNumberCache.getUnchecked(nextBlock).get().getHash()); + // archiver.addToArchivingQueue( + // nextBlock, blockNumberCache.getUnchecked(nextBlock).get().getHash()); int accountsMoved = archiver.moveBlockStateToArchive(); + System.out.println("nextBlock = " + nextBlock + ". 
Accounts moved = " + accountsMoved); assertThat(accountsMoved).isEqualTo(0); } } @@ -456,9 +458,12 @@ public Optional load(final Long blockNumber) { worldStateStorage, blockchain, executeAsync, trieLogManager, new NoOpMetricsSystem()); archiver.initialize(); - // Chain height is 150, we've archived state up to block 100, we should have initialized the - // next 50 blocks to be archived - assertThat(archiver.getPendingBlocksCount()).isEqualTo(50); + currentBlockHeight = 100L; + when(blockchain.getChainHeadBlockNumber()) + .then(requestedBlockNumber -> getCurrentBlockHeight()); + + // No new blocks yet, no pending blocks to archive + assertThat(archiver.getPendingBlocksCount()).isEqualTo(0); when(blockchain.getChainHeadBlockNumber()) .then(requestedBlockNumber -> getCurrentBlockHeight()); @@ -468,11 +473,11 @@ public Optional load(final Long blockNumber) { // happen during this processing since there are only trie logs for blocks 101 and 102 for (long nextBlock = 101; nextBlock < 150; nextBlock++) { currentBlockHeight = nextBlock; - archiver.addToArchivingQueue( - nextBlock, blockNumberCache.getUnchecked(nextBlock).get().getHash()); + // archiver.addToArchivingQueue( + // nextBlock, blockNumberCache.getUnchecked(nextBlock).get().getHash()); int storageMoved = archiver.moveBlockStateToArchive(); totalStorageMoved += storageMoved; - if (nextBlock == 112 || nextBlock == 113) { + if (nextBlock == 113 || nextBlock == 114) { assertThat(storageMoved).isEqualTo(1); } else { assertThat(storageMoved).isEqualTo(0); @@ -627,9 +632,12 @@ public Optional load(final Long blockNumber) { worldStateStorage, blockchain, executeAsync, trieLogManager, new NoOpMetricsSystem()); archiver.initialize(); - // Chain height is 150, we've archived state up to block 100, we should have initialized the - // next 50 blocks to be archived - assertThat(archiver.getPendingBlocksCount()).isEqualTo(50); + currentBlockHeight = 100L; + when(blockchain.getChainHeadBlockNumber()) + .then(requestedBlockNumber -> getCurrentBlockHeight()); + + // No new blocks yet, no pending blocks to archive + assertThat(archiver.getPendingBlocksCount()).isEqualTo(0); when(blockchain.getChainHeadBlockNumber()) .then(requestedBlockNumber -> getCurrentBlockHeight()); @@ -639,12 +647,12 @@ public Optional load(final Long blockNumber) { // happen during this processing since there are only trie logs for blocks 101 and 102 for (long nextBlock = 101; nextBlock < 150; nextBlock++) { currentBlockHeight = nextBlock; - archiver.addToArchivingQueue( - nextBlock, blockNumberCache.getUnchecked(nextBlock).get().getHash()); + // archiver.addToArchivingQueue( + // nextBlock, blockNumberCache.getUnchecked(nextBlock).get().getHash()); int storageAndAccountsMoved = archiver.moveBlockStateToArchive(); - if (nextBlock == 112) { + if (nextBlock == 113) { assertThat(storageAndAccountsMoved).isEqualTo(2); - } else if (nextBlock == 113) { + } else if (nextBlock == 114) { assertThat(storageAndAccountsMoved).isEqualTo(1); } else { assertThat(storageAndAccountsMoved).isEqualTo(0); @@ -805,7 +813,7 @@ public Optional load(final Long blockNumber) { .then(requestedBlockNumber -> getCurrentBlockHeight()); assertThat(archiver.getPendingBlocksCount()).isEqualTo(0); - // Process the next 50 blocks 150-200 and count the archive changes. We'll recreate the + // Process the next 50 blocks 151-200 and count the archive changes. 
We'll recreate the // block cache so we can generate blocks beyond 150 blockNumberCache = CacheBuilder.newBuilder() @@ -826,7 +834,7 @@ public Optional load(final Long blockNumber) { @Override public Optional load(final Hash blockHash) { Optional foundBlock; - for (long i = 0; i < LONG_TEST_CHAIN_HEIGHT; i++) { + for (long i = 0; i <= LONG_TEST_CHAIN_HEIGHT; i++) { if ((foundBlock = blockNumberCache.getUnchecked(i)).isPresent() && foundBlock.get().getHash().equals(blockHash)) { return foundBlock; @@ -838,7 +846,7 @@ public Optional load(final Hash blockHash) { // By default we archive state for chainheight - 10 blocks, so importing up to block 210 whould // cause blocks up to 200 to be archived - for (long nextBlock = 151; nextBlock <= 210; nextBlock++) { + for (long nextBlock = 151; nextBlock <= 211; nextBlock++) { currentBlockHeight = nextBlock; archiver.onBlockAdded( BlockAddedEvent.createForStoredOnly(blockNumberCache.getUnchecked(nextBlock).get())); @@ -1067,7 +1075,7 @@ public Optional load(final Hash blockHash) { // By default we archive state for chainheight - 10 blocks, so importing up to block 210 whould // cause blocks up to 200 to be archived - for (long nextBlock = 151; nextBlock <= 210; nextBlock++) { + for (long nextBlock = 151; nextBlock <= 211; nextBlock++) { currentBlockHeight = nextBlock; archiver.onBlockAdded( BlockAddedEvent.createForStoredOnly(blockNumberCache.getUnchecked(nextBlock).get())); From 46a33efa41074ed8609c9647188b21eaf2427835 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Wed, 9 Oct 2024 13:37:42 +0100 Subject: [PATCH 31/39] Remove commented out sections from tests Signed-off-by: Matthew Whitehead --- .../diffbased/common/trielog/ArchiverTests.java | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/trielog/ArchiverTests.java b/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/trielog/ArchiverTests.java index cd03907c637..90271309b1c 100644 --- a/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/trielog/ArchiverTests.java +++ b/ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/diffbased/common/trielog/ArchiverTests.java @@ -325,15 +325,10 @@ public Optional load(final Long blockNumber) { for (long nextBlock = 101; nextBlock < 150; nextBlock++) { currentBlockHeight = nextBlock; if (nextBlock == 113) { - // archiver.addToArchivingQueue( - // nextBlock, blockNumberCache.getUnchecked(nextBlock).get().getHash()); int accountsMoved = archiver.moveBlockStateToArchive(); assertThat(accountsMoved).isEqualTo(1); } else { - // archiver.addToArchivingQueue( - // nextBlock, blockNumberCache.getUnchecked(nextBlock).get().getHash()); int accountsMoved = archiver.moveBlockStateToArchive(); - System.out.println("nextBlock = " + nextBlock + ". 
Accounts moved = " + accountsMoved); assertThat(accountsMoved).isEqualTo(0); } } @@ -473,8 +468,6 @@ public Optional load(final Long blockNumber) { // happen during this processing since there are only trie logs for blocks 101 and 102 for (long nextBlock = 101; nextBlock < 150; nextBlock++) { currentBlockHeight = nextBlock; - // archiver.addToArchivingQueue( - // nextBlock, blockNumberCache.getUnchecked(nextBlock).get().getHash()); int storageMoved = archiver.moveBlockStateToArchive(); totalStorageMoved += storageMoved; if (nextBlock == 113 || nextBlock == 114) { @@ -535,8 +528,7 @@ public Optional load(final Long blockNumber) { // Mock 1 storage change when block 102 is being processed, because state changes // in block 101 can be archived (and likewise for block 103). NB: the trie log in // this test for block 103 isn't archived because no further changes to that - // storage - // are made + // storage are made return 1; } } @@ -647,8 +639,6 @@ public Optional load(final Long blockNumber) { // happen during this processing since there are only trie logs for blocks 101 and 102 for (long nextBlock = 101; nextBlock < 150; nextBlock++) { currentBlockHeight = nextBlock; - // archiver.addToArchivingQueue( - // nextBlock, blockNumberCache.getUnchecked(nextBlock).get().getHash()); int storageAndAccountsMoved = archiver.moveBlockStateToArchive(); if (nextBlock == 113) { assertThat(storageAndAccountsMoved).isEqualTo(2); @@ -1004,8 +994,7 @@ public Optional load(final Long blockNumber) { block152TrieLogs.addStorageChange(address, storageSlotKey, oldValue, newValue); // Simulate a storage change in block 153. This state will not be archived because it refers to - // a - // different slot + // a different slot TrieLogLayer block153TrieLogs = new TrieLogLayer(); oldValue = UInt256.fromHexString("0x345"); newValue = UInt256.fromHexString("0x456"); From 4051eb0d396f9911cb5e0d4cceeb5db9f2eb3861 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Thu, 17 Oct 2024 17:05:50 +0100 Subject: [PATCH 32/39] Update the BFT soak test to include a Bonsai archive node Signed-off-by: Matthew Whitehead --- .../node/configuration/BesuNodeFactory.java | 4 ++- .../BftAcceptanceTestParameterization.java | 5 +++ .../acceptance/bftsoak/BftMiningSoakTest.java | 36 +++++++++++++++++-- 3 files changed, 42 insertions(+), 3 deletions(-) diff --git a/acceptance-tests/dsl/src/main/java/org/hyperledger/besu/tests/acceptance/dsl/node/configuration/BesuNodeFactory.java b/acceptance-tests/dsl/src/main/java/org/hyperledger/besu/tests/acceptance/dsl/node/configuration/BesuNodeFactory.java index b39274c3d9c..804a1b55acb 100644 --- a/acceptance-tests/dsl/src/main/java/org/hyperledger/besu/tests/acceptance/dsl/node/configuration/BesuNodeFactory.java +++ b/acceptance-tests/dsl/src/main/java/org/hyperledger/besu/tests/acceptance/dsl/node/configuration/BesuNodeFactory.java @@ -479,7 +479,9 @@ public BesuNode createQbftNode( .dataStorageConfiguration( storageFormat == DataStorageFormat.FOREST ? DataStorageConfiguration.DEFAULT_FOREST_CONFIG - : DataStorageConfiguration.DEFAULT_BONSAI_CONFIG) + : storageFormat == DataStorageFormat.BONSAI + ? 
DataStorageConfiguration.DEFAULT_BONSAI_CONFIG + : DataStorageConfiguration.DEFAULT_BONSAI_ARCHIVE_CONFIG) .genesisConfigProvider(GenesisConfigurationFactory::createQbftGenesisConfig); if (fixedPort) { builder.p2pPort( diff --git a/acceptance-tests/tests/src/test/java/org/hyperledger/besu/tests/acceptance/bft/BftAcceptanceTestParameterization.java b/acceptance-tests/tests/src/test/java/org/hyperledger/besu/tests/acceptance/bft/BftAcceptanceTestParameterization.java index 2351f740bfb..df759ee8b5f 100644 --- a/acceptance-tests/tests/src/test/java/org/hyperledger/besu/tests/acceptance/bft/BftAcceptanceTestParameterization.java +++ b/acceptance-tests/tests/src/test/java/org/hyperledger/besu/tests/acceptance/bft/BftAcceptanceTestParameterization.java @@ -74,6 +74,11 @@ public BesuNode createBonsaiNodeFixedPort(BesuNodeFactory factory, String name) return creatorFn.create(factory, name, true, DataStorageFormat.BONSAI); } + public BesuNode createBonsaiArchiveNodeFixedPort(BesuNodeFactory factory, String name) + throws Exception { + return creatorFn.create(factory, name, true, DataStorageFormat.X_BONSAI_ARCHIVE); + } + public BesuNode createForestNodeFixedPort(BesuNodeFactory factory, String name) throws Exception { return creatorFn.create(factory, name, true, DataStorageFormat.FOREST); } diff --git a/acceptance-tests/tests/src/test/java/org/hyperledger/besu/tests/acceptance/bftsoak/BftMiningSoakTest.java b/acceptance-tests/tests/src/test/java/org/hyperledger/besu/tests/acceptance/bftsoak/BftMiningSoakTest.java index 878e503ba39..0aab630a532 100644 --- a/acceptance-tests/tests/src/test/java/org/hyperledger/besu/tests/acceptance/bftsoak/BftMiningSoakTest.java +++ b/acceptance-tests/tests/src/test/java/org/hyperledger/besu/tests/acceptance/bftsoak/BftMiningSoakTest.java @@ -32,9 +32,13 @@ import org.assertj.core.api.Assertions; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class BftMiningSoakTest extends ParameterizedBftTestBase { + private static final Logger LOG = LoggerFactory.getLogger(BftMiningSoakTest.class); + private final int NUM_STEPS = 5; private final int MIN_TEST_TIME_MINS = 60; @@ -62,7 +66,7 @@ public void shouldBeStableDuringLongTest( // Create a mix of Bonsai and Forest DB nodes final BesuNode minerNode1 = nodeFactory.createBonsaiNodeFixedPort(besu, "miner1"); - final BesuNode minerNode2 = nodeFactory.createForestNodeFixedPort(besu, "miner2"); + final BesuNode minerNode2 = nodeFactory.createBonsaiArchiveNodeFixedPort(besu, "miner2"); final BesuNode minerNode3 = nodeFactory.createBonsaiNodeFixedPort(besu, "miner3"); final BesuNode minerNode4 = nodeFactory.createForestNodeFixedPort(besu, "miner4"); @@ -132,6 +136,7 @@ public void shouldBeStableDuringLongTest( // Step 2 // Stop one of the nodes, check that the chain continues mining // blocks + LOG.info("Stopping node 4 to check the chain continues mining (albeit more slowly)"); stopNode(minerNode4); nextStepEndTime = @@ -151,6 +156,7 @@ public void shouldBeStableDuringLongTest( // Step 3 // Stop another one of the nodes, check that the chain now stops // mining blocks + LOG.info("Stopping node 3 to check that the chain stalls"); stopNode(minerNode3); chainHeight = minerNode1.execute(ethTransactions.blockNumber()); @@ -170,8 +176,8 @@ public void shouldBeStableDuringLongTest( // Step 4 // Restart both of the stopped nodes. 
Check that the chain resumes // mining blocks + LOG.info("Starting node 3 and node 4 to ensure the chain resumes mining new blocks"); startNode(minerNode3); - startNode(minerNode4); previousStepEndTime = Instant.now(); @@ -203,11 +209,16 @@ public void shouldBeStableDuringLongTest( lastChainHeight = chainHeight; } + LOG.info("Updating the value in the smart contract from 101 to 201"); // Update our smart contract before upgrading from berlin to london assertThat(simpleStorageContract.get().send()).isEqualTo(BigInteger.valueOf(101)); simpleStorageContract.set(BigInteger.valueOf(201)).send(); assertThat(simpleStorageContract.get().send()).isEqualTo(BigInteger.valueOf(201)); + LOG.info( + "Upgrading the entire chain to the London fork one node at a time. The genesis for each node will be updated to londonBlock = " + + lastChainHeight.intValue() + + 120); // Upgrade the chain from berlin to london in 120 blocks time upgradeToLondon( minerNode1, minerNode2, minerNode3, minerNode4, lastChainHeight.intValue() + 120); @@ -219,6 +230,9 @@ public void shouldBeStableDuringLongTest( previousStepEndTime.plus(getTestDurationMins() / NUM_STEPS, ChronoUnit.MINUTES); lastChainHeight = chainHeight; + // Allow the chain to restart mining blocks + Thread.sleep(THREE_MINUTES); + while (System.currentTimeMillis() < nextStepEndTime.toEpochMilli()) { Thread.sleep(ONE_MINUTE); chainHeight = minerNode1.execute(ethTransactions.blockNumber()); @@ -229,6 +243,8 @@ public void shouldBeStableDuringLongTest( lastChainHeight = chainHeight; } + LOG.info( + "Chain has successfully upgraded to the London fork. Checking the contract state is correct"); // Check that the state of our smart contract is still correct assertThat(simpleStorageContract.get().send()).isEqualTo(BigInteger.valueOf(201)); @@ -237,11 +253,18 @@ public void shouldBeStableDuringLongTest( assertThat(simpleStorageContract.get().send()).isEqualTo(BigInteger.valueOf(301)); // Upgrade the chain to shanghai in 120 seconds. Then try to deploy a shanghai contract + LOG.info( + "Upgrading the entire chain to the Shanghai fork one node at a time. 
The genesis for each node will be updated to shanghaiTime = " + + Instant.now().getEpochSecond() + + 120); upgradeToShanghai( minerNode1, minerNode2, minerNode3, minerNode4, Instant.now().getEpochSecond() + 120); + // Allow the chain to restart mining blocks Thread.sleep(THREE_MINUTES); + LOG.info( + "Deploying a smart contract that should only work if the chain is running on the shanghai fork"); SimpleStorageShanghai simpleStorageContractShanghai = minerNode1.execute(contractTransactions.createSmartContract(SimpleStorageShanghai.class)); @@ -284,21 +307,26 @@ private void upgradeToLondon( final int londonBlockNumber) throws InterruptedException { // Node 1 + + LOG.info("Upgrading node 1 to london fork"); stopNode(minerNode1); updateGenesisConfigToLondon(minerNode1, true, londonBlockNumber); startNode(minerNode1); // Node 2 + LOG.info("Upgrading node 2 to london fork"); stopNode(minerNode2); updateGenesisConfigToLondon(minerNode2, true, londonBlockNumber); startNode(minerNode2); // Node 3 + LOG.info("Upgrading node 3 to london fork"); stopNode(minerNode3); updateGenesisConfigToLondon(minerNode3, true, londonBlockNumber); startNode(minerNode3); // Node 4 + LOG.info("Upgrading node 4 to london fork"); stopNode(minerNode4); updateGenesisConfigToLondon(minerNode4, true, londonBlockNumber); startNode(minerNode4); @@ -312,21 +340,25 @@ private void upgradeToShanghai( final long shanghaiTime) throws InterruptedException { // Node 1 + LOG.info("Upgrading node 1 to shanghai fork"); stopNode(minerNode1); updateGenesisConfigToShanghai(minerNode1, shanghaiTime); startNode(minerNode1); // Node 2 + LOG.info("Upgrading node 2 to shanghai fork"); stopNode(minerNode2); updateGenesisConfigToShanghai(minerNode2, shanghaiTime); startNode(minerNode2); // Node 3 + LOG.info("Upgrading node 3 to shanghai fork"); stopNode(minerNode3); updateGenesisConfigToShanghai(minerNode3, shanghaiTime); startNode(minerNode3); // Node 4 + LOG.info("Upgrading node 4 to shanghai fork"); stopNode(minerNode4); updateGenesisConfigToShanghai(minerNode4, shanghaiTime); startNode(minerNode4); From c28a383b9631d227c58b6287be2132a14792006d Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Tue, 22 Oct 2024 11:07:18 +0100 Subject: [PATCH 33/39] Add archive-specific checks to the BFT soak test Signed-off-by: Matthew Whitehead --- .../acceptance/bftsoak/BftMiningSoakTest.java | 31 ++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/acceptance-tests/tests/src/test/java/org/hyperledger/besu/tests/acceptance/bftsoak/BftMiningSoakTest.java b/acceptance-tests/tests/src/test/java/org/hyperledger/besu/tests/acceptance/bftsoak/BftMiningSoakTest.java index 0aab630a532..c2e7866fbe0 100644 --- a/acceptance-tests/tests/src/test/java/org/hyperledger/besu/tests/acceptance/bftsoak/BftMiningSoakTest.java +++ b/acceptance-tests/tests/src/test/java/org/hyperledger/besu/tests/acceptance/bftsoak/BftMiningSoakTest.java @@ -34,6 +34,8 @@ import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.web3j.protocol.core.DefaultBlockParameter; +import org.web3j.tx.exceptions.ContractCallException; public class BftMiningSoakTest extends ParameterizedBftTestBase { @@ -86,6 +88,13 @@ public void shouldBeStableDuringLongTest( SimpleStorage simpleStorageContract = minerNode1.execute(contractTransactions.createSmartContract(SimpleStorage.class)); + // Create another instance of the contract referencing the same contract address but on the + // archive node. 
This contract instance should be able to query state from the beginning of + // the test + SimpleStorage simpleStorageArchive = + minerNode2.execute(contractTransactions.createSmartContract(SimpleStorage.class)); + simpleStorageArchive.setContractAddress(simpleStorageContract.getContractAddress()); + // Check the contract address is as expected for this sender & nonce contractVerifier .validTransactionReceipt("0x42699a7612a82f1d9c36148af9c77354759b210b") @@ -108,6 +117,9 @@ public void shouldBeStableDuringLongTest( // Set to something new simpleStorageContract.set(BigInteger.valueOf(101)).send(); + // Save this block height to check on the archive node at the end of the test + BigInteger archiveChainHeight = minerNode1.execute(ethTransactions.blockNumber()); + // Check the state of the contract has updated correctly. We'll set & get this several times // during the test assertThat(simpleStorageContract.get().send()).isEqualTo(BigInteger.valueOf(101)); @@ -270,8 +282,25 @@ public void shouldBeStableDuringLongTest( // Check the contract address is as expected for this sender & nonce contractVerifier - .validTransactionReceipt("0x05d91b9031a655d08e654177336d08543ac4b711") + .validTransactionReceipt("0xfeae27388a65ee984f452f86effed42aabd438fd") .verify(simpleStorageContractShanghai); + + // Archive node test. Check the state of the contract when it was first updated in the test + LOG.info( + "Checking that the archive node shows us the original smart contract value if we set a historic block number"); + simpleStorageArchive.setDefaultBlockParameter( + DefaultBlockParameter.valueOf(archiveChainHeight)); + assertThat(simpleStorageArchive.get().send()).isEqualTo(BigInteger.valueOf(101)); + + try { + simpleStorageContract.setDefaultBlockParameter( + DefaultBlockParameter.valueOf(archiveChainHeight)); + // Should throw ContractCallException because a non-archive node can't satisfy this request + simpleStorageContract.get().send(); + Assertions.fail("Request for historic state from non-archive node should have failed"); + } catch (ContractCallException e) { + // Ignore + } } private static void updateGenesisConfigToLondon( From c226aa680f2383beaa00a2fdc28600c8aa290ab6 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Mon, 4 Nov 2024 17:20:04 +0000 Subject: [PATCH 34/39] Update plugin API hash Signed-off-by: Matthew Whitehead --- plugin-api/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin-api/build.gradle b/plugin-api/build.gradle index c7ec6728674..32f25409974 100644 --- a/plugin-api/build.gradle +++ b/plugin-api/build.gradle @@ -71,7 +71,7 @@ Calculated :
${currentHash} tasks.register('checkAPIChanges', FileStateChecker) { description = "Checks that the API for the Plugin-API project does not change without deliberate thought" files = sourceSets.main.allJava.files - knownHash = 'tCOEAOnsPZ5a5LPNnue85kvWXkW9/6nv560a3bGWQ4w=' + knownHash = 'l8XABCgXU29VjS05uDhhxc3s6J1qxFQDRGAIyrH5Wwc=' } check.dependsOn('checkAPIChanges') diff --git a/plugin-api/src/main/java/org/hyperledger/besu/plugin/services/storage/DataStorageFormat.java b/plugin-api/src/main/java/org/hyperledger/besu/plugin/services/storage/DataStorageFormat.java index 5eda64bae62..dd88e8bd078 100644 --- a/plugin-api/src/main/java/org/hyperledger/besu/plugin/services/storage/DataStorageFormat.java +++ b/plugin-api/src/main/java/org/hyperledger/besu/plugin/services/storage/DataStorageFormat.java @@ -23,6 +23,11 @@ public enum DataStorageFormat { /** The option for storing archive data e.g. state at any block */ X_BONSAI_ARCHIVE; + /** + * Returns whether the storage format is one of the Bonsai DB formats + * + * @return true if it is, otherwise false + */ public boolean isBonsaiFormat() { return this == BONSAI || this == X_BONSAI_ARCHIVE; } From c4a9516f3a59c89bb27dc1e514268a2ab50a6ef3 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Tue, 5 Nov 2024 10:37:55 +0000 Subject: [PATCH 36/39] Merge fix Signed-off-by: Matthew Whitehead --- .../flat/BonsaiArchiveFlatDbStrategy.java | 2 +- .../storage/flat/BonsaiFlatDbStrategy.java | 1 + .../flat/BonsaiFlatDbStrategyProvider.java | 8 ++++++-- .../trie/diffbased/common/BonsaiContext.java | 19 ++++++++++++++++++- .../DiffBasedWorldStateKeyValueStorage.java | 2 +- .../common/storage/flat/FlatDbStrategy.java | 5 +++++ .../storage/flat/FlatDbStrategyProvider.java | 2 +- 7 files changed, 33 insertions(+), 6 deletions(-) diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/BonsaiArchiveFlatDbStrategy.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/BonsaiArchiveFlatDbStrategy.java index c6b3f0e849c..12d13c09a52 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/BonsaiArchiveFlatDbStrategy.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/BonsaiArchiveFlatDbStrategy.java @@ -21,9 +21,9 @@ import org.hyperledger.besu.datatypes.Hash; import org.hyperledger.besu.datatypes.StorageSlotKey; -import org.hyperledger.besu.ethereum.bonsai.common.BonsaiContext; import org.hyperledger.besu.ethereum.core.BlockHeader; import org.hyperledger.besu.ethereum.trie.NodeLoader; +import org.hyperledger.besu.ethereum.trie.diffbased.common.BonsaiContext; import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.CodeStorageStrategy; import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.FlatDbStrategy; import org.hyperledger.besu.metrics.BesuMetricCategory; diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/BonsaiFlatDbStrategy.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/BonsaiFlatDbStrategy.java index d4dd3529e85..70aaa2abf75 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/BonsaiFlatDbStrategy.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/BonsaiFlatDbStrategy.java @@ -171,6 +171,7 @@ protected Stream> 
accountsToPairStream( .map(pair -> new Pair<>(Bytes32.wrap(pair.getKey()), Bytes.wrap(pair.getValue()))); } + @Override public FlatDbStrategy contextSafeClone() { // FlatDBStrategies that care about bonsai context changes should override this return this; diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/BonsaiFlatDbStrategyProvider.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/BonsaiFlatDbStrategyProvider.java index febdd6ba82a..996c0b5c085 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/BonsaiFlatDbStrategyProvider.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/storage/flat/BonsaiFlatDbStrategyProvider.java @@ -16,7 +16,7 @@ import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE; -import org.hyperledger.besu.ethereum.bonsai.common.BonsaiContext; +import org.hyperledger.besu.ethereum.trie.diffbased.common.BonsaiContext; import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.CodeStorageStrategy; import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.FlatDbStrategy; import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.FlatDbStrategyProvider; @@ -35,7 +35,11 @@ public class BonsaiFlatDbStrategyProvider extends FlatDbStrategyProvider { private static final Logger LOG = LoggerFactory.getLogger(BonsaiFlatDbStrategyProvider.class); public BonsaiFlatDbStrategyProvider contextSafeClone() { - return null; + BonsaiFlatDbStrategyProvider copy = + new BonsaiFlatDbStrategyProvider(metricsSystem, dataStorageConfiguration); + copy.flatDbStrategy = flatDbStrategy.contextSafeClone(); + copy.flatDbMode = flatDbMode; + return copy; } public BonsaiFlatDbStrategyProvider( diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/BonsaiContext.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/BonsaiContext.java index 3892f89d086..44160b99a90 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/BonsaiContext.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/BonsaiContext.java @@ -12,7 +12,7 @@ * * SPDX-License-Identifier: Apache-2.0 */ -package org.hyperledger.besu.ethereum.bonsai.common; +package org.hyperledger.besu.ethereum.trie.diffbased.common; import org.hyperledger.besu.ethereum.core.BlockHeader; @@ -24,21 +24,38 @@ public class BonsaiContext { private final AtomicReference blockHeader; + /** Context for Bonsai storage i.e. 
the block the storage applies to */ public BonsaiContext() { blockHeader = new AtomicReference<>(); } + /** + * Create a copy of the current context + * + * @return the copied context + */ public BonsaiContext copy() { var newCtx = new BonsaiContext(); Optional.ofNullable(blockHeader.get()).ifPresent(newCtx::setBlockHeader); return newCtx; } + /** + * Set the new block header for the context + * + * @param blockHeader the new block header + * @return the updated context + */ public BonsaiContext setBlockHeader(final BlockHeader blockHeader) { this.blockHeader.set(blockHeader); return this; } + /** + * Get the block header currently applied to this context + * + * @return the optional block header + */ public Optional getBlockHeader() { return Optional.ofNullable(blockHeader.get()); } diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java index 4ab351e4eb4..c353d2a0593 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/DiffBasedWorldStateKeyValueStorage.java @@ -22,11 +22,11 @@ import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE; import org.hyperledger.besu.datatypes.Hash; -import org.hyperledger.besu.ethereum.bonsai.common.BonsaiContext; import org.hyperledger.besu.ethereum.core.BlockHeader; import org.hyperledger.besu.ethereum.storage.StorageProvider; import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier; import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.BonsaiArchiveFlatDbStrategy; +import org.hyperledger.besu.ethereum.trie.diffbased.common.BonsaiContext; import org.hyperledger.besu.ethereum.trie.diffbased.common.StorageSubscriber; import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.FlatDbStrategy; import org.hyperledger.besu.ethereum.worldstate.FlatDbMode; diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategy.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategy.java index 7c7fe0ffb49..09f1bc76f21 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategy.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategy.java @@ -236,4 +236,9 @@ private NavigableMap toNavigableMap( } public abstract void updateBlockContext(final BlockHeader blockHeader); + + public FlatDbStrategy contextSafeClone() { + // FlatDBStrategies that care about bonsai context changes should override this + return this; + } } diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java index c3c11de3e8a..bc432dfb786 100644 --- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java +++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/storage/flat/FlatDbStrategyProvider.java
@@ -37,7 +37,7 @@ public abstract class FlatDbStrategyProvider {
   // 0x666C61744462537461747573
   public static final byte[] FLAT_DB_MODE = "flatDbStatus".getBytes(StandardCharsets.UTF_8);
 
-  private final MetricsSystem metricsSystem;
+  protected final MetricsSystem metricsSystem;
   protected final DataStorageConfiguration dataStorageConfiguration;
   protected FlatDbMode flatDbMode;
   protected FlatDbStrategy flatDbStrategy;

From 81a0907cfac1b799a282238e9c936d655c10016b Mon Sep 17 00:00:00 2001
From: Matthew Whitehead
Date: Tue, 5 Nov 2024 16:37:47 +0000
Subject: [PATCH 37/39] Unit test fix

Signed-off-by: Matthew Whitehead
---
 .../rocksdb/RocksDBKeyValueStorageFactoryTest.java | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/plugins/rocksdb/src/test/java/org/hyperledger/besu/plugin/services/storage/rocksdb/RocksDBKeyValueStorageFactoryTest.java b/plugins/rocksdb/src/test/java/org/hyperledger/besu/plugin/services/storage/rocksdb/RocksDBKeyValueStorageFactoryTest.java
index 7148f601cbd..151591d3cc6 100644
--- a/plugins/rocksdb/src/test/java/org/hyperledger/besu/plugin/services/storage/rocksdb/RocksDBKeyValueStorageFactoryTest.java
+++ b/plugins/rocksdb/src/test/java/org/hyperledger/besu/plugin/services/storage/rocksdb/RocksDBKeyValueStorageFactoryTest.java
@@ -20,6 +20,7 @@
 import static org.assertj.core.api.Assertions.fail;
 import static org.hyperledger.besu.plugin.services.storage.DataStorageFormat.BONSAI;
 import static org.hyperledger.besu.plugin.services.storage.DataStorageFormat.FOREST;
+import static org.hyperledger.besu.plugin.services.storage.DataStorageFormat.X_BONSAI_ARCHIVE;
 import static org.mockito.Mockito.lenient;
 import static org.mockito.Mockito.when;
 
@@ -77,7 +78,9 @@ public void shouldCreateCorrectMetadataFileForLatestVersionForNewDb(
     final BaseVersionedStorageFormat expectedVersion =
         dataStorageFormat == BONSAI
             ? BaseVersionedStorageFormat.BONSAI_WITH_RECEIPT_COMPACTION
-            : BaseVersionedStorageFormat.FOREST_WITH_RECEIPT_COMPACTION;
+            : (dataStorageFormat == X_BONSAI_ARCHIVE
+                ? BaseVersionedStorageFormat.BONSAI_ARCHIVE_WITH_RECEIPT_COMPACTION
+                : BaseVersionedStorageFormat.FOREST_WITH_RECEIPT_COMPACTION);
     assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).getVersionedStorageFormat())
         .isEqualTo(expectedVersion);
   }
@@ -100,7 +103,9 @@ public void shouldCreateCorrectMetadataFileForLatestVersionForNewDbWithReceiptCo
     final BaseVersionedStorageFormat expectedVersion =
         dataStorageFormat == BONSAI
             ? BaseVersionedStorageFormat.BONSAI_WITH_RECEIPT_COMPACTION
-            : BaseVersionedStorageFormat.FOREST_WITH_RECEIPT_COMPACTION;
+            : (dataStorageFormat == X_BONSAI_ARCHIVE
+                ? BaseVersionedStorageFormat.BONSAI_ARCHIVE_WITH_RECEIPT_COMPACTION
+                : BaseVersionedStorageFormat.FOREST_WITH_RECEIPT_COMPACTION);
     assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).getVersionedStorageFormat())
         .isEqualTo(expectedVersion);
   }

From 0b0bed4b3d096e6f86d307b9a66cd48c42e84666 Mon Sep 17 00:00:00 2001
From: Matthew Whitehead
Date: Tue, 5 Nov 2024 17:12:30 +0000
Subject: [PATCH 38/39] More UT fixing

Signed-off-by: Matthew Whitehead
---
 .../rocksdb/RocksDBKeyValuePrivacyStorageFactoryTest.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/plugins/rocksdb/src/test/java/org/hyperledger/besu/plugin/services/storage/rocksdb/RocksDBKeyValuePrivacyStorageFactoryTest.java b/plugins/rocksdb/src/test/java/org/hyperledger/besu/plugin/services/storage/rocksdb/RocksDBKeyValuePrivacyStorageFactoryTest.java
index 17a3fb874a4..b96c44fcbc8 100644
--- a/plugins/rocksdb/src/test/java/org/hyperledger/besu/plugin/services/storage/rocksdb/RocksDBKeyValuePrivacyStorageFactoryTest.java
+++ b/plugins/rocksdb/src/test/java/org/hyperledger/besu/plugin/services/storage/rocksdb/RocksDBKeyValuePrivacyStorageFactoryTest.java
@@ -102,7 +102,7 @@ public void shouldCreateCorrectMetadataFileForLatestVersion() throws Exception {
   }
 
   @ParameterizedTest
-  @EnumSource(DataStorageFormat.class)
+  @EnumSource(names = {"BONSAI", "FOREST"})
   public void shouldUpdateCorrectMetadataFileForLatestVersion(
       final DataStorageFormat dataStorageFormat) throws Exception {
     final Path tempDataDir = temporaryFolder.resolve("data");
@@ -139,7 +139,7 @@ public void shouldUpdateCorrectMetadataFileForLatestVersion(
   }
 
   @ParameterizedTest
-  @EnumSource(DataStorageFormat.class)
+  @EnumSource(names = {"BONSAI", "FOREST"})
   public void shouldUpdateCorrectMetadataFileForLatestVersionWithReceiptCompaction(
       final DataStorageFormat dataStorageFormat) throws Exception {
     final Path tempDataDir = temporaryFolder.resolve("data");

From 614e5c2be05f9113830dfa7bbeca6cb3620bbb90 Mon Sep 17 00:00:00 2001
From: Matthew Whitehead
Date: Wed, 6 Nov 2024 10:06:17 +0000
Subject: [PATCH 39/39] Tidy up

Signed-off-by: Matthew Whitehead
---
 .../hyperledger/besu/controller/BesuControllerBuilder.java | 2 --
 .../trie/diffbased/bonsai/BonsaiWorldStateProvider.java    | 5 -----
 2 files changed, 7 deletions(-)

diff --git a/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java b/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java
index 3da65ba4d5b..b1b226edf18 100644
--- a/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java
+++ b/besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java
@@ -763,7 +763,6 @@ public BesuController build() {
       }
     }
 
-    // TODO - do we want a flag to turn this on and off?
     if (DataStorageFormat.X_BONSAI_ARCHIVE.equals(
         dataStorageConfiguration.getDataStorageFormat())) {
       final BonsaiWorldStateKeyValueStorage worldStateKeyValueStorage =
@@ -1155,7 +1154,6 @@ yield new BonsaiWorldStateProvider(
         final BonsaiWorldStateKeyValueStorage worldStateKeyValueStorage =
             worldStateStorageCoordinator.getStrategy(BonsaiWorldStateKeyValueStorage.class);
 
-        // TODO, better integrate. Just for PoC, explicitly set our bonsai context chain head:
         worldStateKeyValueStorage
             .getFlatDbStrategy()
             .updateBlockContext(blockchain.getChainHeadHeader());
diff --git a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/BonsaiWorldStateProvider.java b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/BonsaiWorldStateProvider.java
index 972acedc121..38fc736c8bd 100644
--- a/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/BonsaiWorldStateProvider.java
+++ b/ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/bonsai/BonsaiWorldStateProvider.java
@@ -89,11 +89,6 @@ public Optional getMutable(
     if (shouldPersistState) {
       return getMutable(blockHeader.getStateRoot(), blockHeader.getHash());
     } else {
-      // TODO this needs to be better integrated && ensure block is canonical
-      // HACK for kikori PoC, if we have the trielog for this block, we can assume we have it in
-      // flatDB
-      // although, in practice we can only serve canonical chain worldstates and need to fall back
-      // to state rolling if the requested block is a fork.
       if (this.worldStateKeyValueStorage.getFlatDbStrategy()
               instanceof BonsaiArchiveFlatDbStrategy
           && trieLogManager.getTrieLogLayer(blockHeader.getBlockHash()).isPresent()) {
        var contextSafeCopy =
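Reviewer note (not part of the patch series): a minimal sketch of how the BonsaiContext API introduced in these patches might be exercised. The class, package, and method names (constructor, copy, setBlockHeader, getBlockHeader) are taken from the diffs above; the test class BonsaiContextUsageTest itself is hypothetical and only illustrates the copy-on-read isolation that contextSafeClone/getContextSafeCopy rely on.

import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;

import org.hyperledger.besu.ethereum.trie.diffbased.common.BonsaiContext;
import org.hyperledger.besu.plugin.data.BlockHeader;

import org.junit.jupiter.api.Test;

class BonsaiContextUsageTest {

  @Test
  void copyCarriesTheBlockHeaderWithoutSharingState() {
    final BlockHeader chainHead = mock(BlockHeader.class);

    // A fresh context has no block header until one is applied.
    final BonsaiContext context = new BonsaiContext();
    assertThat(context.getBlockHeader()).isEmpty();

    // Applying a header makes it visible through the optional getter.
    context.setBlockHeader(chainHead);
    assertThat(context.getBlockHeader()).contains(chainHead);

    // copy() yields an independent context seeded with the same header,
    // so later updates to the copy do not leak back into the original.
    final BonsaiContext copy = context.copy();
    copy.setBlockHeader(mock(BlockHeader.class));
    assertThat(context.getBlockHeader()).contains(chainHead);
  }
}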