From 31f2bf897b16aaba4bb2d655388d50dc5593d3a1 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Mon, 14 Nov 2022 10:01:45 +0200 Subject: [PATCH 01/13] Move MMR utils methods from pallet to primitives Signed-off-by: Serban Iorga --- Cargo.lock | 2 +- frame/merkle-mountain-range/Cargo.toml | 2 - frame/merkle-mountain-range/src/lib.rs | 27 ++++++---- frame/merkle-mountain-range/src/mmr/mmr.rs | 8 +-- frame/merkle-mountain-range/src/mmr/mod.rs | 3 +- .../merkle-mountain-range/src/mmr/storage.rs | 20 ++++---- frame/merkle-mountain-range/src/tests.rs | 38 ++++++++------ primitives/merkle-mountain-range/Cargo.toml | 2 + primitives/merkle-mountain-range/src/lib.rs | 4 ++ .../merkle-mountain-range/src}/utils.rs | 50 +++++++++++-------- 10 files changed, 86 insertions(+), 70 deletions(-) rename {frame/merkle-mountain-range/src/mmr => primitives/merkle-mountain-range/src}/utils.rs (83%) diff --git a/Cargo.lock b/Cargo.lock index a22cfa8ba8dd6..d204fcfcdeefc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5638,7 +5638,6 @@ name = "pallet-mmr" version = "4.0.0-dev" dependencies = [ "array-bytes", - "ckb-merkle-mountain-range", "env_logger", "frame-benchmarking", "frame-support", @@ -9714,6 +9713,7 @@ name = "sp-mmr-primitives" version = "4.0.0-dev" dependencies = [ "array-bytes", + "ckb-merkle-mountain-range", "log", "parity-scale-codec", "scale-info", diff --git a/frame/merkle-mountain-range/Cargo.toml b/frame/merkle-mountain-range/Cargo.toml index 8d1f897a65cd4..cf26cfb231c85 100644 --- a/frame/merkle-mountain-range/Cargo.toml +++ b/frame/merkle-mountain-range/Cargo.toml @@ -13,7 +13,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -mmr-lib = { package = "ckb-merkle-mountain-range", version = "0.5.2", default-features = false } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } @@ -36,7 +35,6 @@ std = [ "frame-benchmarking?/std", "frame-support/std", "frame-system/std", - "mmr-lib/std", "scale-info/std", "sp-core/std", "sp-io/std", diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs index a2d42417ae5dc..7941a713d6b45 100644 --- a/frame/merkle-mountain-range/src/lib.rs +++ b/frame/merkle-mountain-range/src/lib.rs @@ -56,10 +56,9 @@ //! NOTE This pallet is experimental and not proven to work in production. 
#![cfg_attr(not(feature = "std"), no_std)] -use codec::Encode; use frame_support::{log, traits::Get, weights::Weight}; use sp_runtime::{ - traits::{self, CheckedSub, One, Saturating, UniqueSaturatedInto}, + traits::{self, CheckedSub, One, Saturating}, SaturatedConversion, }; @@ -73,7 +72,9 @@ mod mock; mod tests; pub use pallet::*; -pub use sp_mmr_primitives::{self as primitives, Error, LeafDataProvider, LeafIndex, NodeIndex}; +pub use sp_mmr_primitives::{ + self as primitives, utils::NodesUtils, Error, LeafDataProvider, LeafIndex, NodeIndex, +}; use sp_std::prelude::*; /// The most common use case for MMRs is to store historical block hashes, @@ -219,7 +220,7 @@ pub mod pallet { fn on_initialize(_n: T::BlockNumber) -> Weight { use primitives::LeafDataProvider; let leaves = Self::mmr_leaves(); - let peaks_before = mmr::utils::NodesUtils::new(leaves).number_of_peaks(); + let peaks_before = sp_mmr_primitives::utils::NodesUtils::new(leaves).number_of_peaks(); let data = T::LeafData::leaf_data(); // append new leaf to MMR @@ -242,7 +243,7 @@ pub mod pallet { >::put(leaves); >::put(root); - let peaks_after = mmr::utils::NodesUtils::new(leaves).number_of_peaks(); + let peaks_after = sp_mmr_primitives::utils::NodesUtils::new(leaves).number_of_peaks(); T::WeightInfo::on_initialize(peaks_before.max(peaks_after)) } @@ -313,11 +314,15 @@ impl, I: 'static> Pallet { /// Build offchain key from `parent_hash` of block that originally added node `pos` to MMR. /// /// This combination makes the offchain (key,value) entry resilient to chain forks. - fn node_offchain_key( + fn node_temp_offchain_key( pos: NodeIndex, - parent_hash: ::Hash, + parent_hash: &::Hash, ) -> sp_std::prelude::Vec { - (T::INDEXING_PREFIX, pos, parent_hash).encode() + NodesUtils::node_temp_offchain_key::<::Header>( + &T::INDEXING_PREFIX, + pos, + parent_hash, + ) } /// Build canonical offchain key for node `pos` in MMR. @@ -326,7 +331,7 @@ impl, I: 'static> Pallet { /// Never read keys using `node_canon_offchain_key` unless you sure that /// there's no `node_offchain_key` key in the storage. fn node_canon_offchain_key(pos: NodeIndex) -> sp_std::prelude::Vec { - (T::INDEXING_PREFIX, pos).encode() + NodesUtils::node_canon_offchain_key(&T::INDEXING_PREFIX, pos) } /// Return size of rolling window of leaves saved in offchain under fork-unique keys. @@ -336,7 +341,7 @@ impl, I: 'static> Pallet { /// can be built using `frame_system::block_hash` map. fn offchain_canonicalization_window() -> LeafIndex { let window_size: LeafIndex = - ::BlockHashCount::get().unique_saturated_into(); + ::BlockHashCount::get().saturated_into(); window_size.saturating_sub(1) } @@ -355,7 +360,7 @@ impl, I: 'static> Pallet { .saturating_add(leaf_index.saturated_into()) } - /// Convert a `block_num` into a leaf index. + /// Convert a block number into a leaf index. fn block_num_to_leaf_index(block_num: T::BlockNumber) -> Result where T: frame_system::Config, diff --git a/frame/merkle-mountain-range/src/mmr/mmr.rs b/frame/merkle-mountain-range/src/mmr/mmr.rs index 1f5a5bdae380b..58e55b02fe9a3 100644 --- a/frame/merkle-mountain-range/src/mmr/mmr.rs +++ b/frame/merkle-mountain-range/src/mmr/mmr.rs @@ -18,12 +18,12 @@ use crate::{ mmr::{ storage::{OffchainStorage, RuntimeStorage, Storage}, - utils::NodesUtils, Hasher, Node, NodeOf, }, primitives::{self, Error, NodeIndex}, Config, HashingOf, }; +use sp_mmr_primitives::{mmr_lib, utils::NodesUtils}; use sp_std::prelude::*; /// Stateless verification of the proof for a batch of leaves. 
@@ -116,12 +116,6 @@ where p.verify(root, leaves_positions_and_data) .map_err(|e| Error::Verify.log_debug(e)) } - - /// Return the internal size of the MMR (number of nodes). - #[cfg(test)] - pub fn size(&self) -> NodeIndex { - self.mmr.mmr_size() - } } /// Runtime specific MMR functions. diff --git a/frame/merkle-mountain-range/src/mmr/mod.rs b/frame/merkle-mountain-range/src/mmr/mod.rs index 19fb7b34382bd..51de294753151 100644 --- a/frame/merkle-mountain-range/src/mmr/mod.rs +++ b/frame/merkle-mountain-range/src/mmr/mod.rs @@ -17,9 +17,8 @@ mod mmr; pub mod storage; -pub mod utils; -use sp_mmr_primitives::{DataOrHash, FullLeaf}; +use sp_mmr_primitives::{mmr_lib, DataOrHash, FullLeaf}; use sp_runtime::traits; pub use self::mmr::{verify_leaves_proof, Mmr}; diff --git a/frame/merkle-mountain-range/src/mmr/storage.rs b/frame/merkle-mountain-range/src/mmr/storage.rs index d16ca8cf1e5c8..efa3501024da4 100644 --- a/frame/merkle-mountain-range/src/mmr/storage.rs +++ b/frame/merkle-mountain-range/src/mmr/storage.rs @@ -19,15 +19,15 @@ use codec::Encode; use frame_support::log::{debug, error, trace}; -use mmr_lib::helper; use sp_core::offchain::StorageKind; use sp_io::{offchain, offchain_index}; +use sp_mmr_primitives::{mmr_lib, mmr_lib::helper, utils::NodesUtils}; use sp_std::iter::Peekable; #[cfg(not(feature = "std"))] use sp_std::prelude::*; use crate::{ - mmr::{utils::NodesUtils, Node, NodeOf}, + mmr::{Node, NodeOf}, primitives::{self, NodeIndex}, Config, Nodes, NumberOfLeaves, Pallet, }; @@ -150,7 +150,7 @@ where // persisted. All entries added by same block number on other forks will be cleared. let to_canon_hash = >::block_hash(to_canon_block_num); - Self::canonicalize_nodes_for_hash(&to_canon_nodes, to_canon_hash); + Self::canonicalize_nodes_for_hash(&to_canon_nodes, &to_canon_hash); // Get all the forks to prune, also remove them from the offchain pruning_map. PruningMap::::remove(to_canon_block_num) .map(|forks| { @@ -169,7 +169,7 @@ where fn prune_nodes_for_forks(nodes: &[NodeIndex], forks: Vec<::Hash>) { for hash in forks { for pos in nodes { - let key = Pallet::::node_offchain_key(*pos, hash); + let key = Pallet::::node_temp_offchain_key(*pos, &hash); debug!( target: "runtime::mmr::offchain", "Clear elem at pos {} with key {:?}", @@ -182,10 +182,10 @@ where fn canonicalize_nodes_for_hash( to_canon_nodes: &[NodeIndex], - to_canon_hash: ::Hash, + to_canon_hash: &::Hash, ) { for pos in to_canon_nodes { - let key = Pallet::::node_offchain_key(*pos, to_canon_hash); + let key = Pallet::::node_temp_offchain_key(*pos, to_canon_hash); // Retrieve the element from Off-chain DB under fork-aware key. if let Some(elem) = offchain::local_storage_get(StorageKind::PERSISTENT, &key) { let canon_key = Pallet::::node_canon_offchain_key(*pos); @@ -239,7 +239,7 @@ where let ancestor_parent_block_num = Pallet::::leaf_index_to_parent_block_num(ancestor_leaf_idx, leaves); let ancestor_parent_hash = >::block_hash(ancestor_parent_block_num); - let key = Pallet::::node_offchain_key(pos, ancestor_parent_hash); + let key = Pallet::::node_temp_offchain_key(pos, &ancestor_parent_hash); debug!( target: "runtime::mmr::offchain", "offchain db get {}: leaf idx {:?}, hash {:?}, key {:?}", pos, ancestor_leaf_idx, ancestor_parent_hash, key @@ -308,7 +308,7 @@ where >::insert(node_index, elem.hash()); } // We are storing full node off-chain (using indexing API). - Self::store_to_offchain(node_index, parent_hash, &elem); + Self::store_to_offchain(node_index, &parent_hash, &elem); // Increase the indices. 
if let Node::Data(..) = elem { @@ -337,7 +337,7 @@ where { fn store_to_offchain( pos: NodeIndex, - parent_hash: ::Hash, + parent_hash: &::Hash, node: &NodeOf, ) { let encoded_node = node.encode(); @@ -345,7 +345,7 @@ where // fork-resistant. Offchain worker task will "canonicalize" it // `frame_system::BlockHashCount` blocks later, when we are not worried about forks anymore // (multi-era-deep forks should not happen). - let key = Pallet::::node_offchain_key(pos, parent_hash); + let key = Pallet::::node_temp_offchain_key(pos, parent_hash); debug!( target: "runtime::mmr::offchain", "offchain db set: pos {} parent_hash {:?} key {:?}", pos, parent_hash, key diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs index e47f1b3b2e63a..791702917f992 100644 --- a/frame/merkle-mountain-range/src/tests.rs +++ b/frame/merkle-mountain-range/src/tests.rs @@ -15,19 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{ - mmr::{storage::PruningMap, utils}, - mock::*, - *, -}; +use crate::{mmr::storage::PruningMap, mock::*, *}; use frame_support::traits::{Get, OnInitialize}; -use mmr_lib::helper; use sp_core::{ offchain::{testing::TestOffchainExt, OffchainDbExt, OffchainWorkerExt}, H256, }; -use sp_mmr_primitives::{Compact, Proof}; +use sp_mmr_primitives::{mmr_lib::helper, utils, Compact, Proof}; pub(crate) fn new_test_ext() -> sp_io::TestExternalities { frame_system::GenesisConfig::default().build_storage::().unwrap().into() @@ -171,20 +166,29 @@ fn should_append_to_mmr_when_on_initialize_is_called() { let offchain_db = ext.offchain_db(); let expected = Some(mmr::Node::Data(((0, H256::repeat_byte(1)), LeafData::new(1)))); - assert_eq!(offchain_db.get(&MMR::node_offchain_key(0, parent_b1)).map(decode_node), expected); + assert_eq!( + offchain_db.get(&MMR::node_temp_offchain_key(0, &parent_b1)).map(decode_node), + expected + ); assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(0)).map(decode_node), expected); let expected = Some(mmr::Node::Data(((1, H256::repeat_byte(2)), LeafData::new(2)))); - assert_eq!(offchain_db.get(&MMR::node_offchain_key(1, parent_b2)).map(decode_node), expected); + assert_eq!( + offchain_db.get(&MMR::node_temp_offchain_key(1, &parent_b2)).map(decode_node), + expected + ); assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(1)).map(decode_node), expected); let expected = Some(mmr::Node::Hash(hex( "672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854", ))); - assert_eq!(offchain_db.get(&MMR::node_offchain_key(2, parent_b2)).map(decode_node), expected); + assert_eq!( + offchain_db.get(&MMR::node_temp_offchain_key(2, &parent_b2)).map(decode_node), + expected + ); assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(2)).map(decode_node), expected); - assert_eq!(offchain_db.get(&MMR::node_offchain_key(3, parent_b2)), None); + assert_eq!(offchain_db.get(&MMR::node_temp_offchain_key(3, &parent_b2)), None); assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(3)), None); } @@ -820,7 +824,9 @@ fn should_canonicalize_offchain() { expected ); assert_eq!( - offchain_db.get(&MMR::node_offchain_key(pos, parent_hash)).map(decode_node), + offchain_db + .get(&MMR::node_temp_offchain_key(pos, &parent_hash)) + .map(decode_node), expected ); } @@ -840,7 +846,9 @@ fn should_canonicalize_offchain() { expected ); assert_eq!( - offchain_db.get(&MMR::node_offchain_key(pos, parent_hash)).map(decode_node), + offchain_db + .get(&MMR::node_temp_offchain_key(pos, 
&parent_hash)) + .map(decode_node), expected ); }; @@ -868,7 +876,7 @@ fn should_canonicalize_offchain() { let parent_num: BlockNumber = (block_num - 1).into(); let parent_hash = >::block_hash(parent_num); // no longer available in fork-proof storage (was pruned), - assert_eq!(offchain_db.get(&MMR::node_offchain_key(pos, parent_hash)), None); + assert_eq!(offchain_db.get(&MMR::node_temp_offchain_key(pos, &parent_hash)), None); // but available using canon key. assert_eq!( offchain_db.get(&MMR::node_canon_offchain_key(pos)).map(decode_node), @@ -887,7 +895,7 @@ fn should_canonicalize_offchain() { let parent_num: BlockNumber = leaf_index.try_into().unwrap(); let parent_hash = >::block_hash(parent_num); // no longer available in fork-proof storage (was pruned), - assert_eq!(offchain_db.get(&MMR::node_offchain_key(pos, parent_hash)), None); + assert_eq!(offchain_db.get(&MMR::node_temp_offchain_key(pos, &parent_hash)), None); // but available using canon key. assert_eq!( offchain_db.get(&MMR::node_canon_offchain_key(pos)).map(decode_node), diff --git a/primitives/merkle-mountain-range/Cargo.toml b/primitives/merkle-mountain-range/Cargo.toml index 7f8b3b6afe5f3..2e532990a5caf 100644 --- a/primitives/merkle-mountain-range/Cargo.toml +++ b/primitives/merkle-mountain-range/Cargo.toml @@ -15,6 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } +mmr-lib = { package = "ckb-merkle-mountain-range", version = "0.5.2", default-features = false } serde = { version = "1.0.136", features = ["derive"], optional = true } sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } sp-core = { version = "7.0.0", default-features = false, path = "../core" } @@ -31,6 +32,7 @@ default = ["std"] std = [ "codec/std", "log/std", + "mmr-lib/std", "serde", "sp-api/std", "sp-core/std", diff --git a/primitives/merkle-mountain-range/src/lib.rs b/primitives/merkle-mountain-range/src/lib.rs index d46cb73c3c5e8..4510acc47eb92 100644 --- a/primitives/merkle-mountain-range/src/lib.rs +++ b/primitives/merkle-mountain-range/src/lib.rs @@ -20,6 +20,8 @@ #![cfg_attr(not(feature = "std"), no_std)] #![warn(missing_docs)] +pub use mmr_lib; + use scale_info::TypeInfo; use sp_debug_derive::RuntimeDebug; use sp_runtime::traits; @@ -27,6 +29,8 @@ use sp_std::fmt; #[cfg(not(feature = "std"))] use sp_std::prelude::Vec; +pub mod utils; + /// A type to describe node position in the MMR (node index). pub type NodeIndex = u64; diff --git a/frame/merkle-mountain-range/src/mmr/utils.rs b/primitives/merkle-mountain-range/src/utils.rs similarity index 83% rename from frame/merkle-mountain-range/src/mmr/utils.rs rename to primitives/merkle-mountain-range/src/utils.rs index 0b8e88a9283da..470cfb94104f4 100644 --- a/frame/merkle-mountain-range/src/mmr/utils.rs +++ b/primitives/merkle-mountain-range/src/utils.rs @@ -17,9 +17,15 @@ //! Merkle Mountain Range utilities. -use crate::primitives::{LeafIndex, NodeIndex}; +use codec::Encode; use mmr_lib::helper; +use sp_runtime::traits::Header; +#[cfg(not(feature = "std"))] +use sp_std::prelude::Vec; + +use crate::{LeafIndex, NodeIndex}; + /// MMR nodes & size -related utilities. 
pub struct NodesUtils { no_of_leaves: LeafIndex, @@ -70,11 +76,31 @@ impl NodesUtils { /// Starting from any leaf index, get the sequence of positions of the nodes added /// to the mmr when this leaf was added (inclusive of the leaf's position itself). /// That is, all of these nodes are right children of their respective parents. - pub fn right_branch_ending_in_leaf(leaf_index: LeafIndex) -> crate::Vec { + pub fn right_branch_ending_in_leaf(leaf_index: LeafIndex) -> Vec { let pos = helper::leaf_index_to_pos(leaf_index); let num_parents = leaf_index.trailing_ones() as u64; return (pos..=pos + num_parents).collect() } + + /// Build offchain key from `parent_hash` of block that originally added node `pos` to MMR. + /// + /// This combination makes the offchain (key,value) entry resilient to chain forks. + pub fn node_temp_offchain_key( + prefix: &[u8], + pos: NodeIndex, + parent_hash: &H::Hash, + ) -> Vec { + (prefix, pos, parent_hash).encode() + } + + /// Build canonical offchain key for node `pos` in MMR. + /// + /// Used for nodes added by now finalized blocks. + /// Never read keys using `node_canon_offchain_key` unless you sure that + /// there's no `node_offchain_key` key in the storage. + pub fn node_canon_offchain_key(prefix: &[u8], pos: NodeIndex) -> sp_std::prelude::Vec { + (prefix, pos).encode() + } } #[cfg(test)] @@ -142,8 +168,6 @@ mod tests { #[test] fn should_calculate_the_size_correctly() { - let _ = env_logger::try_init(); - let leaves = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 21]; let sizes = vec![0, 1, 3, 4, 7, 8, 10, 11, 15, 16, 18, 19, 22, 23, 25, 26, 39]; assert_eq!( @@ -154,23 +178,5 @@ mod tests { .collect::>(), sizes.clone() ); - - // size cross-check - let mut actual_sizes = vec![]; - for s in &leaves[1..] { - crate::tests::new_test_ext().execute_with(|| { - let mut mmr = crate::mmr::Mmr::< - crate::mmr::storage::RuntimeStorage, - crate::mock::Test, - _, - _, - >::new(0); - for i in 0..*s { - mmr.push(i); - } - actual_sizes.push(mmr.size()); - }) - } - assert_eq!(sizes[1..], actual_sizes[..]); } } From ad18dc2056efbd0568603a22bd501e916b100216 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Thu, 17 Nov 2022 10:01:15 +0200 Subject: [PATCH 02/13] Add method to MmrApi --- bin/node/runtime/src/lib.rs | 4 ++ frame/merkle-mountain-range/rpc/src/lib.rs | 2 +- frame/merkle-mountain-range/src/lib.rs | 38 +++++++++---------- frame/merkle-mountain-range/src/tests.rs | 9 ++--- primitives/merkle-mountain-range/src/lib.rs | 7 +++- primitives/merkle-mountain-range/src/utils.rs | 33 +++++++++++++++- 6 files changed, 61 insertions(+), 32 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index cff33e0918981..0f44febdb02dc 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -2064,6 +2064,10 @@ impl_runtime_apis! 
{ Ok(Mmr::mmr_root()) } + fn num_mmr_blocks() -> Result { + Mmr::num_mmr_blocks() + } + fn generate_proof( block_numbers: Vec, best_known_block_number: Option, diff --git a/frame/merkle-mountain-range/rpc/src/lib.rs b/frame/merkle-mountain-range/rpc/src/lib.rs index 8476d82f3e70d..6e520b3bf130e 100644 --- a/frame/merkle-mountain-range/rpc/src/lib.rs +++ b/frame/merkle-mountain-range/rpc/src/lib.rs @@ -238,7 +238,7 @@ fn mmr_error_into_rpc_error(err: MmrError) -> CallError { MmrError::LeafNotFound => 1, MmrError::GenerateProof => 2, MmrError::Verify => 3, - MmrError::BlockNumToLeafIndex => 4, + MmrError::InvalidNumericOp => 4, MmrError::InvalidBestKnownBlock => 5, _ => 0, }; diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs index 7941a713d6b45..dc24136118f89 100644 --- a/frame/merkle-mountain-range/src/lib.rs +++ b/frame/merkle-mountain-range/src/lib.rs @@ -58,7 +58,7 @@ use frame_support::{log, traits::Get, weights::Weight}; use sp_runtime::{ - traits::{self, CheckedSub, One, Saturating}, + traits::{self, One, Saturating}, SaturatedConversion, }; @@ -72,6 +72,7 @@ mod mock; mod tests; pub use pallet::*; +use sp_mmr_primitives::utils; pub use sp_mmr_primitives::{ self as primitives, utils::NodesUtils, Error, LeafDataProvider, LeafIndex, NodeIndex, }; @@ -360,30 +361,25 @@ impl, I: 'static> Pallet { .saturating_add(leaf_index.saturated_into()) } + /// Get the number of MMR blocks in the chain. + pub fn num_mmr_blocks() -> Result { + Self::mmr_leaves().try_into().map_err(|_| { + Error::InvalidNumericOp + .log_debug("The number of leaves couldn't be converted to a block number.") + }) + } + /// Convert a block number into a leaf index. - fn block_num_to_leaf_index(block_num: T::BlockNumber) -> Result + fn block_num_to_leaf_index(block_num: T::BlockNumber) -> Result where T: frame_system::Config, { - // leaf_idx = (leaves_count - 1) - (current_block_num - block_num); - let best_block_num = >::block_number(); - let blocks_diff = best_block_num.checked_sub(&block_num).ok_or_else(|| { - primitives::Error::BlockNumToLeafIndex - .log_debug("The provided block_number is greater than the best block number.") - })?; - let blocks_diff_as_leaf_idx = blocks_diff.try_into().map_err(|_| { - primitives::Error::BlockNumToLeafIndex - .log_debug("The `blocks_diff` couldn't be converted to `LeafIndex`.") - })?; - - let leaf_idx = Self::mmr_leaves() - .checked_sub(1) - .and_then(|last_leaf_idx| last_leaf_idx.checked_sub(blocks_diff_as_leaf_idx)) - .ok_or_else(|| { - primitives::Error::BlockNumToLeafIndex - .log_debug("There aren't enough leaves in the chain.") - })?; - Ok(leaf_idx) + let first_mmr_block = utils::first_mmr_block_num::( + >::block_number(), + Self::num_mmr_blocks()?, + )?; + + utils::block_num_to_leaf_index::(block_num, first_mmr_block) } /// Generate an MMR proof for the given `block_numbers`. 
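The arithmetic behind the new `utils::first_mmr_block_num` and `utils::block_num_to_leaf_index` helpers, sketched below with plain `u64` values and `Option` standing in for the generic `Header::Number` and `Error` types used in the patch (the concrete block numbers are illustrative only, not taken from the patch):

    // first MMR block = best block - number of MMR leaves + 1
    fn first_mmr_block_num(best_block_num: u64, num_mmr_blocks: u64) -> Option<u64> {
        best_block_num.checked_sub(num_mmr_blocks)?.checked_add(1)
    }

    // leaf index = block number - first MMR block
    fn block_num_to_leaf_index(block_num: u64, first_mmr_block: u64) -> Option<u64> {
        block_num.checked_sub(first_mmr_block)
    }

    fn main() {
        // Best block is 105 and the MMR holds 100 leaves (one per block),
        // so the pallet has been adding leaves since block 6.
        let first = first_mmr_block_num(105, 100).unwrap();
        assert_eq!(first, 6);
        // Block 10 is then the 5th block with an MMR leaf, i.e. leaf index 4.
        assert_eq!(block_num_to_leaf_index(10, first), Some(4));
    }

In the pallet, `num_mmr_blocks()` is simply the on-chain leaf count, so these are the values that flow into `block_num_to_leaf_index` when `generate_proof` translates the requested block numbers.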
diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs index 791702917f992..309e25c72047e 100644 --- a/frame/merkle-mountain-range/src/tests.rs +++ b/frame/merkle-mountain-range/src/tests.rs @@ -963,21 +963,18 @@ fn does_not_panic_when_generating_historical_proofs() { register_offchain_ext(&mut ext); ext.execute_with(|| { // when leaf index is invalid - assert_eq!( - crate::Pallet::::generate_proof(vec![10], None), - Err(Error::BlockNumToLeafIndex), - ); + assert_eq!(crate::Pallet::::generate_proof(vec![10], None), Err(Error::LeafNotFound),); // when leaves count is invalid assert_eq!( crate::Pallet::::generate_proof(vec![3], Some(100)), - Err(Error::BlockNumToLeafIndex), + Err(Error::GenerateProof), ); // when both leaf index and leaves count are invalid assert_eq!( crate::Pallet::::generate_proof(vec![10], Some(100)), - Err(Error::BlockNumToLeafIndex), + Err(Error::LeafNotFound), ); }); } diff --git a/primitives/merkle-mountain-range/src/lib.rs b/primitives/merkle-mountain-range/src/lib.rs index 4510acc47eb92..e2f5430fafff8 100644 --- a/primitives/merkle-mountain-range/src/lib.rs +++ b/primitives/merkle-mountain-range/src/lib.rs @@ -361,8 +361,8 @@ pub struct Proof { #[derive(RuntimeDebug, codec::Encode, codec::Decode, PartialEq, Eq)] pub enum Error { /// Error during translation of a block number into a leaf index. - #[cfg_attr(feature = "std", error("Error translation block number into leaf index"))] - BlockNumToLeafIndex, + #[cfg_attr(feature = "std", error("Error performing numeric op"))] + InvalidNumericOp, /// Error while pushing new node. #[cfg_attr(feature = "std", error("Error pushing new node"))] Push, @@ -423,6 +423,9 @@ sp_api::decl_runtime_apis! { /// Return the on-chain MMR root hash. fn mmr_root() -> Result; + /// Return the number of MMR blocks in the chain. + fn num_mmr_blocks() -> Result; + /// Generate MMR proof for a series of block numbers. If `best_known_block_number = Some(n)`, /// use historical MMR state at given block height `n`. Else, use current MMR state. fn generate_proof( diff --git a/primitives/merkle-mountain-range/src/utils.rs b/primitives/merkle-mountain-range/src/utils.rs index 470cfb94104f4..966afaeccd7fa 100644 --- a/primitives/merkle-mountain-range/src/utils.rs +++ b/primitives/merkle-mountain-range/src/utils.rs @@ -20,11 +20,40 @@ use codec::Encode; use mmr_lib::helper; -use sp_runtime::traits::Header; +use sp_runtime::traits::{CheckedAdd, CheckedSub, Header, One}; #[cfg(not(feature = "std"))] use sp_std::prelude::Vec; -use crate::{LeafIndex, NodeIndex}; +use crate::{Error, LeafIndex, NodeIndex}; + +/// Get the first block with MMR. +pub fn first_mmr_block_num( + best_block_num: H::Number, + num_mmr_blocks: H::Number, +) -> Result { + best_block_num + .checked_sub(&num_mmr_blocks) + .and_then(|last_non_mmr_block| last_non_mmr_block.checked_add(&One::one())) + .ok_or_else(|| { + Error::InvalidNumericOp + .log_debug("The best block should be greater than the number of mmr blocks.") + }) +} + +/// Convert a block number into a leaf index. +pub fn block_num_to_leaf_index( + block_num: H::Number, + first_mmr_block_num: H::Number, +) -> Result { + let leaf_idx = block_num.checked_sub(&first_mmr_block_num).ok_or_else(|| { + Error::InvalidNumericOp + .log_debug("The provided block should be greater than the first mmr block.") + })?; + + leaf_idx.try_into().map_err(|_| { + Error::InvalidNumericOp.log_debug("Couldn't convert the leaf index to `LeafIndex`.") + }) +} /// MMR nodes & size -related utilities. 
pub struct NodesUtils { From ccb71cde4506a903e9afedcff654ca8c4e09889e Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Fri, 18 Nov 2022 11:05:34 +0200 Subject: [PATCH 03/13] Move forks expanding logic from babe to primitives --- client/consensus/babe/src/lib.rs | 53 ++++++------------------- primitives/blockchain/src/backend.rs | 58 +++++++++++++++++++++++++++- 2 files changed, 69 insertions(+), 42 deletions(-) diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 109e5aade02a7..81d1666a0ba89 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -111,7 +111,8 @@ use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_application_crypto::AppKey; use sp_block_builder::BlockBuilder as BlockBuilderApi; use sp_blockchain::{ - Backend as _, Error as ClientError, HeaderBackend, HeaderMetadata, Result as ClientResult, + Backend as _, Error as ClientError, ForkBackend, HeaderBackend, HeaderMetadata, + Result as ClientResult, }; use sp_consensus::{ BlockOrigin, CacheKeyId, Environment, Error as ConsensusError, Proposer, SelectChain, @@ -123,7 +124,7 @@ use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvid use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::{ generic::{BlockId, OpaqueDigestItemId}, - traits::{Block as BlockT, Header, NumberFor, SaturatedConversion, Saturating, Zero}, + traits::{Block as BlockT, Header, NumberFor, SaturatedConversion, Zero}, DigestItem, }; @@ -520,7 +521,7 @@ fn aux_storage_cleanup + HeaderBackend, Block: B let first = notification.tree_route.first().unwrap_or(¬ification.hash); match client.header_metadata(*first) { Ok(meta) => { - aux_keys.insert(aux_schema::block_weight_key(meta.parent)); + aux_keys.insert(meta.parent); }, Err(err) => warn!( target: "babe", @@ -537,47 +538,17 @@ fn aux_storage_cleanup + HeaderBackend, Block: B .iter() // Ensure we don't prune latest finalized block. // This should not happen, but better be safe than sorry! - .filter(|h| **h != notification.hash) - .map(aux_schema::block_weight_key), + .filter(|h| **h != notification.hash), ); - // Cleans data for stale branches. - - for head in notification.stale_heads.iter() { - let mut hash = *head; - // Insert stale blocks hashes until canonical chain is reached. - // If we reach a block that is already part of the `aux_keys` we can stop the processing the - // head. - while aux_keys.insert(aux_schema::block_weight_key(hash)) { - match client.header_metadata(hash) { - Ok(meta) => { - hash = meta.parent; - - // If the parent is part of the canonical chain or there doesn't exist a block - // hash for the parent number (bug?!), we can abort adding blocks. - if client - .hash(meta.number.saturating_sub(1u32.into())) - .ok() - .flatten() - .map_or(true, |h| h == hash) - { - break - } - }, - Err(err) => { - warn!( - target: "babe", - "Header lookup fail while cleaning data for block {:?}: {}", - hash, - err, - ); - break - }, - } - } - } + // Cleans data for stale forks. 
+ let stale_forks = client.expand_forks(¬ification.stale_heads); + aux_keys.extend(stale_forks.iter()); - aux_keys.into_iter().map(|val| (val, None)).collect() + aux_keys + .into_iter() + .map(|val| (aux_schema::block_weight_key(val), None)) + .collect() } async fn answer_requests( diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index dea3a7f285117..887953a177909 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -21,9 +21,11 @@ use log::warn; use parking_lot::RwLock; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, Header as HeaderT, NumberFor}, + sp_std, + traits::{Block as BlockT, Header as HeaderT, NumberFor, Saturating}, Justifications, }; +use sp_std::collections::btree_set::BTreeSet; use crate::header_metadata::HeaderMetadata; @@ -84,6 +86,60 @@ pub trait HeaderBackend: Send + Sync { } } +/// Handles stale forks. +pub trait ForkBackend: + HeaderMetadata + HeaderBackend + Send + Sync +{ + /// Get all the header hashes that are part of the provided forks starting only from the fork + /// heads. + fn expand_forks(&self, fork_heads: &[Block::Hash]) -> BTreeSet { + let mut expanded_forks = BTreeSet::new(); + for fork_head in fork_heads { + let mut hash = *fork_head; + // Insert stale blocks hashes until canonical chain is reached. + // If we reach a block that is already part of the `expanded_forks` we can stop + // processing the fork. + while expanded_forks.insert(hash) { + match self.header_metadata(hash) { + Ok(meta) => { + hash = meta.parent; + + // If the parent is part of the canonical chain or there doesn't exist a + // block hash for the parent number (bug?!), we can abort adding blocks. + if self + .hash(meta.number.saturating_sub(1u32.into())) + .ok() + .flatten() + .map_or(true, |h| h == hash) + { + break + } + }, + Err(err) => { + warn!( + target: "primitives::blockchain", + "Stale header {:?} lookup fail while expanding fork {:?}: {}", + fork_head, + hash, + err, + ); + break + }, + } + } + } + + expanded_forks + } +} + +impl ForkBackend for T +where + Block: BlockT, + T: HeaderMetadata + HeaderBackend + Send + Sync, +{ +} + /// Blockchain database backend. Does not perform any validation. 
pub trait Backend: HeaderBackend + HeaderMetadata From 67ebfde6fdb84f01890ec5dcfbeda49b7ba69b1c Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Thu, 3 Nov 2022 19:02:09 +0200 Subject: [PATCH 04/13] Implement MMR gadget --- Cargo.lock | 25 ++ Cargo.toml | 1 + client/merkle-mountain-range/Cargo.toml | 32 ++ .../merkle-mountain-range/src/canon_engine.rs | 227 ++++++++++++ client/merkle-mountain-range/src/lib.rs | 233 +++++++++++++ .../merkle-mountain-range/src/test_utils.rs | 326 ++++++++++++++++++ test-utils/runtime/Cargo.toml | 4 + test-utils/runtime/src/lib.rs | 64 +++- 8 files changed, 909 insertions(+), 3 deletions(-) create mode 100644 client/merkle-mountain-range/Cargo.toml create mode 100644 client/merkle-mountain-range/src/canon_engine.rs create mode 100644 client/merkle-mountain-range/src/lib.rs create mode 100644 client/merkle-mountain-range/src/test_utils.rs diff --git a/Cargo.lock b/Cargo.lock index d204fcfcdeefc..5ca83de86ae7d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4224,6 +4224,29 @@ dependencies = [ "windows-sys 0.36.1", ] +[[package]] +name = "mmr-gadget" +version = "4.0.0-dev" +dependencies = [ + "async-std", + "beefy-primitives", + "futures", + "log", + "parity-scale-codec", + "sc-block-builder", + "sc-client-api", + "sc-offchain", + "sp-api", + "sp-blockchain", + "sp-consensus", + "sp-core", + "sp-io", + "sp-mmr-primitives", + "sp-runtime", + "substrate-test-runtime-client", + "tokio", +] + [[package]] name = "mockall" version = "0.11.2" @@ -10394,6 +10417,7 @@ dependencies = [ "log", "memory-db", "pallet-babe", + "pallet-mmr", "pallet-timestamp", "parity-scale-codec", "parity-util-mem", @@ -10414,6 +10438,7 @@ dependencies = [ "sp-inherents", "sp-io", "sp-keyring", + "sp-mmr-primitives", "sp-offchain", "sp-runtime", "sp-runtime-interface", diff --git a/Cargo.toml b/Cargo.toml index 956c106e0dc2d..ebdf73db0dba3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,6 +41,7 @@ members = [ "client/finality-grandpa", "client/informant", "client/keystore", + "client/merkle-mountain-range", "client/network", "client/network-gossip", "client/network/bitswap", diff --git a/client/merkle-mountain-range/Cargo.toml b/client/merkle-mountain-range/Cargo.toml new file mode 100644 index 0000000000000..3630c42414964 --- /dev/null +++ b/client/merkle-mountain-range/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "mmr-gadget" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository = "https://github.com/paritytech/substrate" +description = "MMR Client gadget for substrate" +homepage = "https://substrate.io" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0" } +futures = "0.3" +log = "0.4" +beefy-primitives = { version = "4.0.0-dev", path = "../../primitives/beefy" } +sc-client-api = { version = "4.0.0-dev", path = "../api" } +sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } +sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } +sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } +sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-io = { version = "7.0.0", path = "../../primitives/io" } +sp-mmr-primitives = { version = "4.0.0-dev", path = "../../primitives/merkle-mountain-range" } +sc-offchain = { version = "4.0.0-dev", path = "../offchain" } +sp-runtime = { version = "7.0.0", path = 
"../../primitives/runtime" } + +[dev-dependencies] +tokio = "1.17.0" +sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } +async-std = { version = "1.11.0", default-features = false } diff --git a/client/merkle-mountain-range/src/canon_engine.rs b/client/merkle-mountain-range/src/canon_engine.rs new file mode 100644 index 0000000000000..fde8bdba7e006 --- /dev/null +++ b/client/merkle-mountain-range/src/canon_engine.rs @@ -0,0 +1,227 @@ +// This file is part of Substrate. + +// Copyright (C) 2021-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::{marker::PhantomData, sync::Arc}; + +use log::{debug, error}; + +use sc_client_api::FinalityNotification; +use sc_offchain::OffchainDb; +use sp_blockchain::{ForkBackend, HeaderBackend, HeaderMetadata}; +use sp_core::offchain::{DbExternalities, OffchainStorage, StorageKind}; +use sp_mmr_primitives::{utils, utils::NodesUtils, LeafIndex, NodeIndex}; +use sp_runtime::{ + traits::{Block, Header, One}, + Saturating, +}; + +use crate::LOG_TARGET; + +pub struct CanonEngine { + pub client: Arc, + pub offchain_db: OffchainDb, + pub indexing_prefix: Vec, + pub first_mmr_block: ::Number, + + pub _phantom: PhantomData, +} + +impl CanonEngine +where + C: HeaderBackend + HeaderMetadata, + S: OffchainStorage, + B: Block, +{ + fn block_number_to_leaf_index( + &self, + block_num: ::Number, + ) -> Result { + utils::block_num_to_leaf_index::(block_num, self.first_mmr_block).map_err(|e| { + error!( + target: LOG_TARGET, + "Error converting block number {} to leaf index: {:?}", block_num, e + ); + e + }) + } + + fn node_temp_offchain_key(&self, pos: NodeIndex, parent_hash: &B::Hash) -> Vec { + NodesUtils::node_temp_offchain_key::(&self.indexing_prefix, pos, parent_hash) + } + + fn node_canon_offchain_key(&self, pos: NodeIndex) -> Vec { + NodesUtils::node_canon_offchain_key(&self.indexing_prefix, pos) + } + + fn prune_branch(&mut self, block_hash: &B::Hash) { + let header = match self.client.header_metadata(*block_hash) { + Ok(header) => header, + _ => { + error!( + target: LOG_TARGET, + "Stale block {} not found. Couldn't prune associated stale branch.", block_hash + ); + return + }, + }; + + // If we can't convert the block number to a leaf index, the chain state is probably + // corrupted. We only log the error, hoping that the chain state will be fixed. + let stale_leaf = match self.block_number_to_leaf_index(header.number) { + Ok(stale_leaf) => stale_leaf, + Err(_) => return, + }; + // We prune the leaf associated with the provided block and all the nodes added by that + // leaf. 
+ let stale_nodes = NodesUtils::right_branch_ending_in_leaf(stale_leaf); + debug!( + target: LOG_TARGET, + "Nodes to prune for stale block {}: {:?}", header.number, stale_nodes + ); + + for pos in stale_nodes { + let temp_key = self.node_temp_offchain_key(pos, &header.parent); + self.offchain_db.local_storage_clear(StorageKind::PERSISTENT, &temp_key); + debug!(target: LOG_TARGET, "Pruned elem at pos {} with temp key {:?}", pos, temp_key); + } + } + + fn canonicalize_branch( + &mut self, + block_num: ::Number, + parent_hash: &B::Hash, + ) { + // Don't canonicalize branches corresponding to blocks for which the MMR pallet + // wasn't yet initialized. + if block_num < self.first_mmr_block { + return + } + + // If we can't convert the block number to a leaf index, the chain state is probably + // corrupted. We only log the error, hoping that the chain state will be fixed. + let to_canon_leaf = match self.block_number_to_leaf_index(block_num) { + Ok(to_canon_leaf) => to_canon_leaf, + Err(_) => return, + }; + // We "canonicalize" the leaf associated with the provided block + // and all the nodes added by that leaf. + let to_canon_nodes = NodesUtils::right_branch_ending_in_leaf(to_canon_leaf); + debug!( + target: LOG_TARGET, + "Nodes to canonicalize for block {}: {:?}", block_num, to_canon_nodes + ); + + for pos in to_canon_nodes { + let temp_key = self.node_temp_offchain_key(pos, parent_hash); + if let Some(elem) = + self.offchain_db.local_storage_get(StorageKind::PERSISTENT, &temp_key) + { + let canon_key = self.node_canon_offchain_key(pos); + self.offchain_db.local_storage_set(StorageKind::PERSISTENT, &canon_key, &elem); + self.offchain_db.local_storage_clear(StorageKind::PERSISTENT, &temp_key); + debug!( + target: LOG_TARGET, + "Moved elem at pos {} from temp key {:?} to canon key {:?}", + pos, + temp_key, + canon_key + ); + } else { + error!( + target: LOG_TARGET, + "Couldn't canonicalize elem at pos {} using temp key {:?}", pos, temp_key + ); + } + } + } + + /// Move leafs and nodes added by finalized blocks in offchain db from _fork-aware key_ to + /// _canonical key_. + /// Prune leafs and nodes added by stale blocks in offchain db from _fork-aware key_. + pub fn canonicalize_and_prune( + &mut self, + best_finalized: &(::Number, B::Hash), + notification: &FinalityNotification, + ) { + // Move offchain MMR nodes for finalized blocks to canonical keys. + let (mut parent_number, mut parent_hash) = *best_finalized; + for block_hash in notification + .tree_route + .iter() + .cloned() + .chain(std::iter::once(notification.hash)) + { + let block_number = parent_number.saturating_add(One::one()); + self.canonicalize_branch(block_number, &parent_hash); + + (parent_number, parent_hash) = (block_number, block_hash); + } + + // Remove offchain MMR nodes for stale forks. 
+ let stale_forks = self.client.expand_forks(¬ification.stale_heads); + for hash in stale_forks.iter() { + self.prune_branch(hash); + } + } +} + +#[cfg(test)] +mod tests { + use crate::test_utils::run_test_with_mmr_gadget; + use sp_runtime::generic::BlockId; + use std::time::Duration; + + #[test] + fn canonicalize_and_prune_works_correctly() { + run_test_with_mmr_gadget(|client| async move { + // -> D4 -> D5 + // G -> A1 -> A2 -> A3 -> A4 + // -> B1 -> B2 -> B3 + // -> C1 + + let a1 = client.import_block(&BlockId::Number(0), b"a1", Some(0)).await; + let a2 = client.import_block(&BlockId::Hash(a1.hash()), b"a2", Some(1)).await; + let a3 = client.import_block(&BlockId::Hash(a2.hash()), b"a3", Some(2)).await; + let a4 = client.import_block(&BlockId::Hash(a3.hash()), b"a4", Some(3)).await; + + let b1 = client.import_block(&BlockId::Number(0), b"b1", Some(0)).await; + let b2 = client.import_block(&BlockId::Hash(b1.hash()), b"b2", Some(1)).await; + let b3 = client.import_block(&BlockId::Hash(b2.hash()), b"b3", Some(2)).await; + + let c1 = client.import_block(&BlockId::Number(0), b"c1", Some(0)).await; + + let d4 = client.import_block(&BlockId::Hash(a3.hash()), b"d4", Some(3)).await; + let d5 = client.import_block(&BlockId::Hash(d4.hash()), b"d5", Some(4)).await; + + client.finalize_block(a3.hash(), Some(3)); + async_std::task::sleep(Duration::from_millis(200)).await; + // expected finalized heads: a1, a2, a3 + client.assert_canonicalized(&[&a1, &a2, &a3]); + // expected stale heads: c1 + // expected pruned heads because of temp key collision: b1 + client.assert_pruned(&[&c1, &b1]); + + client.finalize_block(d5.hash(), None); + async_std::task::sleep(Duration::from_millis(200)).await; + // expected finalized heads: d4, d5, + client.assert_canonicalized(&[&d4, &d5]); + // expected stale heads: b1, b2, b3, a4 + client.assert_pruned(&[&b1, &b2, &b3, &a4]); + }) + } +} diff --git a/client/merkle-mountain-range/src/lib.rs b/client/merkle-mountain-range/src/lib.rs new file mode 100644 index 0000000000000..79be99a6a9c75 --- /dev/null +++ b/client/merkle-mountain-range/src/lib.rs @@ -0,0 +1,233 @@ +// This file is part of Substrate. + +// Copyright (C) 2021-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +extern crate core; + +mod canon_engine; +#[cfg(test)] +pub mod test_utils; + +use std::{marker::PhantomData, sync::Arc}; + +use futures::StreamExt; +use log::{debug, error, trace, warn}; + +use sc_client_api::{Backend, BlockchainEvents, FinalityNotification, FinalityNotifications}; +use sc_offchain::OffchainDb; +use sp_api::ProvideRuntimeApi; +use sp_blockchain::{HeaderBackend, HeaderMetadata}; +use sp_mmr_primitives::{utils, LeafIndex, MmrApi}; +use sp_runtime::{ + generic::BlockId, + traits::{Block, Header, NumberFor, Zero}, +}; + +use crate::canon_engine::CanonEngine; +use beefy_primitives::MmrRootHash; + +pub const LOG_TARGET: &str = "mmr"; + +enum MaybeCanonEngine { + Uninitialized { client: Arc, offchain_db: OffchainDb, indexing_prefix: Vec }, + Initialized(CanonEngine), +} + +impl MaybeCanonEngine +where + B: Block, + C: ProvideRuntimeApi, + C::Api: MmrApi>, +{ + fn try_prepare_init( + &mut self, + notification: &FinalityNotification, + ) -> Option<::Number> { + if let MaybeCanonEngine::Uninitialized { client, .. } = self { + let best_block = *notification.header.number(); + return match client.runtime_api().num_mmr_blocks(&BlockId::number(best_block)) { + Ok(Ok(num_mmr_blocks)) => { + match utils::first_mmr_block_num::(best_block, num_mmr_blocks) { + Ok(first_mmr_block) => Some(first_mmr_block), + Err(e) => { + error!( + target: LOG_TARGET, + "Error calculating the first mmr block: {:?}", e + ); + return None + }, + } + }, + _ => { + trace!(target: LOG_TARGET, "Finality notification: {:?}", notification); + debug!(target: LOG_TARGET, "Waiting for MMR pallet to become available ..."); + + None + }, + } + } + + None + } + + fn try_init(self, first_mmr_block: ::Number) -> Self { + if let MaybeCanonEngine::Uninitialized { client, offchain_db, indexing_prefix } = self { + debug!( + target: LOG_TARGET, + "MMR pallet available since block {:?}. \ + Starting offchain MMR leafs canonicalization.", + first_mmr_block + ); + + return MaybeCanonEngine::Initialized(CanonEngine { + client, + offchain_db, + indexing_prefix, + first_mmr_block, + _phantom: Default::default(), + }) + } + + return self + } +} + +/// A MMR Gadget. +pub struct MmrGadget, C> { + finality_notifications: FinalityNotifications, + best_finalized_head: (::Number, B::Hash), + maybe_engine: MaybeCanonEngine, + + _phantom: PhantomData<(B, BE)>, +} + +impl MmrGadget +where + B: Block, + ::Number: Into, + BE: Backend, + C: BlockchainEvents + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, + C::Api: MmrApi>, +{ + fn new(client: Arc, backend: Arc, indexing_prefix: Vec) -> Option { + let offchain_db = match backend.offchain_storage() { + Some(offchain_storage) => OffchainDb::new(offchain_storage), + None => { + warn!( + target: LOG_TARGET, + "Can't spawn a MmrGadget for a node without offchain storage." + ); + return None + }, + }; + + Some(MmrGadget { + finality_notifications: client.finality_notification_stream(), + best_finalized_head: (Zero::zero(), client.info().genesis_hash), + maybe_engine: MaybeCanonEngine::Uninitialized { client, offchain_db, indexing_prefix }, + + _phantom: Default::default(), + }) + } + + async fn run(mut self) { + while let Some(notification) = self.finality_notifications.next().await { + // Initialize the canonicalization engine if the MMR pallet is available. 
+ if let Some(first_mmr_block) = self.maybe_engine.try_prepare_init(¬ification) { + self.maybe_engine = self.maybe_engine.try_init(first_mmr_block); + } + + if let MaybeCanonEngine::Initialized(engine) = &mut self.maybe_engine { + // Perform the actual logic. + engine.canonicalize_and_prune(&self.best_finalized_head, ¬ification); + } + + self.best_finalized_head = (*notification.header.number(), notification.hash); + } + } + + /// Create and run the MMR gadget. + pub async fn start(client: Arc, backend: Arc, indexing_prefix: Vec) { + if let Some(mmr_gadget) = Self::new(client, backend, indexing_prefix) { + mmr_gadget.run().await + } + } +} + +#[cfg(test)] +mod tests { + use crate::test_utils::run_test_with_mmr_gadget; + use sp_runtime::generic::BlockId; + use std::time::Duration; + + #[test] + fn mmr_first_block_is_computed_correctly() { + // Check the case where the first block is also the first block with MMR. + run_test_with_mmr_gadget(|client| async move { + // G -> A1 -> A2 + // | + // | -> first mmr block + + let a1 = client.import_block(&BlockId::Number(0), b"a1", Some(0)).await; + let a2 = client.import_block(&BlockId::Hash(a1.hash()), b"a2", Some(1)).await; + + client.finalize_block(a1.hash(), Some(1)); + async_std::task::sleep(Duration::from_millis(200)).await; + // expected finalized heads: a1 + client.assert_canonicalized(&[&a1]); + client.assert_not_pruned(&[&a2]); + }); + + // Check the case where the first block with MMR comes later. + run_test_with_mmr_gadget(|client| async move { + // G -> A1 -> A2 -> A3 -> A4 -> A5 -> A6 + // | + // | -> first mmr block + + let a1 = client.import_block(&BlockId::Number(0), b"a1", None).await; + let a2 = client.import_block(&BlockId::Hash(a1.hash()), b"a2", None).await; + let a3 = client.import_block(&BlockId::Hash(a2.hash()), b"a3", None).await; + let a4 = client.import_block(&BlockId::Hash(a3.hash()), b"a4", Some(0)).await; + let a5 = client.import_block(&BlockId::Hash(a4.hash()), b"a5", Some(1)).await; + let a6 = client.import_block(&BlockId::Hash(a5.hash()), b"a6", Some(2)).await; + + client.finalize_block(a5.hash(), Some(2)); + async_std::task::sleep(Duration::from_millis(200)).await; + // expected finalized heads: a4, a5 + client.assert_canonicalized(&[&a4, &a5]); + client.assert_not_pruned(&[&a6]); + }); + } + + #[test] + fn does_not_panic_on_invalid_num_mmr_blocks() { + run_test_with_mmr_gadget(|client| async move { + // G -> A1 + // | + // | -> first mmr block + + let a1 = client.import_block(&BlockId::Number(0), b"a1", Some(0)).await; + + // Simulate the case where the runtime says that there are 2 mmr_blocks when in fact + // there is only 1. 
+ client.finalize_block(a1.hash(), Some(2)); + async_std::task::sleep(Duration::from_millis(200)).await; + // expected finalized heads: - + client.assert_not_canonicalized(&[&a1]); + }); + } +} diff --git a/client/merkle-mountain-range/src/test_utils.rs b/client/merkle-mountain-range/src/test_utils.rs new file mode 100644 index 0000000000000..92c46b6dc181d --- /dev/null +++ b/client/merkle-mountain-range/src/test_utils.rs @@ -0,0 +1,326 @@ +use futures::{executor::LocalPool, task::LocalSpawn, FutureExt}; +use std::{ + future::Future, + sync::{Arc, Mutex}, + time::Duration, +}; + +use sc_block_builder::BlockBuilderProvider; +use sc_client_api::{ + Backend as BackendT, BlockchainEvents, FinalityNotifications, ImportNotifications, + StorageEventStream, StorageKey, +}; +use sc_offchain::OffchainDb; +use sp_api::{ApiRef, ProvideRuntimeApi}; +use sp_blockchain::{BlockStatus, CachedHeaderMetadata, HeaderBackend, HeaderMetadata, Info}; +use sp_consensus::BlockOrigin; +use sp_core::{ + offchain::{DbExternalities, StorageKind}, + H256, +}; +use sp_mmr_primitives as mmr; +use sp_mmr_primitives::{utils::NodesUtils, LeafIndex, NodeIndex}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT}, +}; +use substrate_test_runtime_client::{ + runtime::{Block, BlockNumber, Hash, Header}, + Backend, BlockBuilderExt, Client, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, + TestClientBuilder, TestClientBuilderExt, +}; + +use crate::MmrGadget; + +type MmrHash = H256; + +struct MockRuntimeApiData { + num_blocks: BlockNumber, +} + +#[derive(Clone)] +pub struct MockRuntimeApi { + data: Arc>, +} + +impl MockRuntimeApi { + pub const INDEXING_PREFIX: &'static [u8] = b"mmr_test"; +} + +pub struct MmrBlock { + block: Block, + leaf_idx: Option, + leaf_data: Vec, +} + +#[derive(Clone, Copy)] +pub enum OffchainKeyType { + Temp, + Canon, +} + +impl MmrBlock { + pub fn hash(&self) -> Hash { + self.block.hash() + } + + pub fn parent_hash(&self) -> Hash { + *self.block.header.parent_hash() + } + + pub fn get_offchain_key(&self, node: NodeIndex, key_type: OffchainKeyType) -> Vec { + match key_type { + OffchainKeyType::Temp => NodesUtils::node_temp_offchain_key::
<Header>
( + MockRuntimeApi::INDEXING_PREFIX, + node, + self.block.header.parent_hash(), + ), + OffchainKeyType::Canon => + NodesUtils::node_canon_offchain_key(MockRuntimeApi::INDEXING_PREFIX, node), + } + } +} + +pub struct MockClient { + client: Mutex>, + backend: Arc, + runtime_api_params: Arc>, +} + +impl MockClient { + fn new() -> Self { + let client_builder = TestClientBuilder::new().enable_offchain_indexing_api(); + let (client, backend) = client_builder.build_with_backend(); + MockClient { + client: Mutex::new(client), + backend, + runtime_api_params: Arc::new(Mutex::new(MockRuntimeApiData { num_blocks: 0 })), + } + } + + fn offchain_db(&self) -> OffchainDb<>::OffchainStorage> { + OffchainDb::new(self.backend.offchain_storage().unwrap()) + } + + pub async fn import_block( + &self, + at: &BlockId, + name: &[u8], + maybe_leaf_idx: Option, + ) -> MmrBlock { + let mut client = self.client.lock().unwrap(); + + let mut block_builder = client.new_block_at(at, Default::default(), false).unwrap(); + // Make sure the block has a different hash than its siblings + block_builder + .push_storage_change(b"name".to_vec(), Some(name.to_vec())) + .unwrap(); + let block = block_builder.build().unwrap().block; + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + + let parent_hash = *block.header.parent_hash(); + // Simulate writing MMR nodes in offchain storage + if let Some(leaf_idx) = maybe_leaf_idx { + let mut offchain_db = self.offchain_db(); + for node in NodesUtils::right_branch_ending_in_leaf(leaf_idx) { + let temp_key = NodesUtils::node_temp_offchain_key::
<Header>
( + MockRuntimeApi::INDEXING_PREFIX, + node, + &parent_hash, + ); + offchain_db.local_storage_set( + StorageKind::PERSISTENT, + &temp_key, + parent_hash.as_ref(), + ) + } + } + + MmrBlock { block, leaf_idx: maybe_leaf_idx, leaf_data: parent_hash.as_ref().to_vec() } + } + + pub fn finalize_block(&self, hash: Hash, maybe_num_mmr_blocks: Option) { + let client = self.client.lock().unwrap(); + if let Some(num_mmr_blocks) = maybe_num_mmr_blocks { + self.runtime_api_params.lock().unwrap().num_blocks = num_mmr_blocks; + } + + client.finalize_block(hash, None).unwrap(); + } + + pub fn check_offchain_storage( + &self, + key_type: OffchainKeyType, + blocks: &[&MmrBlock], + mut f: F, + ) where + F: FnMut(Option>, &MmrBlock), + { + let mut offchain_db = self.offchain_db(); + for mmr_block in blocks { + for node in NodesUtils::right_branch_ending_in_leaf(mmr_block.leaf_idx.unwrap()) { + let temp_key = mmr_block.get_offchain_key(node, key_type); + let val = offchain_db.local_storage_get(StorageKind::PERSISTENT, &temp_key); + f(val, mmr_block); + } + } + } + + pub fn assert_pruned(&self, blocks: &[&MmrBlock]) { + self.check_offchain_storage(OffchainKeyType::Temp, blocks, |val, _block| { + assert!(val.is_none()); + }) + } + + pub fn assert_not_pruned(&self, blocks: &[&MmrBlock]) { + self.check_offchain_storage(OffchainKeyType::Temp, blocks, |val, block| { + assert_eq!(val.as_ref(), Some(&block.leaf_data)); + }) + } + + pub fn assert_canonicalized(&self, blocks: &[&MmrBlock]) { + self.check_offchain_storage(OffchainKeyType::Canon, blocks, |val, block| { + assert_eq!(val.as_ref(), Some(&block.leaf_data)); + }); + + self.assert_pruned(blocks); + } + + pub fn assert_not_canonicalized(&self, blocks: &[&MmrBlock]) { + self.check_offchain_storage(OffchainKeyType::Canon, blocks, |val, _block| { + assert!(val.is_none()); + }); + + self.assert_not_pruned(blocks); + } +} + +impl HeaderMetadata for MockClient { + type Error = as HeaderMetadata>::Error; + + fn header_metadata(&self, hash: Hash) -> Result, Self::Error> { + self.client.lock().unwrap().header_metadata(hash) + } + + fn insert_header_metadata(&self, _hash: Hash, _header_metadata: CachedHeaderMetadata) { + todo!() + } + + fn remove_header_metadata(&self, _hash: Hash) { + todo!() + } +} + +impl HeaderBackend for MockClient { + fn header(&self, id: BlockId) -> sc_client_api::blockchain::Result> { + self.client.lock().unwrap().header(&id) + } + + fn info(&self) -> Info { + self.client.lock().unwrap().info() + } + + fn status(&self, id: BlockId) -> sc_client_api::blockchain::Result { + self.client.lock().unwrap().status(id) + } + + fn number(&self, hash: Hash) -> sc_client_api::blockchain::Result> { + self.client.lock().unwrap().number(hash) + } + + fn hash(&self, number: BlockNumber) -> sc_client_api::blockchain::Result> { + self.client.lock().unwrap().hash(number) + } +} + +impl BlockchainEvents for MockClient { + fn import_notification_stream(&self) -> ImportNotifications { + unimplemented!() + } + + fn finality_notification_stream(&self) -> FinalityNotifications { + self.client.lock().unwrap().finality_notification_stream() + } + + fn storage_changes_notification_stream( + &self, + _filter_keys: Option<&[StorageKey]>, + _child_filter_keys: Option<&[(StorageKey, Option>)]>, + ) -> sc_client_api::blockchain::Result> { + unimplemented!() + } +} + +impl ProvideRuntimeApi for MockClient { + type Api = MockRuntimeApi; + + fn runtime_api(&self) -> ApiRef<'_, Self::Api> { + MockRuntimeApi { data: self.runtime_api_params.clone() }.into() + } +} + 
+sp_api::mock_impl_runtime_apis! { + impl mmr::MmrApi for MockRuntimeApi { + fn mmr_root() -> Result { + Err(mmr::Error::PalletNotIncluded) + } + + fn num_mmr_blocks(&self) -> Result { + Ok(self.data.lock().unwrap().num_blocks) + } + + fn generate_proof( + &self, + _block_numbers: Vec, + _best_known_block_number: Option, + ) -> Result<(Vec, mmr::Proof), mmr::Error> { + Err(mmr::Error::PalletNotIncluded) + } + + fn verify_proof(_leaves: Vec, _proof: mmr::Proof) + -> Result<(), mmr::Error> + { + Err(mmr::Error::PalletNotIncluded) + } + + fn verify_proof_stateless( + _root: MmrHash, + _leaves: Vec, + _proof: mmr::Proof + ) -> Result<(), mmr::Error> { + Err(mmr::Error::PalletNotIncluded) + } + } +} + +pub fn run_test_with_mmr_gadget(f: F) +where + F: FnOnce(Arc) -> Fut + 'static, + Fut: Future, +{ + let mut pool = LocalPool::new(); + let client = Arc::new(MockClient::new()); + + let client_clone = client.clone(); + pool.spawner() + .spawn_local_obj( + async move { + let backend = client_clone.backend.clone(); + MmrGadget::start( + client_clone.clone(), + backend, + MockRuntimeApi::INDEXING_PREFIX.to_vec(), + ) + .await + } + .boxed_local() + .into(), + ) + .unwrap(); + + pool.run_until(async move { + async_std::task::sleep(Duration::from_millis(200)).await; + + f(client).await + }); +} diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index b64a847b15943..3808b2711f56a 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -30,6 +30,7 @@ sp-std = { version = "5.0.0", default-features = false, path = "../../primitives sp-runtime-interface = { version = "7.0.0", default-features = false, path = "../../primitives/runtime-interface" } sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../frame/support" } +sp-mmr-primitives = { version = "4.0.0-dev", default-features = false, path = "../../primitives/merkle-mountain-range" } sp-version = { version = "5.0.0", default-features = false, path = "../../primitives/version" } sp-session = { version = "4.0.0-dev", default-features = false, path = "../../primitives/session" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } @@ -38,6 +39,7 @@ pallet-babe = { version = "4.0.0-dev", default-features = false, path = "../../f frame-system = { version = "4.0.0-dev", default-features = false, path = "../../frame/system" } frame-system-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../frame/system/rpc/runtime-api" } pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../frame/timestamp" } +pallet-mmr = { version = "4.0.0-dev", default-features = false, path = "../../frame/merkle-mountain-range" } sp-finality-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../primitives/finality-grandpa" } sp-trie = { version = "7.0.0", default-features = false, path = "../../primitives/trie" } sp-transaction-pool = { version = "4.0.0-dev", default-features = false, path = "../../primitives/transaction-pool" } @@ -87,6 +89,7 @@ std = [ "sp-runtime-interface/std", "sp-io/std", "frame-support/std", + "sp-mmr-primitives/std", "sp-version/std", "serde", "sp-session/std", @@ -98,6 +101,7 @@ std = [ "frame-system-rpc-runtime-api/std", "frame-system/std", "pallet-timestamp/std", + "pallet-mmr/std", "sc-service", "sp-finality-grandpa/std", "sp-trie/std", diff --git a/test-utils/runtime/src/lib.rs 
b/test-utils/runtime/src/lib.rs index 8bda4ea602428..86ca45d0396e0 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -29,6 +29,7 @@ use sp_std::{marker::PhantomData, prelude::*}; use sp_application_crypto::{ecdsa, ed25519, sr25519, RuntimeAppPublic}; use sp_core::{offchain::KeyTypeId, OpaqueMetadata, RuntimeDebug}; +// use sp_mmr_primitives as mmr; use sp_trie::{ trie_types::{TrieDBBuilder, TrieDBMutBuilderV1}, PrefixedMemoryDB, StorageProof, @@ -66,6 +67,7 @@ use sp_version::RuntimeVersion; // Ensure Babe and Aura use the same crypto to simplify things a bit. pub use sp_consensus_babe::{AllowedSlots, AuthorityId, Slot}; +use sp_runtime::traits::Keccak256; pub type AuraId = sp_consensus_aura::sr25519::AuthorityId; @@ -608,7 +610,7 @@ impl From> for Extrinsic { } } -impl frame_system::Config for Runtime { +impl frame_system::pallet::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = RuntimeBlockWeights; type BlockLength = RuntimeBlockLength; @@ -635,6 +637,19 @@ impl frame_system::Config for Runtime { type MaxConsumers = ConstU32<16>; } +type MmrHash = ::Output; + +impl pallet_mmr::Config for Runtime { + const INDEXING_PREFIX: &'static [u8] = b"mmr"; + type Hashing = Keccak256; + type Hash = MmrHash; + type OnNewRoot = (); + type WeightInfo = (); + type LeafData = pallet_mmr::ParentNumberAndHash; +} + +pub type MmrHashing = ::Hashing; + impl system::Config for Runtime {} impl pallet_timestamp::Config for Runtime { @@ -966,13 +981,13 @@ cfg_if! { } } - impl beefy_primitives::BeefyApi for RuntimeApi { + impl beefy_primitives::BeefyApi for Runtime { fn validator_set() -> Option> { None } } - impl beefy_merkle_tree::BeefyMmrApi for RuntimeApi { + impl beefy_merkle_tree::BeefyMmrApi for Runtime { fn authority_set_proof() -> beefy_primitives::mmr::BeefyAuthoritySet { Default::default() } @@ -987,6 +1002,49 @@ cfg_if! { 0 } } + + // impl mmr::MmrApi for Runtime { + // fn mmr_root() -> Result { + // Ok(Mmr::mmr_root()) + // } + // + // fn generate_proof( + // block_numbers: Vec, + // best_known_block_number: Option, + // ) -> Result<(Vec, mmr::Proof), mmr::Error> { + // Mmr::generate_proof(block_numbers, best_known_block_number).map( + // |(leaves, proof)| { + // ( + // leaves + // .into_iter() + // .map(|leaf| mmr::EncodableOpaqueLeaf::from_leaf(&leaf)) + // .collect(), + // proof, + // ) + // }, + // ) + // } + // + // fn verify_proof(leaves: Vec, proof: mmr::Proof) + // -> Result<(), mmr::Error> + // { + // pub type MmrLeaf = <::LeafData as mmr::LeafDataProvider>::LeafData; + // let leaves = leaves.into_iter().map(|leaf| + // leaf.into_opaque_leaf() + // .try_decode() + // .ok_or(mmr::Error::Verify)).collect::, mmr::Error>>()?; + // Mmr::verify_leaves(leaves, proof) + // } + // + // fn verify_proof_stateless( + // root: Hash, + // leaves: Vec, + // proof: mmr::Proof + // ) -> Result<(), mmr::Error> { + // let nodes = leaves.into_iter().map(|leaf|mmr::DataOrHash::Data(leaf.into_opaque_leaf())).collect(); + // pallet_mmr::verify_leaves_proof::(root, nodes, proof) + // } + // } } } else { impl_runtime_apis! 
{ From ebb81df0b874551efee9e4f8128711128b2665c0 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Mon, 14 Nov 2022 14:26:52 +0200 Subject: [PATCH 05/13] Remove prunning logic from the MMR pallet --- frame/merkle-mountain-range/src/lib.rs | 49 +--- .../merkle-mountain-range/src/mmr/storage.rs | 205 ++--------------- frame/merkle-mountain-range/src/tests.rs | 212 +----------------- 3 files changed, 26 insertions(+), 440 deletions(-) diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs index dc24136118f89..9c8ec63b49821 100644 --- a/frame/merkle-mountain-range/src/lib.rs +++ b/frame/merkle-mountain-range/src/lib.rs @@ -56,7 +56,7 @@ //! NOTE This pallet is experimental and not proven to work in production. #![cfg_attr(not(feature = "std"), no_std)] -use frame_support::{log, traits::Get, weights::Weight}; +use frame_support::{log, weights::Weight}; use sp_runtime::{ traits::{self, One, Saturating}, SaturatedConversion, @@ -248,42 +248,6 @@ pub mod pallet { T::WeightInfo::on_initialize(peaks_before.max(peaks_after)) } - - fn offchain_worker(n: T::BlockNumber) { - use mmr::storage::{OffchainStorage, Storage}; - // The MMR nodes can be found in offchain db under either: - // - fork-unique keys `(prefix, pos, parent_hash)`, or, - // - "canonical" keys `(prefix, pos)`, - // depending on how many blocks in the past the node at position `pos` was - // added to the MMR. - // - // For the fork-unique keys, the MMR pallet depends on - // `frame_system::block_hash(parent_num)` mappings to find the relevant parent block - // hashes, so it is limited by `frame_system::BlockHashCount` in terms of how many - // historical forks it can track. Nodes added to MMR by block `N` can be found in - // offchain db at: - // - fork-unique keys `(prefix, pos, parent_hash)` when (`N` >= `latest_block` - - // `frame_system::BlockHashCount`); - // - "canonical" keys `(prefix, pos)` when (`N` < `latest_block` - - // `frame_system::BlockHashCount`); - // - // The offchain worker is responsible for maintaining the nodes' positions in - // offchain db as the chain progresses by moving a rolling window of the same size as - // `frame_system::block_hash` map, where nodes/leaves added by blocks that are just - // about to exit the window are "canonicalized" so that their offchain key no longer - // depends on `parent_hash`. - // - // This approach works to eliminate fork-induced leaf collisions in offchain db, - // under the assumption that no fork will be deeper than `frame_system::BlockHashCount` - // blocks: - // entries pertaining to block `N` where `N < current-BlockHashCount` are moved to a - // key based solely on block number. The only way to have collisions is if two - // competing forks are deeper than `frame_system::BlockHashCount` blocks and they - // both "canonicalize" their view of block `N` - // Once a block is canonicalized, all MMR entries pertaining to sibling blocks from - // other forks are pruned from offchain db. - Storage::>::canonicalize_and_prune(n); - } } } @@ -335,17 +299,6 @@ impl, I: 'static> Pallet { NodesUtils::node_canon_offchain_key(&T::INDEXING_PREFIX, pos) } - /// Return size of rolling window of leaves saved in offchain under fork-unique keys. - /// - /// Leaves outside this window are canonicalized. - /// Window size is `frame_system::BlockHashCount - 1` to make sure fork-unique keys - /// can be built using `frame_system::block_hash` map. 
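For a concrete sense of the bookkeeping being removed here (and reintroduced on the client side later in this series), a rough sketch of the window arithmetic the old offchain worker performed; the numbers are invented for illustration:

    // Hypothetical values.
    let block_hash_count: u64 = 256;                        // frame_system::BlockHashCount
    let window_size = block_hash_count.saturating_sub(1);   // 255 leaves kept fork-unique
    let leaves: u64 = 300;                                   // leaves currently in the MMR
    // The leaf falling out of the rolling window gets canonicalized, and its
    // siblings added by other forks at the same height get pruned.
    let to_canon_leaf = leaves.saturating_sub(window_size);  // leaf 45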
- fn offchain_canonicalization_window() -> LeafIndex { - let window_size: LeafIndex = - ::BlockHashCount::get().saturated_into(); - window_size.saturating_sub(1) - } - /// Provide the parent number for the block that added `leaf_index` to the MMR. fn leaf_index_to_parent_block_num( leaf_index: LeafIndex, diff --git a/frame/merkle-mountain-range/src/mmr/storage.rs b/frame/merkle-mountain-range/src/mmr/storage.rs index efa3501024da4..3fe34623e30e1 100644 --- a/frame/merkle-mountain-range/src/mmr/storage.rs +++ b/frame/merkle-mountain-range/src/mmr/storage.rs @@ -18,9 +18,9 @@ //! An MMR storage implementation. use codec::Encode; -use frame_support::log::{debug, error, trace}; +use frame_support::log::{debug, trace}; use sp_core::offchain::StorageKind; -use sp_io::{offchain, offchain_index}; +use sp_io::offchain_index; use sp_mmr_primitives::{mmr_lib, mmr_lib::helper, utils::NodesUtils}; use sp_std::iter::Peekable; #[cfg(not(feature = "std"))] @@ -48,51 +48,6 @@ pub struct RuntimeStorage; /// DOES NOT support adding new items to the MMR. pub struct OffchainStorage; -/// Suffix of key for the 'pruning_map'. -/// -/// Nodes and leaves are initially saved under fork-specific keys in offchain db, -/// eventually they are "canonicalized" and this map is used to prune non-canon entries. -const OFFCHAIN_PRUNING_MAP_KEY_SUFFIX: &str = "pruning_map"; - -/// Used to store offchain mappings of `BlockNumber -> Vec[Hash]` to track all forks. -/// Size of this offchain map is at most `frame_system::BlockHashCount`, its entries are pruned -/// as part of the mechanism that prunes the forks this map tracks. -pub(crate) struct PruningMap(sp_std::marker::PhantomData<(T, I)>); -impl PruningMap -where - T: Config, - I: 'static, -{ - pub(crate) fn pruning_map_offchain_key(block: T::BlockNumber) -> sp_std::prelude::Vec { - (T::INDEXING_PREFIX, block, OFFCHAIN_PRUNING_MAP_KEY_SUFFIX).encode() - } - - /// Append `hash` to the list of parent hashes for `block` in offchain db. - pub fn append(block: T::BlockNumber, hash: ::Hash) { - let map_key = Self::pruning_map_offchain_key(block); - offchain::local_storage_get(StorageKind::PERSISTENT, &map_key) - .and_then(|v| codec::Decode::decode(&mut &*v).ok()) - .or_else(|| Some(Vec::<::Hash>::new())) - .map(|mut parents| { - parents.push(hash); - offchain::local_storage_set( - StorageKind::PERSISTENT, - &map_key, - &Encode::encode(&parents), - ); - }); - } - - /// Remove list of parent hashes for `block` from offchain db and return it. - pub fn remove(block: T::BlockNumber) -> Option::Hash>> { - let map_key = Self::pruning_map_offchain_key(block); - offchain::local_storage_get(StorageKind::PERSISTENT, &map_key).and_then(|v| { - offchain::local_storage_clear(StorageKind::PERSISTENT, &map_key); - codec::Decode::decode(&mut &*v).ok() - }) - } -} - /// A storage layer for MMR. /// /// There are two different implementations depending on the use case. @@ -111,100 +66,6 @@ where I: 'static, L: primitives::FullLeaf, { - /// Move nodes and leaves added by block `N` in offchain db from _fork-aware key_ to - /// _canonical key_, - /// where `N` is `frame_system::BlockHashCount` blocks behind current block number. - /// - /// This "canonicalization" process is required because the _fork-aware key_ value depends - /// on `frame_system::block_hash(block_num)` map which only holds the last - /// `frame_system::BlockHashCount` blocks. - /// - /// For the canonicalized block, prune all nodes pertaining to other forks from offchain db. 
- /// - /// Should only be called from offchain context, because it requires both read and write - /// access to offchain db. - pub(crate) fn canonicalize_and_prune(block: T::BlockNumber) { - // Add "block_num -> hash" mapping to offchain db, - // with all forks pushing hashes to same entry (same block number). - let parent_hash = >::parent_hash(); - PruningMap::::append(block, parent_hash); - - // Effectively move a rolling window of fork-unique leaves. Once out of the window, leaves - // are "canonicalized" in offchain by moving them under `Pallet::node_canon_offchain_key`. - let leaves = NumberOfLeaves::::get(); - let window_size = Pallet::::offchain_canonicalization_window(); - if leaves >= window_size { - // Move the rolling window towards the end of `block_num->hash` mappings available - // in the runtime: we "canonicalize" the leaf at the end, - let to_canon_leaf = leaves.saturating_sub(window_size); - // and all the nodes added by that leaf. - let to_canon_nodes = NodesUtils::right_branch_ending_in_leaf(to_canon_leaf); - debug!( - target: "runtime::mmr::offchain", "Nodes to canon for leaf {}: {:?}", - to_canon_leaf, to_canon_nodes - ); - // For this block number there may be node entries saved from multiple forks. - let to_canon_block_num = - Pallet::::leaf_index_to_parent_block_num(to_canon_leaf, leaves); - // Only entries under this hash (retrieved from state on current canon fork) are to be - // persisted. All entries added by same block number on other forks will be cleared. - let to_canon_hash = >::block_hash(to_canon_block_num); - - Self::canonicalize_nodes_for_hash(&to_canon_nodes, &to_canon_hash); - // Get all the forks to prune, also remove them from the offchain pruning_map. - PruningMap::::remove(to_canon_block_num) - .map(|forks| { - Self::prune_nodes_for_forks(&to_canon_nodes, forks); - }) - .unwrap_or_else(|| { - error!( - target: "runtime::mmr::offchain", - "Offchain: could not prune: no entry in pruning map for block {:?}", - to_canon_block_num - ); - }) - } - } - - fn prune_nodes_for_forks(nodes: &[NodeIndex], forks: Vec<::Hash>) { - for hash in forks { - for pos in nodes { - let key = Pallet::::node_temp_offchain_key(*pos, &hash); - debug!( - target: "runtime::mmr::offchain", - "Clear elem at pos {} with key {:?}", - pos, key - ); - offchain::local_storage_clear(StorageKind::PERSISTENT, &key); - } - } - } - - fn canonicalize_nodes_for_hash( - to_canon_nodes: &[NodeIndex], - to_canon_hash: &::Hash, - ) { - for pos in to_canon_nodes { - let key = Pallet::::node_temp_offchain_key(*pos, to_canon_hash); - // Retrieve the element from Off-chain DB under fork-aware key. - if let Some(elem) = offchain::local_storage_get(StorageKind::PERSISTENT, &key) { - let canon_key = Pallet::::node_canon_offchain_key(*pos); - // Add under new canon key. - offchain::local_storage_set(StorageKind::PERSISTENT, &canon_key, &elem); - debug!( - target: "runtime::mmr::offchain", - "Moved elem at pos {} from key {:?} to canon key {:?}", - pos, key, canon_key - ); - } else { - error!( - target: "runtime::mmr::offchain", - "Could not canonicalize elem at pos {} using key {:?}", - pos, key - ); - } - } - } } impl mmr_lib::MMRStore> for Storage @@ -218,42 +79,31 @@ where // Find out which leaf added node `pos` in the MMR. let ancestor_leaf_idx = NodesUtils::leaf_index_that_added_node(pos); - let window_size = Pallet::::offchain_canonicalization_window(); - // Leaves older than this window should have been canonicalized. 
- if leaves.saturating_sub(ancestor_leaf_idx) > window_size { - let key = Pallet::::node_canon_offchain_key(pos); - debug!( - target: "runtime::mmr::offchain", "offchain db get {}: leaf idx {:?}, key {:?}", - pos, ancestor_leaf_idx, key - ); - // Just for safety, to easily handle runtime upgrades where any of the window params - // change and maybe we mess up storage migration, - // return _if and only if_ node is found (in normal conditions it's always found), - if let Some(elem) = sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &key) { - return Ok(codec::Decode::decode(&mut &*elem).ok()) - } - // BUT if we DID MESS UP, fall through to searching node using fork-specific key. + // We should only get here when trying to generate proofs. The client requests + // for proofs for finalized blocks, which should usually be already canonicalized, + // unless the MMR client gadget has a delay. + let key = Pallet::::node_canon_offchain_key(pos); + debug!( + target: "runtime::mmr::offchain", "offchain db get {}: leaf idx {:?}, canon key {:?}", + pos, ancestor_leaf_idx, key + ); + // Try to retrieve the element from Off-chain DB. + if let Some(elem) = sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &key) { + return Ok(codec::Decode::decode(&mut &*elem).ok()) } - // Leaves still within the window will be found in offchain db under fork-aware keys. + // Fall through to searching node using fork-specific key. let ancestor_parent_block_num = Pallet::::leaf_index_to_parent_block_num(ancestor_leaf_idx, leaves); let ancestor_parent_hash = >::block_hash(ancestor_parent_block_num); - let key = Pallet::::node_temp_offchain_key(pos, &ancestor_parent_hash); + let temp_key = Pallet::::node_temp_offchain_key(pos, &ancestor_parent_hash); debug!( - target: "runtime::mmr::offchain", "offchain db get {}: leaf idx {:?}, hash {:?}, key {:?}", - pos, ancestor_leaf_idx, ancestor_parent_hash, key + target: "runtime::mmr::offchain", + "offchain db get {}: leaf idx {:?}, hash {:?}, temp key {:?}", + pos, ancestor_leaf_idx, ancestor_parent_hash, temp_key ); // Retrieve the element from Off-chain DB. - Ok(sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &key) - .or_else(|| { - // Again, this is just us being extra paranoid. - // We get here only if we mess up a storage migration for a runtime upgrades where - // say the window is increased, and for a little while following the upgrade there's - // leaves inside new 'window' that had been already canonicalized before upgrade. - let key = Pallet::::node_canon_offchain_key(pos); - sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &key) - }) + Ok(sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &temp_key) .and_then(|v| codec::Decode::decode(&mut &*v).ok())) } @@ -342,22 +192,15 @@ where ) { let encoded_node = node.encode(); // We store this leaf offchain keyed by `(parent_hash, node_index)` to make it - // fork-resistant. Offchain worker task will "canonicalize" it - // `frame_system::BlockHashCount` blocks later, when we are not worried about forks anymore - // (multi-era-deep forks should not happen). - let key = Pallet::::node_temp_offchain_key(pos, parent_hash); + // fork-resistant. The MMR client gadget task will "canonicalize" it on the first + // finality notification that follows, when we are not worried about forks anymore. 
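Since nodes are now written only under the fork-unique key at block import and copied to the canonical key by the MMR client gadget at finalization, it is worth seeing the two key helpers side by side. A minimal sketch using the `NodesUtils` helpers referenced throughout this series; the wrapper function and its parameters are illustrative only:

    use sp_mmr_primitives::{utils::NodesUtils, NodeIndex};
    use sp_runtime::traits::Block;

    /// Illustrative only: the two offchain keys a node at `pos` can live under.
    fn offchain_keys_for_node<B: Block>(
        indexing_prefix: &[u8],
        pos: NodeIndex,
        parent_hash: &B::Hash,
    ) -> (Vec<u8>, Vec<u8>) {
        // Fork-unique key: written via the indexing API when the node is appended;
        // it embeds the parent hash, so siblings on competing forks never collide.
        let temp =
            NodesUtils::node_temp_offchain_key::<B::Header>(indexing_prefix, pos, parent_hash);
        // Canonical key: the gadget copies the value here once the block that
        // appended the node is finalized, and prunes stale fork-unique entries.
        let canon = NodesUtils::node_canon_offchain_key(indexing_prefix, pos);
        (temp, canon)
    }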
+ let temp_key = Pallet::::node_temp_offchain_key(pos, parent_hash); debug!( target: "runtime::mmr::offchain", "offchain db set: pos {} parent_hash {:?} key {:?}", - pos, parent_hash, key + pos, parent_hash, temp_key ); // Indexing API is used to store the full node content. - offchain_index::set(&key, &encoded_node); - // We also directly save the full node under the "canonical" key. - // This is superfluous for the normal case - this entry will possibly be overwritten - // by forks, and will also be overwritten by "offchain_worker canonicalization". - // But it is required for blocks imported during initial sync where none of the above apply - // (`offchain_worker` doesn't run for initial sync blocks). - offchain_index::set(&Pallet::::node_canon_offchain_key(pos), &encoded_node); + offchain_index::set(&temp_key, &encoded_node); } } diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs index 309e25c72047e..233dc1824dec7 100644 --- a/frame/merkle-mountain-range/src/tests.rs +++ b/frame/merkle-mountain-range/src/tests.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{mmr::storage::PruningMap, mock::*, *}; +use crate::{mock::*, *}; use frame_support::traits::{Get, OnInitialize}; use sp_core::{ @@ -170,14 +170,12 @@ fn should_append_to_mmr_when_on_initialize_is_called() { offchain_db.get(&MMR::node_temp_offchain_key(0, &parent_b1)).map(decode_node), expected ); - assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(0)).map(decode_node), expected); let expected = Some(mmr::Node::Data(((1, H256::repeat_byte(2)), LeafData::new(2)))); assert_eq!( offchain_db.get(&MMR::node_temp_offchain_key(1, &parent_b2)).map(decode_node), expected ); - assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(1)).map(decode_node), expected); let expected = Some(mmr::Node::Hash(hex( "672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854", @@ -186,10 +184,8 @@ fn should_append_to_mmr_when_on_initialize_is_called() { offchain_db.get(&MMR::node_temp_offchain_key(2, &parent_b2)).map(decode_node), expected ); - assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(2)).map(decode_node), expected); assert_eq!(offchain_db.get(&MMR::node_temp_offchain_key(3, &parent_b2)), None); - assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(3)), None); } #[test] @@ -702,212 +698,6 @@ fn should_verify_on_the_next_block_since_there_is_no_pruning_yet() { }); } -#[test] -fn should_verify_pruning_map() { - use sp_core::offchain::StorageKind; - use sp_io::offchain; - - let _ = env_logger::try_init(); - let mut ext = new_test_ext(); - register_offchain_ext(&mut ext); - - ext.execute_with(|| { - type TestPruningMap = PruningMap; - fn offchain_decoded(key: Vec) -> Option> { - offchain::local_storage_get(StorageKind::PERSISTENT, &key) - .and_then(|v| codec::Decode::decode(&mut &*v).ok()) - } - - // test append - { - TestPruningMap::append(1, H256::repeat_byte(1)); - - TestPruningMap::append(2, H256::repeat_byte(21)); - TestPruningMap::append(2, H256::repeat_byte(22)); - - TestPruningMap::append(3, H256::repeat_byte(31)); - TestPruningMap::append(3, H256::repeat_byte(32)); - TestPruningMap::append(3, H256::repeat_byte(33)); - - // `0` not present - let map_key = TestPruningMap::pruning_map_offchain_key(0); - assert_eq!(offchain::local_storage_get(StorageKind::PERSISTENT, &map_key), None); - - // verify `1` entries - let map_key = TestPruningMap::pruning_map_offchain_key(1); - let expected 
= vec![H256::repeat_byte(1)]; - assert_eq!(offchain_decoded(map_key), Some(expected)); - - // verify `2` entries - let map_key = TestPruningMap::pruning_map_offchain_key(2); - let expected = vec![H256::repeat_byte(21), H256::repeat_byte(22)]; - assert_eq!(offchain_decoded(map_key), Some(expected)); - - // verify `3` entries - let map_key = TestPruningMap::pruning_map_offchain_key(3); - let expected = - vec![H256::repeat_byte(31), H256::repeat_byte(32), H256::repeat_byte(33)]; - assert_eq!(offchain_decoded(map_key), Some(expected)); - - // `4` not present - let map_key = TestPruningMap::pruning_map_offchain_key(4); - assert_eq!(offchain::local_storage_get(StorageKind::PERSISTENT, &map_key), None); - } - - // test remove - { - // `0` doesn't return anything - assert_eq!(TestPruningMap::remove(0), None); - - // remove and verify `1` entries - let expected = vec![H256::repeat_byte(1)]; - assert_eq!(TestPruningMap::remove(1), Some(expected)); - - // remove and verify `2` entries - let expected = vec![H256::repeat_byte(21), H256::repeat_byte(22)]; - assert_eq!(TestPruningMap::remove(2), Some(expected)); - - // remove and verify `3` entries - let expected = - vec![H256::repeat_byte(31), H256::repeat_byte(32), H256::repeat_byte(33)]; - assert_eq!(TestPruningMap::remove(3), Some(expected)); - - // `4` doesn't return anything - assert_eq!(TestPruningMap::remove(4), None); - - // no entries left in offchain map - for block in 0..5 { - let map_key = TestPruningMap::pruning_map_offchain_key(block); - assert_eq!(offchain::local_storage_get(StorageKind::PERSISTENT, &map_key), None); - } - } - }) -} - -#[test] -fn should_canonicalize_offchain() { - use frame_support::traits::Hooks; - - let _ = env_logger::try_init(); - let mut ext = new_test_ext(); - register_offchain_ext(&mut ext); - - // adding 13 blocks that we'll later check have been canonicalized, - // (test assumes `13 < frame_system::BlockHashCount`). - let to_canon_count = 13u32; - - // add 13 blocks and verify leaves and nodes for them have been added to - // offchain MMR using fork-proof keys. - for blocknum in 0..to_canon_count { - ext.execute_with(|| { - new_block(); - as Hooks>::offchain_worker(blocknum.into()); - }); - ext.persist_offchain_overlay(); - } - let offchain_db = ext.offchain_db(); - ext.execute_with(|| { - // verify leaves added by blocks 1..=13 - for block_num in 1..=to_canon_count { - let parent_num: BlockNumber = (block_num - 1).into(); - let leaf_index = u64::from(block_num - 1); - let pos = helper::leaf_index_to_pos(leaf_index.into()); - let parent_hash = >::block_hash(parent_num); - // Available in offchain db under both fork-proof key and canon key. - // We'll later check it is pruned from fork-proof key. - let expected = Some(mmr::Node::Data(( - (leaf_index, H256::repeat_byte(u8::try_from(block_num).unwrap())), - LeafData::new(block_num.into()), - ))); - assert_eq!( - offchain_db.get(&MMR::node_canon_offchain_key(pos)).map(decode_node), - expected - ); - assert_eq!( - offchain_db - .get(&MMR::node_temp_offchain_key(pos, &parent_hash)) - .map(decode_node), - expected - ); - } - - // verify a couple of nodes and peaks: - // `pos` is node to verify, - // `leaf_index` is leaf that added node `pos`, - // `expected` is expected value of node at `pos`. - let verify = |pos: NodeIndex, leaf_index: LeafIndex, expected: H256| { - let parent_num: BlockNumber = leaf_index.try_into().unwrap(); - let parent_hash = >::block_hash(parent_num); - // Available in offchain db under both fork-proof key and canon key. 
- // We'll later check it is pruned from fork-proof key. - let expected = Some(mmr::Node::Hash(expected)); - assert_eq!( - offchain_db.get(&MMR::node_canon_offchain_key(pos)).map(decode_node), - expected - ); - assert_eq!( - offchain_db - .get(&MMR::node_temp_offchain_key(pos, &parent_hash)) - .map(decode_node), - expected - ); - }; - verify(2, 1, hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854")); - verify(13, 7, hex("441bf63abc7cf9b9e82eb57b8111c883d50ae468d9fd7f301e12269fc0fa1e75")); - verify(21, 11, hex("f323ac1a7f56de5f40ed8df3e97af74eec0ee9d72883679e49122ffad2ffd03b")); - }); - - // add another `frame_system::BlockHashCount` blocks and verify all nodes and leaves - // added by our original `to_canon_count` blocks have now been canonicalized in offchain db. - let block_hash_size: u64 = ::BlockHashCount::get(); - let base = to_canon_count; - for blocknum in base..(base + u32::try_from(block_hash_size).unwrap()) { - ext.execute_with(|| { - new_block(); - as Hooks>::offchain_worker(blocknum.into()); - }); - ext.persist_offchain_overlay(); - } - ext.execute_with(|| { - // verify leaves added by blocks 1..=13, should be in offchain under canon key. - for block_num in 1..=to_canon_count { - let leaf_index = u64::from(block_num - 1); - let pos = helper::leaf_index_to_pos(leaf_index.into()); - let parent_num: BlockNumber = (block_num - 1).into(); - let parent_hash = >::block_hash(parent_num); - // no longer available in fork-proof storage (was pruned), - assert_eq!(offchain_db.get(&MMR::node_temp_offchain_key(pos, &parent_hash)), None); - // but available using canon key. - assert_eq!( - offchain_db.get(&MMR::node_canon_offchain_key(pos)).map(decode_node), - Some(mmr::Node::Data(( - (leaf_index, H256::repeat_byte(u8::try_from(block_num).unwrap())), - LeafData::new(block_num.into()), - ))) - ); - } - - // also check some nodes and peaks: - // `pos` is node to verify, - // `leaf_index` is leaf that added node `pos`, - // `expected` is expected value of node at `pos`. - let verify = |pos: NodeIndex, leaf_index: LeafIndex, expected: H256| { - let parent_num: BlockNumber = leaf_index.try_into().unwrap(); - let parent_hash = >::block_hash(parent_num); - // no longer available in fork-proof storage (was pruned), - assert_eq!(offchain_db.get(&MMR::node_temp_offchain_key(pos, &parent_hash)), None); - // but available using canon key. 
- assert_eq!( - offchain_db.get(&MMR::node_canon_offchain_key(pos)).map(decode_node), - Some(mmr::Node::Hash(expected)) - ); - }; - verify(2, 1, hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854")); - verify(13, 7, hex("441bf63abc7cf9b9e82eb57b8111c883d50ae468d9fd7f301e12269fc0fa1e75")); - verify(21, 11, hex("f323ac1a7f56de5f40ed8df3e97af74eec0ee9d72883679e49122ffad2ffd03b")); - }); -} - #[test] fn should_verify_canonicalized() { use frame_support::traits::Hooks; From 8857248c44badcb42f6a2d04a4d483858f10697b Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Tue, 22 Nov 2022 14:54:26 +0200 Subject: [PATCH 06/13] Code review changes: 1st iteration --- Cargo.lock | 2 - bin/node/runtime/src/lib.rs | 4 +- client/consensus/babe/src/lib.rs | 8 +- .../merkle-mountain-range/src/canon_engine.rs | 78 ++++++++++++------- client/merkle-mountain-range/src/lib.rs | 6 +- .../merkle-mountain-range/src/test_utils.rs | 20 ++++- frame/merkle-mountain-range/src/lib.rs | 10 +-- primitives/blockchain/src/backend.rs | 65 ++++++++++------ primitives/blockchain/src/error.rs | 3 + primitives/merkle-mountain-range/src/lib.rs | 2 +- primitives/merkle-mountain-range/src/utils.rs | 8 +- test-utils/runtime/Cargo.toml | 4 - test-utils/runtime/src/lib.rs | 58 -------------- 13 files changed, 131 insertions(+), 137 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5ca83de86ae7d..7073fe93b0eb8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10417,7 +10417,6 @@ dependencies = [ "log", "memory-db", "pallet-babe", - "pallet-mmr", "pallet-timestamp", "parity-scale-codec", "parity-util-mem", @@ -10438,7 +10437,6 @@ dependencies = [ "sp-inherents", "sp-io", "sp-keyring", - "sp-mmr-primitives", "sp-offchain", "sp-runtime", "sp-runtime-interface", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 0f44febdb02dc..0e7d22109792f 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -2064,8 +2064,8 @@ impl_runtime_apis! { Ok(Mmr::mmr_root()) } - fn num_mmr_blocks() -> Result { - Mmr::num_mmr_blocks() + fn mmr_leaves_count() -> Result { + Mmr::mmr_leaves() } fn generate_proof( diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 81d1666a0ba89..d4cfdfa941c1e 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -542,7 +542,13 @@ fn aux_storage_cleanup + HeaderBackend, Block: B ); // Cleans data for stale forks. 
- let stale_forks = client.expand_forks(¬ification.stale_heads); + let stale_forks = match client.expand_forks(¬ification.stale_heads) { + Ok(stale_forks) => stale_forks, + Err((stale_forks, e)) => { + warn!(target: "babe", "{:?}", e,); + stale_forks + }, + }; aux_keys.extend(stale_forks.iter()); aux_keys diff --git a/client/merkle-mountain-range/src/canon_engine.rs b/client/merkle-mountain-range/src/canon_engine.rs index fde8bdba7e006..bd02c1765f05c 100644 --- a/client/merkle-mountain-range/src/canon_engine.rs +++ b/client/merkle-mountain-range/src/canon_engine.rs @@ -18,13 +18,13 @@ use std::{marker::PhantomData, sync::Arc}; -use log::{debug, error}; +use log::{debug, error, warn}; use sc_client_api::FinalityNotification; use sc_offchain::OffchainDb; use sp_blockchain::{ForkBackend, HeaderBackend, HeaderMetadata}; use sp_core::offchain::{DbExternalities, OffchainStorage, StorageKind}; -use sp_mmr_primitives::{utils, utils::NodesUtils, LeafIndex, NodeIndex}; +use sp_mmr_primitives::{utils, utils::NodesUtils, NodeIndex}; use sp_runtime::{ traits::{Block, Header, One}, Saturating, @@ -47,19 +47,6 @@ where S: OffchainStorage, B: Block, { - fn block_number_to_leaf_index( - &self, - block_num: ::Number, - ) -> Result { - utils::block_num_to_leaf_index::(block_num, self.first_mmr_block).map_err(|e| { - error!( - target: LOG_TARGET, - "Error converting block number {} to leaf index: {:?}", block_num, e - ); - e - }) - } - fn node_temp_offchain_key(&self, pos: NodeIndex, parent_hash: &B::Hash) -> Vec { NodesUtils::node_temp_offchain_key::(&self.indexing_prefix, pos, parent_hash) } @@ -68,6 +55,15 @@ where NodesUtils::node_canon_offchain_key(&self.indexing_prefix, pos) } + fn right_branch_ending_in_block( + &self, + block_num: ::Number, + ) -> Result, sp_mmr_primitives::Error> { + let leaf_idx = + utils::block_num_to_leaf_index::(block_num, self.first_mmr_block)?; + Ok(NodesUtils::right_branch_ending_in_leaf(leaf_idx)) + } + fn prune_branch(&mut self, block_hash: &B::Hash) { let header = match self.client.header_metadata(*block_hash) { Ok(header) => header, @@ -80,15 +76,23 @@ where }, }; - // If we can't convert the block number to a leaf index, the chain state is probably - // corrupted. We only log the error, hoping that the chain state will be fixed. - let stale_leaf = match self.block_number_to_leaf_index(header.number) { - Ok(stale_leaf) => stale_leaf, - Err(_) => return, - }; // We prune the leaf associated with the provided block and all the nodes added by that // leaf. - let stale_nodes = NodesUtils::right_branch_ending_in_leaf(stale_leaf); + let stale_nodes = match self.right_branch_ending_in_block(header.number) { + Ok(nodes) => nodes, + Err(e) => { + // If we can't convert the block number to a leaf index, the chain state is probably + // corrupted. We only log the error, hoping that the chain state will be fixed. + error!( + target: LOG_TARGET, + "Error converting block number {} to leaf index: {:?}. \ + Won't be able to prune stale branch.", + header.number, + e + ); + return + }, + }; debug!( target: LOG_TARGET, "Nodes to prune for stale block {}: {:?}", header.number, stale_nodes @@ -112,15 +116,23 @@ where return } - // If we can't convert the block number to a leaf index, the chain state is probably - // corrupted. We only log the error, hoping that the chain state will be fixed. 
- let to_canon_leaf = match self.block_number_to_leaf_index(block_num) { - Ok(to_canon_leaf) => to_canon_leaf, - Err(_) => return, - }; // We "canonicalize" the leaf associated with the provided block // and all the nodes added by that leaf. - let to_canon_nodes = NodesUtils::right_branch_ending_in_leaf(to_canon_leaf); + let to_canon_nodes = match self.right_branch_ending_in_block(block_num) { + Ok(nodes) => nodes, + Err(e) => { + // If we can't convert the block number to a leaf index, the chain state is probably + // corrupted. We only log the error, hoping that the chain state will be fixed. + error!( + target: LOG_TARGET, + "Error converting block number {} to leaf index: {:?}. \ + Won't be able to canonicalize finalized branch.", + block_num, + e + ); + return + }, + }; debug!( target: LOG_TARGET, "Nodes to canonicalize for block {}: {:?}", block_num, to_canon_nodes @@ -173,7 +185,13 @@ where } // Remove offchain MMR nodes for stale forks. - let stale_forks = self.client.expand_forks(¬ification.stale_heads); + let stale_forks = match self.client.expand_forks(¬ification.stale_heads) { + Ok(stale_forks) => stale_forks, + Err((stale_forks, e)) => { + warn!(target: LOG_TARGET, "{:?}", e,); + stale_forks + }, + }; for hash in stale_forks.iter() { self.prune_branch(hash); } diff --git a/client/merkle-mountain-range/src/lib.rs b/client/merkle-mountain-range/src/lib.rs index 79be99a6a9c75..13e93f2603a0c 100644 --- a/client/merkle-mountain-range/src/lib.rs +++ b/client/merkle-mountain-range/src/lib.rs @@ -59,9 +59,9 @@ where ) -> Option<::Number> { if let MaybeCanonEngine::Uninitialized { client, .. } = self { let best_block = *notification.header.number(); - return match client.runtime_api().num_mmr_blocks(&BlockId::number(best_block)) { - Ok(Ok(num_mmr_blocks)) => { - match utils::first_mmr_block_num::(best_block, num_mmr_blocks) { + return match client.runtime_api().mmr_leaves_count(&BlockId::number(best_block)) { + Ok(Ok(mmr_leaves_count)) => { + match utils::first_mmr_block_num::(best_block, mmr_leaves_count) { Ok(first_mmr_block) => Some(first_mmr_block), Err(e) => { error!( diff --git a/client/merkle-mountain-range/src/test_utils.rs b/client/merkle-mountain-range/src/test_utils.rs index 92c46b6dc181d..fd11eceb7ae1a 100644 --- a/client/merkle-mountain-range/src/test_utils.rs +++ b/client/merkle-mountain-range/src/test_utils.rs @@ -1,3 +1,21 @@ +// This file is part of Substrate. + +// Copyright (C) 2021-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + use futures::{executor::LocalPool, task::LocalSpawn, FutureExt}; use std::{ future::Future, @@ -265,7 +283,7 @@ sp_api::mock_impl_runtime_apis! 
{ Err(mmr::Error::PalletNotIncluded) } - fn num_mmr_blocks(&self) -> Result { + fn mmr_leaves_count(&self) -> Result { Ok(self.data.lock().unwrap().num_blocks) } diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs index 9c8ec63b49821..174a868549d39 100644 --- a/frame/merkle-mountain-range/src/lib.rs +++ b/frame/merkle-mountain-range/src/lib.rs @@ -314,14 +314,6 @@ impl, I: 'static> Pallet { .saturating_add(leaf_index.saturated_into()) } - /// Get the number of MMR blocks in the chain. - pub fn num_mmr_blocks() -> Result { - Self::mmr_leaves().try_into().map_err(|_| { - Error::InvalidNumericOp - .log_debug("The number of leaves couldn't be converted to a block number.") - }) - } - /// Convert a block number into a leaf index. fn block_num_to_leaf_index(block_num: T::BlockNumber) -> Result where @@ -329,7 +321,7 @@ impl, I: 'static> Pallet { { let first_mmr_block = utils::first_mmr_block_num::( >::block_number(), - Self::num_mmr_blocks()?, + Self::mmr_leaves(), )?; utils::block_num_to_leaf_index::(block_num, first_mmr_block) diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index 887953a177909..8c19538913c18 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -90,46 +90,63 @@ pub trait HeaderBackend: Send + Sync { pub trait ForkBackend: HeaderMetadata + HeaderBackend + Send + Sync { - /// Get all the header hashes that are part of the provided forks starting only from the fork - /// heads. - fn expand_forks(&self, fork_heads: &[Block::Hash]) -> BTreeSet { + /// Best effort to get all the header hashes that are part of the provided forks + /// starting only from the fork heads. + /// + /// The function tries to reconstruct the route from the fork head to the canonical chain. + /// If any of the hashes on the route can't be found in the db, the function won't be able + /// to reconstruct the route anymore. In this case it will give up expanding the current fork, + /// move on to the next ones and at the end it will return an error that also contains + /// the partially expanded forks. + fn expand_forks( + &self, + fork_heads: &[Block::Hash], + ) -> sp_std::result::Result, (BTreeSet, Error)> { + let mut missing_blocks = vec![]; let mut expanded_forks = BTreeSet::new(); for fork_head in fork_heads { - let mut hash = *fork_head; + let mut route_head = *fork_head; // Insert stale blocks hashes until canonical chain is reached. // If we reach a block that is already part of the `expanded_forks` we can stop // processing the fork. - while expanded_forks.insert(hash) { - match self.header_metadata(hash) { + while expanded_forks.insert(route_head) { + match self.header_metadata(route_head) { Ok(meta) => { - hash = meta.parent; - // If the parent is part of the canonical chain or there doesn't exist a // block hash for the parent number (bug?!), we can abort adding blocks. 
- if self - .hash(meta.number.saturating_sub(1u32.into())) - .ok() - .flatten() - .map_or(true, |h| h == hash) - { - break + let parent_number = meta.number.saturating_sub(1u32.into()); + match self.hash(parent_number) { + Ok(Some(parent_hash)) => + if parent_hash == meta.parent { + break + }, + Ok(None) | Err(_) => { + missing_blocks.push(BlockId::::Number(parent_number)); + break + }, } + + route_head = meta.parent; }, - Err(err) => { - warn!( - target: "primitives::blockchain", - "Stale header {:?} lookup fail while expanding fork {:?}: {}", - fork_head, - hash, - err, - ); + Err(_e) => { + missing_blocks.push(BlockId::::Hash(route_head)); break }, } } } - expanded_forks + if !missing_blocks.is_empty() { + return Err(( + expanded_forks, + Error::UnknownBlocks(format!( + "Missing stale headers {:?} while expanding forks {:?}.", + fork_heads, missing_blocks + )), + )) + } + + Ok(expanded_forks) } } diff --git a/primitives/blockchain/src/error.rs b/primitives/blockchain/src/error.rs index c82fb9bebf4ee..783c40c4061ad 100644 --- a/primitives/blockchain/src/error.rs +++ b/primitives/blockchain/src/error.rs @@ -59,6 +59,9 @@ pub enum Error { #[error("UnknownBlock: {0}")] UnknownBlock(String), + #[error("UnknownBlocks: {0}")] + UnknownBlocks(String), + #[error(transparent)] ApplyExtrinsicFailed(#[from] ApplyExtrinsicFailed), diff --git a/primitives/merkle-mountain-range/src/lib.rs b/primitives/merkle-mountain-range/src/lib.rs index e2f5430fafff8..bffd87c8b639f 100644 --- a/primitives/merkle-mountain-range/src/lib.rs +++ b/primitives/merkle-mountain-range/src/lib.rs @@ -424,7 +424,7 @@ sp_api::decl_runtime_apis! { fn mmr_root() -> Result; /// Return the number of MMR blocks in the chain. - fn num_mmr_blocks() -> Result; + fn mmr_leaves_count() -> Result; /// Generate MMR proof for a series of block numbers. If `best_known_block_number = Some(n)`, /// use historical MMR state at given block height `n`. Else, use current MMR state. diff --git a/primitives/merkle-mountain-range/src/utils.rs b/primitives/merkle-mountain-range/src/utils.rs index 966afaeccd7fa..aa3665d5df0db 100644 --- a/primitives/merkle-mountain-range/src/utils.rs +++ b/primitives/merkle-mountain-range/src/utils.rs @@ -29,10 +29,14 @@ use crate::{Error, LeafIndex, NodeIndex}; /// Get the first block with MMR. 
pub fn first_mmr_block_num( best_block_num: H::Number, - num_mmr_blocks: H::Number, + mmr_leaves_count: LeafIndex, ) -> Result { + let mmr_blocks_count = mmr_leaves_count.try_into().map_err(|_| { + Error::InvalidNumericOp + .log_debug("The number of leaves couldn't be converted to a block number.") + })?; best_block_num - .checked_sub(&num_mmr_blocks) + .checked_sub(&mmr_blocks_count) .and_then(|last_non_mmr_block| last_non_mmr_block.checked_add(&One::one())) .ok_or_else(|| { Error::InvalidNumericOp diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 3808b2711f56a..b64a847b15943 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -30,7 +30,6 @@ sp-std = { version = "5.0.0", default-features = false, path = "../../primitives sp-runtime-interface = { version = "7.0.0", default-features = false, path = "../../primitives/runtime-interface" } sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../frame/support" } -sp-mmr-primitives = { version = "4.0.0-dev", default-features = false, path = "../../primitives/merkle-mountain-range" } sp-version = { version = "5.0.0", default-features = false, path = "../../primitives/version" } sp-session = { version = "4.0.0-dev", default-features = false, path = "../../primitives/session" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } @@ -39,7 +38,6 @@ pallet-babe = { version = "4.0.0-dev", default-features = false, path = "../../f frame-system = { version = "4.0.0-dev", default-features = false, path = "../../frame/system" } frame-system-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../frame/system/rpc/runtime-api" } pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../frame/timestamp" } -pallet-mmr = { version = "4.0.0-dev", default-features = false, path = "../../frame/merkle-mountain-range" } sp-finality-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../primitives/finality-grandpa" } sp-trie = { version = "7.0.0", default-features = false, path = "../../primitives/trie" } sp-transaction-pool = { version = "4.0.0-dev", default-features = false, path = "../../primitives/transaction-pool" } @@ -89,7 +87,6 @@ std = [ "sp-runtime-interface/std", "sp-io/std", "frame-support/std", - "sp-mmr-primitives/std", "sp-version/std", "serde", "sp-session/std", @@ -101,7 +98,6 @@ std = [ "frame-system-rpc-runtime-api/std", "frame-system/std", "pallet-timestamp/std", - "pallet-mmr/std", "sc-service", "sp-finality-grandpa/std", "sp-trie/std", diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 86ca45d0396e0..6b93e53cf3910 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -29,7 +29,6 @@ use sp_std::{marker::PhantomData, prelude::*}; use sp_application_crypto::{ecdsa, ed25519, sr25519, RuntimeAppPublic}; use sp_core::{offchain::KeyTypeId, OpaqueMetadata, RuntimeDebug}; -// use sp_mmr_primitives as mmr; use sp_trie::{ trie_types::{TrieDBBuilder, TrieDBMutBuilderV1}, PrefixedMemoryDB, StorageProof, @@ -67,7 +66,6 @@ use sp_version::RuntimeVersion; // Ensure Babe and Aura use the same crypto to simplify things a bit. 
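A quick worked example of the `first_mmr_block_num` arithmetic above, with invented numbers: if the chain's best block is 105 and the MMR holds 100 leaves (one per block since the pallet was added), the last block without MMR is 105 - 100 = 5, so the first MMR block is 6.

    // Sketch only; `Header` stands for any header type with u64 block numbers.
    let first = utils::first_mmr_block_num::<Header>(105u64, 100).unwrap();
    assert_eq!(first, 6);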
pub use sp_consensus_babe::{AllowedSlots, AuthorityId, Slot}; -use sp_runtime::traits::Keccak256; pub type AuraId = sp_consensus_aura::sr25519::AuthorityId; @@ -637,19 +635,6 @@ impl frame_system::pallet::Config for Runtime { type MaxConsumers = ConstU32<16>; } -type MmrHash = ::Output; - -impl pallet_mmr::Config for Runtime { - const INDEXING_PREFIX: &'static [u8] = b"mmr"; - type Hashing = Keccak256; - type Hash = MmrHash; - type OnNewRoot = (); - type WeightInfo = (); - type LeafData = pallet_mmr::ParentNumberAndHash; -} - -pub type MmrHashing = ::Hashing; - impl system::Config for Runtime {} impl pallet_timestamp::Config for Runtime { @@ -1002,49 +987,6 @@ cfg_if! { 0 } } - - // impl mmr::MmrApi for Runtime { - // fn mmr_root() -> Result { - // Ok(Mmr::mmr_root()) - // } - // - // fn generate_proof( - // block_numbers: Vec, - // best_known_block_number: Option, - // ) -> Result<(Vec, mmr::Proof), mmr::Error> { - // Mmr::generate_proof(block_numbers, best_known_block_number).map( - // |(leaves, proof)| { - // ( - // leaves - // .into_iter() - // .map(|leaf| mmr::EncodableOpaqueLeaf::from_leaf(&leaf)) - // .collect(), - // proof, - // ) - // }, - // ) - // } - // - // fn verify_proof(leaves: Vec, proof: mmr::Proof) - // -> Result<(), mmr::Error> - // { - // pub type MmrLeaf = <::LeafData as mmr::LeafDataProvider>::LeafData; - // let leaves = leaves.into_iter().map(|leaf| - // leaf.into_opaque_leaf() - // .try_decode() - // .ok_or(mmr::Error::Verify)).collect::, mmr::Error>>()?; - // Mmr::verify_leaves(leaves, proof) - // } - // - // fn verify_proof_stateless( - // root: Hash, - // leaves: Vec, - // proof: mmr::Proof - // ) -> Result<(), mmr::Error> { - // let nodes = leaves.into_iter().map(|leaf|mmr::DataOrHash::Data(leaf.into_opaque_leaf())).collect(); - // pallet_mmr::verify_leaves_proof::(root, nodes, proof) - // } - // } } } else { impl_runtime_apis! 
{ From 4f56ae6090c877a0ac5da286cfb46f210a171a8c Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Thu, 24 Nov 2022 09:31:16 +0200 Subject: [PATCH 07/13] Replace MaybeCanonEngine with CanonEngineBuilder --- .../merkle-mountain-range/src/canon_engine.rs | 130 +++++++++-------- client/merkle-mountain-range/src/lib.rs | 132 ++++++++---------- 2 files changed, 125 insertions(+), 137 deletions(-) diff --git a/client/merkle-mountain-range/src/canon_engine.rs b/client/merkle-mountain-range/src/canon_engine.rs index bd02c1765f05c..f9180142b71a6 100644 --- a/client/merkle-mountain-range/src/canon_engine.rs +++ b/client/merkle-mountain-range/src/canon_engine.rs @@ -22,13 +22,10 @@ use log::{debug, error, warn}; use sc_client_api::FinalityNotification; use sc_offchain::OffchainDb; -use sp_blockchain::{ForkBackend, HeaderBackend, HeaderMetadata}; +use sp_blockchain::{CachedHeaderMetadata, ForkBackend, HeaderBackend, HeaderMetadata}; use sp_core::offchain::{DbExternalities, OffchainStorage, StorageKind}; use sp_mmr_primitives::{utils, utils::NodesUtils, NodeIndex}; -use sp_runtime::{ - traits::{Block, Header, One}, - Saturating, -}; +use sp_runtime::traits::{Block, Header}; use crate::LOG_TARGET; @@ -55,48 +52,68 @@ where NodesUtils::node_canon_offchain_key(&self.indexing_prefix, pos) } - fn right_branch_ending_in_block( + fn header_metadata_or_log( &self, - block_num: ::Number, - ) -> Result, sp_mmr_primitives::Error> { - let leaf_idx = - utils::block_num_to_leaf_index::(block_num, self.first_mmr_block)?; - Ok(NodesUtils::right_branch_ending_in_leaf(leaf_idx)) + hash: B::Hash, + action: &str, + ) -> Option> { + match self.client.header_metadata(hash) { + Ok(header) => Some(header), + _ => { + error!( + target: LOG_TARGET, + "Block {} not found. Couldn't {} associated branch.", hash, action + ); + None + }, + } } - fn prune_branch(&mut self, block_hash: &B::Hash) { - let header = match self.client.header_metadata(*block_hash) { - Ok(header) => header, - _ => { + fn right_branch_ending_in_block_or_log( + &self, + block_num: ::Number, + action: &str, + ) -> Option> { + match utils::block_num_to_leaf_index::(block_num, self.first_mmr_block) { + Ok(leaf_idx) => { + let branch = NodesUtils::right_branch_ending_in_leaf(leaf_idx); + debug!( + target: LOG_TARGET, + "Nodes to {} for block {}: {:?}", action, block_num, branch + ); + Some(branch) + }, + Err(e) => { error!( target: LOG_TARGET, - "Stale block {} not found. Couldn't prune associated stale branch.", block_hash + "Error converting block number {} to leaf index: {:?}. \ + Couldn't {} associated branch.", + block_num, + e, + action ); - return + None }, + } + } + + fn prune_branch(&mut self, block_hash: &B::Hash) { + let action = "prune"; + let header = match self.header_metadata_or_log(*block_hash, action) { + Some(header) => header, + _ => return, }; // We prune the leaf associated with the provided block and all the nodes added by that // leaf. - let stale_nodes = match self.right_branch_ending_in_block(header.number) { - Ok(nodes) => nodes, - Err(e) => { + let stale_nodes = match self.right_branch_ending_in_block_or_log(header.number, action) { + Some(nodes) => nodes, + None => { // If we can't convert the block number to a leaf index, the chain state is probably // corrupted. We only log the error, hoping that the chain state will be fixed. - error!( - target: LOG_TARGET, - "Error converting block number {} to leaf index: {:?}. 
\ - Won't be able to prune stale branch.", - header.number, - e - ); return }, }; - debug!( - target: LOG_TARGET, - "Nodes to prune for stale block {}: {:?}", header.number, stale_nodes - ); for pos in stale_nodes { let temp_key = self.node_temp_offchain_key(pos, &header.parent); @@ -105,41 +122,32 @@ where } } - fn canonicalize_branch( - &mut self, - block_num: ::Number, - parent_hash: &B::Hash, - ) { + fn canonicalize_branch(&mut self, block_hash: &B::Hash) { + let action = "canonicalize"; + let header = match self.header_metadata_or_log(*block_hash, action) { + Some(header) => header, + _ => return, + }; + // Don't canonicalize branches corresponding to blocks for which the MMR pallet // wasn't yet initialized. - if block_num < self.first_mmr_block { + if header.number < self.first_mmr_block { return } // We "canonicalize" the leaf associated with the provided block // and all the nodes added by that leaf. - let to_canon_nodes = match self.right_branch_ending_in_block(block_num) { - Ok(nodes) => nodes, - Err(e) => { + let to_canon_nodes = match self.right_branch_ending_in_block_or_log(header.number, action) { + Some(nodes) => nodes, + None => { // If we can't convert the block number to a leaf index, the chain state is probably // corrupted. We only log the error, hoping that the chain state will be fixed. - error!( - target: LOG_TARGET, - "Error converting block number {} to leaf index: {:?}. \ - Won't be able to canonicalize finalized branch.", - block_num, - e - ); return }, }; - debug!( - target: LOG_TARGET, - "Nodes to canonicalize for block {}: {:?}", block_num, to_canon_nodes - ); for pos in to_canon_nodes { - let temp_key = self.node_temp_offchain_key(pos, parent_hash); + let temp_key = self.node_temp_offchain_key(pos, &header.parent); if let Some(elem) = self.offchain_db.local_storage_get(StorageKind::PERSISTENT, &temp_key) { @@ -165,23 +173,11 @@ where /// Move leafs and nodes added by finalized blocks in offchain db from _fork-aware key_ to /// _canonical key_. /// Prune leafs and nodes added by stale blocks in offchain db from _fork-aware key_. - pub fn canonicalize_and_prune( - &mut self, - best_finalized: &(::Number, B::Hash), - notification: &FinalityNotification, - ) { + pub fn canonicalize_and_prune(&mut self, notification: &FinalityNotification) { // Move offchain MMR nodes for finalized blocks to canonical keys. - let (mut parent_number, mut parent_hash) = *best_finalized; - for block_hash in notification - .tree_route - .iter() - .cloned() - .chain(std::iter::once(notification.hash)) + for block_hash in notification.tree_route.iter().chain(std::iter::once(¬ification.hash)) { - let block_number = parent_number.saturating_add(One::one()); - self.canonicalize_branch(block_number, &parent_hash); - - (parent_number, parent_hash) = (block_number, block_hash); + self.canonicalize_branch(block_hash); } // Remove offchain MMR nodes for stale forks. 
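Condensing the above: every finality notification now drives one full offchain maintenance pass. The sketch below is an illustrative restatement of that flow, not the verbatim implementation; `engine` and `client` stand for the canonicalization engine and its backing client:

    // A "branch" is `NodesUtils::right_branch_ending_in_leaf(leaf)`: the leaf node
    // plus any parent nodes its insertion created (e.g. appending leaf 3, 0-based,
    // adds positions [4, 5, 6] in the usual 0-based MMR numbering).
    for block_hash in notification.tree_route.iter().chain(std::iter::once(&notification.hash)) {
        // Newly finalized blocks: copy their nodes under canonical keys.
        engine.canonicalize_branch(block_hash);
    }
    let stale_forks = client
        .expand_forks(&notification.stale_heads)
        .unwrap_or_else(|(partial, e)| {
            warn!(target: LOG_TARGET, "{:?}", e);
            partial
        });
    for hash in stale_forks.iter() {
        // Stale siblings: drop their fork-unique entries entirely.
        engine.prune_branch(hash);
    }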
diff --git a/client/merkle-mountain-range/src/lib.rs b/client/merkle-mountain-range/src/lib.rs index 13e93f2603a0c..dcb9d454421f4 100644 --- a/client/merkle-mountain-range/src/lib.rs +++ b/client/merkle-mountain-range/src/lib.rs @@ -27,92 +27,89 @@ use std::{marker::PhantomData, sync::Arc}; use futures::StreamExt; use log::{debug, error, trace, warn}; -use sc_client_api::{Backend, BlockchainEvents, FinalityNotification, FinalityNotifications}; +use sc_client_api::{Backend, BlockchainEvents, FinalityNotifications}; use sc_offchain::OffchainDb; use sp_api::ProvideRuntimeApi; use sp_blockchain::{HeaderBackend, HeaderMetadata}; use sp_mmr_primitives::{utils, LeafIndex, MmrApi}; use sp_runtime::{ generic::BlockId, - traits::{Block, Header, NumberFor, Zero}, + traits::{Block, Header, NumberFor}, }; use crate::canon_engine::CanonEngine; use beefy_primitives::MmrRootHash; +use sp_core::offchain::OffchainStorage; pub const LOG_TARGET: &str = "mmr"; -enum MaybeCanonEngine { - Uninitialized { client: Arc, offchain_db: OffchainDb, indexing_prefix: Vec }, - Initialized(CanonEngine), +struct CanonEngineBuilder { + client: Arc, + offchain_db: OffchainDb, + indexing_prefix: Vec, + + _phantom: PhantomData, } -impl MaybeCanonEngine +impl CanonEngineBuilder where B: Block, - C: ProvideRuntimeApi, + C: ProvideRuntimeApi + HeaderBackend + HeaderMetadata, C::Api: MmrApi>, + S: OffchainStorage, { - fn try_prepare_init( - &mut self, - notification: &FinalityNotification, - ) -> Option<::Number> { - if let MaybeCanonEngine::Uninitialized { client, .. } = self { + async fn try_build( + self, + finality_notifications: &mut FinalityNotifications, + ) -> Option> { + while let Some(notification) = finality_notifications.next().await { let best_block = *notification.header.number(); - return match client.runtime_api().mmr_leaves_count(&BlockId::number(best_block)) { + match self.client.runtime_api().mmr_leaves_count(&BlockId::number(best_block)) { Ok(Ok(mmr_leaves_count)) => { match utils::first_mmr_block_num::(best_block, mmr_leaves_count) { - Ok(first_mmr_block) => Some(first_mmr_block), + Ok(first_mmr_block) => { + let mut engine = CanonEngine { + client: self.client, + offchain_db: self.offchain_db, + indexing_prefix: self.indexing_prefix, + first_mmr_block, + + _phantom: Default::default(), + }; + // We have to canonicalize and prune the blocks in the + // finality notification that lead to building the engine as well. + engine.canonicalize_and_prune(¬ification); + return Some(engine) + }, Err(e) => { error!( target: LOG_TARGET, "Error calculating the first mmr block: {:?}", e ); - return None }, } }, _ => { trace!(target: LOG_TARGET, "Finality notification: {:?}", notification); debug!(target: LOG_TARGET, "Waiting for MMR pallet to become available ..."); - - None }, } } + warn!( + target: LOG_TARGET, + "Finality notifications stream closed unexpectedly. \ + Couldn't build the canonicalization engine", + ); None } - - fn try_init(self, first_mmr_block: ::Number) -> Self { - if let MaybeCanonEngine::Uninitialized { client, offchain_db, indexing_prefix } = self { - debug!( - target: LOG_TARGET, - "MMR pallet available since block {:?}. \ - Starting offchain MMR leafs canonicalization.", - first_mmr_block - ); - - return MaybeCanonEngine::Initialized(CanonEngine { - client, - offchain_db, - indexing_prefix, - first_mmr_block, - _phantom: Default::default(), - }) - } - - return self - } } /// A MMR Gadget. 
pub struct MmrGadget, C> { finality_notifications: FinalityNotifications, - best_finalized_head: (::Number, B::Hash), - maybe_engine: MaybeCanonEngine, - _phantom: PhantomData<(B, BE)>, + _phantom: PhantomData<(B, BE, C)>, } impl MmrGadget @@ -123,7 +120,19 @@ where C: BlockchainEvents + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, C::Api: MmrApi>, { - fn new(client: Arc, backend: Arc, indexing_prefix: Vec) -> Option { + async fn run(mut self, engine_builder: CanonEngineBuilder) { + let mut engine = match engine_builder.try_build(&mut self.finality_notifications).await { + Some(engine) => engine, + None => return, + }; + + while let Some(notification) = self.finality_notifications.next().await { + engine.canonicalize_and_prune(¬ification); + } + } + + /// Create and run the MMR gadget. + pub async fn start(client: Arc, backend: Arc, indexing_prefix: Vec) { let offchain_db = match backend.offchain_storage() { Some(offchain_storage) => OffchainDb::new(offchain_storage), None => { @@ -131,40 +140,23 @@ where target: LOG_TARGET, "Can't spawn a MmrGadget for a node without offchain storage." ); - return None + return }, }; - Some(MmrGadget { + let mmr_gadget = MmrGadget:: { finality_notifications: client.finality_notification_stream(), - best_finalized_head: (Zero::zero(), client.info().genesis_hash), - maybe_engine: MaybeCanonEngine::Uninitialized { client, offchain_db, indexing_prefix }, _phantom: Default::default(), - }) - } - - async fn run(mut self) { - while let Some(notification) = self.finality_notifications.next().await { - // Initialize the canonicalization engine if the MMR pallet is available. - if let Some(first_mmr_block) = self.maybe_engine.try_prepare_init(¬ification) { - self.maybe_engine = self.maybe_engine.try_init(first_mmr_block); - } - - if let MaybeCanonEngine::Initialized(engine) = &mut self.maybe_engine { - // Perform the actual logic. - engine.canonicalize_and_prune(&self.best_finalized_head, ¬ification); - } - - self.best_finalized_head = (*notification.header.number(), notification.hash); - } - } - - /// Create and run the MMR gadget. - pub async fn start(client: Arc, backend: Arc, indexing_prefix: Vec) { - if let Some(mmr_gadget) = Self::new(client, backend, indexing_prefix) { - mmr_gadget.run().await - } + }; + mmr_gadget + .run(CanonEngineBuilder { + client, + offchain_db, + indexing_prefix, + _phantom: Default::default(), + }) + .await } } From cb6a96f061fae7ccc64e211af13633d6895b47dd Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Thu, 24 Nov 2022 12:30:07 +0200 Subject: [PATCH 08/13] fix mmr_leaves_count() for kitchen sink demo --- bin/node/runtime/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 0e7d22109792f..0d2de7755f6d6 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -2064,8 +2064,8 @@ impl_runtime_apis! 
{ Ok(Mmr::mmr_root()) } - fn mmr_leaves_count() -> Result { - Mmr::mmr_leaves() + fn mmr_leaves_count() -> Result { + Ok(Mmr::mmr_leaves()) } fn generate_proof( From f21593ca825699ccc5ebc28f0aeed6624d5e10c7 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Fri, 25 Nov 2022 13:59:25 +0200 Subject: [PATCH 09/13] Update client/merkle-mountain-range/src/canon_engine.rs Co-authored-by: Adrian Catangiu --- client/merkle-mountain-range/src/canon_engine.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/client/merkle-mountain-range/src/canon_engine.rs b/client/merkle-mountain-range/src/canon_engine.rs index f9180142b71a6..3f9f06e98b931 100644 --- a/client/merkle-mountain-range/src/canon_engine.rs +++ b/client/merkle-mountain-range/src/canon_engine.rs @@ -181,12 +181,9 @@ where } // Remove offchain MMR nodes for stale forks. - let stale_forks = match self.client.expand_forks(¬ification.stale_heads) { - Ok(stale_forks) => stale_forks, - Err((stale_forks, e)) => { - warn!(target: LOG_TARGET, "{:?}", e,); - stale_forks - }, + let stale_forks = self.client.expand_forks(¬ification.stale_heads).unwrap_or_else(|(stale_forks, e)| { + warn!(target: LOG_TARGET, "{:?}", e); + stale_forks }; for hash in stale_forks.iter() { self.prune_branch(hash); From 2cb3b3bc005e835320afeed70d44bdc203dfb06a Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Fri, 25 Nov 2022 14:11:10 +0200 Subject: [PATCH 10/13] Code review changes: 2nd iteration --- bin/node/runtime/src/lib.rs | 2 +- .../merkle-mountain-range/src/canon_engine.rs | 16 +++++----- client/merkle-mountain-range/src/lib.rs | 6 ++-- .../merkle-mountain-range/src/test_utils.rs | 6 ++-- frame/merkle-mountain-range/src/lib.rs | 2 +- frame/merkle-mountain-range/src/mmr/mmr.rs | 6 ++++ .../merkle-mountain-range/src/mmr/storage.rs | 6 ++-- frame/merkle-mountain-range/src/tests.rs | 29 ++++++++++++++++--- primitives/merkle-mountain-range/src/lib.rs | 5 +++- primitives/merkle-mountain-range/src/utils.rs | 6 ++-- 10 files changed, 58 insertions(+), 26 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 0d2de7755f6d6..97bf9166cb056 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -2064,7 +2064,7 @@ impl_runtime_apis! 
{ Ok(Mmr::mmr_root()) } - fn mmr_leaves_count() -> Result { + fn mmr_leaf_count() -> Result { Ok(Mmr::mmr_leaves()) } diff --git a/client/merkle-mountain-range/src/canon_engine.rs b/client/merkle-mountain-range/src/canon_engine.rs index 3f9f06e98b931..236aa71283d5b 100644 --- a/client/merkle-mountain-range/src/canon_engine.rs +++ b/client/merkle-mountain-range/src/canon_engine.rs @@ -44,7 +44,7 @@ where S: OffchainStorage, B: Block, { - fn node_temp_offchain_key(&self, pos: NodeIndex, parent_hash: &B::Hash) -> Vec { + fn node_temp_offchain_key(&self, pos: NodeIndex, parent_hash: B::Hash) -> Vec { NodesUtils::node_temp_offchain_key::(&self.indexing_prefix, pos, parent_hash) } @@ -116,7 +116,7 @@ where }; for pos in stale_nodes { - let temp_key = self.node_temp_offchain_key(pos, &header.parent); + let temp_key = self.node_temp_offchain_key(pos, header.parent); self.offchain_db.local_storage_clear(StorageKind::PERSISTENT, &temp_key); debug!(target: LOG_TARGET, "Pruned elem at pos {} with temp key {:?}", pos, temp_key); } @@ -147,7 +147,7 @@ where }; for pos in to_canon_nodes { - let temp_key = self.node_temp_offchain_key(pos, &header.parent); + let temp_key = self.node_temp_offchain_key(pos, header.parent); if let Some(elem) = self.offchain_db.local_storage_get(StorageKind::PERSISTENT, &temp_key) { @@ -181,10 +181,12 @@ where } // Remove offchain MMR nodes for stale forks. - let stale_forks = self.client.expand_forks(¬ification.stale_heads).unwrap_or_else(|(stale_forks, e)| { - warn!(target: LOG_TARGET, "{:?}", e); - stale_forks - }; + let stale_forks = self.client.expand_forks(¬ification.stale_heads).unwrap_or_else( + |(stale_forks, e)| { + warn!(target: LOG_TARGET, "{:?}", e); + stale_forks + }, + ); for hash in stale_forks.iter() { self.prune_branch(hash); } diff --git a/client/merkle-mountain-range/src/lib.rs b/client/merkle-mountain-range/src/lib.rs index dcb9d454421f4..c18a799188ed3 100644 --- a/client/merkle-mountain-range/src/lib.rs +++ b/client/merkle-mountain-range/src/lib.rs @@ -64,9 +64,9 @@ where ) -> Option> { while let Some(notification) = finality_notifications.next().await { let best_block = *notification.header.number(); - match self.client.runtime_api().mmr_leaves_count(&BlockId::number(best_block)) { - Ok(Ok(mmr_leaves_count)) => { - match utils::first_mmr_block_num::(best_block, mmr_leaves_count) { + match self.client.runtime_api().mmr_leaf_count(&BlockId::number(best_block)) { + Ok(Ok(mmr_leaf_count)) => { + match utils::first_mmr_block_num::(best_block, mmr_leaf_count) { Ok(first_mmr_block) => { let mut engine = CanonEngine { client: self.client, diff --git a/client/merkle-mountain-range/src/test_utils.rs b/client/merkle-mountain-range/src/test_utils.rs index fd11eceb7ae1a..0ba297c2808d2 100644 --- a/client/merkle-mountain-range/src/test_utils.rs +++ b/client/merkle-mountain-range/src/test_utils.rs @@ -91,7 +91,7 @@ impl MmrBlock { OffchainKeyType::Temp => NodesUtils::node_temp_offchain_key::
( MockRuntimeApi::INDEXING_PREFIX, node, - self.block.header.parent_hash(), + *self.block.header.parent_hash(), ), OffchainKeyType::Canon => NodesUtils::node_canon_offchain_key(MockRuntimeApi::INDEXING_PREFIX, node), @@ -144,7 +144,7 @@ impl MockClient { let temp_key = NodesUtils::node_temp_offchain_key::
( MockRuntimeApi::INDEXING_PREFIX, node, - &parent_hash, + parent_hash, ); offchain_db.local_storage_set( StorageKind::PERSISTENT, @@ -283,7 +283,7 @@ sp_api::mock_impl_runtime_apis! { Err(mmr::Error::PalletNotIncluded) } - fn mmr_leaves_count(&self) -> Result { + fn mmr_leaf_count(&self) -> Result { Ok(self.data.lock().unwrap().num_blocks) } diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs index 174a868549d39..85320e9c0d3a4 100644 --- a/frame/merkle-mountain-range/src/lib.rs +++ b/frame/merkle-mountain-range/src/lib.rs @@ -281,7 +281,7 @@ impl, I: 'static> Pallet { /// This combination makes the offchain (key,value) entry resilient to chain forks. fn node_temp_offchain_key( pos: NodeIndex, - parent_hash: &::Hash, + parent_hash: ::Hash, ) -> sp_std::prelude::Vec { NodesUtils::node_temp_offchain_key::<::Header>( &T::INDEXING_PREFIX, diff --git a/frame/merkle-mountain-range/src/mmr/mmr.rs b/frame/merkle-mountain-range/src/mmr/mmr.rs index 58e55b02fe9a3..cdb189a14b66d 100644 --- a/frame/merkle-mountain-range/src/mmr/mmr.rs +++ b/frame/merkle-mountain-range/src/mmr/mmr.rs @@ -116,6 +116,12 @@ where p.verify(root, leaves_positions_and_data) .map_err(|e| Error::Verify.log_debug(e)) } + + /// Return the internal size of the MMR (number of nodes). + #[cfg(test)] + pub fn size(&self) -> NodeIndex { + self.mmr.mmr_size() + } } /// Runtime specific MMR functions. diff --git a/frame/merkle-mountain-range/src/mmr/storage.rs b/frame/merkle-mountain-range/src/mmr/storage.rs index 3fe34623e30e1..1f5d01f87e273 100644 --- a/frame/merkle-mountain-range/src/mmr/storage.rs +++ b/frame/merkle-mountain-range/src/mmr/storage.rs @@ -96,7 +96,7 @@ where let ancestor_parent_block_num = Pallet::::leaf_index_to_parent_block_num(ancestor_leaf_idx, leaves); let ancestor_parent_hash = >::block_hash(ancestor_parent_block_num); - let temp_key = Pallet::::node_temp_offchain_key(pos, &ancestor_parent_hash); + let temp_key = Pallet::::node_temp_offchain_key(pos, ancestor_parent_hash); debug!( target: "runtime::mmr::offchain", "offchain db get {}: leaf idx {:?}, hash {:?}, temp key {:?}", @@ -158,7 +158,7 @@ where >::insert(node_index, elem.hash()); } // We are storing full node off-chain (using indexing API). - Self::store_to_offchain(node_index, &parent_hash, &elem); + Self::store_to_offchain(node_index, parent_hash, &elem); // Increase the indices. if let Node::Data(..) 
= elem { @@ -187,7 +187,7 @@ where { fn store_to_offchain( pos: NodeIndex, - parent_hash: &::Hash, + parent_hash: ::Hash, node: &NodeOf, ) { let encoded_node = node.encode(); diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs index 233dc1824dec7..b5f9a78ede010 100644 --- a/frame/merkle-mountain-range/src/tests.rs +++ b/frame/merkle-mountain-range/src/tests.rs @@ -167,13 +167,13 @@ fn should_append_to_mmr_when_on_initialize_is_called() { let expected = Some(mmr::Node::Data(((0, H256::repeat_byte(1)), LeafData::new(1)))); assert_eq!( - offchain_db.get(&MMR::node_temp_offchain_key(0, &parent_b1)).map(decode_node), + offchain_db.get(&MMR::node_temp_offchain_key(0, parent_b1)).map(decode_node), expected ); let expected = Some(mmr::Node::Data(((1, H256::repeat_byte(2)), LeafData::new(2)))); assert_eq!( - offchain_db.get(&MMR::node_temp_offchain_key(1, &parent_b2)).map(decode_node), + offchain_db.get(&MMR::node_temp_offchain_key(1, parent_b2)).map(decode_node), expected ); @@ -181,11 +181,11 @@ fn should_append_to_mmr_when_on_initialize_is_called() { "672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854", ))); assert_eq!( - offchain_db.get(&MMR::node_temp_offchain_key(2, &parent_b2)).map(decode_node), + offchain_db.get(&MMR::node_temp_offchain_key(2, parent_b2)).map(decode_node), expected ); - assert_eq!(offchain_db.get(&MMR::node_temp_offchain_key(3, &parent_b2)), None); + assert_eq!(offchain_db.get(&MMR::node_temp_offchain_key(3, parent_b2)), None); } #[test] @@ -219,6 +219,27 @@ fn should_construct_larger_mmr_correctly() { }); } +#[test] +fn should_calculate_the_size_correctly() { + let _ = env_logger::try_init(); + + let leaves = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 21]; + let sizes = vec![0, 1, 3, 4, 7, 8, 10, 11, 15, 16, 18, 19, 22, 23, 25, 26, 39]; + + // size cross-check + let mut actual_sizes = vec![]; + for s in &leaves[1..] { + new_test_ext().execute_with(|| { + let mut mmr = mmr::Mmr::::new(0); + for i in 0..*s { + mmr.push(i); + } + actual_sizes.push(mmr.size()); + }) + } + assert_eq!(sizes[1..], actual_sizes[..]); +} + #[test] fn should_generate_proofs_correctly() { let _ = env_logger::try_init(); diff --git a/primitives/merkle-mountain-range/src/lib.rs b/primitives/merkle-mountain-range/src/lib.rs index bffd87c8b639f..74806d9a6a843 100644 --- a/primitives/merkle-mountain-range/src/lib.rs +++ b/primitives/merkle-mountain-range/src/lib.rs @@ -31,6 +31,9 @@ use sp_std::prelude::Vec; pub mod utils; +/// Prefix for elements stored in the Off-chain DB via Indexing API. +pub const INDEXING_PREFIX: &str = "mmr"; + /// A type to describe node position in the MMR (node index). pub type NodeIndex = u64; @@ -424,7 +427,7 @@ sp_api::decl_runtime_apis! { fn mmr_root() -> Result; /// Return the number of MMR blocks in the chain. - fn mmr_leaves_count() -> Result; + fn mmr_leaf_count() -> Result; /// Generate MMR proof for a series of block numbers. If `best_known_block_number = Some(n)`, /// use historical MMR state at given block height `n`. Else, use current MMR state. diff --git a/primitives/merkle-mountain-range/src/utils.rs b/primitives/merkle-mountain-range/src/utils.rs index aa3665d5df0db..619ca7e98160f 100644 --- a/primitives/merkle-mountain-range/src/utils.rs +++ b/primitives/merkle-mountain-range/src/utils.rs @@ -29,9 +29,9 @@ use crate::{Error, LeafIndex, NodeIndex}; /// Get the first block with MMR. 
pub fn first_mmr_block_num( best_block_num: H::Number, - mmr_leaves_count: LeafIndex, + mmr_leaf_count: LeafIndex, ) -> Result { - let mmr_blocks_count = mmr_leaves_count.try_into().map_err(|_| { + let mmr_blocks_count = mmr_leaf_count.try_into().map_err(|_| { Error::InvalidNumericOp .log_debug("The number of leaves couldn't be converted to a block number.") })?; @@ -121,7 +121,7 @@ impl NodesUtils { pub fn node_temp_offchain_key( prefix: &[u8], pos: NodeIndex, - parent_hash: &H::Hash, + parent_hash: H::Hash, ) -> Vec { (prefix, pos, parent_hash).encode() } From 54fa18e4100f86aae32ea8721d25685cc2a44856 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Fri, 25 Nov 2022 18:15:47 +0200 Subject: [PATCH 11/13] fix INDEXING_PREFIX --- primitives/merkle-mountain-range/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/merkle-mountain-range/src/lib.rs b/primitives/merkle-mountain-range/src/lib.rs index 74806d9a6a843..606906185077c 100644 --- a/primitives/merkle-mountain-range/src/lib.rs +++ b/primitives/merkle-mountain-range/src/lib.rs @@ -32,7 +32,7 @@ use sp_std::prelude::Vec; pub mod utils; /// Prefix for elements stored in the Off-chain DB via Indexing API. -pub const INDEXING_PREFIX: &str = "mmr"; +pub const INDEXING_PREFIX: &'static [u8] = b"mmr"; /// A type to describe node position in the MMR (node index). pub type NodeIndex = u64; From 498170234924f33d88adcb38a77a6c6a1bb95958 Mon Sep 17 00:00:00 2001 From: acatangiu Date: Mon, 28 Nov 2022 14:11:47 +0200 Subject: [PATCH 12/13] impl review comments --- client/consensus/babe/src/lib.rs | 10 +++++----- client/merkle-mountain-range/src/lib.rs | 2 +- primitives/blockchain/src/backend.rs | 5 ++--- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index d4cfdfa941c1e..4cb300b9bcd04 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -516,12 +516,12 @@ fn aux_storage_cleanup + HeaderBackend, Block: B client: &C, notification: &FinalityNotification, ) -> AuxDataOperations { - let mut aux_keys = HashSet::new(); + let mut hashes = HashSet::new(); let first = notification.tree_route.first().unwrap_or(¬ification.hash); match client.header_metadata(*first) { Ok(meta) => { - aux_keys.insert(meta.parent); + hashes.insert(meta.parent); }, Err(err) => warn!( target: "babe", @@ -532,7 +532,7 @@ fn aux_storage_cleanup + HeaderBackend, Block: B } // Cleans data for finalized block's ancestors - aux_keys.extend( + hashes.extend( notification .tree_route .iter() @@ -549,9 +549,9 @@ fn aux_storage_cleanup + HeaderBackend, Block: B stale_forks }, }; - aux_keys.extend(stale_forks.iter()); + hashes.extend(stale_forks.iter()); - aux_keys + hashes .into_iter() .map(|val| (aux_schema::block_weight_key(val), None)) .collect() diff --git a/client/merkle-mountain-range/src/lib.rs b/client/merkle-mountain-range/src/lib.rs index c18a799188ed3..7809bc4c26c87 100644 --- a/client/merkle-mountain-range/src/lib.rs +++ b/client/merkle-mountain-range/src/lib.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-extern crate core; +#![warn(missing_docs)] mod canon_engine; #[cfg(test)] diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index 8c19538913c18..33edc56d4b6ba 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -21,11 +21,10 @@ use log::warn; use parking_lot::RwLock; use sp_runtime::{ generic::BlockId, - sp_std, traits::{Block as BlockT, Header as HeaderT, NumberFor, Saturating}, Justifications, }; -use sp_std::collections::btree_set::BTreeSet; +use std::collections::btree_set::BTreeSet; use crate::header_metadata::HeaderMetadata; @@ -101,7 +100,7 @@ pub trait ForkBackend: fn expand_forks( &self, fork_heads: &[Block::Hash], - ) -> sp_std::result::Result, (BTreeSet, Error)> { + ) -> std::result::Result, (BTreeSet, Error)> { let mut missing_blocks = vec![]; let mut expanded_forks = BTreeSet::new(); for fork_head in fork_heads { From e39b6510baa0ec6d6a9130cbdba41dbd35ea0396 Mon Sep 17 00:00:00 2001 From: acatangiu Date: Mon, 28 Nov 2022 18:57:42 +0200 Subject: [PATCH 13/13] add documentation and minor rename --- client/merkle-mountain-range/src/lib.rs | 50 +++++++++++++------ .../src/{canon_engine.rs => offchain_mmr.rs} | 10 +++- frame/merkle-mountain-range/src/lib.rs | 2 +- 3 files changed, 44 insertions(+), 18 deletions(-) rename client/merkle-mountain-range/src/{canon_engine.rs => offchain_mmr.rs} (96%) diff --git a/client/merkle-mountain-range/src/lib.rs b/client/merkle-mountain-range/src/lib.rs index 7809bc4c26c87..4f911a78a51d7 100644 --- a/client/merkle-mountain-range/src/lib.rs +++ b/client/merkle-mountain-range/src/lib.rs @@ -16,9 +16,28 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +//! # MMR offchain gadget +//! +//! The MMR offchain gadget is run alongside `pallet-mmr` to assist it with offchain +//! canonicalization of finalized MMR leaves and nodes. +//! The gadget should only be run on nodes that have Indexing API enabled (otherwise +//! `pallet-mmr` cannot write to offchain and this gadget has nothing to do). +//! +//! The runtime `pallet-mmr` creates one new MMR leaf per block and all inner MMR parent nodes +//! generated by the MMR when adding said leaf. MMR nodes are stored both in: +//! - on-chain storage - hashes only; not full leaf content; +//! - off-chain storage - via Indexing API, full leaf content (and all internal nodes as well) is +//! saved to the Off-chain DB using a key derived from `parent_hash` and node index in MMR. The +//! `parent_hash` is also used within the key to avoid conflicts and overwrites on forks (leaf +//! data is only allowed to reference data coming from parent block). +//! +//! This gadget is driven by block finality and in responsible for pruning stale forks from +//! offchain db, and moving finalized forks under a "canonical" key based solely on node `pos` +//! in the MMR. + #![warn(missing_docs)] -mod canon_engine; +mod offchain_mmr; #[cfg(test)] pub mod test_utils; @@ -37,13 +56,14 @@ use sp_runtime::{ traits::{Block, Header, NumberFor}, }; -use crate::canon_engine::CanonEngine; +use crate::offchain_mmr::OffchainMMR; use beefy_primitives::MmrRootHash; use sp_core::offchain::OffchainStorage; +/// Logging target for the mmr gadget. 
pub const LOG_TARGET: &str = "mmr"; -struct CanonEngineBuilder { +struct OffchainMmrBuilder { client: Arc, offchain_db: OffchainDb, indexing_prefix: Vec, @@ -51,7 +71,7 @@ struct CanonEngineBuilder { _phantom: PhantomData, } -impl CanonEngineBuilder +impl OffchainMmrBuilder where B: Block, C: ProvideRuntimeApi + HeaderBackend + HeaderMetadata, @@ -61,14 +81,14 @@ where async fn try_build( self, finality_notifications: &mut FinalityNotifications, - ) -> Option> { + ) -> Option> { while let Some(notification) = finality_notifications.next().await { let best_block = *notification.header.number(); match self.client.runtime_api().mmr_leaf_count(&BlockId::number(best_block)) { Ok(Ok(mmr_leaf_count)) => { match utils::first_mmr_block_num::(best_block, mmr_leaf_count) { Ok(first_mmr_block) => { - let mut engine = CanonEngine { + let mut offchain_mmr = OffchainMMR { client: self.client, offchain_db: self.offchain_db, indexing_prefix: self.indexing_prefix, @@ -76,10 +96,10 @@ where _phantom: Default::default(), }; - // We have to canonicalize and prune the blocks in the - // finality notification that lead to building the engine as well. - engine.canonicalize_and_prune(¬ification); - return Some(engine) + // We have to canonicalize and prune the blocks in the finality + // notification that lead to building the offchain-mmr as well. + offchain_mmr.canonicalize_and_prune(¬ification); + return Some(offchain_mmr) }, Err(e) => { error!( @@ -120,14 +140,14 @@ where C: BlockchainEvents + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, C::Api: MmrApi>, { - async fn run(mut self, engine_builder: CanonEngineBuilder) { - let mut engine = match engine_builder.try_build(&mut self.finality_notifications).await { - Some(engine) => engine, + async fn run(mut self, builder: OffchainMmrBuilder) { + let mut offchain_mmr = match builder.try_build(&mut self.finality_notifications).await { + Some(offchain_mmr) => offchain_mmr, None => return, }; while let Some(notification) = self.finality_notifications.next().await { - engine.canonicalize_and_prune(¬ification); + offchain_mmr.canonicalize_and_prune(¬ification); } } @@ -150,7 +170,7 @@ where _phantom: Default::default(), }; mmr_gadget - .run(CanonEngineBuilder { + .run(OffchainMmrBuilder { client, offchain_db, indexing_prefix, diff --git a/client/merkle-mountain-range/src/canon_engine.rs b/client/merkle-mountain-range/src/offchain_mmr.rs similarity index 96% rename from client/merkle-mountain-range/src/canon_engine.rs rename to client/merkle-mountain-range/src/offchain_mmr.rs index 236aa71283d5b..7400226b73c44 100644 --- a/client/merkle-mountain-range/src/canon_engine.rs +++ b/client/merkle-mountain-range/src/offchain_mmr.rs @@ -16,6 +16,11 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +//! Logic for canonicalizing MMR offchain entries for finalized forks, +//! and for pruning MMR offchain entries for stale forks. + +#![warn(missing_docs)] + use std::{marker::PhantomData, sync::Arc}; use log::{debug, error, warn}; @@ -29,7 +34,8 @@ use sp_runtime::traits::{Block, Header}; use crate::LOG_TARGET; -pub struct CanonEngine { +/// `OffchainMMR` exposes MMR offchain canonicalization and pruning logic. 
+pub struct OffchainMMR { pub client: Arc, pub offchain_db: OffchainDb, pub indexing_prefix: Vec, @@ -38,7 +44,7 @@ pub struct CanonEngine { pub _phantom: PhantomData, } -impl CanonEngine +impl OffchainMMR where C: HeaderBackend + HeaderMetadata, S: OffchainStorage, diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs index 85320e9c0d3a4..cb567c0137e78 100644 --- a/frame/merkle-mountain-range/src/lib.rs +++ b/frame/merkle-mountain-range/src/lib.rs @@ -24,7 +24,7 @@ //! //! The MMR pallet constructs an MMR from leaf data obtained on every block from //! `LeafDataProvider`. MMR nodes are stored both in: -//! - on-chain storage - hashes only; not full leaf content) +//! - on-chain storage - hashes only; not full leaf content; //! - off-chain storage - via Indexing API we push full leaf content (and all internal nodes as //! well) to the Off-chain DB, so that the data is available for Off-chain workers. //! Hashing used for MMR is configurable independently from the rest of the runtime (i.e. not using
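
The canonicalization logic in the patches above hinges on the two offchain-db key formats provided by `NodesUtils` in `sp_mmr_primitives::utils`: a fork-aware "temp" key that includes the parent hash of the block that added the node, and a canonical key derived from the node position alone. The following is a minimal standalone sketch of that idea, not code from the patch series: the free functions, the plain `u64` node index and the `main` driver are illustration-only simplifications (the real versions are associated functions of `NodesUtils`, with the temp key generic over the runtime `Header` type), and the tuple encodings assume the `parity-scale-codec` crate, aliased as `codec` as elsewhere in the repo.

use codec::Encode;

type NodeIndex = u64;

// Fork-aware ("temp") key: includes the parent hash, so competing forks write under distinct keys.
fn node_temp_offchain_key<H: Encode>(prefix: &[u8], pos: NodeIndex, parent_hash: H) -> Vec<u8> {
    (prefix, pos, parent_hash).encode()
}

// Canonical key: depends only on the node position; nodes are moved here once their block is final.
fn node_canon_offchain_key(prefix: &[u8], pos: NodeIndex) -> Vec<u8> {
    (prefix, pos).encode()
}

fn main() {
    let parent_hash = [0u8; 32];
    let temp = node_temp_offchain_key(b"mmr", 7, parent_hash);
    let canon = node_canon_offchain_key(b"mmr", 7);
    // The two schemes never collide, which is what lets the gadget copy finalized
    // nodes to the canon key and clear the temp keys of stale forks afterwards.
    assert_ne!(temp, canon);
    println!("temp key: {:?}, canon key: {:?}", temp, canon);
}

Because the parent hash is part of the temp key, forks never overwrite each other's offchain nodes; on finality, `canonicalize_branch` copies each value to its canon key and `prune_branch` clears the temp entries of stale forks.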
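
The builder's `try_build` loop calls `utils::first_mmr_block_num` to find the first block for which the pallet produced a leaf, so it never tries to canonicalize blocks predating the pallet. Since the pallet appends exactly one leaf per block, the arithmetic is a single subtraction; the sketch below is a simplified stand-in that uses plain `u64` instead of the runtime block-number type and returns `None` where the real helper returns an `Error`.

fn first_mmr_block_num(best_block_num: u64, mmr_leaf_count: u64) -> Option<u64> {
    // With the chain at `best_block_num` and `mmr_leaf_count` leaves in the MMR,
    // the pallet must have been added at `best_block_num - mmr_leaf_count + 1`.
    best_block_num.checked_sub(mmr_leaf_count)?.checked_add(1)
}

fn main() {
    // Pallet added at block 21: by block 30 the MMR holds 10 leaves (blocks 21..=30).
    assert_eq!(first_mmr_block_num(30, 10), Some(21));
    // More leaves than blocks indicates corrupted state; the real helper logs and errors out.
    assert_eq!(first_mmr_block_num(5, 10), None);
}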