Skip to content
This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

pallet-mmr: move offchain logic to client-side gadget #12753

Merged
merged 15 commits into from
Nov 29, 2022
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
53 changes: 12 additions & 41 deletions client/consensus/babe/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,8 @@ use sp_api::{ApiExt, ProvideRuntimeApi};
use sp_application_crypto::AppKey;
use sp_block_builder::BlockBuilder as BlockBuilderApi;
use sp_blockchain::{
Backend as _, Error as ClientError, HeaderBackend, HeaderMetadata, Result as ClientResult,
Backend as _, Error as ClientError, ForkBackend, HeaderBackend, HeaderMetadata,
Result as ClientResult,
};
use sp_consensus::{
BlockOrigin, CacheKeyId, Environment, Error as ConsensusError, Proposer, SelectChain,
Expand All @@ -123,7 +124,7 @@ use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvid
use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr};
use sp_runtime::{
generic::{BlockId, OpaqueDigestItemId},
traits::{Block as BlockT, Header, NumberFor, SaturatedConversion, Saturating, Zero},
traits::{Block as BlockT, Header, NumberFor, SaturatedConversion, Zero},
DigestItem,
};

Expand Down Expand Up @@ -520,7 +521,7 @@ fn aux_storage_cleanup<C: HeaderMetadata<Block> + HeaderBackend<Block>, Block: B
let first = notification.tree_route.first().unwrap_or(&notification.hash);
match client.header_metadata(*first) {
Ok(meta) => {
aux_keys.insert(aux_schema::block_weight_key(meta.parent));
aux_keys.insert(meta.parent);
acatangiu marked this conversation as resolved.
Show resolved Hide resolved
},
Err(err) => warn!(
target: "babe",
Expand All @@ -537,47 +538,17 @@ fn aux_storage_cleanup<C: HeaderMetadata<Block> + HeaderBackend<Block>, Block: B
.iter()
// Ensure we don't prune latest finalized block.
// This should not happen, but better be safe than sorry!
.filter(|h| **h != notification.hash)
.map(aux_schema::block_weight_key),
.filter(|h| **h != notification.hash),
);

// Cleans data for stale branches.

for head in notification.stale_heads.iter() {
let mut hash = *head;
// Insert stale blocks hashes until canonical chain is reached.
// If we reach a block that is already part of the `aux_keys` we can stop the processing the
// head.
while aux_keys.insert(aux_schema::block_weight_key(hash)) {
match client.header_metadata(hash) {
Ok(meta) => {
hash = meta.parent;

// If the parent is part of the canonical chain or there doesn't exist a block
// hash for the parent number (bug?!), we can abort adding blocks.
if client
.hash(meta.number.saturating_sub(1u32.into()))
.ok()
.flatten()
.map_or(true, |h| h == hash)
{
break
}
},
Err(err) => {
warn!(
target: "babe",
"Header lookup fail while cleaning data for block {:?}: {}",
hash,
err,
);
break
},
}
}
}
// Cleans data for stale forks.
let stale_forks = client.expand_forks(&notification.stale_heads);
aux_keys.extend(stale_forks.iter());

aux_keys.into_iter().map(|val| (val, None)).collect()
aux_keys
.into_iter()
.map(|val| (aux_schema::block_weight_key(val), None))
.collect()
}

async fn answer_requests<B: BlockT, C>(
Expand Down
58 changes: 57 additions & 1 deletion primitives/blockchain/src/backend.rs
Original file line number Diff line number Diff line change
Expand Up @@ -21,9 +21,11 @@ use log::warn;
use parking_lot::RwLock;
use sp_runtime::{
generic::BlockId,
traits::{Block as BlockT, Header as HeaderT, NumberFor},
sp_std,
traits::{Block as BlockT, Header as HeaderT, NumberFor, Saturating},
Justifications,
};
use sp_std::collections::btree_set::BTreeSet;
acatangiu marked this conversation as resolved.
Show resolved Hide resolved

use crate::header_metadata::HeaderMetadata;

Expand Down Expand Up @@ -84,6 +86,60 @@ pub trait HeaderBackend<Block: BlockT>: Send + Sync {
}
}

/// Handles stale forks.
///
/// Provided for any type that can look up header metadata and canonical-chain
/// hashes (see the blanket impl below).
pub trait ForkBackend<Block: BlockT>:
	HeaderMetadata<Block> + HeaderBackend<Block> + Send + Sync
{
	/// Get all the header hashes that are part of the provided forks starting only from the fork
	/// heads.
	///
	/// Walks each fork head's parent links backwards and collects every hash that is
	/// not part of the canonical chain. Lookup failures are logged and the affected
	/// fork is abandoned (best effort) — the hashes gathered so far are still returned.
	fn expand_forks(&self, fork_heads: &[Block::Hash]) -> BTreeSet<Block::Hash> {
		let mut expanded_forks = BTreeSet::new();
		for fork_head in fork_heads {
			let mut hash = *fork_head;
			// Insert stale blocks hashes until canonical chain is reached.
			// If we reach a block that is already part of the `expanded_forks` we can stop
			// processing the fork: its remaining ancestors were collected while handling
			// an earlier head (`BTreeSet::insert` returns `false` on duplicates).
			while expanded_forks.insert(hash) {
				match self.header_metadata(hash) {
					Ok(meta) => {
						hash = meta.parent;

						// If the parent is part of the canonical chain or there doesn't exist a
						// block hash for the parent number (bug?!), we can abort adding blocks.
						// `meta.number - 1` is the parent's height; `self.hash` returns the
						// canonical hash at that height.
						if self
							.hash(meta.number.saturating_sub(1u32.into()))
							.ok()
							.flatten()
							.map_or(true, |h| h == hash)
						{
							break
						}
					},
					Err(err) => {
						warn!(
							target: "primitives::blockchain",
							"Stale header {:?} lookup fail while expanding fork {:?}: {}",
							fork_head,
							hash,
							err,
						);
						break
					},
				}
			}
		}

		expanded_forks
	}
}

// Blanket implementation: any backend that can resolve header metadata and
// canonical hashes automatically gains `expand_forks`.
impl<B: BlockT, T: HeaderMetadata<B> + HeaderBackend<B> + Send + Sync> ForkBackend<B> for T {}

/// Blockchain database backend. Does not perform any validation.
pub trait Backend<Block: BlockT>:
HeaderBackend<Block> + HeaderMetadata<Block, Error = Error>
Expand Down