diff --git a/Cargo.lock b/Cargo.lock index 8c21d575b8583..34c2d93102b3a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -344,9 +344,9 @@ checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0" [[package]] name = "async-trait" -version = "0.1.47" +version = "0.1.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e098e9c493fdf92832223594d9a164f96bdf17ba81a42aff86f85c76768726a" +checksum = "36ea56748e10732c49404c153638a15ec3d6211ec5ff35d9bb20e13b93576adf" dependencies = [ "proc-macro2", "quote", @@ -1871,7 +1871,6 @@ dependencies = [ "rustversion", "serde", "sp-core", - "sp-inherents", "sp-io", "sp-runtime", "sp-state-machine", @@ -4130,6 +4129,7 @@ dependencies = [ "sc-consensus-babe", "sc-consensus-epochs", "sc-consensus-slots", + "sc-consensus-uncles", "sc-finality-grandpa", "sc-finality-grandpa-warp-sync", "sc-keystore", @@ -4145,6 +4145,7 @@ dependencies = [ "serde_json", "soketto", "sp-authority-discovery", + "sp-authorship", "sp-consensus", "sp-consensus-babe", "sp-core", @@ -4384,6 +4385,7 @@ dependencies = [ "sp-finality-grandpa", "sp-inherents", "sp-runtime", + "sp-timestamp", "sp-transaction-pool", "structopt", "substrate-build-script-utils", @@ -4691,7 +4693,6 @@ dependencies = [ "serde", "sp-authorship", "sp-core", - "sp-inherents", "sp-io", "sp-runtime", "sp-std", @@ -7287,7 +7288,6 @@ dependencies = [ "sp-core", "sp-inherents", "sp-runtime", - "sp-timestamp", "substrate-prometheus-endpoint", ] @@ -7298,6 +7298,7 @@ dependencies = [ "async-trait", "futures 0.3.13", "futures-timer 3.0.2", + "impl-trait-for-tuples", "log", "parity-scale-codec", "sc-client-api", @@ -7322,13 +7323,10 @@ dependencies = [ name = "sc-consensus-uncles" version = "0.9.0" dependencies = [ - "log", "sc-client-api", "sp-authorship", - "sp-consensus", - "sp-core", - "sp-inherents", "sp-runtime", + "thiserror", ] [[package]] @@ -8606,6 +8604,7 @@ dependencies = [ name = "sp-authorship" version = "3.0.0" dependencies = [ + "async-trait", "parity-scale-codec", "sp-inherents", "sp-runtime", @@ -8679,6 +8678,7 @@ dependencies = [ name = "sp-consensus-aura" version = "0.9.0" dependencies = [ + "async-trait", "parity-scale-codec", "sp-api", "sp-application-crypto", @@ -8694,6 +8694,7 @@ dependencies = [ name = "sp-consensus-babe" version = "0.9.0" dependencies = [ + "async-trait", "merlin", "parity-scale-codec", "serde", @@ -8837,9 +8838,12 @@ dependencies = [ name = "sp-inherents" version = "3.0.0" dependencies = [ + "async-trait", + "futures 0.3.13", + "impl-trait-for-tuples", "parity-scale-codec", - "parking_lot 0.11.1", "sp-core", + "sp-runtime", "sp-std", "thiserror", ] @@ -9180,11 +9184,15 @@ dependencies = [ name = "sp-timestamp" version = "3.0.0" dependencies = [ + "async-trait", + "futures-timer 3.0.2", + "log", "parity-scale-codec", "sp-api", "sp-inherents", "sp-runtime", "sp-std", + "thiserror", "wasm-timer", ] @@ -9778,6 +9786,7 @@ dependencies = [ "sp-keyring", "sp-keystore", "sp-runtime", + "sp-timestamp", "test-runner", ] diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index e53320c940510..d45241362fd2f 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -35,6 +35,7 @@ sc-finality-grandpa = { version = "0.9.0", path = "../../../client/finality-gran sp-finality-grandpa = { version = "3.0.0", path = "../../../primitives/finality-grandpa" } sc-client-api = { version = "3.0.0", path = "../../../client/api" } sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } 
+sp-timestamp = { version = "3.0.0", path = "../../../primitives/timestamp" } # These dependencies are used for the node template's RPCs jsonrpc-core = "15.1.0" diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 197a495b438b2..c73956d885bf3 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -1,11 +1,9 @@ //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. -use std::sync::Arc; -use std::time::Duration; +use std::{sync::Arc, time::Duration}; use sc_client_api::{ExecutorProvider, RemoteBackend}; use node_template_runtime::{self, opaque::Block, RuntimeApi}; use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; -use sp_inherents::InherentDataProviders; use sc_executor::native_executor_instance; pub use sc_executor::NativeExecutor; use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; @@ -13,6 +11,7 @@ use sc_consensus_aura::{ImportQueueParams, StartAuraParams, SlotProportion}; use sc_finality_grandpa::SharedVoterState; use sc_keystore::LocalKeystore; use sc_telemetry::{Telemetry, TelemetryWorker}; +use sp_consensus::SlotData; // Our native executor instance. native_executor_instance!( @@ -45,7 +44,6 @@ pub fn new_partial(config: &Configuration) -> Result Result( + let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration(); + + let import_queue = sc_consensus_aura::import_queue::( ImportQueueParams { block_import: aura_block_import.clone(), justification_import: Some(Box::new(grandpa_block_import.clone())), client: client.clone(), - inherent_data_providers: inherent_data_providers.clone(), + create_inherent_data_providers: move |_, ()| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration( + *timestamp, + slot_duration, + ); + + Ok((timestamp, slot)) + }, spawner: &task_manager.spawn_essential_handle(), can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()), - slot_duration: sc_consensus_aura::slot_duration(&*client)?, registry: config.prometheus_registry(), check_for_equivocation: Default::default(), telemetry: telemetry.as_ref().map(|x| x.handle()), @@ -113,7 +122,6 @@ pub fn new_partial(config: &Configuration) -> Result Result mut keystore_container, select_chain, transaction_pool, - inherent_data_providers, other: (block_import, grandpa_link, mut telemetry), } = new_partial(&config)?; @@ -220,14 +227,27 @@ pub fn new_full(mut config: Configuration) -> Result let can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); - let aura = sc_consensus_aura::start_aura::( + let slot_duration = sc_consensus_aura::slot_duration(&*client)?; + let raw_slot_duration = slot_duration.slot_duration(); + + let aura = sc_consensus_aura::start_aura::( StartAuraParams { - slot_duration: sc_consensus_aura::slot_duration(&*client)?, + slot_duration, client: client.clone(), select_chain, block_import, proposer_factory, - inherent_data_providers: inherent_data_providers.clone(), + create_inherent_data_providers: move |_, ()| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration( + *timestamp, + raw_slot_duration, + ); + + Ok((timestamp, slot)) + }, force_authoring, backoff_authoring_blocks, keystore: 
keystore_container.sync_keystore(), @@ -338,15 +358,26 @@ pub fn new_light(mut config: Configuration) -> Result client.clone(), ); - let import_queue = sc_consensus_aura::import_queue::( + let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration(); + + let import_queue = sc_consensus_aura::import_queue::( ImportQueueParams { block_import: aura_block_import.clone(), justification_import: Some(Box::new(grandpa_block_import.clone())), client: client.clone(), - inherent_data_providers: InherentDataProviders::new(), + create_inherent_data_providers: move |_, ()| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration( + *timestamp, + slot_duration, + ); + + Ok((timestamp, slot)) + }, spawner: &task_manager.spawn_essential_handle(), can_author_with: sp_consensus::NeverCanAuthor, - slot_duration: sc_consensus_aura::slot_duration(&*client)?, registry: config.prometheus_registry(), check_for_equivocation: Default::default(), telemetry: telemetry.as_ref().map(|x| x.handle()), diff --git a/bin/node/bench/src/construct.rs b/bin/node/bench/src/construct.rs index 6524662317148..3dce8966f7a1d 100644 --- a/bin/node/bench/src/construct.rs +++ b/bin/node/bench/src/construct.rs @@ -49,6 +49,7 @@ use sp_transaction_pool::{ TxHash, }; use sp_consensus::{Environment, Proposer}; +use sp_inherents::InherentDataProvider; use crate::{ common::SizeType, @@ -153,10 +154,7 @@ impl core::Benchmark for ConstructionBenchmark { None, None, ); - let inherent_data_providers = sp_inherents::InherentDataProviders::new(); - inherent_data_providers - .register_provider(sp_timestamp::InherentDataProvider) - .expect("Failed to register timestamp data provider"); + let timestamp_provider = sp_timestamp::InherentDataProvider::from_system_time(); let start = std::time::Instant::now(); @@ -168,7 +166,7 @@ impl core::Benchmark for ConstructionBenchmark { let _block = futures::executor::block_on( proposer.propose( - inherent_data_providers.create_inherent_data().expect("Create inherent data failed"), + timestamp_provider.create_inherent_data().expect("Create inherent data failed"), Default::default(), std::time::Duration::from_secs(20), None, diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index f552d3fff36cc..ccba896a20682 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -49,7 +49,8 @@ sp-consensus-babe = { version = "0.9.0", path = "../../../primitives/consensus/b grandpa-primitives = { version = "3.0.0", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } sp-core = { version = "3.0.0", path = "../../../primitives/core" } sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } -sp-timestamp = { version = "3.0.0", default-features = false, path = "../../../primitives/timestamp" } +sp-timestamp = { version = "3.0.0", path = "../../../primitives/timestamp" } +sp-authorship = { version = "3.0.0", path = "../../../primitives/authorship" } sp-inherents = { version = "3.0.0", path = "../../../primitives/inherents" } sp-keyring = { version = "3.0.0", path = "../../../primitives/keyring" } sp-keystore = { version = "0.9.0", path = "../../../primitives/keystore" } @@ -65,6 +66,7 @@ sc-transaction-pool = { version = "3.0.0", path = "../../../client/transaction-p sc-network = { version = "0.9.0", path = "../../../client/network" } sc-consensus-slots = { version = "0.9.0", path = "../../../client/consensus/slots" } 
sc-consensus-babe = { version = "0.9.0", path = "../../../client/consensus/babe" } +sc-consensus-uncles = { version = "0.9.0", path = "../../../client/consensus/uncles" } grandpa = { version = "0.9.0", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } sc-client-db = { version = "0.9.0", default-features = false, path = "../../../client/db" } sc-offchain = { version = "3.0.0", path = "../../../client/offchain" } diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index b00451267d96c..a13f8be9af136 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -27,7 +27,6 @@ use node_runtime::RuntimeApi; use sc_service::{ config::Configuration, error::Error as ServiceError, RpcHandlers, TaskManager, }; -use sp_inherents::InherentDataProviders; use sc_network::{Event, NetworkService}; use sp_runtime::traits::Block as BlockT; use futures::prelude::*; @@ -109,15 +108,29 @@ pub fn new_partial( client.clone(), )?; - let inherent_data_providers = sp_inherents::InherentDataProviders::new(); - + let slot_duration = babe_link.config().slot_duration(); let import_queue = sc_consensus_babe::import_queue( babe_link.clone(), block_import.clone(), Some(Box::new(justification_import)), client.clone(), select_chain.clone(), - inherent_data_providers.clone(), + move |_, ()| { + async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_duration( + *timestamp, + slot_duration, + ); + + let uncles = + sp_authorship::InherentDataProvider::<::Header>::check_inherents(); + + Ok((timestamp, slot, uncles)) + } + }, &task_manager.spawn_essential_handle(), config.prometheus_registry(), sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()), @@ -183,14 +196,12 @@ pub fn new_partial( select_chain, import_queue, transaction_pool, - inherent_data_providers, other: (rpc_extensions_builder, import_setup, rpc_setup, telemetry), }) } pub struct NewFullBase { pub task_manager: TaskManager, - pub inherent_data_providers: InherentDataProviders, pub client: Arc, pub network: Arc::Hash>>, pub network_status_sinks: sc_service::NetworkStatusSinks, @@ -213,7 +224,6 @@ pub fn new_full_base( keystore_container, select_chain, transaction_pool, - inherent_data_providers, other: (rpc_extensions_builder, import_setup, rpc_setup, mut telemetry), } = new_partial(&config)?; @@ -291,6 +301,8 @@ pub fn new_full_base( let can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); + let client_clone = client.clone(); + let slot_duration = babe_link.config().slot_duration(); let babe_config = sc_consensus_babe::BabeParams { keystore: keystore_container.sync_keystore(), client: client.clone(), @@ -298,7 +310,25 @@ pub fn new_full_base( env: proposer, block_import, sync_oracle: network.clone(), - inherent_data_providers: inherent_data_providers.clone(), + create_inherent_data_providers: move |parent, ()| { + let client_clone = client_clone.clone(); + async move { + let uncles = sc_consensus_uncles::create_uncles_inherent_data_provider( + &*client_clone, + parent, + )?; + + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_duration( + *timestamp, + slot_duration, + ); + + Ok((timestamp, slot, uncles)) + } + }, force_authoring, backoff_authoring_blocks, babe_link, @@ -383,7 +413,6 @@ pub fn new_full_base( 
network_starter.start_network(); Ok(NewFullBase { task_manager, - inherent_data_providers, client, network, network_status_sinks, @@ -463,15 +492,27 @@ pub fn new_light_base( client.clone(), )?; - let inherent_data_providers = sp_inherents::InherentDataProviders::new(); - + let slot_duration = babe_link.config().slot_duration(); let import_queue = sc_consensus_babe::import_queue( babe_link, babe_block_import, Some(Box::new(justification_import)), client.clone(), select_chain.clone(), - inherent_data_providers.clone(), + move |_, ()| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_duration( + *timestamp, + slot_duration, + ); + + let uncles = + sp_authorship::InherentDataProvider::<::Header>::check_inherents(); + + Ok((timestamp, slot, uncles)) + }, &task_manager.spawn_essential_handle(), config.prometheus_registry(), sp_consensus::NeverCanAuthor, @@ -568,6 +609,7 @@ mod tests { use sp_transaction_pool::{MaintainedTransactionPool, ChainEvent}; use sc_client_api::BlockBackend; use sc_keystore::LocalKeystore; + use sp_inherents::InherentDataProvider; type AccountPublic = ::Signer; @@ -597,7 +639,7 @@ mod tests { |config| { let mut setup_handles = None; let NewFullBase { - task_manager, inherent_data_providers, client, network, transaction_pool, .. + task_manager, client, network, transaction_pool, .. } = new_full_base(config, | block_import: &sc_consensus_babe::BabeBlockImport, @@ -610,17 +652,13 @@ mod tests { let node = sc_service_test::TestNetComponents::new( task_manager, client, network, transaction_pool ); - Ok((node, (inherent_data_providers, setup_handles.unwrap()))) + Ok((node, setup_handles.unwrap())) }, |config| { let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?; Ok(sc_service_test::TestNetComponents::new(keep_alive, client, network, transaction_pool)) }, - |service, &mut (ref inherent_data_providers, (ref mut block_import, ref babe_link))| { - let mut inherent_data = inherent_data_providers - .create_inherent_data() - .expect("Creates inherent data."); - + |service, &mut (ref mut block_import, ref babe_link)| { let parent_id = BlockId::number(service.client().chain_info().best_number); let parent_header = service.client().header(&parent_id).unwrap().unwrap(); let parent_hash = parent_header.hash(); @@ -648,11 +686,6 @@ mod tests { // even though there's only one authority some slots might be empty, // so we must keep trying the next slots until we can claim one. 
let (babe_pre_digest, epoch_descriptor) = loop { - inherent_data.replace_data( - sp_timestamp::INHERENT_IDENTIFIER, - &(slot * SLOT_DURATION), - ); - let epoch_descriptor = babe_link.epoch_changes().shared_data().epoch_descriptor_for_child_of( descendent_query(&*service.client()), &parent_hash, @@ -676,6 +709,13 @@ mod tests { slot += 1; }; + let inherent_data = ( + sp_timestamp::InherentDataProvider::new( + std::time::Duration::from_millis(SLOT_DURATION * slot).into(), + ), + sp_consensus_babe::inherents::InherentDataProvider::new(slot.into()), + ).create_inherent_data().expect("Creates inherent data"); + digest.push(::babe_pre_digest(babe_pre_digest)); let new_block = futures::executor::block_on(async move { diff --git a/bin/node/test-runner-example/Cargo.toml b/bin/node/test-runner-example/Cargo.toml index 9d810ddbcfde0..7b86582031323 100644 --- a/bin/node/test-runner-example/Cargo.toml +++ b/bin/node/test-runner-example/Cargo.toml @@ -32,6 +32,7 @@ sc-consensus = { version = "0.9.0", path = "../../../client/consensus/common" } sp-runtime = { path = "../../../primitives/runtime", version = "3.0.0" } sp-keyring = { version = "3.0.0", path = "../../../primitives/keyring" } +sp-timestamp = { version = "3.0.0", path = "../../../primitives/timestamp" } sp-api = { version = "3.0.0", path = "../../../primitives/api" } sp-inherents = { version = "3.0.0", path = "../../../primitives/inherents" } sp-keystore = { version = "0.9.0", path = "../../../primitives/keystore" } diff --git a/bin/node/test-runner-example/src/lib.rs b/bin/node/test-runner-example/src/lib.rs index ac589437248ee..8a5fbdad885c9 100644 --- a/bin/node/test-runner-example/src/lib.rs +++ b/bin/node/test-runner-example/src/lib.rs @@ -22,7 +22,7 @@ use test_runner::{Node, ChainInfo, SignatureVerificationOverride, default_config use grandpa::GrandpaBlockImport; use sc_service::{TFullBackend, TFullClient, Configuration, TaskManager, new_full_parts, TaskExecutor}; use std::sync::Arc; -use sp_inherents::InherentDataProviders; +use sp_inherents::CreateInherentDataProviders; use sc_consensus_babe::BabeBlockImport; use sp_keystore::SyncCryptoStorePtr; use sp_keyring::sr25519::Keyring::Alice; @@ -59,6 +59,10 @@ impl ChainInfo for NodeTemplateChainInfo { Self::SelectChain, >; type SignedExtras = node_runtime::SignedExtra; + type InherentDataProviders = ( + sp_timestamp::InherentDataProvider, + sp_consensus_babe::inherents::InherentDataProvider, + ); fn signed_extras(from: ::AccountId) -> Self::SignedExtras { ( @@ -84,7 +88,11 @@ impl ChainInfo for NodeTemplateChainInfo { Arc>, SyncCryptoStorePtr, TaskManager, - InherentDataProviders, + Box>, Option< Box< dyn ConsensusDataProvider< @@ -105,7 +113,6 @@ impl ChainInfo for NodeTemplateChainInfo { new_full_parts::(config, None)?; let client = Arc::new(client); - let inherent_providers = InherentDataProviders::new(); let select_chain = sc_consensus::LongestChain::new(backend.clone()); let (grandpa_block_import, ..) 
= @@ -116,8 +123,9 @@ impl ChainInfo for NodeTemplateChainInfo { None )?; + let slot_duration = sc_consensus_babe::Config::get_or_compute(&*client)?; let (block_import, babe_link) = sc_consensus_babe::block_import( - sc_consensus_babe::Config::get_or_compute(&*client)?, + slot_duration.clone(), grandpa_block_import, client.clone(), )?; @@ -125,7 +133,6 @@ impl ChainInfo for NodeTemplateChainInfo { let consensus_data_provider = BabeConsensusDataProvider::new( client.clone(), keystore.sync_keystore(), - &inherent_providers, babe_link.epoch_changes().clone(), vec![(AuthorityId::from(Alice.public()), 1000)], ) @@ -136,7 +143,18 @@ impl ChainInfo for NodeTemplateChainInfo { backend, keystore.sync_keystore(), task_manager, - inherent_providers, + Box::new(move |_, _| { + let slot_duration = slot_duration.clone(); + async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + let slot = sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_duration( + *timestamp, + slot_duration.slot_duration(), + ); + + Ok((timestamp, slot)) + } + }), Some(Box::new(consensus_data_provider)), select_chain, block_import, diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index b2301fa9c5de5..27c1534032f49 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -33,7 +33,6 @@ sp-version = { version = "3.0.0", path = "../../../primitives/version" } sc-consensus-slots = { version = "0.9.0", path = "../slots" } sp-api = { version = "3.0.0", path = "../../../primitives/api" } sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } -sp-timestamp = { version = "3.0.0", path = "../../../primitives/timestamp" } sp-keystore = { version = "0.9.0", path = "../../../primitives/keystore" } sc-telemetry = { version = "3.0.0", path = "../../telemetry" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0"} @@ -43,6 +42,7 @@ async-trait = "0.1.42" getrandom = { version = "0.2", features = ["js"], optional = true } [dev-dependencies] +sp-timestamp = { version = "3.0.0", path = "../../../primitives/timestamp" } sp-keyring = { version = "3.0.0", path = "../../../primitives/keyring" } sp-tracing = { version = "3.0.0", path = "../../../primitives/tracing" } sc-executor = { version = "0.9.0", path = "../../executor" } diff --git a/client/consensus/aura/src/import_queue.rs b/client/consensus/aura/src/import_queue.rs index 0ec95d9412c22..6bf9f69722cad 100644 --- a/client/consensus/aura/src/import_queue.rs +++ b/client/consensus/aura/src/import_queue.rs @@ -18,19 +18,15 @@ //! Module implementing the logic for verifying and importing AuRa blocks. 
-use crate::{ - AuthorityId, find_pre_digest, slot_author, aura_err, Error, AuraSlotCompatible, SlotDuration, - register_aura_inherent_data_provider, authorities, -}; +use crate::{AuthorityId, find_pre_digest, slot_author, aura_err, Error, authorities}; use std::{ - sync::Arc, time::Duration, thread, marker::PhantomData, hash::Hash, fmt::Debug, - collections::HashMap, + sync::Arc, marker::PhantomData, hash::Hash, fmt::Debug, collections::HashMap, }; use log::{debug, info, trace}; use prometheus_endpoint::Registry; use codec::{Encode, Decode, Codec}; use sp_consensus::{ - BlockImport, CanAuthorWith, ForkChoiceStrategy, BlockImportParams, SlotData, + BlockImport, CanAuthorWith, ForkChoiceStrategy, BlockImportParams, BlockOrigin, Error as ConsensusError, BlockCheckParams, ImportResult, import_queue::{ Verifier, BasicQueue, DefaultImportQueue, BoxJustificationImport, @@ -43,10 +39,9 @@ use sp_runtime::{generic::{BlockId, OpaqueDigestItemId}, Justifications}; use sp_runtime::traits::{Block as BlockT, Header, DigestItemFor, Zero}; use sp_api::ProvideRuntimeApi; use sp_core::crypto::Pair; -use sp_inherents::{InherentDataProviders, InherentData}; -use sp_timestamp::InherentError as TIError; -use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_TRACE, CONSENSUS_DEBUG, CONSENSUS_INFO}; -use sc_consensus_slots::{CheckedHeader, SlotCompatible, check_equivocation}; +use sp_inherents::{CreateInherentDataProviders, InherentDataProvider as _}; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_TRACE, CONSENSUS_DEBUG}; +use sc_consensus_slots::{CheckedHeader, check_equivocation, InherentDataProviderExt}; use sp_consensus_slots::Slot; use sp_api::ApiExt; use sp_consensus_aura::{ @@ -118,26 +113,26 @@ fn check_header( } /// A verifier for Aura blocks. -pub struct AuraVerifier { +pub struct AuraVerifier { client: Arc, phantom: PhantomData
<P>
, - inherent_data_providers: InherentDataProviders, + create_inherent_data_providers: IDP, can_author_with: CAW, check_for_equivocation: CheckForEquivocation, telemetry: Option, } -impl AuraVerifier { +impl AuraVerifier { pub(crate) fn new( client: Arc, - inherent_data_providers: InherentDataProviders, + create_inherent_data_providers: IDP, can_author_with: CAW, check_for_equivocation: CheckForEquivocation, telemetry: Option, ) -> Self { Self { client, - inherent_data_providers, + create_inherent_data_providers, can_author_with, check_for_equivocation, telemetry, @@ -146,22 +141,22 @@ impl AuraVerifier { } } -impl AuraVerifier where +impl AuraVerifier where P: Send + Sync + 'static, CAW: Send + Sync + 'static, + IDP: Send, { - fn check_inherents( + async fn check_inherents( &self, block: B, block_id: BlockId, - inherent_data: InherentData, - timestamp_now: u64, + inherent_data: sp_inherents::InherentData, + create_inherent_data_providers: IDP::InherentDataProviders, ) -> Result<(), Error> where C: ProvideRuntimeApi, C::Api: BlockBuilderApi, CAW: CanAuthorWith, + IDP: CreateInherentDataProviders, { - const MAX_TIMESTAMP_DRIFT_SECS: u64 = 60; - if let Err(e) = self.can_author_with.can_author_with(&block_id) { debug!( target: "aura", @@ -179,44 +174,20 @@ impl AuraVerifier where ).map_err(|e| Error::Client(e.into()))?; if !inherent_res.ok() { - inherent_res - .into_errors() - .try_for_each(|(i, e)| match TIError::try_from(&i, &e) { - Some(TIError::ValidAtTimestamp(timestamp)) => { - // halt import until timestamp is valid. - // reject when too far ahead. - if timestamp > timestamp_now + MAX_TIMESTAMP_DRIFT_SECS { - return Err(Error::TooFarInFuture); - } - - let diff = timestamp.saturating_sub(timestamp_now); - info!( - target: "aura", - "halting for block {} seconds in the future", - diff - ); - telemetry!( - self.telemetry; - CONSENSUS_INFO; - "aura.halting_for_future_block"; - "diff" => ?diff - ); - thread::sleep(Duration::from_secs(diff)); - Ok(()) - }, - Some(TIError::Other(e)) => Err(Error::Runtime(e.into())), - None => Err(Error::DataProvider( - self.inherent_data_providers.error_to_string(&i, &e) - )), - }) - } else { - Ok(()) + for (i, e) in inherent_res.into_errors() { + match create_inherent_data_providers.try_handle_error(&i, &e).await { + Some(res) => res.map_err(Error::Inherent)?, + None => return Err(Error::UnknownInherentError(i)), + } + } } + + Ok(()) } } #[async_trait::async_trait] -impl Verifier for AuraVerifier where +impl Verifier for AuraVerifier where C: ProvideRuntimeApi + Send + Sync + @@ -229,6 +200,8 @@ impl Verifier for AuraVerifier where P::Public: Send + Sync + Hash + Eq + Clone + Decode + Encode + Debug + 'static, P::Signature: Encode + Decode, CAW: CanAuthorWith + Send + Sync + 'static, + IDP: CreateInherentDataProviders + Send + Sync, + IDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { async fn verify( &mut self, @@ -237,16 +210,24 @@ impl Verifier for AuraVerifier where justifications: Option, mut body: Option>, ) -> Result<(BlockImportParams, Option)>>), String> { - let mut inherent_data = self.inherent_data_providers - .create_inherent_data() - .map_err(|e| e.into_string())?; - let (timestamp_now, slot_now, _) = AuraSlotCompatible.extract_timestamp_and_slot(&inherent_data) - .map_err(|e| format!("Could not extract timestamp and slot: {:?}", e))?; let hash = header.hash(); let parent_hash = *header.parent_hash(); let authorities = authorities(self.client.as_ref(), &BlockId::Hash(parent_hash)) .map_err(|e| format!("Could not fetch 
authorities at {:?}: {:?}", parent_hash, e))?; + let create_inherent_data_providers = self.create_inherent_data_providers + .create_inherent_data_providers( + parent_hash, + (), + ) + .await + .map_err(|e| Error::::Client(sp_blockchain::Error::Application(e)))?; + + let mut inherent_data = create_inherent_data_providers.create_inherent_data() + .map_err(Error::::Inherent)?; + + let slot_now = create_inherent_data_providers.slot(); + // we add one to allow for some small drift. // FIXME #1019 in the future, alter this queue to allow deferring of // headers @@ -264,9 +245,10 @@ impl Verifier for AuraVerifier where // to check that the internally-set timestamp in the inherents // actually matches the slot set in the seal. if let Some(inner_body) = body.take() { - inherent_data.aura_replace_inherent_data(slot); let block = B::new(pre_header.clone(), inner_body); + inherent_data.aura_replace_inherent_data(slot); + // skip the inherents verification if the runtime API is old. if self.client .runtime_api() @@ -280,8 +262,8 @@ impl Verifier for AuraVerifier where block.clone(), BlockId::Hash(parent_hash), inherent_data, - *timestamp_now, - ).map_err(|e| e.to_string())?; + create_inherent_data_providers, + ).await.map_err(|e| e.to_string())?; } let (_, inner_body) = block.deconstruct(); @@ -480,15 +462,15 @@ impl Default for CheckForEquivocation { } /// Parameters of [`import_queue`]. -pub struct ImportQueueParams<'a, Block, I, C, S, CAW> { +pub struct ImportQueueParams<'a, Block, I, C, S, CAW, CIDP> { /// The block import to use. pub block_import: I, /// The justification import. pub justification_import: Option>, /// The client to interact with the chain. pub client: Arc, - /// The inherent data provider, to create the inherent data. - pub inherent_data_providers: InherentDataProviders, + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: CIDP, /// The spawner to spawn background tasks. pub spawner: &'a S, /// The prometheus registry. @@ -497,26 +479,23 @@ pub struct ImportQueueParams<'a, Block, I, C, S, CAW> { pub can_author_with: CAW, /// Should we check for equivocation? pub check_for_equivocation: CheckForEquivocation, - /// The duration of one slot. - pub slot_duration: SlotDuration, /// Telemetry instance used to report telemetry metrics. pub telemetry: Option, } /// Start an import queue for the Aura consensus algorithm. 
-pub fn import_queue<'a, P, Block, I, C, S, CAW>( +pub fn import_queue<'a, P, Block, I, C, S, CAW, CIDP>( ImportQueueParams { block_import, justification_import, client, - inherent_data_providers, + create_inherent_data_providers, spawner, registry, can_author_with, check_for_equivocation, - slot_duration, telemetry, - }: ImportQueueParams<'a, Block, I, C, S, CAW> + }: ImportQueueParams<'a, Block, I, C, S, CAW, CIDP> ) -> Result, sp_consensus::Error> where Block: BlockT, C::Api: BlockBuilderApi + AuraApi> + ApiExt, @@ -538,13 +517,14 @@ pub fn import_queue<'a, P, Block, I, C, S, CAW>( P::Signature: Encode + Decode, S: sp_core::traits::SpawnEssentialNamed, CAW: CanAuthorWith + Send + Sync + 'static, + CIDP: CreateInherentDataProviders + Sync + Send + 'static, + CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { - register_aura_inherent_data_provider(&inherent_data_providers, slot_duration.slot_duration())?; initialize_authorities_cache(&*client)?; - let verifier = AuraVerifier::<_, P, _>::new( + let verifier = AuraVerifier::<_, P, _, _>::new( client, - inherent_data_providers, + create_inherent_data_providers, can_author_with, check_for_equivocation, telemetry, diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 3c72f359f8f15..ce254799d61f7 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -31,7 +31,8 @@ //! NOTE: Aura itself is designed to be generic over the crypto used. #![forbid(missing_docs, unsafe_code)] use std::{ - sync::Arc, marker::PhantomData, hash::Hash, fmt::Debug, pin::Pin, convert::{TryFrom, TryInto}, + sync::Arc, marker::PhantomData, hash::Hash, fmt::Debug, pin::Pin, + convert::{TryFrom, TryInto}, }; use futures::prelude::*; @@ -41,7 +42,7 @@ use codec::{Encode, Decode, Codec}; use sp_consensus::{ BlockImport, Environment, Proposer, CanAuthorWith, ForkChoiceStrategy, BlockImportParams, - BlockOrigin, Error as ConsensusError, SelectChain, SlotData, + BlockOrigin, Error as ConsensusError, SelectChain, }; use sc_client_api::{backend::AuxStore, BlockOf}; use sp_blockchain::{Result as CResult, well_known_cache_keys, ProvideCache, HeaderBackend}; @@ -52,10 +53,11 @@ use sp_runtime::traits::{Block as BlockT, Header, DigestItemFor, Zero, Member}; use sp_api::ProvideRuntimeApi; use sp_core::crypto::Pair; use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; -use sp_inherents::{InherentDataProviders, InherentData}; -use sp_timestamp::TimestampInherentData; -use sc_consensus_slots::{SlotInfo, SlotCompatible, StorageChanges, BackoffAuthoringBlocksStrategy}; +use sp_inherents::CreateInherentDataProviders; use sc_telemetry::TelemetryHandle; +use sc_consensus_slots::{ + SlotInfo, BackoffAuthoringBlocksStrategy, InherentDataProviderExt, StorageChanges, +}; use sp_consensus_slots::Slot; mod import_queue; @@ -64,7 +66,7 @@ pub use sp_consensus_aura::{ ConsensusLog, AuraApi, AURA_ENGINE_ID, digests::CompatibleDigestItem, inherents::{ InherentType as AuraInherent, - AuraInherentData, INHERENT_IDENTIFIER, InherentDataProvider, + INHERENT_IDENTIFIER, InherentDataProvider, }, }; pub use sp_consensus::SyncOracle; @@ -103,24 +105,8 @@ fn slot_author(slot: Slot, authorities: &[AuthorityId
<P>
]) -> Option<&A Some(current_author) } -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -struct AuraSlotCompatible; - -impl SlotCompatible for AuraSlotCompatible { - fn extract_timestamp_and_slot( - &self, - data: &InherentData, - ) -> Result<(sp_timestamp::Timestamp, AuraInherent, std::time::Duration), sp_consensus::Error> { - data.timestamp_inherent_data() - .and_then(|t| data.aura_inherent_data().map(|a| (t, a))) - .map_err(Into::into) - .map_err(sp_consensus::Error::InherentData) - .map(|(x, y)| (x, y, Default::default())) - } -} - /// Parameters of [`start_aura`]. -pub struct StartAuraParams { +pub struct StartAuraParams { /// The duration of a slot. pub slot_duration: SlotDuration, /// The client to interact with the chain. @@ -133,8 +119,8 @@ pub struct StartAuraParams { pub proposer_factory: PF, /// The sync oracle that can give us the current sync status. pub sync_oracle: SO, - /// The inherent data providers to create the inherent data. - pub inherent_data_providers: InherentDataProviders, + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: IDP, /// Should we force the authoring of blocks? pub force_authoring: bool, /// The backoff strategy when we miss slots. @@ -154,7 +140,7 @@ pub struct StartAuraParams { } /// Start the aura worker. The returned future should be run in a futures executor. -pub fn start_aura( +pub fn start_aura( StartAuraParams { slot_duration, client, @@ -162,14 +148,14 @@ pub fn start_aura( block_import, proposer_factory, sync_oracle, - inherent_data_providers, + create_inherent_data_providers, force_authoring, backoff_authoring_blocks, keystore, can_author_with, block_proposal_slot_portion, telemetry, - }: StartAuraParams, + }: StartAuraParams, ) -> Result, sp_consensus::Error> where B: BlockT, C: ProvideRuntimeApi + BlockOf + ProvideCache + AuxStore + HeaderBackend + Send + Sync, @@ -185,6 +171,8 @@ pub fn start_aura( SO: SyncOracle + Send + Sync + Clone, CAW: CanAuthorWith + Send, BS: BackoffAuthoringBlocksStrategy> + Send + 'static, + IDP: CreateInherentDataProviders + Send, + IDP::InherentDataProviders: InherentDataProviderExt + Send, { let worker = build_aura_worker::(BuildAuraWorkerParams { client: client.clone(), @@ -198,18 +186,12 @@ pub fn start_aura( block_proposal_slot_portion, }); - register_aura_inherent_data_provider( - &inherent_data_providers, - slot_duration.slot_duration() - )?; - - Ok(sc_consensus_slots::start_slot_worker::<_, _, _, _, _, AuraSlotCompatible, _, _>( + Ok(sc_consensus_slots::start_slot_worker( slot_duration, select_chain, worker, sync_oracle, - inherent_data_providers, - AuraSlotCompatible, + create_inherent_data_providers, can_author_with, )) } @@ -278,8 +260,8 @@ pub fn build_aura_worker( force_authoring, backoff_authoring_blocks, telemetry, - _key_type: PhantomData::
<P>
, block_proposal_slot_portion, + _key_type: PhantomData::
<P>
, } } @@ -452,8 +434,7 @@ where fn proposing_remaining_duration( &self, - head: &B::Header, - slot_info: &SlotInfo, + slot_info: &SlotInfo, ) -> std::time::Duration { let max_proposing = slot_info.duration.mul_f32(self.block_proposal_slot_portion.get()); @@ -464,11 +445,11 @@ where let slot_remaining = std::cmp::min(slot_remaining, max_proposing); // If parent is genesis block, we don't require any lenience factor. - if head.number().is_zero() { + if slot_info.chain_head.number().is_zero() { return slot_remaining } - let parent_slot = match find_pre_digest::(head) { + let parent_slot = match find_pre_digest::(&slot_info.chain_head) { Err(_) => return slot_remaining, Ok(d) => d, }; @@ -509,15 +490,15 @@ enum Error { SlotAuthorNotFound, #[display(fmt = "Bad signature on {:?}", _0)] BadSignature(B::Hash), - #[display(fmt = "Rejecting block too far in future")] - TooFarInFuture, Client(sp_blockchain::Error), - DataProvider(String), - Runtime(String), #[display(fmt = "Slot number must increase: parent slot: {}, this slot: {}", _0, _1)] SlotMustIncrease(Slot, Slot), #[display(fmt = "Parent ({}) of {} unavailable. Cannot import", _0, _1)] ParentUnavailable(B::Hash, B::Hash), + #[display(fmt = "Unknown inherent error for identifier: {}", "String::from_utf8_lossy(_0)")] + UnknownInherentError(sp_inherents::InherentIdentifier), + #[display(fmt = "Inherent error: {}", _0)] + Inherent(sp_inherents::Error), } impl std::convert::From> for String { @@ -543,21 +524,6 @@ fn find_pre_digest(header: &B::Header) -> Result Result<(), sp_consensus::Error> { - if !inherent_data_providers.has_provider(&INHERENT_IDENTIFIER) { - inherent_data_providers - .register_provider(InherentDataProvider::new(slot_duration)) - .map_err(Into::into) - .map_err(sp_consensus::Error::InherentData) - } else { - Ok(()) - } -} - fn authorities(client: &C, at: &BlockId) -> Result, ConsensusError> where A: Codec + Debug, B: BlockT, @@ -580,7 +546,7 @@ mod tests { use super::*; use sp_consensus::{ NoNetwork as DummyOracle, Proposal, AlwaysCanAuthor, DisableProofRecording, - import_queue::BoxJustificationImport, + import_queue::BoxJustificationImport, SlotData, }; use sc_network_test::{Block as TestBlock, *}; use sp_runtime::traits::{Block as BlockT, DigestFor}; @@ -596,6 +562,8 @@ mod tests { use substrate_test_runtime_client::{TestClient, runtime::{Header, H256}}; use sc_keystore::LocalKeystore; use sp_application_crypto::key_types::AURA; + use sp_inherents::InherentData; + use sp_timestamp::InherentDataProvider as TimestampInherentDataProvider; type Error = sp_blockchain::Error; @@ -643,7 +611,16 @@ mod tests { const SLOT_DURATION: u64 = 1000; - type AuraVerifier = import_queue::AuraVerifier; + type AuraVerifier = import_queue::AuraVerifier< + PeersFullClient, + AuthorityPair, + AlwaysCanAuthor, + Box> + >; type AuraPeer = Peer<(), PeersClient>; pub struct AuraTestNet { @@ -668,16 +645,19 @@ mod tests { match client { PeersClient::Full(client, _) => { let slot_duration = slot_duration(&*client).expect("slot duration available"); - let inherent_data_providers = InherentDataProviders::new(); - register_aura_inherent_data_provider( - &inherent_data_providers, - slot_duration.slot_duration() - ).expect("Registers aura inherent data provider"); assert_eq!(slot_duration.slot_duration().as_millis() as u64, SLOT_DURATION); import_queue::AuraVerifier::new( client, - inherent_data_providers, + Box::new(|_, _| async { + let timestamp = TimestampInherentDataProvider::from_system_time(); + let slot = 
InherentDataProvider::from_timestamp_and_duration( + *timestamp, + Duration::from_secs(6), + ); + + Ok((timestamp, slot)) + }), AlwaysCanAuthor, CheckForEquivocation::Yes, None, @@ -746,19 +726,22 @@ mod tests { let slot_duration = slot_duration(&*client).expect("slot duration available"); - let inherent_data_providers = InherentDataProviders::new(); - register_aura_inherent_data_provider( - &inherent_data_providers, slot_duration.slot_duration() - ).expect("Registers aura inherent data provider"); - - aura_futures.push(start_aura::(StartAuraParams { + aura_futures.push(start_aura::(StartAuraParams { slot_duration, block_import: client.clone(), select_chain, client, proposer_factory: environ, sync_oracle: DummyOracle, - inherent_data_providers, + create_inherent_data_providers: |_, _| async { + let timestamp = TimestampInherentDataProvider::from_system_time(); + let slot = InherentDataProvider::from_timestamp_and_duration( + *timestamp, + Duration::from_secs(6), + ); + + Ok((timestamp, slot)) + }, force_authoring: false, backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()), keystore, @@ -881,13 +864,13 @@ mod tests { let head = client.header(&BlockId::Number(0)).unwrap().unwrap(); let res = futures::executor::block_on(worker.on_slot( - head, SlotInfo { slot: 0.into(), timestamp: 0.into(), ends_at: Instant::now() + Duration::from_secs(100), inherent_data: InherentData::new(), duration: Duration::from_millis(1000), + chain_head: head, block_size_limit: None, }, )).unwrap(); diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index b04caeb3ee9d7..c69544bc06c9b 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -26,7 +26,6 @@ serde = { version = "1.0.104", features = ["derive"] } sp-version = { version = "3.0.0", path = "../../../primitives/version" } sp-io = { version = "3.0.0", path = "../../../primitives/io" } sp-inherents = { version = "3.0.0", path = "../../../primitives/inherents" } -sp-timestamp = { version = "3.0.0", path = "../../../primitives/timestamp" } sc-telemetry = { version = "3.0.0", path = "../../telemetry" } sc-keystore = { version = "3.0.0", path = "../../keystore" } sc-client-api = { version = "3.0.0", path = "../../api" } @@ -56,6 +55,7 @@ retain_mut = "0.1.2" async-trait = "0.1.42" [dev-dependencies] +sp-timestamp = { version = "3.0.0", path = "../../../primitives/timestamp" } sp-keyring = { version = "3.0.0", path = "../../../primitives/keyring" } sp-tracing = { version = "3.0.0", path = "../../../primitives/tracing" } sc-executor = { version = "0.9.0", path = "../../executor" } diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index c3f1929c2ea8b..3bdeaabf614de 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -77,7 +77,7 @@ pub use sp_consensus::SyncOracle; pub use sc_consensus_slots::SlotProportion; use std::{ collections::HashMap, sync::Arc, u64, pin::Pin, borrow::Cow, convert::TryInto, - time::{Duration, Instant}, + time::Duration, }; use sp_consensus::{ImportResult, CanAuthorWith, import_queue::BoxJustificationImport}; use sp_core::crypto::Public; @@ -89,7 +89,7 @@ use sp_runtime::{ }; use sp_api::{ProvideRuntimeApi, NumberFor}; use parking_lot::Mutex; -use sp_inherents::{InherentDataProviders, InherentData}; +use sp_inherents::{CreateInherentDataProviders, InherentDataProvider, InherentData}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_TRACE, CONSENSUS_DEBUG}; use sp_consensus::{ 
BlockImport, Environment, Proposer, BlockCheckParams, @@ -97,7 +97,6 @@ use sp_consensus::{ SelectChain, SlotData, import_queue::{Verifier, BasicQueue, DefaultImportQueue, CacheKeyId}, }; use sp_consensus_babe::inherents::BabeInherentData; -use sp_timestamp::TimestampInherentData; use sc_client_api::{ backend::AuxStore, BlockchainEvents, ProvideUncles, }; @@ -110,8 +109,8 @@ use futures::prelude::*; use log::{debug, info, log, trace, warn}; use prometheus_endpoint::Registry; use sc_consensus_slots::{ - SlotInfo, SlotCompatible, StorageChanges, CheckedHeader, check_equivocation, - BackoffAuthoringBlocksStrategy + SlotInfo, StorageChanges, CheckedHeader, check_equivocation, + BackoffAuthoringBlocksStrategy, InherentDataProviderExt, }; use sc_consensus_epochs::{ descendent_query, SharedEpochChanges, EpochChangesFor, Epoch as EpochT, ViableEpochDescriptor, @@ -270,15 +269,19 @@ pub enum Error { /// Parent block has no associated weight #[display(fmt = "Parent block of {} has no associated weight", _0)] ParentBlockNoAssociatedWeight(B::Hash), + /// Check inherents error #[display(fmt = "Checking inherents failed: {}", _0)] - /// Check Inherents error - CheckInherents(String), + CheckInherents(sp_inherents::Error), + /// Unhandled check inherents error + #[display(fmt = "Checking inherents unhandled error: {}", "String::from_utf8_lossy(_0)")] + CheckInherentsUnhandled(sp_inherents::InherentIdentifier), + /// Create inherents error. + #[display(fmt = "Creating inherents failed: {}", _0)] + CreateInherents(sp_inherents::Error), /// Client error Client(sp_blockchain::Error), /// Runtime Api error. RuntimeApi(sp_api::ApiError), - /// Runtime error - Runtime(sp_inherents::Error), /// Fork tree error ForkTree(Box>), } @@ -360,7 +363,7 @@ impl std::ops::Deref for Config { } /// Parameters for BABE. -pub struct BabeParams { +pub struct BabeParams { /// The keystore that manages the keys of the node. pub keystore: SyncCryptoStorePtr, @@ -381,8 +384,8 @@ pub struct BabeParams { /// A sync oracle pub sync_oracle: SO, - /// Providers for inherent data. - pub inherent_data_providers: InherentDataProviders, + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: IDP, /// Force authoring of blocks even if we are offline pub force_authoring: bool, @@ -408,21 +411,21 @@ pub struct BabeParams { } /// Start the babe worker. 
-pub fn start_babe(BabeParams { +pub fn start_babe(BabeParams { keystore, client, select_chain, env, block_import, sync_oracle, - inherent_data_providers, + create_inherent_data_providers, force_authoring, backoff_authoring_blocks, babe_link, can_author_with, block_proposal_slot_portion, telemetry, -}: BabeParams) -> Result< +}: BabeParams) -> Result< BabeWorker, sp_consensus::Error, > where @@ -440,6 +443,8 @@ pub fn start_babe(BabeParams { SO: SyncOracle + Send + Sync + Clone + 'static, CAW: CanAuthorWith + Send + Sync + 'static, BS: BackoffAuthoringBlocksStrategy> + Send + 'static, + IDP: CreateInherentDataProviders + Send + Sync + 'static, + IDP::InherentDataProviders: InherentDataProviderExt + Send, { const HANDLE_BUFFER_SIZE: usize = 1024; @@ -461,21 +466,13 @@ pub fn start_babe(BabeParams { telemetry, }; - register_babe_inherent_data_provider(&inherent_data_providers, config.slot_duration())?; - sc_consensus_uncles::register_uncles_inherent_data_provider( - client.clone(), - select_chain.clone(), - &inherent_data_providers, - )?; - info!(target: "babe", "👶 Starting BABE Authorship worker"); let inner = sc_consensus_slots::start_slot_worker( config.0.clone(), select_chain, worker, sync_oracle, - inherent_data_providers, - babe_link.time_source, + create_inherent_data_providers, can_author_with, ); @@ -813,23 +810,22 @@ where fn proposing_remaining_duration( &self, - parent_head: &B::Header, - slot_info: &SlotInfo, + slot_info: &SlotInfo, ) -> std::time::Duration { let max_proposing = slot_info.duration.mul_f32(self.block_proposal_slot_portion.get()); let slot_remaining = slot_info.ends_at - .checked_duration_since(Instant::now()) + .checked_duration_since(std::time::Instant::now()) .unwrap_or_default(); let slot_remaining = std::cmp::min(slot_remaining, max_proposing); // If parent is genesis block, we don't require any lenience factor. - if parent_head.number().is_zero() { + if slot_info.chain_head.number().is_zero() { return slot_remaining } - let parent_slot = match find_pre_digest::(parent_head) { + let parent_slot = match find_pre_digest::(&slot_info.chain_head) { Err(_) => return slot_remaining, Ok(d) => d.slot(), }; @@ -913,27 +909,9 @@ fn find_next_config_digest(header: &B::Header) Ok(config_digest) } -#[derive(Default, Clone)] -struct TimeSource(Arc, Vec<(Instant, u64)>)>>); - -impl SlotCompatible for TimeSource { - fn extract_timestamp_and_slot( - &self, - data: &InherentData, - ) -> Result<(sp_timestamp::Timestamp, Slot, std::time::Duration), sp_consensus::Error> { - trace!(target: "babe", "extract timestamp"); - data.timestamp_inherent_data() - .and_then(|t| data.babe_inherent_data().map(|a| (t, a))) - .map_err(Into::into) - .map_err(sp_consensus::Error::InherentData) - .map(|(x, y)| (x, y, self.0.lock().0.take().unwrap_or_default())) - } -} - /// State that must be shared between the import queue and the authoring logic. #[derive(Clone)] pub struct BabeLink { - time_source: TimeSource, epoch_changes: SharedEpochChanges, config: Config, } @@ -951,30 +929,31 @@ impl BabeLink { } /// A verifier for Babe blocks. 
-pub struct BabeVerifier { +pub struct BabeVerifier { client: Arc, select_chain: SelectChain, - inherent_data_providers: sp_inherents::InherentDataProviders, + create_inherent_data_providers: CIDP, config: Config, epoch_changes: SharedEpochChanges, - time_source: TimeSource, can_author_with: CAW, telemetry: Option, } -impl BabeVerifier +impl BabeVerifier where Block: BlockT, Client: AuxStore + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, Client::Api: BlockBuilderApi + BabeApi, SelectChain: sp_consensus::SelectChain, CAW: CanAuthorWith, + CIDP: CreateInherentDataProviders, { - fn check_inherents( + async fn check_inherents( &self, block: Block, block_id: BlockId, inherent_data: InherentData, + create_inherent_data_providers: CIDP::InherentDataProviders, ) -> Result<(), Error> { if let Err(e) = self.can_author_with.can_author_with(&block_id) { debug!( @@ -993,14 +972,15 @@ where ).map_err(Error::RuntimeApi)?; if !inherent_res.ok() { - inherent_res - .into_errors() - .try_for_each(|(i, e)| { - Err(Error::CheckInherents(self.inherent_data_providers.error_to_string(&i, &e))) - }) - } else { - Ok(()) + for (i, e) in inherent_res.into_errors() { + match create_inherent_data_providers.try_handle_error(&i, &e).await { + Some(res) => res.map_err(|e| Error::CheckInherents(e))?, + None => return Err(Error::CheckInherentsUnhandled(i)), + } + } } + + Ok(()) } fn check_and_report_equivocation( @@ -1085,8 +1065,8 @@ where } #[async_trait::async_trait] -impl Verifier - for BabeVerifier +impl Verifier + for BabeVerifier where Block: BlockT, Client: HeaderMetadata + HeaderBackend + ProvideRuntimeApi @@ -1094,6 +1074,8 @@ where Client::Api: BlockBuilderApi + BabeApi, SelectChain: sp_consensus::SelectChain, CAW: CanAuthorWith + Send + Sync, + CIDP: CreateInherentDataProviders + Send + Sync, + CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { async fn verify( &mut self, @@ -1111,46 +1093,51 @@ where body, ); + let hash = header.hash(); + let parent_hash = *header.parent_hash(); + debug!(target: "babe", "We have {:?} logs in this header", header.digest().logs().len()); - let mut inherent_data = self - .inherent_data_providers - .create_inherent_data() - .map_err(Error::::Runtime)?; - let (_, slot_now, _) = self.time_source.extract_timestamp_and_slot(&inherent_data) - .map_err(Error::::Extraction)?; + let create_inherent_data_providers = self + .create_inherent_data_providers + .create_inherent_data_providers(parent_hash, ()) + .await + .map_err(|e| Error::::Client(sp_consensus::Error::from(e).into()))?; - let hash = header.hash(); - let parent_hash = *header.parent_hash(); + let slot_now = create_inherent_data_providers.slot(); let parent_header_metadata = self.client.header_metadata(parent_hash) .map_err(Error::::FetchParentHeader)?; let pre_digest = find_pre_digest::(&header)?; - let epoch_changes = self.epoch_changes.shared_data(); - let epoch_descriptor = epoch_changes.epoch_descriptor_for_child_of( - descendent_query(&*self.client), - &parent_hash, - parent_header_metadata.number, - pre_digest.slot(), - ) + let (check_header, epoch_descriptor) = { + let epoch_changes = self.epoch_changes.shared_data(); + let epoch_descriptor = epoch_changes.epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent_hash, + parent_header_metadata.number, + pre_digest.slot(), + ) .map_err(|e| Error::::ForkTree(Box::new(e)))? 
.ok_or_else(|| Error::::FetchEpoch(parent_hash))?; - let viable_epoch = epoch_changes.viable_epoch( - &epoch_descriptor, - |slot| Epoch::genesis(&self.config, slot) - ).ok_or_else(|| Error::::FetchEpoch(parent_hash))?; - - // We add one to the current slot to allow for some small drift. - // FIXME #1019 in the future, alter this queue to allow deferring of headers - let v_params = verification::VerificationParams { - header: header.clone(), - pre_digest: Some(pre_digest), - slot_now: slot_now + 1, - epoch: viable_epoch.as_ref(), + let viable_epoch = epoch_changes.viable_epoch( + &epoch_descriptor, + |slot| Epoch::genesis(&self.config, slot) + ).ok_or_else(|| Error::::FetchEpoch(parent_hash))?; + + // We add one to the current slot to allow for some small drift. + // FIXME #1019 in the future, alter this queue to allow deferring of headers + let v_params = verification::VerificationParams { + header: header.clone(), + pre_digest: Some(pre_digest), + slot_now: slot_now + 1, + epoch: viable_epoch.as_ref(), + }; + + (verification::check_header::(v_params)?, epoch_descriptor) }; - match verification::check_header::(v_params)? { + match check_header { CheckedHeader::Checked(pre_header, verified_info) => { let babe_pre_digest = verified_info.pre_digest.as_babe_pre_digest() .expect("check_header always returns a pre-digest digest item; qed"); @@ -1173,6 +1160,8 @@ where // to check that the internally-set timestamp in the inherents // actually matches the slot set in the seal. if let Some(inner_body) = body.take() { + let mut inherent_data = create_inherent_data_providers.create_inherent_data() + .map_err(Error::::CreateInherents)?; inherent_data.babe_replace_inherent_data(slot); let block = Block::new(pre_header.clone(), inner_body); @@ -1180,7 +1169,8 @@ where block.clone(), BlockId::Hash(parent_hash), inherent_data, - )?; + create_inherent_data_providers, + ).await?; let (_, inner_body) = block.deconstruct(); body = Some(inner_body); @@ -1220,22 +1210,6 @@ where } } -/// Register the babe inherent data provider, if not registered already. -pub fn register_babe_inherent_data_provider( - inherent_data_providers: &InherentDataProviders, - slot_duration: Duration, -) -> Result<(), sp_consensus::Error> { - debug!(target: "babe", "Registering"); - if !inherent_data_providers.has_provider(&sp_consensus_babe::inherents::INHERENT_IDENTIFIER) { - inherent_data_providers - .register_provider(sp_consensus_babe::inherents::InherentDataProvider::new(slot_duration)) - .map_err(Into::into) - .map_err(sp_consensus::Error::InherentData) - } else { - Ok(()) - } -} - /// A block-import handler for BABE. /// /// This scans each imported block for epoch change signals. The signals are @@ -1579,13 +1553,13 @@ pub fn block_import( config: Config, wrapped_block_import: I, client: Arc, -) -> ClientResult<(BabeBlockImport, BabeLink)> where +) -> ClientResult<(BabeBlockImport, BabeLink)> +where Client: AuxStore + HeaderBackend + HeaderMetadata, { let epoch_changes = aux_schema::load_epoch_changes::(&*client, &config)?; let link = BabeLink { epoch_changes: epoch_changes.clone(), - time_source: Default::default(), config: config.clone(), }; @@ -1616,13 +1590,13 @@ pub fn block_import( /// /// The block import object provided must be the `BabeBlockImport` or a wrapper /// of it, otherwise crucial import logic will be omitted. 
-pub fn import_queue( +pub fn import_queue( babe_link: BabeLink, block_import: Inner, justification_import: Option>, client: Arc, select_chain: SelectChain, - inherent_data_providers: InherentDataProviders, + create_inherent_data_providers: CIDP, spawner: &impl sp_core::traits::SpawnEssentialNamed, registry: Option<&Registry>, can_author_with: CAW, @@ -1636,15 +1610,14 @@ pub fn import_queue( Client::Api: BlockBuilderApi + BabeApi + ApiExt, SelectChain: sp_consensus::SelectChain + 'static, CAW: CanAuthorWith + Send + Sync + 'static, + CIDP: CreateInherentDataProviders + Send + Sync + 'static, + CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { - register_babe_inherent_data_provider(&inherent_data_providers, babe_link.config.slot_duration())?; - let verifier = BabeVerifier { select_chain, - inherent_data_providers, + create_inherent_data_providers, config: babe_link.config, epoch_changes: babe_link.epoch_changes, - time_source: babe_link.time_source, can_author_with, telemetry, client, diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 9949da61da579..d042f25399ee4 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -28,7 +28,10 @@ use sp_keystore::{ SyncCryptoStore, vrf::make_transcript as transcript_from_data, }; -use sp_consensus_babe::{AuthorityPair, Slot, AllowedSlots, make_transcript, make_transcript_data}; +use sp_consensus_babe::{ + AuthorityPair, Slot, AllowedSlots, make_transcript, make_transcript_data, + inherents::InherentDataProvider, +}; use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sp_consensus::{ @@ -48,6 +51,7 @@ use rand_chacha::{ use sc_keystore::LocalKeystore; use sp_application_crypto::key_types::BABE; use futures::executor::block_on; +use sp_timestamp::InherentDataProvider as TimestampInherentDataProvider; type Item = DigestItem; @@ -235,7 +239,17 @@ type TestSelectChain = substrate_test_runtime_client::LongestChain< >; pub struct TestVerifier { - inner: BabeVerifier, + inner: BabeVerifier< + TestBlock, + PeersFullClient, + TestSelectChain, + AlwaysCanAuthor, + Box> + >, mutator: Mutator, } @@ -253,13 +267,12 @@ impl Verifier for TestVerifier { ) -> Result<(BlockImportParams, Option)>>), String> { // apply post-sealing mutations (i.e. stripping seal, if desired). 
(self.mutator)(&mut header, Stage::PostSeal); - self.inner.verify(dbg!(origin), header, justifications, body).await + self.inner.verify(origin, header, justifications, body).await } } pub struct PeerData { link: BabeLink, - inherent_data_providers: InherentDataProviders, block_import: Mutex< Option>> >, @@ -286,7 +299,6 @@ impl TestNetFactory for BabeTestNet { ) { let client = client.as_full().expect("only full clients are tested"); - let inherent_data_providers = InherentDataProviders::new(); let config = Config::get_or_compute(&*client).expect("config available"); let (block_import, link) = crate::block_import( @@ -303,7 +315,7 @@ impl TestNetFactory for BabeTestNet { ( BlockImportAdapter::new(block_import), None, - Some(PeerData { link, inherent_data_providers, block_import: data_block_import }), + Some(PeerData { link, block_import: data_block_import }), ) } @@ -329,10 +341,17 @@ impl TestNetFactory for BabeTestNet { inner: BabeVerifier { client: client.clone(), select_chain: longest_chain, - inherent_data_providers: data.inherent_data_providers.clone(), + create_inherent_data_providers: Box::new(|_, _| async { + let timestamp = TimestampInherentDataProvider::from_system_time(); + let slot = InherentDataProvider::from_timestamp_and_duration( + *timestamp, + Duration::from_secs(6), + ); + + Ok((timestamp, slot)) + }), config: data.link.config.clone(), epoch_changes: data.link.epoch_changes.clone(), - time_source: data.link.time_source.clone(), can_author_with: AlwaysCanAuthor, telemetry: None, }, @@ -440,7 +459,15 @@ fn run_one_test( client, env: environ, sync_oracle: DummyOracle, - inherent_data_providers: data.inherent_data_providers.clone(), + create_inherent_data_providers: Box::new(|_, _| async { + let timestamp = TimestampInherentDataProvider::from_system_time(); + let slot = InherentDataProvider::from_timestamp_and_duration( + *timestamp, + Duration::from_secs(6), + ); + + Ok((timestamp, slot)) + }), force_authoring: false, backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()), babe_link: data.link.clone(), diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs index d627ea2a25c3a..29fea05d8366a 100644 --- a/client/consensus/manual-seal/src/consensus/babe.rs +++ b/client/consensus/manual-seal/src/consensus/babe.rs @@ -24,8 +24,8 @@ use codec::Encode; use std::{borrow::Cow, sync::{Arc, atomic}, time::SystemTime}; use sc_client_api::AuxStore; use sc_consensus_babe::{ - Config, Epoch, authorship, CompatibleDigestItem, BabeIntermediate, - register_babe_inherent_data_provider, INTERMEDIATE_KEY, find_pre_digest, + Config, Epoch, authorship, CompatibleDigestItem, BabeIntermediate, INTERMEDIATE_KEY, + find_pre_digest, }; use sc_consensus_epochs::{SharedEpochChanges, descendent_query, ViableEpochDescriptor, EpochHeader}; use sp_keystore::SyncCryptoStorePtr; @@ -38,12 +38,12 @@ use sp_consensus_babe::{ BabeApi, inherents::BabeInherentData, ConsensusLog, BABE_ENGINE_ID, AuthorityId, digests::{PreDigest, SecondaryPlainPreDigest, NextEpochDescriptor}, BabeAuthorityWeight, }; -use sp_inherents::{InherentDataProviders, InherentData, ProvideInherentData, InherentIdentifier}; +use sp_inherents::{InherentData, InherentDataProvider, InherentIdentifier}; use sp_runtime::{ traits::{DigestItemFor, DigestFor, Block as BlockT, Zero, Header}, generic::{Digest, BlockId}, }; -use sp_timestamp::{InherentType, InherentError, INHERENT_IDENTIFIER, TimestampInherentData}; +use sp_timestamp::{InherentType, 
INHERENT_IDENTIFIER, TimestampInherentData}; /// Provides BABE-compatible predigests and BlockImportParams. /// Intended for use with BABE runtimes. @@ -73,7 +73,6 @@ impl BabeConsensusDataProvider pub fn new( client: Arc, keystore: SyncCryptoStorePtr, - provider: &InherentDataProviders, epoch_changes: SharedEpochChanges, authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, ) -> Result { @@ -82,10 +81,6 @@ impl BabeConsensusDataProvider } let config = Config::get_or_compute(&*client)?; - let timestamp_provider = SlotTimestampProvider::new(client.clone())?; - - provider.register_provider(timestamp_provider)?; - register_babe_inherent_data_provider(provider, config.slot_duration())?; Ok(Self { config, @@ -131,7 +126,8 @@ impl ConsensusDataProvider for BabeConsensusDataProvider type Transaction = TransactionFor; fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result, Error> { - let slot = inherents.babe_inherent_data()?; + let slot = inherents.babe_inherent_data()? + .ok_or_else(|| Error::StringError("No babe inherent data".into()))?; let epoch = self.epoch(parent, slot)?; // this is a dev node environment, we should always be able to claim a slot. @@ -194,7 +190,8 @@ impl ConsensusDataProvider for BabeConsensusDataProvider params: &mut BlockImportParams, inherents: &InherentData ) -> Result<(), Error> { - let slot = inherents.babe_inherent_data()?; + let slot = inherents.babe_inherent_data()? + .ok_or_else(|| Error::StringError("No babe inherent data".into()))?; let epoch_changes = self.epoch_changes.shared_data(); let mut epoch_descriptor = epoch_changes .epoch_descriptor_for_child_of( @@ -216,7 +213,9 @@ impl ConsensusDataProvider for BabeConsensusDataProvider if !has_authority { log::info!(target: "manual-seal", "authority not found"); - let slot = *inherents.timestamp_inherent_data()? / self.config.slot_duration; + let timestamp = inherents.timestamp_inherent_data()? + .ok_or_else(|| Error::StringError("No timestamp inherent data".into()))?; + let slot = *timestamp / self.config.slot_duration; // manually hard code epoch descriptor epoch_descriptor = match epoch_descriptor { ViableEpochDescriptor::Signaled(identifier, _header) => { @@ -243,14 +242,14 @@ impl ConsensusDataProvider for BabeConsensusDataProvider /// Provide duration since unix epoch in millisecond for timestamp inherent. /// Mocks the timestamp inherent to always produce the timestamp for the next babe slot. -struct SlotTimestampProvider { +pub struct SlotTimestampProvider { time: atomic::AtomicU64, slot_duration: u64 } impl SlotTimestampProvider { - /// create a new mocked time stamp provider. - fn new(client: Arc) -> Result + /// Create a new mocked time stamp provider. + pub fn new(client: Arc) -> Result where B: BlockT, C: AuxStore + HeaderBackend + ProvideRuntimeApi, @@ -281,11 +280,8 @@ impl SlotTimestampProvider { } } -impl ProvideInherentData for SlotTimestampProvider { - fn inherent_identifier(&self) -> &'static InherentIdentifier { - &INHERENT_IDENTIFIER - } - +#[async_trait::async_trait] +impl InherentDataProvider for SlotTimestampProvider { fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), sp_inherents::Error> { // we update the time here. 
let duration: InherentType = self.time.fetch_add( @@ -296,7 +292,11 @@ impl ProvideInherentData for SlotTimestampProvider { Ok(()) } - fn error_to_string(&self, error: &[u8]) -> Option { - InherentError::try_from(&INHERENT_IDENTIFIER, error).map(|e| format!("{:?}", e)) + async fn try_handle_error( + &self, + _: &InherentIdentifier, + _: &[u8], + ) -> Option> { + None } } diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index a5351c63bc3b4..45628e90a6f9c 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -26,7 +26,7 @@ use sp_consensus::{ import_queue::{Verifier, BasicQueue, CacheKeyId, BoxBlockImport}, }; use sp_blockchain::HeaderBackend; -use sp_inherents::InherentDataProviders; +use sp_inherents::CreateInherentDataProviders; use sp_runtime::{traits::Block as BlockT, Justifications, ConsensusEngineId}; use sc_client_api::backend::{Backend as ClientBackend, Finalizer}; use sc_transaction_pool::txpool; @@ -94,7 +94,7 @@ pub fn import_queue( } /// Params required to start the instant sealing authorship task. -pub struct ManualSealParams, A: txpool::ChainApi, SC, CS> { +pub struct ManualSealParams, A: txpool::ChainApi, SC, CS, CIDP> { /// Block import instance for well. importing blocks. pub block_import: BI, @@ -117,12 +117,12 @@ pub struct ManualSealParams, A: txpool /// Digest provider for inclusion in blocks. pub consensus_data_provider: Option>>>, - /// Provider for inherents to include in blocks. - pub inherent_data_providers: InherentDataProviders, + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: CIDP, } /// Params required to start the manual sealing authorship task. -pub struct InstantSealParams, A: txpool::ChainApi, SC> { +pub struct InstantSealParams, A: txpool::ChainApi, SC, CIDP> { /// Block import instance for well. importing blocks. pub block_import: BI, @@ -141,12 +141,12 @@ pub struct InstantSealParams, A: txpoo /// Digest provider for inclusion in blocks. pub consensus_data_provider: Option>>>, - /// Provider for inherents to include in blocks. - pub inherent_data_providers: InherentDataProviders, + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: CIDP, } /// Creates the background authorship task for the manual seal engine. -pub async fn run_manual_seal( +pub async fn run_manual_seal( ManualSealParams { mut block_import, mut env, @@ -154,10 +154,9 @@ pub async fn run_manual_seal( pool, mut commands_stream, select_chain, - inherent_data_providers, consensus_data_provider, - .. - }: ManualSealParams + create_inherent_data_providers, + }: ManualSealParams ) where A: txpool::ChainApi + 'static, @@ -171,6 +170,7 @@ pub async fn run_manual_seal( CS: Stream::Hash>> + Unpin + 'static, SC: SelectChain + 'static, TransactionFor: 'static, + CIDP: CreateInherentDataProviders, { while let Some(command) = commands_stream.next().await { match command { @@ -189,10 +189,10 @@ pub async fn run_manual_seal( env: &mut env, select_chain: &select_chain, block_import: &mut block_import, - inherent_data_provider: &inherent_data_providers, consensus_data_provider: consensus_data_provider.as_ref().map(|p| &**p), pool: pool.clone(), client: client.clone(), + create_inherent_data_providers: &create_inherent_data_providers, } ).await; } @@ -215,7 +215,7 @@ pub async fn run_manual_seal( /// runs the background authorship task for the instant seal engine. 
/// instant-seal creates a new block for every transaction imported into /// the transaction pool. -pub async fn run_instant_seal( +pub async fn run_instant_seal( InstantSealParams { block_import, env, @@ -223,9 +223,8 @@ pub async fn run_instant_seal( pool, select_chain, consensus_data_provider, - inherent_data_providers, - .. - }: InstantSealParams + create_inherent_data_providers, + }: InstantSealParams ) where A: txpool::ChainApi + 'static, @@ -238,6 +237,7 @@ pub async fn run_instant_seal( E::Proposer: Proposer>, SC: SelectChain + 'static, TransactionFor: 'static, + CIDP: CreateInherentDataProviders, { // instant-seal creates blocks as soon as transactions are imported // into the transaction pool. @@ -261,7 +261,7 @@ pub async fn run_instant_seal( commands_stream, select_chain, consensus_data_provider, - inherent_data_providers, + create_inherent_data_providers, } ).await } @@ -280,7 +280,6 @@ mod tests { use sp_transaction_pool::{TransactionPool, MaintainedTransactionPool, TransactionSource}; use sp_runtime::generic::BlockId; use sp_consensus::ImportedAux; - use sp_inherents::InherentDataProviders; use sc_basic_authorship::ProposerFactory; use sc_client_api::BlockBackend; @@ -295,7 +294,6 @@ mod tests { let builder = TestClientBuilder::new(); let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); - let inherent_data_providers = InherentDataProviders::new(); let spawner = sp_core::testing::TaskExecutor::new(); let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), true.into(), api(), None, RevalidationType::Full, spawner.clone(), @@ -330,7 +328,7 @@ mod tests { pool: pool.pool().clone(), commands_stream, select_chain, - inherent_data_providers, + create_inherent_data_providers: |_, _| async { Ok(()) }, consensus_data_provider: None, } ); @@ -367,10 +365,14 @@ mod tests { let builder = TestClientBuilder::new(); let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); - let inherent_data_providers = InherentDataProviders::new(); let spawner = sp_core::testing::TaskExecutor::new(); let pool = Arc::new(BasicPool::with_revalidation_type( - Options::default(), true.into(), api(), None, RevalidationType::Full, spawner.clone(), + Options::default(), + true.into(), + api(), + None, + RevalidationType::Full, + spawner.clone(), )); let env = ProposerFactory::new( spawner.clone(), @@ -390,7 +392,7 @@ mod tests { commands_stream, select_chain, consensus_data_provider: None, - inherent_data_providers, + create_inherent_data_providers: |_, _| async { Ok(()) }, } ); std::thread::spawn(|| { @@ -442,11 +444,15 @@ mod tests { let builder = TestClientBuilder::new(); let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); - let inherent_data_providers = InherentDataProviders::new(); let pool_api = api(); let spawner = sp_core::testing::TaskExecutor::new(); let pool = Arc::new(BasicPool::with_revalidation_type( - Options::default(), true.into(), pool_api.clone(), None, RevalidationType::Full, spawner.clone(), + Options::default(), + true.into(), + pool_api.clone(), + None, + RevalidationType::Full, + spawner.clone(), )); let env = ProposerFactory::new( spawner.clone(), @@ -466,7 +472,7 @@ mod tests { commands_stream, select_chain, consensus_data_provider: None, - inherent_data_providers, + create_inherent_data_providers: |_, _| async { Ok(()) }, } ); std::thread::spawn(|| { @@ -528,7 +534,7 @@ mod tests { pool_api.add_block(block, true); 
pool_api.increment_nonce(Alice.into()); - assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Alice, 2)).await.is_ok()); + assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Bob, 0)).await.is_ok()); let (tx2, rx2) = futures::channel::oneshot::channel(); assert!(sink.send(EngineCommand::SealNewBlock { parent_hash: Some(created_block.hash), diff --git a/client/consensus/manual-seal/src/seal_block.rs b/client/consensus/manual-seal/src/seal_block.rs index a8050efb9a075..6f2b613cd939a 100644 --- a/client/consensus/manual-seal/src/seal_block.rs +++ b/client/consensus/manual-seal/src/seal_block.rs @@ -33,14 +33,14 @@ use sp_consensus::{ use sp_blockchain::HeaderBackend; use std::collections::HashMap; use std::time::Duration; -use sp_inherents::InherentDataProviders; +use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; use sp_api::{ProvideRuntimeApi, TransactionFor}; /// max duration for creating a proposal in secs pub const MAX_PROPOSAL_DURATION: u64 = 10; /// params for sealing a new block -pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, P: txpool::ChainApi> { +pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, P: txpool::ChainApi, CIDP> { /// if true, empty blocks(without extrinsics) will be created. /// otherwise, will return Error::EmptyTransactionPool. pub create_empty: bool, @@ -62,12 +62,12 @@ pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, P: pub consensus_data_provider: Option<&'a dyn ConsensusDataProvider>>, /// block import object pub block_import: &'a mut BI, - /// inherent data provider - pub inherent_data_provider: &'a InherentDataProviders, + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: &'a CIDP, } /// seals a new block with the given params -pub async fn seal_block( +pub async fn seal_block( SealBlockParams { create_empty, finalize, @@ -77,11 +77,10 @@ pub async fn seal_block( select_chain, block_import, env, - inherent_data_provider, + create_inherent_data_providers, consensus_data_provider: digest_provider, mut sender, - .. - }: SealBlockParams<'_, B, BI, SC, C, E, P> + }: SealBlockParams<'_, B, BI, SC, C, E, P, CIDP> ) where B: BlockT, @@ -93,6 +92,7 @@ pub async fn seal_block( P: txpool::ChainApi, SC: SelectChain, TransactionFor: 'static, + CIDP: CreateInherentDataProviders, { let future = async { if pool.validated_pool().status().ready == 0 && !create_empty { @@ -109,19 +109,29 @@ pub async fn seal_block( None => select_chain.best_chain()? }; + let inherent_data_providers = + create_inherent_data_providers + .create_inherent_data_providers( + parent.hash(), + (), + ) + .await + .map_err(|e| Error::Other(e))?; + + let inherent_data = inherent_data_providers.create_inherent_data()?; + let proposer = env.init(&parent) .map_err(|err| Error::StringError(format!("{:?}", err))).await?; - let id = inherent_data_provider.create_inherent_data()?; - let inherents_len = id.len(); + let inherents_len = inherent_data.len(); let digest = if let Some(digest_provider) = digest_provider { - digest_provider.create_digest(&parent, &id)? + digest_provider.create_digest(&parent, &inherent_data)? 
} else { Default::default() }; let proposal = proposer.propose( - id.clone(), + inherent_data.clone(), digest, Duration::from_secs(MAX_PROPOSAL_DURATION), None, @@ -139,7 +149,7 @@ pub async fn seal_block( params.storage_changes = Some(proposal.storage_changes); if let Some(digest_provider) = digest_provider { - digest_provider.append_block_import(&parent, &mut params, &id)?; + digest_provider.append_block_import(&parent, &mut params, &inherent_data)?; } match block_import.import_block(params, HashMap::new()).await? { diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml index 86b0b1df54e26..443b852c41e5d 100644 --- a/client/consensus/pow/Cargo.toml +++ b/client/consensus/pow/Cargo.toml @@ -27,7 +27,6 @@ log = "0.4.8" futures = { version = "0.3.1", features = ["compat"] } futures-timer = "3.0.1" parking_lot = "0.11.1" -sp-timestamp = { version = "3.0.0", path = "../../../primitives/timestamp" } derive_more = "0.99.2" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0"} async-trait = "0.1.42" diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index bcbc2009321b8..b12bad7bac22e 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -39,7 +39,7 @@ use std::{ sync::Arc, borrow::Cow, collections::HashMap, marker::PhantomData, cmp::Ordering, time::Duration, }; -use futures::{prelude::*, future::Either}; +use futures::{Future, StreamExt}; use parking_lot::Mutex; use sc_client_api::{BlockOf, backend::AuxStore, BlockchainEvents}; use sp_blockchain::{HeaderBackend, ProvideCache, well_known_cache_keys::Id as CacheKeyId}; @@ -49,7 +49,7 @@ use sp_runtime::generic::{BlockId, Digest, DigestItem}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use sp_api::ProvideRuntimeApi; use sp_consensus_pow::{Seal, TotalDifficulty, POW_ENGINE_ID}; -use sp_inherents::{InherentDataProviders, InherentData}; +use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; use sp_consensus::{ BlockImportParams, BlockOrigin, ForkChoiceStrategy, SyncOracle, Environment, Proposer, SelectChain, Error as ConsensusError, CanAuthorWith, BlockImport, BlockCheckParams, ImportResult, @@ -61,7 +61,6 @@ use codec::{Encode, Decode}; use prometheus_endpoint::Registry; use sc_client_api; use log::*; -use sp_timestamp::{InherentError as TIError, TimestampInherentData}; use crate::worker::UntilImportedOrTimeout; @@ -92,7 +91,12 @@ pub enum Error { #[display(fmt = "Creating inherents failed: {}", _0)] CreateInherents(sp_inherents::Error), #[display(fmt = "Checking inherents failed: {}", _0)] - CheckInherents(String), + CheckInherents(sp_inherents::Error), + #[display( + fmt = "Checking inherents unknown error for identifier: {:?}", + "String::from_utf8_lossy(_0)", + )] + CheckInherentsUnknownError(sp_inherents::InherentIdentifier), #[display(fmt = "Multiple pre-runtime digests")] MultiplePreRuntimeDigests, Client(sp_blockchain::Error), @@ -200,18 +204,18 @@ pub trait PowAlgorithm { } /// A block importer for PoW. 
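The standalone timestamp-drift check disappears with this change: as the `PowBlockImport` changes below show, `check_inherents` now builds its `InherentData` from the providers returned by the supplied closure and forwards unknown inherent errors to their `try_handle_error` hooks. For a chain whose only inherent is the timestamp, that closure can be as small as the following sketch (variable name illustrative):

// Handed both to `PowBlockImport::new` and `start_mining_worker` in place of the
// old shared `InherentDataProviders` registry.
let create_inherent_data_providers = |_parent_hash, ()| async {
    Ok(sp_timestamp::InherentDataProvider::from_system_time())
};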
-pub struct PowBlockImport { +pub struct PowBlockImport { algorithm: Algorithm, inner: I, select_chain: S, client: Arc, - inherent_data_providers: sp_inherents::InherentDataProviders, + create_inherent_data_providers: Arc, check_inherents_after: <::Header as HeaderT>::Number, can_author_with: CAW, } -impl Clone - for PowBlockImport +impl Clone + for PowBlockImport { fn clone(&self) -> Self { Self { @@ -219,14 +223,14 @@ impl Clone inner: self.inner.clone(), select_chain: self.select_chain.clone(), client: self.client.clone(), - inherent_data_providers: self.inherent_data_providers.clone(), + create_inherent_data_providers: self.create_inherent_data_providers.clone(), check_inherents_after: self.check_inherents_after.clone(), can_author_with: self.can_author_with.clone(), } } } -impl PowBlockImport where +impl PowBlockImport where B: BlockT, I: BlockImport> + Send + Sync, I::Error: Into, @@ -234,6 +238,7 @@ impl PowBlockImport wher C::Api: BlockBuilderApi, Algorithm: PowAlgorithm, CAW: CanAuthorWith, + CIDP: CreateInherentDataProviders, { /// Create a new block import suitable to be used in PoW pub fn new( @@ -242,7 +247,7 @@ impl PowBlockImport wher algorithm: Algorithm, check_inherents_after: <::Header as HeaderT>::Number, select_chain: S, - inherent_data_providers: sp_inherents::InherentDataProviders, + create_inherent_data_providers: CIDP, can_author_with: CAW, ) -> Self { Self { @@ -251,20 +256,17 @@ impl PowBlockImport wher algorithm, check_inherents_after, select_chain, - inherent_data_providers, + create_inherent_data_providers: Arc::new(create_inherent_data_providers), can_author_with, } } - fn check_inherents( + async fn check_inherents( &self, block: B, block_id: BlockId, - inherent_data: InherentData, - timestamp_now: u64, + inherent_data_providers: CIDP::InherentDataProviders, ) -> Result<(), Error> { - const MAX_TIMESTAMP_DRIFT_SECS: u64 = 60; - if *block.header().number() < self.check_inherents_after { return Ok(()) } @@ -279,6 +281,9 @@ impl PowBlockImport wher return Ok(()) } + let inherent_data = inherent_data_providers.create_inherent_data() + .map_err(|e| Error::CreateInherents(e))?; + let inherent_res = self.client.runtime_api().check_inherents( &block_id, block, @@ -286,38 +291,32 @@ impl PowBlockImport wher ).map_err(|e| Error::Client(e.into()))?; if !inherent_res.ok() { - inherent_res - .into_errors() - .try_for_each(|(i, e)| match TIError::try_from(&i, &e) { - Some(TIError::ValidAtTimestamp(timestamp)) => { - if timestamp > timestamp_now + MAX_TIMESTAMP_DRIFT_SECS { - return Err(Error::TooFarInFuture); - } - - Ok(()) - }, - Some(TIError::Other(e)) => Err(Error::Runtime(e)), - None => Err(Error::CheckInherents( - self.inherent_data_providers.error_to_string(&i, &e) - )), - }) - } else { - Ok(()) + for (identifier, error) in inherent_res.into_errors() { + match inherent_data_providers.try_handle_error(&identifier, &error).await { + Some(res) => res.map_err(Error::CheckInherents)?, + None => return Err(Error::CheckInherentsUnknownError(identifier)), + } + } } + + Ok(()) } } #[async_trait::async_trait] -impl BlockImport for PowBlockImport where +impl BlockImport + for PowBlockImport +where B: BlockT, I: BlockImport> + Send + Sync, I::Error: Into, S: SelectChain, C: ProvideRuntimeApi + Send + Sync + HeaderBackend + AuxStore + ProvideCache + BlockOf, C::Api: BlockBuilderApi, - Algorithm: PowAlgorithm + Send, + Algorithm: PowAlgorithm + Send + Sync, Algorithm::Difficulty: 'static + Send, - CAW: CanAuthorWith + Send, + CAW: CanAuthorWith + Send + Sync, + CIDP: 
CreateInherentDataProviders + Send + Sync, { type Error = ConsensusError; type Transaction = sp_api::TransactionFor; @@ -343,18 +342,16 @@ impl BlockImport for PowBlockImport(self.client.as_ref(), &parent_hash)?; if let Some(inner_body) = block.body.take() { - let inherent_data = self.inherent_data_providers - .create_inherent_data().map_err(|e| e.into_string())?; - let timestamp_now = inherent_data.timestamp_inherent_data().map_err(|e| e.into_string())?; - let check_block = B::new(block.header.clone(), inner_body); self.check_inherents( check_block.clone(), BlockId::Hash(parent_hash), - inherent_data, - *timestamp_now, - )?; + self.create_inherent_data_providers.create_inherent_data_providers( + parent_hash, + (), + ).await?, + ).await?; block.body = Some(check_block.deconstruct().1); } @@ -475,7 +472,7 @@ impl Verifier for PowVerifier where import_block.justifications = justifications; import_block.intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(intermediate) as Box<_>, + Box::new(intermediate) as Box<_> ); import_block.post_hash = Some(hash); @@ -483,20 +480,6 @@ impl Verifier for PowVerifier where } } -/// Register the PoW inherent data provider, if not registered already. -pub fn register_pow_inherent_data_provider( - inherent_data_providers: &InherentDataProviders, -) -> Result<(), sp_consensus::Error> { - if !inherent_data_providers.has_provider(&sp_timestamp::INHERENT_IDENTIFIER) { - inherent_data_providers - .register_provider(sp_timestamp::InherentDataProvider) - .map_err(Into::into) - .map_err(sp_consensus::Error::InherentData) - } else { - Ok(()) - } -} - /// The PoW import queue type. pub type PowImportQueue = BasicQueue; @@ -505,7 +488,6 @@ pub fn import_queue( block_import: BoxBlockImport, justification_import: Option>, algorithm: Algorithm, - inherent_data_providers: InherentDataProviders, spawner: &impl sp_core::traits::SpawnEssentialNamed, registry: Option<&Registry>, ) -> Result< @@ -517,8 +499,6 @@ pub fn import_queue( Algorithm: PowAlgorithm + Clone + Send + Sync + 'static, Algorithm::Difficulty: Send, { - register_pow_inherent_data_provider(&inherent_data_providers)?; - let verifier = PowVerifier::new(algorithm); Ok(BasicQueue::new( @@ -539,7 +519,7 @@ pub fn import_queue( /// /// `pre_runtime` is a parameter that allows a custom additional pre-runtime digest to be inserted /// for blocks being built. This can encode authorship information, or just be a graffiti. 
-pub fn start_mining_worker( +pub fn start_mining_worker( block_import: BoxBlockImport>, client: Arc, select_chain: S, @@ -547,7 +527,7 @@ pub fn start_mining_worker( mut env: E, mut sync_oracle: SO, pre_runtime: Option>, - inherent_data_providers: sp_inherents::InherentDataProviders, + create_inherent_data_providers: CIDP, timeout: Duration, build_time: Duration, can_author_with: CAW, @@ -565,12 +545,9 @@ pub fn start_mining_worker( E::Proposer: Proposer>, SO: SyncOracle + Clone + Send + Sync + 'static, CAW: CanAuthorWith + Clone + Send + 'static, + CIDP: CreateInherentDataProviders, { - if let Err(_) = register_pow_inherent_data_provider(&inherent_data_providers) { - warn!("Registering inherent data provider for timestamp failed"); - } - - let timer = UntilImportedOrTimeout::new(client.import_notification_stream(), timeout); + let mut timer = UntilImportedOrTimeout::new(client.import_notification_stream(), timeout); let worker = Arc::new(Mutex::new(MiningWorker:: { build: None, algorithm: algorithm.clone(), @@ -578,81 +555,97 @@ pub fn start_mining_worker( })); let worker_ret = worker.clone(); - let task = timer.for_each(move |()| { - let worker = worker.clone(); + let task = async move { + loop { + if timer.next().await.is_none() { + break; + } - if sync_oracle.is_major_syncing() { - debug!(target: "pow", "Skipping proposal due to sync."); - worker.lock().on_major_syncing(); - return Either::Left(future::ready(())) - } + if sync_oracle.is_major_syncing() { + debug!(target: "pow", "Skipping proposal due to sync."); + worker.lock().on_major_syncing(); + return; + } - let best_header = match select_chain.best_chain() { - Ok(x) => x, - Err(err) => { + let best_header = match select_chain.best_chain() { + Ok(x) => x, + Err(err) => { + warn!( + target: "pow", + "Unable to pull new block for authoring. \ + Select best chain error: {:?}", + err + ); + return; + }, + }; + let best_hash = best_header.hash(); + + if let Err(err) = can_author_with.can_author_with(&BlockId::Hash(best_hash)) { warn!( target: "pow", - "Unable to pull new block for authoring. \ - Select best chain error: {:?}", - err + "Skipping proposal `can_author_with` returned: {} \ + Probably a node update is required!", + err, ); - return Either::Left(future::ready(())) - }, - }; - let best_hash = best_header.hash(); + return; + } - if let Err(err) = can_author_with.can_author_with(&BlockId::Hash(best_hash)) { - warn!( - target: "pow", - "Skipping proposal `can_author_with` returned: {} \ - Probably a node update is required!", - err, - ); - return Either::Left(future::ready(())) - } + if worker.lock().best_hash() == Some(best_hash) { + return; + } - if worker.lock().best_hash() == Some(best_hash) { - return Either::Left(future::ready(())) - } + // The worker is locked for the duration of the whole proposing period. Within this period, + // the mining target is outdated and useless anyway. - // The worker is locked for the duration of the whole proposing period. Within this period, - // the mining target is outdated and useless anyway. + let difficulty = match algorithm.difficulty(best_hash) { + Ok(x) => x, + Err(err) => { + warn!( + target: "pow", + "Unable to propose new block for authoring. \ + Fetch difficulty failed: {:?}", + err, + ); + return; + }, + }; - let difficulty = match algorithm.difficulty(best_hash) { - Ok(x) => x, - Err(err) => { - warn!( - target: "pow", - "Unable to propose new block for authoring. 
\ - Fetch difficulty failed: {:?}", - err, - ); - return Either::Left(future::ready(())) - }, - }; + let inherent_data_providers = + match create_inherent_data_providers.create_inherent_data_providers(best_hash, ()).await { + Ok(x) => x, + Err(err) => { + warn!( + target: "pow", + "Unable to propose new block for authoring. \ + Creating inherent data providers failed: {:?}", + err, + ); + return; + }, + }; - let awaiting_proposer = env.init(&best_header); - let inherent_data = match inherent_data_providers.create_inherent_data() { - Ok(x) => x, - Err(err) => { - warn!( - target: "pow", - "Unable to propose new block for authoring. \ - Creating inherent data failed: {:?}", - err, - ); - return Either::Left(future::ready(())) - }, - }; - let mut inherent_digest = Digest::::default(); - if let Some(pre_runtime) = &pre_runtime { - inherent_digest.push(DigestItem::PreRuntime(POW_ENGINE_ID, pre_runtime.to_vec())); - } + let inherent_data = match inherent_data_providers.create_inherent_data() { + Ok(r) => r, + Err(e) => { + warn!( + target: "pow", + "Unable to propose new block for authoring. \ + Creating inherent data failed: {:?}", + e, + ); + return; + }, + }; + + let mut inherent_digest = Digest::::default(); + if let Some(pre_runtime) = &pre_runtime { + inherent_digest.push(DigestItem::PreRuntime(POW_ENGINE_ID, pre_runtime.to_vec())); + } - let pre_runtime = pre_runtime.clone(); + let pre_runtime = pre_runtime.clone(); - Either::Right(async move { - let proposer = match awaiting_proposer.await { + let proposer = match env.init(&best_header).await { Ok(x) => x, Err(err) => { warn!( @@ -694,8 +687,8 @@ pub fn start_mining_worker( }; worker.lock().on_build(build); - }) - }); + } + }; (worker_ret, task) } diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index 64beea50fcf63..51382198f5087 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -33,6 +33,7 @@ futures = "0.3.9" futures-timer = "3.0.1" log = "0.4.11" thiserror = "1.0.21" +impl-trait-for-tuples = "0.2.1" async-trait = "0.1.42" [dev-dependencies] diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index c1638fb566326..4ef0093a185e5 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -23,7 +23,7 @@ //! provides generic functionality for slots. #![forbid(unsafe_code)] -#![deny(missing_docs)] +#![warn(missing_docs)] mod slots; mod aux_schema; @@ -41,12 +41,13 @@ use sp_api::{ProvideRuntimeApi, ApiRef}; use sp_arithmetic::traits::BaseArithmetic; use sp_consensus::{BlockImport, Proposer, SyncOracle, SelectChain, CanAuthorWith, SlotData}; use sp_consensus_slots::Slot; -use sp_inherents::{InherentData, InherentDataProviders}; +use sp_inherents::CreateInherentDataProviders; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, Header, HashFor, NumberFor} + traits::{Block as BlockT, Header as HeaderT, HashFor, NumberFor} }; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_WARN, CONSENSUS_INFO}; +use sp_timestamp::Timestamp; /// The changes that need to applied to the storage to create the state for a block. /// @@ -75,8 +76,7 @@ pub trait SlotWorker { /// the slot. Otherwise `None` is returned. async fn on_slot( &mut self, - chain_head: B::Header, - slot_info: SlotInfo, + slot_info: SlotInfo, ) -> Option>; } @@ -187,21 +187,19 @@ pub trait SimpleSlotWorker { /// Remaining duration for proposing. 
fn proposing_remaining_duration( &self, - head: &B::Header, - slot_info: &SlotInfo, + slot_info: &SlotInfo, ) -> Duration; /// Implements [`SlotWorker::on_slot`]. async fn on_slot( &mut self, - chain_head: B::Header, - slot_info: SlotInfo, + slot_info: SlotInfo, ) -> Option>::Proof>> { let (timestamp, slot) = (slot_info.timestamp, slot_info.slot); let telemetry = self.telemetry(); let logging_target = self.logging_target(); - let proposing_remaining_duration = self.proposing_remaining_duration(&chain_head, &slot_info); + let proposing_remaining_duration = self.proposing_remaining_duration(&slot_info); let proposing_remaining = if proposing_remaining_duration == Duration::default() { debug!( @@ -215,13 +213,13 @@ pub trait SimpleSlotWorker { Delay::new(proposing_remaining_duration) }; - let epoch_data = match self.epoch_data(&chain_head, slot) { + let epoch_data = match self.epoch_data(&slot_info.chain_head, slot) { Ok(epoch_data) => epoch_data, Err(err) => { warn!( target: logging_target, "Unable to fetch epoch data at block {:?}: {:?}", - chain_head.hash(), + slot_info.chain_head.hash(), err, ); @@ -229,7 +227,7 @@ pub trait SimpleSlotWorker { telemetry; CONSENSUS_WARN; "slots.unable_fetching_authorities"; - "slot" => ?chain_head.hash(), + "slot" => ?slot_info.chain_head.hash(), "err" => ?err, ); @@ -237,7 +235,7 @@ pub trait SimpleSlotWorker { } }; - self.notify_slot(&chain_head, slot, &epoch_data); + self.notify_slot(&slot_info.chain_head, slot, &epoch_data); let authorities_len = self.authorities_len(&epoch_data); @@ -256,9 +254,9 @@ pub trait SimpleSlotWorker { return None; } - let claim = self.claim_slot(&chain_head, slot, &epoch_data)?; + let claim = self.claim_slot(&slot_info.chain_head, slot, &epoch_data)?; - if self.should_backoff(slot, &chain_head) { + if self.should_backoff(slot, &slot_info.chain_head) { return None; } @@ -266,7 +264,7 @@ pub trait SimpleSlotWorker { target: self.logging_target(), "Starting authorship at slot {}; timestamp = {}", slot, - timestamp, + *timestamp, ); telemetry!( @@ -277,7 +275,7 @@ pub trait SimpleSlotWorker { "timestamp" => *timestamp, ); - let proposer = match self.proposer(&chain_head).await { + let proposer = match self.proposer(&slot_info.chain_head).await { Ok(p) => p, Err(err) => { warn!( @@ -422,94 +420,119 @@ pub trait SimpleSlotWorker { impl + Send> SlotWorker>::Proof> for T { async fn on_slot( &mut self, - chain_head: B::Header, - slot_info: SlotInfo, + slot_info: SlotInfo, ) -> Option>::Proof>> { - SimpleSlotWorker::on_slot(self, chain_head, slot_info).await + SimpleSlotWorker::on_slot(self, slot_info).await } } -/// Slot compatible inherent data. -pub trait SlotCompatible { - /// Extract timestamp and slot from inherent data. - fn extract_timestamp_and_slot( - &self, - inherent: &InherentData, - ) -> Result<(sp_timestamp::Timestamp, Slot, std::time::Duration), sp_consensus::Error>; +/// Slot specific extension that the inherent data provider needs to implement. +pub trait InherentDataProviderExt { + /// The current timestamp that will be found in the [`InherentData`]. + fn timestamp(&self) -> Timestamp; + + /// The current slot that will be found in the [`InherentData`]. 
+ fn slot(&self) -> Slot; +} + +impl InherentDataProviderExt for (T, S, P) +where + T: Deref, + S: Deref, +{ + fn timestamp(&self) -> Timestamp { + *self.0.deref() + } + + fn slot(&self) -> Slot { + *self.1.deref() + } +} + +impl InherentDataProviderExt for (T, S, P, R) +where + T: Deref, + S: Deref, +{ + fn timestamp(&self) -> Timestamp { + *self.0.deref() + } + + fn slot(&self) -> Slot { + *self.1.deref() + } +} + +impl InherentDataProviderExt for (T, S) +where + T: Deref, + S: Deref, +{ + fn timestamp(&self) -> Timestamp { + *self.0.deref() + } + + fn slot(&self) -> Slot { + *self.1.deref() + } } /// Start a new slot worker. /// /// Every time a new slot is triggered, `worker.on_slot` is called and the future it returns is /// polled until completion, unless we are major syncing. -pub fn start_slot_worker( +pub async fn start_slot_worker( slot_duration: SlotDuration, client: C, mut worker: W, mut sync_oracle: SO, - inherent_data_providers: InherentDataProviders, - timestamp_extractor: SC, + create_inherent_data_providers: CIDP, can_author_with: CAW, -) -> impl Future +) where B: BlockT, C: SelectChain, W: SlotWorker, SO: SyncOracle + Send, - SC: SlotCompatible + Unpin, T: SlotData + Clone, CAW: CanAuthorWith + Send, + CIDP: CreateInherentDataProviders + Send, + CIDP::InherentDataProviders: InherentDataProviderExt + Send, { let SlotDuration(slot_duration) = slot_duration; - // rather than use a timer interval, we schedule our waits ourselves - let mut slots = Slots::::new( + let mut slots = Slots::new( slot_duration.slot_duration(), - inherent_data_providers, - timestamp_extractor, + create_inherent_data_providers, + client, ); - async move { - loop { - let slot_info = match slots.next_slot().await { - Ok(slot) => slot, - Err(err) => { - debug!(target: "slots", "Faulty timer: {:?}", err); - return - }, - }; - - // only propose when we are not syncing. - if sync_oracle.is_major_syncing() { - debug!(target: "slots", "Skipping proposal slot due to sync."); - continue; + loop { + let slot_info = match slots.next_slot().await { + Ok(r) => r, + Err(e) => { + warn!(target: "slots", "Error while polling for next slot: {:?}", e); + return; } + }; - let slot = slot_info.slot; - let chain_head = match client.best_chain() { - Ok(x) => x, - Err(e) => { - warn!( - target: "slots", - "Unable to author block in slot {}. No best block header: {:?}", - slot, - e, - ); - continue; - } - }; + if sync_oracle.is_major_syncing() { + debug!(target: "slots", "Skipping proposal slot due to sync."); + continue; + } - if let Err(err) = can_author_with.can_author_with(&BlockId::Hash(chain_head.hash())) { - warn!( - target: "slots", - "Unable to author block in slot {},. `can_author_with` returned: {} \ - Probably a node update is required!", - slot, - err, - ); - } else { - worker.on_slot(chain_head, slot_info).await; - } + if let Err(err) = can_author_with + .can_author_with(&BlockId::Hash(slot_info.chain_head.hash())) + { + warn!( + target: "slots", + "Unable to author block in slot {},. `can_author_with` returned: {} \ + Probably a node update is required!", + slot_info.slot, + err, + ); + } else { + let _ = worker.on_slot(slot_info).await; } } } @@ -627,7 +650,10 @@ impl SlotProportion { /// to parent. 
If the number of skipped slots is greated than 0 this method will apply /// an exponential backoff of at most `2^7 * slot_duration`, if no slots were skipped /// this method will return `None.` -pub fn slot_lenience_exponential(parent_slot: Slot, slot_info: &SlotInfo) -> Option { +pub fn slot_lenience_exponential( + parent_slot: Slot, + slot_info: &SlotInfo, +) -> Option { // never give more than 2^this times the lenience. const BACKOFF_CAP: u64 = 7; @@ -656,7 +682,10 @@ pub fn slot_lenience_exponential(parent_slot: Slot, slot_info: &SlotInfo) -> Opt /// to parent. If the number of skipped slots is greated than 0 this method will apply /// a linear backoff of at most `20 * slot_duration`, if no slots were skipped /// this method will return `None.` -pub fn slot_lenience_linear(parent_slot: Slot, slot_info: &SlotInfo) -> Option { +pub fn slot_lenience_linear( + parent_slot: u64, + slot_info: &SlotInfo, +) -> Option { // never give more than 20 times more lenience. const BACKOFF_CAP: u64 = 20; @@ -777,20 +806,27 @@ impl BackoffAuthoringBlocksStrategy for () { #[cfg(test)] mod test { + use super::*; use std::time::{Duration, Instant}; - use crate::{BackoffAuthoringOnFinalizedHeadLagging, BackoffAuthoringBlocksStrategy}; - use substrate_test_runtime_client::runtime::Block; + use substrate_test_runtime_client::runtime::{Block, Header}; use sp_api::NumberFor; const SLOT_DURATION: Duration = Duration::from_millis(6000); - fn slot(slot: u64) -> super::slots::SlotInfo { + fn slot(slot: u64) -> super::slots::SlotInfo { super::slots::SlotInfo { slot: slot.into(), duration: SLOT_DURATION, timestamp: Default::default(), inherent_data: Default::default(), ends_at: Instant::now(), + chain_head: Header::new( + 1, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ), block_size_limit: None, } } @@ -798,20 +834,20 @@ mod test { #[test] fn linear_slot_lenience() { // if no slots are skipped there should be no lenience - assert_eq!(super::slot_lenience_linear(1.into(), &slot(2)), None); + assert_eq!(super::slot_lenience_linear(1u64.into(), &slot(2)), None); // otherwise the lenience is incremented linearly with // the number of skipped slots. 
for n in 3..=22 { assert_eq!( - super::slot_lenience_linear(1.into(), &slot(n)), + super::slot_lenience_linear(1u64.into(), &slot(n)), Some(SLOT_DURATION * (n - 2) as u32), ); } // but we cap it to a maximum of 20 slots assert_eq!( - super::slot_lenience_linear(1.into(), &slot(23)), + super::slot_lenience_linear(1u64.into(), &slot(23)), Some(SLOT_DURATION * 20), ); } @@ -819,24 +855,24 @@ mod test { #[test] fn exponential_slot_lenience() { // if no slots are skipped there should be no lenience - assert_eq!(super::slot_lenience_exponential(1.into(), &slot(2)), None); + assert_eq!(super::slot_lenience_exponential(1u64.into(), &slot(2)), None); // otherwise the lenience is incremented exponentially every two slots for n in 3..=17 { assert_eq!( - super::slot_lenience_exponential(1.into(), &slot(n)), + super::slot_lenience_exponential(1u64.into(), &slot(n)), Some(SLOT_DURATION * 2u32.pow((n / 2 - 1) as u32)), ); } // but we cap it to a maximum of 14 slots assert_eq!( - super::slot_lenience_exponential(1.into(), &slot(18)), + super::slot_lenience_exponential(1u64.into(), &slot(18)), Some(SLOT_DURATION * 2u32.pow(7)), ); assert_eq!( - super::slot_lenience_exponential(1.into(), &slot(19)), + super::slot_lenience_exponential(1u64.into(), &slot(19)), Some(SLOT_DURATION * 2u32.pow(7)), ); } diff --git a/client/consensus/slots/src/slots.rs b/client/consensus/slots/src/slots.rs index 1d89ba3bf9927..b5ce71dfbf4c9 100644 --- a/client/consensus/slots/src/slots.rs +++ b/client/consensus/slots/src/slots.rs @@ -20,9 +20,10 @@ //! //! This is used instead of `futures_timer::Interval` because it was unreliable. -use super::{SlotCompatible, Slot}; -use sp_consensus::Error; -use sp_inherents::{InherentData, InherentDataProviders}; +use super::{Slot, InherentDataProviderExt}; +use sp_consensus::{Error, SelectChain}; +use sp_inherents::{InherentData, CreateInherentDataProviders, InherentDataProvider}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use std::time::{Duration, Instant}; use futures_timer::Delay; @@ -38,19 +39,19 @@ pub fn duration_now() -> Duration { )) } -/// Returns the duration until the next slot, based on current duration since -pub fn time_until_next(now: Duration, slot_duration: Duration) -> Duration { +/// Returns the duration until the next slot from now. +pub fn time_until_next(slot_duration: Duration) -> Duration { let remaining_full_millis = slot_duration.as_millis() - - (now.as_millis() % slot_duration.as_millis()) + - (duration_now().as_millis() % slot_duration.as_millis()) - 1; Duration::from_millis(remaining_full_millis as u64) } /// Information about a slot. -pub struct SlotInfo { - /// The slot number. +pub struct SlotInfo { + /// The slot number as found in the inherent data. pub slot: Slot, - /// Current timestamp. + /// Current timestamp as found in the inherent data. pub timestamp: sp_timestamp::Timestamp, /// The instant at which the slot ends. pub ends_at: Instant, @@ -58,13 +59,15 @@ pub struct SlotInfo { pub inherent_data: InherentData, /// Slot duration. pub duration: Duration, + /// The chain header this slot is based on. + pub chain_head: B::Header, /// Some potential block size limit for the block to be authored at this slot. /// /// For more information see [`Proposer::propose`](sp_consensus::Proposer::propose). pub block_size_limit: Option, } -impl SlotInfo { +impl SlotInfo { /// Create a new [`SlotInfo`]. /// /// `ends_at` is calculated using `timestamp` and `duration`. 
@@ -73,6 +76,7 @@ impl SlotInfo { timestamp: sp_timestamp::Timestamp, inherent_data: InherentData, duration: Duration, + chain_head: B::Header, block_size_limit: Option, ) -> Self { Self { @@ -80,46 +84,55 @@ impl SlotInfo { timestamp, inherent_data, duration, + chain_head, block_size_limit, - ends_at: Instant::now() + time_until_next(timestamp.as_duration(), duration), + ends_at: Instant::now() + time_until_next(duration), } } } /// A stream that returns every time there is a new slot. -pub(crate) struct Slots { +pub(crate) struct Slots { last_slot: Slot, slot_duration: Duration, inner_delay: Option, - inherent_data_providers: InherentDataProviders, - timestamp_extractor: SC, + create_inherent_data_providers: IDP, + client: C, + _phantom: std::marker::PhantomData, } -impl Slots { +impl Slots { /// Create a new `Slots` stream. pub fn new( slot_duration: Duration, - inherent_data_providers: InherentDataProviders, - timestamp_extractor: SC, + create_inherent_data_providers: IDP, + client: C, ) -> Self { Slots { last_slot: 0.into(), slot_duration, inner_delay: None, - inherent_data_providers, - timestamp_extractor, + create_inherent_data_providers, + client, + _phantom: Default::default(), } } } -impl Slots { +impl Slots +where + Block: BlockT, + C: SelectChain, + IDP: CreateInherentDataProviders, + IDP::InherentDataProviders: crate::InherentDataProviderExt, +{ /// Returns a future that fires when the next slot starts. - pub async fn next_slot(&mut self) -> Result { + pub async fn next_slot(&mut self) -> Result, Error> { loop { self.inner_delay = match self.inner_delay.take() { None => { // schedule wait. - let wait_dur = time_until_next(duration_now(), self.slot_duration); + let wait_dur = time_until_next(self.slot_duration); Some(Delay::new(wait_dur)) } Some(d) => Some(d), @@ -130,15 +143,39 @@ impl Slots { } // timeout has fired. - let inherent_data = match self.inherent_data_providers.create_inherent_data() { - Ok(id) => id, - Err(err) => return Err(sp_consensus::Error::InherentData(err)), + let ends_at = Instant::now() + time_until_next(self.slot_duration); + + let chain_head = match self.client.best_chain() { + Ok(x) => x, + Err(e) => { + log::warn!( + target: "slots", + "Unable to author block in slot. No best block header: {:?}", + e, + ); + // Let's try at the next slot.. + self.inner_delay.take(); + continue; + } }; - let result = self.timestamp_extractor.extract_timestamp_and_slot(&inherent_data); - let (timestamp, slot, offset) = result?; + + let inherent_data_providers = self.create_inherent_data_providers + .create_inherent_data_providers(chain_head.hash(), ()) + .await?; + + if Instant::now() > ends_at { + log::warn!( + target: "slots", + "Creating inherent data providers took more time than we had left for the slot.", + ); + } + + let timestamp = inherent_data_providers.timestamp(); + let slot = inherent_data_providers.slot(); + let inherent_data = inherent_data_providers.create_inherent_data()?; + // reschedule delay for next slot. - let ends_in = offset + - time_until_next(timestamp.as_duration(), self.slot_duration); + let ends_in = time_until_next(self.slot_duration); self.inner_delay = Some(Delay::new(ends_in)); // never yield the same slot twice. 
@@ -150,6 +187,7 @@ impl Slots { timestamp, inherent_data, self.slot_duration, + chain_head, None, )) } diff --git a/client/consensus/uncles/Cargo.toml b/client/consensus/uncles/Cargo.toml index 14a8c850562cb..ab88d4496fecb 100644 --- a/client/consensus/uncles/Cargo.toml +++ b/client/consensus/uncles/Cargo.toml @@ -14,9 +14,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sc-client-api = { version = "3.0.0", path = "../../api" } -sp-core = { version = "3.0.0", path = "../../../primitives/core" } sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } sp-authorship = { version = "3.0.0", path = "../../../primitives/authorship" } -sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } -sp-inherents = { version = "3.0.0", path = "../../../primitives/inherents" } -log = "0.4.8" +thiserror = "1.0.21" diff --git a/client/consensus/uncles/src/lib.rs b/client/consensus/uncles/src/lib.rs index f38849300d0da..cfae0528a627d 100644 --- a/client/consensus/uncles/src/lib.rs +++ b/client/consensus/uncles/src/lib.rs @@ -17,51 +17,28 @@ // along with this program. If not, see . //! Uncles functionality for Substrate. -#![forbid(unsafe_code, missing_docs)] -use sp_consensus::SelectChain; -use sp_inherents::{InherentDataProviders}; -use log::warn; use sc_client_api::ProvideUncles; -use sp_runtime::traits::{Block as BlockT, Header}; -use std::sync::Arc; -use sp_authorship; +use sp_runtime::{traits::Block as BlockT, generic::BlockId}; + +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error("Could not retrieve the block hash for block id: {0:?}")] + NoHashForBlockId(BlockId), +} /// Maximum uncles generations we may provide to the runtime. const MAX_UNCLE_GENERATIONS: u32 = 8; -/// Register uncles inherent data provider, if not registered already. -pub fn register_uncles_inherent_data_provider( - client: Arc, - select_chain: SC, - inherent_data_providers: &InherentDataProviders, -) -> Result<(), sp_consensus::Error> where +/// Create a new [`sp_authorship::InherentDataProvider`] at the given block. 
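With the registry gone, the uncles provider is created per block from inside the node's `create_inherent_data_providers` closure using the new helper introduced just below. A sketch, assuming `client` is an `Arc` of a client implementing `ProvideUncles`; in a real node the uncles provider would be combined with the timestamp and slot providers:

let create_inherent_data_providers = move |parent_hash, ()| {
    let client = client.clone();
    async move {
        // Collect the uncles for this parent and expose them as an inherent data provider.
        let uncles = sc_consensus_uncles::create_uncles_inherent_data_provider(
            &*client,
            parent_hash,
        )?;
        let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
        Ok((timestamp, uncles))
    }
};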
+pub fn create_uncles_inherent_data_provider( + client: &C, + parent: B::Hash, +) -> Result, sc_client_api::blockchain::Error> where B: BlockT, - C: ProvideUncles + Send + Sync + 'static, - SC: SelectChain + 'static, + C: ProvideUncles, { - if !inherent_data_providers.has_provider(&sp_authorship::INHERENT_IDENTIFIER) { - inherent_data_providers - .register_provider(sp_authorship::InherentDataProvider::new(move || { - { - let chain_head = match select_chain.best_chain() { - Ok(x) => x, - Err(e) => { - warn!(target: "uncles", "Unable to get chain head: {:?}", e); - return Vec::new(); - } - }; - match client.uncles(chain_head.hash(), MAX_UNCLE_GENERATIONS.into()) { - Ok(uncles) => uncles, - Err(e) => { - warn!(target: "uncles", "Unable to get uncles: {:?}", e); - Vec::new() - } - } - } - })) - .map_err(|err| sp_consensus::Error::InherentData(err.into()))?; - } - Ok(()) -} + let uncles = client.uncles(parent, MAX_UNCLE_GENERATIONS.into())?; + Ok(sp_authorship::InherentDataProvider::new(uncles)) +} diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index f05a2751995da..a958cb6865c79 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -44,8 +44,8 @@ use sp_runtime::{ Justification, Justifications, BuildStorage, generic::{BlockId, SignedBlock, DigestItem}, traits::{ - Block as BlockT, Header as HeaderT, Zero, NumberFor, - HashFor, SaturatedConversion, One, DigestFor, UniqueSaturatedInto, + Block as BlockT, Header as HeaderT, Zero, NumberFor, HashFor, SaturatedConversion, One, + DigestFor, }, }; use sp_state_machine::{ @@ -1149,14 +1149,20 @@ impl Client where } /// Gets the uncles of the block with `target_hash` going back `max_generation` ancestors. - pub fn uncles(&self, target_hash: Block::Hash, max_generation: NumberFor) -> sp_blockchain::Result> { + pub fn uncles( + &self, + target_hash: Block::Hash, + max_generation: NumberFor, + ) -> sp_blockchain::Result> { let load_header = |id: Block::Hash| -> sp_blockchain::Result { self.backend.blockchain().header(BlockId::Hash(id))? .ok_or_else(|| Error::UnknownBlock(format!("{:?}", id))) }; let genesis_hash = self.backend.blockchain().info().genesis_hash; - if genesis_hash == target_hash { return Ok(Vec::new()); } + if genesis_hash == target_hash { + return Ok(Vec::new()); + } let mut current_hash = target_hash; let mut current = load_header(current_hash)?; @@ -1164,14 +1170,20 @@ impl Client where let mut ancestor = load_header(ancestor_hash)?; let mut uncles = Vec::new(); - for _generation in 0u32..UniqueSaturatedInto::::unique_saturated_into(max_generation) { + let mut generation: NumberFor = Zero::zero(); + while generation < max_generation { let children = self.backend.blockchain().children(ancestor_hash)?; uncles.extend(children.into_iter().filter(|h| h != ¤t_hash)); current_hash = ancestor_hash; - if genesis_hash == current_hash { break; } + + if genesis_hash == current_hash { + break; + } + current = ancestor; ancestor_hash = *current.parent_hash(); ancestor = load_header(ancestor_hash)?; + generation += One::one(); } trace!("Collected {} uncles", uncles.len()); Ok(uncles) diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index da05f99506a94..0e47b775e4a43 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -176,8 +176,6 @@ pub struct PartialComponents, - /// A registry of all providers of `InherentData`. 
- pub inherent_data_providers: sp_inherents::InherentDataProviders, /// Everything else that needs to be passed into the main build function. pub other: Other, } diff --git a/frame/authorship/Cargo.toml b/frame/authorship/Cargo.toml index 3bbbe9749c63b..56c56e23dfc8c 100644 --- a/frame/authorship/Cargo.toml +++ b/frame/authorship/Cargo.toml @@ -14,7 +14,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } -sp-inherents = { version = "3.0.0", default-features = false, path = "../../primitives/inherents" } sp-authorship = { version = "3.0.0", default-features = false, path = "../../primitives/authorship" } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } @@ -31,7 +30,6 @@ serde = { version = "1.0.101" } default = ["std"] std = [ "codec/std", - "sp-inherents/std", "sp-runtime/std", "sp-std/std", "frame-support/std", diff --git a/frame/authorship/src/lib.rs b/frame/authorship/src/lib.rs index 4243ae55718ae..b00f412808a1a 100644 --- a/frame/authorship/src/lib.rs +++ b/frame/authorship/src/lib.rs @@ -21,13 +21,13 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::{result, prelude::*}; -use sp_std::collections::btree_set::BTreeSet; -use frame_support::dispatch; -use frame_support::traits::{FindAuthor, VerifySeal, Get}; +use sp_std::{result, prelude::*, collections::btree_set::BTreeSet}; +use frame_support::{ + dispatch, traits::{FindAuthor, VerifySeal, Get}, + inherent::{InherentData, ProvideInherent, InherentIdentifier}, +}; use codec::{Encode, Decode}; use sp_runtime::traits::{Header as HeaderT, One, Zero}; -use sp_inherents::{InherentIdentifier, ProvideInherent, InherentData}; use sp_authorship::{INHERENT_IDENTIFIER, UnclesInherentData, InherentError}; const MAX_UNCLES: usize = 10; @@ -293,8 +293,7 @@ impl Pallet { uncle: &T::Header, existing_uncles: I, accumulator: &mut >::Accumulator, - ) -> Result, dispatch::DispatchError> - { + ) -> Result, dispatch::DispatchError> { let now = >::block_number(); let (minimum_height, maximum_height) = { diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 36ad11360fd35..c630fb639960b 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -619,7 +619,7 @@ mod tests { } } - impl sp_inherents::ProvideInherent for Module { + impl frame_support::inherent::ProvideInherent for Module { type Call = Call; type Error = sp_inherents::MakeFatalError<()>; const INHERENT_IDENTIFIER: [u8; 8] = *b"test1234"; diff --git a/frame/support/src/inherent.rs b/frame/support/src/inherent.rs index 87e489bd8f4db..4ce5958adbe9a 100644 --- a/frame/support/src/inherent.rs +++ b/frame/support/src/inherent.rs @@ -19,14 +19,70 @@ pub use crate::sp_std::vec::Vec; #[doc(hidden)] pub use crate::sp_runtime::traits::{Block as BlockT, Extrinsic}; -#[doc(hidden)] + pub use sp_inherents::{ - InherentData, ProvideInherent, CheckInherentsResult, IsFatalError, InherentIdentifier, - MakeFatalError, + InherentData, CheckInherentsResult, IsFatalError, InherentIdentifier, MakeFatalError, }; +/// A pallet that provides or verifies an inherent extrinsic. +/// +/// The pallet may provide the inherent, verify an inherent, or both provide and verify. +pub trait ProvideInherent { + /// The call type of the pallet. + type Call; + /// The error returned by `check_inherent`. 
+	type Error: codec::Encode + IsFatalError;
+	/// The inherent identifier used by this inherent.
+	const INHERENT_IDENTIFIER: self::InherentIdentifier;
+
+	/// Create an inherent out of the given `InherentData`.
+	fn create_inherent(data: &InherentData) -> Option<Self::Call>;
+
+	/// Determines whether this inherent is required in this block.
+	///
+	/// - `Ok(None)` indicates that this inherent is not required in this block. The default
+	/// implementation returns this.
+	///
+	/// - `Ok(Some(e))` indicates that this inherent is required in this block. `impl_outer_inherent!`
+	/// will call this function from its `check_extrinsics`. If the inherent is not present, it will
+	/// return `e`.
+	///
+	/// - `Err(_)` indicates that this function failed and further operations should be aborted.
+	///
+	/// NOTE: If the inherent is required, then the runtime asserts that the block contains at least
+	/// one inherent for which:
+	/// * its type is [`Self::Call`],
+	/// * [`Self::is_inherent`] returns true.
+	fn is_inherent_required(_: &InherentData) -> Result<Option<Self::Error>, Self::Error> { Ok(None) }
+
+	/// Check whether the given inherent is valid. Checking the inherent is optional and can be
+	/// omitted by using the default implementation.
+	///
+	/// When checking an inherent, the first parameter represents the inherent that is actually
+	/// included in the block by its author, whereas the second parameter represents the inherent
+	/// data that the verifying node calculates.
+	///
+	/// NOTE: A block can contain multiple inherents.
+	fn check_inherent(_: &Self::Call, _: &InherentData) -> Result<(), Self::Error> {
+		Ok(())
+	}
+
+	/// Return whether the call is an inherent call.
+	///
+	/// NOTE: Signed extrinsics are never inherents, but a signed extrinsic with the given call
+	/// variant can still be dispatched.
+	///
+	/// # Warning
+	///
+	/// In FRAME, inherents are enforced to come before all other extrinsics. For this reason,
+	/// pallets with unsigned transactions **must ensure** that no unsigned transaction call
+	/// is also an inherent call when implementing `ValidateUnsigned::validate_unsigned`.
+	/// Otherwise a block producer can produce invalid blocks by including them after non-inherent
+	/// extrinsics.
+	fn is_inherent(call: &Self::Call) -> bool;
+}
+
 /// Implement the outer inherent.
-/// All given modules need to implement `ProvideInherent`.
+/// All given modules need to implement [`ProvideInherent`].
/// /// # Example /// diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 12f651cb3daeb..77163755ac56f 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -1229,8 +1229,8 @@ pub mod tests { pub mod pallet_prelude { pub use sp_std::marker::PhantomData; #[cfg(feature = "std")] - pub use frame_support::traits::GenesisBuild; - pub use frame_support::{ + pub use crate::traits::GenesisBuild; + pub use crate::{ EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, DebugNoBound, CloneNoBound, Twox256, Twox128, Blake2_256, Blake2_128, Identity, Twox64Concat, Blake2_128Concat, ensure, RuntimeDebug, storage, @@ -1241,7 +1241,7 @@ pub mod pallet_prelude { storage::bounded_vec::{BoundedVec, BoundedVecValue}, }; pub use codec::{Encode, Decode}; - pub use sp_inherents::{InherentData, InherentIdentifier, ProvideInherent}; + pub use crate::inherent::{InherentData, InherentIdentifier, ProvideInherent}; pub use sp_runtime::{ traits::{MaybeSerializeDeserialize, Member, ValidateUnsigned}, transaction_validity::{ diff --git a/frame/support/test/Cargo.toml b/frame/support/test/Cargo.toml index 7d2f0ec463a38..85236a20f60e5 100644 --- a/frame/support/test/Cargo.toml +++ b/frame/support/test/Cargo.toml @@ -17,7 +17,6 @@ codec = { package = "parity-scale-codec", version = "2.0.0", default-features = sp-io = { version = "3.0.0", path = "../../../primitives/io", default-features = false } sp-state-machine = { version = "0.9.0", optional = true, path = "../../../primitives/state-machine" } frame-support = { version = "3.0.0", default-features = false, path = "../" } -sp-inherents = { version = "3.0.0", default-features = false, path = "../../../primitives/inherents" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../../primitives/runtime" } sp-core = { version = "3.0.0", default-features = false, path = "../../../primitives/core" } sp-std = { version = "3.0.0", default-features = false, path = "../../../primitives/std" } @@ -35,7 +34,6 @@ std = [ "sp-io/std", "frame-support/std", "frame-system/std", - "sp-inherents/std", "sp-core/std", "sp-std/std", "sp-runtime/std", diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index dbffead8ad2b0..077763ac9128d 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -26,8 +26,8 @@ use frame_support::{ StorageEntryMetadata, StorageHasher, }, StorageValue, StorageMap, StorageDoubleMap, + inherent::{ProvideInherent, InherentData, InherentIdentifier, MakeFatalError}, }; -use sp_inherents::{ProvideInherent, InherentData, InherentIdentifier, MakeFatalError}; use sp_core::{H256, sr25519}; mod system; @@ -112,7 +112,7 @@ mod module1 { T::BlockNumber: From { type Call = Call; - type Error = MakeFatalError; + type Error = MakeFatalError<()>; const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; fn create_inherent(_data: &InherentData) -> Option { @@ -176,7 +176,7 @@ mod module2 { impl, I: Instance> ProvideInherent for Module { type Call = Call; - type Error = MakeFatalError; + type Error = MakeFatalError<()>; const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; fn create_inherent(_data: &InherentData) -> Option { diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index 8fc056a2f36a5..4944ded2dbec0 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -299,13 +299,13 @@ pub mod pallet { pub enum InherentError { } - impl sp_inherents::IsFatalError for 
InherentError { + impl frame_support::inherent::IsFatalError for InherentError { fn is_fatal_error(&self) -> bool { unimplemented!(); } } - pub const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = *b"testpall"; + pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"testpall"; } // Test that a pallet with non generic event and generic genesis_config is correctly handled diff --git a/frame/support/test/tests/pallet_instance.rs b/frame/support/test/tests/pallet_instance.rs index 232a25ff5bf27..f0143b9c40d6c 100644 --- a/frame/support/test/tests/pallet_instance.rs +++ b/frame/support/test/tests/pallet_instance.rs @@ -181,13 +181,13 @@ pub mod pallet { pub enum InherentError { } - impl sp_inherents::IsFatalError for InherentError { + impl frame_support::inherent::IsFatalError for InherentError { fn is_fatal_error(&self) -> bool { unimplemented!(); } } - pub const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = *b"testpall"; + pub const INHERENT_IDENTIFIER: frame_support::inherent::InherentIdentifier = *b"testpall"; } // Test that a instantiable pallet with a generic genesis_config is correctly handled diff --git a/frame/support/test/tests/pallet_with_name_trait_is_valid.rs b/frame/support/test/tests/pallet_with_name_trait_is_valid.rs index 99c00b535fa0d..e7f44c4b96519 100644 --- a/frame/support/test/tests/pallet_with_name_trait_is_valid.rs +++ b/frame/support/test/tests/pallet_with_name_trait_is_valid.rs @@ -67,18 +67,21 @@ impl sp_runtime::traits::ValidateUnsigned for Module { } } -pub const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = *b"12345678"; +pub const INHERENT_IDENTIFIER: frame_support::inherent::InherentIdentifier = *b"12345678"; -impl sp_inherents::ProvideInherent for Module { +impl frame_support::inherent::ProvideInherent for Module { type Call = Call; - type Error = sp_inherents::MakeFatalError; - const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = INHERENT_IDENTIFIER; + type Error = frame_support::inherent::MakeFatalError<()>; + const INHERENT_IDENTIFIER: frame_support::inherent::InherentIdentifier = INHERENT_IDENTIFIER; - fn create_inherent(_data: &sp_inherents::InherentData) -> Option { + fn create_inherent(_data: &frame_support::inherent::InherentData) -> Option { unimplemented!(); } - fn check_inherent(_: &Self::Call, _: &sp_inherents::InherentData) -> std::result::Result<(), Self::Error> { + fn check_inherent( + _: &Self::Call, + _: &frame_support::inherent::InherentData, + ) -> std::result::Result<(), Self::Error> { unimplemented!(); } diff --git a/frame/timestamp/src/lib.rs b/frame/timestamp/src/lib.rs index ce6fd09bb7828..7c553b1e4b82f 100644 --- a/frame/timestamp/src/lib.rs +++ b/frame/timestamp/src/lib.rs @@ -96,14 +96,8 @@ mod benchmarking; pub mod weights; use sp_std::{result, cmp}; -use sp_inherents::InherentData; use frame_support::traits::{Time, UnixTime, OnTimestampSet}; -use sp_runtime::{ - RuntimeString, - traits::{ - AtLeast32Bit, Zero, SaturatedConversion, Scale, - } -}; +use sp_runtime::traits::{AtLeast32Bit, Zero, SaturatedConversion, Scale}; use sp_timestamp::{ InherentError, INHERENT_IDENTIFIER, InherentType, }; @@ -213,8 +207,9 @@ pub mod pallet { const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; fn create_inherent(data: &InherentData) -> Option { - let inherent_data = extract_inherent_data(data) - .expect("Gets and decodes timestamp inherent data"); + let inherent_data = data.get_data::(&INHERENT_IDENTIFIER) + .expect("Timestamp inherent data not correctly encoded") + .expect("Timestamp inherent 
data must be provided"); let data = (*inherent_data).saturated_into::(); let next_time = cmp::max(data, Self::now() + T::MinimumPeriod::get()); @@ -230,11 +225,13 @@ pub mod pallet { _ => return Ok(()), }; - let data = extract_inherent_data(data).map_err(|e| InherentError::Other(e))?; + let data = data.get_data::(&INHERENT_IDENTIFIER) + .expect("Timestamp inherent data not correctly encoded") + .expect("Timestamp inherent data must be provided"); let minimum = (Self::now() + T::MinimumPeriod::get()).saturated_into::(); if t > *(data + MAX_TIMESTAMP_DRIFT_MILLIS) { - Err(InherentError::Other("Timestamp too far in future to accept".into())) + Err(InherentError::TooFarInFuture) } else if t < minimum { Err(InherentError::ValidAtTimestamp(minimum.into())) } else { @@ -264,12 +261,6 @@ impl Pallet { } } -fn extract_inherent_data(data: &InherentData) -> Result { - data.get_data::(&INHERENT_IDENTIFIER) - .map_err(|_| RuntimeString::from("Invalid timestamp inherent data encoding."))? - .ok_or_else(|| "Timestamp inherent data is not provided.".into()) -} - impl Time for Pallet { type Moment = T::Moment; diff --git a/primitives/authorship/Cargo.toml b/primitives/authorship/Cargo.toml index 5455902fddc34..a9428f8422f56 100644 --- a/primitives/authorship/Cargo.toml +++ b/primitives/authorship/Cargo.toml @@ -17,6 +17,7 @@ sp-inherents = { version = "3.0.0", default-features = false, path = "../inheren sp-runtime = { version = "3.0.0", default-features = false, path = "../runtime" } sp-std = { version = "3.0.0", default-features = false, path = "../std" } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +async-trait = { version = "0.1.48", optional = true } [features] default = [ "std" ] @@ -25,4 +26,5 @@ std = [ "sp-std/std", "sp-inherents/std", "sp-runtime/std", + "async-trait", ] diff --git a/primitives/authorship/src/lib.rs b/primitives/authorship/src/lib.rs index 7bf6769951b27..1350fa17ff301 100644 --- a/primitives/authorship/src/lib.rs +++ b/primitives/authorship/src/lib.rs @@ -23,7 +23,7 @@ use sp_std::{result::Result, prelude::*}; use codec::{Encode, Decode}; use sp_inherents::{Error, InherentIdentifier, InherentData, IsFatalError}; -use sp_runtime::RuntimeString; +use sp_runtime::{RuntimeString, traits::Header as HeaderT}; /// The identifier for the `uncles` inherent. pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"uncles00"; @@ -44,12 +44,12 @@ impl IsFatalError for InherentError { } /// Auxiliary trait to extract uncles inherent data. -pub trait UnclesInherentData { +pub trait UnclesInherentData { /// Get uncles. fn uncles(&self) -> Result, Error>; } -impl UnclesInherentData for InherentData { +impl UnclesInherentData for InherentData { fn uncles(&self) -> Result, Error> { Ok(self.get_data(&INHERENT_IDENTIFIER)?.unwrap_or_default()) } @@ -57,36 +57,43 @@ impl UnclesInherentData for InherentData { /// Provider for inherent data. #[cfg(feature = "std")] -pub struct InherentDataProvider { - inner: F, - _marker: std::marker::PhantomData, +pub struct InherentDataProvider { + uncles: Vec, } #[cfg(feature = "std")] -impl InherentDataProvider { - pub fn new(uncles_oracle: F) -> Self { - InherentDataProvider { inner: uncles_oracle, _marker: Default::default() } +impl InherentDataProvider { + /// Create a new inherent data provider with the given `uncles`. + pub fn new(uncles: Vec) -> Self { + InherentDataProvider { uncles } + } + + /// Create a new instance that is usable for checking inherents. 
+ /// + /// This will always return an empty vec of uncles. + pub fn check_inherents() -> Self { + Self { uncles: Vec::new() } } } #[cfg(feature = "std")] -impl sp_inherents::ProvideInherentData for InherentDataProvider -where F: Fn() -> Vec -{ - fn inherent_identifier(&self) -> &'static InherentIdentifier { - &INHERENT_IDENTIFIER +#[async_trait::async_trait] +impl sp_inherents::InherentDataProvider for InherentDataProvider { + fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { + inherent_data.put_data(INHERENT_IDENTIFIER, &self.uncles) } - fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { - let uncles = (self.inner)(); - if !uncles.is_empty() { - inherent_data.put_data(INHERENT_IDENTIFIER, &uncles) - } else { - Ok(()) + async fn try_handle_error( + &self, + identifier: &InherentIdentifier, + error: &[u8], + ) -> Option> { + if *identifier != INHERENT_IDENTIFIER { + return None } - } - fn error_to_string(&self, _error: &[u8]) -> Option { - Some(format!("no further information")) + let error = InherentError::decode(&mut &error[..]).ok()?; + + Some(Err(Error::Application(Box::from(format!("{:?}", error))))) } } diff --git a/primitives/consensus/aura/Cargo.toml b/primitives/consensus/aura/Cargo.toml index 105c74bb317d7..2ae4259a21e50 100644 --- a/primitives/consensus/aura/Cargo.toml +++ b/primitives/consensus/aura/Cargo.toml @@ -22,6 +22,7 @@ sp-inherents = { version = "3.0.0", default-features = false, path = "../../inhe sp-timestamp = { version = "3.0.0", default-features = false, path = "../../timestamp" } sp-consensus-slots = { version = "0.9.0", default-features = false, path = "../slots" } sp-consensus = { version = "0.9.0", path = "../common", optional = true } +async-trait = { version = "0.1.48", optional = true } [features] default = ["std"] @@ -35,4 +36,5 @@ std = [ "sp-timestamp/std", "sp-consensus-slots/std", "sp-consensus", + "async-trait", ] diff --git a/primitives/consensus/aura/src/inherents.rs b/primitives/consensus/aura/src/inherents.rs index 32af901311a30..294f544f6725a 100644 --- a/primitives/consensus/aura/src/inherents.rs +++ b/primitives/consensus/aura/src/inherents.rs @@ -19,9 +19,6 @@ use sp_inherents::{InherentIdentifier, InherentData, Error}; -#[cfg(feature = "std")] -use sp_inherents::{InherentDataProviders, ProvideInherentData}; - /// The Aura inherent identifier. pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"auraslot"; @@ -31,15 +28,14 @@ pub type InherentType = sp_consensus_slots::Slot; /// Auxiliary trait to extract Aura inherent data. pub trait AuraInherentData { /// Get aura inherent data. - fn aura_inherent_data(&self) ->Result; + fn aura_inherent_data(&self) ->Result, Error>; /// Replace aura inherent data. fn aura_replace_inherent_data(&mut self, new: InherentType); } impl AuraInherentData for InherentData { - fn aura_inherent_data(&self) ->Result { + fn aura_inherent_data(&self) ->Result, Error> { self.get_data(&INHERENT_IDENTIFIER) - .and_then(|r| r.ok_or_else(|| "Aura inherent data not found".into())) } fn aura_replace_inherent_data(&mut self, new: InherentType) { @@ -51,50 +47,59 @@ impl AuraInherentData for InherentData { // TODO: Remove in the future. 
https://github.com/paritytech/substrate/issues/8029 #[cfg(feature = "std")] pub struct InherentDataProvider { - slot_duration: std::time::Duration, + slot: InherentType, } #[cfg(feature = "std")] impl InherentDataProvider { - pub fn new(slot_duration: std::time::Duration) -> Self { + /// Create a new instance with the given slot. + pub fn new(slot: InherentType) -> Self { Self { - slot_duration + slot, } } -} -#[cfg(feature = "std")] -impl ProvideInherentData for InherentDataProvider { - fn on_register( - &self, - providers: &InherentDataProviders, - ) ->Result<(), Error> { - if !providers.has_provider(&sp_timestamp::INHERENT_IDENTIFIER) { - // Add the timestamp inherent data provider, as we require it. - providers.register_provider(sp_timestamp::InherentDataProvider) - } else { - Ok(()) + /// Creates the inherent data provider by calculating the slot from the given + /// `timestamp` and `duration`. + pub fn from_timestamp_and_duration( + timestamp: sp_timestamp::Timestamp, + duration: std::time::Duration, + ) -> Self { + let slot = InherentType::from( + (timestamp.as_duration().as_millis() / duration.as_millis()) as u64 + ); + + Self { + slot, } } +} + +#[cfg(feature = "std")] +impl sp_std::ops::Deref for InherentDataProvider { + type Target = InherentType; - fn inherent_identifier(&self) -> &'static InherentIdentifier { - &INHERENT_IDENTIFIER + fn deref(&self) -> &Self::Target { + &self.slot } +} +#[cfg(feature = "std")] +#[async_trait::async_trait] +impl sp_inherents::InherentDataProvider for InherentDataProvider { fn provide_inherent_data( &self, inherent_data: &mut InherentData, ) ->Result<(), Error> { - use sp_timestamp::TimestampInherentData; - - let timestamp = inherent_data.timestamp_inherent_data()?; - let slot = *timestamp / self.slot_duration.as_millis() as u64; - inherent_data.put_data(INHERENT_IDENTIFIER, &slot) + inherent_data.put_data(INHERENT_IDENTIFIER, &self.slot) } - fn error_to_string(&self, error: &[u8]) -> Option { - use codec::Decode; - - sp_inherents::Error::decode(&mut &error[..]).map(|e| e.into_string()).ok() + async fn try_handle_error( + &self, + _: &InherentIdentifier, + _: &[u8], + ) -> Option> { + // There is no error anymore + None } } diff --git a/primitives/consensus/babe/Cargo.toml b/primitives/consensus/babe/Cargo.toml index a8ab03dcdaa49..0fc09e11032b0 100644 --- a/primitives/consensus/babe/Cargo.toml +++ b/primitives/consensus/babe/Cargo.toml @@ -25,8 +25,9 @@ sp-core = { version = "3.0.0", default-features = false, path = "../../core" } sp-inherents = { version = "3.0.0", default-features = false, path = "../../inherents" } sp-keystore = { version = "0.9.0", default-features = false, path = "../../keystore", optional = true } sp-runtime = { version = "3.0.0", default-features = false, path = "../../runtime" } -sp-timestamp = { version = "3.0.0", default-features = false, path = "../../timestamp" } +sp-timestamp = { version = "3.0.0", path = "../../timestamp", optional = true } serde = { version = "1.0.123", features = ["derive"], optional = true } +async-trait = { version = "0.1.48", optional = true } [features] default = ["std"] @@ -43,6 +44,7 @@ std = [ "sp-inherents/std", "sp-keystore", "sp-runtime/std", - "sp-timestamp/std", "serde", + "sp-timestamp", + "async-trait", ] diff --git a/primitives/consensus/babe/src/inherents.rs b/primitives/consensus/babe/src/inherents.rs index 4c7c55f1cfd55..e160ca8644bc2 100644 --- a/primitives/consensus/babe/src/inherents.rs +++ b/primitives/consensus/babe/src/inherents.rs @@ -17,14 +17,8 @@ //! 
Inherents for BABE -use sp_inherents::{Error, InherentData, InherentIdentifier}; -#[cfg(feature = "std")] -use sp_inherents::{InherentDataProviders, ProvideInherentData}; -#[cfg(feature = "std")] -use sp_timestamp::TimestampInherentData; +use sp_inherents::{InherentData, InherentIdentifier, Error}; -#[cfg(feature = "std")] -use codec::Decode; use sp_std::result::Result; /// The BABE inherent identifier. @@ -35,15 +29,14 @@ pub type InherentType = sp_consensus_slots::Slot; /// Auxiliary trait to extract BABE inherent data. pub trait BabeInherentData { /// Get BABE inherent data. - fn babe_inherent_data(&self) -> Result; + fn babe_inherent_data(&self) -> Result, Error>; /// Replace BABE inherent data. fn babe_replace_inherent_data(&mut self, new: InherentType); } impl BabeInherentData for InherentData { - fn babe_inherent_data(&self) -> Result { + fn babe_inherent_data(&self) -> Result, Error> { self.get_data(&INHERENT_IDENTIFIER) - .and_then(|r| r.ok_or_else(|| "BABE inherent data not found".into())) } fn babe_replace_inherent_data(&mut self, new: InherentType) { @@ -55,39 +48,59 @@ impl BabeInherentData for InherentData { // TODO: Remove in the future. https://github.com/paritytech/substrate/issues/8029 #[cfg(feature = "std")] pub struct InherentDataProvider { - slot_duration: std::time::Duration, + slot: InherentType, } #[cfg(feature = "std")] impl InherentDataProvider { - /// Constructs `Self` - pub fn new(slot_duration: std::time::Duration) -> Self { - Self { slot_duration } + /// Create new inherent data provider from the given `slot`. + pub fn new(slot: InherentType) -> Self { + Self { slot } } -} -#[cfg(feature = "std")] -impl ProvideInherentData for InherentDataProvider { - fn on_register(&self, providers: &InherentDataProviders) -> Result<(), Error> { - if !providers.has_provider(&sp_timestamp::INHERENT_IDENTIFIER) { - // Add the timestamp inherent data provider, as we require it. - providers.register_provider(sp_timestamp::InherentDataProvider) - } else { - Ok(()) + /// Creates the inherent data provider by calculating the slot from the given + /// `timestamp` and `duration`. + pub fn from_timestamp_and_duration( + timestamp: sp_timestamp::Timestamp, + duration: std::time::Duration, + ) -> Self { + let slot = InherentType::from( + (timestamp.as_duration().as_millis() / duration.as_millis()) as u64 + ); + + Self { + slot, } } - fn inherent_identifier(&self) -> &'static InherentIdentifier { - &INHERENT_IDENTIFIER + /// Returns the `slot` of this inherent data provider. 
+ pub fn slot(&self) -> InherentType { + self.slot } +} +#[cfg(feature = "std")] +impl sp_std::ops::Deref for InherentDataProvider { + type Target = InherentType; + + fn deref(&self) -> &Self::Target { + &self.slot + } +} + +#[cfg(feature = "std")] +#[async_trait::async_trait] +impl sp_inherents::InherentDataProvider for InherentDataProvider { fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { - let timestamp = inherent_data.timestamp_inherent_data()?; - let slot = *timestamp / self.slot_duration.as_millis() as u64; - inherent_data.put_data(INHERENT_IDENTIFIER, &slot) + inherent_data.put_data(INHERENT_IDENTIFIER, &self.slot) } - fn error_to_string(&self, error: &[u8]) -> Option { - Error::decode(&mut &error[..]).map(|e| e.into_string()).ok() + async fn try_handle_error( + &self, + _: &InherentIdentifier, + _: &[u8], + ) -> Option> { + // There is no error anymore + None } } diff --git a/primitives/consensus/common/src/import_queue/basic_queue.rs b/primitives/consensus/common/src/import_queue/basic_queue.rs index 7998ba1b3ec76..55fc2eac40ca6 100644 --- a/primitives/consensus/common/src/import_queue/basic_queue.rs +++ b/primitives/consensus/common/src/import_queue/basic_queue.rs @@ -402,7 +402,6 @@ async fn import_many_blocks, Transaction: Send + 'stat /// A future that will always `yield` on the first call of `poll` but schedules the current task for /// re-execution. - /// /// This is done by getting the waker and calling `wake_by_ref` followed by returning `Pending`. /// The next time the `poll` is called, it will return `Ready`. diff --git a/primitives/inherents/Cargo.toml b/primitives/inherents/Cargo.toml index c0e74c0fb99fd..54ce09306e192 100644 --- a/primitives/inherents/Cargo.toml +++ b/primitives/inherents/Cargo.toml @@ -15,18 +15,24 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -parking_lot = { version = "0.11.1", optional = true } sp-std = { version = "3.0.0", default-features = false, path = "../std" } sp-core = { version = "3.0.0", default-features = false, path = "../core" } +sp-runtime = { version = "3.0.0", path = "../runtime", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } thiserror = { version = "1.0.21", optional = true } +impl-trait-for-tuples = "0.2.0" +async-trait = { version = "0.1.30", optional = true } + +[dev-dependencies] +futures = "0.3.9" [features] default = [ "std" ] std = [ - "parking_lot", "sp-std/std", "codec/std", "sp-core/std", "thiserror", + "sp-runtime", + "async-trait", ] diff --git a/primitives/inherents/src/client_side.rs b/primitives/inherents/src/client_side.rs new file mode 100644 index 0000000000000..38639c5de3227 --- /dev/null +++ b/primitives/inherents/src/client_side.rs @@ -0,0 +1,125 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::{InherentData, Error, InherentIdentifier}; +use sp_runtime::traits::Block as BlockT; + +/// Something that can create inherent data providers. +/// +/// It is possible for the caller to provide custom arguments to the callee by setting the +/// `ExtraArgs` generic parameter. +/// +/// The crate already provides some convience implementations of this trait for +/// `Box` and closures. So, it should not be required to implement +/// this trait manually. +#[async_trait::async_trait] +pub trait CreateInherentDataProviders: Send + Sync { + /// The inherent data providers that will be created. + type InherentDataProviders: InherentDataProvider; + + /// Create the inherent data providers at the given `parent` block using the given `extra_args`. + async fn create_inherent_data_providers( + &self, + parent: Block::Hash, + extra_args: ExtraArgs, + ) -> Result>; +} + +#[async_trait::async_trait] +impl CreateInherentDataProviders for F +where + Block: BlockT, + F: Fn(Block::Hash, ExtraArgs) -> Fut + Sync + Send, + Fut: std::future::Future>> + Send + 'static, + IDP: InherentDataProvider + 'static, + ExtraArgs: Send + 'static, +{ + type InherentDataProviders = IDP; + + async fn create_inherent_data_providers( + &self, + parent: Block::Hash, + extra_args: ExtraArgs, + ) -> Result> { + (*self)(parent, extra_args).await + } +} + +#[async_trait::async_trait] +impl + CreateInherentDataProviders + for Box> +{ + type InherentDataProviders = IDPS; + + async fn create_inherent_data_providers( + &self, + parent: Block::Hash, + extra_args: ExtraArgs, + ) -> Result> { + (**self).create_inherent_data_providers(parent, extra_args).await + } +} + +/// Something that provides inherent data. +#[async_trait::async_trait] +pub trait InherentDataProvider: Send + Sync { + /// Convenience function for creating [`InherentData`]. + /// + /// Basically maps around [`Self::provide_inherent_data`]. + fn create_inherent_data(&self) -> Result { + let mut inherent_data = InherentData::new(); + self.provide_inherent_data(&mut inherent_data)?; + Ok(inherent_data) + } + + /// Provide inherent data that should be included in a block. + /// + /// The data should be stored in the given `InherentData` structure. + fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error>; + + /// Convert the given encoded error to a string. + /// + /// If the given error could not be decoded, `None` should be returned. + async fn try_handle_error( + &self, + identifier: &InherentIdentifier, + error: &[u8], + ) -> Option>; +} + +#[impl_trait_for_tuples::impl_for_tuples(30)] +#[async_trait::async_trait] +impl InherentDataProvider for Tuple { + for_tuples!( where #( Tuple: Send + Sync )* ); + fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { + for_tuples!( #( Tuple.provide_inherent_data(inherent_data)?; )* ); + Ok(()) + } + + async fn try_handle_error( + &self, + identifier: &InherentIdentifier, + error: &[u8], + ) -> Option> { + for_tuples!( #( + if let Some(r) = Tuple.try_handle_error(identifier, error).await { return Some(r) } + )* ); + + None + } +} diff --git a/primitives/inherents/src/lib.rs b/primitives/inherents/src/lib.rs index facc620810468..f0b5fdc940a92 100644 --- a/primitives/inherents/src/lib.rs +++ b/primitives/inherents/src/lib.rs @@ -15,21 +15,149 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Provides types and traits for creating and checking inherents. +//! Substrate inherent extrinsics //! 
-//! Each inherent is added to a produced block. Each runtime decides on which inherents it -//! wants to attach to its blocks. All data that is required for the runtime to create the inherents -//! is stored in the `InherentData`. This `InherentData` is constructed by the node and given to -//! the runtime. +//! Inherent extrinsics are extrinsics that are inherently added to each block. However, it is up to +//! runtime implementation to require an inherent for each block or to make it optional. Inherents +//! are mainly used to pass data from the block producer to the runtime. So, inherents require some +//! part that is running on the client side and some part that is running on the runtime side. Any +//! data that is required by an inherent is passed as [`InherentData`] from the client to the runtime +//! when the inherents are constructed. //! -//! Types that provide data for inherents, should implement `InherentDataProvider` and need to be -//! registered at `InherentDataProviders`. +//! The process of constructing and applying inherents is the following: //! -//! In the runtime, modules need to implement `ProvideInherent` when they can create and/or check -//! inherents. By implementing `ProvideInherent`, a module is not enforced to create an inherent. -//! A module can also just check given inherents. For using a module as inherent provider, it needs -//! to be registered by the `construct_runtime!` macro. The macro documentation gives more -//! information on how that is done. +//! 1. The block producer first creates the [`InherentData`] by using the inherent data providers +//! that are created by [`CreateInherentDataProviders`]. +//! +//! 2. The [`InherentData`] is passed to the `inherent_extrinsics` function of the `BlockBuilder` +//! runtime api. This will call the runtime which will create all the inherents that should be +//! applied to the block. +//! +//! 3. Apply each inherent to the block like any normal extrinsic. +//! +//! On block import the inherents in the block are checked by calling the `check_inherents` runtime +//! API. This will also pass an instance of [`InherentData`] which the runtime can use to validate +//! all inherents. If some inherent data isn't required for validating an inherent, it can be +//! omitted when providing the inherent data providers for block import. +//! +//! # Providing inherent data +//! +//! To provide inherent data from the client side, [`InherentDataProvider`] should be implemented. +//! +//! ``` +//! use codec::Decode; +//! use sp_inherents::{InherentIdentifier, InherentData}; +//! +//! // This needs to be unique for the runtime. +//! const INHERENT_IDENTIFIER: InherentIdentifier = *b"testinh0"; +//! +//! /// Some custom inherent data provider +//! struct InherentDataProvider; +//! +//! #[async_trait::async_trait] +//! impl sp_inherents::InherentDataProvider for InherentDataProvider { +//! fn provide_inherent_data( +//! &self, +//! inherent_data: &mut InherentData, +//! ) -> Result<(), sp_inherents::Error> { +//! // We can insert any data that implements [`codec::Encode`]. +//! inherent_data.put_data(INHERENT_IDENTIFIER, &"hello") +//! } +//! +//! /// When validating the inherents, the runtime implementation can throw errors. We support +//! /// two error modes, fatal and non-fatal errors. A fatal error means that the block is invalid +//! /// and this function here should return `Err(_)` to not import the block. Non-fatal errors +//! /// are allowed to be handled here in this function and the function should return `Ok(())` +//! 
/// if it could be handled. A non-fatal error is for example that a block is in the future +//! /// from the point of view of the local node. In such a case the block import for example +//! /// should be delayed until the block is valid. +//! /// +//! /// If this functions returns `None`, it means that it is not responsible for this error or +//! /// that the error could not be interpreted. +//! async fn try_handle_error( +//! &self, +//! identifier: &InherentIdentifier, +//! mut error: &[u8], +//! ) -> Option> { +//! // Check if this error belongs to us. +//! if *identifier != INHERENT_IDENTIFIER { +//! return None; +//! } +//! +//! // For demonstration purposes we are using a `String` as error type. In real +//! // implementations it is advised to not use `String`. +//! Some(Err( +//! sp_inherents::Error::Application(Box::from(String::decode(&mut error).ok()?)) +//! )) +//! } +//! } +//! ``` +//! +//! In the service the relevant inherent data providers need to be passed the block production and +//! the block import. As already highlighted above, the providers can be different between import +//! and production. +//! +//! ``` +//! # use sp_runtime::testing::ExtrinsicWrapper; +//! # use sp_inherents::{InherentIdentifier, InherentData}; +//! # use futures::FutureExt; +//! # type Block = sp_runtime::testing::Block>; +//! # const INHERENT_IDENTIFIER: InherentIdentifier = *b"testinh0"; +//! # struct InherentDataProvider; +//! # #[async_trait::async_trait] +//! # impl sp_inherents::InherentDataProvider for InherentDataProvider { +//! # fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), sp_inherents::Error> { +//! # inherent_data.put_data(INHERENT_IDENTIFIER, &"hello") +//! # } +//! # async fn try_handle_error( +//! # &self, +//! # _: &InherentIdentifier, +//! # _: &[u8], +//! # ) -> Option> { +//! # None +//! # } +//! # } +//! +//! async fn cool_consensus_block_production( +//! // The second parameter to the trait are parameters that depend on what the caller +//! // can provide on extra data. +//! _: impl sp_inherents::CreateInherentDataProviders, +//! ) { +//! // do cool stuff +//! } +//! +//! async fn cool_consensus_block_import( +//! _: impl sp_inherents::CreateInherentDataProviders, +//! ) { +//! // do cool stuff +//! } +//! +//! async fn build_service(is_validator: bool) { +//! // For block import we don't pass any inherent data provider, because our runtime +//! // does not need any inherent data to validate the inherents. +//! let block_import = cool_consensus_block_import(|_parent, ()| async { Ok(()) }); +//! +//! let block_production = if is_validator { +//! // For block production we want to provide our inherent data provider +//! cool_consensus_block_production(|_parent, ()| async { +//! Ok(InherentDataProvider) +//! }).boxed() +//! } else { +//! futures::future::pending().boxed() +//! }; +//! +//! futures::pin_mut!(block_import); +//! +//! futures::future::select(block_import, block_production).await; +//! } +//! ``` +//! +//! # Creating the inherent +//! +//! As the inherents are created by the runtime, it depends on the runtime implementation on how +//! to create the inherents. As already described above the client side passes the [`InherentData`] +//! and expects the runtime to construct the inherents out of it. When validating the inherents, +//! [`CheckInherentsResult`] is used to communicate the result client side. 
#![cfg_attr(not(feature = "std"), no_std)] #![warn(missing_docs)] @@ -39,42 +167,34 @@ use codec::{Encode, Decode}; use sp_std::{collections::btree_map::{BTreeMap, IntoIter, Entry}, vec::Vec}; #[cfg(feature = "std")] -use parking_lot::RwLock; - -#[cfg(feature = "std")] -use std::{sync::Arc, format}; +mod client_side; -/// An error that can occur within the inherent data system. #[cfg(feature = "std")] -#[derive(Debug, Encode, Decode, thiserror::Error)] -#[error("Inherents: {0}")] -pub struct Error(String); - -#[cfg(feature = "std")] -impl> From for Error { - fn from(data: T) -> Error { - Self(data.into()) - } -} - -#[cfg(feature = "std")] -impl Error { - /// Convert this error into a `String`. - pub fn into_string(self) -> String { - self.0 - } -} - -/// An error that can occur within the inherent data system. -#[derive(Encode, sp_core::RuntimeDebug)] -#[cfg(not(feature = "std"))] -pub struct Error(&'static str); - -#[cfg(not(feature = "std"))] -impl From<&'static str> for Error { - fn from(data: &'static str) -> Error { - Self(data) - } +pub use client_side::*; + +/// Errors that occur in context of inherents. +#[derive(Debug)] +#[cfg_attr(feature = "std", derive(thiserror::Error))] +#[allow(missing_docs)] +pub enum Error { + #[cfg_attr( + feature = "std", + error("Inherent data already exists for identifier: {}", "String::from_utf8_lossy(_0)") + )] + InherentDataExists(InherentIdentifier), + #[cfg_attr( + feature = "std", + error("Failed to decode inherent data for identifier: {}", "String::from_utf8_lossy(_1)") + )] + DecodingFailed(#[cfg_attr(feature = "std", source)] codec::Error, InherentIdentifier), + #[cfg_attr( + feature = "std", + error("There was already a fatal error reported and no other errors are allowed") + )] + FatalErrorReported, + #[cfg(feature = "std")] + #[error(transparent)] + Application(#[from] Box), } /// An identifier for an inherent. @@ -112,7 +232,7 @@ impl InherentData { Ok(()) }, Entry::Occupied(_) => { - Err("Inherent with same identifier already exists!".into()) + Err(Error::InherentDataExists(identifier)) } } } @@ -142,9 +262,7 @@ impl InherentData { match self.data.get(identifier) { Some(inherent) => I::decode(&mut &inherent[..]) - .map_err(|_| { - "Could not decode requested inherent type!".into() - }) + .map_err(|e| Error::DecodingFailed(e, *identifier)) .map(Some), None => Ok(None) } @@ -203,7 +321,7 @@ impl CheckInherentsResult { ) -> Result<(), Error> { // Don't accept any other error if self.fatal_error { - return Err("No other errors are accepted after an hard error!".into()) + return Err(Error::FatalErrorReported) } if error.is_fatal_error() { @@ -257,118 +375,6 @@ impl PartialEq for CheckInherentsResult { } } -/// All `InherentData` providers. -#[cfg(feature = "std")] -#[derive(Clone, Default)] -pub struct InherentDataProviders { - providers: Arc>>>, -} - -#[cfg(feature = "std")] -impl InherentDataProviders { - /// Create a new instance. - pub fn new() -> Self { - Self::default() - } - - /// Register an `InherentData` provider. - /// - /// The registration order is preserved and this order will also be used when creating the - /// inherent data. - /// - /// # Result - /// - /// Will return an error, if a provider with the same identifier already exists. 
- pub fn register_provider( - &self, - provider: P, - ) -> Result<(), Error> { - if self.has_provider(&provider.inherent_identifier()) { - Err( - format!( - "Inherent data provider with identifier {:?} already exists!", - &provider.inherent_identifier() - ).into() - ) - } else { - provider.on_register(self)?; - self.providers.write().push(Box::new(provider)); - Ok(()) - } - } - - /// Returns if a provider for the given identifier exists. - pub fn has_provider(&self, identifier: &InherentIdentifier) -> bool { - self.providers.read().iter().any(|p| p.inherent_identifier() == identifier) - } - - /// Create inherent data. - pub fn create_inherent_data(&self) -> Result { - let mut data = InherentData::new(); - self.providers.read().iter().try_for_each(|p| { - p.provide_inherent_data(&mut data) - .map_err(|e| format!("Error for `{:?}`: {:?}", p.inherent_identifier(), e)) - })?; - Ok(data) - } - - /// Converts a given encoded error into a `String`. - /// - /// Useful if the implementation encounters an error for an identifier it does not know. - pub fn error_to_string(&self, identifier: &InherentIdentifier, error: &[u8]) -> String { - let res = self.providers.read().iter().filter_map(|p| - if p.inherent_identifier() == identifier { - Some( - p.error_to_string(error) - .unwrap_or_else(|| error_to_string_fallback(identifier)) - ) - } else { - None - } - ).next(); - - match res { - Some(res) => res, - None => format!( - "Error while checking inherent of type \"{}\", but this inherent type is unknown.", - String::from_utf8_lossy(identifier) - ) - } - } -} - -/// Something that provides inherent data. -#[cfg(feature = "std")] -pub trait ProvideInherentData { - /// Is called when this inherent data provider is registered at the given - /// `InherentDataProviders`. - fn on_register(&self, _: &InherentDataProviders) -> Result<(), Error> { - Ok(()) - } - - /// The identifier of the inherent for that data will be provided. - fn inherent_identifier(&self) -> &'static InherentIdentifier; - - /// Provide inherent data that should be included in a block. - /// - /// The data should be stored in the given `InherentData` structure. - fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error>; - - /// Convert the given encoded error to a string. - /// - /// If the given error could not be decoded, `None` should be returned. - fn error_to_string(&self, error: &[u8]) -> Option; -} - -/// A fallback function, if the decoding of an error fails. -#[cfg(feature = "std")] -fn error_to_string_fallback(identifier: &InherentIdentifier) -> String { - format!( - "Error while checking inherent of type \"{}\", but error could not be decoded.", - String::from_utf8_lossy(identifier) - ) -} - /// Did we encounter a fatal error while checking an inherent? /// /// A fatal error is everything that fails while checking an inherent error, e.g. the inherent @@ -382,9 +388,9 @@ pub trait IsFatalError { fn is_fatal_error(&self) -> bool; } -/// Auxiliary to make any given error resolve to `is_fatal_error() == true`. -#[derive(Encode)] -pub struct MakeFatalError(E); +/// Auxiliary to make any given error resolve to `is_fatal_error() == true` for [`IsFatalError`]. +#[derive(codec::Encode)] +pub struct MakeFatalError(E); impl From for MakeFatalError { fn from(err: E) -> Self { @@ -398,63 +404,6 @@ impl IsFatalError for MakeFatalError { } } -/// A pallet that provides or verifies an inherent extrinsic. -/// -/// The pallet may provide the inherent, verify an inherent, or both provide and verify. 
-pub trait ProvideInherent { - /// The call type of the pallet. - type Call; - /// The error returned by `check_inherent`. - type Error: codec::Encode + IsFatalError; - /// The inherent identifier used by this inherent. - const INHERENT_IDENTIFIER: self::InherentIdentifier; - - /// Create an inherent out of the given `InherentData`. - fn create_inherent(data: &InherentData) -> Option; - - /// Determines whether this inherent is required in this block. - /// - /// - `Ok(None)` indicates that this inherent is not required in this block. The default - /// implementation returns this. - /// - /// - `Ok(Some(e))` indicates that this inherent is required in this block. The - /// `impl_outer_inherent!`, will call this function from its `check_extrinsics`. - /// If the inherent is not present, it will return `e`. - /// - /// - `Err(_)` indicates that this function failed and further operations should be aborted. - /// - /// NOTE: If inherent is required then the runtime asserts that the block contains at least - /// one inherent for which: - /// * type is [`Self::Call`], - /// * [`Self::is_inherent`] returns true. - fn is_inherent_required(_: &InherentData) -> Result, Self::Error> { Ok(None) } - - /// Check whether the given inherent is valid. Checking the inherent is optional and can be - /// omitted by using the default implementation. - /// - /// When checking an inherent, the first parameter represents the inherent that is actually - /// included in the block by its author. Whereas the second parameter represents the inherent - /// data that the verifying node calculates. - /// - /// NOTE: A block can contains multiple inherent. - fn check_inherent(_: &Self::Call, _: &InherentData) -> Result<(), Self::Error> { - Ok(()) - } - - /// Return whether the call is an inherent call. - /// - /// NOTE: Signed extrinsics are not inherent, but signed extrinsic with the given call variant - /// can be dispatched. - /// - /// # Warning - /// - /// In FRAME, inherent are enforced to be before other extrinsics, for this reason, - /// pallets with unsigned transactions **must ensure** that no unsigned transaction call - /// is an inherent call, when implementing `ValidateUnsigned::validate_unsigned`. - /// Otherwise block producer can produce invalid blocks by including them after non inherent. 
- fn is_inherent(call: &Self::Call) -> bool; -} - #[cfg(test)] mod tests { use super::*; @@ -496,93 +445,34 @@ mod tests { } #[derive(Clone)] - struct TestInherentDataProvider { - registered: Arc>, - } - - impl TestInherentDataProvider { - fn new() -> Self { - let inst = Self { - registered: Default::default(), - }; - - // just make sure - assert!(!inst.is_registered()); - - inst - } - - fn is_registered(&self) -> bool { - *self.registered.read() - } - } + struct TestInherentDataProvider; const ERROR_TO_STRING: &str = "Found error!"; - impl ProvideInherentData for TestInherentDataProvider { - fn on_register(&self, _: &InherentDataProviders) -> Result<(), Error> { - *self.registered.write() = true; - Ok(()) - } - - fn inherent_identifier(&self) -> &'static InherentIdentifier { - &TEST_INHERENT_0 - } - + #[async_trait::async_trait] + impl InherentDataProvider for TestInherentDataProvider { fn provide_inherent_data(&self, data: &mut InherentData) -> Result<(), Error> { data.put_data(TEST_INHERENT_0, &42) } - fn error_to_string(&self, _: &[u8]) -> Option { - Some(ERROR_TO_STRING.into()) + async fn try_handle_error( + &self, + _: &InherentIdentifier, + _: &[u8], + ) -> Option> { + Some(Err(Error::Application(Box::from(ERROR_TO_STRING)))) } } #[test] - fn registering_inherent_provider() { - let provider = TestInherentDataProvider::new(); - let providers = InherentDataProviders::new(); + fn create_inherent_data() { + let provider = TestInherentDataProvider; - providers.register_provider(provider.clone()).unwrap(); - assert!(provider.is_registered()); - assert!(providers.has_provider(provider.inherent_identifier())); - - // Second time should fail - assert!(providers.register_provider(provider.clone()).is_err()); - } - - #[test] - fn create_inherent_data_from_all_providers() { - let provider = TestInherentDataProvider::new(); - let providers = InherentDataProviders::new(); - - providers.register_provider(provider.clone()).unwrap(); - assert!(provider.is_registered()); - - let inherent_data = providers.create_inherent_data().unwrap(); - - assert_eq!( - inherent_data.get_data::(provider.inherent_identifier()).unwrap().unwrap(), - 42u32 - ); - } - - #[test] - fn encoded_error_to_string() { - let provider = TestInherentDataProvider::new(); - let providers = InherentDataProviders::new(); - - providers.register_provider(provider.clone()).unwrap(); - assert!(provider.is_registered()); + let inherent_data = provider.create_inherent_data().unwrap(); assert_eq!( - &providers.error_to_string(&TEST_INHERENT_0, &[1, 2]), ERROR_TO_STRING - ); - - assert!( - providers - .error_to_string(&TEST_INHERENT_1, &[1, 2]) - .contains("inherent type is unknown") + inherent_data.get_data::(&TEST_INHERENT_0).unwrap().unwrap(), + 42u32, ); } diff --git a/primitives/timestamp/Cargo.toml b/primitives/timestamp/Cargo.toml index f3e9a331cfd36..3fc8e76f40f17 100644 --- a/primitives/timestamp/Cargo.toml +++ b/primitives/timestamp/Cargo.toml @@ -19,6 +19,10 @@ sp-runtime = { version = "3.0.0", default-features = false, path = "../runtime" codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } sp-inherents = { version = "3.0.0", default-features = false, path = "../inherents" } wasm-timer = { version = "0.2", optional = true } +thiserror = { version = "1.0.21", optional = true } +log = { version = "0.4.8", optional = true } +futures-timer = { version = "3.0.2", optional = true } +async-trait = { version = "0.1.48", optional = true } [features] default = [ "std" ] @@ -29,4 +33,8 
@@ std = [ "codec/std", "sp-inherents/std", "wasm-timer", + "thiserror", + "log", + "futures-timer", + "async-trait", ] diff --git a/primitives/timestamp/src/lib.rs b/primitives/timestamp/src/lib.rs index 846ba67aec739..542522c9b8500 100644 --- a/primitives/timestamp/src/lib.rs +++ b/primitives/timestamp/src/lib.rs @@ -20,13 +20,9 @@ #![cfg_attr(not(feature = "std"), no_std)] use codec::{Encode, Decode}; -#[cfg(feature = "std")] -use sp_inherents::ProvideInherentData; use sp_inherents::{InherentIdentifier, IsFatalError, InherentData}; use sp_std::time::Duration; -use sp_runtime::RuntimeString; - /// The identifier for the `timestamp` inherent. pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"timstap0"; @@ -46,9 +42,14 @@ impl Timestamp { } /// Returns `self` as [`Duration`]. - pub fn as_duration(&self) -> Duration { + pub fn as_duration(self) -> Duration { Duration::from_millis(self.0) } + + /// Checked subtraction that returns `None` on an underflow. + pub fn checked_sub(self, other: Self) -> Option { + self.0.checked_sub(other.0).map(Self) + } } impl sp_std::ops::Deref for Timestamp { @@ -114,20 +115,22 @@ impl From for Timestamp { /// Errors that can occur while checking the timestamp inherent. #[derive(Encode, sp_runtime::RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Decode))] +#[cfg_attr(feature = "std", derive(Decode, thiserror::Error))] pub enum InherentError { /// The timestamp is valid in the future. /// This is a non-fatal-error and will not stop checking the inherents. + #[cfg_attr(feature = "std", error("Block will be valid at {0}."))] ValidAtTimestamp(InherentType), - /// Some other error. - Other(RuntimeString), + /// The block timestamp is too far in the future + #[cfg_attr(feature = "std", error("The timestamp of the block is too far in the future."))] + TooFarInFuture, } impl IsFatalError for InherentError { fn is_fatal_error(&self) -> bool { match self { InherentError::ValidAtTimestamp(_) => false, - InherentError::Other(_) => true, + InherentError::TooFarInFuture => true, } } } @@ -147,43 +150,123 @@ impl InherentError { /// Auxiliary trait to extract timestamp inherent data. pub trait TimestampInherentData { /// Get timestamp inherent data. - fn timestamp_inherent_data(&self) -> Result; + fn timestamp_inherent_data(&self) -> Result, sp_inherents::Error>; } impl TimestampInherentData for InherentData { - fn timestamp_inherent_data(&self) -> Result { + fn timestamp_inherent_data(&self) -> Result, sp_inherents::Error> { self.get_data(&INHERENT_IDENTIFIER) - .and_then(|r| r.ok_or_else(|| "Timestamp inherent data not found".into())) } } +/// The current timestamp using the system time. +/// +/// This timestamp is the time since the UNIX epoch. +#[cfg(feature = "std")] +fn current_timestamp() -> std::time::Duration { + use wasm_timer::SystemTime; + + let now = SystemTime::now(); + now.duration_since(SystemTime::UNIX_EPOCH) + .expect("Current time is always after unix epoch; qed") +} + /// Provide duration since unix epoch in millisecond for timestamp inherent. #[cfg(feature = "std")] -pub struct InherentDataProvider; +pub struct InherentDataProvider { + max_drift: InherentType, + timestamp: InherentType, +} #[cfg(feature = "std")] -impl ProvideInherentData for InherentDataProvider { - fn inherent_identifier(&self) -> &'static InherentIdentifier { - &INHERENT_IDENTIFIER +impl InherentDataProvider { + /// Create `Self` while using the system time to get the timestamp. 
+ pub fn from_system_time() -> Self { + Self { + max_drift: std::time::Duration::from_secs(60).into(), + timestamp: current_timestamp().into(), + } + } + + /// Create `Self` using the given `timestamp`. + pub fn new(timestamp: InherentType) -> Self { + Self { + max_drift: std::time::Duration::from_secs(60).into(), + timestamp, + } + } + + /// With the given maximum drift. + /// + /// By default the maximum drift is 60 seconds. + /// + /// The maximum drift is used when checking the inherents of a runtime. If the current timestamp + /// plus the maximum drift is smaller than the timestamp in the block, the block will be rejected + /// as being too far in the future. + pub fn with_max_drift(mut self, max_drift: std::time::Duration) -> Self { + self.max_drift = max_drift.into(); + self } + /// Returns the timestamp of this inherent data provider. + pub fn timestamp(&self) -> InherentType { + self.timestamp + } +} + +#[cfg(feature = "std")] +impl sp_std::ops::Deref for InherentDataProvider { + type Target = InherentType; + + fn deref(&self) -> &Self::Target { + &self.timestamp + } +} + +#[cfg(feature = "std")] +#[async_trait::async_trait] +impl sp_inherents::InherentDataProvider for InherentDataProvider { fn provide_inherent_data( &self, inherent_data: &mut InherentData, ) -> Result<(), sp_inherents::Error> { - use wasm_timer::SystemTime; - - let now = SystemTime::now(); - now.duration_since(SystemTime::UNIX_EPOCH) - .map_err(|_| { - "Current time is before unix epoch".into() - }).and_then(|d| { - inherent_data.put_data(INHERENT_IDENTIFIER, &InherentType::from(d)) - }) + inherent_data.put_data(INHERENT_IDENTIFIER, &InherentType::from(self.timestamp)) } - fn error_to_string(&self, error: &[u8]) -> Option { - InherentError::try_from(&INHERENT_IDENTIFIER, error).map(|e| format!("{:?}", e)) + async fn try_handle_error( + &self, + identifier: &InherentIdentifier, + error: &[u8], + ) -> Option> { + if *identifier != INHERENT_IDENTIFIER { + return None + } + + match InherentError::try_from(&INHERENT_IDENTIFIER, error)? { + InherentError::ValidAtTimestamp(valid) => { + let max_drift = self.max_drift; + let timestamp = self.timestamp; + // halt import until timestamp is valid. + // reject when too far ahead. 
+ if valid > timestamp + max_drift { + return Some(Err( + sp_inherents::Error::Application(Box::from(InherentError::TooFarInFuture)) + )) + } + + let diff = valid.checked_sub(timestamp).unwrap_or_default(); + log::info!( + target: "timestamp", + "halting for block {} milliseconds in the future", + diff.0, + ); + + futures_timer::Delay::new(diff.as_duration()).await; + + Some(Ok(())) + }, + o => Some(Err(sp_inherents::Error::Application(Box::from(o)))), + } } } diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index 704df1ad9ef7b..33ef7b12d8db0 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -107,11 +107,11 @@ pub fn polish_block(block: &mut Block) { execute_block_with_state_root_handler(block, Mode::Overwrite); } -pub fn execute_block(mut block: Block) { - execute_block_with_state_root_handler(&mut block, Mode::Verify); +pub fn execute_block(mut block: Block) -> Header { + execute_block_with_state_root_handler(&mut block, Mode::Verify) } -fn execute_block_with_state_root_handler(block: &mut Block, mode: Mode) { +fn execute_block_with_state_root_handler(block: &mut Block, mode: Mode) -> Header { let header = &mut block.header; initialize_block(header); @@ -142,6 +142,8 @@ fn execute_block_with_state_root_handler(block: &mut Block, mode: Mode) { "Transaction trie root must be valid.", ); } + + new_header } /// The block executor. diff --git a/test-utils/test-runner/src/lib.rs b/test-utils/test-runner/src/lib.rs index f76083d281724..000d3efc3e96f 100644 --- a/test-utils/test-runner/src/lib.rs +++ b/test-utils/test-runner/src/lib.rs @@ -231,7 +231,7 @@ use sc_executor::NativeExecutionDispatch; use sc_service::{Configuration, TFullBackend, TFullClient, TaskManager, TaskExecutor}; use sp_api::{ConstructRuntimeApi, TransactionFor}; use sp_consensus::{BlockImport, SelectChain}; -use sp_inherents::InherentDataProviders; +use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; use sp_keystore::SyncCryptoStorePtr; use sp_runtime::traits::{Block as BlockT, SignedExtension}; use std::sync::Arc; @@ -277,6 +277,9 @@ pub trait ChainInfo: Sized { /// The signed extras required by the runtime type SignedExtras: SignedExtension; + /// The inherent data providers. + type InherentDataProviders: InherentDataProvider + 'static; + /// Signed extras, this function is caled in an externalities provided environment. fn signed_extras(from: ::AccountId) -> Self::SignedExtras; @@ -293,7 +296,13 @@ pub trait ChainInfo: Sized { Arc>, SyncCryptoStorePtr, TaskManager, - InherentDataProviders, + Box< + dyn CreateInherentDataProviders< + Self::Block, + (), + InherentDataProviders = Self::InherentDataProviders + > + >, Option< Box< dyn ConsensusDataProvider< diff --git a/test-utils/test-runner/src/node.rs b/test-utils/test-runner/src/node.rs index 2e6fc97c582a0..50c9c54ea18fb 100644 --- a/test-utils/test-runner/src/node.rs +++ b/test-utils/test-runner/src/node.rs @@ -121,7 +121,7 @@ impl Node { backend, keystore, mut task_manager, - inherent_data_providers, + create_inherent_data_providers, consensus_data_provider, select_chain, block_import, @@ -198,7 +198,7 @@ impl Node { commands_stream, select_chain, consensus_data_provider, - inherent_data_providers, + create_inherent_data_providers, }); // spawn the authorship task as an essential task.
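A minimal usage sketch of the client-side API introduced in this patch, assuming the crates above are available: inherent data providers are now plain values, tuples of providers implement `InherentDataProvider` themselves, and `create_inherent_data` replaces the removed `InherentDataProviders` registry. The `Header` alias, the function name, the 30-second drift and the 6-second slot duration are illustrative assumptions, not part of the diff.

```rust
use std::time::Duration;
use sp_inherents::InherentDataProvider as _; // brings `create_inherent_data` into scope
use sp_runtime::{generic, traits::BlakeTwo256};

// Illustrative header type; a real node would use its runtime's header.
type Header = generic::Header<u32, BlakeTwo256>;

fn example_inherent_data() -> Result<sp_inherents::InherentData, sp_inherents::Error> {
	// Timestamp taken from the system clock; accept blocks up to 30 seconds
	// ahead of local time before rejecting them as too far in the future.
	let timestamp = sp_timestamp::InherentDataProvider::from_system_time()
		.with_max_drift(Duration::from_secs(30));

	// The BABE slot is now derived from the timestamp on the client side.
	let slot = sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_duration(
		*timestamp,
		Duration::from_millis(6000),
	);

	// When only checking inherents, no uncles need to be supplied.
	let uncles = sp_authorship::InherentDataProvider::<Header>::check_inherents();

	// The tuple of providers is itself an `InherentDataProvider`.
	(timestamp, slot, uncles).create_inherent_data()
}
```

The same tuple (or a closure returning it) is what block production and block import hand to `CreateInherentDataProviders`, with each side free to pass only the providers it actually needs.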