From 869b70c1458a0c597b31660f79cdd37f49b19a67 Mon Sep 17 00:00:00 2001 From: Ashley Ruglys Date: Tue, 30 Jun 2020 15:03:04 +0200 Subject: [PATCH 01/24] :) --- Cargo.lock | 101 +-- bin/node-template/node/src/command.rs | 5 +- bin/node/cli/src/command.rs | 6 +- client/cli/Cargo.toml | 1 + client/cli/src/commands/build_spec_cmd.rs | 11 +- client/cli/src/commands/check_block_cmd.rs | 37 +- client/cli/src/commands/export_blocks_cmd.rs | 24 +- client/cli/src/commands/export_state_cmd.rs | 27 +- client/cli/src/commands/import_blocks_cmd.rs | 23 +- client/cli/src/commands/purge_chain_cmd.rs | 6 +- client/cli/src/commands/revert_cmd.rs | 20 +- client/cli/src/runner.rs | 51 +- client/service/src/builder.rs | 5 + client/service/src/chain_ops.rs | 614 ------------------ client/service/src/chain_ops/check_block.rs | 51 ++ client/service/src/chain_ops/export_blocks.rs | 106 +++ .../service/src/chain_ops/export_raw_state.rs | 71 ++ client/service/src/chain_ops/import_blocks.rs | 472 ++++++++++++++ client/service/src/chain_ops/mod.rs | 29 + client/service/src/chain_ops/revert_chain.rs | 43 ++ client/service/src/lib.rs | 2 +- 21 files changed, 952 insertions(+), 753 deletions(-) delete mode 100644 client/service/src/chain_ops.rs create mode 100644 client/service/src/chain_ops/check_block.rs create mode 100644 client/service/src/chain_ops/export_blocks.rs create mode 100644 client/service/src/chain_ops/export_raw_state.rs create mode 100644 client/service/src/chain_ops/import_blocks.rs create mode 100644 client/service/src/chain_ops/mod.rs create mode 100644 client/service/src/chain_ops/revert_chain.rs diff --git a/Cargo.lock b/Cargo.lock index 8b0273d199144..6ba0222f975d4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -195,7 +195,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d0864d84b8e07b145449be9a8537db86bf9de5ce03b913214694643b4743502" dependencies = [ "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -1040,7 +1040,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "47c5e5ac752e18207b12e16b10631ae5f7f68f8805f335f9b817ead83d9ffce1" dependencies = [ "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -1080,7 +1080,7 @@ checksum = "e2323f3f47db9a0e77ce7a300605d8d2098597fc451ed1a97bb1f6411bb550a7" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -1182,7 +1182,7 @@ checksum = "2ed9afacaea0301eefb738c9deea725e6d53938004597cdc518a8cf9a7aa2f03" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -1335,7 +1335,7 @@ checksum = "030a733c8287d6213886dd487564ff5c8f6aae10278b3588ed177f9d18f8d231" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", "synstructure", ] @@ -1526,7 +1526,7 @@ dependencies = [ "frame-support-procedural-tools", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -1537,7 +1537,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -1546,7 +1546,7 @@ version = "2.0.0-rc4" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -1763,7 +1763,7 @@ dependencies = [ "proc-macro-hack", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -2309,7 +2309,7 @@ checksum = "7ef5550a42e3740a0e71f909d4c861056a284060af885ae7aa6242820f920d9d" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -2448,7 +2448,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -2740,7 +2740,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f09548626b737ed64080fde595e06ce1117795b8b9fc4d2629fa36561c583171" dependencies = [ "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -4628,7 +4628,7 @@ dependencies = [ "proc-macro2", "quote 1.0.6", 
"sp-runtime", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -4862,7 +4862,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -4912,7 +4912,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" dependencies = [ "proc-macro2", - "syn 1.0.17", + "syn 1.0.33", "synstructure", ] @@ -5000,7 +5000,7 @@ dependencies = [ "proc-macro-hack", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -5064,7 +5064,7 @@ checksum = "6a0ffd45cf79d88737d7cc85bfd5d2894bee1139b356e616fe85dc389c61aaf7" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -5203,7 +5203,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", "version_check", ] @@ -5215,7 +5215,7 @@ checksum = "4f5444ead4e9935abd7f27dc51f7e852a0569ac888096d5ec2499470794e2e53" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", "syn-mid", "version_check", ] @@ -5234,9 +5234,9 @@ checksum = "8e946095f9d3ed29ec38de908c22f95d9ac008e424c7bcae54c75a79c527c694" [[package]] name = "proc-macro2" -version = "1.0.10" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df246d292ff63439fea9bc8c0a270bed0e390d5ebd4db4ba15aba81111b5abe3" +checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa" dependencies = [ "unicode-xid 0.2.0", ] @@ -5308,7 +5308,7 @@ dependencies = [ "itertools 0.8.2", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -5676,7 +5676,7 @@ checksum = "602eb59cda66fcb9aec25841fb76bc01d2b34282dcdd705028da297db6f3eec8" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -5756,7 +5756,7 @@ checksum = "475e68978dc5b743f2f40d8e0a8fdc83f1c5e78cbf4b8fa5e74e73beebc340de" dependencies 
= [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -5881,7 +5881,7 @@ checksum = "b3bba175698996010c4f6dce5e7f173b6eb781fce25d2cfc45e27091ce0b79f6" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -6014,7 +6014,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -6041,6 +6041,7 @@ dependencies = [ "sc-service", "sc-telemetry", "sc-tracing", + "serde", "serde_json", "sp-blockchain", "sp-core", @@ -7069,7 +7070,7 @@ checksum = "f8584eea9b9ff42825b46faf46a8c24d2cff13ec152fa2a50df788b87c07ee28" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -7150,22 +7151,22 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "serde" -version = "1.0.110" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99e7b308464d16b56eba9964e4972a3eee817760ab60d88c3f86e1fecb08204c" +checksum = "5317f7588f0a5078ee60ef675ef96735a1442132dc645eb1d12c018620ed8cd3" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.110" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "818fbf6bfa9a42d3bfcaca148547aa00c7b915bec71d1757aa2d44ca68771984" +checksum = "2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -7279,7 +7280,7 @@ checksum = "a945ec7f7ce853e89ffa36be1e27dce9a43e82ff9093bf3461c30d5da74ed11b" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -7377,7 +7378,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -7640,7 +7641,7 @@ version = "2.0.0-rc4" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 
1.0.33", ] [[package]] @@ -7739,7 +7740,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -7831,7 +7832,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -8132,7 +8133,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -8153,7 +8154,7 @@ dependencies = [ "heck", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -8521,9 +8522,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.17" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0df0eb663f387145cab623dea85b09c2c5b4b0aef44e945d928e682fce71bb03" +checksum = "e8d5d96e8cbb005d6959f119f773bfaebb5684296108fb32600c00cde305b2cd" dependencies = [ "proc-macro2", "quote 1.0.6", @@ -8538,7 +8539,7 @@ checksum = "7be3539f6c128a931cf19dcee741c1af532c7fd387baa739c03dd2e96479338a" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -8558,7 +8559,7 @@ checksum = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", "unicode-xid 0.2.0", ] @@ -8621,7 +8622,7 @@ dependencies = [ "lazy_static", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", "version_check", ] @@ -8651,7 +8652,7 @@ checksum = "ca972988113b7715266f91250ddb98070d033c62a011fa0fcc57434a649310dd" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -8857,7 +8858,7 @@ checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -9058,7 +9059,7 @@ checksum = "99bbad0de3fd923c9c3232ead88510b783e5a4d16a6154adffa3d53308de984c" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 
1.0.17", + "syn 1.0.33", ] [[package]] @@ -9410,7 +9411,7 @@ dependencies = [ "log", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", "wasm-bindgen-shared", ] @@ -9444,7 +9445,7 @@ checksum = "8eb197bd3a47553334907ffd2f16507b4f4f01bbec3ac921a7719e0decdfe72a" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9765,7 +9766,7 @@ checksum = "de251eec69fc7c1bc3923403d18ececb929380e016afe103da75f396704f8ca2" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", "synstructure", ] diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index 4f2fd3aad6fd3..1bc436a063beb 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -71,7 +71,10 @@ pub fn run() -> sc_cli::Result<()> { match &cli.subcommand { Some(subcommand) => { let runner = cli.create_runner(subcommand)?; - runner.run_subcommand(subcommand, |config| Ok(new_full_start!(config).0)) + runner.run_subcommand(subcommand, |config| { + let (builder, _, _) = new_full_start!(config); + Ok(builder.to_chain_ops_parts()) + }) } None => { let runner = cli.create_runner(&cli.run)?; diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index b07e0cdc907e0..4ac796370c6f8 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -97,8 +97,10 @@ pub fn run() -> Result<()> { } Some(Subcommand::Base(subcommand)) => { let runner = cli.create_runner(subcommand)?; - - runner.run_subcommand(subcommand, |config| Ok(new_full_start!(config).0)) + runner.run_subcommand(subcommand, |config| { + let (builder, _, _, _) = new_full_start!(config); + Ok(builder.to_chain_ops_parts()) + }) } } } diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 616b4f3481324..6ebf2f9bf8982 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -43,6 +43,7 @@ structopt = "0.3.8" sc-tracing = { version = 
"2.0.0-rc4", path = "../tracing" } chrono = "0.4.10" parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } +serde = "1.0.111" [target.'cfg(not(target_os = "unknown"))'.dependencies] rpassword = "4.0.1" diff --git a/client/cli/src/commands/build_spec_cmd.rs b/client/cli/src/commands/build_spec_cmd.rs index 23626359ff131..e45545c584210 100644 --- a/client/cli/src/commands/build_spec_cmd.rs +++ b/client/cli/src/commands/build_spec_cmd.rs @@ -22,7 +22,7 @@ use crate::params::SharedParams; use crate::CliConfiguration; use log::info; use sc_network::config::build_multiaddr; -use sc_service::{config::MultiaddrWithPeerId, Configuration}; +use sc_service::{config::{MultiaddrWithPeerId, NetworkConfiguration}, ChainSpec}; use structopt::StructOpt; use std::io::Write; @@ -51,13 +51,16 @@ pub struct BuildSpecCmd { impl BuildSpecCmd { /// Run the build-spec command - pub fn run(&self, config: Configuration) -> error::Result<()> { + pub fn run( + &self, + mut spec: Box, + network_config: NetworkConfiguration + ) -> error::Result<()> { info!("Building chain spec"); - let mut spec = config.chain_spec; let raw_output = self.raw; if spec.boot_nodes().is_empty() && !self.disable_default_bootnode { - let keys = config.network.node_key.into_keypair()?; + let keys = network_config.node_key.into_keypair()?; let peer_id = keys.public().into_peer_id(); let addr = MultiaddrWithPeerId { multiaddr: build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(30333u16)], diff --git a/client/cli/src/commands/check_block_cmd.rs b/client/cli/src/commands/check_block_cmd.rs index c000ea7fb11ee..b3312f6e01580 100644 --- a/client/cli/src/commands/check_block_cmd.rs +++ b/client/cli/src/commands/check_block_cmd.rs @@ -16,13 +16,17 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use crate::{ - CliConfiguration, error, params::{ImportParams, SharedParams, BlockNumberOrHash}, -}; -use sc_service::{Configuration, ServiceBuilderCommand}; -use sp_runtime::traits::{Block as BlockT, NumberFor}; -use std::{fmt::Debug, str::FromStr}; +use crate::error; +use crate::params::ImportParams; +use crate::params::SharedParams; +use crate::params::BlockNumberOrHash; +use crate::CliConfiguration; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use std::fmt::Debug; +use std::str::FromStr; +use std::sync::Arc; use structopt::StructOpt; +use sc_client_api::{BlockBackend, UsageProvider}; /// The `check-block` command used to validate blocks. #[derive(Debug, StructOpt)] @@ -48,21 +52,22 @@ pub struct CheckBlockCmd { impl CheckBlockCmd { /// Run the check-block command - pub async fn run( + pub async fn run( &self, - config: Configuration, - builder: B, + client: Arc, + import_queue: IQ, ) -> error::Result<()> where - B: FnOnce(Configuration) -> Result, - BC: ServiceBuilderCommand + Unpin, - BB: BlockT + Debug, - as FromStr>::Err: std::fmt::Debug, - BB::Hash: FromStr, - ::Err: std::fmt::Debug, + B: BlockT + for<'de> serde::Deserialize<'de>, + C: BlockBackend + UsageProvider + Send + Sync + 'static, + IQ: sc_service::ImportQueue + 'static, + ::Hash: FromStr, + <::Hash as FromStr>::Err: Debug, + <<::Header as HeaderT>::Number as FromStr>::Err: Debug, { let start = std::time::Instant::now(); - builder(config)?.check_block(self.input.parse()?).await?; + let block_id = self.input.parse()?; + sc_service::chain_ops::check_block(client, import_queue, block_id).await?; println!("Completed in {} ms.", start.elapsed().as_millis()); Ok(()) diff --git a/client/cli/src/commands/export_blocks_cmd.rs b/client/cli/src/commands/export_blocks_cmd.rs index 7c523c0555d55..0db100adcbab0 100644 --- a/client/cli/src/commands/export_blocks_cmd.rs +++ b/client/cli/src/commands/export_blocks_cmd.rs @@ -21,13 +21,16 @@ use crate::params::{BlockNumber, DatabaseParams, 
PruningParams, SharedParams}; use crate::CliConfiguration; use log::info; use sc_service::{ - config::DatabaseConfig, Configuration, ServiceBuilderCommand, + config::DatabaseConfig, chain_ops::export_blocks, }; +use sc_client_api::{BlockBackend, UsageProvider}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use std::fmt::Debug; use std::fs; use std::io; use std::path::PathBuf; +use std::str::FromStr; +use std::sync::Arc; use structopt::StructOpt; /// The `export-blocks` command used to export blocks. @@ -68,19 +71,17 @@ pub struct ExportBlocksCmd { impl ExportBlocksCmd { /// Run the export-blocks command - pub async fn run( + pub async fn run( &self, - config: Configuration, - builder: B, + client: Arc, + database_config: DatabaseConfig, ) -> error::Result<()> where - B: FnOnce(Configuration) -> Result, - BC: ServiceBuilderCommand + Unpin, - BB: sp_runtime::traits::Block + Debug, - <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, - ::Hash: std::str::FromStr, + B: BlockT, + C: BlockBackend + UsageProvider + 'static, + <<::Header as HeaderT>::Number as FromStr>::Err: Debug, { - if let DatabaseConfig::RocksDb { ref path, .. } = &config.database { + if let DatabaseConfig::RocksDb { ref path, .. } = database_config { info!("DB path: {}", path.display()); } @@ -94,8 +95,7 @@ impl ExportBlocksCmd { None => Box::new(io::stdout()), }; - builder(config)? 
- .export_blocks(file, from.into(), to, binary) + export_blocks(client, file, from.into(), to, binary) .await .map_err(Into::into) } diff --git a/client/cli/src/commands/export_state_cmd.rs b/client/cli/src/commands/export_state_cmd.rs index 23a43a178abe5..84f92f33f7bb1 100644 --- a/client/cli/src/commands/export_state_cmd.rs +++ b/client/cli/src/commands/export_state_cmd.rs @@ -20,10 +20,10 @@ use crate::{ CliConfiguration, error, params::{PruningParams, SharedParams, BlockNumberOrHash}, }; use log::info; -use sc_service::{Configuration, ServiceBuilderCommand}; -use sp_runtime::traits::{Block as BlockT, NumberFor}; -use std::{fmt::Debug, str::FromStr, io::Write}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use std::{fmt::Debug, str::FromStr, io::Write, sync::Arc}; use structopt::StructOpt; +use sc_client_api::{StorageProvider, UsageProvider}; /// The `export-state` command used to export the state of a given block into /// a chain spec. @@ -44,23 +44,22 @@ pub struct ExportStateCmd { impl ExportStateCmd { /// Run the `export-state` command - pub fn run( + pub fn run( &self, - config: Configuration, - builder: B, + client: Arc, + mut input_spec: Box, ) -> error::Result<()> where - B: FnOnce(Configuration) -> Result, - BC: ServiceBuilderCommand + Unpin, - BB: BlockT + Debug, - as FromStr>::Err: std::fmt::Debug, - BB::Hash: FromStr, - ::Err: std::fmt::Debug, + B: BlockT, + C: UsageProvider + StorageProvider, + BA: sc_client_api::backend::Backend, + ::Hash: FromStr, + <::Hash as FromStr>::Err: Debug, + <<::Header as HeaderT>::Number as FromStr>::Err: Debug, { info!("Exporting raw state..."); - let mut input_spec = config.chain_spec.cloned_box(); let block_id = self.input.as_ref().map(|b| b.parse()).transpose()?; - let raw_state = builder(config)?.export_raw_state(block_id)?; + let raw_state = sc_service::chain_ops::export_raw_state(client, block_id)?; input_spec.set_storage(raw_state); info!("Generating new chain spec..."); diff --git 
a/client/cli/src/commands/import_blocks_cmd.rs b/client/cli/src/commands/import_blocks_cmd.rs index 8e178c4b97964..00f8ec43b02fe 100644 --- a/client/cli/src/commands/import_blocks_cmd.rs +++ b/client/cli/src/commands/import_blocks_cmd.rs @@ -20,13 +20,15 @@ use crate::error; use crate::params::ImportParams; use crate::params::SharedParams; use crate::CliConfiguration; -use sc_service::{Configuration, ServiceBuilderCommand}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use sc_service::chain_ops::import_blocks; +use sp_runtime::traits::Block as BlockT; use std::fmt::Debug; use std::fs; use std::io::{self, Read, Seek}; use std::path::PathBuf; +use std::sync::Arc; use structopt::StructOpt; +use sc_client_api::UsageProvider; /// The `import-blocks` command used to import blocks. #[derive(Debug, StructOpt)] @@ -61,17 +63,15 @@ impl ReadPlusSeek for T {} impl ImportBlocksCmd { /// Run the import-blocks command - pub async fn run( + pub async fn run( &self, - config: Configuration, - builder: B, + client: Arc, + import_queue: IQ, ) -> error::Result<()> where - B: FnOnce(Configuration) -> Result, - BC: ServiceBuilderCommand + Unpin, - BB: sp_runtime::traits::Block + Debug, - <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, - ::Hash: std::str::FromStr, + C: UsageProvider + Send + Sync + 'static, + B: BlockT + for<'de> serde::Deserialize<'de>, + IQ: sc_service::ImportQueue + 'static, { let file: Box = match &self.input { Some(filename) => Box::new(fs::File::open(filename)?), @@ -82,8 +82,7 @@ impl ImportBlocksCmd { } }; - builder(config)? 
- .import_blocks(file, false, self.binary) + import_blocks(client, import_queue, file, false, self.binary) .await .map_err(Into::into) } diff --git a/client/cli/src/commands/purge_chain_cmd.rs b/client/cli/src/commands/purge_chain_cmd.rs index 053f427309828..9c9c6e91fb241 100644 --- a/client/cli/src/commands/purge_chain_cmd.rs +++ b/client/cli/src/commands/purge_chain_cmd.rs @@ -19,7 +19,7 @@ use crate::error; use crate::params::{DatabaseParams, SharedParams}; use crate::CliConfiguration; -use sc_service::Configuration; +use sc_service::DatabaseConfig; use std::fmt::Debug; use std::fs; use std::io::{self, Write}; @@ -43,8 +43,8 @@ pub struct PurgeChainCmd { impl PurgeChainCmd { /// Run the purge command - pub fn run(&self, config: Configuration) -> error::Result<()> { - let db_path = config.database.path() + pub fn run(&self, database_config: DatabaseConfig) -> error::Result<()> { + let db_path = database_config.path() .ok_or_else(|| error::Error::Input("Cannot purge custom database implementation".into()) )?; diff --git a/client/cli/src/commands/revert_cmd.rs b/client/cli/src/commands/revert_cmd.rs index 1b5489df708a7..09d782f9b0dc5 100644 --- a/client/cli/src/commands/revert_cmd.rs +++ b/client/cli/src/commands/revert_cmd.rs @@ -19,10 +19,13 @@ use crate::error; use crate::params::{BlockNumber, PruningParams, SharedParams}; use crate::CliConfiguration; -use sc_service::{Configuration, ServiceBuilderCommand}; +use sc_service::chain_ops::revert_chain; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use std::fmt::Debug; +use std::str::FromStr; +use std::sync::Arc; use structopt::StructOpt; +use sc_client_api::{Backend, UsageProvider}; /// The `revert` command used revert the chain to a previous state. 
#[derive(Debug, StructOpt)] @@ -42,16 +45,17 @@ pub struct RevertCmd { impl RevertCmd { /// Run the revert command - pub fn run(&self, config: Configuration, builder: B) -> error::Result<()> + pub fn run( + &self, client: Arc, backend: Arc + ) -> error::Result<()> where - B: FnOnce(Configuration) -> Result, - BC: ServiceBuilderCommand + Unpin, - BB: sp_runtime::traits::Block + Debug, - <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, - ::Hash: std::str::FromStr, + B: BlockT, + BA: Backend, + C: UsageProvider, + <<::Header as HeaderT>::Number as FromStr>::Err: Debug, { let blocks = self.num.parse()?; - builder(config)?.revert_chain(blocks)?; + revert_chain(client, backend, blocks)?; Ok(()) } diff --git a/client/cli/src/runner.rs b/client/cli/src/runner.rs index fcc869dc87069..900a715a2e1f9 100644 --- a/client/cli/src/runner.rs +++ b/client/cli/src/runner.rs @@ -25,10 +25,11 @@ use futures::pin_mut; use futures::select; use futures::{future, future::FutureExt, Future}; use log::info; -use sc_service::{Configuration, ServiceBuilderCommand, TaskType, TaskManager}; +use sc_service::{Configuration, TaskType, TaskManager}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use sp_utils::metrics::{TOKIO_THREADS_ALIVE, TOKIO_THREADS_TOTAL}; -use std::{fmt::Debug, marker::PhantomData, str::FromStr}; +use std::{fmt::Debug, marker::PhantomData, str::FromStr, sync::Arc}; +use sc_client_api::{UsageProvider, BlockBackend, StorageProvider}; #[cfg(target_family = "unix")] async fn main(func: F) -> std::result::Result<(), Box> @@ -173,29 +174,47 @@ impl Runner { /// A helper function that runs a future with tokio and stops if the process receives the signal /// `SIGTERM` or `SIGINT`. 
- pub fn run_subcommand(self, subcommand: &Subcommand, builder: B) -> Result<()> + pub fn run_subcommand(self, subcommand: &Subcommand, builder: BU) + -> Result<()> where - B: FnOnce(Configuration) -> sc_service::error::Result, - BC: ServiceBuilderCommand + Unpin, - BB: sp_runtime::traits::Block + Debug, - <<::Header as HeaderT>::Number as FromStr>::Err: Debug, - ::Hash: FromStr, - <::Hash as FromStr>::Err: Debug, + BU: FnOnce(Configuration) + -> sc_service::error::Result<(Arc, Arc, IQ)>, + B: BlockT + for<'de> serde::Deserialize<'de>, + BA: sc_client_api::backend::Backend + 'static, + IQ: sc_service::ImportQueue + 'static, + ::Hash: FromStr, + <::Hash as FromStr>::Err: Debug, + <<::Header as HeaderT>::Number as FromStr>::Err: Debug, + CL: UsageProvider + BlockBackend + StorageProvider + Send + Sync + + 'static, { + let chain_spec = self.config.chain_spec.cloned_box(); + let network_config = self.config.network.clone(); + let db_config = self.config.database.clone(); + match subcommand { - Subcommand::BuildSpec(cmd) => cmd.run(self.config), + Subcommand::BuildSpec(cmd) => cmd.run(chain_spec, network_config), Subcommand::ExportBlocks(cmd) => { - run_until_exit(self.tokio_runtime, cmd.run(self.config, builder)) + let (client, _, _) = builder(self.config)?; + run_until_exit(self.tokio_runtime, cmd.run(client, db_config)) } Subcommand::ImportBlocks(cmd) => { - run_until_exit(self.tokio_runtime, cmd.run(self.config, builder)) + let (client, _, import_queue) = builder(self.config)?; + run_until_exit(self.tokio_runtime, cmd.run(client, import_queue)) } Subcommand::CheckBlock(cmd) => { - run_until_exit(self.tokio_runtime, cmd.run(self.config, builder)) + let (client, _, import_queue) = builder(self.config)?; + run_until_exit(self.tokio_runtime, cmd.run(client, import_queue)) } - Subcommand::Revert(cmd) => cmd.run(self.config, builder), - Subcommand::PurgeChain(cmd) => cmd.run(self.config), - Subcommand::ExportState(cmd) => cmd.run(self.config, builder), + 
Subcommand::Revert(cmd) => { + let (client, backend, _) = builder(self.config)?; + cmd.run(client, backend) + }, + Subcommand::PurgeChain(cmd) => cmd.run(db_config), + Subcommand::ExportState(cmd) => { + let (client, _, _) = builder(self.config)?; + cmd.run(client, chain_spec) + }, } } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 8c96f514ddaee..739e350d4d586 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -523,6 +523,11 @@ impl self.remote_backend.clone() } + /// Consume the builder and return the parts needed for chain operations. + pub fn to_chain_ops_parts(self) -> (Arc, Arc, TImpQu) { + (self.client, self.backend, self.import_queue) + } + /// Defines which head-of-chain strategy to use. pub fn with_opt_select_chain( self, diff --git a/client/service/src/chain_ops.rs b/client/service/src/chain_ops.rs deleted file mode 100644 index cb4ed24b60b62..0000000000000 --- a/client/service/src/chain_ops.rs +++ /dev/null @@ -1,614 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Chain utilities. 
- -use crate::error; -use crate::builder::{ServiceBuilderCommand, ServiceBuilder}; -use crate::error::Error; -use sc_chain_spec::ChainSpec; -use log::{warn, info}; -use futures::{future, prelude::*}; -use sp_runtime::traits::{ - Block as BlockT, NumberFor, One, Zero, Header, SaturatedConversion, MaybeSerializeDeserialize, -}; -use sp_runtime::generic::{BlockId, SignedBlock}; -use codec::{Decode, Encode, IoReader as CodecIoReader}; -use crate::client::{Client, LocalCallExecutor}; -use sp_consensus::{ - BlockOrigin, - import_queue::{IncomingBlock, Link, BlockImportError, BlockImportResult, ImportQueue}, -}; -use sc_executor::{NativeExecutor, NativeExecutionDispatch}; -use sp_core::storage::{StorageKey, well_known_keys, ChildInfo, Storage, StorageChild, StorageMap}; -use sc_client_api::{StorageProvider, BlockBackend, UsageProvider}; - -use std::{io::{Read, Write, Seek}, pin::Pin, collections::HashMap}; -use std::time::{Duration, Instant}; -use futures_timer::Delay; -use std::task::Poll; -use serde_json::{de::IoRead as JsonIoRead, Deserializer, StreamDeserializer}; -use std::convert::{TryFrom, TryInto}; -use sp_runtime::traits::{CheckedDiv, Saturating}; - -/// Number of blocks we will add to the queue before waiting for the queue to catch up. -const MAX_PENDING_BLOCKS: u64 = 1_024; - -/// Number of milliseconds to wait until next poll. -const DELAY_TIME: u64 = 2_000; - -/// Number of milliseconds that must have passed between two updates. -const TIME_BETWEEN_UPDATES: u64 = 3_000; - -/// Build a chain spec json -pub fn build_spec(spec: &dyn ChainSpec, raw: bool) -> error::Result { - spec.as_json(raw).map_err(Into::into) -} - - -/// Helper enum that wraps either a binary decoder (from parity-scale-codec), or a JSON decoder (from serde_json). -/// Implements the Iterator Trait, calling `next()` will decode the next SignedBlock and return it. -enum BlockIter where - R: std::io::Read + std::io::Seek, -{ - Binary { - // Total number of blocks we are expecting to decode. 
- num_expected_blocks: u64, - // Number of blocks we have decoded thus far. - read_block_count: u64, - // Reader to the data, used for decoding new blocks. - reader: CodecIoReader, - }, - Json { - // Nubmer of blocks we have decoded thus far. - read_block_count: u64, - // Stream to the data, used for decoding new blocks. - reader: StreamDeserializer<'static, JsonIoRead, SignedBlock>, - }, -} - -impl BlockIter where - R: Read + Seek + 'static, - B: BlockT + MaybeSerializeDeserialize, -{ - fn new(input: R, binary: bool) -> Result { - if binary { - let mut reader = CodecIoReader(input); - // If the file is encoded in binary format, it is expected to first specify the number - // of blocks that are going to be decoded. We read it and add it to our enum struct. - let num_expected_blocks: u64 = Decode::decode(&mut reader) - .map_err(|e| format!("Failed to decode the number of blocks: {:?}", e))?; - Ok(BlockIter::Binary { - num_expected_blocks, - read_block_count: 0, - reader, - }) - } else { - let stream_deser = Deserializer::from_reader(input) - .into_iter::>(); - Ok(BlockIter::Json { - reader: stream_deser, - read_block_count: 0, - }) - } - } - - /// Returns the number of blocks read thus far. - fn read_block_count(&self) -> u64 { - match self { - BlockIter::Binary { read_block_count, .. } - | BlockIter::Json { read_block_count, .. } - => *read_block_count, - } - } - - /// Returns the total number of blocks to be imported, if possible. 
- fn num_expected_blocks(&self) -> Option { - match self { - BlockIter::Binary { num_expected_blocks, ..} => Some(*num_expected_blocks), - BlockIter::Json {..} => None - } - } -} - -impl Iterator for BlockIter where - R: Read + Seek + 'static, - B: BlockT + MaybeSerializeDeserialize, -{ - type Item = Result, String>; - - fn next(&mut self) -> Option { - match self { - BlockIter::Binary { num_expected_blocks, read_block_count, reader } => { - if read_block_count < num_expected_blocks { - let block_result: Result, _> = SignedBlock::::decode(reader) - .map_err(|e| e.to_string()); - *read_block_count += 1; - Some(block_result) - } else { - // `read_block_count` == `num_expected_blocks` so we've read enough blocks. - None - } - } - BlockIter::Json { reader, read_block_count } => { - let res = Some(reader.next()?.map_err(|e| e.to_string())); - *read_block_count += 1; - res - } - } - } -} - -/// Imports the SignedBlock to the queue. -fn import_block_to_queue( - signed_block: SignedBlock, - queue: &mut TImpQu, - force: bool -) where - TBl: BlockT + MaybeSerializeDeserialize, - TImpQu: 'static + ImportQueue, -{ - let (header, extrinsics) = signed_block.block.deconstruct(); - let hash = header.hash(); - // import queue handles verification and importing it into the client. - queue.import_blocks(BlockOrigin::File, vec![ - IncomingBlock:: { - hash, - header: Some(header), - body: Some(extrinsics), - justification: signed_block.justification, - origin: None, - allow_missing_state: false, - import_existing: force, - } - ]); -} - -/// Returns true if we have imported every block we were supposed to import, else returns false. -fn importing_is_done( - num_expected_blocks: Option, - read_block_count: u64, - imported_blocks: u64 -) -> bool { - if let Some(num_expected_blocks) = num_expected_blocks { - imported_blocks >= num_expected_blocks - } else { - imported_blocks >= read_block_count - } -} - -/// Structure used to log the block importing speed. 
-struct Speedometer { - best_number: NumberFor, - last_number: Option>, - last_update: Instant, -} - -impl Speedometer { - /// Creates a fresh Speedometer. - fn new() -> Self { - Self { - best_number: NumberFor::::from(0), - last_number: None, - last_update: Instant::now(), - } - } - - /// Calculates `(best_number - last_number) / (now - last_update)` and - /// logs the speed of import. - fn display_speed(&self) { - // Number of milliseconds elapsed since last time. - let elapsed_ms = { - let elapsed = self.last_update.elapsed(); - let since_last_millis = elapsed.as_secs() * 1000; - let since_last_subsec_millis = elapsed.subsec_millis() as u64; - since_last_millis + since_last_subsec_millis - }; - - // Number of blocks that have been imported since last time. - let diff = match self.last_number { - None => return, - Some(n) => self.best_number.saturating_sub(n) - }; - - if let Ok(diff) = TryInto::::try_into(diff) { - // If the number of blocks can be converted to a regular integer, then it's easy: just - // do the math and turn it into a `f64`. - let speed = diff.saturating_mul(10_000).checked_div(u128::from(elapsed_ms)) - .map_or(0.0, |s| s as f64) / 10.0; - info!("📦 Current best block: {} ({:4.1} bps)", self.best_number, speed); - } else { - // If the number of blocks can't be converted to a regular integer, then we need a more - // algebraic approach and we stay within the realm of integers. - let one_thousand = NumberFor::::from(1_000); - let elapsed = NumberFor::::from( - >::try_from(elapsed_ms).unwrap_or(u32::max_value()) - ); - - let speed = diff.saturating_mul(one_thousand).checked_div(&elapsed) - .unwrap_or_else(Zero::zero); - info!("📦 Current best block: {} ({} bps)", self.best_number, speed) - } - } - - /// Updates the Speedometer. 
- fn update(&mut self, best_number: NumberFor) { - self.last_number = Some(self.best_number); - self.best_number = best_number; - self.last_update = Instant::now(); - } - - // If more than TIME_BETWEEN_UPDATES has elapsed since last update, - // then print and update the speedometer. - fn notify_user(&mut self, best_number: NumberFor) { - let delta = Duration::from_millis(TIME_BETWEEN_UPDATES); - if Instant::now().duration_since(self.last_update) >= delta { - self.display_speed(); - self.update(best_number); - } - } -} - -/// Different State that the `import_blocks` future could be in. -enum ImportState where - R: Read + Seek + 'static, - B: BlockT + MaybeSerializeDeserialize, -{ - /// We are reading from the BlockIter structure, adding those blocks to the queue if possible. - Reading{block_iter: BlockIter}, - /// The queue is full (contains at least MAX_PENDING_BLOCKS blocks) and we are waiting for it to catch up. - WaitingForImportQueueToCatchUp{ - block_iter: BlockIter, - delay: Delay, - block: SignedBlock - }, - // We have added all the blocks to the queue but they are still being processed. 
- WaitingForImportQueueToFinish{ - num_expected_blocks: Option, - read_block_count: u64, - delay: Delay, - }, -} - -impl< - TBl, TRtApi, TBackend, - TExecDisp, TFchr, TSc, TImpQu, TFprb, TFpp, - TExPool, TRpc, Backend -> ServiceBuilderCommand for ServiceBuilder< - TBl, TRtApi, - Client>, TBl, TRtApi>, - TFchr, TSc, TImpQu, TFprb, TFpp, TExPool, TRpc, Backend -> where - TBl: BlockT + MaybeSerializeDeserialize, - TBackend: 'static + sc_client_api::backend::Backend + Send, - TExecDisp: 'static + NativeExecutionDispatch, - TImpQu: 'static + ImportQueue, - TRtApi: 'static + Send + Sync, - Self: Send + 'static, -{ - type Block = TBl; - type NativeDispatch = TExecDisp; - - fn import_blocks( - mut self, - input: impl Read + Seek + Send + 'static, - force: bool, - binary: bool, - ) -> Pin> + Send>> { - struct WaitLink { - imported_blocks: u64, - has_error: bool, - } - - impl WaitLink { - fn new() -> WaitLink { - WaitLink { - imported_blocks: 0, - has_error: false, - } - } - } - - impl Link for WaitLink { - fn blocks_processed( - &mut self, - imported: usize, - _num_expected_blocks: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)> - ) { - self.imported_blocks += imported as u64; - - for result in results { - if let (Err(err), hash) = result { - warn!("There was an error importing block with hash {:?}: {:?}", hash, err); - self.has_error = true; - break; - } - } - } - } - - let mut link = WaitLink::new(); - let block_iter_res: Result, String> = BlockIter::new(input, binary); - - let block_iter = match block_iter_res { - Ok(block_iter) => block_iter, - Err(e) => { - // We've encountered an error while creating the block iterator - // so we can just return a future that returns an error. - return future::ready(Err(Error::Other(e))).boxed() - } - }; - - let mut state = Some(ImportState::Reading{block_iter}); - let mut speedometer = Speedometer::::new(); - - // Importing blocks is implemented as a future, because we want the operation to be - // interruptible. 
- // - // Every time we read a block from the input or import a bunch of blocks from the import - // queue, the `Future` re-schedules itself and returns `Poll::Pending`. - // This makes it possible either to interleave other operations in-between the block imports, - // or to stop the operation completely. - let import = future::poll_fn(move |cx| { - let client = &self.client; - let queue = &mut self.import_queue; - match state.take().expect("state should never be None; qed") { - ImportState::Reading{mut block_iter} => { - match block_iter.next() { - None => { - // The iterator is over: we now need to wait for the import queue to finish. - let num_expected_blocks = block_iter.num_expected_blocks(); - let read_block_count = block_iter.read_block_count(); - let delay = Delay::new(Duration::from_millis(DELAY_TIME)); - state = Some(ImportState::WaitingForImportQueueToFinish{num_expected_blocks, read_block_count, delay}); - }, - Some(block_result) => { - let read_block_count = block_iter.read_block_count(); - match block_result { - Ok(block) => { - if read_block_count - link.imported_blocks >= MAX_PENDING_BLOCKS { - // The queue is full, so do not add this block and simply wait until - // the queue has made some progress. - let delay = Delay::new(Duration::from_millis(DELAY_TIME)); - state = Some(ImportState::WaitingForImportQueueToCatchUp{block_iter, delay, block}); - } else { - // Queue is not full, we can keep on adding blocks to the queue. - import_block_to_queue(block, queue, force); - state = Some(ImportState::Reading{block_iter}); - } - } - Err(e) => { - return Poll::Ready( - Err(Error::Other(format!("Error reading block #{}: {}", read_block_count, e)))) - } - } - } - } - }, - ImportState::WaitingForImportQueueToCatchUp{block_iter, mut delay, block} => { - let read_block_count = block_iter.read_block_count(); - if read_block_count - link.imported_blocks >= MAX_PENDING_BLOCKS { - // Queue is still full, so wait until there is room to insert our block. 
- match Pin::new(&mut delay).poll(cx) { - Poll::Pending => { - state = Some(ImportState::WaitingForImportQueueToCatchUp{block_iter, delay, block}); - return Poll::Pending - }, - Poll::Ready(_) => { - delay.reset(Duration::from_millis(DELAY_TIME)); - }, - } - state = Some(ImportState::WaitingForImportQueueToCatchUp{block_iter, delay, block}); - } else { - // Queue is no longer full, so we can add our block to the queue. - import_block_to_queue(block, queue, force); - // Switch back to Reading state. - state = Some(ImportState::Reading{block_iter}); - } - }, - ImportState::WaitingForImportQueueToFinish{num_expected_blocks, read_block_count, mut delay} => { - // All the blocks have been added to the queue, which doesn't mean they - // have all been properly imported. - if importing_is_done(num_expected_blocks, read_block_count, link.imported_blocks) { - // Importing is done, we can log the result and return. - info!( - "🎉 Imported {} blocks. Best: #{}", - read_block_count, client.chain_info().best_number - ); - return Poll::Ready(Ok(())) - } else { - // Importing is not done, we still have to wait for the queue to finish. - // Wait for the delay, because we know the queue is lagging behind. 
- match Pin::new(&mut delay).poll(cx) { - Poll::Pending => { - state = Some(ImportState::WaitingForImportQueueToFinish{num_expected_blocks, read_block_count, delay}); - return Poll::Pending - }, - Poll::Ready(_) => { - delay.reset(Duration::from_millis(DELAY_TIME)); - }, - } - - state = Some(ImportState::WaitingForImportQueueToFinish{num_expected_blocks, read_block_count, delay}); - } - } - } - - queue.poll_actions(cx, &mut link); - - let best_number = client.chain_info().best_number; - speedometer.notify_user(best_number); - - if link.has_error { - return Poll::Ready(Err( - Error::Other( - format!("Stopping after #{} blocks because of an error", link.imported_blocks) - ) - )) - } - - cx.waker().wake_by_ref(); - Poll::Pending - }); - Box::pin(import) - } - - fn export_blocks( - self, - mut output: impl Write + 'static, - from: NumberFor, - to: Option>, - binary: bool - ) -> Pin>>> { - let mut block = from; - - let last = match to { - Some(v) if v.is_zero() => One::one(), - Some(v) => v, - None => self.client.chain_info().best_number, - }; - - let mut wrote_header = false; - - // Exporting blocks is implemented as a future, because we want the operation to be - // interruptible. - // - // Every time we write a block to the output, the `Future` re-schedules itself and returns - // `Poll::Pending`. - // This makes it possible either to interleave other operations in-between the block exports, - // or to stop the operation completely. - let export = future::poll_fn(move |cx| { - let client = &self.client; - - if last < block { - return Poll::Ready(Err("Invalid block range specified".into())); - } - - if !wrote_header { - info!("Exporting blocks from #{} to #{}", block, last); - if binary { - let last_: u64 = last.saturated_into::(); - let block_: u64 = block.saturated_into::(); - let len: u64 = last_ - block_ + 1; - output.write_all(&len.encode())?; - } - wrote_header = true; - } - - match client.block(&BlockId::number(block))? 
{ - Some(block) => { - if binary { - output.write_all(&block.encode())?; - } else { - serde_json::to_writer(&mut output, &block) - .map_err(|e| format!("Error writing JSON: {}", e))?; - } - }, - // Reached end of the chain. - None => return Poll::Ready(Ok(())), - } - if (block % 10000.into()).is_zero() { - info!("#{}", block); - } - if block == last { - return Poll::Ready(Ok(())); - } - block += One::one(); - - // Re-schedule the task in order to continue the operation. - cx.waker().wake_by_ref(); - Poll::Pending - }); - - Box::pin(export) - } - - fn revert_chain( - &self, - blocks: NumberFor - ) -> Result<(), Error> { - let reverted = self.client.revert(blocks)?; - let info = self.client.chain_info(); - - if reverted.is_zero() { - info!("There aren't any non-finalized blocks to revert."); - } else { - info!("Reverted {} blocks. Best: #{} ({})", reverted, info.best_number, info.best_hash); - } - Ok(()) - } - - fn check_block( - self, - block_id: BlockId - ) -> Pin> + Send>> { - match self.client.block(&block_id) { - Ok(Some(block)) => { - let mut buf = Vec::new(); - 1u64.encode_to(&mut buf); - block.encode_to(&mut buf); - let reader = std::io::Cursor::new(buf); - self.import_blocks(reader, true, true) - } - Ok(None) => Box::pin(future::err("Unknown block".into())), - Err(e) => Box::pin(future::err(format!("Error reading block: {:?}", e).into())), - } - } - - fn export_raw_state( - &self, - block: Option>, - ) -> Result { - let block = block.unwrap_or_else( - || BlockId::Hash(self.client.usage_info().chain.best_hash) - ); - - let empty_key = StorageKey(Vec::new()); - let mut top_storage = self.client.storage_pairs(&block, &empty_key)?; - let mut children_default = HashMap::new(); - - // Remove all default child storage roots from the top storage and collect the child storage - // pairs. 
- while let Some(pos) = top_storage - .iter() - .position(|(k, _)| k.0.starts_with(well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX)) { - let (key, _) = top_storage.swap_remove(pos); - - let key = StorageKey( - key.0[well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX.len()..].to_vec(), - ); - let child_info = ChildInfo::new_default(&key.0); - - let keys = self.client.child_storage_keys(&block, &child_info, &empty_key)?; - let mut pairs = StorageMap::new(); - keys.into_iter().try_for_each(|k| { - if let Some(value) = self.client.child_storage(&block, &child_info, &k)? { - pairs.insert(k.0, value.0); - } - - Ok::<_, Error>(()) - })?; - - children_default.insert(key.0, StorageChild { child_info, data: pairs }); - } - - let top = top_storage.into_iter().map(|(k, v)| (k.0, v.0)).collect(); - Ok(Storage { top, children_default }) - } -} diff --git a/client/service/src/chain_ops/check_block.rs b/client/service/src/chain_ops/check_block.rs new file mode 100644 index 0000000000000..34baeb55445a8 --- /dev/null +++ b/client/service/src/chain_ops/check_block.rs @@ -0,0 +1,51 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . 
+ +use crate::error::Error; +use futures::{future, prelude::*}; +use sp_runtime::traits::Block as BlockT; +use sp_runtime::generic::BlockId; +use codec::Encode; +use sp_consensus::import_queue::ImportQueue; +use sc_client_api::{BlockBackend, UsageProvider}; + +use std::pin::Pin; +use std::sync::Arc; +use crate::chain_ops::import_blocks; + +/// Re-validate known block. +pub fn check_block( + client: Arc, + import_queue: IQ, + block_id: BlockId +) -> Pin> + Send>> +where + C: BlockBackend + UsageProvider + Send + Sync + 'static, + B: BlockT + for<'de> serde::Deserialize<'de>, + IQ: ImportQueue + 'static, +{ + match client.block(&block_id) { + Ok(Some(block)) => { + let mut buf = Vec::new(); + 1u64.encode_to(&mut buf); + block.encode_to(&mut buf); + let reader = std::io::Cursor::new(buf); + import_blocks(client, import_queue, reader, true, true) + } + Ok(None) => Box::pin(future::err("Unknown block".into())), + Err(e) => Box::pin(future::err(format!("Error reading block: {:?}", e).into())), + } +} diff --git a/client/service/src/chain_ops/export_blocks.rs b/client/service/src/chain_ops/export_blocks.rs new file mode 100644 index 0000000000000..cca2f2830e52a --- /dev/null +++ b/client/service/src/chain_ops/export_blocks.rs @@ -0,0 +1,106 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . 
+ +use crate::error::Error; +use log::info; +use futures::{future, prelude::*}; +use sp_runtime::traits::{ + Block as BlockT, NumberFor, One, Zero, SaturatedConversion +}; +use sp_runtime::generic::BlockId; +use codec::Encode; + +use std::{io::Write, pin::Pin}; +use sc_client_api::{BlockBackend, UsageProvider}; +use std::sync::Arc; +use std::task::Poll; + +/// Performs the blocks export. +pub fn export_blocks( + client: Arc, + mut output: impl Write + 'static, + from: NumberFor, + to: Option>, + binary: bool +) -> Pin>>> +where + C: BlockBackend + UsageProvider + 'static, + B: BlockT, +{ + let mut block = from; + + let last = match to { + Some(v) if v.is_zero() => One::one(), + Some(v) => v, + None => client.usage_info().chain.best_number, + }; + + let mut wrote_header = false; + + // Exporting blocks is implemented as a future, because we want the operation to be + // interruptible. + // + // Every time we write a block to the output, the `Future` re-schedules itself and returns + // `Poll::Pending`. + // This makes it possible either to interleave other operations in-between the block exports, + // or to stop the operation completely. + let export = future::poll_fn(move |cx| { + let client = &client; + + if last < block { + return Poll::Ready(Err("Invalid block range specified".into())); + } + + if !wrote_header { + info!("Exporting blocks from #{} to #{}", block, last); + if binary { + let last_: u64 = last.saturated_into::(); + let block_: u64 = block.saturated_into::(); + let len: u64 = last_ - block_ + 1; + output.write_all(&len.encode())?; + } + wrote_header = true; + } + + match client.block(&BlockId::number(block))? { + Some(block) => { + if binary { + output.write_all(&block.encode())?; + } else { + serde_json::to_writer(&mut output, &block) + .map_err(|e| format!("Error writing JSON: {}", e))?; + } + }, + // Reached end of the chain. 
+ None => return Poll::Ready(Ok(())), + } + if (block % 10000.into()).is_zero() { + info!("#{}", block); + } + if block == last { + return Poll::Ready(Ok(())); + } + block += One::one(); + + // Re-schedule the task in order to continue the operation. + cx.waker().wake_by_ref(); + Poll::Pending + }); + + Box::pin(export) +} + + diff --git a/client/service/src/chain_ops/export_raw_state.rs b/client/service/src/chain_ops/export_raw_state.rs new file mode 100644 index 0000000000000..bbe12caf3afa2 --- /dev/null +++ b/client/service/src/chain_ops/export_raw_state.rs @@ -0,0 +1,71 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::error::Error; +use sp_runtime::traits::Block as BlockT; +use sp_runtime::generic::BlockId; +use sp_core::storage::{StorageKey, well_known_keys, ChildInfo, Storage, StorageChild, StorageMap}; +use sc_client_api::{StorageProvider, UsageProvider}; + +use std::{collections::HashMap, sync::Arc}; + +/// Export the raw state at the given `block`. If `block` is `None`, the +/// best block will be used. 
+pub fn export_raw_state( + client: Arc, + block: Option>, +) -> Result +where + C: UsageProvider + StorageProvider, + B: BlockT, + BA: sc_client_api::backend::Backend, +{ + let block = block.unwrap_or_else( + || BlockId::Hash(client.usage_info().chain.best_hash) + ); + + let empty_key = StorageKey(Vec::new()); + let mut top_storage = client.storage_pairs(&block, &empty_key)?; + let mut children_default = HashMap::new(); + + // Remove all default child storage roots from the top storage and collect the child storage + // pairs. + while let Some(pos) = top_storage + .iter() + .position(|(k, _)| k.0.starts_with(well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX)) { + let (key, _) = top_storage.swap_remove(pos); + + let key = StorageKey( + key.0[well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX.len()..].to_vec(), + ); + let child_info = ChildInfo::new_default(&key.0); + + let keys = client.child_storage_keys(&block, &child_info, &empty_key)?; + let mut pairs = StorageMap::new(); + keys.into_iter().try_for_each(|k| { + if let Some(value) = client.child_storage(&block, &child_info, &k)? { + pairs.insert(k.0, value.0); + } + + Ok::<_, Error>(()) + })?; + + children_default.insert(key.0, StorageChild { child_info, data: pairs }); + } + + let top = top_storage.into_iter().map(|(k, v)| (k.0, v.0)).collect(); + Ok(Storage { top, children_default }) +} diff --git a/client/service/src/chain_ops/import_blocks.rs b/client/service/src/chain_ops/import_blocks.rs new file mode 100644 index 0000000000000..46ad0d0501d93 --- /dev/null +++ b/client/service/src/chain_ops/import_blocks.rs @@ -0,0 +1,472 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::error; +use crate::error::Error; +use sc_chain_spec::ChainSpec; +use log::{warn, info}; +use futures::{future, prelude::*}; +use sp_runtime::traits::{ + Block as BlockT, NumberFor, Zero, Header, MaybeSerializeDeserialize, +}; +use sp_runtime::generic::SignedBlock; +use codec::{Decode, IoReader as CodecIoReader}; +use sp_consensus::{ + BlockOrigin, + import_queue::{IncomingBlock, Link, BlockImportError, BlockImportResult, ImportQueue}, +}; + +use std::{io::{Read, Seek}, pin::Pin}; +use std::time::{Duration, Instant}; +use futures_timer::Delay; +use std::task::Poll; +use serde_json::{de::IoRead as JsonIoRead, Deserializer, StreamDeserializer}; +use std::convert::{TryFrom, TryInto}; +use sp_runtime::traits::{CheckedDiv, Saturating}; +use sc_client_api::UsageProvider; + +/// Number of blocks we will add to the queue before waiting for the queue to catch up. +const MAX_PENDING_BLOCKS: u64 = 1_024; + +/// Number of milliseconds to wait until next poll. +const DELAY_TIME: u64 = 2_000; + +/// Number of milliseconds that must have passed between two updates. 
+const TIME_BETWEEN_UPDATES: u64 = 3_000; + +use std::sync::Arc; + +/// Build a chain spec json +pub fn build_spec(spec: &dyn ChainSpec, raw: bool) -> error::Result { + spec.as_json(raw).map_err(Into::into) +} + + +/// Helper enum that wraps either a binary decoder (from parity-scale-codec), or a JSON decoder +/// (from serde_json). Implements the Iterator Trait, calling `next()` will decode the next +/// SignedBlock and return it. +enum BlockIter where + R: std::io::Read + std::io::Seek, +{ + Binary { + // Total number of blocks we are expecting to decode. + num_expected_blocks: u64, + // Number of blocks we have decoded thus far. + read_block_count: u64, + // Reader to the data, used for decoding new blocks. + reader: CodecIoReader, + }, + Json { + // Number of blocks we have decoded thus far. + read_block_count: u64, + // Stream to the data, used for decoding new blocks. + reader: StreamDeserializer<'static, JsonIoRead, SignedBlock>, + }, +} + +impl BlockIter where + R: Read + Seek + 'static, + B: BlockT + MaybeSerializeDeserialize, +{ + fn new(input: R, binary: bool) -> Result { + if binary { + let mut reader = CodecIoReader(input); + // If the file is encoded in binary format, it is expected to first specify the number + // of blocks that are going to be decoded. We read it and add it to our enum struct. + let num_expected_blocks: u64 = Decode::decode(&mut reader) + .map_err(|e| format!("Failed to decode the number of blocks: {:?}", e))?; + Ok(BlockIter::Binary { + num_expected_blocks, + read_block_count: 0, + reader, + }) + } else { + let stream_deser = Deserializer::from_reader(input) + .into_iter::>(); + Ok(BlockIter::Json { + reader: stream_deser, + read_block_count: 0, + }) + } + } + + /// Returns the number of blocks read thus far. + fn read_block_count(&self) -> u64 { + match self { + BlockIter::Binary { read_block_count, .. } + | BlockIter::Json { read_block_count, .. 
} + => *read_block_count, + } + } + + /// Returns the total number of blocks to be imported, if possible. + fn num_expected_blocks(&self) -> Option { + match self { + BlockIter::Binary { num_expected_blocks, ..} => Some(*num_expected_blocks), + BlockIter::Json {..} => None + } + } +} + +impl Iterator for BlockIter where + R: Read + Seek + 'static, + B: BlockT + MaybeSerializeDeserialize, +{ + type Item = Result, String>; + + fn next(&mut self) -> Option { + match self { + BlockIter::Binary { num_expected_blocks, read_block_count, reader } => { + if read_block_count < num_expected_blocks { + let block_result: Result, _> = SignedBlock::::decode(reader) + .map_err(|e| e.to_string()); + *read_block_count += 1; + Some(block_result) + } else { + // `read_block_count` == `num_expected_blocks` so we've read enough blocks. + None + } + } + BlockIter::Json { reader, read_block_count } => { + let res = Some(reader.next()?.map_err(|e| e.to_string())); + *read_block_count += 1; + res + } + } + } +} + +/// Imports the SignedBlock to the queue. +fn import_block_to_queue( + signed_block: SignedBlock, + queue: &mut TImpQu, + force: bool +) where + TBl: BlockT + MaybeSerializeDeserialize, + TImpQu: 'static + ImportQueue, +{ + let (header, extrinsics) = signed_block.block.deconstruct(); + let hash = header.hash(); + // import queue handles verification and importing it into the client. + queue.import_blocks(BlockOrigin::File, vec![ + IncomingBlock:: { + hash, + header: Some(header), + body: Some(extrinsics), + justification: signed_block.justification, + origin: None, + allow_missing_state: false, + import_existing: force, + } + ]); +} + +/// Returns true if we have imported every block we were supposed to import, else returns false. 
+fn importing_is_done( + num_expected_blocks: Option, + read_block_count: u64, + imported_blocks: u64 +) -> bool { + if let Some(num_expected_blocks) = num_expected_blocks { + imported_blocks >= num_expected_blocks + } else { + imported_blocks >= read_block_count + } +} + +/// Structure used to log the block importing speed. +struct Speedometer { + best_number: NumberFor, + last_number: Option>, + last_update: Instant, +} + +impl Speedometer { + /// Creates a fresh Speedometer. + fn new() -> Self { + Self { + best_number: NumberFor::::from(0), + last_number: None, + last_update: Instant::now(), + } + } + + /// Calculates `(best_number - last_number) / (now - last_update)` and + /// logs the speed of import. + fn display_speed(&self) { + // Number of milliseconds elapsed since last time. + let elapsed_ms = { + let elapsed = self.last_update.elapsed(); + let since_last_millis = elapsed.as_secs() * 1000; + let since_last_subsec_millis = elapsed.subsec_millis() as u64; + since_last_millis + since_last_subsec_millis + }; + + // Number of blocks that have been imported since last time. + let diff = match self.last_number { + None => return, + Some(n) => self.best_number.saturating_sub(n) + }; + + if let Ok(diff) = TryInto::::try_into(diff) { + // If the number of blocks can be converted to a regular integer, then it's easy: just + // do the math and turn it into a `f64`. + let speed = diff.saturating_mul(10_000).checked_div(u128::from(elapsed_ms)) + .map_or(0.0, |s| s as f64) / 10.0; + info!("📦 Current best block: {} ({:4.1} bps)", self.best_number, speed); + } else { + // If the number of blocks can't be converted to a regular integer, then we need a more + // algebraic approach and we stay within the realm of integers. 
+ let one_thousand = NumberFor::::from(1_000); + let elapsed = NumberFor::::from( + >::try_from(elapsed_ms).unwrap_or(u32::max_value()) + ); + + let speed = diff.saturating_mul(one_thousand).checked_div(&elapsed) + .unwrap_or_else(Zero::zero); + info!("📦 Current best block: {} ({} bps)", self.best_number, speed) + } + } + + /// Updates the Speedometer. + fn update(&mut self, best_number: NumberFor) { + self.last_number = Some(self.best_number); + self.best_number = best_number; + self.last_update = Instant::now(); + } + + // If more than TIME_BETWEEN_UPDATES has elapsed since last update, + // then print and update the speedometer. + fn notify_user(&mut self, best_number: NumberFor) { + let delta = Duration::from_millis(TIME_BETWEEN_UPDATES); + if Instant::now().duration_since(self.last_update) >= delta { + self.display_speed(); + self.update(best_number); + } + } +} + +/// Different State that the `import_blocks` future could be in. +enum ImportState where + R: Read + Seek + 'static, + B: BlockT + MaybeSerializeDeserialize, +{ + /// We are reading from the BlockIter structure, adding those blocks to the queue if possible. + Reading{block_iter: BlockIter}, + /// The queue is full (contains at least MAX_PENDING_BLOCKS blocks) and we are waiting for it to + /// catch up. + WaitingForImportQueueToCatchUp{ + block_iter: BlockIter, + delay: Delay, + block: SignedBlock + }, + // We have added all the blocks to the queue but they are still being processed. + WaitingForImportQueueToFinish{ + num_expected_blocks: Option, + read_block_count: u64, + delay: Delay, + }, +} + +/// Starts the process of importing blocks. 
+pub fn import_blocks( + client: Arc, + mut import_queue: IQ, + input: impl Read + Seek + Send + 'static, + force: bool, + binary: bool, +) -> Pin> + Send>> +where + C: UsageProvider + Send + Sync + 'static, + B: BlockT + for<'de> serde::Deserialize<'de>, + IQ: ImportQueue + 'static, +{ + struct WaitLink { + imported_blocks: u64, + has_error: bool, + } + + impl WaitLink { + fn new() -> WaitLink { + WaitLink { + imported_blocks: 0, + has_error: false, + } + } + } + + impl Link for WaitLink { + fn blocks_processed( + &mut self, + imported: usize, + _num_expected_blocks: usize, + results: Vec<(Result>, BlockImportError>, B::Hash)> + ) { + self.imported_blocks += imported as u64; + + for result in results { + if let (Err(err), hash) = result { + warn!("There was an error importing block with hash {:?}: {:?}", hash, err); + self.has_error = true; + break; + } + } + } + } + + let mut link = WaitLink::new(); + let block_iter_res: Result, String> = BlockIter::new(input, binary); + + let block_iter = match block_iter_res { + Ok(block_iter) => block_iter, + Err(e) => { + // We've encountered an error while creating the block iterator + // so we can just return a future that returns an error. + return future::ready(Err(Error::Other(e))).boxed() + } + }; + + let mut state = Some(ImportState::Reading{block_iter}); + let mut speedometer = Speedometer::::new(); + + // Importing blocks is implemented as a future, because we want the operation to be + // interruptible. + // + // Every time we read a block from the input or import a bunch of blocks from the import + // queue, the `Future` re-schedules itself and returns `Poll::Pending`. + // This makes it possible either to interleave other operations in-between the block imports, + // or to stop the operation completely. 
+ let import = future::poll_fn(move |cx| { + let client = &client; + let queue = &mut import_queue; + match state.take().expect("state should never be None; qed") { + ImportState::Reading{mut block_iter} => { + match block_iter.next() { + None => { + // The iterator is over: we now need to wait for the import queue to finish. + let num_expected_blocks = block_iter.num_expected_blocks(); + let read_block_count = block_iter.read_block_count(); + let delay = Delay::new(Duration::from_millis(DELAY_TIME)); + state = Some(ImportState::WaitingForImportQueueToFinish { + num_expected_blocks, read_block_count, delay + }); + }, + Some(block_result) => { + let read_block_count = block_iter.read_block_count(); + match block_result { + Ok(block) => { + if read_block_count - link.imported_blocks >= MAX_PENDING_BLOCKS { + // The queue is full, so do not add this block and simply wait + // until the queue has made some progress. + let delay = Delay::new(Duration::from_millis(DELAY_TIME)); + state = Some(ImportState::WaitingForImportQueueToCatchUp { + block_iter, delay, block + }); + } else { + // Queue is not full, we can keep on adding blocks to the queue. + import_block_to_queue(block, queue, force); + state = Some(ImportState::Reading{block_iter}); + } + } + Err(e) => { + return Poll::Ready( + Err(Error::Other( + format!("Error reading block #{}: {}", read_block_count, e) + ))) + } + } + } + } + }, + ImportState::WaitingForImportQueueToCatchUp{block_iter, mut delay, block} => { + let read_block_count = block_iter.read_block_count(); + if read_block_count - link.imported_blocks >= MAX_PENDING_BLOCKS { + // Queue is still full, so wait until there is room to insert our block. 
+ match Pin::new(&mut delay).poll(cx) { + Poll::Pending => { + state = Some(ImportState::WaitingForImportQueueToCatchUp { + block_iter, delay, block + }); + return Poll::Pending + }, + Poll::Ready(_) => { + delay.reset(Duration::from_millis(DELAY_TIME)); + }, + } + state = Some(ImportState::WaitingForImportQueueToCatchUp { + block_iter, delay, block + }); + } else { + // Queue is no longer full, so we can add our block to the queue. + import_block_to_queue(block, queue, force); + // Switch back to Reading state. + state = Some(ImportState::Reading{block_iter}); + } + }, + ImportState::WaitingForImportQueueToFinish { + num_expected_blocks, read_block_count, mut delay + } => { + // All the blocks have been added to the queue, which doesn't mean they + // have all been properly imported. + if importing_is_done(num_expected_blocks, read_block_count, link.imported_blocks) { + // Importing is done, we can log the result and return. + info!( + "🎉 Imported {} blocks. Best: #{}", + read_block_count, client.usage_info().chain.best_number + ); + return Poll::Ready(Ok(())) + } else { + // Importing is not done, we still have to wait for the queue to finish. + // Wait for the delay, because we know the queue is lagging behind. 
+ match Pin::new(&mut delay).poll(cx) { + Poll::Pending => { + state = Some(ImportState::WaitingForImportQueueToFinish { + num_expected_blocks, read_block_count, delay + }); + return Poll::Pending + }, + Poll::Ready(_) => { + delay.reset(Duration::from_millis(DELAY_TIME)); + }, + } + + state = Some(ImportState::WaitingForImportQueueToFinish { + num_expected_blocks, read_block_count, delay + }); + } + } + } + + queue.poll_actions(cx, &mut link); + + let best_number = client.usage_info().chain.best_number; + speedometer.notify_user(best_number); + + if link.has_error { + return Poll::Ready(Err( + Error::Other( + format!("Stopping after #{} blocks because of an error", link.imported_blocks) + ) + )) + } + + cx.waker().wake_by_ref(); + Poll::Pending + }); + Box::pin(import) +} diff --git a/client/service/src/chain_ops/mod.rs b/client/service/src/chain_ops/mod.rs new file mode 100644 index 0000000000000..af6e6f632fc06 --- /dev/null +++ b/client/service/src/chain_ops/mod.rs @@ -0,0 +1,29 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Chain utilities. 
+ +mod check_block; +mod export_blocks; +mod export_raw_state; +mod import_blocks; +mod revert_chain; + +pub use check_block::*; +pub use export_blocks::*; +pub use export_raw_state::*; +pub use import_blocks::*; +pub use revert_chain::*; diff --git a/client/service/src/chain_ops/revert_chain.rs b/client/service/src/chain_ops/revert_chain.rs new file mode 100644 index 0000000000000..129aea0408685 --- /dev/null +++ b/client/service/src/chain_ops/revert_chain.rs @@ -0,0 +1,43 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::error::Error; +use log::info; +use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; +use sc_client_api::{Backend, UsageProvider}; +use std::sync::Arc; + +/// Performs a revert of `blocks` blocks. +pub fn revert_chain( + client: Arc, + backend: Arc, + blocks: NumberFor +) -> Result<(), Error> +where + B: BlockT, + C: UsageProvider, + BA: Backend, +{ + let reverted = backend.revert(blocks, false)?; + let info = client.usage_info().chain; + + if reverted.is_zero() { + info!("There aren't any non-finalized blocks to revert."); + } else { + info!("Reverted {} blocks. 
Best: #{} ({})", reverted, info.best_number, info.best_hash); + } + Ok(()) +} diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index c3c8f60e689ad..e88706a46df6a 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -23,7 +23,6 @@ #![recursion_limit="128"] pub mod config; -#[macro_use] pub mod chain_ops; pub mod error; @@ -79,6 +78,7 @@ pub use sc_network::config::{ pub use sc_tracing::TracingReceiver; pub use task_manager::SpawnTaskHandle; pub use task_manager::TaskManager; +pub use sp_consensus::import_queue::ImportQueue; use sc_client_api::{Backend, BlockchainEvents}; const DEFAULT_PROTOCOL_ID: &str = "sup"; From 31c65a6a2b38cd65e3771730da28c019fb44a7bc Mon Sep 17 00:00:00 2001 From: Ashley Ruglys Date: Tue, 30 Jun 2020 15:13:15 +0200 Subject: [PATCH 02/24] Slight tidy --- client/cli/src/commands/check_block_cmd.rs | 23 ++++++++------------ client/cli/src/commands/export_blocks_cmd.rs | 2 +- client/cli/src/commands/export_state_cmd.rs | 6 ++--- 3 files changed, 13 insertions(+), 18 deletions(-) diff --git a/client/cli/src/commands/check_block_cmd.rs b/client/cli/src/commands/check_block_cmd.rs index b3312f6e01580..b536d4f26bb6c 100644 --- a/client/cli/src/commands/check_block_cmd.rs +++ b/client/cli/src/commands/check_block_cmd.rs @@ -16,17 +16,13 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use crate::error; -use crate::params::ImportParams; -use crate::params::SharedParams; -use crate::params::BlockNumberOrHash; -use crate::CliConfiguration; +use crate::{ + CliConfiguration, error, params::{ImportParams, SharedParams, BlockNumberOrHash}, +}; +use sc_client_api::{BlockBackend, UsageProvider}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; -use std::fmt::Debug; -use std::str::FromStr; -use std::sync::Arc; +use std::{fmt::Debug, str::FromStr, sync::Arc}; use structopt::StructOpt; -use sc_client_api::{BlockBackend, UsageProvider}; /// The `check-block` command used to validate blocks. #[derive(Debug, StructOpt)] @@ -61,13 +57,12 @@ impl CheckBlockCmd { B: BlockT + for<'de> serde::Deserialize<'de>, C: BlockBackend + UsageProvider + Send + Sync + 'static, IQ: sc_service::ImportQueue + 'static, - ::Hash: FromStr, - <::Hash as FromStr>::Err: Debug, - <<::Header as HeaderT>::Number as FromStr>::Err: Debug, + B::Hash: FromStr, + ::Err: Debug, + <::Number as FromStr>::Err: Debug, { let start = std::time::Instant::now(); - let block_id = self.input.parse()?; - sc_service::chain_ops::check_block(client, import_queue, block_id).await?; + sc_service::chain_ops::check_block(client, import_queue, self.input.parse()?).await?; println!("Completed in {} ms.", start.elapsed().as_millis()); Ok(()) diff --git a/client/cli/src/commands/export_blocks_cmd.rs b/client/cli/src/commands/export_blocks_cmd.rs index 0db100adcbab0..118832a79d29d 100644 --- a/client/cli/src/commands/export_blocks_cmd.rs +++ b/client/cli/src/commands/export_blocks_cmd.rs @@ -79,7 +79,7 @@ impl ExportBlocksCmd { where B: BlockT, C: BlockBackend + UsageProvider + 'static, - <<::Header as HeaderT>::Number as FromStr>::Err: Debug, + <::Number as FromStr>::Err: Debug, { if let DatabaseConfig::RocksDb { ref path, .. 
} = database_config { info!("DB path: {}", path.display()); diff --git a/client/cli/src/commands/export_state_cmd.rs b/client/cli/src/commands/export_state_cmd.rs index 84f92f33f7bb1..689c99e829979 100644 --- a/client/cli/src/commands/export_state_cmd.rs +++ b/client/cli/src/commands/export_state_cmd.rs @@ -53,9 +53,9 @@ impl ExportStateCmd { B: BlockT, C: UsageProvider + StorageProvider, BA: sc_client_api::backend::Backend, - ::Hash: FromStr, - <::Hash as FromStr>::Err: Debug, - <<::Header as HeaderT>::Number as FromStr>::Err: Debug, + B::Hash: FromStr, + ::Err: Debug, + <::Number as FromStr>::Err: Debug, { info!("Exporting raw state..."); let block_id = self.input.as_ref().map(|b| b.parse()).transpose()?; From 6ad0d792779fcbddc8f461fa42bb7e64c71c63f3 Mon Sep 17 00:00:00 2001 From: Ashley Ruglys Date: Tue, 30 Jun 2020 15:20:25 +0200 Subject: [PATCH 03/24] Remove ServiceBuilderCommand --- client/service/src/builder.rs | 53 ++--------------------------------- client/service/src/lib.rs | 2 +- 2 files changed, 3 insertions(+), 52 deletions(-) diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 739e350d4d586..d09f4d8457335 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -45,15 +45,11 @@ use sc_network::NetworkService; use parking_lot::{Mutex, RwLock}; use sp_runtime::generic::BlockId; use sp_runtime::traits::{ - Block as BlockT, NumberFor, SaturatedConversion, HashFor, Zero, BlockIdTo, + Block as BlockT, SaturatedConversion, HashFor, Zero, BlockIdTo, }; use sp_api::{ProvideRuntimeApi, CallApiAt}; use sc_executor::{NativeExecutor, NativeExecutionDispatch, RuntimeInfo}; -use std::{ - collections::HashMap, - io::{Read, Write, Seek}, - marker::PhantomData, sync::Arc, pin::Pin -}; +use std::{collections::HashMap, marker::PhantomData, sync::Arc, pin::Pin}; use wasm_timer::SystemTime; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; use sp_transaction_pool::{LocalTransactionPool, MaintainedTransactionPool}; @@ -67,7 
+63,6 @@ use sc_client_api::{ proof_provider::ProofProvider, execution_extensions::ExecutionExtensions }; -use sp_core::storage::Storage; use sp_blockchain::{HeaderMetadata, HeaderBackend}; use crate::{ServiceComponents, TelemetryOnConnectSinks, RpcHandlers, NetworkStatusSinks}; @@ -845,50 +840,6 @@ impl } } -/// Implemented on `ServiceBuilder`. Allows running block commands, such as import/export/validate -/// components to the builder. -pub trait ServiceBuilderCommand { - /// Block type this API operates on. - type Block: BlockT; - /// Native execution dispatch required by some commands. - type NativeDispatch: NativeExecutionDispatch + 'static; - /// Starts the process of importing blocks. - fn import_blocks( - self, - input: impl Read + Seek + Send + 'static, - force: bool, - binary: bool, - ) -> Pin> + Send>>; - - /// Performs the blocks export. - fn export_blocks( - self, - output: impl Write + 'static, - from: NumberFor, - to: Option>, - binary: bool - ) -> Pin>>>; - - /// Performs a revert of `blocks` blocks. - fn revert_chain( - &self, - blocks: NumberFor - ) -> Result<(), Error>; - - /// Re-validate known block. - fn check_block( - self, - block: BlockId - ) -> Pin> + Send>>; - - /// Export the raw state at the given `block`. If `block` is `None`, the - /// best block will be used. 
- fn export_raw_state( - &self, - block: Option>, - ) -> Result; -} - impl ServiceBuilder< TBl, diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index e88706a46df6a..1d41490956858 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -54,7 +54,7 @@ use sp_utils::{status_sinks, mpsc::{tracing_unbounded, TracingUnboundedReceiver, pub use self::error::Error; pub use self::builder::{ new_full_client, new_client, - ServiceBuilder, ServiceBuilderCommand, TFullClient, TLightClient, TFullBackend, TLightBackend, + ServiceBuilder, TFullClient, TLightClient, TFullBackend, TLightBackend, TFullCallExecutor, TLightCallExecutor, RpcExtensionBuilder, }; pub use config::{ From b7399d6ac14d0691f2960e4303f5dd6aa79e9cab Mon Sep 17 00:00:00 2001 From: Ashley Date: Tue, 30 Jun 2020 16:21:33 +0200 Subject: [PATCH 04/24] Remove whitespace --- client/service/src/chain_ops/export_blocks.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/client/service/src/chain_ops/export_blocks.rs b/client/service/src/chain_ops/export_blocks.rs index cca2f2830e52a..2f32cbf7fbdb7 100644 --- a/client/service/src/chain_ops/export_blocks.rs +++ b/client/service/src/chain_ops/export_blocks.rs @@ -102,5 +102,3 @@ where Box::pin(export) } - - From 4020ff40ae7eb72116b266b7ec6793ff957540c3 Mon Sep 17 00:00:00 2001 From: Ashley Ruglys Date: Wed, 1 Jul 2020 13:23:24 +0200 Subject: [PATCH 05/24] Keep task manager alive for check_block/import_blocks --- client/cli/src/runner.rs | 12 ++++++------ client/service/src/builder.rs | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/client/cli/src/runner.rs b/client/cli/src/runner.rs index 900a715a2e1f9..97d61836d3ee9 100644 --- a/client/cli/src/runner.rs +++ b/client/cli/src/runner.rs @@ -178,7 +178,7 @@ impl Runner { -> Result<()> where BU: FnOnce(Configuration) - -> sc_service::error::Result<(Arc, Arc, IQ)>, + -> sc_service::error::Result<(Arc, Arc, IQ, TaskManager)>, B: BlockT + for<'de> serde::Deserialize<'de>, 
BA: sc_client_api::backend::Backend + 'static, IQ: sc_service::ImportQueue + 'static, @@ -195,24 +195,24 @@ impl Runner { match subcommand { Subcommand::BuildSpec(cmd) => cmd.run(chain_spec, network_config), Subcommand::ExportBlocks(cmd) => { - let (client, _, _) = builder(self.config)?; + let (client, _, _, _) = builder(self.config)?; run_until_exit(self.tokio_runtime, cmd.run(client, db_config)) } Subcommand::ImportBlocks(cmd) => { - let (client, _, import_queue) = builder(self.config)?; + let (client, _, import_queue, _task_manager) = builder(self.config)?; run_until_exit(self.tokio_runtime, cmd.run(client, import_queue)) } Subcommand::CheckBlock(cmd) => { - let (client, _, import_queue) = builder(self.config)?; + let (client, _, import_queue, _task_manager) = builder(self.config)?; run_until_exit(self.tokio_runtime, cmd.run(client, import_queue)) } Subcommand::Revert(cmd) => { - let (client, backend, _) = builder(self.config)?; + let (client, backend, _, _) = builder(self.config)?; cmd.run(client, backend) }, Subcommand::PurgeChain(cmd) => cmd.run(db_config), Subcommand::ExportState(cmd) => { - let (client, _, _) = builder(self.config)?; + let (client, _, _, _) = builder(self.config)?; cmd.run(client, chain_spec) }, } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 247027e8e6982..3a1c5c85af5da 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -519,8 +519,8 @@ impl } /// Consume the builder and return the parts needed for chain operations. - pub fn to_chain_ops_parts(self) -> (Arc, Arc, TImpQu) { - (self.client, self.backend, self.import_queue) + pub fn to_chain_ops_parts(self) -> (Arc, Arc, TImpQu, TaskManager) { + (self.client, self.backend, self.import_queue, self.task_manager) } /// Defines which head-of-chain strategy to use. 
From 23c6f29af0aedb52af9bbf45ea77bc2541f8b7f7 Mon Sep 17 00:00:00 2001 From: Ashley Ruglys Date: Wed, 1 Jul 2020 14:14:47 +0200 Subject: [PATCH 06/24] Pass task_manager to run_until_exit --- client/cli/src/runner.rs | 42 ++++++++++++++++++++++++++++------------ 1 file changed, 30 insertions(+), 12 deletions(-) diff --git a/client/cli/src/runner.rs b/client/cli/src/runner.rs index 97d61836d3ee9..8bf81b1a13586 100644 --- a/client/cli/src/runner.rs +++ b/client/cli/src/runner.rs @@ -93,7 +93,9 @@ pub fn build_runtime() -> std::result::Result(mut tokio_runtime: tokio::runtime::Runtime, future: FUT) -> Result<()> +fn run_until_exit( + mut tokio_runtime: tokio::runtime::Runtime, future: FUT, mut task_manager: Option +) -> Result<()> where FUT: Future> + future::Future, ERR: 'static + std::error::Error, @@ -103,6 +105,12 @@ where tokio_runtime.block_on(main(f)).map_err(|e| e.to_string())?; + if let Some(task_manager) = task_manager.as_mut() { + task_manager.terminate(); + } + + drop(tokio_runtime); + Ok(()) } @@ -195,25 +203,35 @@ impl Runner { match subcommand { Subcommand::BuildSpec(cmd) => cmd.run(chain_spec, network_config), Subcommand::ExportBlocks(cmd) => { - let (client, _, _, _) = builder(self.config)?; - run_until_exit(self.tokio_runtime, cmd.run(client, db_config)) + let (client, _, _, task_manager) = builder(self.config)?; + run_until_exit( + self.tokio_runtime, cmd.run(client, db_config), Some(task_manager) + ) } Subcommand::ImportBlocks(cmd) => { - let (client, _, import_queue, _task_manager) = builder(self.config)?; - run_until_exit(self.tokio_runtime, cmd.run(client, import_queue)) + let (client, _, import_queue, task_manager) = builder(self.config)?; + run_until_exit( + self.tokio_runtime, cmd.run(client, import_queue), Some(task_manager) + ) } Subcommand::CheckBlock(cmd) => { - let (client, _, import_queue, _task_manager) = builder(self.config)?; - run_until_exit(self.tokio_runtime, cmd.run(client, import_queue)) + let (client, _, import_queue, 
task_manager) = builder(self.config)?; + run_until_exit( + self.tokio_runtime, cmd.run(client, import_queue), Some(task_manager) + ) } Subcommand::Revert(cmd) => { - let (client, backend, _, _) = builder(self.config)?; - cmd.run(client, backend) + let (client, backend, _, mut task_manager) = builder(self.config)?; + cmd.run(client, backend)?; + task_manager.terminate(); + Ok(()) }, Subcommand::PurgeChain(cmd) => cmd.run(db_config), Subcommand::ExportState(cmd) => { - let (client, _, _, _) = builder(self.config)?; - cmd.run(client, chain_spec) + let (client, _, _, mut task_manager) = builder(self.config)?; + cmd.run(client, chain_spec)?; + task_manager.terminate(); + Ok(()) }, } } @@ -244,7 +262,7 @@ impl Runner { where FUT: Future>, { - run_until_exit(self.tokio_runtime, runner(self.config)) + run_until_exit(self.tokio_runtime, runner(self.config), None) } /// Get an immutable reference to the node Configuration From 66d0466ddd9deefefa5861942946761c1859937b Mon Sep 17 00:00:00 2001 From: Ashley Ruglys Date: Wed, 1 Jul 2020 15:14:44 +0200 Subject: [PATCH 07/24] WIP --- bin/node-template/node/src/command.rs | 4 +- bin/node-template/node/src/service.rs | 260 ++--- bin/node/cli/src/command.rs | 4 +- bin/node/cli/src/service.rs | 394 +++---- client/api/src/execution_extensions.rs | 6 +- client/finality-grandpa/src/lib.rs | 13 +- client/finality-grandpa/src/light_import.rs | 24 +- client/service/src/builder.rs | 1114 +++++-------------- client/service/src/config.rs | 5 + client/service/src/lib.rs | 20 +- client/service/src/metrics.rs | 8 +- client/transaction-pool/src/lib.rs | 92 +- primitives/core/src/tasks.rs | 2 +- 13 files changed, 676 insertions(+), 1270 deletions(-) diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index 1bc436a063beb..367ea5f677471 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -72,8 +72,8 @@ pub fn run() -> sc_cli::Result<()> { Some(subcommand) => { let 
runner = cli.create_runner(subcommand)?; runner.run_subcommand(subcommand, |config| { - let (builder, _, _) = new_full_start!(config); - Ok(builder.to_chain_ops_parts()) + let (client, backend, _, task_manager, .., import_queue) = new_full_up_to_import_queue!(&config); + Ok((client, backend, import_queue, task_manager)) }) } None => { diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 89bf159927fc6..847360f48d419 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -2,13 +2,9 @@ use std::sync::Arc; use std::time::Duration; -use sc_client_api::ExecutorProvider; -use sc_consensus::LongestChain; +use sc_client_api::{ExecutorProvider, RemoteBackend}; use node_template_runtime::{self, opaque::Block, RuntimeApi}; -use sc_service::{ - error::{Error as ServiceError}, Configuration, ServiceBuilder, ServiceComponents, - TaskManager, -}; +use sc_service::{error::Error as ServiceError, Configuration, ServiceComponents, TaskManager}; use sp_inherents::InherentDataProviders; use sc_executor::native_executor_instance; pub use sc_executor::NativeExecutor; @@ -24,100 +20,90 @@ native_executor_instance!( node_template_runtime::native_version, ); -/// Starts a `ServiceBuilder` for a full service. -/// -/// Use this macro if you don't actually need the full service, but just the builder in order to -/// be able to perform chain operations. -macro_rules! new_full_start { +macro_rules! new_full_up_to_import_queue { ($config:expr) => {{ use std::sync::Arc; + use node_template_runtime::{Block, RuntimeApi}; + use crate::service::Executor; use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; - let mut import_setup = None; let inherent_data_providers = sp_inherents::InherentDataProviders::new(); - let builder = sc_service::ServiceBuilder::new_full::< - node_template_runtime::opaque::Block, - node_template_runtime::RuntimeApi, - crate::service::Executor - >($config)? 
- .with_select_chain(|_config, backend| { - Ok(sc_consensus::LongestChain::new(backend.clone())) - })? - .with_transaction_pool(|builder| { - let pool_api = sc_transaction_pool::FullChainApi::new( - builder.client().clone(), - ); - Ok(sc_transaction_pool::BasicPool::new( - builder.config().transaction_pool.clone(), - std::sync::Arc::new(pool_api), - builder.prometheus_registry(), - )) - })? - .with_import_queue(| - _config, - client, - mut select_chain, - _transaction_pool, - spawn_task_handle, - registry, - | { - let select_chain = select_chain.take() - .ok_or_else(|| sc_service::Error::SelectChainRequired)?; + let (client, backend, keystore, task_manager) = sc_service::new_full_parts::(&$config)?; + let client = Arc::new(client); - let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import( - client.clone(), - &(client.clone() as Arc<_>), - select_chain, - )?; + let select_chain = sc_consensus::LongestChain::new(backend.clone()); - let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new( - grandpa_block_import.clone(), client.clone(), - ); + let pool_api = sc_transaction_pool::FullChainApi::new(client.clone()); + let transaction_pool = sc_transaction_pool::BasicPool::new_full( + $config.transaction_pool.clone(), + std::sync::Arc::new(pool_api), + $config.prometheus_registry().as_ref(), + task_manager.spawn_handle(), + client.clone(), + ); - let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _>( - sc_consensus_aura::slot_duration(&*client)?, - aura_block_import, - Some(Box::new(grandpa_block_import.clone())), - None, - client, - inherent_data_providers.clone(), - spawn_task_handle, - registry, - )?; + let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import( + client.clone(), select_chain.clone(), + )?; - import_setup = Some((grandpa_block_import, grandpa_link)); + let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new( + 
grandpa_block_import.clone(), client.clone(), + ); - Ok(import_queue) - })?; + let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _>( + sc_consensus_aura::slot_duration(&*client)?, + aura_block_import, + Some(Box::new(grandpa_block_import.clone())), + None, + client.clone(), + inherent_data_providers.clone(), + &task_manager.spawn_handle(), + $config.prometheus_registry().as_ref(), + )?; - (builder, import_setup, inherent_data_providers) + ( + client, backend, keystore, task_manager, inherent_data_providers, select_chain, + transaction_pool, grandpa_block_import, grandpa_link, import_queue + ) }} } /// Builds a new service for a full client. -pub fn new_full(config: Configuration) -> Result { +pub fn new_full(config: Configuration) -> Result { + let ( + client, backend, keystore, mut task_manager, inherent_data_providers, select_chain, + transaction_pool, block_import, grandpa_link, import_queue + ) = new_full_up_to_import_queue!(&config); + + let provider = client.clone() as Arc>; + let finality_proof_provider = Arc::new(GrandpaFinalityProofProvider::new(backend.clone(), provider)); + + let prometheus_registry = config.prometheus_registry(); + let role = config.role.clone(); let force_authoring = config.force_authoring; let name = config.network.node_name.clone(); - let disable_grandpa = config.disable_grandpa; - - let (builder, mut import_setup, inherent_data_providers) = new_full_start!(config); - - let (block_import, grandpa_link) = - import_setup.take() - .expect("Link Half and Block Import are present for Full Services or setup failed before. qed"); + let enable_grandpa = !config.disable_grandpa; let ServiceComponents { - client, transaction_pool, task_manager, keystore, network, select_chain, - prometheus_registry, telemetry_on_connect_sinks, .. 
- } = builder - .with_finality_proof_provider(|client, backend| { - // GenesisAuthoritySetProvider is implemented for StorageAndProofProvider - let provider = client as Arc>; - Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, provider)) as _) - })? - .build_full()?; + network, + telemetry_on_connect_sinks, .. + } = sc_service::build_common(sc_service::ServiceParams { + config: config, + backend: backend.clone(), + client: client.clone(), + block_announce_validator_builder: None, + finality_proof_request_builder: None, + finality_proof_provider: Some(finality_proof_provider), + on_demand: None, + import_queue: import_queue, + keystore: keystore.clone(), + task_manager: &mut task_manager, + remote_backend: None, + rpc_extensions_builder: Box::new(|_| ()), + transaction_pool: transaction_pool.clone(), + })?; if role.is_authority() { let proposer = sc_basic_authorship::ProposerFactory::new( @@ -126,9 +112,6 @@ pub fn new_full(config: Configuration) -> Result { prometheus_registry.as_ref(), ); - let select_chain = select_chain - .ok_or(ServiceError::SelectChainRequired)?; - let can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); @@ -168,7 +151,6 @@ pub fn new_full(config: Configuration) -> Result { is_authority: role.is_network_authority(), }; - let enable_grandpa = !disable_grandpa; if enable_grandpa { // start the full GRANDPA voter // NOTE: non-authorities could run the GRANDPA observer protocol, but at @@ -206,69 +188,53 @@ pub fn new_full(config: Configuration) -> Result { /// Builds a new service for a light client. pub fn new_light(config: Configuration) -> Result { - let inherent_data_providers = InherentDataProviders::new(); - - ServiceBuilder::new_light::(config)? - .with_select_chain(|_config, backend| { - Ok(LongestChain::new(backend.clone())) - })? 
- .with_transaction_pool(|builder| { - let fetcher = builder.fetcher() - .ok_or_else(|| "Trying to start light transaction pool without active fetcher")?; + let (client, backend, keystore, mut task_manager, on_demand) = + sc_service::new_light_parts::(&config)?; + + let transaction_pool_api = Arc::new(sc_transaction_pool::LightChainApi::new( + client.clone(), on_demand.clone(), + )); + let transaction_pool = sc_transaction_pool::BasicPool::new_light( + config.transaction_pool.clone(), + transaction_pool_api, + config.prometheus_registry().as_ref(), + task_manager.spawn_handle(), + ); + + let grandpa_block_import = sc_finality_grandpa::light_block_import( + client.clone(), backend.clone(), + Arc::new(on_demand.checker().clone()) as Arc<_>, + )?; + let finality_proof_import = grandpa_block_import.clone(); + let finality_proof_request_builder = + finality_proof_import.create_finality_proof_request_builder(); + + let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _>( + sc_consensus_aura::slot_duration(&*client)?, + grandpa_block_import, + None, + Some(Box::new(finality_proof_import)), + client.clone(), + InherentDataProviders::new(), + &task_manager.spawn_handle(), + config.prometheus_registry().as_ref(), + )?; + + let finality_proof_provider = Arc::new(GrandpaFinalityProofProvider::new( + backend.clone(), client.clone() as Arc<_> + )); + + sc_service::build_common(sc_service::ServiceParams { + block_announce_validator_builder: None, + finality_proof_request_builder: Some(finality_proof_request_builder), + finality_proof_provider: Some(finality_proof_provider), + on_demand: Some(on_demand), + task_manager: &mut task_manager, + remote_backend: Some(backend.remote_blockchain()), + rpc_extensions_builder: Box::new(|_| ()), + transaction_pool: Arc::new(transaction_pool), + config, client, import_queue, keystore, backend, + })?; - let pool_api = sc_transaction_pool::LightChainApi::new( - builder.client().clone(), - fetcher.clone(), - ); - let pool = 
sc_transaction_pool::BasicPool::with_revalidation_type( - builder.config().transaction_pool.clone(), - Arc::new(pool_api), - builder.prometheus_registry(), - sc_transaction_pool::RevalidationType::Light, - ); - Ok(pool) - })? - .with_import_queue_and_fprb(| - _config, - client, - backend, - fetcher, - _select_chain, - _tx_pool, - spawn_task_handle, - prometheus_registry, - | { - let fetch_checker = fetcher - .map(|fetcher| fetcher.checker().clone()) - .ok_or_else(|| "Trying to start light import queue without active fetch checker")?; - let grandpa_block_import = sc_finality_grandpa::light_block_import( - client.clone(), - backend, - &(client.clone() as Arc<_>), - Arc::new(fetch_checker), - )?; - let finality_proof_import = grandpa_block_import.clone(); - let finality_proof_request_builder = - finality_proof_import.create_finality_proof_request_builder(); - - let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _>( - sc_consensus_aura::slot_duration(&*client)?, - grandpa_block_import, - None, - Some(Box::new(finality_proof_import)), - client, - inherent_data_providers.clone(), - spawn_task_handle, - prometheus_registry, - )?; - - Ok((import_queue, finality_proof_request_builder)) - })? - .with_finality_proof_provider(|client, backend| { - // GenesisAuthoritySetProvider is implemented for StorageAndProofProvider - let provider = client as Arc>; - Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, provider)) as _) - })? - .build_light() - .map(|ServiceComponents { task_manager, .. 
}| task_manager) + Ok(task_manager) } diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index 4ac796370c6f8..ceb815489817e 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -98,8 +98,8 @@ pub fn run() -> Result<()> { Some(Subcommand::Base(subcommand)) => { let runner = cli.create_runner(subcommand)?; runner.run_subcommand(subcommand, |config| { - let (builder, _, _, _) = new_full_start!(config); - Ok(builder.to_chain_ops_parts()) + let (client, backend, _, task_manager, .., import_queue) = new_full_up_to_import_queue!(&config); + Ok((client, backend, import_queue, task_manager)) }) } } diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 9707e3d8caf08..d4b70ee230f2c 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -29,127 +29,71 @@ use node_executor; use node_primitives::Block; use node_runtime::RuntimeApi; use sc_service::{ - ServiceBuilder, config::{Role, Configuration}, error::{Error as ServiceError}, + config::{Role, Configuration}, error::{Error as ServiceError}, RpcHandlers, ServiceComponents, TaskManager, }; use sp_inherents::InherentDataProviders; -use sc_consensus::LongestChain; use sc_network::{Event, NetworkService}; use sp_runtime::traits::Block as BlockT; use futures::prelude::*; -use sc_client_api::ExecutorProvider; +use sc_client_api::{ExecutorProvider, RemoteBackend}; use sp_core::traits::BareCryptoStorePtr; /// Starts a `ServiceBuilder` for a full service. /// /// Use this macro if you don't actually need the full service, but just the builder in order to /// be able to perform chain operations. -macro_rules! new_full_start { +macro_rules! 
new_full_up_to_import_queue { ($config:expr) => {{ use std::sync::Arc; + use node_executor::Executor; + + let (client, backend, keystore, task_manager) = sc_service::new_full_parts::(&$config)?; + let client = Arc::new(client); + + let select_chain = sc_consensus::LongestChain::new(backend.clone()); + + let pool_api = sc_transaction_pool::FullChainApi::new(client.clone()); + let transaction_pool = sc_transaction_pool::BasicPool::new_full( + $config.transaction_pool.clone(), + std::sync::Arc::new(pool_api), + $config.prometheus_registry(), + task_manager.spawn_handle(), + client.clone(), + ); + + let (grandpa_block_import, grandpa_link) = grandpa::block_import( + client.clone(), + select_chain.clone(), + )?; + let justification_import = grandpa_block_import.clone(); + + let (block_import, babe_link) = sc_consensus_babe::block_import( + sc_consensus_babe::Config::get_or_compute(&*client)?, + grandpa_block_import, + client.clone(), + )?; - let mut import_setup = None; - let mut rpc_setup = None; let inherent_data_providers = sp_inherents::InherentDataProviders::new(); - let builder = sc_service::ServiceBuilder::new_full::< - node_primitives::Block, node_runtime::RuntimeApi, node_executor::Executor - >($config)? - .with_select_chain(|_config, backend| { - Ok(sc_consensus::LongestChain::new(backend.clone())) - })? - .with_transaction_pool(|builder| { - let pool_api = sc_transaction_pool::FullChainApi::new( - builder.client().clone(), - ); - let config = builder.config(); - - Ok(sc_transaction_pool::BasicPool::new( - config.transaction_pool.clone(), - std::sync::Arc::new(pool_api), - builder.prometheus_registry(), - )) - })? 
- .with_import_queue(| - _config, - client, - mut select_chain, - _transaction_pool, - spawn_task_handle, - prometheus_registry, - | { - let select_chain = select_chain.take() - .ok_or_else(|| sc_service::Error::SelectChainRequired)?; - let (grandpa_block_import, grandpa_link) = grandpa::block_import( - client.clone(), - &(client.clone() as Arc<_>), - select_chain, - )?; - let justification_import = grandpa_block_import.clone(); - - let (block_import, babe_link) = sc_consensus_babe::block_import( - sc_consensus_babe::Config::get_or_compute(&*client)?, - grandpa_block_import, - client.clone(), - )?; - - let import_queue = sc_consensus_babe::import_queue( - babe_link.clone(), - block_import.clone(), - Some(Box::new(justification_import)), - None, - client, - inherent_data_providers.clone(), - spawn_task_handle, - prometheus_registry, - )?; - - import_setup = Some((block_import, grandpa_link, babe_link)); - Ok(import_queue) - })? - .with_rpc_extensions_builder(|builder| { - let grandpa_link = import_setup.as_ref().map(|s| &s.1) - .expect("GRANDPA LinkHalf is present for full services or set up failed; qed."); - - let shared_authority_set = grandpa_link.shared_authority_set().clone(); - let shared_voter_state = grandpa::SharedVoterState::empty(); - - rpc_setup = Some((shared_voter_state.clone())); - - let babe_link = import_setup.as_ref().map(|s| &s.2) - .expect("BabeLink is present for full services or set up failed; qed."); - - let babe_config = babe_link.config().clone(); - let shared_epoch_changes = babe_link.epoch_changes().clone(); - - let client = builder.client().clone(); - let pool = builder.pool().clone(); - let select_chain = builder.select_chain().cloned() - .expect("SelectChain is present for full services or set up failed; qed."); - let keystore = builder.keystore().clone(); - - Ok(move |deny_unsafe| { - let deps = node_rpc::FullDeps { - client: client.clone(), - pool: pool.clone(), - select_chain: select_chain.clone(), - deny_unsafe, - babe: 
node_rpc::BabeDeps { - babe_config: babe_config.clone(), - shared_epoch_changes: shared_epoch_changes.clone(), - keystore: keystore.clone(), - }, - grandpa: node_rpc::GrandpaDeps { - shared_voter_state: shared_voter_state.clone(), - shared_authority_set: shared_authority_set.clone(), - }, - }; + let import_queue = sc_consensus_babe::import_queue( + babe_link.clone(), + block_import.clone(), + Some(Box::new(justification_import)), + None, + client.clone(), + inherent_data_providers.clone(), + &task_manager.spawn_handle(), + $config.prometheus_registry(), + )?; - node_rpc::create_full(deps) - }) - })?; + let import_setup = (block_import, grandpa_link, babe_link); - (builder, import_setup, inherent_data_providers, rpc_setup) + ( + client, backend, keystore, task_manager, + select_chain, transaction_pool, inherent_data_providers, + import_setup, import_queue, + ) }} } @@ -170,37 +114,89 @@ pub fn new_full_base( Arc, Arc::Hash>>, Arc, Block>> ), ServiceError> { + let ( + client, backend, keystore, mut task_manager, + select_chain, transaction_pool, inherent_data_providers, + import_setup, import_queue, + ) = new_full_up_to_import_queue!(&config); + + let (rpc_extensions_builder, rpc_setup) = { + let (_, grandpa_link, babe_link) = &import_setup; + + let shared_authority_set = grandpa_link.shared_authority_set().clone(); + let shared_voter_state = grandpa::SharedVoterState::empty(); + + let rpc_setup = shared_voter_state.clone(); + + let babe_config = babe_link.config().clone(); + let shared_epoch_changes = babe_link.epoch_changes().clone(); + + let client = client.clone(); + let pool = transaction_pool.clone(); + let select_chain = select_chain.clone(); + let keystore = keystore.clone(); + + let rpc_extensions_builder = Box::new(move |deny_unsafe| { + let deps = node_rpc::FullDeps { + client: client.clone(), + pool: pool.clone(), + select_chain: select_chain.clone(), + deny_unsafe, + babe: node_rpc::BabeDeps { + babe_config: babe_config.clone(), + shared_epoch_changes: 
shared_epoch_changes.clone(), + keystore: keystore.clone(), + }, + grandpa: node_rpc::GrandpaDeps { + shared_voter_state: shared_voter_state.clone(), + shared_authority_set: shared_authority_set.clone(), + }, + }; + + node_rpc::create_full(deps) + }); + + (rpc_extensions_builder, rpc_setup) + }; + + let provider = client.clone() as Arc>; + let finality_proof_provider = Arc::new(grandpa::FinalityProofProvider::new(backend.clone(), provider)) as _; + let ( role, force_authoring, name, disable_grandpa, + prometheus_registry, ) = ( config.role.clone(), config.force_authoring, config.network.node_name.clone(), config.disable_grandpa, + config.prometheus_registry().cloned(), ); - let (builder, mut import_setup, inherent_data_providers, mut rpc_setup) = - new_full_start!(config); - let ServiceComponents { - client, transaction_pool, task_manager, keystore, network, select_chain, - prometheus_registry, telemetry_on_connect_sinks, .. - } = builder - .with_finality_proof_provider(|client, backend| { - // GenesisAuthoritySetProvider is implemented for StorageAndProofProvider - let provider = client as Arc>; - Ok(Arc::new(grandpa::FinalityProofProvider::new(backend, provider)) as _) - })? - .build_full()?; - - let (block_import, grandpa_link, babe_link) = import_setup.take() - .expect("Link Half and Block Import are present for Full Services or setup failed before. qed"); - - let shared_voter_state = rpc_setup.take() - .expect("The SharedVoterState is present for Full Services or setup failed before. qed"); + network, + telemetry_on_connect_sinks, .. 
+ } = sc_service::build_common(sc_service::ServiceParams { + config, + backend: backend.clone(), + client: client.clone(), + block_announce_validator_builder: None, + finality_proof_request_builder: None, + finality_proof_provider: Some(finality_proof_provider), + on_demand: None, + import_queue: import_queue, + keystore: keystore.clone(), + task_manager: &mut task_manager, + remote_backend: None, + rpc_extensions_builder, + transaction_pool: transaction_pool.clone(), + })?; + + let (block_import, grandpa_link, babe_link) = import_setup; + let shared_voter_state = rpc_setup; (with_startup_data)(&block_import, &babe_link); @@ -211,9 +207,6 @@ pub fn new_full_base( prometheus_registry.as_ref(), ); - let select_chain = select_chain - .ok_or(sc_service::Error::SelectChainRequired)?; - let can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); @@ -300,7 +293,7 @@ pub fn new_full_base( inherent_data_providers: inherent_data_providers.clone(), telemetry_on_connect: Some(telemetry_on_connect_sinks.on_connect_stream()), voting_rule: grandpa::VotingRulesBuilder::default().build(), - prometheus_registry: prometheus_registry.clone(), + prometheus_registry: prometheus_registry, shared_voter_state, }; @@ -339,93 +332,74 @@ pub fn new_light_base(config: Configuration) -> Result<( sc_transaction_pool::LightChainApi, Block >> ), ServiceError> { - let inherent_data_providers = InherentDataProviders::new(); - - let ServiceComponents { - task_manager, rpc_handlers, client, network, transaction_pool, .. - } = ServiceBuilder::new_light::(config)? - .with_select_chain(|_config, backend| { - Ok(LongestChain::new(backend.clone())) - })? 
- .with_transaction_pool(|builder| { - let fetcher = builder.fetcher() - .ok_or_else(|| "Trying to start light transaction pool without active fetcher")?; - let pool_api = sc_transaction_pool::LightChainApi::new( - builder.client().clone(), - fetcher, - ); - let pool = sc_transaction_pool::BasicPool::with_revalidation_type( - builder.config().transaction_pool.clone(), - Arc::new(pool_api), - builder.prometheus_registry(), - sc_transaction_pool::RevalidationType::Light, - ); - Ok(pool) - })? - .with_import_queue_and_fprb(| - _config, - client, - backend, - fetcher, - _select_chain, - _tx_pool, - spawn_task_handle, - registry, - | { - let fetch_checker = fetcher - .map(|fetcher| fetcher.checker().clone()) - .ok_or_else(|| "Trying to start light import queue without active fetch checker")?; - let grandpa_block_import = grandpa::light_block_import( - client.clone(), - backend, - &(client.clone() as Arc<_>), - Arc::new(fetch_checker), - )?; - - let finality_proof_import = grandpa_block_import.clone(); - let finality_proof_request_builder = - finality_proof_import.create_finality_proof_request_builder(); - - let (babe_block_import, babe_link) = sc_consensus_babe::block_import( - sc_consensus_babe::Config::get_or_compute(&*client)?, - grandpa_block_import, - client.clone(), - )?; - - let import_queue = sc_consensus_babe::import_queue( - babe_link, - babe_block_import, - None, - Some(Box::new(finality_proof_import)), - client.clone(), - inherent_data_providers.clone(), - spawn_task_handle, - registry, - )?; - - Ok((import_queue, finality_proof_request_builder)) - })? - .with_finality_proof_provider(|client, backend| { - // GenesisAuthoritySetProvider is implemented for StorageAndProofProvider - let provider = client as Arc>; - Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, provider)) as _) - })? 
- .with_rpc_extensions(|builder| { - let fetcher = builder.fetcher() - .ok_or_else(|| "Trying to start node RPC without active fetcher")?; - let remote_blockchain = builder.remote_backend() - .ok_or_else(|| "Trying to start node RPC without active remote blockchain")?; - - let light_deps = node_rpc::LightDeps { - remote_blockchain, - fetcher, - client: builder.client().clone(), - pool: builder.pool(), - }; + let (client, backend, keystore, mut task_manager, on_demand) = + sc_service::new_light_parts::(&config)?; + + let transaction_pool_api = Arc::new(sc_transaction_pool::LightChainApi::new( + client.clone(), + on_demand.clone(), + )); + let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( + config.transaction_pool.clone(), + transaction_pool_api, + config.prometheus_registry(), + task_manager.spawn_handle(), + )); + + let grandpa_block_import = grandpa::light_block_import( + client.clone(), + backend.clone(), + Arc::new(on_demand.checker().clone()), + )?; + + let finality_proof_import = grandpa_block_import.clone(); + let finality_proof_request_builder = + finality_proof_import.create_finality_proof_request_builder(); + + let (babe_block_import, babe_link) = sc_consensus_babe::block_import( + sc_consensus_babe::Config::get_or_compute(&*client)?, + grandpa_block_import, + client.clone(), + )?; + + let inherent_data_providers = sp_inherents::InherentDataProviders::new(); + + let import_queue = sc_consensus_babe::import_queue( + babe_link, + babe_block_import, + None, + Some(Box::new(finality_proof_import)), + client.clone(), + inherent_data_providers.clone(), + &task_manager.spawn_handle(), + config.prometheus_registry(), + )?; + + // GenesisAuthoritySetProvider is implemented for StorageAndProofProvider + let provider = client.clone() as Arc>; + let finality_proof_provider = Arc::new(GrandpaFinalityProofProvider::new(backend.clone(), provider)); + + let light_deps = node_rpc::LightDeps { + remote_blockchain: backend.remote_blockchain(), + 
fetcher: on_demand.clone(), + client: client.clone(), + pool: transaction_pool.clone(), + }; - Ok(node_rpc::create_light(light_deps)) - })? - .build_light()?; + let rpc_extensions = node_rpc::create_light(light_deps); + + let ServiceComponents { rpc_handlers, network, .. } = sc_service::build_common(sc_service::ServiceParams { + block_announce_validator_builder: None, + finality_proof_request_builder: Some(finality_proof_request_builder), + finality_proof_provider: Some(finality_proof_provider), + on_demand: Some(on_demand), + task_manager: &mut task_manager, + remote_backend: Some(backend.remote_blockchain()), + rpc_extensions_builder: Box::new(sc_service::NoopRpcExtensionBuilder(rpc_extensions)), + client: client.clone(), + transaction_pool: transaction_pool.clone(), + config, import_queue, keystore, backend, + })?; Ok((task_manager, rpc_handlers, client, network, transaction_pool)) } diff --git a/client/api/src/execution_extensions.rs b/client/api/src/execution_extensions.rs index 55ffc3794c4ea..b89885cc5c4dc 100644 --- a/client/api/src/execution_extensions.rs +++ b/client/api/src/execution_extensions.rs @@ -126,8 +126,10 @@ impl ExecutionExtensions { /// extension to be a `Weak` reference. /// That's also the reason why it's being registered lazily instead of /// during initialization. - pub fn register_transaction_pool(&self, pool: Weak>) { - *self.transaction_pool.write() = Some(pool); + pub fn register_transaction_pool(&self, pool: &Arc) + where T: sp_transaction_pool::OffchainSubmitTransaction + 'static + { + *self.transaction_pool.write() = Some(Arc::downgrade(&pool) as _); } /// Create `ExecutionManager` and `Extensions` for given offchain call. 
diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index fa2a6fedd8b05..beeb86210f912 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -462,8 +462,8 @@ pub trait GenesisAuthoritySetProvider { fn get(&self) -> Result; } -impl GenesisAuthoritySetProvider for Arc> - where E: CallExecutor, +impl GenesisAuthoritySetProvider for T + where T: ExecutorProvider, E: CallExecutor, { fn get(&self) -> Result { // This implementation uses the Grandpa runtime API instead of reading directly from the @@ -490,7 +490,6 @@ impl GenesisAuthoritySetProvider for Arc( client: Arc, - genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, select_chain: SC, ) -> Result< ( @@ -502,11 +501,10 @@ pub fn block_import( where SC: SelectChain, BE: Backend + 'static, - Client: ClientForGrandpa + 'static, + Client: ClientForGrandpa + GenesisAuthoritySetProvider + 'static, { block_import_with_authority_set_hard_forks( client, - genesis_authorities_provider, select_chain, Default::default(), ) @@ -519,7 +517,6 @@ where /// given static authorities. 
pub fn block_import_with_authority_set_hard_forks( client: Arc, - genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, select_chain: SC, authority_set_hard_forks: Vec<(SetId, (Block::Hash, NumberFor), AuthorityList)>, ) -> Result< @@ -532,7 +529,7 @@ pub fn block_import_with_authority_set_hard_forks where SC: SelectChain, BE: Backend + 'static, - Client: ClientForGrandpa + 'static, + Client: ClientForGrandpa + GenesisAuthoritySetProvider + 'static, { let chain_info = client.info(); let genesis_hash = chain_info.genesis_hash; @@ -542,7 +539,7 @@ where genesis_hash, >::zero(), || { - let authorities = genesis_authorities_provider.get()?; + let authorities = client.get()?; telemetry!(CONSENSUS_DEBUG; "afg.loading_authorities"; "authorities_len" => ?authorities.len() ); diff --git a/client/finality-grandpa/src/light_import.rs b/client/finality-grandpa/src/light_import.rs index a7c9a655467c7..211f5565b9e13 100644 --- a/client/finality-grandpa/src/light_import.rs +++ b/client/finality-grandpa/src/light_import.rs @@ -50,18 +50,16 @@ const LIGHT_CONSENSUS_CHANGES_KEY: &[u8] = b"grandpa_consensus_changes"; pub fn light_block_import( client: Arc, backend: Arc, - genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, authority_set_provider: Arc>, ) -> Result, ClientError> where BE: Backend, - Client: crate::ClientForGrandpa, + Client: crate::ClientForGrandpa + GenesisAuthoritySetProvider, { let info = client.info(); let import_data = load_aux_import_data( info.finalized_hash, &*client, - genesis_authorities_provider, )?; Ok(GrandpaLightBlockImport { client, @@ -498,40 +496,40 @@ fn do_finalize_block( Ok(ImportResult::imported(true)) } -/// Load light import aux data from the store. -fn load_aux_import_data( +/// Load light import aux data from the client. 
+fn load_aux_import_data( last_finalized: Block::Hash, - aux_store: &B, - genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, + client: &Client, + ) -> Result, ClientError> where - B: AuxStore, + Client: AuxStore + GenesisAuthoritySetProvider, Block: BlockT, { - let authority_set = match load_decode(aux_store, LIGHT_AUTHORITY_SET_KEY)? { + let authority_set = match load_decode(client, LIGHT_AUTHORITY_SET_KEY)? { Some(authority_set) => authority_set, None => { info!(target: "afg", "Loading GRANDPA authorities \ from genesis on what appears to be first startup."); // no authority set on disk: fetch authorities from genesis state - let genesis_authorities = genesis_authorities_provider.get()?; + let genesis_authorities = client.get()?; let authority_set = LightAuthoritySet::genesis(genesis_authorities); let encoded = authority_set.encode(); - aux_store.insert_aux(&[(LIGHT_AUTHORITY_SET_KEY, &encoded[..])], &[])?; + client.insert_aux(&[(LIGHT_AUTHORITY_SET_KEY, &encoded[..])], &[])?; authority_set }, }; - let consensus_changes = match load_decode(aux_store, LIGHT_CONSENSUS_CHANGES_KEY)? { + let consensus_changes = match load_decode(client, LIGHT_CONSENSUS_CHANGES_KEY)? 
{ Some(consensus_changes) => consensus_changes, None => { let consensus_changes = ConsensusChanges::>::empty(); let encoded = authority_set.encode(); - aux_store.insert_aux(&[(LIGHT_CONSENSUS_CHANGES_KEY, &encoded[..])], &[])?; + client.insert_aux(&[(LIGHT_CONSENSUS_CHANGES_KEY, &encoded[..])], &[])?; consensus_changes }, diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 3a1c5c85af5da..9416eee01dafc 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -24,8 +24,8 @@ use crate::{ config::{Configuration, KeystoreConfig, PrometheusConfig, OffchainWorkerConfig}, }; use sc_client_api::{ - self, light::RemoteBlockchain, execution_extensions::ExtensionsFactory, ExecutorProvider, - ForkBlocks, BadBlocks, CloneableSpawn, UsageProvider, backend::RemoteBackend, + light::RemoteBlockchain, ForkBlocks, BadBlocks, CloneableSpawn, UsageProvider, + ExecutorProvider, }; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; use sc_chain_spec::get_extension; @@ -49,10 +49,10 @@ use sp_runtime::traits::{ }; use sp_api::{ProvideRuntimeApi, CallApiAt}; use sc_executor::{NativeExecutor, NativeExecutionDispatch, RuntimeInfo}; -use std::{collections::HashMap, marker::PhantomData, sync::Arc, pin::Pin}; +use std::{collections::HashMap, sync::Arc, pin::Pin}; use wasm_timer::SystemTime; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; -use sp_transaction_pool::{LocalTransactionPool, MaintainedTransactionPool}; +use sp_transaction_pool::MaintainedTransactionPool; use prometheus_endpoint::Registry; use sc_client_db::{Backend, DatabaseSettings}; use sp_core::traits::CodeExecutor; @@ -66,45 +66,6 @@ use sc_client_api::{ use sp_blockchain::{HeaderMetadata, HeaderBackend}; use crate::{ServiceComponents, TelemetryOnConnectSinks, RpcHandlers, NetworkStatusSinks}; -pub type BackgroundTask = Pin + Send>>; - -/// Aggregator for the components required to build a service. 
-/// -/// # Usage -/// -/// Call [`ServiceBuilder::new_full`] or [`ServiceBuilder::new_light`], then call the various -/// `with_` methods to add the required components that you built yourself: -/// -/// - [`with_select_chain`](ServiceBuilder::with_select_chain) -/// - [`with_import_queue`](ServiceBuilder::with_import_queue) -/// - [`with_finality_proof_provider`](ServiceBuilder::with_finality_proof_provider) -/// - [`with_transaction_pool`](ServiceBuilder::with_transaction_pool) -/// -/// After this is done, call [`build`](ServiceBuilder::build) to construct the service. -/// -/// The order in which the `with_*` methods are called doesn't matter, as the correct binding of -/// generics is done when you call `build`. -/// -pub struct ServiceBuilder -{ - config: Configuration, - pub (crate) client: Arc, - backend: Arc, - task_manager: TaskManager, - keystore: Arc>, - fetcher: Option, - select_chain: Option, - pub (crate) import_queue: TImpQu, - finality_proof_request_builder: Option, - finality_proof_provider: Option, - transaction_pool: Arc, - rpc_extensions_builder: Box + Send>, - remote_backend: Option>>, - marker: PhantomData<(TBl, TRtApi)>, - block_announce_validator_builder: Option) -> Box + Send> + Send>>, -} - /// A utility trait for building an RPC extension given a `DenyUnsafe` instance. /// This is useful since at service definition time we don't know whether the /// specific interface where the RPC extension will be exposed is safe or not. @@ -133,7 +94,7 @@ impl RpcExtensionBuilder for F where /// A utility struct for implementing an `RpcExtensionBuilder` given a cloneable /// `RpcExtension`, the resulting builder will simply ignore the provided /// `DenyUnsafe` instance and return a static `RpcExtension` instance. 
-struct NoopRpcExtensionBuilder(R); +pub struct NoopRpcExtensionBuilder(pub R); impl RpcExtensionBuilder for NoopRpcExtensionBuilder where R: Clone + sc_rpc::RpcExtension, @@ -207,6 +168,14 @@ type TFullParts = ( TaskManager, ); +type TLightParts = ( + Arc>, + Arc>, + Arc>, + TaskManager, + Arc>, +); + /// Creates a new full client for the given config. pub fn new_full_client( config: &Configuration, @@ -217,7 +186,7 @@ pub fn new_full_client( new_full_parts(config).map(|parts| parts.0) } -fn new_full_parts( +pub fn new_full_parts( config: &Configuration, ) -> Result, Error> where TBl: BlockT, @@ -284,6 +253,62 @@ fn new_full_parts( Ok((client, backend, keystore, task_manager)) } +pub fn new_light_parts( + config: &Configuration +) -> Result, Error> where + TBl: BlockT, + TExecDisp: NativeExecutionDispatch + 'static, +{ + + let task_manager = { + let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); + TaskManager::new(config.task_executor.clone(), registry)? + }; + + let keystore = match &config.keystore { + KeystoreConfig::Path { path, password } => Keystore::open( + path.clone(), + password.clone() + )?, + KeystoreConfig::InMemory => Keystore::new_in_memory(), + }; + + let executor = NativeExecutor::::new( + config.wasm_method, + config.default_heap_pages, + config.max_runtime_instances, + ); + + let db_storage = { + let db_settings = sc_client_db::DatabaseSettings { + state_cache_size: config.state_cache_size, + state_cache_child_ratio: + config.state_cache_child_ratio.map(|v| (v, 100)), + pruning: config.pruning.clone(), + source: config.database.clone(), + }; + sc_client_db::light::LightStorage::new(db_settings)? 
+ }; + let light_blockchain = sc_light::new_light_blockchain(db_storage); + let fetch_checker = Arc::new( + sc_light::new_fetch_checker::<_, TBl, _>( + light_blockchain.clone(), + executor.clone(), + Box::new(task_manager.spawn_handle()), + ), + ); + let on_demand = Arc::new(sc_network::config::OnDemand::new(fetch_checker)); + let backend = sc_light::new_light_backend(light_blockchain); + let client = Arc::new(light::new_light( + backend.clone(), + config.chain_spec.as_storage_builder(), + executor, + Box::new(task_manager.spawn_handle()), + config.prometheus_config.as_ref().map(|config| config.registry.clone()), + )?); + + Ok((client, backend, keystore, task_manager, on_demand)) +} /// Create an instance of db-backed client. pub fn new_client( @@ -330,804 +355,219 @@ pub fn new_client( )) } -impl ServiceBuilder<(), (), (), (), (), (), (), (), (), (), ()> { - /// Start the service builder with a configuration. - pub fn new_full( - config: Configuration, - ) -> Result, - Arc>, - (), - (), - BoxFinalityProofRequestBuilder, - Arc>, - (), - (), - TFullBackend, - >, Error> { - let (client, backend, keystore, task_manager) = new_full_parts(&config)?; - - let client = Arc::new(client); - - Ok(ServiceBuilder { - config, - client, - backend, - keystore, - task_manager, - fetcher: None, - select_chain: None, - import_queue: (), - finality_proof_request_builder: None, - finality_proof_provider: None, - transaction_pool: Arc::new(()), - rpc_extensions_builder: Box::new(|_| ()), - remote_backend: None, - block_announce_validator_builder: None, - marker: PhantomData, - }) - } - - /// Start the service builder with a configuration. - pub fn new_light( - config: Configuration, - ) -> Result, - Arc>, - (), - (), - BoxFinalityProofRequestBuilder, - Arc>, - (), - (), - TLightBackend, - >, Error> { - let task_manager = { - let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); - TaskManager::new(config.task_executor.clone(), registry)? 
- }; - - let keystore = match &config.keystore { - KeystoreConfig::Path { path, password } => Keystore::open( - path.clone(), - password.clone() - )?, - KeystoreConfig::InMemory => Keystore::new_in_memory(), - }; - - let executor = NativeExecutor::::new( - config.wasm_method, - config.default_heap_pages, - config.max_runtime_instances, - ); - - let db_storage = { - let db_settings = sc_client_db::DatabaseSettings { - state_cache_size: config.state_cache_size, - state_cache_child_ratio: - config.state_cache_child_ratio.map(|v| (v, 100)), - pruning: config.pruning.clone(), - source: config.database.clone(), - }; - sc_client_db::light::LightStorage::new(db_settings)? - }; - let light_blockchain = sc_light::new_light_blockchain(db_storage); - let fetch_checker = Arc::new( - sc_light::new_fetch_checker::<_, TBl, _>( - light_blockchain.clone(), - executor.clone(), - Box::new(task_manager.spawn_handle()), - ), - ); - let fetcher = Arc::new(sc_network::config::OnDemand::new(fetch_checker)); - let backend = sc_light::new_light_backend(light_blockchain); - let remote_blockchain = backend.remote_blockchain(); - let client = Arc::new(light::new_light( - backend.clone(), - config.chain_spec.as_storage_builder(), - executor, - Box::new(task_manager.spawn_handle()), - config.prometheus_config.as_ref().map(|config| config.registry.clone()), - )?); - - Ok(ServiceBuilder { - config, - client, - backend, - task_manager, - keystore, - fetcher: Some(fetcher.clone()), - select_chain: None, - import_queue: (), - finality_proof_request_builder: None, - finality_proof_provider: None, - transaction_pool: Arc::new(()), - rpc_extensions_builder: Box::new(|_| ()), - remote_backend: Some(remote_blockchain), - block_announce_validator_builder: None, - marker: PhantomData, - }) - } +pub struct ServiceParams<'a, TBl, TCl, TFchr, TImpQu, TFprb, TFpp, TExPool, TRpc, Backend> { + pub config: Configuration, + pub client: Arc, + pub backend: Arc, + pub task_manager: &'a mut TaskManager, + pub keystore: 
Arc>, + pub on_demand: Option, + pub import_queue: TImpQu, + pub finality_proof_request_builder: Option, + pub finality_proof_provider: Option, + pub transaction_pool: Arc, + pub rpc_extensions_builder: Box + Send>, + pub remote_backend: Option>>, + pub block_announce_validator_builder: Option) -> Box + Send> + Send>>, } -impl - ServiceBuilder< - TBl, - TRtApi, - TCl, - TFchr, - TSc, - TImpQu, - TFprb, - TFpp, - TExPool, - TRpc, - Backend - > -{ - /// Returns a reference to the configuration that was stored in this builder. - pub fn config(&self) -> &Configuration { - &self.config - } - - /// Returns a reference to the optional prometheus registry that was stored in this builder. - pub fn prometheus_registry(&self) -> Option<&Registry> { - self.config.prometheus_config.as_ref().map(|config| &config.registry) - } - - /// Returns a reference to the client that was stored in this builder. - pub fn client(&self) -> &Arc { - &self.client - } - - /// Returns a reference to the backend that was used in this builder. - pub fn backend(&self) -> &Arc { - &self.backend - } - - /// Returns a reference to the select-chain that was stored in this builder. - pub fn select_chain(&self) -> Option<&TSc> { - self.select_chain.as_ref() - } - - /// Returns a reference to the keystore - pub fn keystore(&self) -> Arc> { - self.keystore.clone() - } - - /// Returns a reference to the transaction pool stored in this builder - pub fn pool(&self) -> Arc { - self.transaction_pool.clone() - } - - /// Returns a reference to the fetcher, only available if builder - /// was created with `new_light`. - pub fn fetcher(&self) -> Option - where TFchr: Clone - { - self.fetcher.clone() - } - - /// Returns a reference to the remote_backend, only available if builder - /// was created with `new_light`. - pub fn remote_backend(&self) -> Option>> { - self.remote_backend.clone() - } - - /// Consume the builder and return the parts needed for chain operations. 
- pub fn to_chain_ops_parts(self) -> (Arc, Arc, TImpQu, TaskManager) { - (self.client, self.backend, self.import_queue, self.task_manager) - } - - /// Defines which head-of-chain strategy to use. - pub fn with_opt_select_chain( - self, - select_chain_builder: impl FnOnce( - &Configuration, &Arc, - ) -> Result, Error> - ) -> Result, Error> { - let select_chain = select_chain_builder(&self.config, &self.backend)?; - - Ok(ServiceBuilder { - config: self.config, - client: self.client, - backend: self.backend, - task_manager: self.task_manager, - keystore: self.keystore, - fetcher: self.fetcher, - select_chain, - import_queue: self.import_queue, - finality_proof_request_builder: self.finality_proof_request_builder, - finality_proof_provider: self.finality_proof_provider, - transaction_pool: self.transaction_pool, - rpc_extensions_builder: self.rpc_extensions_builder, - remote_backend: self.remote_backend, - block_announce_validator_builder: self.block_announce_validator_builder, - marker: self.marker, - }) - } - - /// Defines which head-of-chain strategy to use. - pub fn with_select_chain( - self, - builder: impl FnOnce(&Configuration, &Arc) -> Result, - ) -> Result, Error> { - self.with_opt_select_chain(|cfg, b| builder(cfg, b).map(Option::Some)) - } - - /// Defines which import queue to use. 
- pub fn with_import_queue( - self, - builder: impl FnOnce(&Configuration, Arc, Option, Arc, &SpawnTaskHandle, Option<&Registry>) - -> Result - ) -> Result, Error> - where TSc: Clone { - let import_queue = builder( - &self.config, - self.client.clone(), - self.select_chain.clone(), - self.transaction_pool.clone(), - &self.task_manager.spawn_handle(), - self.config.prometheus_config.as_ref().map(|config| &config.registry), - )?; - - Ok(ServiceBuilder { - config: self.config, - client: self.client, - backend: self.backend, - task_manager: self.task_manager, - keystore: self.keystore, - fetcher: self.fetcher, - select_chain: self.select_chain, - import_queue, - finality_proof_request_builder: self.finality_proof_request_builder, - finality_proof_provider: self.finality_proof_provider, - transaction_pool: self.transaction_pool, - rpc_extensions_builder: self.rpc_extensions_builder, - remote_backend: self.remote_backend, - block_announce_validator_builder: self.block_announce_validator_builder, - marker: self.marker, - }) - } - - /// Defines which strategy to use for providing finality proofs. 
- pub fn with_opt_finality_proof_provider( - self, - builder: impl FnOnce(Arc, Arc) -> Result>>, Error> - ) -> Result>, - TExPool, - TRpc, - Backend, - >, Error> { - let finality_proof_provider = builder(self.client.clone(), self.backend.clone())?; - - Ok(ServiceBuilder { - config: self.config, - client: self.client, - backend: self.backend, - task_manager: self.task_manager, - keystore: self.keystore, - fetcher: self.fetcher, - select_chain: self.select_chain, - import_queue: self.import_queue, - finality_proof_request_builder: self.finality_proof_request_builder, - finality_proof_provider, - transaction_pool: self.transaction_pool, - rpc_extensions_builder: self.rpc_extensions_builder, - remote_backend: self.remote_backend, - block_announce_validator_builder: self.block_announce_validator_builder, - marker: self.marker, - }) - } - - /// Defines which strategy to use for providing finality proofs. - pub fn with_finality_proof_provider( - self, - build: impl FnOnce(Arc, Arc) -> Result>, Error> - ) -> Result>, - TExPool, - TRpc, - Backend, - >, Error> { - self.with_opt_finality_proof_provider(|client, backend| build(client, backend).map(Option::Some)) - } - - /// Defines which import queue to use. 
- pub fn with_import_queue_and_opt_fprb( - self, - builder: impl FnOnce( - &Configuration, - Arc, - Arc, - Option, - Option, - Arc, - &SpawnTaskHandle, - Option<&Registry>, - ) -> Result<(UImpQu, Option), Error> - ) -> Result, Error> - where TSc: Clone, TFchr: Clone { - let (import_queue, fprb) = builder( - &self.config, - self.client.clone(), - self.backend.clone(), - self.fetcher.clone(), - self.select_chain.clone(), - self.transaction_pool.clone(), - &self.task_manager.spawn_handle(), - self.config.prometheus_config.as_ref().map(|config| &config.registry), - )?; - - Ok(ServiceBuilder { - config: self.config, - client: self.client, - backend: self.backend, - task_manager: self.task_manager, - keystore: self.keystore, - fetcher: self.fetcher, - select_chain: self.select_chain, - import_queue, - finality_proof_request_builder: fprb, - finality_proof_provider: self.finality_proof_provider, - transaction_pool: self.transaction_pool, - rpc_extensions_builder: self.rpc_extensions_builder, - remote_backend: self.remote_backend, - block_announce_validator_builder: self.block_announce_validator_builder, - marker: self.marker, - }) - } - - /// Defines which import queue to use. - pub fn with_import_queue_and_fprb( - self, - builder: impl FnOnce( - &Configuration, - Arc, - Arc, - Option, - Option, - Arc, - &SpawnTaskHandle, - Option<&Registry>, - ) -> Result<(UImpQu, UFprb), Error> - ) -> Result, Error> - where TSc: Clone, TFchr: Clone { - self.with_import_queue_and_opt_fprb(|cfg, cl, b, f, sc, tx, tb, pr| - builder(cfg, cl, b, f, sc, tx, tb, pr) - .map(|(q, f)| (q, Some(f))) - ) - } - - /// Defines which transaction pool to use. 
- pub fn with_transaction_pool( - self, - transaction_pool_builder: impl FnOnce( - &Self, - ) -> Result<(UExPool, Option), Error>, - ) -> Result, Error> - where TSc: Clone, TFchr: Clone { - let (transaction_pool, background_task) = transaction_pool_builder(&self)?; - - if let Some(background_task) = background_task{ - self.task_manager.spawn_handle().spawn("txpool-background", background_task); - } - - Ok(ServiceBuilder { - config: self.config, - client: self.client, - task_manager: self.task_manager, - backend: self.backend, - keystore: self.keystore, - fetcher: self.fetcher, - select_chain: self.select_chain, - import_queue: self.import_queue, - finality_proof_request_builder: self.finality_proof_request_builder, - finality_proof_provider: self.finality_proof_provider, - transaction_pool: Arc::new(transaction_pool), - rpc_extensions_builder: self.rpc_extensions_builder, - remote_backend: self.remote_backend, - block_announce_validator_builder: self.block_announce_validator_builder, - marker: self.marker, - }) - } - - /// Defines the RPC extension builder to use. Unlike `with_rpc_extensions`, - /// this method is useful in situations where the RPC extensions need to - /// access to a `DenyUnsafe` instance to avoid exposing sensitive methods. 
- pub fn with_rpc_extensions_builder( - self, - rpc_extensions_builder: impl FnOnce(&Self) -> Result, - ) -> Result< - ServiceBuilder, - Error, - > - where - TSc: Clone, - TFchr: Clone, - URpcBuilder: RpcExtensionBuilder + Send + 'static, - URpc: sc_rpc::RpcExtension, - { - let rpc_extensions_builder = rpc_extensions_builder(&self)?; - - Ok(ServiceBuilder { - config: self.config, - client: self.client, - backend: self.backend, - task_manager: self.task_manager, - keystore: self.keystore, - fetcher: self.fetcher, - select_chain: self.select_chain, - import_queue: self.import_queue, - finality_proof_request_builder: self.finality_proof_request_builder, - finality_proof_provider: self.finality_proof_provider, - transaction_pool: self.transaction_pool, - rpc_extensions_builder: Box::new(rpc_extensions_builder), - remote_backend: self.remote_backend, - block_announce_validator_builder: self.block_announce_validator_builder, - marker: self.marker, - }) - } - - /// Defines the RPC extensions to use. - pub fn with_rpc_extensions( - self, - rpc_extensions: impl FnOnce(&Self) -> Result, - ) -> Result< - ServiceBuilder, - Error, - > - where - TSc: Clone, - TFchr: Clone, - URpc: Clone + sc_rpc::RpcExtension + Send + 'static, - { - let rpc_extensions = rpc_extensions(&self)?; - self.with_rpc_extensions_builder(|_| Ok(NoopRpcExtensionBuilder::from(rpc_extensions))) - } - - /// Defines the `BlockAnnounceValidator` to use. `DefaultBlockAnnounceValidator` will be used by - /// default. 
- pub fn with_block_announce_validator( - self, - block_announce_validator_builder: - impl FnOnce(Arc) -> Box + Send> + Send + 'static, - ) -> Result, Error> - where TSc: Clone, TFchr: Clone { - Ok(ServiceBuilder { - config: self.config, - client: self.client, - backend: self.backend, - task_manager: self.task_manager, - keystore: self.keystore, - fetcher: self.fetcher, - select_chain: self.select_chain, - import_queue: self.import_queue, - finality_proof_request_builder: self.finality_proof_request_builder, - finality_proof_provider: self.finality_proof_provider, - transaction_pool: self.transaction_pool, - rpc_extensions_builder: self.rpc_extensions_builder, - remote_backend: self.remote_backend, - block_announce_validator_builder: Some(Box::new(block_announce_validator_builder)), - marker: self.marker, - }) - } -} - -impl -ServiceBuilder< +pub fn build_common(builder: ServiceParams< TBl, - TRtApi, TCl, Arc>, - TSc, TImpQu, BoxFinalityProofRequestBuilder, Arc>, TExPool, TRpc, TBackend, -> where - TCl: ProvideRuntimeApi + HeaderMetadata + Chain + - BlockBackend + BlockIdTo + ProofProvider + - HeaderBackend + BlockchainEvents + ExecutorProvider + UsageProvider + - StorageProvider + CallApiAt + - Send + 'static, - >::Api: - sp_api::Metadata + - sc_offchain::OffchainWorkerApi + - sp_transaction_pool::runtime_api::TaggedTransactionQueue + - sp_session::SessionKeys + - sp_api::ApiErrorExt + - sp_api::ApiExt, - TBl: BlockT, - TRtApi: 'static + Send + Sync, - TBackend: 'static + sc_client_api::backend::Backend + Send, - TSc: Clone, - TImpQu: 'static + ImportQueue, - TExPool: MaintainedTransactionPool::Hash> + MallocSizeOfWasm + 'static, - TRpc: sc_rpc::RpcExtension, +>) + -> Result, Error> + where + TCl: ProvideRuntimeApi + HeaderMetadata + Chain + + BlockBackend + BlockIdTo + ProofProvider + + HeaderBackend + BlockchainEvents + ExecutorProvider + UsageProvider + + StorageProvider + CallApiAt + + Send + 'static, + >::Api: + sp_api::Metadata + + 
sc_offchain::OffchainWorkerApi + + sp_transaction_pool::runtime_api::TaggedTransactionQueue + + sp_session::SessionKeys + + sp_api::ApiErrorExt + + sp_api::ApiExt, + TBl: BlockT, + TBackend: 'static + sc_client_api::backend::Backend + Send, + TImpQu: 'static + ImportQueue, + TExPool: MaintainedTransactionPool::Hash> + MallocSizeOfWasm + 'static, + TRpc: sc_rpc::RpcExtension { + let ServiceParams { + mut config, + client, + task_manager, + on_demand, + backend, + keystore, + import_queue, + finality_proof_request_builder, + finality_proof_provider, + transaction_pool, + rpc_extensions_builder, + remote_backend, + block_announce_validator_builder, + } = builder; - /// Set an ExecutionExtensionsFactory - pub fn with_execution_extensions_factory(self, execution_extensions_factory: Box) -> Result { - self.client.execution_extensions().set_extensions_factory(execution_extensions_factory); - Ok(self) - } + let chain_info = client.usage_info().chain; - fn build_common(self) -> Result, Error> { - let ServiceBuilder { - marker: _, - mut config, - client, - mut task_manager, - fetcher: on_demand, - backend, - keystore, - select_chain, - import_queue, - finality_proof_request_builder, - finality_proof_provider, - transaction_pool, - rpc_extensions_builder, - remote_backend, - block_announce_validator_builder, - } = self; - - let chain_info = client.usage_info().chain; - - sp_session::generate_initial_session_keys( - client.clone(), - &BlockId::Hash(chain_info.best_hash), - config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default(), - )?; - - info!("📦 Highest known block at #{}", chain_info.best_number); - telemetry!( - SUBSTRATE_INFO; - "node.start"; - "height" => chain_info.best_number.saturated_into::(), - "best" => ?chain_info.best_hash - ); + sp_session::generate_initial_session_keys( + client.clone(), + &BlockId::Hash(chain_info.best_hash), + config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default(), + )?; + + info!("📦 Highest known block at #{}", 
chain_info.best_number); + telemetry!( + SUBSTRATE_INFO; + "node.start"; + "height" => chain_info.best_number.saturated_into::(), + "best" => ?chain_info.best_hash + ); - let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc"); - - let (network, network_status_sinks, network_future) = build_network( - &config, client.clone(), transaction_pool.clone(), task_manager.spawn_handle(), - on_demand.clone(), block_announce_validator_builder, finality_proof_request_builder, - finality_proof_provider, system_rpc_rx, import_queue - )?; - - let spawn_handle = task_manager.spawn_handle(); - - // The network worker is responsible for gathering all network messages and processing - // them. This is quite a heavy task, and at the time of the writing of this comment it - // frequently happens that this future takes several seconds or in some situations - // even more than a minute until it has processed its entire queue. This is clearly an - // issue, and ideally we would like to fix the network future to take as little time as - // possible, but we also take the extra harm-prevention measure to execute the networking - // future using `spawn_blocking`. - spawn_handle.spawn_blocking( - "network-worker", - network_future - ); + let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc"); + + let (network, network_status_sinks, network_future) = build_network( + &config, client.clone(), transaction_pool.clone(), task_manager.spawn_handle(), + on_demand.clone(), block_announce_validator_builder, finality_proof_request_builder, + finality_proof_provider, system_rpc_rx, import_queue + )?; + + let spawn_handle = task_manager.spawn_handle(); + + // The network worker is responsible for gathering all network messages and processing + // them. 
This is quite a heavy task, and at the time of the writing of this comment it + // frequently happens that this future takes several seconds or in some situations + // even more than a minute until it has processed its entire queue. This is clearly an + // issue, and ideally we would like to fix the network future to take as little time as + // possible, but we also take the extra harm-prevention measure to execute the networking + // future using `spawn_blocking`. + spawn_handle.spawn_blocking("network-worker", network_future); + + let offchain_storage = backend.offchain_storage(); + let offchain_workers = match (config.offchain_worker.clone(), offchain_storage.clone()) { + (OffchainWorkerConfig {enabled: true, .. }, Some(db)) => { + Some(Arc::new(sc_offchain::OffchainWorkers::new(client.clone(), db))) + }, + (OffchainWorkerConfig {enabled: true, .. }, None) => { + warn!("Offchain workers disabled, due to lack of offchain storage support in backend."); + None + }, + _ => None, + }; - let offchain_storage = backend.offchain_storage(); - let offchain_workers = match (config.offchain_worker.clone(), offchain_storage.clone()) { - (OffchainWorkerConfig {enabled: true, .. }, Some(db)) => { - Some(Arc::new(sc_offchain::OffchainWorkers::new(client.clone(), db))) - }, - (OffchainWorkerConfig {enabled: true, .. }, None) => { - warn!("Offchain workers disabled, due to lack of offchain storage support in backend."); - None - }, - _ => None, - }; + // Inform the tx pool about imported and finalized blocks. + spawn_handle.spawn( + "txpool-notifications", + sc_transaction_pool::notification_future(client.clone(), transaction_pool.clone()), + ); - // Inform the tx pool about imported and finalized blocks. 
+ // Inform the offchain worker about new imported blocks + if let Some(offchain) = offchain_workers.clone() { spawn_handle.spawn( - "txpool-notifications", - sc_transaction_pool::notification_future(client.clone(), transaction_pool.clone()), + "offchain-notifications", + sc_offchain::notification_future( + config.role.is_authority(), + client.clone(), + offchain, + Clone::clone(&spawn_handle), + network.clone() + ) ); + } - // Inform the offchain worker about new imported blocks - if let Some(offchain) = offchain_workers.clone() { - spawn_handle.spawn( - "offchain-notifications", - sc_offchain::notification_future( - config.role.is_authority(), - client.clone(), - offchain, - task_manager.spawn_handle(), - network.clone() - ) - ); - } + spawn_handle.spawn( + "on-transaction-imported", + transaction_notifications(transaction_pool.clone(), network.clone()), + ); + // Prometheus metrics. + let metrics_service = if let Some(PrometheusConfig { port, registry }) = config.prometheus_config.clone() { + // Set static metrics. + let metrics = MetricsService::with_prometheus(®istry, &config)?; spawn_handle.spawn( - "on-transaction-imported", - transaction_notifications(transaction_pool.clone(), network.clone()), + "prometheus-endpoint", + prometheus_endpoint::init_prometheus(port, registry).map(drop) ); - // Prometheus metrics. - let metrics_service = if let Some(PrometheusConfig { port, registry }) = config.prometheus_config.clone() { - // Set static metrics. - let metrics = MetricsService::with_prometheus( - ®istry, - &config.network.node_name, - &config.impl_version, - &config.role, - )?; - spawn_handle.spawn( - "prometheus-endpoint", - prometheus_endpoint::init_prometheus(port, registry).map(drop) - ); + metrics + } else { + MetricsService::new() + }; - metrics - } else { - MetricsService::new() - }; + // Periodically notify the telemetry. 
+ spawn_handle.spawn("telemetry-periodic-send", telemetry_periodic_send( + client.clone(), transaction_pool.clone(), metrics_service, network_status_sinks.clone() + )); - // Periodically notify the telemetry. - spawn_handle.spawn("telemetry-periodic-send", telemetry_periodic_send( - client.clone(), transaction_pool.clone(), metrics_service, network_status_sinks.clone() - )); + // Periodically send the network state to the telemetry. + spawn_handle.spawn( + "telemetry-periodic-network-state", + telemetry_periodic_network_state(network_status_sinks.clone()), + ); - // Periodically send the network state to the telemetry. - spawn_handle.spawn( - "telemetry-periodic-network-state", - telemetry_periodic_network_state(network_status_sinks.clone()), - ); + // RPC + let gen_handler = |deny_unsafe: sc_rpc::DenyUnsafe| gen_handler( + deny_unsafe, &config, Clone::clone(&spawn_handle), client.clone(), transaction_pool.clone(), + keystore.clone(), on_demand.clone(), remote_backend.clone(), &*rpc_extensions_builder, + offchain_storage.clone(), system_rpc_tx.clone() + ); + let rpc = start_rpc_servers(&config, gen_handler)?; + // This is used internally, so don't restrict access to unsafe RPC + let rpc_handlers = Arc::new(RpcHandlers(gen_handler(sc_rpc::DenyUnsafe::No))); - // RPC - let gen_handler = |deny_unsafe: sc_rpc::DenyUnsafe| gen_handler( - deny_unsafe, &config, &task_manager, client.clone(), transaction_pool.clone(), - keystore.clone(), on_demand.clone(), remote_backend.clone(), &*rpc_extensions_builder, - offchain_storage.clone(), system_rpc_tx.clone() - ); - let rpc = start_rpc_servers(&config, gen_handler)?; - // This is used internally, so don't restrict access to unsafe RPC - let rpc_handlers = Arc::new(RpcHandlers(gen_handler(sc_rpc::DenyUnsafe::No))); - - let telemetry_connection_sinks: Arc>>> = Default::default(); - - // Telemetry - let telemetry = config.telemetry_endpoints.clone().map(|endpoints| { - let genesis_hash = match client.block_hash(Zero::zero()) { - 
Ok(Some(hash)) => hash, - _ => Default::default(), - }; - - let (telemetry, future) = build_telemetry( - &mut config, - endpoints, - telemetry_connection_sinks.clone(), - network.clone(), - genesis_hash, - ); + let telemetry_connection_sinks: Arc>>> = Default::default(); - spawn_handle.spawn( - "telemetry-worker", - future, - ); + // Telemetry + let telemetry = config.telemetry_endpoints.clone().map(|endpoints| { + let genesis_hash = match client.block_hash(Zero::zero()) { + Ok(Some(hash)) => hash, + _ => Default::default(), + }; - telemetry - }); + build_telemetry( + &mut config, endpoints, telemetry_connection_sinks.clone(), network.clone(), + Clone::clone(&spawn_handle), genesis_hash, + ) + }); - // Instrumentation - if let Some(tracing_targets) = config.tracing_targets.as_ref() { - let subscriber = sc_tracing::ProfilingSubscriber::new( - config.tracing_receiver, tracing_targets - ); - match tracing::subscriber::set_global_default(subscriber) { - Ok(_) => (), - Err(e) => error!(target: "tracing", "Unable to set global default subscriber {}", e), - } + // Instrumentation + if let Some(tracing_targets) = config.tracing_targets.as_ref() { + let subscriber = sc_tracing::ProfilingSubscriber::new( + config.tracing_receiver, tracing_targets + ); + match tracing::subscriber::set_global_default(subscriber) { + Ok(_) => (), + Err(e) => error!(target: "tracing", "Unable to set global default subscriber {}", e), } - - // Spawn informant task - spawn_handle.spawn("informant", sc_informant::build( - client.clone(), - network_status_sinks.clone(), - transaction_pool.clone(), - config.informant_output_format, - )); - - task_manager.keep_alive((telemetry, config.base_path, rpc, rpc_handlers.clone())); - - Ok(ServiceComponents { - client, - task_manager, - network, - select_chain, - transaction_pool, - rpc_handlers, - keystore, - offchain_workers, - telemetry_on_connect_sinks: TelemetryOnConnectSinks(telemetry_connection_sinks), - network_status_sinks: 
NetworkStatusSinks::new(network_status_sinks), - prometheus_registry: config.prometheus_config.map(|config| config.registry), - }) - } - - /// Builds the light service. - pub fn build_light(self) -> Result, Error> { - self.build_common() } -} -impl -ServiceBuilder< - TBl, - TRtApi, - TCl, - Arc>, - TSc, - TImpQu, - BoxFinalityProofRequestBuilder, - Arc>, - TExPool, - TRpc, - TBackend, -> where - TCl: ProvideRuntimeApi + HeaderMetadata + Chain + - BlockBackend + BlockIdTo + ProofProvider + - HeaderBackend + BlockchainEvents + ExecutorProvider + UsageProvider + - StorageProvider + CallApiAt + - Send + 'static, - >::Api: - sp_api::Metadata + - sc_offchain::OffchainWorkerApi + - sp_transaction_pool::runtime_api::TaggedTransactionQueue + - sp_session::SessionKeys + - sp_api::ApiErrorExt + - sp_api::ApiExt, - TBl: BlockT, - TRtApi: 'static + Send + Sync, - TBackend: 'static + sc_client_api::backend::Backend + Send, - TSc: Clone, - TImpQu: 'static + ImportQueue, - TExPool: MaintainedTransactionPool::Hash> + - LocalTransactionPool::Hash> + - MallocSizeOfWasm + - 'static, - TRpc: sc_rpc::RpcExtension, -{ - - /// Builds the full service. - pub fn build_full(self) -> Result, Error> { - // make transaction pool available for off-chain runtime calls. 
- self.client.execution_extensions() - .register_transaction_pool(Arc::downgrade(&self.transaction_pool) as _); - - self.build_common() - } + // Spawn informant task + spawn_handle.spawn("informant", sc_informant::build( + client.clone(), + network_status_sinks.clone(), + transaction_pool.clone(), + config.informant_output_format, + )); + + task_manager.keep_alive((telemetry, config.base_path, rpc, rpc_handlers.clone())); + + Ok(ServiceComponents { + network, + rpc_handlers, + offchain_workers, + telemetry_on_connect_sinks: TelemetryOnConnectSinks(telemetry_connection_sinks), + network_status_sinks: NetworkStatusSinks::new(network_status_sinks), + }) } async fn transaction_notifications( @@ -1198,8 +638,9 @@ fn build_telemetry( endpoints: sc_telemetry::TelemetryEndpoints, telemetry_connection_sinks: Arc>>>, network: Arc::Hash>>, + spawn_handle: SpawnTaskHandle, genesis_hash: ::Hash, -) -> (sc_telemetry::Telemetry, Pin + Send>>) { +) -> sc_telemetry::Telemetry { let is_authority = config.role.is_authority(); let network_id = network.local_peer_id().to_base58(); let name = config.network.node_name.clone(); @@ -1213,37 +654,40 @@ fn build_telemetry( let startup_time = SystemTime::UNIX_EPOCH.elapsed() .map(|dur| dur.as_millis()) .unwrap_or(0); - let future = telemetry.clone() - .for_each(move |event| { - // Safe-guard in case we add more events in the future. - let sc_telemetry::TelemetryEvent::Connected = event; - - telemetry!(SUBSTRATE_INFO; "system.connected"; - "name" => name.clone(), - "implementation" => impl_name.clone(), - "version" => version, - "config" => "", - "chain" => chain_name.clone(), - "genesis_hash" => ?genesis_hash, - "authority" => is_authority, - "startup_time" => startup_time, - "network_id" => network_id.clone() - ); + + spawn_handle.spawn( + "telemetry-worker", + telemetry.clone() + .for_each(move |event| { + // Safe-guard in case we add more events in the future. 
+ let sc_telemetry::TelemetryEvent::Connected = event; + + telemetry!(SUBSTRATE_INFO; "system.connected"; + "name" => name.clone(), + "implementation" => impl_name.clone(), + "version" => version, + "config" => "", + "chain" => chain_name.clone(), + "genesis_hash" => ?genesis_hash, + "authority" => is_authority, + "startup_time" => startup_time, + "network_id" => network_id.clone() + ); - telemetry_connection_sinks.lock().retain(|sink| { - sink.unbounded_send(()).is_ok() - }); - ready(()) - }) - .boxed(); + telemetry_connection_sinks.lock().retain(|sink| { + sink.unbounded_send(()).is_ok() + }); + ready(()) + }) + ); - (telemetry, future) + telemetry } fn gen_handler( deny_unsafe: sc_rpc::DenyUnsafe, config: &Configuration, - task_manager: &TaskManager, + spawn_handle: SpawnTaskHandle, client: Arc, transaction_pool: Arc, keystore: Arc>, @@ -1276,7 +720,7 @@ fn gen_handler( chain_type: config.chain_spec.chain_type(), }; - let subscriptions = SubscriptionManager::new(Arc::new(task_manager.spawn_handle())); + let subscriptions = SubscriptionManager::new(Arc::new(spawn_handle)); let (chain, state, child_state) = if let (Some(remote_backend), Some(on_demand)) = (remote_backend, on_demand) { diff --git a/client/service/src/config.rs b/client/service/src/config.rs index 5015ce7facc6f..ac65b74c49156 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -181,6 +181,11 @@ impl Configuration { pub fn display_role(&self) -> String { self.role.to_string() } + + /// Returns the prometheus metrics registry, if available. + pub fn prometheus_registry<'a>(&'a self) -> Option<&'a Registry> { + self.prometheus_config.as_ref().map(|config| &config.registry) + } } /// Available RPC methods. 
diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 1d41490956858..f6002dc05ccc4 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -53,9 +53,9 @@ use sp_utils::{status_sinks, mpsc::{tracing_unbounded, TracingUnboundedReceiver, pub use self::error::Error; pub use self::builder::{ - new_full_client, new_client, - ServiceBuilder, TFullClient, TLightClient, TFullBackend, TLightBackend, - TFullCallExecutor, TLightCallExecutor, RpcExtensionBuilder, + new_full_client, new_client, new_full_parts, new_light_parts, build_common, + ServiceParams, TFullClient, TLightClient, TFullBackend, TLightBackend, + TFullCallExecutor, TLightCallExecutor, RpcExtensionBuilder, NoopRpcExtensionBuilder, }; pub use config::{ BasePath, Configuration, DatabaseConfig, PruningMode, Role, RpcMethods, TaskExecutor, TaskType, @@ -151,25 +151,13 @@ impl TelemetryOnConnectSinks { /// The individual components of the chain, built by the service builder. You are encouraged to /// deconstruct this into its fields. -pub struct ServiceComponents, TSc, TExPool, TCl> { - /// A blockchain client. - pub client: Arc, - /// A shared transaction pool instance. - pub transaction_pool: Arc, - /// The chain task manager. - pub task_manager: TaskManager, - /// A keystore that stores keys. - pub keystore: sc_keystore::KeyStorePtr, +pub struct ServiceComponents, TCl> { /// A shared network instance. pub network: Arc::Hash>>, /// RPC handlers that can perform RPC queries. pub rpc_handlers: Arc, - /// A shared instance of the chain selection algorithm. - pub select_chain: Option, /// Sinks to propagate network status updates. pub network_status_sinks: NetworkStatusSinks, - /// A prometheus metrics registry, (if enabled). - pub prometheus_registry: Option, /// Shared Telemetry connection sinks, pub telemetry_on_connect_sinks: TelemetryOnConnectSinks, /// A shared offchain workers instance. 
diff --git a/client/service/src/metrics.rs b/client/service/src/metrics.rs index 232e9abdc1c97..1727aaae743aa 100644 --- a/client/service/src/metrics.rs +++ b/client/service/src/metrics.rs @@ -18,7 +18,7 @@ use std::{convert::TryFrom, time::SystemTime}; -use crate::NetworkStatus; +use crate::{NetworkStatus, config::Configuration}; use prometheus_endpoint::{register, Gauge, U64, F64, Registry, PrometheusError, Opts, GaugeVec}; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; use sp_runtime::traits::{NumberFor, Block, SaturatedConversion, UniqueSaturatedInto}; @@ -261,17 +261,17 @@ impl MetricsService { impl MetricsService { - pub fn with_prometheus(registry: &Registry, name: &str, version: &str, role: &Role) + pub fn with_prometheus(registry: &Registry, config: &Configuration) -> Result { - let role_bits = match role { + let role_bits = match config.role { Role::Full => 1u64, Role::Light => 2u64, Role::Sentry { .. } => 3u64, Role::Authority { .. } => 4u64, }; - PrometheusMetrics::setup(registry, name, version, role_bits).map(|p| { + PrometheusMetrics::setup(registry, &config.network.node_name, &config.impl_version, role_bits).map(|p| { Self::inner_new(Some(p)) }) } diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index ea8b4bf9dec81..95ac5746b653d 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -42,6 +42,7 @@ use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, NumberFor, AtLeast32Bit, Extrinsic, Zero}, }; +use sp_core::traits::SpawnNamed; use sp_transaction_pool::{ TransactionPool, PoolStatus, ImportNotificationStream, TxHash, TransactionFor, TransactionStatusStreamFor, MaintainedTransactionPool, PoolFuture, ChainEvent, @@ -152,18 +153,6 @@ impl BasicPool Block: BlockT, PoolApi: ChainApi + 'static, { - /// Create new basic transaction pool with provided api. - /// - /// It will also optionally return background task that might be started by the - /// caller. 
- pub fn new( - options: sc_transaction_graph::Options, - pool_api: Arc, - prometheus: Option<&PrometheusRegistry>, - ) -> (Self, Option + Send>>>) { - Self::with_revalidation_type(options, pool_api, prometheus, RevalidationType::Full) - } - /// Create new basic transaction pool with provided api, for tests. #[cfg(test)] pub fn new_test( @@ -186,14 +175,27 @@ impl BasicPool ) } + /// Create new basic transaction pool for a light node with the provided api. + pub fn new_light( + options: sc_transaction_graph::Options, + pool_api: Arc, + prometheus: Option<&PrometheusRegistry>, + spawner: impl SpawnNamed, + ) -> Self { + Self::with_revalidation_type( + options, pool_api, prometheus, RevalidationType::Light, spawner, + ) + } + /// Create new basic transaction pool with provided api and custom /// revalidation type. - pub fn with_revalidation_type( + fn with_revalidation_type( options: sc_transaction_graph::Options, pool_api: Arc, prometheus: Option<&PrometheusRegistry>, revalidation_type: RevalidationType, - ) -> (Self, Option + Send>>>) { + spawner: impl SpawnNamed, + ) -> Self { let pool = Arc::new(sc_transaction_graph::Pool::new(options, pool_api.clone())); let (revalidation_queue, background_task) = match revalidation_type { RevalidationType::Light => (revalidation::RevalidationQueue::new(pool_api.clone(), pool.clone()), None), @@ -203,22 +205,23 @@ impl BasicPool }, }; - ( - BasicPool { - api: pool_api, - pool, - revalidation_queue: Arc::new(revalidation_queue), - revalidation_strategy: Arc::new(Mutex::new( - match revalidation_type { - RevalidationType::Light => RevalidationStrategy::Light(RevalidationStatus::NotScheduled), - RevalidationType::Full => RevalidationStrategy::Always, - } - )), - ready_poll: Default::default(), - metrics: PrometheusMetrics::new(prometheus), - }, - background_task, - ) + if let Some(background_task) = background_task { + spawner.spawn("txpool-background", background_task); + } + + BasicPool { + api: pool_api, + pool, + 
revalidation_queue: Arc::new(revalidation_queue), + revalidation_strategy: Arc::new(Mutex::new( + match revalidation_type { + RevalidationType::Light => RevalidationStrategy::Light(RevalidationStatus::NotScheduled), + RevalidationType::Full => RevalidationStrategy::Always, + } + )), + ready_poll: Default::default(), + metrics: PrometheusMetrics::new(prometheus), + } } /// Gets shared reference to the underlying pool. @@ -352,6 +355,35 @@ impl TransactionPool for BasicPool } } +impl BasicPool, Block> +where + Block: BlockT, + Client: sp_api::ProvideRuntimeApi + + sc_client_api::BlockBackend + + sp_runtime::traits::BlockIdTo, + Client: sc_client_api::ExecutorProvider + Send + Sync + 'static, + Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue, + sp_api::ApiErrorFor: Send + std::fmt::Display, +{ + /// Create new basic transaction pool for a full node with the provided api. + pub fn new_full( + options: sc_transaction_graph::Options, + pool_api: Arc>, + prometheus: Option<&PrometheusRegistry>, + spawner: impl SpawnNamed, + client: Arc, + ) -> Arc { + let pool = Arc::new(Self::with_revalidation_type( + options, pool_api, prometheus, RevalidationType::Full, spawner + )); + + // make transaction pool available for off-chain runtime calls. + client.execution_extensions().register_transaction_pool(&pool); + + pool + } +} + impl sp_transaction_pool::LocalTransactionPool for BasicPool, Block> where diff --git a/primitives/core/src/tasks.rs b/primitives/core/src/tasks.rs index 9a181255ec4e0..731e51d2470c0 100644 --- a/primitives/core/src/tasks.rs +++ b/primitives/core/src/tasks.rs @@ -54,4 +54,4 @@ impl CloneableSpawn for Executor { /// Create tasks executor. 
pub fn executor() -> Box { Box::new(Executor::new()) -} \ No newline at end of file +} From e44d9883fb4f5d1490b60690619531c168b4db37 Mon Sep 17 00:00:00 2001 From: Ashley Ruglys Date: Fri, 3 Jul 2020 12:28:49 +0200 Subject: [PATCH 08/24] WIP --- client/api/src/in_mem.rs | 6 ++ .../basic-authorship/src/basic_authorship.rs | 52 +++++++++-------- client/consensus/manual-seal/src/lib.rs | 3 +- client/finality-grandpa/src/light_import.rs | 23 ++++---- client/finality-grandpa/src/tests.rs | 58 ++++++++++++------- client/offchain/src/lib.rs | 11 ++-- client/rpc/src/author/tests.rs | 7 ++- client/service/src/lib.rs | 7 ++- utils/frame/rpc/system/src/lib.rs | 52 +++++++++-------- 9 files changed, 128 insertions(+), 91 deletions(-) diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 1de2747eb4c76..9bfdcdd4d5aea 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -114,6 +114,12 @@ pub struct Blockchain { storage: Arc>>, } +impl Default for Blockchain { + fn default() -> Self { + Self::new() + } +} + impl Clone for Blockchain { fn clone(&self) -> Self { let storage = Arc::new(RwLock::new(self.storage.read().clone())); diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 383d0ea6fcad4..ad41d765183db 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -367,12 +367,13 @@ mod tests { fn should_cease_building_block_when_deadline_is_reached() { // given let client = Arc::new(substrate_test_runtime_client::new()); - let txpool = Arc::new( - BasicPool::new( - Default::default(), - Arc::new(FullChainApi::new(client.clone())), - None, - ).0 + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let txpool = BasicPool::new_full( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + spawner, + client.clone(), ); futures::executor::block_on( @@ -420,12 +421,13 @@ mod tests { #[test] fn 
should_not_panic_when_deadline_is_reached() { let client = Arc::new(substrate_test_runtime_client::new()); - let txpool = Arc::new( - BasicPool::new( - Default::default(), - Arc::new(FullChainApi::new(client.clone())), - None, - ).0 + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let txpool = BasicPool::new_full( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + spawner, + client.clone(), ); let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone(), None); @@ -455,12 +457,13 @@ mod tests { fn proposed_storage_changes_should_match_execute_block_storage_changes() { let (client, backend) = TestClientBuilder::new().build_with_backend(); let client = Arc::new(client); - let txpool = Arc::new( - BasicPool::new( - Default::default(), - Arc::new(FullChainApi::new(client.clone())), - None, - ).0 + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let txpool = BasicPool::new_full( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + spawner, + client.clone(), ); let genesis_hash = client.info().best_hash; @@ -517,12 +520,13 @@ mod tests { fn should_not_remove_invalid_transactions_when_skipping() { // given let mut client = Arc::new(substrate_test_runtime_client::new()); - let txpool = Arc::new( - BasicPool::new( - Default::default(), - Arc::new(FullChainApi::new(client.clone())), - None, - ).0 + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let txpool = BasicPool::new_full( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + spawner, + client.clone(), ); futures::executor::block_on( diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index 53cc57ba6e8f2..6a34fd904343e 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -223,7 +223,8 @@ mod tests { let (client, select_chain) = builder.build_with_longest_chain(); let client = 
Arc::new(client); let inherent_data_providers = InherentDataProviders::new(); - let pool = Arc::new(BasicPool::new(Options::default(), api(), None).0); + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let pool = BasicPool::new_full(Options::default(), api(), None, spawner, client.clone()); let env = ProposerFactory::new( client.clone(), pool.clone(), diff --git a/client/finality-grandpa/src/light_import.rs b/client/finality-grandpa/src/light_import.rs index 211f5565b9e13..874a7b9f458e8 100644 --- a/client/finality-grandpa/src/light_import.rs +++ b/client/finality-grandpa/src/light_import.rs @@ -673,14 +673,13 @@ pub mod tests { pub fn light_block_import_without_justifications( client: Arc, backend: Arc, - genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, authority_set_provider: Arc>, ) -> Result, ClientError> where BE: Backend + 'static, - Client: crate::ClientForGrandpa, + Client: crate::ClientForGrandpa + GenesisAuthoritySetProvider, { - light_block_import(client, backend, genesis_authorities_provider, authority_set_provider) + light_block_import(client, backend, authority_set_provider) .map(NoJustificationsImport) } @@ -784,23 +783,21 @@ pub mod tests { #[test] fn aux_data_updated_on_start() { let aux_store = InMemoryAuxStore::::new(); - let api = TestApi::new(vec![(AuthorityId::from_slice(&[1; 32]), 1)]); - // when aux store is empty initially assert!(aux_store.get_aux(LIGHT_AUTHORITY_SET_KEY).unwrap().is_none()); assert!(aux_store.get_aux(LIGHT_CONSENSUS_CHANGES_KEY).unwrap().is_none()); + let api = TestApi::new(vec![(AuthorityId::from_slice(&[1; 32]), 1)], aux_store); + // it is updated on importer start - load_aux_import_data(Default::default(), &aux_store, &api).unwrap(); - assert!(aux_store.get_aux(LIGHT_AUTHORITY_SET_KEY).unwrap().is_some()); - assert!(aux_store.get_aux(LIGHT_CONSENSUS_CHANGES_KEY).unwrap().is_some()); + load_aux_import_data(Default::default(), &api).unwrap(); + 
assert!(api.aux_store.get_aux(LIGHT_AUTHORITY_SET_KEY).unwrap().is_some()); + assert!(api.aux_store.get_aux(LIGHT_CONSENSUS_CHANGES_KEY).unwrap().is_some()); } #[test] fn aux_data_loaded_on_restart() { let aux_store = InMemoryAuxStore::::new(); - let api = TestApi::new(vec![(AuthorityId::from_slice(&[1; 32]), 1)]); - // when aux store is non-empty initially let mut consensus_changes = ConsensusChanges::::empty(); consensus_changes.note_change((42, Default::default())); @@ -820,8 +817,10 @@ pub mod tests { &[], ).unwrap(); + let api = TestApi::new(vec![(AuthorityId::from_slice(&[1; 32]), 1)], aux_store); + // importer uses it on start - let data = load_aux_import_data(Default::default(), &aux_store, &api).unwrap(); + let data = load_aux_import_data(Default::default(), &api).unwrap(); assert_eq!(data.authority_set.authorities(), vec![(AuthorityId::from_slice(&[42; 32]), 2)]); assert_eq!(data.consensus_changes.pending_changes(), &[(42, Default::default())]); } @@ -872,7 +871,7 @@ pub mod tests { ).unwrap(); // verify that new authorities set has been saved to the aux storage - let data = load_aux_import_data(Default::default(), &client, &TestApi::new(initial_set)).unwrap(); + let data = load_aux_import_data(Default::default(), &client).unwrap(); assert_eq!(data.authority_set.authorities(), updated_set); } } diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 50f9e8eba2357..ea00ca12bc7d3 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -30,6 +30,7 @@ use futures_timer::Delay; use tokio::runtime::{Runtime, Handle}; use sp_keyring::Ed25519Keyring; use sc_client_api::backend::TransactionFor; +use sc_client_api::in_mem::Blockchain as InMemoryAuxStore; use sp_blockchain::Result; use sp_api::{ApiRef, StorageProof, ProvideRuntimeApi}; use substrate_test_runtime_client::runtime::BlockNumber; @@ -122,7 +123,6 @@ impl TestNetFactory for GrandpaTestNet { PeersClient::Full(ref client, ref 
backend) => { let (import, link) = block_import( client.clone(), - &self.test_config, LongestChain::new(backend.clone()), ).expect("Could not create block import for fresh peer."); let justification_import = Box::new(import.clone()); @@ -143,7 +143,6 @@ impl TestNetFactory for GrandpaTestNet { let import = light_block_import_without_justifications( client.clone(), backend.clone(), - &self.test_config, authorities_provider, ).expect("Could not create block import for fresh peer."); let finality_proof_req_builder = import.0.create_finality_proof_request_builder(); @@ -187,12 +186,13 @@ impl TestNetFactory for GrandpaTestNet { #[derive(Default, Clone)] pub(crate) struct TestApi { genesis_authorities: AuthorityList, + pub aux_store: InMemoryAuxStore, } impl TestApi { - pub fn new(genesis_authorities: AuthorityList) -> Self { + pub fn new(genesis_authorities: AuthorityList, aux_store: InMemoryAuxStore) -> Self { TestApi { - genesis_authorities, + genesis_authorities, aux_store, } } } @@ -209,6 +209,22 @@ impl ProvideRuntimeApi for TestApi { } } +impl AuxStore for TestApi { + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >(&self, insert: I, delete: D) -> sp_blockchain::Result<()> { + self.aux_store.insert_aux(insert, delete) + } + + fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>> { + self.aux_store.get_aux(key) + } +} + sp_api::mock_impl_runtime_apis! 
{ impl GrandpaApi for RuntimeApi { type Error = sp_blockchain::Error; @@ -419,7 +435,7 @@ fn finalize_3_voters_no_observers() { let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(peers); - let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); + let mut net = GrandpaTestNet::new(TestApi::new(voters, InMemoryAuxStore::new()), 3); net.peer(0).push_blocks(20, false); net.block_until_sync(); @@ -445,7 +461,7 @@ fn finalize_3_voters_1_full_observer() { let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(peers); - let mut net = GrandpaTestNet::new(TestApi::new(voters), 4); + let mut net = GrandpaTestNet::new(TestApi::new(voters, InMemoryAuxStore::new()), 4); net.peer(0).push_blocks(20, false); net.block_until_sync(); @@ -542,7 +558,7 @@ fn transition_3_voters_twice_1_full_observer() { let genesis_voters = make_ids(peers_a); - let api = TestApi::new(genesis_voters); + let api = TestApi::new(genesis_voters, InMemoryAuxStore::new()); let net = Arc::new(Mutex::new(GrandpaTestNet::new(api, 8))); let mut runtime = Runtime::new().unwrap(); @@ -681,7 +697,7 @@ fn transition_3_voters_twice_1_full_observer() { fn justification_is_emitted_when_consensus_data_changes() { let mut runtime = Runtime::new().unwrap(); let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers)), 3); + let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers), InMemoryAuxStore::new()), 3); // import block#1 WITH consensus data change let new_authorities = vec![sp_consensus_babe::AuthorityId::from_slice(&[42; 32])]; @@ -701,7 +717,7 @@ fn justification_is_generated_periodically() { let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(peers); - let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); + let mut net = 
GrandpaTestNet::new(TestApi::new(voters, InMemoryAuxStore::new()), 3); net.peer(0).push_blocks(32, false); net.block_until_sync(); @@ -742,7 +758,7 @@ fn sync_justifications_on_change_blocks() { let voters = make_ids(peers_b); // 4 peers, 3 of them are authorities and participate in grandpa - let api = TestApi::new(voters); + let api = TestApi::new(voters, InMemoryAuxStore::new()); let mut net = GrandpaTestNet::new(api, 4); // add 20 blocks @@ -803,7 +819,7 @@ fn finalizes_multiple_pending_changes_in_order() { let genesis_voters = make_ids(peers_a); // 6 peers, 3 of them are authorities and participate in grandpa from genesis - let api = TestApi::new(genesis_voters); + let api = TestApi::new(genesis_voters, InMemoryAuxStore::new()); let mut net = GrandpaTestNet::new(api, 6); // add 20 blocks @@ -860,7 +876,7 @@ fn force_change_to_new_set() { Ed25519Keyring::Two, ]; let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - let api = TestApi::new(make_ids(genesis_authorities)); + let api = TestApi::new(make_ids(genesis_authorities), InMemoryAuxStore::new()); let voters = make_ids(peers_a); let net = GrandpaTestNet::new(api, 3); @@ -909,7 +925,7 @@ fn allows_reimporting_change_blocks() { let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let peers_b = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; let voters = make_ids(peers_a); - let api = TestApi::new(voters); + let api = TestApi::new(voters, InMemoryAuxStore::new()); let mut net = GrandpaTestNet::new(api.clone(), 3); let client = net.peer(0).client().clone(); @@ -959,7 +975,7 @@ fn test_bad_justification() { let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let peers_b = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; let voters = make_ids(peers_a); - let api = TestApi::new(voters); + let api = TestApi::new(voters, InMemoryAuxStore::new()); let mut net = GrandpaTestNet::new(api.clone(), 3); let client = 
net.peer(0).client().clone(); @@ -1020,7 +1036,7 @@ fn voter_persists_its_votes() { let voters = make_ids(peers); // alice has a chain with 20 blocks - let mut net = GrandpaTestNet::new(TestApi::new(voters.clone()), 2); + let mut net = GrandpaTestNet::new(TestApi::new(voters.clone(), InMemoryAuxStore::new()), 2); net.peer(0).push_blocks(20, false); net.block_until_sync(); @@ -1272,7 +1288,7 @@ fn finalize_3_voters_1_light_observer() { let authorities = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(authorities); - let mut net = GrandpaTestNet::new(TestApi::new(voters), 4); + let mut net = GrandpaTestNet::new(TestApi::new(voters, InMemoryAuxStore::new()), 4); net.peer(0).push_blocks(20, false); net.block_until_sync(); @@ -1316,7 +1332,7 @@ fn finality_proof_is_fetched_by_light_client_when_consensus_data_changes() { let mut runtime = Runtime::new().unwrap(); let peers = &[Ed25519Keyring::Alice]; - let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers)), 1); + let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers), InMemoryAuxStore::new()), 1); net.add_light_peer(); // import block#1 WITH consensus data change. 
Light client ignores justification @@ -1362,7 +1378,7 @@ fn empty_finality_proof_is_returned_to_light_client_when_authority_set_is_differ ] }; let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - let api = TestApi::new(make_ids(&genesis_authorities)); + let api = TestApi::new(make_ids(&genesis_authorities), InMemoryAuxStore::new()); let voters = make_ids(peers_a); let net = GrandpaTestNet::new(api, 3); @@ -1412,7 +1428,7 @@ fn voter_catches_up_to_latest_round_when_behind() { let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; let voters = make_ids(peers); - let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); + let mut net = GrandpaTestNet::new(TestApi::new(voters, InMemoryAuxStore::new()), 3); net.peer(0).push_blocks(50, false); net.block_until_sync(); @@ -1527,7 +1543,7 @@ fn grandpa_environment_respects_voting_rules() { let peers = &[Ed25519Keyring::Alice]; let voters = make_ids(peers); - let mut net = GrandpaTestNet::new(TestApi::new(voters), 1); + let mut net = GrandpaTestNet::new(TestApi::new(voters, InMemoryAuxStore::new()), 1); let peer = net.peer(0); let network_service = peer.network_service().clone(); let link = peer.data.lock().take().unwrap(); @@ -1655,7 +1671,7 @@ fn imports_justification_for_regular_blocks_on_import() { // existing justification otherwise. 
let peers = &[Ed25519Keyring::Alice]; let voters = make_ids(peers); - let api = TestApi::new(voters); + let api = TestApi::new(voters, InMemoryAuxStore::new()); let mut net = GrandpaTestNet::new(api.clone(), 1); let client = net.peer(0).client().clone(); diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index 7c90065746aa3..b6285993a2f42 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -227,7 +227,7 @@ mod tests { } struct TestPool( - BasicPool, Block> + Arc, Block>> ); impl sp_transaction_pool::OffchainSubmitTransaction for TestPool { @@ -248,13 +248,14 @@ mod tests { let _ = env_logger::try_init(); let client = Arc::new(substrate_test_runtime_client::new()); - let pool = Arc::new(TestPool(BasicPool::new( + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let pool = TestPool(BasicPool::new_full( Default::default(), Arc::new(FullChainApi::new(client.clone())), None, - ).0)); - client.execution_extensions() - .register_transaction_pool(Arc::downgrade(&pool.clone()) as _); + spawner, + client.clone(), + )); let db = sc_client_db::offchain::LocalStorage::new_test(); let network_state = Arc::new(MockNetworkStateInfo()); let header = client.header(&BlockId::number(0)).unwrap().unwrap(); diff --git a/client/rpc/src/author/tests.rs b/client/rpc/src/author/tests.rs index f2f4ddebb2f1d..06fede952b5be 100644 --- a/client/rpc/src/author/tests.rs +++ b/client/rpc/src/author/tests.rs @@ -61,11 +61,14 @@ impl Default for TestSetup { let client_builder = substrate_test_runtime_client::TestClientBuilder::new(); let client = Arc::new(client_builder.set_keystore(keystore.clone()).build()); - let pool = Arc::new(BasicPool::new( + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let pool = BasicPool::new_full( Default::default(), Arc::new(FullChainApi::new(client.clone())), None, - ).0); + spawner, + client.clone(), + ); TestSetup { client, keystore, diff --git a/client/service/src/lib.rs 
b/client/service/src/lib.rs index 838e95b5877f6..cb0e9428ed2ca 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -545,11 +545,14 @@ mod tests { // given let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); let client = Arc::new(client); - let pool = Arc::new(BasicPool::new( + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let pool = BasicPool::new_full( Default::default(), Arc::new(FullChainApi::new(client.clone())), None, - ).0); + spawner, + client.clone(), + ); let source = sp_runtime::transaction_validity::TransactionSource::External; let best = longest_chain.best_chain().unwrap(); let transaction = Transfer { diff --git a/utils/frame/rpc/system/src/lib.rs b/utils/frame/rpc/system/src/lib.rs index 6927f05b4f05b..4a0dfca8b51e8 100644 --- a/utils/frame/rpc/system/src/lib.rs +++ b/utils/frame/rpc/system/src/lib.rs @@ -298,12 +298,13 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); - let pool = Arc::new( - BasicPool::new( - Default::default(), - Arc::new(FullChainApi::new(client.clone())), - None, - ).0 + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let pool = BasicPool::new_full( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + spawner, + client.clone(), ); let source = sp_runtime::transaction_validity::TransactionSource::External; @@ -337,12 +338,13 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); - let pool = Arc::new( - BasicPool::new( - Default::default(), - Arc::new(FullChainApi::new(client.clone())), - None, - ).0 + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let pool = BasicPool::new_full( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + spawner, + client.clone(), ); let accounts = FullSystem::new(client, pool, DenyUnsafe::Yes); @@ -360,12 +362,13 @@ mod tests { // given let client = 
Arc::new(substrate_test_runtime_client::new()); - let pool = Arc::new( - BasicPool::new( - Default::default(), - Arc::new(FullChainApi::new(client.clone())), - None, - ).0 + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let pool = BasicPool::new_full( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + spawner, + client.clone(), ); let accounts = FullSystem::new(client, pool, DenyUnsafe::No); @@ -392,12 +395,13 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); - let pool = Arc::new( - BasicPool::new( - Default::default(), - Arc::new(FullChainApi::new(client.clone())), - None, - ).0 + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let pool = BasicPool::new_full( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + spawner, + client.clone(), ); let accounts = FullSystem::new(client, pool, DenyUnsafe::No); From 2d757287c6d8a36d02624a6d04de36625280ad19 Mon Sep 17 00:00:00 2001 From: Ashley Ruglys Date: Fri, 3 Jul 2020 15:00:44 +0200 Subject: [PATCH 09/24] Get rid of the macros --- Cargo.lock | 2 + bin/node-template/node/src/command.rs | 5 +- bin/node-template/node/src/service.rs | 176 +++++++-------- bin/node/cli/src/command.rs | 5 +- bin/node/cli/src/service.rs | 201 +++++++++--------- bin/node/rpc/Cargo.toml | 1 + bin/node/rpc/src/lib.rs | 3 + client/service/src/builder.rs | 10 +- client/service/src/lib.rs | 2 + primitives/consensus/common/Cargo.toml | 1 + .../consensus/common/src/import_queue.rs | 4 +- 11 files changed, 214 insertions(+), 196 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b5bbcd6954611..6997137640ee8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3563,6 +3563,7 @@ dependencies = [ "sc-finality-grandpa", "sc-finality-grandpa-rpc", "sc-keystore", + "sc-rpc", "sc-rpc-api", "sp-api", "sp-block-builder", @@ -7540,6 +7541,7 @@ dependencies = [ "sp-state-machine", "sp-std", "sp-test-primitives", + "sp-trie", "sp-utils", 
"sp-version", "substrate-prometheus-endpoint", diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index e453d0641c5d0..0e12ca5fa9354 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -19,6 +19,8 @@ use crate::chain_spec; use crate::cli::Cli; use crate::service; use sc_cli::{SubstrateCli, RuntimeVersion, Role, ChainSpec}; +use sc_service::ServiceParams; +use crate::service::new_full_params; impl SubstrateCli for Cli { fn impl_name() -> String { @@ -68,7 +70,8 @@ pub fn run() -> sc_cli::Result<()> { Some(subcommand) => { let runner = cli.create_runner(subcommand)?; runner.run_subcommand(subcommand, |config| { - let (client, backend, _, task_manager, .., import_queue) = new_full_up_to_import_queue!(&config); + let (ServiceParams { client, backend, task_manager, import_queue, .. }, ..) + = new_full_params(config)?; Ok((client, backend, import_queue, task_manager)) }) } diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index c64872ff2818f..53ba2c658984e 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use std::time::Duration; use sc_client_api::{ExecutorProvider, RemoteBackend}; -use node_template_runtime::{self, opaque::Block, RuntimeApi}; +use node_template_runtime::{self, Block, RuntimeApi}; use sc_service::{error::Error as ServiceError, Configuration, ServiceComponents, TaskManager}; use sp_inherents::InherentDataProviders; use sc_executor::native_executor_instance; @@ -12,6 +12,7 @@ use sp_consensus_aura::sr25519::{AuthorityPair as AuraPair}; use sc_finality_grandpa::{ FinalityProofProvider as GrandpaFinalityProofProvider, StorageAndProofProvider, SharedVoterState, }; +use sp_consensus::import_queue::DefaultQueue; // Our native executor instance. 
native_executor_instance!( @@ -20,97 +21,107 @@ native_executor_instance!( node_template_runtime::native_version, ); -macro_rules! new_full_up_to_import_queue { - ($config:expr) => {{ - use std::sync::Arc; - use node_template_runtime::{Block, RuntimeApi}; - use crate::service::Executor; - use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; - - let inherent_data_providers = sp_inherents::InherentDataProviders::new(); - - let (client, backend, keystore, task_manager) = sc_service::new_full_parts::(&$config)?; - let client = Arc::new(client); - - let select_chain = sc_consensus::LongestChain::new(backend.clone()); - - let pool_api = sc_transaction_pool::FullChainApi::new(client.clone()); - let transaction_pool = sc_transaction_pool::BasicPool::new_full( - $config.transaction_pool.clone(), - std::sync::Arc::new(pool_api), - $config.prometheus_registry(), - task_manager.spawn_handle(), - client.clone(), - ); - - let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import( - client.clone(), &(client.clone() as Arc<_>), select_chain.clone(), - )?; - - let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new( - grandpa_block_import.clone(), client.clone(), - ); +type FullClient = sc_service::TFullClient; +type FullBackend = sc_service::TFullBackend; +type GrandpaBlockImport = sc_finality_grandpa::GrandpaBlockImport< + FullBackend, Block, FullClient, SelectChain +>; +type SelectChain = sc_consensus::LongestChain; +type GrandpaLink = sc_finality_grandpa::LinkHalf; +type FullPool = sc_transaction_pool::BasicPool< + sc_transaction_pool::FullChainApi, Block +>; + +pub fn new_full_params(config: Configuration) -> Result<( + sc_service::ServiceParams< + Block, FullClient, DefaultQueue, FullPool, (), FullBackend, + >, + SelectChain, sp_inherents::InherentDataProviders, GrandpaBlockImport, GrandpaLink +), ServiceError> { + let inherent_data_providers = sp_inherents::InherentDataProviders::new(); + + let (client, backend, keystore, 
task_manager) = sc_service::new_full_parts::(&config)?; + let client = Arc::new(client); + + let select_chain = sc_consensus::LongestChain::new(backend.clone()); + + let pool_api = sc_transaction_pool::FullChainApi::new(client.clone()); + let transaction_pool = sc_transaction_pool::BasicPool::new_full( + config.transaction_pool.clone(), + std::sync::Arc::new(pool_api), + config.prometheus_registry(), + task_manager.spawn_handle(), + client.clone(), + ); - let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _>( - sc_consensus_aura::slot_duration(&*client)?, - aura_block_import, - Some(Box::new(grandpa_block_import.clone())), - None, - client.clone(), - inherent_data_providers.clone(), - &task_manager.spawn_handle(), - $config.prometheus_registry(), - )?; + let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import( + client.clone(), &(client.clone() as Arc<_>), select_chain.clone(), + )?; - ( - client, backend, keystore, task_manager, inherent_data_providers, select_chain, - transaction_pool, grandpa_block_import, grandpa_link, import_queue - ) - }} -} + let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new( + grandpa_block_import.clone(), client.clone(), + ); -/// Builds a new service for a full client. 
-pub fn new_full(config: Configuration) -> Result { - let ( - client, backend, keystore, mut task_manager, inherent_data_providers, select_chain, - transaction_pool, block_import, grandpa_link, import_queue - ) = new_full_up_to_import_queue!(&config); + let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _>( + sc_consensus_aura::slot_duration(&*client)?, + aura_block_import, + Some(Box::new(grandpa_block_import.clone())), + None, + client.clone(), + inherent_data_providers.clone(), + &task_manager.spawn_handle(), + config.prometheus_registry(), + )?; let provider = client.clone() as Arc>; let finality_proof_provider = Arc::new(GrandpaFinalityProofProvider::new(backend.clone(), provider)); - let ( - role, - force_authoring, - name, - enable_grandpa, - prometheus_registry, - ) = ( - config.role.clone(), - config.force_authoring, - config.network.node_name.clone(), - !config.disable_grandpa, - config.prometheus_registry().cloned(), - ); - - let ServiceComponents { - network, - telemetry_on_connect_sinks, .. - } = sc_service::build(sc_service::ServiceParams { + let params = sc_service::ServiceParams { + backend, client, import_queue, keystore, task_manager, transaction_pool, config: config, - backend: backend.clone(), - client: client.clone(), block_announce_validator_builder: None, finality_proof_request_builder: None, finality_proof_provider: Some(finality_proof_provider), on_demand: None, - import_queue: import_queue, - keystore: keystore.clone(), - task_manager: &mut task_manager, remote_blockchain: None, rpc_extensions_builder: Box::new(|_| ()), - transaction_pool: transaction_pool.clone(), - })?; + }; + + Ok(( + params, select_chain, inherent_data_providers, + grandpa_block_import, grandpa_link, + )) +} + +/// Builds a new service for a full client. 
+pub fn new_full(config: Configuration) -> Result { + let ( + params, select_chain, inherent_data_providers, + block_import, grandpa_link, + ) = new_full_params(config)?; + + let ( + role, force_authoring, name, enable_grandpa, prometheus_registry, + client, transaction_pool, keystore, + ) = { + let sc_service::ServiceParams { + config, client, transaction_pool, keystore, .. + } = ¶ms; + + ( + config.role.clone(), + config.force_authoring, + config.network.node_name.clone(), + !config.disable_grandpa, + config.prometheus_registry().cloned(), + + client.clone(), transaction_pool.clone(), keystore.clone(), + ) + }; + + let ServiceComponents { + task_manager, network, telemetry_on_connect_sinks, .. + } = sc_service::build(params)?; if role.is_authority() { let proposer = sc_basic_authorship::ProposerFactory::new( @@ -195,7 +206,7 @@ pub fn new_full(config: Configuration) -> Result { /// Builds a new service for a light client. pub fn new_light(config: Configuration) -> Result { - let (client, backend, keystore, mut task_manager, on_demand) = + let (client, backend, keystore, task_manager, on_demand) = sc_service::new_light_parts::(&config)?; let transaction_pool_api = Arc::new(sc_transaction_pool::LightChainApi::new( @@ -236,12 +247,9 @@ pub fn new_light(config: Configuration) -> Result { finality_proof_request_builder: Some(finality_proof_request_builder), finality_proof_provider: Some(finality_proof_provider), on_demand: Some(on_demand), - task_manager: &mut task_manager, remote_blockchain: Some(backend.remote_blockchain()), rpc_extensions_builder: Box::new(|_| ()), transaction_pool: Arc::new(transaction_pool), - config, client, import_queue, keystore, backend, - })?; - - Ok(task_manager) + config, client, import_queue, keystore, backend, task_manager + }).map(|ServiceComponents { task_manager, .. 
}| task_manager) } diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index 128dcb12a9662..7615aef3d261f 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -20,6 +20,8 @@ use crate::{chain_spec, service, Cli, Subcommand}; use node_executor::Executor; use node_runtime::{Block, RuntimeApi}; use sc_cli::{Result, SubstrateCli, RuntimeVersion, Role, ChainSpec}; +use sc_service::ServiceParams; +use crate::service::new_full_params; impl SubstrateCli for Cli { fn impl_name() -> String { @@ -94,7 +96,8 @@ pub fn run() -> Result<()> { Some(Subcommand::Base(subcommand)) => { let runner = cli.create_runner(subcommand)?; runner.run_subcommand(subcommand, |config| { - let (client, backend, _, task_manager, .., import_queue) = new_full_up_to_import_queue!(&config); + let (ServiceParams { client, backend, import_queue, task_manager, .. }, ..) + = new_full_params(config)?; Ok((client, backend, import_queue, task_manager)) }) } diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 0270286e53e6d..c954134c29ae9 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -38,86 +38,59 @@ use sp_runtime::traits::Block as BlockT; use futures::prelude::*; use sc_client_api::{ExecutorProvider, RemoteBackend}; use sp_core::traits::BareCryptoStorePtr; +use sp_consensus::import_queue::DefaultQueue; + +pub fn new_full_params(config: Configuration) -> Result<( + sc_service::ServiceParams< + Block, FullClient, DefaultQueue, FullPool, node_rpc::IoHandler, FullBackend + >, + (BabeBlockImport, GrandpaLink, BabeLink), + grandpa::SharedVoterState, + sc_consensus::LongestChain, + InherentDataProviders +), ServiceError> { + use node_executor::Executor; -/// Starts a `ServiceBuilder` for a full service. -/// -/// Use this macro if you don't actually need the full service, but just the builder in order to -/// be able to perform chain operations. -macro_rules! 
new_full_up_to_import_queue { - ($config:expr) => {{ - use std::sync::Arc; - use node_executor::Executor; - - let (client, backend, keystore, task_manager) = sc_service::new_full_parts::(&$config)?; - let client = Arc::new(client); - - let select_chain = sc_consensus::LongestChain::new(backend.clone()); - - let pool_api = sc_transaction_pool::FullChainApi::new(client.clone()); - let transaction_pool = sc_transaction_pool::BasicPool::new_full( - $config.transaction_pool.clone(), - std::sync::Arc::new(pool_api), - $config.prometheus_registry(), - task_manager.spawn_handle(), - client.clone(), - ); + let (client, backend, keystore, task_manager) = + sc_service::new_full_parts::(&config)?; + let client = Arc::new(client); - let (grandpa_block_import, grandpa_link) = grandpa::block_import( - client.clone(), &(client.clone() as Arc<_>), select_chain.clone(), - )?; - let justification_import = grandpa_block_import.clone(); + let select_chain = sc_consensus::LongestChain::new(backend.clone()); - let (block_import, babe_link) = sc_consensus_babe::block_import( - sc_consensus_babe::Config::get_or_compute(&*client)?, - grandpa_block_import, - client.clone(), - )?; - - let inherent_data_providers = sp_inherents::InherentDataProviders::new(); + let pool_api = sc_transaction_pool::FullChainApi::new(client.clone()); + let transaction_pool = sc_transaction_pool::BasicPool::new_full( + config.transaction_pool.clone(), + std::sync::Arc::new(pool_api), + config.prometheus_registry(), + task_manager.spawn_handle(), + client.clone(), + ); - let import_queue = sc_consensus_babe::import_queue( - babe_link.clone(), - block_import.clone(), - Some(Box::new(justification_import)), - None, - client.clone(), - inherent_data_providers.clone(), - &task_manager.spawn_handle(), - $config.prometheus_registry(), - )?; + let (grandpa_block_import, grandpa_link) = grandpa::block_import( + client.clone(), &(client.clone() as Arc<_>), select_chain.clone(), + )?; + let justification_import = 
grandpa_block_import.clone(); - let import_setup = (block_import, grandpa_link, babe_link); + let (block_import, babe_link) = sc_consensus_babe::block_import( + sc_consensus_babe::Config::get_or_compute(&*client)?, + grandpa_block_import, + client.clone(), + )?; - ( - client, backend, keystore, task_manager, - select_chain, transaction_pool, inherent_data_providers, - import_setup, import_queue, - ) - }} -} + let inherent_data_providers = sp_inherents::InherentDataProviders::new(); -type FullClient = sc_service::TFullClient; -type FullBackend = sc_service::TFullBackend; -type GrandpaBlockImport = grandpa::GrandpaBlockImport< - FullBackend, Block, FullClient, sc_consensus::LongestChain ->; -type BabeBlockImport = sc_consensus_babe::BabeBlockImport; + let import_queue = sc_consensus_babe::import_queue( + babe_link.clone(), + block_import.clone(), + Some(Box::new(justification_import)), + None, + client.clone(), + inherent_data_providers.clone(), + &task_manager.spawn_handle(), + config.prometheus_registry(), + )?; -/// Creates a full service from the configuration. 
-pub fn new_full_base( - config: Configuration, - with_startup_data: impl FnOnce(&BabeBlockImport, &sc_consensus_babe::BabeLink) -) -> Result<( - TaskManager, - InherentDataProviders, - Arc, Arc::Hash>>, - Arc, Block>> -), ServiceError> { - let ( - client, backend, keystore, mut task_manager, - select_chain, transaction_pool, inherent_data_providers, - import_setup, import_queue, - ) = new_full_up_to_import_queue!(&config); + let import_setup = (block_import, grandpa_link, babe_link); let (rpc_extensions_builder, rpc_setup) = { let (_, grandpa_link, babe_link) = &import_setup; @@ -161,38 +134,61 @@ pub fn new_full_base( let provider = client.clone() as Arc>; let finality_proof_provider = Arc::new(grandpa::FinalityProofProvider::new(backend.clone(), provider)) as _; - let ( - role, - force_authoring, - name, - enable_grandpa, - prometheus_registry, - ) = ( - config.role.clone(), - config.force_authoring, - config.network.node_name.clone(), - !config.disable_grandpa, - config.prometheus_registry().cloned(), - ); - - let ServiceComponents { - network, - telemetry_on_connect_sinks, .. 
- } = sc_service::build(sc_service::ServiceParams { - config, - backend: backend.clone(), - client: client.clone(), + let params = sc_service::ServiceParams { + config, backend, client, import_queue, keystore, task_manager, rpc_extensions_builder, + transaction_pool, block_announce_validator_builder: None, finality_proof_request_builder: None, finality_proof_provider: Some(finality_proof_provider), on_demand: None, - import_queue: import_queue, - keystore: keystore.clone(), - task_manager: &mut task_manager, remote_blockchain: None, - rpc_extensions_builder, - transaction_pool: transaction_pool.clone(), - })?; + }; + + Ok((params, import_setup, rpc_setup, select_chain, inherent_data_providers)) +} + +type FullClient = sc_service::TFullClient; +type FullBackend = sc_service::TFullBackend; +type GrandpaBlockImport = grandpa::GrandpaBlockImport; +type SelectChain = sc_consensus::LongestChain; +type GrandpaLink = grandpa::LinkHalf; +type BabeLink = sc_consensus_babe::BabeLink; +type BabeBlockImport = sc_consensus_babe::BabeBlockImport; +type FullPool = sc_transaction_pool::BasicPool, Block>; + +/// Creates a full service from the configuration. +pub fn new_full_base( + config: Configuration, + with_startup_data: impl FnOnce(&BabeBlockImport, &BabeLink) +) -> Result<( + TaskManager, InherentDataProviders, Arc, + Arc::Hash>>, Arc +), ServiceError> { + let (params, import_setup, rpc_setup, select_chain, inherent_data_providers) + = new_full_params(config)?; + + let ( + role, force_authoring, name, enable_grandpa, prometheus_registry, + client, transaction_pool, keystore, + ) = { + let sc_service::ServiceParams { + config, client, transaction_pool, keystore, .. 
+ } = ¶ms; + + ( + config.role.clone(), + config.force_authoring, + config.network.node_name.clone(), + !config.disable_grandpa, + config.prometheus_registry().cloned(), + + client.clone(), transaction_pool.clone(), keystore.clone(), + ) + }; + + let ServiceComponents { + task_manager, network, telemetry_on_connect_sinks, .. + } = sc_service::build(params)?; let (block_import, grandpa_link, babe_link) = import_setup; let shared_voter_state = rpc_setup; @@ -330,7 +326,7 @@ pub fn new_light_base(config: Configuration) -> Result<( sc_transaction_pool::LightChainApi, Block >> ), ServiceError> { - let (client, backend, keystore, mut task_manager, on_demand) = + let (client, backend, keystore, task_manager, on_demand) = sc_service::new_light_parts::(&config)?; let transaction_pool_api = Arc::new(sc_transaction_pool::LightChainApi::new( @@ -385,17 +381,16 @@ pub fn new_light_base(config: Configuration) -> Result<( let rpc_extensions = node_rpc::create_light(light_deps); - let ServiceComponents { rpc_handlers, network, .. } = sc_service::build(sc_service::ServiceParams { + let ServiceComponents { task_manager, rpc_handlers, network, .. 
} = sc_service::build(sc_service::ServiceParams { block_announce_validator_builder: None, finality_proof_request_builder: Some(finality_proof_request_builder), finality_proof_provider: Some(finality_proof_provider), on_demand: Some(on_demand), - task_manager: &mut task_manager, remote_blockchain: Some(backend.remote_blockchain()), rpc_extensions_builder: Box::new(sc_service::NoopRpcExtensionBuilder(rpc_extensions)), client: client.clone(), transaction_pool: transaction_pool.clone(), - config, import_queue, keystore, backend, + config, import_queue, keystore, backend, task_manager, })?; Ok((task_manager, rpc_handlers, client, network, transaction_pool)) diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index 95d55fab6402c..43523103a596c 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -12,6 +12,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sc-client-api = { version = "2.0.0-rc4", path = "../../../client/api" } +sc-rpc = { version = "2.0.0-rc4", path = "../../../client/rpc" } jsonrpc-core = "14.2.0" node-primitives = { version = "2.0.0-rc4", path = "../primitives" } node-runtime = { version = "2.0.0-rc4", path = "../runtime" } diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index 9b6b5991748f9..142e7fb124b47 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -94,6 +94,9 @@ pub struct FullDeps { pub grandpa: GrandpaDeps, } +/// A IO handler that uses all Full RPC extensions. +pub type IoHandler = jsonrpc_core::IoHandler; + /// Instantiate all Full RPC extensions. pub fn create_full( deps: FullDeps, diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 9b0bbac69f086..34e06e775d3a7 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -358,7 +358,7 @@ pub fn new_client( } /// Parameters to pass into `build`. 
-pub struct ServiceParams<'a, TBl: BlockT, TCl, TImpQu, TExPool, TRpc, Backend> { +pub struct ServiceParams { /// The service configuration. pub config: Configuration, /// A shared client returned by `new_full_parts`/`new_light_parts`. @@ -366,7 +366,7 @@ pub struct ServiceParams<'a, TBl: BlockT, TCl, TImpQu, TExPool, TRpc, Backend> { /// A shared backend returned by `new_full_parts`/`new_light_parts`. pub backend: Arc, /// A task manager returned by `new_full_parts`/`new_light_parts`. - pub task_manager: &'a mut TaskManager, + pub task_manager: TaskManager, /// A shared keystore returned by `new_full_parts`/`new_light_parts`. pub keystore: Arc>, /// An optional, shared data fetcher for light clients. @@ -413,8 +413,8 @@ pub fn build( { let ServiceParams { mut config, + mut task_manager, client, - task_manager, on_demand, backend, keystore, @@ -571,9 +571,7 @@ pub fn build( task_manager.keep_alive((telemetry, config.base_path, rpc, rpc_handlers.clone())); Ok(ServiceComponents { - network, - rpc_handlers, - offchain_workers, + task_manager, network, rpc_handlers, offchain_workers, telemetry_on_connect_sinks: TelemetryOnConnectSinks(telemetry_connection_sinks), network_status_sinks: NetworkStatusSinks::new(network_status_sinks), }) diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index f55d0fb56e29f..02714e1907ad7 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -152,6 +152,8 @@ impl TelemetryOnConnectSinks { /// The individual components of the chain, built by the service builder. You are encouraged to /// deconstruct this into its fields. pub struct ServiceComponents, TCl> { + /// The chain task manager. + pub task_manager: TaskManager, /// A shared network instance. pub network: Arc::Hash>>, /// RPC handlers that can perform RPC queries. 
diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index 39c47545c2b08..1c41d25ec70e4 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -26,6 +26,7 @@ sp-std = { version = "2.0.0-rc4", path = "../../std" } sp-version = { version = "2.0.0-rc4", path = "../../version" } sp-runtime = { version = "2.0.0-rc4", path = "../../runtime" } sp-utils = { version = "2.0.0-rc4", path = "../../utils" } +sp-trie = { version = "2.0.0-rc4", path = "../../trie" } codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } parking_lot = "0.10.0" serde = { version = "1.0", features = ["derive"] } diff --git a/primitives/consensus/common/src/import_queue.rs b/primitives/consensus/common/src/import_queue.rs index 94228a266385f..ae8880e809ff6 100644 --- a/primitives/consensus/common/src/import_queue.rs +++ b/primitives/consensus/common/src/import_queue.rs @@ -28,7 +28,7 @@ use std::collections::HashMap; -use sp_runtime::{Justification, traits::{Block as BlockT, Header as _, NumberFor}}; +use sp_runtime::{Justification, traits::{Block as BlockT, Header as _, NumberFor, BlakeTwo256}}; use crate::{ error::Error as ConsensusError, @@ -43,6 +43,8 @@ pub use basic_queue::BasicQueue; mod basic_queue; pub mod buffered_link; +pub type DefaultQueue = BasicQueue>; + /// Shared block import struct used by the queue. 
pub type BoxBlockImport = Box< dyn BlockImport + Send + Sync From 2be524b09ee747b901842ebf817422dba618a05e Mon Sep 17 00:00:00 2001 From: Ashley Ruglys Date: Thu, 9 Jul 2020 08:52:14 +0200 Subject: [PATCH 10/24] Simplify a few chain components creation APIs related to the service --- Cargo.lock | 1 + bin/node-template/node/src/service.rs | 10 ++- bin/node/cli/src/service.rs | 14 +-- client/api/src/execution_extensions.rs | 6 +- client/api/src/in_mem.rs | 6 ++ .../basic-authorship/src/basic_authorship.rs | 52 ++++++----- client/consensus/manual-seal/src/lib.rs | 20 +++-- client/finality-grandpa/src/lib.rs | 5 +- client/offchain/src/lib.rs | 12 +-- client/rpc/src/author/tests.rs | 7 +- client/service/src/builder.rs | 28 ++---- client/service/src/config.rs | 5 ++ client/service/src/lib.rs | 7 +- client/service/src/metrics.rs | 8 +- client/transaction-pool/src/lib.rs | 90 +++++++++++++------ primitives/consensus/common/Cargo.toml | 1 + .../consensus/common/src/import_queue.rs | 4 +- primitives/core/src/tasks.rs | 2 +- utils/frame/rpc/system/src/lib.rs | 52 ++++++----- 19 files changed, 196 insertions(+), 134 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c0c734533d591..906d3526ec0e7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7639,6 +7639,7 @@ dependencies = [ "sp-state-machine", "sp-std", "sp-test-primitives", + "sp-trie", "sp-utils", "sp-version", "substrate-prometheus-endpoint", diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 89bf159927fc6..0a5d3c0f4a278 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -48,10 +48,12 @@ macro_rules! 
new_full_start { let pool_api = sc_transaction_pool::FullChainApi::new( builder.client().clone(), ); - Ok(sc_transaction_pool::BasicPool::new( + Ok(sc_transaction_pool::BasicPool::new_full( builder.config().transaction_pool.clone(), std::sync::Arc::new(pool_api), builder.prometheus_registry(), + builder.spawn_handle(), + builder.client().clone(), )) })? .with_import_queue(| @@ -220,12 +222,12 @@ pub fn new_light(config: Configuration) -> Result { builder.client().clone(), fetcher.clone(), ); - let pool = sc_transaction_pool::BasicPool::with_revalidation_type( + let pool = Arc::new(sc_transaction_pool::BasicPool::new_light( builder.config().transaction_pool.clone(), Arc::new(pool_api), builder.prometheus_registry(), - sc_transaction_pool::RevalidationType::Light, - ); + builder.spawn_handle(), + )); Ok(pool) })? .with_import_queue_and_fprb(| diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 632092cdaa188..718b794a26111 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -62,12 +62,12 @@ macro_rules! new_full_start { let pool_api = sc_transaction_pool::FullChainApi::new( builder.client().clone(), ); - let config = builder.config(); - - Ok(sc_transaction_pool::BasicPool::new( - config.transaction_pool.clone(), + Ok(sc_transaction_pool::BasicPool::new_full( + builder.config().transaction_pool.clone(), std::sync::Arc::new(pool_api), builder.prometheus_registry(), + builder.spawn_handle(), + builder.client().clone(), )) })? .with_import_queue(| @@ -355,12 +355,12 @@ pub fn new_light_base(config: Configuration) -> Result<( builder.client().clone(), fetcher, ); - let pool = sc_transaction_pool::BasicPool::with_revalidation_type( + let pool = Arc::new(sc_transaction_pool::BasicPool::new_light( builder.config().transaction_pool.clone(), Arc::new(pool_api), builder.prometheus_registry(), - sc_transaction_pool::RevalidationType::Light, - ); + builder.spawn_handle(), + )); Ok(pool) })? 
.with_import_queue_and_fprb(| diff --git a/client/api/src/execution_extensions.rs b/client/api/src/execution_extensions.rs index 55ffc3794c4ea..b89885cc5c4dc 100644 --- a/client/api/src/execution_extensions.rs +++ b/client/api/src/execution_extensions.rs @@ -126,8 +126,10 @@ impl ExecutionExtensions { /// extension to be a `Weak` reference. /// That's also the reason why it's being registered lazily instead of /// during initialization. - pub fn register_transaction_pool(&self, pool: Weak>) { - *self.transaction_pool.write() = Some(pool); + pub fn register_transaction_pool(&self, pool: &Arc) + where T: sp_transaction_pool::OffchainSubmitTransaction + 'static + { + *self.transaction_pool.write() = Some(Arc::downgrade(&pool) as _); } /// Create `ExecutionManager` and `Extensions` for given offchain call. diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 1de2747eb4c76..9bfdcdd4d5aea 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -114,6 +114,12 @@ pub struct Blockchain { storage: Arc>>, } +impl Default for Blockchain { + fn default() -> Self { + Self::new() + } +} + impl Clone for Blockchain { fn clone(&self) -> Self { let storage = Arc::new(RwLock::new(self.storage.read().clone())); diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 7343b13c04031..6fc72448a89f4 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -358,12 +358,13 @@ mod tests { fn should_cease_building_block_when_deadline_is_reached() { // given let client = Arc::new(substrate_test_runtime_client::new()); - let txpool = Arc::new( - BasicPool::new( - Default::default(), - Arc::new(FullChainApi::new(client.clone())), - None, - ).0 + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let txpool = BasicPool::new_full( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + spawner, + 
client.clone(), ); futures::executor::block_on( @@ -411,12 +412,13 @@ mod tests { #[test] fn should_not_panic_when_deadline_is_reached() { let client = Arc::new(substrate_test_runtime_client::new()); - let txpool = Arc::new( - BasicPool::new( - Default::default(), - Arc::new(FullChainApi::new(client.clone())), - None, - ).0 + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let txpool = BasicPool::new_full( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + spawner, + client.clone(), ); let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone(), None); @@ -446,12 +448,13 @@ mod tests { fn proposed_storage_changes_should_match_execute_block_storage_changes() { let (client, backend) = TestClientBuilder::new().build_with_backend(); let client = Arc::new(client); - let txpool = Arc::new( - BasicPool::new( - Default::default(), - Arc::new(FullChainApi::new(client.clone())), - None, - ).0 + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let txpool = BasicPool::new_full( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + spawner, + client.clone(), ); let genesis_hash = client.info().best_hash; @@ -508,12 +511,13 @@ mod tests { fn should_not_remove_invalid_transactions_when_skipping() { // given let mut client = Arc::new(substrate_test_runtime_client::new()); - let txpool = Arc::new( - BasicPool::new( - Default::default(), - Arc::new(FullChainApi::new(client.clone())), - None, - ).0 + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let txpool = BasicPool::new_full( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + spawner, + client.clone(), ); futures::executor::block_on( diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index 53cc57ba6e8f2..77fb5043c5dd6 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -200,10 +200,7 @@ mod 
tests { AccountKeyring::*, TestClientBuilder, }; - use sc_transaction_pool::{ - BasicPool, - txpool::Options, - }; + use sc_transaction_pool::{BasicPool, RevalidationType, txpool::Options}; use substrate_test_runtime_transaction_pool::{TestApi, uxt}; use sp_transaction_pool::{TransactionPool, MaintainedTransactionPool, TransactionSource}; use sp_runtime::generic::BlockId; @@ -223,7 +220,10 @@ mod tests { let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); let inherent_data_providers = InherentDataProviders::new(); - let pool = Arc::new(BasicPool::new(Options::default(), api(), None).0); + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let pool = Arc::new(BasicPool::with_revalidation_type( + Options::default(), api(), None, RevalidationType::Full, spawner, + )); let env = ProposerFactory::new( client.clone(), pool.clone(), @@ -288,7 +288,10 @@ mod tests { let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); let inherent_data_providers = InherentDataProviders::new(); - let pool = Arc::new(BasicPool::new(Options::default(), api(), None).0); + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let pool = Arc::new(BasicPool::with_revalidation_type( + Options::default(), api(), None, RevalidationType::Full, spawner, + )); let env = ProposerFactory::new( client.clone(), pool.clone(), @@ -357,7 +360,10 @@ mod tests { let client = Arc::new(client); let inherent_data_providers = InherentDataProviders::new(); let pool_api = api(); - let pool = Arc::new(BasicPool::new(Options::default(), pool_api.clone(), None).0); + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let pool = Arc::new(BasicPool::with_revalidation_type( + Options::default(), pool_api.clone(), None, RevalidationType::Full, spawner, + )); let env = ProposerFactory::new( client.clone(), pool.clone(), diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs 
index fa2a6fedd8b05..7d74d0eebfc48 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -84,7 +84,8 @@ use sc_telemetry::{telemetry, CONSENSUS_INFO, CONSENSUS_DEBUG}; use parking_lot::RwLock; use finality_grandpa::Error as GrandpaError; -use finality_grandpa::{voter, BlockNumberOps, voter_set::VoterSet}; +use finality_grandpa::{voter, voter_set::VoterSet}; +pub use finality_grandpa::BlockNumberOps; use std::{fmt, io}; use std::sync::Arc; @@ -126,7 +127,7 @@ pub use authorities::SharedAuthoritySet; pub use finality_proof::{FinalityProofProvider, StorageAndProofProvider}; pub use import::GrandpaBlockImport; pub use justification::GrandpaJustification; -pub use light_import::light_block_import; +pub use light_import::{light_block_import, GrandpaLightBlockImport}; pub use voting_rule::{ BeforeBestBlockBy, ThreeQuartersOfTheUnfinalizedChain, VotingRule, VotingRulesBuilder }; diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index 7c90065746aa3..dcf142c005edb 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -212,7 +212,6 @@ mod tests { use substrate_test_runtime_client::{TestClient, runtime::Block}; use sc_transaction_pool::{BasicPool, FullChainApi}; use sp_transaction_pool::{TransactionPool, InPoolTransaction}; - use sc_client_api::ExecutorProvider; struct MockNetworkStateInfo(); @@ -227,7 +226,7 @@ mod tests { } struct TestPool( - BasicPool, Block> + Arc, Block>> ); impl sp_transaction_pool::OffchainSubmitTransaction for TestPool { @@ -248,13 +247,14 @@ mod tests { let _ = env_logger::try_init(); let client = Arc::new(substrate_test_runtime_client::new()); - let pool = Arc::new(TestPool(BasicPool::new( + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let pool = TestPool(BasicPool::new_full( Default::default(), Arc::new(FullChainApi::new(client.clone())), None, - ).0)); - client.execution_extensions() - .register_transaction_pool(Arc::downgrade(&pool.clone()) as _); + 
spawner, + client.clone(), + )); let db = sc_client_db::offchain::LocalStorage::new_test(); let network_state = Arc::new(MockNetworkStateInfo()); let header = client.header(&BlockId::number(0)).unwrap().unwrap(); diff --git a/client/rpc/src/author/tests.rs b/client/rpc/src/author/tests.rs index f2f4ddebb2f1d..06fede952b5be 100644 --- a/client/rpc/src/author/tests.rs +++ b/client/rpc/src/author/tests.rs @@ -61,11 +61,14 @@ impl Default for TestSetup { let client_builder = substrate_test_runtime_client::TestClientBuilder::new(); let client = Arc::new(client_builder.set_keystore(keystore.clone()).build()); - let pool = Arc::new(BasicPool::new( + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let pool = BasicPool::new_full( Default::default(), Arc::new(FullChainApi::new(client.clone())), None, - ).0); + spawner, + client.clone(), + ); TestSetup { client, keystore, diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 1585298d98bd6..356162d863a76 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -66,8 +66,6 @@ use sc_client_api::{ use sp_blockchain::{HeaderMetadata, HeaderBackend}; use crate::{ServiceComponents, TelemetryOnConnectSinks, RpcHandlers, NetworkStatusSinks}; -pub type BackgroundTask = Pin + Send>>; - /// Aggregator for the components required to build a service. /// /// # Usage @@ -518,6 +516,11 @@ impl self.remote_backend.clone() } + /// Returns a spawn handle created by the task manager. + pub fn spawn_handle(&self) -> SpawnTaskHandle { + self.task_manager.spawn_handle() + } + /// Consume the builder and return the parts needed for chain operations. 
pub fn to_chain_ops_parts(self) -> (Arc, Arc, TImpQu, TaskManager) { (self.client, self.backend, self.import_queue, self.task_manager) @@ -728,15 +731,11 @@ impl self, transaction_pool_builder: impl FnOnce( &Self, - ) -> Result<(UExPool, Option), Error>, + ) -> Result, Error>, ) -> Result, Error> where TSc: Clone, TFchr: Clone { - let (transaction_pool, background_task) = transaction_pool_builder(&self)?; - - if let Some(background_task) = background_task{ - self.task_manager.spawn_handle().spawn("txpool-background", background_task); - } + let transaction_pool = transaction_pool_builder(&self)?; Ok(ServiceBuilder { config: self.config, @@ -749,7 +748,7 @@ impl import_queue: self.import_queue, finality_proof_request_builder: self.finality_proof_request_builder, finality_proof_provider: self.finality_proof_provider, - transaction_pool: Arc::new(transaction_pool), + transaction_pool: transaction_pool, rpc_extensions_builder: self.rpc_extensions_builder, remote_backend: self.remote_backend, block_announce_validator_builder: self.block_announce_validator_builder, @@ -978,12 +977,7 @@ ServiceBuilder< // Prometheus metrics. let metrics_service = if let Some(PrometheusConfig { port, registry }) = config.prometheus_config.clone() { // Set static metrics. - let metrics = MetricsService::with_prometheus( - ®istry, - &config.network.node_name, - &config.impl_version, - &config.role, - )?; + let metrics = MetricsService::with_prometheus(®istry, &config)?; spawn_handle.spawn( "prometheus-endpoint", prometheus_endpoint::init_prometheus(port, registry).map(drop) @@ -1122,10 +1116,6 @@ ServiceBuilder< /// Builds the full service. pub fn build_full(self) -> Result, Error> { - // make transaction pool available for off-chain runtime calls. 
- self.client.execution_extensions() - .register_transaction_pool(Arc::downgrade(&self.transaction_pool) as _); - self.build_common() } } diff --git a/client/service/src/config.rs b/client/service/src/config.rs index f3080005a6cf3..397dacd747b14 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -181,6 +181,11 @@ impl Configuration { pub fn display_role(&self) -> String { self.role.to_string() } + + /// Returns the prometheus metrics registry, if available. + pub fn prometheus_registry<'a>(&'a self) -> Option<&'a Registry> { + self.prometheus_config.as_ref().map(|config| &config.registry) + } } /// Available RPC methods. diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 2c09591fc7d65..b26f036907096 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -577,11 +577,14 @@ mod tests { // given let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); let client = Arc::new(client); - let pool = Arc::new(BasicPool::new( + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let pool = BasicPool::new_full( Default::default(), Arc::new(FullChainApi::new(client.clone())), None, - ).0); + spawner, + client.clone(), + ); let source = sp_runtime::transaction_validity::TransactionSource::External; let best = longest_chain.best_chain().unwrap(); let transaction = Transfer { diff --git a/client/service/src/metrics.rs b/client/service/src/metrics.rs index 232e9abdc1c97..1727aaae743aa 100644 --- a/client/service/src/metrics.rs +++ b/client/service/src/metrics.rs @@ -18,7 +18,7 @@ use std::{convert::TryFrom, time::SystemTime}; -use crate::NetworkStatus; +use crate::{NetworkStatus, config::Configuration}; use prometheus_endpoint::{register, Gauge, U64, F64, Registry, PrometheusError, Opts, GaugeVec}; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; use sp_runtime::traits::{NumberFor, Block, SaturatedConversion, UniqueSaturatedInto}; @@ -261,17 +261,17 @@ impl MetricsService { 
impl MetricsService { - pub fn with_prometheus(registry: &Registry, name: &str, version: &str, role: &Role) + pub fn with_prometheus(registry: &Registry, config: &Configuration) -> Result { - let role_bits = match role { + let role_bits = match config.role { Role::Full => 1u64, Role::Light => 2u64, Role::Sentry { .. } => 3u64, Role::Authority { .. } => 4u64, }; - PrometheusMetrics::setup(registry, name, version, role_bits).map(|p| { + PrometheusMetrics::setup(registry, &config.network.node_name, &config.impl_version, role_bits).map(|p| { Self::inner_new(Some(p)) }) } diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index ea8b4bf9dec81..6cd0b3e7cc9c4 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -42,6 +42,7 @@ use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, NumberFor, AtLeast32Bit, Extrinsic, Zero}, }; +use sp_core::traits::SpawnNamed; use sp_transaction_pool::{ TransactionPool, PoolStatus, ImportNotificationStream, TxHash, TransactionFor, TransactionStatusStreamFor, MaintainedTransactionPool, PoolFuture, ChainEvent, @@ -152,18 +153,6 @@ impl BasicPool Block: BlockT, PoolApi: ChainApi + 'static, { - /// Create new basic transaction pool with provided api. - /// - /// It will also optionally return background task that might be started by the - /// caller. - pub fn new( - options: sc_transaction_graph::Options, - pool_api: Arc, - prometheus: Option<&PrometheusRegistry>, - ) -> (Self, Option + Send>>>) { - Self::with_revalidation_type(options, pool_api, prometheus, RevalidationType::Full) - } - /// Create new basic transaction pool with provided api, for tests. #[cfg(test)] pub fn new_test( @@ -186,6 +175,18 @@ impl BasicPool ) } + /// Create new basic transaction pool for a light node with the provided api. 
+ pub fn new_light( + options: sc_transaction_graph::Options, + pool_api: Arc, + prometheus: Option<&PrometheusRegistry>, + spawner: impl SpawnNamed, + ) -> Self { + Self::with_revalidation_type( + options, pool_api, prometheus, RevalidationType::Light, spawner, + ) + } + /// Create new basic transaction pool with provided api and custom /// revalidation type. pub fn with_revalidation_type( @@ -193,7 +194,8 @@ impl BasicPool pool_api: Arc, prometheus: Option<&PrometheusRegistry>, revalidation_type: RevalidationType, - ) -> (Self, Option + Send>>>) { + spawner: impl SpawnNamed, + ) -> Self { let pool = Arc::new(sc_transaction_graph::Pool::new(options, pool_api.clone())); let (revalidation_queue, background_task) = match revalidation_type { RevalidationType::Light => (revalidation::RevalidationQueue::new(pool_api.clone(), pool.clone()), None), @@ -203,22 +205,23 @@ impl BasicPool }, }; - ( - BasicPool { - api: pool_api, - pool, - revalidation_queue: Arc::new(revalidation_queue), - revalidation_strategy: Arc::new(Mutex::new( - match revalidation_type { - RevalidationType::Light => RevalidationStrategy::Light(RevalidationStatus::NotScheduled), - RevalidationType::Full => RevalidationStrategy::Always, - } - )), - ready_poll: Default::default(), - metrics: PrometheusMetrics::new(prometheus), - }, - background_task, - ) + if let Some(background_task) = background_task { + spawner.spawn("txpool-background", background_task); + } + + BasicPool { + api: pool_api, + pool, + revalidation_queue: Arc::new(revalidation_queue), + revalidation_strategy: Arc::new(Mutex::new( + match revalidation_type { + RevalidationType::Light => RevalidationStrategy::Light(RevalidationStatus::NotScheduled), + RevalidationType::Full => RevalidationStrategy::Always, + } + )), + ready_poll: Default::default(), + metrics: PrometheusMetrics::new(prometheus), + } } /// Gets shared reference to the underlying pool. 
@@ -352,6 +355,35 @@ impl TransactionPool for BasicPool } } +impl BasicPool, Block> +where + Block: BlockT, + Client: sp_api::ProvideRuntimeApi + + sc_client_api::BlockBackend + + sp_runtime::traits::BlockIdTo, + Client: sc_client_api::ExecutorProvider + Send + Sync + 'static, + Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue, + sp_api::ApiErrorFor: Send + std::fmt::Display, +{ + /// Create new basic transaction pool for a full node with the provided api. + pub fn new_full( + options: sc_transaction_graph::Options, + pool_api: Arc>, + prometheus: Option<&PrometheusRegistry>, + spawner: impl SpawnNamed, + client: Arc, + ) -> Arc { + let pool = Arc::new(Self::with_revalidation_type( + options, pool_api, prometheus, RevalidationType::Full, spawner + )); + + // make transaction pool available for off-chain runtime calls. + client.execution_extensions().register_transaction_pool(&pool); + + pool + } +} + impl sp_transaction_pool::LocalTransactionPool for BasicPool, Block> where diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index 39c47545c2b08..1c41d25ec70e4 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -26,6 +26,7 @@ sp-std = { version = "2.0.0-rc4", path = "../../std" } sp-version = { version = "2.0.0-rc4", path = "../../version" } sp-runtime = { version = "2.0.0-rc4", path = "../../runtime" } sp-utils = { version = "2.0.0-rc4", path = "../../utils" } +sp-trie = { version = "2.0.0-rc4", path = "../../trie" } codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } parking_lot = "0.10.0" serde = { version = "1.0", features = ["derive"] } diff --git a/primitives/consensus/common/src/import_queue.rs b/primitives/consensus/common/src/import_queue.rs index 94228a266385f..ae8880e809ff6 100644 --- a/primitives/consensus/common/src/import_queue.rs +++ b/primitives/consensus/common/src/import_queue.rs @@ -28,7 +28,7 @@ use 
std::collections::HashMap; -use sp_runtime::{Justification, traits::{Block as BlockT, Header as _, NumberFor}}; +use sp_runtime::{Justification, traits::{Block as BlockT, Header as _, NumberFor, BlakeTwo256}}; use crate::{ error::Error as ConsensusError, @@ -43,6 +43,8 @@ pub use basic_queue::BasicQueue; mod basic_queue; pub mod buffered_link; +pub type DefaultQueue = BasicQueue>; + /// Shared block import struct used by the queue. pub type BoxBlockImport = Box< dyn BlockImport + Send + Sync diff --git a/primitives/core/src/tasks.rs b/primitives/core/src/tasks.rs index 9a181255ec4e0..731e51d2470c0 100644 --- a/primitives/core/src/tasks.rs +++ b/primitives/core/src/tasks.rs @@ -54,4 +54,4 @@ impl CloneableSpawn for Executor { /// Create tasks executor. pub fn executor() -> Box { Box::new(Executor::new()) -} \ No newline at end of file +} diff --git a/utils/frame/rpc/system/src/lib.rs b/utils/frame/rpc/system/src/lib.rs index 6927f05b4f05b..4a0dfca8b51e8 100644 --- a/utils/frame/rpc/system/src/lib.rs +++ b/utils/frame/rpc/system/src/lib.rs @@ -298,12 +298,13 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); - let pool = Arc::new( - BasicPool::new( - Default::default(), - Arc::new(FullChainApi::new(client.clone())), - None, - ).0 + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let pool = BasicPool::new_full( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + spawner, + client.clone(), ); let source = sp_runtime::transaction_validity::TransactionSource::External; @@ -337,12 +338,13 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); - let pool = Arc::new( - BasicPool::new( - Default::default(), - Arc::new(FullChainApi::new(client.clone())), - None, - ).0 + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let pool = BasicPool::new_full( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + spawner, + client.clone(), ); 
let accounts = FullSystem::new(client, pool, DenyUnsafe::Yes); @@ -360,12 +362,13 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); - let pool = Arc::new( - BasicPool::new( - Default::default(), - Arc::new(FullChainApi::new(client.clone())), - None, - ).0 + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let pool = BasicPool::new_full( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + spawner, + client.clone(), ); let accounts = FullSystem::new(client, pool, DenyUnsafe::No); @@ -392,12 +395,13 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); - let pool = Arc::new( - BasicPool::new( - Default::default(), - Arc::new(FullChainApi::new(client.clone())), - None, - ).0 + let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let pool = BasicPool::new_full( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + spawner, + client.clone(), ); let accounts = FullSystem::new(client, pool, DenyUnsafe::No); From b452e1710e9db93cd75a5a788068e816784637b5 Mon Sep 17 00:00:00 2001 From: Ashley Ruglys Date: Thu, 9 Jul 2020 11:55:47 +0200 Subject: [PATCH 11/24] Fix basic-authorship doc tests --- client/basic-authorship/src/lib.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index bc51037277612..68356d0a28ff8 100644 --- a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -31,10 +31,13 @@ //! # }; //! # use sc_transaction_pool::{BasicPool, FullChainApi}; //! # let client = Arc::new(substrate_test_runtime_client::new()); -//! # let txpool = Arc::new(BasicPool::new( +//! # let spawner = sp_core::testing::SpawnBlockingExecutor::new(); +//! # let txpool = BasicPool::new_full( //! # Default::default(), //! # Arc::new(FullChainApi::new(client.clone(), None)), -//! # None).0, +//! # None, +//! # spawner, +//! 
# client.clone(), //! # ); //! // The first step is to create a `ProposerFactory`. //! let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone(), None); From cf49f81194c30c6c4ecb3eb14787e87c2ce3883b Mon Sep 17 00:00:00 2001 From: Ashley Ruglys Date: Thu, 9 Jul 2020 12:19:04 +0200 Subject: [PATCH 12/24] Remove DefaultQueue --- primitives/consensus/common/src/import_queue.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/primitives/consensus/common/src/import_queue.rs b/primitives/consensus/common/src/import_queue.rs index ae8880e809ff6..21e8d976e0449 100644 --- a/primitives/consensus/common/src/import_queue.rs +++ b/primitives/consensus/common/src/import_queue.rs @@ -43,8 +43,6 @@ pub use basic_queue::BasicQueue; mod basic_queue; pub mod buffered_link; -pub type DefaultQueue = BasicQueue>; - /// Shared block import struct used by the queue. pub type BoxBlockImport = Box< dyn BlockImport + Send + Sync From 6c3b9ce4e362b0e594ca0c9e52c6f97971ad83ba Mon Sep 17 00:00:00 2001 From: Ashley Date: Thu, 9 Jul 2020 12:23:34 +0200 Subject: [PATCH 13/24] Update client/service/src/builder.rs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> --- client/service/src/builder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 356162d863a76..49d2d61f9c2bb 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -516,7 +516,7 @@ impl self.remote_backend.clone() } - /// Returns a spawn handle created by the task manager. + /// Returns a spawn handle created by the task manager. 
pub fn spawn_handle(&self) -> SpawnTaskHandle { self.task_manager.spawn_handle() } From fbb48b95143a220892176e447ad56c9d090d0b3b Mon Sep 17 00:00:00 2001 From: Ashley Ruglys Date: Thu, 9 Jul 2020 12:25:45 +0200 Subject: [PATCH 14/24] Move ExecutionExtensions comment around --- client/api/src/execution_extensions.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/client/api/src/execution_extensions.rs b/client/api/src/execution_extensions.rs index b89885cc5c4dc..4f2ddb77e6653 100644 --- a/client/api/src/execution_extensions.rs +++ b/client/api/src/execution_extensions.rs @@ -84,6 +84,10 @@ pub struct ExecutionExtensions { keystore: Option, // FIXME: these two are only RwLock because of https://github.com/paritytech/substrate/issues/4587 // remove when fixed. + // To break retain cycle between `Client` and `TransactionPool` we require this + // extension to be a `Weak` reference. + // That's also the reason why it's being registered lazily instead of + // during initialization. transaction_pool: RwLock>>>, extensions_factory: RwLock>, } @@ -121,11 +125,6 @@ impl ExecutionExtensions { } /// Register transaction pool extension. - /// - /// To break retain cycle between `Client` and `TransactionPool` we require this - /// extension to be a `Weak` reference. - /// That's also the reason why it's being registered lazily instead of - /// during initialization. 
pub fn register_transaction_pool(&self, pool: &Arc) where T: sp_transaction_pool::OffchainSubmitTransaction + 'static { From 6f170f07dc370d20aa9324b653deca9bafe3a54e Mon Sep 17 00:00:00 2001 From: Ashley Ruglys Date: Thu, 9 Jul 2020 12:49:20 +0200 Subject: [PATCH 15/24] Remove unused BlakeTwo256 --- primitives/consensus/common/src/import_queue.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/consensus/common/src/import_queue.rs b/primitives/consensus/common/src/import_queue.rs index 21e8d976e0449..94228a266385f 100644 --- a/primitives/consensus/common/src/import_queue.rs +++ b/primitives/consensus/common/src/import_queue.rs @@ -28,7 +28,7 @@ use std::collections::HashMap; -use sp_runtime::{Justification, traits::{Block as BlockT, Header as _, NumberFor, BlakeTwo256}}; +use sp_runtime::{Justification, traits::{Block as BlockT, Header as _, NumberFor}}; use crate::{ error::Error as ConsensusError, From c0dc11dd55b3f43988bccd56db4e0c3d7509adee Mon Sep 17 00:00:00 2001 From: Ashley Ruglys Date: Fri, 10 Jul 2020 17:09:06 +0200 Subject: [PATCH 16/24] Add sc-prelude --- Cargo.lock | 14 ++++++ Cargo.toml | 1 + bin/node-template/node/Cargo.toml | 1 + bin/node-template/node/src/service.rs | 22 ++++----- bin/node/cli/Cargo.toml | 1 + bin/node/cli/src/service.rs | 25 +++++------ client/prelude/Cargo.toml | 20 +++++++++ client/prelude/src/lib.rs | 64 +++++++++++++++++++++++++++ 8 files changed, 121 insertions(+), 27 deletions(-) create mode 100644 client/prelude/Cargo.toml create mode 100644 client/prelude/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index d9ba76498b927..fde8452e18837 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3535,6 +3535,7 @@ dependencies = [ "sc-keystore", "sc-network", "sc-offchain", + "sc-prelude", "sc-rpc", "sc-service", "sc-service-test", @@ -3754,6 +3755,7 @@ dependencies = [ "sc-executor", "sc-finality-grandpa", "sc-network", + "sc-prelude", "sc-service", "sc-transaction-pool", "sp-api", @@ -6798,6 +6800,18 @@ 
dependencies = [ "wasm-timer", ] +[[package]] +name = "sc-prelude" +version = "0.8.0-rc4" +dependencies = [ + "sc-consensus", + "sc-consensus-aura", + "sc-consensus-babe", + "sc-finality-grandpa", + "sc-service", + "sc-transaction-pool", +] + [[package]] name = "sc-proposer-metrics" version = "0.8.0-rc4" diff --git a/Cargo.toml b/Cargo.toml index ba146e55bca3f..f4a9dcf0c7283 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -46,6 +46,7 @@ members = [ "client/network-gossip", "client/offchain", "client/peerset", + "client/prelude", "client/proposer-metrics", "client/rpc-servers", "client/rpc", diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index dbf478bc5d100..58d6f329b85d3 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -39,6 +39,7 @@ sc-client-api = { version = "2.0.0-rc4", path = "../../../client/api" } sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } sc-basic-authorship = { path = "../../../client/basic-authorship", version = "0.8.0-rc4"} sp-api = { version = "2.0.0-rc4", path = "../../../primitives/api" } +sc-prelude = { version = "0.8.0-rc4", path = "../../../client/prelude" } node-template-runtime = { version = "2.0.0-rc4", path = "../runtime" } diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index ef64b06151a80..9d1d4e9239e14 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -20,23 +20,19 @@ native_executor_instance!( node_template_runtime::native_version, ); -type FullClient = sc_service::TFullClient; -type FullBackend = sc_service::TFullBackend; -type GrandpaBlockImport = sc_finality_grandpa::GrandpaBlockImport< - FullBackend, Block, FullClient, SelectChain ->; -type SelectChain = sc_consensus::LongestChain; -type GrandpaLink = sc_finality_grandpa::LinkHalf; -type FullPool = sc_transaction_pool::BasicPool< - sc_transaction_pool::FullChainApi, Block ->; -type 
ImportQueue = sc_consensus_aura::AuraImportQueue>; +mod prelude { + use super::*; + sc_prelude::prelude!(Block, RuntimeApi, Executor); +} + +use prelude::*; pub fn new_full_params(config: Configuration) -> Result<( sc_service::ServiceParams< - Block, FullClient, ImportQueue, FullPool, (), FullBackend, + Block, FullClient, FullAuraImportQueue, FullBasicPool, (), FullBackend, >, - SelectChain, sp_inherents::InherentDataProviders, GrandpaBlockImport, GrandpaLink + FullLongestChain, sp_inherents::InherentDataProviders, FullGrandpaBlockImport, + FullGrandpaLink ), ServiceError> { let inherent_data_providers = sp_inherents::InherentDataProviders::new(); diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 9029ec1ceff83..8d897254e3a63 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -76,6 +76,7 @@ sc-service = { version = "0.8.0-rc4", default-features = false, path = "../../.. sc-tracing = { version = "2.0.0-rc4", path = "../../../client/tracing" } sc-telemetry = { version = "2.0.0-rc4", path = "../../../client/telemetry" } sc-authority-discovery = { version = "0.8.0-rc4", path = "../../../client/authority-discovery" } +sc-prelude = { version = "0.8.0-rc4", path = "../../../client/prelude" } # frame dependencies pallet-indices = { version = "2.0.0-rc4", path = "../../../frame/indices" } diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 77599c9fbc9f8..df194a2f1d6d4 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -41,11 +41,11 @@ use sp_core::traits::BareCryptoStorePtr; pub fn new_full_params(config: Configuration) -> Result<( sc_service::ServiceParams< - Block, FullClient, ImportQueue, FullPool, node_rpc::IoHandler, FullBackend + Block, FullClient, FullBabeImportQueue, FullBasicPool, node_rpc::IoHandler, FullBackend >, - (BabeBlockImport, GrandpaLink, BabeLink), + (FullBabeBlockImport>, FullGrandpaLink, BabeLink), grandpa::SharedVoterState, - sc_consensus::LongestChain, + 
FullLongestChain, InherentDataProviders ), ServiceError> { use node_executor::Executor; @@ -149,23 +149,20 @@ pub fn new_full_params(config: Configuration) -> Result<( Ok((params, import_setup, rpc_setup, select_chain, inherent_data_providers)) } -type FullClient = sc_service::TFullClient; -type FullBackend = sc_service::TFullBackend; -type GrandpaBlockImport = grandpa::GrandpaBlockImport; -type SelectChain = sc_consensus::LongestChain; -type GrandpaLink = grandpa::LinkHalf; -type BabeLink = sc_consensus_babe::BabeLink; -type BabeBlockImport = sc_consensus_babe::BabeBlockImport; -type FullPool = sc_transaction_pool::BasicPool, Block>; -type ImportQueue = sc_consensus_babe::BabeImportQueue>; +mod prelude { + use super::*; + sc_prelude::prelude!(Block, RuntimeApi, node_executor::Executor); +} + +use prelude::*; /// Creates a full service from the configuration. pub fn new_full_base( config: Configuration, - with_startup_data: impl FnOnce(&BabeBlockImport, &BabeLink) + with_startup_data: impl FnOnce(&FullBabeBlockImport>, &BabeLink) ) -> Result<( TaskManager, InherentDataProviders, Arc, - Arc::Hash>>, Arc + Arc::Hash>>, Arc ), ServiceError> { let (params, import_setup, rpc_setup, select_chain, inherent_data_providers) = new_full_params(config)?; diff --git a/client/prelude/Cargo.toml b/client/prelude/Cargo.toml new file mode 100644 index 0000000000000..a2a346a24f125 --- /dev/null +++ b/client/prelude/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "sc-prelude" +version = "0.8.0-rc4" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Easy to use type definitions" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +sc-service = { path = "../service", version = "0.8.0-rc4"} +sc-finality-grandpa = { path = "../finality-grandpa", version = "0.8.0-rc4"} +sc-consensus = { 
path = "../consensus/common", version = "0.8.0-rc4"} +sc-consensus-aura = { path = "../consensus/aura", version = "0.8.0-rc4"} +sc-consensus-babe = { path = "../consensus/babe", version = "0.8.0-rc4"} +sc-transaction-pool = { path = "../transaction-pool", version = "2.0.0-rc4"} diff --git a/client/prelude/src/lib.rs b/client/prelude/src/lib.rs new file mode 100644 index 0000000000000..f513579f8fee0 --- /dev/null +++ b/client/prelude/src/lib.rs @@ -0,0 +1,64 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +pub use sc_service; +pub use sc_finality_grandpa; +pub use sc_consensus; +pub use sc_consensus_aura; +pub use sc_transaction_pool; +pub use sc_consensus_babe; + +#[macro_export] +macro_rules! 
prelude { + ($block:ty, $runtime_api:ty, $executor:ty) => { + pub type FullClient = $crate::sc_service::TFullClient<$block, $runtime_api, $executor>; + pub type FullBackend = $crate::sc_service::TFullBackend<$block>; + pub type FullGrandpaBlockImport = $crate::sc_finality_grandpa::GrandpaBlockImport< + FullBackend, $block, FullClient, SelectChain + >; + pub type FullGrandpaLink = $crate::sc_finality_grandpa::LinkHalf< + $block, FullClient, SelectChain + >; + pub type FullLongestChain = $crate::sc_consensus::LongestChain; + + pub type FullBasicPool = $crate::sc_transaction_pool::BasicPool< + sc_transaction_pool::FullChainApi, $block + >; + + pub type FullAuraImportQueue = $crate::sc_consensus_aura::AuraImportQueue< + $block, sp_api::TransactionFor + >; + + pub type FullBabeImportQueue = $crate::sc_consensus_babe::BabeImportQueue< + $block, sp_api::TransactionFor + >; + + pub type FullBabeBlockImport = $crate::sc_consensus_babe::BabeBlockImport< + $block, FullClient, BlockImport + >; + + pub type BabeLink = $crate::sc_consensus_babe::BabeLink<$block>; + + pub type LightClient = $crate::sc_service::TLightClient<$block, $runtime_api, $executor>; + } +} + +#[cfg(test)] +mod tests { + mod prelude { + prelude!((), (), ()); + } +} From cce55621706bd1738c8ca3de8d8a10388bfda5f2 Mon Sep 17 00:00:00 2001 From: Ashley Ruglys Date: Fri, 17 Jul 2020 10:58:47 +0200 Subject: [PATCH 17/24] Rename sc-prelude to sc-service-prelude --- Cargo.lock | 30 ++++++----- Cargo.toml | 2 +- bin/node-template/node/Cargo.toml | 2 +- bin/node-template/node/src/service.rs | 10 ++-- bin/node/cli/Cargo.toml | 2 +- bin/node/cli/src/service.rs | 33 ++++++------ client/prelude/Cargo.toml | 20 ------- client/prelude/src/lib.rs | 64 ---------------------- client/service/prelude/Cargo.toml | 22 ++++++++ client/service/prelude/src/lib.rs | 78 +++++++++++++++++++++++++++ 10 files changed, 141 insertions(+), 122 deletions(-) delete mode 100644 client/prelude/Cargo.toml delete mode 100644 
client/prelude/src/lib.rs create mode 100644 client/service/prelude/Cargo.toml create mode 100644 client/service/prelude/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 8f321049e9f18..8c5fe7c3ec1c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3521,9 +3521,9 @@ dependencies = [ "sc-keystore", "sc-network", "sc-offchain", - "sc-prelude", "sc-rpc", "sc-service", + "sc-service-prelude", "sc-service-test", "sc-telemetry", "sc-tracing", @@ -3741,8 +3741,8 @@ dependencies = [ "sc-executor", "sc-finality-grandpa", "sc-network", - "sc-prelude", "sc-service", + "sc-service-prelude", "sc-transaction-pool", "sp-api", "sp-consensus", @@ -6786,18 +6786,6 @@ dependencies = [ "wasm-timer", ] -[[package]] -name = "sc-prelude" -version = "0.8.0-rc4" -dependencies = [ - "sc-consensus", - "sc-consensus-aura", - "sc-consensus-babe", - "sc-finality-grandpa", - "sc-service", - "sc-transaction-pool", -] - [[package]] name = "sc-proposer-metrics" version = "0.8.0-rc4" @@ -6961,6 +6949,20 @@ dependencies = [ "wasm-timer", ] +[[package]] +name = "sc-service-prelude" +version = "0.8.0-rc4" +dependencies = [ + "sc-consensus", + "sc-consensus-aura", + "sc-consensus-babe", + "sc-finality-grandpa", + "sc-network", + "sc-service", + "sc-transaction-pool", + "sp-api", +] + [[package]] name = "sc-service-test" version = "2.0.0-rc4" diff --git a/Cargo.toml b/Cargo.toml index f4a9dcf0c7283..e2dc0bcaef2ea 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -46,12 +46,12 @@ members = [ "client/network-gossip", "client/offchain", "client/peerset", - "client/prelude", "client/proposer-metrics", "client/rpc-servers", "client/rpc", "client/rpc-api", "client/service", + "client/service/prelude", "client/service/test", "client/state-db", "client/telemetry", diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index 58d6f329b85d3..cb10051128e43 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -39,7 +39,7 @@ sc-client-api = { version = 
"2.0.0-rc4", path = "../../../client/api" } sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } sc-basic-authorship = { path = "../../../client/basic-authorship", version = "0.8.0-rc4"} sp-api = { version = "2.0.0-rc4", path = "../../../primitives/api" } -sc-prelude = { version = "0.8.0-rc4", path = "../../../client/prelude" } +sc-service-prelude = { version = "0.8.0-rc4", path = "../../../client/service/prelude" } node-template-runtime = { version = "2.0.0-rc4", path = "../runtime" } diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 9d1d4e9239e14..c4786f025d0f7 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -22,17 +22,17 @@ native_executor_instance!( mod prelude { use super::*; - sc_prelude::prelude!(Block, RuntimeApi, Executor); + sc_service_prelude::setup_types!(Block, RuntimeApi, Executor); } -use prelude::*; +use prelude::full; pub fn new_full_params(config: Configuration) -> Result<( sc_service::ServiceParams< - Block, FullClient, FullAuraImportQueue, FullBasicPool, (), FullBackend, + Block, full::Client, full::AuraImportQueue, full::BasicPool, (), full::Backend, >, - FullLongestChain, sp_inherents::InherentDataProviders, FullGrandpaBlockImport, - FullGrandpaLink + full::LongestChain, sp_inherents::InherentDataProviders, full::GrandpaBlockImport, + full::GrandpaLink ), ServiceError> { let inherent_data_providers = sp_inherents::InherentDataProviders::new(); diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 8d897254e3a63..4154da874babc 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -76,7 +76,7 @@ sc-service = { version = "0.8.0-rc4", default-features = false, path = "../../.. 
sc-tracing = { version = "2.0.0-rc4", path = "../../../client/tracing" } sc-telemetry = { version = "2.0.0-rc4", path = "../../../client/telemetry" } sc-authority-discovery = { version = "0.8.0-rc4", path = "../../../client/authority-discovery" } -sc-prelude = { version = "0.8.0-rc4", path = "../../../client/prelude" } +sc-service-prelude = { version = "0.8.0-rc4", path = "../../../client/service/prelude" } # frame dependencies pallet-indices = { version = "2.0.0-rc4", path = "../../../frame/indices" } diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index df194a2f1d6d4..4c984fbf2844a 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -25,7 +25,6 @@ use sc_consensus_babe; use grandpa::{ self, FinalityProofProvider as GrandpaFinalityProofProvider, StorageAndProofProvider, }; -use node_executor; use node_primitives::Block; use node_runtime::RuntimeApi; use sc_service::{ @@ -41,11 +40,15 @@ use sp_core::traits::BareCryptoStorePtr; pub fn new_full_params(config: Configuration) -> Result<( sc_service::ServiceParams< - Block, FullClient, FullBabeImportQueue, FullBasicPool, node_rpc::IoHandler, FullBackend + Block, full::Client, full::BabeImportQueue, full::BasicPool, node_rpc::IoHandler, + full::Backend >, - (FullBabeBlockImport>, FullGrandpaLink, BabeLink), + ( + full::BabeBlockImport>, + full::GrandpaLink, BabeLink + ), grandpa::SharedVoterState, - FullLongestChain, + full::LongestChain, InherentDataProviders ), ServiceError> { use node_executor::Executor; @@ -151,18 +154,21 @@ pub fn new_full_params(config: Configuration) -> Result<( mod prelude { use super::*; - sc_prelude::prelude!(Block, RuntimeApi, node_executor::Executor); + use node_executor::Executor; + sc_service_prelude::setup_types!(Block, RuntimeApi, Executor); } -use prelude::*; +use prelude::{full, light, BabeLink}; /// Creates a full service from the configuration. 
pub fn new_full_base( config: Configuration, - with_startup_data: impl FnOnce(&FullBabeBlockImport>, &BabeLink) + with_startup_data: impl FnOnce( + &full::BabeBlockImport>, &BabeLink, + ) ) -> Result<( - TaskManager, InherentDataProviders, Arc, - Arc::Hash>>, Arc + TaskManager, InherentDataProviders, Arc, + Arc::Hash>>, Arc ), ServiceError> { let (params, import_setup, rpc_setup, select_chain, inherent_data_providers) = new_full_params(config)?; @@ -316,15 +322,10 @@ pub fn new_full(config: Configuration) }) } -type LightClient = sc_service::TLightClient; -type LightFetcher = sc_network::config::OnDemand; - pub fn new_light_base(config: Configuration) -> Result<( - TaskManager, Arc, Arc, + TaskManager, Arc, Arc, Arc::Hash>>, - Arc, Block - >> + Arc ), ServiceError> { let (client, backend, keystore, task_manager, on_demand) = sc_service::new_light_parts::(&config)?; diff --git a/client/prelude/Cargo.toml b/client/prelude/Cargo.toml deleted file mode 100644 index a2a346a24f125..0000000000000 --- a/client/prelude/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "sc-prelude" -version = "0.8.0-rc4" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/substrate/" -description = "Easy to use type definitions" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -sc-service = { path = "../service", version = "0.8.0-rc4"} -sc-finality-grandpa = { path = "../finality-grandpa", version = "0.8.0-rc4"} -sc-consensus = { path = "../consensus/common", version = "0.8.0-rc4"} -sc-consensus-aura = { path = "../consensus/aura", version = "0.8.0-rc4"} -sc-consensus-babe = { path = "../consensus/babe", version = "0.8.0-rc4"} -sc-transaction-pool = { path = "../transaction-pool", version = "2.0.0-rc4"} diff --git a/client/prelude/src/lib.rs b/client/prelude/src/lib.rs deleted file mode 100644 index 
f513579f8fee0..0000000000000 --- a/client/prelude/src/lib.rs +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -pub use sc_service; -pub use sc_finality_grandpa; -pub use sc_consensus; -pub use sc_consensus_aura; -pub use sc_transaction_pool; -pub use sc_consensus_babe; - -#[macro_export] -macro_rules! prelude { - ($block:ty, $runtime_api:ty, $executor:ty) => { - pub type FullClient = $crate::sc_service::TFullClient<$block, $runtime_api, $executor>; - pub type FullBackend = $crate::sc_service::TFullBackend<$block>; - pub type FullGrandpaBlockImport = $crate::sc_finality_grandpa::GrandpaBlockImport< - FullBackend, $block, FullClient, SelectChain - >; - pub type FullGrandpaLink = $crate::sc_finality_grandpa::LinkHalf< - $block, FullClient, SelectChain - >; - pub type FullLongestChain = $crate::sc_consensus::LongestChain; - - pub type FullBasicPool = $crate::sc_transaction_pool::BasicPool< - sc_transaction_pool::FullChainApi, $block - >; - - pub type FullAuraImportQueue = $crate::sc_consensus_aura::AuraImportQueue< - $block, sp_api::TransactionFor - >; - - pub type FullBabeImportQueue = $crate::sc_consensus_babe::BabeImportQueue< - $block, sp_api::TransactionFor - >; - - pub type FullBabeBlockImport = $crate::sc_consensus_babe::BabeBlockImport< - $block, FullClient, BlockImport - >; - - pub type 
BabeLink = $crate::sc_consensus_babe::BabeLink<$block>; - - pub type LightClient = $crate::sc_service::TLightClient<$block, $runtime_api, $executor>; - } -} - -#[cfg(test)] -mod tests { - mod prelude { - prelude!((), (), ()); - } -} diff --git a/client/service/prelude/Cargo.toml b/client/service/prelude/Cargo.toml new file mode 100644 index 0000000000000..0170921c58980 --- /dev/null +++ b/client/service/prelude/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "sc-service-prelude" +version = "0.8.0-rc4" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Easy to use type definitions" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +service = { package = "sc-service", path = "..", version = "0.8.0-rc4"} +grandpa = { package = "sc-finality-grandpa", path = "../../finality-grandpa", version = "0.8.0-rc4"} +consensus = { package = "sc-consensus", path = "../../consensus/common", version = "0.8.0-rc4"} +aura = { package = "sc-consensus-aura", path = "../../consensus/aura", version = "0.8.0-rc4"} +babe = { package = "sc-consensus-babe", path = "../../consensus/babe", version = "0.8.0-rc4"} +transaction-pool = { package = "sc-transaction-pool", path = "../../transaction-pool", version = "2.0.0-rc4"} +network = { package = "sc-network", path = "../../network" } +sp-api = { path = "../../../primitives/api" } diff --git a/client/service/prelude/src/lib.rs b/client/service/prelude/src/lib.rs new file mode 100644 index 0000000000000..a047ea7d45a2d --- /dev/null +++ b/client/service/prelude/src/lib.rs @@ -0,0 +1,78 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +pub use service; +pub use grandpa; +pub use consensus; +pub use aura; +pub use transaction_pool; +pub use babe; +pub use network; + +#[macro_export] +macro_rules! setup_types { + ($block:ty, $runtime_api:ty, $executor:ty) => { + pub mod full { + use super::*; + + pub type Client = $crate::service::TFullClient<$block, $runtime_api, $executor>; + pub type Backend = $crate::service::TFullBackend<$block>; + pub type GrandpaBlockImport = $crate::grandpa::GrandpaBlockImport< + Backend, $block, Client, SelectChain + >; + pub type GrandpaLink = $crate::grandpa ::LinkHalf< + $block, Client, SelectChain + >; + pub type LongestChain = $crate::consensus::LongestChain; + + pub type BasicPool = $crate::transaction_pool::BasicPool< + sc_transaction_pool::FullChainApi, $block + >; + + pub type AuraImportQueue = $crate::aura::AuraImportQueue< + $block, sp_api::TransactionFor + >; + + pub type BabeImportQueue = $crate::babe::BabeImportQueue< + $block, sp_api::TransactionFor + >; + + pub type BabeBlockImport = $crate::babe::BabeBlockImport< + $block, Client, BlockImport + >; + } + + pub type BabeLink = $crate::babe::BabeLink<$block>; + + pub mod light { + use super::*; + + pub type Client = $crate::service::TLightClient<$block, $runtime_api, $executor>; + pub type Fetcher = $crate::network::config::OnDemand<$block>; + + pub type BasicPool = $crate::transaction_pool::BasicPool< 
+ sc_transaction_pool::LightChainApi, $block + >; + } + } +} + +#[cfg(test)] +mod tests { + mod prelude { + setup_types!((), (), ()); + } +} From 03de1c7c094d15e4fb8296db007379daf7979cff Mon Sep 17 00:00:00 2001 From: Ashley Ruglys Date: Fri, 17 Jul 2020 12:11:36 +0200 Subject: [PATCH 18/24] Rename to sc-service-types --- Cargo.lock | 33 +++++----- Cargo.toml | 2 +- bin/node-template/node/Cargo.toml | 2 +- bin/node-template/node/src/service.rs | 5 +- bin/node/cli/Cargo.toml | 3 +- bin/node/cli/src/service.rs | 21 +++--- client/service/prelude/Cargo.toml | 22 ------- client/service/prelude/src/lib.rs | 78 ----------------------- client/service/types/Cargo.toml | 22 +++++++ client/service/types/src/lib.rs | 92 +++++++++++++++++++++++++++ 10 files changed, 147 insertions(+), 133 deletions(-) delete mode 100644 client/service/prelude/Cargo.toml delete mode 100644 client/service/prelude/src/lib.rs create mode 100644 client/service/types/Cargo.toml create mode 100644 client/service/types/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 8c5fe7c3ec1c2..12ce6c2c4a76c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3523,14 +3523,13 @@ dependencies = [ "sc-offchain", "sc-rpc", "sc-service", - "sc-service-prelude", "sc-service-test", + "sc-service-types", "sc-telemetry", "sc-tracing", "sc-transaction-pool", "serde", "serde_json", - "sp-api", "sp-authority-discovery", "sp-consensus", "sp-consensus-babe", @@ -3742,7 +3741,7 @@ dependencies = [ "sc-finality-grandpa", "sc-network", "sc-service", - "sc-service-prelude", + "sc-service-types", "sc-transaction-pool", "sp-api", "sp-consensus", @@ -6949,20 +6948,6 @@ dependencies = [ "wasm-timer", ] -[[package]] -name = "sc-service-prelude" -version = "0.8.0-rc4" -dependencies = [ - "sc-consensus", - "sc-consensus-aura", - "sc-consensus-babe", - "sc-finality-grandpa", - "sc-network", - "sc-service", - "sc-transaction-pool", - "sp-api", -] - [[package]] name = "sc-service-test" version = "2.0.0-rc4" @@ -6999,6 +6984,20 @@ dependencies 
= [ "tokio 0.1.22", ] +[[package]] +name = "sc-service-types" +version = "0.8.0-rc4" +dependencies = [ + "sc-consensus", + "sc-consensus-aura", + "sc-consensus-babe", + "sc-finality-grandpa", + "sc-network", + "sc-service", + "sc-transaction-pool", + "sp-api", +] + [[package]] name = "sc-state-db" version = "0.8.0-rc4" diff --git a/Cargo.toml b/Cargo.toml index e2dc0bcaef2ea..97d15fcebc006 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,8 +51,8 @@ members = [ "client/rpc", "client/rpc-api", "client/service", - "client/service/prelude", "client/service/test", + "client/service/types", "client/state-db", "client/telemetry", "client/transaction-pool", diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index cb10051128e43..571e04d6d61d2 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -39,7 +39,7 @@ sc-client-api = { version = "2.0.0-rc4", path = "../../../client/api" } sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } sc-basic-authorship = { path = "../../../client/basic-authorship", version = "0.8.0-rc4"} sp-api = { version = "2.0.0-rc4", path = "../../../primitives/api" } -sc-service-prelude = { version = "0.8.0-rc4", path = "../../../client/service/prelude" } +sc-service-types = { version = "0.8.0-rc4", path = "../../../client/service/types" } node-template-runtime = { version = "2.0.0-rc4", path = "../runtime" } diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index c4786f025d0f7..84629b295e929 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -22,7 +22,7 @@ native_executor_instance!( mod prelude { use super::*; - sc_service_prelude::setup_types!(Block, RuntimeApi, Executor); + sc_service_types::setup_types!(Block, RuntimeApi, Executor); } use prelude::full; @@ -31,7 +31,8 @@ pub fn new_full_params(config: Configuration) -> Result<( sc_service::ServiceParams< Block, 
full::Client, full::AuraImportQueue, full::BasicPool, (), full::Backend, >, - full::LongestChain, sp_inherents::InherentDataProviders, full::GrandpaBlockImport, + full::LongestChain, sp_inherents::InherentDataProviders, + full::GrandpaBlockImport, full::GrandpaLink ), ServiceError> { let inherent_data_providers = sp_inherents::InherentDataProviders::new(); diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 4154da874babc..241f60ca4d398 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -58,7 +58,6 @@ sp-keyring = { version = "2.0.0-rc4", path = "../../../primitives/keyring" } sp-io = { version = "2.0.0-rc4", path = "../../../primitives/io" } sp-consensus = { version = "0.8.0-rc4", path = "../../../primitives/consensus/common" } sp-transaction-pool = { version = "2.0.0-rc4", path = "../../../primitives/transaction-pool" } -sp-api = { version = "2.0.0-rc4", path = "../../../primitives/api" } # client dependencies sc-client-api = { version = "2.0.0-rc4", path = "../../../client/api" } @@ -76,7 +75,7 @@ sc-service = { version = "0.8.0-rc4", default-features = false, path = "../../.. 
sc-tracing = { version = "2.0.0-rc4", path = "../../../client/tracing" } sc-telemetry = { version = "2.0.0-rc4", path = "../../../client/telemetry" } sc-authority-discovery = { version = "0.8.0-rc4", path = "../../../client/authority-discovery" } -sc-service-prelude = { version = "0.8.0-rc4", path = "../../../client/service/prelude" } +sc-service-types = { version = "0.8.0-rc4", path = "../../../client/service/types" } # frame dependencies pallet-indices = { version = "2.0.0-rc4", path = "../../../frame/indices" } diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 4c984fbf2844a..c135cc7040943 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -37,6 +37,15 @@ use sp_runtime::traits::Block as BlockT; use futures::prelude::*; use sc_client_api::{ExecutorProvider, RemoteBackend}; use sp_core::traits::BareCryptoStorePtr; +use sc_consensus_babe::BabeLink; + +mod prelude { + use super::*; + use node_executor::Executor; + sc_service_types::setup_types!(Block, RuntimeApi, Executor); +} + +use prelude::{full, light}; pub fn new_full_params(config: Configuration) -> Result<( sc_service::ServiceParams< @@ -45,7 +54,7 @@ pub fn new_full_params(config: Configuration) -> Result<( >, ( full::BabeBlockImport>, - full::GrandpaLink, BabeLink + full::GrandpaLink, BabeLink, ), grandpa::SharedVoterState, full::LongestChain, @@ -152,19 +161,11 @@ pub fn new_full_params(config: Configuration) -> Result<( Ok((params, import_setup, rpc_setup, select_chain, inherent_data_providers)) } -mod prelude { - use super::*; - use node_executor::Executor; - sc_service_prelude::setup_types!(Block, RuntimeApi, Executor); -} - -use prelude::{full, light, BabeLink}; - /// Creates a full service from the configuration. 
pub fn new_full_base( config: Configuration, with_startup_data: impl FnOnce( - &full::BabeBlockImport>, &BabeLink, + &full::BabeBlockImport>, &BabeLink, ) ) -> Result<( TaskManager, InherentDataProviders, Arc, diff --git a/client/service/prelude/Cargo.toml b/client/service/prelude/Cargo.toml deleted file mode 100644 index 0170921c58980..0000000000000 --- a/client/service/prelude/Cargo.toml +++ /dev/null @@ -1,22 +0,0 @@ -[package] -name = "sc-service-prelude" -version = "0.8.0-rc4" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/substrate/" -description = "Easy to use type definitions" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -service = { package = "sc-service", path = "..", version = "0.8.0-rc4"} -grandpa = { package = "sc-finality-grandpa", path = "../../finality-grandpa", version = "0.8.0-rc4"} -consensus = { package = "sc-consensus", path = "../../consensus/common", version = "0.8.0-rc4"} -aura = { package = "sc-consensus-aura", path = "../../consensus/aura", version = "0.8.0-rc4"} -babe = { package = "sc-consensus-babe", path = "../../consensus/babe", version = "0.8.0-rc4"} -transaction-pool = { package = "sc-transaction-pool", path = "../../transaction-pool", version = "2.0.0-rc4"} -network = { package = "sc-network", path = "../../network" } -sp-api = { path = "../../../primitives/api" } diff --git a/client/service/prelude/src/lib.rs b/client/service/prelude/src/lib.rs deleted file mode 100644 index a047ea7d45a2d..0000000000000 --- a/client/service/prelude/src/lib.rs +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. 
- -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -pub use service; -pub use grandpa; -pub use consensus; -pub use aura; -pub use transaction_pool; -pub use babe; -pub use network; - -#[macro_export] -macro_rules! setup_types { - ($block:ty, $runtime_api:ty, $executor:ty) => { - pub mod full { - use super::*; - - pub type Client = $crate::service::TFullClient<$block, $runtime_api, $executor>; - pub type Backend = $crate::service::TFullBackend<$block>; - pub type GrandpaBlockImport = $crate::grandpa::GrandpaBlockImport< - Backend, $block, Client, SelectChain - >; - pub type GrandpaLink = $crate::grandpa ::LinkHalf< - $block, Client, SelectChain - >; - pub type LongestChain = $crate::consensus::LongestChain; - - pub type BasicPool = $crate::transaction_pool::BasicPool< - sc_transaction_pool::FullChainApi, $block - >; - - pub type AuraImportQueue = $crate::aura::AuraImportQueue< - $block, sp_api::TransactionFor - >; - - pub type BabeImportQueue = $crate::babe::BabeImportQueue< - $block, sp_api::TransactionFor - >; - - pub type BabeBlockImport = $crate::babe::BabeBlockImport< - $block, Client, BlockImport - >; - } - - pub type BabeLink = $crate::babe::BabeLink<$block>; - - pub mod light { - use super::*; - - pub type Client = $crate::service::TLightClient<$block, $runtime_api, $executor>; - pub type Fetcher = $crate::network::config::OnDemand<$block>; - - pub type BasicPool = $crate::transaction_pool::BasicPool< 
- sc_transaction_pool::LightChainApi, $block - >; - } - } -} - -#[cfg(test)] -mod tests { - mod prelude { - setup_types!((), (), ()); - } -} diff --git a/client/service/types/Cargo.toml b/client/service/types/Cargo.toml new file mode 100644 index 0000000000000..f1a170591b772 --- /dev/null +++ b/client/service/types/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "sc-service-types" +version = "0.8.0-rc4" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Easy to use type definitions" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +sc-service = { path = "..", version = "0.8.0-rc4"} +sc-finality-grandpa = { path = "../../finality-grandpa", version = "0.8.0-rc4"} +sc-consensus = { path = "../../consensus/common", version = "0.8.0-rc4"} +sc-consensus-aura = { path = "../../consensus/aura", version = "0.8.0-rc4"} +sc-consensus-babe = { path = "../../consensus/babe", version = "0.8.0-rc4"} +sc-transaction-pool = { path = "../../transaction-pool", version = "2.0.0-rc4"} +sc-network = { path = "../../network" } +sp-api = { path = "../../../primitives/api" } diff --git a/client/service/types/src/lib.rs b/client/service/types/src/lib.rs new file mode 100644 index 0000000000000..e462ae5ea2d79 --- /dev/null +++ b/client/service/types/src/lib.rs @@ -0,0 +1,92 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +pub use sc_service as _service; +pub use sc_finality_grandpa as _grandpa; +pub use sc_consensus as _consensus; +pub use sc_consensus_aura as _aura; +pub use sc_consensus_babe as _babe; +pub use sc_transaction_pool as _transaction_pool; +pub use sc_network as _network; +pub use sp_api as _sp_api; + +#[macro_export] +macro_rules! setup_types { + ($block:ty, $runtime_api:ty, $executor:ty) => { + use $crate::{ + _service as service, _grandpa as grandpa, _consensus as consensus, _aura as aura, + _transaction_pool as transaction_pool, _sp_api as sp_api, _network as network, + _babe as babe, + }; + + /// Type definitions for a full client. + pub mod full { + use super::*; + + /// A full client. + pub type Client = service::TFullClient<$block, $runtime_api, $executor>; + /// A full backend. + pub type Backend = service::TFullBackend<$block>; + /// A GRANDPA block import. + pub type GrandpaBlockImport = grandpa::GrandpaBlockImport< + Backend, $block, Client, SelectChain + >; + /// A GRANDPA link. Connects the block import to the GRANDPA service. + pub type GrandpaLink = grandpa ::LinkHalf< + $block, Client, SelectChain + >; + /// A basic select chain implementation. + pub type LongestChain = consensus::LongestChain; + /// A basic transaction pool. + pub type BasicPool = transaction_pool::BasicPool< + transaction_pool::FullChainApi, $block + >; + /// An import queue for AURA. + pub type AuraImportQueue = aura::AuraImportQueue< + $block, sp_api::TransactionFor + >; + /// An import queue for BABE. + pub type BabeImportQueue = babe::BabeImportQueue< + $block, sp_api::TransactionFor + >; + /// A block import for BABE. 
Wraps around another block import. + pub type BabeBlockImport = babe::BabeBlockImport< + $block, Client, BlockImport + >; + } + + /// Type definitions for a light client. + pub mod light { + use super::*; + + /// A light client. + pub type Client = service::TLightClient<$block, $runtime_api, $executor>; + /// A network fetcher for a light client. + pub type Fetcher = network::config::OnDemand<$block>; + /// A basic transaction pool. + pub type BasicPool = transaction_pool::BasicPool< + transaction_pool::LightChainApi, $block + >; + } + } +} + +#[cfg(test)] +mod tests { + mod prelude { + setup_types!((), (), ()); + } +} From 7744a9ed4514330587f5fd60a9a0e5bccf0d96c1 Mon Sep 17 00:00:00 2001 From: Ashley Ruglys Date: Mon, 20 Jul 2020 10:57:51 +0200 Subject: [PATCH 19/24] Improve service types --- client/service/types/src/lib.rs | 109 ++++++++++++++++++++++++-------- 1 file changed, 82 insertions(+), 27 deletions(-) diff --git a/client/service/types/src/lib.rs b/client/service/types/src/lib.rs index e462ae5ea2d79..fff10825e0aa8 100644 --- a/client/service/types/src/lib.rs +++ b/client/service/types/src/lib.rs @@ -14,6 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . +//! Type definitions to make building a service easier. + +#![warn(missing_docs)] + pub use sc_service as _service; pub use sc_finality_grandpa as _grandpa; pub use sc_consensus as _consensus; @@ -23,63 +27,114 @@ pub use sc_transaction_pool as _transaction_pool; pub use sc_network as _network; pub use sp_api as _sp_api; +use { + _service as service, _grandpa as grandpa, _consensus as consensus, _aura as aura, + _transaction_pool as transaction_pool, _sp_api as sp_api, _network as network, + _babe as babe, +}; + +/// Type definitions for a full client. +pub mod full { + use super::*; + + pub use service::TFullClient as Client; + pub use service::TFullBackend as Backend; + + /// A GRANDPA block import. 
+ pub type GrandpaBlockImport = grandpa::GrandpaBlockImport< + Backend, Block, Client, SelectChain + >; + /// A GRANDPA link. Connects the block import to the GRANDPA service. + pub type GrandpaLink = grandpa ::LinkHalf< + Block, Client, SelectChain + >; + /// A basic select chain implementation. + pub type LongestChain = consensus::LongestChain, Block>; + /// A basic transaction pool. + pub type BasicPool = transaction_pool::BasicPool< + transaction_pool::FullChainApi, Block>, Block + >; + /// An import queue for AURA. + pub type AuraImportQueue = aura::AuraImportQueue< + Block, sp_api::TransactionFor, Block> + >; + /// An import queue for BABE. + pub type BabeImportQueue = babe::BabeImportQueue< + Block, sp_api::TransactionFor, Block> + >; + /// A block import for BABE. Wraps around another block import. + pub type BabeBlockImport = babe::BabeBlockImport< + Block, Client, BlockImport + >; +} + +/// Type definitions for a light client. +pub mod light { + use super::*; + + pub use service::TLightClient as Client; + pub use service::TLightBackend as Backend; + + /// A network fetcher for a light client. + pub type Fetcher = network::config::OnDemand; + + /// A basic transaction pool. + pub type BasicPool = transaction_pool::BasicPool< + transaction_pool::LightChainApi, Fetcher, Block>, + Block + >; +} + +/// Setup the type definitions given a `Block`, `RuntimeApi`and `Executor`. #[macro_export] macro_rules! setup_types { ($block:ty, $runtime_api:ty, $executor:ty) => { - use $crate::{ - _service as service, _grandpa as grandpa, _consensus as consensus, _aura as aura, - _transaction_pool as transaction_pool, _sp_api as sp_api, _network as network, - _babe as babe, - }; - /// Type definitions for a full client. pub mod full { + use $crate::full; use super::*; /// A full client. - pub type Client = service::TFullClient<$block, $runtime_api, $executor>; + pub type Client = full::Client<$block, $runtime_api, $executor>; /// A full backend. 
- pub type Backend = service::TFullBackend<$block>; + pub type Backend = full::Backend<$block>; /// A GRANDPA block import. - pub type GrandpaBlockImport = grandpa::GrandpaBlockImport< - Backend, $block, Client, SelectChain + pub type GrandpaBlockImport = full::GrandpaBlockImport< + $block, $runtime_api, $executor, SelectChain >; /// A GRANDPA link. Connects the block import to the GRANDPA service. - pub type GrandpaLink = grandpa ::LinkHalf< - $block, Client, SelectChain + pub type GrandpaLink = full::GrandpaLink< + $block, $runtime_api, $executor, SelectChain >; /// A basic select chain implementation. - pub type LongestChain = consensus::LongestChain; + pub type LongestChain = full::LongestChain<$block>; /// A basic transaction pool. - pub type BasicPool = transaction_pool::BasicPool< - transaction_pool::FullChainApi, $block - >; + pub type BasicPool = $crate::BasicPool<$block, $runtime_api, $executor>; /// An import queue for AURA. - pub type AuraImportQueue = aura::AuraImportQueue< - $block, sp_api::TransactionFor + pub type AuraImportQueue = full::AuraImportQueue< + $block, $runtime_api, $executor >; /// An import queue for BABE. - pub type BabeImportQueue = babe::BabeImportQueue< - $block, sp_api::TransactionFor + pub type BabeImportQueue = full::BabeImportQueue< + $block, $runtime_api, $executor >; /// A block import for BABE. Wraps around another block import. - pub type BabeBlockImport = babe::BabeBlockImport< - $block, Client, BlockImport + pub type BabeBlockImport = full::BabeBlockImport< + $block, $runtime_api, $executor, BlockImport >; } /// Type definitions for a light client. pub mod light { + use $crate::light; use super::*; /// A light client. - pub type Client = service::TLightClient<$block, $runtime_api, $executor>; + pub type Client = light::Client<$block, $runtime_api, $executor>; /// A network fetcher for a light client. 
- pub type Fetcher = network::config::OnDemand<$block>; + pub type Fetcher = light::Fetcher<$block>; /// A basic transaction pool. - pub type BasicPool = transaction_pool::BasicPool< - transaction_pool::LightChainApi, $block - >; + pub type BasicPool = light::BasicPool<$block, $runtime_api, $executor>; } } } From 3a1d2ecb6f795b75dfc4c1f5a499d1cef8806989 Mon Sep 17 00:00:00 2001 From: Ashley Ruglys Date: Mon, 20 Jul 2020 15:46:59 +0200 Subject: [PATCH 20/24] Fix line widths --- bin/node-template/node/src/service.rs | 11 +++++----- bin/node/cli/src/service.rs | 29 +++++++++++++++------------ client/service/src/builder.rs | 10 ++++++--- client/service/types/src/lib.rs | 16 +++++---------- 4 files changed, 34 insertions(+), 32 deletions(-) diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 84629b295e929..39dc3927f3d38 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -37,7 +37,8 @@ pub fn new_full_params(config: Configuration) -> Result<( ), ServiceError> { let inherent_data_providers = sp_inherents::InherentDataProviders::new(); - let (client, backend, keystore, task_manager) = sc_service::new_full_parts::(&config)?; + let (client, backend, keystore, task_manager) = + sc_service::new_full_parts::(&config)?; let client = Arc::new(client); let select_chain = sc_consensus::LongestChain::new(backend.clone()); @@ -73,7 +74,8 @@ pub fn new_full_params(config: Configuration) -> Result<( )?; let provider = client.clone() as Arc>; - let finality_proof_provider = Arc::new(GrandpaFinalityProofProvider::new(backend.clone(), provider)); + let finality_proof_provider = + Arc::new(GrandpaFinalityProofProvider::new(backend.clone(), provider)); let params = sc_service::ServiceParams { backend, client, import_queue, keystore, task_manager, transaction_pool, @@ -237,9 +239,8 @@ pub fn new_light(config: Configuration) -> Result { config.prometheus_registry(), )?; - let finality_proof_provider = 
Arc::new(GrandpaFinalityProofProvider::new( - backend.clone(), client.clone() as Arc<_> - )); + let finality_proof_provider = + Arc::new(GrandpaFinalityProofProvider::new(backend.clone(), client.clone() as Arc<_>)); sc_service::build(sc_service::ServiceParams { block_announce_validator_builder: None, diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 64c580e8be655..6a90523604681 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -146,7 +146,8 @@ pub fn new_full_params(config: Configuration) -> Result<( }; let provider = client.clone() as Arc>; - let finality_proof_provider = Arc::new(grandpa::FinalityProofProvider::new(backend.clone(), provider)) as _; + let finality_proof_provider = + Arc::new(grandpa::FinalityProofProvider::new(backend.clone(), provider)); let params = sc_service::ServiceParams { config, backend, client, import_queue, keystore, task_manager, rpc_extensions_builder, @@ -375,7 +376,8 @@ pub fn new_light_base(config: Configuration) -> Result<( // GenesisAuthoritySetProvider is implemented for StorageAndProofProvider let provider = client.clone() as Arc>; - let finality_proof_provider = Arc::new(GrandpaFinalityProofProvider::new(backend.clone(), provider)); + let finality_proof_provider = + Arc::new(GrandpaFinalityProofProvider::new(backend.clone(), provider)); let light_deps = node_rpc::LightDeps { remote_blockchain: backend.remote_blockchain(), @@ -386,17 +388,18 @@ pub fn new_light_base(config: Configuration) -> Result<( let rpc_extensions = node_rpc::create_light(light_deps); - let ServiceComponents { task_manager, rpc_handlers, network, .. 
} = sc_service::build(sc_service::ServiceParams { - block_announce_validator_builder: None, - finality_proof_request_builder: Some(finality_proof_request_builder), - finality_proof_provider: Some(finality_proof_provider), - on_demand: Some(on_demand), - remote_blockchain: Some(backend.remote_blockchain()), - rpc_extensions_builder: Box::new(sc_service::NoopRpcExtensionBuilder(rpc_extensions)), - client: client.clone(), - transaction_pool: transaction_pool.clone(), - config, import_queue, keystore, backend, task_manager, - })?; + let ServiceComponents { task_manager, rpc_handlers, network, .. } = + sc_service::build(sc_service::ServiceParams { + block_announce_validator_builder: None, + finality_proof_request_builder: Some(finality_proof_request_builder), + finality_proof_provider: Some(finality_proof_provider), + on_demand: Some(on_demand), + remote_blockchain: Some(backend.remote_blockchain()), + rpc_extensions_builder: Box::new(sc_service::NoopRpcExtensionBuilder(rpc_extensions)), + client: client.clone(), + transaction_pool: transaction_pool.clone(), + config, import_queue, keystore, backend, task_manager, + })?; Ok((task_manager, rpc_handlers, client, network, transaction_pool)) } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 600cc5c77b5fb..3b12f95e097f4 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -385,7 +385,8 @@ pub struct ServiceParams { /// An optional, shared remote blockchain instance. Used for light clients. pub remote_blockchain: Option>>, /// A block annouce validator builder. - pub block_announce_validator_builder: Option) -> Box + Send> + Send>>, + pub block_announce_validator_builder: + Option) -> Box + Send> + Send>>, } /// Put together the components of a service from the parameters. 
@@ -408,7 +409,8 @@ pub fn build( TBl: BlockT, TBackend: 'static + sc_client_api::backend::Backend + Send, TImpQu: 'static + ImportQueue, - TExPool: MaintainedTransactionPool::Hash> + MallocSizeOfWasm + 'static, + TExPool: MaintainedTransactionPool::Hash> + + MallocSizeOfWasm + 'static, TRpc: sc_rpc::RpcExtension { let ServiceParams { @@ -500,7 +502,9 @@ pub fn build( ); // Prometheus metrics. - let metrics_service = if let Some(PrometheusConfig { port, registry }) = config.prometheus_config.clone() { + let metrics_service = if let Some(PrometheusConfig { port, registry }) = + config.prometheus_config.clone() + { // Set static metrics. let metrics = MetricsService::with_prometheus(®istry, &config)?; spawn_handle.spawn( diff --git a/client/service/types/src/lib.rs b/client/service/types/src/lib.rs index fff10825e0aa8..9d2b8bd6fa442 100644 --- a/client/service/types/src/lib.rs +++ b/client/service/types/src/lib.rs @@ -41,9 +41,10 @@ pub mod full { pub use service::TFullBackend as Backend; /// A GRANDPA block import. - pub type GrandpaBlockImport = grandpa::GrandpaBlockImport< - Backend, Block, Client, SelectChain - >; + pub type GrandpaBlockImport = + grandpa::GrandpaBlockImport< + Backend, Block, Client, SelectChain + >; /// A GRANDPA link. Connects the block import to the GRANDPA service. pub type GrandpaLink = grandpa ::LinkHalf< Block, Client, SelectChain @@ -109,7 +110,7 @@ macro_rules! setup_types { /// A basic select chain implementation. pub type LongestChain = full::LongestChain<$block>; /// A basic transaction pool. - pub type BasicPool = $crate::BasicPool<$block, $runtime_api, $executor>; + pub type BasicPool = full::BasicPool<$block, $runtime_api, $executor>; /// An import queue for AURA. pub type AuraImportQueue = full::AuraImportQueue< $block, $runtime_api, $executor @@ -138,10 +139,3 @@ macro_rules! 
setup_types { } } } - -#[cfg(test)] -mod tests { - mod prelude { - setup_types!((), (), ()); - } -} From 84022fe99cac0bedffe9a8642f7f7d557cea0a17 Mon Sep 17 00:00:00 2001 From: Ashley Ruglys Date: Tue, 21 Jul 2020 14:20:54 +0200 Subject: [PATCH 21/24] Remove sc-service-types and move type definitions to crates --- Cargo.lock | 16 --- Cargo.toml | 1 - bin/node-template/node/Cargo.toml | 1 - bin/node-template/node/src/service.rs | 21 ++-- bin/node/cli/Cargo.toml | 1 - bin/node/cli/src/service.rs | 44 ++++---- client/consensus/aura/src/lib.rs | 4 +- client/consensus/babe/src/lib.rs | 4 +- client/service/types/Cargo.toml | 22 ---- client/service/types/src/lib.rs | 141 -------------------------- client/transaction-pool/src/lib.rs | 5 + 11 files changed, 43 insertions(+), 217 deletions(-) delete mode 100644 client/service/types/Cargo.toml delete mode 100644 client/service/types/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 24822f869f15a..22edeede37ec4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3611,7 +3611,6 @@ dependencies = [ "sc-rpc", "sc-service", "sc-service-test", - "sc-service-types", "sc-telemetry", "sc-tracing", "sc-transaction-pool", @@ -3828,7 +3827,6 @@ dependencies = [ "sc-finality-grandpa", "sc-network", "sc-service", - "sc-service-types", "sc-transaction-pool", "sp-consensus", "sp-consensus-aura", @@ -7084,20 +7082,6 @@ dependencies = [ "tokio 0.1.22", ] -[[package]] -name = "sc-service-types" -version = "0.8.0-rc5" -dependencies = [ - "sc-consensus", - "sc-consensus-aura", - "sc-consensus-babe", - "sc-finality-grandpa", - "sc-network", - "sc-service", - "sc-transaction-pool", - "sp-api", -] - [[package]] name = "sc-state-db" version = "0.8.0-rc5" diff --git a/Cargo.toml b/Cargo.toml index 97d15fcebc006..ba146e55bca3f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,7 +52,6 @@ members = [ "client/rpc-api", "client/service", "client/service/test", - "client/service/types", "client/state-db", "client/telemetry", "client/transaction-pool", diff --git 
a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index 446b298274fb8..cd4007a8833b5 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -38,7 +38,6 @@ sp-finality-grandpa = { version = "2.0.0-rc5", path = "../../../primitives/final sc-client-api = { version = "2.0.0-rc5", path = "../../../client/api" } sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } sc-basic-authorship = { path = "../../../client/basic-authorship", version = "0.8.0-rc5"} -sc-service-types = { version = "0.8.0-rc5", path = "../../../client/service/types" } node-template-runtime = { version = "2.0.0-rc5", path = "../runtime" } diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 39dc3927f3d38..4a0164bd3f5b7 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -20,20 +20,21 @@ native_executor_instance!( node_template_runtime::native_version, ); -mod prelude { - use super::*; - sc_service_types::setup_types!(Block, RuntimeApi, Executor); -} - -use prelude::full; +type FullClient = sc_service::TFullClient; +type FullBackend = sc_service::TFullBackend; +type FullSelectChain = sc_consensus::LongestChain; pub fn new_full_params(config: Configuration) -> Result<( sc_service::ServiceParams< - Block, full::Client, full::AuraImportQueue, full::BasicPool, (), full::Backend, + Block, FullClient, + sc_consensus_aura::AuraImportQueue, + sc_transaction_pool::FullPool, + (), FullBackend, >, - full::LongestChain, sp_inherents::InherentDataProviders, - full::GrandpaBlockImport, - full::GrandpaLink + FullSelectChain, + sp_inherents::InherentDataProviders, + sc_finality_grandpa::GrandpaBlockImport, + sc_finality_grandpa::LinkHalf ), ServiceError> { let inherent_data_providers = sp_inherents::InherentDataProviders::new(); diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 1e1c9bd276bdf..71b4bfa69d856 100644 --- 
a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -75,7 +75,6 @@ sc-service = { version = "0.8.0-rc5", default-features = false, path = "../../.. sc-tracing = { version = "2.0.0-rc5", path = "../../../client/tracing" } sc-telemetry = { version = "2.0.0-rc5", path = "../../../client/telemetry" } sc-authority-discovery = { version = "0.8.0-rc5", path = "../../../client/authority-discovery" } -sc-service-types = { version = "0.8.0-rc5", path = "../../../client/service/types" } # frame dependencies pallet-indices = { version = "2.0.0-rc5", path = "../../../frame/indices" } diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 6a90523604681..ec061ccc45788 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -37,31 +37,31 @@ use sp_runtime::traits::Block as BlockT; use futures::prelude::*; use sc_client_api::{ExecutorProvider, RemoteBackend}; use sp_core::traits::BareCryptoStorePtr; -use sc_consensus_babe::BabeLink; +use node_executor::Executor; -mod prelude { - use super::*; - use node_executor::Executor; - sc_service_types::setup_types!(Block, RuntimeApi, Executor); -} - -use prelude::{full, light}; +type FullClient = sc_service::TFullClient; +type FullBackend = sc_service::TFullBackend; +type FullSelectChain = sc_consensus::LongestChain; +type FullGrandpaBlockImport = + grandpa::GrandpaBlockImport; +type LightClient = sc_service::TLightClient; pub fn new_full_params(config: Configuration) -> Result<( sc_service::ServiceParams< - Block, full::Client, full::BabeImportQueue, full::BasicPool, node_rpc::IoHandler, - full::Backend + Block, FullClient, + sc_consensus_babe::BabeImportQueue, + sc_transaction_pool::FullPool, node_rpc::IoHandler, + FullBackend >, ( - full::BabeBlockImport>, - full::GrandpaLink, BabeLink, + sc_consensus_babe::BabeBlockImport, + grandpa::LinkHalf, + sc_consensus_babe::BabeLink, ), grandpa::SharedVoterState, - full::LongestChain, + FullSelectChain, InherentDataProviders ), ServiceError> { - 
use node_executor::Executor; - let (client, backend, keystore, task_manager) = sc_service::new_full_parts::(&config)?; let client = Arc::new(client); @@ -166,11 +166,13 @@ pub fn new_full_params(config: Configuration) -> Result<( pub fn new_full_base( config: Configuration, with_startup_data: impl FnOnce( - &full::BabeBlockImport>, &BabeLink, + &sc_consensus_babe::BabeBlockImport, + &sc_consensus_babe::BabeLink, ) ) -> Result<( - TaskManager, InherentDataProviders, Arc, - Arc::Hash>>, Arc + TaskManager, InherentDataProviders, Arc, + Arc::Hash>>, + Arc>, ), ServiceError> { let (params, import_setup, rpc_setup, select_chain, inherent_data_providers) = new_full_params(config)?; @@ -325,12 +327,12 @@ pub fn new_full(config: Configuration) } pub fn new_light_base(config: Configuration) -> Result<( - TaskManager, Arc, Arc, + TaskManager, Arc, Arc, Arc::Hash>>, - Arc + Arc>> ), ServiceError> { let (client, backend, keystore, task_manager, on_demand) = - sc_service::new_light_parts::(&config)?; + sc_service::new_light_parts::(&config)?; let select_chain = sc_consensus::LongestChain::new(backend.clone()); diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 19bc3bae6c30b..a74a7865d0550 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -714,7 +714,7 @@ fn authorities(client: &C, at: &BlockId) -> Result, Consensus } /// The Aura import queue type. -pub type AuraImportQueue = BasicQueue; +pub type AuraImportQueue = BasicQueue>; /// Register the aura inherent data provider, if not registered already. 
fn register_aura_inherent_data_provider( @@ -824,7 +824,7 @@ pub fn import_queue( inherent_data_providers: InherentDataProviders, spawner: &S, registry: Option<&Registry>, -) -> Result>, sp_consensus::Error> where +) -> Result, sp_consensus::Error> where B: BlockT, C::Api: BlockBuilderApi + AuraApi> + ApiExt, C: 'static + ProvideRuntimeApi + BlockOf + ProvideCache + Send + Sync + AuxStore + HeaderBackend, diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index af684499cef8b..f09e9b063c255 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -968,7 +968,7 @@ where } /// The BABE import queue type. -pub type BabeImportQueue = BasicQueue; +pub type BabeImportQueue = BasicQueue>; /// Register the babe inherent data provider, if not registered already. fn register_babe_inherent_data_provider( @@ -1368,7 +1368,7 @@ pub fn import_queue( inherent_data_providers: InherentDataProviders, spawner: &impl sp_core::traits::SpawnNamed, registry: Option<&Registry>, -) -> ClientResult>> where +) -> ClientResult> where Inner: BlockImport> + Send + Sync + 'static, Client: ProvideRuntimeApi + ProvideCache + Send + Sync + AuxStore + 'static, diff --git a/client/service/types/Cargo.toml b/client/service/types/Cargo.toml deleted file mode 100644 index 3009d6412c4ba..0000000000000 --- a/client/service/types/Cargo.toml +++ /dev/null @@ -1,22 +0,0 @@ -[package] -name = "sc-service-types" -version = "0.8.0-rc5" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/substrate/" -description = "Easy to use type definitions" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -sc-service = { path = "..", version = "0.8.0-rc4"} -sc-finality-grandpa = { path = "../../finality-grandpa", version = "0.8.0-rc4"} -sc-consensus = { path = "../../consensus/common", 
version = "0.8.0-rc4"} -sc-consensus-aura = { path = "../../consensus/aura", version = "0.8.0-rc4"} -sc-consensus-babe = { path = "../../consensus/babe", version = "0.8.0-rc4"} -sc-transaction-pool = { path = "../../transaction-pool", version = "2.0.0-rc4"} -sc-network = { path = "../../network" } -sp-api = { path = "../../../primitives/api" } diff --git a/client/service/types/src/lib.rs b/client/service/types/src/lib.rs deleted file mode 100644 index 9d2b8bd6fa442..0000000000000 --- a/client/service/types/src/lib.rs +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Type definitions to make building a service easier. - -#![warn(missing_docs)] - -pub use sc_service as _service; -pub use sc_finality_grandpa as _grandpa; -pub use sc_consensus as _consensus; -pub use sc_consensus_aura as _aura; -pub use sc_consensus_babe as _babe; -pub use sc_transaction_pool as _transaction_pool; -pub use sc_network as _network; -pub use sp_api as _sp_api; - -use { - _service as service, _grandpa as grandpa, _consensus as consensus, _aura as aura, - _transaction_pool as transaction_pool, _sp_api as sp_api, _network as network, - _babe as babe, -}; - -/// Type definitions for a full client. 
-pub mod full { - use super::*; - - pub use service::TFullClient as Client; - pub use service::TFullBackend as Backend; - - /// A GRANDPA block import. - pub type GrandpaBlockImport = - grandpa::GrandpaBlockImport< - Backend, Block, Client, SelectChain - >; - /// A GRANDPA link. Connects the block import to the GRANDPA service. - pub type GrandpaLink = grandpa ::LinkHalf< - Block, Client, SelectChain - >; - /// A basic select chain implementation. - pub type LongestChain = consensus::LongestChain, Block>; - /// A basic transaction pool. - pub type BasicPool = transaction_pool::BasicPool< - transaction_pool::FullChainApi, Block>, Block - >; - /// An import queue for AURA. - pub type AuraImportQueue = aura::AuraImportQueue< - Block, sp_api::TransactionFor, Block> - >; - /// An import queue for BABE. - pub type BabeImportQueue = babe::BabeImportQueue< - Block, sp_api::TransactionFor, Block> - >; - /// A block import for BABE. Wraps around another block import. - pub type BabeBlockImport = babe::BabeBlockImport< - Block, Client, BlockImport - >; -} - -/// Type definitions for a light client. -pub mod light { - use super::*; - - pub use service::TLightClient as Client; - pub use service::TLightBackend as Backend; - - /// A network fetcher for a light client. - pub type Fetcher = network::config::OnDemand; - - /// A basic transaction pool. - pub type BasicPool = transaction_pool::BasicPool< - transaction_pool::LightChainApi, Fetcher, Block>, - Block - >; -} - -/// Setup the type definitions given a `Block`, `RuntimeApi`and `Executor`. -#[macro_export] -macro_rules! setup_types { - ($block:ty, $runtime_api:ty, $executor:ty) => { - /// Type definitions for a full client. - pub mod full { - use $crate::full; - use super::*; - - /// A full client. - pub type Client = full::Client<$block, $runtime_api, $executor>; - /// A full backend. - pub type Backend = full::Backend<$block>; - /// A GRANDPA block import. 
- pub type GrandpaBlockImport = full::GrandpaBlockImport< - $block, $runtime_api, $executor, SelectChain - >; - /// A GRANDPA link. Connects the block import to the GRANDPA service. - pub type GrandpaLink = full::GrandpaLink< - $block, $runtime_api, $executor, SelectChain - >; - /// A basic select chain implementation. - pub type LongestChain = full::LongestChain<$block>; - /// A basic transaction pool. - pub type BasicPool = full::BasicPool<$block, $runtime_api, $executor>; - /// An import queue for AURA. - pub type AuraImportQueue = full::AuraImportQueue< - $block, $runtime_api, $executor - >; - /// An import queue for BABE. - pub type BabeImportQueue = full::BabeImportQueue< - $block, $runtime_api, $executor - >; - /// A block import for BABE. Wraps around another block import. - pub type BabeBlockImport = full::BabeBlockImport< - $block, $runtime_api, $executor, BlockImport - >; - } - - /// Type definitions for a light client. - pub mod light { - use $crate::light; - use super::*; - - /// A light client. - pub type Client = light::Client<$block, $runtime_api, $executor>; - /// A network fetcher for a light client. - pub type Fetcher = light::Fetcher<$block>; - /// A basic transaction pool. - pub type BasicPool = light::BasicPool<$block, $runtime_api, $executor>; - } - } -} diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index fd6d64a340db3..bb9936984f96b 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -64,6 +64,11 @@ type ReadyIteratorFor = BoxedReadyIterator< type PolledIterator = Pin> + Send>>; +/// A transaction pool for a full node. +pub type FullPool = BasicPool, Block>; +/// A transaction pool for a light node. +pub type LightPool = BasicPool, Block>; + /// Basic implementation of transaction pool that can be customized by providing PoolApi. 
pub struct BasicPool where From 52f9c95850dd2291b2d189e8cdbb5cd94607464c Mon Sep 17 00:00:00 2001 From: Ashley Date: Tue, 21 Jul 2020 16:48:01 +0200 Subject: [PATCH 22/24] Update bin/node-template/node/src/service.rs Co-authored-by: Seun Lanlege --- bin/node-template/node/src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 56d6c4c23521b..4c41e988d0af4 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -80,7 +80,7 @@ pub fn new_full_params(config: Configuration) -> Result<( let params = sc_service::ServiceParams { backend, client, import_queue, keystore, task_manager, transaction_pool, - config: config, + config, block_announce_validator_builder: None, finality_proof_request_builder: None, finality_proof_provider: Some(finality_proof_provider), From ed14ad187c9359c8c19192ccf2218a1dbb5a5455 Mon Sep 17 00:00:00 2001 From: Ashley Ruglys Date: Wed, 22 Jul 2020 15:34:44 +0200 Subject: [PATCH 23/24] Add TLightClientWithHash --- client/service/src/builder.rs | 19 +++++++++++++++++++ client/service/src/lib.rs | 2 +- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 48ba25cad5b1d..7a4ed274742c0 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -176,6 +176,25 @@ type TLightParts = ( Arc>, ); +type TLightBackendWithHash = sc_light::Backend< + sc_client_db::light::LightStorage, + THash, +>; + +/// Light client type with a specific hash type. +pub type TLightClientWithHash = Client< + TLightBackendWithHash, + sc_light::GenesisCallExecutor< + TLightBackendWithHash, + crate::client::LocalCallExecutor< + TLightBackendWithHash, + NativeExecutor + >, + >, + TBl, + TRtApi, +>; + /// Creates a new full client for the given config. 
pub fn new_full_client( config: &Configuration, diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index bb727ef874336..5f017ee7906f5 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -53,7 +53,7 @@ use sp_utils::{status_sinks, mpsc::{tracing_unbounded, TracingUnboundedReceiver, pub use self::error::Error; pub use self::builder::{ new_full_client, new_client, new_full_parts, new_light_parts, build, - ServiceParams, TFullClient, TLightClient, TFullBackend, TLightBackend, + ServiceParams, TFullClient, TLightClient, TFullBackend, TLightBackend, TLightClientWithHash, TFullCallExecutor, TLightCallExecutor, RpcExtensionBuilder, NoopRpcExtensionBuilder, }; pub use config::{ From 319ba0c3d34b3ebfc2375aae655ffc63c2435aeb Mon Sep 17 00:00:00 2001 From: Ashley Ruglys Date: Wed, 22 Jul 2020 15:44:55 +0200 Subject: [PATCH 24/24] Rework types --- client/service/src/builder.rs | 23 +++++++++-------------- client/service/src/lib.rs | 3 ++- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 7a4ed274742c0..c71746f48a890 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -133,11 +133,8 @@ pub type TFullCallExecutor = crate::client::LocalCallExecutor< >; /// Light client type. -pub type TLightClient = Client< - TLightBackend, - TLightCallExecutor, - TBl, - TRtApi, +pub type TLightClient = TLightClientWithBackend< + TBl, TRtApi, TExecDisp, TLightBackend >; /// Light client backend type. @@ -176,20 +173,18 @@ type TLightParts = ( Arc>, ); -type TLightBackendWithHash = sc_light::Backend< +/// Light client backend type with a specific hash type. +pub type TLightBackendWithHash = sc_light::Backend< sc_client_db::light::LightStorage, THash, >; -/// Light client type with a specific hash type. -pub type TLightClientWithHash = Client< - TLightBackendWithHash, +/// Light client type with a specific backend. 
+pub type TLightClientWithBackend = Client< + TBackend, sc_light::GenesisCallExecutor< - TLightBackendWithHash, - crate::client::LocalCallExecutor< - TLightBackendWithHash, - NativeExecutor - >, + TBackend, + crate::client::LocalCallExecutor>, >, TBl, TRtApi, diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 5f017ee7906f5..1eef6493e775e 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -53,7 +53,8 @@ use sp_utils::{status_sinks, mpsc::{tracing_unbounded, TracingUnboundedReceiver, pub use self::error::Error; pub use self::builder::{ new_full_client, new_client, new_full_parts, new_light_parts, build, - ServiceParams, TFullClient, TLightClient, TFullBackend, TLightBackend, TLightClientWithHash, + ServiceParams, TFullClient, TLightClient, TFullBackend, TLightBackend, + TLightBackendWithHash, TLightClientWithBackend, TFullCallExecutor, TLightCallExecutor, RpcExtensionBuilder, NoopRpcExtensionBuilder, }; pub use config::{