From ee0ccecd87013821a2e68120ba3510393c0373e7 Mon Sep 17 00:00:00 2001 From: ian Date: Fri, 30 Oct 2020 13:14:32 +0800 Subject: [PATCH] docs: deny missing docs (#2313) * docs: deny missing docs * docs: add missing docs for fixed-hash-related crates * docs: add missing docs for logger-related crates * docs: add missing docs for memory-tracker crate * docs: add missing docs for metrics-related crates * ci: fix two ci checks * docs: add missing docs for types crate * docs: fix an incorrect description and two typos according to review comments * docs: add missing docs for ckb-resource * Update resource/src/template.rs Co-authored-by: Boyu Yang * docs: add todo for missing docs Co-authored-by: Boyu Yang --- Cargo.lock | 3 +- Cargo.toml | 2 +- Makefile | 2 +- benches/Cargo.toml | 2 +- benches/benches/bench_main.rs | 1 + benches/src/lib.rs | 6 +- build.rs | 1 + chain/Cargo.toml | 2 +- chain/src/chain.rs | 16 + chain/src/switch.rs | 12 + ckb-bin/Cargo.toml | 2 +- ckb-bin/src/lib.rs | 2 + db-migration/Cargo.toml | 2 +- db-migration/src/lib.rs | 10 + db/Cargo.toml | 2 +- db/src/db.rs | 15 + db/src/iter.rs | 5 + db/src/lib.rs | 2 + db/src/snapshot.rs | 3 + db/src/transaction.rs | 13 + db/src/write_batch.rs | 7 + devtools/ci/check-cargotoml.sh | 2 +- error/Cargo.toml | 3 +- error/src/internal.rs | 5 + error/src/lib.rs | 15 + error/src/util.rs | 4 + indexer/Cargo.toml | 2 +- indexer/src/lib.rs | 3 + miner/Cargo.toml | 2 +- miner/src/client.rs | 9 + miner/src/error.rs | 4 + miner/src/lib.rs | 4 + miner/src/miner.rs | 13 + network/Cargo.toml | 2 +- network/src/behaviour.rs | 1 + network/src/benches/peer_store.rs | 1 + network/src/errors.rs | 27 ++ network/src/lib.rs | 2 + network/src/network.rs | 39 ++ network/src/peer.rs | 20 + network/src/peer_registry.rs | 17 + network/src/peer_store/addr_manager.rs | 8 + network/src/peer_store/ban_list.rs | 8 + network/src/peer_store/mod.rs | 12 + network/src/peer_store/peer_store_db.rs | 6 + network/src/peer_store/peer_store_impl.rs | 10 + 
network/src/peer_store/types.rs | 37 ++ network/src/protocols/mod.rs | 30 ++ network/src/protocols/support_protocols.rs | 16 + notify/Cargo.toml | 2 +- notify/src/lib.rs | 20 + pow/Cargo.toml | 2 +- pow/src/dummy.rs | 1 + pow/src/eaglesong.rs | 1 + pow/src/eaglesong_blake2b.rs | 1 + pow/src/lib.rs | 12 + resource/Cargo.toml | 2 +- resource/build.rs | 13 +- resource/src/lib.rs | 91 ++++- resource/src/template.rs | 123 +++++- rpc/Cargo.toml | 2 +- rpc/README.md | 26 +- rpc/src/error.rs | 11 + rpc/src/module/subscription.rs | 1 + rpc/src/server.rs | 2 + rpc/src/service_builder.rs | 12 + script/Cargo.toml | 2 +- script/build.rs | 1 + script/src/cost_model.rs | 4 + script/src/error.rs | 5 + script/src/ill_transaction_checker.rs | 3 + script/src/lib.rs | 1 + script/src/types.rs | 19 +- script/src/verify.rs | 19 +- shared/Cargo.toml | 2 +- shared/src/lib.rs | 1 + shared/src/shared.rs | 21 ++ spec/Cargo.toml | 2 +- spec/src/consensus.rs | 100 ++++- spec/src/error.rs | 11 +- spec/src/lib.rs | 78 +++- src/main.rs | 1 + store/Cargo.toml | 2 +- store/src/cache.rs | 8 + store/src/cell.rs | 2 +- store/src/data_loader_wrapper.rs | 3 + store/src/db.rs | 9 + store/src/lib.rs | 17 + store/src/snapshot.rs | 1 + store/src/store.rs | 30 +- store/src/transaction.rs | 17 + store/src/write_batch.rs | 8 + sync/Cargo.toml | 2 +- sync/src/lib.rs | 24 ++ sync/src/net_time_checker.rs | 1 + sync/src/relayer/mod.rs | 8 + sync/src/status.rs | 9 + sync/src/synchronizer/mod.rs | 8 + sync/src/types/mod.rs | 15 + test/Cargo.toml | 2 +- traits/Cargo.toml | 2 +- traits/src/block_median_time_context.rs | 1 + traits/src/cell_data_provider.rs | 3 + traits/src/header_provider.rs | 2 + traits/src/lib.rs | 1 + tx-pool/Cargo.toml | 2 +- tx-pool/src/component/entry.rs | 5 + tx-pool/src/error.rs | 12 + tx-pool/src/lib.rs | 1 + tx-pool/src/pool.rs | 26 ++ tx-pool/src/process.rs | 3 + tx-pool/src/service.rs | 45 +++ util/Cargo.toml | 2 +- util/app-config/Cargo.toml | 2 +- util/app-config/src/app_config.rs | 38 ++ 
util/app-config/src/args.rs | 64 ++++ util/app-config/src/cli.rs | 50 +++ util/app-config/src/configs/db.rs | 5 + util/app-config/src/configs/indexer.rs | 1 + util/app-config/src/configs/memory_tracker.rs | 2 + util/app-config/src/configs/miner.rs | 44 ++- util/app-config/src/configs/network.rs | 36 +- util/app-config/src/configs/network_alert.rs | 3 + util/app-config/src/configs/notify.rs | 3 + util/app-config/src/configs/rpc.rs | 33 +- util/app-config/src/configs/store.rs | 7 + util/app-config/src/configs/tx_pool.rs | 21 +- util/app-config/src/exit_code.rs | 5 + util/app-config/src/lib.rs | 20 + util/build-info/Cargo.toml | 2 +- util/build-info/src/lib.rs | 50 ++- util/chain-iter/Cargo.toml | 2 +- util/chain-iter/src/lib.rs | 5 + util/channel/Cargo.toml | 2 +- util/channel/src/lib.rs | 1 + util/crypto/Cargo.toml | 2 +- util/crypto/src/lib.rs | 2 + util/crypto/src/secp/error.rs | 7 + util/crypto/src/secp/generator.rs | 7 + util/crypto/src/secp/mod.rs | 4 + util/crypto/src/secp/privkey.rs | 3 + util/crypto/src/secp/pubkey.rs | 3 + util/crypto/src/secp/signature.rs | 5 + util/dao/Cargo.toml | 2 +- util/dao/src/lib.rs | 12 + util/dao/utils/Cargo.toml | 2 +- util/dao/utils/src/error.rs | 6 + util/dao/utils/src/lib.rs | 6 + util/fee-estimator/Cargo.toml | 2 +- util/fee-estimator/src/estimator.rs | 2 + util/fee-estimator/src/fee_rate.rs | 5 + util/fee-estimator/src/lib.rs | 2 + util/fixed-hash/Cargo.toml | 2 +- util/fixed-hash/core/Cargo.toml | 2 +- util/fixed-hash/core/src/error.rs | 16 +- util/fixed-hash/core/src/impls.rs | 2 + util/fixed-hash/core/src/lib.rs | 15 + util/fixed-hash/core/src/std_str.rs | 52 ++- util/fixed-hash/macros/Cargo.toml | 2 +- util/fixed-hash/macros/src/lib.rs | 32 +- util/fixed-hash/src/lib.rs | 20 + util/hash/Cargo.toml | 2 +- util/hash/src/lib.rs | 46 +++ util/instrument/Cargo.toml | 2 +- util/instrument/src/export.rs | 4 + util/instrument/src/import.rs | 3 + util/jsonrpc-types/Cargo.toml | 2 +- util/jsonrpc-types/src/blockchain.rs | 9 + 
util/jsonrpc-types/src/bytes.rs | 6 + util/jsonrpc-types/src/experiment.rs | 1 + util/jsonrpc-types/src/fixed_bytes.rs | 1 + util/jsonrpc-types/src/lib.rs | 1 + util/jsonrpc-types/src/pool.rs | 1 + util/jsonrpc-types/src/proposal_short_id.rs | 2 + util/jsonrpc-types/src/sync.rs | 1 + util/logger-config/Cargo.toml | 2 +- util/logger-config/src/lib.rs | 37 ++ util/logger-service/Cargo.toml | 2 +- util/logger-service/src/lib.rs | 21 +- util/logger/Cargo.toml | 2 +- util/logger/src/lib.rs | 239 ++++++++++++ util/memory-tracker/Cargo.toml | 9 +- util/memory-tracker/src/jemalloc.rs | 7 + util/memory-tracker/src/lib.rs | 11 +- util/memory-tracker/src/process.rs | 3 +- util/memory-tracker/src/rocksdb.rs | 72 ++-- util/memory-tracker/src/utils.rs | 91 ----- util/metrics-config/Cargo.toml | 4 +- util/metrics-config/src/lib.rs | 109 +++--- util/metrics-service/Cargo.toml | 3 +- util/metrics-service/src/lib.rs | 16 +- util/metrics/Cargo.toml | 2 +- util/metrics/src/lib.rs | 39 +- util/multisig/Cargo.toml | 2 +- util/multisig/src/error.rs | 14 +- util/multisig/src/lib.rs | 4 + util/multisig/src/secp256k1.rs | 4 +- util/network-alert/Cargo.toml | 2 +- util/network-alert/src/alert_relayer.rs | 3 + util/network-alert/src/notifier.rs | 9 + util/network-alert/src/verifier.rs | 4 + util/occupied-capacity/Cargo.toml | 2 +- util/occupied-capacity/core/Cargo.toml | 2 +- util/occupied-capacity/core/src/units.rs | 22 +- util/occupied-capacity/macros/Cargo.toml | 2 +- util/occupied-capacity/macros/src/lib.rs | 2 + util/proposal-table/Cargo.toml | 2 +- util/proposal-table/src/lib.rs | 13 + util/rational/Cargo.toml | 2 +- util/rational/src/lib.rs | 20 + util/reward-calculator/Cargo.toml | 2 +- util/reward-calculator/src/lib.rs | 5 + util/runtime/Cargo.toml | 2 +- util/runtime/src/lib.rs | 2 + util/rust-unstable-port/Cargo.toml | 2 +- util/rust-unstable-port/src/lib.rs | 2 + util/snapshot/Cargo.toml | 2 +- util/snapshot/src/lib.rs | 17 + util/src/lib.rs | 17 +- util/src/linked_hash_set.rs | 
47 +++ util/src/shrink_to_fit.rs | 12 + util/src/strings.rs | 16 + util/stop-handler/Cargo.toml | 2 +- util/stop-handler/src/lib.rs | 10 + util/test-chain-utils/Cargo.toml | 2 +- util/test-chain-utils/src/chain.rs | 8 + util/test-chain-utils/src/lib.rs | 1 + util/test-chain-utils/src/median_time.rs | 4 + util/test-chain-utils/src/mock_store.rs | 5 + util/types/Cargo.toml | 2 +- util/types/src/constants.rs | 2 + util/types/src/conversion/primitive.rs | 4 + util/types/src/core/advanced_builders.rs | 110 ++++-- util/types/src/core/blockchain.rs | 6 + util/types/src/core/cell.rs | 45 +++ util/types/src/core/error.rs | 5 + util/types/src/core/extras.rs | 60 +++ util/types/src/core/mod.rs | 22 +- util/types/src/core/reward.rs | 60 +++ util/types/src/core/service.rs | 10 + util/types/src/core/transaction_meta.rs | 18 + util/types/src/core/views.rs | 352 +++++++++++++----- util/types/src/extension/calc_hash.rs | 118 +++++- util/types/src/extension/capacity.rs | 29 ++ util/types/src/extension/check_data.rs | 27 +- util/types/src/extension/serialized_size.rs | 40 +- util/types/src/extension/shortcuts.rs | 44 ++- util/types/src/generated/mod.rs | 16 + util/types/src/lib.rs | 3 +- util/types/src/prelude.rs | 17 +- util/types/src/utilities/difficulty.rs | 5 + util/types/src/utilities/merkle_tree.rs | 4 + util/types/src/utilities/mod.rs | 1 + verification/Cargo.toml | 2 +- verification/src/block_verifier.rs | 8 + verification/src/cache.rs | 6 + verification/src/contextual_block_verifier.rs | 11 + verification/src/error.rs | 182 ++++++++- verification/src/genesis_verifier.rs | 2 + verification/src/header_verifier.rs | 4 + verification/src/lib.rs | 5 + verification/src/transaction_verifier.rs | 37 ++ wasm-build-test/Cargo.toml | 2 +- 263 files changed, 3678 insertions(+), 484 deletions(-) delete mode 100644 util/memory-tracker/src/utils.rs diff --git a/Cargo.lock b/Cargo.lock index 3b3439c352..0641a098fd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -549,7 +549,6 @@ dependencies = [ 
"ckb-occupied-capacity", "enum-display-derive", "failure", - "quote 1.0.7", ] [[package]] @@ -719,7 +718,6 @@ dependencies = [ "ckb-metrics-config", "ckb-stop-handler", "ckb-util", - "log 0.4.11", "metrics-core", "metrics-runtime", "tokio 0.2.22", @@ -2721,6 +2719,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" dependencies = [ "cfg-if", + "serde", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 46c814d866..aebfa6cff8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" build = "build.rs" -description = "TODO(doc): crate description" +description = "TODO(doc): @doitian crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/Makefile b/Makefile index 0de78104dc..c1aa1d79e6 100644 --- a/Makefile +++ b/Makefile @@ -119,7 +119,7 @@ fmt: setup-ckb-test ## Check Rust source code format to keep to the same style. .PHONY: clippy clippy: setup-ckb-test ## Run linter to examine Rust source codes. 
- cargo clippy ${VERBOSE} --all --all-targets --all-features -- ${CLIPPY_OPTS} + cargo clippy ${VERBOSE} --all --all-targets --all-features -- ${CLIPPY_OPTS} -D missing_docs cd test && cargo clippy ${VERBOSE} --all --all-targets --all-features -- ${CLIPPY_OPTS} .PHONY: security-audit diff --git a/benches/Cargo.toml b/benches/Cargo.toml index 87972dc1e9..5d915213d0 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @doitian crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/benches/benches/bench_main.rs b/benches/benches/bench_main.rs index d47160e456..68df72c80b 100644 --- a/benches/benches/bench_main.rs +++ b/benches/benches/bench_main.rs @@ -1,3 +1,4 @@ +//! TODO(doc): @doitian mod benchmarks; use criterion::criterion_main; diff --git a/benches/src/lib.rs b/benches/src/lib.rs index 8b13789179..88775ca4ba 100644 --- a/benches/src/lib.rs +++ b/benches/src/lib.rs @@ -1 +1,5 @@ - +//! CKB Benches +//! +//! ```console +//! cd benches && cargo bench --features ci -- --test +//! ``` diff --git a/build.rs b/build.rs index e0fdd75bef..492a6f88b5 100644 --- a/build.rs +++ b/build.rs @@ -1,3 +1,4 @@ +//! Build script for the binary crate `ckb`. 
use std::path::Path; fn rerun_if_changed(path_str: &str) -> bool { diff --git a/chain/Cargo.toml b/chain/Cargo.toml index 84e1abf0ad..91ecc94426 100644 --- a/chain/Cargo.toml +++ b/chain/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @zhangsoledad crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/chain/src/chain.rs b/chain/src/chain.rs index dc546dfc76..f3d83136ff 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -1,3 +1,4 @@ +//! TODO(doc): @zhangsoledad use crate::switch::Switch; use ckb_channel::{self as channel, select, Sender}; use ckb_error::{Error, InternalErrorKind}; @@ -30,6 +31,7 @@ use std::{cmp, thread}; type ProcessBlockRequest = Request<(Arc, Switch), Result>; type TruncateRequest = Request>; +/// TODO(doc): @zhangsoledad #[derive(Clone)] pub struct ChainController { process_block_sender: Sender, @@ -44,10 +46,12 @@ impl Drop for ChainController { } impl ChainController { + /// TODO(doc): @zhangsoledad pub fn process_block(&self, block: Arc) -> Result { self.internal_process_block(block, Switch::NONE) } + /// TODO(doc): @zhangsoledad pub fn internal_process_block( &self, block: Arc, @@ -71,6 +75,7 @@ impl ChainController { } } +/// TODO(doc): @zhangsoledad #[derive(Debug, Default)] pub struct ForkChanges { // blocks attached to index after forks @@ -84,26 +89,32 @@ pub struct ForkChanges { } impl ForkChanges { + /// TODO(doc): @zhangsoledad pub fn attached_blocks(&self) -> &VecDeque { &self.attached_blocks } + /// TODO(doc): @zhangsoledad pub fn detached_blocks(&self) -> &VecDeque { &self.detached_blocks } + /// TODO(doc): @zhangsoledad pub fn detached_proposal_id(&self) -> &HashSet { &self.detached_proposal_id } + /// TODO(doc): @zhangsoledad pub fn has_detached(&self) -> bool { !self.detached_blocks.is_empty() } + /// 
TODO(doc): @zhangsoledad pub fn verified_len(&self) -> usize { self.attached_blocks.len() - self.dirty_exts.len() } + /// TODO(doc): @zhangsoledad #[cfg(debug_assertions)] pub fn is_sorted(&self) -> bool { IsSorted::is_sorted_by_key(&mut self.attached_blocks().iter(), |blk| { @@ -135,12 +146,14 @@ impl GlobalIndex { } } +/// TODO(doc): @zhangsoledad pub struct ChainService { shared: Shared, proposal_table: ProposalTable, } impl ChainService { + /// TODO(doc): @zhangsoledad pub fn new(shared: Shared, proposal_table: ProposalTable) -> ChainService { ChainService { shared, @@ -148,6 +161,7 @@ impl ChainService { } } + /// TODO(doc): @zhangsoledad // remove `allow` tag when https://github.com/crossbeam-rs/crossbeam/issues/404 is solved #[allow(clippy::zero_ptr, clippy::drop_copy)] pub fn start(mut self, thread_name: Option) -> ChainController { @@ -197,6 +211,7 @@ impl ChainService { } } + /// TODO(doc): @zhangsoledad pub fn external_process_block(&mut self, block: Arc) -> Result { self.process_block(block, Switch::NONE) } @@ -259,6 +274,7 @@ impl ChainService { Ok(()) } + /// TODO(doc): @zhangsoledad // process_block will do block verify // but invoker should guarantee block header be verified pub fn process_block(&mut self, block: Arc, switch: Switch) -> Result { diff --git a/chain/src/switch.rs b/chain/src/switch.rs index fc9405acf0..c21f894047 100644 --- a/chain/src/switch.rs +++ b/chain/src/switch.rs @@ -1,17 +1,27 @@ +//! TODO(doc): @zhangsoledad #![allow(clippy::unreadable_literal)] use bitflags::bitflags; use ckb_verification::Switch as VerificationSwitch; bitflags! 
{ + /// TODO(doc): @zhangsoledad pub struct Switch: u32 { + /// TODO(doc): @zhangsoledad const NONE = 0b00000000; + /// TODO(doc): @zhangsoledad const DISABLE_EPOCH = 0b00000001; + /// TODO(doc): @zhangsoledad const DISABLE_UNCLES = 0b00000010; + /// TODO(doc): @zhangsoledad const DISABLE_TWO_PHASE_COMMIT = 0b00000100; + /// TODO(doc): @zhangsoledad const DISABLE_DAOHEADER = 0b00001000; + /// TODO(doc): @zhangsoledad const DISABLE_REWARD = 0b00010000; + /// TODO(doc): @zhangsoledad const DISABLE_NON_CONTEXTUAL = 0b00100000; + /// TODO(doc): @zhangsoledad const DISABLE_ALL = Self::DISABLE_EPOCH.bits | Self::DISABLE_UNCLES.bits | Self::DISABLE_TWO_PHASE_COMMIT.bits | Self::DISABLE_DAOHEADER.bits | Self::DISABLE_REWARD.bits | @@ -20,10 +30,12 @@ bitflags! { } impl Switch { + /// TODO(doc): @zhangsoledad pub fn disable_all(self) -> bool { self.contains(Switch::DISABLE_ALL) } + /// TODO(doc): @zhangsoledad pub fn disable_non_contextual(self) -> bool { self.contains(Switch::DISABLE_NON_CONTEXTUAL) } diff --git a/ckb-bin/Cargo.toml b/ckb-bin/Cargo.toml index e852dc1958..34f73674d8 100644 --- a/ckb-bin/Cargo.toml +++ b/ckb-bin/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @doitian crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/ckb-bin/src/lib.rs b/ckb-bin/src/lib.rs index 43c8673b6c..60eb9833ce 100644 --- a/ckb-bin/src/lib.rs +++ b/ckb-bin/src/lib.rs @@ -1,3 +1,4 @@ +//! TODO(doc): @doitian mod helper; mod setup_guard; mod subcommand; @@ -10,6 +11,7 @@ use setup_guard::SetupGuard; pub(crate) const LOG_TARGET_MAIN: &str = "main"; pub(crate) const LOG_TARGET_SENTRY: &str = "sentry"; +/// TODO(doc): @doitian pub fn run_app(version: Version) -> Result<(), ExitCode> { // Always print backtrace on panic. 
::std::env::set_var("RUST_BACKTRACE", "full"); diff --git a/db-migration/Cargo.toml b/db-migration/Cargo.toml index 0516ea2d14..b83bb5ea8d 100644 --- a/db-migration/Cargo.toml +++ b/db-migration/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @quake crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/db-migration/src/lib.rs b/db-migration/src/lib.rs index 3277de5066..176e99fd0c 100644 --- a/db-migration/src/lib.rs +++ b/db-migration/src/lib.rs @@ -1,3 +1,4 @@ +//! TODO(doc): @quake use ckb_db::RocksDB; use ckb_error::{Error, InternalErrorKind}; use ckb_logger::{error, info}; @@ -6,29 +7,34 @@ pub use indicatif::{HumanDuration, MultiProgress, ProgressBar, ProgressDrawTarge use std::collections::BTreeMap; use std::sync::Arc; +/// TODO(doc): @quake pub const VERSION_KEY: &[u8] = b"db-version"; fn internal_error(reason: String) -> Error { InternalErrorKind::Database.reason(reason).into() } +/// TODO(doc): @quake #[derive(Default)] pub struct Migrations { migrations: BTreeMap>, } impl Migrations { + /// TODO(doc): @quake pub fn new() -> Self { Migrations { migrations: BTreeMap::new(), } } + /// TODO(doc): @quake pub fn add_migration(&mut self, migration: Box) { self.migrations .insert(migration.version().to_string(), migration); } + /// TODO(doc): @quake pub fn migrate(&self, mut db: RocksDB) -> Result { let db_version = db .get_pinned_default(VERSION_KEY) @@ -91,7 +97,9 @@ impl Migrations { } } +/// TODO(doc): @quake pub trait Migration { + /// TODO(doc): @quake fn migrate( &self, _db: RocksDB, @@ -102,11 +110,13 @@ pub trait Migration { fn version(&self) -> &str; } +/// TODO(doc): @quake pub struct DefaultMigration { version: String, } impl DefaultMigration { + /// TODO(doc): @quake pub fn new(version: &str) -> Self { Self { version: 
version.to_string(), diff --git a/db/Cargo.toml b/db/Cargo.toml index ea9ed9e208..d04d7a8b69 100644 --- a/db/Cargo.toml +++ b/db/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @quake crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/db/src/db.rs b/db/src/db.rs index 1d2d848ed7..39d6b0f507 100644 --- a/db/src/db.rs +++ b/db/src/db.rs @@ -1,3 +1,4 @@ +//! TODO(doc): @quake use crate::snapshot::RocksDBSnapshot; use crate::transaction::RocksDBTransaction; use crate::write_batch::RocksDBWriteBatch; @@ -14,8 +15,10 @@ use rocksdb::{ }; use std::sync::Arc; +/// TODO(doc): @quake pub const VERSION_KEY: &str = "db-version"; +/// TODO(doc): @quake #[derive(Clone)] pub struct RocksDB { pub(crate) inner: Arc, @@ -113,10 +116,12 @@ impl RocksDB { }) } + /// TODO(doc): @quake pub fn open(config: &DBConfig, columns: u32) -> Self { Self::open_with_check(config, columns).unwrap_or_else(|err| panic!("{}", err)) } + /// TODO(doc): @quake pub fn open_tmp(columns: u32) -> Self { let tmp_dir = tempfile::Builder::new().tempdir().unwrap(); let config = DBConfig { @@ -126,15 +131,18 @@ impl RocksDB { Self::open_with_check(&config, columns).unwrap_or_else(|err| panic!("{}", err)) } + /// TODO(doc): @quake pub fn get_pinned(&self, col: Col, key: &[u8]) -> Result> { let cf = cf_handle(&self.inner, col)?; self.inner.get_pinned_cf(cf, &key).map_err(internal_error) } + /// TODO(doc): @quake pub fn get_pinned_default(&self, key: &[u8]) -> Result> { self.inner.get_pinned(&key).map_err(internal_error) } + /// TODO(doc): @quake pub fn put_default(&self, key: K, value: V) -> Result<()> where K: AsRef<[u8]>, @@ -143,6 +151,7 @@ impl RocksDB { self.inner.put(key, value).map_err(internal_error) } + /// TODO(doc): @quake pub fn traverse(&self, col: Col, mut callback: F) -> Result<()> 
where F: FnMut(&[u8], &[u8]) -> Result<()>, @@ -170,6 +179,7 @@ impl RocksDB { } } + /// TODO(doc): @quake pub fn new_write_batch(&self) -> RocksDBWriteBatch { RocksDBWriteBatch { db: Arc::clone(&self.inner), @@ -177,10 +187,12 @@ impl RocksDB { } } + /// TODO(doc): @quake pub fn write(&self, batch: &RocksDBWriteBatch) -> Result<()> { self.inner.write(&batch.inner).map_err(internal_error) } + /// TODO(doc): @quake pub fn get_snapshot(&self) -> RocksDBSnapshot { unsafe { let snapshot = ffi::rocksdb_create_snapshot(self.inner.base_db_ptr()); @@ -188,10 +200,12 @@ impl RocksDB { } } + /// TODO(doc): @quake pub fn inner(&self) -> Arc { Arc::clone(&self.inner) } + /// TODO(doc): @quake pub fn create_cf(&mut self, col: Col) -> Result<()> { let inner = Arc::get_mut(&mut self.inner) .ok_or_else(|| internal_error("create_cf get_mut failed"))?; @@ -199,6 +213,7 @@ impl RocksDB { inner.create_cf(col, &opts).map_err(internal_error) } + /// TODO(doc): @quake pub fn drop_cf(&mut self, col: Col) -> Result<()> { let inner = Arc::get_mut(&mut self.inner) .ok_or_else(|| internal_error("drop_cf get_mut failed"))?; diff --git a/db/src/iter.rs b/db/src/iter.rs index cf32708a8d..96f35effa4 100644 --- a/db/src/iter.rs +++ b/db/src/iter.rs @@ -1,3 +1,4 @@ +//! 
TODO(doc): @quake use crate::db::cf_handle; use crate::{ internal_error, Col, Result, RocksDB, RocksDBSnapshot, RocksDBTransaction, @@ -6,14 +7,18 @@ use crate::{ use rocksdb::{ops::IterateCF, ReadOptions}; pub use rocksdb::{DBIterator as DBIter, Direction, IteratorMode}; +/// TODO(doc): @quake pub type DBIterItem = (Box<[u8]>, Box<[u8]>); +/// TODO(doc): @quake pub trait DBIterator { + /// TODO(doc): @quake fn iter(&self, col: Col, mode: IteratorMode) -> Result { let opts = ReadOptions::default(); self.iter_opt(col, mode, &opts) } + /// TODO(doc): @quake fn iter_opt(&self, col: Col, mode: IteratorMode, readopts: &ReadOptions) -> Result; } diff --git a/db/src/lib.rs b/db/src/lib.rs index 9e735c972f..55a64fcdb2 100644 --- a/db/src/lib.rs +++ b/db/src/lib.rs @@ -22,7 +22,9 @@ pub use rocksdb::{ self as internal, DBPinnableSlice, DBVector, Direction, Error as DBError, IteratorMode, }; +/// TODO(doc): @quake pub type Col = &'static str; +/// TODO(doc): @quake pub type Result = result::Result; fn internal_error(reason: S) -> Error { diff --git a/db/src/snapshot.rs b/db/src/snapshot.rs index c9281ff13b..e0eacfc2ea 100644 --- a/db/src/snapshot.rs +++ b/db/src/snapshot.rs @@ -1,3 +1,4 @@ +//! TODO(doc): @quake use crate::db::cf_handle; use crate::{internal_error, Col, Result}; use libc::{self, c_char, size_t}; @@ -8,6 +9,7 @@ use rocksdb::{ }; use std::sync::Arc; +/// TODO(doc): @quake pub struct RocksDBSnapshot { pub(crate) db: Arc, pub(crate) inner: *const ffi::rocksdb_snapshot_t, @@ -30,6 +32,7 @@ impl RocksDBSnapshot { } } + /// TODO(doc): @quake pub fn get_pinned(&self, col: Col, key: &[u8]) -> Result> { let cf = cf_handle(&self.db, col)?; self.get_pinned_cf_full(Some(cf), &key, None) diff --git a/db/src/transaction.rs b/db/src/transaction.rs index c3cd80556b..8f5497d569 100644 --- a/db/src/transaction.rs +++ b/db/src/transaction.rs @@ -1,3 +1,4 @@ +//! 
TODO(doc): @quake use crate::db::cf_handle; use crate::{internal_error, Col, Result}; use rocksdb::ops::{DeleteCF, GetCF, PutCF}; @@ -7,27 +8,32 @@ use rocksdb::{ }; use std::sync::Arc; +/// TODO(doc): @quake pub struct RocksDBTransaction { pub(crate) db: Arc, pub(crate) inner: OptimisticTransaction, } impl RocksDBTransaction { + /// TODO(doc): @quake pub fn get(&self, col: Col, key: &[u8]) -> Result> { let cf = cf_handle(&self.db, col)?; self.inner.get_cf(cf, key).map_err(internal_error) } + /// TODO(doc): @quake pub fn put(&self, col: Col, key: &[u8], value: &[u8]) -> Result<()> { let cf = cf_handle(&self.db, col)?; self.inner.put_cf(cf, key, value).map_err(internal_error) } + /// TODO(doc): @quake pub fn delete(&self, col: Col, key: &[u8]) -> Result<()> { let cf = cf_handle(&self.db, col)?; self.inner.delete_cf(cf, key).map_err(internal_error) } + /// TODO(doc): @quake pub fn get_for_update<'a>( &self, col: Col, @@ -42,14 +48,17 @@ impl RocksDBTransaction { .map_err(internal_error) } + /// TODO(doc): @quake pub fn commit(&self) -> Result<()> { self.inner.commit().map_err(internal_error) } + /// TODO(doc): @quake pub fn rollback(&self) -> Result<()> { self.inner.rollback().map_err(internal_error) } + /// TODO(doc): @quake pub fn get_snapshot(&self) -> RocksDBTransactionSnapshot<'_> { RocksDBTransactionSnapshot { db: Arc::clone(&self.db), @@ -57,21 +66,25 @@ impl RocksDBTransaction { } } + /// TODO(doc): @quake pub fn set_savepoint(&self) { self.inner.set_savepoint() } + /// TODO(doc): @quake pub fn rollback_to_savepoint(&self) -> Result<()> { self.inner.rollback_to_savepoint().map_err(internal_error) } } +/// TODO(doc): @quake pub struct RocksDBTransactionSnapshot<'a> { pub(crate) db: Arc, pub(crate) inner: OptimisticTransactionSnapshot<'a>, } impl<'a> RocksDBTransactionSnapshot<'a> { + /// TODO(doc): @quake pub fn get(&self, col: Col, key: &[u8]) -> Result> { let cf = cf_handle(&self.db, col)?; self.inner.get_cf(cf, key).map_err(internal_error) diff --git 
a/db/src/write_batch.rs b/db/src/write_batch.rs index 0d6f174e42..ceb46c87b0 100644 --- a/db/src/write_batch.rs +++ b/db/src/write_batch.rs @@ -1,14 +1,17 @@ +//! TODO(doc): @quake use crate::db::cf_handle; use crate::{internal_error, Col, Result}; use rocksdb::{OptimisticTransactionDB, WriteBatch}; use std::sync::Arc; +/// TODO(doc): @quake pub struct RocksDBWriteBatch { pub(crate) db: Arc, pub(crate) inner: WriteBatch, } impl RocksDBWriteBatch { + /// TODO(doc): @quake pub fn len(&self) -> usize { self.inner.len() } @@ -18,15 +21,18 @@ impl RocksDBWriteBatch { self.inner.size_in_bytes() } + /// TODO(doc): @quake pub fn is_empty(&self) -> bool { self.inner.is_empty() } + /// TODO(doc): @quake pub fn put(&mut self, col: Col, key: &[u8], value: &[u8]) -> Result<()> { let cf = cf_handle(&self.db, col)?; self.inner.put_cf(cf, key, value).map_err(internal_error) } + /// TODO(doc): @quake pub fn delete(&mut self, col: Col, key: &[u8]) -> Result<()> { let cf = cf_handle(&self.db, col)?; self.inner.delete_cf(cf, key).map_err(internal_error) @@ -44,6 +50,7 @@ impl RocksDBWriteBatch { .map_err(internal_error) } + /// TODO(doc): @quake pub fn clear(&mut self) -> Result<()> { self.inner.clear().map_err(internal_error) } diff --git a/devtools/ci/check-cargotoml.sh b/devtools/ci/check-cargotoml.sh index 03fddef863..c3c573725a 100755 --- a/devtools/ci/check-cargotoml.sh +++ b/devtools/ci/check-cargotoml.sh @@ -137,7 +137,7 @@ function check_dependencies_for() { fi if [ "${depcnt}" -eq 0 ]; then case "${dependency}" in - phf|quote) + phf) # We cann't handle these crates. 
printf "Warn: [%s::%s] in <%s>\n" \ "${deptype}" "${dependency}" "${pkgroot}" diff --git a/error/Cargo.toml b/error/Cargo.toml index bff6bf6a35..58ac266b89 100644 --- a/error/Cargo.toml +++ b/error/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @keroro520 crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" @@ -14,4 +14,3 @@ repository = "https://github.com/nervosnetwork/ckb" failure = "0.1.5" ckb-occupied-capacity = { path = "../util/occupied-capacity", version = "= 0.38.0-pre" } enum-display-derive = "0.1.0" -quote = "1.0.3" diff --git a/error/src/internal.rs b/error/src/internal.rs index 17c10791ef..d22dbe3a45 100644 --- a/error/src/internal.rs +++ b/error/src/internal.rs @@ -2,11 +2,13 @@ use crate::{Error, ErrorKind}; use failure::{err_msg, Backtrace, Context, Fail}; use std::fmt::{self, Debug, Display}; +/// TODO(doc): @keroro520 #[derive(Debug)] pub struct InternalError { kind: Context, } +/// TODO(doc): @keroro520 #[derive(Debug, PartialEq, Eq, Clone, Display)] pub enum InternalErrorKind { /// An arithmetic overflow occurs during capacity calculation, @@ -60,12 +62,14 @@ impl From for Error { } impl InternalErrorKind { + /// TODO(doc): @keroro520 pub fn cause(self, cause: F) -> InternalError { InternalError { kind: cause.context(self), } } + /// TODO(doc): @keroro520 pub fn reason(self, reason: S) -> InternalError { InternalError { kind: err_msg(reason).compat().context(self), @@ -74,6 +78,7 @@ impl InternalErrorKind { } impl InternalError { + /// TODO(doc): @keroro520 pub fn kind(&self) -> &InternalErrorKind { &self.kind.get_context() } diff --git a/error/src/lib.rs b/error/src/lib.rs index 926dd19084..2a0462b370 100644 --- a/error/src/lib.rs +++ b/error/src/lib.rs @@ -1,3 +1,4 @@ +//! 
TODO(doc): @keroro520 #[macro_use] extern crate enum_display_derive; @@ -9,19 +10,30 @@ use failure::{Backtrace, Context, Fail}; pub use internal::{InternalError, InternalErrorKind}; use std::fmt::{self, Display}; +/// TODO(doc): @keroro520 #[derive(Debug, Clone, Copy, Eq, PartialEq, Display)] pub enum ErrorKind { + /// TODO(doc): @keroro520 OutPoint, + /// TODO(doc): @keroro520 Transaction, + /// TODO(doc): @keroro520 SubmitTransaction, + /// TODO(doc): @keroro520 Script, + /// TODO(doc): @keroro520 Header, + /// TODO(doc): @keroro520 Block, + /// TODO(doc): @keroro520 Internal, + /// TODO(doc): @keroro520 Dao, + /// TODO(doc): @keroro520 Spec, } +/// TODO(doc): @keroro520 #[derive(Debug)] pub struct Error { kind: Context, @@ -58,14 +70,17 @@ impl Fail for Error { } impl Error { + /// TODO(doc): @keroro520 pub fn kind(&self) -> &ErrorKind { self.kind.get_context() } + /// TODO(doc): @keroro520 pub fn downcast_ref(&self) -> Option<&T> { self.cause().and_then(|cause| cause.downcast_ref::()) } + /// TODO(doc): @keroro520 pub fn unwrap_cause_or_self(&self) -> &dyn Fail { self.cause().unwrap_or(self) } diff --git a/error/src/util.rs b/error/src/util.rs index cd6a45ba80..19734d6d18 100644 --- a/error/src/util.rs +++ b/error/src/util.rs @@ -1,3 +1,5 @@ +//! TODO(doc): @keroro520 + /// Compare two errors /// /// Used for testing only @@ -21,6 +23,7 @@ macro_rules! assert_error_eq { } } +/// TODO(doc): @keroro520 #[macro_export] macro_rules! impl_error_conversion_with_kind { ($source:ty, $kind:expr, $target:ty) => { @@ -32,6 +35,7 @@ macro_rules! impl_error_conversion_with_kind { }; } +/// TODO(doc): @keroro520 #[macro_export] macro_rules! 
impl_error_conversion_with_adaptor { ($source:ty, $adaptor:ty, $target:ty) => { diff --git a/indexer/Cargo.toml b/indexer/Cargo.toml index 0edcd5749e..9223c019c9 100644 --- a/indexer/Cargo.toml +++ b/indexer/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @doitian crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/indexer/src/lib.rs b/indexer/src/lib.rs index 8b59f339ef..9b9f786ca2 100644 --- a/indexer/src/lib.rs +++ b/indexer/src/lib.rs @@ -1,3 +1,6 @@ +//! **Deprecated**, Please use [ckb-indexer](https://github.com/nervosnetwork/ckb-indexer) as an alternate solution. +#![allow(missing_docs)] + mod migrations; mod store; mod types; diff --git a/miner/Cargo.toml b/miner/Cargo.toml index c9ed11037b..4ce01d6f38 100644 --- a/miner/Cargo.toml +++ b/miner/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @quake crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/miner/src/client.rs b/miner/src/client.rs index 57da2cb26b..b727fb993c 100644 --- a/miner/src/client.rs +++ b/miner/src/client.rs @@ -106,15 +106,21 @@ impl Drop for Rpc { } } +/// TODO(doc): @quake #[derive(Debug, Clone)] pub struct Client { + /// TODO(doc): @quake pub current_work_id: Option, + /// TODO(doc): @quake pub new_work_tx: Sender, + /// TODO(doc): @quake pub config: MinerClientConfig, + /// TODO(doc): @quake pub rpc: Rpc, } impl Client { + /// TODO(doc): @quake pub fn new(new_work_tx: Sender, config: MinerClientConfig) -> Client { let uri: Uri = config.rpc_url.parse().expect("valid rpc url"); @@ -138,6 +144,7 @@ impl Client { self.rpc.request(method, params) } + /// 
TODO(doc): @quake pub fn submit_block(&self, work_id: &str, block: Block) { let future = self.send_submit_block_request(work_id, block); if self.config.block_on_submit { @@ -155,6 +162,7 @@ impl Client { } } + /// TODO(doc): @quake pub fn poll_block_template(&mut self) { loop { debug!("poll block template..."); @@ -163,6 +171,7 @@ impl Client { } } + /// TODO(doc): @quake pub fn try_update_block_template(&mut self) { match self.get_block_template().wait() { Ok(block_template) => { diff --git a/miner/src/error.rs b/miner/src/error.rs index a5d8a3791b..a3461a6d23 100644 --- a/miner/src/error.rs +++ b/miner/src/error.rs @@ -1,11 +1,15 @@ use failure::Fail; +/// TODO(doc): @quake #[derive(Debug, PartialEq, Clone, Eq, Fail)] pub enum Error { + /// TODO(doc): @quake #[fail(display = "InvalidInput")] InvalidInput, + /// TODO(doc): @quake #[fail(display = "InvalidOutput")] InvalidOutput, + /// TODO(doc): @quake #[fail(display = "InvalidParams {}", _0)] InvalidParams(String), } diff --git a/miner/src/lib.rs b/miner/src/lib.rs index 4298614061..346ac567e6 100644 --- a/miner/src/lib.rs +++ b/miner/src/lib.rs @@ -1,3 +1,4 @@ +//! 
TODO(doc): @quake mod client; mod error; mod miner; @@ -11,9 +12,12 @@ use ckb_jsonrpc_types::BlockTemplate; use ckb_types::packed::Block; use std::convert::From; +/// TODO(doc): @quake #[derive(Clone)] pub struct Work { + /// TODO(doc): @quake work_id: u64, + /// TODO(doc): @quake block: Block, } diff --git a/miner/src/miner.rs b/miner/src/miner.rs index f8a7da2cbd..e52c69501f 100644 --- a/miner/src/miner.rs +++ b/miner/src/miner.rs @@ -17,20 +17,32 @@ use std::thread; const WORK_CACHE_SIZE: usize = 32; +/// TODO(doc): @quake pub struct Miner { + /// TODO(doc): @quake pub pow: Arc, + /// TODO(doc): @quake pub client: Client, + /// TODO(doc): @quake pub works: LruCache, + /// TODO(doc): @quake pub worker_controllers: Vec, + /// TODO(doc): @quake pub work_rx: Receiver, + /// TODO(doc): @quake pub nonce_rx: Receiver<(Byte32, u128)>, + /// TODO(doc): @quake pub pb: ProgressBar, + /// TODO(doc): @quake pub nonces_found: u128, + /// TODO(doc): @quake pub stderr_is_tty: bool, + /// TODO(doc): @quake pub limit: u128, } impl Miner { + /// TODO(doc): @quake pub fn new( pow: Arc, client: Client, @@ -69,6 +81,7 @@ impl Miner { } } + /// TODO(doc): @quake // remove `allow` tag when https://github.com/crossbeam-rs/crossbeam/issues/404 is solved #[allow(clippy::zero_ptr, clippy::drop_copy)] pub fn run(&mut self) { diff --git a/network/Cargo.toml b/network/Cargo.toml index 570a514ab9..4476ceebbb 100644 --- a/network/Cargo.toml +++ b/network/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @driftluo crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/network/src/behaviour.rs b/network/src/behaviour.rs index 0316afbad5..41dbc2cd6e 100644 --- a/network/src/behaviour.rs +++ b/network/src/behaviour.rs @@ -14,6 +14,7 @@ pub enum Behaviour { } impl Behaviour { + /// 
TODO(doc): @driftluo pub fn score(self) -> Score { #[cfg(test)] match self { diff --git a/network/src/benches/peer_store.rs b/network/src/benches/peer_store.rs index d256b985b5..89877c5d6b 100644 --- a/network/src/benches/peer_store.rs +++ b/network/src/benches/peer_store.rs @@ -1,3 +1,4 @@ +#![allow(missing_docs)] #[macro_use] extern crate criterion; extern crate ckb_network; diff --git a/network/src/errors.rs b/network/src/errors.rs index 7763f99a2f..d31a173a1f 100644 --- a/network/src/errors.rs +++ b/network/src/errors.rs @@ -1,3 +1,4 @@ +//! TODO(doc): @driftluo use p2p::{ error::{ DialerErrorKind, ListenErrorKind, ProtocolHandleErrorKind, SendErrorKind, @@ -10,47 +11,73 @@ use std::fmt; use std::fmt::Display; use std::io::Error as IoError; +/// TODO(doc): @driftluo pub type Result = ::std::result::Result; +/// TODO(doc): @driftluo #[derive(Debug)] pub enum Error { + /// TODO(doc): @driftluo Peer(PeerError), + /// TODO(doc): @driftluo Io(IoError), + /// TODO(doc): @driftluo P2P(P2PError), + /// TODO(doc): @driftluo Addr(AddrError), + /// TODO(doc): @driftluo Dial(String), + /// TODO(doc): @driftluo PeerStore(PeerStoreError), } +/// TODO(doc): @driftluo #[derive(Debug)] pub enum P2PError { + /// TODO(doc): @driftluo Transport(TransportErrorKind), + /// TODO(doc): @driftluo Protocol(ProtocolHandleErrorKind), + /// TODO(doc): @driftluo Dail(DialerErrorKind), + /// TODO(doc): @driftluo Listen(ListenErrorKind), + /// TODO(doc): @driftluo Send(SendErrorKind), } +/// TODO(doc): @driftluo #[derive(Debug)] pub enum PeerStoreError { /// indicate the peer store is full EvictionFailed, + /// TODO(doc): @driftluo Serde(serde_json::Error), } +/// TODO(doc): @driftluo #[derive(Debug, Eq, PartialEq)] pub enum PeerError { + /// TODO(doc): @driftluo SessionExists(SessionId), + /// TODO(doc): @driftluo PeerIdExists(PeerId), + /// TODO(doc): @driftluo NonReserved, + /// TODO(doc): @driftluo Banned, + /// TODO(doc): @driftluo ReachMaxInboundLimit, + /// TODO(doc): @driftluo 
ReachMaxOutboundLimit, } +/// TODO(doc): @driftluo #[derive(Debug)] pub enum AddrError { + /// TODO(doc): @driftluo MissingIP, + /// TODO(doc): @driftluo MissingPort, } diff --git a/network/src/lib.rs b/network/src/lib.rs index ffd23776dd..d52c5c3218 100644 --- a/network/src/lib.rs +++ b/network/src/lib.rs @@ -1,3 +1,4 @@ +//! TODO(doc): @driftluo mod behaviour; mod compress; pub mod errors; @@ -33,4 +34,5 @@ pub use p2p::{ }; pub use tokio; +/// TODO(doc): @driftluo pub type ProtocolVersion = String; diff --git a/network/src/network.rs b/network/src/network.rs index c78b899977..6fa0cd3544 100644 --- a/network/src/network.rs +++ b/network/src/network.rs @@ -1,3 +1,4 @@ +//! TODO(doc): @driftluo use crate::errors::{Error, P2PError}; use crate::peer_registry::{ConnectionStatus, PeerRegistry}; use crate::peer_store::{ @@ -59,12 +60,16 @@ const P2P_TRY_SEND_INTERVAL: Duration = Duration::from_millis(100); // After 5 minutes we consider this dial hang const DIAL_HANG_TIMEOUT: Duration = Duration::from_secs(300); +/// TODO(doc): @driftluo #[derive(Debug, Clone)] pub struct SessionInfo { + /// TODO(doc): @driftluo pub peer: Peer, + /// TODO(doc): @driftluo pub protocol_version: Option, } +/// TODO(doc): @driftluo pub struct NetworkState { pub(crate) peer_registry: RwLock, pub(crate) peer_store: Mutex, @@ -86,6 +91,7 @@ pub struct NetworkState { } impl NetworkState { + /// TODO(doc): @driftluo pub fn from_config(config: NetworkConfig) -> Result { config.create_dir_if_not_exists()?; let local_private_key = config.fetch_private_key()?; @@ -276,6 +282,7 @@ impl NetworkState { callback(&mut self.peer_store.lock()) } + /// TODO(doc): @driftluo pub fn local_peer_id(&self) -> &PeerId { &self.local_peer_id } @@ -284,6 +291,7 @@ impl NetworkState { &self.local_private_key } + /// TODO(doc): @driftluo pub fn node_id(&self) -> String { self.local_private_key().peer_id().to_base58() } @@ -307,6 +315,7 @@ impl NetworkState { self.peer_registry.read().connection_status() } + /// 
TODO(doc): @driftluo pub fn public_urls(&self, max_urls: usize) -> Vec<(String, u8)> { let listened_addrs = self.listened_addrs.read(); self.public_addrs(max_urls.saturating_sub(listened_addrs.len())) @@ -330,6 +339,7 @@ impl NetworkState { format!("{}/p2p/{}", addr, self.node_id()) } + /// TODO(doc): @driftluo pub fn get_protocol_ids bool>(&self, filter: F) -> Vec { self.protocols .read() @@ -479,6 +489,7 @@ impl NetworkState { } } + /// TODO(doc): @driftluo pub fn add_observed_addrs(&self, iter: impl Iterator) { let mut pending_observed_addrs = self.pending_observed_addrs.write(); let mut public_addrs = self.public_addrs.write(); @@ -497,20 +508,25 @@ impl NetworkState { } } + /// TODO(doc): @driftluo pub fn is_active(&self) -> bool { self.active.load(Ordering::Relaxed) } } +/// TODO(doc): @driftluo pub struct EventHandler { pub(crate) network_state: Arc, pub(crate) exit_handler: T, } +/// TODO(doc): @driftluo pub trait ExitHandler: Send + Unpin + 'static { + /// TODO(doc): @driftluo fn notify_exit(&self); } +/// TODO(doc): @driftluo #[derive(Clone, Default)] pub struct DefaultExitHandler { lock: Arc>, @@ -518,6 +534,7 @@ pub struct DefaultExitHandler { } impl DefaultExitHandler { + /// TODO(doc): @driftluo pub fn wait_for_exit(&self) { self.exit.wait(&mut self.lock.lock()); } @@ -764,6 +781,7 @@ impl ServiceHandle for EventHandler { } } +/// TODO(doc): @driftluo pub struct NetworkService { p2p_service: Service>, network_state: Arc, @@ -774,6 +792,7 @@ pub struct NetworkService { } impl NetworkService { + /// TODO(doc): @driftluo pub fn new( network_state: Arc, protocols: Vec, @@ -892,6 +911,7 @@ impl NetworkService { } } + /// TODO(doc): @driftluo pub fn start(self, thread_name: Option) -> Result { let config = self.network_state.config.clone(); @@ -1049,6 +1069,7 @@ impl NetworkService { } } +/// TODO(doc): @driftluo #[derive(Clone)] pub struct NetworkController { version: String, @@ -1059,23 +1080,28 @@ pub struct NetworkController { } impl NetworkController { 
+ /// TODO(doc): @driftluo pub fn public_urls(&self, max_urls: usize) -> Vec<(String, u8)> { self.network_state.public_urls(max_urls) } + /// TODO(doc): @driftluo pub fn version(&self) -> &String { &self.version } + /// TODO(doc): @driftluo pub fn node_id(&self) -> String { self.network_state.node_id() } + /// TODO(doc): @driftluo pub fn add_node(&self, peer_id: &PeerId, address: Multiaddr) { self.network_state .add_node(&self.p2p_control, peer_id, address) } + /// TODO(doc): @driftluo pub fn remove_node(&self, peer_id: &PeerId) { if let Some(session_id) = self .network_state @@ -1093,6 +1119,7 @@ impl NetworkController { } } + /// TODO(doc): @driftluo pub fn get_banned_addrs(&self) -> Vec { self.network_state .peer_store @@ -1101,10 +1128,12 @@ impl NetworkController { .get_banned_addrs() } + /// TODO(doc): @driftluo pub fn clear_banned_addrs(&self) { self.network_state.peer_store.lock().clear_ban_list(); } + /// TODO(doc): @driftluo pub fn addr_info(&self, ip_port: &IpPort) -> Option { self.network_state .peer_store @@ -1114,6 +1143,7 @@ impl NetworkController { .cloned() } + /// TODO(doc): @driftluo pub fn ban(&self, address: IpNetwork, ban_until: u64, ban_reason: String) -> Result<(), Error> { self.network_state .peer_store @@ -1121,6 +1151,7 @@ impl NetworkController { .ban_network(address, ban_until, ban_reason) } + /// TODO(doc): @driftluo pub fn unban(&self, address: &IpNetwork) { self.network_state .peer_store @@ -1129,6 +1160,7 @@ impl NetworkController { .unban_network(address); } + /// TODO(doc): @driftluo pub fn connected_peers(&self) -> Vec<(PeerIndex, Peer)> { self.network_state.with_peer_registry(|reg| { reg.peers() @@ -1173,18 +1205,21 @@ impl NetworkController { } } + /// TODO(doc): @driftluo pub fn broadcast(&self, proto_id: ProtocolId, data: Bytes) -> Result<(), SendErrorKind> { let session_ids = self.network_state.peer_registry.read().connected_peers(); let target = TargetSession::Multi(session_ids); self.try_broadcast(false, target, proto_id, 
data) } + /// TODO(doc): @driftluo pub fn quick_broadcast(&self, proto_id: ProtocolId, data: Bytes) -> Result<(), SendErrorKind> { let session_ids = self.network_state.peer_registry.read().connected_peers(); let target = TargetSession::Multi(session_ids); self.try_broadcast(true, target, proto_id, data) } + /// TODO(doc): @driftluo pub fn send_message_to( &self, session_id: SessionId, @@ -1195,18 +1230,22 @@ impl NetworkController { self.try_broadcast(false, target, proto_id, data) } + /// TODO(doc): @driftluo pub fn is_active(&self) -> bool { self.network_state.is_active() } + /// TODO(doc): @driftluo pub fn set_active(&self, active: bool) { self.network_state.active.store(active, Ordering::Relaxed); } + /// TODO(doc): @driftluo pub fn protocols(&self) -> Vec<(ProtocolId, String, Vec)> { self.network_state.protocols.read().clone() } + /// TODO(doc): @driftluo pub fn ping_peers(&self) { let mut ping_controller = self.ping_controller.clone(); let _ignore = ping_controller.try_send(()); diff --git a/network/src/peer.rs b/network/src/peer.rs index f37dd8dbb3..8dec0a8a40 100644 --- a/network/src/peer.rs +++ b/network/src/peer.rs @@ -4,29 +4,45 @@ use p2p::{secio::PeerId, SessionId}; use std::collections::HashMap; use std::time::{Duration, Instant}; +/// TODO(doc): @driftluo #[derive(Clone, Debug)] pub struct PeerIdentifyInfo { + /// TODO(doc): @driftluo pub client_version: String, } +/// TODO(doc): @driftluo #[derive(Clone, Debug)] pub struct Peer { + /// TODO(doc): @driftluo pub connected_addr: Multiaddr, + /// TODO(doc): @driftluo pub listened_addrs: Vec, + /// TODO(doc): @driftluo pub peer_id: PeerId, + /// TODO(doc): @driftluo // Client or Server pub identify_info: Option, + /// TODO(doc): @driftluo pub last_message_time: Option, + /// TODO(doc): @driftluo pub ping: Option, + /// TODO(doc): @driftluo pub is_feeler: bool, + /// TODO(doc): @driftluo pub connected_time: Instant, + /// TODO(doc): @driftluo pub session_id: SessionId, + /// TODO(doc): @driftluo pub 
session_type: SessionType, + /// TODO(doc): @driftluo pub protocols: HashMap, + /// TODO(doc): @driftluo pub is_whitelist: bool, } impl Peer { + /// TODO(doc): @driftluo pub fn new( session_id: SessionId, session_type: SessionType, @@ -50,18 +66,22 @@ impl Peer { } } + /// TODO(doc): @driftluo pub fn is_outbound(&self) -> bool { self.session_type.is_outbound() } + /// TODO(doc): @driftluo pub fn is_inbound(&self) -> bool { self.session_type.is_inbound() } + /// TODO(doc): @driftluo pub fn network_group(&self) -> Group { self.connected_addr.network_group() } + /// TODO(doc): @driftluo pub fn protocol_version(&self, protocol_id: ProtocolId) -> Option { self.protocols.get(&protocol_id).cloned() } diff --git a/network/src/peer_registry.rs b/network/src/peer_registry.rs index 5d803fb8ab..13b726202d 100644 --- a/network/src/peer_registry.rs +++ b/network/src/peer_registry.rs @@ -1,3 +1,4 @@ +//! TODO(doc): @driftluo use crate::peer_store::PeerStore; use crate::{ errors::{Error, PeerError}, @@ -12,6 +13,7 @@ use std::iter::FromIterator; pub(crate) const EVICTION_PROTECT_PEERS: usize = 8; +/// TODO(doc): @driftluo pub struct PeerRegistry { peers: HashMap, // max inbound limitation @@ -24,12 +26,18 @@ pub struct PeerRegistry { feeler_peers: HashSet, } +/// TODO(doc): @driftluo #[derive(Clone, Copy, Debug)] pub struct ConnectionStatus { + /// TODO(doc): @driftluo pub total: u32, + /// TODO(doc): @driftluo pub non_whitelist_inbound: u32, + /// TODO(doc): @driftluo pub non_whitelist_outbound: u32, + /// TODO(doc): @driftluo pub max_inbound: u32, + /// TODO(doc): @driftluo pub max_outbound: u32, } @@ -44,6 +52,7 @@ where } impl PeerRegistry { + /// TODO(doc): @driftluo pub fn new( max_inbound: u32, max_outbound: u32, @@ -178,22 +187,27 @@ impl PeerRegistry { }) } + /// TODO(doc): @driftluo pub fn add_feeler(&mut self, peer_id: PeerId) { self.feeler_peers.insert(peer_id); } + /// TODO(doc): @driftluo pub fn remove_feeler(&mut self, peer_id: &PeerId) { 
self.feeler_peers.remove(peer_id); } + /// TODO(doc): @driftluo pub fn is_feeler(&self, peer_id: &PeerId) -> bool { self.feeler_peers.contains(peer_id) } + /// TODO(doc): @driftluo pub fn get_peer(&self, session_id: SessionId) -> Option<&Peer> { self.peers.get(&session_id) } + /// TODO(doc): @driftluo pub fn get_peer_mut(&mut self, session_id: SessionId) -> Option<&mut Peer> { self.peers.get_mut(&session_id) } @@ -202,6 +216,7 @@ impl PeerRegistry { self.peers.remove(&session_id) } + /// TODO(doc): @driftluo pub fn get_key_by_peer_id(&self, peer_id: &PeerId) -> Option { self.peers.values().find_map(|peer| { if &peer.peer_id == peer_id { @@ -217,10 +232,12 @@ impl PeerRegistry { .and_then(|session_id| self.peers.remove(&session_id)) } + /// TODO(doc): @driftluo pub fn peers(&self) -> &HashMap { &self.peers } + /// TODO(doc): @driftluo pub fn connected_peers(&self) -> Vec { self.peers.keys().cloned().collect() } diff --git a/network/src/peer_store/addr_manager.rs b/network/src/peer_store/addr_manager.rs index 5eb4dfc2c7..f4969478e1 100644 --- a/network/src/peer_store/addr_manager.rs +++ b/network/src/peer_store/addr_manager.rs @@ -1,7 +1,9 @@ +//! 
TODO(doc): @driftluo use crate::peer_store::types::{AddrInfo, IpPort}; use rand::Rng; use std::collections::{HashMap, HashSet}; +/// TODO(doc): @driftluo #[derive(Default)] pub struct AddrManager { next_id: u64, @@ -11,6 +13,7 @@ pub struct AddrManager { } impl AddrManager { + /// TODO(doc): @driftluo pub fn add(&mut self, mut addr_info: AddrInfo) { let id = self.next_id; let key = addr_info.ip_port(); @@ -62,14 +65,17 @@ impl AddrManager { addr_infos } + /// TODO(doc): @driftluo pub fn count(&self) -> usize { self.addr_to_id.len() } + /// TODO(doc): @driftluo pub fn addrs_iter(&self) -> impl Iterator { self.id_to_info.values() } + /// TODO(doc): @driftluo pub fn remove(&mut self, addr: &IpPort) -> Option { if let Some(id) = self.addr_to_id.remove(&addr) { let random_id_pos = self.id_to_info.get(&id).expect("exists").random_id_pos; @@ -82,12 +88,14 @@ impl AddrManager { } } + /// TODO(doc): @driftluo pub fn get(&self, addr: &IpPort) -> Option<&AddrInfo> { self.addr_to_id .get(addr) .and_then(|id| self.id_to_info.get(&id)) } + /// TODO(doc): @driftluo pub fn get_mut(&mut self, addr: &IpPort) -> Option<&mut AddrInfo> { if let Some(id) = self.addr_to_id.get(addr) { self.id_to_info.get_mut(&id) diff --git a/network/src/peer_store/ban_list.rs b/network/src/peer_store/ban_list.rs index cd9ae556ff..835651e3ad 100644 --- a/network/src/peer_store/ban_list.rs +++ b/network/src/peer_store/ban_list.rs @@ -1,3 +1,4 @@ +//! 
TODO(doc): @driftluo use crate::peer_store::types::{ip_to_network, BannedAddr, MultiaddrExt}; use crate::peer_store::Multiaddr; use faketime::unix_time_as_millis; @@ -7,6 +8,7 @@ use std::net::IpAddr; const CLEAR_EXPIRES_PERIOD: usize = 1024; +/// TODO(doc): @driftluo pub struct BanList { inner: HashMap, insert_count: usize, @@ -19,6 +21,7 @@ impl Default for BanList { } impl BanList { + /// TODO(doc): @driftluo pub fn new() -> Self { BanList { inner: HashMap::default(), @@ -26,6 +29,7 @@ impl BanList { } } + /// TODO(doc): @driftluo pub fn ban(&mut self, banned_addr: BannedAddr) { self.inner.insert(banned_addr.address, banned_addr); let (insert_count, _) = self.insert_count.overflowing_add(1); @@ -35,6 +39,7 @@ impl BanList { } } + /// TODO(doc): @driftluo pub fn unban_network(&mut self, ip_network: &IpNetwork) { self.inner.remove(&ip_network); } @@ -52,11 +57,13 @@ impl BanList { }) } + /// TODO(doc): @driftluo pub fn is_ip_banned(&self, ip: &IpAddr) -> bool { let now_ms = unix_time_as_millis(); self.is_ip_banned_until(ip.to_owned(), now_ms) } + /// TODO(doc): @driftluo pub fn is_addr_banned(&self, addr: &Multiaddr) -> bool { let now_ms = unix_time_as_millis(); if let Ok(ip_port) = addr.extract_ip_addr() { @@ -65,6 +72,7 @@ impl BanList { false } + /// TODO(doc): @driftluo pub fn get_banned_addrs(&self) -> Vec { self.inner.values().map(ToOwned::to_owned).collect() } diff --git a/network/src/peer_store/mod.rs b/network/src/peer_store/mod.rs index 65b860f9bc..fef108662d 100644 --- a/network/src/peer_store/mod.rs +++ b/network/src/peer_store/mod.rs @@ -1,3 +1,4 @@ +//! 
TODO(doc): @driftluo pub mod addr_manager; pub mod ban_list; mod peer_id_serde; @@ -17,13 +18,17 @@ const ADDR_TIMEOUT_MS: u64 = 7 * 24 * 3600 * 1000; const ADDR_MAX_RETRIES: u32 = 3; const ADDR_MAX_FAILURES: u32 = 10; +/// TODO(doc): @driftluo pub type Score = i32; /// PeerStore Scoring configuration #[derive(Copy, Clone, Debug)] pub struct PeerScoreConfig { + /// TODO(doc): @driftluo pub default_score: Score, + /// TODO(doc): @driftluo pub ban_score: Score, + /// TODO(doc): @driftluo pub ban_timeout_ms: u64, } @@ -40,21 +45,28 @@ impl Default for PeerScoreConfig { /// Peer Status #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum Status { + /// TODO(doc): @driftluo Connected, + /// TODO(doc): @driftluo Disconnected, } +/// TODO(doc): @driftluo #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum ReportResult { + /// TODO(doc): @driftluo Ok, + /// TODO(doc): @driftluo Banned, } impl ReportResult { + /// TODO(doc): @driftluo pub fn is_banned(self) -> bool { self == ReportResult::Banned } + /// TODO(doc): @driftluo pub fn is_ok(self) -> bool { self == ReportResult::Ok } diff --git a/network/src/peer_store/peer_store_db.rs b/network/src/peer_store/peer_store_db.rs index 28f3097757..61c7424bc7 100644 --- a/network/src/peer_store/peer_store_db.rs +++ b/network/src/peer_store/peer_store_db.rs @@ -16,6 +16,7 @@ const DEFAULT_ADDR_MANAGER_DB: &str = "addr_manager.db"; const DEFAULT_BAN_LIST_DB: &str = "ban_list.db"; impl AddrManager { + /// TODO(doc): @driftluo pub fn load(r: R) -> Result { let addrs: Vec = serde_json::from_reader(r).map_err(PeerStoreError::Serde)?; let mut addr_manager = AddrManager::default(); @@ -23,6 +24,7 @@ impl AddrManager { Ok(addr_manager) } + /// TODO(doc): @driftluo pub fn dump(&self, w: W) -> Result<(), Error> { let addrs: Vec<_> = self.addrs_iter().collect(); debug!("dump {} addrs", addrs.len()); @@ -31,6 +33,7 @@ impl AddrManager { } impl BanList { + /// TODO(doc): @driftluo pub fn load(r: R) -> Result { let banned_addrs: Vec = 
serde_json::from_reader(r).map_err(PeerStoreError::Serde)?; @@ -41,6 +44,7 @@ impl BanList { Ok(ban_list) } + /// TODO(doc): @driftluo pub fn dump(&self, w: W) -> Result<(), Error> { let banned_addrs = self.get_banned_addrs(); debug!("dump {} banned addrs", banned_addrs.len()); @@ -49,6 +53,7 @@ impl BanList { } impl PeerStore { + /// TODO(doc): @driftluo pub fn load_from_dir_or_default>(path: P) -> Self { let addr_manager_path = path.as_ref().join(DEFAULT_ADDR_MANAGER_DB); let ban_list_path = path.as_ref().join(DEFAULT_BAN_LIST_DB); @@ -90,6 +95,7 @@ impl PeerStore { PeerStore::new(addr_manager, ban_list) } + /// TODO(doc): @driftluo pub fn dump_to_dir>(&self, path: P) -> Result<(), Error> { // create dir create_dir_all(&path)?; diff --git a/network/src/peer_store/peer_store_impl.rs b/network/src/peer_store/peer_store_impl.rs index 1f7c3fdb24..eb31a8577d 100644 --- a/network/src/peer_store/peer_store_impl.rs +++ b/network/src/peer_store/peer_store_impl.rs @@ -14,6 +14,7 @@ use ipnetwork::IpNetwork; use std::cell::{Ref, RefCell}; use std::collections::{hash_map::Entry, HashMap}; +/// TODO(doc): @driftluo #[derive(Default)] pub struct PeerStore { addr_manager: AddrManager, @@ -23,6 +24,7 @@ pub struct PeerStore { } impl PeerStore { + /// TODO(doc): @driftluo pub fn new(addr_manager: AddrManager, ban_list: BanList) -> Self { PeerStore { addr_manager, @@ -81,10 +83,12 @@ impl PeerStore { Ok(()) } + /// TODO(doc): @driftluo pub fn addr_manager(&self) -> &AddrManager { &self.addr_manager } + /// TODO(doc): @driftluo pub fn mut_addr_manager(&mut self) -> &mut AddrManager { &mut self.addr_manager } @@ -111,10 +115,12 @@ impl PeerStore { Ok(ReportResult::Ok) } + /// TODO(doc): @driftluo pub fn remove_disconnected_peer(&mut self, peer_id: &PeerId) -> Option { self.peers.borrow_mut().remove(peer_id) } + /// TODO(doc): @driftluo pub fn peer_status(&self, peer_id: &PeerId) -> Status { if self.peers.borrow().contains_key(peer_id) { Status::Connected @@ -197,18 +203,22 @@ impl 
PeerStore { Ok(()) } + /// TODO(doc): @driftluo pub fn is_addr_banned(&self, addr: &Multiaddr) -> bool { self.ban_list().is_addr_banned(addr) } + /// TODO(doc): @driftluo pub fn ban_list(&self) -> Ref { self.ban_list.borrow() } + /// TODO(doc): @driftluo pub fn mut_ban_list(&mut self) -> &mut BanList { self.ban_list.get_mut() } + /// TODO(doc): @driftluo pub fn clear_ban_list(&self) { self.ban_list.replace(Default::default()); } diff --git a/network/src/peer_store/types.rs b/network/src/peer_store/types.rs index 698298cc0a..7ab15f81f6 100644 --- a/network/src/peer_store/types.rs +++ b/network/src/peer_store/types.rs @@ -1,3 +1,4 @@ +//! TODO(doc): @driftluo use crate::{ errors::{AddrError, Error}, peer_store::{ @@ -10,21 +11,30 @@ use p2p::multiaddr::{self, Multiaddr, Protocol}; use serde::{Deserialize, Serialize}; use std::{borrow::Cow, net::IpAddr}; +/// TODO(doc): @driftluo #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, Serialize, Deserialize)] pub struct IpPort { + /// TODO(doc): @driftluo pub ip: IpAddr, + /// TODO(doc): @driftluo pub port: u16, } +/// TODO(doc): @driftluo #[derive(Debug, Clone)] pub struct PeerInfo { + /// TODO(doc): @driftluo pub peer_id: PeerId, + /// TODO(doc): @driftluo pub connected_addr: Multiaddr, + /// TODO(doc): @driftluo pub session_type: SessionType, + /// TODO(doc): @driftluo pub last_connected_at_ms: u64, } impl PeerInfo { + /// TODO(doc): @driftluo pub fn new( peer_id: PeerId, connected_addr: Multiaddr, @@ -40,20 +50,30 @@ impl PeerInfo { } } +/// TODO(doc): @driftluo #[derive(Debug, Clone, Hash, Eq, PartialEq, Serialize, Deserialize)] pub struct AddrInfo { + /// TODO(doc): @driftluo #[serde(with = "peer_id_serde")] pub peer_id: PeerId, + /// TODO(doc): @driftluo pub ip_port: IpPort, + /// TODO(doc): @driftluo pub addr: Multiaddr, + /// TODO(doc): @driftluo pub score: Score, + /// TODO(doc): @driftluo pub last_connected_at_ms: u64, + /// TODO(doc): @driftluo pub last_tried_at_ms: u64, + /// TODO(doc): @driftluo pub 
attempts_count: u32, + /// TODO(doc): @driftluo pub random_id_pos: usize, } impl AddrInfo { + /// TODO(doc): @driftluo pub fn new( peer_id: PeerId, ip_port: IpPort, @@ -73,18 +93,22 @@ impl AddrInfo { } } + /// TODO(doc): @driftluo pub fn ip_port(&self) -> IpPort { self.ip_port } + /// TODO(doc): @driftluo pub fn had_connected(&self, expires_ms: u64) -> bool { self.last_connected_at_ms > expires_ms } + /// TODO(doc): @driftluo pub fn tried_in_last_minute(&self, now_ms: u64) -> bool { self.last_tried_at_ms >= now_ms.saturating_sub(60_000) } + /// TODO(doc): @driftluo pub fn is_terrible(&self, now_ms: u64) -> bool { // do not remove addr tried in last minute if self.tried_in_last_minute(now_ms) { @@ -103,30 +127,39 @@ impl AddrInfo { false } + /// TODO(doc): @driftluo pub fn mark_tried(&mut self, tried_at_ms: u64) { self.last_tried_at_ms = tried_at_ms; self.attempts_count = self.attempts_count.saturating_add(1); } + /// TODO(doc): @driftluo pub fn mark_connected(&mut self, connected_at_ms: u64) { self.last_connected_at_ms = connected_at_ms; // reset attempts self.attempts_count = 0; } + /// TODO(doc): @driftluo pub fn multiaddr(&self) -> Result { self.addr.attach_p2p(&self.peer_id) } } +/// TODO(doc): @driftluo #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct BannedAddr { + /// TODO(doc): @driftluo pub address: IpNetwork, + /// TODO(doc): @driftluo pub ban_until: u64, + /// TODO(doc): @driftluo pub ban_reason: String, + /// TODO(doc): @driftluo pub created_at: u64, } +/// TODO(doc): @driftluo pub fn multiaddr_to_ip_network(multiaddr: &Multiaddr) -> Option { for addr_component in multiaddr { match addr_component { @@ -138,6 +171,7 @@ pub fn multiaddr_to_ip_network(multiaddr: &Multiaddr) -> Option { None } +/// TODO(doc): @driftluo pub fn ip_to_network(ip: IpAddr) -> IpNetwork { match ip { IpAddr::V4(ipv4) => IpNetwork::V4(ipv4.into()), @@ -145,10 +179,13 @@ pub fn ip_to_network(ip: IpAddr) -> IpNetwork { } } +/// TODO(doc): @driftluo pub 
trait MultiaddrExt { /// extract IP from multiaddr, fn extract_ip_addr(&self) -> Result; + /// TODO(doc): @driftluo fn exclude_p2p(&self) -> Multiaddr; + /// TODO(doc): @driftluo fn attach_p2p(&self, peer_id: &PeerId) -> Result; } diff --git a/network/src/protocols/mod.rs b/network/src/protocols/mod.rs index a4e9c3c811..987a836886 100644 --- a/network/src/protocols/mod.rs +++ b/network/src/protocols/mod.rs @@ -26,7 +26,9 @@ use std::{ }; use tokio_util::codec::length_delimited; +/// TODO(doc): @driftluo pub type PeerIndex = SessionId; +/// TODO(doc): @driftluo pub type BoxedFutureTask = Pin + 'static + Send>>; use crate::{ @@ -35,45 +37,66 @@ use crate::{ Behaviour, Error, NetworkState, Peer, ProtocolVersion, }; +/// TODO(doc): @driftluo pub trait CKBProtocolContext: Send { + /// TODO(doc): @driftluo // Interact with underlying p2p service fn set_notify(&self, interval: Duration, token: u64) -> Result<(), Error>; + /// TODO(doc): @driftluo fn remove_notify(&self, token: u64) -> Result<(), Error>; + /// TODO(doc): @driftluo fn quick_send_message( &self, proto_id: ProtocolId, peer_index: PeerIndex, data: Bytes, ) -> Result<(), Error>; + /// TODO(doc): @driftluo fn quick_send_message_to(&self, peer_index: PeerIndex, data: Bytes) -> Result<(), Error>; + /// TODO(doc): @driftluo fn quick_filter_broadcast(&self, target: TargetSession, data: Bytes) -> Result<(), Error>; // spawn a future task, if `blocking` is true we use tokio_threadpool::blocking to handle the task. 
+ /// TODO(doc): @driftluo fn future_task(&self, task: BoxedFutureTask, blocking: bool) -> Result<(), Error>; + /// TODO(doc): @driftluo fn send_message( &self, proto_id: ProtocolId, peer_index: PeerIndex, data: Bytes, ) -> Result<(), Error>; + /// TODO(doc): @driftluo fn send_message_to(&self, peer_index: PeerIndex, data: Bytes) -> Result<(), Error>; + /// TODO(doc): @driftluo // TODO allow broadcast to target ProtocolId fn filter_broadcast(&self, target: TargetSession, data: Bytes) -> Result<(), Error>; + /// TODO(doc): @driftluo fn disconnect(&self, peer_index: PeerIndex, message: &str) -> Result<(), Error>; // Interact with NetworkState + /// TODO(doc): @driftluo fn get_peer(&self, peer_index: PeerIndex) -> Option; + /// TODO(doc): @driftluo fn with_peer_mut(&self, peer_index: PeerIndex, f: Box); + /// TODO(doc): @driftluo fn connected_peers(&self) -> Vec; + /// TODO(doc): @driftluo fn report_peer(&self, peer_index: PeerIndex, behaviour: Behaviour); + /// TODO(doc): @driftluo fn ban_peer(&self, peer_index: PeerIndex, duration: Duration, reason: String); + /// TODO(doc): @driftluo fn send_paused(&self) -> bool; + /// TODO(doc): @driftluo // Other methods fn protocol_id(&self) -> ProtocolId; + /// TODO(doc): @driftluo fn p2p_control(&self) -> Option<&ServiceControl> { None } } +/// TODO(doc): @driftluo pub trait CKBProtocolHandler: Sync + Send { + /// TODO(doc): @driftluo fn init(&mut self, nc: Arc); /// Called when opening protocol fn connected( @@ -101,6 +124,7 @@ pub trait CKBProtocolHandler: Sync + Send { } } +/// TODO(doc): @driftluo pub struct CKBProtocol { id: ProtocolId, // for example: b"/ckb/" @@ -114,6 +138,7 @@ pub struct CKBProtocol { } impl CKBProtocol { + /// TODO(doc): @driftluo // a helper constructor to build `CKBProtocol` with `SupportProtocols` enum pub fn new_with_support_protocol( support_protocol: support_protocols::SupportProtocols, @@ -131,6 +156,7 @@ impl CKBProtocol { } } + /// TODO(doc): @driftluo pub fn new( protocol_name: String, id: 
ProtocolId, @@ -157,18 +183,22 @@ impl CKBProtocol { } } + /// TODO(doc): @driftluo pub fn id(&self) -> ProtocolId { self.id } + /// TODO(doc): @driftluo pub fn protocol_name(&self) -> String { self.protocol_name.clone() } + /// TODO(doc): @driftluo pub fn match_version(&self, version: ProtocolVersion) -> bool { self.supported_versions.contains(&version) } + /// TODO(doc): @driftluo pub fn build(self) -> ProtocolMeta { let protocol_name = self.protocol_name(); let max_frame_length = self.max_frame_length; diff --git a/network/src/protocols/support_protocols.rs b/network/src/protocols/support_protocols.rs index 807c37771f..6ef754eaec 100644 --- a/network/src/protocols/support_protocols.rs +++ b/network/src/protocols/support_protocols.rs @@ -6,20 +6,31 @@ use p2p::{ }; use tokio_util::codec::length_delimited; +/// TODO(doc): @driftluo #[derive(Clone, Debug)] pub enum SupportProtocols { + /// TODO(doc): @driftluo Ping, + /// TODO(doc): @driftluo Discovery, + /// TODO(doc): @driftluo Identify, + /// TODO(doc): @driftluo Feeler, + /// TODO(doc): @driftluo DisconnectMessage, + /// TODO(doc): @driftluo Sync, + /// TODO(doc): @driftluo Relay, + /// TODO(doc): @driftluo Time, + /// TODO(doc): @driftluo Alert, } impl SupportProtocols { + /// TODO(doc): @driftluo pub fn protocol_id(&self) -> ProtocolId { match self { SupportProtocols::Ping => 0, @@ -35,6 +46,7 @@ impl SupportProtocols { .into() } + /// TODO(doc): @driftluo pub fn name(&self) -> String { match self { SupportProtocols::Ping => "/ckb/ping", @@ -50,6 +62,7 @@ impl SupportProtocols { .to_owned() } + /// TODO(doc): @driftluo pub fn support_versions(&self) -> Vec { // we didn't invoke MetaBuilder#support_versions fn for these protocols (Ping/Discovery/Identify/Feeler/DisconnectMessage) // in previous code, so the default 0.0.1 value is used ( https://github.com/nervosnetwork/tentacle/blob/master/src/builder.rs#L312 ) @@ -67,6 +80,7 @@ impl SupportProtocols { } } + /// TODO(doc): @driftluo pub fn 
max_frame_length(&self) -> usize { match self { SupportProtocols::Ping => 1024, // 1 KB @@ -81,6 +95,7 @@ impl SupportProtocols { } } + /// TODO(doc): @driftluo pub fn flag(&self) -> BlockingFlag { match self { SupportProtocols::Ping @@ -104,6 +119,7 @@ impl SupportProtocols { } } + /// TODO(doc): @driftluo // a helper fn to build `ProtocolMeta` pub fn build_meta_with_service_handle< SH: FnOnce() -> ProtocolHandle>, diff --git a/notify/Cargo.toml b/notify/Cargo.toml index 050c435a0b..58c5fd513b 100644 --- a/notify/Cargo.toml +++ b/notify/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" authors = ["Nervos Core Dev "] edition = "2018" license = "MIT" -description = "TODO(doc): crate description" +description = "TODO(doc): @quake crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/notify/src/lib.rs b/notify/src/lib.rs index 68c42f667e..f541635d50 100644 --- a/notify/src/lib.rs +++ b/notify/src/lib.rs @@ -1,3 +1,4 @@ +//! 
TODO(doc): @quake use ckb_app_config::NotifyConfig; use ckb_channel::{bounded, select, Receiver, RecvError, Sender}; use ckb_logger::{debug, error, trace}; @@ -10,20 +11,30 @@ use std::collections::HashMap; use std::process::Command; use std::thread; +/// TODO(doc): @quake pub const SIGNAL_CHANNEL_SIZE: usize = 1; +/// TODO(doc): @quake pub const REGISTER_CHANNEL_SIZE: usize = 2; +/// TODO(doc): @quake pub const NOTIFY_CHANNEL_SIZE: usize = 128; +/// TODO(doc): @quake pub type NotifyRegister = Sender>>; +/// TODO(doc): @quake #[derive(Debug, Clone)] pub struct PoolTransactionEntry { + /// TODO(doc): @quake pub transaction: TransactionView, + /// TODO(doc): @quake pub cycles: Cycle, + /// TODO(doc): @quake pub size: usize, + /// TODO(doc): @quake pub fee: Capacity, } +/// TODO(doc): @quake #[derive(Clone)] pub struct NotifyController { stop: StopHandler<()>, @@ -41,6 +52,7 @@ impl Drop for NotifyController { } } +/// TODO(doc): @quake pub struct NotifyService { config: NotifyConfig, new_block_subscribers: HashMap>, @@ -49,6 +61,7 @@ pub struct NotifyService { } impl NotifyService { + /// TODO(doc): @quake pub fn new(config: NotifyConfig) -> Self { Self { config, @@ -58,6 +71,7 @@ impl NotifyService { } } + /// TODO(doc): @quake // remove `allow` tag when https://github.com/crossbeam-rs/crossbeam/issues/404 is solved #[allow(clippy::zero_ptr, clippy::drop_copy)] pub fn start(mut self, thread_name: Option) -> NotifyController { @@ -223,15 +237,18 @@ impl NotifyService { } impl NotifyController { + /// TODO(doc): @quake pub fn subscribe_new_block(&self, name: S) -> Receiver { Request::call(&self.new_block_register, name.to_string()) .expect("Subscribe new block should be OK") } + /// TODO(doc): @quake pub fn notify_new_block(&self, block: BlockView) { let _ = self.new_block_notifier.send(block); } + /// TODO(doc): @quake pub fn subscribe_new_transaction( &self, name: S, @@ -240,15 +257,18 @@ impl NotifyController { .expect("Subscribe new transaction should be OK") } + 
/// TODO(doc): @quake pub fn notify_new_transaction(&self, tx_entry: PoolTransactionEntry) { let _ = self.new_transaction_notifier.send(tx_entry); } + /// TODO(doc): @quake pub fn subscribe_network_alert(&self, name: S) -> Receiver { Request::call(&self.network_alert_register, name.to_string()) .expect("Subscribe network alert should be OK") } + /// TODO(doc): @quake pub fn notify_network_alert(&self, alert: Alert) { let _ = self.network_alert_notifier.send(alert); } diff --git a/pow/Cargo.toml b/pow/Cargo.toml index a0048e65ef..e66e3d559d 100644 --- a/pow/Cargo.toml +++ b/pow/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" authors = ["Nervos Core Dev "] edition = "2018" license = "MIT" -description = "TODO(doc): crate description" +description = "TODO(doc): @quake crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/pow/src/dummy.rs b/pow/src/dummy.rs index b73aaf77c9..bd35869a02 100644 --- a/pow/src/dummy.rs +++ b/pow/src/dummy.rs @@ -1,6 +1,7 @@ use super::PowEngine; use ckb_types::packed::Header; +/// TODO(doc): @quake pub struct DummyPowEngine; impl PowEngine for DummyPowEngine { diff --git a/pow/src/eaglesong.rs b/pow/src/eaglesong.rs index d3342d8049..f63749caf5 100644 --- a/pow/src/eaglesong.rs +++ b/pow/src/eaglesong.rs @@ -4,6 +4,7 @@ use eaglesong::eaglesong; use log::Level::Debug; use log::{debug, log_enabled}; +/// TODO(doc): @quake pub struct EaglesongPowEngine; impl PowEngine for EaglesongPowEngine { diff --git a/pow/src/eaglesong_blake2b.rs b/pow/src/eaglesong_blake2b.rs index 64fa931cee..7c2638f90f 100644 --- a/pow/src/eaglesong_blake2b.rs +++ b/pow/src/eaglesong_blake2b.rs @@ -5,6 +5,7 @@ use eaglesong::eaglesong; use log::Level::Debug; use log::{debug, log_enabled}; +/// TODO(doc): @quake pub struct EaglesongBlake2bPowEngine; impl PowEngine for EaglesongBlake2bPowEngine { diff --git a/pow/src/lib.rs b/pow/src/lib.rs index a283a8787c..28470f6fd4 100644 --- 
a/pow/src/lib.rs +++ b/pow/src/lib.rs @@ -1,3 +1,4 @@ +//! TODO(doc): @quake use byteorder::{ByteOrder, LittleEndian}; use ckb_types::{ packed::{Byte32, Header}, @@ -16,11 +17,15 @@ pub use crate::dummy::DummyPowEngine; pub use crate::eaglesong::EaglesongPowEngine; pub use crate::eaglesong_blake2b::EaglesongBlake2bPowEngine; +/// TODO(doc): @quake #[derive(Clone, Serialize, Deserialize, Eq, PartialEq, Hash, Debug)] #[serde(tag = "func", content = "params")] pub enum Pow { + /// TODO(doc): @quake Dummy, + /// TODO(doc): @quake Eaglesong, + /// TODO(doc): @quake EaglesongBlake2b, } @@ -35,6 +40,7 @@ impl fmt::Display for Pow { } impl Pow { + /// TODO(doc): @quake pub fn engine(&self) -> Arc { match *self { Pow::Dummy => Arc::new(DummyPowEngine), @@ -43,11 +49,13 @@ impl Pow { } } + /// TODO(doc): @quake pub fn is_dummy(&self) -> bool { *self == Pow::Dummy } } +/// TODO(doc): @quake pub fn pow_message(pow_hash: &Byte32, nonce: u128) -> [u8; 48] { let mut message = [0; 48]; message[0..32].copy_from_slice(pow_hash.as_slice()); @@ -55,11 +63,15 @@ pub fn pow_message(pow_hash: &Byte32, nonce: u128) -> [u8; 48] { message } +/// TODO(doc): @quake pub trait PowEngine: Send + Sync + AsAny { + /// TODO(doc): @quake fn verify(&self, header: &Header) -> bool; } +/// TODO(doc): @quake pub trait AsAny { + /// TODO(doc): @quake fn as_any(&self) -> &dyn Any; } diff --git a/resource/Cargo.toml b/resource/Cargo.toml index b74f5639d5..1ba03c945a 100644 --- a/resource/Cargo.toml +++ b/resource/Cargo.toml @@ -5,7 +5,7 @@ license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" build = "build.rs" -description = "TODO(doc): crate description" +description = "Bundled resources for the CKB binary." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/resource/build.rs b/resource/build.rs index c26391fd56..9b9f878d18 100644 --- a/resource/build.rs +++ b/resource/build.rs @@ -1,3 +1,4 @@ +//! 
Build script for crate `ckb-resource` to bundle the resources. use ckb_types::H256; use includedir_codegen::Compression; use std::env; @@ -41,28 +42,32 @@ fn main() { writeln!( &mut out_file, - "pub const CODE_HASH_SECP256K1_DATA: H256 = {:?};", + "/// Data hash of the cell containing secp256k1 data.\n\ + pub const CODE_HASH_SECP256K1_DATA: H256 = {:?};", H256(CODE_HASH_SECP256K1_DATA) ) .expect("write to code_hashes.rs"); writeln!( &mut out_file, - "pub const CODE_HASH_SECP256K1_BLAKE160_SIGHASH_ALL: H256 = {:?};", + "/// Data hash of the cell containing secp256k1 blake160 sighash all lock script.\n\ + pub const CODE_HASH_SECP256K1_BLAKE160_SIGHASH_ALL: H256 = {:?};", H256(CODE_HASH_SECP256K1_BLAKE160_SIGHASH_ALL) ) .expect("write to code_hashes.rs"); writeln!( &mut out_file, - "pub const CODE_HASH_SECP256K1_BLAKE160_MULTISIG_ALL: H256 = {:?};", + "/// Data hash of the cell containing secp256k1 blake160 multisig all lock script.\n\ + pub const CODE_HASH_SECP256K1_BLAKE160_MULTISIG_ALL: H256 = {:?};", H256(CODE_HASH_SECP256K1_BLAKE160_MULTISIG_ALL) ) .expect("write to code_hashes.rs"); writeln!( &mut out_file, - "pub const CODE_HASH_DAO: H256 = {:?};", + "/// Data hash of the cell containing DAO type script.\n\ + pub const CODE_HASH_DAO: H256 = {:?};", H256(CODE_HASH_DAO) ) .expect("write to code_hashes.rs"); diff --git a/resource/src/lib.rs b/resource/src/lib.rs index 6654457dab..f4b8abd0af 100644 --- a/resource/src/lib.rs +++ b/resource/src/lib.rs @@ -1,14 +1,29 @@ -// Shields clippy errors in generated bundled.rs -#![allow(clippy::unreadable_literal)] +//! Bundles resources in the ckb binary. +//! +//! This crate bundles the files ckb.toml, ckb-miner.toml, default.db-options, and all files in the +//! directory `specs` in the binary. +//! +//! The bundled files can be read via `Resource::Bundled`, for example: +//! +//! ``` +//! // Read bundled ckb.toml +//! use ckb_resource::{Resource, CKB_CONFIG_FILE_NAME}; +//! +//! 
let ckb_toml_bytes = Resource::bundled(CKB_CONFIG_FILE_NAME.to_string()).get().unwrap(); //! println!("ckb.toml\n{}", String::from_utf8(ckb_toml_bytes.to_vec()).unwrap()); //! ``` //! //! These bundled files can be customized for different chains using spec branches. //! See [Template](struct.Template.html). mod template; +pub use self::template::Template; pub use self::template::{ TemplateContext, AVAILABLE_SPECS, DEFAULT_P2P_PORT, DEFAULT_RPC_PORT, DEFAULT_SPEC, }; pub use std::io::{Error, Result}; -use self::template::Template; use ckb_types::H256; use includedir::Files; use serde::{Deserialize, Serialize}; @@ -21,19 +36,39 @@ use tempfile::NamedTempFile; use ckb_system_scripts::BUNDLED_CELL; -include!(concat!(env!("OUT_DIR"), "/bundled.rs")); +mod bundled { + #![allow(missing_docs, clippy::unreadable_literal)] + include!(concat!(env!("OUT_DIR"), "/bundled.rs")); +} +/// Bundled resources in ckb binary. +pub use bundled::BUNDLED; + include!(concat!(env!("OUT_DIR"), "/code_hashes.rs")); +/// CKB config file name. pub const CKB_CONFIG_FILE_NAME: &str = "ckb.toml"; +/// CKB miner config file name. pub const MINER_CONFIG_FILE_NAME: &str = "ckb-miner.toml"; +/// The relative spec file path for the dev chain. pub const SPEC_DEV_FILE_NAME: &str = "specs/dev.toml"; +/// The file name of the generated RocksDB options file. pub const DB_OPTIONS_FILE_NAME: &str = "default.db-options"; +/// Represents a resource, which is either bundled in the CKB binary or resident in the local file +/// system. #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] #[serde(untagged)] pub enum Resource { - Bundled { bundled: String }, - FileSystem { file: PathBuf }, + /// A resource that is bundled in the CKB binary. + Bundled { + /// The identifier of the bundled resource. + bundled: String, + }, + /// A resource that resides in the local file system. + FileSystem { + /// The file path to the resource. 
+ file: PathBuf, + }, } impl fmt::Display for Resource { @@ -46,38 +81,56 @@ impl fmt::Display for Resource { } impl Resource { + /// Creates a reference to the bundled resource. pub fn bundled(bundled: String) -> Resource { Resource::Bundled { bundled } } + /// Creates a reference to the resource resident in the file system. pub fn file_system(file: PathBuf) -> Resource { Resource::FileSystem { file } } + /// Creates the CKB config file resource from the file system. + /// + /// It searches the file name `CKB_CONFIG_FILE_NAME` in the directory `root_dir`. pub fn ckb_config>(root_dir: P) -> Resource { Resource::file_system(root_dir.as_ref().join(CKB_CONFIG_FILE_NAME)) } + /// Creates the CKB miner config file resource from the file system. + /// + /// It searches the file name `MINER_CONFIG_FILE_NAME` in the directory `root_dir`. pub fn miner_config>(root_dir: P) -> Resource { Resource::file_system(root_dir.as_ref().join(MINER_CONFIG_FILE_NAME)) } + /// Creates the RocksDB options file resource from the file system. + /// + /// It searches the file name `DB_OPTIONS_FILE_NAME` in the directory `root_dir`. pub fn db_options>(root_dir: P) -> Resource { Resource::file_system(root_dir.as_ref().join(DB_OPTIONS_FILE_NAME)) } + /// Creates the bundled CKB config file resource. pub fn bundled_ckb_config() -> Resource { Resource::bundled(CKB_CONFIG_FILE_NAME.to_string()) } + /// Creates the bundled CKB miner config file resource. pub fn bundled_miner_config() -> Resource { Resource::bundled(MINER_CONFIG_FILE_NAME.to_string()) } + /// Creates the bundled RocksDB options file resource. pub fn bundled_db_options() -> Resource { Resource::bundled(DB_OPTIONS_FILE_NAME.to_string()) } + /// Checks whether any of the bundled resources has been exported in the specified directory. + /// + /// This can be used to avoid overwriting when exporting all the bundled resources to the specified + /// directory. 
pub fn exported_in>(root_dir: P) -> bool { BUNDLED .file_names() @@ -85,6 +138,7 @@ impl Resource { .any(|name| join_bundled_key(root_dir.as_ref().to_path_buf(), name).exists()) } + /// Returns `true` if this is a bundled resource. pub fn is_bundled(&self) -> bool { match self { Resource::Bundled { .. } => true, @@ -92,6 +146,11 @@ } + /// Returns `true` if the resource exists. + /// + /// The bundled resource exists only when the identifier is included in the bundle. + /// + /// The file system resource exists only when the file exists. pub fn exists(&self) -> bool { match self { Resource::Bundled { bundled } => { @@ -101,6 +160,9 @@ } } + /// The parent directory of the resource. + /// + /// It always returns `None` on a bundled resource. pub fn parent(&self) -> Option<&Path> { match self { Resource::FileSystem { file } => file.parent(), @@ -108,6 +170,9 @@ } + /// Modifies the file system resource to ensure the path is absolute. + /// + /// If the path is relative, expand the path relative to the directory `base`. pub fn absolutize>(&mut self, base: P) { if let Resource::FileSystem { file: ref mut path } = self { if path.is_relative() { @@ -116,7 +181,7 @@ } } - /// Gets resource content + /// Gets resource content. pub fn get(&self) -> Result> { match self { Resource::Bundled { bundled } => SourceFiles::new(&BUNDLED_CELL, &BUNDLED).get(bundled), @@ -124,7 +189,7 @@ } - /// Gets resource input stream + /// Gets resource content via an input stream. pub fn read(&self) -> Result> { match self { Resource::Bundled { bundled } => { @@ -134,6 +199,14 @@ } } + /// Exports a bundled resource. + /// + /// This function returns `Ok` immediately when invoked on a file system resource. + /// + /// The file is exported to the path by combining `root_dir` and the resource identifier. + /// + /// These bundled files can be customized for different chains using spec branches. 
+ /// See [Template](struct.Template.html). pub fn export<'a, P: AsRef>( &self, context: &TemplateContext<'a>, @@ -149,7 +222,7 @@ impl Resource { if let Some(dir) = target.parent() { fs::create_dir_all(dir)?; } - template.write_to(&mut out, context)?; + template.render_to(&mut out, context)?; out.persist(target)?; Ok(()) } diff --git a/resource/src/template.rs b/resource/src/template.rs index 9284ee19a5..75ff01891d 100644 --- a/resource/src/template.rs +++ b/resource/src/template.rs @@ -1,6 +1,10 @@ +/// Default chain spec. pub const DEFAULT_SPEC: &str = "mainnet"; +/// The list of bundled chain specs. pub const AVAILABLE_SPECS: &[&str] = &["mainnet", "testnet", "staging", "dev"]; +/// The default RPC listen port *8114*. pub const DEFAULT_RPC_PORT: &str = "8114"; +/// The default P2P listen port *8115*. pub const DEFAULT_P2P_PORT: &str = "8115"; const START_MARKER: &str = " # {{"; @@ -10,14 +14,94 @@ const WILDCARD_BRANCH: &str = "# _ => "; use std::collections::HashMap; use std::io; -pub struct Template(T); +/// A simple template which supports spec branches and variables. +/// +/// The template is designed so that without expanding the template, it is still a valid TOML file. +/// +/// ### Spec Branches +/// +/// A spec branches block replaces a line with a branch matching the given spec name. +/// +/// The block starts with the line ending with ` # {{` (the leading space is required) and ends +/// with a line `# }}`. +/// +/// Between the start and end markers, every line is a branch starting with `# SPEC => CONTENT`, where +/// `SPEC` is the branch spec name, and `CONTENT` is the text to be replaced for the spec. +/// A special spec name `_` acts as a wildcard which matches any spec name. +/// +/// The spec name is required to render the template, see [`Template::new`]. The block including +/// the **whole** starting line which ends with ` # {{` will be replaced by the branch `CONTENT` +/// which `SPEC` is `_` or equals to the given spec name. 
+/// +/// In the `CONTENT`, variables are expanded and all the escape sequences `\n` are replaced by new +/// lines. +/// +/// ``` +/// use ckb_resource::{Template, TemplateContext}; +/// +/// let template = Template::new( +/// r#"filter = "debug" # {{ +/// ## mainnet => filter = "error" +/// ## _ => filter = "info" +/// ## }}"# +/// .to_string(), +/// ); +/// let mainnet_result = template.render(&TemplateContext::new("mainnet", Vec::new())); +/// assert_eq!("filter = \"error\"\n", mainnet_result.unwrap()); +/// let testnet_result = template.render(&TemplateContext::new("testnet", Vec::new())); +/// assert_eq!("filter = \"info\"\n", testnet_result.unwrap()); +/// ``` +/// +/// ### Template Variables +/// +/// Template variables are defined as a key-value dictionary in [`TemplateContext`] via +/// [`TemplateContext::new`] or [`TemplateContext::insert`]. +/// +/// Template uses variables by surrounding the variable names with curly brackets. +/// +/// The variable expansions **only** happen inside the spec branches in the spec `CONTENT`. +/// It is a trick to use a wildcard branch as in the following example. +/// +/// ``` +/// use ckb_resource::{Template, TemplateContext}; +/// +/// let template = Template::new( +/// r#"# # {{ +/// ## _ => listen_address = "127.0.0.1:{rpc_port}" +/// ## }}"# +/// .to_string(), +/// ); +/// let text = template.render(&TemplateContext::new("dev", vec![("rpc_port", "18114")])); +/// assert_eq!("listen_address = \"127.0.0.1:18114\"\n", text.unwrap()); +/// ``` +/// +/// [`TemplateContext`]: struct.TemplateContext.html +/// [`TemplateContext::new`]: struct.TemplateContext.html#method.new +/// [`TemplateContext::insert`]: struct.TemplateContext.html#method.insert pub struct Template(String); +/// The context used to expand the [`Template`](struct.Template.html). pub struct TemplateContext<'a> { spec: &'a str, kvs: HashMap<&'a str, &'a str>, } impl<'a> TemplateContext<'a> { + /// Creates a new template context. 
+ /// + /// * `spec` - the chain spec name for template spec branch. + /// * `kvs` - the initial template variables. + /// + /// ## Examples + /// + /// ``` + /// use ckb_resource::TemplateContext; + /// // Creates a context for *dev* chain and initializes variables: + /// // + /// // rpc_port => 8114 + /// // p2p_port => 8115 + /// TemplateContext::new("dev", vec![("rpc_port", "8114"), ("p2p_port", "8115")]); + /// ``` pub fn new(spec: &'a str, kvs: I) -> Self where I: IntoIterator, @@ -28,13 +112,18 @@ impl<'a> TemplateContext<'a> { } } + /// Inserts a new variable into the context. + /// + /// * `key` - the variable name + /// * `value` - the variable value pub fn insert(&mut self, key: &'a str, value: &'a str) { self.kvs.insert(key, value); } } -impl Template { - pub fn new(content: T) -> Self { +impl Template { + /// Creates the template with the specified content. + pub fn new(content: String) -> Self { Template(content) } } @@ -60,11 +149,14 @@ pub enum TemplateState<'a> { SearchEndMarker, } -impl Template -where - T: AsRef, -{ - pub fn write_to<'c, W: io::Write>( +impl Template { + /// Expands the template using the context and writes the result via the writer `w`. + /// + /// ## Errors + /// + /// This method returns `std::io::Error` when it fails to write the chunks to the underlying + /// writer. + pub fn render_to<'c, W: io::Write>( &self, w: &mut W, context: &TemplateContext<'c>, @@ -72,7 +164,7 @@ where let spec_branch = format!("# {} => ", context.spec); let mut state = TemplateState::SearchStartMarker; - for line in self.0.as_ref().lines() { + for line in self.0.lines() { // dbg!((line, &state)); match state { TemplateState::SearchStartMarker => { @@ -116,4 +208,17 @@ where Ok(()) } + + /// Renders the template and returns the result as a string. + /// + /// ## Errors + /// + /// This method returns `std::io::Error` when it fails to write the chunks to the underlying + /// writer or it failed to convert the result text to UTF-8. 
+ pub fn render<'c>(&self, context: &TemplateContext<'c>) -> io::Result { + let mut out = Vec::new(); + self.render_to(&mut out, context)?; + String::from_utf8(out) + .map_err(|from_utf8_err| io::Error::new(io::ErrorKind::InvalidInput, from_utf8_err)) + } } diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 48e04df7a1..1786c094ea 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @doitian crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/rpc/README.md b/rpc/README.md index ccd99e4ff8..e6ce788962 100644 --- a/rpc/README.md +++ b/rpc/README.md @@ -92,6 +92,7 @@ For example, a method is marked as deprecated in 0.35.0, it can be disabled in 0 * [Type `BannedAddr`](#type-bannedaddr) * [Type `Block`](#type-block) * [Type `BlockEconomicState`](#type-blockeconomicstate) + * [Type `BlockIssuance`](#type-blockissuance) * [Type `BlockNumber`](#type-blocknumber) * [Type `BlockReward`](#type-blockreward) * [Type `BlockTemplate`](#type-blocktemplate) @@ -3686,12 +3687,16 @@ The JSON view of a Block used as a parameter in the RPC. ### Type `BlockEconomicState` -Block base rewards. +Block Economic State. + +It includes the rewards details and when it is finalized. #### Fields `BlockEconomicState` is a JSON object with the following fields. +* `issuance`: [`BlockIssuance`](#type-blockissuance) - Block base rewards. + * `miner_reward`: [`MinerReward`](#type-minerreward) - Block rewards for miners. * `txs_fee`: [`Capacity`](#type-capacity) - The total fees of all transactions committed in the block. @@ -3699,6 +3704,19 @@ Block base rewards. * `finalized_at`: [`H256`](#type-h256) - The block hash of the block which creates the rewards as cells in its cellbase transaction. +### Type `BlockIssuance` + +Block base rewards. 
+ +#### Fields + +`BlockIssuance` is a JSON object with the following fields. + +* `primary`: [`Capacity`](#type-capacity) - The primary base rewards. + +* `secondary`: [`Capacity`](#type-capacity) - The secondary base rewards. + + ### Type `BlockNumber` Consecutive block number starting from 0. @@ -4260,7 +4278,9 @@ CKB adjusts difficulty based on epochs. It also equals the total count of blocks in all the epochs which epoch number is less than this epoch. -* `length`: [`BlockNumber`](#type-blocknumber) - The resulting type after obtaining ownership. +* `length`: [`BlockNumber`](#type-blocknumber) - The number of blocks in this epoch. + +* `compact_target`: [`Uint32`](#type-uint32) - The difficulty target for any block in this epoch. ### Type `EstimateResult` @@ -4271,7 +4291,7 @@ The estimated fee rate. `EstimateResult` is a JSON object with the following fields. -* `fee_rate`: [`FeeRate`](#type-feerate) - The resulting type after obtaining ownership. +* `fee_rate`: [`FeeRate`](#type-feerate) - The estimated fee rate. 
### Type `FeeRate` diff --git a/rpc/src/error.rs b/rpc/src/error.rs index e564e0b1d1..7c6a24ae56 100644 --- a/rpc/src/error.rs +++ b/rpc/src/error.rs @@ -111,6 +111,7 @@ pub enum RPCError { } impl RPCError { + /// TODO(doc): @doitian pub fn invalid_params(message: T) -> Error { Error { code: ErrorCode::InvalidParams, @@ -119,6 +120,7 @@ impl RPCError { } } + /// TODO(doc): @doitian pub fn custom(error_code: RPCError, message: T) -> Error { Error { code: ErrorCode::ServerError(error_code as i64), @@ -127,6 +129,7 @@ impl RPCError { } } + /// TODO(doc): @doitian pub fn custom_with_data( error_code: RPCError, message: T, @@ -139,6 +142,7 @@ impl RPCError { } } + /// TODO(doc): @doitian pub fn custom_with_error(error_code: RPCError, err: T) -> Error { Error { code: ErrorCode::ServerError(error_code as i64), @@ -147,6 +151,7 @@ impl RPCError { } } + /// TODO(doc): @doitian pub fn from_submit_transaction_reject(reject: &Reject) -> Error { let code = match reject { Reject::LowFeeRate(_, _) => RPCError::PoolRejectedTransactionByMinFeeRate, @@ -160,6 +165,7 @@ impl RPCError { RPCError::custom_with_error(code, reject) } + /// TODO(doc): @doitian pub fn downcast_submit_transaction_reject(err: &CKBError) -> Option<&Reject> { use ckb_error::ErrorKind::SubmitTransaction; match err.kind() { @@ -168,6 +174,7 @@ impl RPCError { } } + /// TODO(doc): @doitian pub fn from_ckb_error(err: CKBError) -> Error { match err.kind() { ErrorKind::Dao => { @@ -200,6 +207,7 @@ impl RPCError { } } + /// TODO(doc): @doitian pub fn from_failure_error(err: failure::Error) -> Error { match err.downcast::() { Ok(ckb_error) => Self::from_ckb_error(ckb_error), @@ -207,10 +215,12 @@ impl RPCError { } } + /// TODO(doc): @doitian pub fn ckb_internal_error(err: T) -> Error { Self::custom_with_error(RPCError::CKBInternalError, err) } + /// TODO(doc): @doitian pub fn rpc_module_is_disabled(module: &str) -> Error { Self::custom( RPCError::RPCModuleIsDisabled, @@ -222,6 +232,7 @@ impl RPCError { ) } + /// 
TODO(doc): @doitian pub fn rpc_method_is_deprecated() -> Error { Self::custom( RPCError::Deprecated, diff --git a/rpc/src/module/subscription.rs b/rpc/src/module/subscription.rs index 2503d5d7ca..271ab7e185 100644 --- a/rpc/src/module/subscription.rs +++ b/rpc/src/module/subscription.rs @@ -87,6 +87,7 @@ pub enum Topic { #[allow(clippy::needless_return)] #[rpc(server)] pub trait SubscriptionRpc { + /// TODO(doc): @doitian type Metadata; /// Subscribes to a topic. diff --git a/rpc/src/server.rs b/rpc/src/server.rs index 863264671d..269eba1cf3 100644 --- a/rpc/src/server.rs +++ b/rpc/src/server.rs @@ -16,6 +16,7 @@ pub struct RpcServer { } impl RpcServer { + /// TODO(doc): @doitian pub fn new( config: RpcConfig, io_handler: IoHandler, @@ -100,6 +101,7 @@ impl RpcServer { RpcServer { http, _tcp, _ws } } + /// TODO(doc): @doitian pub fn http_address(&self) -> &SocketAddr { self.http.address() } diff --git a/rpc/src/service_builder.rs b/rpc/src/service_builder.rs index bac4a3a59e..80932c5916 100644 --- a/rpc/src/service_builder.rs +++ b/rpc/src/service_builder.rs @@ -30,12 +30,14 @@ pub struct ServiceBuilder<'a> { } impl<'a> ServiceBuilder<'a> { + /// TODO(doc): @doitian pub fn new(config: &'a RpcConfig) -> Self { Self { config, io_handler: IoHandler::default(), } } + /// TODO(doc): @doitian pub fn enable_chain(mut self, shared: Shared) -> Self { let rpc_methods = ChainRpcImpl { shared }.to_delegate(); if self.config.chain_enable() { @@ -46,6 +48,7 @@ impl<'a> ServiceBuilder<'a> { self } + /// TODO(doc): @doitian pub fn enable_pool( mut self, shared: Shared, @@ -64,6 +67,7 @@ impl<'a> ServiceBuilder<'a> { self } + /// TODO(doc): @doitian pub fn enable_miner( mut self, shared: Shared, @@ -85,6 +89,7 @@ impl<'a> ServiceBuilder<'a> { self } + /// TODO(doc): @doitian pub fn enable_net( mut self, network_controller: NetworkController, @@ -103,6 +108,7 @@ impl<'a> ServiceBuilder<'a> { self } + /// TODO(doc): @doitian pub fn enable_stats( mut self, shared: Shared, @@ -123,6 
+129,7 @@ impl<'a> ServiceBuilder<'a> { self } + /// TODO(doc): @doitian pub fn enable_experiment(mut self, shared: Shared) -> Self { let rpc_methods = ExperimentRpcImpl { shared }.to_delegate(); if self.config.experiment_enable() { @@ -133,6 +140,7 @@ impl<'a> ServiceBuilder<'a> { self } + /// TODO(doc): @doitian pub fn enable_integration_test( mut self, shared: Shared, @@ -153,6 +161,7 @@ impl<'a> ServiceBuilder<'a> { self } + /// TODO(doc): @doitian pub fn enable_alert( mut self, alert_verifier: Arc, @@ -169,6 +178,7 @@ impl<'a> ServiceBuilder<'a> { self } + /// TODO(doc): @doitian pub fn enable_indexer(mut self, indexer_config: &IndexerConfig, shared: Shared) -> Self { let store = DefaultIndexerStore::new(indexer_config, shared); let rpc_methods = IndexerRpcImpl { @@ -184,6 +194,7 @@ impl<'a> ServiceBuilder<'a> { self } + /// TODO(doc): @doitian pub fn enable_debug(mut self) -> Self { if self.config.debug_enable() { self.io_handler.extend_with(DebugRpcImpl {}.to_delegate()); @@ -226,6 +237,7 @@ impl<'a> ServiceBuilder<'a> { })); } + /// TODO(doc): @doitian pub fn build(self) -> IoHandler { let mut io_handler = self.io_handler; io_handler.add_method("ping", |_| futures::future::ok("pong".into())); diff --git a/script/Cargo.toml b/script/Cargo.toml index e73405eef0..dcad806e6b 100644 --- a/script/Cargo.toml +++ b/script/Cargo.toml @@ -5,7 +5,7 @@ license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" build = "build.rs" -description = "TODO(doc): crate description" +description = "TODO(doc): @doitian crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/script/build.rs b/script/build.rs index 3e60e66580..f65eb337ec 100644 --- a/script/build.rs +++ b/script/build.rs @@ -1,3 +1,4 @@ +//! Build script for crate `ckb-script`. 
use std::env; fn main() { diff --git a/script/src/cost_model.rs b/script/src/cost_model.rs index ea938eae85..9c1cafbe59 100644 --- a/script/src/cost_model.rs +++ b/script/src/cost_model.rs @@ -1,16 +1,20 @@ +//! TODO(doc): @doitian use ckb_vm::{ instructions::{extract_opcode, insts}, Instruction, }; +/// TODO(doc): @doitian // 0.25 cycles per byte pub const BYTES_PER_CYCLE: u64 = 4; +/// TODO(doc): @doitian pub fn transferred_byte_cycles(bytes: u64) -> u64 { // Compiler will optimize the divisin here to shifts. (bytes + BYTES_PER_CYCLE - 1) / BYTES_PER_CYCLE } +/// TODO(doc): @doitian pub fn instruction_cycles(i: Instruction) -> u64 { match extract_opcode(i) { insts::OP_JALR => 3, diff --git a/script/src/error.rs b/script/src/error.rs index 3c24053d32..088ac6f259 100644 --- a/script/src/error.rs +++ b/script/src/error.rs @@ -4,6 +4,7 @@ use ckb_types::core::Cycle; use failure::Fail; use std::fmt; +/// TODO(doc): @doitian #[derive(Fail, Debug, PartialEq, Eq, Clone)] pub enum ScriptError { /// The field code_hash in script is invalid @@ -66,6 +67,7 @@ impl fmt::Display for TransactionScriptErrorSource { } } +/// TODO(doc): @doitian #[derive(Fail, Debug, PartialEq, Eq, Clone)] pub struct TransactionScriptError { source: TransactionScriptErrorSource, @@ -90,6 +92,7 @@ impl ScriptError { } } + /// TODO(doc): @doitian pub fn input_lock_script(self, index: usize) -> TransactionScriptError { TransactionScriptError { source: TransactionScriptErrorSource::Inputs(index, ScriptGroupType::Lock), @@ -97,6 +100,7 @@ impl ScriptError { } } + /// TODO(doc): @doitian pub fn input_type_script(self, index: usize) -> TransactionScriptError { TransactionScriptError { source: TransactionScriptErrorSource::Inputs(index, ScriptGroupType::Type), @@ -104,6 +108,7 @@ impl ScriptError { } } + /// TODO(doc): @doitian pub fn output_type_script(self, index: usize) -> TransactionScriptError { TransactionScriptError { source: TransactionScriptErrorSource::Outputs(index, ScriptGroupType::Type), diff 
--git a/script/src/ill_transaction_checker.rs b/script/src/ill_transaction_checker.rs index 302323a842..4bba2fd905 100644 --- a/script/src/ill_transaction_checker.rs +++ b/script/src/ill_transaction_checker.rs @@ -10,15 +10,18 @@ use goblin::elf::{section_header::SHF_EXECINSTR, Elf}; const CKB_VM_ISSUE_92: &str = "https://github.com/nervosnetwork/ckb-vm/issues/92"; +/// TODO(doc): @doitian pub struct IllTransactionChecker<'a> { tx: &'a TransactionView, } impl<'a> IllTransactionChecker<'a> { + /// TODO(doc): @doitian pub fn new(tx: &'a TransactionView) -> Self { IllTransactionChecker { tx } } + /// TODO(doc): @doitian pub fn check(&self) -> Result<(), ScriptError> { for (i, data) in self.tx.outputs_data().into_iter().enumerate() { IllScriptChecker::new(&data.raw_data(), i).check()?; diff --git a/script/src/lib.rs b/script/src/lib.rs index 3a1c3610e0..e27a4cec39 100644 --- a/script/src/lib.rs +++ b/script/src/lib.rs @@ -1,3 +1,4 @@ +//! TODO(doc): @doitian pub mod cost_model; mod error; mod ill_transaction_checker; diff --git a/script/src/types.rs b/script/src/types.rs index be085a97ad..e92ea3cdc0 100644 --- a/script/src/types.rs +++ b/script/src/types.rs @@ -2,18 +2,24 @@ use ckb_types::packed::Script; use serde::{Deserialize, Serialize}; use std::fmt; -// A script group is defined as scripts that share the same hash. -// A script group will only be executed once per transaction, the -// script itself should check against all inputs/outputs in its group -// if needed. +/// A script group is defined as scripts that share the same hash. +/// +/// A script group will only be executed once per transaction, the +/// script itself should check against all inputs/outputs in its group +/// if needed. 
pub struct ScriptGroup { + /// TODO(doc): @doitian pub script: Script, + /// TODO(doc): @doitian pub group_type: ScriptGroupType, + /// TODO(doc): @doitian pub input_indices: Vec, + /// TODO(doc): @doitian pub output_indices: Vec, } impl ScriptGroup { + /// TODO(doc): @doitian pub fn new(script: &Script, group_type: ScriptGroupType) -> Self { Self { group_type, @@ -23,19 +29,24 @@ impl ScriptGroup { } } + /// TODO(doc): @doitian pub fn from_lock_script(script: &Script) -> Self { Self::new(script, ScriptGroupType::Lock) } + /// TODO(doc): @doitian pub fn from_type_script(script: &Script) -> Self { Self::new(script, ScriptGroupType::Type) } } +/// TODO(doc): @doitian #[derive(Copy, Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug)] #[serde(rename_all = "snake_case")] pub enum ScriptGroupType { + /// TODO(doc): @doitian Lock, + /// TODO(doc): @doitian Type, } diff --git a/script/src/verify.rs b/script/src/verify.rs index d649ca8ae0..4dadd25201 100644 --- a/script/src/verify.rs +++ b/script/src/verify.rs @@ -41,9 +41,10 @@ type CoreMachineType = Box; #[cfg(not(has_asm))] type CoreMachineType = DefaultCoreMachine>>; -// This struct leverages CKB VM to verify transaction inputs. -// FlatBufferBuilder owned Vec that grows as needed, in the -// future, we might refactor this to share buffer to achieve zero-copy +/// This struct leverages CKB VM to verify transaction inputs. 
+/// +/// FlatBufferBuilder owned `Vec` that grows as needed, in the +/// future, we might refactor this to share buffer to achieve zero-copy pub struct TransactionScriptsVerifier<'a, DL> { data_loader: &'a DL, debug_printer: Box, @@ -58,6 +59,7 @@ pub struct TransactionScriptsVerifier<'a, DL> { } impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, DL> { + /// TODO(doc): @doitian pub fn new( rtx: &'a ResolvedTransaction, data_loader: &'a DL, @@ -142,6 +144,7 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D } } + /// TODO(doc): @doitian pub fn set_debug_printer(&mut self, func: F) { self.debug_printer = Box::new(func); } @@ -240,7 +243,7 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D LoadScript::new(script) } - // Extracts actual script binary either in dep cells. + /// Extracts actual script binary either in dep cells. pub fn extract_script(&self, script: &'a Script) -> Result { match ScriptHashType::try_from(script.hash_type()).expect("checked data") { ScriptHashType::Data => { @@ -265,6 +268,7 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D } } + /// TODO(doc): @doitian pub fn verify(&self, max_cycles: Cycle) -> Result { let mut cycles: Cycle = 0; @@ -293,8 +297,8 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D Ok(cycles) } - // Run a single script in current transaction, while this is not useful for - // CKB itself, it can be very helpful when building a CKB debugger. + /// Runs a single script in current transaction, while this is not useful for + /// CKB itself, it can be very helpful when building a CKB debugger. 
pub fn verify_single( &self, script_group_type: ScriptGroupType, @@ -326,6 +330,7 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D } } + /// TODO(doc): @doitian pub fn find_script_group( &self, script_group_type: ScriptGroupType, @@ -337,10 +342,12 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D } } + /// TODO(doc): @doitian pub fn cost_model(&self) -> Box { Box::new(instruction_cycles) } + /// TODO(doc): @doitian pub fn generate_syscalls( &'a self, script_group: &'a ScriptGroup, diff --git a/shared/Cargo.toml b/shared/Cargo.toml index 0de5f4239c..d05ab448f8 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" authors = ["Nervos Core Dev "] edition = "2018" license = "MIT" -description = "TODO(doc): crate description" +description = "TODO(doc): @quake crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/shared/src/lib.rs b/shared/src/lib.rs index 6fb04c689a..6c239d5845 100644 --- a/shared/src/lib.rs +++ b/shared/src/lib.rs @@ -1,3 +1,4 @@ +//! TODO(doc): @quake mod migrations; pub mod shared; diff --git a/shared/src/shared.rs b/shared/src/shared.rs index 49cab818b4..535d0583f7 100644 --- a/shared/src/shared.rs +++ b/shared/src/shared.rs @@ -1,3 +1,4 @@ +//! 
TODO(doc): @quake use crate::{migrations, Snapshot, SnapshotMgr}; use arc_swap::Guard; use ckb_app_config::{BlockAssemblerConfig, DBConfig, NotifyConfig, StoreConfig, TxPoolConfig}; @@ -20,6 +21,7 @@ use ckb_verification::cache::TxVerifyCache; use std::collections::HashSet; use std::sync::Arc; +/// TODO(doc): @quake #[derive(Clone)] pub struct Shared { pub(crate) store: ChainDB, @@ -31,6 +33,7 @@ pub struct Shared { } impl Shared { + /// TODO(doc): @quake pub fn init( store: ChainDB, consensus: Consensus, @@ -147,31 +150,38 @@ impl Shared { } } + /// TODO(doc): @quake pub fn tx_pool_controller(&self) -> &TxPoolController { &self.tx_pool_controller } + /// TODO(doc): @quake pub fn txs_verify_cache(&self) -> Arc> { Arc::clone(&self.txs_verify_cache) } + /// TODO(doc): @quake pub fn notify_controller(&self) -> &NotifyController { &self.notify_controller } + /// TODO(doc): @quake pub fn snapshot(&self) -> Guard> { self.snapshot_mgr.load() } + /// TODO(doc): @quake pub fn store_snapshot(&self, snapshot: Arc) { self.snapshot_mgr.store(snapshot) } + /// TODO(doc): @quake pub fn refresh_snapshot(&self) { let new = self.snapshot().refresh(self.store.get_snapshot()); self.store_snapshot(Arc::new(new)); } + /// TODO(doc): @quake pub fn new_snapshot( &self, tip_header: HeaderView, @@ -189,19 +199,23 @@ impl Shared { )) } + /// TODO(doc): @quake pub fn consensus(&self) -> &Consensus { &self.consensus } + /// TODO(doc): @quake pub fn genesis_hash(&self) -> Byte32 { self.consensus.genesis_hash() } + /// TODO(doc): @quake pub fn store(&self) -> &ChainDB { &self.store } } +/// TODO(doc): @quake pub struct SharedBuilder { db: RocksDB, consensus: Option, @@ -229,6 +243,7 @@ impl Default for SharedBuilder { const INIT_DB_VERSION: &str = "20191127135521"; impl SharedBuilder { + /// TODO(doc): @quake pub fn with_db_config(config: &DBConfig) -> Self { let db = RocksDB::open(config, COLUMNS); let mut migrations = Migrations::default(); @@ -249,31 +264,37 @@ impl SharedBuilder { } impl 
SharedBuilder { + /// TODO(doc): @quake pub fn consensus(mut self, value: Consensus) -> Self { self.consensus = Some(value); self } + /// TODO(doc): @quake pub fn tx_pool_config(mut self, config: TxPoolConfig) -> Self { self.tx_pool_config = Some(config); self } + /// TODO(doc): @quake pub fn notify_config(mut self, config: NotifyConfig) -> Self { self.notify_config = Some(config); self } + /// TODO(doc): @quake pub fn store_config(mut self, config: StoreConfig) -> Self { self.store_config = Some(config); self } + /// TODO(doc): @quake pub fn block_assembler_config(mut self, config: Option) -> Self { self.block_assembler_config = config; self } + /// TODO(doc): @quake pub fn build(self) -> Result<(Shared, ProposalTable), Error> { let consensus = self.consensus.unwrap_or_else(Consensus::default); let tx_pool_config = self.tx_pool_config.unwrap_or_else(Default::default); diff --git a/spec/Cargo.toml b/spec/Cargo.toml index ad034161c4..df1b2d547b 100644 --- a/spec/Cargo.toml +++ b/spec/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @zhangsoledad crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/spec/src/consensus.rs b/spec/src/consensus.rs index 47278825fd..e70e67aca0 100644 --- a/spec/src/consensus.rs +++ b/spec/src/consensus.rs @@ -1,3 +1,4 @@ +//! TODO(doc): @zhangsoledad #![allow(clippy::inconsistent_digit_grouping)] use crate::{ @@ -52,11 +53,11 @@ const ORPHAN_RATE_TARGET: RationalU256 = RationalU256::new_raw(U256::one(), u256 const MAX_BLOCK_INTERVAL: u64 = 48; // 48s const MIN_BLOCK_INTERVAL: u64 = 8; // 8s -// cycles of a typical two-in-two-out tx +/// cycles of a typical two-in-two-out tx. pub const TWO_IN_TWO_OUT_CYCLES: Cycle = 3_500_000; -// bytes of a typical two-in-two-out tx +/// bytes of a typical two-in-two-out tx. 
pub const TWO_IN_TWO_OUT_BYTES: u64 = 597; -// count of two-in-two-out txs a block should capable to package +/// count of two-in-two-out txs a block should capable to package. const TWO_IN_TWO_OUT_COUNT: u64 = 1_000; pub(crate) const DEFAULT_EPOCH_DURATION_TARGET: u64 = 4 * 60 * 60; // 4 hours, unit: second const MILLISECONDS_IN_A_SECOND: u64 = 1000; @@ -65,8 +66,10 @@ const MIN_EPOCH_LENGTH: u64 = DEFAULT_EPOCH_DURATION_TARGET / MAX_BLOCK_INTERVAL pub(crate) const DEFAULT_PRIMARY_EPOCH_REWARD_HALVING_INTERVAL: EpochNumber = 4 * 365 * 24 * 60 * 60 / DEFAULT_EPOCH_DURATION_TARGET; // every 4 years +/// TODO(doc): @zhangsoledad pub const MAX_BLOCK_BYTES: u64 = TWO_IN_TWO_OUT_BYTES * TWO_IN_TWO_OUT_COUNT; pub(crate) const MAX_BLOCK_CYCLES: u64 = TWO_IN_TWO_OUT_CYCLES * TWO_IN_TWO_OUT_COUNT; +/// TODO(doc): @zhangsoledad // 1.5 * TWO_IN_TWO_OUT_COUNT pub const MAX_BLOCK_PROPOSALS_LIMIT: u64 = 1_500; const PROPOSER_REWARD_RATIO: Ratio = Ratio(4, 10); @@ -77,12 +80,14 @@ pub(crate) const SATOSHI_PUBKEY_HASH: H160 = h160!("0x62e907b15cbf27d5425399ebf6 // only affects genesis cellbase's satoshi lock cells. 
pub(crate) const SATOSHI_CELL_OCCUPIED_RATIO: Ratio = Ratio(6, 10); +/// TODO(doc): @zhangsoledad #[derive(Clone, PartialEq, Debug, Eq, Copy)] pub struct ProposalWindow(pub BlockNumber, pub BlockNumber); -// "TYPE_ID" in hex +/// "TYPE_ID" in hex pub const TYPE_ID_CODE_HASH: H256 = h256!("0x545950455f4944"); +/// TODO(doc): @zhangsoledad // 500_000 total difficulty const MIN_CHAIN_WORK_500K: U256 = u256!("0x3314412053c82802a7"); // const MIN_CHAIN_WORK_1000K: U256 = u256!("0x6f1e2846acc0c9807d"); @@ -106,19 +111,23 @@ const MIN_CHAIN_WORK_500K: U256 = u256!("0x3314412053c82802a7"); /// impl ProposalWindow { + /// TODO(doc): @zhangsoledad pub fn closest(&self) -> BlockNumber { self.0 } + /// TODO(doc): @zhangsoledad pub fn farthest(&self) -> BlockNumber { self.1 } + /// TODO(doc): @zhangsoledad pub fn length(&self) -> BlockNumber { self.1 - self.0 + 1 } } +/// TODO(doc): @zhangsoledad pub struct ConsensusBuilder { inner: Consensus, } @@ -174,6 +183,7 @@ impl Default for ConsensusBuilder { } } +/// TODO(doc): @zhangsoledad pub fn build_genesis_epoch_ext( epoch_reward: Capacity, compact_target: u32, @@ -200,6 +210,7 @@ pub fn build_genesis_epoch_ext( .build() } +/// TODO(doc): @zhangsoledad pub fn build_genesis_dao_data( txs: Vec<&TransactionView>, satoshi_pubkey_hash: &H160, @@ -218,6 +229,7 @@ pub fn build_genesis_dao_data( } impl ConsensusBuilder { + /// TODO(doc): @zhangsoledad pub fn new(genesis_block: BlockView, genesis_epoch_ext: EpochExt) -> Self { ConsensusBuilder { inner: Consensus { @@ -264,6 +276,7 @@ impl ConsensusBuilder { .map(|type_script| type_script.calc_script_hash()) } + /// TODO(doc): @zhangsoledad pub fn build(mut self) -> Consensus { debug_assert!( self.inner.genesis_block.difficulty() > U256::zero(), @@ -324,84 +337,99 @@ impl ConsensusBuilder { self.inner } + /// TODO(doc): @zhangsoledad pub fn id(mut self, id: String) -> Self { self.inner.id = id; self } + /// TODO(doc): @zhangsoledad pub fn genesis_block(mut self, genesis_block: BlockView) 
-> Self { self.inner.genesis_block = genesis_block; self } + /// TODO(doc): @zhangsoledad #[must_use] pub fn initial_primary_epoch_reward(mut self, initial_primary_epoch_reward: Capacity) -> Self { self.inner.initial_primary_epoch_reward = initial_primary_epoch_reward; self } + /// TODO(doc): @zhangsoledad #[must_use] pub fn secondary_epoch_reward(mut self, secondary_epoch_reward: Capacity) -> Self { self.inner.secondary_epoch_reward = secondary_epoch_reward; self } + /// TODO(doc): @zhangsoledad #[must_use] pub fn max_block_cycles(mut self, max_block_cycles: Cycle) -> Self { self.inner.max_block_cycles = max_block_cycles; self } + /// TODO(doc): @zhangsoledad #[must_use] pub fn max_block_bytes(mut self, max_block_bytes: u64) -> Self { self.inner.max_block_bytes = max_block_bytes; self } + /// TODO(doc): @zhangsoledad #[must_use] pub fn cellbase_maturity(mut self, cellbase_maturity: EpochNumberWithFraction) -> Self { self.inner.cellbase_maturity = cellbase_maturity; self } + /// TODO(doc): @zhangsoledad pub fn tx_proposal_window(mut self, proposal_window: ProposalWindow) -> Self { self.inner.tx_proposal_window = proposal_window; self } + /// TODO(doc): @zhangsoledad pub fn pow(mut self, pow: Pow) -> Self { self.inner.pow = pow; self } + /// TODO(doc): @zhangsoledad pub fn satoshi_pubkey_hash(mut self, pubkey_hash: H160) -> Self { self.inner.satoshi_pubkey_hash = pubkey_hash; self } + /// TODO(doc): @zhangsoledad pub fn satoshi_cell_occupied_ratio(mut self, ratio: Ratio) -> Self { self.inner.satoshi_cell_occupied_ratio = ratio; self } + /// TODO(doc): @zhangsoledad #[must_use] pub fn primary_epoch_reward_halving_interval(mut self, halving_interval: u64) -> Self { self.inner.primary_epoch_reward_halving_interval = halving_interval; self } + /// TODO(doc): @zhangsoledad #[must_use] pub fn epoch_duration_target(mut self, target: u64) -> Self { self.inner.epoch_duration_target = target; self } + /// TODO(doc): @zhangsoledad #[must_use] pub fn 
permanent_difficulty_in_dummy(mut self, permanent: bool) -> Self { self.inner.permanent_difficulty_in_dummy = permanent; self } + /// TODO(doc): @zhangsoledad #[must_use] pub fn max_block_proposals_limit(mut self, max_block_proposals_limit: u64) -> Self { self.inner.max_block_proposals_limit = max_block_proposals_limit; @@ -409,51 +437,80 @@ impl ConsensusBuilder { } } +/// TODO(doc): @zhangsoledad #[derive(Clone, Debug)] pub struct Consensus { + /// TODO(doc): @zhangsoledad pub id: String, + /// TODO(doc): @zhangsoledad pub genesis_block: BlockView, + /// TODO(doc): @zhangsoledad pub genesis_hash: Byte32, + /// TODO(doc): @zhangsoledad pub dao_type_hash: Option, + /// TODO(doc): @zhangsoledad pub secp256k1_blake160_sighash_all_type_hash: Option, + /// TODO(doc): @zhangsoledad pub secp256k1_blake160_multisig_all_type_hash: Option, + /// TODO(doc): @zhangsoledad pub initial_primary_epoch_reward: Capacity, + /// TODO(doc): @zhangsoledad pub secondary_epoch_reward: Capacity, + /// TODO(doc): @zhangsoledad pub max_uncles_num: usize, + /// TODO(doc): @zhangsoledad pub orphan_rate_target: RationalU256, + /// TODO(doc): @zhangsoledad pub epoch_duration_target: u64, + /// TODO(doc): @zhangsoledad pub tx_proposal_window: ProposalWindow, + /// TODO(doc): @zhangsoledad pub proposer_reward_ratio: Ratio, + /// TODO(doc): @zhangsoledad pub pow: Pow, + /// TODO(doc): @zhangsoledad // For each input, if the referenced output transaction is cellbase, // it must have at least `cellbase_maturity` confirmations; // else reject this transaction. 
pub cellbase_maturity: EpochNumberWithFraction, + /// TODO(doc): @zhangsoledad // This parameter indicates the count of past blocks used in the median time calculation pub median_time_block_count: usize, + /// TODO(doc): @zhangsoledad // Maximum cycles that all the scripts in all the commit transactions can take pub max_block_cycles: Cycle, + /// TODO(doc): @zhangsoledad // Maximum number of bytes to use for the entire block pub max_block_bytes: u64, + /// TODO(doc): @zhangsoledad // block version number supported pub block_version: Version, + /// TODO(doc): @zhangsoledad // tx version number supported pub tx_version: Version, + /// TODO(doc): @zhangsoledad // "TYPE_ID" in hex pub type_id_code_hash: H256, + /// TODO(doc): @zhangsoledad // Limit to the number of proposals per block pub max_block_proposals_limit: u64, + /// TODO(doc): @zhangsoledad pub genesis_epoch_ext: EpochExt, + /// TODO(doc): @zhangsoledad // Satoshi's pubkey hash in Bitcoin genesis. pub satoshi_pubkey_hash: H160, + /// TODO(doc): @zhangsoledad // Ratio of satoshi cell occupied of capacity, // only affects genesis cellbase's satoshi lock cells. pub satoshi_cell_occupied_ratio: Ratio, + /// TODO(doc): @zhangsoledad // Primary reward is cut in half every halving_interval epoch // which will occur approximately every 4 years. 
pub primary_epoch_reward_halving_interval: EpochNumber, + /// TODO(doc): @zhangsoledad // Keep difficulty be permanent if the pow is dummy pub permanent_difficulty_in_dummy: bool, + /// TODO(doc): @zhangsoledad // Proof of minimum work during synchronization pub min_chain_work: U256, } @@ -467,18 +524,22 @@ impl Default for Consensus { #[allow(clippy::op_ref)] impl Consensus { + /// TODO(doc): @zhangsoledad pub fn genesis_block(&self) -> &BlockView { &self.genesis_block } + /// TODO(doc): @zhangsoledad pub fn proposer_reward_ratio(&self) -> Ratio { self.proposer_reward_ratio } + /// TODO(doc): @zhangsoledad pub fn finalization_delay_length(&self) -> BlockNumber { self.tx_proposal_window.farthest() + 1 } + /// TODO(doc): @zhangsoledad pub fn finalize_target(&self, block_number: BlockNumber) -> Option { if block_number != 0 { Some(block_number.saturating_sub(self.finalization_delay_length())) @@ -488,109 +549,136 @@ impl Consensus { } } + /// TODO(doc): @zhangsoledad pub fn genesis_hash(&self) -> Byte32 { self.genesis_hash.clone() } + /// TODO(doc): @zhangsoledad pub fn dao_type_hash(&self) -> Option { self.dao_type_hash.clone() } + /// TODO(doc): @zhangsoledad pub fn secp256k1_blake160_sighash_all_type_hash(&self) -> Option { self.secp256k1_blake160_sighash_all_type_hash.clone() } + /// TODO(doc): @zhangsoledad pub fn secp256k1_blake160_multisig_all_type_hash(&self) -> Option { self.secp256k1_blake160_multisig_all_type_hash.clone() } + /// TODO(doc): @zhangsoledad pub fn max_uncles_num(&self) -> usize { self.max_uncles_num } + /// TODO(doc): @zhangsoledad pub fn min_difficulty(&self) -> U256 { self.genesis_block.difficulty() } + /// TODO(doc): @zhangsoledad pub fn initial_primary_epoch_reward(&self) -> Capacity { self.initial_primary_epoch_reward } + /// TODO(doc): @zhangsoledad pub fn primary_epoch_reward(&self, epoch_number: u64) -> Capacity { let halvings = epoch_number / self.primary_epoch_reward_halving_interval(); 
Capacity::shannons(self.initial_primary_epoch_reward.as_u64() >> halvings) } + /// TODO(doc): @zhangsoledad pub fn primary_epoch_reward_halving_interval(&self) -> EpochNumber { self.primary_epoch_reward_halving_interval } + /// TODO(doc): @zhangsoledad pub fn epoch_duration_target(&self) -> u64 { self.epoch_duration_target } + /// TODO(doc): @zhangsoledad pub fn genesis_epoch_ext(&self) -> &EpochExt { &self.genesis_epoch_ext } + /// TODO(doc): @zhangsoledad pub fn max_epoch_length(&self) -> BlockNumber { MAX_EPOCH_LENGTH } + /// TODO(doc): @zhangsoledad pub fn min_epoch_length(&self) -> BlockNumber { MIN_EPOCH_LENGTH } + /// TODO(doc): @zhangsoledad pub fn secondary_epoch_reward(&self) -> Capacity { self.secondary_epoch_reward } + /// TODO(doc): @zhangsoledad pub fn orphan_rate_target(&self) -> &RationalU256 { &self.orphan_rate_target } + /// TODO(doc): @zhangsoledad pub fn pow_engine(&self) -> Arc { self.pow.engine() } + /// TODO(doc): @zhangsoledad pub fn permanent_difficulty(&self) -> bool { self.pow.is_dummy() && self.permanent_difficulty_in_dummy } + /// TODO(doc): @zhangsoledad pub fn cellbase_maturity(&self) -> EpochNumberWithFraction { self.cellbase_maturity } + /// TODO(doc): @zhangsoledad pub fn median_time_block_count(&self) -> usize { self.median_time_block_count } + /// TODO(doc): @zhangsoledad pub fn max_block_cycles(&self) -> Cycle { self.max_block_cycles } + /// TODO(doc): @zhangsoledad pub fn max_block_bytes(&self) -> u64 { self.max_block_bytes } + /// TODO(doc): @zhangsoledad pub fn max_block_proposals_limit(&self) -> u64 { self.max_block_proposals_limit } + /// TODO(doc): @zhangsoledad pub fn block_version(&self) -> Version { self.block_version } + /// TODO(doc): @zhangsoledad pub fn tx_version(&self) -> Version { self.tx_version } + /// TODO(doc): @zhangsoledad pub fn type_id_code_hash(&self) -> &H256 { &self.type_id_code_hash } + /// TODO(doc): @zhangsoledad pub fn tx_proposal_window(&self) -> ProposalWindow { self.tx_proposal_window } + /// 
TODO(doc): @zhangsoledad pub fn bounding_hash_rate( &self, last_epoch_hash_rate: U256, @@ -612,6 +700,7 @@ impl Consensus { last_epoch_hash_rate } + /// TODO(doc): @zhangsoledad pub fn bounding_epoch_length( &self, length: BlockNumber, @@ -628,6 +717,7 @@ impl Consensus { } } + /// TODO(doc): @zhangsoledad pub fn next_epoch_ext( &self, last_epoch: &EpochExt, @@ -771,11 +861,13 @@ impl Consensus { Some(epoch_ext) } + /// TODO(doc): @zhangsoledad pub fn identify_name(&self) -> String { let genesis_hash = format!("{:x}", Unpack::::unpack(&self.genesis_hash)); format!("/{}/{}", self.id, &genesis_hash[..8]) } + /// TODO(doc): @zhangsoledad pub fn get_secp_type_script_hash(&self) -> Byte32 { let secp_cell_data = Resource::bundled("specs/cells/secp256k1_blake160_sighash_all".to_string()) diff --git a/spec/src/error.rs b/spec/src/error.rs index b15f90d1f1..42c3d7c37b 100644 --- a/spec/src/error.rs +++ b/spec/src/error.rs @@ -2,19 +2,28 @@ use ckb_error::{Error, ErrorKind}; use ckb_types::packed::Byte32; use failure::Fail; +/// TODO(doc): @zhangsoledad #[derive(Fail, Debug, Clone, Eq, PartialEq)] pub enum SpecError { + /// TODO(doc): @zhangsoledad #[fail(display = "FileNotFound")] FileNotFound(String), + /// TODO(doc): @zhangsoledad #[fail(display = "ChainNameNotAllowed: {}", _0)] ChainNameNotAllowed(String), + /// TODO(doc): @zhangsoledad #[fail( display = "GenesisMismatch(expected: {}, actual: {})", expected, actual )] - GenesisMismatch { expected: Byte32, actual: Byte32 }, + GenesisMismatch { + /// TODO(doc): @zhangsoledad + expected: Byte32, + /// TODO(doc): @zhangsoledad + actual: Byte32, + }, } impl From for Error { diff --git a/spec/src/lib.rs b/spec/src/lib.rs index 6b146fb939..eb1414b01c 100644 --- a/spec/src/lib.rs +++ b/spec/src/lib.rs @@ -47,20 +47,30 @@ mod error; const SPECIAL_CELL_PRIVKEY: H256 = h256!("0xd0c5c1e2d5af8b6ced3c0800937f996c1fa38c29186cade0cd8b5a73c97aaca3"); +/// TODO(doc): @zhangsoledad pub const OUTPUT_INDEX_SECP256K1_BLAKE160_SIGHASH_ALL: u64 
= 1; +/// TODO(doc): @zhangsoledad pub const OUTPUT_INDEX_DAO: u64 = 2; +/// TODO(doc): @zhangsoledad pub const OUTPUT_INDEX_SECP256K1_DATA: u64 = 3; +/// TODO(doc): @zhangsoledad pub const OUTPUT_INDEX_SECP256K1_BLAKE160_MULTISIG_ALL: u64 = 4; +/// TODO(doc): @zhangsoledad #[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] pub struct ChainSpec { + /// TODO(doc): @zhangsoledad pub name: String, + /// TODO(doc): @zhangsoledad pub genesis: Genesis, + /// TODO(doc): @zhangsoledad #[serde(default)] pub params: Params, + /// TODO(doc): @zhangsoledad pub pow: Pow, } +/// TODO(doc): @zhangsoledad pub mod default_params { use crate::consensus::{ CELLBASE_MATURITY, DEFAULT_EPOCH_DURATION_TARGET, @@ -70,63 +80,83 @@ pub mod default_params { }; use ckb_types::core::{Capacity, Cycle, EpochNumber}; + /// TODO(doc): @zhangsoledad pub fn initial_primary_epoch_reward() -> Capacity { INITIAL_PRIMARY_EPOCH_REWARD } + /// TODO(doc): @zhangsoledad pub fn secondary_epoch_reward() -> Capacity { DEFAULT_SECONDARY_EPOCH_REWARD } + /// TODO(doc): @zhangsoledad pub fn max_block_cycles() -> Cycle { MAX_BLOCK_CYCLES } + /// TODO(doc): @zhangsoledad pub fn max_block_bytes() -> u64 { MAX_BLOCK_BYTES } + /// TODO(doc): @zhangsoledad pub fn cellbase_maturity() -> u64 { CELLBASE_MATURITY.full_value() } + /// TODO(doc): @zhangsoledad pub fn primary_epoch_reward_halving_interval() -> EpochNumber { DEFAULT_PRIMARY_EPOCH_REWARD_HALVING_INTERVAL } + /// TODO(doc): @zhangsoledad pub fn epoch_duration_target() -> u64 { DEFAULT_EPOCH_DURATION_TARGET } + /// TODO(doc): @zhangsoledad pub fn genesis_epoch_length() -> u64 { GENESIS_EPOCH_LENGTH } + /// TODO(doc): @zhangsoledad pub fn max_block_proposals_limit() -> u64 { MAX_BLOCK_PROPOSALS_LIMIT } } +/// TODO(doc): @zhangsoledad #[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] pub struct Params { + /// TODO(doc): @zhangsoledad #[serde(default = "default_params::initial_primary_epoch_reward")] pub initial_primary_epoch_reward: 
Capacity, + /// TODO(doc): @zhangsoledad #[serde(default = "default_params::secondary_epoch_reward")] pub secondary_epoch_reward: Capacity, + /// TODO(doc): @zhangsoledad #[serde(default = "default_params::max_block_cycles")] pub max_block_cycles: Cycle, + /// TODO(doc): @zhangsoledad #[serde(default = "default_params::max_block_bytes")] pub max_block_bytes: u64, + /// TODO(doc): @zhangsoledad #[serde(default = "default_params::cellbase_maturity")] pub cellbase_maturity: u64, + /// TODO(doc): @zhangsoledad #[serde(default = "default_params::primary_epoch_reward_halving_interval")] pub primary_epoch_reward_halving_interval: EpochNumber, + /// TODO(doc): @zhangsoledad #[serde(default = "default_params::epoch_duration_target")] pub epoch_duration_target: u64, + /// TODO(doc): @zhangsoledad #[serde(default = "default_params::genesis_epoch_length")] pub genesis_epoch_length: BlockNumber, + /// TODO(doc): @zhangsoledad #[serde(default)] pub permanent_difficulty_in_dummy: bool, + /// TODO(doc): @zhangsoledad #[serde(default = "default_params::max_block_proposals_limit")] pub max_block_proposals_limit: u64, } @@ -149,55 +179,86 @@ impl Default for Params { } } +/// TODO(doc): @zhangsoledad #[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] pub struct Genesis { + /// TODO(doc): @zhangsoledad pub version: u32, + /// TODO(doc): @zhangsoledad pub parent_hash: H256, + /// TODO(doc): @zhangsoledad pub timestamp: u64, + /// TODO(doc): @zhangsoledad pub compact_target: u32, + /// TODO(doc): @zhangsoledad pub uncles_hash: H256, + /// TODO(doc): @zhangsoledad pub hash: Option, + /// TODO(doc): @zhangsoledad pub nonce: U128, + /// TODO(doc): @zhangsoledad pub issued_cells: Vec, + /// TODO(doc): @zhangsoledad pub genesis_cell: GenesisCell, + /// TODO(doc): @zhangsoledad pub system_cells: Vec, + /// TODO(doc): @zhangsoledad pub system_cells_lock: Script, + /// TODO(doc): @zhangsoledad pub bootstrap_lock: Script, + /// TODO(doc): @zhangsoledad pub dep_groups: Vec, + /// 
TODO(doc): @zhangsoledad #[serde(default)] pub satoshi_gift: SatoshiGift, } +/// TODO(doc): @zhangsoledad #[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] pub struct SystemCell { + /// TODO(doc): @zhangsoledad // NOTE: must put `create_type_id` before `file` otherwise this struct can not serialize pub create_type_id: bool, + /// TODO(doc): @zhangsoledad // Overwrite the cell capacity. Set to None to use the minimal capacity. pub capacity: Option, + /// TODO(doc): @zhangsoledad pub file: Resource, } +/// TODO(doc): @zhangsoledad #[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] pub struct GenesisCell { + /// TODO(doc): @zhangsoledad pub message: String, + /// TODO(doc): @zhangsoledad pub lock: Script, } +/// TODO(doc): @zhangsoledad #[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] pub struct IssuedCell { + /// TODO(doc): @zhangsoledad pub capacity: Capacity, + /// TODO(doc): @zhangsoledad pub lock: Script, } +/// TODO(doc): @zhangsoledad #[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] pub struct DepGroupResource { + /// TODO(doc): @zhangsoledad pub name: String, + /// TODO(doc): @zhangsoledad pub files: Vec, } +/// TODO(doc): @zhangsoledad #[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] pub struct SatoshiGift { + /// TODO(doc): @zhangsoledad pub satoshi_pubkey_hash: H160, + /// TODO(doc): @zhangsoledad pub satoshi_cell_occupied_ratio: Ratio, } @@ -210,10 +271,18 @@ impl Default for SatoshiGift { } } +/// TODO(doc): @zhangsoledad #[derive(Debug)] pub enum SpecLoadError { + /// TODO(doc): @zhangsoledad FileNotFound, - GenesisMismatch { expect: H256, actual: H256 }, + /// TODO(doc): @zhangsoledad + GenesisMismatch { + /// TODO(doc): @zhangsoledad + expect: H256, + /// TODO(doc): @zhangsoledad + actual: H256, + }, } impl SpecLoadError { @@ -242,6 +311,7 @@ impl fmt::Display for SpecLoadError { } impl ChainSpec { + /// TODO(doc): @zhangsoledad pub fn load_from(resource: &Resource) -> Result> { if 
!resource.exists() { return Err(SpecLoadError::file_not_found()); @@ -258,6 +328,7 @@ impl ChainSpec { Ok(spec) } + /// TODO(doc): @zhangsoledad pub fn pow_engine(&self) -> Arc { self.pow.engine() } @@ -272,6 +343,7 @@ impl ChainSpec { Ok(()) } + /// TODO(doc): @zhangsoledad pub fn build_consensus(&self) -> Result> { let genesis_epoch_ext = build_genesis_epoch_ext( self.params.initial_primary_epoch_reward, @@ -305,6 +377,7 @@ impl ChainSpec { Ok(consensus) } + /// TODO(doc): @zhangsoledad pub fn build_genesis(&self) -> Result> { let special_cell_capacity = { let cellbase_transaction_for_special_cell_capacity = @@ -684,10 +757,12 @@ fn secp_lock_arg(privkey: &Privkey) -> Bytes { Bytes::from((&blake2b_256(&pubkey_data)[0..20]).to_owned()) } +/// TODO(doc): @zhangsoledad pub fn build_genesis_type_id_script(output_index: u64) -> packed::Script { build_type_id_script(&packed::CellInput::new_cellbase_input(0), output_index) } +/// TODO(doc): @zhangsoledad pub fn build_type_id_script(input: &packed::CellInput, output_index: u64) -> packed::Script { let mut blake2b = new_blake2b(); blake2b.update(&input.as_slice()); @@ -702,6 +777,7 @@ pub fn build_type_id_script(input: &packed::CellInput, output_index: u64) -> pac .build() } +/// TODO(doc): @zhangsoledad pub fn calculate_block_reward(epoch_reward: Capacity, epoch_length: BlockNumber) -> Capacity { let epoch_reward = epoch_reward.as_u64(); Capacity::shannons({ diff --git a/src/main.rs b/src/main.rs index f0eac6fa54..906defee75 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,3 +1,4 @@ +//! 
TODO(doc): @doitian use ckb_bin::run_app; use ckb_build_info::Version; diff --git a/store/Cargo.toml b/store/Cargo.toml index b736e3adf7..ef654bc399 100644 --- a/store/Cargo.toml +++ b/store/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" authors = ["Nervos Core Dev "] edition = "2018" license = "MIT" -description = "TODO(doc): crate description" +description = "TODO(doc): @quake crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/store/src/cache.rs b/store/src/cache.rs index 4008dc67ab..07d3156b0e 100644 --- a/store/src/cache.rs +++ b/store/src/cache.rs @@ -7,12 +7,19 @@ use ckb_types::{ use ckb_util::Mutex; use lru::LruCache; +/// TODO(doc): @quake pub struct StoreCache { + /// TODO(doc): @quake pub headers: Mutex>, + /// TODO(doc): @quake pub cell_data: Mutex, (Bytes, Byte32)>>, + /// TODO(doc): @quake pub block_proposals: Mutex>, + /// TODO(doc): @quake pub block_tx_hashes: Mutex>>, + /// TODO(doc): @quake pub block_uncles: Mutex>, + /// TODO(doc): @quake pub cellbase: Mutex>, } @@ -23,6 +30,7 @@ impl Default for StoreCache { } impl StoreCache { + /// TODO(doc): @quake pub fn from_config(config: StoreConfig) -> Self { StoreCache { headers: Mutex::new(LruCache::new(config.header_cache_size)), diff --git a/store/src/cell.rs b/store/src/cell.rs index 689d708466..597180bec5 100644 --- a/store/src/cell.rs +++ b/store/src/cell.rs @@ -83,7 +83,7 @@ pub fn attach_block_cell(txn: &StoreTransaction, block: &BlockView) -> Result<() Ok(()) } -// Undo the effects of this block on the live cell set. +/// Undoes the effects of this block on the live cell set. 
pub fn detach_block_cell(txn: &StoreTransaction, block: &BlockView) -> Result<(), Error> { let transactions = block.transactions(); let mut input_pts = HashMap::with_capacity(transactions.len()); diff --git a/store/src/data_loader_wrapper.rs b/store/src/data_loader_wrapper.rs index 8894831720..740edfa399 100644 --- a/store/src/data_loader_wrapper.rs +++ b/store/src/data_loader_wrapper.rs @@ -1,3 +1,4 @@ +//! TODO(doc): @quake use crate::ChainStore; use ckb_traits::{CellDataProvider, HeaderProvider}; use ckb_types::{ @@ -6,8 +7,10 @@ use ckb_types::{ packed::{Byte32, OutPoint}, }; +/// TODO(doc): @quake pub struct DataLoaderWrapper<'a, T>(&'a T); impl<'a, T: ChainStore<'a>> DataLoaderWrapper<'a, T> { + /// TODO(doc): @quake pub fn new(source: &'a T) -> Self { DataLoaderWrapper(source) } diff --git a/store/src/db.rs b/store/src/db.rs index 27ceba422f..2f128b14f4 100644 --- a/store/src/db.rs +++ b/store/src/db.rs @@ -14,6 +14,7 @@ use ckb_error::Error; use ckb_types::core::BlockExt; use std::sync::Arc; +/// TODO(doc): @quake #[derive(Clone)] pub struct ChainDB { db: RocksDB, @@ -39,6 +40,7 @@ impl<'a> ChainStore<'a> for ChainDB { } impl ChainDB { + /// TODO(doc): @quake pub fn new(db: RocksDB, config: StoreConfig) -> Self { let cache = StoreCache::from_config(config); ChainDB { @@ -47,14 +49,17 @@ impl ChainDB { } } + /// TODO(doc): @quake pub fn db(&self) -> &RocksDB { &self.db } + /// TODO(doc): @quake pub fn into_inner(self) -> RocksDB { self.db } + /// TODO(doc): @quake pub fn begin_transaction(&self) -> StoreTransaction { StoreTransaction { inner: self.db.transaction(), @@ -62,6 +67,7 @@ impl ChainDB { } } + /// TODO(doc): @quake pub fn get_snapshot(&self) -> StoreSnapshot { StoreSnapshot { inner: self.db.get_snapshot(), @@ -69,16 +75,19 @@ impl ChainDB { } } + /// TODO(doc): @quake pub fn new_write_batch(&self) -> StoreWriteBatch { StoreWriteBatch { inner: self.db.new_write_batch(), } } + /// TODO(doc): @quake pub fn write(&self, write_batch: &StoreWriteBatch) 
-> Result<(), Error> { self.db.write(&write_batch.inner) } + /// TODO(doc): @quake pub fn init(&self, consensus: &Consensus) -> Result<(), Error> { let genesis = consensus.genesis_block(); let epoch = consensus.genesis_epoch_ext(); diff --git a/store/src/lib.rs b/store/src/lib.rs index 8c0d56e551..b9b990fb4b 100644 --- a/store/src/lib.rs +++ b/store/src/lib.rs @@ -1,3 +1,4 @@ +//! TODO(doc): @quake mod cache; mod cell; pub mod data_loader_wrapper; @@ -17,20 +18,36 @@ pub use write_batch::StoreWriteBatch; use ckb_db::Col; +/// TODO(doc): @quake pub const COLUMNS: u32 = 13; +/// TODO(doc): @quake pub const COLUMN_INDEX: Col = "0"; +/// TODO(doc): @quake pub const COLUMN_BLOCK_HEADER: Col = "1"; +/// TODO(doc): @quake pub const COLUMN_BLOCK_BODY: Col = "2"; +/// TODO(doc): @quake pub const COLUMN_BLOCK_UNCLE: Col = "3"; +/// TODO(doc): @quake pub const COLUMN_META: Col = "4"; +/// TODO(doc): @quake pub const COLUMN_TRANSACTION_INFO: Col = "5"; +/// TODO(doc): @quake pub const COLUMN_BLOCK_EXT: Col = "6"; +/// TODO(doc): @quake pub const COLUMN_BLOCK_PROPOSAL_IDS: Col = "7"; +/// TODO(doc): @quake pub const COLUMN_BLOCK_EPOCH: Col = "8"; +/// TODO(doc): @quake pub const COLUMN_EPOCH: Col = "9"; +/// TODO(doc): @quake pub const COLUMN_CELL: Col = "10"; +/// TODO(doc): @quake pub const COLUMN_UNCLES: Col = "11"; +/// TODO(doc): @quake pub const COLUMN_CELL_DATA: Col = "12"; +/// TODO(doc): @quake pub const META_TIP_HEADER_KEY: &[u8] = b"TIP_HEADER"; +/// TODO(doc): @quake pub const META_CURRENT_EPOCH_KEY: &[u8] = b"CURRENT_EPOCH"; diff --git a/store/src/snapshot.rs b/store/src/snapshot.rs index 51e4895546..06b6b3c4d2 100644 --- a/store/src/snapshot.rs +++ b/store/src/snapshot.rs @@ -6,6 +6,7 @@ use ckb_db::{ }; use std::sync::Arc; +/// TODO(doc): @quake pub struct StoreSnapshot { pub(crate) inner: RocksDBSnapshot, pub(crate) cache: Arc, diff --git a/store/src/store.rs b/store/src/store.rs index 4d97f3fa98..c3437d21e9 100644 --- a/store/src/store.rs +++ 
b/store/src/store.rs @@ -23,11 +23,17 @@ use ckb_types::{ pub struct CellProviderWrapper<'a, S>(&'a S); +/// TODO(doc): @quake pub trait ChainStore<'a>: Send + Sync + Sized { + /// TODO(doc): @quake type Vector: AsRef<[u8]>; + /// TODO(doc): @quake fn cache(&'a self) -> Option<&'a StoreCache>; + /// TODO(doc): @quake fn get(&'a self, col: Col, key: &[u8]) -> Option; + /// TODO(doc): @quake fn get_iter(&self, col: Col, mode: IteratorMode) -> DBIter; + /// TODO(doc): @quake fn cell_provider(&self) -> CellProviderWrapper { CellProviderWrapper(self) } @@ -182,10 +188,12 @@ pub trait ChainStore<'a>: Send + Sync + Sized { .map(|raw| packed::Uint64Reader::from_slice_should_be_ok(&raw.as_ref()).unpack()) } + /// TODO(doc): @quake fn is_main_chain(&'a self, hash: &packed::Byte32) -> bool { self.get(COLUMN_INDEX, hash.as_slice()).is_some() } + /// TODO(doc): @quake fn get_tip_header(&'a self) -> Option { self.get(COLUMN_META, META_TIP_HEADER_KEY) .and_then(|raw| { @@ -205,6 +213,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { .map(|(tx, tx_info)| (tx, tx_info.block_hash)) } + /// TODO(doc): @quake fn get_transaction_with_info( &'a self, hash: &packed::Byte32, @@ -218,6 +227,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { }) } + /// TODO(doc): @quake fn get_transaction_info(&'a self, hash: &packed::Byte32) -> Option { self.get(COLUMN_TRANSACTION_INFO, hash.as_slice()) .map(|slice| { @@ -227,6 +237,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { }) } + /// TODO(doc): @quake fn get_cell(&'a self, out_point: &OutPoint) -> Option { let key = out_point.to_cell_key(); self.get(COLUMN_CELL, &key).map(|slice| { @@ -235,6 +246,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { }) } + /// TODO(doc): @quake fn get_cell_data(&'a self, out_point: &OutPoint) -> Option<(Bytes, packed::Byte32)> { let key = out_point.to_cell_key(); if let Some(cache) = self.cache() { @@ -273,41 +285,43 @@ pub trait ChainStore<'a>: Send + Sync + Sized { } } - // Get current epoch ext + /// 
Gets current epoch ext fn get_current_epoch_ext(&'a self) -> Option { self.get(COLUMN_META, META_CURRENT_EPOCH_KEY) .map(|slice| packed::EpochExtReader::from_slice_should_be_ok(&slice.as_ref()).unpack()) } - // Get epoch ext by epoch index + /// Gets epoch ext by epoch index fn get_epoch_ext(&'a self, hash: &packed::Byte32) -> Option { self.get(COLUMN_EPOCH, hash.as_slice()) .map(|slice| packed::EpochExtReader::from_slice_should_be_ok(&slice.as_ref()).unpack()) } - // Get epoch index by epoch number + /// Gets epoch index by epoch number fn get_epoch_index(&'a self, number: EpochNumber) -> Option { let epoch_number: packed::Uint64 = number.pack(); self.get(COLUMN_EPOCH, epoch_number.as_slice()) .map(|raw| packed::Byte32Reader::from_slice_should_be_ok(&raw.as_ref()).to_entity()) } - // Get epoch index by block hash + /// Gets epoch index by block hash fn get_block_epoch_index(&'a self, block_hash: &packed::Byte32) -> Option { self.get(COLUMN_BLOCK_EPOCH, block_hash.as_slice()) .map(|raw| packed::Byte32Reader::from_slice_should_be_ok(&raw.as_ref()).to_entity()) } + /// TODO(doc): @quake fn get_block_epoch(&'a self, hash: &packed::Byte32) -> Option { self.get_block_epoch_index(hash) .and_then(|index| self.get_epoch_ext(&index)) } + /// TODO(doc): @quake fn is_uncle(&'a self, hash: &packed::Byte32) -> bool { self.get(COLUMN_UNCLES, hash.as_slice()).is_some() } - /// Get header by uncle header hash + /// Gets header by uncle header hash fn get_uncle_header(&'a self, hash: &packed::Byte32) -> Option { self.get(COLUMN_UNCLES, hash.as_slice()).map(|slice| { let reader = packed::HeaderViewReader::from_slice_should_be_ok(&slice.as_ref()); @@ -315,6 +329,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { }) } + /// TODO(doc): @quake fn block_exists(&'a self, hash: &packed::Byte32) -> bool { if let Some(cache) = self.cache() { if cache.headers.lock().get(hash).is_some() { @@ -324,7 +339,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { self.get(COLUMN_BLOCK_HEADER, 
hash.as_slice()).is_some() } - // Get cellbase by block hash + /// Gets cellbase by block hash fn get_cellbase(&'a self, hash: &packed::Byte32) -> Option { if let Some(cache) = self.cache() { if let Some(data) = cache.cellbase.lock().get(hash) { @@ -348,6 +363,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { } } + /// TODO(doc): @quake fn next_epoch_ext( &'a self, consensus: &Consensus, @@ -362,6 +378,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { ) } + /// TODO(doc): @quake fn get_packed_block(&'a self, hash: &packed::Byte32) -> Option { self.get_packed_block_header(hash).map(|header| { let txs = { @@ -402,6 +419,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { }) } + /// TODO(doc): @quake fn get_packed_block_header(&'a self, hash: &packed::Byte32) -> Option { self.get(COLUMN_BLOCK_HEADER, hash.as_slice()).map(|slice| { let reader = packed::HeaderViewReader::from_slice_should_be_ok(&slice.as_ref()); diff --git a/store/src/transaction.rs b/store/src/transaction.rs index 5b39f96a49..86c6d42ad2 100644 --- a/store/src/transaction.rs +++ b/store/src/transaction.rs @@ -18,6 +18,7 @@ use ckb_types::{ }; use std::sync::Arc; +/// TODO(doc): @quake pub struct StoreTransaction { pub(crate) inner: RocksDBTransaction, pub(crate) cache: Arc, @@ -65,18 +66,22 @@ impl<'a> ChainStore<'a> for StoreTransactionSnapshot<'a> { } impl StoreTransaction { + /// TODO(doc): @quake pub fn insert_raw(&self, col: Col, key: &[u8], value: &[u8]) -> Result<(), Error> { self.inner.put(col, key, value) } + /// TODO(doc): @quake pub fn delete(&self, col: Col, key: &[u8]) -> Result<(), Error> { self.inner.delete(col, key) } + /// TODO(doc): @quake pub fn commit(&self) -> Result<(), Error> { self.inner.commit() } + /// TODO(doc): @quake pub fn get_snapshot(&self) -> StoreTransactionSnapshot<'_> { StoreTransactionSnapshot { inner: self.inner.get_snapshot(), @@ -84,6 +89,7 @@ impl StoreTransaction { } } + /// TODO(doc): @quake pub fn get_update_for_tip_hash( &self, snapshot: 
&StoreTransactionSnapshot<'_>, @@ -94,10 +100,12 @@ impl StoreTransaction { .map(|slice| packed::Byte32Reader::from_slice_should_be_ok(&slice.as_ref()).to_entity()) } + /// TODO(doc): @quake pub fn insert_tip_header(&self, h: &HeaderView) -> Result<(), Error> { self.insert_raw(COLUMN_META, META_TIP_HEADER_KEY, h.hash().as_slice()) } + /// TODO(doc): @quake pub fn insert_block(&self, block: &BlockView) -> Result<(), Error> { let hash = block.hash(); let header = block.header().pack(); @@ -121,6 +129,7 @@ impl StoreTransaction { Ok(()) } + /// TODO(doc): @quake pub fn delete_block(&self, hash: &packed::Byte32, txs_len: usize) -> Result<(), Error> { self.delete(COLUMN_BLOCK_HEADER, hash.as_slice())?; self.delete(COLUMN_BLOCK_UNCLE, hash.as_slice())?; @@ -137,6 +146,7 @@ impl StoreTransaction { Ok(()) } + /// TODO(doc): @quake pub fn insert_block_ext( &self, block_hash: &packed::Byte32, @@ -149,6 +159,7 @@ impl StoreTransaction { ) } + /// TODO(doc): @quake pub fn attach_block(&self, block: &BlockView) -> Result<(), Error> { let header = block.data().header(); let block_hash = block.hash(); @@ -176,6 +187,7 @@ impl StoreTransaction { self.insert_raw(COLUMN_INDEX, block_hash.as_slice(), block_number.as_slice()) } + /// TODO(doc): @quake pub fn detach_block(&self, block: &BlockView) -> Result<(), Error> { for tx_hash in block.tx_hashes().iter() { self.delete(COLUMN_TRANSACTION_INFO, tx_hash.as_slice())?; @@ -188,6 +200,7 @@ impl StoreTransaction { self.delete(COLUMN_INDEX, block.hash().as_slice()) } + /// TODO(doc): @quake pub fn insert_block_epoch_index( &self, block_hash: &packed::Byte32, @@ -200,16 +213,19 @@ impl StoreTransaction { ) } + /// TODO(doc): @quake pub fn insert_epoch_ext(&self, hash: &packed::Byte32, epoch: &EpochExt) -> Result<(), Error> { self.insert_raw(COLUMN_EPOCH, hash.as_slice(), epoch.pack().as_slice())?; let epoch_number: packed::Uint64 = epoch.number().pack(); self.insert_raw(COLUMN_EPOCH, epoch_number.as_slice(), hash.as_slice()) } + /// 
TODO(doc): @quake pub fn insert_current_epoch_ext(&self, epoch: &EpochExt) -> Result<(), Error> { self.insert_raw(COLUMN_META, META_CURRENT_EPOCH_KEY, epoch.pack().as_slice()) } + /// TODO(doc): @quake pub fn insert_cells( &self, cells: impl Iterator< @@ -232,6 +248,7 @@ impl StoreTransaction { Ok(()) } + /// TODO(doc): @quake pub fn delete_cells( &self, out_points: impl Iterator, diff --git a/store/src/write_batch.rs b/store/src/write_batch.rs index 5af8a14392..af486fb7ec 100644 --- a/store/src/write_batch.rs +++ b/store/src/write_batch.rs @@ -4,15 +4,18 @@ use ckb_db::RocksDBWriteBatch; use ckb_error::Error; use ckb_types::{packed, prelude::*}; +/// TODO(doc): @quake pub struct StoreWriteBatch { pub(crate) inner: RocksDBWriteBatch, } impl StoreWriteBatch { + /// TODO(doc): @quake pub fn put(&mut self, col: Col, key: &[u8], value: &[u8]) -> Result<(), Error> { self.inner.put(col, key, value) } + /// TODO(doc): @quake pub fn delete(&mut self, col: Col, key: &[u8]) -> Result<(), Error> { self.inner.delete(col, key) } @@ -22,18 +25,22 @@ impl StoreWriteBatch { self.inner.size_in_bytes() } + /// TODO(doc): @quake pub fn len(&self) -> usize { self.inner.len() } + /// TODO(doc): @quake pub fn is_empty(&self) -> bool { self.inner.is_empty() } + /// TODO(doc): @quake pub fn clear(&mut self) -> Result<(), Error> { self.inner.clear() } + /// TODO(doc): @quake pub fn insert_cells( &mut self, cells: impl Iterator< @@ -56,6 +63,7 @@ impl StoreWriteBatch { Ok(()) } + /// TODO(doc): @quake pub fn delete_cells( &mut self, out_points: impl Iterator, diff --git a/sync/Cargo.toml b/sync/Cargo.toml index c36d444048..f19d78cc1e 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @driftluo crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff 
--git a/sync/src/lib.rs b/sync/src/lib.rs index e57e0e0cf8..0a5709a9a1 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -21,17 +21,27 @@ pub use crate::synchronizer::Synchronizer; pub use crate::types::SyncShared; use std::time::Duration; +/// TODO(doc): @driftluo pub const MAX_HEADERS_LEN: usize = 2_000; +/// TODO(doc): @driftluo pub const MAX_INVENTORY_LEN: usize = 50_000; +/// TODO(doc): @driftluo pub const MAX_SCHEDULED_LEN: usize = 4 * 1024; +/// TODO(doc): @driftluo pub const MAX_BLOCKS_TO_ANNOUNCE: usize = 8; +/// TODO(doc): @driftluo pub const MAX_UNCONNECTING_HEADERS: usize = 10; +/// TODO(doc): @driftluo pub const MAX_TIP_AGE: u64 = 24 * 60 * 60 * 1000; +/// TODO(doc): @driftluo pub const STALE_RELAY_AGE_LIMIT: u64 = 30 * 24 * 60 * 60 * 1000; +/// TODO(doc): @driftluo /* About Download Scheduler */ pub const INIT_BLOCKS_IN_TRANSIT_PER_PEER: usize = 16; +/// TODO(doc): @driftluo pub const MAX_BLOCKS_IN_TRANSIT_PER_PEER: usize = 128; +/// TODO(doc): @driftluo pub const CHECK_POINT_WINDOW: u64 = (MAX_BLOCKS_IN_TRANSIT_PER_PEER * 4) as u64; // Time recording window size, ibd period scheduler dynamically adjusts frequency @@ -46,8 +56,10 @@ pub(crate) const LOW_INDEX: usize = TIME_TRACE_SIZE * 9 / 10; pub(crate) const LOG_TARGET_RELAY: &str = "ckb_relay"; +/// TODO(doc): @driftluo // Inspect the headers downloading every 2 minutes pub const HEADERS_DOWNLOAD_INSPECT_WINDOW: u64 = 2 * 60 * 1000; +/// TODO(doc): @driftluo // Global Average Speed // Expect 300 KiB/second // = 1600 headers/second (300*1024/192) @@ -55,31 +67,43 @@ pub const HEADERS_DOWNLOAD_INSPECT_WINDOW: u64 = 2 * 60 * 1000; // = 11.11 days-in-blockchain/minute-in-reality (96000*10/60/60/24) // => Sync 1 year headers in blockchain will be in 32.85 minutes (365/11.11) in reality pub const HEADERS_DOWNLOAD_HEADERS_PER_SECOND: u64 = 1600; +/// TODO(doc): @driftluo // Acceptable Lowest Instantaneous Speed: 75.0 KiB/second (300/4) pub const HEADERS_DOWNLOAD_TOLERABLE_BIAS_FOR_SINGLE_SAMPLE: u64 = 
4; +/// TODO(doc): @driftluo pub const POW_INTERVAL: u64 = 10; +/// TODO(doc): @driftluo // Protect at least this many outbound peers from disconnection due to slow // behind headers chain. pub const MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT: usize = 4; +/// TODO(doc): @driftluo pub const CHAIN_SYNC_TIMEOUT: u64 = 12 * 60 * 1000; // 12 minutes +/// TODO(doc): @driftluo pub const SUSPEND_SYNC_TIME: u64 = 5 * 60 * 1000; // 5 minutes +/// TODO(doc): @driftluo pub const EVICTION_HEADERS_RESPONSE_TIME: u64 = 120 * 1000; // 2 minutes +/// TODO(doc): @driftluo //The maximum number of entries in a locator pub const MAX_LOCATOR_SIZE: usize = 101; +/// TODO(doc): @driftluo pub const BLOCK_DOWNLOAD_TIMEOUT: u64 = 30 * 1000; // 30s +/// TODO(doc): @driftluo // Size of the "block download window": how far ahead of our current height do we fetch? // Larger windows tolerate larger download speed differences between peers, but increase the // potential degree of disordering of blocks. pub const BLOCK_DOWNLOAD_WINDOW: u64 = 1024 * 8; // 1024 * default_outbound_peers +/// TODO(doc): @driftluo pub const RETRY_ASK_TX_TIMEOUT_INCREASE: Duration = Duration::from_secs(30); +/// TODO(doc): @driftluo // ban time // 5 minutes pub const BAD_MESSAGE_BAN_TIME: Duration = Duration::from_secs(5 * 60); +/// TODO(doc): @driftluo // 10 minutes, peer have no common ancestor block pub const SYNC_USELESS_BAN_TIME: Duration = Duration::from_secs(10 * 60); diff --git a/sync/src/net_time_checker.rs b/sync/src/net_time_checker.rs index 8086820d77..ff016e6693 100644 --- a/sync/src/net_time_checker.rs +++ b/sync/src/net_time_checker.rs @@ -85,6 +85,7 @@ impl Clone for NetTimeProtocol { } impl NetTimeProtocol { + /// TODO(doc): @driftluo pub fn new(min_samples: usize, max_samples: usize, tolerant_offset: u64) -> Self { let checker = RwLock::new(NetTimeChecker::new( min_samples, diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index ffc9c6c973..d5eb7210b1 100644 --- a/sync/src/relayer/mod.rs 
+++ b/sync/src/relayer/mod.rs @@ -60,6 +60,7 @@ pub enum ReconstructionResult { Error(Status), } +/// TODO(doc): @driftluo #[derive(Clone)] pub struct Relayer { chain: ChainController, @@ -70,6 +71,7 @@ pub struct Relayer { } impl Relayer { + /// TODO(doc): @driftluo pub fn new( chain: ChainController, shared: Arc, @@ -90,6 +92,7 @@ impl Relayer { } } + /// TODO(doc): @driftluo pub fn shared(&self) -> &Arc { &self.shared } @@ -197,6 +200,7 @@ impl Relayer { } } + /// TODO(doc): @driftluo pub fn request_proposal_txs( &self, nc: &dyn CKBProtocolContext, @@ -247,6 +251,7 @@ impl Relayer { } } + /// TODO(doc): @driftluo pub fn accept_block( &self, nc: &dyn CKBProtocolContext, @@ -296,6 +301,7 @@ impl Relayer { } } + /// TODO(doc): @driftluo // nodes should attempt to reconstruct the full block by taking the prefilledtxn transactions // from the original CompactBlock message and placing them in the marked positions, // then for each short transaction ID from the original compact_block message, in order, @@ -526,6 +532,7 @@ impl Relayer { } } + /// TODO(doc): @driftluo // Ask for relay transaction by hash from all peers pub fn ask_for_txs(&self, nc: &dyn CKBProtocolContext) { let state = self.shared().state(); @@ -568,6 +575,7 @@ impl Relayer { } } + /// TODO(doc): @driftluo // Send bulk of tx hashes to selected peers pub fn send_bulk_of_tx_hashes(&self, nc: &dyn CKBProtocolContext) { let connected_peers = nc.connected_peers(); diff --git a/sync/src/status.rs b/sync/src/status.rs index 32771320cf..f54d5f682a 100644 --- a/sync/src/status.rs +++ b/sync/src/status.rs @@ -111,11 +111,13 @@ pub enum StatusCode { } impl StatusCode { + /// TODO(doc): @driftluo pub fn with_context(self, context: S) -> Status { Status::new(self, Some(context)) } } +/// TODO(doc): @driftluo #[derive(Clone, Debug, Eq)] pub struct Status { code: StatusCode, @@ -123,6 +125,7 @@ pub struct Status { } impl Status { + /// TODO(doc): @driftluo pub fn new(code: StatusCode, context: Option) -> Self { Self 
{ code, @@ -130,18 +133,22 @@ impl Status { } } + /// TODO(doc): @driftluo pub fn ok() -> Self { Self::new::<&str>(StatusCode::OK, None) } + /// TODO(doc): @driftluo pub fn ignored() -> Self { Self::new::<&str>(StatusCode::Ignored, None) } + /// TODO(doc): @driftluo pub fn is_ok(&self) -> bool { self.code == StatusCode::OK } + /// TODO(doc): @driftluo pub fn should_ban(&self) -> Option { if !(400..500).contains(&(self.code as u16)) { return None; @@ -152,10 +159,12 @@ impl Status { } } + /// TODO(doc): @driftluo pub fn should_warn(&self) -> bool { self.code as u16 >= 500 } + /// TODO(doc): @driftluo pub fn code(&self) -> StatusCode { self.code } diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index ac3af1a125..84f2ac3578 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -123,14 +123,17 @@ impl BlockFetchCMD { } } +/// TODO(doc): @driftluo #[derive(Clone)] pub struct Synchronizer { chain: ChainController, + /// TODO(doc): @driftluo pub shared: Arc, fetch_channel: Option>, } impl Synchronizer { + /// TODO(doc): @driftluo pub fn new(chain: ChainController, shared: Arc) -> Synchronizer { Synchronizer { chain, @@ -139,6 +142,7 @@ impl Synchronizer { } } + /// TODO(doc): @driftluo pub fn shared(&self) -> &Arc { &self.shared } @@ -198,6 +202,7 @@ impl Synchronizer { } } + /// TODO(doc): @driftluo pub fn peers(&self) -> &Peers { self.shared().state().peers() } @@ -219,6 +224,7 @@ impl Synchronizer { } } + /// TODO(doc): @driftluo //TODO: process block which we don't request pub fn process_new_block(&self, block: core::BlockView) -> Result { let block_hash = block.hash(); @@ -240,6 +246,7 @@ impl Synchronizer { } } + /// TODO(doc): @driftluo pub fn get_blocks_to_fetch( &self, peer: PeerIndex, @@ -277,6 +284,7 @@ impl Synchronizer { ); } + /// TODO(doc): @driftluo // - If at timeout their best known block now has more work than our tip // when the timeout was set, then either reset the timeout or clear it // (after 
comparing against our current tip's work) diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 1b9edeb207..b2564888aa 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1173,6 +1173,7 @@ type PendingCompactBlockMap = HashMap< ), >; +/// TODO(doc): @driftluo #[derive(Clone)] pub struct SyncShared { shared: Shared, @@ -1180,10 +1181,12 @@ pub struct SyncShared { } impl SyncShared { + /// TODO(doc): @driftluo pub fn new(shared: Shared, sync_config: SyncConfig) -> SyncShared { Self::with_tmpdir::(shared, sync_config, None) } + /// TODO(doc): @driftluo pub fn with_tmpdir

(shared: Shared, sync_config: SyncConfig, tmpdir: Option

) -> SyncShared where P: AsRef, @@ -1229,10 +1232,12 @@ impl SyncShared { } } + /// TODO(doc): @driftluo pub fn shared(&self) -> &Shared { &self.shared } + /// TODO(doc): @driftluo pub fn active_chain(&self) -> ActiveChain { ActiveChain { shared: self.clone(), @@ -1241,18 +1246,22 @@ impl SyncShared { } } + /// TODO(doc): @driftluo pub fn store(&self) -> &ChainDB { self.shared.store() } + /// TODO(doc): @driftluo pub fn state(&self) -> &SyncState { &self.state } + /// TODO(doc): @driftluo pub fn consensus(&self) -> &Consensus { self.shared.consensus() } + /// TODO(doc): @driftluo pub fn insert_new_block( &self, chain: &ChainController, @@ -1282,6 +1291,7 @@ impl SyncShared { ret } + /// TODO(doc): @driftluo pub fn try_search_orphan_pool(&self, chain: &ChainController, parent_hash: &Byte32) { let descendants = self.state.remove_orphan_by_parent(parent_hash); debug!( @@ -1337,6 +1347,7 @@ impl SyncShared { Ok(ret?) } + /// TODO(doc): @driftluo // Update the header_map // Update the block_status_map // Update the shared_best_header if need @@ -1378,6 +1389,7 @@ impl SyncShared { self.state.may_set_shared_best_header(header_view); } + /// TODO(doc): @driftluo pub fn get_header_view( &self, hash: &Byte32, @@ -1404,6 +1416,7 @@ impl SyncShared { } } + /// TODO(doc): @driftluo pub fn get_header(&self, hash: &Byte32) -> Option { self.state .header_map @@ -1412,12 +1425,14 @@ impl SyncShared { .or_else(|| self.store().get_block_header(hash)) } + /// TODO(doc): @driftluo pub fn is_parent_stored(&self, block: &core::BlockView) -> bool { self.store() .get_block_header(&block.data().header().raw().parent_hash()) .is_some() } + /// TODO(doc): @driftluo pub fn get_epoch_ext(&self, hash: &Byte32) -> Option { self.store().get_block_epoch(&hash) } diff --git a/test/Cargo.toml b/test/Cargo.toml index c2d94c6cb4..21fa1c524c 100644 --- a/test/Cargo.toml +++ b/test/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" 
-description = "TODO(doc): crate description" +description = "TODO(doc): @doitian crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/traits/Cargo.toml b/traits/Cargo.toml index 0b6f85d91e..284f0b691b 100644 --- a/traits/Cargo.toml +++ b/traits/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" authors = ["Nervos Core Dev "] edition = "2018" license = "MIT" -description = "TODO(doc): crate description" +description = "TODO(doc): @quake crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/traits/src/block_median_time_context.rs b/traits/src/block_median_time_context.rs index 23d4a8af12..88a4fea371 100644 --- a/traits/src/block_median_time_context.rs +++ b/traits/src/block_median_time_context.rs @@ -4,6 +4,7 @@ use ckb_types::{core::BlockNumber, packed::Byte32}; /// The invoker should only rely on `block_median_time` function /// the other functions only use to help the default `block_median_time`, and maybe unimplemented. 
pub trait BlockMedianTimeContext: HeaderProvider { + /// TODO(doc): @quake fn median_block_count(&self) -> u64; /// Return timestamp and block_number of the corresponding block_hash, and hash of parent block diff --git a/traits/src/cell_data_provider.rs b/traits/src/cell_data_provider.rs index 210feddf27..829f71bf3a 100644 --- a/traits/src/cell_data_provider.rs +++ b/traits/src/cell_data_provider.rs @@ -4,7 +4,9 @@ use ckb_types::{ packed::{Byte32, OutPoint}, }; +/// TODO(doc): @quake pub trait CellDataProvider { + /// TODO(doc): @quake fn load_cell_data(&self, cell: &CellMeta) -> Option<(Bytes, Byte32)> { cell.mem_cell_data .as_ref() @@ -12,5 +14,6 @@ pub trait CellDataProvider { .or_else(|| self.get_cell_data(&cell.out_point)) } + /// TODO(doc): @quake fn get_cell_data(&self, out_point: &OutPoint) -> Option<(Bytes, Byte32)>; } diff --git a/traits/src/header_provider.rs b/traits/src/header_provider.rs index 245a1fc6c9..433c239e6e 100644 --- a/traits/src/header_provider.rs +++ b/traits/src/header_provider.rs @@ -1,5 +1,7 @@ use ckb_types::{core::HeaderView, packed::Byte32}; +/// TODO(doc): @quake pub trait HeaderProvider { + /// TODO(doc): @quake fn get_header(&self, hash: &Byte32) -> Option; } diff --git a/traits/src/lib.rs b/traits/src/lib.rs index 5fd2a9d39b..761cd5d3dd 100644 --- a/traits/src/lib.rs +++ b/traits/src/lib.rs @@ -1,3 +1,4 @@ +//! 
TODO(doc): @quake mod block_median_time_context; mod cell_data_provider; mod header_provider; diff --git a/tx-pool/Cargo.toml b/tx-pool/Cargo.toml index 5f4fc7e1cd..4b51427f1f 100644 --- a/tx-pool/Cargo.toml +++ b/tx-pool/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @zhangsoledad crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/tx-pool/src/component/entry.rs b/tx-pool/src/component/entry.rs index f05f6a4d28..a49b2dd4d9 100644 --- a/tx-pool/src/component/entry.rs +++ b/tx-pool/src/component/entry.rs @@ -87,10 +87,12 @@ impl TxEntry { } } + /// TODO(doc): @zhangsoledad pub fn as_sorted_key(&self) -> AncestorsScoreSortKey { AncestorsScoreSortKey::from(self) } + /// TODO(doc): @zhangsoledad pub fn add_entry_weight(&mut self, entry: &TxEntry) { self.ancestors_count = self.ancestors_count.saturating_add(1); self.ancestors_size = self.ancestors_size.saturating_add(entry.size); @@ -101,6 +103,7 @@ impl TxEntry { .saturating_add(entry.fee.as_u64()), ); } + /// TODO(doc): @zhangsoledad pub fn sub_entry_weight(&mut self, entry: &TxEntry) { self.ancestors_count = self.ancestors_count.saturating_sub(1); self.ancestors_size = self.ancestors_size.saturating_sub(entry.size); @@ -112,6 +115,7 @@ impl TxEntry { ); } + /// TODO(doc): @zhangsoledad pub fn add_ancestors_weight(&mut self, entry: &TxEntry) { self.ancestors_count = self.ancestors_count.saturating_add(entry.ancestors_count); self.ancestors_size = self.ancestors_size.saturating_add(entry.ancestors_size); @@ -122,6 +126,7 @@ impl TxEntry { .saturating_add(entry.ancestors_fee.as_u64()), ); } + /// TODO(doc): @zhangsoledad pub fn sub_ancestors_weight(&mut self, entry: &TxEntry) { self.ancestors_count = self.ancestors_count.saturating_sub(entry.ancestors_count); self.ancestors_size = 
self.ancestors_size.saturating_sub(entry.ancestors_size); diff --git a/tx-pool/src/error.rs b/tx-pool/src/error.rs index 182e6f7909..3115f1c395 100644 --- a/tx-pool/src/error.rs +++ b/tx-pool/src/error.rs @@ -1,8 +1,10 @@ +//! TODO(doc): @zhangsoledad use ckb_error::{Error, ErrorKind}; use ckb_types::packed::Byte32; use failure::Fail; use tokio::sync::mpsc::error::TrySendError as TokioTrySendError; +/// TODO(doc): @zhangsoledad #[derive(Debug, PartialEq, Clone, Eq, Fail)] pub enum Reject { /// The fee rate of transaction is lower than min fee rate @@ -12,18 +14,22 @@ pub enum Reject { )] LowFeeRate(u64, u64), + /// TODO(doc): @zhangsoledad #[fail(display = "Transaction exceeded maximum ancestors count limit, try send it later")] ExceededMaximumAncestorsCount, + /// TODO(doc): @zhangsoledad #[fail( display = "Transaction pool exceeded maximum {} limit({}), try send it later", _0, _1 )] Full(String, u64), + /// TODO(doc): @zhangsoledad #[fail(display = "Transaction({}) already exist in transaction_pool", _0)] Duplicated(Byte32), + /// TODO(doc): @zhangsoledad #[fail(display = "Malformed {} transaction", _0)] Malformed(String), } @@ -34,20 +40,26 @@ impl From for Error { } } +/// TODO(doc): @zhangsoledad #[derive(Debug, PartialEq, Clone, Eq, Fail)] pub enum BlockAssemblerError { + /// TODO(doc): @zhangsoledad #[fail(display = "InvalidInput")] InvalidInput, + /// TODO(doc): @zhangsoledad #[fail(display = "InvalidParams {}", _0)] InvalidParams(String), + /// TODO(doc): @zhangsoledad #[fail(display = "Disabled")] Disabled, } +/// TODO(doc): @zhangsoledad #[derive(Fail, Debug)] #[fail(display = "TrySendError {}.", _0)] pub struct TrySendError(String); +/// TODO(doc): @zhangsoledad pub fn handle_try_send_error(error: TokioTrySendError) -> (T, TrySendError) { let e = TrySendError(format!("{}", error)); let m = match error { diff --git a/tx-pool/src/lib.rs b/tx-pool/src/lib.rs index 6891d6fe9d..0628884ec7 100644 --- a/tx-pool/src/lib.rs +++ b/tx-pool/src/lib.rs @@ -1,3 +1,4 
@@ +//! TODO(doc): @zhangsoledad mod block_assembler; mod component; pub mod error; diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 336347bb33..bfa9fa6f59 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -30,6 +30,7 @@ use std::sync::{ Arc, }; +/// TODO(doc): @zhangsoledad pub struct TxPool { pub(crate) config: TxPoolConfig, /// The short id that has not been proposed @@ -50,6 +51,7 @@ pub struct TxPool { pub(crate) total_tx_size: usize, // sum of all tx_pool tx's cycles. pub(crate) total_tx_cycles: Cycle, + /// TODO(doc): @zhangsoledad pub snapshot: Arc, } @@ -86,6 +88,7 @@ pub struct TxPoolInfo { } impl TxPool { + /// TODO(doc): @zhangsoledad pub fn new( config: TxPoolConfig, snapshot: Arc, @@ -109,14 +112,17 @@ impl TxPool { } } + /// TODO(doc): @zhangsoledad pub fn snapshot(&self) -> &Snapshot { &self.snapshot } + /// TODO(doc): @zhangsoledad pub fn cloned_snapshot(&self) -> Arc { Arc::clone(&self.snapshot) } + /// TODO(doc): @zhangsoledad pub fn info(&self) -> TxPoolInfo { let tip_header = self.snapshot.tip_header(); TxPoolInfo { @@ -131,19 +137,23 @@ impl TxPool { } } + /// TODO(doc): @zhangsoledad pub fn reach_size_limit(&self, tx_size: usize) -> bool { (self.total_tx_size + tx_size) > self.config.max_mem_size } + /// TODO(doc): @zhangsoledad pub fn reach_cycles_limit(&self, cycles: Cycle) -> bool { (self.total_tx_cycles + cycles) > self.config.max_cycles } + /// TODO(doc): @zhangsoledad pub fn update_statics_for_add_tx(&mut self, tx_size: usize, cycles: Cycle) { self.total_tx_size += tx_size; self.total_tx_cycles += cycles; } + /// TODO(doc): @zhangsoledad // cycles overflow is possible, currently obtaining cycles is not accurate pub fn update_statics_for_remove_tx(&mut self, tx_size: usize, cycles: Cycle) { let total_tx_size = self.total_tx_size.checked_sub(tx_size).unwrap_or_else(|| { @@ -164,6 +174,7 @@ impl TxPool { self.total_tx_cycles = total_tx_cycles; } + /// TODO(doc): @zhangsoledad // If did have this value present, false 
is returned. pub fn add_pending(&mut self, entry: TxEntry) -> Result { if self @@ -176,12 +187,14 @@ impl TxPool { self.pending.add_entry(entry).map(|entry| entry.is_none()) } + /// TODO(doc): @zhangsoledad // add_gap inserts proposed but still uncommittable transaction. pub fn add_gap(&mut self, entry: TxEntry) -> Result { trace!("add_gap {}", entry.transaction.hash()); self.gap.add_entry(entry).map(|entry| entry.is_none()) } + /// TODO(doc): @zhangsoledad pub fn add_proposed(&mut self, entry: TxEntry) -> Result { trace!("add_proposed {}", entry.transaction.hash()); self.touch_last_txs_updated_at(); @@ -205,10 +218,12 @@ impl TxPool { .store(unix_time_as_millis(), Ordering::SeqCst); } + /// TODO(doc): @zhangsoledad pub fn get_last_txs_updated_at(&self) -> u64 { self.last_txs_updated_at.load(Ordering::SeqCst) } + /// TODO(doc): @zhangsoledad pub fn contains_proposal_id(&self, id: &ProposalShortId) -> bool { self.pending.contains_key(id) || self.conflict.contains(id) @@ -216,6 +231,7 @@ impl TxPool { || self.orphan.contains_key(id) } + /// TODO(doc): @zhangsoledad pub fn contains_tx(&self, id: &ProposalShortId) -> bool { self.pending.contains_key(id) || self.gap.contains_key(id) @@ -224,6 +240,7 @@ impl TxPool { || self.conflict.contains(id) } + /// TODO(doc): @zhangsoledad pub fn get_tx_with_cycles( &self, id: &ProposalShortId, @@ -258,6 +275,7 @@ impl TxPool { }) } + /// TODO(doc): @zhangsoledad pub fn get_tx(&self, id: &ProposalShortId) -> Option { self.pending .get_tx(id) @@ -268,6 +286,7 @@ impl TxPool { .cloned() } + /// TODO(doc): @zhangsoledad pub fn get_tx_without_conflict(&self, id: &ProposalShortId) -> Option { self.pending .get_tx(id) @@ -277,10 +296,12 @@ impl TxPool { .cloned() } + /// TODO(doc): @zhangsoledad pub fn proposed(&self) -> &ProposedPool { &self.proposed } + /// TODO(doc): @zhangsoledad pub fn get_tx_from_proposed_and_others(&self, id: &ProposalShortId) -> Option { self.proposed .get_tx(id) @@ -306,6 +327,7 @@ impl TxPool { } } + /// 
TODO(doc): @zhangsoledad pub fn remove_expired<'a>(&mut self, ids: impl Iterator) { for id in ids { for entry in self.gap.remove_entry_and_descendants(id) { @@ -325,6 +347,7 @@ impl TxPool { self.snapshot().proposals().contains_proposed(short_id) } + /// TODO(doc): @zhangsoledad pub fn resolve_tx_from_pending_and_proposed( &self, tx: TransactionView, @@ -343,6 +366,7 @@ impl TxPool { ) } + /// TODO(doc): @zhangsoledad pub fn resolve_tx_from_proposed( &self, tx: TransactionView, @@ -687,6 +711,7 @@ impl TxPool { ret } + /// TODO(doc): @zhangsoledad pub fn get_proposals( &self, limit: usize, @@ -701,6 +726,7 @@ impl TxPool { proposals } + /// TODO(doc): @zhangsoledad pub fn get_tx_from_pool_or_store( &self, proposal_id: &ProposalShortId, diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index 85b7bbd737..bb524724b7 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -38,8 +38,11 @@ use std::sync::{atomic::AtomicU64, Arc}; use std::{cmp, iter}; use tokio::task::block_in_place; +/// TODO(doc): @zhangsoledad pub enum PlugTarget { + /// TODO(doc): @zhangsoledad Pending, + /// TODO(doc): @zhangsoledad Proposed, } diff --git a/tx-pool/src/service.rs b/tx-pool/src/service.rs index b5b3d6ec67..13108dab78 100644 --- a/tx-pool/src/service.rs +++ b/tx-pool/src/service.rs @@ -1,3 +1,4 @@ +//! 
TODO(doc): @zhangsoledad use crate::block_assembler::BlockAssembler; use crate::component::entry::TxEntry; use crate::error::handle_try_send_error; @@ -24,14 +25,19 @@ use std::sync::atomic::Ordering; use std::sync::{atomic::AtomicU64, Arc}; use tokio::sync::{mpsc, oneshot, RwLock}; +/// TODO(doc): @zhangsoledad pub const DEFAULT_CHANNEL_SIZE: usize = 512; +/// TODO(doc): @zhangsoledad pub struct Request { + /// TODO(doc): @zhangsoledad pub responder: oneshot::Sender, + /// TODO(doc): @zhangsoledad pub arguments: A, } impl Request { + /// TODO(doc): @zhangsoledad pub fn call(arguments: A, responder: oneshot::Sender) -> Request { Request { responder, @@ -40,16 +46,20 @@ impl Request { } } +/// TODO(doc): @zhangsoledad pub struct Notify { + /// TODO(doc): @zhangsoledad pub arguments: A, } impl Notify { + /// TODO(doc): @zhangsoledad pub fn notify(arguments: A) -> Notify { Notify { arguments } } } +/// TODO(doc): @zhangsoledad pub type BlockTemplateResult = Result; type BlockTemplateArgs = ( Option, @@ -58,6 +68,7 @@ type BlockTemplateArgs = ( Option, ); +/// TODO(doc): @zhangsoledad pub type SubmitTxsResult = Result, Error>; type NotifyTxsCallback = Option>; @@ -65,6 +76,7 @@ type FetchTxRPCResult = Option<(bool, TransactionView)>; type FetchTxsWithCyclesResult = Vec<(ProposalShortId, (TransactionView, Cycle))>; +/// TODO(doc): @zhangsoledad pub type ChainReorgArgs = ( VecDeque, VecDeque, @@ -72,21 +84,35 @@ pub type ChainReorgArgs = ( Arc, ); +/// TODO(doc): @zhangsoledad pub enum Message { + /// TODO(doc): @zhangsoledad BlockTemplate(Request), + /// TODO(doc): @zhangsoledad SubmitTxs(Request, SubmitTxsResult>), + /// TODO(doc): @zhangsoledad NotifyTxs(Notify<(Vec, NotifyTxsCallback)>), + /// TODO(doc): @zhangsoledad ChainReorg(Notify), + /// TODO(doc): @zhangsoledad FreshProposalsFilter(Request, Vec>), + /// TODO(doc): @zhangsoledad FetchTxs(Request, HashMap>), + /// TODO(doc): @zhangsoledad FetchTxsWithCycles(Request, FetchTxsWithCyclesResult>), + /// TODO(doc): 
@zhangsoledad GetTxPoolInfo(Request<(), TxPoolInfo>), + /// TODO(doc): @zhangsoledad FetchTxRPC(Request>), + /// TODO(doc): @zhangsoledad NewUncle(Notify), + /// TODO(doc): @zhangsoledad PlugEntry(Request<(Vec, PlugTarget), ()>), + /// TODO(doc): @zhangsoledad ClearPool(Request, ()>), } +/// TODO(doc): @zhangsoledad #[derive(Clone)] pub struct TxPoolController { sender: mpsc::Sender, @@ -101,10 +127,12 @@ impl Drop for TxPoolController { } impl TxPoolController { + /// TODO(doc): @zhangsoledad pub fn handle(&self) -> &Handle { &self.handle } + /// TODO(doc): @zhangsoledad pub fn get_block_template( &self, bytes_limit: Option, @@ -119,6 +147,7 @@ impl TxPoolController { ) } + /// TODO(doc): @zhangsoledad pub fn get_block_template_with_block_assembler_config( &self, bytes_limit: Option, @@ -147,6 +176,7 @@ impl TxPoolController { self.handle.block_on(response).map_err(Into::into) } + /// TODO(doc): @zhangsoledad pub fn notify_new_uncle(&self, uncle: UncleBlockView) -> Result<(), FailureError> { let mut sender = self.sender.clone(); let notify = Notify::notify(uncle); @@ -156,6 +186,7 @@ impl TxPoolController { }) } + /// TODO(doc): @zhangsoledad pub fn update_tx_pool_for_reorg( &self, detached_blocks: VecDeque, @@ -176,6 +207,7 @@ impl TxPoolController { }) } + /// TODO(doc): @zhangsoledad pub fn submit_txs(&self, txs: Vec) -> Result { let mut sender = self.sender.clone(); let (responder, response) = oneshot::channel(); @@ -187,6 +219,7 @@ impl TxPoolController { self.handle.block_on(response).map_err(Into::into) } + /// TODO(doc): @zhangsoledad pub fn plug_entry( &self, entries: Vec, @@ -202,6 +235,7 @@ impl TxPoolController { self.handle.block_on(response).map_err(Into::into) } + /// TODO(doc): @zhangsoledad pub fn notify_txs( &self, txs: Vec, @@ -215,6 +249,7 @@ impl TxPoolController { }) } + /// TODO(doc): @zhangsoledad pub fn get_tx_pool_info(&self) -> Result { let mut sender = self.sender.clone(); let (responder, response) = oneshot::channel(); @@ -228,6 +263,7 
@@ impl TxPoolController { self.handle.block_on(response).map_err(Into::into) } + /// TODO(doc): @zhangsoledad pub fn fresh_proposals_filter( &self, proposals: Vec, @@ -244,6 +280,7 @@ impl TxPoolController { self.handle.block_on(response).map_err(Into::into) } + /// TODO(doc): @zhangsoledad pub fn fetch_tx_for_rpc(&self, id: ProposalShortId) -> Result { let mut sender = self.sender.clone(); let (responder, response) = oneshot::channel(); @@ -255,6 +292,7 @@ impl TxPoolController { self.handle.block_on(response).map_err(Into::into) } + /// TODO(doc): @zhangsoledad pub fn fetch_txs( &self, short_ids: Vec, @@ -269,6 +307,7 @@ impl TxPoolController { self.handle.block_on(response).map_err(Into::into) } + /// TODO(doc): @zhangsoledad pub fn fetch_txs_with_cycles( &self, short_ids: Vec, @@ -285,6 +324,7 @@ impl TxPoolController { self.handle.block_on(response).map_err(Into::into) } + /// TODO(doc): @zhangsoledad pub fn clear_pool(&self, new_snapshot: Arc) -> Result<(), FailureError> { let mut sender = self.sender.clone(); let (responder, response) = oneshot::channel(); @@ -297,11 +337,13 @@ impl TxPoolController { } } +/// TODO(doc): @zhangsoledad pub struct TxPoolServiceBuilder { service: Option, } impl TxPoolServiceBuilder { + /// TODO(doc): @zhangsoledad pub fn new( tx_pool_config: TxPoolConfig, snapshot: Arc, @@ -328,6 +370,7 @@ impl TxPoolServiceBuilder { } } + /// TODO(doc): @zhangsoledad pub fn start(mut self) -> TxPoolController { let (sender, mut receiver) = mpsc::channel(DEFAULT_CHANNEL_SIZE); let (signal_sender, mut signal_receiver) = oneshot::channel(); @@ -355,6 +398,7 @@ impl TxPoolServiceBuilder { } } +/// TODO(doc): @zhangsoledad #[derive(Clone)] pub struct TxPoolService { pub(crate) tx_pool: Arc>, @@ -368,6 +412,7 @@ pub struct TxPoolService { } impl TxPoolService { + /// TODO(doc): @zhangsoledad pub fn new( tx_pool: TxPool, consensus: Arc, diff --git a/util/Cargo.toml b/util/Cargo.toml index a63de5bc9b..8f9fd955e6 100644 --- a/util/Cargo.toml +++ 
b/util/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @doitian crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/app-config/Cargo.toml b/util/app-config/Cargo.toml index d9e20681d2..7fdb7888e0 100644 --- a/util/app-config/Cargo.toml +++ b/util/app-config/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" authors = ["Nervos Core Dev "] edition = "2018" license = "MIT" -description = "TODO(doc): crate description" +description = "TODO(doc): @doitian crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/app-config/src/app_config.rs b/util/app-config/src/app_config.rs index 980f9f5c99..fa126caf4f 100644 --- a/util/app-config/src/app_config.rs +++ b/util/app-config/src/app_config.rs @@ -19,60 +19,91 @@ use super::configs::*; use super::sentry_config::SentryConfig; use super::{cli, ExitCode}; +/// TODO(doc): @doitian pub enum AppConfig { + /// TODO(doc): @doitian CKB(Box), + /// TODO(doc): @doitian Miner(Box), } +/// TODO(doc): @doitian // change the order of fields will break integration test, see module doc. 
#[derive(Clone, Debug, Serialize, Deserialize)] pub struct CKBAppConfig { + /// TODO(doc): @doitian pub data_dir: PathBuf, + /// TODO(doc): @doitian pub tmp_dir: Option, + /// TODO(doc): @doitian pub logger: LogConfig, + /// TODO(doc): @doitian pub sentry: SentryConfig, + /// TODO(doc): @doitian #[serde(default)] pub metrics: MetricsConfig, + /// TODO(doc): @doitian #[serde(default)] pub memory_tracker: MemoryTrackerConfig, + /// TODO(doc): @doitian pub chain: ChainConfig, + /// TODO(doc): @doitian pub block_assembler: Option, + /// TODO(doc): @doitian #[serde(default)] pub db: DBConfig, + /// TODO(doc): @doitian #[serde(default)] pub indexer: IndexerConfig, + /// TODO(doc): @doitian pub network: NetworkConfig, + /// TODO(doc): @doitian pub rpc: RpcConfig, + /// TODO(doc): @doitian pub tx_pool: TxPoolConfig, + /// TODO(doc): @doitian #[serde(default)] pub store: StoreConfig, + /// TODO(doc): @doitian pub alert_signature: Option, + /// TODO(doc): @doitian #[serde(default)] pub notify: NotifyConfig, } +/// TODO(doc): @doitian // change the order of fields will break integration test, see module doc. 
#[derive(Clone, Debug, Serialize, Deserialize)] pub struct MinerAppConfig { + /// TODO(doc): @doitian pub data_dir: PathBuf, + /// TODO(doc): @doitian pub chain: ChainConfig, + /// TODO(doc): @doitian pub logger: LogConfig, + /// TODO(doc): @doitian pub sentry: SentryConfig, + /// TODO(doc): @doitian #[serde(default)] pub metrics: MetricsConfig, + /// TODO(doc): @doitian #[serde(default)] pub memory_tracker: MemoryTrackerConfig, + /// TODO(doc): @doitian pub miner: MinerConfig, } +/// TODO(doc): @doitian #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ChainConfig { + /// TODO(doc): @doitian pub spec: Resource, } impl AppConfig { + /// TODO(doc): @doitian pub fn load_for_subcommand>( root_dir: P, subcommand_name: &str, @@ -96,6 +127,7 @@ impl AppConfig { } } + /// TODO(doc): @doitian pub fn logger(&self) -> &LogConfig { match self { AppConfig::CKB(config) => &config.logger, @@ -103,6 +135,7 @@ impl AppConfig { } } + /// TODO(doc): @doitian pub fn sentry(&self) -> &SentryConfig { match self { AppConfig::CKB(config) => &config.sentry, @@ -110,6 +143,7 @@ impl AppConfig { } } + /// TODO(doc): @doitian pub fn metrics(&self) -> &MetricsConfig { match self { AppConfig::CKB(config) => &config.metrics, @@ -117,6 +151,7 @@ impl AppConfig { } } + /// TODO(doc): @doitian pub fn memory_tracker(&self) -> &MemoryTrackerConfig { match self { AppConfig::CKB(config) => &config.memory_tracker, @@ -124,6 +159,7 @@ impl AppConfig { } } + /// TODO(doc): @doitian pub fn chain_spec(&self) -> Result { let spec_resource = match self { AppConfig::CKB(config) => &config.chain.spec, @@ -135,6 +171,7 @@ impl AppConfig { }) } + /// TODO(doc): @doitian pub fn into_ckb(self) -> Result, ExitCode> { match self { AppConfig::CKB(config) => Ok(config), @@ -145,6 +182,7 @@ impl AppConfig { } } + /// TODO(doc): @doitian pub fn into_miner(self) -> Result, ExitCode> { match self { AppConfig::Miner(config) => Ok(config), diff --git a/util/app-config/src/args.rs b/util/app-config/src/args.rs index 
f69412ffab..ad0226eb03 100644 --- a/util/app-config/src/args.rs +++ b/util/app-config/src/args.rs @@ -5,87 +5,151 @@ use ckb_pow::PowEngine; use std::path::PathBuf; use std::sync::Arc; +/// TODO(doc): @doitian pub struct ExportArgs { + /// TODO(doc): @doitian pub config: Box, + /// TODO(doc): @doitian pub consensus: Consensus, + /// TODO(doc): @doitian pub target: PathBuf, } +/// TODO(doc): @doitian pub struct ImportArgs { + /// TODO(doc): @doitian pub config: Box, + /// TODO(doc): @doitian pub consensus: Consensus, + /// TODO(doc): @doitian pub source: PathBuf, } +/// TODO(doc): @doitian pub struct RunArgs { + /// TODO(doc): @doitian pub config: Box, + /// TODO(doc): @doitian pub consensus: Consensus, + /// TODO(doc): @doitian pub block_assembler_advanced: bool, } pub type ProfileArgs = Option<(Option, Option)>; +/// TODO(doc): @doitian pub struct ReplayArgs { + /// TODO(doc): @doitian pub config: Box, + /// TODO(doc): @doitian pub consensus: Consensus, + /// TODO(doc): @doitian pub tmp_target: PathBuf, + /// TODO(doc): @doitian pub profile: ProfileArgs, + /// TODO(doc): @doitian pub sanity_check: bool, + /// TODO(doc): @doitian pub full_verfication: bool, } +/// TODO(doc): @doitian pub struct MinerArgs { + /// TODO(doc): @doitian pub config: MinerConfig, + /// TODO(doc): @doitian pub pow_engine: Arc, + /// TODO(doc): @doitian pub memory_tracker: MemoryTrackerConfig, + /// TODO(doc): @doitian pub limit: u128, } +/// TODO(doc): @doitian pub struct StatsArgs { + /// TODO(doc): @doitian pub config: Box, + /// TODO(doc): @doitian pub consensus: Consensus, + /// TODO(doc): @doitian pub from: Option, + /// TODO(doc): @doitian pub to: Option, } +/// TODO(doc): @doitian pub struct InitArgs { + /// TODO(doc): @doitian pub interactive: bool, + /// TODO(doc): @doitian pub root_dir: PathBuf, + /// TODO(doc): @doitian pub chain: String, + /// TODO(doc): @doitian pub rpc_port: String, + /// TODO(doc): @doitian pub p2p_port: String, + /// TODO(doc): @doitian pub log_to_file: 
bool, + /// TODO(doc): @doitian pub log_to_stdout: bool, + /// TODO(doc): @doitian pub list_chains: bool, + /// TODO(doc): @doitian pub force: bool, + /// TODO(doc): @doitian pub block_assembler_code_hash: Option, + /// TODO(doc): @doitian pub block_assembler_args: Vec, + /// TODO(doc): @doitian pub block_assembler_hash_type: ScriptHashType, + /// TODO(doc): @doitian pub block_assembler_message: Option, + /// TODO(doc): @doitian pub import_spec: Option, } +/// TODO(doc): @doitian pub struct ResetDataArgs { + /// TODO(doc): @doitian pub force: bool, + /// TODO(doc): @doitian pub all: bool, + /// TODO(doc): @doitian pub database: bool, + /// TODO(doc): @doitian pub indexer: bool, + /// TODO(doc): @doitian pub network: bool, + /// TODO(doc): @doitian pub network_peer_store: bool, + /// TODO(doc): @doitian pub network_secret_key: bool, + /// TODO(doc): @doitian pub logs: bool, + /// TODO(doc): @doitian pub data_dir: PathBuf, + /// TODO(doc): @doitian pub db_path: PathBuf, + /// TODO(doc): @doitian pub indexer_db_path: PathBuf, + /// TODO(doc): @doitian pub network_dir: PathBuf, + /// TODO(doc): @doitian pub network_peer_store_path: PathBuf, + /// TODO(doc): @doitian pub network_secret_key_path: PathBuf, + /// TODO(doc): @doitian pub logs_dir: Option, } +/// TODO(doc): @doitian pub struct PeerIDArgs { + /// TODO(doc): @doitian pub peer_id: secio::PeerId, } +/// TODO(doc): @doitian pub struct MigrateArgs { + /// TODO(doc): @doitian pub config: Box, } diff --git a/util/app-config/src/cli.rs b/util/app-config/src/cli.rs index 9b38c59ffb..72546d354e 100644 --- a/util/app-config/src/cli.rs +++ b/util/app-config/src/cli.rs @@ -1,56 +1,105 @@ +//! 
TODO(doc): @doitian use ckb_build_info::Version; use ckb_resource::{DEFAULT_P2P_PORT, DEFAULT_RPC_PORT, DEFAULT_SPEC}; use clap::{App, AppSettings, Arg, ArgGroup, ArgMatches, SubCommand}; +/// TODO(doc): @doitian pub const CMD_RUN: &str = "run"; +/// TODO(doc): @doitian pub const CMD_MINER: &str = "miner"; +/// TODO(doc): @doitian pub const CMD_EXPORT: &str = "export"; +/// TODO(doc): @doitian pub const CMD_IMPORT: &str = "import"; +/// TODO(doc): @doitian pub const CMD_INIT: &str = "init"; +/// TODO(doc): @doitian pub const CMD_REPLAY: &str = "replay"; +/// TODO(doc): @doitian pub const CMD_STATS: &str = "stats"; +/// TODO(doc): @doitian pub const CMD_LIST_HASHES: &str = "list-hashes"; +/// TODO(doc): @doitian pub const CMD_RESET_DATA: &str = "reset-data"; +/// TODO(doc): @doitian pub const CMD_PEERID: &str = "peer-id"; +/// TODO(doc): @doitian pub const CMD_GEN_SECRET: &str = "gen"; +/// TODO(doc): @doitian pub const CMD_FROM_SECRET: &str = "from-secret"; +/// TODO(doc): @doitian pub const CMD_MIGRATE: &str = "migrate"; +/// TODO(doc): @doitian pub const ARG_CONFIG_DIR: &str = "config-dir"; +/// TODO(doc): @doitian pub const ARG_FORMAT: &str = "format"; +/// TODO(doc): @doitian pub const ARG_TARGET: &str = "target"; +/// TODO(doc): @doitian pub const ARG_SOURCE: &str = "source"; +/// TODO(doc): @doitian pub const ARG_DATA: &str = "data"; +/// TODO(doc): @doitian pub const ARG_LIST_CHAINS: &str = "list-chains"; +/// TODO(doc): @doitian pub const ARG_INTERACTIVE: &str = "interactive"; +/// TODO(doc): @doitian pub const ARG_CHAIN: &str = "chain"; +/// TODO(doc): @doitian pub const ARG_IMPORT_SPEC: &str = "import-spec"; +/// TODO(doc): @doitian pub const ARG_P2P_PORT: &str = "p2p-port"; +/// TODO(doc): @doitian pub const ARG_RPC_PORT: &str = "rpc-port"; +/// TODO(doc): @doitian pub const ARG_FORCE: &str = "force"; +/// TODO(doc): @doitian pub const ARG_LOG_TO: &str = "log-to"; +/// TODO(doc): @doitian pub const ARG_BUNDLED: &str = "bundled"; +/// TODO(doc): @doitian 
pub const ARG_BA_CODE_HASH: &str = "ba-code-hash"; +/// TODO(doc): @doitian pub const ARG_BA_ARG: &str = "ba-arg"; +/// TODO(doc): @doitian pub const ARG_BA_HASH_TYPE: &str = "ba-hash-type"; +/// TODO(doc): @doitian pub const ARG_BA_MESSAGE: &str = "ba-message"; +/// TODO(doc): @doitian pub const ARG_BA_ADVANCED: &str = "ba-advanced"; +/// TODO(doc): @doitian pub const ARG_FROM: &str = "from"; +/// TODO(doc): @doitian pub const ARG_TO: &str = "to"; +/// TODO(doc): @doitian pub const ARG_ALL: &str = "all"; +/// TODO(doc): @doitian pub const ARG_LIMIT: &str = "limit"; +/// TODO(doc): @doitian pub const ARG_DATABASE: &str = "database"; +/// TODO(doc): @doitian pub const ARG_INDEXER: &str = "indexer"; +/// TODO(doc): @doitian pub const ARG_NETWORK: &str = "network"; +/// TODO(doc): @doitian pub const ARG_NETWORK_PEER_STORE: &str = "network-peer-store"; +/// TODO(doc): @doitian pub const ARG_NETWORK_SECRET_KEY: &str = "network-secret-key"; +/// TODO(doc): @doitian pub const ARG_LOGS: &str = "logs"; +/// TODO(doc): @doitian pub const ARG_TMP_TARGET: &str = "tmp-target"; +/// TODO(doc): @doitian pub const ARG_SECRET_PATH: &str = "secret-path"; +/// TODO(doc): @doitian pub const ARG_PROFILE: &str = "profile"; +/// TODO(doc): @doitian pub const ARG_SANITY_CHECK: &str = "sanity-check"; +/// TODO(doc): @doitian pub const ARG_FULL_VERFICATION: &str = "full-verfication"; +/// TODO(doc): @doitian const GROUP_BA: &str = "ba"; fn basic_app<'b>() -> App<'static, 'b> { @@ -81,6 +130,7 @@ fn basic_app<'b>() -> App<'static, 'b> { .subcommand(migrate()) } +/// TODO(doc): @doitian pub fn get_matches(version: &Version) -> ArgMatches<'static> { basic_app() .version(version.short().as_str()) diff --git a/util/app-config/src/configs/db.rs b/util/app-config/src/configs/db.rs index b572304137..bd547aa126 100644 --- a/util/app-config/src/configs/db.rs +++ b/util/app-config/src/configs/db.rs @@ -2,16 +2,21 @@ use serde::{Deserialize, Serialize}; use std::collections::HashMap; use 
std::path::{Path, PathBuf}; +/// TODO(doc): @doitian #[derive(Clone, Debug, Default, Serialize, Deserialize)] pub struct Config { + /// TODO(doc): @doitian #[serde(default)] pub path: PathBuf, + /// TODO(doc): @doitian #[serde(default)] pub options: HashMap, + /// TODO(doc): @doitian pub options_file: Option, } impl Config { + /// TODO(doc): @doitian pub fn adjust>(&mut self, root_dir: &Path, data_dir: P, name: &str) { // If path is not set, use the default path if self.path.to_str().is_none() || self.path.to_str() == Some("") { diff --git a/util/app-config/src/configs/indexer.rs b/util/app-config/src/configs/indexer.rs index 07da51da34..ea3bb59041 100644 --- a/util/app-config/src/configs/indexer.rs +++ b/util/app-config/src/configs/indexer.rs @@ -8,6 +8,7 @@ pub struct Config { pub batch_interval: u64, /// The maximum number of blocks in a single indexing execution batch, default is 200 pub batch_size: usize, + /// TODO(doc): @doitian pub db: DBConfig, } diff --git a/util/app-config/src/configs/memory_tracker.rs b/util/app-config/src/configs/memory_tracker.rs index c3273373e4..e1dcaa45ea 100644 --- a/util/app-config/src/configs/memory_tracker.rs +++ b/util/app-config/src/configs/memory_tracker.rs @@ -1,7 +1,9 @@ use serde::{Deserialize, Serialize}; +/// TODO(doc): @doitian #[derive(Clone, Debug, Serialize, Deserialize)] pub struct Config { + /// TODO(doc): @doitian pub interval: u64, } diff --git a/util/app-config/src/configs/miner.rs b/util/app-config/src/configs/miner.rs index 488afeeea3..06dd4894c3 100644 --- a/util/app-config/src/configs/miner.rs +++ b/util/app-config/src/configs/miner.rs @@ -2,23 +2,33 @@ use ckb_jsonrpc_types::{JsonBytes, ScriptHashType}; use ckb_types::H256; use serde::{Deserialize, Serialize}; +/// TODO(doc): @doitian #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Config { + /// TODO(doc): @doitian pub client: ClientConfig, + /// TODO(doc): @doitian pub workers: Vec, } +/// TODO(doc): @doitian #[derive(Clone, Debug, 
PartialEq, Serialize, Deserialize)] pub struct ClientConfig { + /// TODO(doc): @doitian pub rpc_url: String, + /// TODO(doc): @doitian pub poll_interval: u64, + /// TODO(doc): @doitian pub block_on_submit: bool, } +/// TODO(doc): @doitian #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] #[serde(tag = "worker_type")] pub enum WorkerConfig { + /// TODO(doc): @doitian Dummy(DummyConfig), + /// TODO(doc): @doitian EaglesongSimple(EaglesongSimpleConfig), } @@ -31,23 +41,49 @@ pub struct BlockAssemblerConfig { pub message: JsonBytes, } +/// TODO(doc): @doitian #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] #[serde(tag = "delay_type")] pub enum DummyConfig { - Constant { value: u64 }, - Uniform { low: u64, high: u64 }, - Normal { mean: f64, std_dev: f64 }, - Poisson { lambda: f64 }, + /// TODO(doc): @doitian + Constant { + /// TODO(doc): @doitian + value: u64, + }, + /// TODO(doc): @doitian + Uniform { + /// TODO(doc): @doitian + low: u64, + /// TODO(doc): @doitian + high: u64, + }, + /// TODO(doc): @doitian + Normal { + /// TODO(doc): @doitian + mean: f64, + /// TODO(doc): @doitian + std_dev: f64, + }, + /// TODO(doc): @doitian + Poisson { + /// TODO(doc): @doitian + lambda: f64, + }, } +/// TODO(doc): @doitian #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EaglesongSimpleConfig { + /// TODO(doc): @doitian pub threads: usize, + /// TODO(doc): @doitian #[serde(default)] pub extra_hash_function: Option, } +/// TODO(doc): @doitian #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] pub enum ExtraHashFunction { + /// TODO(doc): @doitian Blake2b, } diff --git a/util/app-config/src/configs/network.rs b/util/app-config/src/configs/network.rs index 1e35b8616e..fffac8bd75 100644 --- a/util/app-config/src/configs/network.rs +++ b/util/app-config/src/configs/network.rs @@ -9,48 +9,69 @@ use std::path::PathBuf; // Max data size in send buffer: 24MB (a little larger than max frame length) const DEFAULT_SEND_BUFFER: usize = 24 
* 1024 * 1024; +/// TODO(doc): @doitian #[derive(Clone, Debug, Serialize, Deserialize, Default)] pub struct Config { + /// TODO(doc): @doitian #[serde(default)] pub whitelist_only: bool, + /// TODO(doc): @doitian pub max_peers: u32, + /// TODO(doc): @doitian pub max_outbound_peers: u32, + /// TODO(doc): @doitian #[serde(default)] pub path: PathBuf, + /// TODO(doc): @doitian #[serde(default)] pub dns_seeds: Vec, + /// TODO(doc): @doitian // Set if discovery add local address to peer store #[serde(default)] pub discovery_local_address: bool, + /// TODO(doc): @doitian pub ping_interval_secs: u64, + /// TODO(doc): @doitian pub ping_timeout_secs: u64, + /// TODO(doc): @doitian pub connect_outbound_interval_secs: u64, + /// TODO(doc): @doitian pub listen_addresses: Vec, + /// TODO(doc): @doitian #[serde(default)] pub public_addresses: Vec, + /// TODO(doc): @doitian pub bootnodes: Vec, + /// TODO(doc): @doitian #[serde(default)] pub whitelist_peers: Vec, + /// TODO(doc): @doitian #[serde(default)] pub upnp: bool, + /// TODO(doc): @doitian #[serde(default)] pub bootnode_mode: bool, + /// TODO(doc): @doitian // Max send buffer size pub max_send_buffer: Option, + /// TODO(doc): @doitian pub sync: Option, } +/// TODO(doc): @doitian #[derive(Clone, Debug, Serialize, Deserialize, Default)] pub struct SyncConfig { + /// TODO(doc): @doitian #[serde(default)] pub header_map: HeaderMapConfig, } +/// TODO(doc): @doitian #[derive(Clone, Debug, Serialize, Deserialize)] pub struct HeaderMapConfig { - // The maximum size of data in memory + /// The maximum size of data in memory pub primary_limit: usize, - // Disable cache if the size of data in memory less than this threshold + /// Disable cache if the size of data in memory less than this threshold pub backend_close_threshold: usize, } @@ -130,18 +151,21 @@ pub(crate) fn read_secret_key(path: PathBuf) -> Result PathBuf { let mut path = self.path.clone(); path.push("secret_key"); path } + /// TODO(doc): @doitian pub fn 
peer_store_path(&self) -> PathBuf { let mut path = self.path.clone(); path.push("peer_store"); path } + /// TODO(doc): @doitian pub fn create_dir_if_not_exists(&self) -> Result<(), Error> { if !self.path.exists() { fs::create_dir(&self.path) @@ -150,14 +174,17 @@ impl Config { } } + /// TODO(doc): @doitian pub fn max_inbound_peers(&self) -> u32 { self.max_peers.saturating_sub(self.max_outbound_peers) } + /// TODO(doc): @doitian pub fn max_outbound_peers(&self) -> u32 { self.max_outbound_peers } + /// TODO(doc): @doitian pub fn max_send_buffer(&self) -> usize { self.max_send_buffer.unwrap_or(DEFAULT_SEND_BUFFER) } @@ -173,6 +200,7 @@ impl Config { write_secret_to_file(&random_key_pair, path) } + /// TODO(doc): @doitian pub fn fetch_private_key(&self) -> Result { match self.read_secret_key()? { Some(key) => Ok(key), @@ -183,6 +211,7 @@ impl Config { } } + /// TODO(doc): @doitian pub fn whitelist_peers(&self) -> Result, Error> { let mut peers = Vec::with_capacity(self.whitelist_peers.len()); for addr_str in &self.whitelist_peers { @@ -203,6 +232,7 @@ impl Config { Ok(peers) } + /// TODO(doc): @doitian pub fn bootnodes(&self) -> Result, Error> { let mut peers = Vec::with_capacity(self.bootnodes.len()); for addr_str in &self.bootnodes { @@ -222,10 +252,12 @@ impl Config { Ok(peers) } + /// TODO(doc): @doitian pub fn outbound_peer_service_enabled(&self) -> bool { self.connect_outbound_interval_secs > 0 } + /// TODO(doc): @doitian pub fn dns_seeding_service_enabled(&self) -> bool { !self.dns_seeds.is_empty() } diff --git a/util/app-config/src/configs/network_alert.rs b/util/app-config/src/configs/network_alert.rs index dbf0963e6a..da0622646e 100644 --- a/util/app-config/src/configs/network_alert.rs +++ b/util/app-config/src/configs/network_alert.rs @@ -1,9 +1,12 @@ use ckb_jsonrpc_types::JsonBytes; use serde::{Deserialize, Serialize}; +/// TODO(doc): @doitian #[derive(Clone, Debug, Serialize, Deserialize)] pub struct Config { + /// TODO(doc): @doitian pub 
signatures_threshold: usize, + /// TODO(doc): @doitian pub public_keys: Vec, } diff --git a/util/app-config/src/configs/notify.rs b/util/app-config/src/configs/notify.rs index b444a27094..f62e8f0f37 100644 --- a/util/app-config/src/configs/notify.rs +++ b/util/app-config/src/configs/notify.rs @@ -1,6 +1,9 @@ use serde::{Deserialize, Serialize}; +/// TODO(doc): @doitian #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct Config { + /// TODO(doc): @doitian pub new_block_notify_script: Option, + /// TODO(doc): @doitian pub network_alert_notify_script: Option, } diff --git a/util/app-config/src/configs/rpc.rs b/util/app-config/src/configs/rpc.rs index e5e74665aa..e5fec6ea5b 100644 --- a/util/app-config/src/configs/rpc.rs +++ b/util/app-config/src/configs/rpc.rs @@ -1,78 +1,109 @@ use serde::{Deserialize, Serialize}; +/// TODO(doc): @doitian #[derive(Clone, Debug, Copy, Eq, PartialEq, Serialize, Deserialize)] pub enum Module { + /// TODO(doc): @doitian Net, + /// TODO(doc): @doitian Chain, + /// TODO(doc): @doitian Miner, + /// TODO(doc): @doitian Pool, + /// TODO(doc): @doitian Experiment, + /// TODO(doc): @doitian Stats, + /// TODO(doc): @doitian Indexer, + /// TODO(doc): @doitian IntegrationTest, + /// TODO(doc): @doitian Alert, + /// TODO(doc): @doitian Subscription, + /// TODO(doc): @doitian Debug, } +/// TODO(doc): @doitian #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Config { + /// TODO(doc): @doitian pub listen_address: String, + /// TODO(doc): @doitian #[serde(default)] pub tcp_listen_address: Option, + /// TODO(doc): @doitian #[serde(default)] pub ws_listen_address: Option, + /// TODO(doc): @doitian pub max_request_body_size: usize, + /// TODO(doc): @doitian pub threads: Option, + /// TODO(doc): @doitian pub modules: Vec, - // Rejects txs with scripts that might trigger known bugs + /// Rejects txs with scripts that might trigger known bugs #[serde(default)] pub reject_ill_transactions: bool, + /// TODO(doc): 
@doitian #[serde(default)] pub enable_deprecated_rpc: bool, } impl Config { + /// TODO(doc): @doitian pub fn net_enable(&self) -> bool { self.modules.contains(&Module::Net) } + /// TODO(doc): @doitian pub fn chain_enable(&self) -> bool { self.modules.contains(&Module::Chain) } + /// TODO(doc): @doitian pub fn miner_enable(&self) -> bool { self.modules.contains(&Module::Miner) } + /// TODO(doc): @doitian pub fn pool_enable(&self) -> bool { self.modules.contains(&Module::Pool) } + /// TODO(doc): @doitian pub fn experiment_enable(&self) -> bool { self.modules.contains(&Module::Experiment) } + /// TODO(doc): @doitian pub fn stats_enable(&self) -> bool { self.modules.contains(&Module::Stats) } + /// TODO(doc): @doitian pub fn subscription_enable(&self) -> bool { self.modules.contains(&Module::Subscription) } + /// TODO(doc): @doitian pub fn indexer_enable(&self) -> bool { self.modules.contains(&Module::Indexer) } + /// TODO(doc): @doitian pub fn integration_test_enable(&self) -> bool { self.modules.contains(&Module::IntegrationTest) } + /// TODO(doc): @doitian pub fn alert_enable(&self) -> bool { self.modules.contains(&Module::Alert) } + /// TODO(doc): @doitian pub fn debug_enable(&self) -> bool { self.modules.contains(&Module::Debug) } diff --git a/util/app-config/src/configs/store.rs b/util/app-config/src/configs/store.rs index d987a1b900..dae3fb5e92 100644 --- a/util/app-config/src/configs/store.rs +++ b/util/app-config/src/configs/store.rs @@ -1,12 +1,19 @@ use serde::{Deserialize, Serialize}; +/// TODO(doc): @doitian #[derive(Copy, Clone, Serialize, Deserialize, Eq, PartialEq, Hash, Debug)] pub struct Config { + /// TODO(doc): @doitian pub header_cache_size: usize, + /// TODO(doc): @doitian pub cell_data_cache_size: usize, + /// TODO(doc): @doitian pub block_proposals_cache_size: usize, + /// TODO(doc): @doitian pub block_tx_hashes_cache_size: usize, + /// TODO(doc): @doitian pub block_uncles_cache_size: usize, + /// TODO(doc): @doitian pub cellbase_cache_size: 
usize, } diff --git a/util/app-config/src/configs/tx_pool.rs b/util/app-config/src/configs/tx_pool.rs index 35d84f6517..232e03aceb 100644 --- a/util/app-config/src/configs/tx_pool.rs +++ b/util/app-config/src/configs/tx_pool.rs @@ -15,21 +15,21 @@ const DEFAULT_MAX_ANCESTORS_COUNT: usize = 25; /// Transaction pool configuration #[derive(Copy, Clone, Debug, Serialize, Deserialize)] pub struct TxPoolConfig { - // Keep the transaction pool below mb + /// Keep the transaction pool below mb pub max_mem_size: usize, - // Keep the transaction pool below cycles + /// Keep the transaction pool below cycles pub max_cycles: Cycle, - // tx verify cache capacity + /// tx verify cache capacity pub max_verify_cache_size: usize, - // conflict tx cache capacity + /// conflict tx cache capacity pub max_conflict_cache_size: usize, - // committed transactions hash cache capacity + /// committed transactions hash cache capacity pub max_committed_txs_hash_cache_size: usize, - // txs with lower fee rate than this will not be relayed or be mined + /// txs with lower fee rate than this will not be relayed or be mined pub min_fee_rate: FeeRate, - // tx pool rejects txs that cycles greater than max_tx_verify_cycles + /// tx pool rejects txs that cycles greater than max_tx_verify_cycles pub max_tx_verify_cycles: Cycle, - // max ancestors size limit for a single tx + /// max ancestors size limit for a single tx pub max_ancestors_count: usize, } @@ -48,10 +48,15 @@ impl Default for TxPoolConfig { } } +/// TODO(doc): @doitian #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BlockAssemblerConfig { + /// TODO(doc): @doitian pub code_hash: H256, + /// TODO(doc): @doitian pub hash_type: ScriptHashType, + /// TODO(doc): @doitian pub args: JsonBytes, + /// TODO(doc): @doitian pub message: JsonBytes, } diff --git a/util/app-config/src/exit_code.rs b/util/app-config/src/exit_code.rs index 0866afbc7b..4c97ec03e7 100644 --- a/util/app-config/src/exit_code.rs +++ 
b/util/app-config/src/exit_code.rs @@ -4,13 +4,18 @@ use std::io; #[repr(i32)] #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum ExitCode { + /// TODO(doc): @doitian Cli = 64, + /// TODO(doc): @doitian Config = 65, + /// TODO(doc): @doitian IO = 66, + /// TODO(doc): @doitian Failure = 113, } impl ExitCode { + /// TODO(doc): @doitian pub fn into(self) -> i32 { self as i32 } diff --git a/util/app-config/src/lib.rs b/util/app-config/src/lib.rs index 713fb338c2..9791b7eadd 100644 --- a/util/app-config/src/lib.rs +++ b/util/app-config/src/lib.rs @@ -1,3 +1,4 @@ +//! TODO(doc): @doitian mod app_config; mod args; pub mod cli; @@ -18,13 +19,18 @@ use ckb_jsonrpc_types::ScriptHashType; use clap::{value_t, ArgMatches, ErrorKind}; use std::path::PathBuf; +/// TODO(doc): @doitian pub struct Setup { + /// TODO(doc): @doitian pub subcommand_name: String, + /// TODO(doc): @doitian pub config: AppConfig, + /// TODO(doc): @doitian pub is_sentry_enabled: bool, } impl Setup { + /// TODO(doc): @doitian pub fn from_matches<'m>(matches: &ArgMatches<'m>) -> Result { let subcommand_name = match matches.subcommand_name() { Some(subcommand_name) => subcommand_name, @@ -45,6 +51,7 @@ impl Setup { }) } + /// TODO(doc): @doitian pub fn run<'m>(self, matches: &ArgMatches<'m>) -> Result { let consensus = self.consensus()?; let config = self.config.into_ckb()?; @@ -56,12 +63,14 @@ impl Setup { }) } + /// TODO(doc): @doitian pub fn migrate<'m>(self, _matches: &ArgMatches<'m>) -> Result { let config = self.config.into_ckb()?; Ok(MigrateArgs { config }) } + /// TODO(doc): @doitian pub fn miner<'m>(self, matches: &ArgMatches<'m>) -> Result { let spec = self.chain_spec()?; let memory_tracker = self.config.memory_tracker().to_owned(); @@ -83,6 +92,7 @@ impl Setup { }) } + /// TODO(doc): @doitian pub fn replay<'m>(self, matches: &ArgMatches<'m>) -> Result { let consensus = self.consensus()?; let config = self.config.into_ckb()?; @@ -106,6 +116,7 @@ impl Setup { }) } + /// TODO(doc): @doitian pub fn 
stats<'m>(self, matches: &ArgMatches<'m>) -> Result { let consensus = self.consensus()?; let config = self.config.into_ckb()?; @@ -121,6 +132,7 @@ impl Setup { }) } + /// TODO(doc): @doitian pub fn import<'m>(self, matches: &ArgMatches<'m>) -> Result { let consensus = self.consensus()?; let config = self.config.into_ckb()?; @@ -133,6 +145,7 @@ impl Setup { }) } + /// TODO(doc): @doitian pub fn export<'m>(self, matches: &ArgMatches<'m>) -> Result { let consensus = self.consensus()?; let config = self.config.into_ckb()?; @@ -145,6 +158,7 @@ impl Setup { }) } + /// TODO(doc): @doitian pub fn init<'m>(matches: &ArgMatches<'m>) -> Result { if matches.is_present("list-specs") { eprintln!( @@ -209,6 +223,7 @@ impl Setup { }) } + /// TODO(doc): @doitian pub fn reset_data<'m>(self, matches: &ArgMatches<'m>) -> Result { let config = self.config.into_ckb()?; let data_dir = config.data_dir; @@ -248,6 +263,7 @@ impl Setup { }) } + /// TODO(doc): @doitian pub fn root_dir_from_matches<'m>(matches: &ArgMatches<'m>) -> Result { let config_dir = match matches.value_of(cli::ARG_CONFIG_DIR) { Some(arg_config_dir) => PathBuf::from(arg_config_dir), @@ -271,6 +287,7 @@ impl Setup { result } + /// TODO(doc): @doitian pub fn consensus(&self) -> Result { let result = consensus_from_spec(&self.chain_spec()?); @@ -285,6 +302,7 @@ impl Setup { result } + /// TODO(doc): @doitian pub fn peer_id<'m>(matches: &ArgMatches<'m>) -> Result { let path = matches.value_of(cli::ARG_SECRET_PATH).unwrap(); match read_secret_key(path.into()) { @@ -296,6 +314,7 @@ impl Setup { } } + /// TODO(doc): @doitian pub fn gen<'m>(matches: &ArgMatches<'m>) -> Result<(), ExitCode> { let path = matches.value_of(cli::ARG_SECRET_PATH).unwrap(); configs::write_secret_to_file(&configs::generate_random_key(), path.into()) @@ -303,6 +322,7 @@ impl Setup { } } +/// TODO(doc): @doitian // There are two types of errors, // parse failures and those where the argument wasn't present #[macro_export] diff --git 
a/util/build-info/Cargo.toml b/util/build-info/Cargo.toml index 599e0d1fde..fb55a85c2e 100644 --- a/util/build-info/Cargo.toml +++ b/util/build-info/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @doitian crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/build-info/src/lib.rs b/util/build-info/src/lib.rs index 6d386a0a44..5cc4c9f10c 100644 --- a/util/build-info/src/lib.rs +++ b/util/build-info/src/lib.rs @@ -1,16 +1,49 @@ -// some code taken and adapted from RLS and cargo +//! The crate `ckb-build-info` generates CKB version from the build environment. + +/// CKB version #[derive(Debug, Default, Clone)] pub struct Version { + /// The major version. + /// + /// It is the x in `x.y.z`. pub major: u8, + /// The minor version. + /// + /// It is the y in `x.y.z`. pub minor: u8, + /// The patch version. + /// + /// It is the z in `x.y.z`. pub patch: u16, + /// The pre-release version. + /// + /// It is the part starting with `-`. + /// + /// ## Examples + /// + /// * `v1.2.3`: `dash_pre` is "" + /// * `v1.2.3-rc1`: `dash_pre` is "-rc1" pub dash_pre: String, + /// A nickname of the version. pub code_name: Option, + /// The SHA of the last Git commit. + /// + /// See [`get_commit_describe`] how to get it. + /// + /// [`get_commit_describe`]: fn.get_commit_describe.html pub commit_describe: Option, + /// The commit date of the last Git commit. + /// + /// See [`get_commit_date`] how to get it. + /// + /// [`get_commit_date`]: fn.get_commit_date.html pub commit_date: Option, } impl Version { + /// Returns short representation of the version. + /// + /// It returns version in format like `x.y.z` or `x.y.z-pre`. 
pub fn short(&self) -> String { format!( "{}.{}.{}{}", @@ -18,14 +51,23 @@ impl Version { ) } + /// Returns full representation of the version. + /// + /// It adds extra information after the short version in parentheses, for example: + /// + /// `0.36.0 (7692751 2020-09-21)` pub fn long(&self) -> String { self.to_string() } + /// Tells whether this is a pre-release version. pub fn is_pre(&self) -> bool { self.dash_pre != "" } + /// Tells whether this version is built from a dirty git working directory. + /// + /// The dirty version is built from the source code which has uncommitted changes. pub fn is_dirty(&self) -> bool { if let Some(describe) = &self.commit_describe { describe.ends_with("-dirty") @@ -58,6 +100,9 @@ impl Version { } } +/// Gets the field [`commit_describe`] via Git. +/// +/// [`commit_describe`]: struct.Version.html#structfield.commit_describe pub fn get_commit_describe() -> Option { std::process::Command::new("git") .args(&[ @@ -77,6 +122,9 @@ pub fn get_commit_describe() -> Option { }) } +/// Gets the field [`commit_date`] via Git. +/// +/// [`commit_date`]: struct.Version.html#structfield.commit_date pub fn get_commit_date() -> Option { std::process::Command::new("git") .env("TZ", "UTC") diff --git a/util/chain-iter/Cargo.toml b/util/chain-iter/Cargo.toml index 18e0623951..1e99336b4f 100644 --- a/util/chain-iter/Cargo.toml +++ b/util/chain-iter/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @quake crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/chain-iter/src/lib.rs b/util/chain-iter/src/lib.rs index 8254f12e79..2d6ee66ede 100644 --- a/util/chain-iter/src/lib.rs +++ b/util/chain-iter/src/lib.rs @@ -1,6 +1,8 @@ +//!
TODO(doc): @quake use ckb_store::ChainStore; use ckb_types::{core::BlockNumber, core::BlockView}; +/// TODO(doc): @quake // An iterator over the entries of a `Chain`. pub struct ChainIterator<'a, S: ChainStore<'a>> { store: &'a S, @@ -9,6 +11,7 @@ pub struct ChainIterator<'a, S: ChainStore<'a>> { } impl<'a, S: ChainStore<'a>> ChainIterator<'a, S> { + /// TODO(doc): @quake pub fn new(store: &'a S) -> Self { let current = store.get_block_hash(0).and_then(|h| store.get_block(&h)); let tip = store.get_tip_header().expect("store inited").number(); @@ -19,10 +22,12 @@ impl<'a, S: ChainStore<'a>> ChainIterator<'a, S> { } } + /// TODO(doc): @quake pub fn len(&self) -> u64 { self.tip + 1 } + /// TODO(doc): @quake // we always have genesis, this function may be meaningless // but for convention, mute len-without-is-empty lint pub fn is_empty(&self) -> bool { diff --git a/util/channel/Cargo.toml b/util/channel/Cargo.toml index b5e4ab5d52..75b4430add 100644 --- a/util/channel/Cargo.toml +++ b/util/channel/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @doitian crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/channel/src/lib.rs b/util/channel/src/lib.rs index c2d25777d6..fcb64a9eb5 100644 --- a/util/channel/src/lib.rs +++ b/util/channel/src/lib.rs @@ -1,3 +1,4 @@ +//! Reexports `crossbeam_channel` to uniform the dependency version. 
pub use crossbeam_channel::{ bounded, select, unbounded, Receiver, RecvError, RecvTimeoutError, Sender, }; diff --git a/util/crypto/Cargo.toml b/util/crypto/Cargo.toml index 31fd9a5701..ee78f2ab4f 100644 --- a/util/crypto/Cargo.toml +++ b/util/crypto/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @zhangsoledad crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/crypto/src/lib.rs b/util/crypto/src/lib.rs index f065f1a208..b805b345d3 100644 --- a/util/crypto/src/lib.rs +++ b/util/crypto/src/lib.rs @@ -1,2 +1,4 @@ +//! TODO(doc): @zhangsoledad + #[cfg(feature = "secp")] pub mod secp; diff --git a/util/crypto/src/secp/error.rs b/util/crypto/src/secp/error.rs index de5a5cfc44..88fd003a97 100644 --- a/util/crypto/src/secp/error.rs +++ b/util/crypto/src/secp/error.rs @@ -1,18 +1,25 @@ use failure::Fail; use secp256k1::Error as SecpError; +/// TODO(doc): @zhangsoledad #[derive(Debug, PartialEq, Eq, Fail)] pub enum Error { + /// TODO(doc): @zhangsoledad #[fail(display = "invalid privkey")] InvalidPrivKey, + /// TODO(doc): @zhangsoledad #[fail(display = "invalid pubkey")] InvalidPubKey, + /// TODO(doc): @zhangsoledad #[fail(display = "invalid signature")] InvalidSignature, + /// TODO(doc): @zhangsoledad #[fail(display = "invalid message")] InvalidMessage, + /// TODO(doc): @zhangsoledad #[fail(display = "invalid recovery_id")] InvalidRecoveryId, + /// TODO(doc): @zhangsoledad #[fail(display = "{}", _0)] Other(String), } diff --git a/util/crypto/src/secp/generator.rs b/util/crypto/src/secp/generator.rs index 0f8b01af7d..4de6f9dd20 100644 --- a/util/crypto/src/secp/generator.rs +++ b/util/crypto/src/secp/generator.rs @@ -4,6 +4,7 @@ use super::SECP256K1; use rand::{self, Rng, SeedableRng}; use secp256k1::{PublicKey, SecretKey}; +/// TODO(doc): 
@zhangsoledad pub struct Generator { rng: Box, } @@ -15,6 +16,7 @@ impl Default for Generator { } impl Generator { + /// TODO(doc): @zhangsoledad pub fn new() -> Self { let rng = rand::thread_rng(); Generator { rng: Box::new(rng) } @@ -36,10 +38,12 @@ impl Generator { } } + /// TODO(doc): @zhangsoledad pub fn gen_privkey(&mut self) -> Privkey { self.gen_secret_key().into() } + /// TODO(doc): @zhangsoledad pub fn gen_keypair(&mut self) -> (Privkey, Pubkey) { let secret_key = self.gen_secret_key(); let pubkey = PublicKey::from_secret_key(&*SECP256K1, &secret_key); @@ -47,14 +51,17 @@ impl Generator { (secret_key.into(), pubkey.into()) } + /// TODO(doc): @zhangsoledad pub fn random_privkey() -> Privkey { Generator::new().gen_privkey() } + /// TODO(doc): @zhangsoledad pub fn random_keypair() -> (Privkey, Pubkey) { Generator::new().gen_keypair() } + /// TODO(doc): @zhangsoledad pub fn random_secret_key() -> SecretKey { Generator::new().gen_secret_key() } diff --git a/util/crypto/src/secp/mod.rs b/util/crypto/src/secp/mod.rs index a27dca35a7..377025a47d 100644 --- a/util/crypto/src/secp/mod.rs +++ b/util/crypto/src/secp/mod.rs @@ -1,9 +1,13 @@ +//! TODO(doc): @zhangsoledad + use ckb_fixed_hash::H256; use lazy_static::lazy_static; +/// TODO(doc): @zhangsoledad pub type Message = H256; lazy_static! { + /// TODO(doc): @zhangsoledad pub static ref SECP256K1: secp256k1::Secp256k1 = secp256k1::Secp256k1::new(); } diff --git a/util/crypto/src/secp/privkey.rs b/util/crypto/src/secp/privkey.rs index aa010dc834..7a650988cc 100644 --- a/util/crypto/src/secp/privkey.rs +++ b/util/crypto/src/secp/privkey.rs @@ -7,6 +7,7 @@ use secp256k1::Message as SecpMessage; use std::str::FromStr; use std::{ptr, sync::atomic}; +/// TODO(doc): @zhangsoledad #[derive(Clone, Eq, PartialEq)] pub struct Privkey { /// ECDSA key. 
@@ -25,6 +26,7 @@ impl Privkey { Ok(Signature::from_compact(rec_id, data)) } + /// TODO(doc): @zhangsoledad pub fn pubkey(&self) -> Result { let context = &SECP256K1; let privkey = key::SecretKey::from_slice(self.inner.as_bytes())?; @@ -32,6 +34,7 @@ impl Privkey { Ok(Pubkey::from(pubkey)) } + /// TODO(doc): @zhangsoledad pub fn from_slice(key: &[u8]) -> Self { assert_eq!(32, key.len(), "should provide 32-byte length slice"); diff --git a/util/crypto/src/secp/pubkey.rs b/util/crypto/src/secp/pubkey.rs index 8f022296ca..9e69d564f8 100644 --- a/util/crypto/src/secp/pubkey.rs +++ b/util/crypto/src/secp/pubkey.rs @@ -7,6 +7,7 @@ use secp256k1::key; use secp256k1::Message as SecpMessage; use std::{fmt, ops}; +/// TODO(doc): @zhangsoledad #[derive(Debug, Eq, PartialEq, Hash, Clone)] pub struct Pubkey { inner: H512, @@ -34,6 +35,7 @@ impl Pubkey { Ok(()) } + /// TODO(doc): @zhangsoledad pub fn serialize(&self) -> Vec { // non-compressed key prefix 4 let prefix_key: [u8; 65] = { @@ -45,6 +47,7 @@ impl Pubkey { Vec::from(&pubkey.serialize()[..]) } + /// TODO(doc): @zhangsoledad pub fn from_slice(data: &[u8]) -> Result { Ok(key::PublicKey::from_slice(data)?.into()) } diff --git a/util/crypto/src/secp/signature.rs b/util/crypto/src/secp/signature.rs index a0a494dfbf..5e145dba94 100644 --- a/util/crypto/src/secp/signature.rs +++ b/util/crypto/src/secp/signature.rs @@ -9,6 +9,7 @@ use secp256k1::Message as SecpMessage; use std::fmt; use std::str::FromStr; +/// TODO(doc): @zhangsoledad //RecoverableSignature compact serialize #[derive(Clone)] pub struct Signature([u8; 65]); @@ -32,6 +33,7 @@ impl Signature { self.0[64] } + /// TODO(doc): @zhangsoledad pub fn from_compact(rec_id: RecoveryId, ret: [u8; 64]) -> Self { let mut data = [0; 65]; data[0..64].copy_from_slice(&ret[0..64]); @@ -48,6 +50,7 @@ impl Signature { Signature(sig) } + /// TODO(doc): @zhangsoledad pub fn from_slice(data: &[u8]) -> Result { if data.len() != 65 { return Err(Error::InvalidSignature); @@ -97,10 +100,12 
@@ impl Signature { Ok(pubkey.into()) } + /// TODO(doc): @zhangsoledad pub fn serialize(&self) -> Vec { Vec::from(&self.0[..]) } + /// TODO(doc): @zhangsoledad pub fn serialize_der(&self) -> Vec { self.to_recoverable() .unwrap() diff --git a/util/dao/Cargo.toml b/util/dao/Cargo.toml index 86c7525d54..884a48206a 100644 --- a/util/dao/Cargo.toml +++ b/util/dao/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @keroro520 crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/dao/src/lib.rs b/util/dao/src/lib.rs index 7b5537cea7..16aef31cb1 100644 --- a/util/dao/src/lib.rs +++ b/util/dao/src/lib.rs @@ -1,3 +1,4 @@ +//! TODO(doc): @keroro520 use byteorder::{ByteOrder, LittleEndian}; use ckb_chain_spec::consensus::Consensus; use ckb_dao_utils::{extract_dao_data, pack_dao_data, DaoError}; @@ -16,13 +17,18 @@ use ckb_types::{ use std::collections::HashSet; use std::convert::TryFrom; +/// TODO(doc): @keroro520 pub struct DaoCalculator<'a, CS, DL> { + /// TODO(doc): @keroro520 pub consensus: &'a Consensus, + /// TODO(doc): @keroro520 pub store: &'a CS, + /// TODO(doc): @keroro520 pub data_loader: DL, } impl<'a, CS: ChainStore<'a>> DaoCalculator<'a, CS, DataLoaderWrapper<'a, CS>> { + /// TODO(doc): @keroro520 pub fn new(consensus: &'a Consensus, store: &'a CS) -> Self { let data_loader = DataLoaderWrapper::new(store); DaoCalculator { @@ -32,6 +38,7 @@ impl<'a, CS: ChainStore<'a>> DaoCalculator<'a, CS, DataLoaderWrapper<'a, CS>> { } } + /// TODO(doc): @keroro520 pub fn primary_block_reward(&self, target: &HeaderView) -> Result { let target_epoch = self .store @@ -42,6 +49,7 @@ impl<'a, CS: ChainStore<'a>> DaoCalculator<'a, CS, DataLoaderWrapper<'a, CS>> { target_epoch.block_reward(target.number()) } + /// TODO(doc): @keroro520 pub fn 
secondary_block_reward(&self, target: &HeaderView) -> Result { if target.number() == 0 { return Ok(Capacity::zero()); @@ -67,6 +75,7 @@ impl<'a, CS: ChainStore<'a>> DaoCalculator<'a, CS, DataLoaderWrapper<'a, CS>> { Ok(Capacity::shannons(reward)) } + /// TODO(doc): @keroro520 // Used for testing only. // // Notice unlike primary_block_reward and secondary_epoch_reward above, @@ -88,6 +97,7 @@ impl<'a, CS: ChainStore<'a>> DaoCalculator<'a, CS, DataLoaderWrapper<'a, CS>> { Ok(primary_block_reward.safe_add(secondary_block_reward)?) } + /// TODO(doc): @keroro520 pub fn dao_field( &self, rtxs: &[ResolvedTransaction], @@ -153,6 +163,7 @@ impl<'a, CS: ChainStore<'a>> DaoCalculator<'a, CS, DataLoaderWrapper<'a, CS>> { Ok(pack_dao_data(current_ar, current_c, current_s, current_u)) } + /// TODO(doc): @keroro520 pub fn maximum_withdraw( &self, out_point: &OutPoint, @@ -178,6 +189,7 @@ impl<'a, CS: ChainStore<'a>> DaoCalculator<'a, CS, DataLoaderWrapper<'a, CS>> { ) } + /// TODO(doc): @keroro520 pub fn transaction_fee(&self, rtx: &ResolvedTransaction) -> Result { let maximum_withdraw = self.transaction_maximum_withdraw(rtx)?; rtx.transaction diff --git a/util/dao/utils/Cargo.toml b/util/dao/utils/Cargo.toml index 6eb16c5b64..57aa8cdf1d 100644 --- a/util/dao/utils/Cargo.toml +++ b/util/dao/utils/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @keroro520 crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/dao/utils/src/error.rs b/util/dao/utils/src/error.rs index 6a2ed8ff51..9d3367e8e9 100644 --- a/util/dao/utils/src/error.rs +++ b/util/dao/utils/src/error.rs @@ -2,12 +2,18 @@ use ckb_error::{Error, ErrorKind}; use failure::Fail; use std::fmt::Display; +/// TODO(doc): @keroro520 #[derive(Fail, Debug, PartialEq, Clone, Eq, Display)] pub enum DaoError { + /// 
TODO(doc): @keroro520 InvalidHeader, + /// TODO(doc): @keroro520 InvalidOutPoint, + /// TODO(doc): @keroro520 InvalidDaoFormat, + /// TODO(doc): @keroro520 Overflow, + /// TODO(doc): @keroro520 ZeroC, } diff --git a/util/dao/utils/src/lib.rs b/util/dao/utils/src/lib.rs index 45bec8f829..b3b59d6431 100644 --- a/util/dao/utils/src/lib.rs +++ b/util/dao/utils/src/lib.rs @@ -1,3 +1,4 @@ +//! TODO(doc): @keroro520 #[macro_use] extern crate enum_display_derive; @@ -15,9 +16,11 @@ use std::collections::HashSet; pub use crate::error::DaoError; +/// TODO(doc): @keroro520 // This is multiplied by 10**16 to make sure we have enough precision. pub const DEFAULT_ACCUMULATED_RATE: u64 = 10_000_000_000_000_000; +/// TODO(doc): @keroro520 // NOTICE Used for testing only pub fn genesis_dao_data(txs: Vec<&TransactionView>) -> Result { genesis_dao_data_with_satoshi_gift( @@ -29,6 +32,7 @@ pub fn genesis_dao_data(txs: Vec<&TransactionView>) -> Result { ) } +/// TODO(doc): @keroro520 pub fn genesis_dao_data_with_satoshi_gift( txs: Vec<&TransactionView>, satoshi_pubkey_hash: &H160, @@ -95,6 +99,7 @@ pub fn genesis_dao_data_with_satoshi_gift( )) } +/// TODO(doc): @keroro520 pub fn extract_dao_data(dao: Byte32) -> Result<(u64, Capacity, Capacity, Capacity), Error> { let data = dao.raw_data(); let c = Capacity::shannons(LittleEndian::read_u64(&data[0..8])); @@ -104,6 +109,7 @@ pub fn extract_dao_data(dao: Byte32) -> Result<(u64, Capacity, Capacity, Capacit Ok((ar, c, s, u)) } +/// TODO(doc): @keroro520 pub fn pack_dao_data(ar: u64, c: Capacity, s: Capacity, u: Capacity) -> Byte32 { let mut buf = [0u8; 32]; LittleEndian::write_u64(&mut buf[0..8], c.as_u64()); diff --git a/util/fee-estimator/Cargo.toml b/util/fee-estimator/Cargo.toml index 69adc8037a..84a139aa25 100644 --- a/util/fee-estimator/Cargo.toml +++ b/util/fee-estimator/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" authors = ["Nervos Core Dev "] edition = "2018" license = "MIT" -description = "TODO(doc): crate description" 
+description = "TODO(doc): @doitian crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/fee-estimator/src/estimator.rs b/util/fee-estimator/src/estimator.rs index b6b00634fd..ce34423304 100644 --- a/util/fee-estimator/src/estimator.rs +++ b/util/fee-estimator/src/estimator.rs @@ -4,6 +4,7 @@ use ckb_logger::debug; use ckb_types::packed::Byte32; use std::collections::HashMap; +/// TODO(doc): @doitian pub const MAX_CONFIRM_BLOCKS: usize = 1000; const MIN_BUCKET_FEERATE: f64 = 1000f64; const MAX_BUCKET_FEERATE: f64 = 1e7; @@ -47,6 +48,7 @@ impl Default for Estimator { } impl Estimator { + /// TODO(doc): @doitian pub fn new() -> Self { let mut buckets = Vec::new(); let mut bucket_fee_boundary = MIN_BUCKET_FEERATE; diff --git a/util/fee-estimator/src/fee_rate.rs b/util/fee-estimator/src/fee_rate.rs index 5e68577c8f..9abeb5a340 100644 --- a/util/fee-estimator/src/fee_rate.rs +++ b/util/fee-estimator/src/fee_rate.rs @@ -8,6 +8,7 @@ pub struct FeeRate(u64); const KB: u64 = 1000; impl FeeRate { + /// TODO(doc): @doitian pub fn calculate(fee: Capacity, vbytes: usize) -> Self { if vbytes == 0 { return FeeRate::zero(); @@ -15,18 +16,22 @@ impl FeeRate { FeeRate::from_u64(fee.as_u64().saturating_mul(KB) / (vbytes as u64)) } + /// TODO(doc): @doitian pub const fn from_u64(fee_per_kb: u64) -> Self { FeeRate(fee_per_kb) } + /// TODO(doc): @doitian pub const fn as_u64(self) -> u64 { self.0 } + /// TODO(doc): @doitian pub const fn zero() -> Self { Self::from_u64(0) } + /// TODO(doc): @doitian pub fn fee(self, size: usize) -> Capacity { let fee = self.0.saturating_mul(size as u64) / KB; Capacity::shannons(fee) diff --git a/util/fee-estimator/src/lib.rs b/util/fee-estimator/src/lib.rs index ae1f131ad7..4b9edf48b6 100644 --- a/util/fee-estimator/src/lib.rs +++ b/util/fee-estimator/src/lib.rs @@ -1,3 +1,5 @@ +//! 
TODO(doc): @doitian + mod estimator; mod fee_rate; mod tx_confirm_stat; diff --git a/util/fixed-hash/Cargo.toml b/util/fixed-hash/Cargo.toml index 0694af31c1..cf20e33c2b 100644 --- a/util/fixed-hash/Cargo.toml +++ b/util/fixed-hash/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos "] edition = "2018" -description = "TODO(doc): crate description" +description = "Provide several simple fixed-sized hash data type and their static constructors." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/fixed-hash/core/Cargo.toml b/util/fixed-hash/core/Cargo.toml index 9e8dfce7f5..7465878b3d 100644 --- a/util/fixed-hash/core/Cargo.toml +++ b/util/fixed-hash/core/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos "] edition = "2018" -description = "TODO(doc): crate description" +description = "Provide several fixed-length binary data, aka fixed-sized hashes." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/fixed-hash/core/src/error.rs b/util/fixed-hash/core/src/error.rs index 9f04791f3c..7e7c6691ed 100644 --- a/util/fixed-hash/core/src/error.rs +++ b/util/fixed-hash/core/src/error.rs @@ -1,15 +1,29 @@ +//! Conversion errors. + use failure::Fail; +/// The associated error of [`FromStr`] which can be returned from parsing a string. +/// +/// [`FromStr`]: https://doc.rust-lang.org/std/str/trait.FromStr.html#associatedtype.Err #[derive(Debug, Fail)] pub enum FromStrError { + /// Invalid character. #[fail(display = "invalid character code `{}` at {}", chr, idx)] - InvalidCharacter { chr: u8, idx: usize }, + InvalidCharacter { + /// The value of the invalid character. + chr: u8, + /// The index of the invalid character. + idx: usize, + }, + /// Invalid length. 
#[fail(display = "invalid length: {}", _0)] InvalidLength(usize), } +/// The error which can be returned when converting a byte slice back into a Hash. #[derive(Debug, Fail)] pub enum FromSliceError { + /// Invalid length. #[fail(display = "invalid length: {}", _0)] InvalidLength(usize), } diff --git a/util/fixed-hash/core/src/impls.rs b/util/fixed-hash/core/src/impls.rs index 6febab994d..58e1890d63 100644 --- a/util/fixed-hash/core/src/impls.rs +++ b/util/fixed-hash/core/src/impls.rs @@ -3,10 +3,12 @@ use crate::{error::FromSliceError, H160, H256, H512, H520}; macro_rules! impl_methods { ($name:ident, $bytes_size:expr) => { impl $name { + /// Converts `Self` to a byte slice. #[inline] pub fn as_bytes(&self) -> &[u8] { &self.0[..] } + /// Converts a byte slice back into `Self`. #[inline] pub fn from_slice(input: &[u8]) -> Result { if input.len() != $bytes_size { diff --git a/util/fixed-hash/core/src/lib.rs b/util/fixed-hash/core/src/lib.rs index b56cdd1c11..d17f8180e2 100644 --- a/util/fixed-hash/core/src/lib.rs +++ b/util/fixed-hash/core/src/lib.rs @@ -1,3 +1,15 @@ +//! Provide several fixed-length binary data, aka fixed-sized hashes. +//! +//! # Notice +//! +//! **This is an internal crate used by crate [`ckb_fixed_hash`], do not use this crate directly.** +//! +//! All structs and the module [`error`](error/index.html) in this crate are re-exported in crate [`ckb_fixed_hash`]. +//! +//! And you can find examples in crate [`ckb_fixed_hash`]. +//! +//! [`ckb_fixed_hash`]: ../ckb_fixed_hash/index.html + pub mod error; mod impls; @@ -16,6 +28,7 @@ mod std_str; /// In JSONRPC, it is encoded as a 0x-prefixed hex string. #[derive(Clone)] pub struct H160(pub [u8; 20]); + /// The 32-byte fixed-length binary data. /// /// The name comes from the number of bits in the data. @@ -23,6 +36,7 @@ pub struct H160(pub [u8; 20]); /// In JSONRPC, it is encoded as a 0x-prefixed hex string. #[derive(Clone)] pub struct H256(pub [u8; 32]); + /// The 64-byte fixed-length binary data.
/// /// The name comes from the number of bits in the data. @@ -30,6 +44,7 @@ pub struct H256(pub [u8; 32]); /// In JSONRPC, it is encoded as a 0x-prefixed hex string. #[derive(Clone)] pub struct H512(pub [u8; 64]); + /// The 65-byte fixed-length binary data. /// /// The name comes from the number of bits in the data. diff --git a/util/fixed-hash/core/src/std_str.rs b/util/fixed-hash/core/src/std_str.rs index 121d5bc0a4..b84aa489f8 100644 --- a/util/fixed-hash/core/src/std_str.rs +++ b/util/fixed-hash/core/src/std_str.rs @@ -76,8 +76,50 @@ macro_rules! impl_std_str_fromstr { } macro_rules! impl_from_trimmed_str { - ($name:ident, $bytes_size:expr) => { + ($name:ident, $bytes_size:expr, $use_stmt:expr, $bytes_size_stmt:expr) => { impl $name { + /// To convert a trimmed hexadecimal string into `Self`. + /// + /// If the beginning of a hexadecimal string are one or more zeros, then these zeros + /// should be omitted. + /// + /// There should be only one zero at the beginning of a hexadecimal string at most. + /// + /// For example, if `x` is `H16` (a 16 bits binary data): + /// - when `x = [0, 0]`, the trimmed hexadecimal string should be "0" or "". + /// - when `x = [0, 1]`, the trimmed hexadecimal string should be "1". + /// - when `x = [1, 0]`, the trimmed hexadecimal string should be "100". 
+ /// + /// ```rust + #[doc = $use_stmt] + #[doc = $bytes_size_stmt] + /// + /// let mut inner = [0u8; bytes_size]; + /// + /// { + /// let actual = Hash(inner.clone()); + /// let expected1 = Hash::from_trimmed_str("").unwrap(); + /// let expected2 = Hash::from_trimmed_str("0").unwrap(); + /// assert_eq!(actual, expected1); + /// assert_eq!(actual, expected2); + /// } + /// + /// { + /// inner[bytes_size - 1] = 1; + /// let actual = Hash(inner); + /// let expected = Hash::from_trimmed_str("1").unwrap(); + /// assert_eq!(actual, expected); + /// } + /// + /// { + /// assert!(Hash::from_trimmed_str("00").is_err()); + /// assert!(Hash::from_trimmed_str("000").is_err()); + /// assert!(Hash::from_trimmed_str("0000").is_err()); + /// assert!(Hash::from_trimmed_str("01").is_err()); + /// assert!(Hash::from_trimmed_str("001").is_err()); + /// assert!(Hash::from_trimmed_str("0001").is_err()); + /// } + /// ``` pub fn from_trimmed_str(input: &str) -> Result { let bytes = input.as_bytes(); let len = bytes.len(); @@ -119,6 +161,14 @@ macro_rules! impl_from_trimmed_str { } } }; + ($name:ident, $bytes_size:expr) => { + impl_from_trimmed_str!( + $name, + $bytes_size, + concat!("use ckb_fixed_hash_core::", stringify!($name), " as Hash;"), + concat!("const bytes_size: usize = ", stringify!($bytes_size), ";") + ); + } } impl_std_str_fromstr!(H160, 20); diff --git a/util/fixed-hash/macros/Cargo.toml b/util/fixed-hash/macros/Cargo.toml index d259ef218d..eea03a019d 100644 --- a/util/fixed-hash/macros/Cargo.toml +++ b/util/fixed-hash/macros/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos "] edition = "2018" -description = "TODO(doc): crate description" +description = "Provide several proc-macros to construct const fixed-sized hashes." 
homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/fixed-hash/macros/src/lib.rs b/util/fixed-hash/macros/src/lib.rs index 56ff11f836..7da0c67ea4 100644 --- a/util/fixed-hash/macros/src/lib.rs +++ b/util/fixed-hash/macros/src/lib.rs @@ -1,3 +1,24 @@ +//! Provide several proc-macros to construct const fixed-sized hashes. +//! +//! If we use an array to construct const fixed-sized hashes, it's difficult to read. +//! +//! If we use [`FromStr::from_str`] to construct fixed-sized hashes, the result is not a constant. +//! So, it will reduce runtime performance. And it could cause a runtime error if the input is malformed. +//! +//! With proc-macros, we can construct human-readable const fixed-sized hashes. +//! And it will be checked in compile time, it could never cause any runtime error. +//! +//! # Notice +//! +//! **This is an internal crate used by crate [`ckb_fixed_hash`], do not use this crate directly.** +//! +//! All proc-macros in this crate are re-exported in crate [`ckb_fixed_hash`]. +//! +//! And you can found examples in crate [`ckb_fixed_hash`]. +//! +//! [`FromStr::from_str`]: https://doc.rust-lang.org/std/str/trait.FromStr.html#tymethod.from_str +//! [`ckb_fixed_hash`]: ../ckb_fixed_hash/index.html + extern crate proc_macro; use std::str::FromStr; @@ -6,7 +27,13 @@ use quote::quote; use syn::parse_macro_input; macro_rules! impl_hack { - ($name:ident, $type:ident) => { + ($name:ident, $type:ident, $type_str:expr, $link_str:expr) => { + #[doc = "A proc-macro used to create a const [`"] + #[doc = $type_str] + #[doc = "`] from a hexadecimal string or a trimmed hexadecimal string.\n\n[`"] + #[doc = $type_str] + #[doc = "`]:"] + #[doc = $link_str] #[proc_macro] pub fn $name(input: proc_macro::TokenStream) -> proc_macro::TokenStream { let input = parse_macro_input!(input as syn::LitStr); @@ -40,6 +67,9 @@ macro_rules! 
impl_hack { expanded.into() } }; + ($name:ident, $type:ident) => { + impl_hack!($name, $type, stringify!($type), concat!("../ckb_fixed_hash_core/struct.", stringify!($type), ".html")); + } } impl_hack!(h160, H160); diff --git a/util/fixed-hash/src/lib.rs b/util/fixed-hash/src/lib.rs index 74fe9a6bc9..5daaa40872 100644 --- a/util/fixed-hash/src/lib.rs +++ b/util/fixed-hash/src/lib.rs @@ -1,2 +1,22 @@ +//! Provide several simple fixed-sized hash data type and their static constructors. +//! +//! # Example +//! +//! ```rust +//! use ckb_fixed_hash::{H256, h256}; +//! +//! const N1: H256 = h256!("0xffffffff_ffffffff_ffffffff_fffffffe_baaedce6_af48a03b_bfd25e8c_d0364141"); +//! const N2: H256 = H256([ +//! 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +//! 0xfe, 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, +//! 0x41, 0x41 +//! ]); +//! assert_eq!(N1, N2); +//! +//! const ONE1: H256 = h256!("0x1"); +//! const ONE2: H256 = H256([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]); +//! assert_eq!(ONE1, ONE2); +//! ``` + pub use ckb_fixed_hash_core::{error, H160, H256, H512, H520}; pub use ckb_fixed_hash_macros::{h160, h256, h512, h520}; diff --git a/util/hash/Cargo.toml b/util/hash/Cargo.toml index 988e5f08cb..0d10a13d22 100644 --- a/util/hash/Cargo.toml +++ b/util/hash/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @doitian crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/hash/src/lib.rs b/util/hash/src/lib.rs index 6dc9fe2b02..cfa630823b 100644 --- a/util/hash/src/lib.rs +++ b/util/hash/src/lib.rs @@ -1,19 +1,65 @@ +//! CKB default hash function. +//! +//! 
CKB uses [blake2b] with following configurations as the default hash function. +//! +//! * output digest size: 32 +//! * personalization: ckb-default-hash +//! +//! [blake2b]: https://blake2.net/blake2.pdf pub use blake2b_rs::{Blake2b, Blake2bBuilder}; +#[doc(hidden)] pub const BLAKE2B_KEY: &[u8] = &[]; +/// Output digest size. pub const BLAKE2B_LEN: usize = 32; +/// Blake2b personalization. pub const CKB_HASH_PERSONALIZATION: &[u8] = b"ckb-default-hash"; +/// The hash output on empty input. +/// +/// ## Examples +/// +/// ``` +/// use ckb_hash::{BLANK_HASH, blake2b_256}; +/// +/// assert_eq!(BLANK_HASH, blake2b_256(&b"")); +/// ``` pub const BLANK_HASH: [u8; 32] = [ 68, 244, 198, 151, 68, 213, 248, 197, 93, 100, 32, 98, 148, 157, 202, 228, 155, 196, 231, 239, 67, 211, 136, 197, 161, 47, 66, 181, 99, 61, 22, 62, ]; +/// Creates a new hasher. +/// +/// This can be used to hash inputs incrementally. +/// +/// ## Examples +/// +/// ``` +/// use ckb_hash::new_blake2b; +/// +/// let mut hasher = new_blake2b(); +/// hasher.update(&b"left|"[..]); +/// hasher.update(&b"right"[..]); +/// let mut result = [0u8; 32]; +/// hasher.finalize(&mut result); // Saves hash result +/// ``` pub fn new_blake2b() -> Blake2b { Blake2bBuilder::new(32) .personal(CKB_HASH_PERSONALIZATION) .build() } +/// Hashes the slice of binary and returns the digest. 
+/// +/// ## Examples +/// +/// ``` +/// use ckb_hash::blake2b_256; +/// +/// let input = b"ckb"; +/// let digest = blake2b_256(&input); +/// println!("ckbhash({:?}) = {:?}", input, digest); +/// ``` pub fn blake2b_256>(s: T) -> [u8; 32] { if s.as_ref().is_empty() { return BLANK_HASH; diff --git a/util/instrument/Cargo.toml b/util/instrument/Cargo.toml index 6f79c3ff98..e6c0be5e40 100644 --- a/util/instrument/Cargo.toml +++ b/util/instrument/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @doitian crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/instrument/src/export.rs b/util/instrument/src/export.rs index b72a8057e1..634cb5dc1b 100644 --- a/util/instrument/src/export.rs +++ b/util/instrument/src/export.rs @@ -13,10 +13,12 @@ use std::path::PathBuf; pub struct Export { /// export target path pub target: PathBuf, + /// TODO(doc): @doitian pub shared: Shared, } impl Export { + /// TODO(doc): @doitian pub fn new(shared: Shared, target: PathBuf) -> Self { Export { shared, target } } @@ -26,6 +28,7 @@ impl Export { format!("{}.{}", self.shared.consensus().id, "json") } + /// TODO(doc): @doitian pub fn execute(self) -> Result<(), Box> { fs::create_dir_all(&self.target)?; self.write_to_json() @@ -50,6 +53,7 @@ impl Export { Ok(()) } + /// TODO(doc): @doitian #[cfg(feature = "progress_bar")] pub fn write_to_json(self) -> Result<(), Box> { let f = fs::OpenOptions::new() diff --git a/util/instrument/src/import.rs b/util/instrument/src/import.rs index f6eafd9055..15e422b994 100644 --- a/util/instrument/src/import.rs +++ b/util/instrument/src/import.rs @@ -18,10 +18,12 @@ pub struct Import { } impl Import { + /// TODO(doc): @doitian pub fn new(chain: ChainController, source: PathBuf) -> Self { Import { chain, source } } + /// TODO(doc): @doitian pub 
fn execute(self) -> Result<(), Box> { self.read_from_json() } @@ -44,6 +46,7 @@ impl Import { Ok(()) } + /// TODO(doc): @doitian #[cfg(feature = "progress_bar")] pub fn read_from_json(&self) -> Result<(), Box> { let metadata = fs::metadata(&self.source)?; diff --git a/util/jsonrpc-types/Cargo.toml b/util/jsonrpc-types/Cargo.toml index 9a33a78167..8a7630450b 100644 --- a/util/jsonrpc-types/Cargo.toml +++ b/util/jsonrpc-types/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @doitian crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/jsonrpc-types/src/blockchain.rs b/util/jsonrpc-types/src/blockchain.rs index 7fc8e92684..cc5d4bee11 100644 --- a/util/jsonrpc-types/src/blockchain.rs +++ b/util/jsonrpc-types/src/blockchain.rs @@ -567,6 +567,7 @@ pub struct TxStatus { } impl TxStatus { + /// TODO(doc): @doitian pub fn pending() -> Self { Self { status: Status::Pending, @@ -574,6 +575,7 @@ impl TxStatus { } } + /// TODO(doc): @doitian pub fn proposed() -> Self { Self { status: Status::Proposed, @@ -581,6 +583,7 @@ impl TxStatus { } } + /// TODO(doc): @doitian pub fn committed(hash: H256) -> Self { Self { status: Status::Committed, @@ -972,11 +975,14 @@ pub struct EpochView { /// It also equals the total count of blocks in all the epochs which epoch number is /// less than this epoch. pub start_number: BlockNumber, + /// The number of blocks in this epoch. pub length: BlockNumber, + /// The difficulty target for any block in this epoch. pub compact_target: Uint32, } impl EpochView { + /// TODO(doc): @doitian pub fn from_ext(ext: packed::EpochExt) -> EpochView { EpochView { number: ext.number().unpack(), @@ -1102,6 +1108,9 @@ impl From for core::MinerReward { } } +/// Block Economic State. 
+/// +/// It includes the rewards details and when it is finalized. #[derive(Clone, Default, Serialize, Deserialize, PartialEq, Eq, Hash, Debug)] pub struct BlockEconomicState { /// Block base rewards. diff --git a/util/jsonrpc-types/src/bytes.rs b/util/jsonrpc-types/src/bytes.rs index 44961d1a5f..1c96caecd8 100644 --- a/util/jsonrpc-types/src/bytes.rs +++ b/util/jsonrpc-types/src/bytes.rs @@ -17,27 +17,33 @@ use std::fmt; pub struct JsonBytes(Bytes); impl JsonBytes { + /// TODO(doc): @doitian pub fn from_bytes(bytes: Bytes) -> Self { JsonBytes(bytes) } + /// TODO(doc): @doitian pub fn from_vec(vec: Vec) -> Self { JsonBytes(Bytes::from(vec)) } + /// TODO(doc): @doitian pub fn into_bytes(self) -> Bytes { let JsonBytes(bytes) = self; bytes } + /// TODO(doc): @doitian pub fn len(&self) -> usize { self.0.len() } + /// TODO(doc): @doitian pub fn is_empty(&self) -> bool { 0 == self.len() } + /// TODO(doc): @doitian pub fn as_bytes(&self) -> &[u8] { &self.0 } diff --git a/util/jsonrpc-types/src/experiment.rs b/util/jsonrpc-types/src/experiment.rs index c6f1b26d4b..27a2f066cd 100644 --- a/util/jsonrpc-types/src/experiment.rs +++ b/util/jsonrpc-types/src/experiment.rs @@ -11,5 +11,6 @@ pub struct DryRunResult { /// The estimated fee rate. #[derive(Clone, Default, Serialize, Deserialize, PartialEq, Eq, Hash, Debug)] pub struct EstimateResult { + /// The estimated fee rate. pub fee_rate: FeeRate, } diff --git a/util/jsonrpc-types/src/fixed_bytes.rs b/util/jsonrpc-types/src/fixed_bytes.rs index ca70fef1e3..3e3b65fd9c 100644 --- a/util/jsonrpc-types/src/fixed_bytes.rs +++ b/util/jsonrpc-types/src/fixed_bytes.rs @@ -13,6 +13,7 @@ use std::fmt; pub struct Byte32(pub [u8; 32]); impl Byte32 { + /// TODO(doc): @doitian pub fn new(inner: [u8; 32]) -> Self { Byte32(inner) } diff --git a/util/jsonrpc-types/src/lib.rs b/util/jsonrpc-types/src/lib.rs index d4e3bf7a2d..429ac4ebd5 100644 --- a/util/jsonrpc-types/src/lib.rs +++ b/util/jsonrpc-types/src/lib.rs @@ -1,3 +1,4 @@ +//! 
Wrappers for JSON serialization. mod alert; mod block_template; mod blockchain; diff --git a/util/jsonrpc-types/src/pool.rs b/util/jsonrpc-types/src/pool.rs index bbf78c85c9..5d472a9a78 100644 --- a/util/jsonrpc-types/src/pool.rs +++ b/util/jsonrpc-types/src/pool.rs @@ -67,6 +67,7 @@ pub enum OutputsValidator { } impl OutputsValidator { + /// TODO(doc): @doitian pub fn json_display(&self) -> String { let v = serde_json::to_value(self).expect("OutputsValidator to JSON should never fail"); v.as_str().unwrap_or_default().to_string() diff --git a/util/jsonrpc-types/src/proposal_short_id.rs b/util/jsonrpc-types/src/proposal_short_id.rs index 2e72a71b90..4e86cc0ba7 100644 --- a/util/jsonrpc-types/src/proposal_short_id.rs +++ b/util/jsonrpc-types/src/proposal_short_id.rs @@ -13,10 +13,12 @@ use std::fmt; pub struct ProposalShortId(pub [u8; 10]); impl ProposalShortId { + /// TODO(doc): @doitian pub fn new(inner: [u8; 10]) -> ProposalShortId { ProposalShortId(inner) } + /// TODO(doc): @doitian pub fn into_inner(self) -> [u8; 10] { self.0 } diff --git a/util/jsonrpc-types/src/sync.rs b/util/jsonrpc-types/src/sync.rs index 79c9a5e221..fd12d9b29c 100644 --- a/util/jsonrpc-types/src/sync.rs +++ b/util/jsonrpc-types/src/sync.rs @@ -14,6 +14,7 @@ pub struct PeerState { } impl PeerState { + /// TODO(doc): @doitian pub fn new(peer: usize, last_updated: u64, blocks_in_flight: usize) -> Self { Self { peer: (peer as u32).into(), diff --git a/util/logger-config/Cargo.toml b/util/logger-config/Cargo.toml index 6eb1ef9672..262750fd61 100644 --- a/util/logger-config/Cargo.toml +++ b/util/logger-config/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos "] edition = "2018" -description = "TODO(doc): crate description" +description = "CKB logger configurations." 
homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/logger-config/src/lib.rs b/util/logger-config/src/lib.rs index 4843fc5c56..f738f610fd 100644 --- a/util/logger-config/src/lib.rs +++ b/util/logger-config/src/lib.rs @@ -1,24 +1,61 @@ +//! CKB logger configurations. +//! +//! This crate is used to configure the [CKB logger and logging service]. +//! +//! [CKB logger and logging service]: ../ckb_logger_service/index.html + use std::{collections::HashMap, path::PathBuf}; use serde::{Deserialize, Serialize}; +/// The whole CKB logger configuration. +/// +/// This struct is used to build [`Logger`]. +/// +/// Include configurations of the main logger and any number of extra loggers. +/// +/// [`Logger`]: ../ckb_logger_service/struct.Logger.html #[derive(Clone, Debug, Serialize, Deserialize)] pub struct Config { + /// An optional string which is used to build [env_logger::Filter] for the main logger. + /// + /// If the value is `None`, no [env_logger::Filter] will be used. + /// + /// [env_logger::Filter]: https://docs.rs/env_logger/*/env_logger/filter/struct.Filter.html pub filter: Option, + /// Colorize the output which was written into the stdout. pub color: bool, + /// The log file of the main loggger. #[serde(skip)] pub file: PathBuf, + /// The directory where to store all log files. #[serde(skip)] pub log_dir: PathBuf, + /// Output the log records of the main logger into a file or not. pub log_to_file: bool, + /// Output the log records of the main logger into the stdout or not. pub log_to_stdout: bool, + /// An optional bool to control whether or not emit [Sentry Breadcrumbs]. + /// + /// if the value is `None`, not emit [Sentry Breadcrumbs]. + /// + /// [Sentry Breadcrumbs]: https://sentry.io/features/breadcrumbs/ pub emit_sentry_breadcrumbs: Option, + /// Add extra loggers. #[serde(default)] pub extra: HashMap, } +/// The configuration of an extra CKB logger. 
+/// +/// This struct is used to build [`ExtraLogger`]. +/// +/// [`ExtraLogger`]: ../ckb_logger_service/struct.ExtraLogger.html #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ExtraLoggerConfig { + /// A string which is used to build [env_logger::Filter] for the extra logger. + /// + /// [env_logger::Filter]: https://docs.rs/env_logger/*/env_logger/filter/struct.Filter.html pub filter: String, } diff --git a/util/logger-service/Cargo.toml b/util/logger-service/Cargo.toml index 1cc6f48d99..b86b2ab471 100644 --- a/util/logger-service/Cargo.toml +++ b/util/logger-service/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos "] edition = "2018" -description = "TODO(doc): crate description" +description = "CKB logger and logging service." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/logger-service/src/lib.rs b/util/logger-service/src/lib.rs index 81f7073000..a374566323 100644 --- a/util/logger-service/src/lib.rs +++ b/util/logger-service/src/lib.rs @@ -1,3 +1,5 @@ +//! CKB logger and logging service. + use ansi_term::Colour; use backtrace::Backtrace; use chrono::prelude::{DateTime, Local}; @@ -34,6 +36,11 @@ enum Message { Terminate, } +/// The CKB logger which implements [log::Log]. +/// +/// When a CKB logger is created, a logging service will be started in a background thread. 
+/// +/// [log::Log]: https://docs.rs/log/*/log/trait.Log.html #[derive(Debug)] pub struct Logger { sender: ckb_channel::Sender, @@ -44,7 +51,7 @@ pub struct Logger { } #[derive(Debug)] -pub struct MainLogger { +struct MainLogger { file_path: PathBuf, file: Option, to_stdout: bool, @@ -53,7 +60,7 @@ pub struct MainLogger { } #[derive(Debug)] -pub struct ExtraLogger { +struct ExtraLogger { filter: Filter, } @@ -331,7 +338,7 @@ impl Logger { }) } - pub fn filter(&self) -> LevelFilter { + fn filter(&self) -> LevelFilter { Self::max_level_filter(&self.filter.read(), &self.extra_loggers.read()) } @@ -347,6 +354,7 @@ impl Logger { }) } + /// Updates the main logger. pub fn update_main_logger( filter_str: Option, to_stdout: Option, @@ -363,16 +371,19 @@ impl Logger { Self::send_message(message) } + /// Checks if the input extra logger name is valid. pub fn check_extra_logger_name(name: &str) -> Result<(), String> { strings::check_if_identifier_is_valid(name) } + /// Updates an extra logger through it's name. pub fn update_extra_logger(name: String, filter_str: String) -> Result<(), String> { let filter = Self::build_filter(&filter_str); let message = Message::UpdateExtraLogger(name, filter); Self::send_message(message) } + /// Removes an extra logger. pub fn remove_extra_logger(name: String) -> Result<(), String> { let message = Message::RemoveExtraLogger(name); Self::send_message(message) @@ -446,7 +457,7 @@ fn sanitize_color(s: &str) -> String { re.replace_all(s, "").to_string() } -/// Flush the logger when dropped +/// Flushes the logger when dropped. #[must_use] pub struct LoggerInitGuard; @@ -456,6 +467,7 @@ impl Drop for LoggerInitGuard { } } +/// Initializes the [Logger](struct.Logger.html) and run the logging service. pub fn init(config: Config) -> Result { setup_panic_logger(); @@ -467,6 +479,7 @@ pub fn init(config: Config) -> Result { }) } +/// Flushes any buffered records. 
pub fn flush() { log::logger().flush() } diff --git a/util/logger/Cargo.toml b/util/logger/Cargo.toml index 926e5ac247..e7487fdf0e 100644 --- a/util/logger/Cargo.toml +++ b/util/logger/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos "] edition = "2018" -description = "TODO(doc): crate description" +description = "CKB logging facade." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/logger/src/lib.rs b/util/logger/src/lib.rs index 2fe0d5ac92..a628af4c74 100644 --- a/util/logger/src/lib.rs +++ b/util/logger/src/lib.rs @@ -1,3 +1,14 @@ +//! CKB logging facade. +//! +//! This crate is a wrapper of the crate [`log`]. +//! +//! [`log`]: https://docs.rs/log/*/log/index.html +//! +//! The major issue of the crate `log` is that the macro like +//! `trace!(target: "global", "message")` is unfriendly to `cargo fmt`. So this +//! crate disallow using `target: ` in the basic logging macros and add another +//! group of macros to support both target and message, for example, +//! `trace_target!("global", "message")`. pub use log::{self as internal, Level, SetLoggerError}; #[doc(hidden)] @@ -8,6 +19,24 @@ macro_rules! env { } } +/// Logs a message at the trace level using the default target. +/// +/// This macro logs the message using the default target, the module path of +/// the location of the log request. See [`trace_target!`] which can override the +/// target. +/// +/// [`trace_target!`]: macro.trace_target.html +/// +/// # Examples +/// +/// ``` +/// use ckb_logger::trace; +/// +/// # struct Position { x: f32, y: f32 } +/// let pos = Position { x: 3.234, y: -1.223 }; +/// +/// trace!("Position is: x: {}, y: {}", pos.x, pos.y); +/// ``` #[macro_export(local_inner_macros)] macro_rules! trace { ($( $args:tt )*) => { @@ -15,6 +44,24 @@ macro_rules! trace { } } +/// Logs a message at the debug level using the default target. 
+/// +/// This macro logs the message using the default target, the module path of +/// the location of the log request. See [`debug_target!`] which can override the +/// target. +/// +/// [`debug_target!`]: macro.debug_target.html +/// +/// # Examples +/// +/// ``` +/// use ckb_logger::debug; +/// +/// # struct Position { x: f32, y: f32 } +/// let pos = Position { x: 3.234, y: -1.223 }; +/// +/// debug!("Position is: x: {}, y: {}", pos.x, pos.y); +/// ``` #[macro_export(local_inner_macros)] macro_rules! debug { ($( $args:tt )*) => { @@ -22,6 +69,26 @@ macro_rules! debug { } } +/// Logs a message at the info level using the default target. +/// +/// This macro logs the message using the default target, the module path of +/// the location of the log request. See [`info_target!`] which can override the +/// target. +/// +/// [`info_target!`]: macro.info_target.html +/// +/// # Examples +/// +/// ``` +/// use ckb_logger::info; +/// +/// # struct Connection { port: u32, speed: f32 } +/// let conn_info = Connection { port: 40, speed: 3.20 }; +/// +/// info!("Connected to port {} at {} Mb/s", conn_info.port, conn_info.speed); +/// info!(target: "connection_events", "Successfull connection, port: {}, speed: {}", +/// conn_info.port, conn_info.speed); +/// ``` #[macro_export(local_inner_macros)] macro_rules! info { ($( $args:tt )*) => { @@ -29,6 +96,23 @@ macro_rules! info { } } +/// Logs a message at the warn level using the default target. +/// +/// This macro logs the message using the default target, the module path of +/// the location of the log request. See [`warn_target!`] which can override the +/// target. +/// +/// [`warn_target!`]: macro.warn_target.html +/// +/// # Examples +/// +/// ``` +/// use ckb_logger::warn; +/// +/// let warn_description = "Invalid Input"; +/// +/// warn!("Warning! {}!", warn_description); +/// ``` #[macro_export(local_inner_macros)] macro_rules! warn { ($( $args:tt )*) => { @@ -36,6 +120,23 @@ macro_rules! 
warn { } } +/// Logs a message at the error level using the default target. +/// +/// This macro logs the message using the default target, the module path of +/// the location of the log request. See [`error_target!`] which can override the +/// target. +/// +/// [`error_target!`]: macro.error_target.html +/// +/// # Examples +/// +/// ``` +/// use ckb_logger::error; +/// +/// let (err_info, port) = ("No connection", 22); +/// +/// error!("Error: {} on port {}", err_info, port); +/// ``` #[macro_export(local_inner_macros)] macro_rules! error { ($( $args:tt )*) => { @@ -43,6 +144,29 @@ macro_rules! error { } } +/// Determines if a message logged at the specified level and with the default target will be logged. +/// +/// The default target is the module path of the location of the log request. +/// See also [`log_enabled_target!`] the version that supports checking arbitrary +/// target. +/// +/// [`log_enabled_target!`]: macro.log_enabled_target.html +/// +/// This can be used to avoid expensive computation of log message arguments if the message would be ignored anyway. +/// +/// ## Examples +/// +/// ``` +/// use ckb_logger::Level::Debug; +/// use ckb_logger::{debug, log_enabled}; +/// +/// # struct Data { x: u32, y: u32 } +/// # fn expensive_call() -> Data { Data { x: 0, y: 0 } } +/// if log_enabled!(Debug) { +/// let data = expensive_call(); +/// debug!("expensive debug data: {} {}", data.x, data.y); +/// } +/// ``` #[macro_export(local_inner_macros)] macro_rules! log_enabled { ($level:expr) => { @@ -50,6 +174,25 @@ macro_rules! log_enabled { }; } +/// Logs a message at the trace level using the specified target. +/// +/// This macro logs the message using the specified target. In the most +/// scenarios, the log message should just use the default target, which is the +/// module path of the location of the log request. See [`trace!`] which just logs +/// using the default target. 
+/// +/// [`trace!`]: macro.trace.html +/// +/// # Examples +/// +/// ``` +/// use ckb_logger::trace_target; +/// +/// # struct Position { x: f32, y: f32 } +/// let pos = Position { x: 3.234, y: -1.223 }; +/// +/// trace_target!("app_events", "Position is: x: {}, y: {}", pos.x, pos.y); +/// ``` #[macro_export(local_inner_macros)] macro_rules! trace_target { ($target:expr, $( $args:tt )*) => { @@ -57,6 +200,25 @@ macro_rules! trace_target { } } +/// Logs a message at the debug level using the specified target. +/// +/// This macro logs the message using the specified target. In the most +/// scenarios, the log message should just use the default target, which is the +/// module path of the location of the log request. See [`debug!`] which just logs +/// using the default target. +/// +/// [`debug!`]: macro.debug.html +/// +/// # Examples +/// +/// ``` +/// use ckb_logger::debug_target; +/// +/// # struct Position { x: f32, y: f32 } +/// let pos = Position { x: 3.234, y: -1.223 }; +/// +/// debug_target!("app_events", "Position is: x: {}, y: {}", pos.x, pos.y); +/// ``` #[macro_export(local_inner_macros)] macro_rules! debug_target { ($target:expr, $( $args:tt )*) => { @@ -64,6 +226,26 @@ macro_rules! debug_target { } } +/// Logs a message at the info level using the specified target. +/// +/// This macro logs the message using the specified target. In the most +/// scenarios, the log message should just use the default target, which is the +/// module path of the location of the log request. See [`info!`] which just logs +/// using the default target. +/// +/// [`info!`]: macro.info.html +/// +/// # Examples +/// +/// ``` +/// use ckb_logger::info_target; +/// +/// # struct Connection { port: u32, speed: f32 } +/// let conn_info = Connection { port: 40, speed: 3.20 }; +/// +/// info_target!("connection_events", "Successfull connection, port: {}, speed: {}", +/// conn_info.port, conn_info.speed); +/// ``` #[macro_export(local_inner_macros)] macro_rules! 
info_target { ($target:expr, $( $args:tt )*) => { @@ -71,6 +253,24 @@ macro_rules! info_target { } } +/// Logs a message at the warn level using the specified target. +/// +/// This macro logs the message using the specified target. In the most +/// scenarios, the log message should just use the default target, which is the +/// module path of the location of the log request. See [`warn!`] which just logs +/// using the default target. +/// +/// [`warn!`]: macro.warn.html +/// +/// # Examples +/// +/// ``` +/// use ckb_logger::warn_target; +/// +/// let warn_description = "Invalid Input"; +/// +/// warn_target!("input_events", "App received warning: {}", warn_description); +/// ``` #[macro_export(local_inner_macros)] macro_rules! warn_target { ($target:expr, $( $args:tt )*) => { @@ -78,6 +278,24 @@ macro_rules! warn_target { } } +/// Logs a message at the error level using the specified target. +/// +/// This macro logs the message using the specified target. In the most +/// scenarios, the log message should just use the default target, which is the +/// module path of the location of the log request. See [`error!`] which just logs +/// using the default target. +/// +/// [`error!`]: macro.error.html +/// +/// # Examples +/// +/// ``` +/// use ckb_logger::error_target; +/// +/// let (err_info, port) = ("No connection", 22); +/// +/// error_target!("app_events", "App Error: {}, Port: {}", err_info, port); +/// ``` #[macro_export(local_inner_macros)] macro_rules! error_target { ($target:expr, $( $args:tt )*) => { @@ -85,6 +303,27 @@ macro_rules! error_target { } } +/// Determines if a message logged at the specified level and with the specified target will be logged. +/// +/// This can be used to avoid expensive computation of log message arguments if the message would be ignored anyway. +/// +/// See also [`log_enabled!`] the version that checks with the default target. 
+/// +/// [`log_enabled!`]: macro.log_enabled.html +/// +/// ## Examples +/// +/// ``` +/// use ckb_logger::Level::Debug; +/// use ckb_logger::{debug_target, log_enabled_target}; +/// +/// # struct Data { x: u32, y: u32 } +/// # fn expensive_call() -> Data { Data { x: 0, y: 0 } } +/// if log_enabled_target!("Global", Debug) { +/// let data = expensive_call(); +/// debug_target!("Global", "expensive debug data: {} {}", data.x, data.y); +/// } +/// ``` #[macro_export(local_inner_macros)] macro_rules! log_enabled_target { ($target:expr, $level:expr) => { diff --git a/util/memory-tracker/Cargo.toml b/util/memory-tracker/Cargo.toml index 29ac84df90..71ba26756b 100644 --- a/util/memory-tracker/Cargo.toml +++ b/util/memory-tracker/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" authors = ["Nervos Core Dev "] edition = "2018" license = "MIT" -description = "TODO(doc): crate description" +description = "Track the memory usage of CKB." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" @@ -13,13 +13,6 @@ ckb-logger = { path = "../logger", version = "= 0.38.0-pre" } ckb-metrics = { path = "../metrics", version = "= 0.38.0-pre" } ckb-db = { path = "../../db", version = "= 0.38.0-pre" } -# TODO Why don't disable this crate by "target.*" in the crates which are dependent on this crate? -# -# I really don't like to write such dirty code. I try a lot. But due to the limit of the "cargo" -# (and a lot of stupid bugs), at last, I have to write these stupid code. 
-#
-# References:
-# - [Cargo Issue-1197: Target-specific features](https://github.com/rust-lang/cargo/issues/1197)
 [target.'cfg(all(not(target_env = "msvc"), not(target_os="macos")))'.dependencies]
 heim = "0.0.10"
 futures = "0.3.1"
diff --git a/util/memory-tracker/src/jemalloc.rs b/util/memory-tracker/src/jemalloc.rs
index c6385ff690..3a866727c2 100644
--- a/util/memory-tracker/src/jemalloc.rs
+++ b/util/memory-tracker/src/jemalloc.rs
@@ -1,6 +1,13 @@
 use ckb_logger::info;
 use std::{ffi, mem, ptr};
 
+/// Dumps the heap through Jemalloc's API.
+///
+/// This function works when the following conditions are satisfied:
+/// - the global allocator is [Jemallocator].
+/// - the profiling is enabled.
+///
+/// [Jemallocator]: https://docs.rs/jemallocator/*/jemallocator/index.html
 pub fn jemalloc_profiling_dump(filename: &str) -> Result<(), String> {
     let mut filename0 = format!("{}\0", filename);
     let opt_name = "prof.dump";
diff --git a/util/memory-tracker/src/lib.rs b/util/memory-tracker/src/lib.rs
index 8c527c1ded..1034b7c2d1 100644
--- a/util/memory-tracker/src/lib.rs
+++ b/util/memory-tracker/src/lib.rs
@@ -1,3 +1,5 @@
+//! Track the memory usage of CKB.
+
 #[cfg(all(
     not(target_env = "msvc"),
     not(target_os = "macos"),
@@ -10,6 +12,9 @@ mod jemalloc;
     feature = "profiling"
 )))]
 mod jemalloc {
+    /// A dummy function which is used when the Jemalloc profiling isn't supported.
+    ///
+    /// Jemalloc profiling is disabled by default; the feature `profiling` is used to enable it.
     pub fn jemalloc_profiling_dump(_: &str) -> Result<(), String> {
         Err("jemalloc profiling dump: unsupported".to_string())
     }
@@ -24,6 +29,7 @@ mod process {
     use crate::rocksdb::TrackRocksDBMemory;
     use ckb_logger::info;
 
+    /// A dummy function which is used when tracking memory usage isn't supported.
pub fn track_current_process( _: u64, _: Option>, @@ -31,12 +37,13 @@ mod process { info!("track current process: unsupported"); } } -pub mod rocksdb; -pub mod utils; +mod rocksdb; pub use jemalloc::jemalloc_profiling_dump; pub use process::track_current_process; +pub use rocksdb::TrackRocksDBMemory; +/// Track the memory usage of the CKB process and Jemalloc. pub fn track_current_process_simple(interval: u64) { track_current_process::(interval, None); } diff --git a/util/memory-tracker/src/process.rs b/util/memory-tracker/src/process.rs index 0284557943..f5fa5f7c02 100644 --- a/util/memory-tracker/src/process.rs +++ b/util/memory-tracker/src/process.rs @@ -30,6 +30,7 @@ macro_rules! mib_read { }; } +/// Track the memory usage of the CKB process, Jemalloc and RocksDB through [ckb-metrics](../../ckb_metrics/index.html). pub fn track_current_process( interval: u64, tracker_opt: Option>, @@ -88,7 +89,7 @@ pub fn track_current_process "metadata"); if let Some(tracker) = tracker_opt.clone() { - let _ignored = tracker.gather_memory_stats(); + tracker.gather_memory_stats(); } } else { error!("failed to fetch the memory information about current process"); diff --git a/util/memory-tracker/src/rocksdb.rs b/util/memory-tracker/src/rocksdb.rs index 669758c8df..5cb4a806d5 100644 --- a/util/memory-tracker/src/rocksdb.rs +++ b/util/memory-tracker/src/rocksdb.rs @@ -1,51 +1,60 @@ use ckb_db::internal::ops::{GetColumnFamilys, GetProperty, GetPropertyCF}; use ckb_metrics::metrics; -use crate::utils::{sum_int_values, PropertyValue}; - -// Ref: https://github.com/facebook/rocksdb/wiki/Memory-usage-in-RocksDB -pub struct RocksDBMemoryStatistics { - pub estimate_table_readers_mem: PropertyValue, - pub size_all_mem_tables: PropertyValue, - pub cur_size_all_mem_tables: PropertyValue, - pub block_cache_capacity: PropertyValue, - pub block_cache_usage: PropertyValue, - pub block_cache_pinned_usage: PropertyValue, +#[derive(Debug, Clone)] +enum PropertyValue { + Value(T), + Null, + 
Error(String), } -pub trait TrackRocksDBMemory { - fn gather_memory_stats(&self) -> RocksDBMemoryStatistics { - let estimate_table_readers_mem = self.gather_int_values("estimate-table-readers-mem"); - let size_all_mem_tables = self.gather_int_values("size-all-mem-tables"); - let cur_size_all_mem_tables = self.gather_int_values("cur-size-all-mem-tables"); - let block_cache_capacity = self.gather_int_values("block-cache-capacity"); - let block_cache_usage = self.gather_int_values("block-cache-usage"); - let block_cache_pinned_usage = self.gather_int_values("block-cache-pinned-usage"); - RocksDBMemoryStatistics { - estimate_table_readers_mem, - size_all_mem_tables, - cur_size_all_mem_tables, - block_cache_capacity, - block_cache_usage, - block_cache_pinned_usage, +impl PropertyValue { + pub(crate) fn as_i64(&self) -> i64 { + match self { + Self::Value(v) => *v as i64, + Self::Null => -1, + Self::Error(_) => -2, } } - fn gather_int_values(&self, key: &str) -> PropertyValue; } -pub struct DummyRocksDB; +impl From, String>> for PropertyValue { + fn from(res: Result, String>) -> Self { + match res { + Ok(Some(v)) => Self::Value(v), + Ok(None) => Self::Null, + Err(e) => Self::Error(e), + } + } +} -impl TrackRocksDBMemory for DummyRocksDB { - fn gather_int_values(&self, _: &str) -> PropertyValue { - PropertyValue::Null +/// A trait which used to track the RocksDB memory usage. 
+/// +/// References: [Memory usage in RocksDB](https://github.com/facebook/rocksdb/wiki/Memory-usage-in-RocksDB) +pub trait TrackRocksDBMemory { + /// Gather memory statistics through [ckb-metrics](../../ckb_metrics/index.html) + fn gather_memory_stats(&self) { + self.gather_int_values("estimate-table-readers-mem"); + self.gather_int_values("size-all-mem-tables"); + self.gather_int_values("cur-size-all-mem-tables"); + self.gather_int_values("block-cache-capacity"); + self.gather_int_values("block-cache-usage"); + self.gather_int_values("block-cache-pinned-usage"); } + + /// Gather integer values through [ckb-metrics](../../ckb_metrics/index.html) + fn gather_int_values(&self, _: &str) {} } +pub(crate) struct DummyRocksDB; + +impl TrackRocksDBMemory for DummyRocksDB {} + impl TrackRocksDBMemory for RocksDB where RocksDB: GetColumnFamilys + GetProperty + GetPropertyCF, { - fn gather_int_values(&self, key: &str) -> PropertyValue { + fn gather_int_values(&self, key: &str) { let mut values = Vec::new(); for (cf_name, cf) in self.get_cfs() { let value_col: PropertyValue = self @@ -55,6 +64,5 @@ where metrics!(gauge, "ckb-sys.mem.rocksdb", value_col.as_i64(), "type" => key.to_owned(), "cf" => cf_name.to_owned()); values.push(value_col); } - sum_int_values(&values) } } diff --git a/util/memory-tracker/src/utils.rs b/util/memory-tracker/src/utils.rs deleted file mode 100644 index 2777161b0a..0000000000 --- a/util/memory-tracker/src/utils.rs +++ /dev/null @@ -1,91 +0,0 @@ -use std::fmt; - -pub enum HumanReadableSize { - Bytes(u64), - KiBytes(f64), - MiBytes(f64), - GiBytes(f64), -} - -impl fmt::Display for HumanReadableSize { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Self::Bytes(v) => write!(f, "{} Bytes", v), - Self::KiBytes(v) => write!(f, "{:.2} KiB", v), - Self::MiBytes(v) => write!(f, "{:.2} MiB", v), - Self::GiBytes(v) => write!(f, "{:.2} GiB", v), - } - } -} - -impl From for HumanReadableSize { - fn from(v: u64) -> Self { - match v { - 
_ if v < 1024 => Self::Bytes(v), - _ if v < 1024 * 1024 => Self::KiBytes((v as f64) / 1024.0), - _ if v < 1024 * 1024 * 1024 => Self::MiBytes((v as f64) / 1024.0 / 1024.0), - _ => Self::GiBytes((v as f64) / 1024.0 / 1024.0 / 1024.0), - } - } -} - -#[derive(Debug, Clone)] -pub enum PropertyValue { - Value(T), - Null, - Error(String), -} - -impl PropertyValue { - pub(crate) fn as_i64(&self) -> i64 { - match self { - Self::Value(v) => *v as i64, - Self::Null => -1, - Self::Error(_) => -2, - } - } -} - -impl fmt::Display for PropertyValue { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Self::Value(v) => write!(f, "{}", HumanReadableSize::from(*v)), - Self::Null => write!(f, "null"), - Self::Error(_) => write!(f, "err"), - } - } -} - -impl From, String>> for PropertyValue { - fn from(res: Result, String>) -> Self { - match res { - Ok(Some(v)) => Self::Value(v), - Ok(None) => Self::Null, - Err(e) => Self::Error(e), - } - } -} - -pub fn sum_int_values(values: &[PropertyValue]) -> PropertyValue { - let mut total = 0; - let mut errors = 0; - let mut nulls = 0; - for value in values { - match value { - PropertyValue::Value(v) => { - total += v; - } - PropertyValue::Null => { - nulls += 1; - } - PropertyValue::Error(_) => { - errors += 1; - } - } - } - if errors > 0 || nulls > 0 { - PropertyValue::Error(format!("{} errors, {} nulls", errors, nulls)) - } else { - PropertyValue::Value(total) - } -} diff --git a/util/metrics-config/Cargo.toml b/util/metrics-config/Cargo.toml index 9f73de5059..1e5fe01c53 100644 --- a/util/metrics-config/Cargo.toml +++ b/util/metrics-config/Cargo.toml @@ -4,10 +4,10 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos "] edition = "2018" -description = "TODO(doc): crate description" +description = "CKB metrics configurations." 
homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -log = "0.4" +log = { version = "0.4", features = ["serde"] } serde = { version = "1.0", features = ["derive"] } diff --git a/util/metrics-config/src/lib.rs b/util/metrics-config/src/lib.rs index 7cfe1f4471..63b5d5794b 100644 --- a/util/metrics-config/src/lib.rs +++ b/util/metrics-config/src/lib.rs @@ -1,91 +1,112 @@ +//! CKB metrics configurations. +//! +//! This crate is used to configure the [CKB metrics service]. +//! +//! [CKB metrics service]: ../ckb_metrics_service/index.html + use std::collections::HashMap; use serde::{Deserialize, Serialize}; -/* Examples: - * ```toml - * [metrics] - * threads = 3 - * histogram_window = 60 - * histogram_granularity = 1 - * upkeep_interval = 500 - * [metrics.exporter.prometheus] - * target = { type = "http", listen_address = "127.0.0.1:8100" } - * format = { type = "prometheus" } - * [metrics.exporter.log_yaml] - * target = { type = "log", level = "warn", interval = 600 } - * format = { type = "yaml" } - * [metrics.exporter.log_json] - * target = { type = "log", level = "error", interval = 900 } - * format = { type = "json" } - * ``` - */ +pub use log::Level as LogLevel; + +/// The whole CKB metrics configuration. 
+/// +/// This struct is used to configure [CKB metrics service]: +/// builds one [`metrics_runtime::Receiver`] and any number of [exporters] +/// +/// # An example which is used in `ckb.toml`: +/// ```toml +/// [metrics] +/// threads = 3 +/// histogram_window = 60 +/// histogram_granularity = 1 +/// upkeep_interval = 500 +/// [metrics.exporter.prometheus] +/// target = { type = "http", listen_address = "127.0.0.1:8100" } +/// format = { type = "prometheus" } +/// [metrics.exporter.log_yaml] +/// target = { type = "log", level = "warn", interval = 600 } +/// format = { type = "yaml" } +/// [metrics.exporter.log_json] +/// target = { type = "log", level = "error", interval = 900 } +/// format = { type = "json" } +/// ``` +/// +/// [CKB metrics service]: ../ckb_metrics_service/index.html +/// [`metrics_runtime::Receiver`]: https://docs.rs/metrics-runtime/0.13.1/metrics_runtime/struct.Receiver.html +/// [exporters]: https://docs.rs/metrics-runtime/0.13.1/metrics_runtime/exporters/index.html #[derive(Default, Clone, Debug, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct Config { + /// How many threads are required for metrics service. #[serde(default)] pub threads: usize, + /// Sets the [histogram] window configuration in seconds. + /// + /// [histogram]: https://docs.rs/metrics-runtime/0.13.1/metrics_runtime/struct.Builder.html#method.histogram #[serde(default)] - pub histogram_window: u64, // seconds + pub histogram_window: u64, + /// Sets the [histogram] granularity configuration in seconds. + /// + /// [histogram]: https://docs.rs/metrics-runtime/0.13.1/metrics_runtime/struct.Builder.html#method.histogram #[serde(default)] - pub histogram_granularity: u64, // seconds + pub histogram_granularity: u64, + /// Sets the [upkeep interval] configuration in milliseconds. 
+ /// + /// [upkeep interval]: https://docs.rs/metrics-runtime/0.13.1/metrics_runtime/struct.Builder.html#method.upkeep_interval #[serde(default)] - pub upkeep_interval: u64, // milliseconds + pub upkeep_interval: u64, + /// Stores all exporters configurations. #[serde(default)] pub exporter: HashMap, } +/// The configuration of an [exporter]. +/// +/// [exporter]: https://docs.rs/metrics-runtime/0.13.1/metrics_runtime/exporters/index.html #[derive(Clone, Debug, Serialize, Deserialize)] pub struct Exporter { + /// How to output the metrics data. pub target: Target, + /// The metrics output data in which format. pub format: Format, } +/// The target to output the metrics data. #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "snake_case")] #[serde(tag = "type")] pub enum Target { + /// Outputs the metrics data into logs. Log { + /// The log records will be output at which level. level: LogLevel, - interval: u64, // seconds + /// Outputs each log record after how many seconds. + interval: u64, }, + /// Outputs the metrics data through HTTP Protocol. Http { + /// The HTTP listen address. listen_address: String, }, } +/// Records the metrics data in which format. #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "snake_case")] #[serde(tag = "type")] pub enum Format { + /// JSON format. Json { + /// Sets whether or not to render the JSON as "pretty." #[serde(default)] pretty: bool, }, + /// YAML format. Yaml, + /// Prometheus exposition format. 
Prometheus, } - -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(rename_all = "snake_case")] -pub enum LogLevel { - Error, - Warn, - Info, - Debug, - Trace, -} - -impl From for log::Level { - fn from(lv: LogLevel) -> Self { - match lv { - LogLevel::Error => Self::Error, - LogLevel::Warn => Self::Warn, - LogLevel::Info => Self::Info, - LogLevel::Debug => Self::Debug, - LogLevel::Trace => Self::Trace, - } - } -} diff --git a/util/metrics-service/Cargo.toml b/util/metrics-service/Cargo.toml index e32d988ebe..3dc3dc0711 100644 --- a/util/metrics-service/Cargo.toml +++ b/util/metrics-service/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos "] edition = "2018" -description = "TODO(doc): crate description" +description = "The service which handle the metrics data in CKB." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" @@ -15,5 +15,4 @@ ckb-stop-handler = { path = "../stop-handler", version = "= 0.38.0-pre" } ckb-util = { path = "..", version = "= 0.38.0-pre" } metrics-runtime = "~0.13.1" metrics-core = "~0.5.2" -log = "0.4" tokio = { version = "0.2", features = ["sync", "blocking", "rt-threaded", "time", "io-driver", "macros"] } diff --git a/util/metrics-service/src/lib.rs b/util/metrics-service/src/lib.rs index 274d84e92c..14a10c89c1 100644 --- a/util/metrics-service/src/lib.rs +++ b/util/metrics-service/src/lib.rs @@ -1,3 +1,5 @@ +//! The service which handles the metrics data in CKB. + use std::{net::SocketAddr, time::Duration}; use metrics_core::Observe; @@ -13,11 +15,16 @@ use ckb_metrics_config::{Config, Exporter, Format, Target}; use ckb_stop_handler::{SignalSender, StopHandler}; use ckb_util::strings; +/// Ensures the metrics service can shutdown gracefully. #[must_use] pub enum Guard { + /// The metrics service is disabled. Off, + /// The metrics service is enabled. 
On { + #[doc(hidden)] handle: Handle, + #[doc(hidden)] stop: StopHandler<()>, }, } @@ -30,6 +37,9 @@ impl Drop for Guard { } } +/// Initializes the metrics service and lets it run in the background. +/// +/// Returns [Guard](enum.Guard.html) if succeeded, or an `String` to describes the reason for the failure. pub fn init(config: Config) -> Result { if config.exporter.is_empty() { return Ok(Guard::Off); @@ -102,8 +112,10 @@ where { let Exporter { target, format } = exporter; match target { - Target::Log { level, interval } => { - let lv: log::Level = level.into(); + Target::Log { + level: lv, + interval, + } => { let dur = Duration::from_secs(interval); match format { Format::Json { pretty } => { diff --git a/util/metrics/Cargo.toml b/util/metrics/Cargo.toml index 70854d7ffc..edc80b27a5 100644 --- a/util/metrics/Cargo.toml +++ b/util/metrics/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos "] edition = "2018" -description = "TODO(doc): crate description" +description = "A lightweight metrics facade used in CKB." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/metrics/src/lib.rs b/util/metrics/src/lib.rs index a80b8d4a40..b23a3fba12 100644 --- a/util/metrics/src/lib.rs +++ b/util/metrics/src/lib.rs @@ -1,20 +1,57 @@ +//! A lightweight metrics facade used in CKB. +//! +//! The `ckb-metrics` crate is a wrapper of [`metrics`]. The crate [`ckb-metrics-service`] is the +//! runtime which handles the metrics data in CKB. +//! +//! [`metrics`]: https://docs.rs/metrics/*/metrics/index.html +//! [`ckb-metrics-service`]: ../ckb_metrics_service/index.html +//! +//! ## Use +//! +//! The basic use of the facade crate is through the metrics macro: [`metrics!`]. +//! +//! ### Examples +//! +//! ```rust +//! use ckb_metrics::metrics; +//! +//! # use std::time::Instant; +//! # pub fn run_query(_: &str) -> u64 { 42 } +//! pub fn process(query: &str) -> u64 { +//! 
let start = Instant::now();
+//!     let row_count = run_query(query);
+//!     let end = Instant::now();
+//!
+//!     metrics!(timing, "process.query_time", start, end);
+//!     metrics!(counter, "process.query_row_count", row_count);
+//!
+//!     row_count
+//! }
+//! # fn main() {}
+//! ```
+
 use std::time::{Duration, Instant};
 
 pub use metrics::{self as internal, SetRecorderError};
 
+/// A simple timer which is used to measure how much time elapsed.
 pub struct Timer(Instant);
 
 impl Timer {
+    /// Starts a new timer.
     pub fn start() -> Self {
         Self(Instant::now())
     }
 
+    /// Stops the timer and returns how much time elapsed.
     pub fn stop(self) -> Duration {
         Instant::now() - self.0
     }
 }
 
-// Ref: https://docs.rs/metrics/*/metrics/index.html#macros
+/// Reexports the macros from the crate `metrics`.
+///
+/// See the list of available [metrics types](https://docs.rs/metrics/*/metrics/index.html#macros).
 #[macro_export(local_inner_macros)]
 macro_rules! metrics {
     ($type:ident, $( $args:tt )*) => {
diff --git a/util/multisig/Cargo.toml b/util/multisig/Cargo.toml
index 27eb0ad8e3..8f19ad7813 100644
--- a/util/multisig/Cargo.toml
+++ b/util/multisig/Cargo.toml
@@ -4,7 +4,7 @@ version = "0.38.0-pre"
 license = "MIT"
 authors = ["Nervos Core Dev "]
 edition = "2018"
-description = "TODO(doc): crate description"
+description = "TODO(doc): @doitian crate description"
 homepage = "https://github.com/nervosnetwork/ckb"
 repository = "https://github.com/nervosnetwork/ckb"
diff --git a/util/multisig/src/error.rs b/util/multisig/src/error.rs
index 5cf8a454e7..d0ebd7acbb 100644
--- a/util/multisig/src/error.rs
+++ b/util/multisig/src/error.rs
@@ -1,21 +1,33 @@
+//! Multi-signature error.
 use failure::Context;
 
+/// Multi-signature error.
 #[derive(Debug)]
 pub struct Error {
     inner: Context,
 }
 
+/// Multi-signature error kinds.
 #[derive(Copy, Clone, Eq, PartialEq, Debug, Fail)]
 pub enum ErrorKind {
+    /// The count of signatures should be less than the count of private keys.
#[fail(display = "The count of sigs should less than pks.")] SigCountOverflow, + /// The count of signatures is less than the threshold. #[fail(display = "The count of sigs less than threshold.")] SigNotEnough, + /// The verified signatures count is less than the threshold. #[fail(display = "Failed to meet threshold {:?}.", _0)] - Threshold { threshold: usize, pass_sigs: usize }, + Threshold { + /// The required count of valid signatures. + threshold: usize, + /// The actual count of valid signatures. + pass_sigs: usize, + }, } impl Error { + /// Gets the error kind. pub fn kind(&self) -> ErrorKind { *self.inner.get_context() } diff --git a/util/multisig/src/lib.rs b/util/multisig/src/lib.rs index a955b195b1..279c8262c6 100644 --- a/util/multisig/src/lib.rs +++ b/util/multisig/src/lib.rs @@ -1,3 +1,7 @@ +//! Multi-signatures. +//! +//! A m-of-n signature mechanism requires m valid signatures signed by m different keys from +//! the pre-configured n keys. #[macro_use] extern crate failure; diff --git a/util/multisig/src/secp256k1.rs b/util/multisig/src/secp256k1.rs index 9688abc881..4d1d3305b9 100644 --- a/util/multisig/src/secp256k1.rs +++ b/util/multisig/src/secp256k1.rs @@ -1,10 +1,12 @@ +//! Multi-signatures using secp256k1 use crate::error::{Error, ErrorKind}; pub use ckb_crypto::secp::{Error as Secp256k1Error, Message, Privkey, Pubkey, Signature}; use ckb_logger::{debug, trace}; use std::collections::HashSet; use std::hash::BuildHasher; -/// verify m of n signatures +/// Verifies m of n signatures. 
+/// /// Example 2 of 3 sigs: [s1, s3], pks: [pk1, pk2, pk3] pub fn verify_m_of_n( message: &Message, diff --git a/util/network-alert/Cargo.toml b/util/network-alert/Cargo.toml index 8a55c249e6..b9b25ca3d2 100644 --- a/util/network-alert/Cargo.toml +++ b/util/network-alert/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @driftluo crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/network-alert/src/alert_relayer.rs b/util/network-alert/src/alert_relayer.rs index 9681e1443f..b3f83d050a 100644 --- a/util/network-alert/src/alert_relayer.rs +++ b/util/network-alert/src/alert_relayer.rs @@ -31,6 +31,7 @@ pub struct AlertRelayer { } impl AlertRelayer { + /// TODO(doc): @driftluo pub fn new( client_version: String, notify_controller: NotifyController, @@ -43,10 +44,12 @@ impl AlertRelayer { } } + /// TODO(doc): @driftluo pub fn notifier(&self) -> &Arc> { &self.notifier } + /// TODO(doc): @driftluo pub fn verifier(&self) -> &Arc { &self.verifier } diff --git a/util/network-alert/src/notifier.rs b/util/network-alert/src/notifier.rs index ba169bc716..e91882338c 100644 --- a/util/network-alert/src/notifier.rs +++ b/util/network-alert/src/notifier.rs @@ -1,3 +1,4 @@ +//! 
TODO(doc): @driftluo use ckb_logger::debug; use ckb_notify::NotifyController; use ckb_types::{packed::Alert, prelude::*}; @@ -6,6 +7,7 @@ use std::collections::HashMap; const CANCEL_FILTER_SIZE: usize = 128; +/// TODO(doc): @driftluo pub struct Notifier { /// cancelled alerts cancel_filter: LruCache, @@ -18,6 +20,7 @@ pub struct Notifier { } impl Notifier { + /// TODO(doc): @driftluo pub fn new(client_version: String, notify_controller: NotifyController) -> Self { Notifier { cancel_filter: LruCache::new(CANCEL_FILTER_SIZE), @@ -65,6 +68,7 @@ impl Notifier { true } + /// TODO(doc): @driftluo pub fn add(&mut self, alert: &Alert) { let alert_id = alert.raw().id().unpack(); let alert_cancel = alert.raw().cancel().unpack(); @@ -96,6 +100,7 @@ impl Notifier { }); } + /// TODO(doc): @driftluo pub fn cancel(&mut self, cancel_id: u32) { self.cancel_filter.put(cancel_id, ()); self.received_alerts.remove(&cancel_id); @@ -105,6 +110,7 @@ impl Notifier { }); } + /// TODO(doc): @driftluo pub fn clear_expired_alerts(&mut self, now: u64) { self.received_alerts.retain(|_id, alert| { let notice_until: u64 = alert.raw().notice_until().unpack(); @@ -116,15 +122,18 @@ impl Notifier { }); } + /// TODO(doc): @driftluo pub fn has_received(&self, id: u32) -> bool { self.received_alerts.contains_key(&id) || self.cancel_filter.contains(&id) } + /// TODO(doc): @driftluo // all unexpired alerts pub fn received_alerts(&self) -> Vec { self.received_alerts.values().cloned().collect() } + /// TODO(doc): @driftluo // alerts that self node should noticed pub fn noticed_alerts(&self) -> Vec { self.noticed_alerts.clone() diff --git a/util/network-alert/src/verifier.rs b/util/network-alert/src/verifier.rs index 646eb33a77..40f4f9fe68 100644 --- a/util/network-alert/src/verifier.rs +++ b/util/network-alert/src/verifier.rs @@ -1,3 +1,4 @@ +//! 
TODO(doc): @driftluo use ckb_app_config::NetworkAlertConfig; use ckb_logger::{debug, trace}; use ckb_multisig::secp256k1::{verify_m_of_n, Message, Pubkey, Signature}; @@ -5,12 +6,14 @@ use ckb_types::{packed, prelude::*}; use failure::Error; use std::collections::HashSet; +/// TODO(doc): @driftluo pub struct Verifier { config: NetworkAlertConfig, pubkeys: HashSet, } impl Verifier { + /// TODO(doc): @driftluo pub fn new(config: NetworkAlertConfig) -> Self { let pubkeys = config .public_keys @@ -21,6 +24,7 @@ impl Verifier { Verifier { config, pubkeys } } + /// TODO(doc): @driftluo pub fn verify_signatures(&self, alert: &packed::Alert) -> Result<(), Error> { trace!("verify alert {:?}", alert); let message = Message::from_slice(alert.calc_alert_hash().as_slice())?; diff --git a/util/occupied-capacity/Cargo.toml b/util/occupied-capacity/Cargo.toml index 2709aef0b3..4a8415b90c 100644 --- a/util/occupied-capacity/Cargo.toml +++ b/util/occupied-capacity/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @keroro520 crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/occupied-capacity/core/Cargo.toml b/util/occupied-capacity/core/Cargo.toml index 376daba744..61777d4f45 100644 --- a/util/occupied-capacity/core/Cargo.toml +++ b/util/occupied-capacity/core/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @keroro520 crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/occupied-capacity/core/src/units.rs b/util/occupied-capacity/core/src/units.rs index c5c34eaeca..d7536ea7ab 100644 --- a/util/occupied-capacity/core/src/units.rs +++ 
b/util/occupied-capacity/core/src/units.rs @@ -1,25 +1,33 @@ use serde::{Deserialize, Serialize}; -// The inner is the amount of `Shannons`. +/// CKB capacity. +/// +/// It is encoded as the amount of `Shannons` internally. #[derive( Debug, Clone, Copy, Default, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, )] pub struct Capacity(u64); +/// Represents the ratio `numerator / denominator`, where `numerator` and `denominator` are both +/// unsigned 64-bit integers. #[derive(Clone, PartialEq, Debug, Eq, Copy, Deserialize, Serialize)] pub struct Ratio(pub u64, pub u64); impl Ratio { + /// The numerator in ratio numerator / denominator. pub fn numer(&self) -> u64 { self.0 } + /// The denominator in ratio numerator / denominator. pub fn denom(&self) -> u64 { self.1 } } +/// Conversion into `Capacity`. pub trait AsCapacity { + /// Converts `self` into `Capacity`. fn as_capacity(self) -> Capacity; } @@ -56,8 +64,10 @@ impl AsCapacity for u8 { // A `Byte` contains how many `Shannons`. const BYTE_SHANNONS: u64 = 100_000_000; +/// Numeric errors. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Error { + /// Numeric overflow. Overflow, } @@ -69,21 +79,26 @@ impl ::std::fmt::Display for Error { impl ::std::error::Error for Error {} +/// Numeric operation result. pub type Result = ::std::result::Result; impl Capacity { + /// Capacity of zero Shannons. pub const fn zero() -> Self { Capacity(0) } + /// Capacity of one Shannon. pub const fn one() -> Self { Capacity(1) } + /// Views the capacity as Shannons. pub const fn shannons(val: u64) -> Self { Capacity(val) } + /// Views the capacity as CKBytes. pub fn bytes(val: usize) -> Result { (val as u64) .checked_mul(BYTE_SHANNONS) @@ -91,10 +106,12 @@ impl Capacity { .ok_or(Error::Overflow) } + /// Views the capacity as Shannons. pub fn as_u64(self) -> u64 { self.0 } + /// Adds self and rhs and checks overflow error. 
pub fn safe_add(self, rhs: C) -> Result { self.0 .checked_add(rhs.as_capacity().0) @@ -102,6 +119,7 @@ impl Capacity { .ok_or(Error::Overflow) } + /// Subtracts self and rhs and checks overflow error. pub fn safe_sub(self, rhs: C) -> Result { self.0 .checked_sub(rhs.as_capacity().0) @@ -109,6 +127,7 @@ impl Capacity { .ok_or(Error::Overflow) } + /// Multiplies self and rhs and checks overflow error. pub fn safe_mul(self, rhs: C) -> Result { self.0 .checked_mul(rhs.as_capacity().0) @@ -116,6 +135,7 @@ impl Capacity { .ok_or(Error::Overflow) } + /// Multiplies self with a ratio and checks overflow error. pub fn safe_mul_ratio(self, ratio: Ratio) -> Result { self.0 .checked_mul(ratio.numer()) diff --git a/util/occupied-capacity/macros/Cargo.toml b/util/occupied-capacity/macros/Cargo.toml index 735dda3241..db457cb857 100644 --- a/util/occupied-capacity/macros/Cargo.toml +++ b/util/occupied-capacity/macros/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" authors = ["Nervos Core Dev "] edition = "2018" license = "MIT" -description = "TODO(doc): crate description" +description = "TODO(doc): @keroro520 crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/occupied-capacity/macros/src/lib.rs b/util/occupied-capacity/macros/src/lib.rs index 86e817d684..c5f3f66188 100644 --- a/util/occupied-capacity/macros/src/lib.rs +++ b/util/occupied-capacity/macros/src/lib.rs @@ -1,3 +1,4 @@ +//! 
TODO(doc): @keroro520 extern crate proc_macro; use quote::quote; @@ -5,6 +6,7 @@ use syn::{parse_macro_input, Error as SynError}; use ckb_occupied_capacity_core::Capacity; +/// TODO(doc): @keroro520 #[proc_macro] pub fn capacity_bytes(input: proc_macro::TokenStream) -> proc_macro::TokenStream { let input = parse_macro_input!(input as syn::LitInt); diff --git a/util/proposal-table/Cargo.toml b/util/proposal-table/Cargo.toml index 42292dd77b..01c2d9740e 100644 --- a/util/proposal-table/Cargo.toml +++ b/util/proposal-table/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Nervos Core Dev "] edition = "2018" license = "MIT" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html -description = "TODO(doc): crate description" +description = "TODO(doc): @zhangsoledad crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/proposal-table/src/lib.rs b/util/proposal-table/src/lib.rs index 4244c45429..046b0d93a2 100644 --- a/util/proposal-table/src/lib.rs +++ b/util/proposal-table/src/lib.rs @@ -1,8 +1,10 @@ +//! 
TODO(doc): @zhangsoledad use ckb_chain_spec::consensus::ProposalWindow; use ckb_types::{core::BlockNumber, packed::ProposalShortId}; use std::collections::{BTreeMap, HashSet}; use std::ops::Bound; +/// TODO(doc): @zhangsoledad #[derive(Default, Clone, Debug)] pub struct ProposalView { pub(crate) gap: HashSet, @@ -10,27 +12,33 @@ pub struct ProposalView { } impl ProposalView { + /// TODO(doc): @zhangsoledad pub fn new(gap: HashSet, set: HashSet) -> ProposalView { ProposalView { gap, set } } + /// TODO(doc): @zhangsoledad pub fn gap(&self) -> &HashSet { &self.gap } + /// TODO(doc): @zhangsoledad pub fn set(&self) -> &HashSet { &self.set } + /// TODO(doc): @zhangsoledad pub fn contains_proposed(&self, id: &ProposalShortId) -> bool { self.set.contains(id) } + /// TODO(doc): @zhangsoledad pub fn contains_gap(&self, id: &ProposalShortId) -> bool { self.gap.contains(id) } } +/// TODO(doc): @zhangsoledad #[derive(Debug, PartialEq, Clone, Eq)] pub struct ProposalTable { pub(crate) table: BTreeMap>, @@ -38,6 +46,7 @@ pub struct ProposalTable { } impl ProposalTable { + /// TODO(doc): @zhangsoledad pub fn new(proposal_window: ProposalWindow) -> Self { ProposalTable { proposal_window, @@ -45,20 +54,24 @@ impl ProposalTable { } } + /// TODO(doc): @zhangsoledad // If the TABLE did not have this value present, true is returned. 
// If the TABLE did have this value present, false is returned pub fn insert(&mut self, number: BlockNumber, ids: HashSet) -> bool { self.table.insert(number, ids).is_none() } + /// TODO(doc): @zhangsoledad pub fn remove(&mut self, number: BlockNumber) -> Option> { self.table.remove(&number) } + /// TODO(doc): @zhangsoledad pub fn all(&self) -> &BTreeMap> { &self.table } + /// TODO(doc): @zhangsoledad pub fn finalize( &mut self, origin: &ProposalView, diff --git a/util/rational/Cargo.toml b/util/rational/Cargo.toml index 9b87856f38..b4d1d40188 100644 --- a/util/rational/Cargo.toml +++ b/util/rational/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @doitian crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/rational/src/lib.rs b/util/rational/src/lib.rs index 761de24b6f..c12931d078 100644 --- a/util/rational/src/lib.rs +++ b/util/rational/src/lib.rs @@ -1,3 +1,4 @@ +//! Rational numbers. #[cfg(test)] mod tests; @@ -6,6 +7,8 @@ use std::cmp::Ordering; use std::fmt; use std::ops::{Add, Div, Mul, Sub}; +/// Represents the ratio `numerator / denominator`, where `numerator` and `denominator` are both +/// unsigned 256-bit integers. #[derive(Clone, Debug, PartialEq, Eq)] pub struct RationalU256 { /// Numerator. @@ -21,6 +24,11 @@ impl fmt::Display for RationalU256 { } impl RationalU256 { + /// Creates a new ratio `numer / denom`. + /// + /// ## Panics + /// + /// Panics when `denom` is zero. #[inline] pub fn new(numer: U256, denom: U256) -> RationalU256 { if denom.is_zero() { @@ -31,36 +39,45 @@ impl RationalU256 { ret } + /// Creates a new ratio `numer / denom` without checking whether `denom` is zero. #[inline] pub const fn new_raw(numer: U256, denom: U256) -> RationalU256 { RationalU256 { numer, denom } } + /// Creates a new ratio `t / 1`. 
#[inline] pub const fn from_u256(t: U256) -> RationalU256 { RationalU256::new_raw(t, U256::one()) } + /// Tells whether the numerator is zero. #[inline] pub fn is_zero(&self) -> bool { self.numer.is_zero() } + /// Creates a new ratio `0 / 1`. #[inline] pub const fn zero() -> RationalU256 { RationalU256::new_raw(U256::zero(), U256::one()) } + /// Creates a new ratio `1 / 1`. #[inline] pub const fn one() -> RationalU256 { RationalU256::new_raw(U256::one(), U256::one()) } + /// Rounds down the ratio into an unsigned 256-bit integer. #[inline] pub fn into_u256(self) -> U256 { self.numer / self.denom } + /// Computes `self - rhs` and saturates the result to zero when `self` is less than `rhs`. + /// + /// Returns `self - rhs` when `self > rhs`, returns zero otherwise. #[inline] pub fn saturating_sub(self, rhs: RationalU256) -> Self { if self.denom == rhs.denom { @@ -85,6 +102,9 @@ impl RationalU256 { } } + /// Computes `self - rhs` and saturates the result to zero when `self` is less than `rhs`. + /// + /// Returns `self - rhs` when `self > rhs`, returns zero otherwise. 
#[inline] pub fn saturating_sub_u256(self, rhs: U256) -> Self { let (numer, overflowing) = self.numer.overflowing_sub(&(&self.denom * rhs)); diff --git a/util/reward-calculator/Cargo.toml b/util/reward-calculator/Cargo.toml index f57f1683a7..072b4f2c1f 100644 --- a/util/reward-calculator/Cargo.toml +++ b/util/reward-calculator/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @keroro520 crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/reward-calculator/src/lib.rs b/util/reward-calculator/src/lib.rs index bded39dc40..1fa05b75ed 100644 --- a/util/reward-calculator/src/lib.rs +++ b/util/reward-calculator/src/lib.rs @@ -13,12 +13,16 @@ use ckb_types::{ use std::cmp; use std::collections::HashSet; +/// TODO(doc): @keroro520 pub struct RewardCalculator<'a, CS> { + /// TODO(doc): @keroro520 pub consensus: &'a Consensus, + /// TODO(doc): @keroro520 pub store: &'a CS, } impl<'a, CS: ChainStore<'a>> RewardCalculator<'a, CS> { + /// TODO(doc): @keroro520 pub fn new(consensus: &'a Consensus, store: &'a CS) -> Self { RewardCalculator { consensus, store } } @@ -42,6 +46,7 @@ impl<'a, CS: ChainStore<'a>> RewardCalculator<'a, CS> { self.block_reward_internal(&target, parent) } + /// TODO(doc): @keroro520 pub fn block_reward_for_target( &self, target: &HeaderView, diff --git a/util/runtime/Cargo.toml b/util/runtime/Cargo.toml index d2eeba87fa..2d97042b08 100644 --- a/util/runtime/Cargo.toml +++ b/util/runtime/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @doitian crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/runtime/src/lib.rs 
b/util/runtime/src/lib.rs index 835a60124c..70c06be2ef 100644 --- a/util/runtime/src/lib.rs +++ b/util/runtime/src/lib.rs @@ -1,7 +1,9 @@ +//! Utilities for tokio runtime. use std::{future::Future, sync, thread}; pub use tokio::runtime::{Builder, Handle}; +/// Creates a new tokio runtime. pub fn new_runtime( name_prefix: &str, runtime_builder_opt: Option, diff --git a/util/rust-unstable-port/Cargo.toml b/util/rust-unstable-port/Cargo.toml index 9793ae80fa..92ac2e685a 100644 --- a/util/rust-unstable-port/Cargo.toml +++ b/util/rust-unstable-port/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" authors = ["Nervos Core Dev "] edition = "2018" license = "MIT" -description = "TODO(doc): crate description" +description = "TODO(doc): @doitian crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/rust-unstable-port/src/lib.rs b/util/rust-unstable-port/src/lib.rs index 508fc2342e..bbaf9f0aff 100644 --- a/util/rust-unstable-port/src/lib.rs +++ b/util/rust-unstable-port/src/lib.rs @@ -1 +1,3 @@ +//! A collection of features ported from Rust unstable. + pub use is_sorted::IsSorted; diff --git a/util/snapshot/Cargo.toml b/util/snapshot/Cargo.toml index b4e81adca2..07867ed69b 100644 --- a/util/snapshot/Cargo.toml +++ b/util/snapshot/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @zhangsoledad crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/snapshot/src/lib.rs b/util/snapshot/src/lib.rs index b3e27cdf37..3e55625aa7 100644 --- a/util/snapshot/src/lib.rs +++ b/util/snapshot/src/lib.rs @@ -1,3 +1,4 @@ +//! 
TODO(doc): @zhangsoledad use arc_swap::{ArcSwap, Guard}; use ckb_chain_spec::consensus::Consensus; use ckb_db::{ @@ -20,26 +21,31 @@ use ckb_types::{ }; use std::sync::Arc; +/// TODO(doc): @zhangsoledad pub struct SnapshotMgr { inner: ArcSwap, } impl SnapshotMgr { + /// TODO(doc): @zhangsoledad pub fn new(snapshot: Arc) -> Self { SnapshotMgr { inner: ArcSwap::new(snapshot), } } + /// TODO(doc): @zhangsoledad pub fn load(&self) -> Guard> { self.inner.load() } + /// TODO(doc): @zhangsoledad pub fn store(&self, snapshot: Arc) { self.inner.store(snapshot); } } +/// TODO(doc): @zhangsoledad // A snapshot captures a point-in-time view of the DB at the time it's created // // yes —— new snapshot @@ -57,6 +63,7 @@ pub struct Snapshot { } impl Snapshot { + /// TODO(doc): @zhangsoledad // New snapshot created after tip change pub fn new( tip_header: HeaderView, @@ -76,6 +83,7 @@ impl Snapshot { } } + /// TODO(doc): @zhangsoledad // Refreshing on block commit is necessary operation, even tip remains unchanged. // when node relayed compact block,if some uncles were not available from receiver's local sources, // in GetBlockTransactions/BlockTransactions roundtrip, node will need access block data of uncles. 
@@ -90,38 +98,47 @@ impl Snapshot { } } + /// TODO(doc): @zhangsoledad pub fn tip_header(&self) -> &HeaderView { &self.tip_header } + /// TODO(doc): @zhangsoledad pub fn tip_number(&self) -> BlockNumber { self.tip_header.number() } + /// TODO(doc): @zhangsoledad pub fn tip_hash(&self) -> Byte32 { self.tip_header.hash() } + /// TODO(doc): @zhangsoledad pub fn epoch_ext(&self) -> &EpochExt { &self.epoch_ext } + /// TODO(doc): @zhangsoledad pub fn consensus(&self) -> &Consensus { &self.consensus } + /// TODO(doc): @zhangsoledad pub fn cloned_consensus(&self) -> Arc { Arc::clone(&self.consensus) } + /// TODO(doc): @zhangsoledad pub fn proposals(&self) -> &ProposalView { &self.proposals } + /// TODO(doc): @zhangsoledad pub fn total_difficulty(&self) -> &U256 { &self.total_difficulty } + /// TODO(doc): @zhangsoledad pub fn finalize_block_reward( &self, parent: &HeaderView, diff --git a/util/src/lib.rs b/util/src/lib.rs index 8e11b406f6..17972ba5ce 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -1,3 +1,6 @@ +//! CKB utilities. +//! +//! Collection of frequently used utilities. mod linked_hash_set; mod shrink_to_fit; pub mod strings; @@ -11,8 +14,20 @@ pub use parking_lot::{ self, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard, }; -const TRY_LOCK_TIMEOUT: Duration = Duration::from_secs(300); +/// The timeout that [`lock_or_panic`] waits before it panics. +/// +/// It is set to 300 seconds. +/// +/// [`lock_or_panic`]: fn.lock_or_panic.html +pub const TRY_LOCK_TIMEOUT: Duration = Duration::from_secs(300); +/// Holds the mutex lock or panics after timeout. +/// +/// This is used to panic and restart the app on potential dead lock. +/// +/// Try to hold the lock or panic after the timeout [`TRY_LOCK_TIMEOUT`]. 
+/// +/// [`TRY_LOCK_TIMEOUT`]: constant.TRY_LOCK_TIMEOUT.html pub fn lock_or_panic(data: &Mutex) -> MutexGuard { data.try_lock_for(TRY_LOCK_TIMEOUT) .expect("please check if reach a deadlock") diff --git a/util/src/linked_hash_set.rs b/util/src/linked_hash_set.rs index 1596aeac6e..139c62aa2d 100644 --- a/util/src/linked_hash_set.rs +++ b/util/src/linked_hash_set.rs @@ -7,6 +7,21 @@ use std::iter::Extend; type DefaultBuildHasher = BuildHasherDefault; +/// A HashSet that holds elements in insertion order. +/// +/// ## Examples +/// +/// ``` +/// use ckb_util::LinkedHashSet; +/// +/// let mut set = LinkedHashSet::new(); +/// set.insert(2); +/// set.insert(1); +/// set.insert(3); +/// +/// let items: Vec = set.iter().copied().collect(); +/// assert_eq!(items, [2, 1, 3]); +/// ``` pub struct LinkedHashSet { map: LinkedHashMap, } @@ -75,6 +90,14 @@ where } impl LinkedHashSet { + /// Creates a linked hash set. + /// + /// ## Examples + /// + /// ``` + /// use ckb_util::LinkedHashSet; + /// let set: LinkedHashSet = LinkedHashSet::new(); + /// ``` pub fn new() -> LinkedHashSet { LinkedHashSet { map: LinkedHashMap::default(), @@ -87,32 +110,56 @@ where T: Eq + Hash, S: BuildHasher, { + /// Returns `true` if the set contains a value. + /// + /// ``` + /// use ckb_util::LinkedHashSet; + /// + /// let mut set: LinkedHashSet<_> = LinkedHashSet::new(); + /// set.insert(1); + /// set.insert(2); + /// set.insert(3); + /// assert_eq!(set.contains(&1), true); + /// assert_eq!(set.contains(&4), false); + /// ``` pub fn contains(&self, value: &T) -> bool { self.map.contains_key(value) } + /// Returns the number of elements the set can hold without reallocating. pub fn capacity(&self) -> usize { self.map.capacity() } + /// Returns the number of elements in the set. pub fn len(&self) -> usize { self.map.len() } + /// Returns `true` if the set contains no elements. pub fn is_empty(&self) -> bool { self.map.is_empty() } + /// Adds a value to the set. 
+ /// + /// If the set did not have this value present, true is returned. + /// + /// If the set did have this value present, false is returned. pub fn insert(&mut self, value: T) -> bool { self.map.insert(value, ()).is_none() } + /// Gets an iterator visiting all elements in insertion order. + /// + /// The iterator element type is `&'a T`. pub fn iter(&self) -> Iter { Iter { iter: self.map.keys(), } } + /// Visits the values representing the difference, i.e., the values that are in `self` but not in `other`. pub fn difference<'a>(&'a self, other: &'a LinkedHashSet) -> Difference<'a, T, S> { Difference { iter: self.iter(), diff --git a/util/src/shrink_to_fit.rs b/util/src/shrink_to_fit.rs index ea67798561..f1f9fe9f95 100644 --- a/util/src/shrink_to_fit.rs +++ b/util/src/shrink_to_fit.rs @@ -1,3 +1,15 @@ +/// Shrinks the map `$map` when it reserves more than `$threhold` slots for future entries. +/// +/// ## Examples +/// +/// ``` +/// use std::collections::HashMap; +/// use ckb_util::shrink_to_fit; +/// +/// let mut h = HashMap::::new(); +/// // Shrink the map when it reserves more than 10 slots for future entries. +/// shrink_to_fit!(h, 10); +/// ``` #[macro_export] macro_rules! shrink_to_fit { ($map:expr, $threhold:expr) => {{ diff --git a/util/src/strings.rs b/util/src/strings.rs index 22c8c335aa..be51674a62 100644 --- a/util/src/strings.rs +++ b/util/src/strings.rs @@ -1,5 +1,21 @@ +//! Utilities for std strings. use regex::Regex; +/// Checks whether the given string is a valid identifier. +/// +/// This function considers non-empty string containing only alphabets, digits, `-`, and `_` as +/// a valid identifier. 
+/// +/// ## Examples +/// +/// ``` +/// use ckb_util::strings::check_if_identifier_is_valid; +/// +/// assert!(check_if_identifier_is_valid("test123").is_ok()); +/// assert!(check_if_identifier_is_valid("123test").is_ok()); +/// assert!(check_if_identifier_is_valid("").is_err()); +/// assert!(check_if_identifier_is_valid("test 123").is_err()); +/// ``` pub fn check_if_identifier_is_valid(ident: &str) -> Result<(), String> { const IDENT_PATTERN: &str = r#"^[0-9a-zA-Z_-]+$"#; if ident.is_empty() { diff --git a/util/stop-handler/Cargo.toml b/util/stop-handler/Cargo.toml index c776050944..329db40f7f 100644 --- a/util/stop-handler/Cargo.toml +++ b/util/stop-handler/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" authors = ["Nervos Core Dev "] edition = "2018" license = "MIT" -description = "TODO(doc): crate description" +description = "TODO(doc): @keroro520 crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/stop-handler/src/lib.rs b/util/stop-handler/src/lib.rs index 7bfcae7433..de313b82bb 100644 --- a/util/stop-handler/src/lib.rs +++ b/util/stop-handler/src/lib.rs @@ -1,3 +1,4 @@ +//! 
TODO(doc): @keroro520 use ckb_logger::error; use futures::sync::oneshot; use parking_lot::Mutex; @@ -6,15 +7,21 @@ use std::sync::Arc; use std::thread::JoinHandle; use tokio::sync::oneshot as tokio_oneshot; +/// TODO(doc): @keroro520 #[derive(Debug)] pub enum SignalSender { + /// TODO(doc): @keroro520 Future(oneshot::Sender<()>), + /// TODO(doc): @keroro520 Crossbeam(ckb_channel::Sender<()>), + /// TODO(doc): @keroro520 Std(mpsc::Sender<()>), + /// TODO(doc): @keroro520 Tokio(tokio_oneshot::Sender<()>), } impl SignalSender { + /// TODO(doc): @keroro520 pub fn send(self) { match self { SignalSender::Crossbeam(tx) => { @@ -47,6 +54,7 @@ struct Handler { thread: JoinHandle, } +/// TODO(doc): @keroro520 //the outer Option take ownership for `Arc::try_unwrap` //the inner Option take ownership for `JoinHandle` or `oneshot::Sender` #[derive(Clone, Debug)] @@ -55,6 +63,7 @@ pub struct StopHandler { } impl StopHandler { + /// TODO(doc): @keroro520 pub fn new(signal: SignalSender, thread: JoinHandle) -> StopHandler { let handler = Handler { signal, thread }; StopHandler { @@ -62,6 +71,7 @@ impl StopHandler { } } + /// TODO(doc): @keroro520 pub fn try_send(&mut self) { let inner = self .inner diff --git a/util/test-chain-utils/Cargo.toml b/util/test-chain-utils/Cargo.toml index ef8a020f48..8dc6bbdc24 100644 --- a/util/test-chain-utils/Cargo.toml +++ b/util/test-chain-utils/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" authors = ["Nervos Core Dev "] edition = "2018" license = "MIT" -description = "TODO(doc): crate description" +description = "TODO(doc): @chuijiaolianying crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/test-chain-utils/src/chain.rs b/util/test-chain-utils/src/chain.rs index 5592f54f0a..c744374ed9 100644 --- a/util/test-chain-utils/src/chain.rs +++ b/util/test-chain-utils/src/chain.rs @@ -83,14 +83,17 @@ lazy_static! 
{ }; } +/// TODO(doc): @chuijiaolianying pub fn load_input_data_hash_cell() -> &'static (CellOutput, Bytes, Script) { &LOAD_INPUT_DATA_HASH } +/// TODO(doc): @chuijiaolianying pub fn always_success_cell() -> &'static (CellOutput, Bytes, Script) { &SUCCESS_CELL } +/// TODO(doc): @chuijiaolianying pub fn always_success_consensus() -> Consensus { let (always_success_cell, always_success_cell_data, always_success_script) = always_success_cell(); @@ -113,6 +116,7 @@ pub fn always_success_consensus() -> Consensus { .build() } +/// TODO(doc): @chuijiaolianying pub fn always_success_cellbase( block_number: BlockNumber, reward: Capacity, @@ -145,18 +149,21 @@ fn load_spec_by_name(name: &str) -> ChainSpec { ChainSpec::load_from(&res).expect("load spec by name") } +/// TODO(doc): @chuijiaolianying pub fn ckb_testnet_consensus() -> Consensus { let name = "ckb_testnet"; let spec = load_spec_by_name(name); spec.build_consensus().unwrap() } +/// TODO(doc): @chuijiaolianying pub fn type_lock_script_code_hash() -> H256 { build_genesis_type_id_script(OUTPUT_INDEX_SECP256K1_BLAKE160_SIGHASH_ALL) .calc_script_hash() .unpack() } +/// TODO(doc): @chuijiaolianying pub fn secp256k1_blake160_sighash_cell(consensus: Consensus) -> (CellOutput, Bytes) { let genesis_block = consensus.genesis_block(); let tx = genesis_block.transactions()[0].clone(); @@ -167,6 +174,7 @@ pub fn secp256k1_blake160_sighash_cell(consensus: Consensus) -> (CellOutput, Byt (cell_output, data) } +/// TODO(doc): @chuijiaolianying pub fn secp256k1_data_cell(consensus: Consensus) -> (CellOutput, Bytes) { let genesis_block = consensus.genesis_block(); let tx = genesis_block.transactions()[0].clone(); diff --git a/util/test-chain-utils/src/lib.rs b/util/test-chain-utils/src/lib.rs index 9980894744..d2147a0717 100644 --- a/util/test-chain-utils/src/lib.rs +++ b/util/test-chain-utils/src/lib.rs @@ -1,3 +1,4 @@ +//! 
TODO(doc): @chuijiaolianying mod chain; mod median_time; mod mock_store; diff --git a/util/test-chain-utils/src/median_time.rs b/util/test-chain-utils/src/median_time.rs index 4755e3973d..2310a8b348 100644 --- a/util/test-chain-utils/src/median_time.rs +++ b/util/test-chain-utils/src/median_time.rs @@ -5,6 +5,7 @@ use ckb_types::{ prelude::*, }; +/// TODO(doc): @chuijiaolianying pub struct MockMedianTime { timestamps: Vec, } @@ -39,15 +40,18 @@ impl HeaderProvider for MockMedianTime { } impl MockMedianTime { + /// TODO(doc): @chuijiaolianying pub fn new(timestamps: Vec) -> Self { Self { timestamps } } + /// TODO(doc): @chuijiaolianying pub fn get_block_hash(block_number: BlockNumber) -> Byte32 { let vec: Vec = (0..32).map(|_| block_number as u8).collect(); Byte32::from_slice(vec.as_slice()).unwrap() } + /// TODO(doc): @chuijiaolianying pub fn get_transaction_info( block_number: BlockNumber, block_epoch: EpochNumberWithFraction, diff --git a/util/test-chain-utils/src/mock_store.rs b/util/test-chain-utils/src/mock_store.rs index 0d44451dea..c4cc0c4cc2 100644 --- a/util/test-chain-utils/src/mock_store.rs +++ b/util/test-chain-utils/src/mock_store.rs @@ -11,6 +11,7 @@ use ckb_types::{ }; use std::sync::Arc; +/// TODO(doc): @chuijiaolianying #[derive(Clone)] pub struct MockStore(pub Arc); @@ -22,6 +23,7 @@ impl Default for MockStore { } impl MockStore { + /// TODO(doc): @chuijiaolianying pub fn new(parent: &HeaderView, chain_store: &ChainDB) -> Self { // Insert parent block into current mock store for referencing let block = chain_store.get_block(&parent.hash()).unwrap(); @@ -34,10 +36,12 @@ impl MockStore { store } + /// TODO(doc): @chuijiaolianying pub fn store(&self) -> &ChainDB { &self.0 } + /// TODO(doc): @chuijiaolianying pub fn insert_block(&self, block: &BlockView, epoch_ext: &EpochExt) { let db_txn = self.0.begin_transaction(); let last_block_hash_in_previous_epoch = epoch_ext.last_block_hash_in_previous_epoch(); @@ -52,6 +56,7 @@ impl MockStore { 
db_txn.commit().unwrap(); } + /// TODO(doc): @chuijiaolianying pub fn remove_block(&self, block: &BlockView) { let db_txn = self.0.begin_transaction(); db_txn diff --git a/util/types/Cargo.toml b/util/types/Cargo.toml index d16fe87df7..076c0e66f5 100644 --- a/util/types/Cargo.toml +++ b/util/types/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" authors = ["Nervos Core Dev "] edition = "2018" license = "MIT" -description = "TODO(doc): crate description" +description = "Provides the essential types for CKB." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/types/src/constants.rs b/util/types/src/constants.rs index 16b0be82d4..44d5527897 100644 --- a/util/types/src/constants.rs +++ b/util/types/src/constants.rs @@ -2,5 +2,7 @@ use crate::core::Version; +/// Current transaction version. pub const TX_VERSION: Version = 0; +/// Current block version. pub const BLOCK_VERSION: Version = 0; diff --git a/util/types/src/conversion/primitive.rs b/util/types/src/conversion/primitive.rs index 9e3b3900e7..09de072a11 100644 --- a/util/types/src/conversion/primitive.rs +++ b/util/types/src/conversion/primitive.rs @@ -145,10 +145,13 @@ impl Pack for str { } impl<'r> packed::BytesReader<'r> { + /// Converts self to a string slice. pub fn as_utf8(&self) -> Result<&str, ::std::str::Utf8Error> { ::std::str::from_utf8(self.raw_data()) } + /// Converts self to a string slice without checking that the string contains valid UTF-8. + /// /// # Safety /// /// This function is unsafe because it does not check that the bytes passed to @@ -158,6 +161,7 @@ impl<'r> packed::BytesReader<'r> { ::std::str::from_utf8_unchecked(self.raw_data()) } + /// Checks whether self contains valid UTF-8 binary data.
pub fn is_utf8(&self) -> bool { self.as_utf8().is_ok() } diff --git a/util/types/src/core/advanced_builders.rs b/util/types/src/core/advanced_builders.rs index 2c13aeb986..16ffeeef7d 100644 --- a/util/types/src/core/advanced_builders.rs +++ b/util/types/src/core/advanced_builders.rs @@ -10,6 +10,12 @@ use crate::{ * Definitions */ +/// An advanced builder for [`TransactionView`]. +/// +/// Based on [`packed::TransactionBuilder`] but with lots of syntactic sugar added. +/// +/// [`TransactionView`]: struct.TransactionView.html +/// [`packed::TransactionBuilder`]: ../packed/struct.TransactionBuilder.html #[derive(Clone, Debug)] pub struct TransactionBuilder { pub(crate) version: packed::Uint32, @@ -21,6 +27,12 @@ pub struct TransactionBuilder { pub(crate) outputs_data: Vec, } +/// An advanced builder for [`HeaderView`]. +/// +/// Based on [`packed::HeaderBuilder`] but with lots of syntactic sugar added. +/// +/// [`HeaderView`]: struct.HeaderView.html +/// [`packed::HeaderBuilder`]: ../packed/struct.HeaderBuilder.html #[derive(Clone, Debug)] pub struct HeaderBuilder { // RawHeader @@ -38,6 +50,12 @@ pub struct HeaderBuilder { pub(crate) nonce: packed::Uint128, } +/// An advanced builder for [`BlockView`]. +/// +/// Based on [`packed::BlockBuilder`] but with lots of syntactic sugar added. +/// +/// [`BlockView`]: struct.BlockView.html +/// [`packed::BlockBuilder`]: ../packed/struct.BlockBuilder.html #[derive(Clone, Debug, Default)] pub struct BlockBuilder { pub(crate) header: HeaderBuilder, @@ -88,57 +106,89 @@ impl ::std::default::Default for HeaderBuilder { */ macro_rules!
def_setter_simple { - ($prefix:ident, $field:ident, $type:ident) => { + (__add_doc, $prefix:ident, $field:ident, $type:ident, $comment:expr) => { + #[doc = $comment] pub fn $field(mut self, v: packed::$type) -> Self { self.$prefix.$field = v; self } }; - ($field:ident, $type:ident) => { + (__add_doc, $field:ident, $type:ident, $comment:expr) => { + #[doc = $comment] pub fn $field(mut self, v: packed::$type) -> Self { self.$field = v; self } }; + ($prefix:ident, $field:ident, $type:ident) => { + def_setter_simple!( + __add_doc, + $prefix, + $field, + $type, + concat!("Sets `", stringify!($prefix), ".", stringify!($field), "`.") + ); + }; + ($field:ident, $type:ident) => { + def_setter_simple!( + __add_doc, + $field, + $type, + concat!("Sets `", stringify!($field), "`.") + ); + }; } macro_rules! def_setter_for_vector { - ($field:ident, $type:ident, $func_push:ident, $func_extend:ident, $func_set:ident) => { - pub fn $func_push(mut self, v: packed::$type) -> Self { + ( + $prefix:ident, $field:ident, $type:ident, + $func_push:ident, $func_extend:ident, $func_set:ident, + $comment_push:expr, $comment_extend:expr, $comment_set:expr, + ) => { + #[doc = $comment_push] + pub fn $func_push(mut self, v: $prefix::$type) -> Self { self.$field.push(v); self } + #[doc = $comment_extend] pub fn $func_extend(mut self, v: T) -> Self where - T: ::std::iter::IntoIterator, + T: ::std::iter::IntoIterator, { self.$field.extend(v); self } - pub fn $func_set(mut self, v: Vec) -> Self { + #[doc = $comment_set] + pub fn $func_set(mut self, v: Vec<$prefix::$type>) -> Self { self.$field = v; self } }; + ($prefix:ident, $field:ident, $type:ident, $func_push:ident, $func_extend:ident, $func_set:ident) => { + def_setter_for_vector!( + $prefix, + $field, + $type, + $func_push, + $func_extend, + $func_set, + concat!("Pushes an item into `", stringify!($field), "`."), + concat!( + "Extends `", + stringify!($field), + "` with the contents of an iterator." 
+ ), + concat!("Sets `", stringify!($field), "`."), + ); + }; + ($field:ident, $type:ident, $func_push:ident, $func_extend:ident, $func_set:ident) => { + def_setter_for_vector!(packed, $field, $type, $func_push, $func_extend, $func_set); + }; } macro_rules! def_setter_for_view_vector { ($field:ident, $type:ident, $func_push:ident, $func_extend:ident, $func_set:ident) => { - pub fn $func_push(mut self, v: core::$type) -> Self { - self.$field.push(v); - self - } - pub fn $func_extend(mut self, v: T) -> Self - where - T: ::std::iter::IntoIterator, - { - self.$field.extend(v); - self - } - pub fn $func_set(mut self, v: Vec) -> Self { - self.$field = v; - self - } + def_setter_for_vector!(core, $field, $type, $func_push, $func_extend, $func_set); }; } @@ -163,6 +213,7 @@ impl TransactionBuilder { set_outputs_data ); + /// Converts into [`TransactionView`](struct.TransactionView.html). pub fn build(self) -> core::TransactionView { let Self { version, @@ -208,6 +259,7 @@ impl HeaderBuilder { def_setter_simple!(dao, Byte32); def_setter_simple!(nonce, Uint128); + /// Converts into [`HeaderView`](struct.HeaderView.html). pub fn build(self) -> core::HeaderView { let Self { version, @@ -272,6 +324,7 @@ impl BlockBuilder { set_proposals ); + /// Sets `header`. pub fn header(mut self, header: core::HeaderView) -> Self { self.header = header.as_advanced_builder(); self @@ -362,10 +415,19 @@ impl BlockBuilder { } } + /// Converts into [`BlockView`](struct.BlockView.html) and recalculates all hashes and merkle + /// roots in the header. pub fn build(self) -> core::BlockView { self.build_internal(true) } + /// Converts into [`BlockView`](struct.BlockView.html) but does not refresh all hashes and all + /// merkle roots in the header. + /// + /// # Notice + /// + /// [`BlockView`](struct.BlockView.html) created by this method could have invalid hashes or + /// invalid merkle roots in the header.
pub fn build_unchecked(self) -> core::BlockView { self.build_internal(false) } @@ -376,6 +438,7 @@ impl BlockBuilder { */ impl packed::Transaction { + /// Creates an advanced builder based on current data. pub fn as_advanced_builder(&self) -> TransactionBuilder { TransactionBuilder::default() .version(self.raw().version()) @@ -389,6 +452,7 @@ impl packed::Transaction { } impl packed::Header { + /// Creates an advanced builder based on current data. pub fn as_advanced_builder(&self) -> HeaderBuilder { HeaderBuilder::default() .version(self.raw().version()) @@ -406,6 +470,7 @@ impl packed::Header { } impl packed::Block { + /// Creates an advanced builder based on current data. pub fn as_advanced_builder(&self) -> BlockBuilder { BlockBuilder::default() .header(self.header().into_view()) @@ -426,18 +491,21 @@ impl packed::Block { } impl core::TransactionView { + /// Creates an advanced builder based on current data. pub fn as_advanced_builder(&self) -> TransactionBuilder { self.data().as_advanced_builder() } } impl core::HeaderView { + /// Creates an advanced builder based on current data. pub fn as_advanced_builder(&self) -> HeaderBuilder { self.data().as_advanced_builder() } } impl core::BlockView { + /// Creates an advanced builder based on current data.
pub fn as_advanced_builder(&self) -> BlockBuilder { let core::BlockView { data, diff --git a/util/types/src/core/blockchain.rs b/util/types/src/core/blockchain.rs index ca8f7cd490..5ec92edb2a 100644 --- a/util/types/src/core/blockchain.rs +++ b/util/types/src/core/blockchain.rs @@ -3,9 +3,12 @@ use std::convert::TryFrom; use crate::packed; +/// TODO(doc): @quake #[derive(Clone, Copy, PartialEq, Eq, Hash)] pub enum ScriptHashType { + /// TODO(doc): @quake Data = 0, + /// TODO(doc): @quake Type = 1, } @@ -48,9 +51,12 @@ impl Into for ScriptHashType { } } +/// TODO(doc): @quake #[derive(Clone, Copy, PartialEq, Eq, Hash)] pub enum DepType { + /// TODO(doc): @quake Code = 0, + /// TODO(doc): @quake DepGroup = 1, } diff --git a/util/types/src/core/cell.rs b/util/types/src/core/cell.rs index 5c6992c8c3..84b2fc30b7 100644 --- a/util/types/src/core/cell.rs +++ b/util/types/src/core/cell.rs @@ -1,3 +1,4 @@ +//! TODO(doc): @quake use crate::{ bytes::Bytes, core::error::OutPointError, @@ -13,19 +14,28 @@ use std::convert::TryInto; use std::fmt; use std::hash::BuildHasher; +/// TODO(doc): @quake #[derive(Debug)] pub enum ResolvedDep { + /// TODO(doc): @quake Cell(CellMeta), + /// TODO(doc): @quake Group((CellMeta, Vec)), } +/// TODO(doc): @quake pub static SYSTEM_CELL: OnceCell> = OnceCell::new(); +/// TODO(doc): @quake #[derive(Clone, Eq, PartialEq, Default)] pub struct CellMeta { + /// TODO(doc): @quake pub cell_output: CellOutput, + /// TODO(doc): @quake pub out_point: OutPoint, + /// TODO(doc): @quake pub transaction_info: Option, + /// TODO(doc): @quake pub data_bytes: u64, /// In memory cell data and its hash /// A live cell either exists in memory or DB @@ -33,6 +43,7 @@ pub struct CellMeta { pub mem_cell_data: Option<(Bytes, Byte32)>, } +/// TODO(doc): @quake #[derive(Default)] pub struct CellMetaBuilder { cell_output: CellOutput, @@ -43,6 +54,7 @@ pub struct CellMetaBuilder { } impl CellMetaBuilder { + /// TODO(doc): @quake pub fn from_cell_meta(cell_meta: CellMeta) -> 
Self { let CellMeta { cell_output, @@ -60,6 +72,7 @@ impl CellMetaBuilder { } } + /// TODO(doc): @quake pub fn from_cell_output(cell_output: CellOutput, data: Bytes) -> Self { let mut builder = CellMetaBuilder::default(); builder.cell_output = cell_output; @@ -69,16 +82,19 @@ impl CellMetaBuilder { builder } + /// TODO(doc): @quake pub fn out_point(mut self, out_point: OutPoint) -> Self { self.out_point = out_point; self } + /// TODO(doc): @quake pub fn transaction_info(mut self, transaction_info: TransactionInfo) -> Self { self.transaction_info = Some(transaction_info); self } + /// TODO(doc): @quake pub fn build(self) -> CellMeta { let Self { cell_output, @@ -109,6 +125,7 @@ impl fmt::Debug for CellMeta { } impl CellMeta { + /// TODO(doc): @quake pub fn is_cellbase(&self) -> bool { self.transaction_info .as_ref() @@ -116,21 +133,25 @@ impl CellMeta { .unwrap_or(false) } + /// TODO(doc): @quake pub fn capacity(&self) -> Capacity { self.cell_output.capacity().unpack() } + /// TODO(doc): @quake pub fn occupied_capacity(&self) -> CapacityResult { self.cell_output .occupied_capacity(Capacity::bytes(self.data_bytes as usize)?) } + /// TODO(doc): @quake pub fn is_lack_of_capacity(&self) -> CapacityResult { self.cell_output .is_lack_of_capacity(Capacity::bytes(self.data_bytes as usize)?) } } +/// TODO(doc): @quake #[derive(PartialEq, Debug)] pub enum CellStatus { /// Cell exists and has not been spent. @@ -142,10 +163,12 @@ pub enum CellStatus { } impl CellStatus { + /// TODO(doc): @quake pub fn live_cell(cell_meta: CellMeta) -> CellStatus { CellStatus::Live(cell_meta) } + /// TODO(doc): @quake pub fn is_live(&self) -> bool { match *self { CellStatus::Live(_) => true, @@ -153,10 +176,12 @@ impl CellStatus { } } + /// TODO(doc): @quake pub fn is_dead(&self) -> bool { self == &CellStatus::Dead } + /// TODO(doc): @quake pub fn is_unknown(&self) -> bool { self == &CellStatus::Unknown } @@ -165,18 +190,24 @@ impl CellStatus { /// Transaction with resolved input cells. 
#[derive(Debug)] pub struct ResolvedTransaction { + /// TODO(doc): @quake pub transaction: TransactionView, + /// TODO(doc): @quake pub resolved_cell_deps: Vec, + /// TODO(doc): @quake pub resolved_inputs: Vec, + /// TODO(doc): @quake pub resolved_dep_groups: Vec, } impl ResolvedTransaction { + /// TODO(doc): @quake // cellbase will be resolved with empty input cells, we can use low cost check here: pub fn is_cellbase(&self) -> bool { self.resolved_inputs.is_empty() } + /// TODO(doc): @quake pub fn inputs_capacity(&self) -> CapacityResult { self.resolved_inputs .iter() @@ -184,10 +215,12 @@ impl ResolvedTransaction { .try_fold(Capacity::zero(), Capacity::safe_add) } + /// TODO(doc): @quake pub fn outputs_capacity(&self) -> CapacityResult { self.transaction.outputs_capacity() } + /// TODO(doc): @quake pub fn related_dep_out_points(&self) -> Vec { self.resolved_cell_deps .iter() @@ -198,10 +231,13 @@ impl ResolvedTransaction { } } +/// TODO(doc): @quake pub trait CellProvider { + /// TODO(doc): @quake fn cell(&self, out_point: &OutPoint, with_data: bool) -> CellStatus; } +/// TODO(doc): @quake pub struct OverlayCellProvider<'a, A, B> { overlay: &'a A, cell_provider: &'a B, @@ -212,6 +248,7 @@ where A: CellProvider, B: CellProvider, { + /// TODO(doc): @quake pub fn new(overlay: &'a A, cell_provider: &'a B) -> Self { Self { overlay, @@ -234,6 +271,7 @@ where } } +/// TODO(doc): @quake pub struct BlockCellProvider<'a> { output_indices: HashMap, block: &'a BlockView, @@ -242,6 +280,7 @@ pub struct BlockCellProvider<'a> { // Transactions are expected to be sorted within a block, // Transactions have to appear after any transactions upon which they depend impl<'a> BlockCellProvider<'a> { + /// TODO(doc): @quake pub fn new(block: &'a BlockView) -> Result { let output_indices: HashMap = block .transactions() @@ -307,17 +346,20 @@ impl<'a> CellProvider for BlockCellProvider<'a> { } } +/// TODO(doc): @quake #[derive(Default)] pub struct TransactionsProvider<'a> { transactions: 
HashMap, } impl<'a> TransactionsProvider<'a> { + /// TODO(doc): @quake pub fn new(transactions: impl Iterator) -> Self { let transactions = transactions.map(|tx| (tx.hash(), tx)).collect(); Self { transactions } } + /// TODO(doc): @quake pub fn insert(&mut self, transaction: &'a TransactionView) { self.transactions.insert(transaction.hash(), transaction); } @@ -347,6 +389,7 @@ impl<'a> CellProvider for TransactionsProvider<'a> { } } +/// TODO(doc): @quake pub trait HeaderChecker { /// Check if header in main chain fn check_valid(&self, block_hash: &Byte32) -> Result<(), Error>; @@ -416,6 +459,7 @@ fn resolve_dep_group Result, Error Ok(Some((dep_group_cell, resolved_deps))) } +/// TODO(doc): @quake pub fn resolve_transaction( transaction: TransactionView, seen_inputs: &mut HashSet, @@ -561,6 +605,7 @@ fn build_cell_meta_from_out_point( } } +/// TODO(doc): @quake pub fn setup_system_cell_cache(genesis: &BlockView, cell_provider: &CP) { let system_cell_transaction = &genesis.transactions()[0]; let secp_cell_transaction = &genesis.transactions()[1]; diff --git a/util/types/src/core/error.rs b/util/types/src/core/error.rs index 356b223616..717530c7e9 100644 --- a/util/types/src/core/error.rs +++ b/util/types/src/core/error.rs @@ -1,7 +1,10 @@ +//! 
TODO(doc): @keroro520 + use crate::generated::packed::{Byte32, OutPoint}; use ckb_error::{Error, ErrorKind}; use failure::Fail; +/// TODO(doc): @keroro520 #[derive(Fail, Debug, PartialEq, Eq, Clone)] pub enum OutPointError { /// The specified cell is already dead @@ -23,10 +26,12 @@ pub enum OutPointError { InvalidDepGroup(OutPoint), // TODO: This error should be move into HeaderError or TransactionError + /// TODO(doc): @keroro520 #[fail(display = "InvalidHeader({})", _0)] InvalidHeader(Byte32), // TODO: This error should be move into HeaderError or TransactionError + /// TODO(doc): @keroro520 #[fail(display = "ImmatureHeader({})", _0)] ImmatureHeader(Byte32), } diff --git a/util/types/src/core/extras.rs b/util/types/src/core/extras.rs index 7dfbdc06d8..867b1bb384 100644 --- a/util/types/src/core/extras.rs +++ b/util/types/src/core/extras.rs @@ -11,26 +11,38 @@ use std::fmt; use std::num::ParseIntError; use std::str::FromStr; +/// TODO(doc): @quake #[derive(Clone, PartialEq, Default, Debug)] pub struct BlockExt { + /// TODO(doc): @quake pub received_at: u64, + /// TODO(doc): @quake pub total_difficulty: U256, + /// TODO(doc): @quake pub total_uncles_count: u64, + /// TODO(doc): @quake pub verified: Option, + /// TODO(doc): @quake pub txs_fees: Vec, } +/// TODO(doc): @quake #[derive(Clone, Eq, PartialEq, Debug)] pub struct TransactionInfo { + /// TODO(doc): @quake // Block hash pub block_hash: packed::Byte32, + /// TODO(doc): @quake pub block_number: BlockNumber, + /// TODO(doc): @quake pub block_epoch: EpochNumberWithFraction, + /// TODO(doc): @quake // Index in the block pub index: usize, } impl TransactionInfo { + /// TODO(doc): @quake pub fn key(&self) -> packed::TransactionKey { packed::TransactionKey::new_builder() .block_hash(self.block_hash.clone()) @@ -38,6 +50,7 @@ impl TransactionInfo { .build() } + /// TODO(doc): @quake pub fn new( block_number: BlockNumber, block_epoch: EpochNumberWithFraction, @@ -52,15 +65,18 @@ impl TransactionInfo { } } + /// 
TODO(doc): @quake pub fn is_cellbase(&self) -> bool { self.index == 0 } + /// TODO(doc): @quake pub fn is_genesis(&self) -> bool { self.block_number == 0 } } +/// TODO(doc): @quake #[derive(Clone, Eq, PartialEq, Debug, Default)] pub struct EpochExt { pub(crate) number: EpochNumber, @@ -81,39 +97,48 @@ impl EpochExt { // Simple Getters // + /// TODO(doc): @quake pub fn number(&self) -> EpochNumber { self.number } + /// TODO(doc): @quake pub fn primary_reward(&self) -> Capacity { Capacity::shannons( self.base_block_reward.as_u64() * self.length + self.remainder_reward.as_u64(), ) } + /// TODO(doc): @quake pub fn base_block_reward(&self) -> &Capacity { &self.base_block_reward } + /// TODO(doc): @quake pub fn remainder_reward(&self) -> &Capacity { &self.remainder_reward } + /// TODO(doc): @quake pub fn previous_epoch_hash_rate(&self) -> &U256 { &self.previous_epoch_hash_rate } + /// TODO(doc): @quake pub fn last_block_hash_in_previous_epoch(&self) -> packed::Byte32 { self.last_block_hash_in_previous_epoch.clone() } + /// TODO(doc): @quake pub fn start_number(&self) -> BlockNumber { self.start_number } + /// TODO(doc): @quake pub fn length(&self) -> BlockNumber { self.length } + /// TODO(doc): @quake pub fn compact_target(&self) -> u32 { self.compact_target } @@ -122,22 +147,27 @@ impl EpochExt { // Simple Setters // + /// TODO(doc): @quake pub fn set_number(&mut self, number: BlockNumber) { self.number = number; } + /// TODO(doc): @quake pub fn set_base_block_reward(&mut self, base_block_reward: Capacity) { self.base_block_reward = base_block_reward; } + /// TODO(doc): @quake pub fn set_remainder_reward(&mut self, remainder_reward: Capacity) { self.remainder_reward = remainder_reward; } + /// TODO(doc): @quake pub fn set_previous_epoch_hash_rate(&mut self, previous_epoch_hash_rate: U256) { self.previous_epoch_hash_rate = previous_epoch_hash_rate; } + /// TODO(doc): @quake pub fn set_last_block_hash_in_previous_epoch( &mut self, last_block_hash_in_previous_epoch: 
packed::Byte32, @@ -145,20 +175,24 @@ impl EpochExt { self.last_block_hash_in_previous_epoch = last_block_hash_in_previous_epoch; } + /// TODO(doc): @quake pub fn set_start_number(&mut self, start_number: BlockNumber) { self.start_number = start_number; } + /// TODO(doc): @quake pub fn set_length(&mut self, length: BlockNumber) { self.length = length; } + /// TODO(doc): @quake pub fn set_primary_reward(&mut self, primary_reward: Capacity) { let primary_reward_u64 = primary_reward.as_u64(); self.base_block_reward = Capacity::shannons(primary_reward_u64 / self.length); self.remainder_reward = Capacity::shannons(primary_reward_u64 % self.length); } + /// TODO(doc): @quake pub fn set_compact_target(&mut self, compact_target: u32) { self.compact_target = compact_target; } @@ -167,18 +201,22 @@ impl EpochExt { // Normal Methods // + /// TODO(doc): @quake pub fn new_builder() -> EpochExtBuilder { EpochExtBuilder(EpochExt::default()) } + /// TODO(doc): @quake pub fn into_builder(self) -> EpochExtBuilder { EpochExtBuilder(self) } + /// TODO(doc): @quake pub fn is_genesis(&self) -> bool { 0 == self.number } + /// TODO(doc): @quake pub fn block_reward(&self, number: BlockNumber) -> Result { if number >= self.start_number() && number < self.start_number() + self.remainder_reward.as_u64() @@ -191,6 +229,7 @@ impl EpochExt { } } + /// TODO(doc): @quake pub fn number_with_fraction(&self, number: BlockNumber) -> EpochNumberWithFraction { debug_assert!( number >= self.start_number() && number < self.start_number() + self.length() @@ -200,6 +239,7 @@ impl EpochExt { // We name this issuance since it covers multiple parts: block reward, // NervosDAO issuance as well as treasury part. 
+ /// TODO(doc): @quake pub fn secondary_block_issuance( &self, block_number: BlockNumber, @@ -325,19 +365,32 @@ impl Ord for EpochNumberWithFraction { } impl EpochNumberWithFraction { + /// TODO(doc): @quake pub const NUMBER_OFFSET: usize = 0; + /// TODO(doc): @quake pub const NUMBER_BITS: usize = 24; + /// TODO(doc): @quake pub const NUMBER_MAXIMUM_VALUE: u64 = (1u64 << Self::NUMBER_BITS); + /// TODO(doc): @quake pub const NUMBER_MASK: u64 = (Self::NUMBER_MAXIMUM_VALUE - 1); + /// TODO(doc): @quake pub const INDEX_OFFSET: usize = Self::NUMBER_BITS; + /// TODO(doc): @quake pub const INDEX_BITS: usize = 16; + /// TODO(doc): @quake pub const INDEX_MAXIMUM_VALUE: u64 = (1u64 << Self::INDEX_BITS); + /// TODO(doc): @quake pub const INDEX_MASK: u64 = (Self::INDEX_MAXIMUM_VALUE - 1); + /// TODO(doc): @quake pub const LENGTH_OFFSET: usize = Self::NUMBER_BITS + Self::INDEX_BITS; + /// TODO(doc): @quake pub const LENGTH_BITS: usize = 16; + /// TODO(doc): @quake pub const LENGTH_MAXIMUM_VALUE: u64 = (1u64 << Self::LENGTH_BITS); + /// TODO(doc): @quake pub const LENGTH_MASK: u64 = (Self::LENGTH_MAXIMUM_VALUE - 1); + /// TODO(doc): @quake pub fn new(number: u64, index: u64, length: u64) -> EpochNumberWithFraction { debug_assert!(number < Self::NUMBER_MAXIMUM_VALUE); debug_assert!(index < Self::INDEX_MAXIMUM_VALUE); @@ -346,6 +399,7 @@ impl EpochNumberWithFraction { Self::new_unchecked(number, index, length) } + /// TODO(doc): @quake pub const fn new_unchecked(number: u64, index: u64, length: u64) -> Self { EpochNumberWithFraction( (length << Self::LENGTH_OFFSET) @@ -354,22 +408,27 @@ impl EpochNumberWithFraction { ) } + /// TODO(doc): @quake pub fn number(self) -> EpochNumber { (self.0 >> Self::NUMBER_OFFSET) & Self::NUMBER_MASK } + /// TODO(doc): @quake pub fn index(self) -> u64 { (self.0 >> Self::INDEX_OFFSET) & Self::INDEX_MASK } + /// TODO(doc): @quake pub fn length(self) -> u64 { (self.0 >> Self::LENGTH_OFFSET) & Self::LENGTH_MASK } + /// TODO(doc): @quake pub const fn 
full_value(self) -> u64 { self.0 } + /// TODO(doc): @quake // One caveat here, is that if the user specifies a zero epoch length either // deliberately, or by accident, calling to_rational() after that might // result in a division by zero panic. To prevent that, this method would @@ -384,6 +443,7 @@ impl EpochNumberWithFraction { } } + /// TODO(doc): @quake pub fn to_rational(self) -> RationalU256 { RationalU256::new(self.index().into(), self.length().into()) + U256::from(self.number()) } diff --git a/util/types/src/core/mod.rs b/util/types/src/core/mod.rs index f87c424511..342993a513 100644 --- a/util/types/src/core/mod.rs +++ b/util/types/src/core/mod.rs @@ -1,6 +1,14 @@ -//! Rust types. +//! The essential rust types for CKB. //! -//! Packed bytes wrappers are not enough for all usage scenarios. +//! [Packed bytes] are not enough for all usage scenarios. +//! +//! This module provides essential rust types. +//! +//! Most of them are composed of [those packed bytes] or can convert between self and [those bytes]. +//! +//! [Packed bytes]: ../packed/index.html +//! [those packed bytes]: ../packed/index.html +//! [those bytes]: ../packed/index.html pub mod cell; pub mod error; @@ -20,8 +28,18 @@ pub use transaction_meta::{TransactionMeta, TransactionMetaBuilder}; pub use views::{BlockView, HeaderView, TransactionView, UncleBlockVecView, UncleBlockView}; pub use ckb_occupied_capacity::{capacity_bytes, Capacity, Ratio, Result as CapacityResult}; + +/// Public key. It's a 512 bits fixed binary data. pub type PublicKey = ckb_fixed_hash::H512; + +/// Block number. pub type BlockNumber = u64; + +/// Epoch number. pub type EpochNumber = u64; + +/// Cycle number. pub type Cycle = u64; + +/// Version number.
pub type Version = u32; diff --git a/util/types/src/core/reward.rs b/util/types/src/core/reward.rs index e58f038722..a9c514cbb9 100644 --- a/util/types/src/core/reward.rs +++ b/util/types/src/core/reward.rs @@ -1,33 +1,93 @@ use crate::{core::Capacity, packed::Byte32}; +/// Details of miner rewards issued by block cellbase transaction. +/// +/// # References: +/// - [Token Issuance](https://github.com/nervosnetwork/rfcs/blob/v2020.01.15/rfcs/0015-ckb-cryptoeconomics/0015-ckb-cryptoeconomics.md#token-issuance) +/// - [Miner Compensation](https://github.com/nervosnetwork/rfcs/blob/v2020.01.15/rfcs/0015-ckb-cryptoeconomics/0015-ckb-cryptoeconomics.md#miner-compensation) +/// - [Paying for Transaction Fees](https://github.com/nervosnetwork/rfcs/blob/v2020.01.15/rfcs/0015-ckb-cryptoeconomics/0015-ckb-cryptoeconomics.md#paying-for-transaction-fees) +/// - [`RewardCalculator::txs_fee(..)`](../../ckb_reward_calculator/struct.RewardCalculator.html#method.txs_fees) +/// - [Collecting State Rent with Secondary Issuance and the NervosDAO](https://github.com/nervosnetwork/rfcs/blob/v2020.01.15/rfcs/0015-ckb-cryptoeconomics/0015-ckb-cryptoeconomics.md#collecting-state-rent-with-secondary-issuance-and-the-nervosdao) +/// - [Calculation of Nervos DAO and Examples](https://github.com/nervosnetwork/rfcs/blob/v2020.01.15/rfcs/0023-dao-deposit-withdraw/0023-dao-deposit-withdraw.md#calculation) #[derive(Debug, Default)] pub struct BlockReward { + /// The total block reward. pub total: Capacity, + /// The primary block reward. pub primary: Capacity, + /// The secondary block reward. + /// + /// # Notice + /// + /// - A part of the secondary issuance goes to the miners, the ratio depends on how many CKB + /// are used to store state. + /// - And a part of the secondary issuance goes to the NervosDAO, the ratio depends on how many + /// CKB are deposited and locked in the NervosDAO. 
+ /// - The rest of the secondary issuance is determined by the community through the governance + /// mechanism. + /// Before the community can reach agreement, this part of the secondary issuance is going to + /// be burned. pub secondary: Capacity, + /// The transaction fees that are rewarded to miners because the transaction is committed in + /// the block. + /// + /// # Notice + /// + /// Miners only get 60% of the transaction fee for each transaction committed in the block. pub tx_fee: Capacity, + /// The transaction fees that are rewarded to miners because the transaction is proposed in the + /// block or its uncles. + /// + /// # Notice + /// + /// Miners only get 40% of the transaction fee for each transaction proposed in the block + /// and committed later in its active commit window. pub proposal_reward: Capacity, } +/// Native token issuance. +/// +/// # References: +/// - [Token Issuance](https://github.com/nervosnetwork/rfcs/blob/v2020.01.15/rfcs/0015-ckb-cryptoeconomics/0015-ckb-cryptoeconomics.md#token-issuance) #[derive(Debug, Default, PartialEq, Eq)] pub struct BlockIssuance { + /// The primary issuance. pub primary: Capacity, + /// The secondary issuance. pub secondary: Capacity, } +/// Miner reward. +/// +/// # References: +/// - [Token Issuance](https://github.com/nervosnetwork/rfcs/blob/v2020.01.15/rfcs/0015-ckb-cryptoeconomics/0015-ckb-cryptoeconomics.md#token-issuance) +/// - [Miner Compensation](https://github.com/nervosnetwork/rfcs/blob/v2020.01.15/rfcs/0015-ckb-cryptoeconomics/0015-ckb-cryptoeconomics.md#miner-compensation) +/// - [Paying for Transaction Fees](https://github.com/nervosnetwork/rfcs/blob/v2020.01.15/rfcs/0015-ckb-cryptoeconomics/0015-ckb-cryptoeconomics.md#paying-for-transaction-fees) +/// - [`RewardCalculator::txs_fee(..)`](../../ckb_reward_calculator/struct.RewardCalculator.html#method.txs_fees) #[derive(Debug, Default, PartialEq, Eq)] pub struct MinerReward { + /// The miner receives all the primary issuance. 
pub primary: Capacity, + /// The miner receives part of the secondary issuance. pub secondary: Capacity, + /// The miner receives 60% of the transaction fee for each transaction committed in the block. pub committed: Capacity, + /// The miner receives 40% of the transaction fee for each transaction proposed in the block, + /// and committed later in its active commit window. pub proposal: Capacity, } +/// Includes the rewards details for a block and when the block is finalized. #[derive(Debug, Default, PartialEq, Eq)] pub struct BlockEconomicState { + /// Native token issuance in the block. pub issuance: BlockIssuance, + /// Miner reward in the block. pub miner_reward: MinerReward, + /// The total fees of all transactions committed in the block. pub txs_fee: Capacity, + /// The block hash of the block which creates the rewards as cells in its cellbase + /// transaction. pub finalized_at: Byte32, } diff --git a/util/types/src/core/service.rs b/util/types/src/core/service.rs index a66cbe440b..fe5d1ea01e 100644 --- a/util/types/src/core/service.rs +++ b/util/types/src/core/service.rs @@ -1,15 +1,25 @@ +//! Types for CKB services. +//! +//! A CKB service acts as an actor, which processes requests from a channel and sends back the +//! response via one shot channel. use ckb_channel::Sender; use std::sync::mpsc; +/// Default channel size to send control signals. pub const SIGNAL_CHANNEL_SIZE: usize = 1; +/// Default channel size to send messages. pub const DEFAULT_CHANNEL_SIZE: usize = 32; +/// Synchronous request sent to the service. pub struct Request { + /// One shot channel for the service to send back the response. pub responder: mpsc::Sender, + /// Request arguments. pub arguments: A, } impl Request { + /// Call the service with the arguments and wait for the response.
pub fn call(sender: &Sender>, arguments: A) -> Option { let (responder, response) = mpsc::channel(); let _ = sender.send(Request { diff --git a/util/types/src/core/transaction_meta.rs b/util/types/src/core/transaction_meta.rs index 9ae2d860aa..e4fd9a5aaa 100644 --- a/util/types/src/core/transaction_meta.rs +++ b/util/types/src/core/transaction_meta.rs @@ -2,6 +2,7 @@ use bit_vec::BitVec; use crate::packed::Byte32; +/// TODO(doc): @quake #[derive(Default, Debug, PartialEq, Eq, Clone)] pub struct TransactionMeta { pub(crate) block_number: u64, @@ -13,6 +14,7 @@ pub struct TransactionMeta { } impl TransactionMeta { + /// TODO(doc): @quake pub fn new( block_number: u64, epoch_number: u64, @@ -58,36 +60,44 @@ impl TransactionMeta { self.dead_cell.len() } + /// TODO(doc): @quake pub fn block_number(&self) -> u64 { self.block_number } + /// TODO(doc): @quake pub fn epoch_number(&self) -> u64 { self.epoch_number } + /// TODO(doc): @quake pub fn block_hash(&self) -> Byte32 { self.block_hash.clone() } + /// TODO(doc): @quake pub fn is_empty(&self) -> bool { self.dead_cell.is_empty() } + /// TODO(doc): @quake pub fn is_dead(&self, index: usize) -> Option { self.dead_cell.get(index) } + /// TODO(doc): @quake pub fn all_dead(&self) -> bool { self.dead_cell.all() } + /// TODO(doc): @quake pub fn set_dead(&mut self, index: usize) { if index < self.len() { self.dead_cell.set(index, true); } } + /// TODO(doc): @quake pub fn unset_dead(&mut self, index: usize) { if index < self.len() { self.dead_cell.set(index, false); @@ -95,6 +105,7 @@ impl TransactionMeta { } } +/// TODO(doc): @quake #[derive(Default)] pub struct TransactionMetaBuilder { block_number: u64, @@ -106,36 +117,43 @@ pub struct TransactionMetaBuilder { } impl TransactionMetaBuilder { + /// TODO(doc): @quake pub fn block_number(mut self, block_number: u64) -> Self { self.block_number = block_number; self } + /// TODO(doc): @quake pub fn epoch_number(mut self, epoch_number: u64) -> Self { self.epoch_number = 
epoch_number; self } + /// TODO(doc): @quake pub fn block_hash(mut self, block_hash: Byte32) -> Self { self.block_hash = block_hash; self } + /// TODO(doc): @quake pub fn cellbase(mut self, cellbase: bool) -> Self { self.cellbase = cellbase; self } + /// TODO(doc): @quake pub fn bits(mut self, bits: Vec) -> Self { self.bits = bits; self } + /// TODO(doc): @quake pub fn len(mut self, len: usize) -> Self { self.len = len; self } + /// TODO(doc): @quake pub fn build(self) -> TransactionMeta { let TransactionMetaBuilder { block_number, diff --git a/util/types/src/core/views.rs b/util/types/src/core/views.rs index 9a86e861d6..1b6fa6fed8 100644 --- a/util/types/src/core/views.rs +++ b/util/types/src/core/views.rs @@ -21,6 +21,15 @@ use crate::{ * Please DO NOT implement `Default`, use builders to construct views. */ +/// A readonly and immutable struct which includes [`Transaction`] and its associated hashes. +/// +/// # Notice +/// +/// This struct does not implement the trait [`Default`], use [`TransactionBuilder`] to construct it. +/// +/// [`Default`]: https://doc.rust-lang.org/std/default/trait.Default.html +/// [`Transaction`]: ../packed/struct.Transaction.html +/// [`TransactionBuilder`]: struct.TransactionBuilder.html #[derive(Debug, Clone)] pub struct TransactionView { pub(crate) data: packed::Transaction, @@ -28,24 +37,60 @@ pub struct TransactionView { pub(crate) witness_hash: packed::Byte32, } +/// A readonly and immutable struct which includes [`Header`] and its hash. +/// +/// # Notice +/// +/// This struct does not implement the trait [`Default`], use [`HeaderBuilder`] to construct it. +/// +/// [`Default`]: https://doc.rust-lang.org/std/default/trait.Default.html +/// [`Header`]: ../packed/struct.Header.html +/// [`HeaderBuilder`]: struct.HeaderBuilder.html #[derive(Debug, Clone)] pub struct HeaderView { pub(crate) data: packed::Header, pub(crate) hash: packed::Byte32, } +/// A readonly and immutable struct which includes [`UncleBlock`] and its hash.
+/// +/// # Notice +/// +/// This struct does not implement the trait [`Default`], use [`BlockView::as_uncle()`] to construct it. +/// +/// [`Default`]: https://doc.rust-lang.org/std/default/trait.Default.html +/// [`UncleBlock`]: ../packed/struct.UncleBlock.html +/// [`BlockView::as_uncle()`]: struct.BlockView.html#method.as_uncle #[derive(Debug, Clone)] pub struct UncleBlockView { pub(crate) data: packed::UncleBlock, pub(crate) hash: packed::Byte32, } +/// A readonly and immutable struct which includes a vector of [`UncleBlock`]s and their hashes. +/// +/// # Notice +/// +/// This struct does not implement the trait [`Default`], use [`BlockView::uncles()`] to construct it. +/// +/// [`Default`]: https://doc.rust-lang.org/std/default/trait.Default.html +/// [`UncleBlock`]: ../packed/struct.UncleBlock.html +/// [`BlockView::uncles()`]: struct.BlockView.html#method.uncles #[derive(Debug, Clone)] pub struct UncleBlockVecView { pub(crate) data: packed::UncleBlockVec, pub(crate) hashes: packed::Byte32Vec, } +/// A readonly and immutable struct which includes [`Block`] and its associated hashes. +/// +/// # Notice +/// +/// This struct does not implement the trait [`Default`], use [`BlockBuilder`] to construct it. +/// +/// [`Default`]: https://doc.rust-lang.org/std/default/trait.Default.html +/// [`Block`]: ../packed/struct.Block.html +/// [`BlockBuilder`]: struct.BlockBuilder.html #[derive(Debug, Clone)] pub struct BlockView { pub(crate) data: packed::Block, @@ -114,59 +159,160 @@ impl BlockView { * Define getters */ -macro_rules! define_simple_getter { - ($field:ident, $type:ident) => { +macro_rules! define_clone_getter { + ($field:ident, $type:ident, $comment:expr) => { + #[doc = $comment] pub fn $field(&self) -> packed::$type { self.$field.clone() } }; } -macro_rules! define_vector_getter { +macro_rules!
define_data_getter { + ($type:ident) => { + define_clone_getter!( + data, + $type, + concat!( + "Gets a clone of [`packed::", + stringify!($type), + "`](../packed/struct.", + stringify!($type), + ".html)." + ) + ); + }; +} + +macro_rules! define_cache_getter { ($field:ident, $type:ident) => { + define_clone_getter!( + $field, + $type, + concat!("Gets a clone of `", stringify!($field), "`.") + ); + }; +} + +macro_rules! define_vector_getter { + ($field:ident, $type:ident, $comment:expr) => { + #[doc = $comment] pub fn $field(&self) -> &[packed::$type] { &self.$field[..] } }; + ($field:ident, $type:ident) => { + define_vector_getter!( + $field, + $type, + concat!("Extracts a slice of `", stringify!($field), "`.") + ); + }; +} + +macro_rules! define_inner_getter { + (header, unpacked, $field:ident, $type:ident) => { + define_inner_getter!( + $field, + $type, + data().as_reader().raw().$field().unpack(), + concat!("Gets `raw.", stringify!($field), "`.") + ); + }; + (header, packed, $field:ident, $type:ident) => { + define_inner_getter!( + $field, + packed::$type, + data().raw().$field(), + concat!("Gets `raw.", stringify!($field), "`.") + ); + }; + (uncle, unpacked, $field:ident, $type:ident) => { + define_inner_getter!( + $field, + $type, + data().as_reader().header().raw().$field().unpack(), + concat!("Gets `header.raw.", stringify!($field), "`.") + ); + }; + (uncle, packed, $field:ident, $type:ident) => { + define_inner_getter!( + $field, + packed::$type, + data().header().raw().$field(), + concat!("Gets `header.raw.", stringify!($field), "`.") + ); + }; + (block, unpacked, $field:ident, $type:ident) => { + define_inner_getter!( + $field, + $type, + data().as_reader().header().raw().$field().unpack(), + concat!("Gets `header.raw.", stringify!($field), "`.") + ); + }; + (block, packed, $field:ident, $type:ident) => { + define_inner_getter!( + $field, + packed::$type, + data().header().raw().$field(), + concat!("Gets `header.raw.", stringify!($field), "`.") + ); + }; 
+ ($field:ident, $type:path, $f0:ident()$(.$fi:ident())*, $comment:expr) => { + #[doc = $comment] + pub fn $field(&self) -> $type { + self.$f0()$(.$fi())* + } + }; } impl TransactionView { - define_simple_getter!(data, Transaction); - define_simple_getter!(hash, Byte32); - define_simple_getter!(witness_hash, Byte32); + define_data_getter!(Transaction); + define_cache_getter!(hash, Byte32); + define_cache_getter!(witness_hash, Byte32); + /// Gets `raw.version`. pub fn version(&self) -> Version { self.data().raw().version().unpack() } + /// Gets `raw.cell_deps`. pub fn cell_deps(&self) -> packed::CellDepVec { self.data().raw().cell_deps() } + /// Gets `raw.header_deps`. pub fn header_deps(&self) -> packed::Byte32Vec { self.data().raw().header_deps() } + /// Gets `raw.inputs`. pub fn inputs(&self) -> packed::CellInputVec { self.data().raw().inputs() } + /// Gets `raw.outputs`. pub fn outputs(&self) -> packed::CellOutputVec { self.data().raw().outputs() } + /// Gets `raw.outputs_data`. pub fn outputs_data(&self) -> packed::BytesVec { self.data().raw().outputs_data() } + /// Gets `witnesses`. pub fn witnesses(&self) -> packed::BytesVec { self.data().witnesses() } + /// Gets an output through its index. pub fn output(&self, idx: usize) -> Option { self.data().raw().outputs().get(idx) } + /// Gets an output and its data through its index. pub fn output_with_data(&self, idx: usize) -> Option<(packed::CellOutput, Bytes)> { self.data().raw().outputs().get(idx).map(|output| { let data = self @@ -180,18 +326,21 @@ impl TransactionView { }) } + /// Gets out points for all outputs. pub fn output_pts(&self) -> Vec { (0..self.data().raw().outputs().len()) .map(|x| packed::OutPoint::new(self.hash(), x as u32)) .collect() } + /// Creates an iterator from out points of all outputs. 
pub fn output_pts_iter(&self) -> impl Iterator { let tx_hash = self.hash(); (0..self.data().raw().outputs().len()) .map(move |x| packed::OutPoint::new(tx_hash.clone(), x as u32)) } + /// Creates an iterator from out points of all inputs. pub fn input_pts_iter(&self) -> impl Iterator { self.data() .raw() @@ -200,139 +349,124 @@ impl TransactionView { .map(|x| x.previous_output()) } + /// Creates an iterator from all outputs and their data. pub fn outputs_with_data_iter(&self) -> impl Iterator { self.outputs() .into_iter() .zip(self.outputs_data().into_iter().map(|d| d.raw_data())) } + /// Creates an iterator from `raw.cell_deps`. pub fn cell_deps_iter(&self) -> impl Iterator { self.data().raw().cell_deps().into_iter() } + /// Creates an iterator from `raw.header_deps`. pub fn header_deps_iter(&self) -> impl Iterator { self.data().raw().header_deps().into_iter() } + /// Sets a fake transaction hash. pub fn fake_hash(mut self, hash: packed::Byte32) -> Self { self.hash = hash; self } + /// Sets a fake witness hash. pub fn fake_witness_hash(mut self, witness_hash: packed::Byte32) -> Self { self.witness_hash = witness_hash; self } + /// Sums the capacities of all outputs. pub fn outputs_capacity(&self) -> CapacityResult { self.data().raw().outputs().total_capacity() } + /// Checks whether the transaction is a cellbase. pub fn is_cellbase(&self) -> bool { self.data().is_cellbase() } + /// Creates a new `ProposalShortId` from the transaction hash. pub fn proposal_short_id(&self) -> packed::ProposalShortId { packed::ProposalShortId::from_tx_hash(&self.hash()) } } -macro_rules! define_header_unpacked_inner_getter { - ($field:ident, $type:ident) => { - pub fn $field(&self) -> $type { - self.data().as_reader().raw().$field().unpack() - } - }; -} - -macro_rules!
define_header_packed_inner_getter { - ($field:ident, $type:ident) => { - pub fn $field(&self) -> packed::$type { - self.data().raw().$field() - } - }; -} - impl HeaderView { - define_simple_getter!(data, Header); - define_simple_getter!(hash, Byte32); + define_data_getter!(Header); + define_cache_getter!(hash, Byte32); - define_header_unpacked_inner_getter!(version, Version); - define_header_unpacked_inner_getter!(number, BlockNumber); - define_header_unpacked_inner_getter!(compact_target, u32); - define_header_unpacked_inner_getter!(timestamp, u64); - define_header_unpacked_inner_getter!(epoch, EpochNumberWithFraction); + define_inner_getter!(header, unpacked, version, Version); + define_inner_getter!(header, unpacked, number, BlockNumber); + define_inner_getter!(header, unpacked, compact_target, u32); + define_inner_getter!(header, unpacked, timestamp, u64); + define_inner_getter!(header, unpacked, epoch, EpochNumberWithFraction); - define_header_packed_inner_getter!(parent_hash, Byte32); - define_header_packed_inner_getter!(transactions_root, Byte32); - define_header_packed_inner_getter!(proposals_hash, Byte32); - define_header_packed_inner_getter!(uncles_hash, Byte32); + define_inner_getter!(header, packed, parent_hash, Byte32); + define_inner_getter!(header, packed, transactions_root, Byte32); + define_inner_getter!(header, packed, proposals_hash, Byte32); + define_inner_getter!(header, packed, uncles_hash, Byte32); + /// Gets `raw.dao`. pub fn dao(&self) -> packed::Byte32 { self.data().raw().dao() } + /// Gets `raw.difficulty`. pub fn difficulty(&self) -> U256 { self.data().raw().difficulty() } + /// Gets `nonce`. pub fn nonce(&self) -> u128 { self.data().nonce().unpack() } + /// Checks whether the header is the genesis block. pub fn is_genesis(&self) -> bool { self.number() == 0 } + /// Sets a fake header hash. pub fn fake_hash(mut self, hash: packed::Byte32) -> Self { self.hash = hash; self } } -macro_rules!
define_uncle_unpacked_inner_getter { - ($field:ident, $type:ident) => { - pub fn $field(&self) -> $type { - self.data().as_reader().header().raw().$field().unpack() - } - }; -} - -macro_rules! define_uncle_packed_inner_getter { - ($field:ident, $type:ident) => { - pub fn $field(&self) -> packed::$type { - self.data().header().raw().$field() - } - }; -} - impl UncleBlockView { - define_simple_getter!(data, UncleBlock); - define_simple_getter!(hash, Byte32); + define_data_getter!(UncleBlock); + define_cache_getter!(hash, Byte32); - define_uncle_unpacked_inner_getter!(version, Version); - define_uncle_unpacked_inner_getter!(number, BlockNumber); - define_uncle_unpacked_inner_getter!(compact_target, u32); - define_uncle_unpacked_inner_getter!(timestamp, u64); - define_uncle_unpacked_inner_getter!(epoch, EpochNumberWithFraction); + define_inner_getter!(uncle, unpacked, version, Version); + define_inner_getter!(uncle, unpacked, number, BlockNumber); + define_inner_getter!(uncle, unpacked, compact_target, u32); + define_inner_getter!(uncle, unpacked, timestamp, u64); + define_inner_getter!(uncle, unpacked, epoch, EpochNumberWithFraction); - define_uncle_packed_inner_getter!(parent_hash, Byte32); - define_uncle_packed_inner_getter!(transactions_root, Byte32); - define_uncle_packed_inner_getter!(proposals_hash, Byte32); - define_uncle_packed_inner_getter!(uncles_hash, Byte32); + define_inner_getter!(uncle, packed, parent_hash, Byte32); + define_inner_getter!(uncle, packed, transactions_root, Byte32); + define_inner_getter!(uncle, packed, proposals_hash, Byte32); + define_inner_getter!(uncle, packed, uncles_hash, Byte32); + /// Gets `header.raw.dao`. pub fn dao(&self) -> packed::Byte32 { self.data().header().raw().dao() } + /// Gets `header.raw.difficulty`. pub fn difficulty(&self) -> U256 { - self.header().difficulty() + self.data().header().raw().difficulty() } + /// Gets `header.nonce`. 
pub fn nonce(&self) -> u128 { self.data().header().nonce().unpack() } + /// Gets `header`. pub fn header(&self) -> HeaderView { HeaderView { data: self.data.header(), @@ -340,20 +474,23 @@ impl UncleBlockView { } } + /// Sets a fake hash. pub fn fake_hash(mut self, hash: packed::Byte32) -> Self { self.hash = hash; self } + /// Calculates the hash for proposals. pub fn calc_proposals_hash(&self) -> packed::Byte32 { self.data().as_reader().calc_proposals_hash() } } impl UncleBlockVecView { - define_simple_getter!(data, UncleBlockVec); - define_simple_getter!(hashes, Byte32Vec); + define_data_getter!(UncleBlockVec); + define_cache_getter!(hashes, Byte32Vec); + /// Gets an uncle block through its index. pub fn get(&self, index: usize) -> Option { if index >= self.data().len() { None @@ -362,6 +499,11 @@ impl UncleBlockVecView { } } + /// Gets an uncle block through its index without checks. + /// + /// # Panics + /// + /// Panics if the index out of range. pub fn get_unchecked(&self, index: usize) -> UncleBlockView { let data = self.data().get(index).should_be_ok(); let hash = self.hashes().get(index).should_be_ok(); @@ -401,53 +543,41 @@ impl ::std::iter::IntoIterator for UncleBlockVecView { } } -macro_rules! define_block_unpacked_inner_getter { - ($field:ident, $type:ident) => { - pub fn $field(&self) -> $type { - self.data().as_reader().header().raw().$field().unpack() - } - }; -} - -macro_rules! 
define_block_packed_inner_getter { - ($field:ident, $type:ident) => { - pub fn $field(&self) -> packed::$type { - self.data().header().raw().$field() - } - }; -} - impl BlockView { - define_simple_getter!(data, Block); - define_simple_getter!(hash, Byte32); - define_simple_getter!(uncle_hashes, Byte32Vec); + define_data_getter!(Block); + define_cache_getter!(hash, Byte32); + define_cache_getter!(uncle_hashes, Byte32Vec); define_vector_getter!(tx_hashes, Byte32); define_vector_getter!(tx_witness_hashes, Byte32); - define_block_unpacked_inner_getter!(version, Version); - define_block_unpacked_inner_getter!(number, BlockNumber); - define_block_unpacked_inner_getter!(compact_target, u32); - define_block_unpacked_inner_getter!(timestamp, u64); - define_block_unpacked_inner_getter!(epoch, EpochNumberWithFraction); + define_inner_getter!(block, unpacked, version, Version); + define_inner_getter!(block, unpacked, number, BlockNumber); + define_inner_getter!(block, unpacked, compact_target, u32); + define_inner_getter!(block, unpacked, timestamp, u64); + define_inner_getter!(block, unpacked, epoch, EpochNumberWithFraction); - define_block_packed_inner_getter!(parent_hash, Byte32); - define_block_packed_inner_getter!(transactions_root, Byte32); - define_block_packed_inner_getter!(proposals_hash, Byte32); - define_block_packed_inner_getter!(uncles_hash, Byte32); + define_inner_getter!(block, packed, parent_hash, Byte32); + define_inner_getter!(block, packed, transactions_root, Byte32); + define_inner_getter!(block, packed, proposals_hash, Byte32); + define_inner_getter!(block, packed, uncles_hash, Byte32); + /// Gets `header.raw.dao`. pub fn dao(&self) -> packed::Byte32 { self.data().header().raw().dao() } + /// Gets `header.nonce`. pub fn nonce(&self) -> u128 { self.data().header().nonce().unpack() } + /// Gets `header.difficulty`. pub fn difficulty(&self) -> U256 { self.header().difficulty() } + /// Gets `header`. 
pub fn header(&self) -> HeaderView { HeaderView { data: self.data.header(), @@ -455,6 +585,7 @@ impl BlockView { } } + /// Gets `uncles`. pub fn uncles(&self) -> UncleBlockVecView { UncleBlockVecView { data: self.data.uncles(), @@ -462,6 +593,7 @@ impl BlockView { } } + /// Converts into an uncle block. pub fn as_uncle(&self) -> UncleBlockView { UncleBlockView { data: self.data.as_uncle(), @@ -469,6 +601,7 @@ impl BlockView { } } + /// Gets `transactions`. pub fn transactions(&self) -> Vec { self.data .transactions() @@ -483,6 +616,7 @@ impl BlockView { .collect() } + /// Creates an iterator from `proposals` of the block and `proposals` of `uncles`. pub fn union_proposal_ids_iter(&self) -> impl Iterator { self.data().proposals().into_iter().chain( self.data() @@ -492,10 +626,12 @@ impl BlockView { ) } + /// Creates a hashset from `proposals` of the block and `proposals` of `uncles`. pub fn union_proposal_ids(&self) -> HashSet { self.union_proposal_ids_iter().collect() } + /// Gets a transaction through its index. pub fn transaction(&self, index: usize) -> Option { self.data.transactions().get(index).map(|data| { let hash = self.tx_hashes.get(index).should_be_ok().to_owned(); @@ -508,6 +644,7 @@ impl BlockView { }) } + /// Gets an output through its transaction index and its own index. pub fn output(&self, tx_index: usize, index: usize) -> Option { self.data .transactions() @@ -515,23 +652,28 @@ impl BlockView { .and_then(|tx| tx.raw().outputs().get(index)) } + /// Sets a fake header hash. pub fn fake_hash(mut self, hash: packed::Byte32) -> Self { self.hash = hash; self } + /// Checks whether the block is the genesis block. pub fn is_genesis(&self) -> bool { self.number() == 0 } + /// Calculates the hash for uncle blocks. pub fn calc_uncles_hash(&self) -> packed::Byte32 { self.data().as_reader().calc_uncles_hash() } + /// Calculates the hash for proposals. 
pub fn calc_proposals_hash(&self) -> packed::Byte32 { self.data().as_reader().calc_proposals_hash() } + /// Calculates the merkel root for transactions with witnesses. pub fn calc_transactions_root(&self) -> packed::Byte32 { merkle_root(&[ self.calc_raw_transactions_root(), @@ -539,10 +681,12 @@ impl BlockView { ]) } + /// Calculates the merkel root for transactions without witnesses. pub fn calc_raw_transactions_root(&self) -> packed::Byte32 { merkle_root(&self.tx_hashes[..]) } + /// Calculates the merkel root for transaction witnesses. pub fn calc_witnesses_root(&self) -> packed::Byte32 { merkle_root(&self.tx_witness_hashes[..]) } @@ -579,6 +723,12 @@ impl_std_cmp_eq_and_hash!(BlockView, hash); */ impl BlockView { + /// Creates a new `BlockView`. + /// + /// # Notice + /// + /// [`BlockView`] created by this method could have invalid hashes or + /// invalid merkle roots in the header. pub fn new_unchecked( header: HeaderView, uncles: UncleBlockVecView, @@ -608,6 +758,9 @@ impl BlockView { */ impl packed::Transaction { + /// Calculates the associated hashes and converts into [`TransactionView`] with those hashes. + /// + /// [`TransactionView`]: ../core/struct.TransactionView.html pub fn into_view(self) -> TransactionView { let hash = self.calc_tx_hash(); let witness_hash = self.calc_witness_hash(); @@ -620,6 +773,9 @@ impl packed::Transaction { } impl packed::Header { + /// Calculates the header hash and converts into [`HeaderView`] with the hash. + /// + /// [`HeaderView`]: ../core/struct.HeaderView.html pub fn into_view(self) -> HeaderView { let hash = self.calc_header_hash(); HeaderView { data: self, hash } @@ -627,6 +783,9 @@ impl packed::Header { } impl packed::UncleBlock { + /// Calculates the header hash and converts into [`UncleBlockView`] with the hash. 
+ /// + /// [`UncleBlockView`]: ../core/struct.UncleBlockView.html pub fn into_view(self) -> UncleBlockView { let hash = self.calc_header_hash(); UncleBlockView { data: self, hash } @@ -634,12 +793,23 @@ impl packed::UncleBlock { } impl packed::Block { + /// Calculates transaction associated hashes and converts them into [`BlockView`]. + /// + /// # Notice + /// + /// [`BlockView`] created by this method could have invalid hashes or + /// invalid merkle roots in the header. + /// + /// [`BlockView`]: ../core/struct.BlockView.html pub fn into_view_without_reset_header(self) -> BlockView { let tx_hashes = self.calc_tx_hashes(); let tx_witness_hashes = self.calc_tx_witness_hashes(); Self::block_into_view_internal(self, tx_hashes, tx_witness_hashes) } + /// Calculates transaction associated hashes, resets all hashes and merkle roots in the header, then converts them into [`BlockView`]. + /// + /// [`BlockView`]: ../core/struct.BlockView.html pub fn into_view(self) -> BlockView { let tx_hashes = self.calc_tx_hashes(); let tx_witness_hashes = self.calc_tx_witness_hashes(); diff --git a/util/types/src/extension/calc_hash.rs b/util/types/src/extension/calc_hash.rs index 29d8cfc6e5..5aa536bc6a 100644 --- a/util/types/src/extension/calc_hash.rs +++ b/util/types/src/extension/calc_hash.rs @@ -26,16 +26,41 @@ where */ macro_rules! 
impl_calc_special_hash_for_entity { - ($entity:ident, $func_name:ident) => { + ($entity:ident, $func_name:ident, $return:ty, $comment:expr) => { impl packed::$entity { - pub fn $func_name(&self) -> packed::Byte32 { + #[doc = $comment] + pub fn $func_name(&self) -> $return { self.as_reader().$func_name() } } }; + ($entity:ident, $func_name:ident, $return:ty) => { + impl_calc_special_hash_for_entity!( + $entity, + $func_name, + $return, + concat!( + "Calls [`", + stringify!($entity), + "Reader.", + stringify!($func_name), + "()`](struct.", + stringify!($entity), + "Reader.html#method.", + stringify!($func_name), + ") for [`self.as_reader()`](#method.as_reader)." + ) + ); + }; + ($entity:ident, $func_name:ident) => { + impl_calc_special_hash_for_entity!($entity, $func_name, packed::Byte32); + }; } impl packed::CellOutput { + /// Calculates the hash for cell data. + /// + /// Returns the empty hash if no data, otherwise, calculates the hash of the data and returns it. pub fn calc_data_hash(data: &[u8]) -> packed::Byte32 { if data.is_empty() { packed::Byte32::zero() @@ -46,6 +71,9 @@ impl packed::CellOutput { } impl<'r> packed::ScriptReader<'r> { + /// Calculates the hash for [self.as_slice()] as the script hash. + /// + /// [self.as_slice()]: ../prelude/trait.Reader.html#tymethod.as_slice pub fn calc_script_hash(&self) -> packed::Byte32 { self.calc_hash() } @@ -53,6 +81,10 @@ impl<'r> packed::ScriptReader<'r> { impl_calc_special_hash_for_entity!(Script, calc_script_hash); impl<'r> packed::CellOutputReader<'r> { + /// Calls [`ScriptReader.calc_script_hash()`] for [`self.lock()`]. 
+ /// + /// [`ScriptReader.calc_script_hash()`]: struct.ScriptReader.html#method.calc_script_hash + /// [`self.lock()`]: #method.lock pub fn calc_lock_hash(&self) -> packed::Byte32 { self.lock().calc_script_hash() } @@ -60,6 +92,10 @@ impl<'r> packed::CellOutputReader<'r> { impl_calc_special_hash_for_entity!(CellOutput, calc_lock_hash); impl<'r> packed::ProposalShortIdVecReader<'r> { + /// Calculates the hash for proposals. + /// + /// Returns the empty hash if no proposals short ids, otherwise, calculates a hash for all + /// proposals short ids and return it. pub fn calc_proposals_hash(&self) -> packed::Byte32 { if self.is_empty() { packed::Byte32::zero() @@ -76,11 +112,28 @@ impl<'r> packed::ProposalShortIdVecReader<'r> { } impl_calc_special_hash_for_entity!(ProposalShortIdVec, calc_proposals_hash); +impl<'r> packed::RawTransactionReader<'r> { + /// Calculates the hash for [self.as_slice()] as the transaction hash. + /// + /// [self.as_slice()]: ../prelude/trait.Reader.html#tymethod.as_slice + pub fn calc_tx_hash(&self) -> packed::Byte32 { + self.calc_hash() + } +} +impl_calc_special_hash_for_entity!(RawTransaction, calc_tx_hash); + impl<'r> packed::TransactionReader<'r> { + /// Calls [`RawTransactionReader.calc_tx_hash()`] for [`self.raw()`]. + /// + /// [`RawTransactionReader.calc_tx_hash()`]: struct.RawTransactionReader.html#method.calc_tx_hash + /// [`self.raw()`]: #method.raw pub fn calc_tx_hash(&self) -> packed::Byte32 { - self.raw().calc_hash() + self.raw().calc_tx_hash() } + /// Calculates the hash for [self.as_slice()] as the witness hash. + /// + /// [self.as_slice()]: ../prelude/trait.Reader.html#tymethod.as_slice pub fn calc_witness_hash(&self) -> packed::Byte32 { self.calc_hash() } @@ -89,6 +142,9 @@ impl_calc_special_hash_for_entity!(Transaction, calc_tx_hash); impl_calc_special_hash_for_entity!(Transaction, calc_witness_hash); impl<'r> packed::RawHeaderReader<'r> { + /// Calculates the hash for [self.as_slice()] as the pow hash. 
+ /// + /// [self.as_slice()]: ../prelude/trait.Reader.html#tymethod.as_slice pub fn calc_pow_hash(&self) -> packed::Byte32 { self.calc_hash() } @@ -96,10 +152,17 @@ impl<'r> packed::RawHeaderReader<'r> { impl_calc_special_hash_for_entity!(RawHeader, calc_pow_hash); impl<'r> packed::HeaderReader<'r> { + /// Calls [`RawHeaderReader.calc_pow_hash()`] for [`self.raw()`]. + /// + /// [`RawHeaderReader.calc_pow_hash()`]: struct.RawHeaderReader.html#method.calc_pow_hash + /// [`self.raw()`]: #method.raw pub fn calc_pow_hash(&self) -> packed::Byte32 { self.raw().calc_pow_hash() } + /// Calculates the hash for [self.as_slice()] as the header hash. + /// + /// [self.as_slice()]: ../prelude/trait.Reader.html#tymethod.as_slice pub fn calc_header_hash(&self) -> packed::Byte32 { self.calc_hash() } @@ -108,10 +171,18 @@ impl_calc_special_hash_for_entity!(Header, calc_pow_hash); impl_calc_special_hash_for_entity!(Header, calc_header_hash); impl<'r> packed::UncleBlockReader<'r> { + /// Calls [`HeaderReader.calc_header_hash()`] for [`self.header()`]. + /// + /// [`HeaderReader.calc_header_hash()`]: struct.HeaderReader.html#method.calc_header_hash + /// [`self.header()`]: #method.header pub fn calc_header_hash(&self) -> packed::Byte32 { self.header().calc_header_hash() } + /// Calls [`ProposalShortIdVecReader.calc_proposals_hash()`] for [`self.proposals()`]. + /// + /// [`ProposalShortIdVecReader.calc_proposals_hash()`]: struct.ProposalShortIdVecReader.html#method.calc_proposals_hash + /// [`self.proposals()`]: #method.proposals pub fn calc_proposals_hash(&self) -> packed::Byte32 { self.proposals().calc_proposals_hash() } @@ -120,6 +191,10 @@ impl_calc_special_hash_for_entity!(UncleBlock, calc_header_hash); impl_calc_special_hash_for_entity!(UncleBlock, calc_proposals_hash); impl<'r> packed::UncleBlockVecReader<'r> { + /// Calculates the hash for uncle blocks. 
+ /// + /// Returns the empty hash if no uncle block, otherwise, calculates a hash for all header + /// hashes of uncle blocks and returns it. pub fn calc_uncles_hash(&self) -> packed::Byte32 { if self.is_empty() { packed::Byte32::zero() @@ -137,18 +212,31 @@ impl<'r> packed::UncleBlockVecReader<'r> { impl_calc_special_hash_for_entity!(UncleBlockVec, calc_uncles_hash); impl<'r> packed::BlockReader<'r> { + /// Calls [`HeaderReader.calc_header_hash()`] for [`self.header()`]. + /// + /// [`HeaderReader.calc_header_hash()`]: struct.HeaderReader.html#method.calc_header_hash + /// [`self.header()`]: #method.header pub fn calc_header_hash(&self) -> packed::Byte32 { self.header().calc_header_hash() } + /// Calls [`ProposalShortIdVecReader.calc_proposals_hash()`] for [`self.proposals()`]. + /// + /// [`ProposalShortIdVecReader.calc_proposals_hash()`]: struct.ProposalShortIdVecReader.html#method.calc_proposals_hash + /// [`self.proposals()`]: #method.proposals pub fn calc_proposals_hash(&self) -> packed::Byte32 { self.proposals().calc_proposals_hash() } + /// Calls [`UncleBlockVecReader.calc_uncles_hash()`] for [`self.uncles()`]. + /// + /// [`UncleBlockVecReader.calc_uncles_hash()`]: struct.UncleBlockVecReader.html#method.calc_uncles_hash + /// [`self.uncles()`]: #method.uncles pub fn calc_uncles_hash(&self) -> packed::Byte32 { self.uncles().calc_uncles_hash() } + /// Calculates transaction hashes for all transactions in the block. pub fn calc_tx_hashes(&self) -> Vec { self.transactions() .iter() @@ -156,6 +244,7 @@ impl<'r> packed::BlockReader<'r> { .collect::>() } + /// Calculates transaction witness hashes for all transactions in the block. 
pub fn calc_tx_witness_hashes(&self) -> Vec { self.transactions() .iter() @@ -167,18 +256,14 @@ impl<'r> packed::BlockReader<'r> { impl_calc_special_hash_for_entity!(Block, calc_header_hash); impl_calc_special_hash_for_entity!(Block, calc_proposals_hash); impl_calc_special_hash_for_entity!(Block, calc_uncles_hash); - -impl packed::Block { - pub fn calc_tx_hashes(&self) -> Vec { - self.as_reader().calc_tx_hashes() - } - - pub fn calc_tx_witness_hashes(&self) -> Vec { - self.as_reader().calc_tx_witness_hashes() - } -} +impl_calc_special_hash_for_entity!(Block, calc_tx_hashes, Vec); +impl_calc_special_hash_for_entity!(Block, calc_tx_witness_hashes, Vec); impl<'r> packed::CompactBlockReader<'r> { + /// Calls [`HeaderReader.calc_header_hash()`] for [`self.header()`]. + /// + /// [`HeaderReader.calc_header_hash()`]: struct.HeaderReader.html#method.calc_header_hash + /// [`self.header()`]: #method.header pub fn calc_header_hash(&self) -> packed::Byte32 { self.header().calc_header_hash() } @@ -186,6 +271,9 @@ impl<'r> packed::CompactBlockReader<'r> { impl_calc_special_hash_for_entity!(CompactBlock, calc_header_hash); impl<'r> packed::RawAlertReader<'r> { + /// Calculates the hash for [self.as_slice()] as the alert hash. + /// + /// [self.as_slice()]: ../prelude/trait.Reader.html#tymethod.as_slice pub fn calc_alert_hash(&self) -> packed::Byte32 { self.calc_hash() } @@ -193,6 +281,10 @@ impl<'r> packed::RawAlertReader<'r> { impl_calc_special_hash_for_entity!(RawAlert, calc_alert_hash); impl<'r> packed::AlertReader<'r> { + /// Calls [`RawAlertReader.calc_alert_hash()`] for [`self.raw()`]. 
+ /// + /// [`RawAlertReader.calc_alert_hash()`]: struct.RawAlertReader.html#method.calc_alert_hash + /// [`self.raw()`]: #method.raw pub fn calc_alert_hash(&self) -> packed::Byte32 { self.raw().calc_alert_hash() } diff --git a/util/types/src/extension/capacity.rs b/util/types/src/extension/capacity.rs index 53433caffb..34797ae3eb 100644 --- a/util/types/src/extension/capacity.rs +++ b/util/types/src/extension/capacity.rs @@ -3,12 +3,29 @@ use ckb_occupied_capacity::Result as CapacityResult; use crate::{core::Capacity, packed, prelude::*}; impl packed::Script { + /// Calculates the occupied capacity of [`Script`]. + /// + /// Includes [`code_hash`] (32), [`hash_type`] (1) and [`args`] (calculated). + /// + /// [`Script`]: https://github.com/nervosnetwork/ckb/blob/v0.36.0/util/types/schemas/blockchain.mol#L30-L34 + /// [`code_hash`]: #method.code_hash + /// [`hash_type`]: #method.hash_type + /// [`args`]: #method.args pub fn occupied_capacity(&self) -> CapacityResult { Capacity::bytes(self.args().raw_data().len() + 32 + 1) } } impl packed::CellOutput { + /// Calculates the occupied capacity of [`CellOutput`]. + /// + /// Includes [`output_data`] (provided), [`capacity`] (8), [`lock`] (calculated) and [`type`] (calculated). + /// + /// [`CellOutput`]: https://github.com/nervosnetwork/ckb/blob/v0.36.0/util/types/schemas/blockchain.mol#L46-L50 + /// [`output_data`]: https://github.com/nervosnetwork/ckb/blob/v0.36.0/util/types/schemas/blockchain.mol#L63 + /// [`capacity`]: #method.capacity + /// [`lock`]: #method.lock + /// [`type`]: #method.type_ pub fn occupied_capacity(&self, data_capacity: Capacity) -> CapacityResult { Capacity::bytes(8) .and_then(|x| x.safe_add(data_capacity)) @@ -23,6 +40,10 @@ impl packed::CellOutput { }) } + /// Returns if the [`capacity`] in `CellOutput` is smaller than the [`occupied capacity`]. 
+ /// + /// [`capacity`]: #method.capacity + /// [`occupied capacity`]: #method.occupied_capacity pub fn is_lack_of_capacity(&self, data_capacity: Capacity) -> CapacityResult { self.occupied_capacity(data_capacity) .map(|cap| cap > self.capacity().unpack()) @@ -30,6 +51,11 @@ impl packed::CellOutput { } impl packed::CellOutputBuilder { + /// Build a [`CellOutput`] and sets its [`capacity`] equal to its [`occupied capacity`] exactly. + /// + /// [`CellOutput`]: struct.CellOutput.html + /// [`capacity`]: #method.capacity + /// [`occupied capacity`]: struct.CellOutput.html#method.occupied_capacity pub fn build_exact_capacity( self, data_capacity: Capacity, @@ -50,6 +76,9 @@ impl packed::CellOutputBuilder { } impl packed::CellOutputVec { + /// Sums the capacities of all [`CellOutput`]s in the vector. + /// + /// [`CellOutput`]: struct.CellOutput.html#method.occupied_capacity pub fn total_capacity(&self) -> CapacityResult { self.as_reader() .iter() diff --git a/util/types/src/extension/check_data.rs b/util/types/src/extension/check_data.rs index 9190e34ed6..a211dac0e2 100644 --- a/util/types/src/extension/check_data.rs +++ b/util/types/src/extension/check_data.rs @@ -5,13 +5,13 @@ use crate::{core, packed}; */ impl<'r> packed::ScriptReader<'r> { - pub fn check_data(&self) -> bool { + fn check_data(&self) -> bool { core::ScriptHashType::verify_value(self.hash_type().into()) } } impl<'r> packed::ScriptOptReader<'r> { - pub fn check_data(&self) -> bool { + fn check_data(&self) -> bool { self.to_opt() .map(|i| core::ScriptHashType::verify_value(i.hash_type().into())) .unwrap_or(true) @@ -19,31 +19,31 @@ impl<'r> packed::ScriptOptReader<'r> { } impl<'r> packed::CellOutputReader<'r> { - pub fn check_data(&self) -> bool { + fn check_data(&self) -> bool { self.lock().check_data() && self.type_().check_data() } } impl<'r> packed::CellOutputVecReader<'r> { - pub fn check_data(&self) -> bool { + fn check_data(&self) -> bool { self.iter().all(|i| i.check_data()) } } impl<'r> 
packed::CellDepReader<'r> { - pub fn check_data(&self) -> bool { + fn check_data(&self) -> bool { core::DepType::verify_value(self.dep_type().into()) } } impl<'r> packed::CellDepVecReader<'r> { - pub fn check_data(&self) -> bool { + fn check_data(&self) -> bool { self.iter().all(|i| i.check_data()) } } impl<'r> packed::RawTransactionReader<'r> { - pub fn check_data(&self) -> bool { + fn check_data(&self) -> bool { self.outputs().len() == self.outputs_data().len() && self.cell_deps().check_data() && self.outputs().check_data() @@ -51,19 +51,19 @@ impl<'r> packed::RawTransactionReader<'r> { } impl<'r> packed::TransactionReader<'r> { - pub fn check_data(&self) -> bool { + fn check_data(&self) -> bool { self.raw().check_data() } } impl<'r> packed::TransactionVecReader<'r> { - pub fn check_data(&self) -> bool { + fn check_data(&self) -> bool { self.iter().all(|i| i.check_data()) } } impl<'r> packed::BlockReader<'r> { - pub fn check_data(&self) -> bool { + fn check_data(&self) -> bool { self.transactions().check_data() } } @@ -73,30 +73,33 @@ impl<'r> packed::BlockReader<'r> { */ impl<'r> packed::BlockTransactionsReader<'r> { + /// Recursively checks whether the structure of the binary data is correct. pub fn check_data(&self) -> bool { self.transactions().check_data() } } impl<'r> packed::RelayTransactionReader<'r> { - pub fn check_data(&self) -> bool { + fn check_data(&self) -> bool { self.transaction().check_data() } } impl<'r> packed::RelayTransactionVecReader<'r> { - pub fn check_data(&self) -> bool { + fn check_data(&self) -> bool { self.iter().all(|i| i.check_data()) } } impl<'r> packed::RelayTransactionsReader<'r> { + /// Recursively checks whether the structure of the binary data is correct. pub fn check_data(&self) -> bool { self.transactions().check_data() } } impl<'r> packed::SendBlockReader<'r> { + /// Recursively checks whether the structure of the binary data is correct. 
pub fn check_data(&self) -> bool { self.block().check_data() } diff --git a/util/types/src/extension/serialized_size.rs b/util/types/src/extension/serialized_size.rs index 18ffa2b957..6d9b770fdd 100644 --- a/util/types/src/extension/serialized_size.rs +++ b/util/types/src/extension/serialized_size.rs @@ -1,27 +1,61 @@ use crate::{packed, prelude::*}; macro_rules! impl_serialized_size_for_entity { - ($entity:ident, $func:ident) => { + ($entity:ident, $func:ident, $reader_func_link:expr) => { impl packed::$entity { + /// Calls + #[doc = $reader_func_link] pub fn $func(&self) -> usize { self.as_reader().$func() } } }; + ($entity:ident, $func:ident) => { + impl_serialized_size_for_entity!( + $entity, + $func, + concat!( + "[`", stringify!($entity), "::", stringify!($func), "(..)`](struct.", + stringify!($entity), "Reader.html#method.", stringify!($func), ")." + ) + ); + } } impl<'r> packed::TransactionReader<'r> { + /// Calculates the serialized size of a [`Transaction`] in [`Block`]. + /// + /// Put each [`Transaction`] into [`Block`] will occupy extra spaces to store [an offset in header], + /// its size is [`molecule::NUMBER_SIZE`]. + /// + /// [`Transaction`]: https://github.com/nervosnetwork/ckb/blob/v0.36.0/util/types/schemas/blockchain.mol#L66-L69 + /// [`Block`]: https://github.com/nervosnetwork/ckb/blob/v0.36.0/util/types/schemas/blockchain.mol#L94-L99 + /// [an offset in header]: https://github.com/nervosnetwork/molecule/blob/df1fdce/docs/encoding_spec.md#memory-layout + /// [`molecule::NUMBER_SIZE`]: https://docs.rs/molecule/0.6.1/molecule/constant.NUMBER_SIZE.html pub fn serialized_size_in_block(&self) -> usize { - // the offset in TransactionVec header is u32 self.as_slice().len() + molecule::NUMBER_SIZE } } impl_serialized_size_for_entity!(Transaction, serialized_size_in_block); impl<'r> packed::BlockReader<'r> { + /// Calculates the serialized size of [`Block`] without [uncle proposals]. 
+ /// + /// # Computational Steps + /// - Calculates the total serialized size of [`Block`], marks it as `B`. + /// - Calculates the serialized size [`ProposalShortIdVec`] for each uncle block, marks them as + /// `P0, P1, ..., Pn`. + /// - Even an uncle has no proposals, the [`ProposalShortIdVec`] still has [a header contains its total size], + /// the size is [`molecule::NUMBER_SIZE`], marks it as `h`. + /// - So the serialized size of [`Block`] without [uncle proposals] is: `B - sum(P0 - h, P1 - h, ..., Pn - h)` + /// + /// [`Block`]: https://github.com/nervosnetwork/ckb/blob/v0.36.0/util/types/schemas/blockchain.mol#L94-L99 + /// [uncle proposals]: https://github.com/nervosnetwork/ckb/blob/v0.36.0/util/types/schemas/blockchain.mol#L91 + /// [`ProposalShortIdVec`]: https://github.com/nervosnetwork/ckb/blob/v0.36.0/util/types/schemas/blockchain.mol#L25 + /// [a header contains its total size]: https://github.com/nervosnetwork/molecule/blob/df1fdce/docs/encoding_spec.md#memory-layout + /// [`molecule::NUMBER_SIZE`]: https://docs.rs/molecule/0.6.1/molecule/constant.NUMBER_SIZE.html pub fn serialized_size_without_uncle_proposals(&self) -> usize { let block_size = self.as_slice().len(); - // the header of ProposalShortIdVec header is u32 let uncles_proposals_size = self .uncles() .iter() diff --git a/util/types/src/extension/shortcuts.rs b/util/types/src/extension/shortcuts.rs index e9429a89e9..5a6a33ed60 100644 --- a/util/types/src/extension/shortcuts.rs +++ b/util/types/src/extension/shortcuts.rs @@ -9,40 +9,48 @@ use crate::{ }; impl packed::Byte32 { + /// Creates a new `Bytes32` whose bits are all zeros. pub fn zero() -> Self { Self::default() } + /// Creates a new `Byte32` whose bits are all ones. pub fn max_value() -> Self { [u8::max_value(); 32].pack() } + /// Checks whether all bits in self are zeros. pub fn is_zero(&self) -> bool { self.as_slice().iter().all(|x| *x == 0) } + /// Creates a new `Bytes32`. 
pub fn new(v: [u8; 32]) -> Self { v.pack() } } impl packed::ProposalShortId { + /// Creates a new `ProposalShortId` from a transaction hash. pub fn from_tx_hash(h: &packed::Byte32) -> Self { let mut inner = [0u8; 10]; inner.copy_from_slice(&h.as_slice()[..10]); inner.pack() } + /// Creates a new `ProposalShortId` whose bits are all zeros. pub fn zero() -> Self { Self::default() } + /// Creates a new `ProposalShortId`. pub fn new(v: [u8; 10]) -> Self { v.pack() } } impl packed::OutPoint { + /// Creates a new `OutPoint`. pub fn new(tx_hash: packed::Byte32, index: u32) -> Self { packed::OutPoint::new_builder() .tx_hash(tx_hash) @@ -50,16 +58,34 @@ impl packed::OutPoint { .build() } + /// Creates a new null `OutPoint`. pub fn null() -> Self { packed::OutPoint::new_builder() .index(u32::max_value().pack()) .build() } + /// Checks whether self is a null `OutPoint`. pub fn is_null(&self) -> bool { self.tx_hash().is_zero() && Unpack::::unpack(&self.index()) == u32::max_value() } + /// Generates a binary data to be used as a key for indexing cells in storage. + /// + /// # Notice + /// + /// The difference between [`Self::as_slice()`](../prelude/trait.Entity.html#tymethod.as_slice) + /// and [`Self::to_cell_key()`](#method.to_cell_key) is the byteorder of the field `index`. + /// + /// - Uses little endian for the field `index` in serialization. + /// + /// Because in the real world, the little endian machines make up the majority, we can cast + /// it as a number without re-order the bytes. + /// + /// - Uses big endian for the field `index` to index cells in storage. + /// + /// So we can use `tx_hash` as key prefix to seek the cells from storage in the forward + /// order, so as to traverse cells in the forward order too. pub fn to_cell_key(&self) -> Vec { let mut key = Vec::with_capacity(36); let index: u32 = self.index().unpack(); @@ -70,18 +96,21 @@ impl packed::OutPoint { } impl packed::CellInput { + /// Creates a new `CellInput`. 
pub fn new(previous_output: packed::OutPoint, block_number: BlockNumber) -> Self { packed::CellInput::new_builder() .since(block_number.pack()) .previous_output(previous_output) .build() } + /// Creates a new `CellInput` with a null `OutPoint`. pub fn new_cellbase_input(block_number: BlockNumber) -> Self { Self::new(packed::OutPoint::null(), block_number) } } impl packed::Script { + /// Converts self into bytes of [`CellbaseWitness`](struct.CellbaseWitness.html). pub fn into_witness(self) -> packed::Bytes { packed::CellbaseWitness::new_builder() .lock(self) @@ -90,18 +119,22 @@ impl packed::Script { .pack() } + /// Converts from bytes of [`CellbaseWitness`](struct.CellbaseWitness.html). pub fn from_witness(witness: packed::Bytes) -> Option { packed::CellbaseWitness::from_slice(&witness.raw_data()) .map(|cellbase_witness| cellbase_witness.lock()) .ok() } + /// Checks whether the own [`hash_type`](#method.hash_type) is + /// [`Type`](../core/enum.ScriptHashType.html#variant.Type). pub fn is_hash_type_type(&self) -> bool { Into::::into(self.hash_type()) == Into::::into(core::ScriptHashType::Type) } } impl packed::Transaction { + /// Checks whether self is a cellbase. pub fn is_cellbase(&self) -> bool { let raw_tx = self.raw(); raw_tx.inputs().len() == 1 @@ -114,24 +147,28 @@ impl packed::Transaction { .is_null() } + /// Generates a proposal short id after calculating the transaction hash. pub fn proposal_short_id(&self) -> packed::ProposalShortId { packed::ProposalShortId::from_tx_hash(&self.calc_tx_hash()) } } impl packed::RawHeader { + /// Calculates the difficulty from compact target. pub fn difficulty(&self) -> U256 { compact_to_difficulty(self.compact_target().unpack()) } } impl packed::Header { + /// Calculates the difficulty from compact target. pub fn difficulty(&self) -> U256 { self.raw().difficulty() } } impl packed::Block { + /// Converts self to an uncle block. 
pub fn as_uncle(&self) -> packed::UncleBlock { packed::UncleBlock::new_builder() .header(self.header()) @@ -139,6 +176,7 @@ impl packed::Block { .build() } + /// Recalculates all hashes and merkle roots in the header. pub fn reset_header(self) -> packed::Block { let tx_hashes = self.as_reader().calc_tx_hashes(); let tx_witness_hashes = self.as_reader().calc_tx_witness_hashes(); @@ -169,6 +207,7 @@ impl packed::Block { } impl packed::CompactBlock { + /// Builds a `CompactBlock` from block and prefilled transactions indexes. pub fn build_from_block( block: &core::BlockView, prefilled_transactions_indexes: &HashSet, @@ -207,6 +246,7 @@ impl packed::CompactBlock { .build() } + /// Takes proposal short ids for the transactions which are not prefilled. pub fn block_short_ids(&self) -> Vec> { let txs_len = self.txs_len(); let mut block_short_ids: Vec> = Vec::with_capacity(txs_len); @@ -228,16 +268,18 @@ impl packed::CompactBlock { block_short_ids } + /// Calculates the length of transactions. pub fn txs_len(&self) -> usize { self.prefilled_transactions().len() + self.short_ids().len() } - pub fn prefilled_indexes_iter(&self) -> impl Iterator { + fn prefilled_indexes_iter(&self) -> impl Iterator { self.prefilled_transactions() .into_iter() .map(|i| i.index().unpack()) } + /// Collects the short id indexes. pub fn short_id_indexes(&self) -> Vec { let prefilled_indexes: HashSet = self.prefilled_indexes_iter().collect(); diff --git a/util/types/src/generated/mod.rs b/util/types/src/generated/mod.rs index 203448b5b3..fd81a946d0 100644 --- a/util/types/src/generated/mod.rs +++ b/util/types/src/generated/mod.rs @@ -1,6 +1,8 @@ //! Generated packed bytes wrappers. +#![doc(hidden)] #![allow(warnings)] +#![allow(missing_docs)] #[allow(clippy::all)] mod blockchain; @@ -10,6 +12,20 @@ mod extensions; mod protocols; pub mod packed { + //! Various newtypes of [`bytes::Bytes`], and their associated types. + //! + //! 
These newtypes are thread-safe and immutable binary data with a series of associated methods + //! to read and convert themselves. + //! + //! These newtypes are generated by [Molecule] from [schemas]. + //! + //! # References: + //! - [Using the Newtype Pattern to Implement External Traits on External Types](https://doc.rust-lang.org/book/ch19-03-advanced-traits.html#using-the-newtype-pattern-to-implement-external-traits-on-external-types) + //! - [Using the Newtype Pattern for Type Safety and Abstraction](https://doc.rust-lang.org/book/ch19-04-advanced-types.html#using-the-newtype-pattern-for-type-safety-and-abstraction) + //! + //! [`bytes::Bytes`]: https://docs.rs/bytes/*/bytes/#bytes + //! [Molecule]: https://github.com/nervosnetwork/molecule + //! [schemas]: https://github.com/nervosnetwork/molecule/blob/df1fdce/docs/schema_language.md#molecule-schema-language pub use molecule::prelude::{Byte, ByteReader}; pub use super::blockchain::*; diff --git a/util/types/src/lib.rs b/util/types/src/lib.rs index 1abc4ae589..ea1ec2c1b7 100644 --- a/util/types/src/lib.rs +++ b/util/types/src/lib.rs @@ -1,6 +1,6 @@ //! # The Core Types Library //! -//! This Library provides the essential types for building ckb. +//! This Library provides the essential types for CKB. pub mod prelude; @@ -9,7 +9,6 @@ pub use ckb_fixed_hash::{h160, h256, H160, H256}; pub use molecule::error; pub use numext_fixed_uint::{u256, U128, U256}; -#[doc(hidden)] mod generated; pub use generated::packed; diff --git a/util/types/src/prelude.rs b/util/types/src/prelude.rs index 5dc7216030..949ddc3469 100644 --- a/util/types/src/prelude.rs +++ b/util/types/src/prelude.rs @@ -1,10 +1,17 @@ +//! This module includes several traits. +//! +//! Few traits are re-exported from other crates, few are used as aliases and others are syntactic sugar. + pub use molecule::{ hex_string, prelude::{Builder, Entity, Reader}, }; -// An alias for unwrap / expect. 
+/// An alias of `unwrap()` to mark where we are really have confidence to do unwrap. +/// +/// We can also customize the panic message or do something else in this alias. pub trait ShouldBeOk { + /// Unwraps an `Option` or a `Result` with confidence and we assume that it's impossible to fail. fn should_be_ok(self) -> T; } @@ -22,7 +29,9 @@ impl ShouldBeOk for molecule::error::VerificationResult { } } +/// An alias of `from_slice(..)` to mark where we are really have confidence to do unwrap on the result of `from_slice(..)`. pub trait FromSliceShouldBeOk<'r>: Reader<'r> { + /// Unwraps the result of `from_slice(..)` with confidence and we assume that it's impossible to fail. fn from_slice_should_be_ok(slice: &'r [u8]) -> Self; } @@ -42,14 +51,20 @@ where } } +/// A syntactic sugar to convert binary data into rust types. pub trait Unpack { + /// Unpack binary data into rust types. fn unpack(&self) -> T; } +/// A syntactic sugar to convert a rust type into binary data. pub trait Pack { + /// Packs a rust type into binary data. fn pack(&self) -> T; } +/// A syntactic sugar to convert a vector of binary data into one binary data. pub trait PackVec: IntoIterator { + /// Packs a vector of binary data into one binary data. 
fn pack(self) -> T; } diff --git a/util/types/src/utilities/difficulty.rs b/util/types/src/utilities/difficulty.rs index 6143665c83..d8a3d0ecb7 100644 --- a/util/types/src/utilities/difficulty.rs +++ b/util/types/src/utilities/difficulty.rs @@ -1,6 +1,7 @@ use numext_fixed_uint::prelude::UintConvert; use numext_fixed_uint::{u512, U256, U512}; +/// TODO(doc): @doitian pub const DIFF_TWO: u32 = 0x2080_0000; const ONE: U256 = U256::one(); @@ -43,6 +44,7 @@ fn get_low64(target: &U256) -> u64 { target.0[0] } +/// TODO(doc): @doitian pub fn target_to_compact(target: U256) -> u32 { let bits = 256 - target.leading_zeros(); let exponent = u64::from((bits + 7) / 8); @@ -56,6 +58,7 @@ pub fn target_to_compact(target: U256) -> u32 { compact as u32 } +/// TODO(doc): @doitian pub fn compact_to_target(compact: u32) -> (U256, bool) { let exponent = compact >> 24; let mut mantissa = U256::from(compact & 0x00ff_ffff); @@ -73,6 +76,7 @@ pub fn compact_to_target(compact: u32) -> (U256, bool) { (ret, overflow) } +/// TODO(doc): @doitian pub fn compact_to_difficulty(compact: u32) -> U256 { let (target, overflow) = compact_to_target(compact); if target.is_zero() || overflow { @@ -81,6 +85,7 @@ pub fn compact_to_difficulty(compact: u32) -> U256 { target_to_difficulty(&target) } +/// TODO(doc): @doitian pub fn difficulty_to_compact(difficulty: U256) -> u32 { let target = difficulty_to_target(&difficulty); target_to_compact(target) diff --git a/util/types/src/utilities/merkle_tree.rs b/util/types/src/utilities/merkle_tree.rs index a90caa7f40..75c2e152f2 100644 --- a/util/types/src/utilities/merkle_tree.rs +++ b/util/types/src/utilities/merkle_tree.rs @@ -3,6 +3,7 @@ use merkle_cbt::{merkle_tree::Merge, MerkleProof as ExMerkleProof, CBMT as ExCBM use crate::{packed::Byte32, prelude::*}; +/// TODO(doc): @quake pub struct MergeByte32; impl Merge for MergeByte32 { @@ -18,9 +19,12 @@ impl Merge for MergeByte32 { } } +/// TODO(doc): @quake pub type CBMT = ExCBMT; +/// TODO(doc): @quake pub type 
MerkleProof = ExMerkleProof; +/// TODO(doc): @quake pub fn merkle_root(leaves: &[Byte32]) -> Byte32 { CBMT::build_merkle_root(leaves) } diff --git a/util/types/src/utilities/mod.rs b/util/types/src/utilities/mod.rs index 3e629f7b44..5aeab0526a 100644 --- a/util/types/src/utilities/mod.rs +++ b/util/types/src/utilities/mod.rs @@ -1,3 +1,4 @@ +//! TODO(doc): @doitian mod difficulty; mod merkle_tree; diff --git a/verification/Cargo.toml b/verification/Cargo.toml index baa900217e..c0c2ca69fb 100644 --- a/verification/Cargo.toml +++ b/verification/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @zhangsoledad crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/verification/src/block_verifier.rs b/verification/src/block_verifier.rs index 9c86dcc008..f099efcf25 100644 --- a/verification/src/block_verifier.rs +++ b/verification/src/block_verifier.rs @@ -11,6 +11,7 @@ use ckb_types::{ }; use std::collections::HashSet; +/// TODO(doc): @zhangsoledad //TODO: cellbase, witness #[derive(Clone)] pub struct BlockVerifier<'a> { @@ -18,6 +19,7 @@ pub struct BlockVerifier<'a> { } impl<'a> BlockVerifier<'a> { + /// TODO(doc): @zhangsoledad pub fn new(consensus: &'a Consensus) -> Self { BlockVerifier { consensus } } @@ -162,12 +164,14 @@ impl MerkleRootVerifier { } } +/// TODO(doc): @zhangsoledad pub struct HeaderResolverWrapper<'a> { header: &'a HeaderView, parent: Option, } impl<'a> HeaderResolverWrapper<'a> { + /// TODO(doc): @zhangsoledad pub fn new(header: &'a HeaderView, store: &'a CS) -> Self where CS: ChainStore<'a>, @@ -176,6 +180,7 @@ impl<'a> HeaderResolverWrapper<'a> { HeaderResolverWrapper { parent, header } } + /// TODO(doc): @zhangsoledad pub fn build(header: &'a HeaderView, parent: Option) -> Self { HeaderResolverWrapper { parent, header } } @@ -237,15 
+242,18 @@ impl BlockBytesVerifier { } } +/// TODO(doc): @zhangsoledad pub struct NonContextualBlockTxsVerifier<'a> { consensus: &'a Consensus, } impl<'a> NonContextualBlockTxsVerifier<'a> { + /// TODO(doc): @zhangsoledad pub fn new(consensus: &'a Consensus) -> Self { NonContextualBlockTxsVerifier { consensus } } + /// TODO(doc): @zhangsoledad pub fn verify(&self, block: &BlockView) -> Result, Error> { block .transactions() diff --git a/verification/src/cache.rs b/verification/src/cache.rs index 04803e8674..446fc5d71c 100644 --- a/verification/src/cache.rs +++ b/verification/src/cache.rs @@ -1,17 +1,23 @@ +//! TODO(doc): @zhangsoledad use ckb_types::{ core::{Capacity, Cycle}, packed::Byte32, }; +/// TODO(doc): @zhangsoledad pub type TxVerifyCache = lru::LruCache; +/// TODO(doc): @zhangsoledad #[derive(Clone, Copy, Debug, PartialEq)] pub struct CacheEntry { + /// TODO(doc): @zhangsoledad pub cycles: Cycle, + /// TODO(doc): @zhangsoledad pub fee: Capacity, } impl CacheEntry { + /// TODO(doc): @zhangsoledad pub fn new(cycles: Cycle, fee: Capacity) -> Self { CacheEntry { cycles, fee } } diff --git a/verification/src/contextual_block_verifier.rs b/verification/src/contextual_block_verifier.rs index 3470b9c3c9..266a94db95 100644 --- a/verification/src/contextual_block_verifier.rs +++ b/verification/src/contextual_block_verifier.rs @@ -28,20 +28,28 @@ use std::collections::{HashMap, HashSet}; use std::sync::Arc; use tokio::sync::{oneshot, RwLock}; +/// TODO(doc): @zhangsoledad pub struct VerifyContext<'a, CS> { pub(crate) store: &'a CS, pub(crate) consensus: &'a Consensus, } +/// TODO(doc): @zhangsoledad pub trait Switch { + /// TODO(doc): @zhangsoledad fn disable_epoch(&self) -> bool; + /// TODO(doc): @zhangsoledad fn disable_uncles(&self) -> bool; + /// TODO(doc): @zhangsoledad fn disable_two_phase_commit(&self) -> bool; + /// TODO(doc): @zhangsoledad fn disable_daoheader(&self) -> bool; + /// TODO(doc): @zhangsoledad fn disable_reward(&self) -> bool; } impl<'a, CS: 
ChainStore<'a>> VerifyContext<'a, CS> { + /// TODO(doc): @zhangsoledad pub fn new(store: &'a CS, consensus: &'a Consensus) -> Self { VerifyContext { store, consensus } } @@ -504,15 +512,18 @@ impl<'a> EpochVerifier<'a> { } } +/// TODO(doc): @zhangsoledad pub struct ContextualBlockVerifier<'a, CS> { context: &'a VerifyContext<'a, CS>, } impl<'a, CS: ChainStore<'a>> ContextualBlockVerifier<'a, CS> { + /// TODO(doc): @zhangsoledad pub fn new(context: &'a VerifyContext<'a, CS>) -> Self { ContextualBlockVerifier { context } } + /// TODO(doc): @zhangsoledad pub fn verify( &'a self, resolved: &'a [ResolvedTransaction], diff --git a/verification/src/error.rs b/verification/src/error.rs index 319fc771fa..24352e56f2 100644 --- a/verification/src/error.rs +++ b/verification/src/error.rs @@ -16,6 +16,7 @@ pub enum TransactionErrorSource { Witnesses, } +/// TODO(doc): @keroro520 #[derive(Fail, Debug, PartialEq, Eq, Clone)] pub enum TransactionError { /// output.occupied_capacity() > output.capacity() @@ -24,9 +25,13 @@ pub enum TransactionError { source, index, occupied_capacity, capacity )] InsufficientCellCapacity { + /// TODO(doc): @keroro520 source: TransactionErrorSource, + /// TODO(doc): @keroro520 index: usize, + /// TODO(doc): @keroro520 occupied_capacity: Capacity, + /// TODO(doc): @keroro520 capacity: Capacity, }, @@ -36,21 +41,32 @@ pub enum TransactionError { outputs_sum, inputs_sum )] OutputsSumOverflow { + /// TODO(doc): @keroro520 inputs_sum: Capacity, + /// TODO(doc): @keroro520 outputs_sum: Capacity, }, /// inputs.is_empty() || outputs.is_empty() #[fail(display = "Empty({})", source)] - Empty { source: TransactionErrorSource }, + Empty { + /// TODO(doc): @keroro520 + source: TransactionErrorSource, + }, /// Duplicated dep-out-points within the same transaction #[fail(display = "DuplicateCellDeps({})", out_point)] - DuplicateCellDeps { out_point: OutPoint }, + DuplicateCellDeps { + /// TODO(doc): @keroro520 + out_point: OutPoint, + }, /// Duplicated headers deps 
without within the same transaction #[fail(display = "DuplicateHeaderDeps({})", hash)] - DuplicateHeaderDeps { hash: Byte32 }, + DuplicateHeaderDeps { + /// TODO(doc): @keroro520 + hash: Byte32, + }, /// outputs.len() != outputs_data.len() #[fail( @@ -58,7 +74,9 @@ pub enum TransactionError { outputs_data_len, outputs_len )] OutputsDataLengthMismatch { + /// TODO(doc): @keroro520 outputs_len: usize, + /// TODO(doc): @keroro520 outputs_data_len: usize, }, @@ -67,56 +85,85 @@ pub enum TransactionError { display = "InvalidSince(Inputs[{}]): the field since is invalid", index )] - InvalidSince { index: usize }, + InvalidSince { + /// TODO(doc): @keroro520 + index: usize, + }, /// The transaction is not mature which is required by `transaction.since` #[fail( display = "Immature(Inputs[{}]): the transaction is immature because of the since requirement", index )] - Immature { index: usize }, + Immature { + /// TODO(doc): @keroro520 + index: usize, + }, /// The transaction is not mature which is required by cellbase maturity rule #[fail(display = "CellbaseImmaturity({}[{}])", source, index)] CellbaseImmaturity { + /// TODO(doc): @keroro520 source: TransactionErrorSource, + /// TODO(doc): @keroro520 index: usize, }, /// The transaction version is mismatched with the system can hold #[fail(display = "MismatchedVersion: expected {}, got {}", expected, actual)] - MismatchedVersion { expected: Version, actual: Version }, + MismatchedVersion { + /// TODO(doc): @keroro520 + expected: Version, + /// TODO(doc): @keroro520 + actual: Version, + }, /// The transaction size is too large #[fail( display = "ExceededMaximumBlockBytes: expected transaction serialized size ({}) < block size limit ({})", actual, limit )] - ExceededMaximumBlockBytes { limit: u64, actual: u64 }, + ExceededMaximumBlockBytes { + /// TODO(doc): @keroro520 + limit: u64, + /// TODO(doc): @keroro520 + actual: u64, + }, } +/// TODO(doc): @keroro520 #[derive(Debug, PartialEq, Eq, Clone, Display)] pub enum 
HeaderErrorKind { + /// TODO(doc): @keroro520 InvalidParent, + /// TODO(doc): @keroro520 Pow, + /// TODO(doc): @keroro520 Timestamp, + /// TODO(doc): @keroro520 Number, + /// TODO(doc): @keroro520 Epoch, + /// TODO(doc): @keroro520 Version, } +/// TODO(doc): @keroro520 #[derive(Debug)] pub struct HeaderError { kind: Context, } +/// TODO(doc): @keroro520 #[derive(Debug)] pub struct BlockError { kind: Context, } +/// TODO(doc): @keroro520 #[derive(Debug, PartialEq, Eq, Clone, Display)] pub enum BlockErrorKind { + /// TODO(doc): @keroro520 ProposalTransactionDuplicate, /// There are duplicate committed transactions. @@ -136,134 +183,215 @@ pub enum BlockErrorKind { /// transaction index in the block and the second item is the transaction verification error. BlockTransactions, + /// TODO(doc): @keroro520 UnknownParent, + /// TODO(doc): @keroro520 Uncles, + /// TODO(doc): @keroro520 Cellbase, /// This error is returned when the committed transactions does not meet the 2-phases /// propose-then-commit consensus rule. 
Commit, + /// TODO(doc): @keroro520 ExceededMaximumProposalsLimit, + /// TODO(doc): @keroro520 ExceededMaximumCycles, + /// TODO(doc): @keroro520 ExceededMaximumBlockBytes, } +/// TODO(doc): @keroro520 #[derive(Fail, Debug)] #[fail(display = "BlockTransactionsError(index: {}, error: {})", index, error)] pub struct BlockTransactionsError { + /// TODO(doc): @keroro520 pub index: u32, + /// TODO(doc): @keroro520 pub error: Error, } +/// TODO(doc): @keroro520 #[derive(Fail, Debug, PartialEq, Eq, Clone)] #[fail(display = "UnknownParentError(parent_hash: {})", parent_hash)] pub struct UnknownParentError { + /// TODO(doc): @keroro520 pub parent_hash: Byte32, } +/// TODO(doc): @keroro520 #[derive(Fail, Debug, PartialEq, Eq, Clone, Display)] pub enum CommitError { + /// TODO(doc): @keroro520 AncestorNotFound, + /// TODO(doc): @keroro520 Invalid, } +/// TODO(doc): @keroro520 #[derive(Fail, Debug, PartialEq, Eq, Clone, Display)] pub enum CellbaseError { + /// TODO(doc): @keroro520 InvalidInput, + /// TODO(doc): @keroro520 InvalidRewardAmount, + /// TODO(doc): @keroro520 InvalidRewardTarget, + /// TODO(doc): @keroro520 InvalidWitness, + /// TODO(doc): @keroro520 InvalidTypeScript, + /// TODO(doc): @keroro520 InvalidOutputQuantity, + /// TODO(doc): @keroro520 InvalidQuantity, + /// TODO(doc): @keroro520 InvalidPosition, + /// TODO(doc): @keroro520 InvalidOutputData, } +/// TODO(doc): @keroro520 #[derive(Fail, Debug, PartialEq, Eq, Clone)] pub enum UnclesError { + /// TODO(doc): @keroro520 #[fail(display = "OverCount(max: {}, actual: {})", max, actual)] - OverCount { max: u32, actual: u32 }, + OverCount { + /// TODO(doc): @keroro520 + max: u32, + /// TODO(doc): @keroro520 + actual: u32, + }, + /// TODO(doc): @keroro520 #[fail( display = "InvalidDepth(min: {}, max: {}, actual: {})", min, max, actual )] - InvalidDepth { max: u64, min: u64, actual: u64 }, + InvalidDepth { + /// TODO(doc): @keroro520 + max: u64, + /// TODO(doc): @keroro520 + min: u64, + /// TODO(doc): @keroro520 + 
actual: u64, + }, + /// TODO(doc): @keroro520 #[fail(display = "InvalidHash(expected: {}, actual: {})", expected, actual)] - InvalidHash { expected: Byte32, actual: Byte32 }, + InvalidHash { + /// TODO(doc): @keroro520 + expected: Byte32, + /// TODO(doc): @keroro520 + actual: Byte32, + }, + /// TODO(doc): @keroro520 #[fail(display = "InvalidNumber")] InvalidNumber, + /// TODO(doc): @keroro520 #[fail(display = "InvalidTarget")] InvalidTarget, + /// TODO(doc): @keroro520 #[fail(display = "InvalidDifficultyEpoch")] InvalidDifficultyEpoch, + /// TODO(doc): @keroro520 #[fail(display = "ProposalsHash")] ProposalsHash, + /// TODO(doc): @keroro520 #[fail(display = "ProposalDuplicate")] ProposalDuplicate, + /// TODO(doc): @keroro520 #[fail(display = "Duplicate({})", _0)] Duplicate(Byte32), + /// TODO(doc): @keroro520 #[fail(display = "DoubleInclusion({})", _0)] DoubleInclusion(Byte32), + /// TODO(doc): @keroro520 #[fail(display = "DescendantLimit")] DescendantLimit, + /// TODO(doc): @keroro520 #[fail(display = "ExceededMaximumProposalsLimit")] ExceededMaximumProposalsLimit, } +/// TODO(doc): @keroro520 #[derive(Fail, Debug, PartialEq, Eq, Clone)] #[fail( display = "BlockVersionError(expected: {}, actual: {})", expected, actual )] pub struct BlockVersionError { + /// TODO(doc): @keroro520 pub expected: Version, + /// TODO(doc): @keroro520 pub actual: Version, } +/// TODO(doc): @keroro520 #[derive(Fail, Debug, PartialEq, Eq, Clone)] #[fail(display = "InvalidParentError(parent_hash: {})", parent_hash)] pub struct InvalidParentError { + /// TODO(doc): @keroro520 pub parent_hash: Byte32, } +/// TODO(doc): @keroro520 #[derive(Fail, Debug, PartialEq, Eq, Clone)] pub enum PowError { + /// TODO(doc): @keroro520 #[fail(display = "Boundary(expected: {}, actual: {})", expected, actual)] - Boundary { expected: Byte32, actual: Byte32 }, + Boundary { + /// TODO(doc): @keroro520 + expected: Byte32, + /// TODO(doc): @keroro520 + actual: Byte32, + }, + /// TODO(doc): @keroro520 #[fail( 
display = "InvalidNonce: please set logger.filter to \"info,ckb-pow=debug\" to see detailed PoW verification information in the log" )] InvalidNonce, } +/// TODO(doc): @keroro520 #[derive(Fail, Debug, PartialEq, Eq, Clone)] pub enum TimestampError { + /// TODO(doc): @keroro520 #[fail(display = "BlockTimeTooOld(min: {}, actual: {})", min, actual)] - BlockTimeTooOld { min: u64, actual: u64 }, + BlockTimeTooOld { + /// TODO(doc): @keroro520 + min: u64, + /// TODO(doc): @keroro520 + actual: u64, + }, + /// TODO(doc): @keroro520 #[fail(display = "BlockTimeTooNew(max: {}, actual: {})", max, actual)] - BlockTimeTooNew { max: u64, actual: u64 }, + BlockTimeTooNew { + /// TODO(doc): @keroro520 + max: u64, + /// TODO(doc): @keroro520 + actual: u64, + }, } impl TimestampError { + /// TODO(doc): @keroro520 pub fn is_too_new(&self) -> bool { match self { Self::BlockTimeTooOld { .. } => false, @@ -272,26 +400,43 @@ impl TimestampError { } } +/// TODO(doc): @keroro520 #[derive(Fail, Debug, PartialEq, Eq, Clone)] #[fail(display = "NumberError(expected: {}, actual: {})", expected, actual)] pub struct NumberError { + /// TODO(doc): @keroro520 pub expected: u64, + /// TODO(doc): @keroro520 pub actual: u64, } +/// TODO(doc): @keroro520 #[derive(Fail, Debug, PartialEq, Eq, Clone)] pub enum EpochError { + /// TODO(doc): @keroro520 #[fail( display = "TargetMismatch(expected: {:x}, actual: {:x})", expected, actual )] - TargetMismatch { expected: u32, actual: u32 }, + TargetMismatch { + /// TODO(doc): @keroro520 + expected: u32, + /// TODO(doc): @keroro520 + actual: u32, + }, + /// TODO(doc): @keroro520 #[fail(display = "NumberMismatch(expected: {}, actual: {})", expected, actual)] - NumberMismatch { expected: u64, actual: u64 }, + NumberMismatch { + /// TODO(doc): @keroro520 + expected: u64, + /// TODO(doc): @keroro520 + actual: u64, + }, } impl TransactionError { + /// TODO(doc): @keroro520 pub fn is_malformed_tx(&self) -> bool { match self { TransactionError::OutputsSumOverflow { .. 
} @@ -347,18 +492,22 @@ impl Fail for HeaderError { } impl HeaderError { + /// TODO(doc): @keroro520 pub fn kind(&self) -> &HeaderErrorKind { self.kind.get_context() } + /// TODO(doc): @keroro520 pub fn downcast_ref(&self) -> Option<&T> { self.cause().and_then(|cause| cause.downcast_ref::()) } + /// TODO(doc): @keroro520 pub fn inner(&self) -> &Context { &self.kind } + /// TODO(doc): @keroro520 // Note: if the header is invalid, that may also be grounds for disconnecting the peer, // However, there is a circumstance where that does not hold: // if the header's timestamp is more than ALLOWED_FUTURE_BLOCKTIME ahead of our current time. @@ -389,14 +538,17 @@ impl Fail for BlockError { } impl BlockError { + /// TODO(doc): @keroro520 pub fn kind(&self) -> &BlockErrorKind { self.kind.get_context() } + /// TODO(doc): @keroro520 pub fn downcast_ref(&self) -> Option<&T> { self.cause().and_then(|cause| cause.downcast_ref::()) } + /// TODO(doc): @keroro520 pub fn inner(&self) -> &Context { &self.kind } diff --git a/verification/src/genesis_verifier.rs b/verification/src/genesis_verifier.rs index c298618fe2..9a2ae7fc18 100644 --- a/verification/src/genesis_verifier.rs +++ b/verification/src/genesis_verifier.rs @@ -8,10 +8,12 @@ use ckb_dao_utils::genesis_dao_data_with_satoshi_gift; use ckb_error::Error; use ckb_types::{core::BlockView, packed::CellInput}; +/// TODO(doc): @zhangsoledad #[derive(Clone)] pub struct GenesisVerifier {} impl GenesisVerifier { + /// TODO(doc): @zhangsoledad pub fn new() -> Self { GenesisVerifier {} } diff --git a/verification/src/header_verifier.rs b/verification/src/header_verifier.rs index e66c20d8cc..12ba6cf42f 100644 --- a/verification/src/header_verifier.rs +++ b/verification/src/header_verifier.rs @@ -11,12 +11,15 @@ use ckb_types::core::{HeaderView, Version}; use faketime::unix_time_as_millis; use std::marker::PhantomData; +/// TODO(doc): @zhangsoledad pub trait HeaderResolver { + /// TODO(doc): @zhangsoledad fn header(&self) -> &HeaderView; 
/// resolves parent header fn parent(&self) -> Option<&HeaderView>; } +/// TODO(doc): @zhangsoledad pub struct HeaderVerifier<'a, T, M> { block_median_time_context: &'a M, consensus: &'a Consensus, @@ -24,6 +27,7 @@ pub struct HeaderVerifier<'a, T, M> { } impl<'a, T, M: BlockMedianTimeContext> HeaderVerifier<'a, T, M> { + /// TODO(doc): @zhangsoledad pub fn new(block_median_time_context: &'a M, consensus: &'a Consensus) -> Self { HeaderVerifier { consensus, diff --git a/verification/src/lib.rs b/verification/src/lib.rs index 4613059a5e..20362d8aee 100644 --- a/verification/src/lib.rs +++ b/verification/src/lib.rs @@ -1,3 +1,4 @@ +//! TODO(doc): @zhangsoledad #[macro_use] extern crate enum_display_derive; @@ -30,11 +31,15 @@ pub use crate::transaction_verifier::{ SinceMetric, TimeRelativeTransactionVerifier, TransactionVerifier, }; +/// TODO(doc): @zhangsoledad pub const ALLOWED_FUTURE_BLOCKTIME: u64 = 15 * 1000; // 15 Second pub(crate) const LOG_TARGET: &str = "ckb_chain"; +/// TODO(doc): @zhangsoledad pub trait Verifier { + /// TODO(doc): @zhangsoledad type Target; + /// TODO(doc): @zhangsoledad fn verify(&self, target: &Self::Target) -> Result<(), ckb_error::Error>; } diff --git a/verification/src/transaction_verifier.rs b/verification/src/transaction_verifier.rs index 28f0567f54..b02f2d2627 100644 --- a/verification/src/transaction_verifier.rs +++ b/verification/src/transaction_verifier.rs @@ -20,8 +20,11 @@ use lru::LruCache; use std::cell::RefCell; use std::collections::HashSet; +/// TODO(doc): @zhangsoledad pub struct TimeRelativeTransactionVerifier<'a, M> { + /// TODO(doc): @zhangsoledad pub maturity: MaturityVerifier<'a>, + /// TODO(doc): @zhangsoledad pub since: SinceVerifier<'a, M>, } @@ -29,6 +32,7 @@ impl<'a, M> TimeRelativeTransactionVerifier<'a, M> where M: BlockMedianTimeContext, { + /// TODO(doc): @zhangsoledad pub fn new( rtx: &'a ResolvedTransaction, median_time_context: &'a M, @@ -53,6 +57,7 @@ where } } + /// TODO(doc): @zhangsoledad pub fn 
verify(&self) -> Result<(), Error> { self.maturity.verify()?; self.since.verify()?; @@ -60,15 +65,22 @@ where } } +/// TODO(doc): @zhangsoledad pub struct NonContextualTransactionVerifier<'a> { + /// TODO(doc): @zhangsoledad pub version: VersionVerifier<'a>, + /// TODO(doc): @zhangsoledad pub size: SizeVerifier<'a>, + /// TODO(doc): @zhangsoledad pub empty: EmptyVerifier<'a>, + /// TODO(doc): @zhangsoledad pub duplicate_deps: DuplicateDepsVerifier<'a>, + /// TODO(doc): @zhangsoledad pub outputs_data_verifier: OutputsDataVerifier<'a>, } impl<'a> NonContextualTransactionVerifier<'a> { + /// TODO(doc): @zhangsoledad pub fn new(tx: &'a TransactionView, consensus: &'a Consensus) -> Self { NonContextualTransactionVerifier { version: VersionVerifier::new(tx, consensus.tx_version()), @@ -79,6 +91,7 @@ impl<'a> NonContextualTransactionVerifier<'a> { } } + /// TODO(doc): @zhangsoledad pub fn verify(&self) -> Result<(), Error> { self.version.verify()?; self.size.verify()?; @@ -89,11 +102,17 @@ impl<'a> NonContextualTransactionVerifier<'a> { } } +/// TODO(doc): @zhangsoledad pub struct ContextualTransactionVerifier<'a, M, CS> { + /// TODO(doc): @zhangsoledad pub maturity: MaturityVerifier<'a>, + /// TODO(doc): @zhangsoledad pub since: SinceVerifier<'a, M>, + /// TODO(doc): @zhangsoledad pub capacity: CapacityVerifier<'a>, + /// TODO(doc): @zhangsoledad pub script: ScriptVerifier<'a, CS>, + /// TODO(doc): @zhangsoledad pub fee_calculator: FeeCalculator<'a, CS>, } @@ -102,6 +121,7 @@ where M: BlockMedianTimeContext, CS: ChainStore<'a>, { + /// TODO(doc): @zhangsoledad #[allow(clippy::too_many_arguments)] pub fn new( rtx: &'a ResolvedTransaction, @@ -131,6 +151,7 @@ where } } + /// TODO(doc): @zhangsoledad pub fn verify(&self, max_cycles: Cycle) -> Result { self.maturity.verify()?; self.capacity.verify()?; @@ -141,8 +162,11 @@ where } } +/// TODO(doc): @zhangsoledad pub struct TransactionVerifier<'a, M, CS> { + /// TODO(doc): @zhangsoledad pub non_contextual: 
NonContextualTransactionVerifier<'a>, + /// TODO(doc): @zhangsoledad pub contextual: ContextualTransactionVerifier<'a, M, CS>, } @@ -151,6 +175,7 @@ where M: BlockMedianTimeContext, CS: ChainStore<'a>, { + /// TODO(doc): @zhangsoledad #[allow(clippy::too_many_arguments)] pub fn new( rtx: &'a ResolvedTransaction, @@ -175,6 +200,7 @@ where } } + /// TODO(doc): @zhangsoledad pub fn verify(&self, max_cycles: Cycle) -> Result { self.non_contextual.verify()?; self.contextual.verify(max_cycles) @@ -262,12 +288,14 @@ impl<'a> SizeVerifier<'a> { } } +/// TODO(doc): @zhangsoledad pub struct ScriptVerifier<'a, CS> { chain_store: &'a CS, resolved_transaction: &'a ResolvedTransaction, } impl<'a, CS: ChainStore<'a>> ScriptVerifier<'a, CS> { + /// TODO(doc): @zhangsoledad pub fn new(resolved_transaction: &'a ResolvedTransaction, chain_store: &'a CS) -> Self { ScriptVerifier { chain_store, @@ -275,6 +303,7 @@ impl<'a, CS: ChainStore<'a>> ScriptVerifier<'a, CS> { } } + /// TODO(doc): @zhangsoledad pub fn verify(&self, max_cycles: Cycle) -> Result { let data_loader = DataLoaderWrapper::new(self.chain_store); TransactionScriptsVerifier::new(&self.resolved_transaction, &data_loader).verify(max_cycles) @@ -484,9 +513,13 @@ const METRIC_TYPE_FLAG_MASK: u64 = 0x6000_0000_0000_0000; const VALUE_MASK: u64 = 0x00ff_ffff_ffff_ffff; const REMAIN_FLAGS_BITS: u64 = 0x1f00_0000_0000_0000; +/// TODO(doc): @zhangsoledad pub enum SinceMetric { + /// TODO(doc): @zhangsoledad BlockNumber(u64), + /// TODO(doc): @zhangsoledad EpochNumberWithFraction(EpochNumberWithFraction), + /// TODO(doc): @zhangsoledad Timestamp(u64), } @@ -495,20 +528,24 @@ pub enum SinceMetric { pub struct Since(pub u64); impl Since { + /// TODO(doc): @zhangsoledad pub fn is_absolute(self) -> bool { self.0 & LOCK_TYPE_FLAG == 0 } + /// TODO(doc): @zhangsoledad #[inline] pub fn is_relative(self) -> bool { !self.is_absolute() } + /// TODO(doc): @zhangsoledad pub fn flags_is_valid(self) -> bool { (self.0 & REMAIN_FLAGS_BITS == 0) && 
((self.0 & METRIC_TYPE_FLAG_MASK) != METRIC_TYPE_FLAG_MASK) } + /// TODO(doc): @zhangsoledad pub fn extract_metric(self) -> Option { let value = self.0 & VALUE_MASK; match self.0 & METRIC_TYPE_FLAG_MASK { diff --git a/wasm-build-test/Cargo.toml b/wasm-build-test/Cargo.toml index e80d6c7c12..b81d940368 100644 --- a/wasm-build-test/Cargo.toml +++ b/wasm-build-test/Cargo.toml @@ -4,7 +4,7 @@ version = "0.38.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -description = "TODO(doc): crate description" +description = "TODO(doc): @doitian crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb"