diff --git a/chain/src/tests/basic.rs b/chain/src/tests/basic.rs index 305f2b116f..7384b76a53 100644 --- a/chain/src/tests/basic.rs +++ b/chain/src/tests/basic.rs @@ -811,8 +811,12 @@ fn test_next_epoch_ext() { // upper bound trigger assert_eq!(epoch.length(), 800, "epoch length {}", epoch.length()); - // orphan_rate_estimation = 1 / ( (1 + o_i ) * L_ideal * C_i,m / (o_i * L_i * C_i+1,m) − 1) = 133 / 9587 + // orphan_rate_estimation + // = 1 / ( (1 + o_i ) * L_ideal * C_i,m / (o_i * L_i * C_i+1,m) − 1) + // = 133 / 9587 + // + // Diff_i+1 = (HPS_i · L_ideal) / (1 + orphan_rate_estimation ) * C_i+1,m + // + // 50 * 14400 * 9587 / ((133 + 9587) * 800) assert_eq!( epoch.compact_target(), diff --git a/chain/src/tests/load_input_data_hash_cell.rs b/chain/src/tests/load_input_data_hash_cell.rs index 23cbb1f090..17d3edfea6 100644 --- a/chain/src/tests/load_input_data_hash_cell.rs +++ b/chain/src/tests/load_input_data_hash_cell.rs @@ -94,7 +94,8 @@ fn test_load_input_data_hash_cell() { let entry0 = vec![TxEntry::new(tx0, 0, Capacity::shannons(0), 100, vec![])]; tx_pool.plug_entry(entry0, PlugTarget::Proposed).unwrap(); - // Ensure tx which calls syscall load_cell_data_hash will got reject even previous tx is already in tx-pool + // Ensure tx which calls syscall load_cell_data_hash will be rejected even if the previous tx is + // already in the tx-pool let ret = tx_pool.submit_txs(vec![tx1]).unwrap(); assert!(ret.is_err()); assert!(format!("{}", ret.err().unwrap()).contains("ValidationFailure(2)")); diff --git a/ckb-bin/src/subcommand/list_hashes.rs b/ckb-bin/src/subcommand/list_hashes.rs index f7114f6dc6..9bcd1df179 100644 --- a/ckb-bin/src/subcommand/list_hashes.rs +++ b/ckb-bin/src/subcommand/list_hashes.rs @@ -55,7 +55,8 @@ impl TryFrom for SpecHashes { let cellbase = &block.transactions()[0]; let dep_group_tx = &block.transactions()[1]; - // Zip name with the transaction outputs. System cells start from 1 in the genesis cellbase outputs. + // Zip name with the transaction outputs. System cells start from 1 in the genesis cellbase + // outputs. let cells_hashes = spec .genesis .system_cells diff --git a/error/src/internal.rs b/error/src/internal.rs index b333eeebe5..089aeafe72 100644 --- a/error/src/internal.rs +++ b/error/src/internal.rs @@ -17,7 +17,8 @@ pub struct OtherError(String); /// A list specifying categories of ckb internal error. /// -/// This list is intended to grow over time and it is not recommended to exhaustively match against it. +/// This list is intended to grow over time and it is not recommended to exhaustively match against +/// it. /// /// It is used with the [`InternalError`]. /// diff --git a/error/src/lib.rs b/error/src/lib.rs index 755d04ad56..77da5d42c7 100644 --- a/error/src/lib.rs +++ b/error/src/lib.rs @@ -17,7 +17,8 @@ pub struct AnyError(Arc); /// A list specifying categories of ckb error. /// -/// This list is intended to grow over time and it is not recommended to exhaustively match against it. +/// This list is intended to grow over time and it is not recommended to exhaustively match against +/// it. /// /// It is used with [`Error`]. /// diff --git a/error/src/util.rs b/error/src/util.rs index 0db6a05c3f..803c04a4eb 100644 --- a/error/src/util.rs +++ b/error/src/util.rs @@ -56,7 +56,8 @@ macro_rules! impl_error_conversion_with_kind { }; } -/// A macro to implement conversion from source type to target type based on an implicit middle adaptor. +/// A macro to implement conversion from source type to target type based on an implicit middle +/// adaptor.
/// /// ## Examples /// diff --git a/indexer/src/store.rs b/indexer/src/store.rs index 2ab7cc85c8..c772c51c80 100644 --- a/indexer/src/store.rs +++ b/indexer/src/store.rs @@ -744,7 +744,8 @@ impl IndexerStoreTransaction { } fn commit(self) { - // only log the error, indexer store commit failure should not causing the thread to panic entirely. + // only log the error, indexer store commit failure should not cause the thread to panic + // entirely. if let Err(err) = self.txn.commit() { error!("indexer db failed to commit txn, error: {:?}", err) } diff --git a/miner/src/client.rs b/miner/src/client.rs index afdae5f9c9..02a5ac6700 100644 --- a/miner/src/client.rs +++ b/miner/src/client.rs @@ -43,7 +43,8 @@ impl Rpc { let (stop, stop_rx) = oneshot::channel::<()>(); let thread = thread::spawn(move || { - // 1 is number of blocking DNS threads, this connector will use plain HTTP if the URL provded uses the HTTP scheme. + // 1 is the number of blocking DNS threads, this connector will use plain HTTP if the URL + // provided uses the HTTP scheme. let https = hyper_tls::HttpsConnector::new(1).expect("init https connector should be OK"); let client = HttpClient::builder().keep_alive(true).build(https); diff --git a/network/src/lib.rs b/network/src/lib.rs index 8205479bdc..e07ab40265 100644 --- a/network/src/lib.rs +++ b/network/src/lib.rs @@ -1,10 +1,9 @@ //! ckb network module //! -//! This module is based on the Tentacle library, once again abstract the context that protocols can use, -//! and providing a unified implementation of the peer storage and registration mechanism. +//! This module is based on the Tentacle library. It abstracts the context that protocols can +//! use, and provides a unified implementation of the peer storage and registration mechanism. //! //! And implemented several basic protocols: identify, discovery, ping, feeler, disconnect_message -//! mod behaviour; mod compress; diff --git a/network/src/network.rs b/network/src/network.rs index bd95304885..774276532b 100644 --- a/network/src/network.rs +++ b/network/src/network.rs @@ -494,7 +494,8 @@ impl NetworkState { } } - /// Network message processing controller, default is true, if false, discard any received messages + /// Network message processing controller, default is true, if false, discard any received + /// messages pub fn is_active(&self) -> bool { self.active.load(Ordering::Relaxed) } @@ -939,8 +940,8 @@ impl NetworkService { // and the administrator permissions of group permissions must be turned on. // This operation is very burdensome for windows users, so it is turned off by default // - // The integration test fails after MacOS is turned on, the behavior is different from linux. - // Decision to turn off it + // The integration test fails after it is turned on for MacOS, the behavior is different from + // linux, so we decided to turn it off let p2p_service = service_builder.build(event_handler); // == Build background service tasks diff --git a/network/src/protocols/mod.rs b/network/src/protocols/mod.rs index ba1c478a41..4c28fc3dd4 100644 --- a/network/src/protocols/mod.rs +++ b/network/src/protocols/mod.rs @@ -55,7 +55,8 @@ pub trait CKBProtocolContext: Send { fn quick_send_message_to(&self, peer_index: PeerIndex, data: Bytes) -> Result<(), Error>; /// Filter broadcast message through quick queue fn quick_filter_broadcast(&self, target: TargetSession, data: Bytes) -> Result<(), Error>; - /// spawn a future task, if `blocking` is true we use tokio_threadpool::blocking to handle the task.
+ /// spawn a future task, if `blocking` is true we use tokio_threadpool::blocking to handle the + /// task. fn future_task(&self, task: BoxedFutureTask, blocking: bool) -> Result<(), Error>; /// Send message fn send_message( diff --git a/network/src/protocols/ping.rs b/network/src/protocols/ping.rs index 983174d423..8ae3589406 100644 --- a/network/src/protocols/ping.rs +++ b/network/src/protocols/ping.rs @@ -29,7 +29,8 @@ const CONTROL_CHANNEL_BUFFER_SIZE: usize = 2; /// Ping protocol handler. /// /// The interval means that we send ping to peers. -/// The timeout means that consider peer is timeout if during a timeout we still have not received pong from a peer +/// The timeout means that consider peer is timeout if during a timeout we still have not received +/// pong from a peer pub struct PingHandler { interval: Duration, timeout: Duration, diff --git a/network/src/protocols/support_protocols.rs b/network/src/protocols/support_protocols.rs index 68ef0f66fd..6539eca030 100644 --- a/network/src/protocols/support_protocols.rs +++ b/network/src/protocols/support_protocols.rs @@ -8,10 +8,10 @@ use tokio_util::codec::length_delimited; /// All supported protocols /// -/// The underlying network of CKB is flexible and complex. The flexibility lies in that it can support any number of protocols. -/// Therefore, it is also relatively complex. Now, CKB has a bunch of protocols open by default, -/// but not all protocols have to be open. In other words, if you want to interact with ckb nodes at the p2p layer, -/// you only need to implement a few core protocols. +/// The underlying network of CKB is flexible and complex. The flexibility lies in that it can +/// support any number of protocols. Therefore, it is also relatively complex. Now, CKB has a bunch +/// of protocols open by default, but not all protocols have to be open. In other words, if you want +/// to interact with ckb nodes at the p2p layer, you only need to implement a few core protocols. /// /// Core protocol: identify/discovery/sync/relay #[derive(Clone, Debug)] @@ -22,7 +22,8 @@ pub enum SupportProtocols { /// to build a robust network topology as much as possible. Discovery, /// Identify: the first protocol opened when the nodes are interconnected, - /// used to obtain the features, versions, and observation addresses supported by the other node. + /// used to obtain the features, versions, and observation addresses supported by the other + /// node. /// /// [RFC](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0012-node-discovery/0012-node-discovery.md) Identify, @@ -31,8 +32,9 @@ pub enum SupportProtocols { /// [RFC](https://github.com/nervosnetwork/rfcs/blob/v2020.01.15/rfcs/0007-scoring-system-and-network-security/0007-scoring-system-and-network-security.md#feeler-connection) /// [Eclipse Attacks on Bitcoin's Peer-to-Peer Network](https://cryptographylab.bitbucket.io/slides/Eclipse%20Attacks%20on%20Bitcoin%27s%20Peer-to-Peer%20Network.pdf) Feeler, - /// Disconnect message: used to give the remote node a debug message when the node decides to disconnect. - /// This message must be as quick as possible, otherwise the message may not be sent. So, use a separate protocol to support it. + /// Disconnect message: used to give the remote node a debug message when the node decides to + /// disconnect. This message must be as quick as possible, otherwise the message may not be + /// sent. So, use a separate protocol to support it. DisconnectMessage, /// Sync: ckb's main communication protocol for synchronize all blocks. 
/// @@ -42,7 +44,8 @@ pub enum SupportProtocols { /// /// [RFC](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0004-ckb-block-sync/0004-ckb-block-sync.md#new-block-announcement) Relay, - /// Time: A protocol used for node pairing that warns if there is a large gap between the local time and the remote node. + /// Time: A protocol used for node pairing that warns if there is a large gap between the local + /// time and the remote node. Time, /// Alert: A protocol reserved by the Nervos Foundation to publish network-wide announcements. /// Any information sent from the protocol is verified by multi-signature @@ -84,8 +87,8 @@ impl SupportProtocols { /// Support versions pub fn support_versions(&self) -> Vec { - // we didn't invoke MetaBuilder#support_versions fn for these protocols (Ping/Discovery/Identify/Feeler/DisconnectMessage) - // in previous code, so the default 0.0.1 value is used ( https://github.com/nervosnetwork/tentacle/blob/master/src/builder.rs#L312 ) + // we didn't invoke MetaBuilder#support_versions fn for these protocols + // (Ping/Discovery/Identify/Feeler/DisconnectMessage) in previous code, so the default 0.0.1 value is used ( https://github.com/nervosnetwork/tentacle/blob/master/src/builder.rs#L312 ) // have to keep 0.0.1 for compatibility... match self { SupportProtocols::Ping => vec!["0.0.1".to_owned()], diff --git a/network/src/services/dns_seeding/seed_record.rs b/network/src/services/dns_seeding/seed_record.rs index ea49cc100f..ef734dfae4 100644 --- a/network/src/services/dns_seeding/seed_record.rs +++ b/network/src/services/dns_seeding/seed_record.rs @@ -40,7 +40,7 @@ const SEP: char = ';'; // Typical txt record: // ================== -// 47.103.65.40;49582;QmbU82jmDbu8AsUfa6bDKPHxTpwnPfcRQrzNPacKcSyM1Y;1574942409;K1vAkHZZ8to5VmjD4eyv65ENLbNa9Tda4Aytd8DE9iipFQanRpcZtSPyRiiGHThRGJPVRD18KAsGb8kV2s2WBK39R +// 47.103.65.40;49582;QmbU82jmDbu8AsUfa6bDKPHxTpwnPfcRQrzNPacKcSyM1Y;1574942409;K1vAkHZZ8to5VmjD4eyv65ENLbNa9Tda4Aytd8DE9iipFQanRpcZtSPyRiiGHThRGJPVRD18KAsGb8kV2s2WBK39R #[derive(Debug, Clone, Eq, PartialEq)] pub struct SeedRecord { ip: IpAddr, diff --git a/network/src/services/protocol_type_checker.rs b/network/src/services/protocol_type_checker.rs index f4eea29a1e..c8b13d8711 100644 --- a/network/src/services/protocol_type_checker.rs +++ b/network/src/services/protocol_type_checker.rs @@ -1,6 +1,7 @@ /// CKB evicts inactive peers in `sync` protocol; but due to P2P connection design, -/// a malicious peer may choose not to open `sync` protocol, to sneak from the eviction mechanism; -/// this service periodically check peers opened sub-protocols, to make sure no malicious connection. +/// a malicious peer may choose not to open `sync` protocol, to sneak from the eviction +/// mechanism; this service periodically check peers opened sub-protocols, to make sure no +/// malicious connection. /// /// Currently, 2 sub-protocols types are valid: /// diff --git a/resource/src/template.rs b/resource/src/template.rs index 75ff01891d..97554e9cf1 100644 --- a/resource/src/template.rs +++ b/resource/src/template.rs @@ -25,8 +25,8 @@ use std::io; /// The block starts with the line ending with ` # {{` (the leading space is required) and ends /// with a line `# }}`. /// -/// Between the start and end markers, every line is a branch starting with `# SPEC => CONTENT`, where -/// `SPEC` is the branch spec name, and `CONTENT` is the text to be replaced for the spec. 
+/// Between the start and end markers, every line is a branch starting with `# SPEC => CONTENT`, +/// where `SPEC` is the branch spec name, and `CONTENT` is the text to be replaced for the spec. /// A special spec name `_` acts as a wildcard which matches any spec name. /// /// The spec name is required to render the template, see [`Template::new`]. The block including diff --git a/rpc/src/error.rs b/rpc/src/error.rs index 8000786a19..3516e4de25 100644 --- a/rpc/src/error.rs +++ b/rpc/src/error.rs @@ -26,8 +26,8 @@ use std::fmt::{Debug, Display}; /// * -1000 ~ -2999 are module-specific errors. Each module generally gets 100 reserved error /// codes. /// -/// Unless otherwise noted, all the errors return optional detailed information as `string` in the error -/// object `data` field. +/// Unless otherwise noted, all the errors return optional detailed information as `string` in the +/// error object `data` field. #[derive(Debug, PartialEq, Clone, Copy)] pub enum RPCError { /// (-1): CKB internal errors are considered to never happen or only happen when the system @@ -65,8 +65,8 @@ pub enum RPCError { DatabaseError = -200, /// (-201): The chain index is inconsistent. /// - /// An example of an inconsistent index is that the chain index says a block hash is in the chain - /// but the block cannot be read from the database. + /// An example of an inconsistent index is that the chain index says a block hash is in the + /// chain but the block cannot be read from the database. /// /// This is a fatal error usually due to a serious bug. Please back up the data directory and /// re-sync the chain from scratch. @@ -76,19 +76,21 @@ pub enum RPCError { /// This is a fatal error usually caused by the underlying database used by CKB. Please back up /// the data directory and re-sync the chain from scratch. DatabaseIsCorrupt = -202, - /// (-301): Failed to resolve the referenced cells and headers used in the transaction, as inputs or - /// dependencies. + /// (-301): Failed to resolve the referenced cells and headers used in the transaction, as + /// inputs or dependencies. TransactionFailedToResolve = -301, /// (-302): Failed to verify the transaction. TransactionFailedToVerify = -302, /// (-1000): Some signatures in the submit alert are invalid. AlertFailedToVerifySignatures = -1000, - /// (-1102): The transaction is rejected by the outputs validator specified by the RPC parameter. + /// (-1102): The transaction is rejected by the outputs validator specified by the RPC + /// parameter. PoolRejectedTransactionByOutputsValidator = -1102, - /// (-1103): Pool rejects some transactions which seem contain invalid VM instructions. See the issue - /// link in the error message for details. + /// (-1103): Pool rejects some transactions which seem contain invalid VM instructions. See the + /// issue link in the error message for details. 
PoolRejectedTransactionByIllTransactionChecker = -1103, - /// (-1104): The transaction fee rate must be greater than or equal to the config option `tx_pool.min_fee_rate` + /// (-1104): The transaction fee rate must be greater than or equal to the config option + /// `tx_pool.min_fee_rate` /// /// The fee rate is calculated as: /// @@ -96,7 +98,8 @@ pub enum RPCError { /// fee / (1000 * tx_serialization_size_in_block_in_bytes) /// ``` PoolRejectedTransactionByMinFeeRate = -1104, - /// (-1105): The in-pool ancestors count must be less than or equal to the config option `tx_pool.max_ancestors_count` + /// (-1105): The in-pool ancestors count must be less than or equal to the config option + /// `tx_pool.max_ancestors_count` /// /// Pool rejects a large package of chained transactions to avoid certain kinds of DoS attacks. PoolRejectedTransactionByMaxAncestorsCountLimit = -1105, diff --git a/rpc/src/module/alert.rs b/rpc/src/module/alert.rs index 024a6ec65e..dd6652e03e 100644 --- a/rpc/src/module/alert.rs +++ b/rpc/src/module/alert.rs @@ -13,8 +13,8 @@ use std::sync::Arc; /// /// An alert is a message about critical problems to be broadcast to all nodes via the p2p network. /// -/// The alerts must be signed by 2-of-4 signatures, where the public keys are hard-coded in the source code -/// and belong to early CKB developers. +/// The alerts must be signed by 2-of-4 signatures, where the public keys are hard-coded in the +/// source code and belong to early CKB developers. #[rpc(server)] pub trait AlertRpc { /// Sends an alert. diff --git a/rpc/src/module/chain.rs b/rpc/src/module/chain.rs index a32f8bd23b..e7ad8feb3e 100644 --- a/rpc/src/module/chain.rs +++ b/rpc/src/module/chain.rs @@ -287,8 +287,8 @@ pub trait ChainRpc { /// /// ## Returns /// - /// The RPC returns a header or null. When the RPC returns a header, the block hash must equal to - /// the parameter `block_hash`. + /// The RPC returns a header or null. When the RPC returns a header, the block hash must equal + /// to the parameter `block_hash`. /// /// If the block is in the [canonical chain](#canonical-chain), the RPC must return the header /// information. Otherwise, the behavior is undefined. The RPC may return blocks found in local @@ -634,7 +634,8 @@ pub trait ChainRpc { verbosity: Option, ) -> Result>; - /// Returns the information about [live cell](#live-cell)s collection by the hash of lock script. + /// Returns the information about [live cell](#live-cell)s collection by the hash of lock + /// script. /// /// This method will be removed. It always returns an error now. #[deprecated( @@ -658,8 +659,8 @@ pub trait ChainRpc { /// /// This RPC tells whether a cell is live or not. /// - /// If the cell is live, the RPC will return details about the cell. Otherwise, the field `cell` is - /// null in the result. + /// If the cell is live, the RPC will return details about the cell. Otherwise, the field `cell` + /// is null in the result. /// /// If the cell is live and `with_data` is set to `false`, the field `cell.data` is null in the /// result. @@ -667,7 +668,8 @@ pub trait ChainRpc { /// ## Params /// /// * `out_point` - Reference to the cell by transaction hash and output index. - /// * `with_data` - Whether the RPC should return cell data. Cell data can be huge, if the client + /// * `with_data` - Whether the RPC should return cell data. Cell data can be huge, if the + /// client /// does not need the data, it should set this to `false` to save bandwidth. 
/// /// ## Examples @@ -792,8 +794,9 @@ pub trait ChainRpc { /// /// ## Returns /// - /// The RPC returns the epoch when `epoch_number` is less than or equal to the current epoch number - /// returned by [`get_current_epoch`](#tymethod.get_current_epoch) and returns null otherwise. + /// The RPC returns the epoch when `epoch_number` is less than or equal to the current epoch + /// number returned by [`get_current_epoch`](#tymethod.get_current_epoch) and returns null + /// otherwise. /// /// Because of [chain reorganization](#chain-reorganization), for the same `epoch_number`, this /// RPC may return null or different epochs in different invocations. @@ -847,7 +850,8 @@ pub trait ChainRpc { /// ## Returns /// /// If the block with the hash `block_hash` is in the [canonical chain](#canonical-chain) and - /// its block number is N, return the block rewards analysis for block `N - 1 - ProposalWindow.farthest`. + /// its block number is N, return the block rewards analysis for block `N - 1 - + /// ProposalWindow.farthest`. /// /// ## Examples /// @@ -955,7 +959,8 @@ pub trait ChainRpc { /// ## Params /// /// * `tx_hashes` - Transaction hashes, all transactions must be in the same block - /// * `block_hash` - An optional parameter, if specified, looks for transactions in the block with this hash + /// * `block_hash` - An optional parameter, if specified, looks for transactions in the block + /// with this hash /// /// ## Examples /// @@ -995,11 +1000,13 @@ pub trait ChainRpc { block_hash: Option, ) -> Result; - /// Verifies that a proof points to transactions in a block, returning the transaction hashes it commits to. + /// Verifies that a proof points to transactions in a block, returning the transaction hashes it + /// commits to. /// /// ## Parameters /// - /// * `transaction_proof` - proof generated by [`get_transaction_proof`](#tymethod.get_transaction_proof). + /// * `transaction_proof` - proof generated by + /// [`get_transaction_proof`](#tymethod.get_transaction_proof). /// /// ## Examples /// @@ -1046,13 +1053,14 @@ pub trait ChainRpc { /// /// ## Returns /// - /// The RPC returns a fork block or null. When the RPC returns a block, the block hash must equal to - /// the parameter `block_hash`. + /// The RPC returns a fork block or null. When the RPC returns a block, the block hash must + /// equal to the parameter `block_hash`. /// - /// Please note that due to the technical nature of the peer to peer sync, the RPC may return null or a fork block - /// result on different nodes with same `block_hash` even they are fully synced to the [canonical chain](#canonical-chain). - /// And because of [chain reorganization](#chain-reorganization), for the same `block_hash`, the - /// RPC may sometimes return null and sometimes return the fork block. + /// Please note that due to the technical nature of the peer to peer sync, the RPC may return + /// null or a fork block result on different nodes with same `block_hash` even they are + /// fully synced to the [canonical chain](#canonical-chain). And because of [chain + /// reorganization](#chain-reorganization), for the same `block_hash`, the RPC may sometimes + /// return null and sometimes return the fork block. /// /// When `verbosity` is 2, it returns a JSON object as the `result`. See `BlockView` for the /// schema. 
diff --git a/rpc/src/module/experiment.rs b/rpc/src/module/experiment.rs index ca5ed8e735..3faf34903f 100644 --- a/rpc/src/module/experiment.rs +++ b/rpc/src/module/experiment.rs @@ -271,7 +271,8 @@ pub trait ExperimentRpc { block_hash: H256, ) -> Result; - /// Estimates a fee rate (capacity/KB) for a transaction that to be committed within the expect number of blocks. + /// Estimates a fee rate (capacity/KB) for a transaction that to be committed within the expect + /// number of blocks. #[deprecated( since = "0.34.0", note = "This method is deprecated because of the performance issue. It always returns an error now." diff --git a/rpc/src/module/indexer.rs b/rpc/src/module/indexer.rs index 91b097e7d6..085d0461c9 100644 --- a/rpc/src/module/indexer.rs +++ b/rpc/src/module/indexer.rs @@ -8,7 +8,8 @@ use jsonrpc_derive::rpc; /// RPC Module Indexer which index cells by lock script hash. /// -/// The index is disabled by default, which **must** be enabled by calling [`index_lock_hash`](#tymethod.index_lock_hash) first. +/// The index is disabled by default, which **must** be enabled by calling +/// [`index_lock_hash`](#tymethod.index_lock_hash) first. #[deprecated( since = "0.36.0", note = "Please use [ckb-indexer](https://github.com/nervosnetwork/ckb-indexer) as an alternate solution." @@ -25,7 +26,8 @@ pub trait IndexerRpc { /// * `lock_hash` - Cell lock script hash /// * `page` - Page number, starting from 0 /// * `per` - Page size, max value is 50 - /// * `reverse_order` - Returns the live cells collection in reverse order. (**Optional**, default is false) + /// * `reverse_order` - Returns the live cells collection in reverse order. (**Optional**, + /// default is false) /// /// ## Examples /// @@ -318,14 +320,16 @@ pub trait IndexerRpc { /// Returns the transactions collection by the hash of lock script. /// /// This RPC requires [creating the index](#tymethod.index_lock_hash) on `lock_hash` first. - /// It returns all matched transactions only if the index is created starting from the genesis block. + /// It returns all matched transactions only if the index is created starting from the genesis + /// block. /// /// ## Params /// /// * `lock_hash` - Cell lock script hash /// * `page` - Page number, starting from 0 /// * `per` - Page size, max value is 50 - /// * `reverse_order` - Return the transactions collection in reverse order. (**Optional**, default is false) + /// * `reverse_order` - Return the transactions collection in reverse order. (**Optional**, + /// default is false) /// /// ## Examples /// diff --git a/rpc/src/module/miner.rs b/rpc/src/module/miner.rs index 2a039ba364..db609e9685 100644 --- a/rpc/src/module/miner.rs +++ b/rpc/src/module/miner.rs @@ -26,12 +26,12 @@ pub trait MinerRpc { /// /// ## Params /// - /// * `bytes_limit` - the max serialization size in bytes of the block. - /// (**Optional:** the default is the consensus limit.) - /// * `proposals_limit` - the max count of proposals. - /// (**Optional:** the default is the consensus limit.) - /// * `max_version` - the max block version. - /// (**Optional:** the default is one configured in the current client version.) + /// * `bytes_limit` - the max serialization size in bytes of the block. (**Optional:** the + /// default is the consensus limit.) + /// * `proposals_limit` - the max count of proposals. (**Optional:** the default is the + /// consensus limit.) + /// * `max_version` - the max block version. (**Optional:** the default is one configured in the + /// current client version.) 
/// /// ## Examples /// @@ -140,8 +140,10 @@ pub trait MinerRpc { /// /// ## Params /// - /// * `work_id` - The same work ID returned from [`get_block_template`](#tymethod.get_block_template). - /// * `block` - The assembled block from the block template and which PoW puzzle has been resolved. + /// * `work_id` - The same work ID returned from + /// [`get_block_template`](#tymethod.get_block_template). + /// * `block` - The assembled block from the block template and which PoW puzzle has been + /// resolved. /// /// ## Examples /// diff --git a/rpc/src/module/mod.rs b/rpc/src/module/mod.rs index 72a5539039..b687771a61 100644 --- a/rpc/src/module/mod.rs +++ b/rpc/src/module/mod.rs @@ -13,7 +13,8 @@ //! The section *Required methods* lists all the RPC methods in the module. See module //! [PoolRpc](trait.PoolRpc.html#required-methods). //! -//! Use the RPC [`send_transaction`](trait.PoolRpc.html#tymethod.send_transaction) in the module `PoolRpc` as an example. +//! Use the RPC [`send_transaction`](trait.PoolRpc.html#tymethod.send_transaction) in the module +//! `PoolRpc` as an example. //! //! ```text //! fn send_transaction( @@ -35,9 +36,13 @@ //! ``` //! //! * `send_transaction` - The JSONRPC method name. -//! * `tx: Transaction` - The first param in the request params list which name is `tx` and type is `Transaction`. The type links to the JSON object definition of a CKB transaction. -//! * `outputs_validator: Option` - The second param. The `Option` shows that this argument is optional. The document for `OutputsValidator` shows that `outputs_validator` is an enum type which possible values include "default" and "passthrough". -//! * `-> Result` - The type inside the `Result` after `->` is the response type. In this example, it is `H256` which is a 32-bytes binary encoded as a hex string. +//! * `tx: Transaction` - The first param in the request params list which name is `tx` and type is +//! `Transaction`. The type links to the JSON object definition of a CKB transaction. +//! * `outputs_validator: Option` - The second param. The `Option` shows that this +//! argument is optional. The document for `OutputsValidator` shows that `outputs_validator` is an +//! enum type which possible values include "default" and "passthrough". +//! * `-> Result` - The type inside the `Result` after `->` is the response type. In this +//! example, it is `H256` which is a 32-bytes binary encoded as a hex string. //! //! The RPC errors are documented in [`RPCError`](../enum.RPCError.html). //! @@ -50,9 +55,11 @@ //! solutions. //! //! The CKB dev team will disable any deprecated RPC methods starting from the next minor version -//! release. Users can enable the deprecated methods via the config file option `rpc.enable_deprecated_rpc`. +//! release. Users can enable the deprecated methods via the config file option +//! `rpc.enable_deprecated_rpc`. //! -//! Once a deprecated method is disabled, the CKB dev team will remove it in a future minor version release. +//! Once a deprecated method is disabled, the CKB dev team will remove it in a future minor version +//! release. //! //! For example, a method is marked as deprecated in 0.35.0, it can be disabled in 0.36.0 and //! removed in 0.37.0. The minor versions are released monthly, so there's at least a two-month @@ -106,8 +113,8 @@ //! } //! ``` //! -//! The enum values are represented as JSON strings in the lowercase, underscore-concatenated form. So, in -//! JSON, `Status` can be one of "pending", "proposed" or "committed". +//! 
The enum values are represented as JSON strings in the lowercase, underscore-concatenated form. +//! So, in JSON, `Status` can be one of "pending", "proposed" or "committed". #![allow(deprecated)] mod alert; diff --git a/rpc/src/module/net.rs b/rpc/src/module/net.rs index cb1e564f63..ac92d0efcf 100644 --- a/rpc/src/module/net.rs +++ b/rpc/src/module/net.rs @@ -288,9 +288,12 @@ pub trait NetRpc { /// * `address` - The IP/Subnet with an optional netmask (default is /32 = single IP). Examples: /// * "192.168.0.2" bans a single IP /// * "192.168.0.0/24" bans IP from "192.168.0.0" to "192.168.0.255". - /// * `command` - `insert` to insert an IP/Subnet to the list, `delete` to delete an IP/Subnet from the list. - /// * `ban_time` - Time in milliseconds how long (or until when if [absolute] is set) the IP is banned, optional parameter, null means using the default time of 24h - /// * `absolute` - If set, the `ban_time` must be an absolute timestamp in milliseconds since epoch, optional parameter. + /// * `command` - `insert` to insert an IP/Subnet to the list, `delete` to delete an IP/Subnet + /// from the list. + /// * `ban_time` - Time in milliseconds how long (or until when if [absolute] is set) the IP is + /// banned, optional parameter, null means using the default time of 24h + /// * `absolute` - If set, the `ban_time` must be an absolute timestamp in milliseconds since + /// epoch, optional parameter. /// * `reason` - Ban reason, optional parameter. /// /// ## Errors diff --git a/rpc/src/module/pool.rs b/rpc/src/module/pool.rs index f6e222a05b..2053dd369b 100644 --- a/rpc/src/module/pool.rs +++ b/rpc/src/module/pool.rs @@ -22,7 +22,8 @@ pub trait PoolRpc { /// ## Params /// /// * `transaction` - The transaction. - /// * `outputs_validator` - Validates the transaction outputs before entering the tx-pool. (**Optional**, default is "passthrough"). + /// * `outputs_validator` - Validates the transaction outputs before entering the tx-pool. + /// (**Optional**, default is "passthrough"). /// /// ## Errors /// @@ -282,7 +283,8 @@ impl PoolRpc for PoolRpcImpl { } let broadcast = |tx_hash: packed::Byte32| { - // workaround: we are using `PeerIndex(usize::max)` to indicate that tx hash source is itself. + // workaround: we are using `PeerIndex(usize::max)` to indicate that tx hash source is + // itself. let peer_index = PeerIndex::new(usize::max_value()); self.sync_shared .state() diff --git a/rpc/src/module/subscription.rs b/rpc/src/module/subscription.rs index 7f397bd9da..d8a9d1f104 100644 --- a/rpc/src/module/subscription.rs +++ b/rpc/src/module/subscription.rs @@ -55,9 +55,9 @@ impl PubSubMetadata for SubscriptionSession { /// > {"id": 2, "jsonrpc": "2.0", "method": "subscribe", "params": ["new_tip_header"]} /// < {"jsonrpc":"2.0","result":0,"id":2} /// < {"jsonrpc":"2.0","method":"subscribe","params":{"result":"...block header json...", -///"subscription":0}} +/// "subscription":0}} /// < {"jsonrpc":"2.0","method":"subscribe","params":{"result":"...block header json...", -///"subscription":0}} +/// "subscription":0}} /// < ... 
/// > {"id": 2, "jsonrpc": "2.0", "method": "unsubscribe", "params": [0]} /// < {"jsonrpc":"2.0","result":true,"id":2} @@ -86,12 +86,13 @@ pub trait SubscriptionRpc { /// /// ## Params /// - /// * `topic` - Subscription topic (enum: new_tip_header | new_tip_block | new_transaction | proposed_transaction | rejected_transaction) + /// * `topic` - Subscription topic (enum: new_tip_header | new_tip_block | new_transaction | + /// proposed_transaction | rejected_transaction) /// /// ## Returns /// - /// This RPC returns the subscription ID as the result. CKB node will push messages in the subscribed - /// topics to the current RPC connection. The subscript ID is also attached as + /// This RPC returns the subscription ID as the result. CKB node will push messages in the + /// subscribed topics to the current RPC connection. The subscript ID is also attached as /// `params.subscription` in the push messages. /// /// Example push message: @@ -111,29 +112,33 @@ pub trait SubscriptionRpc { /// /// ### `new_tip_header` /// - /// Whenever there's a block that is appended to the canonical chain, the CKB node will publish the - /// block header to subscribers. + /// Whenever there's a block that is appended to the canonical chain, the CKB node will publish + /// the block header to subscribers. /// - /// The type of the `params.result` in the push message is [`HeaderView`](../../ckb_jsonrpc_types/struct.HeaderView.html). + /// The type of the `params.result` in the push message is + /// [`HeaderView`](../../ckb_jsonrpc_types/struct.HeaderView.html). /// /// ### `new_tip_block` /// - /// Whenever there's a block that is appended to the canonical chain, the CKB node will publish the - /// whole block to subscribers. + /// Whenever there's a block that is appended to the canonical chain, the CKB node will publish + /// the whole block to subscribers. /// - /// The type of the `params.result` in the push message is [`BlockView`](../../ckb_jsonrpc_types/struct.BlockView.html). + /// The type of the `params.result` in the push message is + /// [`BlockView`](../../ckb_jsonrpc_types/struct.BlockView.html). /// /// ### `new_transaction` /// /// Subscribers will get notified when a new transaction is submitted to the pool. /// - /// The type of the `params.result` in the push message is [`PoolTransactionEntry`](../../ckb_jsonrpc_types/struct.PoolTransactionEntry.html). + /// The type of the `params.result` in the push message is + /// [`PoolTransactionEntry`](../../ckb_jsonrpc_types/struct.PoolTransactionEntry.html). /// /// ### `proposed_transaction` /// /// Subscribers will get notified when an in-pool transaction is proposed by chain. /// - /// The type of the `params.result` in the push message is [`PoolTransactionEntry`](../../ckb_jsonrpc_types/struct.PoolTransactionEntry.html). + /// The type of the `params.result` in the push message is + /// [`PoolTransactionEntry`](../../ckb_jsonrpc_types/struct.PoolTransactionEntry.html). /// /// ### `rejected_transaction` /// @@ -143,8 +148,10 @@ pub trait SubscriptionRpc { /// /// The type of the `params.result` in the push message is a two-elements array, where /// - /// - the first item type is [`PoolTransactionEntry`](../../ckb_jsonrpc_types/struct.PoolTransactionEntry.html), and - /// - the second item type is [`PoolTransactionReject`](../../ckb_jsonrpc_types/struct.PoolTransactionReject.html). 
+ /// - the first item type is + /// [`PoolTransactionEntry`](../../ckb_jsonrpc_types/struct.PoolTransactionEntry.html), and + /// - the second item type is + /// [`PoolTransactionReject`](../../ckb_jsonrpc_types/struct.PoolTransactionReject.html). /// /// ## Examples /// diff --git a/rpc/src/test.rs b/rpc/src/test.rs index 2052bea720..6465ff278e 100644 --- a/rpc/src/test.rs +++ b/rpc/src/test.rs @@ -632,8 +632,8 @@ impl RpcTestSuite { /// /// Q. How to add tests? /// -/// Test cases are collected from code comments. Please put request and response JSON in their own code -/// blocks and set the fenced code block type to "json". +/// Test cases are collected from code comments. Please put request and response JSON in their own +/// code blocks and set the fenced code block type to "json". /// /// The first example must use id 42. And extra examples for the same method must use different /// ids. @@ -667,6 +667,7 @@ where *response = example.response.clone() } +// // * Use replace_rpc_response to skip the response matching assertions. // * Fix timestamp related fields. fn mock_rpc_response(example: &RpcTestExample, response: &mut RpcTestResponse) { diff --git a/script/src/verify.rs b/script/src/verify.rs index e3717701f2..28e34ee8c4 100644 --- a/script/src/verify.rs +++ b/script/src/verify.rs @@ -467,11 +467,15 @@ mod tests { output } - // NOTE: `verify` binary is outdated and most related unit tests are testing `script` crate functions - // I try to keep unit test code unmodified as much as possible, and may add it back in future PR. + // NOTE: `verify` binary is outdated and most related unit tests are testing `script` crate + // functions I try to keep unit test code unmodified as much as possible, and may add it + // back in future PR. + // + // ```ignore // fn open_cell_verify() -> File { // File::open(Path::new(env!("CARGO_MANIFEST_DIR")).join("../script/testdata/verify")).unwrap() // } + // ``` fn open_cell_always_success() -> File { File::open(Path::new(env!("CARGO_MANIFEST_DIR")).join("../script/testdata/always_success")) diff --git a/shared/src/migrations/table_to_struct.rs b/shared/src/migrations/table_to_struct.rs index 8746b6e6ea..7efad97f6b 100644 --- a/shared/src/migrations/table_to_struct.rs +++ b/shared/src/migrations/table_to_struct.rs @@ -60,8 +60,8 @@ impl Migration for ChangeMoleculeTableToStruct { pb.set_message("migrating: epoch"); pb.inc(1); let epoch_ext_migration = |key: &[u8], value: &[u8]| -> Result<()> { - // COLUMN_EPOCH stores epoch_number => last_block_hash_in_previous_epoch and last_block_hash_in_previous_epoch => epoch_ext - // only migrates epoch_ext + // COLUMN_EPOCH stores epoch_number => last_block_hash_in_previous_epoch and + // last_block_hash_in_previous_epoch => epoch_ext only migrates epoch_ext if key.len() == 32 { // (1 total size field + 8 fields) * 4 byte per field txn.put(COLUMN_EPOCH, key, &value[36..])?; diff --git a/shared/src/shared.rs b/shared/src/shared.rs index 60e2bcca17..cb9eeb93b8 100644 --- a/shared/src/shared.rs +++ b/shared/src/shared.rs @@ -223,7 +223,8 @@ impl Shared { } } - /// Spawn freeze background thread that periodically checks and moves ancient data from the kv database into the freezer. + /// Spawn freeze background thread that periodically checks and moves ancient data from the kv + /// database into the freezer. 
pub fn spawn_freeze(&self) -> FreezerClose { let (signal_sender, signal_receiver) = ckb_channel::bounded::<()>(service::SIGNAL_CHANNEL_SIZE); diff --git a/spec/src/consensus.rs b/spec/src/consensus.rs index d1fbf0e66a..7137758fef 100644 --- a/spec/src/consensus.rs +++ b/spec/src/consensus.rs @@ -1,5 +1,4 @@ //! Consensus defines various tweakable parameters of a given instance of the CKB system. -//! #![allow(clippy::inconsistent_digit_grouping)] @@ -96,9 +95,12 @@ pub const TYPE_ID_CODE_HASH: H256 = h256!("0x545950455f4944"); /// and farthest on-chain distance between a transaction's proposal /// and commitment. /// -/// A non-cellbase transaction is committed at height h_c if all of the following conditions are met: -/// 1) it is proposed at height h_p of the same chain, where w_close <= h_c − h_p <= w_far ; -/// 2) it is in the commitment zone of the main chain block with height h_c ; +/// A non-cellbase transaction is committed at height h_c if all of the following conditions are +/// met: +/// +/// 1. it is proposed at height h_p of the same chain, where w_close <= h_c − h_p <= w_far ; +/// +/// 2. it is in the commitment zone of the main chain block with height h_c ; /// /// ```text /// ProposalWindow (2, 10) @@ -110,7 +112,6 @@ pub const TYPE_ID_CODE_HASH: H256 = h256!("0x545950455f4944"); /// \ /// commit /// ``` -/// impl ProposalWindow { /// The w_close parameter pub fn closest(&self) -> BlockNumber { @@ -128,7 +129,8 @@ impl ProposalWindow { } } -/// The Consensus factory, which can be used in order to configure the properties of a new Consensus. +/// The Consensus factory, which can be used in order to configure the properties of a new +/// Consensus. pub struct ConsensusBuilder { inner: Consensus, } @@ -233,7 +235,8 @@ pub fn build_genesis_dao_data( } impl ConsensusBuilder { - /// Generates the base configuration for build a Consensus, from which configuration methods can be chained. + /// Generates the base configuration for build a Consensus, from which configuration methods can + /// be chained. pub fn new(genesis_block: BlockView, genesis_epoch_ext: EpochExt) -> Self { let orphan_rate_target = RationalU256::new_raw( U256::from(DEFAULT_ORPHAN_RATE_TARGET.0), @@ -430,8 +433,9 @@ impl ConsensusBuilder { /// Sets permanent_difficulty_in_dummy for the new Consensus. /// /// [dynamic-difficulty-adjustment-mechanism](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0020-ckb-consensus-protocol/0020-ckb-consensus-protocol.md#dynamic-difficulty-adjustment-mechanism) - /// may be a disturbance in dev chain, set permanent_difficulty_in_dummy to true will disable dynamic difficulty adjustment mechanism. keep difficulty unchanged. - /// Work only under dummy Pow + /// may be a disturbance in dev chain, set permanent_difficulty_in_dummy to true will disable + /// dynamic difficulty adjustment mechanism. keep difficulty unchanged. Work only under + /// dummy Pow #[must_use] pub fn permanent_difficulty_in_dummy(mut self, permanent: bool) -> Self { self.inner.permanent_difficulty_in_dummy = permanent; diff --git a/spec/src/lib.rs b/spec/src/lib.rs index bf74492e0a..8179e257c3 100644 --- a/spec/src/lib.rs +++ b/spec/src/lib.rs @@ -4,7 +4,6 @@ //! //! In order to run a chain different to the official public one, //! with a config file specifying chain = "path" under [ckb]. -//! 
// Because the limitation of toml library, // we must put nested config struct in the tail to make it serializable, @@ -70,7 +69,8 @@ pub struct ChainSpec { /// The block chain pow pub pow: Pow, #[serde(skip)] - /// Hash of blake2b_256 spec content bytes, used for check consistency between database and config + /// Hash of blake2b_256 spec content bytes, used for check consistency between database and + /// config pub hash: packed::Byte32, } @@ -253,7 +253,8 @@ impl Params { .unwrap_or_else(default_params::cellbase_maturity) } - /// Return the `primary_epoch_reward_halving_interval`, otherwise if None, returns the default value + /// Return the `primary_epoch_reward_halving_interval`, otherwise if None, returns the default + /// value pub fn primary_epoch_reward_halving_interval(&self) -> EpochNumber { self.primary_epoch_reward_halving_interval .unwrap_or_else(default_params::primary_epoch_reward_halving_interval) @@ -325,7 +326,8 @@ pub struct Genesis { /// The system cells' lock pub system_cells_lock: Script, /// For block 1~11, the reward target is genesis block. - /// Genesis block must have the lock serialized in the cellbase witness, which is set to `bootstrap_lock`. + /// Genesis block must have the lock serialized in the cellbase witness, which is set to + /// `bootstrap_lock`. pub bootstrap_lock: Script, /// The genesis dep_groups file resource /// diff --git a/store/src/store.rs b/store/src/store.rs index bf7f08d543..4f20bf0262 100644 --- a/store/src/store.rs +++ b/store/src/store.rs @@ -227,7 +227,8 @@ pub trait ChainStore<'a>: Send + Sync + Sized { /// Returns true if the transaction confirmed in main chain. /// /// This function is base on transaction index `COLUMN_TRANSACTION_INFO`. - /// Current release maintains a full index of historical transaction by default, this may be changed in future + /// Current release maintains a full index of historical transaction by default, this may be + /// changed in future fn transaction_exists(&'a self, hash: &packed::Byte32) -> bool { self.get(COLUMN_TRANSACTION_INFO, hash.as_slice()).is_some() } diff --git a/sync/src/lib.rs b/sync/src/lib.rs index adf4a88fdc..f0bebc0e8e 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -21,7 +21,8 @@ pub use crate::synchronizer::Synchronizer; pub use crate::types::SyncShared; use std::time::Duration; -/// Default max get header response length, if it is greater than this value, the message will be ignored +/// Default max get header response length, if it is greater than this value, the message will be +/// ignored pub const MAX_HEADERS_LEN: usize = 2_000; /// The default init download block interval is 24 hours /// If the time of the local highest block is within this range, exit the ibd state @@ -32,7 +33,8 @@ pub const MAX_TIP_AGE: u64 = 24 * 60 * 60 * 1000; pub const INIT_BLOCKS_IN_TRANSIT_PER_PEER: usize = 16; /// Maximum number of download blocks that can be requested at one time pub const MAX_BLOCKS_IN_TRANSIT_PER_PEER: usize = 128; -/// The point at which the scheduler adjusts the number of tasks, by default one adjustment per 512 blocks. +/// The point at which the scheduler adjusts the number of tasks, by default one adjustment per 512 +/// blocks. 
pub const CHECK_POINT_WINDOW: u64 = (MAX_BLOCKS_IN_TRANSIT_PER_PEER * 4) as u64; // Time recording window size, ibd period scheduler dynamically adjusts frequency diff --git a/sync/src/orphan_block_pool.rs b/sync/src/orphan_block_pool.rs index 21c5fa1549..fd27a32c14 100644 --- a/sync/src/orphan_block_pool.rs +++ b/sync/src/orphan_block_pool.rs @@ -61,7 +61,8 @@ impl OrphanBlockPool { } pub fn get_block(&self, hash: &packed::Byte32) -> Option { - // acquire the `blocks` read lock first, guarantee ordering of acquisition is same as `remove_blocks_by_parent`, avoids deadlocking + // acquire the `blocks` read lock first, guarantee ordering of acquisition is same as + // `remove_blocks_by_parent`, avoids deadlocking let guard = self.blocks.read(); self.parents.read().get(hash).and_then(|parent_hash| { guard diff --git a/sync/src/relayer/compact_block_process.rs b/sync/src/relayer/compact_block_process.rs index 72938b57f3..ded227a881 100644 --- a/sync/src/relayer/compact_block_process.rs +++ b/sync/src/relayer/compact_block_process.rs @@ -296,7 +296,8 @@ impl<'a> BlockMedianTimeContext for CompactBlockMedianTimeView<'a> { impl<'a> HeaderProvider for CompactBlockMedianTimeView<'a> { fn get_header(&self, hash: &packed::Byte32) -> Option { - // Note: don't query store because we already did that in `fn_get_pending_header -> get_header_view`. + // Note: don't query store because we already did that in `fn_get_pending_header -> + // get_header_view`. (self.fn_get_pending_header)(hash.to_owned()) } } diff --git a/sync/src/relayer/compact_block_verifier.rs b/sync/src/relayer/compact_block_verifier.rs index 65f13aeb17..b9fb5fd008 100644 --- a/sync/src/relayer/compact_block_verifier.rs +++ b/sync/src/relayer/compact_block_verifier.rs @@ -72,7 +72,8 @@ impl ShortIdsVerifier { } // Check intersection of prefilled transactions and short ids. 
- // Cellbase is skipped since it's always prefilled and has the chances of collision with other txs + // Cellbase is skipped since it's always prefilled and has the chances of collision with + // other txs let is_intersect = prefilled_transactions .into_iter() .skip(1) diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index 1c3ec6b6f1..1e5820d8a5 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -77,18 +77,21 @@ pub struct Relayer { impl Relayer { /// Init relay protocol handle /// - /// This is a runtime relay protocol shared state, and any relay messages will be processed and forwarded by it + /// This is a runtime relay protocol shared state, and any relay messages will be processed and + /// forwarded by it /// /// min_fee_rate: Default transaction fee unit, can be modified by configuration file - /// max_tx_verify_cycles: Maximum transaction consumption allowed by default, can be modified by configuration file + /// max_tx_verify_cycles: Maximum transaction consumption allowed by default, can be modified by + /// configuration file pub fn new( chain: ChainController, shared: Arc, min_fee_rate: FeeRate, max_tx_verify_cycles: Cycle, ) -> Self { - // setup a rate limiter keyed by peer and message type that lets through 30 requests per second - // current max rps is 10 (ASK_FOR_TXS_TOKEN / TX_PROPOSAL_TOKEN), 30 is a flexible hard cap with buffer + // setup a rate limiter keyed by peer and message type that lets through 30 requests per + // second current max rps is 10 (ASK_FOR_TXS_TOKEN / TX_PROPOSAL_TOKEN), 30 is a + // flexible hard cap with buffer let quota = governor::Quota::per_second(std::num::NonZeroU32::new(30).unwrap()); let rate_limiter = Arc::new(Mutex::new(RateLimiter::keyed(quota))); Relayer { diff --git a/sync/src/synchronizer/get_blocks_process.rs b/sync/src/synchronizer/get_blocks_process.rs index 0f05b7b4bf..73e7f2660d 100644 --- a/sync/src/synchronizer/get_blocks_process.rs +++ b/sync/src/synchronizer/get_blocks_process.rs @@ -29,7 +29,8 @@ impl<'a> GetBlocksProcess<'a> { pub fn execute(self) -> Status { let block_hashes = self.message.block_hashes(); - // use MAX_HEADERS_LEN as limit, we may increase the value of INIT_BLOCKS_IN_TRANSIT_PER_PEER in the future + // use MAX_HEADERS_LEN as limit, we may increase the value of + // INIT_BLOCKS_IN_TRANSIT_PER_PEER in the future if block_hashes.len() > MAX_HEADERS_LEN { return StatusCode::ProtocolMessageIsMalformed.with_context(format!( "BlockHashes count({}) > MAX_HEADERS_LEN({})", diff --git a/sync/src/synchronizer/in_ibd_process.rs b/sync/src/synchronizer/in_ibd_process.rs index 55af45e027..8cd0ced213 100644 --- a/sync/src/synchronizer/in_ibd_process.rs +++ b/sync/src/synchronizer/in_ibd_process.rs @@ -35,8 +35,9 @@ impl<'a> InIBDProcess<'a> { // The node itself needs to ensure the validity of the outbound connection. // - // If outbound is an ibd node(non-whitelist, non-protect), it should be disconnected automatically. - // If inbound is an ibd node, just mark the node does not pass header sync authentication. + // If outbound is an ibd node(non-whitelist, non-protect), it should be disconnected + // automatically. If inbound is an ibd node, just mark the node does not + // pass header sync authentication. 
if state.peer_flags.is_outbound { if state.peer_flags.is_whitelist || state.peer_flags.is_protect { self.synchronizer.shared().state().suspend_sync(state); diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 0ae998388a..b149d7e525 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -210,7 +210,8 @@ pub struct Synchronizer { impl Synchronizer { /// Init sync protocol handle /// - /// This is a runtime sync protocol shared state, and any relay messages will be processed and forwarded by it + /// This is a runtime sync protocol shared state, and any relay messages will be processed and + /// forwarded by it pub fn new(chain: ChainController, shared: Arc) -> Synchronizer { Synchronizer { chain, @@ -362,14 +363,14 @@ impl Synchronizer { } /// Regularly check and eject some nodes that do not respond in time - // - If at timeout their best known block now has more work than our tip - // when the timeout was set, then either reset the timeout or clear it - // (after comparing against our current tip's work) - // - If at timeout their best known block still has less work than our - // tip did when the timeout was set, then send a getheaders message, - // and set a shorter timeout, HEADERS_RESPONSE_TIME seconds in future. - // If their best known block is still behind when that new timeout is - // reached, disconnect. + // + // - If at timeout their best known block now has more work than our tip when the timeout was + // set, then either reset the timeout or clear it (after comparing against our current tip's + // work) + // - If at timeout their best known block still has less work than our tip did when the + // timeout was set, then send a getheaders message, and set a shorter timeout, + // HEADERS_RESPONSE_TIME seconds in future. If their best known block is still behind when + // that new timeout is reached, disconnect. pub fn eviction(&self, nc: &dyn CKBProtocolContext) { let mut peer_states = self.peers().state.write(); let active_chain = self.shared.active_chain(); @@ -413,10 +414,11 @@ impl Synchronizer { && best_known_header.map(HeaderView::total_difficulty) >= state.chain_sync.total_difficulty.as_ref()) { - // Our best block known by this peer is behind our tip, and we're either noticing - // that for the first time, OR this peer was able to catch up to some earlier point - // where we checked against our tip. - // Either way, set a new timeout based on current tip. + // Our best block known by this peer is behind our tip, and we're either + // noticing that for the first time, OR this peer was able + // to catch up to some earlier point where we checked + // against our tip. Either way, set a new timeout based on + // current tip. state.chain_sync.timeout = now + CHAIN_SYNC_TIMEOUT; state.chain_sync.work_header = Some(tip_header); state.chain_sync.total_difficulty = Some(local_total_difficulty); diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index bd77f424d4..f98152f943 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -62,13 +62,11 @@ const SHRINK_THREHOLD: usize = 300; // non-protected and non-whitelist. 
// Algorithm: if a peer's best known block has less work than our tip, // set a timeout CHAIN_SYNC_TIMEOUT seconds in the future: -// - If at timeout their best known block now has more work than our tip -// when the timeout was set, then either reset the timeout or clear it -// (after comparing against our current tip's work) -// - If at timeout their best known block still has less work than our -// tip did when the timeout was set, then send a getheaders message, -// and set a shorter timeout, HEADERS_RESPONSE_TIME seconds in future. -// If their best known block is still behind when that new timeout is +// - If at timeout their best known block now has more work than our tip when the timeout was set, +// then either reset the timeout or clear it (after comparing against our current tip's work) +// - If at timeout their best known block still has less work than our tip did when the timeout +// was set, then send a getheaders message, and set a shorter timeout, HEADERS_RESPONSE_TIME +// seconds in future. If their best known block is still behind when that new timeout is // reached, disconnect. #[derive(Clone, Debug, Default)] @@ -155,7 +153,8 @@ impl HeadersSyncController { None } else { // ignore timeout because the tip already almost reach the real time; - // we can sync to the estimated tip in 1 inspect window by the slowest speed that we can accept. + // we can sync to the estimated tip in 1 inspect window by the slowest speed that we + // can accept. Some(false) } } else if expected_before_finished < inspect_window { @@ -166,7 +165,8 @@ impl HeadersSyncController { let spent_since_last_updated = now.saturating_sub(self.last_updated_ts); if spent_since_last_updated < inspect_window { - // ignore timeout because the time spent since last updated is not enough as a sample + // ignore timeout because the time spent since last updated is not enough as a + // sample Some(false) } else { let synced_since_last_updated = now_tip_ts.saturating_sub(self.last_updated_tip_ts); @@ -185,7 +185,8 @@ impl HeadersSyncController { trace!("headers-sync: the instantaneous speed is acceptable"); Some(false) } else { - // tolerate more bias for instantaneous speed, we will check the global average speed + // tolerate more bias for instantaneous speed, we will check the global + // average speed let spent_since_started = now.saturating_sub(self.started_ts); let synced_since_started = now_tip_ts.saturating_sub(self.started_tip_ts); @@ -433,7 +434,8 @@ enum TimeQuantile { /// /// The dividing line is, 1/3 position, 4/5 position, 1/10 position. /// -/// There is 14/30 normal area, 1/10 penalty area, 1/10 double penalty area, 1/3 accelerated reward area. +/// There is 14/30 normal area, 1/10 penalty area, 1/10 double penalty area, 1/3 accelerated reward +/// area. /// /// Most of the nodes that fall in the normal and accelerated reward area will be retained, /// while most of the nodes that fall in the normal and penalty zones will be slowly eliminated @@ -730,8 +732,8 @@ impl InflightBlocks { // In the normal state, trace will always empty // // When the inflight request reaches the checkpoint(inflight > tip + 512), - // it means that there is an anomaly in the sync less than tip + 1, i.e. some nodes are stuck, - // at which point it will be recorded as the timestamp at that time. + // it means that there is an anomaly in the sync less than tip + 1, i.e. some nodes are + // stuck, at which point it will be recorded as the timestamp at that time. 
         //
         // If the time exceeds 1s, delete the task and halve the number of
         // executable tasks for the corresponding node
@@ -785,8 +787,8 @@ impl InflightBlocks {
 
         if self.restart_number >= block.number {
             // All new requests smaller than restart_number mean that they are cleaned up and
-            // cannot be immediately marked as cleaned up again, so give it a normal response time of 1.5s.
-            // (timeout check is 1s, plus 0.5s given in advance)
+            // cannot be immediately marked as cleaned up again, so give it a normal response time
+            // of 1.5s. (timeout check is 1s, plus 0.5s given in advance)
             self.trace_number
                 .insert(block.clone(), unix_time_as_millis() + 500);
         }
@@ -1294,8 +1296,9 @@ impl SyncShared {
             return ret;
         }
 
-        // The above block has been accepted. Attempt to accept its descendant blocks in orphan pool.
-        // The returned blocks of `remove_blocks_by_parent` are in topology order by parents
+        // The above block has been accepted. Attempt to accept its descendant blocks in orphan
+        // pool. The returned blocks of `remove_blocks_by_parent` are in topology order by
+        // parents
         self.try_search_orphan_pool(chain, &block.as_ref().hash());
         ret
     }
diff --git a/tx-pool/src/block_assembler/mod.rs b/tx-pool/src/block_assembler/mod.rs
index bfc2aeb35d..d699ae4e6e 100644
--- a/tx-pool/src/block_assembler/mod.rs
+++ b/tx-pool/src/block_assembler/mod.rs
@@ -191,11 +191,13 @@ impl BlockAssembler {
         Ok(tx)
     }
 
-    // A block B1 is considered to be the uncle of another block B2 if all of the following conditions are met:
-    // (1) they are in the same epoch, sharing the same difficulty;
-    // (2) height(B2) > height(B1);
-    // (3) B1's parent is either B2's ancestor or embedded in B2 or its ancestors as an uncle;
-    // and (4) B2 is the first block in its chain to refer to B1.
+    // A block B1 is considered to be the uncle of another block B2 if all of the following
+    // conditions are met:
+    //
+    // 1. they are in the same epoch, sharing the same difficulty;
+    // 2. height(B2) > height(B1);
+    // 3. B1's parent is either B2's ancestor or embedded in B2 or its ancestors as an uncle;
+    // 4. B2 is the first block in its chain to refer to B1.
     pub(crate) fn prepare_uncles(
         snapshot: &Snapshot,
         candidate_number: BlockNumber,
diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs
index 77c334c63c..d11daad1e4 100644
--- a/tx-pool/src/pool.rs
+++ b/tx-pool/src/pool.rs
@@ -114,7 +114,8 @@ impl TxPool {
         }
     }
 
-    /// Tx-pool owned snapshot, it may not consistent with chain cause tx-pool update snapshot asynchronously
+    /// Tx-pool owned snapshot, it may not be consistent with the chain because the tx-pool updates
+    /// its snapshot asynchronously
     pub fn snapshot(&self) -> &Snapshot {
         &self.snapshot
     }
@@ -504,7 +505,8 @@ impl TxPool {
             OutPointError::Unknown(out_points) => {
                 let snapshot = self.snapshot();
 
-                // if resolved input is unknown, but we known tx, it's dead or invalid
+                // if the resolved input is unknown but we know the tx, it's dead or
+                // invalid
                 if !out_points
                     .iter()
                     .any(|pt| snapshot.transaction_exists(&pt.tx_hash()))
diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs
index b0516d8ef5..cdac13e839 100644
--- a/tx-pool/src/process.rs
+++ b/tx-pool/src/process.rs
@@ -225,7 +225,8 @@ impl TxPoolService {
         let cycles_limit = consensus.max_block_cycles();
         let uncles_count_limit = consensus.max_uncles_num() as u32;
 
-        // Should recalculate current time after create cellbase (create cellbase may spend a lot of time)
+        // Should recalculate the current time after creating the cellbase (creating the cellbase
+        // may take a lot of time)
         let current_time = cmp::max(unix_time_as_millis(), tip_header.timestamp() + 1);
 
         Ok(BlockTemplate {
diff --git a/tx-pool/src/service.rs b/tx-pool/src/service.rs
index 7cb9eb047d..e806315277 100644
--- a/tx-pool/src/service.rs
+++ b/tx-pool/src/service.rs
@@ -97,7 +97,8 @@ pub(crate) enum Message {
 
 /// Controller to the tx-pool service.
 ///
-/// The Controller is internally reference-counted and can be freely cloned. A Controller can be obtained when tx-pool service construct.
+/// The Controller is internally reference-counted and can be freely cloned. A Controller can be
+/// obtained when the tx-pool service is constructed.
 #[derive(Clone)]
 pub struct TxPoolController {
     sender: mpsc::Sender,
@@ -444,7 +445,8 @@ impl TxPoolServiceBuilder {
         )
     }
 
-    /// Start a background thread tx-pool service by taking ownership of the Builder, and returns a TxPoolController.
+    /// Starts the tx-pool service in a background thread by taking ownership of the Builder, and
+    /// returns a TxPoolController.
     pub fn start(self, handle: &Handle) -> TxPoolController {
         let (sender, mut receiver) = mpsc::channel(DEFAULT_CHANNEL_SIZE);
         let (signal_sender, mut signal_receiver) = oneshot::channel();
diff --git a/util/crypto/src/secp/privkey.rs b/util/crypto/src/secp/privkey.rs
index 7ee6e03ff7..63d616e5ca 100644
--- a/util/crypto/src/secp/privkey.rs
+++ b/util/crypto/src/secp/privkey.rs
@@ -15,7 +15,8 @@ pub struct Privkey {
 }
 
 impl Privkey {
-    /// Constructs a signature for message using the Privkey and RFC6979 nonce Requires a signing-capable context.
+    /// Constructs a signature for the message using the Privkey and an RFC6979 nonce. Requires a
+    /// signing-capable context.
     pub fn sign_recoverable(&self, message: &Message) -> Result {
         let context = &SECP256K1;
         let message = message.as_ref();
diff --git a/util/dao/src/lib.rs b/util/dao/src/lib.rs
index a7dc0f4b5e..043e68baf1 100644
--- a/util/dao/src/lib.rs
+++ b/util/dao/src/lib.rs
@@ -98,7 +98,9 @@ impl<'a, CS: ChainStore<'a>> DaoCalculator<'a, CS, DataLoaderWrapper<'a, CS>> {
         Ok(primary_block_reward.safe_add(secondary_block_reward)?)
     }
 
-    /// Calculates the new dao field after packaging these transactions.
It returns the dao field in [`Byte32`] format. Please see [`extract_dao_data`] if you intend to see the detailed content. + /// Calculates the new dao field after packaging these transactions. It returns the dao field in + /// [`Byte32`] format. Please see [`extract_dao_data`] if you intend to see the detailed + /// content. /// /// [`Byte32`]: ../ckb_types/packed/struct.Byte32.html /// [`extract_dao_data`]: ../ckb_dao_utils/fn.extract_dao_data.html @@ -167,7 +169,8 @@ impl<'a, CS: ChainStore<'a>> DaoCalculator<'a, CS, DataLoaderWrapper<'a, CS>> { Ok(pack_dao_data(current_ar, current_c, current_s, current_u)) } - /// Returns the maximum capacity that the deposited `out_point` acts [withdrawing phase 1] at `withdrawing_header_hash.` + /// Returns the maximum capacity that the deposited `out_point` acts [withdrawing phase 1] at + /// `withdrawing_header_hash.` /// /// [withdrawing phase 1]: https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0023-dao-deposit-withdraw/0023-dao-deposit-withdraw.md#withdraw-phase-1 pub fn maximum_withdraw( @@ -289,7 +292,8 @@ impl<'a, CS: ChainStore<'a>> DaoCalculator<'a, CS, DataLoaderWrapper<'a, CS>> { .get(i) .ok_or(DaoError::InvalidOutPoint) .and_then(|witness_data| { - // dao contract stores header deps index as u64 in the input_type field of WitnessArgs + // dao contract stores header deps index as u64 in the input_type + // field of WitnessArgs let witness = WitnessArgs::from_slice(&Unpack::::unpack( &witness_data, )) diff --git a/util/dao/utils/src/error.rs b/util/dao/utils/src/error.rs index e2df5581c3..afdcdca496 100644 --- a/util/dao/utils/src/error.rs +++ b/util/dao/utils/src/error.rs @@ -5,17 +5,20 @@ use ckb_error::{prelude::*, Error, ErrorKind}; /// [NervosDAO]: https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0023-dao-deposit-withdraw/0023-dao-deposit-withdraw.md #[derive(Error, Debug, PartialEq, Clone, Eq)] pub enum DaoError { - /// This error occurs during calculating the dao field for a block, which broadly indicates that it cannot find a required block. + /// This error occurs during calculating the dao field for a block, which broadly indicates + /// that it cannot find a required block. #[error("InvalidHeader")] InvalidHeader, - /// When withdraws from NervosDAO, it requires the deposited header and withdrawing header to help calculating interest. - /// This error occurs at [withdrawing phase 2] for the below cases: - /// - The [`HeaderDeps`] does not include the withdrawing block hash. The withdrawing block hash - /// indicates the block which packages the target transaction at [withdrawing phase 1]. + /// When withdraws from NervosDAO, it requires the deposited header and withdrawing header to + /// help calculating interest. This error occurs at [withdrawing phase 2] for the below + /// cases: + /// - The [`HeaderDeps`] does not include the withdrawing block hash. The withdrawing block + /// hash indicates the block which packages the target transaction at [withdrawing phase + /// 1]. /// - The [`HeaderDeps`] does not include the deposited block hash. The deposited block hash - /// indicates the block which packages the target transaction at [deposit phase]. Please see - /// [withdrawing phase 2] for more details. + /// indicates the block which packages the target transaction at [deposit phase]. Please + /// see [withdrawing phase 2] for more details. 
/// /// [deposit phase]: https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0023-dao-deposit-withdraw/0023-dao-deposit-withdraw.md#deposit /// [withdrawing phase 1]: https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0023-dao-deposit-withdraw/0023-dao-deposit-withdraw.md#withdraw-phase-1 @@ -23,8 +26,8 @@ pub enum DaoError { #[error("InvalidOutPoint")] InvalidOutPoint, - /// When withdraws from NervosDAO, the deposited header should be specified via witness. This error - /// indicates the corresponding witness is unexpected. See + /// When withdraws from NervosDAO, the deposited header should be specified via witness. This + /// error indicates the corresponding witness is unexpected. See /// [the code](https://github.com/nervosnetwork/ckb/blob/69ff8311cdb312a0ef45d524060719eea5e90e9e/util/dao/src/lib.rs#L280-L301) /// for more detail. /// diff --git a/util/dao/utils/src/lib.rs b/util/dao/utils/src/lib.rs index e7ccad1029..d1fc233449 100644 --- a/util/dao/utils/src/lib.rs +++ b/util/dao/utils/src/lib.rs @@ -1,4 +1,5 @@ -//! This crate provides several util functions to operate the dao field and NervosDAO related errors. +//! This crate provides several util functions to operate the dao field and NervosDAO related +//! errors. mod error; diff --git a/util/fixed-hash/core/src/lib.rs b/util/fixed-hash/core/src/lib.rs index d17f8180e2..ea256ddc18 100644 --- a/util/fixed-hash/core/src/lib.rs +++ b/util/fixed-hash/core/src/lib.rs @@ -4,7 +4,8 @@ //! //! **This is an internal crate used by crate [`ckb_fixed_hash`], do not use this crate directly.** //! -//! All structs and the module [`error`](error/index.html) in this crate are re-exported in crate [`ckb_fixed_hash`]. +//! All structs and the module [`error`](error/index.html) in this crate are re-exported in crate +//! [`ckb_fixed_hash`]. //! //! And you can found examples in crate [`ckb_fixed_hash`]. //! diff --git a/util/fixed-hash/macros/src/lib.rs b/util/fixed-hash/macros/src/lib.rs index 7da0c67ea4..af836fe93e 100644 --- a/util/fixed-hash/macros/src/lib.rs +++ b/util/fixed-hash/macros/src/lib.rs @@ -3,7 +3,8 @@ //! If we use an array to construct const fixed-sized hashes, it's difficult to read. //! //! If we use [`FromStr::from_str`] to construct fixed-sized hashes, the result is not a constant. -//! So, it will reduce runtime performance. And it could cause a runtime error if the input is malformed. +//! So, it will reduce runtime performance. And it could cause a runtime error if the input is +//! malformed. //! //! With proc-macros, we can construct human-readable const fixed-sized hashes. //! And it will be checked in compile time, it could never cause any runtime error. diff --git a/util/instrument/src/lib.rs b/util/instrument/src/lib.rs index c110120bd5..106fc5bd43 100644 --- a/util/instrument/src/lib.rs +++ b/util/instrument/src/lib.rs @@ -2,10 +2,8 @@ //! //! Instruments for ckb for working with `Export`, `Import` //! -//! - [Export](instrument::export::Export) provide block data -//! export function. -//! - [Import](instrument::import::Import) import block data which -//! export from `Export`. +//! - [Export](instrument::export::Export) provide block data export function. +//! - [Import](instrument::import::Import) import block data which export from `Export`. 
mod export; mod import; diff --git a/util/jsonrpc-types/src/alert.rs b/util/jsonrpc-types/src/alert.rs index 830ff57cdf..18121156d1 100644 --- a/util/jsonrpc-types/src/alert.rs +++ b/util/jsonrpc-types/src/alert.rs @@ -4,11 +4,13 @@ use serde::{Deserialize, Serialize}; /// The alert identifier that is used to filter duplicated alerts. /// -/// This is a 32-bit unsigned integer type encoded as the 0x-prefixed hex string in JSON. See examples of [Uint32](type.Uint32.html#examples). +/// This is a 32-bit unsigned integer type encoded as the 0x-prefixed hex string in JSON. See +/// examples of [Uint32](type.Uint32.html#examples). pub type AlertId = Uint32; /// Alerts are sorted by priority. Greater integers mean higher priorities. /// -/// This is a 32-bit unsigned integer type encoded as the 0x-prefixed hex string in JSON. See examples of [Uint32](type.Uint32.html#examples). +/// This is a 32-bit unsigned integer type encoded as the 0x-prefixed hex string in JSON. See +/// examples of [Uint32](type.Uint32.html#examples). pub type AlertPriority = Uint32; /// An alert is a message about critical problems to be broadcast to all nodes via the p2p network. diff --git a/util/jsonrpc-types/src/block_template.rs b/util/jsonrpc-types/src/block_template.rs index b53715d78e..53cb3d64f5 100644 --- a/util/jsonrpc-types/src/block_template.rs +++ b/util/jsonrpc-types/src/block_template.rs @@ -21,9 +21,11 @@ pub struct BlockTemplate { pub compact_target: Uint32, /// The timestamp for the new block. /// - /// CKB node guarantees that this timestamp is larger than the median of the previous 37 blocks. + /// CKB node guarantees that this timestamp is larger than the median of the previous 37 + /// blocks. /// - /// Miners can increase it to the current time. It is not recommended to decrease it, since it may violate the median block timestamp consensus rule. + /// Miners can increase it to the current time. It is not recommended to decrease it, since it + /// may violate the median block timestamp consensus rule. pub current_time: Timestamp, /// The block number for the new block. /// @@ -39,16 +41,16 @@ pub struct BlockTemplate { pub parent_hash: H256, /// The cycles limit. /// - /// Miners must keep the total cycles below this limit, otherwise, the CKB node will reject the block - /// submission. + /// Miners must keep the total cycles below this limit, otherwise, the CKB node will reject the + /// block submission. /// /// It is guaranteed that the block does not exceed the limit if miners do not add new /// transactions to the block. pub cycles_limit: Cycle, /// The block serialized size limit. /// - /// Miners must keep the block size below this limit, otherwise, the CKB node will reject the block - /// submission. + /// Miners must keep the block size below this limit, otherwise, the CKB node will reject the + /// block submission. /// /// It is guaranteed that the block does not exceed the limit if miners do not add new /// transaction commitments. @@ -204,7 +206,8 @@ pub struct TransactionTemplate { /// /// This is a hint to help miners selecting transactions. /// - /// This transaction can only be committed if its dependencies are also committed in the new block. + /// This transaction can only be committed if its dependencies are also committed in the new + /// block. /// /// This field is a list of indices into the array `transactions` in the block template. 
/// diff --git a/util/jsonrpc-types/src/blockchain.rs b/util/jsonrpc-types/src/blockchain.rs index eeff0900fc..c1ba59c508 100644 --- a/util/jsonrpc-types/src/blockchain.rs +++ b/util/jsonrpc-types/src/blockchain.rs @@ -365,8 +365,8 @@ pub struct Transaction { pub version: Version, /// An array of cell deps. /// - /// CKB locates lock script and type script code via cell deps. The script also can uses syscalls - /// to read the cells here. + /// CKB locates lock script and type script code via cell deps. The script also can uses + /// syscalls to read the cells here. /// /// Unlike inputs, the live cells can be used as cell deps in multiple transactions. pub cell_deps: Vec, @@ -626,16 +626,20 @@ pub struct Header { /// /// It is a hash on two Merkle Tree roots: /// - /// * The root of a CKB Merkle Tree, which items are the transaction hashes of all the transactions in the block. - /// * The root of a CKB Merkle Tree, but the items are the transaction witness hashes of all the transactions in the block. + /// * The root of a CKB Merkle Tree, which items are the transaction hashes of all the + /// transactions in the block. + /// * The root of a CKB Merkle Tree, but the items are the transaction witness hashes of all + /// the transactions in the block. pub transactions_root: H256, /// The hash on `proposals` in the block body. /// - /// It is all zeros when `proposals` is empty, or the hash on all the bytes concatenated together. + /// It is all zeros when `proposals` is empty, or the hash on all the bytes concatenated + /// together. pub proposals_hash: H256, /// The hash on `uncles` in the block body. /// - /// It is all zeros when `uncles` is empty, or the hash on all the uncle header hashes concatenated together. + /// It is all zeros when `uncles` is empty, or the hash on all the uncle header hashes + /// concatenated together. pub uncles_hash: H256, /// DAO fields. /// @@ -755,7 +759,8 @@ impl From
for packed::Header { /// covered by PoW and can pass the consensus rules on uncle blocks. Proposal IDs are there because /// a block can commit transactions proposed in an uncle. /// -/// A block B1 is considered to be the uncle of another block B2 if all the following conditions are met: +/// A block B1 is considered to be the uncle of another block B2 if all the following conditions are +/// met: /// /// 1. They are in the same epoch, sharing the same difficulty; /// 2. B2 block number is larger than B1; @@ -776,7 +781,8 @@ pub struct UncleBlock { /// covered by PoW and can pass the consensus rules on uncle blocks. Proposal IDs are there because /// a block can commit transactions proposed in an uncle. /// -/// A block B1 is considered to be the uncle of another block B2 if all the following conditions are met: +/// A block B1 is considered to be the uncle of another block B2 if all the following conditions are +/// met: /// /// 1. They are in the same epoch, sharing the same difficulty; /// 2. B2 block number is larger than B1; @@ -1002,14 +1008,15 @@ pub struct BlockReward { pub primary: Capacity, /// The secondary base block reward allocated to miners. pub secondary: Capacity, - /// The transaction fees that are rewarded to miners because the transaction is committed in the block. + /// The transaction fees that are rewarded to miners because the transaction is committed in + /// the block. /// /// **Attention**, this is not the total transaction fee in the block. /// /// Miners get 60% of the transaction fee for each transaction committed in the block. pub tx_fee: Capacity, - /// The transaction fees that are rewarded to miners because the transaction is proposed in the block or - /// its uncles. + /// The transaction fees that are rewarded to miners because the transaction is proposed in the + /// block or its uncles. /// /// Miners get 40% of the transaction fee for each transaction proposed in the block and /// committed later in its active commit window. @@ -1074,12 +1081,13 @@ pub struct MinerReward { pub primary: Capacity, /// The secondary base block reward allocated to miners. pub secondary: Capacity, - /// The transaction fees that are rewarded to miners because the transaction is committed in the block. + /// The transaction fees that are rewarded to miners because the transaction is committed in + /// the block. /// /// Miners get 60% of the transaction fee for each transaction committed in the block. pub committed: Capacity, - /// The transaction fees that are rewarded to miners because the transaction is proposed in the block or - /// its uncles. + /// The transaction fees that are rewarded to miners because the transaction is proposed in the + /// block or its uncles. /// /// Miners get 40% of the transaction fee for each transaction proposed in the block and /// committed later in its active commit window. @@ -1173,9 +1181,11 @@ pub struct MerkleProof { /// and farthest on-chain distance between a transaction's proposal /// and commitment. /// -/// A non-cellbase transaction is committed at height h_c if all of the following conditions are met: -/// 1) it is proposed at height h_p of the same chain, where w_close <= h_c − h_p <= w_far ; -/// 2) it is in the commitment zone of the main chain block with height h_c ; +/// A non-cellbase transaction is committed at height h_c if all of the following conditions are +/// met: +/// +/// 1. it is proposed at height h_p of the same chain, where w_close <= h_c − h_p <= w_far ; +/// 2. 
it is in the commitment zone of the main chain block with height h_c ; /// /// ```text /// ProposalWindow { closest: 2, farthest: 10 } diff --git a/util/jsonrpc-types/src/indexer.rs b/util/jsonrpc-types/src/indexer.rs index dcfc68ab77..bab0a2f3f5 100644 --- a/util/jsonrpc-types/src/indexer.rs +++ b/util/jsonrpc-types/src/indexer.rs @@ -30,8 +30,9 @@ pub struct CellTransaction { /// /// This is null if the cell is still live. /// - /// The cell is consumed as the `consumed_by.index`-th input in the transaction `consumed_by.tx_hash`, which - /// has been committed to at the height `consumed_by.block_number` in the chain. + /// The cell is consumed as the `consumed_by.index`-th input in the transaction + /// `consumed_by.tx_hash`, which has been committed to at the height + /// `consumed_by.block_number` in the chain. pub consumed_by: Option, } @@ -43,8 +44,8 @@ pub struct TransactionPoint { pub block_number: BlockNumber, /// In which transaction this cell is an output. pub tx_hash: H256, - /// The index of this cell in the transaction. Based on the context, this is either an input index - /// or an output index. + /// The index of this cell in the transaction. Based on the context, this is either an input + /// index or an output index. pub index: Uint64, } diff --git a/util/jsonrpc-types/src/net.rs b/util/jsonrpc-types/src/net.rs index d75e3f6a99..d20ce5cc17 100644 --- a/util/jsonrpc-types/src/net.rs +++ b/util/jsonrpc-types/src/net.rs @@ -277,10 +277,13 @@ pub struct SyncState { pub orphan_blocks_count: Uint64, /// Count of downloading blocks. pub inflight_blocks_count: Uint64, - /// The download scheduler's time analysis data, the fast is the 1/3 of the cut-off point, unit ms + /// The download scheduler's time analysis data, the fast is the 1/3 of the cut-off point, unit + /// ms pub fast_time: Uint64, - /// The download scheduler's time analysis data, the normal is the 4/5 of the cut-off point, unit ms + /// The download scheduler's time analysis data, the normal is the 4/5 of the cut-off point, + /// unit ms pub normal_time: Uint64, - /// The download scheduler's time analysis data, the low is the 9/10 of the cut-off point, unit ms + /// The download scheduler's time analysis data, the low is the 9/10 of the cut-off point, unit + /// ms pub low_time: Uint64, } diff --git a/util/jsonrpc-types/src/pool.rs b/util/jsonrpc-types/src/pool.rs index 4fdb83770b..ab2535406a 100644 --- a/util/jsonrpc-types/src/pool.rs +++ b/util/jsonrpc-types/src/pool.rs @@ -74,7 +74,8 @@ pub enum OutputsValidator { /// /// The default validator only allows outputs (a.k.a., cells) that /// - /// * use either the secp256k1 or the secp256k1 multisig bundled in the genesis block via type script hash as the lock script, + /// * use either the secp256k1 or the secp256k1 multisig bundled in the genesis block via type + /// script hash as the lock script, /// * and the type script is either empty or DAO. Default, /// "passthrough": bypass the validator, thus allow any kind of transaction outputs. diff --git a/util/jsonrpc-types/src/primitive.rs b/util/jsonrpc-types/src/primitive.rs index 5d346afe0c..f893a29a37 100644 --- a/util/jsonrpc-types/src/primitive.rs +++ b/util/jsonrpc-types/src/primitive.rs @@ -2,51 +2,63 @@ use crate::{Uint32, Uint64}; /// Consecutive block number starting from 0. /// -/// This is a 64-bit unsigned integer type encoded as the 0x-prefixed hex string in JSON. See examples of [Uint64](type.Uint64.html#examples). 
+/// This is a 64-bit unsigned integer type encoded as the 0x-prefixed hex string in JSON. See +/// examples of [Uint64](type.Uint64.html#examples). pub type BlockNumber = Uint64; /// Consecutive epoch number starting from 0. /// -/// This is a 64-bit unsigned integer type encoded as the 0x-prefixed hex string in JSON. See examples of [Uint64](type.Uint64.html#examples). +/// This is a 64-bit unsigned integer type encoded as the 0x-prefixed hex string in JSON. See +/// examples of [Uint64](type.Uint64.html#examples). pub type EpochNumber = Uint64; -/// The epoch indicator of a block. It shows which epoch the block is in, and the elapsed epoch fraction after adding this block. +/// The epoch indicator of a block. It shows which epoch the block is in, and the elapsed epoch +/// fraction after adding this block. /// -/// This is a 64-bit unsigned integer type encoded as the 0x-prefixed hex string in JSON. See examples of [Uint64](type.Uint64.html#examples). +/// This is a 64-bit unsigned integer type encoded as the 0x-prefixed hex string in JSON. See +/// examples of [Uint64](type.Uint64.html#examples). /// -/// The lower 56 bits of the epoch field are split into 3 parts (listed in the order from higher bits to lower bits): +/// The lower 56 bits of the epoch field are split into 3 parts (listed in the order from higher +/// bits to lower bits): /// /// * The highest 16 bits represent the epoch length /// * The next 16 bits represent the current block index in the epoch, starting from 0. /// * The lowest 24 bits represent the current epoch number. /// /// Assume there's a block, which number is 11555 and in epoch 50. The epoch 50 starts from block -/// 11000 and have 1000 blocks. The epoch field for this particular block will then be 1,099,520,939,130,930, -/// which is calculated in the following way: +/// 11000 and have 1000 blocks. The epoch field for this particular block will then be +/// 1,099,520,939,130,930, which is calculated in the following way: /// /// ```text /// 50 | ((11555 - 11000) << 24) | (1000 << 40) /// ``` pub type EpochNumberWithFraction = Uint64; -/// The capacity of a cell is the value of the cell in Shannons. It is also the upper limit of the cell occupied storage size where every 100,000,000 Shannons give 1-byte storage. +/// The capacity of a cell is the value of the cell in Shannons. It is also the upper limit of the +/// cell occupied storage size where every 100,000,000 Shannons give 1-byte storage. /// -/// This is a 64-bit unsigned integer type encoded as the 0x-prefixed hex string in JSON. See examples of [Uint64](type.Uint64.html#examples). +/// This is a 64-bit unsigned integer type encoded as the 0x-prefixed hex string in JSON. See +/// examples of [Uint64](type.Uint64.html#examples). pub type Capacity = Uint64; /// Count of cycles consumed by CKB VM to run scripts. /// -/// This is a 64-bit unsigned integer type encoded as the 0x-prefixed hex string in JSON. See examples of [Uint64](type.Uint64.html#examples). +/// This is a 64-bit unsigned integer type encoded as the 0x-prefixed hex string in JSON. See +/// examples of [Uint64](type.Uint64.html#examples). pub type Cycle = Uint64; /// The fee rate is the ratio between fee and transaction weight in unit Shannon per 1,000 bytes. /// -/// Based on the context, the weight is either the transaction virtual bytes or serialization size in the block. +/// Based on the context, the weight is either the transaction virtual bytes or serialization size +/// in the block. 
/// -/// This is a 64-bit unsigned integer type encoded as the 0x-prefixed hex string in JSON. See examples of [Uint64](type.Uint64.html#examples). +/// This is a 64-bit unsigned integer type encoded as the 0x-prefixed hex string in JSON. See +/// examples of [Uint64](type.Uint64.html#examples). pub type FeeRate = Uint64; /// The Unix timestamp in milliseconds (1 second is 1000 milliseconds). /// /// For example, 1588233578000 is Thu, 30 Apr 2020 07:59:38 +0000 /// -/// This is a 64-bit unsigned integer type encoded as the 0x-prefixed hex string in JSON. See examples of [Uint64](type.Uint64.html#examples). +/// This is a 64-bit unsigned integer type encoded as the 0x-prefixed hex string in JSON. See +/// examples of [Uint64](type.Uint64.html#examples). pub type Timestamp = Uint64; /// The simple increasing integer version. /// -/// This is a 32-bit unsigned integer type encoded as the 0x-prefixed hex string in JSON. See examples of [Uint32](type.Uint32.html#examples). +/// This is a 32-bit unsigned integer type encoded as the 0x-prefixed hex string in JSON. See +/// examples of [Uint32](type.Uint32.html#examples). pub type Version = Uint32; diff --git a/util/logger/src/lib.rs b/util/logger/src/lib.rs index a628af4c74..4df949e71c 100644 --- a/util/logger/src/lib.rs +++ b/util/logger/src/lib.rs @@ -144,7 +144,8 @@ macro_rules! error { } } -/// Determines if a message logged at the specified level and with the default target will be logged. +/// Determines if a message logged at the specified level and with the default target will be +/// logged. /// /// The default target is the module path of the location of the log request. /// See also [`log_enabled_target!`] the version that supports checking arbitrary @@ -152,7 +153,8 @@ macro_rules! error { /// /// [`log_enabled_target!`]: macro.log_enabled_target.html /// -/// This can be used to avoid expensive computation of log message arguments if the message would be ignored anyway. +/// This can be used to avoid expensive computation of log message arguments if the message would be +/// ignored anyway. /// /// ## Examples /// @@ -303,9 +305,11 @@ macro_rules! error_target { } } -/// Determines if a message logged at the specified level and with the specified target will be logged. +/// Determines if a message logged at the specified level and with the specified target will be +/// logged. /// -/// This can be used to avoid expensive computation of log message arguments if the message would be ignored anyway. +/// This can be used to avoid expensive computation of log message arguments if the message would be +/// ignored anyway. /// /// See also [`log_enabled!`] the version that checks with the default target. /// diff --git a/util/memory-tracker/src/process.rs b/util/memory-tracker/src/process.rs index f5fa5f7c02..35afd27774 100644 --- a/util/memory-tracker/src/process.rs +++ b/util/memory-tracker/src/process.rs @@ -30,7 +30,8 @@ macro_rules! mib_read { }; } -/// Track the memory usage of the CKB process, Jemalloc and RocksDB through [ckb-metrics](../../ckb_metrics/index.html). +/// Track the memory usage of the CKB process, Jemalloc and RocksDB through +/// [ckb-metrics](../../ckb_metrics/index.html). 
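The `EpochNumberWithFraction` layout documented in `util/jsonrpc-types/src/primitive.rs` above (epoch length in the highest 16 of the lower 56 bits, block index in the next 16 bits, epoch number in the lowest 24 bits) can be sanity-checked with a small standalone sketch; `pack_epoch` and `unpack_epoch` are illustrative helpers, not part of the ckb API:

```rust
// Illustrative sketch of the EpochNumberWithFraction bit layout described above.
// `pack_epoch` / `unpack_epoch` are hypothetical helpers, not ckb APIs.
fn pack_epoch(length: u64, index: u64, number: u64) -> u64 {
    (length << 40) | (index << 24) | number
}

fn unpack_epoch(value: u64) -> (u64, u64, u64) {
    let length = (value >> 40) & 0xffff;
    let index = (value >> 24) & 0xffff;
    let number = value & 0x00ff_ffff;
    (length, index, number)
}

fn main() {
    // Block 11555 in epoch 50, where epoch 50 starts at block 11000 and has 1000 blocks.
    let value = pack_epoch(1000, 11555 - 11000, 50);
    assert_eq!(value, 1_099_520_939_130_930);
    assert_eq!(unpack_epoch(value), (1000, 555, 50));
}
```

The packed value matches the worked example in the doc comment, `50 | ((11555 - 11000) << 24) | (1000 << 40)`.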
 pub fn track_current_process(
     interval: u64,
     tracker_opt: Option>,
diff --git a/util/metrics-service/src/lib.rs b/util/metrics-service/src/lib.rs
index 50c0599e93..e9a3af2094 100644
--- a/util/metrics-service/src/lib.rs
+++ b/util/metrics-service/src/lib.rs
@@ -24,7 +24,8 @@ pub enum Guard {
 
 /// Initializes the metrics service and lets it run in the background.
 ///
-/// Returns [Guard](enum.Guard.html) if succeeded, or an `String` to describes the reason for the failure.
+/// Returns [Guard](enum.Guard.html) if succeeded, or a `String` that describes the reason for the
+/// failure.
 pub fn init(config: Config, handle: Handle) -> Result {
     if config.exporter.is_empty() {
         return Ok(Guard::Off);
diff --git a/util/network-alert/src/lib.rs b/util/network-alert/src/lib.rs
index 9d5f4822eb..aeef2a721d 100644
--- a/util/network-alert/src/lib.rs
+++ b/util/network-alert/src/lib.rs
@@ -6,7 +6,6 @@
 //! the client will show the alert message, the other behaviors of CKB node will not change.
 //!
 //! Network Alert will be removed soon once the CKB network is considered mature.
-//!
 pub mod alert_relayer;
 pub mod notifier;
 #[cfg(test)]
diff --git a/util/network-alert/src/verifier.rs b/util/network-alert/src/verifier.rs
index 38b0ac5716..5c67f4b64f 100644
--- a/util/network-alert/src/verifier.rs
+++ b/util/network-alert/src/verifier.rs
@@ -1,9 +1,9 @@
 //! verify module
 //!
 //! The message of this protocol must be verified by multi-signature before notifying the user.
-//! The implementation of any client must be consistent with ckb to prevent useless information from being broadcast on the entire network.
-//! The set of public keys is currently in the possession of the Nervos foundation
-//!
+//! The implementation of any client must be consistent with ckb to prevent useless information from
+//! being broadcast on the entire network. The set of public keys is currently in the possession of
+//! the Nervos foundation.
 use ckb_app_config::NetworkAlertConfig;
 use ckb_error::AnyError;
 use ckb_logger::{debug, trace};
diff --git a/util/proposal-table/src/lib.rs b/util/proposal-table/src/lib.rs
index db43b505c6..62ff6ab8fe 100644
--- a/util/proposal-table/src/lib.rs
+++ b/util/proposal-table/src/lib.rs
@@ -6,8 +6,9 @@ use std::collections::{BTreeMap, HashSet};
 use std::ops::Bound;
 
 /// A view captures point-time proposal set, representing on-chain proposed transaction pool,
-/// stored in the memory so that there is no need to fetch on hard disk, create by ProposalTable finalize method
-/// w_close and w_far define the closest and farthest on-chain distance between a transaction's proposal and commitment.
+/// stored in memory so that there is no need to fetch it from disk; it is created by the
+/// ProposalTable finalize method. w_close and w_far define the closest and farthest on-chain
+/// distance between a transaction's proposal and commitment.
 
 #[derive(Default, Clone, Debug)]
 pub struct ProposalView {
     pub(crate) gap: HashSet,
@@ -64,7 +65,8 @@ impl ProposalTable {
         self.table.insert(number, ids).is_none()
     }
 
-    /// Removes a proposal set from the table, returning the set at the number if the number was previously in the table
+    /// Removes a proposal set from the table, returning the set at the number if the number was
+    /// previously in the table
     ///
     /// # Examples
     ///
@@ -102,13 +104,15 @@ impl ProposalTable {
 
         ckb_logger::trace!("[proposal_finalize] table {:?}", self.table);
 
+        // ```
         // - if candidate_number <= self.proposal_window.closest()
-        //     new_ids = []
-        //     gap = [1..candidate_number]
+        //       new_ids = []
+        //       gap = [1..candidate_number]
         // - else
-        //     new_ids = [candidate_number- farthest..= candidate_number- closest]
-        //     gap = [candidate_number- closest + 1..candidate_number]
+        //       new_ids = [candidate_number- farthest..= candidate_number- closest]
+        //       gap = [candidate_number- closest + 1..candidate_number]
         // - end
+        // ```
         let (new_ids, gap) = if candidate_number <= self.proposal_window.closest() {
             (
                 HashSet::new(),
diff --git a/util/reward-calculator/src/lib.rs b/util/reward-calculator/src/lib.rs
index e83ac29d15..0a5e07d0f5 100644
--- a/util/reward-calculator/src/lib.rs
+++ b/util/reward-calculator/src/lib.rs
@@ -38,7 +38,8 @@ impl<'a, CS: ChainStore<'a>> RewardCalculator<'a, CS> {
         RewardCalculator { consensus, store }
     }
 
-    /// Calculates the current block number based on `parent,` locates the current block's target block, returns the target block miner's lock, and total block reward.
+    /// Calculates the current block number based on `parent`, locates the current block's target
+    /// block, and returns the target block miner's lock and the total block reward.
    pub fn block_reward_to_finalize(
         &self,
         parent: &HeaderView,
@@ -154,7 +155,6 @@ impl<'a, CS: ChainStore<'a>> RewardCalculator<'a, CS> {
     /// \ \ \________/___/___/
     /// \ \__________/___/
     /// \____________/
-    ///
     fn proposal_reward(&self, parent: &HeaderView, target: &HeaderView) -> Result {
         let mut target_proposals = self.get_proposal_ids_by_hash(&target.hash());
 
diff --git a/util/snapshot/src/lib.rs b/util/snapshot/src/lib.rs
index 5b1153ef68..cc6e1c29be 100644
--- a/util/snapshot/src/lib.rs
+++ b/util/snapshot/src/lib.rs
@@ -85,8 +85,9 @@ impl Snapshot {
     }
 
     /// Refreshing on block commit is necessary operation, even tip remains unchanged.
-    /// when node relayed compact block,if some uncles were not available from receiver's local sources,
-    /// in GetBlockTransactions/BlockTransactions roundtrip, node will need access block data of uncles.
+    /// When a node relays a compact block, if some uncles are not available from the receiver's
+    /// local sources, the node will need to access the block data of those uncles in the
+    /// GetBlockTransactions/BlockTransactions roundtrip.
     pub fn refresh(&self, store: StoreSnapshot) -> Snapshot {
         Snapshot {
             store,
diff --git a/util/src/linked_hash_set.rs b/util/src/linked_hash_set.rs
index 139c62aa2d..727f6941d5 100644
--- a/util/src/linked_hash_set.rs
+++ b/util/src/linked_hash_set.rs
@@ -159,7 +159,8 @@ where
         }
     }
 
-    /// Visits the values representing the difference, i.e., the values that are in `self` but not in `other`.
+    /// Visits the values representing the difference, i.e., the values that are in `self` but not
+    /// in `other`.
pub fn difference<'a>(&'a self, other: &'a LinkedHashSet) -> Difference<'a, T, S> { Difference { iter: self.iter(), diff --git a/util/test-chain-utils/src/chain.rs b/util/test-chain-utils/src/chain.rs index 8eb73ddd86..c54e993b29 100644 --- a/util/test-chain-utils/src/chain.rs +++ b/util/test-chain-utils/src/chain.rs @@ -45,6 +45,7 @@ lazy_static! { }; } +// ```ignore // #include "ckb_syscalls.h" // #define HASH_SIZE 32 @@ -61,6 +62,7 @@ lazy_static! { // return 0; // } +// ``` lazy_static! { static ref LOAD_INPUT_DATA_HASH: (CellOutput, Bytes, Script) = { let mut file = @@ -210,7 +212,8 @@ pub fn ckb_testnet_consensus() -> Consensus { spec.build_consensus().unwrap() } -/// Return code hash of genesis type_id script which built with output index of SECP256K1/blake160 script. +/// Return code hash of genesis type_id script which built with output index of SECP256K1/blake160 +/// script. #[doc(hidden)] pub fn type_lock_script_code_hash() -> H256 { build_genesis_type_id_script(OUTPUT_INDEX_SECP256K1_BLAKE160_SIGHASH_ALL) @@ -218,8 +221,8 @@ pub fn type_lock_script_code_hash() -> H256 { .unpack() } -/// Return cell output and data in genesis block's cellbase transaction with index of SECP256K1/blake160 script, -/// the genesis block depends on the consensus parameter. +/// Return cell output and data in genesis block's cellbase transaction with index of +/// SECP256K1/blake160 script, the genesis block depends on the consensus parameter. #[doc(hidden)] pub fn secp256k1_blake160_sighash_cell(consensus: Consensus) -> (CellOutput, Bytes) { let genesis_block = consensus.genesis_block(); diff --git a/util/test-chain-utils/src/median_time.rs b/util/test-chain-utils/src/median_time.rs index 71d41ea371..1dd2624ed5 100644 --- a/util/test-chain-utils/src/median_time.rs +++ b/util/test-chain-utils/src/median_time.rs @@ -58,7 +58,8 @@ impl MockMedianTime { Byte32::from_slice(vec.as_slice()).unwrap() } - /// Return transaction info corresponding to the block number, block epoch and transaction index. + /// Return transaction info corresponding to the block number, block epoch and transaction + /// index. #[doc(hidden)] pub fn get_transaction_info( block_number: BlockNumber, diff --git a/util/types/src/core/cell.rs b/util/types/src/core/cell.rs index b9ec250131..e6c8b7bbc2 100644 --- a/util/types/src/core/cell.rs +++ b/util/types/src/core/cell.rs @@ -348,7 +348,8 @@ impl<'a> CellProvider for BlockCellProvider<'a> { }), data_bytes: data.len() as u64, mem_cell_data: Some(data), - mem_cell_data_hash: Some(data_hash), // make sure load_cell_data_hash works within block + mem_cell_data_hash: Some(data_hash), /* make sure load_cell_data_hash + * works within block */ }) }) }) diff --git a/util/types/src/core/error.rs b/util/types/src/core/error.rs index 21e14d3477..1dc532e41b 100644 --- a/util/types/src/core/error.rs +++ b/util/types/src/core/error.rs @@ -14,11 +14,13 @@ pub enum OutPointError { #[error("Unknown({0:?})")] Unknown(Vec), - /// There is an input out-point or dependency out-point which references a newer cell in the same block. + /// There is an input out-point or dependency out-point which references a newer cell in the + /// same block. #[error("OutOfOrder({0:?})")] OutOfOrder(OutPoint), - /// There is a dependency out-point, which is [`DepGroup`], but its output-data is invalid format. The expected output-data format for [`DepGroup`] is [`OutPointVec`]. + /// There is a dependency out-point, which is [`DepGroup`], but its output-data is invalid + /// format. 
The expected output-data format for [`DepGroup`] is [`OutPointVec`]. /// /// [`DepGroup`]: ../enum.DepType.html#variant.DepGroup /// [`OutPointVec`]: ../../packed/struct.OutPointVec.html diff --git a/util/types/src/core/reward.rs b/util/types/src/core/reward.rs index a9c514cbb9..52bb7f3434 100644 --- a/util/types/src/core/reward.rs +++ b/util/types/src/core/reward.rs @@ -24,9 +24,8 @@ pub struct BlockReward { /// - And a part of the secondary issuance goes to the NervosDAO, the ratio depends on how many /// CKB are deposited and locked in the NervosDAO. /// - The rest of the secondary issuance is determined by the community through the governance - /// mechanism. - /// Before the community can reach agreement, this part of the secondary issuance is going to - /// be burned. + /// mechanism. Before the community can reach agreement, this part of the secondary issuance + /// is going to be burned. pub secondary: Capacity, /// The transaction fees that are rewarded to miners because the transaction is committed in /// the block. diff --git a/util/types/src/core/views.rs b/util/types/src/core/views.rs index 1b6fa6fed8..d132c83303 100644 --- a/util/types/src/core/views.rs +++ b/util/types/src/core/views.rs @@ -56,7 +56,8 @@ pub struct HeaderView { /// /// # Notice /// -/// This struct is not implement the trait [`Default`], use [`BlockView::as_uncle()`] to construct it. +/// This struct is not implement the trait [`Default`], use [`BlockView::as_uncle()`] to construct +/// it. /// /// [`Default`]: https://doc.rust-lang.org/std/default/trait.Default.html /// [`UncleBlock`]: ../packed/struct.UncleBlock.html @@ -807,7 +808,8 @@ impl packed::Block { Self::block_into_view_internal(self, tx_hashes, tx_witness_hashes) } - /// Calculates transaction associated hashes, resets all hashes and merkle roots in the header, then converts them into [`BlockView`]. + /// Calculates transaction associated hashes, resets all hashes and merkle roots in the header, + /// then converts them into [`BlockView`]. /// /// [`BlockView`]: ../core/struct.BlockView.html pub fn into_view(self) -> BlockView { diff --git a/util/types/src/extension/calc_hash.rs b/util/types/src/extension/calc_hash.rs index 5aa536bc6a..d2ebb94e54 100644 --- a/util/types/src/extension/calc_hash.rs +++ b/util/types/src/extension/calc_hash.rs @@ -60,7 +60,8 @@ macro_rules! impl_calc_special_hash_for_entity { impl packed::CellOutput { /// Calculates the hash for cell data. /// - /// Returns the empty hash if no data, otherwise, calculates the hash of the data and returns it. + /// Returns the empty hash if no data, otherwise, calculates the hash of the data and returns + /// it. pub fn calc_data_hash(data: &[u8]) -> packed::Byte32 { if data.is_empty() { packed::Byte32::zero() diff --git a/util/types/src/extension/serialized_size.rs b/util/types/src/extension/serialized_size.rs index 6d9b770fdd..0f5a32fe75 100644 --- a/util/types/src/extension/serialized_size.rs +++ b/util/types/src/extension/serialized_size.rs @@ -25,8 +25,8 @@ macro_rules! impl_serialized_size_for_entity { impl<'r> packed::TransactionReader<'r> { /// Calculates the serialized size of a [`Transaction`] in [`Block`]. /// - /// Put each [`Transaction`] into [`Block`] will occupy extra spaces to store [an offset in header], - /// its size is [`molecule::NUMBER_SIZE`]. + /// Put each [`Transaction`] into [`Block`] will occupy extra spaces to store + /// [an offset in header], its size is [`molecule::NUMBER_SIZE`]. 
/// /// [`Transaction`]: https://github.com/nervosnetwork/ckb/blob/v0.36.0/util/types/schemas/blockchain.mol#L66-L69 /// [`Block`]: https://github.com/nervosnetwork/ckb/blob/v0.36.0/util/types/schemas/blockchain.mol#L94-L99 @@ -45,9 +45,10 @@ impl<'r> packed::BlockReader<'r> { /// - Calculates the total serialized size of [`Block`], marks it as `B`. /// - Calculates the serialized size [`ProposalShortIdVec`] for each uncle block, marks them as /// `P0, P1, ..., Pn`. - /// - Even an uncle has no proposals, the [`ProposalShortIdVec`] still has [a header contains its total size], - /// the size is [`molecule::NUMBER_SIZE`], marks it as `h`. - /// - So the serialized size of [`Block`] without [uncle proposals] is: `B - sum(P0 - h, P1 - h, ..., Pn - h)` + /// - Even an uncle has no proposals, the [`ProposalShortIdVec`] still has [a header contains + /// its total size], the size is [`molecule::NUMBER_SIZE`], marks it as `h`. + /// - So the serialized size of [`Block`] without [uncle proposals] is: `B - sum(P0 - h, P1 - h, + /// ..., Pn - h)` /// /// [`Block`]: https://github.com/nervosnetwork/ckb/blob/v0.36.0/util/types/schemas/blockchain.mol#L94-L99 /// [uncle proposals]: https://github.com/nervosnetwork/ckb/blob/v0.36.0/util/types/schemas/blockchain.mol#L91 diff --git a/util/types/src/prelude.rs b/util/types/src/prelude.rs index 949ddc3469..9f516e30bd 100644 --- a/util/types/src/prelude.rs +++ b/util/types/src/prelude.rs @@ -1,6 +1,7 @@ //! This module includes several traits. //! -//! Few traits are re-exported from other crates, few are used as aliases and others are syntactic sugar. +//! Few traits are re-exported from other crates, few are used as aliases and others are syntactic +//! sugar. pub use molecule::{ hex_string, @@ -11,7 +12,8 @@ pub use molecule::{ /// /// We can also customize the panic message or do something else in this alias. pub trait ShouldBeOk { - /// Unwraps an `Option` or a `Result` with confidence and we assume that it's impossible to fail. + /// Unwraps an `Option` or a `Result` with confidence and we assume that it's impossible to + /// fail. fn should_be_ok(self) -> T; } @@ -29,9 +31,11 @@ impl ShouldBeOk for molecule::error::VerificationResult { } } -/// An alias of `from_slice(..)` to mark where we are really have confidence to do unwrap on the result of `from_slice(..)`. +/// An alias of `from_slice(..)` to mark where we are really have confidence to do unwrap on the +/// result of `from_slice(..)`. pub trait FromSliceShouldBeOk<'r>: Reader<'r> { - /// Unwraps the result of `from_slice(..)` with confidence and we assume that it's impossible to fail. + /// Unwraps the result of `from_slice(..)` with confidence and we assume that it's impossible to + /// fail. fn from_slice_should_be_ok(slice: &'r [u8]) -> Self; } diff --git a/util/types/src/utilities/difficulty.rs b/util/types/src/utilities/difficulty.rs index d8a3d0ecb7..97f0019704 100644 --- a/util/types/src/utilities/difficulty.rs +++ b/util/types/src/utilities/difficulty.rs @@ -26,20 +26,20 @@ fn difficulty_to_target(difficulty: &U256) -> U256 { } } /** -* the original nBits implementation inherits properties from a signed data class, -* allowing the target threshold to be negative if the high bit of the significand is set. -* This is useless—the header hash is treated as an unsigned number, -* so it can never be equal to or lower than a negative target threshold. -* -* -* The "compact" format is a representation of a whole -* number N using an unsigned 32bit number similar to a -* floating point format. 
-* The most significant 8 bits are the unsigned exponent of base 256. -* This exponent can be thought of as "number of bytes of N". -* The lower 24 bits are the mantissa. -* N = mantissa * 256^(exponent-3) -*/ + * the original nBits implementation inherits properties from a signed data class, + * allowing the target threshold to be negative if the high bit of the significand is set. + * This is useless—the header hash is treated as an unsigned number, + * so it can never be equal to or lower than a negative target threshold. + * + * + * The "compact" format is a representation of a whole + * number N using an unsigned 32bit number similar to a + * floating point format. + * The most significant 8 bits are the unsigned exponent of base 256. + * This exponent can be thought of as "number of bytes of N". + * The lower 24 bits are the mantissa. + * N = mantissa * 256^(exponent-3) + */ fn get_low64(target: &U256) -> u64 { target.0[0] } diff --git a/verification/src/error.rs b/verification/src/error.rs index 6839bb2b30..aa86d381c2 100644 --- a/verification/src/error.rs +++ b/verification/src/error.rs @@ -18,7 +18,8 @@ pub enum TransactionErrorSource { /// The error types to transactions. #[derive(Error, Debug, PartialEq, Eq, Clone)] pub enum TransactionError { - /// There is an erroneous output that its occupied capacity is greater than its capacity (`output.occupied_capacity() > output.capacity()`). + /// There is an erroneous output that its occupied capacity is greater than its capacity + /// (`output.occupied_capacity() > output.capacity()`). #[error("InsufficientCellCapacity({inner}[{index}]): expected occupied capacity ({occupied_capacity:#x}) <= capacity ({capacity:#x})")] InsufficientCellCapacity { /// The transaction field that causes error. @@ -32,7 +33,8 @@ pub enum TransactionError { capacity: Capacity, }, - /// The total capacity of outputs is less than the total capacity of inputs (`SUM([o.capacity for o in outputs]) > SUM([i.capacity for i in inputs]`). + /// The total capacity of outputs is less than the total capacity of inputs (`SUM([o.capacity + /// for o in outputs]) > SUM([i.capacity for i in inputs]`). #[error("OutputsSumOverflow: expected outputs capacity ({outputs_sum:#x}) <= inputs capacity ({inputs_sum:#x})")] OutputsSumOverflow { /// The total capacity of inputs. @@ -41,7 +43,8 @@ pub enum TransactionError { outputs_sum: Capacity, }, - /// Either inputs or outputs of the transaction are empty (`inputs.is_empty() || outputs.is_empty()`). + /// Either inputs or outputs of the transaction are empty (`inputs.is_empty() || + /// outputs.is_empty()`). #[error("Empty({inner})")] Empty { /// The transaction field that causes the error. @@ -67,7 +70,8 @@ pub enum TransactionError { hash: Byte32, }, - /// The length of outputs is not equal to the length of outputs-data (`outputs.len() != outputs_data.len()`). + /// The length of outputs is not equal to the length of outputs-data (`outputs.len() != + /// outputs_data.len()`). #[error("OutputsDataLengthMismatch: expected outputs data length ({outputs_data_len}) = outputs length ({outputs_len})")] OutputsDataLengthMismatch { /// The length of outputs. @@ -100,7 +104,8 @@ pub enum TransactionError { #[error("CellbaseImmaturity({inner}[{index}])")] CellbaseImmaturity { /// The transaction field that causes the error. - /// It should be `TransactionErrorSource::Inputs` or `TransactionErrorSource::CellDeps`. It does not allow using an immature cell as input out-point and dependency out-point. 
+ /// It should be `TransactionErrorSource::Inputs` or `TransactionErrorSource::CellDeps`. It + /// does not allow using an immature cell as input out-point and dependency out-point. inner: TransactionErrorSource, /// The index of immature input out-point or dependency out-point. index: usize, @@ -127,7 +132,8 @@ pub enum TransactionError { /// A list specifying categories of ckb header error. /// -/// This list is intended to grow over time and it is not recommended to exhaustively match against it. +/// This list is intended to grow over time and it is not recommended to exhaustively match against +/// it. /// /// It is used with the [`HeaderError`]. /// @@ -168,7 +174,8 @@ def_error_base_on_kind!( /// A list specifying categories of ckb block error. /// -/// This list is intended to grow over time and it is not recommended to exhaustively match against it. +/// This list is intended to grow over time and it is not recommended to exhaustively match against +/// it. /// /// It is used with the [`BlockError`]. /// @@ -181,10 +188,12 @@ pub enum BlockErrorKind { /// There are duplicate committed transactions. CommitTransactionDuplicate, - /// The calculated Merkle tree hash of proposed transactions does not match the one in the header. + /// The calculated Merkle tree hash of proposed transactions does not match the one in the + /// header. ProposalTransactionsHash, - /// The calculated Merkle tree hash of committed transactions does not match the one in the header. + /// The calculated Merkle tree hash of committed transactions does not match the one in the + /// header. TransactionsRoot, /// The calculated dao field does not match with the one in the header. @@ -267,7 +276,8 @@ pub enum CommitError { /// [cellbase transaction]: https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0022-transaction-structure/0022-transaction-structure.md#exceptions #[derive(Error, Debug, PartialEq, Eq, Clone, Display)] pub enum CellbaseError { - /// The cellbase input is unexpected. The structure reference of correct cellbase input: [`new_cellbase_input`]. + /// The cellbase input is unexpected. The structure reference of correct cellbase input: + /// [`new_cellbase_input`]. /// /// [`new_cellbase_input`]: https://github.com/nervosnetwork/ckb/blob/ee0ccecd87013821a2e68120ba3510393c0373e7/util/types/src/extension/shortcuts.rs#L107-L109 InvalidInput, @@ -275,9 +285,11 @@ pub enum CellbaseError { InvalidRewardAmount, /// The cellbase output lock does not match with the target lock. /// - /// As for 0 ~ PROPOSAL_WINDOW.farthest blocks, cellbase outputs should be empty; otherwise, lock of first cellbase output should match with the target block. + /// As for 0 ~ PROPOSAL_WINDOW.farthest blocks, cellbase outputs should be empty; otherwise, + /// lock of first cellbase output should match with the target block. /// - /// Assumes the current block number is `i`, then its target block is that: (1) on that same chain with current block; (2) number is `i - PROPOSAL_WINDOW.farthest - 1`. + /// Assumes the current block number is `i`, then its target block is that: (1) on that same + /// chain with current block; (2) number is `i - PROPOSAL_WINDOW.farthest - 1`. InvalidRewardTarget, /// The cellbase witness is not in [`CellbaseWitness`] format. /// @@ -477,7 +489,8 @@ impl HeaderError { /// if the header's timestamp is more than ALLOWED_FUTURE_BLOCKTIME ahead of our current time. 
/// In that case, the header may become valid in the future, /// and we don't want to disconnect a peer merely for serving us one too-far-ahead block header, - /// to prevent an attacker from splitting the network by mining a block right at the ALLOWED_FUTURE_BLOCKTIME boundary. + /// to prevent an attacker from splitting the network by mining a block right at the + /// ALLOWED_FUTURE_BLOCKTIME boundary. /// /// [`TimestampError::is_too_new`] #[doc(hidden)] diff --git a/verification/src/transaction_verifier.rs b/verification/src/transaction_verifier.rs index 267b35e79e..6705a24ade 100644 --- a/verification/src/transaction_verifier.rs +++ b/verification/src/transaction_verifier.rs @@ -537,7 +537,8 @@ const REMAIN_FLAGS_BITS: u64 = 0x1f00_0000_0000_0000; /// Metric represent value pub enum SinceMetric { - /// The metric_flag is 00, `value` can be explained as a block number or a relative block number. + /// The metric_flag is 00, `value` can be explained as a block number or a relative block + /// number. BlockNumber(u64), /// The metric_flag is 01, value can be explained as an absolute epoch or relative epoch. EpochNumberWithFraction(EpochNumberWithFraction), diff --git a/verification/src/uncles_verifier.rs b/verification/src/uncles_verifier.rs index 855d8ad0ad..f0cdf55a71 100644 --- a/verification/src/uncles_verifier.rs +++ b/verification/src/uncles_verifier.rs @@ -23,11 +23,13 @@ pub struct UnclesVerifier<'a, P> { block: &'a BlockView, } -// A block B1 is considered to be the uncle of another block B2 if all of the following conditions are met: -// (1) they are in the same epoch, sharing the same difficulty; -// (2) height(B2) > height(B1); -// (3) B1's parent is either B2's ancestor or embedded in B2 or its ancestors as an uncle; -// and (4) B2 is the first block in its chain to refer to B1. +// A block B1 is considered to be the uncle of another block B2 if all of the following conditions +// are met: +// +// 1. they are in the same epoch, sharing the same difficulty; +// 2. height(B2) > height(B1); +// 3. B1's parent is either B2's ancestor or embedded in B2 or its ancestors as an uncle; +// 4. B2 is the first block in its chain to refer to B1. impl<'a, P> UnclesVerifier<'a, P> where P: UncleProvider, @@ -36,11 +38,12 @@ where UnclesVerifier { provider, block } } - // - uncles_hash - // - uncles_num - // - depth - // - uncle not in main chain - // - uncle duplicate + // + // - uncles_hash + // - uncles_num + // - depth + // - uncle not in main chain + // - uncle duplicate pub fn verify(&self) -> Result<(), Error> { let uncles_count = self.block.data().uncles().len() as u32;
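The uncle conditions listed in the `uncles_verifier.rs` comment above can be read as a predicate over two headers; the sketch below only covers conditions 1 and 2, since conditions 3 and 4 need chain context (ancestry and earlier uncle references). The `CandidateUncle` type and its field names are hypothetical and are not the verifier's actual types:

```rust
// Illustrative predicate over the uncle conditions listed above; a hypothetical
// minimal header shape, not the UnclesVerifier implementation.
struct CandidateUncle {
    epoch_number: u64,
    compact_target: u32,
    number: u64,
}

// Conditions 1 and 2: same epoch (hence the same difficulty) and height(B2) > height(B1).
// Conditions 3 and 4 require walking B2's ancestors and their embedded uncles,
// so they are left out of this sketch.
fn passes_local_uncle_checks(b1: &CandidateUncle, b2: &CandidateUncle) -> bool {
    b1.epoch_number == b2.epoch_number
        && b1.compact_target == b2.compact_target
        && b2.number > b1.number
}

fn main() {
    let b1 = CandidateUncle { epoch_number: 5, compact_target: 0x2001_0000, number: 100 };
    let b2 = CandidateUncle { epoch_number: 5, compact_target: 0x2001_0000, number: 103 };
    assert!(passes_local_uncle_checks(&b1, &b2));
}
```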