From 6163d7baf71dbca7d21af09a204f5f6c5c515cd4 Mon Sep 17 00:00:00 2001 From: Stan Bondi Date: Mon, 16 May 2022 10:36:20 +0400 Subject: [PATCH] chore: use standard lint settings and fix lints (#4066) Description --- - updates lints.toml to match [tari-meta](https://github.com/tari-project/meta/blob/main/lints.toml) - fixes a number of clippy minor lints Motivation and Context --- Tari repo uses standard linter settings EDIT: there are too many cast clippys to investigate, so that is left for another PR Larger fixes are in #4063 #4064 #4065 which should be merged before this PR. How Has This Been Tested? --- `cargo lints clippy` passes --- .../backend/src/commands/launch_docker.rs | 1 + .../launchpad/backend/src/commands/service.rs | 8 +- .../launchpad/backend/src/docker/workspace.rs | 6 +- .../src/conversions/block_header.rs | 8 +- .../tari_app_grpc/src/conversions/mod.rs | 23 ++-- .../src/conversions/new_block_template.rs | 2 +- .../src/conversions/output_features.rs | 4 +- .../tari_app_grpc/src/conversions/peer.rs | 31 ++--- .../src/conversions/transaction_kernel.rs | 4 +- .../src/identity_management.rs | 6 +- .../tari_app_utilities/src/utilities.rs | 2 +- applications/tari_base_node/src/bootstrap.rs | 10 +- applications/tari_base_node/src/builder.rs | 8 +- applications/tari_base_node/src/cli.rs | 1 + .../tari_base_node/src/commands/cli.rs | 14 +- .../src/commands/command/header_stats.rs | 1 + applications/tari_base_node/src/config.rs | 1 + .../src/grpc/base_node_grpc_server.rs | 1 + applications/tari_base_node/src/main.rs | 2 +- applications/tari_base_node/src/recovery.rs | 6 +- .../tari_collectibles/src-tauri/src/cli.rs | 10 +- .../sqlite/sqlite_assets_table_gateway.rs | 2 + .../sqlite_key_indices_table_gateway.rs | 4 + .../src/automation/command_parser.rs | 1 + .../src/automation/commands.rs | 2 + .../src/automation/error.rs | 2 +- applications/tari_console_wallet/src/cli.rs | 1 + .../tari_console_wallet/src/init/mod.rs | 54 ++++---- 
applications/tari_console_wallet/src/main.rs | 2 +- .../tari_console_wallet/src/recovery.rs | 11 +- .../src/ui/components/base_node.rs | 1 + .../src/ui/components/send_tab.rs | 1 + .../src/ui/components/transactions_tab.rs | 1 + .../tari_console_wallet/src/ui/mod.rs | 2 +- .../src/ui/state/wallet_event_monitor.rs | 1 + .../tari_console_wallet/src/wallet_modes.rs | 14 +- .../tari_merge_mining_proxy/src/config.rs | 1 + .../tari_merge_mining_proxy/src/proxy.rs | 3 + applications/tari_miner/src/difficulty.rs | 4 + applications/tari_miner/src/main.rs | 7 +- applications/tari_miner/src/miner.rs | 1 + .../stratum/stratum_controller/controller.rs | 1 + applications/tari_validator_node/src/comms.rs | 15 +-- .../tari_validator_node/src/dan_node.rs | 8 +- applications/tari_validator_node/src/main.rs | 2 +- base_layer/common_types/src/tx_id.rs | 28 ++-- .../comms_interface/inbound_handlers.rs | 5 + base_layer/core/src/base_node/rpc/service.rs | 6 +- .../state_machine_service/states/listening.rs | 3 + .../base_node/sync/block_sync/synchronizer.rs | 1 + .../sync/header_sync/synchronizer.rs | 10 +- .../sync/horizon_state_sync/synchronizer.rs | 7 + .../core/src/base_node/sync/rpc/service.rs | 1 + .../src/base_node/sync/rpc/sync_utxos_task.rs | 9 +- base_layer/core/src/blocks/genesis_block.rs | 2 + .../src/chain_storage/blockchain_database.rs | 11 +- .../core/src/chain_storage/lmdb_db/helpers.rs | 1 + .../core/src/chain_storage/lmdb_db/lmdb_db.rs | 83 +++++++----- .../core/src/consensus/consensus_constants.rs | 5 + base_layer/core/src/covenants/filters/xor.rs | 2 +- base_layer/core/src/covenants/output_set.rs | 2 +- base_layer/core/src/lib.rs | 2 +- .../core/src/mempool/mempool_storage.rs | 6 +- base_layer/core/src/mempool/mod.rs | 6 +- .../priority/prioritized_transaction.rs | 2 + .../core/src/mempool/proto/stats_response.rs | 12 +- .../core/src/mempool/reorg_pool/reorg_pool.rs | 2 + .../src/mempool/service/inbound_handlers.rs | 1 + .../core/src/mempool/sync_protocol/mod.rs | 11 
+- .../unconfirmed_pool/unconfirmed_pool.rs | 2 + .../core/src/proof_of_work/lwma_diff.rs | 1 + .../proof_of_work/monero_rx/fixed_array.rs | 6 +- .../src/proof_of_work/monero_rx/helpers.rs | 4 +- base_layer/core/src/proof_of_work/sha3_pow.rs | 8 +- base_layer/core/src/proto/block_header.rs | 7 +- base_layer/core/src/proto/transaction.rs | 8 +- base_layer/core/src/proto/utils.rs | 15 ++- .../core/src/transactions/format_currency.rs | 6 +- .../core/src/transactions/test_helpers.rs | 1 + .../transaction_protocol/recipient.rs | 4 +- .../transaction_protocol/sender.rs | 38 ++---- .../transaction_protocol/single_receiver.rs | 2 +- .../transaction_initializer.rs | 1 + base_layer/core/src/validation/error.rs | 4 +- base_layer/core/src/validation/header_iter.rs | 1 + base_layer/core/src/validation/test.rs | 1 + base_layer/core/tests/block_validation.rs | 3 + .../chain_storage_tests/chain_storage.rs | 12 +- .../core/tests/helpers/block_builders.rs | 2 + base_layer/core/tests/helpers/mod.rs | 1 - .../core/tests/helpers/pow_blockchain.rs | 124 ------------------ .../core/tests/helpers/test_blockchain.rs | 1 + base_layer/core/tests/mempool.rs | 5 + base_layer/core/tests/node_service.rs | 1 + base_layer/mmr/src/common.rs | 1 - base_layer/mmr/src/merkle_mountain_range.rs | 1 - base_layer/mmr/src/mutable_mmr.rs | 1 - base_layer/mmr/src/pruned_hashset.rs | 1 - base_layer/p2p/src/initialization.rs | 1 - base_layer/p2p/src/services/utils.rs | 1 - base_layer/tari_mining_helper_ffi/src/lib.rs | 18 ++- .../wallet/src/contacts_service/service.rs | 13 +- .../src/contacts_service/storage/sqlite_db.rs | 2 + base_layer/wallet/src/error.rs | 4 +- .../src/output_manager_service/error.rs | 2 +- .../src/output_manager_service/service.rs | 5 + .../storage/sqlite_db/mod.rs | 15 ++- .../storage/sqlite_db/new_output_sql.rs | 3 +- .../storage/sqlite_db/output_sql.rs | 8 +- .../transaction_broadcast_protocol.rs | 1 + .../protocols/transaction_receive_protocol.rs | 1 + 
.../protocols/transaction_send_protocol.rs | 3 + .../wallet/src/transaction_service/service.rs | 6 +- .../transaction_service/storage/database.rs | 22 +++- .../transaction_service/storage/sqlite_db.rs | 25 ++-- .../tasks/check_faux_transaction_status.rs | 1 + .../tasks/send_finalized_transaction.rs | 1 + .../tasks/send_transaction_reply.rs | 1 + base_layer/wallet/src/wallet.rs | 4 +- .../output_manager_service_tests/service.rs | 20 +-- .../output_manager_service_tests/storage.rs | 12 +- base_layer/wallet/tests/support/comms_rpc.rs | 3 + .../tests/transaction_service_tests/mod.rs | 4 + .../transaction_service_tests/service.rs | 32 ++--- .../transaction_service_tests/storage.rs | 6 +- .../transaction_protocols.rs | 121 +++++++++-------- base_layer/wallet/tests/utxo_scanner.rs | 3 + base_layer/wallet/tests/wallet.rs | 15 +-- base_layer/wallet_ffi/src/callback_handler.rs | 1 + .../wallet_ffi/src/callback_handler_tests.rs | 2 + base_layer/wallet_ffi/src/error.rs | 1 + base_layer/wallet_ffi/src/lib.rs | 24 ++-- base_layer/wallet_ffi/src/tasks.rs | 1 + common/src/configuration/loader.rs | 8 +- common/src/exit_codes.rs | 18 +-- comms/core/src/backoff.rs | 10 +- comms/core/src/peer_manager/peer_storage.rs | 1 + .../core/src/protocol/rpc/server/chunking.rs | 1 + comms/dht/examples/memory_net/utilities.rs | 2 + comms/dht/examples/memorynet.rs | 1 + ...rynet_graph_network_join_multiple_seeds.rs | 1 + .../memorynet_graph_network_track_join.rs | 1 + comms/dht/src/actor.rs | 5 +- comms/dht/src/connectivity/mod.rs | 9 +- comms/dht/src/dedup/dedup_cache.rs | 2 + comms/dht/src/envelope.rs | 2 +- comms/dht/src/network_discovery/ready.rs | 2 + comms/dht/src/outbound/message_send_state.rs | 1 + comms/dht/src/store_forward/database/mod.rs | 5 +- .../dht/src/store_forward/saf_handler/task.rs | 1 + .../src/test_utils/store_and_forward_mock.rs | 3 + comms/dht/tests/dht.rs | 2 + comms/rpc_macros/src/expand.rs | 2 + dan_layer/core/src/models/view_id.rs | 4 +- 
.../core/src/templates/tip004_template.rs | 1 + dan_layer/core/src/workers/states/prepare.rs | 1 + .../storage_sqlite/src/models/state_op_log.rs | 2 + .../src/sqlite_chain_backend_adapter.rs | 8 ++ .../src/sqlite_state_db_backend_adapter.rs | 2 + infrastructure/libtor/src/tor.rs | 2 +- infrastructure/tari_script/src/script.rs | 2 + .../tari_script/src/script_context.rs | 1 - lints.toml | 51 +++---- 163 files changed, 708 insertions(+), 595 deletions(-) delete mode 100644 base_layer/core/tests/helpers/pow_blockchain.rs diff --git a/applications/launchpad/backend/src/commands/launch_docker.rs b/applications/launchpad/backend/src/commands/launch_docker.rs index 87acac2c26..471fcba289 100644 --- a/applications/launchpad/backend/src/commands/launch_docker.rs +++ b/applications/launchpad/backend/src/commands/launch_docker.rs @@ -48,6 +48,7 @@ use crate::{ #[derive(Derivative, Serialize, Deserialize)] #[derivative(Debug)] +#[allow(clippy::struct_excessive_bools)] pub struct WorkspaceLaunchOptions { root_folder: String, tari_network: String, diff --git a/applications/launchpad/backend/src/commands/service.rs b/applications/launchpad/backend/src/commands/service.rs index b015c6d5ee..4cbf3d6a1c 100644 --- a/applications/launchpad/backend/src/commands/service.rs +++ b/applications/launchpad/backend/src/commands/service.rs @@ -246,14 +246,14 @@ async fn start_service_impl( app.clone(), log_events_name.as_str(), container_name.as_str(), - docker.clone(), + &docker, workspace, ); container_stats( app.clone(), stats_events_name.as_str(), container_name.as_str(), - docker.clone(), + &docker, workspace, ); // Collect data for the return object @@ -280,7 +280,7 @@ fn container_logs( app: AppHandle, event_name: &str, container_name: &str, - docker: Docker, + docker: &Docker, workspace: &mut TariWorkspace, ) { info!("Setting up log events for {}", container_name); @@ -312,7 +312,7 @@ fn container_stats( app: AppHandle, event_name: &str, container_name: &str, - docker: Docker, + 
docker: &Docker, workspace: &mut TariWorkspace, ) { info!("Setting up Resource stats events for {}", container_name); diff --git a/applications/launchpad/backend/src/docker/workspace.rs b/applications/launchpad/backend/src/docker/workspace.rs index 1c0419fcd1..e013001004 100644 --- a/applications/launchpad/backend/src/docker/workspace.rs +++ b/applications/launchpad/backend/src/docker/workspace.rs @@ -289,11 +289,11 @@ impl TariWorkspace { Ok(ids) } - /// Create and return a [`Stream`] of [`LogMessage`] instances for the `name`d container in the workspace. + /// Create and return a [`Stream`] of [`LogMessage`] instances for the `name`d container in the workspace. pub fn logs( &self, container_name: &str, - docker: Docker, + docker: &Docker, ) -> Option>> { let options = LogsOptions:: { follow: true, @@ -313,7 +313,7 @@ impl TariWorkspace { pub fn resource_stats( &self, name: &str, - docker: Docker, + docker: &Docker, ) -> Option>> { if let Some(container) = self.containers.get(name) { let options = StatsOptions { diff --git a/applications/tari_app_grpc/src/conversions/block_header.rs b/applications/tari_app_grpc/src/conversions/block_header.rs index c3a6765c3a..0aef8815e7 100644 --- a/applications/tari_app_grpc/src/conversions/block_header.rs +++ b/applications/tari_app_grpc/src/conversions/block_header.rs @@ -39,7 +39,7 @@ impl From for grpc::BlockHeader { version: u32::from(h.version), height: h.height, prev_hash: h.prev_hash, - timestamp: Some(datetime_to_timestamp(h.timestamp)), + timestamp: datetime_to_timestamp(h.timestamp), input_mr: h.input_mr, output_mr: h.output_mr, output_mmr_size: h.output_mmr_size, @@ -69,15 +69,15 @@ impl TryFrom for BlockHeader { let timestamp = header .timestamp - .map(timestamp_to_datetime) - .ok_or_else(|| "timestamp not provided".to_string())?; + .and_then(timestamp_to_datetime) + .ok_or_else(|| "timestamp not provided or was negative".to_string())?; let pow = match header.pow { Some(p) => ProofOfWork::try_from(p)?, None => 
return Err("No proof of work provided".into()), }; Ok(Self { - version: header.version as u16, + version: u16::try_from(header.version).map_err(|_| "header version too large")?, height: header.height, prev_hash: header.prev_hash, timestamp, diff --git a/applications/tari_app_grpc/src/conversions/mod.rs b/applications/tari_app_grpc/src/conversions/mod.rs index fa9c2238c3..015a960486 100644 --- a/applications/tari_app_grpc/src/conversions/mod.rs +++ b/applications/tari_app_grpc/src/conversions/mod.rs @@ -39,6 +39,9 @@ mod transaction_kernel; mod transaction_output; mod unblinded_output; +use std::convert::TryFrom; + +use chrono::Utc; use prost_types::Timestamp; use tari_utilities::epoch_time::EpochTime; @@ -64,11 +67,10 @@ pub use self::{ use crate::{tari_rpc as grpc, tari_rpc::BlockGroupRequest}; /// Utility function that converts a `EpochTime` to a `prost::Timestamp` -pub fn datetime_to_timestamp(datetime: EpochTime) -> Timestamp { - Timestamp { - seconds: datetime.as_u64() as i64, - nanos: 0, - } +/// Returns None if the EpochTime is negative or larger than i64::MAX. +pub(self) fn datetime_to_timestamp(datetime: EpochTime) -> Option { + let seconds = i64::try_from(datetime.as_u64()).ok()?; + Some(Timestamp { seconds, nanos: 0 }) } /// Utility function that converts a `chrono::NaiveDateTime` to a `prost::Timestamp` @@ -79,13 +81,18 @@ pub fn naive_datetime_to_timestamp(datetime: chrono::NaiveDateTime) -> Timestamp } } -pub(crate) fn timestamp_to_datetime(timestamp: Timestamp) -> EpochTime { - (timestamp.seconds as u64).into() +/// Converts a protobuf Timestamp to an EpochTime. +/// Returns None if the timestamp is negative. 
+pub(self) fn timestamp_to_datetime(timestamp: Timestamp) -> Option { + u64::try_from(timestamp.seconds).ok().map(Into::into) } /// Current unix time as timestamp (seconds part only) pub fn timestamp() -> Timestamp { - datetime_to_timestamp(EpochTime::now()) + Timestamp { + seconds: Utc::now().timestamp(), + nanos: 0, + } } impl From for grpc::IntegerValue { diff --git a/applications/tari_app_grpc/src/conversions/new_block_template.rs b/applications/tari_app_grpc/src/conversions/new_block_template.rs index 6e27b2cddd..0037c7f096 100644 --- a/applications/tari_app_grpc/src/conversions/new_block_template.rs +++ b/applications/tari_app_grpc/src/conversions/new_block_template.rs @@ -85,7 +85,7 @@ impl TryFrom for NewBlockTemplate { None => return Err("No proof of work provided".into()), }; let header = NewBlockHeaderTemplate { - version: header.version as u16, + version: u16::try_from(header.version).map_err(|_| "header version too large")?, height: header.height, prev_hash: header.prev_hash, total_kernel_offset, diff --git a/applications/tari_app_grpc/src/conversions/output_features.rs b/applications/tari_app_grpc/src/conversions/output_features.rs index 2276ced12a..6e4bf3d805 100644 --- a/applications/tari_app_grpc/src/conversions/output_features.rs +++ b/applications/tari_app_grpc/src/conversions/output_features.rs @@ -54,13 +54,13 @@ impl TryFrom for OutputFeatures { } else { Some(PublicKey::from_bytes(features.parent_public_key.as_bytes()).map_err(|err| format!("{:?}", err))?) 
}; + let flags = u8::try_from(features.flags).map_err(|_| "Invalid output flags: overflowed u8")?; Ok(OutputFeatures::new( OutputFeaturesVersion::try_from( u8::try_from(features.version).map_err(|_| "Invalid version: overflowed u8")?, )?, - OutputFlags::from_bits(features.flags as u8) - .ok_or_else(|| "Invalid or unrecognised output flags".to_string())?, + OutputFlags::from_bits(flags).ok_or_else(|| "Invalid or unrecognised output flags".to_string())?, features.maturity, u8::try_from(features.recovery_byte).map_err(|_| "Invalid recovery byte: overflowed u8")?, features.metadata, diff --git a/applications/tari_app_grpc/src/conversions/peer.rs b/applications/tari_app_grpc/src/conversions/peer.rs index 839dbfb174..00dd104333 100644 --- a/applications/tari_app_grpc/src/conversions/peer.rs +++ b/applications/tari_app_grpc/src/conversions/peer.rs @@ -23,36 +23,29 @@ use tari_comms::{connectivity::ConnectivityStatus, net_address::MutliaddrWithStats, peer_manager::Peer}; use tari_utilities::ByteArray; -use crate::{conversions::datetime_to_timestamp, tari_rpc as grpc}; +use crate::{conversions::naive_datetime_to_timestamp, tari_rpc as grpc}; +#[allow(clippy::cast_possible_truncation)] +#[allow(clippy::cast_sign_loss)] impl From for grpc::Peer { fn from(peer: Peer) -> Self { let public_key = peer.public_key.to_vec(); let node_id = peer.node_id.to_vec(); - let mut addresses: Vec = Vec::new(); - let last_connection = match peer.addresses.last_seen() { - Some(v) => Some(datetime_to_timestamp((v.timestamp() as u64).into())), - None => Some(datetime_to_timestamp(0.into())), - }; + let mut addresses = Vec::with_capacity(peer.addresses.addresses.len()); + let last_connection = peer + .addresses + .last_seen() + .map(|v| naive_datetime_to_timestamp(v.naive_utc())); for address in peer.addresses.addresses { addresses.push(address.clone().into()) } let flags = u32::from(peer.flags.bits()); - let banned_until = match peer.banned_until { - Some(v) => 
Some(datetime_to_timestamp((v.timestamp() as u64).into())), - None => Some(datetime_to_timestamp(0.into())), - }; + let banned_until = peer.banned_until.map(naive_datetime_to_timestamp); let banned_reason = peer.banned_reason.to_string(); - let offline_at = match peer.offline_at { - Some(v) => Some(datetime_to_timestamp((v.timestamp() as u64).into())), - None => Some(datetime_to_timestamp(0.into())), - }; + let offline_at = peer.offline_at.map(naive_datetime_to_timestamp); let features = peer.features.bits(); - let last_connected_at = match peer.connection_stats.last_connected_at { - Some(v) => Some(datetime_to_timestamp((v.timestamp() as u64).into())), - None => Some(datetime_to_timestamp(0.into())), - }; + let last_connected_at = peer.connection_stats.last_connected_at.map(naive_datetime_to_timestamp); let supported_protocols = peer.supported_protocols.into_iter().map(|p| p.to_vec()).collect(); let user_agent = peer.user_agent; Self { @@ -77,7 +70,7 @@ impl From for grpc::Address { let address = address_with_stats.address.to_vec(); let last_seen = match address_with_stats.last_seen { Some(v) => v.to_string(), - None => "".to_string(), + None => String::new(), }; let connection_attempts = address_with_stats.connection_attempts; let rejected_message_count = address_with_stats.rejected_message_count; diff --git a/applications/tari_app_grpc/src/conversions/transaction_kernel.rs b/applications/tari_app_grpc/src/conversions/transaction_kernel.rs index 0f2075ef75..e14b9ce731 100644 --- a/applications/tari_app_grpc/src/conversions/transaction_kernel.rs +++ b/applications/tari_app_grpc/src/conversions/transaction_kernel.rs @@ -44,11 +44,13 @@ impl TryFrom for TransactionKernel { .try_into() .map_err(|_| "excess_sig could not be converted".to_string())?; + let kernel_features = u8::try_from(kernel.features).map_err(|_| "kernel features must be a single byte")?; + Ok(Self::new( TransactionKernelVersion::try_from( u8::try_from(kernel.version).map_err(|_| "Invalid version: 
overflowed u8")?, )?, - KernelFeatures::from_bits(kernel.features as u8) + KernelFeatures::from_bits(kernel_features) .ok_or_else(|| "Invalid or unrecognised kernel feature flag".to_string())?, MicroTari::from(kernel.fee), kernel.lock_height, diff --git a/applications/tari_app_utilities/src/identity_management.rs b/applications/tari_app_utilities/src/identity_management.rs index db2afa0ff1..79dcd5f8e1 100644 --- a/applications/tari_app_utilities/src/identity_management.rs +++ b/applications/tari_app_utilities/src/identity_management.rs @@ -63,7 +63,7 @@ pub fn setup_node_identity>( }, Err(IdentityError::InvalidPermissions) => Err(ExitError::new( ExitCode::ConfigError, - &format!( + format!( "{path} has incorrect permissions. You can update the identity file with the correct permissions \ using 'chmod 600 {path}', or delete the identity file and a new one will be created on next start", path = identity_file.as_ref().to_string_lossy() @@ -82,7 +82,7 @@ pub fn setup_node_identity>( ); return Err(ExitError::new( ExitCode::ConfigError, - &format!( + format!( "Node identity information not found. {}. You can update the configuration file to point \ to a valid node identity file, or re-run the node to create a new one", e @@ -112,7 +112,7 @@ pub fn setup_node_identity>( error!(target: LOG_TARGET, "Could not create new node id. {}.", e); Err(ExitError::new( ExitCode::ConfigError, - &format!("Could not create new node id. {}.", e), + format!("Could not create new node id. {}.", e), )) }, } diff --git a/applications/tari_app_utilities/src/utilities.rs b/applications/tari_app_utilities/src/utilities.rs index a8fe833a13..07aa14c28e 100644 --- a/applications/tari_app_utilities/src/utilities.rs +++ b/applications/tari_app_utilities/src/utilities.rs @@ -40,7 +40,7 @@ pub fn setup_runtime() -> Result { let mut builder = runtime::Builder::new_multi_thread(); builder.enable_all().build().map_err(|e| { let msg = format!("There was an error while building the node runtime. 
{}", e); - ExitError::new(ExitCode::UnknownError, &msg) + ExitError::new(ExitCode::UnknownError, msg) }) } diff --git a/applications/tari_base_node/src/bootstrap.rs b/applications/tari_base_node/src/bootstrap.rs index cca9c1b3f8..4b663d783a 100644 --- a/applications/tari_base_node/src/bootstrap.rs +++ b/applications/tari_base_node/src/bootstrap.rs @@ -93,7 +93,7 @@ where B: BlockchainBackend + 'static .map(|s| SeedPeer::from_str(s)) .map(|r| r.map(Peer::from).map(|p| p.node_id)) .collect::, _>>() - .map_err(|e| ExitError::new(ExitCode::ConfigError, &e))?; + .map_err(|e| ExitError::new(ExitCode::ConfigError, e))?; debug!(target: LOG_TARGET, "{} sync peer(s) configured", sync_peers.len()); @@ -101,7 +101,7 @@ where B: BlockchainBackend + 'static let mempool_protocol = mempool_sync.get_protocol_extension(); let tor_identity = load_from_json(&base_node_config.tor_identity_file) - .map_err(|e| ExitError::new(ExitCode::ConfigError, &e))?; + .map_err(|e| ExitError::new(ExitCode::ConfigError, e))?; p2p_config.transport.tor.identity = tor_identity; let mut handles = StackBuilder::new(self.interrupt_signal) @@ -157,19 +157,19 @@ where B: BlockchainBackend + 'static let comms = Self::setup_rpc_services(comms, &handles, self.db.into(), &p2p_config); let comms = initialization::spawn_comms_using_transport(comms, p2p_config.transport.clone()) .await - .map_err(|e| ExitError::new(ExitCode::NetworkError, &e))?; + .map_err(|e| ExitError::new(ExitCode::NetworkError, e))?; // Save final node identity after comms has initialized. This is required because the public_address can be // changed by comms during initialization when using tor. match p2p_config.transport.transport_type { TransportType::Tcp => {}, // Do not overwrite TCP public_address in the base_node_id! 
_ => { identity_management::save_as_json(&base_node_config.identity_file, &*comms.node_identity()) - .map_err(|e| ExitError::new(ExitCode::IdentityError, &e))?; + .map_err(|e| ExitError::new(ExitCode::IdentityError, e))?; }, }; if let Some(hs) = comms.hidden_service() { identity_management::save_as_json(&base_node_config.tor_identity_file, hs.tor_identity()) - .map_err(|e| ExitError::new(ExitCode::IdentityError, &e))?; + .map_err(|e| ExitError::new(ExitCode::IdentityError, e))?; } handles.register(comms); diff --git a/applications/tari_base_node/src/builder.rs b/applications/tari_base_node/src/builder.rs index bf1e6c8632..034272f8fe 100644 --- a/applications/tari_base_node/src/builder.rs +++ b/applications/tari_base_node/src/builder.rs @@ -179,7 +179,7 @@ pub async fn configure_and_initialize_node( app_config.base_node.lmdb_path.as_path(), app_config.base_node.lmdb.clone(), ) - .map_err(|e| ExitError::new(ExitCode::DatabaseError, &e))?; + .map_err(|e| ExitError::new(ExitCode::DatabaseError, e))?; build_node_context(backend, app_config, node_identity, interrupt_signal).await? 
}, }; @@ -225,17 +225,17 @@ async fn build_node_context( backend, rules.clone(), validators, - app_config.base_node.storage.clone(), + app_config.base_node.storage, DifficultyCalculator::new(rules.clone(), randomx_factory), ) .map_err(|err| { if let ChainStorageError::DatabaseResyncRequired(reason) = err { return ExitError::new( ExitCode::DbInconsistentState, - &format!("You may need to re-sync your database because {}", reason), + format!("You may need to re-sync your database because {}", reason), ); } else { - ExitError::new(ExitCode::DatabaseError, &err) + ExitError::new(ExitCode::DatabaseError, err) } })?; let mempool_validator = MempoolValidator::new(vec![ diff --git a/applications/tari_base_node/src/cli.rs b/applications/tari_base_node/src/cli.rs index 3e634b300a..911953470d 100644 --- a/applications/tari_base_node/src/cli.rs +++ b/applications/tari_base_node/src/cli.rs @@ -28,6 +28,7 @@ const DEFAULT_NETWORK: &str = "dibbler"; #[derive(Parser, Debug)] #[clap(author, version, about, long_about = None)] #[clap(propagate_version = true)] +#[allow(clippy::struct_excessive_bools)] pub(crate) struct Cli { #[clap(flatten)] pub common: CommonCliArgs, diff --git a/applications/tari_base_node/src/commands/cli.rs b/applications/tari_base_node/src/commands/cli.rs index aaa1185929..45298a5ff3 100644 --- a/applications/tari_base_node/src/commands/cli.rs +++ b/applications/tari_base_node/src/commands/cli.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{convert::TryFrom, io::stdout}; +use std::io::stdout; use chrono::{Datelike, Utc}; use crossterm::{ @@ -108,6 +108,7 @@ fn multiline_find_display_length(lines: &str) -> usize { /// Try to resize terminal to make sure the width is enough. /// In case of error, just simply print out the error. 
+#[allow(clippy::cast_possible_truncation)] fn resize_terminal_to_fit_the_box(width: usize, height: usize) { if let Err(e) = execute!(stdout(), SetSize(width as u16, height as u16)) { println!("Can't resize terminal to fit the box. Error: {}", e) @@ -115,16 +116,16 @@ fn resize_terminal_to_fit_the_box(width: usize, height: usize) { } /// Prints a pretty banner on the console as well as the list of available commands -pub fn print_banner(commands: Vec, chunk_size: i32, resize_terminal: bool) { +pub fn print_banner(commands: Vec, chunk_size: usize, resize_terminal: bool) { let terminal_title = format!("Tari Base Node - Version {}", consts::APP_VERSION); if let Err(e) = execute!(stdout(), SetTitle(terminal_title.as_str())) { println!("Error setting terminal title. {}", e) } - let chunks: Vec> = commands.chunks(chunk_size as usize).map(|x| x.to_vec()).collect(); + let chunks: Vec> = commands.chunks(chunk_size).map(|x| x.to_vec()).collect(); let mut cell_sizes = Vec::new(); - let mut row_cell_count: i32 = 0; + let mut row_cell_count: usize = 0; let mut command_data: Vec> = Vec::new(); for chunk in chunks { let mut cells: Vec = Vec::new(); @@ -145,10 +146,7 @@ pub fn print_banner(commands: Vec, chunk_size: i32, resize_terminal: boo command_data.push(cells); } - let row_cell_sizes: Vec> = cell_sizes - .chunks(usize::try_from(chunk_size).unwrap()) - .map(|x| x.to_vec()) - .collect(); + let row_cell_sizes: Vec> = cell_sizes.chunks(chunk_size).map(|x| x.to_vec()).collect(); let mut row_cell_size = Vec::new(); let mut max_cell_size: usize = 0; for sizes in row_cell_sizes { diff --git a/applications/tari_base_node/src/commands/command/header_stats.rs b/applications/tari_base_node/src/commands/command/header_stats.rs index 1602f32a1c..55f66a94de 100644 --- a/applications/tari_base_node/src/commands/command/header_stats.rs +++ b/applications/tari_base_node/src/commands/command/header_stats.rs @@ -61,6 +61,7 @@ impl HandleCommand for CommandContext { } impl CommandContext { + 
#[allow(clippy::cast_possible_wrap)] pub async fn save_header_stats( &self, start_height: u64, diff --git a/applications/tari_base_node/src/config.rs b/applications/tari_base_node/src/config.rs index d19a30dd3a..187f5d0391 100644 --- a/applications/tari_base_node/src/config.rs +++ b/applications/tari_base_node/src/config.rs @@ -77,6 +77,7 @@ impl ApplicationConfig { #[derive(Clone, Serialize, Deserialize, Debug)] #[serde(deny_unknown_fields)] +#[allow(clippy::struct_excessive_bools)] pub struct BaseNodeConfig { override_from: Option, pub network: Network, diff --git a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs index 991a85eb22..0f3e6d00ab 100644 --- a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs +++ b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs @@ -321,6 +321,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { Ok(Response::new(rx)) } + #[allow(clippy::too_many_lines)] async fn list_headers( &self, request: Request, diff --git a/applications/tari_base_node/src/main.rs b/applications/tari_base_node/src/main.rs index 9c3c2b6757..6f49dedbc7 100644 --- a/applications/tari_base_node/src/main.rs +++ b/applications/tari_base_node/src/main.rs @@ -219,7 +219,7 @@ async fn run_node( recovery::initiate_recover_db(&config.base_node)?; recovery::run_recovery(&config.base_node) .await - .map_err(|e| ExitError::new(ExitCode::RecoveryError, &e))?; + .map_err(|e| ExitError::new(ExitCode::RecoveryError, e))?; return Ok(()); }; diff --git a/applications/tari_base_node/src/recovery.rs b/applications/tari_base_node/src/recovery.rs index 6a9e0eb220..eb9eea3b4b 100644 --- a/applications/tari_base_node/src/recovery.rs +++ b/applications/tari_base_node/src/recovery.rs @@ -65,7 +65,7 @@ pub fn initiate_recover_db(config: &BaseNodeConfig) -> Result<(), ExitError> { DatabaseType::Lmdb => { 
create_recovery_lmdb_database(config.lmdb_path.as_path()).map_err(|err| { error!(target: LOG_TARGET, "{}", err); - ExitError::new(ExitCode::UnknownError, &err) + ExitError::new(ExitCode::UnknownError, err) })?; }, }; @@ -101,13 +101,13 @@ pub async fn run_recovery(node_config: &BaseNodeConfig) -> Result<(), anyhow::Er factories.clone(), ), ); - let mut config = node_config.storage.clone(); + let mut config = node_config.storage; config.cleanup_orphans_at_startup = true; let db = BlockchainDatabase::new( main_db, rules.clone(), validators, - node_config.storage.clone(), + node_config.storage, DifficultyCalculator::new(rules, randomx_factory), )?; do_recovery(db.into(), temp_db).await?; diff --git a/applications/tari_collectibles/src-tauri/src/cli.rs b/applications/tari_collectibles/src-tauri/src/cli.rs index 2443461606..5695bba75a 100644 --- a/applications/tari_collectibles/src-tauri/src/cli.rs +++ b/applications/tari_collectibles/src-tauri/src/cli.rs @@ -82,7 +82,7 @@ pub fn list_assets(offset: u64, count: u64, state: &ConcurrentAppState) -> Resul println!("{}", serde_json::to_string_pretty(&rows).unwrap()); Ok(()) } - Err(e) => Err(ExitError::new(ExitCode::CommandError, &e)), + Err(e) => Err(ExitError::new(ExitCode::CommandError, e)), } } @@ -110,7 +110,7 @@ pub(crate) fn make_it_rain( match source_address { Some(source_address) => { let source_uuid = Uuid::parse_str(&source_address) - .map_err(|e| ExitError::new(ExitCode::CommandError, &e))?; + .map_err(|e| ExitError::new(ExitCode::CommandError, e))?; if !rows.iter().any(|wallet| wallet.id == source_uuid) { return Err(ExitError::new(ExitCode::CommandError, "Wallet not found!")); } @@ -120,13 +120,13 @@ pub(crate) fn make_it_rain( } } Err(e) => { - return Err(ExitError::new(ExitCode::CommandError, e.to_string())); + return Err(ExitError::new(ExitCode::CommandError, e)); } }; runtime .block_on(commands::wallets::inner_wallets_unlock(id, state)) - .map_err(|e| ExitError::new(ExitCode::CommandError, e.to_string()))?; 
+ .map_err(|e| ExitError::new(ExitCode::CommandError, e))?; println!( "Sending {} of {} to {} {} times.", asset_public_key, amount, to_address, number_transactions @@ -139,7 +139,7 @@ pub(crate) fn make_it_rain( to_address.clone(), state, )) - .map_err(|e| ExitError::new(ExitCode::CommandError, e.to_string()))?; + .map_err(|e| ExitError::new(ExitCode::CommandError, e))?; } Ok(()) } diff --git a/applications/tari_collectibles/src-tauri/src/storage/sqlite/sqlite_assets_table_gateway.rs b/applications/tari_collectibles/src-tauri/src/storage/sqlite/sqlite_assets_table_gateway.rs index 9ad5e5e527..e26357d96b 100644 --- a/applications/tari_collectibles/src-tauri/src/storage/sqlite/sqlite_assets_table_gateway.rs +++ b/applications/tari_collectibles/src-tauri/src/storage/sqlite/sqlite_assets_table_gateway.rs @@ -49,6 +49,8 @@ impl AssetsTableGateway for SqliteAssetsTableGateway { .collect::>() } + #[allow(clippy::cast_possible_wrap)] + #[allow(clippy::cast_possible_truncation)] fn insert(&self, asset: &AssetRow, tx: &SqliteTransaction) -> Result<(), StorageError> { let mut committee_pub_keys = vec![]; if let Some(pub_keys) = asset.committee.as_ref() { diff --git a/applications/tari_collectibles/src-tauri/src/storage/sqlite/sqlite_key_indices_table_gateway.rs b/applications/tari_collectibles/src-tauri/src/storage/sqlite/sqlite_key_indices_table_gateway.rs index 25847e5e79..6f425ce630 100644 --- a/applications/tari_collectibles/src-tauri/src/storage/sqlite/sqlite_key_indices_table_gateway.rs +++ b/applications/tari_collectibles/src-tauri/src/storage/sqlite/sqlite_key_indices_table_gateway.rs @@ -37,6 +37,7 @@ pub struct SqliteKeyIndicesTableGateway { } impl KeyIndicesTableGateway for SqliteKeyIndicesTableGateway { + #[allow(clippy::cast_sign_loss)] fn list(&self, tx: &SqliteTransaction) -> Result, StorageError> { let results: Vec = schema::key_indices::table.load(tx.connection())?; Ok( @@ -51,6 +52,7 @@ impl KeyIndicesTableGateway for SqliteKeyIndicesTableGateway ) } + 
#[allow(clippy::cast_possible_wrap)] fn insert(&self, key_index: &KeyIndexRow, tx: &SqliteTransaction) -> Result<(), StorageError> { let sql_model = models::KeyIndex { id: Vec::from(key_index.id.as_bytes().as_slice()), @@ -64,6 +66,7 @@ impl KeyIndicesTableGateway for SqliteKeyIndicesTableGateway Ok(()) } + #[allow(clippy::cast_possible_wrap)] fn update_last_index( &self, old_row: &KeyIndexRow, @@ -88,6 +91,7 @@ impl KeyIndicesTableGateway for SqliteKeyIndicesTableGateway Ok(()) } + #[allow(clippy::cast_sign_loss)] fn find( &self, branch_seed: String, diff --git a/applications/tari_console_wallet/src/automation/command_parser.rs b/applications/tari_console_wallet/src/automation/command_parser.rs index 507a83d89b..48c50ce91e 100644 --- a/applications/tari_console_wallet/src/automation/command_parser.rs +++ b/applications/tari_console_wallet/src/automation/command_parser.rs @@ -498,6 +498,7 @@ mod test { }; #[test] + #[allow(clippy::too_many_lines)] fn test_parse_command() { let (_secret_key, public_key) = PublicKey::random_keypair(&mut OsRng); diff --git a/applications/tari_console_wallet/src/automation/commands.rs b/applications/tari_console_wallet/src/automation/commands.rs index f73d329408..a2fc146b6c 100644 --- a/applications/tari_console_wallet/src/automation/commands.rs +++ b/applications/tari_console_wallet/src/automation/commands.rs @@ -345,6 +345,7 @@ pub async fn discover_peer( Ok(()) } +#[allow(clippy::too_many_lines)] pub async fn make_it_rain( wallet_transaction_service: TransactionServiceHandle, fee_per_gram: u64, @@ -623,6 +624,7 @@ pub async fn monitor_transactions( results } +#[allow(clippy::too_many_lines)] pub async fn command_runner( config: &WalletConfig, commands: Vec, diff --git a/applications/tari_console_wallet/src/automation/error.rs b/applications/tari_console_wallet/src/automation/error.rs index 093eba9e83..370e4f35fc 100644 --- a/applications/tari_console_wallet/src/automation/error.rs +++ 
b/applications/tari_console_wallet/src/automation/error.rs @@ -73,7 +73,7 @@ pub enum CommandError { impl From for ExitError { fn from(err: CommandError) -> Self { error!(target: LOG_TARGET, "{}", err); - Self::new(ExitCode::CommandError, &err) + Self::new(ExitCode::CommandError, err.to_string()) } } diff --git a/applications/tari_console_wallet/src/cli.rs b/applications/tari_console_wallet/src/cli.rs index 1c47bfc0c1..9c686fe038 100644 --- a/applications/tari_console_wallet/src/cli.rs +++ b/applications/tari_console_wallet/src/cli.rs @@ -30,6 +30,7 @@ const DEFAULT_NETWORK: &str = "dibbler"; #[derive(Parser, Debug)] #[clap(author, version, about, long_about = None)] #[clap(propagate_version = true)] +#[allow(clippy::struct_excessive_bools)] pub(crate) struct Cli { #[clap(flatten)] pub common: CommonCliArgs, diff --git a/applications/tari_console_wallet/src/init/mod.rs b/applications/tari_console_wallet/src/init/mod.rs index dc8c82802d..176e5dbb30 100644 --- a/applications/tari_console_wallet/src/init/mod.rs +++ b/applications/tari_console_wallet/src/init/mod.rs @@ -82,7 +82,7 @@ pub fn get_or_prompt_password( if let Some(p) = env { let env_password = Some( p.into_string() - .map_err(|_| ExitError::new(ExitCode::IOError, &"Failed to convert OsString into String"))?, + .map_err(|_| ExitError::new(ExitCode::IOError, "Failed to convert OsString into String"))?, ); return Ok(env_password); } @@ -98,7 +98,7 @@ pub fn get_or_prompt_password( fn prompt_password(prompt: &str) -> Result { let password = loop { - let pass = prompt_password_stdout(prompt).map_err(|e| ExitError::new(ExitCode::IOError, &e))?; + let pass = prompt_password_stdout(prompt).map_err(|e| ExitError::new(ExitCode::IOError, e))?; if pass.is_empty() { println!("Password cannot be empty!"); continue; @@ -122,18 +122,18 @@ pub async fn change_password( let confirmed = prompt_password("Confirm new password: ")?; if passphrase != confirmed { - return Err(ExitError::new(ExitCode::InputError, &"Passwords don't 
match!")); + return Err(ExitError::new(ExitCode::InputError, "Passwords don't match!")); } wallet .remove_encryption() .await - .map_err(|e| ExitError::new(ExitCode::WalletError, &e))?; + .map_err(|e| ExitError::new(ExitCode::WalletError, e))?; wallet .apply_encryption(passphrase) .await - .map_err(|e| ExitError::new(ExitCode::WalletError, &e))?; + .map_err(|e| ExitError::new(ExitCode::WalletError, e))?; println!("Wallet password changed successfully."); @@ -173,7 +173,7 @@ pub async fn get_base_node_peer_config( .map(|s| SeedPeer::from_str(s)) .map(|r| r.map(Peer::from)) .collect::, _>>() - .map_err(|err| ExitError::new(ExitCode::ConfigError, &format!("Malformed base node peer: {}", err)))?; + .map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed base node peer: {}", err)))?; // peer seeds let peer_seeds = config @@ -183,7 +183,7 @@ pub async fn get_base_node_peer_config( .map(|s| SeedPeer::from_str(s)) .map(|r| r.map(Peer::from)) .collect::, _>>() - .map_err(|err| ExitError::new(ExitCode::ConfigError, &format!("Malformed seed peer: {}", err)))?; + .map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed seed peer: {}", err)))?; let peer_config = PeerConfig::new(base_node_custom, base_node_peers, peer_seeds); debug!(target: LOG_TARGET, "base node peer config: {:?}", peer_config); @@ -217,6 +217,7 @@ pub(crate) fn wallet_mode(cli: &Cli, boot_mode: WalletBoot) -> WalletMode { } /// Set up the app environment and state for use by the UI +#[allow(clippy::too_many_lines)] pub async fn init_wallet( config: &ApplicationConfig, arg_password: Option, @@ -231,9 +232,9 @@ pub async fn init_wallet( .parent() .expect("console_wallet_db_file cannot be set to a root directory"), ) - .map_err(|e| ExitError::new(ExitCode::WalletError, &format!("Error creating Wallet folder. {}", e)))?; + .map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating Wallet folder. 
{}", e)))?; fs::create_dir_all(&config.wallet.p2p.datastore_path) - .map_err(|e| ExitError::new(ExitCode::WalletError, &format!("Error creating peer db folder. {}", e)))?; + .map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating peer db folder. {}", e)))?; debug!(target: LOG_TARGET, "Running Wallet database migrations"); @@ -316,8 +317,8 @@ pub async fn init_wallet( .map_err(|e| match e { WalletError::CommsInitializationError(CommsInitializationError::HiddenServiceControllerError( HiddenServiceControllerError::TorControlPortOffline, - )) => ExitError::new(ExitCode::TorOffline, &e), - WalletError::CommsInitializationError(e) => ExitError::new(ExitCode::WalletError, &e), + )) => ExitError::new(ExitCode::TorOffline, e), + WalletError::CommsInitializationError(e) => ExitError::new(ExitCode::WalletError, e), e => ExitError::new( ExitCode::WalletError, &format!("Error creating Wallet Container: {}", e), @@ -328,7 +329,7 @@ pub async fn init_wallet( .db .set_tor_identity(hs.tor_identity().clone()) .await - .map_err(|e| ExitError::new(ExitCode::WalletError, &format!("Problem writing tor identity. {}", e)))?; + .map_err(|e| ExitError::new(ExitCode::WalletError, format!("Problem writing tor identity. 
{}", e)))?; } if !wallet_encrypted { @@ -345,7 +346,7 @@ pub async fn init_wallet( let confirmed = prompt_password("Confirm wallet password: ")?; if password != confirmed { - return Err(ExitError::new(ExitCode::InputError, &"Passwords don't match!")); + return Err(ExitError::new(ExitCode::InputError, "Passwords don't match!")); } (password, true) @@ -410,15 +411,12 @@ async fn setup_identity_from_db( if !node_identity.is_signed() { node_identity.sign(); // unreachable panic: signed above - wallet_db - .set_comms_identity_signature( - node_identity - .identity_signature_read() - .as_ref() - .expect("unreachable panic") - .clone(), - ) - .await?; + let sig = node_identity + .identity_signature_read() + .as_ref() + .expect("unreachable panic") + .clone(); + wallet_db.set_comms_identity_signature(sig).await?; } Ok(node_identity) @@ -436,7 +434,7 @@ pub async fn start_wallet( let net_address = base_node .addresses .first() - .ok_or_else(|| ExitError::new(ExitCode::ConfigError, &"Configured base node has no address!"))?; + .ok_or_else(|| ExitError::new(ExitCode::ConfigError, "Configured base node has no address!"))?; wallet .set_base_node_peer(base_node.public_key.clone(), net_address.address.clone()) @@ -472,7 +470,7 @@ async fn validate_txos(wallet: &mut WalletSqlite) -> Result<(), ExitError> { wallet.output_manager_service.validate_txos().await.map_err(|e| { error!(target: LOG_TARGET, "Error validating Unspent TXOs: {}", e); - ExitError::new(ExitCode::WalletError, &e) + ExitError::new(ExitCode::WalletError, e) })?; debug!(target: LOG_TARGET, "TXO validations started."); @@ -507,7 +505,7 @@ async fn confirm_seed_words(wallet: &mut WalletSqlite) -> Result<(), ExitError> _ => continue, }, Err(e) => { - return Err(ExitError::new(ExitCode::IOError, &e)); + return Err(ExitError::new(ExitCode::IOError, e)); }, } } @@ -541,7 +539,7 @@ pub(crate) fn boot(cli: &Cli, wallet_config: &WalletConfig) -> Result Result Result { - return Err(ExitError::new(ExitCode::IOError, &e)); + 
return Err(ExitError::new(ExitCode::IOError, e)); }, } } diff --git a/applications/tari_console_wallet/src/main.rs b/applications/tari_console_wallet/src/main.rs index 8641cee042..6b8c60131f 100644 --- a/applications/tari_console_wallet/src/main.rs +++ b/applications/tari_console_wallet/src/main.rs @@ -198,7 +198,7 @@ fn main_inner() -> Result<(), ExitError> { }, WalletMode::Invalid => Err(ExitError::new( ExitCode::InputError, - &"Invalid wallet mode - are you trying too many command options at once?", + "Invalid wallet mode - are you trying too many command options at once?", )), }; diff --git a/applications/tari_console_wallet/src/recovery.rs b/applications/tari_console_wallet/src/recovery.rs index f9aa745eb9..efd0e2e974 100644 --- a/applications/tari_console_wallet/src/recovery.rs +++ b/applications/tari_console_wallet/src/recovery.rs @@ -48,7 +48,7 @@ pub fn prompt_private_key_from_seed_words() -> Result { println!("Recovery Mode"); println!(); println!("Type or paste all of your seed words on one line, only separated by spaces."); - let input = rl.readline(">> ").map_err(|e| ExitError::new(ExitCode::IOError, &e))?; + let input = rl.readline(">> ").map_err(|e| ExitError::new(ExitCode::IOError, e))?; let seed_words: Vec = input.split_whitespace().map(str::to_string).collect(); match CipherSeed::from_mnemonic(&seed_words, None) { @@ -70,7 +70,7 @@ pub fn get_seed_from_seed_words(seed_words: Vec) -> Result { let err_msg = format!("MnemonicError parsing seed words: {}", e); warn!(target: LOG_TARGET, "{}", err_msg); - Err(ExitError::new(ExitCode::RecoveryError, &err_msg)) + Err(ExitError::new(ExitCode::RecoveryError, err_msg)) }, } } @@ -78,6 +78,7 @@ pub fn get_seed_from_seed_words(seed_words: Vec) -> Result::builder() @@ -200,6 +201,6 @@ pub async fn wallet_recovery( recovery_join_handle .await - .map_err(|e| ExitError::new(ExitCode::RecoveryError, &e))? 
- .map_err(|e| ExitError::new(ExitCode::RecoveryError, &e)) + .map_err(|e| ExitError::new(ExitCode::RecoveryError, e))? + .map_err(|e| ExitError::new(ExitCode::RecoveryError, e)) } diff --git a/applications/tari_console_wallet/src/ui/components/base_node.rs b/applications/tari_console_wallet/src/ui/components/base_node.rs index 39ca8f18da..014968f3e2 100644 --- a/applications/tari_console_wallet/src/ui/components/base_node.rs +++ b/applications/tari_console_wallet/src/ui/components/base_node.rs @@ -41,6 +41,7 @@ impl BaseNode { } impl Component for BaseNode { + #[allow(clippy::too_many_lines)] fn draw(&mut self, f: &mut Frame, area: Rect, app_state: &AppState) where B: Backend { let title = Spans::from(vec![Span::styled( diff --git a/applications/tari_console_wallet/src/ui/components/send_tab.rs b/applications/tari_console_wallet/src/ui/components/send_tab.rs index 323f2efc08..f6c85fbf99 100644 --- a/applications/tari_console_wallet/src/ui/components/send_tab.rs +++ b/applications/tari_console_wallet/src/ui/components/send_tab.rs @@ -63,6 +63,7 @@ impl SendTab { } } + #[allow(clippy::too_many_lines)] fn draw_send_form(&self, f: &mut Frame, area: Rect, _app_state: &AppState) where B: Backend { let block = Block::default().borders(Borders::ALL).title(Span::styled( diff --git a/applications/tari_console_wallet/src/ui/components/transactions_tab.rs b/applications/tari_console_wallet/src/ui/components/transactions_tab.rs index 88b5b61b4c..d92a09d72d 100644 --- a/applications/tari_console_wallet/src/ui/components/transactions_tab.rs +++ b/applications/tari_console_wallet/src/ui/components/transactions_tab.rs @@ -272,6 +272,7 @@ impl TransactionsTab { column_list.render(f, area, &mut completed_list_state); } + #[allow(clippy::too_many_lines)] fn draw_detailed_transaction(&self, f: &mut Frame, area: Rect, app_state: &AppState) where B: Backend { let block = Block::default().borders(Borders::ALL).title(Span::styled( diff --git 
a/applications/tari_console_wallet/src/ui/mod.rs b/applications/tari_console_wallet/src/ui/mod.rs index 9843433bc5..1934be2c6c 100644 --- a/applications/tari_console_wallet/src/ui/mod.rs +++ b/applications/tari_console_wallet/src/ui/mod.rs @@ -72,7 +72,7 @@ pub fn run(app: App>) -> Result<(), ExitError> { app.app_state.start_event_monitor(app.notifier.clone()).await; Result::<_, UiError>::Ok(()) }) - .map_err(|e| ExitError::new(ExitCode::WalletError, &e))?; + .map_err(|e| ExitError::new(ExitCode::WalletError, e))?; crossterm_loop(app) } /// This is the main loop of the application UI using Crossterm based events diff --git a/applications/tari_console_wallet/src/ui/state/wallet_event_monitor.rs b/applications/tari_console_wallet/src/ui/state/wallet_event_monitor.rs index 4f17416510..afc63e751d 100644 --- a/applications/tari_console_wallet/src/ui/state/wallet_event_monitor.rs +++ b/applications/tari_console_wallet/src/ui/state/wallet_event_monitor.rs @@ -57,6 +57,7 @@ impl WalletEventMonitor { } } + #[allow(clippy::too_many_lines)] pub async fn run(mut self, notifier: Notifier) { let mut shutdown_signal = self.app_state_inner.read().await.get_shutdown_signal(); let mut transaction_service_events = self.app_state_inner.read().await.get_transaction_service_event_stream(); diff --git a/applications/tari_console_wallet/src/wallet_modes.rs b/applications/tari_console_wallet/src/wallet_modes.rs index c959549e97..f5e9759c0e 100644 --- a/applications/tari_console_wallet/src/wallet_modes.rs +++ b/applications/tari_console_wallet/src/wallet_modes.rs @@ -92,14 +92,14 @@ impl PeerConfig { Ok(self .base_node_peers .first() - .ok_or_else(|| ExitError::new(ExitCode::ConfigError, &"Configured base node peer has no address!"))? + .ok_or_else(|| ExitError::new(ExitCode::ConfigError, "Configured base node peer has no address!"))? 
.clone()) } else if !self.peer_seeds.is_empty() { // pick a random peer seed Ok(self .peer_seeds .choose(&mut OsRng) - .ok_or_else(|| ExitError::new(ExitCode::ConfigError, &"Peer seeds was empty."))? + .ok_or_else(|| ExitError::new(ExitCode::ConfigError, "Peer seeds was empty."))? .clone()) } else { Err(ExitError::new( @@ -166,10 +166,10 @@ pub(crate) fn script_mode( ) -> Result<(), ExitError> { info!(target: LOG_TARGET, "Starting wallet script mode"); println!("Starting wallet script mode"); - let script = fs::read_to_string(path).map_err(|e| ExitError::new(ExitCode::InputError, &e))?; + let script = fs::read_to_string(path).map_err(|e| ExitError::new(ExitCode::InputError, e))?; if script.is_empty() { - return Err(ExitError::new(ExitCode::InputError, &"Input file is empty!")); + return Err(ExitError::new(ExitCode::InputError, "Input file is empty!")); }; let mut commands = Vec::new(); @@ -222,7 +222,7 @@ fn wallet_or_exit( let mut buf = String::new(); std::io::stdin() .read_line(&mut buf) - .map_err(|e| ExitError::new(ExitCode::IOError, &e))?; + .map_err(|e| ExitError::new(ExitCode::IOError, e))?; match buf.as_str().trim() { "quit" | "q" | "exit" => { @@ -258,7 +258,7 @@ pub fn tui_mode( } else if let Some(peer) = handle.block_on(wallet.get_base_node_peer()) { base_node_selected = peer; } else { - return Err(ExitError::new(ExitCode::WalletError, &"Could not select a base node")); + return Err(ExitError::new(ExitCode::WalletError, "Could not select a base node")); } let app = App::>::new( @@ -341,7 +341,7 @@ pub fn grpc_mode(handle: Handle, config: &WalletConfig, wallet: WalletSqlite) -> let grpc = WalletGrpcServer::new(wallet); handle .block_on(run_grpc(grpc, grpc_address.clone())) - .map_err(|e| ExitError::new(ExitCode::GrpcError, &e))?; + .map_err(|e| ExitError::new(ExitCode::GrpcError, e))?; } else { println!("No grpc address specified"); } diff --git a/applications/tari_merge_mining_proxy/src/config.rs b/applications/tari_merge_mining_proxy/src/config.rs index 
e90eeb819c..e228020822 100644 --- a/applications/tari_merge_mining_proxy/src/config.rs +++ b/applications/tari_merge_mining_proxy/src/config.rs @@ -26,6 +26,7 @@ use tari_comms::multiaddr::Multiaddr; #[derive(Clone, Debug, Deserialize, Serialize)] #[serde(deny_unknown_fields)] +#[allow(clippy::struct_excessive_bools)] pub struct MergeMiningProxyConfig { override_from: Option, pub monerod_url: StringList, diff --git a/applications/tari_merge_mining_proxy/src/proxy.rs b/applications/tari_merge_mining_proxy/src/proxy.rs index d19434ddbe..074f3937e5 100644 --- a/applications/tari_merge_mining_proxy/src/proxy.rs +++ b/applications/tari_merge_mining_proxy/src/proxy.rs @@ -165,6 +165,7 @@ struct InnerService { impl InnerService { #[instrument] + #[allow(clippy::cast_possible_wrap)] async fn handle_get_height(&self, monerod_resp: Response) -> Result, MmProxyError> { let (parts, mut json) = monerod_resp.into_parts(); if json["height"].is_null() { @@ -211,6 +212,7 @@ impl InnerService { Ok(proxy::into_response(parts, &json)) } + #[allow(clippy::too_many_lines)] async fn handle_submit_block( &self, request: Request, @@ -344,6 +346,7 @@ impl InnerService { Ok(proxy::into_response(parts, &json_resp)) } + #[allow(clippy::too_many_lines)] async fn handle_get_block_template( &self, monerod_resp: Response, diff --git a/applications/tari_miner/src/difficulty.rs b/applications/tari_miner/src/difficulty.rs index cca3a3dcbe..6cd641b38f 100644 --- a/applications/tari_miner/src/difficulty.rs +++ b/applications/tari_miner/src/difficulty.rs @@ -40,6 +40,8 @@ pub struct BlockHeaderSha3 { } impl BlockHeaderSha3 { + #[allow(clippy::cast_possible_truncation)] + #[allow(clippy::cast_sign_loss)] pub fn new(header: BlockHeader) -> Result { use std::convert::TryFrom; @@ -111,6 +113,7 @@ impl BlockHeaderSha3 { big_endian_difficulty(&hash) } + #[allow(clippy::cast_possible_wrap)] pub fn create_header(&self) -> BlockHeader { let mut header = self.header.clone(); header.timestamp = 
Some(prost_types::Timestamp { @@ -141,6 +144,7 @@ pub mod test { use super::*; + #[allow(clippy::cast_sign_loss)] pub fn get_header() -> (BlockHeader, CoreBlockHeader) { let mut header = CoreBlockHeader::new(0); header.timestamp = diff --git a/applications/tari_miner/src/main.rs b/applications/tari_miner/src/main.rs index 25dd267c43..9de5a512e2 100644 --- a/applications/tari_miner/src/main.rs +++ b/applications/tari_miner/src/main.rs @@ -80,6 +80,7 @@ fn main() { } } +#[allow(clippy::too_many_lines)] async fn main_inner() -> Result<(), ExitError> { let cli = Cli::parse(); @@ -98,7 +99,7 @@ async fn main_inner() -> Result<(), ExitError> { let _ = RistrettoPublicKey::from_hex(&miner_address).map_err(|_| { ExitError::new( ExitCode::ConfigError, - &"Miner is not configured with a valid wallet address.", + "Miner is not configured with a valid wallet address.", ) })?; if !config.mining_worker_name.is_empty() { @@ -126,14 +127,14 @@ async fn main_inner() -> Result<(), ExitError> { mc.run() .await - .map_err(|err| ExitError::new(ExitCode::UnknownError, &format!("Stratum error: {:?}", err)))?; + .map_err(|err| ExitError::new(ExitCode::UnknownError, format!("Stratum error: {:?}", err)))?; Ok(()) } else { let (mut node_conn, mut wallet_conn) = connect(&config).await.map_err(|e| { ExitError::new( ExitCode::GrpcError, - &format!("Could not connect to wallet or base node: {}", e), + format!("Could not connect to wallet or base node: {}", e), ) })?; diff --git a/applications/tari_miner/src/miner.rs b/applications/tari_miner/src/miner.rs index b1a268d213..9954f292e2 100644 --- a/applications/tari_miner/src/miner.rs +++ b/applications/tari_miner/src/miner.rs @@ -230,6 +230,7 @@ pub fn mining_task( return; } if !(share_mode) { + #[allow(clippy::cast_sign_loss)] hasher.set_forward_timestamp(timestamp().seconds as u64); } } diff --git a/applications/tari_miner/src/stratum/stratum_controller/controller.rs b/applications/tari_miner/src/stratum/stratum_controller/controller.rs index 
ae305911be..e20a18b07c 100644 --- a/applications/tari_miner/src/stratum/stratum_controller/controller.rs +++ b/applications/tari_miner/src/stratum/stratum_controller/controller.rs @@ -71,6 +71,7 @@ impl Controller { self.client_tx = Some(client_tx); } + #[allow(clippy::too_many_lines)] pub async fn run(&mut self) -> Result<(), Error> { let mut miner: Option = None; loop { diff --git a/applications/tari_validator_node/src/comms.rs b/applications/tari_validator_node/src/comms.rs index e9c9517704..2bccc1537a 100644 --- a/applications/tari_validator_node/src/comms.rs +++ b/applications/tari_validator_node/src/comms.rs @@ -52,7 +52,7 @@ pub async fn build_service_and_comms_stack( let mut transport_config = config.validator_node.p2p.transport.clone(); transport_config.tor.identity = load_from_json(&config.validator_node.tor_identity_file) - .map_err(|e| ExitError::new(ExitCode::ConfigError, &e))?; + .map_err(|e| ExitError::new(ExitCode::ConfigError, e))?; let mut handles = StackBuilder::new(shutdown.clone()) .add_initializer(P2pInitializer::new( @@ -65,7 +65,7 @@ pub async fn build_service_and_comms_stack( )) .build() .await - .map_err(|err| ExitError::new(ExitCode::ConfigError, &err))?; + .map_err(|err| ExitError::new(ExitCode::ConfigError, err.to_string()))?; let comms = handles .take_handle::() @@ -75,20 +75,15 @@ pub async fn build_service_and_comms_stack( let comms = spawn_comms_using_transport(comms, transport_config) .await - .map_err(|e| { - ExitError::new( - ExitCode::ConfigError, - &format!("Could not spawn using transport: {}", e), - ) - })?; + .map_err(|e| ExitError::new(ExitCode::ConfigError, format!("Could not spawn using transport: {}", e)))?; // Save final node identity after comms has initialized. This is required because the public_address can be // changed by comms during initialization when using tor. 
identity_management::save_as_json(&config.validator_node.identity_file, &*comms.node_identity()) - .map_err(|e| ExitError::new(ExitCode::ConfigError, &format!("Failed to save node identity: {}", e)))?; + .map_err(|e| ExitError::new(ExitCode::ConfigError, format!("Failed to save node identity: {}", e)))?; if let Some(hs) = comms.hidden_service() { identity_management::save_as_json(&config.validator_node.tor_identity_file, hs.tor_identity()) - .map_err(|e| ExitError::new(ExitCode::ConfigError, &format!("Failed to save tor identity: {}", e)))?; + .map_err(|e| ExitError::new(ExitCode::ConfigError, format!("Failed to save tor identity: {}", e)))?; } handles.register(comms); diff --git a/applications/tari_validator_node/src/dan_node.rs b/applications/tari_validator_node/src/dan_node.rs index a2a348fff4..d38d70c3df 100644 --- a/applications/tari_validator_node/src/dan_node.rs +++ b/applications/tari_validator_node/src/dan_node.rs @@ -92,7 +92,7 @@ impl DanNode { let tip = base_node_client .get_tip_info() .await - .map_err(|e| ExitError::new(ExitCode::DigitalAssetError, &e))?; + .map_err(|e| ExitError::new(ExitCode::DigitalAssetError, e))?; if tip.height_of_longest_chain >= next_scanned_height { info!( target: LOG_TARGET, @@ -108,7 +108,7 @@ impl DanNode { let mut assets = base_node_client .get_assets_for_dan_node(node_identity.public_key().clone()) .await - .map_err(|e| ExitError::new(ExitCode::DigitalAssetError, &e))?; + .map_err(|e| ExitError::new(ExitCode::DigitalAssetError, e))?; info!( target: LOG_TARGET, "Base node returned {} asset(s) to process", @@ -189,7 +189,7 @@ impl DanNode { .iter() .map(|s| { CommsPublicKey::from_hex(s) - .map_err(|e| ExitError::new(ExitCode::ConfigError, &format!("could not convert to hex:{}", e))) + .map_err(|e| ExitError::new(ExitCode::ConfigError, format!("could not convert to hex:{}", e))) }) .collect::, _>>()?; @@ -245,7 +245,7 @@ impl DanNode { if let Err(err) = consensus_worker.run(shutdown.clone(), None, kill).await { 
error!(target: LOG_TARGET, "Consensus worker failed with error: {}", err); - return Err(ExitError::new(ExitCode::UnknownError, &err)); + return Err(ExitError::new(ExitCode::UnknownError, err)); } Ok(()) diff --git a/applications/tari_validator_node/src/main.rs b/applications/tari_validator_node/src/main.rs index dbf3123b2c..4996f0268e 100644 --- a/applications/tari_validator_node/src/main.rs +++ b/applications/tari_validator_node/src/main.rs @@ -164,7 +164,7 @@ fn build_runtime() -> Result { builder .enable_all() .build() - .map_err(|e| ExitError::new(ExitCode::UnknownError, &e)) + .map_err(|e| ExitError::new(ExitCode::UnknownError, e)) } async fn run_dan_node( diff --git a/base_layer/common_types/src/tx_id.rs b/base_layer/common_types/src/tx_id.rs index d34088399a..476de9e27d 100644 --- a/base_layer/common_types/src/tx_id.rs +++ b/base_layer/common_types/src/tx_id.rs @@ -40,6 +40,22 @@ impl TxId { pub fn as_u64(self) -> u64 { self.0 } + + /// Returns a cast to i64. This number may be negative. + /// Although this is usually a bad idea, in this case TxId is never used in calculations and + /// the data within TxId is not lost when converting to i64. + /// + /// Use this function to say explicitly that this is acceptable. 
+ /// + /// ```rust + /// let a = u64::MAX; + /// let b = a as i64; // -1 + /// assert_eq!(a, b as u64); + /// ``` + #[allow(clippy::cast_possible_wrap)] + pub fn as_i64_wrapped(self) -> i64 { + self.0 as i64 + } } impl Hash for TxId { @@ -80,24 +96,12 @@ impl From for TxId { } } -impl From for TxId { - fn from(s: i32) -> Self { - Self(s as u64) - } -} - impl From for u64 { fn from(s: TxId) -> Self { s.0 } } -impl From for i64 { - fn from(s: TxId) -> Self { - s.0 as i64 - } -} - impl fmt::Display for TxId { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!(f, "{}", self.0) diff --git a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs index d83294a8bc..bbd5c846bd 100644 --- a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs +++ b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs @@ -104,6 +104,8 @@ where B: BlockchainBackend + 'static } /// Handle inbound node comms requests from remote nodes and local services. 
+ // TODO: Break this function up into smaller pieces + #[allow(clippy::too_many_lines)] pub async fn handle_request(&self, request: NodeCommsRequest) -> Result { debug!(target: LOG_TARGET, "Handling remote request {}", request); match request { @@ -463,6 +465,7 @@ where B: BlockchainBackend + 'static }, NodeCommsRequest::FetchAssetRegistrations { range } => { let top_level_pubkey = PublicKey::default(); + #[allow(clippy::range_plus_one)] let exclusive_range = (*range.start())..(*range.end() + 1); let outputs = self .blockchain_db @@ -803,6 +806,7 @@ where B: BlockchainBackend + 'static match block_add_result { BlockAddResult::Ok(ref block) => { + #[allow(clippy::cast_possible_wrap)] metrics::tip_height().set(block.height() as i64); update_target_difficulty(block); let utxo_set_size = self.blockchain_db.utxo_count().await?; @@ -810,6 +814,7 @@ where B: BlockchainBackend + 'static }, BlockAddResult::ChainReorg { added, removed } => { if let Some(fork_height) = added.last().map(|b| b.height()) { + #[allow(clippy::cast_possible_wrap)] metrics::tip_height().set(fork_height as i64); metrics::reorg(fork_height, added.len(), removed.len()).inc(); } diff --git a/base_layer/core/src/base_node/rpc/service.rs b/base_layer/core/src/base_node/rpc/service.rs index 586f8ab9b0..abd156bf9d 100644 --- a/base_layer/core/src/base_node/rpc/service.rs +++ b/base_layer/core/src/base_node/rpc/service.rs @@ -428,10 +428,8 @@ impl BaseNodeWalletService for BaseNodeWalletRpc let mut not_deleted_positions = vec![]; for position in message.mmr_positions { - if position > u64::from(u32::MAX) { - return Err(RpcStatus::bad_request("position must fit into a u32")); - } - let position = position as u32; + let position = + u32::try_from(position).map_err(|_| RpcStatus::bad_request("All MMR positions must fit into a u32"))?; if deleted_bitmap.bitmap().contains(position) { deleted_positions.push(position); } else { diff --git a/base_layer/core/src/base_node/state_machine_service/states/listening.rs 
b/base_layer/core/src/base_node/state_machine_service/states/listening.rs index 3bc20b30d0..b22fa10c6b 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/listening.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/listening.rs @@ -68,6 +68,7 @@ pub struct PeerMetadata { impl PeerMetadata { pub fn to_bytes(&self) -> Vec { let size = bincode::serialized_size(self).unwrap(); + #[allow(clippy::cast_possible_truncation)] let mut buf = Vec::with_capacity(size as usize); bincode::serialize_into(&mut buf, self).unwrap(); // this should not fail buf @@ -110,6 +111,8 @@ impl Listening { Default::default() } + // TODO: Break up into smaller functions + #[allow(clippy::too_many_lines)] pub async fn next_event( &mut self, shared: &mut BaseNodeStateMachine, diff --git a/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs b/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs index 9fd37a12c2..64ee168e8f 100644 --- a/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs +++ b/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs @@ -182,6 +182,7 @@ impl BlockSynchronizer { Ok(connection) } + #[allow(clippy::too_many_lines)] async fn synchronize_blocks( &mut self, mut sync_peer: SyncPeer, diff --git a/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs b/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs index 10569fca07..bbafd5fe18 100644 --- a/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs +++ b/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs @@ -54,7 +54,7 @@ use crate::{ const LOG_TARGET: &str = "c::bn::header_sync"; -const NUM_INITIAL_HEADERS_TO_REQUEST: u64 = 1000; +const NUM_INITIAL_HEADERS_TO_REQUEST: usize = 1000; pub struct HeaderSynchronizer<'a, B> { config: BlockchainSyncConfig, @@ -424,9 +424,9 @@ impl<'a, B: BlockchainBackend + 'static> HeaderSynchronizer<'a, B> { client: &mut rpc::BaseNodeSyncRpcClient, ) -> Result { let (resp, 
block_hashes, steps_back) = self - .find_chain_split(sync_peer.node_id(), client, NUM_INITIAL_HEADERS_TO_REQUEST) + .find_chain_split(sync_peer.node_id(), client, NUM_INITIAL_HEADERS_TO_REQUEST as u64) .await?; - if resp.headers.len() > NUM_INITIAL_HEADERS_TO_REQUEST as usize { + if resp.headers.len() > NUM_INITIAL_HEADERS_TO_REQUEST { self.ban_peer_long( sync_peer.node_id(), BanReason::PeerSentTooManyHeaders(resp.headers.len()), @@ -489,6 +489,7 @@ impl<'a, B: BlockchainBackend + 'static> HeaderSynchronizer<'a, B> { // NOTE: We can trust that the header associated with this hash exists because `block_hashes` was supplied by // this node. Bounds checking for fork_hash_index has been done above. + #[allow(clippy::cast_possible_truncation)] let chain_split_hash = block_hashes.get(fork_hash_index as usize).unwrap(); self.header_validator.initialize_state(chain_split_hash).await?; @@ -547,6 +548,7 @@ impl<'a, B: BlockchainBackend + 'static> HeaderSynchronizer<'a, B> { Ok(blocks) } + #[allow(clippy::too_many_lines)] async fn synchronize_headers( &mut self, mut sync_peer: SyncPeer, @@ -588,7 +590,7 @@ impl<'a, B: BlockchainBackend + 'static> HeaderSynchronizer<'a, B> { has_switched_to_new_chain = true; } - if pending_len < NUM_INITIAL_HEADERS_TO_REQUEST as usize { + if pending_len < NUM_INITIAL_HEADERS_TO_REQUEST { // Peer returned less than the number of requested headers. This indicates that we have all the available // headers. 
debug!(target: LOG_TARGET, "No further headers to download"); diff --git a/base_layer/core/src/base_node/sync/horizon_state_sync/synchronizer.rs b/base_layer/core/src/base_node/sync/horizon_state_sync/synchronizer.rs index 3334120111..ef8d6a9f3b 100644 --- a/base_layer/core/src/base_node/sync/horizon_state_sync/synchronizer.rs +++ b/base_layer/core/src/base_node/sync/horizon_state_sync/synchronizer.rs @@ -231,6 +231,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { Ok(()) } + #[allow(clippy::too_many_lines)] async fn synchronize_kernels( &mut self, mut sync_peer: SyncPeer, @@ -301,6 +302,10 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { kernel_hashes.push(kernel.hash()); + // TODO: We mix u32 and u64 for the mmr length. This comes down to the use of a 32-bit croaring Bitmap. + // Suggest should use u64 externally (u64 is in the header) and error on the database calls if they + // are > u32::MAX. Remove the clippy exception once fixed. 
+ #[allow(clippy::cast_possible_truncation)] txn.insert_kernel_via_horizon_sync(kernel, current_header.hash().clone(), mmr_position as u32); if mmr_position == current_header.header().kernel_mmr_size - 1 { let num_kernels = kernel_hashes.len(); @@ -399,6 +404,8 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { Ok(()) } + // TODO: Split this function into smaller pieces + #[allow(clippy::too_many_lines)] async fn synchronize_outputs( &mut self, mut sync_peer: SyncPeer, diff --git a/base_layer/core/src/base_node/sync/rpc/service.rs b/base_layer/core/src/base_node/sync/rpc/service.rs index b4f501f2a0..9c79f9e220 100644 --- a/base_layer/core/src/base_node/sync/rpc/service.rs +++ b/base_layer/core/src/base_node/sync/rpc/service.rs @@ -283,6 +283,7 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ return Ok(Streaming::empty()); } + #[allow(clippy::cast_possible_truncation)] let chunk_size = cmp::min(100, count) as usize; debug!( target: LOG_TARGET, diff --git a/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs b/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs index d971b92788..1f51bdb953 100644 --- a/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs +++ b/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::{sync::Arc, time::Instant}; +use std::{convert::TryFrom, sync::Arc, time::Instant}; use log::*; use tari_comms::{ @@ -138,6 +138,8 @@ where B: BlockchainBackend + 'static Ok(()) } + // TODO: Split into smaller functions + #[allow(clippy::too_many_lines)] async fn start_streaming( &self, tx: &mut mpsc::Sender>, @@ -217,9 +219,12 @@ where B: BlockchainBackend + 'static break; } + let skip = usize::try_from(skip_outputs) + .map_err(|_| RpcStatus::bad_request("skip_outputs exceeds a 32-bit unsigned integer"))?; + let utxos = utxos .into_iter() - .skip(skip_outputs as usize) + .skip(skip) // Only enumerate after skip, because `start` already has the offset in it so `i` can begin from 0 .enumerate() .filter_map(|(i, utxo)| { diff --git a/base_layer/core/src/blocks/genesis_block.rs b/base_layer/core/src/blocks/genesis_block.rs index 504f033149..807b52a58d 100644 --- a/base_layer/core/src/blocks/genesis_block.rs +++ b/base_layer/core/src/blocks/genesis_block.rs @@ -130,6 +130,7 @@ fn get_igor_genesis_block_raw() -> Block { body.sort(); // set genesis timestamp let genesis = DateTime::parse_from_rfc2822("31 Oct 2021 06:00:00 +0200").unwrap(); + #[allow(clippy::cast_sign_loss)] let timestamp = genesis.timestamp() as u64; Block { header: BlockHeader { @@ -271,6 +272,7 @@ fn get_dibbler_genesis_block_raw() -> Block { body.sort(); // set genesis timestamp let genesis = DateTime::parse_from_rfc2822("25 Jan 2022 16:00:00 +0200").unwrap(); + #[allow(clippy::cast_sign_loss)] let timestamp = genesis.timestamp() as u64; Block { header: BlockHeader { diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index a975570b32..2dd73f8d47 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -92,7 +92,7 @@ use crate::{ const LOG_TARGET: &str = "c::cs::database"; /// Configuration for the BlockchainDatabase. 
-#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Copy, Debug, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct BlockchainDatabaseConfig { pub orphan_storage_capacity: usize, @@ -211,7 +211,7 @@ where B: BlockchainBackend let blockchain_db = BlockchainDatabase { db: Arc::new(RwLock::new(db)), validators, - config: config.clone(), + config, consensus_manager, difficulty_calculator: Arc::new(difficulty_calculator), disable_add_block_flag: Arc::new(AtomicBool::new(false)), @@ -1324,6 +1324,7 @@ pub fn fetch_headers( } // Allow the headers to be returned in reverse order + #[allow(clippy::cast_possible_truncation)] let mut headers = Vec::with_capacity((end_inclusive - start) as usize); for h in start..=end_inclusive { match db.fetch(&DbKey::BlockHeader(h))? { @@ -1669,7 +1670,7 @@ fn rewind_to_height( return Ok(vec![]); } - let mut removed_blocks = Vec::with_capacity(steps_back as usize); + let mut removed_blocks = Vec::with_capacity(usize::try_from(steps_back).unwrap_or(usize::MAX)); info!( target: LOG_TARGET, "Rewinding blocks from height {} to {}", @@ -1755,6 +1756,8 @@ fn rewind_to_hash( // Checks whether we should add the block as an orphan. If it is the case, the orphan block is added and the chain // is reorganised if necessary. 
+// TODO: Reduce LOC in this function +#[allow(clippy::too_many_lines)] fn handle_possible_reorg( db: &mut T, config: &BlockchainDatabaseConfig, @@ -2340,7 +2343,7 @@ impl Clone for BlockchainDatabase { BlockchainDatabase { db: self.db.clone(), validators: self.validators.clone(), - config: self.config.clone(), + config: self.config, consensus_manager: self.consensus_manager.clone(), difficulty_calculator: self.difficulty_calculator.clone(), disable_add_block_flag: self.disable_add_block_flag.clone(), diff --git a/base_layer/core/src/chain_storage/lmdb_db/helpers.rs b/base_layer/core/src/chain_storage/lmdb_db/helpers.rs index 69820920d5..1bf6b2e60d 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/helpers.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/helpers.rs @@ -31,6 +31,7 @@ pub const LOG_TARGET: &str = "c::cs::lmdb_db::lmdb"; pub fn serialize(data: &T) -> Result, ChainStorageError> where T: Serialize { let size = bincode::serialized_size(&data).map_err(|e| ChainStorageError::AccessError(e.to_string()))?; + #[allow(clippy::cast_possible_truncation)] let mut buf = Vec::with_capacity(size as usize); bincode::serialize_into(&mut buf, data).map_err(|e| { error!(target: LOG_TARGET, "Could not serialize lmdb: {:?}", e); diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index 636578fe1e..823bb1e4f0 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -26,6 +26,7 @@ #![allow(clippy::ptr_arg)] use std::{ + convert::TryFrom, fmt, fmt::Formatter, fs, @@ -178,7 +179,7 @@ pub fn create_lmdb_database>(path: P, config: LMDBConfig) -> Resu .build() .map_err(|err| ChainStorageError::CriticalError(format!("Could not create LMDB store:{}", err)))?; debug!(target: LOG_TARGET, "LMDB database creation successful"); - LMDBDatabase::new(lmdb_store, file_lock) + LMDBDatabase::new(&lmdb_store, file_lock) } /// This is a lmdb-based 
blockchain database for persistent storage of the chain state. @@ -212,36 +213,36 @@ pub struct LMDBDatabase { } impl LMDBDatabase { - pub fn new(store: LMDBStore, file_lock: File) -> Result { + pub fn new(store: &LMDBStore, file_lock: File) -> Result { let env = store.env(); let db = Self { - metadata_db: get_database(&store, LMDB_DB_METADATA)?, - headers_db: get_database(&store, LMDB_DB_HEADERS)?, - header_accumulated_data_db: get_database(&store, LMDB_DB_HEADER_ACCUMULATED_DATA)?, - block_accumulated_data_db: get_database(&store, LMDB_DB_BLOCK_ACCUMULATED_DATA)?, - block_hashes_db: get_database(&store, LMDB_DB_BLOCK_HASHES)?, - utxos_db: get_database(&store, LMDB_DB_UTXOS)?, - inputs_db: get_database(&store, LMDB_DB_INPUTS)?, - txos_hash_to_index_db: get_database(&store, LMDB_DB_TXOS_HASH_TO_INDEX)?, - kernels_db: get_database(&store, LMDB_DB_KERNELS)?, - kernel_excess_index: get_database(&store, LMDB_DB_KERNEL_EXCESS_INDEX)?, - kernel_excess_sig_index: get_database(&store, LMDB_DB_KERNEL_EXCESS_SIG_INDEX)?, - kernel_mmr_size_index: get_database(&store, LMDB_DB_KERNEL_MMR_SIZE_INDEX)?, - output_mmr_size_index: get_database(&store, LMDB_DB_UTXO_MMR_SIZE_INDEX)?, - utxo_commitment_index: get_database(&store, LMDB_DB_UTXO_COMMITMENT_INDEX)?, - unique_id_index: get_database(&store, LMDB_DB_UNIQUE_ID_INDEX)?, + metadata_db: get_database(store, LMDB_DB_METADATA)?, + headers_db: get_database(store, LMDB_DB_HEADERS)?, + header_accumulated_data_db: get_database(store, LMDB_DB_HEADER_ACCUMULATED_DATA)?, + block_accumulated_data_db: get_database(store, LMDB_DB_BLOCK_ACCUMULATED_DATA)?, + block_hashes_db: get_database(store, LMDB_DB_BLOCK_HASHES)?, + utxos_db: get_database(store, LMDB_DB_UTXOS)?, + inputs_db: get_database(store, LMDB_DB_INPUTS)?, + txos_hash_to_index_db: get_database(store, LMDB_DB_TXOS_HASH_TO_INDEX)?, + kernels_db: get_database(store, LMDB_DB_KERNELS)?, + kernel_excess_index: get_database(store, LMDB_DB_KERNEL_EXCESS_INDEX)?, + kernel_excess_sig_index: 
get_database(store, LMDB_DB_KERNEL_EXCESS_SIG_INDEX)?, + kernel_mmr_size_index: get_database(store, LMDB_DB_KERNEL_MMR_SIZE_INDEX)?, + output_mmr_size_index: get_database(store, LMDB_DB_UTXO_MMR_SIZE_INDEX)?, + utxo_commitment_index: get_database(store, LMDB_DB_UTXO_COMMITMENT_INDEX)?, + unique_id_index: get_database(store, LMDB_DB_UNIQUE_ID_INDEX)?, deleted_txo_mmr_position_to_height_index: get_database( - &store, + store, LMDB_DB_DELETED_TXO_MMR_POSITION_TO_HEIGHT_INDEX, )?, - orphans_db: get_database(&store, LMDB_DB_ORPHANS)?, - orphan_header_accumulated_data_db: get_database(&store, LMDB_DB_ORPHAN_HEADER_ACCUMULATED_DATA)?, - monero_seed_height_db: get_database(&store, LMDB_DB_MONERO_SEED_HEIGHT)?, - orphan_chain_tips_db: get_database(&store, LMDB_DB_ORPHAN_CHAIN_TIPS)?, - orphan_parent_map_index: get_database(&store, LMDB_DB_ORPHAN_PARENT_MAP_INDEX)?, - bad_blocks: get_database(&store, LMDB_DB_BAD_BLOCK_LIST)?, - reorgs: get_database(&store, LMDB_DB_REORGS)?, + orphans_db: get_database(store, LMDB_DB_ORPHANS)?, + orphan_header_accumulated_data_db: get_database(store, LMDB_DB_ORPHAN_HEADER_ACCUMULATED_DATA)?, + monero_seed_height_db: get_database(store, LMDB_DB_MONERO_SEED_HEIGHT)?, + orphan_chain_tips_db: get_database(store, LMDB_DB_ORPHAN_CHAIN_TIPS)?, + orphan_parent_map_index: get_database(store, LMDB_DB_ORPHAN_PARENT_MAP_INDEX)?, + bad_blocks: get_database(store, LMDB_DB_BAD_BLOCK_LIST)?, + reorgs: get_database(store, LMDB_DB_REORGS)?, env, env_config: store.env_config(), _file_lock: Arc::new(file_lock), @@ -262,6 +263,7 @@ impl LMDBDatabase { WriteTransaction::new(&*self.env).map_err(Into::into) } + #[allow(clippy::too_many_lines)] fn apply_db_transaction(&mut self, txn: &DbTransaction) -> Result<(), ChainStorageError> { #[allow(clippy::enum_glob_use)] use WriteOperation::*; @@ -382,38 +384,38 @@ impl LMDBDatabase { self.set_metadata( &write_txn, MetadataKey::ChainHeight, - MetadataValue::ChainHeight(*height), + &MetadataValue::ChainHeight(*height), )?; 
self.set_metadata( &write_txn, MetadataKey::BestBlock, - MetadataValue::BestBlock(hash.clone()), + &MetadataValue::BestBlock(hash.clone()), )?; self.set_metadata( &write_txn, MetadataKey::AccumulatedWork, - MetadataValue::AccumulatedWork(*accumulated_difficulty), + &MetadataValue::AccumulatedWork(*accumulated_difficulty), )?; }, SetPruningHorizonConfig(pruning_horizon) => { self.set_metadata( &write_txn, MetadataKey::PruningHorizon, - MetadataValue::PruningHorizon(*pruning_horizon), + &MetadataValue::PruningHorizon(*pruning_horizon), )?; }, SetPrunedHeight { height } => { self.set_metadata( &write_txn, MetadataKey::PrunedHeight, - MetadataValue::PrunedHeight(*height), + &MetadataValue::PrunedHeight(*height), )?; }, SetHorizonData { horizon_data } => { self.set_metadata( &write_txn, MetadataKey::HorizonData, - MetadataValue::HorizonData(horizon_data.clone()), + &MetadataValue::HorizonData(horizon_data.clone()), )?; }, InsertBadBlock { hash, height } => { @@ -732,9 +734,9 @@ impl LMDBDatabase { &self, txn: &WriteTransaction<'_>, k: MetadataKey, - v: MetadataValue, + v: &MetadataValue, ) -> Result<(), ChainStorageError> { - lmdb_replace(txn, &self.metadata_db, &k.as_u32(), &v)?; + lmdb_replace(txn, &self.metadata_db, &k.as_u32(), v)?; Ok(()) } @@ -1143,6 +1145,8 @@ impl LMDBDatabase { Ok(()) } + // Break function up into smaller pieces + #[allow(clippy::too_many_lines)] fn insert_block_body( &self, txn: &WriteTransaction<'_>, @@ -1206,7 +1210,10 @@ impl LMDBDatabase { "Inserting kernel `{}`", kernel.excess_sig.get_signature().to_hex() ); - self.insert_kernel(txn, &block_hash, &kernel, pos as u32)?; + let pos = u32::try_from(pos).map_err(|_| { + ChainStorageError::InvalidOperation(format!("Kernel MMR node count ({}) is greater than u32::MAX", pos)) + })?; + self.insert_kernel(txn, &block_hash, &kernel, pos)?; } let mut output_mmr = MutableMmr::::new(pruned_output_set, Bitmap::create())?; let mut witness_mmr = MerkleMountainRange::::new(pruned_proof_set); @@ -1254,7 
+1261,13 @@ impl LMDBDatabase { for (output, mmr_count) in outputs { trace!(target: LOG_TARGET, "Inserting output `{}`", output.commitment.to_hex()); - self.insert_output(txn, &block_hash, header.height, &output, mmr_count as u32 - 1)?; + let mmr_count = u32::try_from(mmr_count).map(|c| c - 1).map_err(|_| { + ChainStorageError::InvalidOperation(format!( + "Output MMR node count ({}) is greater than u32::MAX", + mmr_count + )) + })?; + self.insert_output(txn, &block_hash, header.height, &output, mmr_count)?; } for commitment in spent_zero_conf_commitments { diff --git a/base_layer/core/src/consensus/consensus_constants.rs b/base_layer/core/src/consensus/consensus_constants.rs index 3f4a833fa6..f1d13a0085 100644 --- a/base_layer/core/src/consensus/consensus_constants.rs +++ b/base_layer/core/src/consensus/consensus_constants.rs @@ -158,6 +158,9 @@ impl ConsensusConstants { /// This returns the FTL (Future Time Limit) for blocks. /// Any block with a timestamp greater than this is rejected. pub fn ftl(&self) -> EpochTime { + // Timestamp never negative + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_wrap)] (Utc::now() .add(Duration::seconds(self.future_time_limit as i64)) .timestamp() as u64) @@ -168,6 +171,8 @@ impl ConsensusConstants { /// Any block with a timestamp greater than this is rejected. 
/// This function returns the FTL as a UTC datetime pub fn ftl_as_time(&self) -> DateTime { + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_wrap)] Utc::now().add(Duration::seconds(self.future_time_limit as i64)) } diff --git a/base_layer/core/src/covenants/filters/xor.rs b/base_layer/core/src/covenants/filters/xor.rs index b93f946a3f..d587411941 100644 --- a/base_layer/core/src/covenants/filters/xor.rs +++ b/base_layer/core/src/covenants/filters/xor.rs @@ -35,7 +35,7 @@ impl Filter for XorFilter { let mut output_set_b = output_set.clone(); b.filter(context, &mut output_set_b)?; - output_set.set(output_set_a.symmetric_difference(output_set_b)); + output_set.set(output_set_a.symmetric_difference(&output_set_b)); Ok(()) } } diff --git a/base_layer/core/src/covenants/output_set.rs b/base_layer/core/src/covenants/output_set.rs index 30e5aa2a7a..8ab2c076c5 100644 --- a/base_layer/core/src/covenants/output_set.rs +++ b/base_layer/core/src/covenants/output_set.rs @@ -76,7 +76,7 @@ impl<'a> OutputSet<'a> { self.0.difference(&other.0).copied().collect() } - pub fn symmetric_difference(&self, other: Self) -> Self { + pub fn symmetric_difference(&self, other: &Self) -> Self { self.0.symmetric_difference(&other.0).copied().collect() } diff --git a/base_layer/core/src/lib.rs b/base_layer/core/src/lib.rs index cca7e2875b..85f7afadad 100644 --- a/base_layer/core/src/lib.rs +++ b/base_layer/core/src/lib.rs @@ -54,7 +54,7 @@ mod common; #[allow(clippy::ptr_offset_with_cast)] #[allow(clippy::assign_op_pattern)] #[allow(clippy::manual_range_contains)] -// #[allow(clippy::fallible_impl_from)] +#[allow(clippy::range_plus_one)] pub mod large_ints { uint::construct_uint! { /// 256-bit unsigned integer. 
diff --git a/base_layer/core/src/mempool/mempool_storage.rs b/base_layer/core/src/mempool/mempool_storage.rs index a97a0dd62e..d776091f9f 100644 --- a/base_layer/core/src/mempool/mempool_storage.rs +++ b/base_layer/core/src/mempool/mempool_storage.rs @@ -279,9 +279,9 @@ impl MempoolStorage { pub fn stats(&self) -> StatsResponse { let weighting = self.get_transaction_weighting(0); StatsResponse { - total_txs: self.len(), - unconfirmed_txs: self.unconfirmed_pool.len(), - reorg_txs: self.reorg_pool.len(), + total_txs: self.len() as u64, + unconfirmed_txs: self.unconfirmed_pool.len() as u64, + reorg_txs: self.reorg_pool.len() as u64, total_weight: self.unconfirmed_pool.calculate_weight(&weighting), } } diff --git a/base_layer/core/src/mempool/mod.rs b/base_layer/core/src/mempool/mod.rs index 7ce286230b..017e6c4e8e 100644 --- a/base_layer/core/src/mempool/mod.rs +++ b/base_layer/core/src/mempool/mod.rs @@ -78,9 +78,9 @@ use crate::transactions::transaction_components::Transaction; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct StatsResponse { - pub total_txs: usize, - pub unconfirmed_txs: usize, - pub reorg_txs: usize, + pub total_txs: u64, + pub unconfirmed_txs: u64, + pub reorg_txs: u64, pub total_weight: u64, } diff --git a/base_layer/core/src/mempool/priority/prioritized_transaction.rs b/base_layer/core/src/mempool/priority/prioritized_transaction.rs index 7f48128f38..fd93bfa726 100644 --- a/base_layer/core/src/mempool/priority/prioritized_transaction.rs +++ b/base_layer/core/src/mempool/priority/prioritized_transaction.rs @@ -40,6 +40,8 @@ impl FeePriority { pub fn new(transaction: &Transaction, weight: u64) -> Self { // The weights have been normalised, so the fee priority is now equal to the fee per gram ± a few pct points // Include 3 decimal places before flooring + #[allow(clippy::cast_possible_truncation)] + #[allow(clippy::cast_sign_loss)] let fee_per_byte = ((transaction.body.get_total_fee().as_u64() as f64 / weight as f64) * 1000.0) 
as u64; // Big-endian used here, the MSB is in the starting index. The ordering for Vec is big-endian and the // unconfirmed pool expects the lowest priority to be sorted lowest to highest in the BTreeMap diff --git a/base_layer/core/src/mempool/proto/stats_response.rs b/base_layer/core/src/mempool/proto/stats_response.rs index c1c2abc103..2dea6e7e92 100644 --- a/base_layer/core/src/mempool/proto/stats_response.rs +++ b/base_layer/core/src/mempool/proto/stats_response.rs @@ -29,9 +29,9 @@ impl TryFrom for StatsResponse { fn try_from(stats: ProtoStatsResponse) -> Result { Ok(Self { - total_txs: stats.total_txs as usize, - unconfirmed_txs: stats.unconfirmed_txs as usize, - reorg_txs: stats.reorg_txs as usize, + total_txs: stats.total_txs, + unconfirmed_txs: stats.unconfirmed_txs, + reorg_txs: stats.reorg_txs, total_weight: stats.total_weight, }) } @@ -40,9 +40,9 @@ impl TryFrom for StatsResponse { impl From for ProtoStatsResponse { fn from(stats: StatsResponse) -> Self { Self { - total_txs: stats.total_txs as u64, - unconfirmed_txs: stats.unconfirmed_txs as u64, - reorg_txs: stats.reorg_txs as u64, + total_txs: stats.total_txs, + unconfirmed_txs: stats.unconfirmed_txs, + reorg_txs: stats.reorg_txs, total_weight: stats.total_weight, } } diff --git a/base_layer/core/src/mempool/reorg_pool/reorg_pool.rs b/base_layer/core/src/mempool/reorg_pool/reorg_pool.rs index 0ead11edb6..8cc5f55a60 100644 --- a/base_layer/core/src/mempool/reorg_pool/reorg_pool.rs +++ b/base_layer/core/src/mempool/reorg_pool/reorg_pool.rs @@ -310,6 +310,8 @@ impl ReorgPool { } } + #[allow(clippy::cast_possible_truncation)] + #[allow(clippy::cast_sign_loss)] pub fn compact(&mut self) { fn shrink_hashmap(map: &mut HashMap) -> (usize, usize) { let cap = map.capacity(); diff --git a/base_layer/core/src/mempool/service/inbound_handlers.rs b/base_layer/core/src/mempool/service/inbound_handlers.rs index a04111fcd2..96070ab1d7 100644 --- a/base_layer/core/src/mempool/service/inbound_handlers.rs +++ 
b/base_layer/core/src/mempool/service/inbound_handlers.rs @@ -145,6 +145,7 @@ impl MempoolInboundHandlers { } } + #[allow(clippy::cast_possible_wrap)] async fn update_pool_size_metrics(&self) { if let Ok(stats) = self.mempool.stats().await { metrics::unconfirmed_pool_size().set(stats.unconfirmed_txs as i64); diff --git a/base_layer/core/src/mempool/sync_protocol/mod.rs b/base_layer/core/src/mempool/sync_protocol/mod.rs index 32f81b65c5..952f562249 100644 --- a/base_layer/core/src/mempool/sync_protocol/mod.rs +++ b/base_layer/core/src/mempool/sync_protocol/mod.rs @@ -423,6 +423,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin self.write_transactions(transactions).await?; // Generate an index list of inventory indexes that this node does not have + #[allow(clippy::cast_possible_truncation)] let missing_items = inventory .items .into_iter() @@ -482,9 +483,13 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin } } - let stats = self.mempool.stats().await?; - metrics::unconfirmed_pool_size().set(stats.unconfirmed_txs as i64); - metrics::reorg_pool_size().set(stats.reorg_txs as i64); + #[allow(clippy::cast_possible_truncation)] + #[allow(clippy::cast_possible_wrap)] + { + let stats = self.mempool.stats().await?; + metrics::unconfirmed_pool_size().set(stats.unconfirmed_txs as i64); + metrics::reorg_pool_size().set(stats.reorg_txs as i64); + } Ok(()) } diff --git a/base_layer/core/src/mempool/unconfirmed_pool/unconfirmed_pool.rs b/base_layer/core/src/mempool/unconfirmed_pool/unconfirmed_pool.rs index eea18ba914..1a2ec88293 100644 --- a/base_layer/core/src/mempool/unconfirmed_pool/unconfirmed_pool.rs +++ b/base_layer/core/src/mempool/unconfirmed_pool/unconfirmed_pool.rs @@ -566,6 +566,8 @@ impl UnconfirmedPool { key } + #[allow(clippy::cast_possible_truncation)] + #[allow(clippy::cast_sign_loss)] pub fn compact(&mut self) { fn shrink_hashmap(map: &mut HashMap) -> (usize, usize) { let cap = map.capacity(); diff --git a/base_layer/core/src/proof_of_work/lwma_diff.rs 
b/base_layer/core/src/proof_of_work/lwma_diff.rs index cb964661c2..f5de6dcaf2 100644 --- a/base_layer/core/src/proof_of_work/lwma_diff.rs +++ b/base_layer/core/src/proof_of_work/lwma_diff.rs @@ -75,6 +75,7 @@ impl LinearWeightedMovingAverage { } // k is the sum of weights (1+2+..+n) * target_time let k = n * (n + 1) * self.target_time / 2; + #[allow(clippy::cast_possible_truncation)] let target = (ave_difficulty * k / weighted_times) as u64; trace!( target: LOG_TARGET, diff --git a/base_layer/core/src/proof_of_work/monero_rx/fixed_array.rs b/base_layer/core/src/proof_of_work/monero_rx/fixed_array.rs index 7979073fe9..9ec1970cd4 100644 --- a/base_layer/core/src/proof_of_work/monero_rx/fixed_array.rs +++ b/base_layer/core/src/proof_of_work/monero_rx/fixed_array.rs @@ -93,11 +93,12 @@ impl Default for FixedByteArray { impl ByteArray for FixedByteArray { fn from_bytes(bytes: &[u8]) -> Result { - let len = u8::try_from(bytes.len()).map_err(|_| ByteArrayError::IncorrectLength)?; - if len > MAX_ARR_SIZE as u8 { + if bytes.len() > MAX_ARR_SIZE { return Err(ByteArrayError::IncorrectLength); } + let len = u8::try_from(bytes.len()).map_err(|_| ByteArrayError::IncorrectLength)?; + let mut elems = [0u8; MAX_ARR_SIZE]; elems[..len as usize].copy_from_slice(&bytes[..len as usize]); Ok(Self { elems, len }) @@ -110,6 +111,7 @@ impl ByteArray for FixedByteArray { impl Decodable for FixedByteArray { fn consensus_decode(d: &mut D) -> Result { + #[allow(clippy::cast_possible_truncation)] let len = VarInt::consensus_decode(d)?.0 as usize; if len > MAX_ARR_SIZE { return Err(encode::Error::ParseFailed( diff --git a/base_layer/core/src/proof_of_work/monero_rx/helpers.rs b/base_layer/core/src/proof_of_work/monero_rx/helpers.rs index 96e1a1fae3..a1c7f05021 100644 --- a/base_layer/core/src/proof_of_work/monero_rx/helpers.rs +++ b/base_layer/core/src/proof_of_work/monero_rx/helpers.rs @@ -122,6 +122,7 @@ pub fn construct_monero_data(block: monero::Block, seed: FixedByteArray) -> Resu 
.to_string(), ) })?; + #[allow(clippy::cast_possible_truncation)] Ok(MoneroPowData { header: block.header, randomx_key: seed, @@ -266,6 +267,7 @@ mod test { let header = consensus::serialize::(&block.header); let tx_count = 1 + block.tx_hashes.len() as u64; let mut count = consensus::serialize::(&VarInt(tx_count)); + #[allow(clippy::cast_possible_truncation)] let mut hashes = Vec::with_capacity(tx_count as usize); hashes.push(block.miner_tx.hash()); for item in block.clone().tx_hashes { @@ -314,7 +316,7 @@ mod test { let monero_data = MoneroPowData { header: block.header, randomx_key: FixedByteArray::from_bytes(&from_hex(&seed_hash).unwrap()).unwrap(), - transaction_count: hashes.len() as u16, + transaction_count: u16::try_from(hashes.len()).unwrap(), merkle_root: root, coinbase_merkle_proof, coinbase_tx: block.miner_tx, diff --git a/base_layer/core/src/proof_of_work/sha3_pow.rs b/base_layer/core/src/proof_of_work/sha3_pow.rs index 768202b5f1..d6d7c906ae 100644 --- a/base_layer/core/src/proof_of_work/sha3_pow.rs +++ b/base_layer/core/src/proof_of_work/sha3_pow.rs @@ -88,9 +88,11 @@ pub mod test { pub fn get_header() -> BlockHeader { let mut header = BlockHeader::new(0); - header.timestamp = EpochTime::from_secs_since_epoch( - DateTime::::from_utc(NaiveDate::from_ymd(2000, 1, 1).and_hms(1, 1, 1), Utc).timestamp() as u64, - ); + + #[allow(clippy::cast_sign_loss)] + let epoch_secs = + DateTime::::from_utc(NaiveDate::from_ymd(2000, 1, 1).and_hms(1, 1, 1), Utc).timestamp() as u64; + header.timestamp = EpochTime::from_secs_since_epoch(epoch_secs); header.pow.pow_algo = PowAlgorithm::Sha3; header } diff --git a/base_layer/core/src/proto/block_header.rs b/base_layer/core/src/proto/block_header.rs index fc41802c78..ad2e53caf9 100644 --- a/base_layer/core/src/proto/block_header.rs +++ b/base_layer/core/src/proto/block_header.rs @@ -46,8 +46,8 @@ impl TryFrom for BlockHeader { let timestamp = header .timestamp - .map(timestamp_to_datetime) - .ok_or_else(|| "timestamp not 
provided".to_string())?; + .and_then(timestamp_to_datetime) + .ok_or_else(|| "timestamp not provided or is negative".to_string())?; let pow = match header.pow { Some(p) => ProofOfWork::try_from(p)?, @@ -74,11 +74,12 @@ impl TryFrom for BlockHeader { impl From for proto::BlockHeader { fn from(header: BlockHeader) -> Self { + let timestamp = datetime_to_timestamp(header.timestamp).unwrap(); Self { version: u32::try_from(header.version).unwrap(), height: header.height, prev_hash: header.prev_hash, - timestamp: Some(datetime_to_timestamp(header.timestamp)), + timestamp: Some(timestamp), output_mr: header.output_mr, witness_mr: header.witness_mr, kernel_mr: header.kernel_mr, diff --git a/base_layer/core/src/proto/transaction.rs b/base_layer/core/src/proto/transaction.rs index e7a8374f07..d1d51ec1d1 100644 --- a/base_layer/core/src/proto/transaction.rs +++ b/base_layer/core/src/proto/transaction.rs @@ -78,12 +78,13 @@ impl TryFrom for TransactionKernel { .ok_or_else(|| "excess_sig not provided".to_string())? .try_into() .map_err(|err: ByteArrayError| err.to_string())?; + let kernel_features = u8::try_from(kernel.features).map_err(|_| "Kernel features must be a single byte")?; Ok(TransactionKernel::new( TransactionKernelVersion::try_from( u8::try_from(kernel.version).map_err(|_| "Invalid version: overflowed u8")?, )?, - KernelFeatures::from_bits(kernel.features as u8) + KernelFeatures::from_bits(kernel_features) .ok_or_else(|| "Invalid or unrecognised kernel feature flag".to_string())?, MicroTari::from(kernel.fee), kernel.lock_height, @@ -281,12 +282,13 @@ impl TryFrom for OutputFeatures { Some(PublicKey::from_bytes(features.parent_public_key.as_bytes()).map_err(|err| format!("{:?}", err))?) 
}; + let flags = u8::try_from(features.flags).map_err(|_| "Invalid output flags: overflowed u8")?; + Ok(OutputFeatures::new( OutputFeaturesVersion::try_from( u8::try_from(features.version).map_err(|_| "Invalid version: overflowed u8")?, )?, - OutputFlags::from_bits(features.flags as u8) - .ok_or_else(|| "Invalid or unrecognised output flags".to_string())?, + OutputFlags::from_bits(flags).ok_or_else(|| "Invalid or unrecognised output flags".to_string())?, features.maturity, u8::try_from(features.recovery_byte).map_err(|_| "Invalid recovery byte: overflowed u8")?, features.metadata, diff --git a/base_layer/core/src/proto/utils.rs b/base_layer/core/src/proto/utils.rs index 729e31ddb1..0033681318 100644 --- a/base_layer/core/src/proto/utils.rs +++ b/base_layer/core/src/proto/utils.rs @@ -20,18 +20,19 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+use std::convert::TryFrom; + use prost_types::Timestamp; use tari_utilities::epoch_time::EpochTime; /// Utility function that converts a `prost::Timestamp` to a `chrono::DateTime` -pub(crate) fn timestamp_to_datetime(timestamp: Timestamp) -> EpochTime { - (timestamp.seconds as u64).into() +/// Returns None if the timestamp is negative +pub(super) fn timestamp_to_datetime(timestamp: Timestamp) -> Option { + u64::try_from(timestamp.seconds).ok().map(Into::into) } /// Utility function that converts a `chrono::DateTime` to a `prost::Timestamp` -pub(crate) fn datetime_to_timestamp(datetime: EpochTime) -> Timestamp { - Timestamp { - seconds: datetime.as_u64() as i64, - nanos: 0, - } +pub(super) fn datetime_to_timestamp(datetime: EpochTime) -> Option { + let seconds = i64::try_from(datetime.as_u64()).ok()?; + Some(Timestamp { seconds, nanos: 0 }) } diff --git a/base_layer/core/src/transactions/format_currency.rs b/base_layer/core/src/transactions/format_currency.rs index 74047a4ab2..61d73fea14 100644 --- a/base_layer/core/src/transactions/format_currency.rs +++ b/base_layer/core/src/transactions/format_currency.rs @@ -33,13 +33,12 @@ pub fn format_currency(value: &str, separator: char) -> String { let mut buffer = String::with_capacity(full_len / 3 + full_len); let mut iter = value.splitn(2, '.'); let whole = iter.next().unwrap_or(""); - let mut idx = whole.len() as isize - 1; - for c in whole.chars() { + for (i, c) in whole.chars().enumerate() { buffer.push(c); + let idx = whole.len() - i - 1; if idx > 0 && idx % 3 == 0 { buffer.push(separator); } - idx -= 1; } if let Some(decimal) = iter.next() { buffer.push('.'); @@ -58,6 +57,7 @@ mod test { assert_eq!("0.00", format_currency("0.00", ',')); assert_eq!("0.000000000000", format_currency("0.000000000000", ',')); assert_eq!("123,456.123456789", format_currency("123456.123456789", ',')); + assert_eq!("1,123,123,456.123456789", format_currency("1123123456.123456789", ',')); assert_eq!("123,456", format_currency("123456", 
',')); assert_eq!("123", format_currency("123", ',')); assert_eq!("7,123", format_currency("7123", ',')); diff --git a/base_layer/core/src/transactions/test_helpers.rs b/base_layer/core/src/transactions/test_helpers.rs index 0005152368..872727af38 100644 --- a/base_layer/core/src/transactions/test_helpers.rs +++ b/base_layer/core/src/transactions/test_helpers.rs @@ -625,6 +625,7 @@ pub fn spend_utxos(schema: TransactionSchema) -> (Transaction, Vec (SenderTransactionProtocol, Vec) { let factories = CryptoFactories::default(); let test_params_change_and_txn = TestParams::new(); diff --git a/base_layer/core/src/transactions/transaction_protocol/recipient.rs b/base_layer/core/src/transactions/transaction_protocol/recipient.rs index 253519c170..fdf0341481 100644 --- a/base_layer/core/src/transactions/transaction_protocol/recipient.rs +++ b/base_layer/core/src/transactions/transaction_protocol/recipient.rs @@ -244,7 +244,7 @@ mod test { ..Default::default() }; let msg = SingleRoundSenderData { - tx_id: 15.into(), + tx_id: 15u64.into(), amount, public_excess: PublicKey::from_secret_key(&p.spend_key), // any random key will do public_nonce: PublicKey::from_secret_key(&p.change_spend_key), // any random key will do @@ -306,7 +306,7 @@ mod test { ..Default::default() }; let msg = SingleRoundSenderData { - tx_id: 15.into(), + tx_id: 15u64.into(), amount, public_excess: PublicKey::from_secret_key(&p.spend_key), // any random key will do public_nonce: PublicKey::from_secret_key(&p.change_spend_key), // any random key will do diff --git a/base_layer/core/src/transactions/transaction_protocol/sender.rs b/base_layer/core/src/transactions/transaction_protocol/sender.rs index 8009ed10dc..df3fc94a8d 100644 --- a/base_layer/core/src/transactions/transaction_protocol/sender.rs +++ b/base_layer/core/src/transactions/transaction_protocol/sender.rs @@ -807,54 +807,42 @@ mod test { #[test] fn test_errors() { - let stp = SenderTransactionProtocol { + let mut stp = SenderTransactionProtocol 
{ state: SenderState::Failed(TransactionProtocolError::InvalidStateError), }; - assert_eq!( - stp.clone().get_transaction(), - Err(TransactionProtocolError::InvalidStateError) - ); + assert_eq!(stp.get_transaction(), Err(TransactionProtocolError::InvalidStateError)); assert_eq!( stp.clone().take_transaction(), Err(TransactionProtocolError::InvalidStateError) ); - assert_eq!(stp.clone().check_tx_id(0.into()), false); - assert_eq!( - stp.clone().get_tx_id(), - Err(TransactionProtocolError::InvalidStateError) - ); - assert_eq!( - stp.clone().get_total_amount(), - Err(TransactionProtocolError::InvalidStateError) - ); - assert_eq!( - stp.clone().get_amount_to_self(), - Err(TransactionProtocolError::InvalidStateError) - ); + assert!(!stp.check_tx_id(0u64.into())); + assert_eq!(stp.get_tx_id(), Err(TransactionProtocolError::InvalidStateError)); + assert_eq!(stp.get_total_amount(), Err(TransactionProtocolError::InvalidStateError)); assert_eq!( - stp.clone().get_change_amount(), + stp.get_amount_to_self(), Err(TransactionProtocolError::InvalidStateError) ); assert_eq!( - stp.clone().get_change_unblinded_output(), + stp.get_change_amount(), Err(TransactionProtocolError::InvalidStateError) ); assert_eq!( - stp.clone().get_change_output_metadata_signature(), + stp.get_change_unblinded_output(), Err(TransactionProtocolError::InvalidStateError) ); assert_eq!( - stp.clone().get_change_sender_offset_public_key(), + stp.get_change_output_metadata_signature(), Err(TransactionProtocolError::InvalidStateError) ); assert_eq!( - stp.clone().get_recipient_sender_offset_private_key(0), + stp.get_change_sender_offset_public_key(), Err(TransactionProtocolError::InvalidStateError) ); assert_eq!( - stp.clone().get_fee_amount(), + stp.get_recipient_sender_offset_private_key(0), Err(TransactionProtocolError::InvalidStateError) ); + assert_eq!(stp.get_fee_amount(), Err(TransactionProtocolError::InvalidStateError)); assert_eq!( stp.clone().build_single_round_message(), 
Err(TransactionProtocolError::InvalidStateError) @@ -867,7 +855,7 @@ mod test { stp.clone().get_single_round_message(), Err(TransactionProtocolError::InvalidStateError) ); - assert_eq!(stp.clone().sign(), Err(TransactionProtocolError::InvalidStateError)); + assert_eq!(stp.sign(), Err(TransactionProtocolError::InvalidStateError)); } #[test] diff --git a/base_layer/core/src/transactions/transaction_protocol/single_receiver.rs b/base_layer/core/src/transactions/transaction_protocol/single_receiver.rs index 412c11efc9..b2d9c11c68 100644 --- a/base_layer/core/src/transactions/transaction_protocol/single_receiver.rs +++ b/base_layer/core/src/transactions/transaction_protocol/single_receiver.rs @@ -196,7 +196,7 @@ mod test { let public_commitment_nonce = PublicKey::from_secret_key(&private_commitment_nonce); let script = TariScript::default(); let info = SingleRoundSenderData { - tx_id: 500.into(), + tx_id: 500u64.into(), amount: MicroTari(1500), public_excess: pub_xs, public_nonce: pub_rs.clone(), diff --git a/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs b/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs index bc2e329c8a..7b57a532fc 100644 --- a/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs +++ b/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs @@ -459,6 +459,7 @@ impl SenderTransactionInitializer { /// error (so that you can continue building) along with a string listing the missing fields. /// If all the input data is present, but one or more fields are invalid, the function will return a /// `SenderTransactionProtocol` instance in the Failed state. 
+ #[allow(clippy::too_many_lines)] pub fn build( mut self, factories: &CryptoFactories, diff --git a/base_layer/core/src/validation/error.rs b/base_layer/core/src/validation/error.rs index f253a8ee6b..5f9226a058 100644 --- a/base_layer/core/src/validation/error.rs +++ b/base_layer/core/src/validation/error.rs @@ -125,7 +125,7 @@ impl From for ValidationError { } impl ValidationError { - pub fn custom_error(err: T) -> Self { - ValidationError::CustomError(err.to_string()) + pub fn custom_error>(err: T) -> Self { + ValidationError::CustomError(err.into()) } } diff --git a/base_layer/core/src/validation/header_iter.rs b/base_layer/core/src/validation/header_iter.rs index e1f99a7c39..e6b7189574 100644 --- a/base_layer/core/src/validation/header_iter.rs +++ b/base_layer/core/src/validation/header_iter.rs @@ -53,6 +53,7 @@ impl<'a, B> HeaderIter<'a, B> { } fn next_chunk(&self) -> (u64, u64) { + #[allow(clippy::cast_possible_truncation)] let upper_bound = cmp::min(self.cursor + self.chunk_size, self.height as usize); (self.cursor as u64, upper_bound as u64) } diff --git a/base_layer/core/src/validation/test.rs b/base_layer/core/src/validation/test.rs index bba0705dc4..62615f497e 100644 --- a/base_layer/core/src/validation/test.rs +++ b/base_layer/core/src/validation/test.rs @@ -125,6 +125,7 @@ mod header_validators { } #[test] +#[allow(clippy::too_many_lines)] fn chain_balance_validation() { let factories = CryptoFactories::default(); let consensus_manager = ConsensusManagerBuilder::new(Network::Dibbler).build(); diff --git a/base_layer/core/tests/block_validation.rs b/base_layer/core/tests/block_validation.rs index 95cd24f7e8..f53e9e2334 100644 --- a/base_layer/core/tests/block_validation.rs +++ b/base_layer/core/tests/block_validation.rs @@ -178,6 +178,7 @@ fn add_monero_data(tblock: &mut Block, seed_key: &str) { let hashes = monero_rx::create_ordered_transaction_hashes_from_block(&mblock); let merkle_root = monero_rx::tree_hash(&hashes).unwrap(); let 
coinbase_merkle_proof = monero_rx::create_merkle_proof(&hashes, &hashes[0]).unwrap(); + #[allow(clippy::cast_possible_truncation)] let monero_data = MoneroPowData { header: mblock.header, randomx_key: FixedByteArray::from_hex(seed_key).unwrap(), @@ -362,6 +363,7 @@ OutputFeatures::default()), } #[test] +#[allow(clippy::too_many_lines)] fn test_orphan_body_validation() { let factories = CryptoFactories::default(); let network = Network::Weatherwax; @@ -674,6 +676,7 @@ OutputFeatures::default()), } #[tokio::test] +#[allow(clippy::too_many_lines)] async fn test_block_sync_body_validator() { let factories = CryptoFactories::default(); let network = Network::Weatherwax; diff --git a/base_layer/core/tests/chain_storage_tests/chain_storage.rs b/base_layer/core/tests/chain_storage_tests/chain_storage.rs index 0fde988c44..93f7ca65b4 100644 --- a/base_layer/core/tests/chain_storage_tests/chain_storage.rs +++ b/base_layer/core/tests/chain_storage_tests/chain_storage.rs @@ -351,6 +351,7 @@ fn handle_tip_reorg() { #[test] #[allow(clippy::identity_op)] +#[allow(clippy::too_many_lines)] fn handle_reorg() { // GB --> A1 --> A2 --> A3 -----> A4(Low PoW) [Main Chain] // \--> B2 --> B3(?) 
--> B4(Medium PoW) [Forked Chain 1] @@ -523,6 +524,7 @@ fn handle_reorg() { } #[test] +#[allow(clippy::too_many_lines)] fn reorgs_should_update_orphan_tips() { // Create a main chain GB -> A1 -> A2 // Create an orphan chain GB -> B1 @@ -1011,6 +1013,7 @@ fn store_and_retrieve_blocks() { #[test] #[allow(clippy::erasing_op)] +#[allow(clippy::too_many_lines)] fn asset_unique_id() { let mut rng = rand::thread_rng(); let network = Network::LocalNet; @@ -1219,7 +1222,7 @@ fn restore_metadata_and_pruning_horizon_update() { db, rules.clone(), validators.clone(), - config.clone(), + config, DifficultyCalculator::new(rules.clone(), Default::default()), ) .unwrap(); @@ -1242,7 +1245,7 @@ fn restore_metadata_and_pruning_horizon_update() { db, rules.clone(), validators.clone(), - config.clone(), + config, DifficultyCalculator::new(rules.clone(), Default::default()), ) .unwrap(); @@ -1500,6 +1503,7 @@ fn horizon_height_orphan_cleanup() { } #[test] +#[allow(clippy::too_many_lines)] fn orphan_cleanup_on_reorg() { // Create Main Chain let network = Network::LocalNet; @@ -1667,7 +1671,7 @@ fn orphan_cleanup_delete_all_orphans() { db, consensus_manager.clone(), validators.clone(), - config.clone(), + config, DifficultyCalculator::new(consensus_manager.clone(), Default::default()), ) .unwrap(); @@ -1720,7 +1724,7 @@ fn orphan_cleanup_delete_all_orphans() { db, consensus_manager.clone(), validators.clone(), - config.clone(), + config, DifficultyCalculator::new(consensus_manager.clone(), Default::default()), ) .unwrap(); diff --git a/base_layer/core/tests/helpers/block_builders.rs b/base_layer/core/tests/helpers/block_builders.rs index b58167ce10..259834cf60 100644 --- a/base_layer/core/tests/helpers/block_builders.rs +++ b/base_layer/core/tests/helpers/block_builders.rs @@ -243,6 +243,8 @@ pub fn create_genesis_block_with_utxos( } /// Create a new block using the provided transactions that adds to the blockchain given in `prev_block`. 
+// This function is used, unclear why clippy says it isn't. +#[allow(dead_code)] pub fn chain_block( prev_block: &Block, transactions: Vec, diff --git a/base_layer/core/tests/helpers/mod.rs b/base_layer/core/tests/helpers/mod.rs index 3271c943b1..aa139fabc6 100644 --- a/base_layer/core/tests/helpers/mod.rs +++ b/base_layer/core/tests/helpers/mod.rs @@ -14,7 +14,6 @@ pub mod database; pub mod event_stream; pub mod mock_state_machine; pub mod nodes; -pub mod pow_blockchain; pub mod sample_blockchains; pub mod test_block_builder; pub mod test_blockchain; diff --git a/base_layer/core/tests/helpers/pow_blockchain.rs b/base_layer/core/tests/helpers/pow_blockchain.rs deleted file mode 100644 index 1cc66f44d9..0000000000 --- a/base_layer/core/tests/helpers/pow_blockchain.rs +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2019. The Tari Project -// -// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the -// following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following -// disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the -// following disclaimer in the documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote -// products derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -use monero::{ - consensus, - consensus::deserialize, - cryptonote::hash::{Hash as MoneroHash, Hashable as MoneroHashable}, - Block as MoneroBlock, -}; -use tari_core::{ - blocks::Block, - chain_storage::{BlockchainBackend, BlockchainDatabase}, - consensus::{ConsensusConstants, ConsensusManager}, - proof_of_work::{ - lwma_diff::LinearWeightedMovingAverage, - monero_rx, - monero_rx::{FixedByteArray, MoneroPowData}, - Difficulty, - DifficultyAdjustment, - PowAlgorithm, - }, - test_helpers::blockchain::TempDatabase, -}; -use tari_utilities::hex::Hex; - -use super::block_builders::chain_block; - -#[allow(dead_code)] -pub fn create_test_pow_blockchain( - db: &BlockchainDatabase, - mut pow_algos: Vec, - consensus_manager: &ConsensusManager, -) { - // Remove the first as it will be replaced by the genesis block - pow_algos.remove(0); - let block0 = db.fetch_block(0).unwrap().block().clone(); - append_to_pow_blockchain(db, block0, pow_algos, consensus_manager); -} - -#[allow(dead_code)] -pub fn append_to_pow_blockchain( - db: &BlockchainDatabase, - chain_tip: Block, - pow_algos: Vec, - consensus_manager: &ConsensusManager, -) { - let mut prev_block = chain_tip; - for pow_algo in pow_algos { - let new_block = chain_block(&prev_block, Vec::new(), consensus_manager); - let mut new_block = db.prepare_new_block(new_block).unwrap(); - new_block.header.timestamp = prev_block.header.timestamp.increase(120); - new_block.header.pow.pow_algo = pow_algo; - - if 
new_block.header.pow.pow_algo == PowAlgorithm::Monero { - let blocktemplate_blob = "0c0c8cd6a0fa057fe21d764e7abf004e975396a2160773b93712bf6118c3b4959ddd8ee0f76aad0000000002e1ea2701ffa5ea2701d5a299e2abb002028eb3066ced1b2cc82ea046f3716a48e9ae37144057d5fb48a97f941225a1957b2b0106225b7ec0a6544d8da39abe68d8bd82619b4a7c5bdae89c3783b256a8fa47820208f63aa86d2e857f070000"; - let seed_hash = "9f02e032f9b15d2aded991e0f68cc3c3427270b568b782e55fbd269ead0bad97"; - let bytes = hex::decode(&blocktemplate_blob).unwrap(); - let mut block = deserialize::(&bytes[..]).unwrap(); - let hash = MoneroHash::from_slice(new_block.header.merged_mining_hash().as_ref()); - monero_rx::append_merge_mining_tag(&mut block, hash).unwrap(); - let count = 1 + (block.tx_hashes.len() as u16); - let mut hashes = Vec::with_capacity(count as usize); - let mut proof = Vec::with_capacity(count as usize); - hashes.push(block.miner_tx.hash()); - proof.push(block.miner_tx.hash()); - for item in block.clone().tx_hashes { - hashes.push(item); - proof.push(item); - } - let root = monero_rx::tree_hash(hashes.clone().as_ref()).unwrap(); - let monero_data = MoneroPowData { - header: block.header, - randomx_key: FixedByteArray::from_hex(seed_hash).unwrap(), - transaction_count: count, - merkle_root: root, - coinbase_merkle_proof: monero_rx::create_merkle_proof(&hashes, &hashes[0]).unwrap(), - coinbase_tx: block.miner_tx, - }; - new_block.header.pow.pow_data = consensus::serialize(&monero_data); - } - - db.add_block(new_block.clone().into()).unwrap(); - prev_block = new_block; - } -} - -// Calculated the accumulated difficulty for the selected blocks in the blockchain db. 
-#[allow(dead_code)] -pub fn calculate_accumulated_difficulty( - db: &BlockchainDatabase, - pow_algo: PowAlgorithm, - heights: Vec, - consensus_constants: &ConsensusConstants, -) -> Difficulty { - let mut lwma = LinearWeightedMovingAverage::new( - consensus_constants.get_difficulty_block_window() as usize, - consensus_constants.get_diff_target_block_interval(pow_algo), - consensus_constants.get_difficulty_max_block_interval(pow_algo), - ); - for height in heights { - let (header, accum) = db.fetch_chain_header(height).unwrap().into_parts(); - lwma.add(header.timestamp, accum.target_difficulty).unwrap(); - } - lwma.get_difficulty().unwrap() -} diff --git a/base_layer/core/tests/helpers/test_blockchain.rs b/base_layer/core/tests/helpers/test_blockchain.rs index 930b2ada6e..a28e2034c5 100644 --- a/base_layer/core/tests/helpers/test_blockchain.rs +++ b/base_layer/core/tests/helpers/test_blockchain.rs @@ -122,6 +122,7 @@ impl TestBlockchain { } pub fn outputs_at(&self, height: u64) -> &[UnblindedOutput] { + #[allow(clippy::cast_possible_truncation)] self.outputs.get(height as usize).unwrap() } diff --git a/base_layer/core/tests/mempool.rs b/base_layer/core/tests/mempool.rs index 17982d2d0e..2b6be2ce9f 100644 --- a/base_layer/core/tests/mempool.rs +++ b/base_layer/core/tests/mempool.rs @@ -74,6 +74,7 @@ mod helpers; #[tokio::test] #[allow(clippy::identity_op)] +#[allow(clippy::too_many_lines)] async fn test_insert_and_process_published_block() { let network = Network::LocalNet; let (mut store, mut blocks, mut outputs, consensus_manager) = create_new_blockchain(network); @@ -385,6 +386,7 @@ async fn test_retrieve() { #[tokio::test] #[allow(clippy::identity_op)] +#[allow(clippy::too_many_lines)] async fn test_zero_conf() { let network = Network::LocalNet; let (mut store, mut blocks, mut outputs, consensus_manager) = create_new_blockchain(network); @@ -782,6 +784,7 @@ async fn test_reorg() { static EMISSION: [u64; 2] = [10, 10]; #[tokio::test] 
+#[allow(clippy::too_many_lines)] #[allow(clippy::identity_op)] async fn receive_and_propagate_transaction() { let factories = CryptoFactories::default(); @@ -1009,6 +1012,7 @@ async fn consensus_validation_large_tx() { #[tokio::test] #[allow(clippy::erasing_op)] #[allow(clippy::identity_op)] +#[allow(clippy::too_many_lines)] async fn consensus_validation_versions() { use tari_core::transactions::transaction_components::{ OutputFeaturesVersion, @@ -1300,6 +1304,7 @@ async fn consensus_validation_unique_id() { #[tokio::test] #[allow(clippy::identity_op)] +#[allow(clippy::too_many_lines)] async fn block_event_and_reorg_event_handling() { // This test creates 2 nodes Alice and Bob // Then creates 2 chains B1 -> B2A (diff 1) and B1 -> B2B (diff 10) diff --git a/base_layer/core/tests/node_service.rs b/base_layer/core/tests/node_service.rs index 6fd9475e2c..bb03fb11fc 100644 --- a/base_layer/core/tests/node_service.rs +++ b/base_layer/core/tests/node_service.rs @@ -286,6 +286,7 @@ async fn propagate_and_forward_invalid_block_hash() { } #[tokio::test] +#[allow(clippy::too_many_lines)] async fn propagate_and_forward_invalid_block() { let temp_dir = tempdir().unwrap(); let factories = CryptoFactories::default(); diff --git a/base_layer/mmr/src/common.rs b/base_layer/mmr/src/common.rs index 5b24e9f809..581e5bc514 100644 --- a/base_layer/mmr/src/common.rs +++ b/base_layer/mmr/src/common.rs @@ -131,7 +131,6 @@ pub fn family_branch(pos: usize, last_pos: usize) -> Vec<(usize, usize)> { } /// The height of a node in a full binary tree from its index. 
-#[inline(always)] pub fn bintree_height(num: usize) -> usize { if num == 0 { return 0; diff --git a/base_layer/mmr/src/merkle_mountain_range.rs b/base_layer/mmr/src/merkle_mountain_range.rs index e948082dd3..14b0716482 100644 --- a/base_layer/mmr/src/merkle_mountain_range.rs +++ b/base_layer/mmr/src/merkle_mountain_range.rs @@ -85,7 +85,6 @@ where } /// Return the number of nodes in the full Merkle Mountain range, excluding bagged hashes - #[inline(always)] pub fn len(&self) -> Result { self.hashes .len() diff --git a/base_layer/mmr/src/mutable_mmr.rs b/base_layer/mmr/src/mutable_mmr.rs index 3c12cfefbe..10197ae12c 100644 --- a/base_layer/mmr/src/mutable_mmr.rs +++ b/base_layer/mmr/src/mutable_mmr.rs @@ -84,7 +84,6 @@ where /// nodes in the MMR, while this function returns the number of leaf nodes minus the number of nodes marked for /// deletion. #[allow(clippy::len_without_is_empty)] - #[inline(always)] pub fn len(&self) -> u32 { self.size - u32::try_from(self.deleted.cardinality()).unwrap() } diff --git a/base_layer/mmr/src/pruned_hashset.rs b/base_layer/mmr/src/pruned_hashset.rs index 4be35a927c..ae1e5b0119 100644 --- a/base_layer/mmr/src/pruned_hashset.rs +++ b/base_layer/mmr/src/pruned_hashset.rs @@ -78,7 +78,6 @@ impl ArrayLike for PrunedHashSet { type Error = MerkleMountainRangeError; type Value = Hash; - #[inline(always)] fn len(&self) -> Result { Ok(self.base_offset + self.hashes.len()) } diff --git a/base_layer/p2p/src/initialization.rs b/base_layer/p2p/src/initialization.rs index 648ae58419..e0e436440b 100644 --- a/base_layer/p2p/src/initialization.rs +++ b/base_layer/p2p/src/initialization.rs @@ -434,7 +434,6 @@ impl P2pInitializer { } // Following are inlined due to Rust ICE: https://github.com/rust-lang/rust/issues/73537 - #[inline(always)] fn try_parse_seed_peers(peer_seeds_str: &[String]) -> Result, ServiceInitializationError> { peer_seeds_str .iter() diff --git a/base_layer/p2p/src/services/utils.rs b/base_layer/p2p/src/services/utils.rs index 
57d7aa3612..1f1bc2a42a 100644 --- a/base_layer/p2p/src/services/utils.rs +++ b/base_layer/p2p/src/services/utils.rs @@ -40,7 +40,6 @@ where E: Debug { } } -#[allow(clippy::needless_pass_by_value)] pub fn map_decode(serialized: Arc) -> Result, prost::DecodeError> where T: prost::Message + Default { Ok(DomainMessage { diff --git a/base_layer/tari_mining_helper_ffi/src/lib.rs b/base_layer/tari_mining_helper_ffi/src/lib.rs index 9db2bb75ef..773b0901a0 100644 --- a/base_layer/tari_mining_helper_ffi/src/lib.rs +++ b/base_layer/tari_mining_helper_ffi/src/lib.rs @@ -30,7 +30,7 @@ mod error; use core::ptr; -use std::{ffi::CString, slice}; +use std::{convert::TryFrom, ffi::CString, slice}; use libc::{c_char, c_int, c_uchar, c_uint, c_ulonglong}; use tari_core::{ @@ -127,8 +127,8 @@ pub unsafe extern "C" fn byte_vector_get_at(ptr: *mut ByteVector, position: c_ui ptr::swap(error_out, &mut error as *mut c_int); return 0u8; } - let len = byte_vector_get_length(ptr, error_out) as c_int - 1; // clamp to length - if len < 0 || position > len as c_uint { + let len = byte_vector_get_length(ptr, error_out); + if len == 0 || position > len - 1 { error = MiningHelperError::from(InterfaceError::PositionInvalidError).code; ptr::swap(error_out, &mut error as *mut c_int); return 0u8; @@ -158,7 +158,14 @@ pub unsafe extern "C" fn byte_vector_get_length(vec: *const ByteVector, error_ou ptr::swap(error_out, &mut error as *mut c_int); return 0; } - (*vec).0.len() as c_uint + match c_uint::try_from((*vec).0.len()) { + Ok(v) => v, + Err(_) => { + error = MiningHelperError::from(InterfaceError::Conversion("byte_vector".to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + 0 + }, + } } /// Validates a hex string is convertible into a TariPublicKey @@ -369,6 +376,7 @@ mod tests { let block = create_test_block(); let mut header_bytes: Vec = Vec::new(); block.header.consensus_encode(&mut header_bytes).unwrap(); + #[allow(clippy::cast_possible_truncation)] let len = header_bytes.len() 
as u32; let byte_vec = byte_vector_create(header_bytes.as_ptr(), len, error_ptr); inject_nonce(byte_vec, NONCE, error_ptr); @@ -387,6 +395,7 @@ mod tests { let block = create_test_block(); let mut header_bytes: Vec = Vec::new(); block.header.consensus_encode(&mut header_bytes).unwrap(); + #[allow(clippy::cast_possible_truncation)] let len = header_bytes.len() as u32; let byte_vec = byte_vector_create(header_bytes.as_ptr(), len, error_ptr); inject_nonce(byte_vec, NONCE, error_ptr); @@ -409,6 +418,7 @@ mod tests { let mut share_difficulty = 24000; let mut header_bytes: Vec = Vec::new(); block.header.consensus_encode(&mut header_bytes).unwrap(); + #[allow(clippy::cast_possible_truncation)] let len = header_bytes.len() as u32; let byte_vec = byte_vector_create(header_bytes.as_ptr(), len, error_ptr); inject_nonce(byte_vec, NONCE, error_ptr); diff --git a/base_layer/wallet/src/contacts_service/service.rs b/base_layer/wallet/src/contacts_service/service.rs index 01ab8bc093..95470e8816 100644 --- a/base_layer/wallet/src/contacts_service/service.rs +++ b/base_layer/wallet/src/contacts_service/service.rs @@ -315,10 +315,11 @@ where T: ContactsBackend + 'static } fn is_online(&self, last_seen: NaiveDateTime) -> bool { - Utc::now().naive_utc().sub(last_seen) <= - chrono::Duration::seconds( - (self.contacts_online_ping_window as u64 * self.contacts_auto_ping_interval.as_secs()) as i64, - ) + #[allow(clippy::cast_possible_wrap)] + let ping_window = chrono::Duration::seconds( + (self.contacts_online_ping_window as u64 * self.contacts_auto_ping_interval.as_secs()) as i64, + ); + Utc::now().naive_utc().sub(last_seen) <= ping_window } async fn update_with_ping_pong( @@ -358,11 +359,11 @@ where T: ContactsBackend + 'static ); self.liveness_data.push(data.clone()); + trace!(target: LOG_TARGET, "{}", data); // Send only fails if there are no subscribers. 
let _size = self .event_publisher - .send(Arc::new(ContactsLivenessEvent::StatusUpdated(Box::new(data.clone())))); - trace!(target: LOG_TARGET, "{}", data); + .send(Arc::new(ContactsLivenessEvent::StatusUpdated(Box::new(data)))); } else { trace!( target: LOG_TARGET, diff --git a/base_layer/wallet/src/contacts_service/storage/sqlite_db.rs b/base_layer/wallet/src/contacts_service/storage/sqlite_db.rs index e28d125c45..553fe3dfef 100644 --- a/base_layer/wallet/src/contacts_service/storage/sqlite_db.rs +++ b/base_layer/wallet/src/contacts_service/storage/sqlite_db.rs @@ -215,6 +215,7 @@ impl ContactSql { impl TryFrom for Contact { type Error = ContactsServiceStorageError; + #[allow(clippy::cast_sign_loss)] fn try_from(o: ContactSql) -> Result { let public_key = PublicKey::from_vec(&o.public_key).map_err(|_| ContactsServiceStorageError::ConversionError)?; @@ -230,6 +231,7 @@ impl TryFrom for Contact { } /// Conversion from a Contact to the Sql datatype form +#[allow(clippy::cast_possible_wrap)] impl From for ContactSql { fn from(o: Contact) -> Self { Self { diff --git a/base_layer/wallet/src/error.rs b/base_layer/wallet/src/error.rs index 8b3c6aa95d..f0b720a782 100644 --- a/base_layer/wallet/src/error.rs +++ b/base_layer/wallet/src/error.rs @@ -109,7 +109,7 @@ pub const LOG_TARGET: &str = "tari::application"; impl From for ExitError { fn from(err: WalletError) -> Self { log::error!(target: LOG_TARGET, "{}", err); - Self::new(ExitCode::WalletError, &err) + Self::new(ExitCode::WalletError, err.to_string()) } } @@ -180,7 +180,7 @@ impl From for ExitError { use WalletStorageError::{InvalidPassphrase, NoPasswordError}; match err { NoPasswordError | InvalidPassphrase => ExitCode::IncorrectOrEmptyPassword.into(), - e => ExitError::new(ExitCode::WalletError, &e), + e => ExitError::new(ExitCode::WalletError, e), } } } diff --git a/base_layer/wallet/src/output_manager_service/error.rs b/base_layer/wallet/src/output_manager_service/error.rs index 2442baffae..0cb6ab98cf 100644 --- 
a/base_layer/wallet/src/output_manager_service/error.rs +++ b/base_layer/wallet/src/output_manager_service/error.rs @@ -183,7 +183,7 @@ pub enum OutputManagerStorageError { impl From for ExitError { fn from(err: OutputManagerError) -> Self { log::error!(target: crate::error::LOG_TARGET, "{}", err); - Self::new(ExitCode::WalletError, &err) + Self::new(ExitCode::WalletError, err.to_string()) } } diff --git a/base_layer/wallet/src/output_manager_service/service.rs b/base_layer/wallet/src/output_manager_service/service.rs index 1dfc49a50f..bdc37b2124 100644 --- a/base_layer/wallet/src/output_manager_service/service.rs +++ b/base_layer/wallet/src/output_manager_service/service.rs @@ -248,6 +248,7 @@ where } /// This handler is called when the Service executor loops receives an API request + #[allow(clippy::too_many_lines)] async fn handle_request( &mut self, request: OutputManagerRequest, @@ -851,6 +852,7 @@ where /// Prepare a Sender Transaction Protocol for the amount and fee_per_gram specified. If required a change output /// will be produced. + #[allow(clippy::too_many_lines)] pub async fn prepare_transaction_to_send( &mut self, tx_id: TxId, @@ -1232,6 +1234,7 @@ where Ok((tx_id, stp.take_transaction()?)) } + #[allow(clippy::too_many_lines)] async fn create_pay_to_self_transaction( &mut self, tx_id: TxId, @@ -1419,6 +1422,7 @@ where /// Select which unspent transaction outputs to use to send a transaction of the specified amount. Use the specified /// selection strategy to choose the outputs. It also determines if a change output is required. 
+ #[allow(clippy::too_many_lines)] async fn select_utxos( &mut self, amount: MicroTari, @@ -1574,6 +1578,7 @@ where Ok(()) } + #[allow(clippy::too_many_lines)] async fn create_coin_split( &mut self, amount_per_split: MicroTari, diff --git a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/mod.rs b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/mod.rs index cc2015b332..5c73fe59a0 100644 --- a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/mod.rs +++ b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/mod.rs @@ -143,6 +143,7 @@ impl OutputManagerSqliteDatabase { impl OutputManagerBackend for OutputManagerSqliteDatabase { #[allow(clippy::cognitive_complexity)] + #[allow(clippy::too_many_lines)] fn fetch(&self, key: &DbKey) -> Result, OutputManagerStorageError> { let start = Instant::now(); let conn = self.database_connection.get_pooled_connection()?; @@ -864,7 +865,7 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { } for output in &outputs { - if output.received_in_tx_id == Some(i64::from(tx_id)) { + if output.received_in_tx_id == Some(tx_id.as_i64_wrapped()) { info!( target: LOG_TARGET, "Cancelling pending inbound output with Commitment: {} - MMR Position: {:?} from TxId: {}", @@ -879,7 +880,7 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { }, &conn, )?; - } else if output.spent_in_tx_id == Some(i64::from(tx_id)) { + } else if output.spent_in_tx_id == Some(tx_id.as_i64_wrapped()) { info!( target: LOG_TARGET, "Cancelling pending outbound output with Commitment: {} - MMR Position: {:?} from TxId: {}", @@ -1272,8 +1273,8 @@ impl From for UpdateOutputSql { script_private_key: u.script_private_key, metadata_signature_nonce: u.metadata_signature_nonce, metadata_signature_u_key: u.metadata_signature_u_key, - received_in_tx_id: u.received_in_tx_id.map(|o| o.map(i64::from)), - spent_in_tx_id: u.spent_in_tx_id.map(|o| o.map(i64::from)), + received_in_tx_id: u.received_in_tx_id.map(|o| 
o.map(TxId::as_i64_wrapped)), + spent_in_tx_id: u.spent_in_tx_id.map(|o| o.map(TxId::as_i64_wrapped)), mined_in_block: u.mined_in_block, } } @@ -1578,7 +1579,7 @@ mod test { .update( UpdateOutput { status: Some(OutputStatus::Unspent), - received_in_tx_id: Some(Some(44.into())), + received_in_tx_id: Some(Some(44u64.into())), ..Default::default() }, &conn, @@ -1590,14 +1591,14 @@ mod test { .update( UpdateOutput { status: Some(OutputStatus::EncumberedToBeReceived), - received_in_tx_id: Some(Some(44.into())), + received_in_tx_id: Some(Some(44u64.into())), ..Default::default() }, &conn, ) .unwrap(); - let result = OutputSql::find_by_tx_id_and_encumbered(44.into(), &conn).unwrap(); + let result = OutputSql::find_by_tx_id_and_encumbered(44u64.into(), &conn).unwrap(); assert_eq!(result.len(), 1); assert_eq!(result[0].spending_key, outputs[1].spending_key); } diff --git a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/new_output_sql.rs b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/new_output_sql.rs index 6ff6877b54..a22d15e54f 100644 --- a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/new_output_sql.rs +++ b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/new_output_sql.rs @@ -67,6 +67,7 @@ pub struct NewOutputSql { } impl NewOutputSql { + #[allow(clippy::cast_possible_wrap)] pub fn new( output: DbUnblindedOutput, status: OutputStatus, @@ -76,7 +77,7 @@ impl NewOutputSql { Ok(Self { commitment: Some(output.commitment.to_vec()), spending_key: output.unblinded_output.spending_key.to_vec(), - value: (u64::from(output.unblinded_output.value)) as i64, + value: output.unblinded_output.value.as_u64() as i64, flags: i32::from(output.unblinded_output.features.flags.bits()), maturity: output.unblinded_output.features.maturity as i64, recovery_byte: i32::from(output.unblinded_output.features.recovery_byte), diff --git a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/output_sql.rs 
b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/output_sql.rs index 09e8317a4e..27d5b74e8b 100644 --- a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/output_sql.rs +++ b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/output_sql.rs @@ -115,6 +115,7 @@ impl OutputSql { } /// Retrieves UTXOs than can be spent, sorted by priority, then value from smallest to largest. + #[allow(clippy::cast_sign_loss)] pub fn fetch_unspent_outputs_for_spending( mut strategy: UTXOSelectionStrategy, amount: u64, @@ -168,6 +169,7 @@ impl OutputSql { } /// Return all unspent outputs that have a maturity above the provided chain tip + #[allow(clippy::cast_possible_wrap)] pub fn index_time_locked(tip: u64, conn: &SqliteConnection) -> Result, OutputManagerStorageError> { Ok(outputs::table .filter(outputs::status.eq(OutputStatus::Unspent as i32)) @@ -244,13 +246,14 @@ impl OutputSql { Ok(outputs::table .filter( outputs::received_in_tx_id - .eq(i64::from(tx_id)) - .or(outputs::spent_in_tx_id.eq(i64::from(tx_id))), + .eq(tx_id.as_i64_wrapped()) + .or(outputs::spent_in_tx_id.eq(tx_id.as_i64_wrapped())), ) .load(conn)?) 
} /// Return the available, time locked, pending incoming and pending outgoing balance + #[allow(clippy::cast_possible_wrap)] pub fn get_balance( current_tip_for_time_lock_calculation: Option, conn: &SqliteConnection, @@ -491,6 +494,7 @@ impl OutputSql { impl TryFrom for DbUnblindedOutput { type Error = OutputManagerStorageError; + #[allow(clippy::too_many_lines)] fn try_from(o: OutputSql) -> Result { let mut features: OutputFeatures = serde_json::from_str(&o.features_json).map_err(|s| OutputManagerStorageError::ConversionError { diff --git a/base_layer/wallet/src/transaction_service/protocols/transaction_broadcast_protocol.rs b/base_layer/wallet/src/transaction_service/protocols/transaction_broadcast_protocol.rs index 4a4130daf6..5ea7cb7338 100644 --- a/base_layer/wallet/src/transaction_service/protocols/transaction_broadcast_protocol.rs +++ b/base_layer/wallet/src/transaction_service/protocols/transaction_broadcast_protocol.rs @@ -179,6 +179,7 @@ where /// `Ok(true)` => Transaction was successfully submitted to UnconfirmedPool /// `Ok(false)` => There was a problem with the RPC call and this should be retried /// `Err(_)` => The transaction was rejected by the base node and the protocol should end. 
+ #[allow(clippy::too_many_lines)] async fn submit_transaction( &mut self, tx: Transaction, diff --git a/base_layer/wallet/src/transaction_service/protocols/transaction_receive_protocol.rs b/base_layer/wallet/src/transaction_service/protocols/transaction_receive_protocol.rs index eb2565d92f..d0b1c3c5d3 100644 --- a/base_layer/wallet/src/transaction_service/protocols/transaction_receive_protocol.rs +++ b/base_layer/wallet/src/transaction_service/protocols/transaction_receive_protocol.rs @@ -225,6 +225,7 @@ where } } + #[allow(clippy::too_many_lines)] async fn wait_for_finalization(&mut self) -> Result<(), TransactionServiceProtocolError> { let mut receiver = self .transaction_finalize_receiver diff --git a/base_layer/wallet/src/transaction_service/protocols/transaction_send_protocol.rs b/base_layer/wallet/src/transaction_service/protocols/transaction_send_protocol.rs index ed183f7ace..35badf317a 100644 --- a/base_layer/wallet/src/transaction_service/protocols/transaction_send_protocol.rs +++ b/base_layer/wallet/src/transaction_service/protocols/transaction_send_protocol.rs @@ -258,6 +258,7 @@ where } } + #[allow(clippy::too_many_lines)] async fn initial_send_transaction( &mut self, mut sender_protocol: SenderTransactionProtocol, @@ -374,6 +375,7 @@ where Ok(transaction_status) } + #[allow(clippy::too_many_lines)] async fn wait_for_reply(&mut self) -> Result<(), TransactionServiceProtocolError> { // Waiting for Transaction Reply let tx_id = self.id; @@ -665,6 +667,7 @@ where /// the transaction will be cancelled. 
/// # Argumentswallet_sync_with_base_node /// `msg`: The transaction data message to be sent + #[allow(clippy::too_many_lines)] async fn send_transaction_direct( &mut self, msg: SingleRoundSenderData, diff --git a/base_layer/wallet/src/transaction_service/service.rs b/base_layer/wallet/src/transaction_service/service.rs index 51a4895559..9eb674ab84 100644 --- a/base_layer/wallet/src/transaction_service/service.rs +++ b/base_layer/wallet/src/transaction_service/service.rs @@ -254,7 +254,7 @@ where } } - #[warn(unreachable_code)] + #[allow(clippy::too_many_lines)] pub async fn start(mut self) -> Result<(), TransactionServiceError> { let request_stream = self .request_stream @@ -530,6 +530,7 @@ where } /// This handler is called when requests arrive from the various streams + #[allow(clippy::too_many_lines)] async fn handle_request( &mut self, request: TransactionServiceRequest, @@ -895,6 +896,7 @@ where /// 'dest_pubkey': The Comms pubkey of the recipient node /// 'amount': The amount of Tari to send to the recipient /// 'fee_per_gram': The amount of fee per transaction gram to be included in transaction + #[allow(clippy::too_many_lines)] pub async fn send_sha_atomic_swap_transaction( &mut self, dest_pubkey: CommsPublicKey, @@ -1212,6 +1214,7 @@ where /// Accept the public reply from a recipient and apply the reply to the relevant transaction protocol /// # Arguments /// 'recipient_reply' - The public response from a recipient with data required to complete the transaction + #[allow(clippy::too_many_lines)] pub async fn accept_recipient_reply( &mut self, source_pubkey: CommsPublicKey, @@ -1539,6 +1542,7 @@ where /// # Arguments /// 'source_pubkey' - The pubkey from which the message was sent and to which the reply will be sent. 
/// 'sender_message' - Message from a sender containing the setup of the transaction being sent to you + #[allow(clippy::too_many_lines)] pub async fn accept_transaction( &mut self, source_pubkey: CommsPublicKey, diff --git a/base_layer/wallet/src/transaction_service/storage/database.rs b/base_layer/wallet/src/transaction_service/storage/database.rs index ebf7481e06..8d353e9ff6 100644 --- a/base_layer/wallet/src/transaction_service/storage/database.rs +++ b/base_layer/wallet/src/transaction_service/storage/database.rs @@ -184,13 +184,23 @@ impl fmt::Debug for DbKey { // Add in i64 representatives for easy debugging in sqlite. This should probably be removed at some point match self { PendingOutboundTransaction(tx_id) => { - write!(f, "PendingOutboundTransaction ({}u64, {}i64)", tx_id, i64::from(*tx_id)) + write!( + f, + "PendingOutboundTransaction ({}u64, {}i64)", + tx_id, + tx_id.as_i64_wrapped() + ) }, PendingInboundTransaction(tx_id) => { - write!(f, "PendingInboundTransaction ({}u64, {}i64)", tx_id, i64::from(*tx_id)) + write!( + f, + "PendingInboundTransaction ({}u64, {}i64)", + tx_id, + tx_id.as_i64_wrapped() + ) }, CompletedTransaction(tx_id) => { - write!(f, "CompletedTransaction ({}u64, {}i64)", tx_id, i64::from(*tx_id)) + write!(f, "CompletedTransaction ({}u64, {}i64)", tx_id, tx_id.as_i64_wrapped()) }, PendingOutboundTransactions => { write!(f, "PendingOutboundTransactions ") @@ -215,7 +225,7 @@ impl fmt::Debug for DbKey { f, "CancelledPendingOutboundTransaction ({}u64, {}i64)", tx_id, - i64::from(*tx_id) + tx_id.as_i64_wrapped() ) }, CancelledPendingInboundTransaction(tx_id) => { @@ -223,11 +233,11 @@ impl fmt::Debug for DbKey { f, "CancelledPendingInboundTransaction ({}u64, {}i64)", tx_id, - i64::from(*tx_id) + tx_id.as_i64_wrapped() ) }, AnyTransaction(tx_id) => { - write!(f, "AnyTransaction ({}u64, {}i64)", tx_id, i64::from(*tx_id)) + write!(f, "AnyTransaction ({}u64, {}i64)", tx_id, tx_id.as_i64_wrapped()) }, } } diff --git 
a/base_layer/wallet/src/transaction_service/storage/sqlite_db.rs b/base_layer/wallet/src/transaction_service/storage/sqlite_db.rs index b7d0ee39a0..fee23b286b 100644 --- a/base_layer/wallet/src/transaction_service/storage/sqlite_db.rs +++ b/base_layer/wallet/src/transaction_service/storage/sqlite_db.rs @@ -222,6 +222,7 @@ impl TransactionServiceSqliteDatabase { } impl TransactionBackend for TransactionServiceSqliteDatabase { + #[allow(clippy::too_many_lines)] fn fetch(&self, key: &DbKey) -> Result, TransactionStorageError> { let start = Instant::now(); let conn = self.database_connection.get_pooled_connection()?; @@ -2173,6 +2174,7 @@ mod test { }; #[test] + #[allow(clippy::too_many_lines)] fn test_crud() { let factories = CryptoFactories::default(); let db_name = format!("{}.sqlite3", string(8).as_str()); @@ -2263,9 +2265,10 @@ mod test { let outbound_txs = OutboundTransactionSql::index_by_cancelled(&conn, false).unwrap(); assert_eq!(outbound_txs.len(), 2); - let returned_outbound_tx = - OutboundTransaction::try_from(OutboundTransactionSql::find_by_cancelled(1.into(), false, &conn).unwrap()) - .unwrap(); + let returned_outbound_tx = OutboundTransaction::try_from( + OutboundTransactionSql::find_by_cancelled(1u64.into(), false, &conn).unwrap(), + ) + .unwrap(); assert_eq!( OutboundTransactionSql::try_from(returned_outbound_tx).unwrap(), OutboundTransactionSql::try_from(outbound_tx1.clone()).unwrap() @@ -2318,7 +2321,7 @@ mod test { assert_eq!(inbound_txs.len(), 2); let returned_inbound_tx = - InboundTransaction::try_from(InboundTransactionSql::find_by_cancelled(2.into(), false, &conn).unwrap()) + InboundTransaction::try_from(InboundTransactionSql::find_by_cancelled(2u64.into(), false, &conn).unwrap()) .unwrap(); assert_eq!( InboundTransactionSql::try_from(returned_inbound_tx).unwrap(), @@ -2391,9 +2394,10 @@ mod test { let completed_txs = CompletedTransactionSql::index_by_cancelled(&conn, false).unwrap(); assert_eq!(completed_txs.len(), 2); - let 
returned_completed_tx = - CompletedTransaction::try_from(CompletedTransactionSql::find_by_cancelled(2.into(), false, &conn).unwrap()) - .unwrap(); + let returned_completed_tx = CompletedTransaction::try_from( + CompletedTransactionSql::find_by_cancelled(2u64.into(), false, &conn).unwrap(), + ) + .unwrap(); assert_eq!( CompletedTransactionSql::try_from(returned_completed_tx).unwrap(), CompletedTransactionSql::try_from(completed_tx1.clone()).unwrap() @@ -2600,7 +2604,7 @@ mod test { inbound_tx_sql.commit(&conn).unwrap(); inbound_tx_sql.encrypt(&cipher).unwrap(); inbound_tx_sql.update_encryption(&conn).unwrap(); - let mut db_inbound_tx = InboundTransactionSql::find_by_cancelled(1.into(), false, &conn).unwrap(); + let mut db_inbound_tx = InboundTransactionSql::find_by_cancelled(1u64.into(), false, &conn).unwrap(); db_inbound_tx.decrypt(&cipher).unwrap(); let decrypted_inbound_tx = InboundTransaction::try_from(db_inbound_tx).unwrap(); assert_eq!(inbound_tx, decrypted_inbound_tx); @@ -2624,7 +2628,7 @@ mod test { outbound_tx_sql.commit(&conn).unwrap(); outbound_tx_sql.encrypt(&cipher).unwrap(); outbound_tx_sql.update_encryption(&conn).unwrap(); - let mut db_outbound_tx = OutboundTransactionSql::find_by_cancelled(2.into(), false, &conn).unwrap(); + let mut db_outbound_tx = OutboundTransactionSql::find_by_cancelled(2u64.into(), false, &conn).unwrap(); db_outbound_tx.decrypt(&cipher).unwrap(); let decrypted_outbound_tx = OutboundTransaction::try_from(db_outbound_tx).unwrap(); assert_eq!(outbound_tx, decrypted_outbound_tx); @@ -2660,7 +2664,7 @@ mod test { completed_tx_sql.commit(&conn).unwrap(); completed_tx_sql.encrypt(&cipher).unwrap(); completed_tx_sql.update_encryption(&conn).unwrap(); - let mut db_completed_tx = CompletedTransactionSql::find_by_cancelled(3.into(), false, &conn).unwrap(); + let mut db_completed_tx = CompletedTransactionSql::find_by_cancelled(3u64.into(), false, &conn).unwrap(); db_completed_tx.decrypt(&cipher).unwrap(); let decrypted_completed_tx = 
CompletedTransaction::try_from(db_completed_tx).unwrap(); assert_eq!(completed_tx, decrypted_completed_tx); @@ -2777,6 +2781,7 @@ mod test { } #[test] + #[allow(clippy::too_many_lines)] fn test_customized_transactional_queries() { let db_name = format!("{}.sqlite3", string(8).as_str()); let temp_dir = tempdir().unwrap(); diff --git a/base_layer/wallet/src/transaction_service/tasks/check_faux_transaction_status.rs b/base_layer/wallet/src/transaction_service/tasks/check_faux_transaction_status.rs index aba5997bc3..07c9ead31b 100644 --- a/base_layer/wallet/src/transaction_service/tasks/check_faux_transaction_status.rs +++ b/base_layer/wallet/src/transaction_service/tasks/check_faux_transaction_status.rs @@ -39,6 +39,7 @@ use crate::{ const LOG_TARGET: &str = "wallet::transaction_service::service"; +#[allow(clippy::too_many_lines)] pub async fn check_faux_transactions( mut output_manager: OutputManagerHandle, db: TransactionDatabase, diff --git a/base_layer/wallet/src/transaction_service/tasks/send_finalized_transaction.rs b/base_layer/wallet/src/transaction_service/tasks/send_finalized_transaction.rs index 19ef8cdb55..094b482057 100644 --- a/base_layer/wallet/src/transaction_service/tasks/send_finalized_transaction.rs +++ b/base_layer/wallet/src/transaction_service/tasks/send_finalized_transaction.rs @@ -87,6 +87,7 @@ pub async fn send_finalized_transaction_message( Ok(()) } +#[allow(clippy::too_many_lines)] pub async fn send_finalized_transaction_message_direct( tx_id: TxId, transaction: Transaction, diff --git a/base_layer/wallet/src/transaction_service/tasks/send_transaction_reply.rs b/base_layer/wallet/src/transaction_service/tasks/send_transaction_reply.rs index 43fa618398..50637763b7 100644 --- a/base_layer/wallet/src/transaction_service/tasks/send_transaction_reply.rs +++ b/base_layer/wallet/src/transaction_service/tasks/send_transaction_reply.rs @@ -78,6 +78,7 @@ pub async fn send_transaction_reply( } /// A task to resend a transaction reply message if a 
repeated Send Transaction is received from a Sender +#[allow(clippy::too_many_lines)] pub async fn send_transaction_reply_direct( inbound_transaction: InboundTransaction, mut outbound_message_service: OutboundMessageRequester, diff --git a/base_layer/wallet/src/wallet.rs b/base_layer/wallet/src/wallet.rs index 60adf4f326..86f9da1252 100644 --- a/base_layer/wallet/src/wallet.rs +++ b/base_layer/wallet/src/wallet.rs @@ -135,6 +135,7 @@ where W: ContactsBackend + 'static, X: KeyManagerBackend + 'static, { + #[allow(clippy::too_many_lines)] pub async fn start( config: WalletConfig, peer_seeds: PeerSeedsConfig, @@ -270,7 +271,8 @@ where wallet_database .set_node_features(comms.node_identity().features()) .await?; - if let Some(identity_sig) = comms.node_identity().identity_signature_read().as_ref().cloned() { + let identity_sig = comms.node_identity().identity_signature_read().as_ref().cloned(); + if let Some(identity_sig) = identity_sig { wallet_database.set_comms_identity_signature(identity_sig).await?; } diff --git a/base_layer/wallet/tests/output_manager_service_tests/service.rs b/base_layer/wallet/tests/output_manager_service_tests/service.rs index 15c2da655b..fe4e8d085d 100644 --- a/base_layer/wallet/tests/output_manager_service_tests/service.rs +++ b/base_layer/wallet/tests/output_manager_service_tests/service.rs @@ -125,6 +125,7 @@ struct TestOmsService { } #[allow(clippy::type_complexity)] +#[allow(clippy::too_many_lines)] async fn setup_output_manager_service( backend: T, ks_backend: U, @@ -533,8 +534,9 @@ async fn test_utxo_selection_no_chain_metadata() { } } -#[allow(clippy::identity_op)] #[tokio::test] +#[allow(clippy::identity_op)] +#[allow(clippy::too_many_lines)] async fn test_utxo_selection_with_chain_metadata() { let factories = CryptoFactories::default(); let (connection, _tempdir) = get_temp_sqlite_database_connection(); @@ -938,7 +940,7 @@ async fn cancel_transaction() { .await .unwrap(); - match 
oms.output_manager_handle.cancel_transaction(1.into()).await { + match oms.output_manager_handle.cancel_transaction(1u64.into()).await { Err(OutputManagerError::OutputManagerStorageError(OutputManagerStorageError::ValueNotFound)) => {}, _ => panic!("Value should not exist"), } @@ -1286,6 +1288,7 @@ async fn handle_coinbase() { } #[tokio::test] +#[allow(clippy::too_many_lines)] async fn test_txo_validation() { let factories = CryptoFactories::default(); @@ -1320,7 +1323,7 @@ async fn test_txo_validation() { .unwrap(); oms.output_manager_handle - .add_rewindable_output_with_tx_id(TxId::from(1), output1.clone(), None, None) + .add_rewindable_output_with_tx_id(TxId::from(1u64), output1.clone(), None, None) .await .unwrap(); @@ -1339,7 +1342,7 @@ async fn test_txo_validation() { .unwrap(); oms.output_manager_handle - .add_rewindable_output_with_tx_id(TxId::from(2), output2.clone(), None, None) + .add_rewindable_output_with_tx_id(TxId::from(2u64), output2.clone(), None, None) .await .unwrap(); @@ -1353,7 +1356,7 @@ async fn test_txo_validation() { .await; oms.output_manager_handle - .add_rewindable_output_with_tx_id(TxId::from(3), output3.clone(), None, None) + .add_rewindable_output_with_tx_id(TxId::from(3u64), output3.clone(), None, None) .await .unwrap(); @@ -1420,7 +1423,7 @@ async fn test_txo_validation() { oms.output_manager_handle .prepare_transaction_to_send( - 4.into(), + 4u64.into(), MicroTari::from(900_000), None, None, @@ -1844,6 +1847,7 @@ async fn test_txo_validation() { } #[tokio::test] +#[allow(clippy::too_many_lines)] async fn test_txo_revalidation() { let factories = CryptoFactories::default(); @@ -1871,7 +1875,7 @@ async fn test_txo_revalidation() { ); let output1_tx_output = output1.as_transaction_output(&factories).unwrap(); oms.output_manager_handle - .add_output_with_tx_id(TxId::from(1), output1.clone(), None) + .add_output_with_tx_id(TxId::from(1u64), output1.clone(), None) .await .unwrap(); @@ -1885,7 +1889,7 @@ async fn test_txo_revalidation() { 
let output2_tx_output = output2.as_transaction_output(&factories).unwrap(); oms.output_manager_handle - .add_output_with_tx_id(TxId::from(2), output2.clone(), None) + .add_output_with_tx_id(TxId::from(2u64), output2.clone(), None) .await .unwrap(); diff --git a/base_layer/wallet/tests/output_manager_service_tests/storage.rs b/base_layer/wallet/tests/output_manager_service_tests/storage.rs index bea26952ee..1169c8259c 100644 --- a/base_layer/wallet/tests/output_manager_service_tests/storage.rs +++ b/base_layer/wallet/tests/output_manager_service_tests/storage.rs @@ -40,7 +40,7 @@ use tokio::runtime::Runtime; use crate::support::{data::get_temp_sqlite_database_connection, utils::make_input}; -#[allow(clippy::same_item_push)] +#[allow(clippy::too_many_lines)] pub fn test_db_backend(backend: T) { let runtime = Runtime::new().unwrap(); @@ -252,7 +252,7 @@ pub fn test_db_backend(backend: T) { None, )); let output_to_be_received = DbUnblindedOutput::from_unblinded_output(uo, &factories, None).unwrap(); - db.add_output_to_be_received(TxId::from(11), output_to_be_received.clone(), None) + db.add_output_to_be_received(TxId::from(11u64), output_to_be_received.clone(), None) .unwrap(); pending_incoming_balance += output_to_be_received.unblinded_output.value; @@ -352,7 +352,7 @@ pub async fn test_short_term_encumberance() { unspent_outputs.push(uo); } - db.encumber_outputs(1.into(), unspent_outputs[0..=2].to_vec(), vec![]) + db.encumber_outputs(1u64.into(), unspent_outputs[0..=2].to_vec(), vec![]) .unwrap(); let balance = db.get_balance(None).unwrap(); @@ -373,10 +373,10 @@ pub async fn test_short_term_encumberance() { .fold(MicroTari::from(0), |acc, x| acc + x.unblinded_output.value) ); - db.encumber_outputs(2.into(), unspent_outputs[0..=2].to_vec(), vec![]) + db.encumber_outputs(2u64.into(), unspent_outputs[0..=2].to_vec(), vec![]) .unwrap(); - db.confirm_encumbered_outputs(TxId::from(2)).unwrap(); + db.confirm_encumbered_outputs(TxId::from(2u64)).unwrap(); 
db.clear_short_term_encumberances().unwrap(); let balance = db.get_balance(None).unwrap(); @@ -415,7 +415,7 @@ pub async fn test_no_duplicate_outputs() { // add a pending transaction with the same duplicate output - assert!(db.encumber_outputs(2.into(), vec![], vec![uo]).is_err()); + assert!(db.encumber_outputs(2u64.into(), vec![], vec![uo]).is_err()); // we should still only have 1 unspent output let outputs = db.fetch_mined_unspent_outputs().unwrap(); diff --git a/base_layer/wallet/tests/support/comms_rpc.rs b/base_layer/wallet/tests/support/comms_rpc.rs index ce27fc1df0..b218f277fb 100644 --- a/base_layer/wallet/tests/support/comms_rpc.rs +++ b/base_layer/wallet/tests/support/comms_rpc.rs @@ -20,6 +20,9 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Sync lock used in async context throughout this module +#![allow(clippy::await_holding_lock)] + use std::{ cmp::min, collections::HashMap, diff --git a/base_layer/wallet/tests/transaction_service_tests/mod.rs b/base_layer/wallet/tests/transaction_service_tests/mod.rs index 4431baf1d3..a37e3142de 100644 --- a/base_layer/wallet/tests/transaction_service_tests/mod.rs +++ b/base_layer/wallet/tests/transaction_service_tests/mod.rs @@ -19,6 +19,10 @@ // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// Majority of the tests here have large function bodies +#![allow(clippy::too_many_lines)] + mod service; mod storage; mod transaction_protocols; diff --git a/base_layer/wallet/tests/transaction_service_tests/service.rs b/base_layer/wallet/tests/transaction_service_tests/service.rs index b7b48746fc..db32b866f3 100644 --- a/base_layer/wallet/tests/transaction_service_tests/service.rs +++ b/base_layer/wallet/tests/transaction_service_tests/service.rs @@ -582,7 +582,7 @@ fn manage_single_transaction() { } }); - let mut tx_id = TxId::from(0); + let mut tx_id = TxId::from(0u64); runtime.block_on(async { let delay = sleep(Duration::from_secs(90)); tokio::pin!(delay); @@ -604,7 +604,9 @@ fn manage_single_transaction() { assert_eq!(finalized, 1); }); - assert!(runtime.block_on(bob_ts.get_completed_transaction(999.into())).is_err()); + assert!(runtime + .block_on(bob_ts.get_completed_transaction(999u64.into())) + .is_err()); let _bob_completed_tx = runtime .block_on(bob_ts.get_completed_transaction(tx_id)) @@ -1461,7 +1463,7 @@ fn test_accepting_unknown_tx_id_and_malformed_reply() { let mut tx_reply = rtp.get_signed_data().unwrap().clone(); let mut wrong_tx_id = tx_reply.clone(); - wrong_tx_id.tx_id = 2.into(); + wrong_tx_id.tx_id = 2u64.into(); let (_p, pub_key) = PublicKey::random_keypair(&mut OsRng); tx_reply.public_spend_key = pub_key; runtime @@ -1845,7 +1847,7 @@ fn discovery_async_return_test() { assert_ne!(initial_balance, runtime.block_on(alice_oms.get_balance()).unwrap()); - let mut txid = TxId::from(0); + let mut txid = TxId::from(0u64); let mut is_success = true; runtime.block_on(async { let delay = sleep(Duration::from_secs(60)); @@ -1880,7 +1882,7 @@ fn discovery_async_return_test() { .unwrap(); let mut success_result = false; - let mut success_tx_id = TxId::from(0); + let mut success_tx_id = TxId::from(0u64); runtime.block_on(async { let delay = sleep(Duration::from_secs(60)); tokio::pin!(delay); @@ -1951,7 +1953,7 @@ fn test_power_mode_updates() { 
PrivateKey::random(&mut OsRng), ); let completed_tx1 = CompletedTransaction { - tx_id: 1.into(), + tx_id: 1u64.into(), source_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), destination_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), amount: 5000 * uT, @@ -1972,7 +1974,7 @@ fn test_power_mode_updates() { }; let completed_tx2 = CompletedTransaction { - tx_id: 2.into(), + tx_id: 2u64.into(), source_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), destination_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), amount: 6000 * uT, @@ -1994,13 +1996,13 @@ fn test_power_mode_updates() { tx_backend .write(WriteOperation::Insert(DbKeyValuePair::CompletedTransaction( - 1.into(), + 1u64.into(), Box::new(completed_tx1), ))) .unwrap(); tx_backend .write(WriteOperation::Insert(DbKeyValuePair::CompletedTransaction( - 2.into(), + 2u64.into(), Box::new(completed_tx2), ))) .unwrap(); @@ -5355,7 +5357,7 @@ fn broadcast_all_completed_transactions_on_startup() { ); let completed_tx1 = CompletedTransaction { - tx_id: 1.into(), + tx_id: 1u64.into(), source_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), destination_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), amount: 5000 * uT, @@ -5376,13 +5378,13 @@ fn broadcast_all_completed_transactions_on_startup() { }; let completed_tx2 = CompletedTransaction { - tx_id: 2.into(), + tx_id: 2u64.into(), status: TransactionStatus::MinedConfirmed, ..completed_tx1.clone() }; let completed_tx3 = CompletedTransaction { - tx_id: 3.into(), + tx_id: 3u64.into(), status: TransactionStatus::Completed, ..completed_tx1.clone() }; @@ -5448,13 +5450,13 @@ fn broadcast_all_completed_transactions_on_startup() { tokio::select! 
{ event = event_stream.recv() => { if let TransactionEvent::TransactionBroadcast(tx_id) = (*event.unwrap()).clone() { - if tx_id == TxId::from(1) { + if tx_id == 1u64 { found1 = true } - if tx_id == TxId::from(2) { + if tx_id == 2u64 { found2 = true } - if tx_id == TxId::from(3) { + if tx_id == 3u64 { found3 = true } if found1 && found3 { diff --git a/base_layer/wallet/tests/transaction_service_tests/storage.rs b/base_layer/wallet/tests/transaction_service_tests/storage.rs index 94d7178163..92650a70e0 100644 --- a/base_layer/wallet/tests/transaction_service_tests/storage.rs +++ b/base_layer/wallet/tests/transaction_service_tests/storage.rs @@ -684,18 +684,18 @@ async fn import_tx_and_read_it_from_db() { let db_tx = sqlite_db.fetch_imported_transactions().unwrap(); assert_eq!(db_tx.len(), 1); - assert_eq!(db_tx.first().unwrap().tx_id, TxId::from(1)); + assert_eq!(db_tx.first().unwrap().tx_id, TxId::from(1u64)); assert_eq!(db_tx.first().unwrap().mined_height, Some(5)); let db_tx = sqlite_db.fetch_unconfirmed_faux_transactions().unwrap(); assert_eq!(db_tx.len(), 1); - assert_eq!(db_tx.first().unwrap().tx_id, TxId::from(2)); + assert_eq!(db_tx.first().unwrap().tx_id, TxId::from(2u64)); assert_eq!(db_tx.first().unwrap().mined_height, Some(6)); let db_tx = sqlite_db.fetch_confirmed_faux_transactions_from_height(10).unwrap(); assert_eq!(db_tx.len(), 0); let db_tx = sqlite_db.fetch_confirmed_faux_transactions_from_height(4).unwrap(); assert_eq!(db_tx.len(), 1); - assert_eq!(db_tx.first().unwrap().tx_id, TxId::from(3)); + assert_eq!(db_tx.first().unwrap().tx_id, TxId::from(3u64)); assert_eq!(db_tx.first().unwrap().mined_height, Some(7)); } diff --git a/base_layer/wallet/tests/transaction_service_tests/transaction_protocols.rs b/base_layer/wallet/tests/transaction_service_tests/transaction_protocols.rs index adb1b5a825..b39876e945 100644 --- a/base_layer/wallet/tests/transaction_service_tests/transaction_protocols.rs +++ 
b/base_layer/wallet/tests/transaction_service_tests/transaction_protocols.rs @@ -245,18 +245,18 @@ async fn tx_broadcast_protocol_submit_success() { let timeout_watch = Watch::new(Duration::from_secs(1)); - let protocol = TransactionBroadcastProtocol::new(2.into(), resources.clone(), timeout_watch.get_receiver()); + let protocol = TransactionBroadcastProtocol::new(2u64.into(), resources.clone(), timeout_watch.get_receiver()); let join_handle = task::spawn(protocol.execute()); // Fails because there is no transaction in the database to be broadcast assert!(join_handle.await.unwrap().is_err()); - add_transaction_to_database(1.into(), 1 * T, None, None, resources.db.clone()).await; + add_transaction_to_database(1u64.into(), 1 * T, None, None, resources.db.clone()).await; - let db_completed_tx = resources.db.get_completed_transaction(1.into()).await.unwrap(); + let db_completed_tx = resources.db.get_completed_transaction(1u64.into()).await.unwrap(); assert!(db_completed_tx.confirmations.is_none()); - let protocol = TransactionBroadcastProtocol::new(1.into(), resources.clone(), timeout_watch.get_receiver()); + let protocol = TransactionBroadcastProtocol::new(1u64.into(), resources.clone(), timeout_watch.get_receiver()); task::spawn(protocol.execute()); @@ -320,7 +320,7 @@ async fn tx_broadcast_protocol_submit_rejection() { ) = setup().await; let mut event_stream = resources.event_publisher.subscribe(); - add_transaction_to_database(1.into(), 1 * T, None, None, resources.db.clone()).await; + add_transaction_to_database(1u64.into(), 1 * T, None, None, resources.db.clone()).await; let timeout_update_watch = Watch::new(Duration::from_secs(1)); wallet_connectivity.notify_base_node_set(server_node_identity.to_peer()); // Now we add the connection @@ -329,7 +329,8 @@ async fn tx_broadcast_protocol_submit_rejection() { .await; wallet_connectivity.set_base_node_wallet_rpc_client(connect_rpc_client(&mut connection).await); - let protocol = 
TransactionBroadcastProtocol::new(1.into(), resources.clone(), timeout_update_watch.get_receiver()); + let protocol = + TransactionBroadcastProtocol::new(1u64.into(), resources.clone(), timeout_update_watch.get_receiver()); rpc_service_state.set_submit_transaction_response(TxSubmissionResponse { accepted: false, @@ -350,7 +351,7 @@ async fn tx_broadcast_protocol_submit_rejection() { } // Check transaction is cancelled in db - let db_completed_tx = resources.db.get_completed_transaction(1.into()).await; + let db_completed_tx = resources.db.get_completed_transaction(1u64.into()).await; assert!(db_completed_tx.is_err()); // Check that the appropriate events were emitted @@ -390,7 +391,7 @@ async fn tx_broadcast_protocol_restart_protocol_as_query() { wallet_connectivity, ) = setup().await; - add_transaction_to_database(1.into(), 1 * T, None, None, resources.db.clone()).await; + add_transaction_to_database(1u64.into(), 1 * T, None, None, resources.db.clone()).await; // Set Base Node query response to be not stored, as if the base node does not have the tx in its pool rpc_service_state.set_transaction_query_response(TxQueryResponse { @@ -410,7 +411,8 @@ async fn tx_broadcast_protocol_restart_protocol_as_query() { .await; wallet_connectivity.set_base_node_wallet_rpc_client(connect_rpc_client(&mut connection).await); - let protocol = TransactionBroadcastProtocol::new(1.into(), resources.clone(), timeout_update_watch.get_receiver()); + let protocol = + TransactionBroadcastProtocol::new(1u64.into(), resources.clone(), timeout_update_watch.get_receiver()); let join_handle = task::spawn(protocol.execute()); // Check if in mempool (its not) @@ -452,10 +454,10 @@ async fn tx_broadcast_protocol_restart_protocol_as_query() { // Check that the protocol ends with success let result = join_handle.await.unwrap(); - assert_eq!(result.unwrap(), TxId::from(1)); + assert_eq!(result.unwrap(), TxId::from(1u64)); // Check transaction status is updated - let db_completed_tx = 
resources.db.get_completed_transaction(1.into()).await.unwrap(); + let db_completed_tx = resources.db.get_completed_transaction(1u64.into()).await.unwrap(); assert_eq!(db_completed_tx.status, TransactionStatus::Broadcast); } @@ -478,7 +480,7 @@ async fn tx_broadcast_protocol_submit_success_followed_by_rejection() { ) = setup().await; let mut event_stream = resources.event_publisher.subscribe(); - add_transaction_to_database(1.into(), 1 * T, None, None, resources.db.clone()).await; + add_transaction_to_database(1u64.into(), 1 * T, None, None, resources.db.clone()).await; resources.config.transaction_mempool_resubmission_window = Duration::from_secs(3); resources.config.broadcast_monitoring_timeout = Duration::from_secs(60); @@ -492,7 +494,8 @@ async fn tx_broadcast_protocol_submit_success_followed_by_rejection() { .await; wallet_connectivity.set_base_node_wallet_rpc_client(connect_rpc_client(&mut connection).await); - let protocol = TransactionBroadcastProtocol::new(1.into(), resources.clone(), timeout_update_watch.get_receiver()); + let protocol = + TransactionBroadcastProtocol::new(1u64.into(), resources.clone(), timeout_update_watch.get_receiver()); let join_handle = task::spawn(protocol.execute()); @@ -527,7 +530,7 @@ async fn tx_broadcast_protocol_submit_success_followed_by_rejection() { } // Check transaction is cancelled in db - let db_completed_tx = resources.db.get_completed_transaction(1.into()).await; + let db_completed_tx = resources.db.get_completed_transaction(1u64.into()).await; assert!(db_completed_tx.is_err()); // Check that the appropriate events were emitted @@ -566,7 +569,7 @@ async fn tx_broadcast_protocol_submit_already_mined() { _transaction_event_receiver, wallet_connectivity, ) = setup().await; - add_transaction_to_database(1.into(), 1 * T, None, None, resources.db.clone()).await; + add_transaction_to_database(1u64.into(), 1 * T, None, None, resources.db.clone()).await; // Set Base Node to respond with AlreadyMined 
rpc_service_state.set_submit_transaction_response(TxSubmissionResponse { @@ -583,7 +586,8 @@ async fn tx_broadcast_protocol_submit_already_mined() { .await; wallet_connectivity.set_base_node_wallet_rpc_client(connect_rpc_client(&mut connection).await); - let protocol = TransactionBroadcastProtocol::new(1.into(), resources.clone(), timeout_update_watch.get_receiver()); + let protocol = + TransactionBroadcastProtocol::new(1u64.into(), resources.clone(), timeout_update_watch.get_receiver()); let join_handle = task::spawn(protocol.execute()); @@ -611,7 +615,7 @@ async fn tx_broadcast_protocol_submit_already_mined() { assert_eq!(result.unwrap(), 1); // Check transaction status is updated - let db_completed_tx = resources.db.get_completed_transaction(1.into()).await.unwrap(); + let db_completed_tx = resources.db.get_completed_transaction(1u64.into()).await.unwrap(); assert_eq!(db_completed_tx.status, TransactionStatus::Completed); } @@ -631,7 +635,7 @@ async fn tx_broadcast_protocol_submit_and_base_node_gets_changed() { wallet_connectivity, ) = setup().await; - add_transaction_to_database(1.into(), 1 * T, None, None, resources.db.clone()).await; + add_transaction_to_database(1u64.into(), 1 * T, None, None, resources.db.clone()).await; resources.config.broadcast_monitoring_timeout = Duration::from_secs(60); @@ -651,7 +655,8 @@ async fn tx_broadcast_protocol_submit_and_base_node_gets_changed() { .await; wallet_connectivity.set_base_node_wallet_rpc_client(connect_rpc_client(&mut connection).await); - let protocol = TransactionBroadcastProtocol::new(1.into(), resources.clone(), timeout_update_watch.get_receiver()); + let protocol = + TransactionBroadcastProtocol::new(1u64.into(), resources.clone(), timeout_update_watch.get_receiver()); let join_handle = task::spawn(protocol.execute()); @@ -703,10 +708,10 @@ async fn tx_broadcast_protocol_submit_and_base_node_gets_changed() { // Check that the protocol ends with success let result = join_handle.await.unwrap(); - 
assert_eq!(result.unwrap(), TxId::from(1)); + assert_eq!(result.unwrap(), TxId::from(1u64)); // Check transaction status is updated - let db_completed_tx = resources.db.get_completed_transaction(1.into()).await.unwrap(); + let db_completed_tx = resources.db.get_completed_transaction(1u64.into()).await.unwrap(); assert_eq!(db_completed_tx.status, TransactionStatus::Broadcast); } @@ -732,7 +737,7 @@ async fn tx_validation_protocol_tx_becomes_mined_unconfirmed_then_confirmed() { .await; wallet_connectivity.set_base_node_wallet_rpc_client(connect_rpc_client(&mut connection).await); add_transaction_to_database( - 1.into(), + 1u64.into(), 1 * T, Some(TransactionStatus::Broadcast), None, @@ -740,7 +745,7 @@ async fn tx_validation_protocol_tx_becomes_mined_unconfirmed_then_confirmed() { ) .await; add_transaction_to_database( - 2.into(), + 2u64.into(), 2 * T, Some(TransactionStatus::Completed), None, @@ -748,7 +753,7 @@ async fn tx_validation_protocol_tx_becomes_mined_unconfirmed_then_confirmed() { ) .await; - let tx2 = resources.db.get_completed_transaction(2.into()).await.unwrap(); + let tx2 = resources.db.get_completed_transaction(2u64.into()).await.unwrap(); let transaction_query_batch_responses = vec![TxQueryBatchResponseProto { signature: Some(SignatureProto::from( @@ -785,11 +790,11 @@ async fn tx_validation_protocol_tx_becomes_mined_unconfirmed_then_confirmed() { let completed_txs = resources.db.get_completed_transactions().await.unwrap(); assert_eq!( - completed_txs.get(&1.into()).unwrap().status, + completed_txs.get(&1u64.into()).unwrap().status, TransactionStatus::Broadcast ); assert_eq!( - completed_txs.get(&2.into()).unwrap().status, + completed_txs.get(&2u64.into()).unwrap().status, TransactionStatus::MinedUnconfirmed ); @@ -813,11 +818,11 @@ async fn tx_validation_protocol_tx_becomes_mined_unconfirmed_then_confirmed() { let completed_txs = resources.db.get_completed_transactions().await.unwrap(); assert_eq!( - completed_txs.get(&1.into()).unwrap().status, + 
completed_txs.get(&1u64.into()).unwrap().status, TransactionStatus::Broadcast ); assert_eq!( - completed_txs.get(&2.into()).unwrap().status, + completed_txs.get(&2u64.into()).unwrap().status, TransactionStatus::Completed ); @@ -857,10 +862,10 @@ async fn tx_validation_protocol_tx_becomes_mined_unconfirmed_then_confirmed() { let completed_txs = resources.db.get_completed_transactions().await.unwrap(); assert_eq!( - completed_txs.get(&2.into()).unwrap().status, + completed_txs.get(&2u64.into()).unwrap().status, TransactionStatus::MinedConfirmed ); - assert_eq!(completed_txs.get(&2.into()).unwrap().confirmations.unwrap(), 4); + assert_eq!(completed_txs.get(&2u64.into()).unwrap().confirmations.unwrap(), 4); } /// Test that revalidation clears the correct db fields and calls for validation of is said transactions @@ -884,7 +889,7 @@ async fn tx_revalidation() { .await; wallet_connectivity.set_base_node_wallet_rpc_client(connect_rpc_client(&mut connection).await); add_transaction_to_database( - 1.into(), + 1u64.into(), 1 * T, Some(TransactionStatus::Completed), None, @@ -892,7 +897,7 @@ async fn tx_revalidation() { ) .await; add_transaction_to_database( - 2.into(), + 2u64.into(), 2 * T, Some(TransactionStatus::Completed), None, @@ -900,7 +905,7 @@ async fn tx_revalidation() { ) .await; - let tx2 = resources.db.get_completed_transaction(2.into()).await.unwrap(); + let tx2 = resources.db.get_completed_transaction(2u64.into()).await.unwrap(); // set tx2 as fully mined let transaction_query_batch_responses = vec![TxQueryBatchResponseProto { @@ -938,10 +943,10 @@ async fn tx_revalidation() { let completed_txs = resources.db.get_completed_transactions().await.unwrap(); assert_eq!( - completed_txs.get(&2.into()).unwrap().status, + completed_txs.get(&2u64.into()).unwrap().status, TransactionStatus::MinedConfirmed ); - assert_eq!(completed_txs.get(&2.into()).unwrap().confirmations.unwrap(), 4); + assert_eq!(completed_txs.get(&2u64.into()).unwrap().confirmations.unwrap(), 4); let 
transaction_query_batch_responses = vec![TxQueryBatchResponseProto { signature: Some(SignatureProto::from( @@ -965,11 +970,11 @@ async fn tx_revalidation() { resources.db.mark_all_transactions_as_unvalidated().await.unwrap(); let completed_txs = resources.db.get_completed_transactions().await.unwrap(); assert_eq!( - completed_txs.get(&2.into()).unwrap().status, + completed_txs.get(&2u64.into()).unwrap().status, TransactionStatus::MinedConfirmed ); - assert_eq!(completed_txs.get(&2.into()).unwrap().mined_height, None); - assert_eq!(completed_txs.get(&2.into()).unwrap().mined_in_block, None); + assert_eq!(completed_txs.get(&2u64.into()).unwrap().mined_height, None); + assert_eq!(completed_txs.get(&2u64.into()).unwrap().mined_in_block, None); let protocol = TransactionValidationProtocol::new( 5.into(), @@ -987,10 +992,10 @@ async fn tx_revalidation() { let completed_txs = resources.db.get_completed_transactions().await.unwrap(); // data should now be updated and changed assert_eq!( - completed_txs.get(&2.into()).unwrap().status, + completed_txs.get(&2u64.into()).unwrap().status, TransactionStatus::MinedConfirmed ); - assert_eq!(completed_txs.get(&2.into()).unwrap().confirmations.unwrap(), 8); + assert_eq!(completed_txs.get(&2u64.into()).unwrap().confirmations.unwrap(), 8); } /// Test that validation detects transactions becoming mined unconfirmed and then confirmed with some going back to @@ -1027,7 +1032,7 @@ async fn tx_validation_protocol_reorg() { } add_transaction_to_database( - 6.into(), + 6u64.into(), 6 * T, Some(TransactionStatus::Coinbase), Some(8), @@ -1036,7 +1041,7 @@ async fn tx_validation_protocol_reorg() { .await; add_transaction_to_database( - 7.into(), + 7u64.into(), 7 * T, Some(TransactionStatus::Coinbase), Some(9), @@ -1052,13 +1057,13 @@ async fn tx_validation_protocol_reorg() { } rpc_service_state.set_blocks(block_headers.clone()); - let tx1 = resources.db.get_completed_transaction(1.into()).await.unwrap(); - let tx2 = 
resources.db.get_completed_transaction(2.into()).await.unwrap(); - let tx3 = resources.db.get_completed_transaction(3.into()).await.unwrap(); - let tx4 = resources.db.get_completed_transaction(4.into()).await.unwrap(); - let tx5 = resources.db.get_completed_transaction(5.into()).await.unwrap(); - let coinbase_tx1 = resources.db.get_completed_transaction(6.into()).await.unwrap(); - let coinbase_tx2 = resources.db.get_completed_transaction(7.into()).await.unwrap(); + let tx1 = resources.db.get_completed_transaction(1u64.into()).await.unwrap(); + let tx2 = resources.db.get_completed_transaction(2u64.into()).await.unwrap(); + let tx3 = resources.db.get_completed_transaction(3u64.into()).await.unwrap(); + let tx4 = resources.db.get_completed_transaction(4u64.into()).await.unwrap(); + let tx5 = resources.db.get_completed_transaction(5u64.into()).await.unwrap(); + let coinbase_tx1 = resources.db.get_completed_transaction(6u64.into()).await.unwrap(); + let coinbase_tx2 = resources.db.get_completed_transaction(7u64.into()).await.unwrap(); let transaction_query_batch_responses = vec![ TxQueryBatchResponseProto { @@ -1262,23 +1267,29 @@ async fn tx_validation_protocol_reorg() { let completed_txs = resources.db.get_completed_transactions().await.unwrap(); assert_eq!( - completed_txs.get(&4.into()).unwrap().status, + completed_txs.get(&4u64.into()).unwrap().status, TransactionStatus::Completed ); assert_eq!( - completed_txs.get(&5.into()).unwrap().status, + completed_txs.get(&5u64.into()).unwrap().status, TransactionStatus::MinedUnconfirmed ); - assert_eq!(completed_txs.get(&5.into()).cloned().unwrap().mined_height.unwrap(), 8); - assert_eq!(completed_txs.get(&5.into()).cloned().unwrap().confirmations.unwrap(), 1); assert_eq!( - completed_txs.get(&7.into()).unwrap().status, + completed_txs.get(&5u64.into()).cloned().unwrap().mined_height.unwrap(), + 8 + ); + assert_eq!( + completed_txs.get(&5u64.into()).cloned().unwrap().confirmations.unwrap(), + 1 + ); + assert_eq!( + 
completed_txs.get(&7u64.into()).unwrap().status, TransactionStatus::Coinbase ); let cancelled_completed_txs = resources.db.get_cancelled_completed_transactions().await.unwrap(); assert!(matches!( - cancelled_completed_txs.get(&6.into()).unwrap().cancelled, + cancelled_completed_txs.get(&6u64.into()).unwrap().cancelled, Some(TxCancellationReason::AbandonedCoinbase) )); } diff --git a/base_layer/wallet/tests/utxo_scanner.rs b/base_layer/wallet/tests/utxo_scanner.rs index 32ae78a6e7..8df7e6c22f 100644 --- a/base_layer/wallet/tests/utxo_scanner.rs +++ b/base_layer/wallet/tests/utxo_scanner.rs @@ -362,6 +362,7 @@ async fn test_utxo_scanner_recovery() { } } #[tokio::test] +#[allow(clippy::too_many_lines)] async fn test_utxo_scanner_recovery_with_restart() { let factories = CryptoFactories::default(); let mut test_interface = setup(UtxoScannerMode::Recovery, None, None, None).await; @@ -521,6 +522,7 @@ async fn test_utxo_scanner_recovery_with_restart() { } #[tokio::test] +#[allow(clippy::too_many_lines)] async fn test_utxo_scanner_recovery_with_restart_and_reorg() { let factories = CryptoFactories::default(); let mut test_interface = setup(UtxoScannerMode::Recovery, None, None, None).await; @@ -774,6 +776,7 @@ async fn test_utxo_scanner_scanned_block_cache_clearing() { } #[tokio::test] +#[allow(clippy::too_many_lines)] async fn test_utxo_scanner_one_sided_payments() { let factories = CryptoFactories::default(); let mut test_interface = setup( diff --git a/base_layer/wallet/tests/wallet.rs b/base_layer/wallet/tests/wallet.rs index 8dcf27e974..b923843ef7 100644 --- a/base_layer/wallet/tests/wallet.rs +++ b/base_layer/wallet/tests/wallet.rs @@ -214,6 +214,7 @@ async fn create_wallet( } #[tokio::test] +#[allow(clippy::too_many_lines)] async fn test_wallet() { let mut shutdown_a = Shutdown::new(); let mut shutdown_b = Shutdown::new(); @@ -529,6 +530,7 @@ fn test_many_iterations_store_and_forward_send_tx() { } #[tokio::test] +#[allow(clippy::too_many_lines)] async fn 
test_store_and_forward_send_tx() { let shutdown_a = Shutdown::new(); let shutdown_c = Shutdown::new(); @@ -612,11 +614,8 @@ async fn test_store_and_forward_send_tx() { let events = collect_recv!(alice_events, take = 2, timeout = Duration::from_secs(10)); for evt in events { - match &*evt { - TransactionEvent::TransactionSendResult(_, result) => { - assert!(result.store_and_forward_send_result); - }, - _ => {}, + if let TransactionEvent::TransactionSendResult(_, result) = &*evt { + assert!(result.store_and_forward_send_result); } } @@ -643,9 +642,8 @@ async fn test_store_and_forward_send_tx() { loop { tokio::select! { event = carol_event_stream.recv() => { - match &*event.unwrap() { - TransactionEvent::ReceivedTransaction(_) => tx_recv = true, - _ => (), + if let TransactionEvent::ReceivedTransaction(_) = &*event.unwrap() { + tx_recv = true; } if tx_recv { break; @@ -816,6 +814,7 @@ async fn test_recovery_birthday() { } #[tokio::test] +#[allow(clippy::too_many_lines)] async fn test_contacts_service_liveness() { let mut shutdown_a = Shutdown::new(); let mut shutdown_b = Shutdown::new(); diff --git a/base_layer/wallet_ffi/src/callback_handler.rs b/base_layer/wallet_ffi/src/callback_handler.rs index 39312b8c24..9135a2dbab 100644 --- a/base_layer/wallet_ffi/src/callback_handler.rs +++ b/base_layer/wallet_ffi/src/callback_handler.rs @@ -237,6 +237,7 @@ where TBackend: TransactionBackend + 'static } } + #[allow(clippy::too_many_lines)] pub async fn start(mut self) { let mut shutdown_signal = self .shutdown_signal diff --git a/base_layer/wallet_ffi/src/callback_handler_tests.rs b/base_layer/wallet_ffi/src/callback_handler_tests.rs index 47a0dfd4c6..5cdba40323 100644 --- a/base_layer/wallet_ffi/src/callback_handler_tests.rs +++ b/base_layer/wallet_ffi/src/callback_handler_tests.rs @@ -74,6 +74,7 @@ mod test { use crate::{callback_handler::CallbackHandler, output_manager_service_mock::MockOutputManagerService}; #[derive(Debug)] + #[allow(clippy::struct_excessive_bools)] 
struct CallbackState { pub received_tx_callback_called: bool, pub received_tx_reply_callback_called: bool, @@ -248,6 +249,7 @@ mod test { } #[test] + #[allow(clippy::too_many_lines)] fn test_callback_handler() { let runtime = Runtime::new().unwrap(); diff --git a/base_layer/wallet_ffi/src/error.rs b/base_layer/wallet_ffi/src/error.rs index 14ceb7dd13..a5ae53710c 100644 --- a/base_layer/wallet_ffi/src/error.rs +++ b/base_layer/wallet_ffi/src/error.rs @@ -112,6 +112,7 @@ impl From for LibWalletError { /// This implementation maps the internal WalletError to a set of LibWalletErrors. The mapping is explicitly managed /// here and error code 999 is a catch-all code for any errors that are not explicitly mapped impl From for LibWalletError { + #[allow(clippy::too_many_lines)] fn from(w: WalletError) -> Self { error!(target: LOG_TARGET, "{}", format!("{:?}", w)); match w { diff --git a/base_layer/wallet_ffi/src/lib.rs b/base_layer/wallet_ffi/src/lib.rs index 206ff4b380..be59c721a4 100644 --- a/base_layer/wallet_ffi/src/lib.rs +++ b/base_layer/wallet_ffi/src/lib.rs @@ -3219,6 +3219,7 @@ pub unsafe extern "C" fn transport_config_destroy(transport: *mut TariTransportC /// # Safety /// The ```comms_config_destroy``` method must be called when finished with a TariCommsConfig to prevent a memory leak #[no_mangle] +#[allow(clippy::too_many_lines)] pub unsafe extern "C" fn comms_config_create( public_address: *const c_char, transport: *const TariTransportConfig, @@ -3580,6 +3581,7 @@ unsafe fn init_logging( /// The ```wallet_destroy``` method must be called when finished with a TariWallet to prevent a memory leak #[no_mangle] #[allow(clippy::cognitive_complexity)] +#[allow(clippy::too_many_lines)] pub unsafe extern "C" fn wallet_create( config: *mut TariCommsConfig, log_path: *const c_char, @@ -3736,15 +3738,12 @@ pub unsafe extern "C" fn wallet_create( if !node_identity.is_signed() { node_identity.sign(); // unreachable panic: signed above - wallet_database - 
.set_comms_identity_signature( - node_identity - .identity_signature_read() - .as_ref() - .expect("unreachable panic") - .clone(), - ) - .await?; + let sig = node_identity + .identity_signature_read() + .as_ref() + .expect("unreachable panic") + .clone(); + wallet_database.set_comms_identity_signature(sig).await?; } Ok((master_seed, node_identity)) }); @@ -5251,6 +5250,7 @@ pub unsafe extern "C" fn wallet_get_public_key(wallet: *mut TariWallet, error_ou /// # Safety /// None #[no_mangle] +#[allow(clippy::too_many_lines)] pub unsafe extern "C" fn wallet_import_external_utxo_as_non_rewindable( wallet: *mut TariWallet, amount: c_ulonglong, @@ -6458,6 +6458,7 @@ mod test { #[allow(dead_code)] #[derive(Debug)] + #[allow(clippy::struct_excessive_bools)] struct CallbackState { pub received_tx_callback_called: bool, pub received_tx_reply_callback_called: bool, @@ -7069,6 +7070,7 @@ mod test { } #[test] + #[allow(clippy::too_many_lines)] fn test_master_private_key_persistence() { unsafe { let mut error = 0; @@ -7230,6 +7232,7 @@ mod test { } #[test] + #[allow(clippy::too_many_lines)] fn test_wallet_encryption() { unsafe { let mut error = 0; @@ -7470,6 +7473,7 @@ mod test { } #[test] + #[allow(clippy::too_many_lines)] fn test_wallet_client_key_value_store() { unsafe { let mut error = 0; @@ -7650,6 +7654,7 @@ mod test { } #[test] + #[allow(clippy::too_many_lines)] pub fn test_import_external_utxo() { unsafe { let mut error = 0; @@ -7846,6 +7851,7 @@ mod test { } #[test] + #[allow(clippy::too_many_lines)] pub fn test_seed_words() { unsafe { let mut error = 0; diff --git a/base_layer/wallet_ffi/src/tasks.rs b/base_layer/wallet_ffi/src/tasks.rs index 0013e220ba..c36b801902 100644 --- a/base_layer/wallet_ffi/src/tasks.rs +++ b/base_layer/wallet_ffi/src/tasks.rs @@ -38,6 +38,7 @@ enum RecoveryEvent { RecoveryFailed, // 6 } +#[allow(clippy::too_many_lines)] pub async fn recovery_event_monitoring( mut event_stream: broadcast::Receiver, recovery_join_handle: JoinHandle>, diff --git 
a/common/src/configuration/loader.rs b/common/src/configuration/loader.rs index 741f498296..74a6527f04 100644 --- a/common/src/configuration/loader.rs +++ b/common/src/configuration/loader.rs @@ -276,7 +276,7 @@ where C: ConfigPath + Default + serde::ser::Serialize + for<'de> serde::de::Dese let final_value: config::Value = merger.get(Self::main_key_prefix())?; final_value .try_deserialize() - .map_err(|ce| ConfigurationError::new(Self::main_key_prefix(), None, ce)) + .map_err(|ce| ConfigurationError::new(Self::main_key_prefix(), None, ce.to_string())) } } @@ -290,11 +290,11 @@ pub struct ConfigurationError { } impl ConfigurationError { - pub fn new(field: F, value: Option, msg: M) -> Self { + pub fn new, M: Into>(field: F, value: Option, msg: M) -> Self { ConfigurationError { - field: field.to_string(), + field: field.into(), value, - message: msg.to_string(), + message: msg.into(), } } } diff --git a/common/src/exit_codes.rs b/common/src/exit_codes.rs index 452f9638d4..50f707c8bf 100644 --- a/common/src/exit_codes.rs +++ b/common/src/exit_codes.rs @@ -12,9 +12,11 @@ pub struct ExitError { } impl ExitError { - pub fn new(exit_code: ExitCode, details: impl ToString) -> Self { - let details = Some(details.to_string()); - Self { exit_code, details } + pub fn new(exit_code: ExitCode, details: T) -> Self { + Self { + exit_code, + details: Some(details.to_string()), + } } } @@ -36,7 +38,7 @@ impl fmt::Display for ExitError { impl From for ExitError { fn from(err: anyhow::Error) -> Self { - ExitError::new(ExitCode::UnknownError, &err) + ExitError::new(ExitCode::UnknownError, err) } } @@ -100,24 +102,24 @@ impl From for ExitError { fn from(err: super::ConfigError) -> Self { // TODO: Move it out // error!(target: LOG_TARGET, "{}", err); - Self::new(ExitCode::ConfigError, &err) + Self::new(ExitCode::ConfigError, err.to_string()) } } impl From for ExitError { fn from(err: crate::ConfigurationError) -> Self { - Self::new(ExitCode::ConfigError, &err) + 
Self::new(ExitCode::ConfigError, err.to_string()) } } impl From for ExitError { fn from(err: multiaddr::Error) -> Self { - Self::new(ExitCode::ConfigError, &err) + Self::new(ExitCode::ConfigError, err.to_string()) } } impl From for ExitError { fn from(err: std::io::Error) -> Self { - Self::new(ExitCode::IOError, &err) + Self::new(ExitCode::IOError, err.to_string()) } } diff --git a/comms/core/src/backoff.rs b/comms/core/src/backoff.rs index 39f912a344..207823e51e 100644 --- a/comms/core/src/backoff.rs +++ b/comms/core/src/backoff.rs @@ -58,7 +58,9 @@ impl Backoff for ExponentialBackoff { if attempts <= 1 { return Duration::from_secs(0); } - let secs = (f64::from(self.factor)) * ((1usize << min(attempts, 63)) as f64 - 1.0); + // We put an upper bound on attempts so that it can never overflow the 52-bit mantissa when converting to f64 + let secs = (f64::from(self.factor)) * ((1usize << min(attempts, 51)) as f64 - 1.0); + #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] Duration::from_secs(secs.ceil() as u64) } } @@ -100,9 +102,9 @@ mod test { assert_eq!(backoff.calculate_backoff(8).as_secs(), 383); assert_eq!(backoff.calculate_backoff(9).as_secs(), 767); assert_eq!(backoff.calculate_backoff(10).as_secs(), 1535); - assert_eq!(backoff.calculate_backoff(63).as_secs(), 13835058055282163712); - assert_eq!(backoff.calculate_backoff(64).as_secs(), 13835058055282163712); - assert_eq!(backoff.calculate_backoff(200).as_secs(), 13835058055282163712); + assert_eq!(backoff.calculate_backoff(63).as_secs(), 3377699720527871); + assert_eq!(backoff.calculate_backoff(64).as_secs(), 3377699720527871); + assert_eq!(backoff.calculate_backoff(200).as_secs(), 3377699720527871); } #[test] diff --git a/comms/core/src/peer_manager/peer_storage.rs b/comms/core/src/peer_manager/peer_storage.rs index 4101df8b22..e466b2b77f 100644 --- a/comms/core/src/peer_manager/peer_storage.rs +++ b/comms/core/src/peer_manager/peer_storage.rs @@ -678,6 +678,7 @@ mod test { 
assert!(peer_storage.find_by_public_key(&peer3.public_key).is_ok()); } + #[allow(clippy::too_many_lines)] #[test] fn test_add_delete_find_peer() { let mut peer_storage = PeerStorage::new_indexed(HashmapDatabase::new()).unwrap(); diff --git a/comms/core/src/protocol/rpc/server/chunking.rs b/comms/core/src/protocol/rpc/server/chunking.rs index 6fae216084..85aa986ade 100644 --- a/comms/core/src/protocol/rpc/server/chunking.rs +++ b/comms/core/src/protocol/rpc/server/chunking.rs @@ -132,6 +132,7 @@ impl ChunkedResponseIter { fn exceeded_message_size(&self) -> proto::rpc::RpcResponse { const BYTES_PER_MB: f32 = 1024.0 * 1024.0; + // Precision loss is acceptable because this is for display purposes only let msg = format!( "The response size exceeded the maximum allowed payload size. Max = {:.4} MiB, Got = {:.4} MiB", rpc::max_response_payload_size() as f32 / BYTES_PER_MB, diff --git a/comms/dht/examples/memory_net/utilities.rs b/comms/dht/examples/memory_net/utilities.rs index 4210dd16c0..b942e74197 100644 --- a/comms/dht/examples/memory_net/utilities.rs +++ b/comms/dht/examples/memory_net/utilities.rs @@ -256,6 +256,7 @@ pub async fn network_connectivity_stats(nodes: &[TestNode], wallets: &[TestNode] ); } +#[allow(clippy::too_many_lines)] pub async fn do_network_wide_propagation(nodes: &mut [TestNode], origin_node_index: Option) -> (usize, usize) { let random_node = match origin_node_index { Some(n) if n < nodes.len() => &nodes[n], @@ -376,6 +377,7 @@ pub async fn do_network_wide_propagation(nodes: &mut [TestNode], origin_node_ind (num_successes, nodes.len() - 1) } +#[allow(clippy::too_many_lines)] pub async fn do_store_and_forward_message_propagation( wallet: TestNode, wallets: &[TestNode], diff --git a/comms/dht/examples/memorynet.rs b/comms/dht/examples/memorynet.rs index ad1d5e83eb..5633f3ba23 100644 --- a/comms/dht/examples/memorynet.rs +++ b/comms/dht/examples/memorynet.rs @@ -71,6 +71,7 @@ const PROPAGATION_FACTOR: usize = 4; #[tokio::main] 
#[allow(clippy::same_item_push)] +#[allow(clippy::too_many_lines)] async fn main() { env_logger::init(); diff --git a/comms/dht/examples/memorynet_graph_network_join_multiple_seeds.rs b/comms/dht/examples/memorynet_graph_network_join_multiple_seeds.rs index 1643fd054b..ac677482bd 100644 --- a/comms/dht/examples/memorynet_graph_network_join_multiple_seeds.rs +++ b/comms/dht/examples/memorynet_graph_network_join_multiple_seeds.rs @@ -73,6 +73,7 @@ use crate::{ #[tokio::main] #[allow(clippy::same_item_push)] +#[allow(clippy::too_many_lines)] async fn main() { env_logger::init(); let matches = App::new("MemoryNet") diff --git a/comms/dht/examples/memorynet_graph_network_track_join.rs b/comms/dht/examples/memorynet_graph_network_track_join.rs index 6673d94b7b..e62aa199a7 100644 --- a/comms/dht/examples/memorynet_graph_network_track_join.rs +++ b/comms/dht/examples/memorynet_graph_network_track_join.rs @@ -81,6 +81,7 @@ use crate::{ #[tokio::main] #[allow(clippy::same_item_push)] +#[allow(clippy::too_many_lines)] async fn main() { let _ = env_logger::from_env(Env::default()) .format_timestamp_millis() diff --git a/comms/dht/src/actor.rs b/comms/dht/src/actor.rs index 94ab91036a..f4f63308fd 100644 --- a/comms/dht/src/actor.rs +++ b/comms/dht/src/actor.rs @@ -264,8 +264,7 @@ impl DhtActor { debug!( target: LOG_TARGET, "Message dedup cache will be trimmed to capacity every {}s", - config.dedup_cache_trim_interval.as_secs() as f64 + - f64::from(config.dedup_cache_trim_interval.subsec_nanos()) * 1e-9 + config.dedup_cache_trim_interval.as_secs() ); Self { msg_hash_dedup_cache: DedupCacheDatabase::new(conn.clone(), config.dedup_cache_capacity), @@ -463,6 +462,8 @@ impl DhtActor { Ok(()) } + // TODO: Break up this function + #[allow(clippy::too_many_lines)] async fn select_peers( config: &DhtConfig, node_identity: Arc, diff --git a/comms/dht/src/connectivity/mod.rs b/comms/dht/src/connectivity/mod.rs index 982b15a443..edb13d3779 100644 --- a/comms/dht/src/connectivity/mod.rs +++ 
b/comms/dht/src/connectivity/mod.rs @@ -218,15 +218,16 @@ impl DhtConnectivity { matches!(ip, Some(multiaddr::Protocol::Ip4(_))) && matches!(tcp, Some(multiaddr::Protocol::Tcp(_))) }) .count(); + let current_ratio = num_tcp_nodes as f32 / conns.len() as f32; if current_ratio < desired_ratio { warn!( target: LOG_TARGET, - "{}% of this node's {} connections are using TCPv4. This node requires at least {}% of nodes to be \ - TCP nodes.", - (current_ratio * 100.0).round() as i64, + "{:.1?}% of this node's {} connections are using TCPv4. This node requires at least {:.1?}% of nodes \ + to be TCP nodes.", + (current_ratio * 100.0).round(), conns.len(), - (desired_ratio * 100.0).round() as i64, + (desired_ratio * 100.0).round(), ); } diff --git a/comms/dht/src/dedup/dedup_cache.rs b/comms/dht/src/dedup/dedup_cache.rs index 2c7786295d..a18116ceac 100644 --- a/comms/dht/src/dedup/dedup_cache.rs +++ b/comms/dht/src/dedup/dedup_cache.rs @@ -79,6 +79,7 @@ impl DedupCacheDatabase { .get_result::(&conn) .optional()?; + #[allow(clippy::cast_sign_loss)] Ok(hit_count.unwrap_or(0) as u32) } @@ -138,6 +139,7 @@ impl DedupCacheDatabase { .select(dedup_cache::number_of_hits) .filter(dedup_cache::body_hash.eq(&body_hash)) .get_result::(&conn)?; + #[allow(clippy::cast_sign_loss)] Ok(hits as u32) }, _ => Err(diesel::result::Error::DatabaseError(kind, e_info).into()), diff --git a/comms/dht/src/envelope.rs b/comms/dht/src/envelope.rs index dba33c957c..ef75e16a3f 100644 --- a/comms/dht/src/envelope.rs +++ b/comms/dht/src/envelope.rs @@ -49,7 +49,6 @@ pub(crate) fn datetime_to_timestamp(datetime: DateTime) -> Timestamp { } /// Utility function that converts a `prost::Timestamp` to a `chrono::DateTime` -#[allow(clippy::needless_pass_by_value)] pub(crate) fn timestamp_to_datetime(timestamp: Timestamp) -> Option> { let naive = NaiveDateTime::from_timestamp_opt(timestamp.seconds, u32::try_from(cmp::max(0, timestamp.nanos)).unwrap())?; @@ -58,6 +57,7 @@ pub(crate) fn 
timestamp_to_datetime(timestamp: Timestamp) -> Option) -> EpochTime { + #[allow(clippy::cast_sign_loss)] EpochTime::from_secs_since_epoch(datetime.timestamp() as u64) } diff --git a/comms/dht/src/network_discovery/ready.rs b/comms/dht/src/network_discovery/ready.rs index 2e95623989..68f6ac67bd 100644 --- a/comms/dht/src/network_discovery/ready.rs +++ b/comms/dht/src/network_discovery/ready.rs @@ -56,6 +56,8 @@ impl DiscoveryReady { } } + // TODO: Reduce LOC for this function + #[allow(clippy::too_many_lines)] async fn process(&mut self) -> Result { let num_peers = self.context.peer_manager.count().await; debug!(target: LOG_TARGET, "Peer list currently contains {} entries", num_peers); diff --git a/comms/dht/src/outbound/message_send_state.rs b/comms/dht/src/outbound/message_send_state.rs index 9de9383412..347be8616c 100644 --- a/comms/dht/src/outbound/message_send_state.rs +++ b/comms/dht/src/outbound/message_send_state.rs @@ -111,6 +111,7 @@ impl MessageSendStates { failed.push(tag); }, } + #[allow(clippy::cast_possible_wrap)] if (count as f32) / (total as f32) >= threshold_perc { break; } diff --git a/comms/dht/src/store_forward/database/mod.rs b/comms/dht/src/store_forward/database/mod.rs index f5ae0d5ce9..b27e75059c 100644 --- a/comms/dht/src/store_forward/database/mod.rs +++ b/comms/dht/src/store_forward/database/mod.rs @@ -204,11 +204,14 @@ impl StoreAndForwardDatabase { pub(crate) fn truncate_messages(&self, max_size: usize) -> Result { let mut num_removed = 0; let conn = self.connection.get_pooled_connection()?; + let max_size = max_size as u64; + #[allow(clippy::cast_sign_loss)] let msg_count = stored_messages::table .select(dsl::count(stored_messages::id)) - .first::(&conn)? as usize; + .first::(&conn)? 
as u64; if msg_count > max_size { let remove_count = msg_count - max_size; + #[allow(clippy::cast_possible_wrap)] let message_ids: Vec = stored_messages::table .select(stored_messages::id) .order_by(stored_messages::stored_at.asc()) diff --git a/comms/dht/src/store_forward/saf_handler/task.rs b/comms/dht/src/store_forward/saf_handler/task.rs index 64e5645793..59c5d6df08 100644 --- a/comms/dht/src/store_forward/saf_handler/task.rs +++ b/comms/dht/src/store_forward/saf_handler/task.rs @@ -675,6 +675,7 @@ mod test { #[tokio::test] #[allow(clippy::similar_names)] + #[allow(clippy::too_many_lines)] async fn request_stored_messages() { let spy = service_spy(); let (requester, mock_state) = create_store_and_forward_mock(); diff --git a/comms/dht/src/test_utils/store_and_forward_mock.rs b/comms/dht/src/test_utils/store_and_forward_mock.rs index 142ed1f21e..ceb42ddc02 100644 --- a/comms/dht/src/test_utils/store_and_forward_mock.rs +++ b/comms/dht/src/test_utils/store_and_forward_mock.rs @@ -135,6 +135,9 @@ impl StoreAndForwardMock { .collect())); }, InsertMessage(msg, reply_tx) => { + // Clippy: There is no data lost here, when converting back to u32 from i32 the unsigned value is + // preserved + #[allow(clippy::cast_possible_wrap)] self.state.stored_messages.write().await.push(StoredMessage { id: OsRng.next_u32() as i32, version: msg.version, diff --git a/comms/dht/tests/dht.rs b/comms/dht/tests/dht.rs index 52528b9f2c..95e4b250ea 100644 --- a/comms/dht/tests/dht.rs +++ b/comms/dht/tests/dht.rs @@ -493,6 +493,7 @@ async fn dht_store_forward() { #[tokio::test] #[allow(non_snake_case)] +#[allow(clippy::too_many_lines)] async fn dht_propagate_dedup() { let mut config = dht_config(); // For this test we want to exactly measure the path of a message, so we disable repropagation of messages (i.e @@ -637,6 +638,7 @@ async fn dht_propagate_dedup() { #[tokio::test] #[allow(non_snake_case)] +#[allow(clippy::too_many_lines)] async fn dht_do_not_store_invalid_message_in_dedup() { let 
mut config = dht_config(); config.dedup_allowed_message_occurrences = 1; diff --git a/comms/rpc_macros/src/expand.rs b/comms/rpc_macros/src/expand.rs index 9097a01f03..7641613da8 100644 --- a/comms/rpc_macros/src/expand.rs +++ b/comms/rpc_macros/src/expand.rs @@ -158,6 +158,8 @@ impl TraitInfoCollector { Ok(()) } + // TODO: remove this after PR #4063 is merged + #[allow(clippy::too_many_lines)] fn parse_method_signature(&self, node: &syn::TraitItemMethod, info: &mut RpcMethodInfo) -> syn::Result<()> { info.method_ident = node.sig.ident.clone(); diff --git a/dan_layer/core/src/models/view_id.rs b/dan_layer/core/src/models/view_id.rs index 0f9459e3c3..b1fbbd95ad 100644 --- a/dan_layer/core/src/models/view_id.rs +++ b/dan_layer/core/src/models/view_id.rs @@ -31,7 +31,9 @@ pub struct ViewId(pub u64); impl ViewId { pub fn current_leader(&self, committee_size: usize) -> usize { - (self.0 % committee_size as u64) as usize + #[allow(clippy::cast_possible_truncation)] + let view_id = self.0 as usize; + view_id % committee_size } pub fn is_genesis(&self) -> bool { diff --git a/dan_layer/core/src/templates/tip004_template.rs b/dan_layer/core/src/templates/tip004_template.rs index 28b8aa7329..0e642e3cbf 100644 --- a/dan_layer/core/src/templates/tip004_template.rs +++ b/dan_layer/core/src/templates/tip004_template.rs @@ -135,6 +135,7 @@ fn token_of_owner_by_index( let owner = request.owner.clone(); let index = request.index; let owner_records = state_db.find_keys_by_value("owners", &owner)?; + #[allow(clippy::cast_possible_truncation)] if let Some(token_id) = owner_records.into_iter().nth(index as usize) { let token = state_db .get_value("tokens", &token_id)? 
diff --git a/dan_layer/core/src/workers/states/prepare.rs b/dan_layer/core/src/workers/states/prepare.rs index e61d0cd425..c86649e372 100644 --- a/dan_layer/core/src/workers/states/prepare.rs +++ b/dan_layer/core/src/workers/states/prepare.rs @@ -317,6 +317,7 @@ impl Prepare { max_qc.unwrap() } + #[allow(clippy::cast_possible_truncation)] async fn create_proposal( &self, parent: TreeNodeHash, diff --git a/dan_layer/storage_sqlite/src/models/state_op_log.rs b/dan_layer/storage_sqlite/src/models/state_op_log.rs index 86b1ab2d40..5d21f3670e 100644 --- a/dan_layer/storage_sqlite/src/models/state_op_log.rs +++ b/dan_layer/storage_sqlite/src/models/state_op_log.rs @@ -51,6 +51,7 @@ pub struct NewStateOpLogEntry { } impl From for NewStateOpLogEntry { + #[allow(clippy::cast_possible_wrap)] fn from(entry: DbStateOpLogEntry) -> Self { Self { height: entry.height as i64, @@ -63,6 +64,7 @@ impl From for NewStateOpLogEntry { } } +#[allow(clippy::cast_sign_loss)] impl TryFrom for DbStateOpLogEntry { type Error = SqliteStorageError; diff --git a/dan_layer/storage_sqlite/src/sqlite_chain_backend_adapter.rs b/dan_layer/storage_sqlite/src/sqlite_chain_backend_adapter.rs index 3c19c49c86..7535f23166 100644 --- a/dan_layer/storage_sqlite/src/sqlite_chain_backend_adapter.rs +++ b/dan_layer/storage_sqlite/src/sqlite_chain_backend_adapter.rs @@ -111,6 +111,7 @@ impl ChainDbBackendAdapter for SqliteChainBackendAdapter { Ok(count > 0) } + #[allow(clippy::cast_sign_loss)] fn get_tip_node(&self) -> Result, Self::Error> { use crate::schema::nodes::dsl; @@ -137,6 +138,7 @@ impl ChainDbBackendAdapter for SqliteChainBackendAdapter { fn insert_node(&self, item: &DbNode, transaction: &Self::BackendTransaction) -> Result<(), Self::Error> { debug!(target: LOG_TARGET, "Inserting {:?}", item); + #[allow(clippy::cast_possible_wrap)] let new_node = NewNode { hash: Vec::from(item.hash.as_bytes()), parent: Vec::from(item.parent.as_bytes()), @@ -175,6 +177,7 @@ impl ChainDbBackendAdapter for 
SqliteChainBackendAdapter { Ok(()) } + #[allow(clippy::cast_possible_wrap)] fn update_locked_qc(&self, item: &DbQc, transaction: &Self::BackendTransaction) -> Result<(), Self::Error> { use crate::schema::locked_qc::dsl; let message_type = i32::from(item.message_type.as_u8()); @@ -213,6 +216,7 @@ impl ChainDbBackendAdapter for SqliteChainBackendAdapter { Ok(()) } + #[allow(clippy::cast_possible_wrap)] fn update_prepare_qc(&self, item: &DbQc, transaction: &Self::BackendTransaction) -> Result<(), Self::Error> { use crate::schema::prepare_qc::dsl; let message_type = i32::from(item.message_type.as_u8()); @@ -251,6 +255,7 @@ impl ChainDbBackendAdapter for SqliteChainBackendAdapter { Ok(()) } + #[allow(clippy::cast_sign_loss)] fn get_prepare_qc(&self) -> Result, Self::Error> { let connection = self.get_connection()?; use crate::schema::prepare_qc::dsl; @@ -293,6 +298,7 @@ impl ChainDbBackendAdapter for SqliteChainBackendAdapter { 1 } + #[allow(clippy::cast_sign_loss)] fn find_highest_prepared_qc(&self) -> Result { use crate::schema::locked_qc::dsl; let connection = self.get_connection()?; @@ -333,6 +339,7 @@ impl ChainDbBackendAdapter for SqliteChainBackendAdapter { )) } + #[allow(clippy::cast_sign_loss)] fn get_locked_qc(&self) -> Result { use crate::schema::locked_qc::dsl; let connection = self.get_connection()?; @@ -374,6 +381,7 @@ impl ChainDbBackendAdapter for SqliteChainBackendAdapter { } } + #[allow(clippy::cast_sign_loss)] fn find_node_by_parent_hash(&self, parent_hash: &TreeNodeHash) -> Result, Self::Error> { use crate::schema::nodes::dsl; let connection = self.get_connection()?; diff --git a/dan_layer/storage_sqlite/src/sqlite_state_db_backend_adapter.rs b/dan_layer/storage_sqlite/src/sqlite_state_db_backend_adapter.rs index 3b5b9af7b6..bd4db62c84 100644 --- a/dan_layer/storage_sqlite/src/sqlite_state_db_backend_adapter.rs +++ b/dan_layer/storage_sqlite/src/sqlite_state_db_backend_adapter.rs @@ -261,6 +261,8 @@ impl StateDbBackendAdapter for 
SqliteStateDbBackendAdapter { .collect()) } + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_wrap)] fn get_state_op_logs_by_height( &self, height: u64, diff --git a/infrastructure/libtor/src/tor.rs b/infrastructure/libtor/src/tor.rs index 86a9b51033..e9b6387194 100644 --- a/infrastructure/libtor/src/tor.rs +++ b/infrastructure/libtor/src/tor.rs @@ -123,7 +123,7 @@ impl Tor { }, _ => { let e = format!("Expected a TorHiddenService comms transport, received: {:?}", transport); - Err(ExitError::new(ExitCode::ConfigError, &e)) + Err(ExitError::new(ExitCode::ConfigError, e)) }, } } diff --git a/infrastructure/tari_script/src/script.rs b/infrastructure/tari_script/src/script.rs index ca3b284ae1..5ac0e826f6 100644 --- a/infrastructure/tari_script/src/script.rs +++ b/infrastructure/tari_script/src/script.rs @@ -1000,6 +1000,7 @@ mod test { (msg, data) } + #[allow(clippy::too_many_lines)] #[test] fn check_multisig() { use crate::{op_codes::Opcode::CheckMultiSig, StackItem::Number}; @@ -1189,6 +1190,7 @@ mod test { assert_eq!(result, Number(1)); } + #[allow(clippy::too_many_lines)] #[test] fn check_multisig_verify() { use crate::{op_codes::Opcode::CheckMultiSigVerify, StackItem::Number}; diff --git a/infrastructure/tari_script/src/script_context.rs b/infrastructure/tari_script/src/script_context.rs index 6be42b8e28..dcc20fe7be 100644 --- a/infrastructure/tari_script/src/script_context.rs +++ b/infrastructure/tari_script/src/script_context.rs @@ -40,7 +40,6 @@ impl ScriptContext { } } - #[inline(always)] pub fn block_height(&self) -> u64 { self.block_height } diff --git a/lints.toml b/lints.toml index 4deea9d3d3..c2a6172caf 100644 --- a/lints.toml +++ b/lints.toml @@ -1,21 +1,19 @@ deny = [ # Prevent spelling mistakes in lints 'unknown_lints', - # clippy groups: 'clippy::correctness', - # All clippy allows must have a reason # TODO: enable lint-reasons feature -# 'clippy::allow_attributes_without_reason', + # 'clippy::allow_attributes_without_reason', # Docs 
- # 'missing_docs', -# 'clippy::missing_errors_doc', -# 'clippy::missing_safety_doc', -# 'clippy::missing_panics_doc', + #'missing_docs', + # 'clippy::missing_errors_doc', + # 'clippy::missing_safety_doc', + # 'clippy::missing_panics_doc', # Common mistakes - # 'clippy::await_holding_lock', + 'clippy::await_holding_lock', 'unused_variables', 'unused_imports', 'dead_code', @@ -27,37 +25,42 @@ deny = [ 'clippy::dbg_macro', 'clippy::else_if_without_else', 'clippy::enum_glob_use', - # This is 99% not needed - # 'clippy::inline_always', + 'clippy::inline_always', 'clippy::let_underscore_drop', 'clippy::let_unit_value', 'clippy::match_on_vec_items', 'clippy::match_wild_err_arm', + # In crypto code, it is fairly common to have similar names e.g. `owner_pk` and `owner_k` # 'clippy::similar_names', 'clippy::needless_borrow', - # style + 'clippy::style', 'clippy::explicit_into_iter_loop', 'clippy::explicit_iter_loop', 'clippy::if_not_else', 'clippy::match_bool', - # perhaps this is a bit harsh - # 'clippy::too_many_lines', + # Although generally good practice, this is disabled because the code becomes worse + # or needs mod-level exclusion in these cases: + # tauri commands, blockchain async db needs owned copy, &Arc, Copy types, T: AsRef<..>, T: ToString + # 'clippy::needless_pass_by_value', + 'clippy::range_plus_one', + 'clippy::struct_excessive_bools', + 'clippy::too_many_lines', 'clippy::trivially_copy_pass_by_ref', - - # casting mistakes + # Highlights potential casting mistakes 'clippy::cast_lossless', - # 'clippy::cast_possible_truncation', - # 'clippy::cast_possible_wrap', +# 'clippy::cast_possible_truncation', +# 'clippy::cast_possible_wrap', + # Precision loss is almost always completely fine and is only useful as a sanity check.
+ # https://rust-lang.github.io/rust-clippy/master/index.html#cast_precision_loss # 'clippy::cast_precision_loss', - # This is tricky because sqlite uses signed ints and it's used to represent unsigned data - # 'clippy::cast_sign_loss' +# 'clippy::cast_sign_loss' ] -allow= [ +allow = [ # allow Default::default calls 'clippy::default_trait_access', - # Generally when developers fix this, it can lead to leaky abstractions or worse, so - # too many arguments is generally the lesser of two evils - 'clippy::too_many_arguments' -] \ No newline at end of file + # Generally when developers fix this, it can lead to leaky abstractions or worse, so + # too many arguments is generally the lesser of two evils + 'clippy::too_many_arguments' +]