Skip to content

Commit

Permalink
chore: use standard lint settings and fix lints (#4066)
Browse files Browse the repository at this point in the history
Description
---
- updates lints.toml to match [tari-meta](https://github.com/tari-project/meta/blob/main/lints.toml)
- fixes a number of clippy minor lints

Motivation and Context
---
Tari repo uses standard linter settings. EDIT: there are too many cast-related clippy lints to investigate, so those are left for another PR.

Larger fixes are in #4063 #4064 #4065 which should be merged before this PR.

How Has This Been Tested?
---
`cargo lints clippy` passes
  • Loading branch information
sdbondi authored May 16, 2022
1 parent e8d1091 commit 6163d7b
Show file tree
Hide file tree
Showing 163 changed files with 708 additions and 595 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@ use crate::{

#[derive(Derivative, Serialize, Deserialize)]
#[derivative(Debug)]
#[allow(clippy::struct_excessive_bools)]
pub struct WorkspaceLaunchOptions {
root_folder: String,
tari_network: String,
Expand Down
8 changes: 4 additions & 4 deletions applications/launchpad/backend/src/commands/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -246,14 +246,14 @@ async fn start_service_impl(
app.clone(),
log_events_name.as_str(),
container_name.as_str(),
docker.clone(),
&docker,
workspace,
);
container_stats(
app.clone(),
stats_events_name.as_str(),
container_name.as_str(),
docker.clone(),
&docker,
workspace,
);
// Collect data for the return object
Expand All @@ -280,7 +280,7 @@ fn container_logs(
app: AppHandle<Wry>,
event_name: &str,
container_name: &str,
docker: Docker,
docker: &Docker,
workspace: &mut TariWorkspace,
) {
info!("Setting up log events for {}", container_name);
Expand Down Expand Up @@ -312,7 +312,7 @@ fn container_stats(
app: AppHandle<Wry>,
event_name: &str,
container_name: &str,
docker: Docker,
docker: &Docker,
workspace: &mut TariWorkspace,
) {
info!("Setting up Resource stats events for {}", container_name);
Expand Down
6 changes: 3 additions & 3 deletions applications/launchpad/backend/src/docker/workspace.rs
Original file line number Diff line number Diff line change
Expand Up @@ -289,11 +289,11 @@ impl TariWorkspace {
Ok(ids)
}

/// Create and return a [`Stream`] of [`LogMessage`] instances for the `name`d container in the workspace.
/// Create and return a [`Stream`] of [`LogMessage`] instances for the `name`d container in the workspace.
pub fn logs(
&self,
container_name: &str,
docker: Docker,
docker: &Docker,
) -> Option<impl Stream<Item = Result<LogMessage, DockerWrapperError>>> {
let options = LogsOptions::<String> {
follow: true,
Expand All @@ -313,7 +313,7 @@ impl TariWorkspace {
pub fn resource_stats(
&self,
name: &str,
docker: Docker,
docker: &Docker,
) -> Option<impl Stream<Item = Result<Stats, DockerWrapperError>>> {
if let Some(container) = self.containers.get(name) {
let options = StatsOptions {
Expand Down
8 changes: 4 additions & 4 deletions applications/tari_app_grpc/src/conversions/block_header.rs
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ impl From<BlockHeader> for grpc::BlockHeader {
version: u32::from(h.version),
height: h.height,
prev_hash: h.prev_hash,
timestamp: Some(datetime_to_timestamp(h.timestamp)),
timestamp: datetime_to_timestamp(h.timestamp),
input_mr: h.input_mr,
output_mr: h.output_mr,
output_mmr_size: h.output_mmr_size,
Expand Down Expand Up @@ -69,15 +69,15 @@ impl TryFrom<grpc::BlockHeader> for BlockHeader {

let timestamp = header
.timestamp
.map(timestamp_to_datetime)
.ok_or_else(|| "timestamp not provided".to_string())?;
.and_then(timestamp_to_datetime)
.ok_or_else(|| "timestamp not provided or was negative".to_string())?;

let pow = match header.pow {
Some(p) => ProofOfWork::try_from(p)?,
None => return Err("No proof of work provided".into()),
};
Ok(Self {
version: header.version as u16,
version: u16::try_from(header.version).map_err(|_| "header version too large")?,
height: header.height,
prev_hash: header.prev_hash,
timestamp,
Expand Down
23 changes: 15 additions & 8 deletions applications/tari_app_grpc/src/conversions/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,9 @@ mod transaction_kernel;
mod transaction_output;
mod unblinded_output;

use std::convert::TryFrom;

use chrono::Utc;
use prost_types::Timestamp;
use tari_utilities::epoch_time::EpochTime;

Expand All @@ -64,11 +67,10 @@ pub use self::{
use crate::{tari_rpc as grpc, tari_rpc::BlockGroupRequest};

/// Utility function that converts a `EpochTime` to a `prost::Timestamp`
pub fn datetime_to_timestamp(datetime: EpochTime) -> Timestamp {
Timestamp {
seconds: datetime.as_u64() as i64,
nanos: 0,
}
/// Returns None if the EpochTime is negative or larger than i64::MAX.
pub(self) fn datetime_to_timestamp(datetime: EpochTime) -> Option<Timestamp> {
let seconds = i64::try_from(datetime.as_u64()).ok()?;
Some(Timestamp { seconds, nanos: 0 })
}

/// Utility function that converts a `chrono::NaiveDateTime` to a `prost::Timestamp`
Expand All @@ -79,13 +81,18 @@ pub fn naive_datetime_to_timestamp(datetime: chrono::NaiveDateTime) -> Timestamp
}
}

pub(crate) fn timestamp_to_datetime(timestamp: Timestamp) -> EpochTime {
(timestamp.seconds as u64).into()
/// Converts a protobuf Timestamp to an EpochTime.
/// Returns None if the timestamp is negative.
pub(self) fn timestamp_to_datetime(timestamp: Timestamp) -> Option<EpochTime> {
u64::try_from(timestamp.seconds).ok().map(Into::into)
}

/// Current unix time as timestamp (seconds part only)
pub fn timestamp() -> Timestamp {
datetime_to_timestamp(EpochTime::now())
Timestamp {
seconds: Utc::now().timestamp(),
nanos: 0,
}
}

impl From<u64> for grpc::IntegerValue {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ impl TryFrom<grpc::NewBlockTemplate> for NewBlockTemplate {
None => return Err("No proof of work provided".into()),
};
let header = NewBlockHeaderTemplate {
version: header.version as u16,
version: u16::try_from(header.version).map_err(|_| "header version too large")?,
height: header.height,
prev_hash: header.prev_hash,
total_kernel_offset,
Expand Down
4 changes: 2 additions & 2 deletions applications/tari_app_grpc/src/conversions/output_features.rs
Original file line number Diff line number Diff line change
Expand Up @@ -54,13 +54,13 @@ impl TryFrom<grpc::OutputFeatures> for OutputFeatures {
} else {
Some(PublicKey::from_bytes(features.parent_public_key.as_bytes()).map_err(|err| format!("{:?}", err))?)
};
let flags = u8::try_from(features.flags).map_err(|_| "Invalid output flags: overflowed u8")?;

Ok(OutputFeatures::new(
OutputFeaturesVersion::try_from(
u8::try_from(features.version).map_err(|_| "Invalid version: overflowed u8")?,
)?,
OutputFlags::from_bits(features.flags as u8)
.ok_or_else(|| "Invalid or unrecognised output flags".to_string())?,
OutputFlags::from_bits(flags).ok_or_else(|| "Invalid or unrecognised output flags".to_string())?,
features.maturity,
u8::try_from(features.recovery_byte).map_err(|_| "Invalid recovery byte: overflowed u8")?,
features.metadata,
Expand Down
31 changes: 12 additions & 19 deletions applications/tari_app_grpc/src/conversions/peer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,36 +23,29 @@
use tari_comms::{connectivity::ConnectivityStatus, net_address::MutliaddrWithStats, peer_manager::Peer};
use tari_utilities::ByteArray;

use crate::{conversions::datetime_to_timestamp, tari_rpc as grpc};
use crate::{conversions::naive_datetime_to_timestamp, tari_rpc as grpc};

#[allow(clippy::cast_possible_truncation)]
#[allow(clippy::cast_sign_loss)]
impl From<Peer> for grpc::Peer {
fn from(peer: Peer) -> Self {
let public_key = peer.public_key.to_vec();
let node_id = peer.node_id.to_vec();
let mut addresses: Vec<grpc::Address> = Vec::new();
let last_connection = match peer.addresses.last_seen() {
Some(v) => Some(datetime_to_timestamp((v.timestamp() as u64).into())),
None => Some(datetime_to_timestamp(0.into())),
};
let mut addresses = Vec::with_capacity(peer.addresses.addresses.len());
let last_connection = peer
.addresses
.last_seen()
.map(|v| naive_datetime_to_timestamp(v.naive_utc()));
for address in peer.addresses.addresses {
addresses.push(address.clone().into())
}
let flags = u32::from(peer.flags.bits());
let banned_until = match peer.banned_until {
Some(v) => Some(datetime_to_timestamp((v.timestamp() as u64).into())),
None => Some(datetime_to_timestamp(0.into())),
};
let banned_until = peer.banned_until.map(naive_datetime_to_timestamp);
let banned_reason = peer.banned_reason.to_string();
let offline_at = match peer.offline_at {
Some(v) => Some(datetime_to_timestamp((v.timestamp() as u64).into())),
None => Some(datetime_to_timestamp(0.into())),
};
let offline_at = peer.offline_at.map(naive_datetime_to_timestamp);
let features = peer.features.bits();

let last_connected_at = match peer.connection_stats.last_connected_at {
Some(v) => Some(datetime_to_timestamp((v.timestamp() as u64).into())),
None => Some(datetime_to_timestamp(0.into())),
};
let last_connected_at = peer.connection_stats.last_connected_at.map(naive_datetime_to_timestamp);
let supported_protocols = peer.supported_protocols.into_iter().map(|p| p.to_vec()).collect();
let user_agent = peer.user_agent;
Self {
Expand All @@ -77,7 +70,7 @@ impl From<MutliaddrWithStats> for grpc::Address {
let address = address_with_stats.address.to_vec();
let last_seen = match address_with_stats.last_seen {
Some(v) => v.to_string(),
None => "".to_string(),
None => String::new(),
};
let connection_attempts = address_with_stats.connection_attempts;
let rejected_message_count = address_with_stats.rejected_message_count;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -44,11 +44,13 @@ impl TryFrom<grpc::TransactionKernel> for TransactionKernel {
.try_into()
.map_err(|_| "excess_sig could not be converted".to_string())?;

let kernel_features = u8::try_from(kernel.features).map_err(|_| "kernel features must be a single byte")?;

Ok(Self::new(
TransactionKernelVersion::try_from(
u8::try_from(kernel.version).map_err(|_| "Invalid version: overflowed u8")?,
)?,
KernelFeatures::from_bits(kernel.features as u8)
KernelFeatures::from_bits(kernel_features)
.ok_or_else(|| "Invalid or unrecognised kernel feature flag".to_string())?,
MicroTari::from(kernel.fee),
kernel.lock_height,
Expand Down
6 changes: 3 additions & 3 deletions applications/tari_app_utilities/src/identity_management.rs
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ pub fn setup_node_identity<P: AsRef<Path>>(
},
Err(IdentityError::InvalidPermissions) => Err(ExitError::new(
ExitCode::ConfigError,
&format!(
format!(
"{path} has incorrect permissions. You can update the identity file with the correct permissions \
using 'chmod 600 {path}', or delete the identity file and a new one will be created on next start",
path = identity_file.as_ref().to_string_lossy()
Expand All @@ -82,7 +82,7 @@ pub fn setup_node_identity<P: AsRef<Path>>(
);
return Err(ExitError::new(
ExitCode::ConfigError,
&format!(
format!(
"Node identity information not found. {}. You can update the configuration file to point \
to a valid node identity file, or re-run the node to create a new one",
e
Expand Down Expand Up @@ -112,7 +112,7 @@ pub fn setup_node_identity<P: AsRef<Path>>(
error!(target: LOG_TARGET, "Could not create new node id. {}.", e);
Err(ExitError::new(
ExitCode::ConfigError,
&format!("Could not create new node id. {}.", e),
format!("Could not create new node id. {}.", e),
))
},
}
Expand Down
2 changes: 1 addition & 1 deletion applications/tari_app_utilities/src/utilities.rs
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ pub fn setup_runtime() -> Result<Runtime, ExitError> {
let mut builder = runtime::Builder::new_multi_thread();
builder.enable_all().build().map_err(|e| {
let msg = format!("There was an error while building the node runtime. {}", e);
ExitError::new(ExitCode::UnknownError, &msg)
ExitError::new(ExitCode::UnknownError, msg)
})
}

Expand Down
10 changes: 5 additions & 5 deletions applications/tari_base_node/src/bootstrap.rs
Original file line number Diff line number Diff line change
Expand Up @@ -93,15 +93,15 @@ where B: BlockchainBackend + 'static
.map(|s| SeedPeer::from_str(s))
.map(|r| r.map(Peer::from).map(|p| p.node_id))
.collect::<Result<Vec<_>, _>>()
.map_err(|e| ExitError::new(ExitCode::ConfigError, &e))?;
.map_err(|e| ExitError::new(ExitCode::ConfigError, e))?;

debug!(target: LOG_TARGET, "{} sync peer(s) configured", sync_peers.len());

let mempool_sync = MempoolSyncInitializer::new(mempool_config, self.mempool.clone());
let mempool_protocol = mempool_sync.get_protocol_extension();

let tor_identity = load_from_json(&base_node_config.tor_identity_file)
.map_err(|e| ExitError::new(ExitCode::ConfigError, &e))?;
.map_err(|e| ExitError::new(ExitCode::ConfigError, e))?;
p2p_config.transport.tor.identity = tor_identity;

let mut handles = StackBuilder::new(self.interrupt_signal)
Expand Down Expand Up @@ -157,19 +157,19 @@ where B: BlockchainBackend + 'static
let comms = Self::setup_rpc_services(comms, &handles, self.db.into(), &p2p_config);
let comms = initialization::spawn_comms_using_transport(comms, p2p_config.transport.clone())
.await
.map_err(|e| ExitError::new(ExitCode::NetworkError, &e))?;
.map_err(|e| ExitError::new(ExitCode::NetworkError, e))?;
// Save final node identity after comms has initialized. This is required because the public_address can be
// changed by comms during initialization when using tor.
match p2p_config.transport.transport_type {
TransportType::Tcp => {}, // Do not overwrite TCP public_address in the base_node_id!
_ => {
identity_management::save_as_json(&base_node_config.identity_file, &*comms.node_identity())
.map_err(|e| ExitError::new(ExitCode::IdentityError, &e))?;
.map_err(|e| ExitError::new(ExitCode::IdentityError, e))?;
},
};
if let Some(hs) = comms.hidden_service() {
identity_management::save_as_json(&base_node_config.tor_identity_file, hs.tor_identity())
.map_err(|e| ExitError::new(ExitCode::IdentityError, &e))?;
.map_err(|e| ExitError::new(ExitCode::IdentityError, e))?;
}

handles.register(comms);
Expand Down
8 changes: 4 additions & 4 deletions applications/tari_base_node/src/builder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -179,7 +179,7 @@ pub async fn configure_and_initialize_node(
app_config.base_node.lmdb_path.as_path(),
app_config.base_node.lmdb.clone(),
)
.map_err(|e| ExitError::new(ExitCode::DatabaseError, &e))?;
.map_err(|e| ExitError::new(ExitCode::DatabaseError, e))?;
build_node_context(backend, app_config, node_identity, interrupt_signal).await?
},
};
Expand Down Expand Up @@ -225,17 +225,17 @@ async fn build_node_context(
backend,
rules.clone(),
validators,
app_config.base_node.storage.clone(),
app_config.base_node.storage,
DifficultyCalculator::new(rules.clone(), randomx_factory),
)
.map_err(|err| {
if let ChainStorageError::DatabaseResyncRequired(reason) = err {
return ExitError::new(
ExitCode::DbInconsistentState,
&format!("You may need to re-sync your database because {}", reason),
format!("You may need to re-sync your database because {}", reason),
);
} else {
ExitError::new(ExitCode::DatabaseError, &err)
ExitError::new(ExitCode::DatabaseError, err)
}
})?;
let mempool_validator = MempoolValidator::new(vec![
Expand Down
1 change: 1 addition & 0 deletions applications/tari_base_node/src/cli.rs
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ const DEFAULT_NETWORK: &str = "dibbler";
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
#[clap(propagate_version = true)]
#[allow(clippy::struct_excessive_bools)]
pub(crate) struct Cli {
#[clap(flatten)]
pub common: CommonCliArgs,
Expand Down
Loading

0 comments on commit 6163d7b

Please sign in to comment.