Skip to content
This repository has been archived by the owner on Jun 25, 2021. It is now read-only.

Commit

Permalink
chore(node): solving clippy issues by temporarily disabling unused fun…
Browse files Browse the repository at this point in the history
…cts and vars
  • Loading branch information
bochaco committed Sep 24, 2020
1 parent a42b065 commit 8828a68
Show file tree
Hide file tree
Showing 12 changed files with 66 additions and 36 deletions.
1 change: 0 additions & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,6 @@ env_logger = { version = "~0.7.1", optional = true }
err-derive = "~0.2.4"
sn_fake_clock = "~0.4.0"
futures = "~0.3.5"
fxhash = "~0.2.1"
hex_fmt = "~0.3.0"
itertools = "~0.9.0"
lazy_static = { version = "1", optional = true }
Expand Down
21 changes: 14 additions & 7 deletions examples/minimal.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@
//!

use hex_fmt::HexFmt;
use log::{info, LevelFilter};
use log::{debug, info, LevelFilter};
use sn_routing::{
event::{Connected, Event},
Node, NodeConfig, TransportConfig,
Expand Down Expand Up @@ -201,16 +201,18 @@ async fn start_node(

let contact_info = node
.our_connection_info()
.await
.expect("Failed to obtain node's contact info.");
run_node(index, node);
run_node(index, node).await;

contact_info
}

// Runs the nodes event loop. Blocks until terminated.
fn run_node(index: usize, node: Node) {
async fn run_node(index: usize, node: Node) {
let mut event_stream = node
.listen_events()
.await
.expect("Failed to start listening for events from node.");

tokio::spawn(async move {
Expand Down Expand Up @@ -285,14 +287,19 @@ fn handle_event(index: usize, event: Event) -> bool {
"Node #{} relocation started - previous_name: {}",
index, previous_name
),
Event::Terminated => {
info!("Node #{} terminated", index);
return false;
}
Event::RestartRequired => {
info!("Node #{} requires restart", index);
return false;
}
Event::ClientMessageReceived {
content, src, dst, ..
} => info!(
"Node #{} received message from client: {:?}, dst: {:?}, content: {}",
index,
src,
dst,
HexFmt(content)
),
}

true
Expand Down
15 changes: 13 additions & 2 deletions src/consensus/dkg.rs
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,8 @@ impl DkgVoter {
dkg_key,
key_gen,
elders_info: Some(elders_info),
timer_token: 0,
// TODO: review if we still need this
//timer_token: 0,
});

Some(message)
Expand Down Expand Up @@ -214,6 +215,8 @@ impl DkgVoter {
// - `Some((dkg_key, Err(())))` if the DKG failed. The result should be sent to the DKG observers
// for accumulation.
// - `None` if there is no active DKG session.
// TODO: review if we still need this function
/*
pub fn progress_dkg(
&mut self,
rng: &mut MainRng,
Expand All @@ -235,6 +238,7 @@ impl DkgVoter {
}
}
}
*/

/// Returns the participants of the DKG session, if there is one.
pub fn participants(&self) -> impl Iterator<Item = &P2pNode> {
Expand Down Expand Up @@ -348,17 +352,23 @@ impl DkgVoter {

// Returns the timer token of the active DKG session if there is one. If this timer fires, we
// should call `progress_dkg`.
// TODO: review if we still need this function
/*
pub fn timer_token(&self) -> Option<u64> {
self.participant.as_ref().map(|session| session.timer_token)
}
*/

// Sets the timer token for the active DKG session. This should be set after a successful DKG
// initialization, or after handling a DKG message that produced at least one response.
// TODO: review if we still need this function
/*
pub fn set_timer_token(&mut self, token: u64) {
if let Some(session) = &mut self.participant {
session.timer_token = token;
}
}
*/
}

// Data for a DKG participant.
Expand All @@ -367,7 +377,8 @@ struct Participant {
elders_info: Option<EldersInfo>,
dkg_key: DkgKey,
key_gen: KeyGen<FullId>,
timer_token: u64,
// TODO: review if we still need this
//timer_token: u64,
}

// Data for a DKG observer.
Expand Down
10 changes: 4 additions & 6 deletions src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@
)]
// For explanation of lint checks, run `rustc -W help` or see
// https://github.com/maidsafe/QA/blob/master/Documentation/Rust%20Lint%20Checks.md
/*#![forbid(
#![forbid(
arithmetic_overflow,
mutable_transmutes,
no_mangle_const_items,
Expand Down Expand Up @@ -63,9 +63,7 @@
unused_results,
clippy::needless_borrow
)]
// Need this to stop clippy complaining about the `use quic_p2p` line which is actually necessary.
#![allow(clippy::single_component_path_imports)]
*/

#[macro_use]
extern crate serde;

Expand Down Expand Up @@ -158,9 +156,9 @@ const ELDER_SIZE: usize = 7;

// Quic-p2p
#[cfg(feature = "mock")]
use mock_qp2p as q2p;
use mock_qp2p as qp2p;
#[cfg(not(feature = "mock"))]
use qp2p;
use qp2p::{self};

#[cfg(test)]
mod tests {
Expand Down
8 changes: 1 addition & 7 deletions src/node/event_stream.rs
Original file line number Diff line number Diff line change
Expand Up @@ -37,13 +37,7 @@ impl EventStream {
is_genesis: bool,
) -> Self {
let (events_tx, events_rx) = mpsc::channel::<Event>(MAX_EVENTS_BUFFERED);
Self::spawn_connections_handler(
stage.clone(),
events_tx,
incoming_conns,
xorname,
is_genesis,
);
Self::spawn_connections_handler(stage, events_tx, incoming_conns, xorname, is_genesis);

Self { events_rx }
}
Expand Down
4 changes: 2 additions & 2 deletions src/node/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ impl Node {
pub async fn new(config: NodeConfig) -> Result<Self> {
let mut rng = config.rng;
let full_id = config.full_id.unwrap_or_else(|| FullId::gen(&mut rng));
let node_name = full_id.public_id().name().clone();
let node_name = *full_id.public_id().name();
let transport_config = config.transport_config;
let network_params = config.network_params;
let is_genesis = config.first;
Expand Down Expand Up @@ -237,7 +237,7 @@ impl Node {
// Set log identifier
let str = self.stage.lock().await.name_and_prefix();
use std::fmt::Write;
log_utils::set_ident(|buffer| write!(buffer, "{}", str));
let _log_ident = log_utils::set_ident(|buffer| write!(buffer, "{}", str));

self.stage
.lock()
Expand Down
18 changes: 16 additions & 2 deletions src/node/stage/approved.rs
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,6 @@ use crate::{
EldersInfo, MemberInfo, NeighbourEldersRemoved, SectionKeyShare, SectionKeysProvider,
SectionUpdateBarrier, SharedState, MIN_AGE,
},
time::Duration,
};
use bls_dkg::key_gen::message::Message as DkgMessage;
use bytes::Bytes;
Expand All @@ -38,8 +37,9 @@ use std::net::SocketAddr;
use tokio::sync::mpsc;
use xor_name::{Prefix, XorName};

// TODO: review if we still need to set a timer for DKG
// Interval to progress DKG timed phase
const DKG_PROGRESS_INTERVAL: Duration = Duration::from_secs(30);
// const DKG_PROGRESS_INTERVAL: Duration = Duration::from_secs(30);

// The approved stage - node is a full member of a section and is performing its duties according
// to its persona (infant, adult or elder).
Expand Down Expand Up @@ -222,6 +222,9 @@ impl Approved {
Ok(())
}

// TODO: review if we still need to invoke this function which used to
    // be called when we couldn't connect to a peer.
/*
async fn handle_connection_failure(&mut self, addr: SocketAddr) -> Result<()> {
let node = self
.shared_state
Expand All @@ -244,7 +247,11 @@ impl Approved {
Ok(())
}
*/

// TODO: review if we still need to call this function which used to be
    // called when a message to a peer wasn't sent even after retrying.
/*
async fn handle_peer_lost(&mut self, peer_addr: SocketAddr) -> Result<()> {
let name = if let Some(node) = self.shared_state.find_p2p_node_from_addr(&peer_addr) {
debug!("Lost known peer {}", node);
Expand All @@ -265,7 +272,10 @@ impl Approved {
Ok(())
}
*/

// TODO: review if we still need this function
/*
async fn handle_timeout(&mut self, token: u64) {
if self.dkg_voter.timer_token() == Some(token) {
// TODO ??
Expand All @@ -277,6 +287,7 @@ impl Approved {
}
}
}
*/

async fn check_dkg(&mut self, dkg_key: DkgKey) -> Result<()> {
match self.dkg_voter.check_dkg() {
Expand All @@ -298,6 +309,8 @@ impl Approved {
}
}

// TODO: review if we still need this function
/*
async fn progress_dkg(&mut self) -> Result<()> {
match self.dkg_voter.progress_dkg(&mut self.rng) {
Some((dkg_key, Ok(messages))) => {
Expand All @@ -313,6 +326,7 @@ impl Approved {
None => Ok(()),
}
}
*/

/// Is the node with the given id an elder in our section?
pub fn is_our_elder(&self, id: &PublicId) -> bool {
Expand Down
8 changes: 3 additions & 5 deletions src/node/stage/bootstrapping.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,19 +16,18 @@ use crate::{
relocation::{RelocatePayload, SignedRelocateDetails},
rng::MainRng,
section::EldersInfo,
time::Duration,
};
use fxhash::FxHashSet;
use std::{iter, net::SocketAddr};
use xor_name::Prefix;

// TODO: review if we still need to set a timeout for joining
/// Time after which bootstrap is cancelled (and possibly retried).
pub const BOOTSTRAP_TIMEOUT: Duration = Duration::from_secs(20);
// pub const BOOTSTRAP_TIMEOUT: Duration = Duration::from_secs(20);

// The bootstrapping stage - node is trying to find the section to join.
pub(crate) struct Bootstrapping {
// Using `FxHashSet` for deterministic iteration order.
pending_requests: FxHashSet<SocketAddr>,
// TODO - we may not need it anymore: pending_requests: FxHashSet<SocketAddr>,
relocate_details: Option<SignedRelocateDetails>,
full_id: FullId,
rng: MainRng,
Expand All @@ -45,7 +44,6 @@ impl Bootstrapping {
network_params: NetworkParams,
) -> Self {
Self {
pending_requests: Default::default(),
relocate_details,
full_id,
rng,
Expand Down
5 changes: 3 additions & 2 deletions src/node/stage/joining.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,12 +20,13 @@ use crate::{
rng::MainRng,
section::{EldersInfo, SharedState},
};
use std::{net::SocketAddr, time::Duration};
use std::net::SocketAddr;
use tokio::sync::mpsc;
use xor_name::Prefix;

// TODO: review if we still need to set a timeout for joining
/// Time after which an attempt to join a section is cancelled (and possibly retried).
pub const JOIN_TIMEOUT: Duration = Duration::from_secs(60);
//pub const JOIN_TIMEOUT: Duration = Duration::from_secs(60);

// The joining stage - node is waiting to be approved by the section.
pub(crate) struct Joining {
Expand Down
2 changes: 1 addition & 1 deletion src/node/stage/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,7 @@ impl Stage {

pub fn approved(&self) -> Option<&Approved> {
match &self.state {
State::Approved(stage) => Some(&stage),
State::Approved(stage) => Some(stage),
_ => None,
}
}
Expand Down
3 changes: 3 additions & 0 deletions src/section/member_info.rs
Original file line number Diff line number Diff line change
Expand Up @@ -36,12 +36,15 @@ impl MemberInfo {
}

// Converts this info into one with the state changed to `Left`.
// TODO: review if we still need this function
/*
pub fn leave(self) -> Self {
Self {
state: MemberState::Left,
..self
}
}
*/

// Convert this info into one with the state changed to `Relocated`.
pub fn relocate(self, destination: XorName) -> Self {
Expand Down
7 changes: 6 additions & 1 deletion src/section/shared_state.rs
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@ use std::{
collections::{BTreeMap, BTreeSet},
convert::TryInto,
fmt::Debug,
net::SocketAddr,
};
use xor_name::{Prefix, XorName};

Expand Down Expand Up @@ -146,13 +145,16 @@ impl SharedState {
.filter(move |p2p_node| !self.is_peer_our_elder(p2p_node.name()))
}

// TODO: review if we still need this function
/*
/// Returns all nodes we know (our members + neighbour elders).
pub fn known_nodes(&self) -> impl Iterator<Item = &P2pNode> {
self.our_members
.joined()
.map(|info| &info.p2p_node)
.chain(self.sections.neighbour_elders())
}
*/

/// Returns our members that are either joined or are left but still elders.
pub fn active_members(&self) -> impl Iterator<Item = &P2pNode> {
Expand Down Expand Up @@ -185,10 +187,13 @@ impl SharedState {
self.our_members.is_adult(name) || self.is_peer_our_elder(name)
}

// TODO: review if we still need this function
/*
pub fn find_p2p_node_from_addr(&self, socket_addr: &SocketAddr) -> Option<&P2pNode> {
self.known_nodes()
.find(|p2p_node| p2p_node.peer_addr() == socket_addr)
}
*/

/// All section keys we know of, including the past keys of our section.
pub fn section_keys(&self) -> impl Iterator<Item = (&Prefix, &bls::PublicKey)> {
Expand Down

0 comments on commit 8828a68

Please sign in to comment.