From e732a67f71ab83d0ba7d7b704eae04662fa95796 Mon Sep 17 00:00:00 2001
From: arkpar
Date: Mon, 5 Mar 2018 15:32:55 +0100
Subject: [PATCH 1/2] Limit ingress connections

---
 util/network-devp2p/src/host.rs | 64 +++++++++++++++++++--------------
 1 file changed, 38 insertions(+), 26 deletions(-)

diff --git a/util/network-devp2p/src/host.rs b/util/network-devp2p/src/host.rs
index 351f232800e..d5f98b567fa 100644
--- a/util/network-devp2p/src/host.rs
+++ b/util/network-devp2p/src/host.rs
@@ -18,9 +18,9 @@ use std::net::{SocketAddr, SocketAddrV4, Ipv4Addr};
 use std::collections::{HashMap, HashSet};
 use std::str::FromStr;
 use std::sync::Arc;
-use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
+use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering};
 use std::ops::*;
-use std::cmp::min;
+use std::cmp::{min, max};
 use std::path::{Path, PathBuf};
 use std::io::{Read, Write, self};
 use std::fs;
@@ -247,7 +247,6 @@ pub struct Host {
 	timer_counter: RwLock<usize>,
 	stats: Arc<NetworkStats>,
 	reserved_nodes: RwLock<HashSet<NodeId>>,
-	num_sessions: AtomicUsize,
 	stopping: AtomicBool,
 	filter: Option<Arc<ConnectionFilter>>,
 }
@@ -304,7 +303,6 @@ impl Host {
 			timer_counter: RwLock::new(USER_TIMER),
 			stats: stats,
 			reserved_nodes: RwLock::new(HashSet::new()),
-			num_sessions: AtomicUsize::new(0),
 			stopping: AtomicBool::new(false),
 			filter: filter,
 		};
@@ -359,7 +357,7 @@
 		// disconnect all non-reserved peers here.
 		let reserved: HashSet<NodeId> = self.reserved_nodes.read().clone();
 		let mut to_kill = Vec::new();
-		for e in self.sessions.write().iter_mut() {
+		for e in self.sessions.read().iter() {
 			let mut s = e.lock();
 			{
 				let id = s.id();
@@ -399,7 +397,7 @@
 	pub fn stop(&self, io: &IoContext<NetworkIoMessage>) -> Result<(), Error> {
 		self.stopping.store(true, AtomicOrdering::Release);
 		let mut to_kill = Vec::new();
-		for e in self.sessions.write().iter_mut() {
+		for e in self.sessions.read().iter() {
 			let mut s = e.lock();
 			s.disconnect(io, DisconnectReason::ClientQuit);
 			to_kill.push(s.token());
@@ -490,8 +488,19 @@
 		self.sessions.read().iter().any(|e| e.lock().info.id == Some(id.clone()))
 	}
 
-	fn session_count(&self) -> usize {
-		self.num_sessions.load(AtomicOrdering::Relaxed)
+	// returns (handshakes, egress, ingress)
+	fn session_count(&self) -> (usize, usize, usize) {
+		let mut handshakes = 0;
+		let mut egress = 0;
+		let mut ingress = 0;
+		for s in self.sessions.read().iter() {
+			match s.try_lock() {
+				Some(ref s) if s.is_ready() && s.info.originated => egress += 1,
+				Some(ref s) if s.is_ready() && !s.info.originated => ingress += 1,
+				_ => handshakes +=1,
+			}
+		}
+		(handshakes, egress, ingress)
 	}
 
 	fn connecting_to(&self, id: &NodeId) -> bool {
@@ -499,13 +508,13 @@
 	}
 
 	fn handshake_count(&self) -> usize {
-		// session_count < total_count is possible because of the data race.
-		self.sessions.read().count().saturating_sub(self.session_count())
+		let (handshakes, ..) = self.session_count();
+		handshakes
 	}
 
 	fn keep_alive(&self, io: &IoContext<NetworkIoMessage>) {
 		let mut to_kill = Vec::new();
-		for e in self.sessions.write().iter_mut() {
+		for e in self.sessions.read().iter() {
 			let mut s = e.lock();
 			if !s.keep_alive(io) {
 				s.disconnect(io, DisconnectReason::PingTimeout);
@@ -529,9 +538,9 @@
 			(config.min_peers, config.non_reserved_mode == NonReservedPeerMode::Deny, config.max_handshakes as usize, config.ip_filter.clone(), info.id().clone())
 		};
 
-		let session_count = self.session_count();
+		let (handshake_count, egress_count, ingress_count) = self.session_count();
 		let reserved_nodes = self.reserved_nodes.read();
-		if session_count >= min_peers as usize + reserved_nodes.len() {
+		if egress_count + ingress_count >= min_peers as usize + reserved_nodes.len() {
 			// check if all pinned nodes are connected.
 			if reserved_nodes.iter().all(|n| self.have_session(n) && self.connecting_to(n)) {
 				return;
@@ -541,7 +550,6 @@
 			pin = true;
 		}
 
-		let handshake_count = self.handshake_count();
 		// allow 16 slots for incoming connections
 		if handshake_count >= max_handshakes {
 			return;
@@ -566,7 +574,7 @@
 			self.connect_peer(&id, io);
 			started += 1;
 		}
-		debug!(target: "network", "Connecting peers: {} sessions, {} pending, {} started", self.session_count(), self.handshake_count(), started);
+		debug!(target: "network", "Connecting peers: {} sessions, {} pending, {} started", egress_count + ingress_count, self.handshake_count(), started);
 	}
 
 	fn connect_peer(&self, id: &NodeId, io: &IoContext<NetworkIoMessage>) {
@@ -676,11 +684,11 @@
 		let mut ready_id = None;
 		if let Some(session) = session.clone() {
 			{
-				let mut s = session.lock();
 				loop {
-					let session_result = s.readable(io, &self.info.read());
+					let session_result = session.lock().readable(io, &self.info.read());
 					match session_result {
 						Err(e) => {
+							let s = session.lock();
 							trace!(target: "network", "Session read error: {}:{:?} ({:?}) {:?}", token, s.id(), s.remote_addr(), e);
 							if let ErrorKind::Disconnect(DisconnectReason::IncompatibleProtocol) = *e.kind() {
 								if let Some(id) = s.id() {
@@ -693,9 +701,9 @@
 							break;
 						},
 						Ok(SessionData::Ready) => {
-							self.num_sessions.fetch_add(1, AtomicOrdering::SeqCst);
-							let session_count = self.session_count();
-							let (min_peers, max_peers, reserved_only, self_id) = {
+							let (_, egress_count, ingress_count) = self.session_count();
+							let mut s = session.lock();
+							let (min_peers, mut max_peers, reserved_only, self_id) = {
 								let info = self.info.read();
 								let mut max_peers = info.config.max_peers;
 								for cap in s.info.capabilities.iter() {
@@ -707,12 +715,17 @@
 								(info.config.min_peers as usize, max_peers as usize, info.config.non_reserved_mode == NonReservedPeerMode::Deny, info.id().clone())
 							};
 
+							max_peers = max(max_peers, min_peers);
+
 							let id = s.id().expect("Ready session always has id").clone();
 
-							// Check for the session limit. session_counts accounts for the new session.
+							// Check for the session limit.
+							// Outgoing connections are allowed as long as their count is <= min_peers
+							// Incoming connections are allowed to take all of the max_peers reserve, or at most half of the slots.
+							let max_ingress = max(max_peers - min_peers, min_peers / 2);
 							if reserved_only ||
-								(s.info.originated && session_count > min_peers) ||
-								(!s.info.originated && session_count > max_peers) {
+								(s.info.originated && egress_count > min_peers) ||
+								(!s.info.originated && ingress_count > max_ingress) {
 								// only proceed if the connecting peer is reserved.
 								if !self.reserved_nodes.read().contains(&id) {
 									s.disconnect(io, DisconnectReason::TooManyPeers);
@@ -816,13 +829,12 @@
 		let mut deregister = false;
 		let mut expired_session = None;
 		if let FIRST_SESSION ... LAST_SESSION = token {
-			let sessions = self.sessions.write();
+			let sessions = self.sessions.read();
 			if let Some(session) = sessions.get(token).cloned() {
 				expired_session = Some(session.clone());
 				let mut s = session.lock();
 				if !s.expired() {
 					if s.is_ready() {
-						self.num_sessions.fetch_sub(1, AtomicOrdering::SeqCst);
 						for (p, _) in self.handlers.read().iter() {
 							if s.have_capability(*p) {
 								to_disconnect.push(*p);
@@ -854,7 +866,7 @@
 	fn update_nodes(&self, _io: &IoContext<NetworkIoMessage>, node_changes: TableUpdates) {
 		let mut to_remove: Vec<PeerId> = Vec::new();
 		{
-			let sessions = self.sessions.write();
+			let sessions = self.sessions.read();
 			for c in sessions.iter() {
 				let s = c.lock();
 				if let Some(id) = s.id() {

From 5b171c217fcb28e4c816a7d27dcd3fcd629edf25 Mon Sep 17 00:00:00 2001
From: arkpar
Date: Mon, 5 Mar 2018 17:11:21 +0100
Subject: [PATCH 2/2] Optimized handshakes logging

---
 util/network-devp2p/src/host.rs | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/util/network-devp2p/src/host.rs b/util/network-devp2p/src/host.rs
index d5f98b567fa..629b6af8a48 100644
--- a/util/network-devp2p/src/host.rs
+++ b/util/network-devp2p/src/host.rs
@@ -507,11 +507,6 @@ impl Host {
 		self.sessions.read().iter().any(|e| e.lock().id() == Some(id))
 	}
 
-	fn handshake_count(&self) -> usize {
-		let (handshakes, ..) = self.session_count();
-		handshakes
-	}
-
 	fn keep_alive(&self, io: &IoContext<NetworkIoMessage>) {
 		let mut to_kill = Vec::new();
 		for e in self.sessions.read().iter() {
@@ -574,7 +569,7 @@
 			self.connect_peer(&id, io);
 			started += 1;
 		}
-		debug!(target: "network", "Connecting peers: {} sessions, {} pending, {} started", egress_count + ingress_count, self.handshake_count(), started);
+		debug!(target: "network", "Connecting peers: {} sessions, {} pending + {} started", egress_count + ingress_count, handshake_count, started);
 	}
 
 	fn connect_peer(&self, id: &NodeId, io: &IoContext<NetworkIoMessage>) {
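
Reviewer note (not part of the patches): the limit check added in session_readable splits the peer budget so that outgoing sessions may occupy up to min_peers slots, while incoming sessions get the max_peers - min_peers reserve, but never less than min_peers / 2. Below is a minimal standalone sketch of that arithmetic; the helper name allows_new_session and the numeric values are made up for illustration, and reserved peers (which bypass the check via reserved_nodes in the patch) are ignored here.

use std::cmp::max;

// Hypothetical helper mirroring the check added in session_readable.
// `originated` is true for sessions we dialed (egress), false for incoming (ingress).
fn allows_new_session(originated: bool, egress_count: usize, ingress_count: usize,
	min_peers: usize, max_peers: usize) -> bool {
	// Clamp so the subtraction below cannot underflow when max_peers < min_peers.
	let max_peers = max(max_peers, min_peers);
	// Ingress may use the whole reserve above min_peers, but never less than half of min_peers.
	let max_ingress = max(max_peers - min_peers, min_peers / 2);
	if originated {
		egress_count <= min_peers
	} else {
		ingress_count <= max_ingress
	}
}

fn main() {
	// Example values only: with min_peers = 25 and max_peers = 50 there are 25 ingress slots.
	assert!(allows_new_session(false, 0, 20, 25, 50));
	// When max_peers == min_peers the fallback still grants min_peers / 2 = 12 ingress slots.
	assert!(!allows_new_session(false, 0, 13, 25, 25));
	// Outgoing sessions are bounded by min_peers regardless of the ingress reserve.
	assert!(allows_new_session(true, 25, 0, 25, 50));
}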