Skip to content
This repository has been archived by the owner on Nov 6, 2020. It is now read-only.

Avoid long state queries when serving GetNodeData requests #11444

Merged
merged 12 commits into the base branch from the pull-request branch
Feb 4, 2020
18 changes: 9 additions & 9 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

5 changes: 5 additions & 0 deletions ethcore/sync/src/chain/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -162,7 +162,12 @@ pub const PAR_PROTOCOL_VERSION_4: (u8, u8) = (4, 0x20);

/// Maximum number of block bodies to send in a single response.
pub const MAX_BODIES_TO_SEND: usize = 256;
/// Maximum number of block headers to send in a single response.
pub const MAX_HEADERS_TO_SEND: usize = 512;
/// Maximum number of "entries" to serve for a single GetNodeData request.
pub const MAX_NODE_DATA_TO_SEND: usize = 1024;
/// Maximum allowed duration for serving a batch GetNodeData request.
const MAX_NODE_DATA_TOTAL_DURATION: Duration = Duration::from_secs(5);
/// Maximum allowed duration for serving a single GetNodeData request.
const MAX_NODE_DATA_SINGLE_DURATION: Duration = Duration::from_millis(50);
/// Maximum number of block receipts to send in a single response.
pub const MAX_RECEIPTS_HEADERS_TO_SEND: usize = 256;
// NOTE(review): peer-count bounds for propagation; exact semantics depend on
// usage elsewhere in this module (not visible here) — confirm before relying.
const MIN_PEERS_PROPAGATION: usize = 4;
const MAX_PEERS_PROPAGATION: usize = 128;
Expand Down
28 changes: 21 additions & 7 deletions ethcore/sync/src/chain/supplier.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,13 +15,14 @@
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::cmp;
use std::time::{Duration, Instant};

use crate::sync_io::SyncIo;

use bytes::Bytes;
use enum_primitive::FromPrimitive;
use ethereum_types::H256;
use log::{debug, trace};
use log::{debug, trace, warn};
use network::{self, PeerId};
use parking_lot::RwLock;
use rlp::{Rlp, RlpStream};
Expand Down Expand Up @@ -56,6 +57,8 @@ use super::{
MAX_BODIES_TO_SEND,
MAX_HEADERS_TO_SEND,
MAX_NODE_DATA_TO_SEND,
MAX_NODE_DATA_TOTAL_DURATION,
MAX_NODE_DATA_SINGLE_DURATION,
MAX_RECEIPTS_HEADERS_TO_SEND,
};

Expand Down Expand Up @@ -258,9 +261,9 @@ impl SyncSupplier {

/// Respond to GetNodeData request
fn return_node_data(io: &dyn SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult {
let payload_soft_limit = io.payload_soft_limit();
let payload_soft_limit = io.payload_soft_limit(); // 4Mb
let mut count = r.item_count().unwrap_or(0);
trace!(target: "sync", "{} -> GetNodeData: {} entries", peer_id, count);
trace!(target: "sync", "{} -> GetNodeData: {} entries requested", peer_id, count);
if count == 0 {
debug!(target: "sync", "Empty GetNodeData request, ignoring.");
return Ok(None);
Expand All @@ -269,18 +272,29 @@ impl SyncSupplier {
let mut added = 0usize;
let mut data = Vec::new();
let mut total_bytes = 0;
let mut total_elpsd = Duration::from_secs(0);
for i in 0..count {
if let Some(node) = io.chain().state_data(&r.val_at::<H256>(i)?) {
let hash = &r.val_at(i)?;
let elpsd = Instant::now();
let state = io.chain().state_data(hash);

total_elpsd += elpsd.elapsed();
if elpsd.elapsed() > MAX_NODE_DATA_SINGLE_DURATION || total_elpsd > MAX_NODE_DATA_TOTAL_DURATION {
warn!(target: "sync", "{} -> GetNodeData: item {}/{} – slow state fetch for hash {:?}; took {:?}",
peer_id, i, count, hash, elpsd);
break;
}
if let Some(node) = state {
total_bytes += node.len();
// Check that the packet won't be oversized
if total_bytes > payload_soft_limit {
break;
}
data.push(node);
added += 1;
}
}
trace!(target: "sync", "{} -> GetNodeData: return {} entries", peer_id, added);
trace!(target: "sync", "{} -> GetNodeData: returning {}/{} entries ({} bytes total in {:?})",
peer_id, added, count, total_bytes, total_elpsd);
let mut rlp = RlpStream::new_list(added);
for d in data {
rlp.append(&d);
Expand Down Expand Up @@ -540,7 +554,7 @@ mod test {
let rlp_result = result.unwrap();
assert!(rlp_result.is_some());

// the length of one rlp-encoded hashe
// the length of one rlp-encoded hash
let rlp = rlp_result.unwrap().1.out();
let rlp = Rlp::new(&rlp);
assert_eq!(Ok(1), rlp.item_count());
Expand Down
17 changes: 13 additions & 4 deletions util/journaldb/src/overlayrecentdb.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ use std::{
collections::{HashMap, hash_map::Entry},
io,
sync::Arc,
time::Duration,
};

use ethereum_types::H256;
Expand Down Expand Up @@ -279,11 +280,19 @@ impl JournalDB for OverlayRecentDB {
fn earliest_era(&self) -> Option<u64> { self.journal_overlay.read().earliest_era }

fn state(&self, key: &H256) -> Option<Bytes> {
let journal_overlay = self.journal_overlay.read();
let key = to_short_key(key);
journal_overlay.backing_overlay.get(&key, EMPTY_PREFIX)
.or_else(|| journal_overlay.pending_overlay.get(&key).map(|d| d.clone()))
.or_else(|| self.backing.get_by_prefix(self.column, &key[0..DB_PREFIX_LEN]).map(|b| b.to_vec()))
let maybe_state_data = {
let journal_overlay = self.journal_overlay.try_read_for(Duration::from_secs(2))?;
dvdplm marked this conversation as resolved.
Show resolved Hide resolved
journal_overlay.backing_overlay.get(&key, EMPTY_PREFIX)
.or_else(|| journal_overlay.pending_overlay.get(&key).map(|d| d.clone()))
};
match maybe_state_data {
dvdplm marked this conversation as resolved.
Show resolved Hide resolved
Some(data) => Some(data),
None => {
let pkey = &key[..DB_PREFIX_LEN];
self.backing.get_by_prefix(self.column, &pkey).map(|b| b.to_vec())
}
}
}

fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> {
Expand Down
12 changes: 6 additions & 6 deletions util/network-devp2p/src/connection.rs
Original file line number Diff line number Diff line change
Expand Up @@ -40,11 +40,11 @@ use crate::handshake::Handshake;

const ENCRYPTED_HEADER_LEN: usize = 32;
const RECEIVE_PAYLOAD: Duration = Duration::from_secs(30);
pub const MAX_PAYLOAD_SIZE: usize = (1 << 24) - 1;
pub const MAX_PAYLOAD_SIZE: usize = (1 << 24) - 1; // 16Mb

/// Network responses should try not to go over this limit.
/// This should be lower than MAX_PAYLOAD_SIZE
pub const PAYLOAD_SOFT_LIMIT: usize = (1 << 22) - 1;
pub const PAYLOAD_SOFT_LIMIT: usize = (1 << 22) - 1; // 4Mb

pub trait GenericSocket : Read + Write {
}
Expand Down Expand Up @@ -97,15 +97,15 @@ impl<Socket: GenericSocket> GenericConnection<Socket> {
else if self.rec_buf.len() > self.rec_size {
warn!(target:"network", "Read past buffer {} bytes", self.rec_buf.len() - self.rec_size);
return Ok(Some(::std::mem::replace(&mut self.rec_buf, Bytes::new())))
}
}
},
Ok(_) => return Ok(None),
Err(e) => {
debug!(target:"network", "Read error {} ({})", self.token, e);
return Err(e)
}
}
}
}
}

/// Add a packet to send queue.
Expand Down Expand Up @@ -222,7 +222,7 @@ impl Connection {
pub fn register_socket<Host: Handler>(&self, reg: Token, event_loop: &mut EventLoop<Host>) -> io::Result<()> {
if self.registered.compare_and_swap(false, true, AtomicOrdering::SeqCst) {
return Ok(());
}
}
trace!(target: "network", "connection register; token={:?}", reg);
if let Err(e) = event_loop.register(&self.socket, reg, self.interest, PollOpt::edge() /* | PollOpt::oneshot() */) { // TODO: oneshot is broken on windows
trace!(target: "network", "Failed to register {:?}, {:?}", reg, e);
Expand All @@ -235,7 +235,7 @@ impl Connection {
trace!(target: "network", "connection reregister; token={:?}", reg);
if !self.registered.load(AtomicOrdering::SeqCst) {
self.register_socket(reg, event_loop)
} else {
} else {
event_loop.reregister(&self.socket, reg, self.interest, PollOpt::edge() /* | PollOpt::oneshot() */ ).unwrap_or_else(|e| { // TODO: oneshot is broken on windows
trace!(target: "network", "Failed to reregister {:?}, {:?}", reg, e);
});
Expand Down