Polite-grandpa improvements (#2229)
* send neighbor packets in more generic way

* integrate periodic neighbor-packet rebroadcaster

* integrate reporting

* attach callbacks to commit messages for rebroadcasting and reporting (see the sketch after this list)

* Tests for commit relay

* crunch up some nice warnings

* exit-scope sub-futures of grandpa

* address small grumbles

* some changes to commit handling
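
The commit-message callbacks mentioned above sit in the large diffs that are not rendered on this page. As a rough, hypothetical sketch of the idea (CommitOutcome, Callback, and process_commit are illustrative names, not the crate's API): a commit message arriving over gossip carries a callback, and the outcome of processing it decides between rebroadcasting the commit and reporting its sender.

// hypothetical sketch only: none of these names are the crate's API.
enum CommitOutcome {
	Good,
	Bad,
}

struct Callback(Box<dyn FnOnce(CommitOutcome) + Send>);

impl Callback {
	fn run(self, outcome: CommitOutcome) {
		(self.0)(outcome)
	}
}

// the network layer would attach a callback when handing a commit to the
// voter: good commits are rebroadcast, bad ones get their sender reported.
fn process_commit(commit_checks_out: bool, callback: Callback) {
	if commit_checks_out {
		callback.run(CommitOutcome::Good); // rebroadcast to other peers
	} else {
		callback.run(CommitOutcome::Bad); // report the originating peer
	}
}
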
rphmeier authored and gavofyork committed Apr 16, 2019
1 parent 22ea827 commit 9631622
Showing 10 changed files with 933 additions and 244 deletions.
238 changes: 127 additions & 111 deletions core/finality-grandpa/src/communication/gossip.rs


296 changes: 237 additions & 59 deletions core/finality-grandpa/src/communication/mod.rs


93 changes: 93 additions & 0 deletions core/finality-grandpa/src/communication/periodic.rs
@@ -0,0 +1,93 @@
// Copyright 2019 Parity Technologies (UK) Ltd.
// This file is part of Substrate.

// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.

//! Periodic rebroadcast of neighbor packets.

use super::{gossip::{NeighborPacket, GossipMessage}, Network};
use futures::prelude::*;
use futures::sync::mpsc;
use runtime_primitives::traits::{NumberFor, Block as BlockT};
use network::PeerId;
use tokio::timer::Delay;
use log::warn;
use parity_codec::Encode;

use std::time::{Instant, Duration};

// how often to rebroadcast, if no other neighbor packets are sent in the meantime.
const REBROADCAST_AFTER: Duration = Duration::from_secs(2 * 60);

fn rebroadcast_instant() -> Instant {
	Instant::now() + REBROADCAST_AFTER
}

/// A sender used to send neighbor packets to a background job.
pub(super) type NeighborPacketSender<B> = mpsc::UnboundedSender<(Vec<PeerId>, NeighborPacket<NumberFor<B>>)>;

/// Does the work of sending neighbor packets, asynchronously.
///
/// It may rebroadcast the last neighbor packet periodically when no
/// progress is made.
pub(super) fn neighbor_packet_worker<B, N>(net: N) -> (
	impl Future<Item = (), Error = ()> + Send + 'static,
	NeighborPacketSender<B>,
) where
	B: BlockT,
	N: Network<B>,
{
	let mut last = None;
	let (tx, mut rx) = mpsc::unbounded::<(Vec<PeerId>, NeighborPacket<NumberFor<B>>)>();
	let mut delay = Delay::new(rebroadcast_instant());
	let work = futures::future::poll_fn(move || {
		// drain all packets queued through the `NeighborPacketSender` handle;
		// each fresh packet is sent immediately and re-arms the timer.
		loop {
			match rx.poll().expect("unbounded receivers do not error; qed") {
				Async::Ready(None) => return Ok(Async::Ready(())),
				Async::Ready(Some((to, packet))) => {
					// send to peers.
					net.send_message(to.clone(), GossipMessage::<B>::from(packet.clone()).encode());

					// remember the packet and reset the rebroadcast timer.
					delay.reset(rebroadcast_instant());
					last = Some((to, packet));
				}
				Async::NotReady => break,
			}
		}

		// has to be done in a loop because it needs to be polled after
		// re-scheduling.
		loop {
			match delay.poll() {
				Err(e) => {
					warn!(target: "afg", "Could not rebroadcast neighbor packets: {:?}", e);
					delay.reset(rebroadcast_instant());
				}
				Ok(Async::Ready(())) => {
					delay.reset(rebroadcast_instant());

					if let Some((ref to, ref packet)) = last {
						// send to peers.
						net.send_message(to.clone(), GossipMessage::<B>::from(packet.clone()).encode());
					}
				}
				Ok(Async::NotReady) => return Ok(Async::NotReady),
			}
		}
	});

	(work, tx)
}
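
For context, a hedged sketch of how the parent communication module might drive this worker. The wire_up helper and its arguments are illustrative, not actual crate code; since the worker is pub(super), the real wiring lives in communication/mod.rs:

// illustrative only: wire_up is a hypothetical helper, not crate code.
fn wire_up<B: BlockT, N: Network<B>>(
	net: N,
	peers: Vec<PeerId>,
	packet: NeighborPacket<NumberFor<B>>,
) -> (impl Future<Item = (), Error = ()> + Send + 'static, NeighborPacketSender<B>) {
	let (job, sender) = neighbor_packet_worker(net);

	// the packet is sent out right away and also becomes the one that is
	// rebroadcast every REBROADCAST_AFTER until it is replaced.
	sender.unbounded_send((peers, packet))
		.expect("the receiver is owned by `job`, which is still alive; qed");

	// the caller spawns `job` (e.g. on the tokio runtime) alongside the
	// other grandpa background futures and keeps `sender` to push updates;
	// the job resolves once every NeighborPacketSender handle is dropped.
	(job, sender)
}

Because the channel is unbounded, sends never block: on each poll the job drains every queued packet first and only falls back to the timer when the queue is empty.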
