diff --git a/core/Cargo.toml b/core/Cargo.toml index f8b9a659..a8b83887 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -13,6 +13,7 @@ categories = ["network-programming", "asynchronous"] asn1_der = "0.6.1" bs58 = "0.3.0" ed25519-dalek = "1.0.0-pre.3" +either = "1.5" fnv = "1.0" futures = { version = "0.3.1", features = ["compat", "io-compat", "executor", "thread-pool"] } futures-timer = "3" diff --git a/core/src/connection.rs b/core/src/connection.rs new file mode 100644 index 00000000..de6a03d0 --- /dev/null +++ b/core/src/connection.rs @@ -0,0 +1,336 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +mod error; +mod handler; +mod listeners; +mod substream; + +pub(crate) mod manager; +pub(crate) mod pool; + +pub use error::{ConnectionError, PendingConnectionError}; +pub use handler::{ConnectionHandler, ConnectionHandlerEvent, IntoConnectionHandler}; +pub use listeners::{ListenerId, ListenersStream, ListenersEvent}; +pub use manager::ConnectionId; +pub use substream::{Substream, SubstreamEndpoint, Close}; +pub use pool::{EstablishedConnection, EstablishedConnectionIter, PendingConnection}; + +use crate::muxing::StreamMuxer; +use crate::{Multiaddr, PeerId}; +use std::{fmt, pin::Pin, task::Context, task::Poll}; +use std::hash::Hash; +use substream::{Muxing, SubstreamEvent}; + +/// The endpoint roles associated with a peer-to-peer communication channel. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub enum Endpoint { + /// The socket comes from a dialer. + Dialer, + /// The socket comes from a listener. + Listener, +} + +impl std::ops::Not for Endpoint { + type Output = Endpoint; + + fn not(self) -> Self::Output { + match self { + Endpoint::Dialer => Endpoint::Listener, + Endpoint::Listener => Endpoint::Dialer + } + } +} + +impl Endpoint { + /// Is this endpoint a dialer? + pub fn is_dialer(self) -> bool { + if let Endpoint::Dialer = self { + true + } else { + false + } + } + + /// Is this endpoint a listener? + pub fn is_listener(self) -> bool { + if let Endpoint::Listener = self { + true + } else { + false + } + } +} + +/// The endpoint roles associated with a peer-to-peer connection. +#[derive(PartialEq, Eq, Debug, Clone, Hash)] +pub enum ConnectedPoint { + /// We dialed the node. + Dialer { + /// Multiaddress that was successfully dialed. + address: Multiaddr, + }, + /// We received the node. + Listener { + /// Local connection address. 
+ local_addr: Multiaddr, + /// Stack of protocols used to send back data to the remote. + send_back_addr: Multiaddr, + } +} + +impl From<&'_ ConnectedPoint> for Endpoint { + fn from(endpoint: &'_ ConnectedPoint) -> Endpoint { + endpoint.to_endpoint() + } +} + +impl From for Endpoint { + fn from(endpoint: ConnectedPoint) -> Endpoint { + endpoint.to_endpoint() + } +} + +impl ConnectedPoint { + /// Turns the `ConnectedPoint` into the corresponding `Endpoint`. + pub fn to_endpoint(&self) -> Endpoint { + match self { + ConnectedPoint::Dialer { .. } => Endpoint::Dialer, + ConnectedPoint::Listener { .. } => Endpoint::Listener + } + } + + /// Returns true if we are `Dialer`. + pub fn is_dialer(&self) -> bool { + match self { + ConnectedPoint::Dialer { .. } => true, + ConnectedPoint::Listener { .. } => false + } + } + + /// Returns true if we are `Listener`. + pub fn is_listener(&self) -> bool { + match self { + ConnectedPoint::Dialer { .. } => false, + ConnectedPoint::Listener { .. } => true + } + } +} + +/// Information about a successfully established connection. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Connected { + /// The connected endpoint, including network address information. + pub endpoint: ConnectedPoint, + /// Information obtained from the transport. + pub info: I, +} + +impl Connected +where + I: ConnectionInfo +{ + pub fn peer_id(&self) -> &I::PeerId { + self.info.peer_id() + } +} + +/// Information about a connection. +pub trait ConnectionInfo { + /// Identity of the node we are connected to. + type PeerId: Eq + Hash; + + /// Returns the identity of the node we are connected to on this connection. + fn peer_id(&self) -> &Self::PeerId; +} + +impl ConnectionInfo for PeerId { + type PeerId = PeerId; + + fn peer_id(&self) -> &PeerId { + self + } +} + +/// A multiplexed connection to a peer with an associated `ConnectionHandler`. +pub struct Connection +where + TMuxer: StreamMuxer, + THandler: ConnectionHandler>, +{ + /// Node that handles the muxing. + muxing: substream::Muxing, + /// Handler that processes substreams. + handler: THandler, +} + +impl fmt::Debug for Connection +where + TMuxer: StreamMuxer, + THandler: ConnectionHandler> + fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Connection") + .field("muxing", &self.muxing) + .field("handler", &self.handler) + .finish() + } +} + +impl Unpin for Connection +where + TMuxer: StreamMuxer, + THandler: ConnectionHandler>, +{ +} + +impl Connection +where + TMuxer: StreamMuxer, + THandler: ConnectionHandler>, +{ + /// Builds a new `Connection` from the given substream multiplexer + /// and connection handler. + pub fn new(muxer: TMuxer, handler: THandler) -> Self { + Connection { + muxing: Muxing::new(muxer), + handler, + } + } + + /// Returns a reference to the `ConnectionHandler` + pub fn handler(&self) -> &THandler { + &self.handler + } + + /// Returns a mutable reference to the `ConnectionHandler` + pub fn handler_mut(&mut self) -> &mut THandler { + &mut self.handler + } + + /// Notifies the connection handler of an event. + pub fn inject_event(&mut self, event: THandler::InEvent) { + self.handler.inject_event(event); + } + + /// Returns `true` if the remote has shown any sign of activity + /// since the connection has been established. + /// + /// See also [`StreamMuxer::is_remote_acknowledged`]. 
+ pub fn is_remote_acknowledged(&self) -> bool { + self.muxing.is_remote_acknowledged() + } + + /// Begins an orderly shutdown of the connection, returning a + /// `Future` that resolves when connection shutdown is complete. + pub fn close(self) -> Close { + self.muxing.close().0 + } + + /// Polls the connection for events produced by the associated handler + /// as a result of I/O activity on the substream multiplexer. + pub fn poll(mut self: Pin<&mut Self>, cx: &mut Context) + -> Poll>> + { + loop { + let mut io_pending = false; + + // Perform I/O on the connection through the muxer, informing the handler + // of new substreams. + match self.muxing.poll(cx) { + Poll::Pending => io_pending = true, + Poll::Ready(Ok(SubstreamEvent::InboundSubstream { substream })) => { + self.handler.inject_substream(substream, SubstreamEndpoint::Listener) + } + Poll::Ready(Ok(SubstreamEvent::OutboundSubstream { user_data, substream })) => { + let endpoint = SubstreamEndpoint::Dialer(user_data); + self.handler.inject_substream(substream, endpoint) + } + Poll::Ready(Err(err)) => return Poll::Ready(Err(ConnectionError::IO(err))), + } + + // Poll the handler for new events. + match self.handler.poll(cx) { + Poll::Pending => { + if io_pending { + return Poll::Pending // Nothing to do + } + } + Poll::Ready(Ok(ConnectionHandlerEvent::OutboundSubstreamRequest(user_data))) => { + self.muxing.open_substream(user_data); + } + Poll::Ready(Ok(ConnectionHandlerEvent::Custom(event))) => { + return Poll::Ready(Ok(event)); + } + Poll::Ready(Err(err)) => return Poll::Ready(Err(ConnectionError::Handler(err))), + } + } + } +} + +/// Borrowed information about an incoming connection currently being negotiated. +#[derive(Debug, Copy, Clone)] +pub struct IncomingInfo<'a> { + /// Local connection address. + pub local_addr: &'a Multiaddr, + /// Stack of protocols used to send back data to the remote. + pub send_back_addr: &'a Multiaddr, +} + +impl<'a> IncomingInfo<'a> { + /// Builds the `ConnectedPoint` corresponding to the incoming connection. + pub fn to_connected_point(&self) -> ConnectedPoint { + ConnectedPoint::Listener { + local_addr: self.local_addr.clone(), + send_back_addr: self.send_back_addr.clone(), + } + } +} + +/// Borrowed information about an outgoing connection currently being negotiated. +#[derive(Debug, Copy, Clone)] +pub struct OutgoingInfo<'a, TPeerId> { + pub address: &'a Multiaddr, + pub peer_id: Option<&'a TPeerId>, +} + +impl<'a, TPeerId> OutgoingInfo<'a, TPeerId> { + /// Builds a `ConnectedPoint` corresponding to the outgoing connection. + pub fn to_connected_point(&self) -> ConnectedPoint { + ConnectedPoint::Dialer { + address: self.address.clone() + } + } +} + +/// Information about a connection limit. +#[derive(Debug, Clone)] +pub struct ConnectionLimit { + /// The maximum number of connections. + pub limit: usize, + /// The current number of connections. + pub current: usize, +} + +impl fmt::Display for ConnectionLimit { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}/{}", self.current, self.limit) + } +} diff --git a/core/src/connection/error.rs b/core/src/connection/error.rs new file mode 100644 index 00000000..6704dfa5 --- /dev/null +++ b/core/src/connection/error.rs @@ -0,0 +1,115 @@ +// Copyright 2018 Parity Technologies (UK) Ltd. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use crate::connection::ConnectionLimit; +use crate::transport::TransportError; +use std::{io, fmt}; + +/// Errors that can occur in the context of an established `Connection`. +#[derive(Debug)] +pub enum ConnectionError { + /// An I/O error occurred on the connection. + // TODO: Eventually this should also be a custom error? + IO(io::Error), + + /// The connection handler produced an error. + Handler(THandlerErr), +} + +impl fmt::Display +for ConnectionError +where + THandlerErr: fmt::Display, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ConnectionError::IO(err) => + write!(f, "Connection error: I/O error: {}", err), + ConnectionError::Handler(err) => + write!(f, "Connection error: Handler error: {}", err), + } + } +} + +impl std::error::Error +for ConnectionError +where + THandlerErr: std::error::Error + 'static, +{ + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + ConnectionError::IO(err) => Some(err), + ConnectionError::Handler(err) => Some(err), + } + } +} + +/// Errors that can occur in the context of a pending `Connection`. +#[derive(Debug)] +pub enum PendingConnectionError { + /// An error occurred while negotiating the transport protocol(s). + Transport(TransportError), + + /// The peer identity obtained on the connection did not + /// match the one that was expected or is otherwise invalid. + InvalidPeerId, + + /// The pending connection was successfully negotiated but dropped + /// because the connection limit for a peer has been reached. + ConnectionLimit(ConnectionLimit), + + /// An I/O error occurred on the connection. + // TODO: Eventually this should also be a custom error? 
+ IO(io::Error), +} + +impl fmt::Display +for PendingConnectionError +where + TTransErr: fmt::Display, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + PendingConnectionError::IO(err) => + write!(f, "Pending connection: I/O error: {}", err), + PendingConnectionError::Transport(err) => + write!(f, "Pending connection: Transport error: {}", err), + PendingConnectionError::InvalidPeerId => + write!(f, "Pending connection: Invalid peer ID."), + PendingConnectionError::ConnectionLimit(l) => + write!(f, "Pending connection: Connection limit: {}.", l) + } + } +} + +impl std::error::Error +for PendingConnectionError +where + TTransErr: std::error::Error + 'static +{ + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + PendingConnectionError::IO(err) => Some(err), + PendingConnectionError::Transport(err) => Some(err), + PendingConnectionError::InvalidPeerId => None, + PendingConnectionError::ConnectionLimit(..) => None, + } + } +} diff --git a/core/src/connection/handler.rs b/core/src/connection/handler.rs new file mode 100644 index 00000000..0379ace1 --- /dev/null +++ b/core/src/connection/handler.rs @@ -0,0 +1,127 @@ +// Copyright 2018 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use crate::PeerId; +use std::{task::Context, task::Poll}; +use super::{Connected, SubstreamEndpoint}; + +/// The interface of a connection handler. +/// +/// Each handler is responsible for a single connection. +pub trait ConnectionHandler { + /// The inbound type of events used to notify the handler through the `Network`. + /// + /// See also [`EstablishedConnection::notify_handler`](super::EstablishedConnection::notify_handler) + /// and [`ConnectionHandler::inject_event`]. + type InEvent; + /// The outbound type of events that the handler emits to the `Network` + /// through [`ConnectionHandler::poll`]. + /// + /// See also [`NetworkEvent::ConnectionEvent`](crate::network::NetworkEvent::ConnectionEvent). + type OutEvent; + /// The type of errors that the handler can produce when polled by the `Network`. + type Error; + /// The type of the substream containing the data. + type Substream; + /// Information about a substream. Can be sent to the handler through a `SubstreamEndpoint`, + /// and will be passed back in `inject_substream` or `inject_outbound_closed`. + type OutboundOpenInfo; + + /// Sends a new substream to the handler. 
+ /// + /// The handler is responsible for upgrading the substream to whatever protocol it wants. + /// + /// # Panic + /// + /// Implementations are allowed to panic in the case of dialing if the `user_data` in + /// `endpoint` doesn't correspond to what was returned earlier when polling, or is used + /// multiple times. + fn inject_substream(&mut self, substream: Self::Substream, endpoint: SubstreamEndpoint); + + /// Notifies the handler of an event. + fn inject_event(&mut self, event: Self::InEvent); + + /// Polls the handler for events. + /// + /// Returning an error will close the connection to the remote. + fn poll(&mut self, cx: &mut Context) + -> Poll, Self::Error>>; +} + +/// Prototype for a `ConnectionHandler`. +pub trait IntoConnectionHandler { + /// The node handler. + type Handler: ConnectionHandler; + + /// Builds the node handler. + /// + /// The implementation is given a `Connected` value that holds information about + /// the newly established connection for which a handler should be created. + fn into_handler(self, connected: &Connected) -> Self::Handler; +} + +impl IntoConnectionHandler for T +where + T: ConnectionHandler +{ + type Handler = Self; + + fn into_handler(self, _: &Connected) -> Self { + self + } +} + +/// Event produced by a handler. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum ConnectionHandlerEvent { + /// Require a new outbound substream to be opened with the remote. + OutboundSubstreamRequest(TOutboundOpenInfo), + + /// Other event. + Custom(TCustom), +} + +/// Event produced by a handler. +impl ConnectionHandlerEvent { + /// If this is `OutboundSubstreamRequest`, maps the content to something else. + pub fn map_outbound_open_info(self, map: F) -> ConnectionHandlerEvent + where F: FnOnce(TOutboundOpenInfo) -> I + { + match self { + ConnectionHandlerEvent::OutboundSubstreamRequest(val) => { + ConnectionHandlerEvent::OutboundSubstreamRequest(map(val)) + }, + ConnectionHandlerEvent::Custom(val) => ConnectionHandlerEvent::Custom(val), + } + } + + /// If this is `Custom`, maps the content to something else. + pub fn map_custom(self, map: F) -> ConnectionHandlerEvent + where F: FnOnce(TCustom) -> I + { + match self { + ConnectionHandlerEvent::OutboundSubstreamRequest(val) => { + ConnectionHandlerEvent::OutboundSubstreamRequest(val) + }, + ConnectionHandlerEvent::Custom(val) => ConnectionHandlerEvent::Custom(map(val)), + } + } +} + diff --git a/core/src/nodes/listeners.rs b/core/src/connection/listeners.rs similarity index 99% rename from core/src/nodes/listeners.rs rename to core/src/connection/listeners.rs index f711ef71..c703095d 100644 --- a/core/src/nodes/listeners.rs +++ b/core/src/connection/listeners.rs @@ -43,7 +43,7 @@ use std::{collections::VecDeque, fmt, pin::Pin}; /// ```no_run /// # fn main() { /// use futures::prelude::*; -/// use libp2p_core::nodes::listeners::{ListenersEvent, ListenersStream}; +/// use libp2p_core::connection::{ListenersEvent, ListenersStream}; /// /// let mut listeners = ListenersStream::new(libp2p_tcp::TcpConfig::new()); /// @@ -369,7 +369,6 @@ where mod tests { use super::*; use crate::transport; - use futures::prelude::*; #[test] fn incoming_event() { diff --git a/core/src/connection/manager.rs b/core/src/connection/manager.rs new file mode 100644 index 00000000..c0a4af44 --- /dev/null +++ b/core/src/connection/manager.rs @@ -0,0 +1,490 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use crate::{ + Executor, + muxing::StreamMuxer, +}; +use fnv::FnvHashMap; +use futures::{ + prelude::*, + channel::mpsc, + stream::FuturesUnordered +}; +use std::{ + collections::hash_map, + error, + fmt, + pin::Pin, + task::{Context, Poll}, +}; +use super::{ + Connected, + Connection, + ConnectionError, + ConnectionHandler, + IntoConnectionHandler, + PendingConnectionError, + Substream +}; +use task::{Task, TaskId}; + +mod task; + +// Implementation Notes +// ==================== +// +// A `Manager` is decoupled from the background tasks through channels. +// The state of a `Manager` therefore "lags behind" the progress of +// the tasks -- it is only made aware of progress in the background tasks +// when it is `poll()`ed. +// +// A `Manager` is ignorant of substreams and does not emit any events +// related to specific substreams. +// +// A `Manager` is unaware of any association between connections and peers +// / peer identities (i.e. the type parameter `C` is completely opaque). +// +// There is a 1-1 correspondence between (internal) task IDs and (public) +// connection IDs, i.e. the task IDs are "re-exported" as connection IDs +// by the manager. The notion of a (background) task is internal to the +// manager. + +/// The result of a pending connection attempt. +type ConnectResult = Result<(Connected, M), PendingConnectionError>; + +/// Connection identifier. +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] +pub struct ConnectionId(TaskId); + +impl ConnectionId { + /// Creates a `ConnectionId` from a non-negative integer. + /// + /// This is primarily useful for creating connection IDs + /// in test environments. There is in general no guarantee + /// that all connection IDs are based on non-negative integers. + pub fn new(id: usize) -> Self { + ConnectionId(TaskId(id)) + } +} + +/// A connection `Manager` orchestrates the I/O of a set of connections. +pub struct Manager { + /// The tasks of the managed connections. + /// + /// Each managed connection is associated with a (background) task + /// spawned onto an executor. Each `TaskInfo` in `tasks` is linked to such a + /// background task via a channel. Closing that channel (i.e. dropping + /// the sender in the associated `TaskInfo`) stops the background task, + /// which will attempt to gracefully close the connection. + tasks: FnvHashMap>, + + /// Next available identifier for a new connection / task. 
+ next_task_id: TaskId, + + /// The executor to use for running the background tasks. If `None`, + /// the tasks are kept in `local_spawns` instead and polled on the + /// current thread when the manager is polled for new events. + executor: Option>, + + /// If no `executor` is configured, tasks are kept in this set and + /// polled on the current thread when the manager is polled for new events. + local_spawns: FuturesUnordered + Send>>>, + + /// Sender distributed to managed tasks for reporting events back + /// to the manager. + events_tx: mpsc::Sender>, + + /// Receiver for events reported from managed tasks. + events_rx: mpsc::Receiver> +} + +impl fmt::Debug for Manager +where + C: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_map() + .entries(self.tasks.iter().map(|(id, task)| (id, &task.state))) + .finish() + } +} + +/// Internal information about a running task. +/// +/// Contains the sender to deliver event messages to the task, and +/// the associated user data. +#[derive(Debug)] +struct TaskInfo { + /// channel endpoint to send messages to the task + sender: mpsc::Sender>, + /// The state of the task as seen by the `Manager`. + state: TaskState, +} + +/// Internal state of a running task as seen by the `Manager`. +#[derive(Debug, Clone, PartialEq, Eq)] +enum TaskState { + /// The connection is being established. + Pending, + /// The connection is established. + Established(Connected), +} + +/// Events produced by the [`Manager`]. +#[derive(Debug)] +pub enum Event<'a, I, O, H, TE, HE, C> { + /// A connection attempt has failed. + PendingConnectionError { + /// The connection ID. + /// + /// As a result of the error, the pending connection has been removed + /// from the `Manager` and is being closed. Hence this ID will + /// no longer resolve to a valid entry in the manager. + id: ConnectionId, + /// What happened. + error: PendingConnectionError, + /// The handler that was supposed to handle the failed connection. + handler: H + }, + + /// An established connection has encountered an error. + ConnectionError { + /// The connection ID. + /// + /// As a result of the error, the connection has been removed + /// from the `Manager` and is being closed. Hence this ID will + /// no longer resolve to a valid entry in the manager. + id: ConnectionId, + /// Information about the connection that encountered the error. + connected: Connected, + /// The error that occurred. + error: ConnectionError, + }, + + /// A connection has been established. + ConnectionEstablished { + /// The entry associated with the new connection. + entry: EstablishedEntry<'a, I, C>, + }, + + /// A connection handler has produced an event. + ConnectionEvent { + /// The entry associated with the connection that produced the event. + entry: EstablishedEntry<'a, I, C>, + /// The produced event. + event: O + } +} + +impl Manager { + /// Creates a new connection manager. + pub fn new(executor: Option>) -> Self { + let (tx, rx) = mpsc::channel(1); + Self { + tasks: FnvHashMap::default(), + next_task_id: TaskId(0), + executor, + local_spawns: FuturesUnordered::new(), + events_tx: tx, + events_rx: rx + } + } + + /// Adds to the manager a future that tries to reach a node. + /// + /// This method spawns a task dedicated to resolving this future and + /// processing the node's events. 
+ pub fn add_pending(&mut self, future: F, handler: H) -> ConnectionId + where + I: Send + 'static, + O: Send + 'static, + TE: error::Error + Send + 'static, + HE: error::Error + Send + 'static, + C: Send + 'static, + M: StreamMuxer + Send + Sync + 'static, + M::OutboundSubstream: Send + 'static, + F: Future> + Send + 'static, + H: IntoConnectionHandler + Send + 'static, + H::Handler: ConnectionHandler< + Substream = Substream, + InEvent = I, + OutEvent = O, + Error = HE + > + Send + 'static, + ::OutboundOpenInfo: Send + 'static, + { + let task_id = self.next_task_id; + self.next_task_id.0 += 1; + + let (tx, rx) = mpsc::channel(4); + self.tasks.insert(task_id, TaskInfo { sender: tx, state: TaskState::Pending }); + + let task = Box::pin(Task::pending(task_id, self.events_tx.clone(), rx, future, handler)); + if let Some(executor) = &mut self.executor { + executor.exec(task); + } else { + self.local_spawns.push(task); + } + + ConnectionId(task_id) + } + + /// Adds an existing connection to the manager. + pub fn add(&mut self, conn: Connection, info: Connected) -> ConnectionId + where + H: IntoConnectionHandler + Send + 'static, + H::Handler: ConnectionHandler< + Substream = Substream, + InEvent = I, + OutEvent = O, + Error = HE + > + Send + 'static, + ::OutboundOpenInfo: Send + 'static, + TE: error::Error + Send + 'static, + HE: error::Error + Send + 'static, + I: Send + 'static, + O: Send + 'static, + M: StreamMuxer + Send + Sync + 'static, + M::OutboundSubstream: Send + 'static, + C: Send + 'static + { + let task_id = self.next_task_id; + self.next_task_id.0 += 1; + + let (tx, rx) = mpsc::channel(4); + self.tasks.insert(task_id, TaskInfo { + sender: tx, state: TaskState::Established(info) + }); + + let task: Pin>>, _, _, _, _, _, _>>> = + Box::pin(Task::established(task_id, self.events_tx.clone(), rx, conn)); + + if let Some(executor) = &mut self.executor { + executor.exec(task); + } else { + self.local_spawns.push(task); + } + + ConnectionId(task_id) + } + + /// Notifies the handlers of all managed connections of an event. + /// + /// This function is "atomic", in the sense that if `Poll::Pending` is + /// returned then no event has been sent. + #[must_use] + pub fn poll_broadcast(&mut self, event: &I, cx: &mut Context) -> Poll<()> + where + I: Clone + { + for task in self.tasks.values_mut() { + if let Poll::Pending = task.sender.poll_ready(cx) { // (*) + return Poll::Pending; + } + } + + for (id, task) in self.tasks.iter_mut() { + let cmd = task::Command::NotifyHandler(event.clone()); + match task.sender.start_send(cmd) { + Ok(()) => {}, + Err(e) if e.is_full() => unreachable!("by (*)"), + Err(e) if e.is_disconnected() => { + // The background task ended. The manager will eventually be + // informed through an `Error` event from the task. + log::trace!("Connection dropped: {:?}", id); + }, + Err(e) => { + log::error!("Unexpected error: {:?}", e); + } + } + } + + Poll::Ready(()) + } + + /// Gets an entry for a managed connection, if it exists. + pub fn entry(&mut self, id: ConnectionId) -> Option> { + if let hash_map::Entry::Occupied(task) = self.tasks.entry(id.0) { + Some(Entry::new(task)) + } else { + None + } + } + + /// Checks whether an established connection with the given ID is currently managed. + pub fn is_established(&self, id: &ConnectionId) -> bool { + match self.tasks.get(&id.0) { + Some(TaskInfo { state: TaskState::Established(..), .. }) => true, + _ => false + } + } + + /// Polls the manager for events relating to the managed connections. 
+ pub fn poll<'a>(&'a mut self, cx: &mut Context) -> Poll> { + // Advance the content of `local_spawns`. + while let Poll::Ready(Some(_)) = Stream::poll_next(Pin::new(&mut self.local_spawns), cx) {} + + // Poll for the first event for which the manager still has a registered task, if any. + let event = loop { + match Stream::poll_next(Pin::new(&mut self.events_rx), cx) { + Poll::Ready(Some(event)) => { + if self.tasks.contains_key(event.id()) { // (1) + break event + } + } + Poll::Pending => return Poll::Pending, + Poll::Ready(None) => unreachable!("Manager holds both sender and receiver."), + } + }; + + if let hash_map::Entry::Occupied(mut task) = self.tasks.entry(*event.id()) { + Poll::Ready(match event { + task::Event::Notify { id: _, event } => + Event::ConnectionEvent { + entry: EstablishedEntry { task }, + event + }, + task::Event::Established { id: _, info } => { // (2) + task.get_mut().state = TaskState::Established(info); // (3) + Event::ConnectionEstablished { + entry: EstablishedEntry { task }, + } + } + task::Event::Failed { id, error, handler } => { + let id = ConnectionId(id); + let _ = task.remove(); + Event::PendingConnectionError { id, error, handler } + } + task::Event::Error { id, error } => { + let id = ConnectionId(id); + let task = task.remove(); + match task.state { + TaskState::Established(connected) => + Event::ConnectionError { id, connected, error }, + TaskState::Pending => unreachable!( + "`Event::Error` implies (2) occurred on that task and thus (3)." + ), + } + } + + }) + } else { + unreachable!("By (1)") + } + } +} + +/// An entry for a connection in the manager. +#[derive(Debug)] +pub enum Entry<'a, I, C> { + Pending(PendingEntry<'a, I, C>), + Established(EstablishedEntry<'a, I, C>) +} + +impl<'a, I, C> Entry<'a, I, C> { + fn new(task: hash_map::OccupiedEntry<'a, TaskId, TaskInfo>) -> Self { + match &task.get().state { + TaskState::Pending => Entry::Pending(PendingEntry { task }), + TaskState::Established(_) => Entry::Established(EstablishedEntry { task }) + } + } +} + +/// An entry for a managed connection that is considered established. +#[derive(Debug)] +pub struct EstablishedEntry<'a, I, C> { + task: hash_map::OccupiedEntry<'a, TaskId, TaskInfo>, +} + +impl<'a, I, C> EstablishedEntry<'a, I, C> { + /// (Asynchronously) sends an event to the connection handler. + /// + /// If the handler is not ready to receive the event, either because + /// it is busy or the connection is about to close, the given event + /// is returned with an `Err`. + /// + /// If execution of this method is preceded by successful execution of + /// `poll_ready_notify_handler` without another intervening execution + /// of `notify_handler`, it only fails if the connection is now about + /// to close. + /// + /// > **Note**: As this method does not take a `Context`, the current + /// > task _may not be notified_ if sending the event fails due to + /// > the connection handler not being ready at this time. + pub fn notify_handler(&mut self, event: I) -> Result<(), I> { + let cmd = task::Command::NotifyHandler(event); + self.task.get_mut().sender.try_send(cmd) + .map_err(|e| match e.into_inner() { + task::Command::NotifyHandler(event) => event + }) + } + + /// Checks if `notify_handler` is ready to accept an event. + /// + /// Returns `Ok(())` if the handler is ready to receive an event via `notify_handler`. + /// + /// Returns `Err(())` if the background task associated with the connection + /// is terminating and the connection is about to close. 
+ pub fn poll_ready_notify_handler(&mut self, cx: &mut Context) -> Poll> { + self.task.get_mut().sender.poll_ready(cx).map_err(|_| ()) + } + + /// Obtains information about the established connection. + pub fn connected(&self) -> &Connected { + match &self.task.get().state { + TaskState::Established(c) => c, + TaskState::Pending => unreachable!("By Entry::new()") + } + } + + /// Closes the connection represented by this entry, + /// returning the connection information. + pub fn close(self) -> Connected { + match self.task.remove().state { + TaskState::Established(c) => c, + TaskState::Pending => unreachable!("By Entry::new()") + } + } + + /// Returns the connection id. + pub fn id(&self) -> ConnectionId { + ConnectionId(*self.task.key()) + } +} + +/// An entry for a managed connection that is currently being established +/// (i.e. pending). +#[derive(Debug)] +pub struct PendingEntry<'a, I, C> { + task: hash_map::OccupiedEntry<'a, TaskId, TaskInfo> +} + +impl<'a, I, C> PendingEntry<'a, I, C> { + /// Returns the connection id. + pub fn id(&self) -> ConnectionId { + ConnectionId(*self.task.key()) + } + + /// Aborts the pending connection attempt. + pub fn abort(self) { + self.task.remove(); + } +} diff --git a/core/src/connection/manager/task.rs b/core/src/connection/manager/task.rs new file mode 100644 index 00000000..9149e89e --- /dev/null +++ b/core/src/connection/manager/task.rs @@ -0,0 +1,341 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use crate::{ + muxing::StreamMuxer, + connection::{ + Close, + Connected, + Connection, + ConnectionError, + ConnectionHandler, + IntoConnectionHandler, + PendingConnectionError, + Substream, + }, +}; +use futures::{prelude::*, channel::mpsc, stream}; +use std::{pin::Pin, task::Context, task::Poll}; +use super::ConnectResult; + +/// Identifier of a [`Task`] in a [`Manager`](super::Manager). +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] +pub struct TaskId(pub(super) usize); + +/// Commands that can be sent to a [`Task`]. +#[derive(Debug)] +pub enum Command { + /// Notify the connection handler of an event. + NotifyHandler(T), +} + +/// Events that a task can emit to its manager. +#[derive(Debug)] +pub enum Event { + /// A connection to a node has succeeded. + Established { id: TaskId, info: Connected }, + /// An established connection produced an error. + Error { id: TaskId, error: ConnectionError }, + /// A pending connection failed. 
+ Failed { id: TaskId, error: PendingConnectionError, handler: H }, + /// Notify the manager of an event from the connection. + Notify { id: TaskId, event: T } +} + +impl Event { + pub fn id(&self) -> &TaskId { + match self { + Event::Established { id, .. } => id, + Event::Error { id, .. } => id, + Event::Notify { id, .. } => id, + Event::Failed { id, .. } => id, + } + } +} + +/// A `Task` is a [`Future`] that handles a single connection. +pub struct Task +where + M: StreamMuxer, + H: IntoConnectionHandler, + H::Handler: ConnectionHandler> +{ + /// The ID of this task. + id: TaskId, + + /// Sender to emit events to the manager of this task. + events: mpsc::Sender::Error, C>>, + + /// Receiver for commands sent by the manager of this task. + commands: stream::Fuse>>, + + /// Inner state of this `Task`. + state: State, +} + +impl Task +where + M: StreamMuxer, + H: IntoConnectionHandler, + H::Handler: ConnectionHandler> +{ + /// Create a new task to connect and handle some node. + pub fn pending( + id: TaskId, + events: mpsc::Sender::Error, C>>, + commands: mpsc::Receiver>, + future: F, + handler: H + ) -> Self { + Task { + id, + events, + commands: commands.fuse(), + state: State::Pending { + future: Box::pin(future), + handler, + events: Vec::new() + }, + } + } + + /// Create a task for an existing node we are already connected to. + pub fn established( + id: TaskId, + events: mpsc::Sender::Error, C>>, + commands: mpsc::Receiver>, + connection: Connection + ) -> Self { + Task { + id, + events, + commands: commands.fuse(), + state: State::EstablishedPending(connection), + } + } +} + +/// The state associated with the `Task` of a connection. +enum State +where + M: StreamMuxer, + H: IntoConnectionHandler, + H::Handler: ConnectionHandler> +{ + /// The task is waiting for the connection to be established. + Pending { + /// The future that will attempt to reach the node. + // TODO: don't pin this Future; this requires deeper changes though + future: Pin>, + /// The intended handler for the established connection. + handler: H, + /// While we are dialing the future, we need to buffer the events received via + /// `Command::NotifyHandler` so that they get delivered to the `handler` + /// once the connection is established. We can't leave these in `Task::receiver` + /// because we have to detect if the connection attempt has been aborted (by + /// dropping the corresponding `sender` owned by the manager). + events: Vec + }, + + /// The connection is established and a new event is ready to be emitted. + EstablishedReady { + /// The node, if available. + connection: Option>, + /// The actual event message to send. + event: Event::Error, C> + }, + + /// The connection is established and pending a new event to occur. + EstablishedPending(Connection), + + /// The task is closing the connection. + Closing(Close), + + /// The task has finished. + Done +} + +impl Unpin for Task +where + M: StreamMuxer, + H: IntoConnectionHandler, + H::Handler: ConnectionHandler> +{ +} + +impl Future for Task +where + M: StreamMuxer, + F: Future>, + H: IntoConnectionHandler, + H::Handler: ConnectionHandler, InEvent = I, OutEvent = O> +{ + type Output = (); + + // NOTE: It is imperative to always consume all incoming commands from + // the manager first, in order to not prevent it from making progress because + // it is blocked on the channel capacity. 
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<()> { + let this = &mut *self; + let id = this.id; + + 'poll: loop { + match std::mem::replace(&mut this.state, State::Done) { + State::Pending { mut future, handler, mut events } => { + // Process commands from the manager. + loop { + match Stream::poll_next(Pin::new(&mut this.commands), cx) { + Poll::Pending => break, + Poll::Ready(None) => return Poll::Ready(()), + Poll::Ready(Some(Command::NotifyHandler(event))) => + events.push(event), + } + } + // Check if the connection succeeded. + match Future::poll(Pin::new(&mut future), cx) { + Poll::Ready(Ok((info, muxer))) => { + let mut c = Connection::new(muxer, handler.into_handler(&info)); + for event in events { + c.inject_event(event) + } + this.state = State::EstablishedReady { + connection: Some(c), + event: Event::Established { id, info } + } + } + Poll::Pending => { + this.state = State::Pending { future, handler, events }; + return Poll::Pending + } + Poll::Ready(Err(error)) => { + let event = Event::Failed { id, handler, error }; + this.state = State::EstablishedReady { connection: None, event } + } + } + } + + State::EstablishedPending(mut connection) => { + // Start by handling commands received from the manager, if any. + loop { + match Stream::poll_next(Pin::new(&mut this.commands), cx) { + Poll::Pending => break, + Poll::Ready(Some(Command::NotifyHandler(event))) => + connection.inject_event(event), + Poll::Ready(None) => { + // The manager has dropped the task, thus initiate a + // graceful shutdown of the connection. + this.state = State::Closing(connection.close()); + continue 'poll + } + } + } + // Poll the connection for new events. + loop { + match Connection::poll(Pin::new(&mut connection), cx) { + Poll::Pending => { + this.state = State::EstablishedPending(connection); + return Poll::Pending + } + Poll::Ready(Ok(event)) => { + this.state = State::EstablishedReady { + connection: Some(connection), + event: Event::Notify { id, event } + }; + continue 'poll + } + Poll::Ready(Err(error)) => { + // Notify the manager of the error via an event, + // dropping the connection. + let event = Event::Error { id, error }; + this.state = State::EstablishedReady { connection: None, event }; + continue 'poll + } + } + } + } + + // Deliver an event to the manager. + State::EstablishedReady { mut connection, event } => { + // Process commands received from the manager, if any. + loop { + match Stream::poll_next(Pin::new(&mut this.commands), cx) { + Poll::Pending => break, + Poll::Ready(Some(Command::NotifyHandler(event))) => + if let Some(ref mut c) = connection { + c.inject_event(event) + } + Poll::Ready(None) => + // The manager has dropped the task, thus initiate a + // graceful shutdown of the connection, if given. + if let Some(c) = connection { + this.state = State::Closing(c.close()); + continue 'poll + } else { + return Poll::Ready(()) + } + } + } + // Send the event to the manager. + match this.events.poll_ready(cx) { + Poll::Pending => { + self.state = State::EstablishedReady { connection, event }; + return Poll::Pending + } + Poll::Ready(Ok(())) => { + // We assume that if `poll_ready` has succeeded, then sending the event + // will succeed as well. If it turns out that it didn't, we will detect + // the closing at the next loop iteration. 
+ let _ = this.events.start_send(event); + if let Some(c) = connection { + this.state = State::EstablishedPending(c) + } else { + // The connection has been dropped, thus this was the last event + // to send to the manager and the task is done. + return Poll::Ready(()) + } + }, + Poll::Ready(Err(_)) => { + // The manager is no longer reachable, maybe due to + // application shutdown. Try a graceful shutdown of the + // connection, if available, and end the task. + if let Some(c) = connection { + this.state = State::Closing(c.close()); + continue 'poll + } + return Poll::Ready(()) + } + } + } + + State::Closing(mut closing) => + match Future::poll(Pin::new(&mut closing), cx) { + Poll::Ready(_) => return Poll::Ready(()), // end task + Poll::Pending => { + this.state = State::Closing(closing); + return Poll::Pending + } + } + + State::Done => panic!("`Task::poll()` called after completion.") + } + } + } +} + diff --git a/core/src/connection/pool.rs b/core/src/connection/pool.rs new file mode 100644 index 00000000..8046755f --- /dev/null +++ b/core/src/connection/pool.rs @@ -0,0 +1,875 @@ +// Copyright 2018 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use crate::{ + Executor, + ConnectedPoint, + PeerId, + connection::{ + self, + Connected, + Connection, + ConnectionId, + ConnectionLimit, + ConnectionError, + ConnectionHandler, + ConnectionInfo, + IncomingInfo, + IntoConnectionHandler, + OutgoingInfo, + Substream, + PendingConnectionError, + manager::{self, Manager}, + }, + muxing::StreamMuxer, +}; +use either::Either; +use fnv::FnvHashMap; +use futures::prelude::*; +use smallvec::SmallVec; +use std::{error, fmt, hash::Hash, task::Context, task::Poll}; + +/// A connection `Pool` manages a set of connections for each peer. +pub struct Pool { + local_id: TPeerId, + + /// The configuration of the pool. + limits: PoolLimits, + + /// The connection manager that handles the connection I/O for both + /// established and pending connections. + /// + /// For every established connection there is a corresponding entry in `established`. + manager: Manager, + + /// The managed connections of each peer that are currently considered + /// established, as witnessed by the associated `ConnectedPoint`. + established: FnvHashMap>, + + /// The pending connections that are currently being negotiated. 
+ pending: FnvHashMap)>, +} + +impl fmt::Debug +for Pool +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + // TODO: More useful debug impl? + f.debug_struct("Pool") + .field("limits", &self.limits) + .finish() + } +} + +impl Unpin +for Pool {} + +/// Event that can happen on the `Pool`. +pub enum PoolEvent<'a, TInEvent, TOutEvent, THandler, TTransErr, THandlerErr, TConnInfo, TPeerId> { + /// A new connection has been established. + ConnectionEstablished { + connection: EstablishedConnection<'a, TInEvent, TConnInfo, TPeerId>, + num_established: usize, + }, + + /// An established connection has encountered an error. + ConnectionError { + id: ConnectionId, + /// Information about the connection that errored. + connected: Connected, + /// The error that occurred. + error: ConnectionError, + /// A reference to the pool that used to manage the connection. + pool: &'a mut Pool, + /// The remaining number of established connections to the same peer. + num_established: usize, + }, + + /// A connection attempt failed. + PendingConnectionError { + /// The ID of the failed connection. + id: ConnectionId, + /// The local endpoint of the failed connection. + endpoint: ConnectedPoint, + /// The error that occurred. + error: PendingConnectionError, + /// The handler that was supposed to handle the connection, + /// if the connection failed before the handler was consumed. + handler: Option, + /// The (expected) peer of the failed connection. + peer: Option, + /// A reference to the pool that managed the connection. + pool: &'a mut Pool, + }, + + /// A node has produced an event. + ConnectionEvent { + /// The connection that has generated the event. + connection: EstablishedConnection<'a, TInEvent, TConnInfo, TPeerId>, + /// The produced event. + event: TOutEvent, + }, +} + +impl<'a, TInEvent, TOutEvent, THandler, TTransErr, THandlerErr, TConnInfo, TPeerId> fmt::Debug +for PoolEvent<'a, TInEvent, TOutEvent, THandler, TTransErr, THandlerErr, TConnInfo, TPeerId> +where + TOutEvent: fmt::Debug, + TTransErr: fmt::Debug, + THandlerErr: fmt::Debug, + TConnInfo: fmt::Debug, + TInEvent: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + match *self { + PoolEvent::ConnectionEstablished { ref connection, .. } => { + f.debug_tuple("PoolEvent::ConnectionEstablished") + .field(connection) + .finish() + }, + PoolEvent::ConnectionError { ref id, ref connected, ref error, .. } => { + f.debug_struct("PoolEvent::ConnectionError") + .field("id", id) + .field("connected", connected) + .field("error", error) + .finish() + }, + PoolEvent::PendingConnectionError { ref id, ref error, .. } => { + f.debug_struct("PoolEvent::PendingConnectionError") + .field("id", id) + .field("error", error) + .finish() + }, + PoolEvent::ConnectionEvent { ref connection, ref event } => { + f.debug_struct("PoolEvent::ConnectionEvent") + .field("conn_info", connection.info()) + .field("event", event) + .finish() + }, + } + } +} + +impl + Pool +where + TPeerId: Eq + Hash, +{ + /// Creates a new empty `Pool`. + pub fn new( + local_id: TPeerId, + executor: Option>, + limits: PoolLimits + ) -> Self { + Pool { + local_id, + limits, + manager: Manager::new(executor), + established: Default::default(), + pending: Default::default(), + } + } + + /// Gets the configured connection limits of the pool. + pub fn limits(&self) -> &PoolLimits { + &self.limits + } + + /// Adds a pending incoming connection to the pool in the form of a + /// `Future` that establishes and negotiates the connection. 
+ /// + /// Returns an error if the limit of pending incoming connections + /// has been reached. + pub fn add_incoming( + &mut self, + future: TFut, + handler: THandler, + info: IncomingInfo, + ) -> Result + where + TConnInfo: ConnectionInfo + Send + 'static, + TFut: Future< + Output = Result<(TConnInfo, TMuxer), PendingConnectionError> + > + Send + 'static, + THandler: IntoConnectionHandler + Send + 'static, + THandler::Handler: ConnectionHandler< + Substream = Substream, + InEvent = TInEvent, + OutEvent = TOutEvent, + Error = THandlerErr + > + Send + 'static, + ::OutboundOpenInfo: Send + 'static, + TTransErr: error::Error + Send + 'static, + THandlerErr: error::Error + Send + 'static, + TInEvent: Send + 'static, + TOutEvent: Send + 'static, + TMuxer: StreamMuxer + Send + Sync + 'static, + TMuxer::OutboundSubstream: Send + 'static, + { + let endpoint = info.to_connected_point(); + if let Some(limit) = self.limits.max_pending_incoming { + let current = self.iter_pending_incoming().count(); + if current >= limit { + return Err(ConnectionLimit { limit, current }) + } + } + Ok(self.add_pending(future, handler, endpoint, None)) + } + + /// Adds a pending outgoing connection to the pool in the form of a `Future` + /// that establishes and negotiates the connection. + /// + /// Returns an error if the limit of pending outgoing connections + /// has been reached. + pub fn add_outgoing( + &mut self, + future: TFut, + handler: THandler, + info: OutgoingInfo, + ) -> Result + where + TConnInfo: ConnectionInfo + Send + 'static, + TFut: Future< + Output = Result<(TConnInfo, TMuxer), PendingConnectionError> + > + Send + 'static, + THandler: IntoConnectionHandler + Send + 'static, + THandler::Handler: ConnectionHandler< + Substream = Substream, + InEvent = TInEvent, + OutEvent = TOutEvent, + Error = THandlerErr + > + Send + 'static, + ::OutboundOpenInfo: Send + 'static, + TTransErr: error::Error + Send + 'static, + THandlerErr: error::Error + Send + 'static, + TInEvent: Send + 'static, + TOutEvent: Send + 'static, + TMuxer: StreamMuxer + Send + Sync + 'static, + TMuxer::OutboundSubstream: Send + 'static, + TPeerId: Clone, + { + self.limits.check_outgoing(|| self.iter_pending_outgoing().count())?; + let endpoint = info.to_connected_point(); + Ok(self.add_pending(future, handler, endpoint, info.peer_id.cloned())) + } + + /// Adds a pending connection to the pool in the form of a + /// `Future` that establishes and negotiates the connection. 
+ fn add_pending( + &mut self, + future: TFut, + handler: THandler, + endpoint: ConnectedPoint, + peer: Option, + ) -> ConnectionId + where + TConnInfo: ConnectionInfo + Send + 'static, + TFut: Future< + Output = Result<(TConnInfo, TMuxer), PendingConnectionError> + > + Send + 'static, + THandler: IntoConnectionHandler + Send + 'static, + THandler::Handler: ConnectionHandler< + Substream = Substream, + InEvent = TInEvent, + OutEvent = TOutEvent, + Error = THandlerErr + > + Send + 'static, + ::OutboundOpenInfo: Send + 'static, + TTransErr: error::Error + Send + 'static, + THandlerErr: error::Error + Send + 'static, + TInEvent: Send + 'static, + TOutEvent: Send + 'static, + TMuxer: StreamMuxer + Send + Sync + 'static, + TMuxer::OutboundSubstream: Send + 'static, + { + let future = future.and_then({ + let endpoint = endpoint.clone(); + move |(info, muxer)| { + let connected = Connected { info, endpoint }; + future::ready(Ok((connected, muxer))) + } + }); + let id = self.manager.add_pending(future, handler); + self.pending.insert(id, (endpoint, peer)); + id + } + + /// Sends an event to all nodes. + /// + /// This function is "atomic", in the sense that if `Poll::Pending` is returned then no event + /// has been sent to any node yet. + #[must_use] + pub fn poll_broadcast(&mut self, event: &TInEvent, cx: &mut Context) -> Poll<()> + where + TInEvent: Clone + { + self.manager.poll_broadcast(event, cx) + } + + /// Adds an existing established connection to the pool. + /// + /// Returns the assigned connection ID on success. An error is returned + /// if the configured maximum number of established connections for the + /// connected peer has been reached. + pub fn add(&mut self, c: Connection, i: Connected) + -> Result + where + THandler: IntoConnectionHandler + Send + 'static, + THandler::Handler: ConnectionHandler< + Substream = connection::Substream, + InEvent = TInEvent, + OutEvent = TOutEvent, + Error = THandlerErr + > + Send + 'static, + ::OutboundOpenInfo: Send + 'static, + TTransErr: error::Error + Send + 'static, + THandlerErr: error::Error + Send + 'static, + TInEvent: Send + 'static, + TOutEvent: Send + 'static, + TMuxer: StreamMuxer + Send + Sync + 'static, + TMuxer::OutboundSubstream: Send + 'static, + TConnInfo: Clone + Send + 'static, + TPeerId: Clone, + TConnInfo: ConnectionInfo, + { + if let Some(limit) = self.limits.max_established_per_peer { + let current = self.num_peer_established(i.peer_id()); + if limit >= current { + return Err(ConnectionLimit { limit, current }) + } + } + let id = self.manager.add(c, i.clone()); + self.established.entry(i.peer_id().clone()).or_default().insert(id, i.endpoint); + Ok(id) + } + + /// Gets an entry representing a connection in the pool. + /// + /// Returns `None` if the pool has no connection with the given ID. + pub fn get(&mut self, id: ConnectionId) + -> Option> + { + match self.manager.entry(id) { + Some(manager::Entry::Established(entry)) => + Some(PoolConnection::Established(EstablishedConnection { + entry, + established: &mut self.established, + })), + Some(manager::Entry::Pending(entry)) => + Some(PoolConnection::Pending(PendingConnection { + entry, + pending: &mut self.pending, + })), + None => None + } + } + + /// Gets an established connection from the pool by ID. + pub fn get_established(&mut self, id: ConnectionId) + -> Option> + { + match self.get(id) { + Some(PoolConnection::Established(c)) => Some(c), + _ => None + } + } + + /// Gets a pending outgoing connection by ID. 
+ pub fn get_outgoing(&mut self, id: ConnectionId) + -> Option> + { + match self.pending.get(&id) { + Some((ConnectedPoint::Dialer { .. }, _peer)) => + match self.manager.entry(id) { + Some(manager::Entry::Pending(entry)) => + Some(PendingConnection { + entry, + pending: &mut self.pending, + }), + _ => unreachable!("by consistency of `self.pending` with `self.manager`") + } + _ => None + } + } + + /// Returns true if we are connected to the given peer. + /// + /// This will return true only after a `NodeReached` event has been produced by `poll()`. + pub fn is_connected(&self, id: &TPeerId) -> bool { + self.established.contains_key(id) + } + + /// Returns the number of connected peers, i.e. those with at least one + /// established connection in the pool. + pub fn num_connected(&self) -> usize { + self.established.len() + } + + /// Close all connections to the given peer. + pub fn disconnect(&mut self, peer: &TPeerId) { + if let Some(conns) = self.established.get(peer) { + for id in conns.keys() { + match self.manager.entry(*id) { + Some(manager::Entry::Established(e)) => { e.close(); }, + _ => {} + } + } + } + + for (id, (_endpoint, peer2)) in &self.pending { + if Some(peer) == peer2.as_ref() { + match self.manager.entry(*id) { + Some(manager::Entry::Pending(e)) => { e.abort(); }, + _ => {} + } + } + } + } + + /// Counts the number of established connections in the pool. + pub fn num_established(&self) -> usize { + self.established.iter().fold(0, |n, (_, conns)| n + conns.len()) + } + + /// Counts the number of pending connections in the pool. + pub fn num_pending(&self) -> usize { + self.iter_pending_info().count() + } + + /// Counts the number of established connections to the given peer. + pub fn num_peer_established(&self, peer: &TPeerId) -> usize { + self.established.get(peer).map_or(0, |conns| conns.len()) + } + + /// Returns an iterator over all established connections of `peer`. + pub fn iter_peer_established<'a>(&'a mut self, peer: &TPeerId) + -> EstablishedConnectionIter<'a, + impl Iterator, + TInEvent, + TOutEvent, + THandler, + TTransErr, + THandlerErr, + TConnInfo, + TPeerId> + { + let ids = self.iter_peer_established_info(peer) + .map(|(id, _endpoint)| *id) + .collect::>() + .into_iter(); + + EstablishedConnectionIter { pool: self, ids } + } + + /// Returns an iterator for information on all pending incoming connections. + pub fn iter_pending_incoming(&self) -> impl Iterator> { + self.iter_pending_info() + .filter_map(|(_, ref endpoint, _)| { + match endpoint { + ConnectedPoint::Listener { local_addr, send_back_addr } => { + Some(IncomingInfo { local_addr, send_back_addr }) + }, + ConnectedPoint::Dialer { .. } => None, + } + }) + } + + /// Returns an iterator for information on all pending outgoing connections. + pub fn iter_pending_outgoing(&self) -> impl Iterator> { + self.iter_pending_info() + .filter_map(|(_, ref endpoint, ref peer_id)| { + match endpoint { + ConnectedPoint::Listener { .. } => None, + ConnectedPoint::Dialer { address } => + Some(OutgoingInfo { address, peer_id: peer_id.as_ref() }), + } + }) + } + + /// Returns an iterator over all connection IDs and associated endpoints + /// of established connections to `peer` known to the pool. 
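`iter_peer_established_info` just below returns one of two differently typed iterators from a single function, which is the idiom the `either` crate enables: `Either` implements `Iterator` whenever both variants do. A standalone sketch under simplified types:

    use either::Either;
    use std::collections::HashMap;

    /// Iterates over the values stored for `key`, or over nothing if absent.
    fn values_or_empty<'a>(map: &'a HashMap<u32, Vec<u32>>, key: &u32)
        -> impl Iterator<Item = &'a u32> + 'a
    {
        match map.get(key) {
            Some(values) => Either::Left(values.iter()),
            None => Either::Right(std::iter::empty()),
        }
    }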
+ pub fn iter_peer_established_info(&self, peer: &TPeerId) + -> impl Iterator + fmt::Debug + '_ + { + match self.established.get(peer) { + Some(conns) => Either::Left(conns.iter()), + None => Either::Right(std::iter::empty()) + } + } + + /// Returns an iterator over all pending connection IDs together + /// with associated endpoints and expected peer IDs in the pool. + pub fn iter_pending_info(&self) + -> impl Iterator)> + '_ + { + self.pending.iter().map(|(id, (endpoint, info))| (id, endpoint, info)) + } + + /// Returns an iterator over all connected peers, i.e. those that have + /// at least one established connection in the pool. + pub fn iter_connected<'a>(&'a self) -> impl Iterator + 'a { + self.established.keys() + } + + /// Polls the connection pool for events. + /// + /// > **Note**: We use a regular `poll` method instead of implementing `Stream`, + /// > because we want the `Pool` to stay borrowed if necessary. + pub fn poll<'a>(&'a mut self, cx: &mut Context) -> Poll< + PoolEvent<'a, TInEvent, TOutEvent, THandler, TTransErr, THandlerErr, TConnInfo, TPeerId> + > where + TConnInfo: ConnectionInfo + Clone, + TPeerId: Clone, + { + loop { + let item = match self.manager.poll(cx) { + Poll::Ready(item) => item, + Poll::Pending => return Poll::Pending, + }; + + match item { + manager::Event::PendingConnectionError { id, error, handler } => { + if let Some((endpoint, peer)) = self.pending.remove(&id) { + return Poll::Ready(PoolEvent::PendingConnectionError { + id, + endpoint, + error, + handler: Some(handler), + peer, + pool: self + }) + } + }, + manager::Event::ConnectionError { id, connected, error } => { + let num_established = + if let Some(conns) = self.established.get_mut(connected.peer_id()) { + conns.remove(&id); + conns.len() + } else { + 0 + }; + if num_established == 0 { + self.established.remove(connected.peer_id()); + } + return Poll::Ready(PoolEvent::ConnectionError { + id, connected, error, num_established, pool: self + }) + }, + manager::Event::ConnectionEstablished { entry } => { + let id = entry.id(); + if let Some((endpoint, peer)) = self.pending.remove(&id) { + // Check connection limit. + let established = &self.established; + let current = || established.get(entry.connected().peer_id()) + .map_or(0, |conns| conns.len()); + if let Err(e) = self.limits.check_established(current) { + let connected = entry.close(); + return Poll::Ready(PoolEvent::PendingConnectionError { + id, + endpoint: connected.endpoint, + peer: Some(connected.info.peer_id().clone()), + error: PendingConnectionError::ConnectionLimit(e), + pool: self, + handler: None, + }) + } + // Check peer ID. + if let Some(peer) = peer { + if &peer != entry.connected().peer_id() { + let connected = entry.close(); + return Poll::Ready(PoolEvent::PendingConnectionError { + id, + endpoint: connected.endpoint, + peer: Some(connected.info.peer_id().clone()), + error: PendingConnectionError::InvalidPeerId, + pool: self, + handler: None, + }) + } + } + if &self.local_id == entry.connected().peer_id() { + let connected = entry.close(); + return Poll::Ready(PoolEvent::PendingConnectionError { + id, + endpoint: connected.endpoint, + peer: Some(connected.info.peer_id().clone()), + error: PendingConnectionError::InvalidPeerId, + pool: self, + handler: None, + }) + } + // Add the connection to the pool. 
+ let peer = entry.connected().peer_id().clone(); + let conns = self.established.entry(peer).or_default(); + let num_established = conns.len() + 1; + conns.insert(id, endpoint); + match self.get(id) { + Some(PoolConnection::Established(connection)) => + return Poll::Ready(PoolEvent::ConnectionEstablished { + connection, num_established + }), + _ => unreachable!("since `entry` is an `EstablishedEntry`.") + } + } + }, + manager::Event::ConnectionEvent { entry, event } => { + let id = entry.id(); + match self.get(id) { + Some(PoolConnection::Established(connection)) => + return Poll::Ready(PoolEvent::ConnectionEvent { + connection, + event, + }), + _ => unreachable!("since `entry` is an `EstablishedEntry`.") + } + } + } + } + } + +} + +/// A connection in a [`Pool`]. +pub enum PoolConnection<'a, TInEvent, TConnInfo, TPeerId> { + Pending(PendingConnection<'a, TInEvent, TConnInfo, TPeerId>), + Established(EstablishedConnection<'a, TInEvent, TConnInfo, TPeerId>), +} + +/// A pending connection in a [`Pool`]. +pub struct PendingConnection<'a, TInEvent, TConnInfo, TPeerId> { + entry: manager::PendingEntry<'a, TInEvent, TConnInfo>, + pending: &'a mut FnvHashMap)>, +} + +impl + PendingConnection<'_, TInEvent, TConnInfo, TPeerId> +{ + /// Returns the local connection ID. + pub fn id(&self) -> ConnectionId { + self.entry.id() + } + + /// Returns the (expected) identity of the remote peer, if known. + pub fn peer_id(&self) -> &Option { + &self.pending.get(&self.entry.id()).expect("`entry` is a pending entry").1 + } + + /// Returns information about this endpoint of the connection. + pub fn endpoint(&self) -> &ConnectedPoint { + &self.pending.get(&self.entry.id()).expect("`entry` is a pending entry").0 + } + + /// Aborts the connection attempt, closing the connection. + pub fn abort(self) { + self.pending.remove(&self.entry.id()); + self.entry.abort(); + } +} + +/// An established connection in a [`Pool`]. +pub struct EstablishedConnection<'a, TInEvent, TConnInfo, TPeerId> { + entry: manager::EstablishedEntry<'a, TInEvent, TConnInfo>, + established: &'a mut FnvHashMap>, +} + +impl fmt::Debug +for EstablishedConnection<'_, TInEvent, TConnInfo, TPeerId> +where + TInEvent: fmt::Debug, + TConnInfo: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + f.debug_struct("EstablishedConnection") + .field("entry", &self.entry) + .finish() + } +} + +impl + EstablishedConnection<'_, TInEvent, TConnInfo, TPeerId> +{ + pub fn connected(&self) -> &Connected { + self.entry.connected() + } + + /// Returns information about the connected endpoint. + pub fn endpoint(&self) -> &ConnectedPoint { + &self.entry.connected().endpoint + } + + /// Returns connection information obtained from the transport. + pub fn info(&self) -> &TConnInfo { + &self.entry.connected().info + } +} + +impl + EstablishedConnection<'_, TInEvent, TConnInfo, TPeerId> +where + TConnInfo: ConnectionInfo, + TPeerId: Eq + Hash + Clone, +{ + /// Returns the local connection ID. + pub fn id(&self) -> ConnectionId { + self.entry.id() + } + + /// Returns the identity of the connected peer. + pub fn peer_id(&self) -> &TPeerId { + self.info().peer_id() + } + + /// (Asynchronously) sends an event to the connection handler. + /// + /// If the handler is not ready to receive the event, either because + /// it is busy or the connection is about to close, the given event + /// is returned with an `Err`. 
+ /// + /// If execution of this method is preceded by successful execution of + /// `poll_ready_notify_handler` without another intervening execution + /// of `notify_handler`, it only fails if the connection is now about + /// to close. + pub fn notify_handler(&mut self, event: TInEvent) -> Result<(), TInEvent> { + self.entry.notify_handler(event) + } + + /// Checks if `notify_handler` is ready to accept an event. + /// + /// Returns `Ok(())` if the handler is ready to receive an event via `notify_handler`. + /// + /// Returns `Err(())` if the background task associated with the connection + /// is terminating and the connection is about to close. + pub fn poll_ready_notify_handler(&mut self, cx: &mut Context) -> Poll> { + self.entry.poll_ready_notify_handler(cx) + } + + /// Closes the connection, returning the connection information. + pub fn close(self) -> Connected { + let id = self.entry.id(); + let info = self.entry.close(); + + let empty = + if let Some(conns) = self.established.get_mut(info.peer_id()) { + conns.remove(&id); + conns.is_empty() + } else { + false + }; + + if empty { + self.established.remove(info.peer_id()); + } + + info + } +} + +/// An iterator over established connections in a [`Pool`]. +pub struct EstablishedConnectionIter<'a, I, TInEvent, TOutEvent, THandler, TTransErr, THandlerErr, TConnInfo, TPeerId> { + pool: &'a mut Pool, + ids: I +} + +// Note: Ideally this would be an implementation of `Iterator`, but that +// requires GATs (cf. https://github.com/rust-lang/rust/issues/44265) and +// a different definition of `Iterator`. +impl<'a, I, TInEvent, TOutEvent, THandler, TTransErr, THandlerErr, TConnInfo, TPeerId> + EstablishedConnectionIter<'a, I, TInEvent, TOutEvent, THandler, TTransErr, THandlerErr, TConnInfo, TPeerId> +where + I: Iterator +{ + /// Obtains the next connection, if any. + pub fn next<'b>(&'b mut self) -> Option> + { + while let Some(id) = self.ids.next() { + if self.pool.manager.is_established(&id) { // (*) + match self.pool.manager.entry(id) { + Some(manager::Entry::Established(entry)) => { + let established = &mut self.pool.established; + return Some(EstablishedConnection { entry, established }) + } + _ => unreachable!("by (*)") + } + } + } + None + } + + /// Turns the iterator into an iterator over just the connection IDs. + pub fn into_ids(self) -> impl Iterator { + self.ids + } + + /// Returns the first connection, if any, consuming the iterator. + pub fn into_first<'b>(mut self) + -> Option> + where 'a: 'b + { + while let Some(id) = self.ids.next() { + if self.pool.manager.is_established(&id) { // (*) + match self.pool.manager.entry(id) { + Some(manager::Entry::Established(entry)) => { + let established = &mut self.pool.established; + return Some(EstablishedConnection { entry, established }) + } + _ => unreachable!("by (*)") + } + } + } + None + } +} + +/// The configurable limits of a connection [`Pool`]. 
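`poll_ready_notify_handler` and `notify_handler` above form a readiness-then-send pair, in the spirit of `Sink::poll_ready` followed by `start_send`. A sketch of the intended calling pattern, where `conn` and `event` are assumptions:

    match conn.poll_ready_notify_handler(cx) {
        // After a successful readiness check, `notify_handler` can only fail
        // if the connection has started closing in the meantime.
        Poll::Ready(Ok(())) => { let _ = conn.notify_handler(event); }
        Poll::Ready(Err(())) => { /* background task terminating, connection closes */ }
        Poll::Pending => { /* handler busy, retry when woken */ }
    }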
+#[derive(Debug, Clone, Default)]
+pub struct PoolLimits {
+    pub max_pending_outgoing: Option<usize>,
+    pub max_pending_incoming: Option<usize>,
+    pub max_established_per_peer: Option<usize>,
+}
+
+impl PoolLimits {
+    fn check_established<F>(&self, current: F) -> Result<(), ConnectionLimit>
+    where
+        F: FnOnce() -> usize
+    {
+        Self::check(current, self.max_established_per_peer)
+    }
+
+    fn check_outgoing<F>(&self, current: F) -> Result<(), ConnectionLimit>
+    where
+        F: FnOnce() -> usize
+    {
+        Self::check(current, self.max_pending_outgoing)
+    }
+
+    fn check<F>(current: F, limit: Option<usize>) -> Result<(), ConnectionLimit>
+    where
+        F: FnOnce() -> usize
+    {
+        if let Some(limit) = limit {
+            let current = current();
+            if current >= limit {
+                return Err(ConnectionLimit { limit, current })
+            }
+        }
+        Ok(())
+    }
+}
diff --git a/core/src/nodes/node.rs b/core/src/connection/substream.rs
similarity index 67%
rename from core/src/nodes/node.rs
rename to core/src/connection/substream.rs
index 99e5df61..f496e43a 100644
--- a/core/src/nodes/node.rs
+++ b/core/src/connection/substream.rs
@@ -18,40 +18,50 @@
 // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 // DEALINGS IN THE SOFTWARE.
 
+use crate::muxing::{StreamMuxer, SubstreamRef, substream_from_ref};
 use futures::prelude::*;
-use crate::muxing;
 use smallvec::SmallVec;
-use std::{fmt, io::Error as IoError, pin::Pin, sync::Arc, task::Context, task::Poll};
+use std::sync::Arc;
+use std::{fmt, io::Error as IoError, pin::Pin, task::Context, task::Poll};
 
-// Implementation notes
-// =================
-//
-// In order to minimize the risk of bugs in higher-level code, we want to avoid as much as
-// possible having a racy API. The behaviour of methods should be well-defined and predictable.
-//
-// In order to respect this coding practice, we should theoretically provide events such as "data
-// incoming on a substream", or "a substream is ready to be written". This would however make the
-// API of `NodeStream` really painful to use. Instead, we really want to provide an object that
-// implements the `AsyncRead` and `AsyncWrite` traits.
-//
-// This substream object raises the question of how to keep the `NodeStream` and the various
-// substreams in sync without exposing a racy API. The answer is that the `NodeStream` holds
-// ownership of the connection. Shutting node the `NodeStream` or destroying it will close all the
-// existing substreams. The user of the `NodeStream` should be aware of that.
+/// Endpoint for a received substream.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub enum SubstreamEndpoint<TDialInfo> {
+    Dialer(TDialInfo),
+    Listener,
+}
+
+impl<TDialInfo> SubstreamEndpoint<TDialInfo> {
+    /// Returns true for `Dialer`.
+    pub fn is_dialer(&self) -> bool {
+        match self {
+            SubstreamEndpoint::Dialer(_) => true,
+            SubstreamEndpoint::Listener => false,
+        }
+    }
 
-/// Implementation of `Stream` that handles a node.
+    /// Returns true for `Listener`.
+    pub fn is_listener(&self) -> bool {
+        match self {
+            SubstreamEndpoint::Dialer(_) => false,
+            SubstreamEndpoint::Listener => true,
+        }
+    }
+}
+
+/// Implementation of `Stream` that handles substream multiplexing.
 ///
 /// The stream will receive substreams and can be used to open new outgoing substreams. Destroying
-/// the `NodeStream` will **not** close the existing substreams.
+/// the `Muxing` will **not** close the existing substreams.
 ///
 /// The stream will close once both the inbound and outbound channels are closed, and no more
 /// outbound substream attempt is pending.
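The renamed `Muxing` type documented above keeps the old `NodeStream` contract: outbound substreams are requested with a piece of user data, and the eventual event carries that data back for correlation. A minimal driving sketch, assuming a concrete muxer `m: M` with `M: StreamMuxer` and `u64` user data:

    let mut muxing: Muxing<M, u64> = Muxing::new(m);
    muxing.open_substream(42); // request an outbound substream, tagged `42`
    match muxing.poll(cx) {
        Poll::Ready(Ok(SubstreamEvent::InboundSubstream { substream })) => {
            // The remote opened a substream towards us.
        }
        Poll::Ready(Ok(SubstreamEvent::OutboundSubstream { user_data, substream })) => {
            debug_assert_eq!(user_data, 42); // our earlier request completed
        }
        Poll::Ready(Err(_e)) => { /* muxer error, the connection is unusable */ }
        Poll::Pending => { /* nothing ready yet */ }
    }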
-pub struct NodeStream +pub struct Muxing where - TMuxer: muxing::StreamMuxer, + TMuxer: StreamMuxer, { /// The muxer used to manage substreams. - muxer: Arc, + inner: Arc, /// List of substreams we are currently opening. outbound_substreams: SmallVec<[(TUserData, TMuxer::OutboundSubstream); 8]>, } @@ -63,16 +73,16 @@ pub struct Close { } /// A successfully opened substream. -pub type Substream = muxing::SubstreamRef>; +pub type Substream = SubstreamRef>; -/// Event that can happen on the `NodeStream`. -pub enum NodeEvent +/// Event that can happen on the `Muxing`. +pub enum SubstreamEvent where - TMuxer: muxing::StreamMuxer, + TMuxer: StreamMuxer, { /// A new inbound substream arrived. InboundSubstream { - /// The newly-opened substream. Will return EOF of an error if the `NodeStream` is + /// The newly-opened substream. Will return EOF of an error if the `Muxing` is /// destroyed or `close_graceful` is called. substream: Substream, }, @@ -81,7 +91,7 @@ where OutboundSubstream { /// User data that has been passed to the `open_substream` method. user_data: TUserData, - /// The newly-opened substream. Will return EOF of an error if the `NodeStream` is + /// The newly-opened substream. Will return EOF of an error if the `Muxing` is /// destroyed or `close_graceful` is called. substream: Substream, }, @@ -91,15 +101,14 @@ where #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] pub struct OutboundSubstreamId(usize); -impl NodeStream +impl Muxing where - TMuxer: muxing::StreamMuxer, + TMuxer: StreamMuxer, { /// Creates a new node events stream. - #[inline] pub fn new(muxer: TMuxer) -> Self { - NodeStream { - muxer: Arc::new(muxer), + Muxing { + inner: Arc::new(muxer), outbound_substreams: SmallVec::new(), } } @@ -110,7 +119,7 @@ where /// `OutboundSubstream` event or an `OutboundClosed` event containing the user data that has /// been passed to this method. pub fn open_substream(&mut self, user_data: TUserData) { - let raw = self.muxer.open_outbound(); + let raw = self.inner.open_outbound(); self.outbound_substreams.push((user_data, raw)); } @@ -118,7 +127,7 @@ where /// /// See `StreamMuxer::is_remote_acknowledged`. pub fn is_remote_acknowledged(&self) -> bool { - self.muxer.is_remote_acknowledged() + self.inner.is_remote_acknowledged() } /// Destroys the node stream and returns all the pending outbound substreams, plus an object @@ -126,7 +135,7 @@ where #[must_use] pub fn close(mut self) -> (Close, Vec) { let substreams = self.cancel_outgoing(); - let close = Close { muxer: self.muxer.clone() }; + let close = Close { muxer: self.inner.clone() }; (close, substreams) } @@ -135,18 +144,18 @@ where let mut out = Vec::with_capacity(self.outbound_substreams.len()); for (user_data, outbound) in self.outbound_substreams.drain(..) { out.push(user_data); - self.muxer.destroy_outbound(outbound); + self.inner.destroy_outbound(outbound); } out } /// Provides an API similar to `Future`. - pub fn poll(&mut self, cx: &mut Context) -> Poll, IoError>> { + pub fn poll(&mut self, cx: &mut Context) -> Poll, IoError>> { // Polling inbound substream. 
- match self.muxer.poll_inbound(cx) { + match self.inner.poll_inbound(cx) { Poll::Ready(Ok(substream)) => { - let substream = muxing::substream_from_ref(self.muxer.clone(), substream); - return Poll::Ready(Ok(NodeEvent::InboundSubstream { + let substream = substream_from_ref(self.inner.clone(), substream); + return Poll::Ready(Ok(SubstreamEvent::InboundSubstream { substream, })); } @@ -158,11 +167,11 @@ where // We remove each element from `outbound_substreams` one by one and add them back. for n in (0..self.outbound_substreams.len()).rev() { let (user_data, mut outbound) = self.outbound_substreams.swap_remove(n); - match self.muxer.poll_outbound(cx, &mut outbound) { + match self.inner.poll_outbound(cx, &mut outbound) { Poll::Ready(Ok(substream)) => { - let substream = muxing::substream_from_ref(self.muxer.clone(), substream); - self.muxer.destroy_outbound(outbound); - return Poll::Ready(Ok(NodeEvent::OutboundSubstream { + let substream = substream_from_ref(self.inner.clone(), substream); + self.inner.destroy_outbound(outbound); + return Poll::Ready(Ok(SubstreamEvent::OutboundSubstream { user_data, substream, })); @@ -171,7 +180,7 @@ where self.outbound_substreams.push((user_data, outbound)); } Poll::Ready(Err(err)) => { - self.muxer.destroy_outbound(outbound); + self.inner.destroy_outbound(outbound); return Poll::Ready(Err(err.into())); } } @@ -182,34 +191,34 @@ where } } -impl fmt::Debug for NodeStream +impl fmt::Debug for Muxing where - TMuxer: muxing::StreamMuxer, + TMuxer: StreamMuxer, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - f.debug_struct("NodeStream") + f.debug_struct("Muxing") .field("outbound_substreams", &self.outbound_substreams.len()) .finish() } } -impl Drop for NodeStream +impl Drop for Muxing where - TMuxer: muxing::StreamMuxer, + TMuxer: StreamMuxer, { fn drop(&mut self) { // The substreams that were produced will continue to work, as the muxer is held in an Arc. // However we will no longer process any further inbound or outbound substream, and we // therefore close everything. for (_, outbound) in self.outbound_substreams.drain(..) 
{ - self.muxer.destroy_outbound(outbound); + self.inner.destroy_outbound(outbound); } } } impl Future for Close where - TMuxer: muxing::StreamMuxer, + TMuxer: StreamMuxer, { type Output = Result<(), IoError>; @@ -224,7 +233,7 @@ where impl fmt::Debug for Close where - TMuxer: muxing::StreamMuxer, + TMuxer: StreamMuxer, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { f.debug_struct("Close") @@ -232,21 +241,21 @@ where } } -impl fmt::Debug for NodeEvent +impl fmt::Debug for SubstreamEvent where - TMuxer: muxing::StreamMuxer, + TMuxer: StreamMuxer, TMuxer::Substream: fmt::Debug, TUserData: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - NodeEvent::InboundSubstream { substream } => { - f.debug_struct("NodeEvent::OutboundClosed") + SubstreamEvent::InboundSubstream { substream } => { + f.debug_struct("SubstreamEvent::OutboundClosed") .field("substream", substream) .finish() }, - NodeEvent::OutboundSubstream { user_data, substream } => { - f.debug_struct("NodeEvent::OutboundSubstream") + SubstreamEvent::OutboundSubstream { user_data, substream } => { + f.debug_struct("SubstreamEvent::OutboundSubstream") .field("user_data", user_data) .field("substream", substream) .finish() diff --git a/core/src/lib.rs b/core/src/lib.rs index beb0ffbb..aa851ee0 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -43,15 +43,14 @@ mod keys_proto { pub use multiaddr; pub type Negotiated = futures::compat::Compat01As03>>; -use std::{future::Future, pin::Pin}; - mod peer_id; mod translation; +pub mod connection; pub mod either; pub mod identity; pub mod muxing; -pub mod nodes; +pub mod network; pub mod transport; pub mod upgrade; @@ -62,101 +61,10 @@ pub use identity::PublicKey; pub use transport::Transport; pub use translation::address_translation; pub use upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo, UpgradeError, ProtocolName}; -pub use nodes::ConnectionInfo; - -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub enum Endpoint { - /// The socket comes from a dialer. - Dialer, - /// The socket comes from a listener. - Listener, -} - -impl std::ops::Not for Endpoint { - type Output = Endpoint; - - fn not(self) -> Self::Output { - match self { - Endpoint::Dialer => Endpoint::Listener, - Endpoint::Listener => Endpoint::Dialer - } - } -} +pub use connection::{Connected, Endpoint, ConnectedPoint, ConnectionInfo}; +pub use network::Network; -impl Endpoint { - /// Is this endpoint a dialer? - pub fn is_dialer(self) -> bool { - if let Endpoint::Dialer = self { - true - } else { - false - } - } - - /// Is this endpoint a listener? - pub fn is_listener(self) -> bool { - if let Endpoint::Listener = self { - true - } else { - false - } - } -} - -/// How we connected to a node. -#[derive(Debug, Clone)] -pub enum ConnectedPoint { - /// We dialed the node. - Dialer { - /// Multiaddress that was successfully dialed. - address: Multiaddr, - }, - /// We received the node. - Listener { - /// Local connection address. - local_addr: Multiaddr, - /// Stack of protocols used to send back data to the remote. - send_back_addr: Multiaddr, - } -} - -impl From<&'_ ConnectedPoint> for Endpoint { - fn from(endpoint: &'_ ConnectedPoint) -> Endpoint { - endpoint.to_endpoint() - } -} - -impl From for Endpoint { - fn from(endpoint: ConnectedPoint) -> Endpoint { - endpoint.to_endpoint() - } -} - -impl ConnectedPoint { - /// Turns the `ConnectedPoint` into the corresponding `Endpoint`. - pub fn to_endpoint(&self) -> Endpoint { - match self { - ConnectedPoint::Dialer { .. 
} => Endpoint::Dialer, - ConnectedPoint::Listener { .. } => Endpoint::Listener - } - } - - /// Returns true if we are `Dialer`. - pub fn is_dialer(&self) -> bool { - match self { - ConnectedPoint::Dialer { .. } => true, - ConnectedPoint::Listener { .. } => false - } - } - - /// Returns true if we are `Listener`. - pub fn is_listener(&self) -> bool { - match self { - ConnectedPoint::Dialer { .. } => false, - ConnectedPoint::Listener { .. } => true - } - } -} +use std::{future::Future, pin::Pin}; /// Implemented on objects that can run a `Future` in the background. /// diff --git a/core/src/muxing.rs b/core/src/muxing.rs index c6a8aa68..64a93051 100644 --- a/core/src/muxing.rs +++ b/core/src/muxing.rs @@ -206,7 +206,6 @@ pub trait StreamMuxer { /// Polls for an inbound from the muxer but wraps the output in an object that /// implements `Read`/`Write`/`AsyncRead`/`AsyncWrite`. -#[inline] pub fn inbound_from_ref_and_wrap
<P>(
    muxer: P,
) -> impl Future<Output = Result<SubstreamRef<P>, <P::Target as StreamMuxer>::Error>>
@@ -221,7 +220,6 @@ where
 
 /// Same as `outbound_from_ref`, but wraps the output in an object that
 /// implements `Read`/`Write`/`AsyncRead`/`AsyncWrite`.
-#[inline]
 pub fn outbound_from_ref_and_wrap<P>(muxer: P) -> OutboundSubstreamRefWrapFuture<P>
 where
     P: Deref + Clone,
@@ -260,7 +258,6 @@ where
 }
 
 /// Builds a new future for an outbound substream, where the muxer is a reference.
-#[inline]
 pub fn outbound_from_ref<P>(muxer: P) -> OutboundSubstreamRefFuture<P>
 where
     P: Deref,
@@ -297,7 +294,6 @@ where
 {
     type Output = Result<<P::Target as StreamMuxer>::Substream, <P::Target as StreamMuxer>::Error>;
 
-    #[inline]
     fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
         // We use a `this` because the compiler isn't smart enough to allow mutably borrowing
         // multiple different fields from the `Pin` at the same time.
@@ -311,7 +307,6 @@ where
     P: Deref,
     P::Target: StreamMuxer,
 {
-    #[inline]
     fn drop(&mut self) {
         self.muxer
            .destroy_outbound(self.outbound.take().expect("outbound was empty"))
@@ -320,7 +315,6 @@ where
 
 /// Builds an implementation of `Read`/`Write`/`AsyncRead`/`AsyncWrite` from an `Arc` to the
 /// muxer and a substream.
-#[inline]
 pub fn substream_from_ref<P>
( muxer: P, substream: ::Substream, @@ -444,7 +438,6 @@ where P: Deref, P::Target: StreamMuxer, { - #[inline] fn drop(&mut self) { self.muxer.destroy_substream(self.substream.take().expect("substream was empty")) } diff --git a/core/src/muxing/singleton.rs b/core/src/muxing/singleton.rs index c2b56d0c..bc2521ad 100644 --- a/core/src/muxing/singleton.rs +++ b/core/src/muxing/singleton.rs @@ -18,7 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::{Endpoint, muxing::StreamMuxer}; +use crate::{connection::Endpoint, muxing::StreamMuxer}; use futures::prelude::*; use parking_lot::Mutex; use std::{io, pin::Pin, sync::atomic::{AtomicBool, Ordering}, task::Context, task::Poll}; diff --git a/core/src/network.rs b/core/src/network.rs new file mode 100644 index 00000000..49911505 --- /dev/null +++ b/core/src/network.rs @@ -0,0 +1,660 @@ +// Copyright 2018 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +mod event; +pub mod peer; + +pub use event::{NetworkEvent, IncomingConnectionEvent}; +pub use peer::Peer; + +use crate::{ + ConnectedPoint, + Executor, + Multiaddr, + PeerId, + address_translation, + connection::{ + ConnectionId, + ConnectionLimit, + ConnectionHandler, + ConnectionInfo, + IntoConnectionHandler, + IncomingInfo, + OutgoingInfo, + ListenersEvent, + ListenerId, + ListenersStream, + PendingConnectionError, + Substream, + pool::{Pool, PoolEvent, PoolLimits}, + }, + muxing::StreamMuxer, + transport::{Transport, TransportError}, +}; +use fnv::{FnvHashMap}; +use futures::{prelude::*, future}; +use std::{ + collections::hash_map, + error, + fmt, + hash::Hash, + num::NonZeroUsize, + pin::Pin, + task::{Context, Poll}, +}; + +/// Implementation of `Stream` that handles the nodes. +pub struct Network +where + TTrans: Transport, + THandler: IntoConnectionHandler, +{ + /// The local peer ID. + local_peer_id: TPeerId, + + /// Listeners for incoming connections. + listeners: ListenersStream, + + /// The nodes currently active. + pool: Pool::Error, TConnInfo, TPeerId>, + + /// The ongoing dialing attempts. + /// + /// The `Network` enforces a single ongoing dialing attempt per peer, + /// even if multiple (established) connections per peer are allowed. + /// However, a single dialing attempt operates on a list of addresses + /// to connect to, which can be extended with new addresses while + /// the connection attempt is still in progress. 
Thereby each + /// dialing attempt is associated with a new connection and hence a new + /// connection ID. + /// + /// > **Note**: `dialing` must be consistent with the pending outgoing + /// > connections in `pool`. That is, for every entry in `dialing` + /// > there must exist a pending outgoing connection in `pool` with + /// > the same connection ID. This is ensured by the implementation of + /// > `Network` (see `dial_peer_impl` and `on_connection_failed`) + /// > together with the implementation of `DialingConnection::abort`. + dialing: FnvHashMap, +} + +impl fmt::Debug for + Network +where + TTrans: fmt::Debug + Transport, + THandler: fmt::Debug + ConnectionHandler, + TConnInfo: fmt::Debug, + TPeerId: fmt::Debug + Eq + Hash, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + f.debug_struct("ReachAttempts") + .field("local_peer_id", &self.local_peer_id) + .field("listeners", &self.listeners) + .field("peers", &self.pool) + .field("dialing", &self.dialing) + .finish() + } +} + +impl Unpin for + Network +where + TTrans: Transport, + THandler: IntoConnectionHandler, +{ +} + +impl + Network +where + TTrans: Transport, + THandler: IntoConnectionHandler, + TConnInfo: ConnectionInfo, + TPeerId: Eq + Hash + Clone, +{ + fn disconnect(&mut self, peer: &TPeerId) { + self.pool.disconnect(peer); + self.dialing.remove(peer); + } +} + +impl + Network +where + TTrans: Transport + Clone, + TMuxer: StreamMuxer, + THandler: IntoConnectionHandler + Send + 'static, + THandler::Handler: ConnectionHandler, InEvent = TInEvent, OutEvent = TOutEvent> + Send + 'static, + ::OutboundOpenInfo: Send + 'static, // TODO: shouldn't be necessary + ::Error: error::Error + Send + 'static, + TConnInfo: fmt::Debug + ConnectionInfo + Send + 'static, + TPeerId: Eq + Hash + Clone, +{ + /// Creates a new node events stream. + pub fn new( + transport: TTrans, + local_peer_id: TPeerId, + config: NetworkConfig, + ) -> Self { + let pool_local_id = local_peer_id.clone(); + Network { + local_peer_id, + listeners: ListenersStream::new(transport), + pool: Pool::new(pool_local_id, config.executor, config.pool_limits), + dialing: Default::default(), + } + } + + /// Returns the transport passed when building this object. + pub fn transport(&self) -> &TTrans { + self.listeners.transport() + } + + /// Start listening on the given multiaddress. + pub fn listen_on(&mut self, addr: Multiaddr) -> Result> { + self.listeners.listen_on(addr) + } + + /// Remove a previously added listener. + /// + /// Returns `Ok(())` if a listener with this ID was in the list. + pub fn remove_listener(&mut self, id: ListenerId) -> Result<(), ()> { + self.listeners.remove_listener(id) + } + + /// Returns an iterator that produces the list of addresses we are listening on. + pub fn listen_addrs(&self) -> impl Iterator { + self.listeners.listen_addrs() + } + + /// Call this function in order to know which address remotes should dial to + /// access your local node. + /// + /// When receiving an observed address on a tcp connection that we initiated, the observed + /// address contains our tcp dial port, not our tcp listen port. We know which port we are + /// listening on, thereby we can replace the port within the observed address. + /// + /// When receiving an observed address on a tcp connection that we did **not** initiated, the + /// observed address should contain our listening port. In case it differs from our listening + /// port there might be a proxy along the path. 
+ /// + /// # Arguments + /// + /// * `observed_addr` - should be an address a remote observes you as, which can be obtained for + /// example with the identify protocol. + /// + pub fn address_translation<'a>(&'a self, observed_addr: &'a Multiaddr) + -> impl Iterator + 'a + where + TMuxer: 'a, + THandler: 'a, + { + self.listen_addrs().flat_map(move |server| address_translation(server, observed_addr)) + } + + /// Returns the peer id of the local node. + pub fn local_peer_id(&self) -> &TPeerId { + &self.local_peer_id + } + + /// Dials a multiaddress without expecting a particular remote peer ID. + /// + /// The given `handler` will be used to create the + /// [`Connection`](crate::connection::Connection) upon success and the + /// connection ID is returned. + pub fn dial(&mut self, address: &Multiaddr, handler: THandler) + -> Result> + where + TTrans: Transport, + TTrans::Error: Send + 'static, + TTrans::Dial: Send + 'static, + TMuxer: Send + Sync + 'static, + TMuxer::OutboundSubstream: Send, + TInEvent: Send + 'static, + TOutEvent: Send + 'static, + TConnInfo: Send + 'static, + TPeerId: Send + 'static, + { + let future = self.transport().clone().dial(address.clone())? + .map_err(|err| PendingConnectionError::Transport(TransportError::Other(err))); + let info = OutgoingInfo { address, peer_id: None }; + self.pool.add_outgoing(future, handler, info).map_err(DialError::MaxPending) + } + + /// Returns information about the state of the `Network`. + pub fn info(&self) -> NetworkInfo { + let num_connections_established = self.pool.num_established(); + let num_connections_pending = self.pool.num_pending(); + let num_connections = num_connections_established + num_connections_pending; + let num_peers = self.pool.num_connected(); + NetworkInfo { + num_peers, + num_connections, + num_connections_established, + num_connections_pending, + } + } + + /// Returns an iterator for information on all pending incoming connections. + pub fn incoming_info(&self) -> impl Iterator> { + self.pool.iter_pending_incoming() + } + + /// Returns the list of addresses we're currently dialing without knowing the `PeerId` of. + pub fn unknown_dials(&self) -> impl Iterator { + self.pool.iter_pending_outgoing() + .filter_map(|info| { + if info.peer_id.is_none() { + Some(info.address) + } else { + None + } + }) + } + + /// Notifies the connection handler of _every_ connection of _every_ peer of an event. + /// + /// This function is "atomic", in the sense that if `Poll::Pending` is returned then no event + /// has been sent to any node yet. + #[must_use] + pub fn poll_broadcast(&mut self, event: &TInEvent, cx: &mut Context) -> Poll<()> + where + TInEvent: Clone + { + self.pool.poll_broadcast(event, cx) + } + + /// Returns a list of all connected peers, i.e. peers to whom the `Network` + /// has at least one established connection. + pub fn connected_peers(&self) -> impl Iterator { + self.pool.iter_connected() + } + + /// Returns a list of all the peers to whom a new outgoing connection + /// is currently being established. + pub fn dialing_peers(&self) -> impl Iterator { + self.dialing.keys() + } + + /// Gets the configured limit on pending incoming connections, + /// i.e. concurrent incoming connection attempts. + pub fn incoming_limit(&self) -> Option { + self.pool.limits().max_pending_incoming + } + + /// The total number of established connections in the `Network`. + pub fn num_connections_established(&self) -> usize { + self.pool.num_established() + } + + /// The total number of pending connections in the `Network`. 
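The accessors above compose into simple diagnostics. A sketch, assuming a fully constructed `network`:

    let info = network.info();
    // `num_connections` is defined as established plus pending connections.
    assert_eq!(info.num_connections,
               info.num_connections_established + info.num_connections_pending);
    // Addresses currently being dialed without a known peer ID.
    let unknown: Vec<&Multiaddr> = network.unknown_dials().collect();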
+ pub fn num_connections_pending(&self) -> usize { + self.pool.num_pending() + } + + /// Obtains a view of a [`Peer`] with the given ID in the network. + pub fn peer(&mut self, peer_id: TPeerId) + -> Peer<'_, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId> + { + Peer::new(self, peer_id) + } + + /// Provides an API similar to `Stream`, except that it cannot error. + pub fn poll<'a>(&'a mut self, cx: &mut Context) -> Poll> + where + TTrans: Transport, + TTrans::Error: Send + 'static, + TTrans::Dial: Send + 'static, + TTrans::ListenerUpgrade: Send + 'static, + TMuxer: Send + Sync + 'static, + TMuxer::OutboundSubstream: Send, + TInEvent: Send + 'static, + TOutEvent: Send + 'static, + THandler: IntoConnectionHandler + Send + 'static, + THandler::Handler: ConnectionHandler, InEvent = TInEvent, OutEvent = TOutEvent> + Send + 'static, + ::Error: error::Error + Send + 'static, + TConnInfo: Clone, + TPeerId: AsRef<[u8]> + Send + 'static, + { + // Poll the listener(s) for new connections. + match ListenersStream::poll(Pin::new(&mut self.listeners), cx) { + Poll::Pending => (), + Poll::Ready(ListenersEvent::Incoming { + listener_id, + upgrade, + local_addr, + send_back_addr + }) => { + return Poll::Ready(NetworkEvent::IncomingConnection( + IncomingConnectionEvent { + listener_id, + upgrade, + local_addr, + send_back_addr, + pool: &mut self.pool, + })) + } + Poll::Ready(ListenersEvent::NewAddress { listener_id, listen_addr }) => { + return Poll::Ready(NetworkEvent::NewListenerAddress { listener_id, listen_addr }) + } + Poll::Ready(ListenersEvent::AddressExpired { listener_id, listen_addr }) => { + return Poll::Ready(NetworkEvent::ExpiredListenerAddress { listener_id, listen_addr }) + } + Poll::Ready(ListenersEvent::Closed { listener_id, reason }) => { + return Poll::Ready(NetworkEvent::ListenerClosed { listener_id, reason }) + } + Poll::Ready(ListenersEvent::Error { listener_id, error }) => { + return Poll::Ready(NetworkEvent::ListenerError { listener_id, error }) + } + } + + // Poll the known peers. + let event = match self.pool.poll(cx) { + Poll::Pending => return Poll::Pending, + Poll::Ready(PoolEvent::ConnectionEstablished { connection, num_established }) => { + match self.dialing.entry(connection.peer_id().clone()) { + hash_map::Entry::Occupied(e) if e.get().id == connection.id() => { + e.remove(); + }, + _ => {} + } + + NetworkEvent::ConnectionEstablished { + connection, + num_established, + } + } + Poll::Ready(PoolEvent::PendingConnectionError { id, endpoint, error, handler, pool, .. }) => { + let dialing = &mut self.dialing; + let (next, event) = on_connection_failed(pool, dialing, id, endpoint, error, handler); + if let Some(dial) = next { + let transport = self.listeners.transport().clone(); + if let Err(e) = dial_peer_impl(transport, pool, dialing, dial) { + log::warn!("Dialing aborted: {:?}", e); + } + } + event + } + Poll::Ready(PoolEvent::ConnectionError { connected, error, num_established, .. }) => { + NetworkEvent::ConnectionError { + connected, + error, + num_established, + } + } + Poll::Ready(PoolEvent::ConnectionEvent { connection, event }) => { + NetworkEvent::ConnectionEvent { + connection, + event + } + } + }; + + Poll::Ready(event) + } + + /// Initiates a connection attempt to a known peer. 
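Because `Network::poll` is an inherent method rather than a `Stream` implementation, callers drive it inside something like `future::poll_fn`. A sketch that accepts every incoming connection, where `new_handler()` is an assumed factory for handler values:

    let event_loop = futures::future::poll_fn(move |cx| -> Poll<()> {
        loop {
            match network.poll(cx) {
                Poll::Ready(NetworkEvent::IncomingConnection(incoming)) => {
                    // `accept` fails if `max_pending_incoming` is reached.
                    if let Err(_limit) = incoming.accept(new_handler()) {
                        log::warn!("incoming connection dropped: limit reached");
                    }
                }
                Poll::Ready(_other) => { /* dispatch the remaining event variants */ }
                Poll::Pending => return Poll::Pending,
            }
        }
    });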
+ fn dial_peer(&mut self, opts: DialingOpts) + -> Result + where + TTrans: Transport, + TTrans::Dial: Send + 'static, + TTrans::Error: Send + 'static, + TMuxer: Send + Sync + 'static, + TMuxer::OutboundSubstream: Send, + TInEvent: Send + 'static, + TOutEvent: Send + 'static, + TPeerId: Send + 'static, + { + dial_peer_impl(self.transport().clone(), &mut self.pool, &mut self.dialing, opts) + } +} + +/// Options for a dialing attempt (i.e. repeated connection attempt +/// via a list of address) to a peer. +struct DialingOpts { + peer: TPeerId, + handler: THandler, + address: Multiaddr, + remaining: Vec, +} + +/// Standalone implementation of `Network::dial_peer` for more granular borrowing. +fn dial_peer_impl( + transport: TTrans, + pool: &mut Pool::Error, TConnInfo, TPeerId>, + dialing: &mut FnvHashMap, + opts: DialingOpts +) -> Result +where + THandler: IntoConnectionHandler + Send + 'static, + ::Error: error::Error + Send + 'static, + ::OutboundOpenInfo: Send + 'static, + THandler::Handler: ConnectionHandler< + Substream = Substream, + InEvent = TInEvent, + OutEvent = TOutEvent, + > + Send + 'static, + TTrans: Transport, + TTrans::Dial: Send + 'static, + TTrans::Error: error::Error + Send + 'static, + TMuxer: StreamMuxer + Send + Sync + 'static, + TMuxer::OutboundSubstream: Send + 'static, + TInEvent: Send + 'static, + TOutEvent: Send + 'static, + TPeerId: Eq + Hash + Send + Clone + 'static, + TConnInfo: ConnectionInfo + Send + 'static, +{ + let result = match transport.dial(opts.address.clone()) { + Ok(fut) => { + let fut = fut.map_err(|e| PendingConnectionError::Transport(TransportError::Other(e))); + let info = OutgoingInfo { address: &opts.address, peer_id: Some(&opts.peer) }; + pool.add_outgoing(fut, opts.handler, info) + }, + Err(err) => { + let fut = future::err(PendingConnectionError::Transport(err)); + let info = OutgoingInfo { address: &opts.address, peer_id: Some(&opts.peer) }; + pool.add_outgoing(fut, opts.handler, info) + }, + }; + + if let Ok(id) = &result { + let former = dialing.insert(opts.peer, + peer::DialingAttempt { + id: *id, + current: opts.address, + next: opts.remaining, + }, + ); + debug_assert!(former.is_none()); + } + + result +} + +/// Callback for handling a failed connection attempt, returning an +/// event to emit from the `Network`. +/// +/// If the failed connection attempt was a dialing attempt and there +/// are more addresses to try, new `DialingOpts` are returned. +fn on_connection_failed<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>( + pool: &Pool::Error, TConnInfo, TPeerId>, + dialing: &mut FnvHashMap, + id: ConnectionId, + endpoint: ConnectedPoint, + error: PendingConnectionError, + handler: Option, +) -> (Option>, NetworkEvent<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>) +where + TTrans: Transport, + THandler: IntoConnectionHandler, + TConnInfo: ConnectionInfo + Send + 'static, + TPeerId: Eq + Hash + Clone, +{ + // Check if the failed connection is associated with a dialing attempt. + // TODO: could be more optimal than iterating over everything + let dialing_peer = dialing.iter() // (1) + .find(|(_, a)| a.id == id) + .map(|(p, _)| p.clone()); + + if let Some(peer_id) = dialing_peer { + // A pending outgoing connection to a known peer failed. 
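// For orientation: the address-rotation policy implemented in the lines that
// follow can be summarized by this helper (a sketch, not part of the diff):
//
//     fn next_address(mut remaining: Vec<Multiaddr>) -> Option<(Multiaddr, Vec<Multiaddr>)> {
//         if remaining.is_empty() { None } else { Some((remaining.remove(0), remaining)) }
//     }
//
// `on_connection_failed` does exactly this with `attempt.next`, additionally
// threading the recovered handler into the next `DialingOpts`.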
+ let attempt = dialing.remove(&peer_id).expect("by (1)"); + + let num_remain = attempt.next.len(); + let failed_addr = attempt.current.clone(); + + let new_state = if pool.is_connected(&peer_id) { + peer::PeerState::Connected + } else if num_remain == 0 { // (2) + peer::PeerState::Disconnected + } else { + peer::PeerState::Dialing { + num_pending_addresses: NonZeroUsize::new(num_remain).expect("by (2)"), + } + }; + + let opts = + if let Some(handler) = handler { + if !attempt.next.is_empty() { + let mut attempt = attempt; + let next_attempt = attempt.next.remove(0); + Some(DialingOpts { + peer: peer_id.clone(), + handler, + address: next_attempt, + remaining: attempt.next + }) + } else { + None + } + } else { + None + }; + + (opts, NetworkEvent::DialError { + new_state, + peer_id, + multiaddr: failed_addr, + error, + }) + } else { + // A pending incoming connection or outgoing connection to an unknown peer failed. + match endpoint { + ConnectedPoint::Dialer { address } => + (None, NetworkEvent::UnknownPeerDialError { + multiaddr: address, + error, + handler, + }), + ConnectedPoint::Listener { local_addr, send_back_addr } => + (None, NetworkEvent::IncomingConnectionError { + local_addr, + send_back_addr, + error + }) + } + } +} + +/// Information about the network obtained by [`Network::info()`]. +#[derive(Clone, Debug)] +pub struct NetworkInfo { + pub num_peers: usize, + pub num_connections: usize, + pub num_connections_pending: usize, + pub num_connections_established: usize, +} + +/// The possible errors of [`Network::dial`]. +#[derive(Debug)] +pub enum DialError { + /// The configured limit of pending outgoing connections has been reached. + MaxPending(ConnectionLimit), + /// A transport error occurred when creating the connection. + Transport(TransportError), +} + +impl fmt::Display for DialError +where T: fmt::Display, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + DialError::MaxPending(limit) => write!(f, "Dial error (pending limit): {}", limit.current), + DialError::Transport(err) => write!(f, "Dial error (transport): {}", err), + } + } +} + +impl std::error::Error for DialError +where T: std::error::Error + 'static, +{ + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + DialError::MaxPending(_) => None, + DialError::Transport(e) => Some(e), + } + } +} + +impl From> for DialError { + fn from(e: TransportError) -> DialError { + DialError::Transport(e) + } +} + +/// The (optional) configuration for a [`Network`]. +/// +/// The default configuration specifies no dedicated task executor +/// and no connection limits. 
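A usage sketch for the builder-style setters defined just below; the `executor`, `transport` and `local_peer_id` values are assumptions:

    let mut config = NetworkConfig::default();
    config.set_executor(Box::new(executor)); // any `Executor` implementation
    config.set_pending_incoming_limit(10)
        .set_pending_outgoing_limit(10)
        .set_established_per_peer_limit(2);
    let network = Network::new(transport, local_peer_id, config);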
+#[derive(Default)] +pub struct NetworkConfig { + executor: Option>, + pool_limits: PoolLimits, +} + +impl NetworkConfig { + pub fn set_executor(&mut self, e: Box) -> &mut Self { + self.executor = Some(e); + self + } + + pub fn executor(&self) -> Option<&Box> { + self.executor.as_ref() + } + + pub fn set_pending_incoming_limit(&mut self, n: usize) -> &mut Self { + self.pool_limits.max_pending_incoming = Some(n); + self + } + + pub fn set_pending_outgoing_limit(&mut self, n: usize) -> &mut Self { + self.pool_limits.max_pending_outgoing = Some(n); + self + } + + pub fn set_established_per_peer_limit(&mut self, n: usize) -> &mut Self { + self.pool_limits.max_established_per_peer = Some(n); + self + } +} + diff --git a/core/src/network/event.rs b/core/src/network/event.rs new file mode 100644 index 00000000..ef28fd0a --- /dev/null +++ b/core/src/network/event.rs @@ -0,0 +1,346 @@ +// Copyright 2018 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +//! Network events and associated information. + +use crate::{ + Multiaddr, + connection::{ + ConnectionId, + ConnectedPoint, + ConnectionError, + ConnectionHandler, + ConnectionInfo, + ConnectionLimit, + Connected, + EstablishedConnection, + IncomingInfo, + IntoConnectionHandler, + ListenerId, + PendingConnectionError, + Substream, + pool::Pool, + }, + muxing::StreamMuxer, + network::peer::PeerState, + transport::{Transport, TransportError}, +}; +use futures::prelude::*; +use std::{error, fmt, hash::Hash}; + +/// Event that can happen on the `Network`. +pub enum NetworkEvent<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId> +where + TTrans: Transport, + THandler: IntoConnectionHandler, +{ + /// One of the listeners gracefully closed. + ListenerClosed { + /// The listener ID that closed. + listener_id: ListenerId, + /// Reason for the closure. Contains `Ok(())` if the stream produced `None`, or `Err` + /// if the stream produced an error. + reason: Result<(), TTrans::Error>, + }, + + /// One of the listeners reported a non-fatal error. + ListenerError { + /// The listener that errored. + listener_id: ListenerId, + /// The listener error. + error: TTrans::Error + }, + + /// One of the listeners is now listening on an additional address. + NewListenerAddress { + /// The listener that is listening on the new address. + listener_id: ListenerId, + /// The new address the listener is now also listening on. 
+ listen_addr: Multiaddr + }, + + /// One of the listeners is no longer listening on some address. + ExpiredListenerAddress { + /// The listener that is no longer listening on some address. + listener_id: ListenerId, + /// The expired address. + listen_addr: Multiaddr + }, + + /// A new connection arrived on a listener. + IncomingConnection(IncomingConnectionEvent<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>), + + /// A new connection was arriving on a listener, but an error happened when negotiating it. + /// + /// This can include, for example, an error during the handshake of the encryption layer, or + /// the connection unexpectedly closed. + IncomingConnectionError { + /// Local connection address. + local_addr: Multiaddr, + /// Address used to send back data to the remote. + send_back_addr: Multiaddr, + /// The error that happened. + error: PendingConnectionError, + }, + + /// A new connection to a peer has been opened. + ConnectionEstablished { + /// The newly established connection. + connection: EstablishedConnection<'a, TInEvent, TConnInfo, TPeerId>, + /// The total number of established connections to the same peer. + num_established: usize, + }, + + /// An established connection to a peer has encountered an error. + /// + /// The connection is closed as a result of the error. + ConnectionError { + /// Information about the connection that encountered the error. + connected: Connected, + /// The error that occurred. + error: ConnectionError<::Error>, + /// The remaining number of established connections to the same peer. + num_established: usize, + }, + + /// A dialing attempt to an address of a peer failed. + DialError { + /// New state of a peer. + new_state: PeerState, + + /// Id of the peer we were trying to dial. + peer_id: TPeerId, + + /// The multiaddr we failed to reach. + multiaddr: Multiaddr, + + /// The error that happened. + error: PendingConnectionError, + }, + + /// Failed to reach a peer that we were trying to dial. + UnknownPeerDialError { + /// The multiaddr we failed to reach. + multiaddr: Multiaddr, + + /// The error that happened. + error: PendingConnectionError, + + /// The handler that was passed to `dial()`, if the + /// connection failed before the handler was consumed. + handler: Option, + }, + + /// An established connection produced an event. + ConnectionEvent { + /// The connection on which the event occurred. + connection: EstablishedConnection<'a, TInEvent, TConnInfo, TPeerId>, + /// Event that was produced by the node. 
+ event: TOutEvent, + }, +} + +impl fmt::Debug for + NetworkEvent<'_, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId> +where + TInEvent: fmt::Debug, + TOutEvent: fmt::Debug, + TTrans: Transport, + TTrans::Error: fmt::Debug, + THandler: IntoConnectionHandler, + ::Error: fmt::Debug, + TConnInfo: fmt::Debug, + TPeerId: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + match self { + NetworkEvent::NewListenerAddress { listener_id, listen_addr } => { + f.debug_struct("NewListenerAddress") + .field("listener_id", listener_id) + .field("listen_addr", listen_addr) + .finish() + } + NetworkEvent::ExpiredListenerAddress { listener_id, listen_addr } => { + f.debug_struct("ExpiredListenerAddress") + .field("listener_id", listener_id) + .field("listen_addr", listen_addr) + .finish() + } + NetworkEvent::ListenerClosed { listener_id, reason } => { + f.debug_struct("ListenerClosed") + .field("listener_id", listener_id) + .field("reason", reason) + .finish() + } + NetworkEvent::ListenerError { listener_id, error } => { + f.debug_struct("ListenerError") + .field("listener_id", listener_id) + .field("error", error) + .finish() + } + NetworkEvent::IncomingConnection(event) => { + f.debug_struct("IncomingConnection") + .field("local_addr", &event.local_addr) + .field("send_back_addr", &event.send_back_addr) + .finish() + } + NetworkEvent::IncomingConnectionError { local_addr, send_back_addr, error } => { + f.debug_struct("IncomingConnectionError") + .field("local_addr", local_addr) + .field("send_back_addr", send_back_addr) + .field("error", error) + .finish() + } + NetworkEvent::ConnectionEstablished { connection, .. } => { + f.debug_struct("ConnectionEstablished") + .field("connection", connection) + .finish() + } + NetworkEvent::ConnectionError { connected, error, .. } => { + f.debug_struct("ConnectionError") + .field("connected", connected) + .field("error", error) + .finish() + } + NetworkEvent::DialError { new_state, peer_id, multiaddr, error } => { + f.debug_struct("DialError") + .field("new_state", new_state) + .field("peer_id", peer_id) + .field("multiaddr", multiaddr) + .field("error", error) + .finish() + } + NetworkEvent::UnknownPeerDialError { multiaddr, error, .. } => { + f.debug_struct("UnknownPeerDialError") + .field("multiaddr", multiaddr) + .field("error", error) + .finish() + } + NetworkEvent::ConnectionEvent { connection, event } => { + f.debug_struct("ConnectionEvent") + .field("connection", connection) + .field("event", event) + .finish() + } + } + } +} + +/// A new connection arrived on a listener. +pub struct IncomingConnectionEvent<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId> +where + TTrans: Transport, + THandler: IntoConnectionHandler, +{ + /// The listener who received the connection. + pub(super) listener_id: ListenerId, + /// The produced upgrade. + pub(super) upgrade: TTrans::ListenerUpgrade, + /// Local connection address. + pub(super) local_addr: Multiaddr, + /// Address used to send back data to the remote. + pub(super) send_back_addr: Multiaddr, + /// Reference to the `peers` field of the `Network`. 
+ pub(super) pool: &'a mut Pool< + TInEvent, + TOutEvent, + THandler, + TTrans::Error, + ::Error, + TConnInfo, + TPeerId + >, +} + +impl<'a, TTrans, TInEvent, TOutEvent, TMuxer, THandler, TConnInfo, TPeerId> + IncomingConnectionEvent<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId> +where + TTrans: Transport, + TTrans::Error: Send + 'static, + TTrans::ListenerUpgrade: Send + 'static, + THandler: IntoConnectionHandler + Send + 'static, + THandler::Handler: ConnectionHandler, InEvent = TInEvent, OutEvent = TOutEvent> + Send + 'static, + ::OutboundOpenInfo: Send + 'static, // TODO: shouldn't be necessary + ::Error: error::Error + Send + 'static, + TMuxer: StreamMuxer + Send + Sync + 'static, + TMuxer::OutboundSubstream: Send, + TMuxer::Substream: Send, + TInEvent: Send + 'static, + TOutEvent: Send + 'static, + TConnInfo: fmt::Debug + ConnectionInfo + Send + 'static, + TPeerId: Eq + Hash + Clone + Send + 'static, +{ + /// The ID of the listener with the incoming connection. + pub fn listener_id(&self) -> ListenerId { + self.listener_id + } + + /// Starts processing the incoming connection and sets the handler to use for it. + pub fn accept(self, handler: THandler) -> Result { + self.accept_with_builder(|_| handler) + } + + /// Same as `accept`, but accepts a closure that turns a `IncomingInfo` into a handler. + pub fn accept_with_builder(self, builder: TBuilder) + -> Result + where + TBuilder: FnOnce(IncomingInfo<'_>) -> THandler + { + let handler = builder(self.info()); + let upgrade = self.upgrade + .map_err(|err| PendingConnectionError::Transport(TransportError::Other(err))); + let info = IncomingInfo { + local_addr: &self.local_addr, + send_back_addr: &self.send_back_addr, + }; + self.pool.add_incoming(upgrade, handler, info) + } +} + +impl + IncomingConnectionEvent<'_, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId> +where + TTrans: Transport, + THandler: IntoConnectionHandler, +{ + /// Returns the `IncomingInfo` corresponding to this incoming connection. + pub fn info(&self) -> IncomingInfo<'_> { + IncomingInfo { + local_addr: &self.local_addr, + send_back_addr: &self.send_back_addr, + } + } + + /// Local connection address. + pub fn local_addr(&self) -> &Multiaddr { + &self.local_addr + } + + /// Address used to send back data to the dialer. + pub fn send_back_addr(&self) -> &Multiaddr { + &self.send_back_addr + } + + /// Builds the `ConnectedPoint` corresponding to the incoming connection. + pub fn to_connected_point(&self) -> ConnectedPoint { + self.info().to_connected_point() + } +} + diff --git a/core/src/network/peer.rs b/core/src/network/peer.rs new file mode 100644 index 00000000..fcf2b863 --- /dev/null +++ b/core/src/network/peer.rs @@ -0,0 +1,594 @@ +// Copyright 2018 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
diff --git a/core/src/network/peer.rs b/core/src/network/peer.rs
new file mode 100644
index 00000000..fcf2b863
--- /dev/null
+++ b/core/src/network/peer.rs
@@ -0,0 +1,594 @@
+// Copyright 2018 Parity Technologies (UK) Ltd.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+
+use crate::{
+    Multiaddr,
+    Transport,
+    StreamMuxer,
+    connection::{
+        Connected,
+        ConnectedPoint,
+        ConnectionHandler,
+        ConnectionInfo,
+        Connection,
+        ConnectionId,
+        ConnectionLimit,
+        EstablishedConnection,
+        EstablishedConnectionIter,
+        IntoConnectionHandler,
+        PendingConnection,
+        Substream,
+    },
+};
+use std::{
+    collections::hash_map,
+    error,
+    fmt,
+    hash::Hash,
+    num::NonZeroUsize,
+};
+use super::{Network, DialingOpts};
+
+/// The state of a (remote) peer as seen by the local peer
+/// through a [`Network`].
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub enum PeerState {
+    /// The [`Network`] is connected to the peer, i.e. has at least one
+    /// established connection.
+    Connected,
+    /// We are currently trying to reach this peer.
+    Dialing {
+        /// Number of addresses we are trying to dial.
+        num_pending_addresses: NonZeroUsize,
+    },
+    /// The [`Network`] is disconnected from the peer, i.e. has no
+    /// established connection and no pending, outgoing connection.
+    Disconnected,
+}
+
+/// The possible representations of a peer in a [`Network`], as
+/// seen by the local node.
+pub enum Peer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
+where
+    TTrans: Transport,
+    THandler: IntoConnectionHandler<TConnInfo>
+{
+    /// At least one established connection exists to the peer.
+    Connected(ConnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>),
+
+    /// There is an ongoing dialing (i.e. outgoing connection) attempt
+    /// to the peer. There may already be other established connections
+    /// to the peer.
+    Dialing(DialingPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>),
+
+    /// There exists no established connection to the peer and there is
+    /// currently no ongoing dialing (i.e. outgoing connection) attempt
+    /// in progress.
+    ///
+    /// > **Note**: In this state there may always be a pending incoming
+    /// > connection attempt from the peer, however, the remote identity
+    /// > of a peer is only known once a connection is fully established.
+    Disconnected(DisconnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>),
+
+    /// The peer represents the local node.
+    Local,
+}
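The enum is meant to be consumed with a single `match` at the call site. A sketch, assuming the `Network` exposes a `peer(..)` accessor returning this enum (cf. the `pub(super)` constructor `Peer::new` below):

    match network.peer(remote_id) {
        // At least one established connection exists; use it.
        Peer::Connected(peer) => { /* e.g. peer.some_connection() */ }
        // An outgoing attempt is in flight; inspect or extend it.
        Peer::Dialing(peer) => { /* e.g. peer.connection().add_address(addr) */ }
        // Neither connected nor dialing; a dial must be initiated explicitly.
        Peer::Disconnected(peer) => { /* e.g. peer.connect(first, rest, handler) */ }
        // The given ID is the local node's own.
        Peer::Local => {}
    }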
+
+impl<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId> fmt::Debug for
+    Peer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
+where
+    TTrans: Transport,
+    THandler: IntoConnectionHandler<TConnInfo>,
+    TConnInfo: fmt::Debug + ConnectionInfo<PeerId = TPeerId>,
+    TPeerId: fmt::Debug + Eq + Hash,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+        match *self {
+            Peer::Connected(ConnectedPeer { ref peer_id, .. }) => {
+                f.debug_struct("Connected")
+                    .field("peer_id", peer_id)
+                    .finish()
+            }
+            Peer::Dialing(DialingPeer { ref peer_id, .. }) => {
+                f.debug_struct("DialingPeer")
+                    .field("peer_id", peer_id)
+                    .finish()
+            }
+            Peer::Disconnected(DisconnectedPeer { ref peer_id, .. }) => {
+                f.debug_struct("Disconnected")
+                    .field("peer_id", peer_id)
+                    .finish()
+            }
+            Peer::Local => {
+                f.debug_struct("Local")
+                    .finish()
+            }
+        }
+    }
+}
+
+impl<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
+    Peer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
+where
+    TTrans: Transport,
+    THandler: IntoConnectionHandler<TConnInfo>,
+    TPeerId: Eq + Hash,
+    TConnInfo: ConnectionInfo<PeerId = TPeerId>
+{
+    pub(super) fn new(
+        network: &'a mut Network<TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>,
+        peer_id: TPeerId
+    ) -> Self {
+        if peer_id == network.local_peer_id {
+            return Peer::Local;
+        }
+
+        if network.pool.is_connected(&peer_id) {
+            return Self::connected(network, peer_id)
+        }
+
+        if network.dialing.get_mut(&peer_id).is_some() {
+            return Self::dialing(network, peer_id);
+        }
+
+        Self::disconnected(network, peer_id)
+    }
+
+    fn disconnected(
+        network: &'a mut Network<TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>,
+        peer_id: TPeerId
+    ) -> Self {
+        Peer::Disconnected(DisconnectedPeer { network, peer_id })
+    }
+
+    fn connected(
+        network: &'a mut Network<TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>,
+        peer_id: TPeerId
+    ) -> Self {
+        Peer::Connected(ConnectedPeer { network, peer_id })
+    }
+
+    fn dialing(
+        network: &'a mut Network<TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>,
+        peer_id: TPeerId
+    ) -> Self {
+        Peer::Dialing(DialingPeer { network, peer_id })
+    }
+}
+
+impl<'a, TTrans, TMuxer, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
+    Peer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
+where
+    TTrans: Transport<Output = (TConnInfo, TMuxer)> + Clone,
+    TTrans::Error: Send + 'static,
+    TTrans::Dial: Send + 'static,
+    TMuxer: StreamMuxer + Send + Sync + 'static,
+    TMuxer::OutboundSubstream: Send,
+    TMuxer::Substream: Send,
+    TInEvent: Send + 'static,
+    TOutEvent: Send + 'static,
+    THandler: IntoConnectionHandler<TConnInfo> + Send + 'static,
+    THandler::Handler: ConnectionHandler<Substream = Substream<TMuxer>, InEvent = TInEvent, OutEvent = TOutEvent> + Send + 'static,
+    <THandler::Handler as ConnectionHandler>::OutboundOpenInfo: Send + 'static, // TODO: shouldn't be necessary
+    <THandler::Handler as ConnectionHandler>::Error: error::Error + Send + 'static,
+    TConnInfo: fmt::Debug + ConnectionInfo<PeerId = TPeerId> + Send + 'static,
+    TPeerId: Eq + Hash + Clone + Send + 'static,
+{
+    /// If we are connected, returns the `ConnectedPeer`.
+    pub fn into_connected(self) -> Option<
+        ConnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
+    > {
+        match self {
+            Peer::Connected(peer) => Some(peer),
+            _ => None,
+        }
+    }
+
+    /// If a connection is pending, returns the `DialingPeer`.
+    pub fn into_dialing(self) -> Option<
+        DialingPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
+    > {
+        match self {
+            Peer::Dialing(peer) => Some(peer),
+            _ => None,
+        }
+    }
+
+    /// If we are not connected, returns the `DisconnectedPeer`.
+    pub fn into_disconnected(self) -> Option<
+        DisconnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
+    > {
+        match self {
+            Peer::Disconnected(peer) => Some(peer),
+            _ => None,
+        }
+    }
+}
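The `into_*` conversions make "only if connected" logic concise. A sketch (here `event` is an application-defined `TInEvent`, and `notify_handler` is an assumed method on `EstablishedConnection`, not shown in this hunk):

    if let Some(mut peer) = network.peer(remote_id).into_connected() {
        // Deliver an event to the handler of some established connection.
        let mut connection = peer.some_connection();
        let _ = connection.notify_handler(event);
    }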
+
+/// The representation of a peer in a [`Network`] to whom at least
+/// one established connection exists.
+pub struct ConnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
+where
+    TTrans: Transport,
+    THandler: IntoConnectionHandler<TConnInfo>,
+{
+    network: &'a mut Network<TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>,
+    peer_id: TPeerId,
+}
+
+impl<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
+    ConnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
+where
+    TTrans: Transport,
+    THandler: IntoConnectionHandler<TConnInfo>,
+    TConnInfo: ConnectionInfo<PeerId = TPeerId>,
+    TPeerId: Eq + Hash + Clone,
+{
+    /// Attempts to establish a new connection to this peer using the given addresses,
+    /// if there is currently no ongoing dialing attempt.
+    ///
+    /// Existing established connections are not affected.
+    ///
+    /// > **Note**: If there is an ongoing dialing attempt, a `DialingPeer`
+    /// > is returned with the given addresses and handler being ignored.
+    /// > You may want to check [`ConnectedPeer::is_dialing`] first.
+    pub fn connect<I, TMuxer>(self, address: Multiaddr, remaining: I, handler: THandler)
+        -> Result<DialingPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>,
+                  ConnectionLimit>
+    where
+        I: IntoIterator<Item = Multiaddr>,
+        THandler: Send + 'static,
+        THandler::Handler: Send,
+        <THandler::Handler as ConnectionHandler>::Error: error::Error + Send,
+        <THandler::Handler as ConnectionHandler>::OutboundOpenInfo: Send,
+        THandler::Handler: ConnectionHandler<Substream = Substream<TMuxer>, InEvent = TInEvent, OutEvent = TOutEvent> + Send,
+        TTrans: Transport<Output = (TConnInfo, TMuxer)> + Clone,
+        TTrans::Error: Send + 'static,
+        TTrans::Dial: Send + 'static,
+        TMuxer: StreamMuxer + Send + Sync + 'static,
+        TMuxer::OutboundSubstream: Send,
+        TMuxer::Substream: Send,
+        TConnInfo: fmt::Debug + Send + 'static,
+        TPeerId: Eq + Hash + Clone + Send + 'static,
+        TInEvent: Send + 'static,
+        TOutEvent: Send + 'static,
+    {
+        if self.network.dialing.contains_key(&self.peer_id) {
+            let peer = DialingPeer {
+                network: self.network,
+                peer_id: self.peer_id
+            };
+            Ok(peer)
+        } else {
+            self.network.dial_peer(DialingOpts {
+                peer: self.peer_id.clone(),
+                handler,
+                address,
+                remaining: remaining.into_iter().collect(),
+            })?;
+            Ok(DialingPeer {
+                network: self.network,
+                peer_id: self.peer_id,
+            })
+        }
+    }
+
+    /// Obtains an existing connection to the peer.
+    pub fn connection<'b>(&'b mut self, id: ConnectionId)
+        -> Option<EstablishedConnection<'b, TInEvent, TConnInfo, TPeerId>>
+    {
+        self.network.pool.get_established(id)
+    }
+
+    /// The number of established connections to the peer.
+    pub fn num_connections(&self) -> usize {
+        self.network.pool.num_peer_established(&self.peer_id)
+    }
+
+    /// Checks whether there is an ongoing dialing attempt to the peer.
+    ///
+    /// Returns `true` iff [`ConnectedPeer::into_dialing`] returns `Some`.
+    pub fn is_dialing(&self) -> bool {
+        self.network.dialing.contains_key(&self.peer_id)
+    }
+
+    /// Turns this peer into a [`DialingPeer`], if there is an ongoing
+    /// dialing attempt, `None` otherwise.
+    pub fn into_dialing(self) -> Option<
+        DialingPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
+    > {
+        if self.network.dialing.contains_key(&self.peer_id) {
+            Some(DialingPeer { network: self.network, peer_id: self.peer_id })
+        } else {
+            None
+        }
+    }
+
+    /// Gets an iterator over all established connections of the peer.
+    pub fn connections<'b>(&'b mut self) ->
+        EstablishedConnectionIter<'b,
+            impl Iterator<Item = ConnectionId>,
+            TInEvent,
+            TOutEvent,
+            THandler,
+            TTrans::Error,
+            <THandler::Handler as ConnectionHandler>::Error,
+            TConnInfo,
+            TPeerId>
+    {
+        self.network.pool.iter_peer_established(&self.peer_id)
+    }
+
+    /// Obtains some established connection to the peer.
+    pub fn some_connection<'b>(&'b mut self)
+        -> EstablishedConnection<'b, TInEvent, TConnInfo, TPeerId>
+    {
+        self.connections()
+            .into_first()
+            .expect("By `Peer::new` and the definition of `ConnectedPeer`.")
+    }
+
+    /// Disconnects from the peer, closing all connections.
+    pub fn disconnect(self)
+        -> DisconnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
+    {
+        self.network.disconnect(&self.peer_id);
+        DisconnectedPeer { network: self.network, peer_id: self.peer_id }
+    }
+}
+
+impl<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId> fmt::Debug for
+    ConnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
+where
+    TTrans: Transport,
+    THandler: IntoConnectionHandler<TConnInfo>,
+    TPeerId: fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+        f.debug_struct("ConnectedPeer")
+            .field("peer_id", &self.peer_id)
+            .finish()
+    }
+}
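Putting the accessors together (sketch; `peer` is assumed to be a `ConnectedPeer`, e.g. obtained via `into_connected()` above):

    // A `ConnectedPeer` is guaranteed at least one established connection.
    println!("{} established connection(s)", peer.num_connections());
    // `disconnect` consumes the handle, closes all connections to the peer
    // and returns the `DisconnectedPeer` representation.
    let _disconnected = peer.disconnect();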
+
+/// The representation of a peer in a [`Network`] to whom a dialing
+/// attempt is ongoing. There may already exist other established
+/// connections to this peer.
+pub struct DialingPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
+where
+    TTrans: Transport,
+    THandler: IntoConnectionHandler<TConnInfo>,
+{
+    network: &'a mut Network<TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>,
+    peer_id: TPeerId,
+}
+
+impl<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
+    DialingPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
+where
+    TTrans: Transport,
+    THandler: IntoConnectionHandler<TConnInfo>,
+    TConnInfo: ConnectionInfo<PeerId = TPeerId>,
+    TPeerId: Eq + Hash + Clone,
+{
+    /// Disconnects from this peer, closing all pending connections.
+    pub fn disconnect(self)
+        -> DisconnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
+    {
+        self.network.disconnect(&self.peer_id);
+        DisconnectedPeer { network: self.network, peer_id: self.peer_id }
+    }
+
+    /// Obtains the connection that is currently being established.
+    pub fn connection<'b>(&'b mut self) -> DialingConnection<'b, TInEvent, TConnInfo, TPeerId> {
+        let attempt = match self.network.dialing.entry(self.peer_id.clone()) {
+            hash_map::Entry::Occupied(e) => e,
+            _ => unreachable!("By `Peer::new` and the definition of `DialingPeer`.")
+        };
+
+        let inner = self.network.pool
+            .get_outgoing(attempt.get().id)
+            .expect("By consistency of `network.pool` with `network.dialing`.");
+
+        DialingConnection {
+            inner, dialing: attempt, peer_id: &self.peer_id
+        }
+    }
+}
+
+impl<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId> fmt::Debug for
+    DialingPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
+where
+    TTrans: Transport,
+    THandler: IntoConnectionHandler<TConnInfo>,
+    TPeerId: fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+        f.debug_struct("DialingPeer")
+            .field("peer_id", &self.peer_id)
+            .finish()
+    }
+}
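When additional addresses for the peer are discovered while a dial is in flight (e.g. from a routing table), they can be appended to the ongoing attempt. A sketch (`more_addrs: Vec<Multiaddr>` is assumed):

    if let Some(mut dialing) = network.peer(remote_id).into_dialing() {
        // Queue the addresses behind the one currently being tried;
        // duplicates are ignored (see `DialingConnection::add_addresses`).
        dialing.connection().add_addresses(more_addrs);
    }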
+
+/// The representation of a peer to whom the `Network` has currently
+/// neither an established connection, nor an ongoing dialing attempt
+/// initiated by the local peer.
+pub struct DisconnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
+where
+    TTrans: Transport,
+    THandler: IntoConnectionHandler<TConnInfo>,
+{
+    peer_id: TPeerId,
+    network: &'a mut Network<TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>,
+}
+
+impl<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId> fmt::Debug for
+    DisconnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
+where
+    TTrans: Transport,
+    THandler: IntoConnectionHandler<TConnInfo>,
+    TPeerId: fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+        f.debug_struct("DisconnectedPeer")
+            .field("peer_id", &self.peer_id)
+            .finish()
+    }
+}
+
+impl<'a, TTrans, TInEvent, TOutEvent, TMuxer, THandler, TConnInfo, TPeerId>
+    DisconnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
+where
+    TTrans: Transport<Output = (TConnInfo, TMuxer)> + Clone,
+    TTrans::Error: Send + 'static,
+    TTrans::Dial: Send + 'static,
+    TMuxer: StreamMuxer + Send + Sync + 'static,
+    TMuxer::OutboundSubstream: Send,
+    TMuxer::Substream: Send,
+    THandler: IntoConnectionHandler<TConnInfo> + Send + 'static,
+    THandler::Handler: ConnectionHandler<Substream = Substream<TMuxer>, InEvent = TInEvent, OutEvent = TOutEvent> + Send,
+    <THandler::Handler as ConnectionHandler>::OutboundOpenInfo: Send,
+    <THandler::Handler as ConnectionHandler>::Error: error::Error + Send,
+    TInEvent: Send + 'static,
+    TOutEvent: Send + 'static,
+{
+    /// Attempts to connect to this peer using the given addresses.
+    pub fn connect<TIter>(self, first: Multiaddr, rest: TIter, handler: THandler)
+        -> Result<DialingPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>,
+                  ConnectionLimit>
+    where
+        TIter: IntoIterator<Item = Multiaddr>,
+        TConnInfo: fmt::Debug + ConnectionInfo<PeerId = TPeerId> + Send + 'static,
+        TPeerId: Eq + Hash + Clone + Send + 'static,
+    {
+        self.network.dial_peer(DialingOpts {
+            peer: self.peer_id.clone(),
+            handler,
+            address: first,
+            remaining: rest.into_iter().collect(),
+        })?;
+        Ok(DialingPeer {
+            network: self.network,
+            peer_id: self.peer_id,
+        })
+    }
+
+    /// Moves the peer into a connected state by supplying an existing
+    /// established connection.
+    ///
+    /// No event is generated for this action.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `connected.peer_id()` does not identify the current peer.
+    ///
+    pub fn set_connected(
+        self,
+        connected: Connected<TConnInfo>,
+        connection: Connection<TMuxer, THandler::Handler>,
+    ) -> Result<
+        ConnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>,
+        ConnectionLimit
+    > where
+        TConnInfo: fmt::Debug + ConnectionInfo<PeerId = TPeerId> + Clone + Send + 'static,
+        TPeerId: Eq + Hash + Clone + fmt::Debug,
+    {
+        if connected.peer_id() != &self.peer_id {
+            panic!("Invalid peer ID given: {:?}. Expected: {:?}", connected.peer_id(), self.peer_id)
+        }
+
+        self.network.pool.add(connection, connected)
+            .map(|_id| ConnectedPeer {
+                network: self.network,
+                peer_id: self.peer_id,
+            })
+    }
+}
+
+/// Attempt to reach a peer.
+#[derive(Debug, Clone)]
+pub(super) struct DialingAttempt {
+    /// Identifier for the reach attempt.
+    pub(super) id: ConnectionId,
+    /// Multiaddr currently being attempted.
+    pub(super) current: Multiaddr,
+    /// Multiaddresses to attempt if the current one fails.
+    pub(super) next: Vec<Multiaddr>,
+}
+
+/// A `DialingConnection` is a [`PendingConnection`] where the local peer
+/// has the role of the dialer (i.e. initiator) and the (expected) remote
+/// peer ID is known.
+pub struct DialingConnection<'a, TInEvent, TConnInfo, TPeerId> {
+    peer_id: &'a TPeerId,
+    inner: PendingConnection<'a, TInEvent, TConnInfo, TPeerId>,
+    dialing: hash_map::OccupiedEntry<'a, TPeerId, DialingAttempt>,
+}
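Initiating a dial from the disconnected representation then looks as follows (sketch inside a function returning `Result<(), ConnectionLimit>`; `first_addr`, `fallback_addrs` and `build_handler()` are assumed placeholders):

    let dialing = match network.peer(remote_id) {
        Peer::Disconnected(peer) => peer.connect(first_addr, fallback_addrs, build_handler())?,
        Peer::Dialing(peer) => peer, // an attempt is already in progress
        Peer::Connected(_) | Peer::Local => return Ok(()),
    };
    // The new attempt can now be inspected or extended via `dialing.connection()`.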
+
+impl<'a, TInEvent, TConnInfo, TPeerId>
+    DialingConnection<'a, TInEvent, TConnInfo, TPeerId>
+{
+    /// Returns the local connection ID.
+    pub fn id(&self) -> ConnectionId {
+        self.inner.id()
+    }
+
+    /// Returns the (expected) peer ID of the ongoing connection attempt.
+    pub fn peer_id(&self) -> &TPeerId {
+        self.peer_id
+    }
+
+    /// Returns information about this endpoint of the connection attempt.
+    pub fn endpoint(&self) -> &ConnectedPoint {
+        self.inner.endpoint()
+    }
+
+    /// Aborts the connection attempt.
+    pub fn abort(self)
+    where
+        TPeerId: Eq + Hash + Clone,
+    {
+        self.dialing.remove();
+        self.inner.abort();
+    }
+
+    /// Adds new candidate addresses to the end of the addresses used
+    /// in the ongoing dialing process.
+    ///
+    /// Duplicates are ignored.
+    pub fn add_addresses(&mut self, addrs: impl IntoIterator<Item = Multiaddr>) {
+        for addr in addrs {
+            self.add_address(addr);
+        }
+    }
+
+    /// Adds an address to the end of the addresses used in the ongoing
+    /// dialing process.
+    ///
+    /// Duplicates are ignored.
+    pub fn add_address(&mut self, addr: Multiaddr) {
+        if self.dialing.get().next.iter().all(|a| a != &addr) {
+            self.dialing.get_mut().next.push(addr);
+        }
+    }
+}
+
diff --git a/core/src/nodes.rs b/core/src/nodes.rs
deleted file mode 100644
index 87cb13d9..00000000
--- a/core/src/nodes.rs
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2018 Parity Technologies (UK) Ltd.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a
-// copy of this software and associated documentation files (the "Software"),
-// to deal in the Software without restriction, including without limitation
-// the rights to use, copy, modify, merge, publish, distribute, sublicense,
-// and/or sell copies of the Software, and to permit persons to whom the
-// Software is furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-// DEALINGS IN THE SOFTWARE.
-
-//! Low-level networking primitives.
-//!
-//! Contains structs that are aiming at providing very precise control over what happens over the
-//! network.
-//!
-//! The more complete and highest-level struct is the `Network`. The `Network` directly or
-//! indirectly uses all the other structs of this module.
-
-pub mod collection;
-pub mod handled_node;
-pub mod tasks;
-pub mod listeners;
-pub mod node;
-pub mod network;
-
-pub use collection::ConnectionInfo;
-pub use node::Substream;
-pub use handled_node::{NodeHandlerEvent, NodeHandlerEndpoint};
-pub use network::{Peer, Network, NetworkEvent};
-pub use listeners::ListenerId;
-
diff --git a/core/src/nodes/collection.rs b/core/src/nodes/collection.rs
deleted file mode 100644
index eb346092..00000000
--- a/core/src/nodes/collection.rs
+++ /dev/null
@@ -1,659 +0,0 @@
-// Copyright 2018 Parity Technologies (UK) Ltd.
-// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -use crate::{ - Executor, - PeerId, - muxing::StreamMuxer, - nodes::{ - node::Substream, - handled_node::{HandledNodeError, IntoNodeHandler, NodeHandler}, - tasks::{self, ClosedTask, TaskEntry, TaskId} - } -}; -use fnv::FnvHashMap; -use futures::prelude::*; -use std::{error, fmt, hash::Hash, mem, task::Context, task::Poll}; - -/// Implementation of `Stream` that handles a collection of nodes. -pub struct CollectionStream { - /// Object that handles the tasks. - /// - /// The user data contains the state of the task. If `Connected`, then a corresponding entry - /// must be present in `nodes`. - inner: tasks::Manager, TConnInfo>, - - /// List of nodes, with the task id that handles this node. The corresponding entry in `tasks` - /// must always be in the `Connected` state. - nodes: FnvHashMap, -} - -impl fmt::Debug for - CollectionStream -where - TConnInfo: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - f.debug_tuple("CollectionStream").finish() - } -} - -impl Unpin for - CollectionStream { } - -/// State of a task. -#[derive(Debug, Clone, PartialEq, Eq)] -enum TaskState { - /// Task is attempting to reach a peer. - Pending, - /// The task is connected to a peer. - Connected(TConnInfo, TUserData), -} - -/// Event that can happen on the `CollectionStream`. -pub enum CollectionEvent<'a, TInEvent, TOutEvent, THandler, TReachErr, THandlerErr, TUserData, TConnInfo, TPeerId> { - /// A connection to a node has succeeded. You must use the provided event in order to accept - /// the connection. - NodeReached(CollectionReachEvent<'a, TInEvent, TOutEvent, THandler, TReachErr, THandlerErr, TUserData, TConnInfo, TPeerId>), - - /// A connection to a node has errored. - /// - /// Can only happen after a node has been successfully reached. - NodeClosed { - /// Information about the connection. - conn_info: TConnInfo, - /// The error that happened. - error: HandledNodeError, - /// User data that was passed when accepting. - user_data: TUserData, - }, - - /// An error happened on the future that was trying to reach a node. - ReachError { - /// Identifier of the reach attempt that failed. - id: ReachAttemptId, - /// Error that happened on the future. - error: TReachErr, - /// The handler that was passed to `add_reach_attempt`. - handler: THandler, - }, - - /// A node has produced an event. - NodeEvent { - /// The node that has generated the event. 
- peer: PeerMut<'a, TInEvent, TUserData, TConnInfo, TPeerId>, - /// The produced event. - event: TOutEvent, - }, -} - -impl<'a, TInEvent, TOutEvent, THandler, TReachErr, THandlerErr, TUserData, TConnInfo, TPeerId> fmt::Debug for - CollectionEvent<'a, TInEvent, TOutEvent, THandler, TReachErr, THandlerErr, TUserData, TConnInfo, TPeerId> -where TOutEvent: fmt::Debug, - TReachErr: fmt::Debug, - THandlerErr: fmt::Debug, - TConnInfo: fmt::Debug, - TUserData: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - match *self { - CollectionEvent::NodeReached(ref inner) => { - f.debug_tuple("CollectionEvent::NodeReached") - .field(inner) - .finish() - }, - CollectionEvent::NodeClosed { ref conn_info, ref error, ref user_data } => { - f.debug_struct("CollectionEvent::NodeClosed") - .field("conn_info", conn_info) - .field("user_data", user_data) - .field("error", error) - .finish() - }, - CollectionEvent::ReachError { ref id, ref error, .. } => { - f.debug_struct("CollectionEvent::ReachError") - .field("id", id) - .field("error", error) - .finish() - }, - CollectionEvent::NodeEvent { ref peer, ref event } => { - f.debug_struct("CollectionEvent::NodeEvent") - .field("conn_info", peer.info()) - .field("event", event) - .finish() - }, - } - } -} - -/// Event that happens when we reach a node. -#[must_use = "The node reached event is used to accept the newly-opened connection"] -pub struct CollectionReachEvent<'a, TInEvent, TOutEvent, THandler, TReachErr, THandlerErr, TUserData, TConnInfo = PeerId, TPeerId = PeerId> { - /// Information about the connection, or `None` if it's been extracted. - conn_info: Option, - /// The task id that reached the node. - id: TaskId, - /// The `CollectionStream` we are referencing. - parent: &'a mut CollectionStream, -} - -impl<'a, TInEvent, TOutEvent, THandler, TReachErr, THandlerErr, TUserData, TConnInfo, TPeerId> - CollectionReachEvent<'a, TInEvent, TOutEvent, THandler, TReachErr, THandlerErr, TUserData, TConnInfo, TPeerId> -{ - /// Returns the information of the connection. - pub fn connection_info(&self) -> &TConnInfo { - self.conn_info.as_ref().expect("conn_info is always Some when the object is alive; QED") - } - - /// Returns the identity of the node we connected to. - pub fn peer_id(&self) -> &TPeerId - where - TConnInfo: ConnectionInfo, - TPeerId: Eq + Hash, - { - self.connection_info().peer_id() - } - - /// Returns the reach attempt that reached the node. - #[inline] - pub fn reach_attempt_id(&self) -> ReachAttemptId { - ReachAttemptId(self.id) - } -} - -impl<'a, TInEvent, TOutEvent, THandler, TReachErr, THandlerErr, TUserData, TConnInfo, TPeerId> - CollectionReachEvent<'a, TInEvent, TOutEvent, THandler, TReachErr, THandlerErr, TUserData, TConnInfo, TPeerId> -where - TConnInfo: ConnectionInfo, - TPeerId: Eq + Hash, -{ - /// Returns `true` if accepting this reached node would replace an existing connection to that - /// node. - #[inline] - pub fn would_replace(&self) -> bool { - self.parent.nodes.contains_key(self.connection_info().peer_id()) - } - - /// Accepts the new node. - pub fn accept(mut self, user_data: TUserData) -> (CollectionNodeAccept, TConnInfo) - where - // TODO: these two clones shouldn't be necessary if we return references - TConnInfo: Clone, - TPeerId: Clone, - { - let self_conn_info = self.conn_info.take() - .expect("conn_info is always Some when the object is alive; QED"); - - // Set the state of the task to `Connected`. 
- let former_task_id = self.parent.nodes.insert(self_conn_info.peer_id().clone(), self.id); - *self.parent.inner.task(self.id) - .expect("A CollectionReachEvent is only ever created from a valid attempt; QED") - .user_data_mut() = TaskState::Connected(self_conn_info.clone(), user_data); - - // It is possible that we already have a task connected to the same peer. In this - // case, we need to emit a `NodeReplaced` event. - let tasks = &mut self.parent.inner; - let ret_value = if let Some(former_task) = former_task_id.and_then(|i| tasks.task(i)) { - debug_assert!(match *former_task.user_data() { - TaskState::Connected(ref p, _) if p.peer_id() == self_conn_info.peer_id() => true, - _ => false - }); - let (old_info, user_data) = match former_task.close().into_user_data() { - TaskState::Connected(old_info, user_data) => (old_info, user_data), - _ => panic!("The former task was picked from `nodes`; all the nodes in `nodes` \ - are always in the connected state") - }; - (CollectionNodeAccept::ReplacedExisting(old_info, user_data), self_conn_info) - - } else { - (CollectionNodeAccept::NewEntry, self_conn_info) - }; - - // Don't run the destructor. - mem::forget(self); - - ret_value - } - - /// Denies the node. - /// - /// Has the same effect as dropping the event without accepting it. - #[inline] - pub fn deny(mut self) -> TConnInfo { - let conn_info = self.conn_info.take() - .expect("conn_info is always Some when the object is alive; QED"); - drop(self); // Just to be explicit - conn_info - } -} - -impl<'a, TInEvent, TOutEvent, THandler, TReachErr, THandlerErr, TUserData, TConnInfo, TPeerId> fmt::Debug for - CollectionReachEvent<'a, TInEvent, TOutEvent, THandler, TReachErr, THandlerErr, TUserData, TConnInfo, TPeerId> -where - TConnInfo: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - f.debug_struct("CollectionReachEvent") - .field("conn_info", &self.conn_info) - .field("reach_attempt_id", &self.reach_attempt_id()) - .finish() - } -} - -impl<'a, TInEvent, TOutEvent, THandler, TReachErr, THandlerErr, TUserData, TConnInfo, TPeerId> Drop for - CollectionReachEvent<'a, TInEvent, TOutEvent, THandler, TReachErr, THandlerErr, TUserData, TConnInfo, TPeerId> -{ - fn drop(&mut self) { - let task = self.parent.inner.task(self.id) - .expect("we create the CollectionReachEvent with a valid task id; the \ - CollectionReachEvent mutably borrows the collection, therefore nothing \ - can delete this task during the lifetime of the CollectionReachEvent; \ - therefore the task is still valid when we delete it; QED"); - debug_assert!(if let TaskState::Pending = task.user_data() { true } else { false }); - task.close(); - } -} - -/// Outcome of accepting a node. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum CollectionNodeAccept { - /// We replaced an existing node. Returns the information about the old connection and the - /// user data that was assigned to this node. - ReplacedExisting(TConnInfo, TUserData), - /// We didn't replace anything existing. - NewEntry, -} - -/// Identifier for a future that attempts to reach a node. -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] -pub struct ReachAttemptId(TaskId); - -/// Information about a connection. -pub trait ConnectionInfo { - /// Identity of the node we are connected to. - type PeerId: Eq + Hash; - - /// Returns the identity of the node we are connected to on this connection. 
- fn peer_id(&self) -> &Self::PeerId; -} - -impl ConnectionInfo for PeerId { - type PeerId = PeerId; - - fn peer_id(&self) -> &PeerId { - self - } -} - -impl - CollectionStream -where - TConnInfo: ConnectionInfo, - TPeerId: Eq + Hash, -{ - /// Creates a new empty collection. If `executor` is `Some`, uses the given executor to spawn - /// tasks. Otherwise, runs tasks locally. - pub fn new(executor: Option>) -> Self { - CollectionStream { - inner: tasks::Manager::new(executor), - nodes: Default::default(), - } - } - - /// Adds to the collection a future that tries to reach a remote. - /// - /// This method spawns a task dedicated to resolving this future and processing the node's - /// events. - pub fn add_reach_attempt(&mut self, future: TFut, handler: THandler) - -> ReachAttemptId - where - TFut: Future> + Send + 'static, - THandler: IntoNodeHandler + Send + 'static, - THandler::Handler: NodeHandler, InEvent = TInEvent, OutEvent = TOutEvent, Error = THandlerErr> + Send + 'static, - ::OutboundOpenInfo: Send + 'static, - TReachErr: error::Error + Send + 'static, - THandlerErr: error::Error + Send + 'static, - TInEvent: Send + 'static, - TOutEvent: Send + 'static, - TMuxer: StreamMuxer + Send + Sync + 'static, - TMuxer::OutboundSubstream: Send + 'static, - TConnInfo: Send + 'static, - { - ReachAttemptId(self.inner.add_reach_attempt(future, TaskState::Pending, handler)) - } - - /// Interrupts a reach attempt. - /// - /// Returns `Ok` if something was interrupted, and `Err` if the ID is not or no longer valid. - pub fn interrupt(&mut self, id: ReachAttemptId) -> Result, InterruptError> { - match self.inner.task(id.0) { - None => Err(InterruptError::ReachAttemptNotFound), - Some(task) => { - match task.user_data() { - TaskState::Connected(_, _) => return Err(InterruptError::AlreadyReached), - TaskState::Pending => (), - }; - - Ok(InterruptedReachAttempt { - inner: task.close(), - }) - } - } - } - - /// Sends an event to all nodes. - /// - /// This function is "atomic", in the sense that if `Poll::Pending` is returned then no event - /// has been sent to any node yet. - #[must_use] - pub fn poll_broadcast(&mut self, event: &TInEvent, cx: &mut Context) -> Poll<()> - where - TInEvent: Clone - { - self.inner.poll_broadcast(event, cx) - } - - /// Adds an existing connection to a node to the collection. - /// - /// Returns whether we have replaced an existing connection, or not. - pub fn add_connection(&mut self, conn_info: TConnInfo, user_data: TUserData, muxer: TMuxer, handler: THandler::Handler) - -> CollectionNodeAccept - where - THandler: IntoNodeHandler + Send + 'static, - THandler::Handler: NodeHandler, InEvent = TInEvent, OutEvent = TOutEvent, Error = THandlerErr> + Send + 'static, - ::OutboundOpenInfo: Send + 'static, - TReachErr: error::Error + Send + 'static, - THandlerErr: error::Error + Send + 'static, - TInEvent: Send + 'static, - TOutEvent: Send + 'static, - TMuxer: StreamMuxer + Send + Sync + 'static, - TMuxer::OutboundSubstream: Send + 'static, - TConnInfo: Clone + Send + 'static, - TPeerId: Clone, - { - // Calling `tasks::Manager::add_connection` is the same as calling - // `tasks::Manager::add_reach_attempt`, except that we don't get any `NodeReached` event. - // We therefore implement this method the same way as calling `add_reach_attempt` followed - // with simulating a received `NodeReached` event and accepting it. 
- - let task_id = self.inner.add_connection( - TaskState::Pending, - muxer, - handler - ); - - CollectionReachEvent { - conn_info: Some(conn_info), - id: task_id, - parent: self, - }.accept(user_data).0 - } - - /// Grants access to an object that allows controlling a peer of the collection. - /// - /// Returns `None` if we don't have a connection to this peer. - #[inline] - pub fn peer_mut(&mut self, id: &TPeerId) -> Option> { - let task = match self.nodes.get(id) { - Some(&task) => task, - None => return None, - }; - - match self.inner.task(task) { - Some(inner) => Some(PeerMut { - inner, - nodes: &mut self.nodes, - }), - None => None, - } - } - - /// Returns true if we are connected to the given peer. - /// - /// This will return true only after a `NodeReached` event has been produced by `poll()`. - #[inline] - pub fn has_connection(&self, id: &TPeerId) -> bool { - self.nodes.contains_key(id) - } - - /// Returns a list of all the active connections. - /// - /// Does not include reach attempts that haven't reached any target yet. - #[inline] - pub fn connections(&self) -> impl Iterator { - self.nodes.keys() - } - - /// Provides an API similar to `Stream`, except that it cannot error. - /// - /// > **Note**: we use a regular `poll` method instead of implementing `Stream` in order to - /// > remove the `Err` variant, but also because we want the `CollectionStream` to stay - /// > borrowed if necessary. - pub fn poll(&mut self, cx: &mut Context) -> Poll> - where - TConnInfo: Clone, // TODO: Clone shouldn't be necessary - { - let item = match self.inner.poll(cx) { - Poll::Ready(item) => item, - Poll::Pending => return Poll::Pending, - }; - - match item { - tasks::Event::TaskClosed { task, result, handler } => { - let id = task.id(); - let user_data = task.into_user_data(); - - match (user_data, result, handler) { - (TaskState::Pending, tasks::Error::Reach(err), Some(handler)) => { - Poll::Ready(CollectionEvent::ReachError { - id: ReachAttemptId(id), - error: err, - handler, - }) - }, - (TaskState::Pending, tasks::Error::Node(_), _) => { - panic!("We switch the task state to Connected once we're connected, and \ - a tasks::Error::Node can only happen after we're connected; QED"); - }, - (TaskState::Pending, tasks::Error::Reach(_), None) => { - // TODO: this could be improved in the API of tasks::Manager - panic!("The tasks::Manager is guaranteed to always return the handler \ - when producing a tasks::Error::Reach error"); - }, - (TaskState::Connected(conn_info, user_data), tasks::Error::Node(err), _handler) => { - debug_assert!(_handler.is_none()); - let _node_task_id = self.nodes.remove(conn_info.peer_id()); - debug_assert_eq!(_node_task_id, Some(id)); - Poll::Ready(CollectionEvent::NodeClosed { - conn_info, - error: err, - user_data, - }) - }, - (TaskState::Connected(_, _), tasks::Error::Reach(_), _) => { - panic!("A tasks::Error::Reach can only happen before we are connected \ - to a node; therefore the TaskState won't be Connected; QED"); - }, - } - }, - tasks::Event::NodeReached { task, conn_info } => { - let id = task.id(); - drop(task); - Poll::Ready(CollectionEvent::NodeReached(CollectionReachEvent { - parent: &mut *self, - id, - conn_info: Some(conn_info), - })) - }, - tasks::Event::NodeEvent { task, event } => { - let conn_info = match task.user_data() { - TaskState::Connected(conn_info, _) => conn_info.clone(), - _ => panic!("we can only receive NodeEvent events from a task after we \ - received a corresponding NodeReached event from that same task; \ - when we receive a 
NodeReached event, we ensure that the entry in \ - self.tasks is switched to the Connected state; QED"), - }; - drop(task); - Poll::Ready(CollectionEvent::NodeEvent { - // TODO: normally we'd build a `PeerMut` manually here, but the borrow checker - // doesn't like it - peer: self.peer_mut(&conn_info.peer_id()) - .expect("we can only receive NodeEvent events from a task after we \ - received a corresponding NodeReached event from that same task;\ - when that happens, peer_mut will always return Some; QED"), - event, - }) - } - } - } -} - -/// Reach attempt interrupt errors. -#[derive(Debug)] -pub enum InterruptError { - /// An invalid reach attempt has been used to try to interrupt. The task - /// entry is vacant; it needs to be added first via add_reach_attempt - /// (with the TaskState set to Pending) before we try to connect. - ReachAttemptNotFound, - /// The task has already connected to the node; interrupting a reach attempt - /// is thus redundant as it has already completed. Thus, the reach attempt - /// that has tried to be used is no longer valid, since already reached. - AlreadyReached, -} - -impl fmt::Display for InterruptError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - InterruptError::ReachAttemptNotFound => - write!(f, "The reach attempt could not be found."), - InterruptError::AlreadyReached => - write!(f, "The reach attempt has already completed or reached the node."), - } - } -} - -impl error::Error for InterruptError {} - -/// Reach attempt after it has been interrupted. -pub struct InterruptedReachAttempt { - inner: ClosedTask>, -} - -impl fmt::Debug for InterruptedReachAttempt -where - TUserData: fmt::Debug, - TConnInfo: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - f.debug_tuple("InterruptedReachAttempt") - .field(&self.inner) - .finish() - } -} - -/// Access to a peer in the collection. -pub struct PeerMut<'a, TInEvent, TUserData, TConnInfo = PeerId, TPeerId = PeerId> { - inner: TaskEntry<'a, TInEvent, TaskState>, - nodes: &'a mut FnvHashMap, -} - -impl<'a, TInEvent, TUserData, TConnInfo, TPeerId> PeerMut<'a, TInEvent, TUserData, TConnInfo, TPeerId> { - /// Returns the information of the connection with the peer. - // TODO: we would love to return a `&'a TConnInfo`, but this isn't possible because we have - // a mutable borrow. - pub fn info(&self) -> &TConnInfo { - match self.inner.user_data() { - TaskState::Connected(conn_info, _) => conn_info, - _ => panic!("A PeerMut is only ever constructed from a peer in the connected \ - state; QED") - } - } -} - -impl<'a, TInEvent, TUserData, TConnInfo, TPeerId> PeerMut<'a, TInEvent, TUserData, TConnInfo, TPeerId> -where - TConnInfo: ConnectionInfo, - TPeerId: Eq + Hash, -{ - /// Returns the identity of the peer. - pub fn id(&self) -> &TPeerId { - self.info().peer_id() - } - - /// Returns the user data that was stored in the collections when we accepted the connection. - pub fn user_data(&self) -> &TUserData { - match self.inner.user_data() { - TaskState::Connected(_, user_data) => user_data, - _ => panic!("A PeerMut is only ever constructed from a peer in the connected \ - state; QED") - } - } - - /// Returns the user data that was stored in the collections when we accepted the connection. 
- pub fn user_data_mut(&mut self) -> &mut TUserData { - match self.inner.user_data_mut() { - TaskState::Connected(_, user_data) => user_data, - _ => panic!("A PeerMut is only ever constructed from a peer in the connected \ - state; QED") - } - } - - /// Begin sending an event to the given node. Must be called only after a successful call to - /// `poll_ready_event`. - pub fn start_send_event(&mut self, event: TInEvent) { - self.inner.start_send_event(event) - } - - /// Make sure we are ready to accept an event to be sent with `start_send_event`. - pub fn poll_ready_event(&mut self, cx: &mut Context) -> Poll<()> { - self.inner.poll_ready_event(cx) - } - - /// Closes the connections to this node. Returns the user data. - /// - /// No further event will be generated for this node. - pub fn close(self) -> TUserData { - let task_id = self.inner.id(); - if let TaskState::Connected(conn_info, user_data) = self.inner.close().into_user_data() { - let old_task_id = self.nodes.remove(conn_info.peer_id()); - debug_assert_eq!(old_task_id, Some(task_id)); - user_data - } else { - panic!("a PeerMut can only be created if an entry is present in nodes; an entry in \ - nodes always matched a Connected entry in the tasks; QED"); - } - } - - /// Gives ownership of a closed reach attempt. As soon as the connection to the peer (`self`) - /// has some acknowledgment from the remote that its connection is alive, it will close the - /// connection inside `id`. - /// - /// The reach attempt will only be effectively cancelled once the peer (the object you're - /// manipulating) has received some network activity. However no event will be ever be - /// generated from this reach attempt, and this takes effect immediately. - pub fn start_take_over(&mut self, id: InterruptedReachAttempt) { - self.inner.start_take_over(id.inner) - } - - /// Make sure we are ready to taking over with `start_take_over`. - #[must_use] - pub fn poll_ready_take_over(&mut self, cx: &mut Context) -> Poll<()> { - self.inner.poll_ready_take_over(cx) - } -} diff --git a/core/src/nodes/handled_node.rs b/core/src/nodes/handled_node.rs deleted file mode 100644 index f8b08d11..00000000 --- a/core/src/nodes/handled_node.rs +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright 2018 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. 
- -use crate::{PeerId, muxing::StreamMuxer}; -use crate::nodes::node::{NodeEvent, NodeStream, Substream, Close}; -use std::{error, fmt, io, pin::Pin, task::Context, task::Poll}; - -/// Handler for the substreams of a node. -// TODO: right now it is possible for a node handler to be built, then shut down right after if we -// realize we dialed the wrong peer for example; this could be surprising and should either -// be documented or changed (favouring the "documented" right now) -pub trait NodeHandler { - /// Custom event that can be received from the outside. - type InEvent; - /// Custom event that can be produced by the handler and that will be returned by the `Network`. - type OutEvent; - /// Error that can happen during the processing of the node. - type Error; - /// The type of the substream containing the data. - type Substream; - /// Information about a substream. Can be sent to the handler through a `NodeHandlerEndpoint`, - /// and will be passed back in `inject_substream` or `inject_outbound_closed`. - type OutboundOpenInfo; - - /// Sends a new substream to the handler. - /// - /// The handler is responsible for upgrading the substream to whatever protocol it wants. - /// - /// # Panic - /// - /// Implementations are allowed to panic in the case of dialing if the `user_data` in - /// `endpoint` doesn't correspond to what was returned earlier when polling, or is used - /// multiple times. - fn inject_substream(&mut self, substream: Self::Substream, endpoint: NodeHandlerEndpoint); - - /// Injects an event coming from the outside into the handler. - fn inject_event(&mut self, event: Self::InEvent); - - /// Should behave like `Stream::poll()`. - /// - /// Returning an error will close the connection to the remote. - fn poll(&mut self, cx: &mut Context) - -> Poll, Self::Error>>; -} - -/// Prototype for a `NodeHandler`. -pub trait IntoNodeHandler { - /// The node handler. - type Handler: NodeHandler; - - /// Builds the node handler. - /// - /// The `TConnInfo` is the information about the connection that the handler is going to handle. - /// This is generated by the `Transport` and typically implements the `ConnectionInfo` trait. - fn into_handler(self, remote_conn_info: &TConnInfo) -> Self::Handler; -} - -impl IntoNodeHandler for T -where - T: NodeHandler -{ - type Handler = Self; - - fn into_handler(self, _: &TConnInfo) -> Self { - self - } -} - -/// Endpoint for a received substream. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum NodeHandlerEndpoint { - Dialer(TOutboundOpenInfo), - Listener, -} - -impl NodeHandlerEndpoint { - /// Returns true for `Dialer`. - pub fn is_dialer(&self) -> bool { - match self { - NodeHandlerEndpoint::Dialer(_) => true, - NodeHandlerEndpoint::Listener => false, - } - } - - /// Returns true for `Listener`. - pub fn is_listener(&self) -> bool { - match self { - NodeHandlerEndpoint::Dialer(_) => false, - NodeHandlerEndpoint::Listener => true, - } - } -} - -/// Event produced by a handler. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum NodeHandlerEvent { - /// Require a new outbound substream to be opened with the remote. - OutboundSubstreamRequest(TOutboundOpenInfo), - - /// Other event. - Custom(TCustom), -} - -/// Event produced by a handler. -impl NodeHandlerEvent { - /// If this is `OutboundSubstreamRequest`, maps the content to something else. 
- pub fn map_outbound_open_info(self, map: F) -> NodeHandlerEvent - where F: FnOnce(TOutboundOpenInfo) -> I - { - match self { - NodeHandlerEvent::OutboundSubstreamRequest(val) => { - NodeHandlerEvent::OutboundSubstreamRequest(map(val)) - }, - NodeHandlerEvent::Custom(val) => NodeHandlerEvent::Custom(val), - } - } - - /// If this is `Custom`, maps the content to something else. - pub fn map_custom(self, map: F) -> NodeHandlerEvent - where F: FnOnce(TCustom) -> I - { - match self { - NodeHandlerEvent::OutboundSubstreamRequest(val) => { - NodeHandlerEvent::OutboundSubstreamRequest(val) - }, - NodeHandlerEvent::Custom(val) => NodeHandlerEvent::Custom(map(val)), - } - } -} - -/// A node combined with an implementation of `NodeHandler`. -pub struct HandledNode -where - TMuxer: StreamMuxer, - THandler: NodeHandler>, -{ - /// Node that handles the muxing. - node: NodeStream, - /// Handler that processes substreams. - handler: THandler, -} - -impl fmt::Debug for HandledNode -where - TMuxer: StreamMuxer, - THandler: NodeHandler> + fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("HandledNode") - .field("node", &self.node) - .field("handler", &self.handler) - .finish() - } -} - -impl Unpin for HandledNode -where - TMuxer: StreamMuxer, - THandler: NodeHandler>, -{ -} - -impl HandledNode -where - TMuxer: StreamMuxer, - THandler: NodeHandler>, -{ - /// Builds a new `HandledNode`. - pub fn new(muxer: TMuxer, handler: THandler) -> Self { - HandledNode { - node: NodeStream::new(muxer), - handler, - } - } - - /// Returns a reference to the `NodeHandler` - pub fn handler(&self) -> &THandler { - &self.handler - } - - /// Returns a mutable reference to the `NodeHandler` - pub fn handler_mut(&mut self) -> &mut THandler { - &mut self.handler - } - - /// Injects an event to the handler. Has no effect if the handler is closing. - pub fn inject_event(&mut self, event: THandler::InEvent) { - self.handler.inject_event(event); - } - - /// Returns `true` if the remote has shown any sign of activity after the muxer has been open. - /// - /// See `StreamMuxer::is_remote_acknowledged`. - pub fn is_remote_acknowledged(&self) -> bool { - self.node.is_remote_acknowledged() - } - - /// Indicates to the handled node that it should shut down. After calling this method, the - /// `Stream` will end in the not-so-distant future. - pub fn close(self) -> Close { - self.node.close().0 - } - - /// API similar to `Future::poll` that polls the node for events. 
- pub fn poll(mut self: Pin<&mut Self>, cx: &mut Context) - -> Poll>> - { - loop { - let mut node_not_ready = false; - - match self.node.poll(cx) { - Poll::Pending => node_not_ready = true, - Poll::Ready(Ok(NodeEvent::InboundSubstream { substream })) => { - self.handler.inject_substream(substream, NodeHandlerEndpoint::Listener) - } - Poll::Ready(Ok(NodeEvent::OutboundSubstream { user_data, substream })) => { - let endpoint = NodeHandlerEndpoint::Dialer(user_data); - self.handler.inject_substream(substream, endpoint) - } - Poll::Ready(Err(err)) => return Poll::Ready(Err(HandledNodeError::Node(err))), - } - - match self.handler.poll(cx) { - Poll::Pending => { - if node_not_ready { - break - } - } - Poll::Ready(Ok(NodeHandlerEvent::OutboundSubstreamRequest(user_data))) => { - self.node.open_substream(user_data); - } - Poll::Ready(Ok(NodeHandlerEvent::Custom(event))) => { - return Poll::Ready(Ok(event)); - } - Poll::Ready(Err(err)) => return Poll::Ready(Err(HandledNodeError::Handler(err))), - } - } - - Poll::Pending - } -} - -/// Error that can happen when polling a `HandledNode`. -#[derive(Debug)] -pub enum HandledNodeError { - /// An error happend in the stream muxer. - // TODO: eventually this should also be a custom error - Node(io::Error), - /// An error happened in the handler of the connection to the node. - Handler(THandlerErr), -} - -impl fmt::Display for HandledNodeError -where - THandlerErr: fmt::Display -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - HandledNodeError::Node(err) => write!(f, "{}", err), - HandledNodeError::Handler(err) => write!(f, "{}", err), - } - } -} - -impl error::Error for HandledNodeError -where - THandlerErr: error::Error + 'static -{ - fn source(&self) -> Option<&(dyn error::Error + 'static)> { - match self { - HandledNodeError::Node(err) => Some(err), - HandledNodeError::Handler(err) => Some(err), - } - } -} diff --git a/core/src/nodes/handled_node_tasks/tests.rs b/core/src/nodes/handled_node_tasks/tests.rs deleted file mode 100644 index 03b34cef..00000000 --- a/core/src/nodes/handled_node_tasks/tests.rs +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2018 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. 
- -#![cfg(test)] - -use super::*; - -use std::io; - -use futures::future; -use crate::tests::dummy_handler::{Handler, InEvent, OutEvent}; -use crate::tests::dummy_muxer::DummyMuxer; -use void::Void; -use crate::PeerId; - -type TestHandledNodesTasks = HandledNodesTasks; - -struct HandledNodeTaskTestBuilder { - muxer: DummyMuxer, - handler: Handler, - task_count: usize, -} - -impl HandledNodeTaskTestBuilder { - fn new() -> Self { - HandledNodeTaskTestBuilder { - muxer: DummyMuxer::new(), - handler: Handler::default(), - task_count: 0, - } - } - - fn with_tasks(&mut self, amt: usize) -> &mut Self { - self.task_count = amt; - self - } - fn handled_nodes_tasks(&mut self) -> (TestHandledNodesTasks, Vec) { - let mut handled_nodes = HandledNodesTasks::new(); - let peer_id = PeerId::random(); - let mut task_ids = Vec::new(); - for _i in 0..self.task_count { - let fut = future::ok((peer_id.clone(), self.muxer.clone())); - task_ids.push( - handled_nodes.add_reach_attempt(fut, (), self.handler.clone()) - ); - } - (handled_nodes, task_ids) - } -} - - -// Tests for HandledNodeTasks - -#[test] -fn query_for_tasks() { - let (mut handled_nodes, task_ids) = HandledNodeTaskTestBuilder::new() - .with_tasks(3) - .handled_nodes_tasks(); - - assert_eq!(task_ids.len(), 3); - assert_eq!(handled_nodes.task(TaskId(2)).unwrap().id(), task_ids[2]); - assert!(handled_nodes.task(TaskId(545534)).is_none()); -} - -#[test] -fn iterate_over_all_tasks() { - let (handled_nodes, task_ids) = HandledNodeTaskTestBuilder::new() - .with_tasks(3) - .handled_nodes_tasks(); - - let mut tasks: Vec = handled_nodes.tasks().collect(); - assert!(tasks.len() == 3); - tasks.sort_by_key(|t| t.0 ); - assert_eq!(tasks, task_ids); -} - -#[test] -fn add_reach_attempt_prepares_a_new_task() { - let mut handled_nodes: HandledNodesTasks<_, _, _, _, _, _> = HandledNodesTasks::new(); - assert_eq!(handled_nodes.tasks().count(), 0); - assert_eq!(handled_nodes.to_spawn.len(), 0); - - handled_nodes.add_reach_attempt(future::empty::<_, Void>(), (), Handler::default()); - - assert_eq!(handled_nodes.tasks().count(), 1); - assert_eq!(handled_nodes.to_spawn.len(), 1); -} diff --git a/core/src/nodes/network.rs b/core/src/nodes/network.rs deleted file mode 100644 index 75399a34..00000000 --- a/core/src/nodes/network.rs +++ /dev/null @@ -1,1864 +0,0 @@ -// Copyright 2018 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. 
- -use crate::muxing::StreamMuxer; -use crate::{ - ConnectedPoint, Executor, Multiaddr, PeerId, address_translation, - nodes::{ - collection::{ - CollectionEvent, - CollectionNodeAccept, - CollectionReachEvent, - CollectionStream, - ConnectionInfo, - ReachAttemptId, - InterruptedReachAttempt - }, - handled_node::{ - HandledNodeError, - NodeHandler - }, - handled_node::IntoNodeHandler, - node::Substream - }, - nodes::listeners::{ListenersEvent, ListenerId, ListenersStream}, - transport::{Transport, TransportError} -}; -use fnv::FnvHashMap; -use futures::{prelude::*, future}; -use std::{ - collections::hash_map::{Entry, OccupiedEntry}, - error, - fmt, - hash::Hash, - num::NonZeroUsize, - pin::Pin, - task::{Context, Poll}, -}; - -mod tests; - -/// Implementation of `Stream` that handles the nodes. -pub struct Network -where - TTrans: Transport, -{ - /// Listeners for incoming connections. - listeners: ListenersStream, - - /// The nodes currently active. - active_nodes: CollectionStream, THandlerErr, (), (TConnInfo, ConnectedPoint), TPeerId>, - - /// The reach attempts of the network. - /// This needs to be a separate struct in order to handle multiple mutable borrows issues. - reach_attempts: ReachAttempts, - - /// Max number of incoming connections. - incoming_limit: Option, - - /// Unfinished take over message to be delivered. - /// - /// If the pair's second element is `AsyncSink::NotReady`, the take over - /// message has yet to be sent using `PeerMut::start_take_over`. - /// - /// If the pair's second element is `AsyncSink::Ready`, the take over - /// message has been sent and needs to be flushed using - /// `PeerMut::complete_take_over`. - take_over_to_complete: Option<(TPeerId, InterruptedReachAttempt)> -} - -impl fmt::Debug for - Network -where - TTrans: fmt::Debug + Transport, - TConnInfo: fmt::Debug, - TPeerId: fmt::Debug + Eq + Hash, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - f.debug_struct("ReachAttempts") - .field("listeners", &self.listeners) - .field("active_nodes", &self.active_nodes) - .field("reach_attempts", &self.reach_attempts) - .field("incoming_limit", &self.incoming_limit) - .field("take_over_to_complete", &self.take_over_to_complete) - .finish() - } -} - -impl Unpin for - Network -where - TTrans: Transport -{ -} - -impl ConnectionInfo for (TConnInfo, ConnectedPoint) -where - TConnInfo: ConnectionInfo -{ - type PeerId = TConnInfo::PeerId; - - fn peer_id(&self) -> &Self::PeerId { - self.0.peer_id() - } -} - -struct ReachAttempts { - /// Peer ID of the node we control. - local_peer_id: TPeerId, - - /// Attempts to reach a peer. - /// May contain nodes we are already connected to, because we don't cancel outgoing attempts. - out_reach_attempts: FnvHashMap, - - /// Reach attempts for incoming connections, and outgoing connections for which we don't know - /// the peer ID. - other_reach_attempts: Vec<(ReachAttemptId, ConnectedPoint)>, - - /// For each peer ID we're connected to, contains the endpoint we're connected to. - /// Always in sync with `active_nodes`. 
- connected_points: FnvHashMap<TPeerId, ConnectedPoint>, -} - -impl<TPeerId> fmt::Debug for ReachAttempts<TPeerId> -where - TPeerId: fmt::Debug + Eq + Hash, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - f.debug_struct("ReachAttempts") - .field("local_peer_id", &self.local_peer_id) - .field("out_reach_attempts", &self.out_reach_attempts) - .field("other_reach_attempts", &self.other_reach_attempts) - .field("connected_points", &self.connected_points) - .finish() - } -} - -/// Attempt to reach a peer. -#[derive(Debug, Clone)] -struct OutReachAttempt { - /// Identifier for the reach attempt. - id: ReachAttemptId, - /// Multiaddr currently being attempted. - cur_attempted: Multiaddr, - /// Multiaddresses to attempt if the current one fails. - next_attempts: Vec<Multiaddr>, -} - -/// Event that can happen on the `Network`. -pub enum NetworkEvent<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo = PeerId, TPeerId = PeerId> -where - TTrans: Transport, -{ - /// One of the listeners gracefully closed. - ListenerClosed { - /// The listener ID that closed. - listener_id: ListenerId, - /// Reason for the closure. Contains `Ok(())` if the stream produced `None`, or `Err` - /// if the stream produced an error. - reason: Result<(), TTrans::Error>, - }, - - /// One of the listeners reported a non-fatal error. - ListenerError { - /// The listener that errored. - listener_id: ListenerId, - /// The listener error. - error: TTrans::Error - }, - - /// One of the listeners is now listening on an additional address. - NewListenerAddress { - /// The listener that is listening on the new address. - listener_id: ListenerId, - /// The new address the listener is now also listening on. - listen_addr: Multiaddr - }, - - /// One of the listeners is no longer listening on some address. - ExpiredListenerAddress { - /// The listener that is no longer listening on some address. - listener_id: ListenerId, - /// The expired address. - listen_addr: Multiaddr - }, - - /// A new connection arrived on a listener. - IncomingConnection(IncomingConnectionEvent<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId>), - - /// A new connection arrived on a listener, but an error happened while negotiating it. - /// - /// This can include, for example, an error during the handshake of the encryption layer, or - /// the connection unexpectedly closed. - IncomingConnectionError { - /// Local connection address. - local_addr: Multiaddr, - /// Address used to send back data to the remote. - send_back_addr: Multiaddr, - /// The error that happened. - error: IncomingError<TTrans::Error>, - }, - - /// A new connection to a peer has been opened. - Connected { - /// Information about the connection, including the peer ID. - conn_info: TConnInfo, - /// If `Listener`, then we received the connection. If `Dial`, then it's a connection that - /// we opened. - endpoint: ConnectedPoint, - }, - - /// A connection to a peer has been replaced with a new one. - Replaced { - /// Information about the new connection. The `TPeerId` is the same as the one - /// in `old_info`. - new_info: TConnInfo, - /// Information about the old connection. The `TPeerId` is the same as the one - /// in `new_info`. - old_info: TConnInfo, - /// Endpoint we were connected to. - closed_endpoint: ConnectedPoint, - /// If `Listener`, then we received the connection. If `Dial`, then it's a connection that - /// we opened. - endpoint: ConnectedPoint, - }, - - /// The handler of a node has produced an error.
- NodeClosed { - /// Information about the connection that has been closed. - conn_info: TConnInfo, - /// Endpoint we were connected to. - endpoint: ConnectedPoint, - /// The error that happened. - error: HandledNodeError, - }, - - /// Failed to reach a peer that we were trying to dial. - DialError { - /// New state of a peer. - new_state: PeerState, - - /// Id of the peer we were trying to dial. - peer_id: TPeerId, - - /// The multiaddr we failed to reach. - multiaddr: Multiaddr, - - /// The error that happened. - error: NetworkReachError, - }, - - /// Failed to reach a peer that we were trying to dial. - UnknownPeerDialError { - /// The multiaddr we failed to reach. - multiaddr: Multiaddr, - - /// The error that happened. - error: UnknownPeerDialErr, - - /// The handler that was passed to `dial()`. - handler: THandler, - }, - - /// A node produced a custom event. - NodeEvent { - /// Connection that produced the event. - conn_info: TConnInfo, - /// Event that was produced by the node. - event: TOutEvent, - }, -} - -impl<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> fmt::Debug for - NetworkEvent<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> -where - TOutEvent: fmt::Debug, - TTrans: Transport, - TTrans::Error: fmt::Debug, - THandlerErr: fmt::Debug, - TConnInfo: fmt::Debug, - TPeerId: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - match self { - NetworkEvent::NewListenerAddress { listener_id, listen_addr } => { - f.debug_struct("NewListenerAddress") - .field("listener_id", listener_id) - .field("listen_addr", listen_addr) - .finish() - } - NetworkEvent::ExpiredListenerAddress { listener_id, listen_addr } => { - f.debug_struct("ExpiredListenerAddress") - .field("listener_id", listener_id) - .field("listen_addr", listen_addr) - .finish() - } - NetworkEvent::ListenerClosed { listener_id, reason } => { - f.debug_struct("ListenerClosed") - .field("listener_id", listener_id) - .field("reason", reason) - .finish() - } - NetworkEvent::ListenerError { listener_id, error } => { - f.debug_struct("ListenerError") - .field("listener_id", listener_id) - .field("error", error) - .finish() - } - NetworkEvent::IncomingConnection(event) => { - f.debug_struct("IncomingConnection") - .field("local_addr", &event.local_addr) - .field("send_back_addr", &event.send_back_addr) - .finish() - } - NetworkEvent::IncomingConnectionError { local_addr, send_back_addr, error } => { - f.debug_struct("IncomingConnectionError") - .field("local_addr", local_addr) - .field("send_back_addr", send_back_addr) - .field("error", error) - .finish() - } - NetworkEvent::Connected { conn_info, endpoint } => { - f.debug_struct("Connected") - .field("conn_info", conn_info) - .field("endpoint", endpoint) - .finish() - } - NetworkEvent::Replaced { new_info, old_info, closed_endpoint, endpoint } => { - f.debug_struct("Replaced") - .field("new_info", new_info) - .field("old_info", old_info) - .field("closed_endpoint", closed_endpoint) - .field("endpoint", endpoint) - .finish() - } - NetworkEvent::NodeClosed { conn_info, endpoint, error } => { - f.debug_struct("NodeClosed") - .field("conn_info", conn_info) - .field("endpoint", endpoint) - .field("error", error) - .finish() - } - NetworkEvent::DialError { new_state, peer_id, multiaddr, error } => { - f.debug_struct("DialError") - .field("new_state", new_state) - .field("peer_id", peer_id) - .field("multiaddr", multiaddr) - .field("error", error) - .finish() - } - 
NetworkEvent::UnknownPeerDialError { multiaddr, error, .. } => { - f.debug_struct("UnknownPeerDialError") - .field("multiaddr", multiaddr) - .field("error", error) - .finish() - } - NetworkEvent::NodeEvent { conn_info, event } => { - f.debug_struct("NodeEvent") - .field("conn_info", conn_info) - .field("event", event) - .finish() - } - } - } -} - -/// Internal error type that contains all the possible errors that can happen in a reach attempt. -#[derive(Debug)] -enum InternalReachErr { - /// Error in the transport layer. - Transport(TransportError), - /// We successfully reached the peer, but there was a mismatch between the expected id and the - /// actual id of the peer. - PeerIdMismatch { - /// The information about the bad connection. - obtained: TConnInfo, - }, - /// The negotiated `PeerId` is the same as the one of the local node. - FoundLocalPeerId, -} - -impl fmt::Display for InternalReachErr -where - TTransErr: fmt::Display, - TConnInfo: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - InternalReachErr::Transport(err) => write!(f, "{}", err), - InternalReachErr::PeerIdMismatch { obtained } => { - write!(f, "Peer ID mismatch, obtained: {:?}", obtained) - }, - InternalReachErr::FoundLocalPeerId => { - write!(f, "Remote has the same PeerId as us") - } - } - } -} - -impl error::Error for InternalReachErr -where - TTransErr: error::Error + 'static, - TConnInfo: fmt::Debug, -{ - fn source(&self) -> Option<&(dyn error::Error + 'static)> { - match self { - InternalReachErr::Transport(err) => Some(err), - InternalReachErr::PeerIdMismatch { .. } => None, - InternalReachErr::FoundLocalPeerId => None, - } - } -} - -/// State of a peer. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum PeerState { - /// We are connected to this peer. - Connected, - /// We are currently trying to reach this peer. - Dialing { - /// Number of addresses we are trying to dial. - num_pending_addresses: NonZeroUsize, - }, - /// We are not connected to this peer. - NotConnected, -} - -/// Error that can happen when trying to reach a node. -#[derive(Debug)] -pub enum NetworkReachError { - /// Error in the transport layer. - Transport(TransportError), - - /// We successfully reached the peer, but there was a mismatch between the expected id and the - /// actual id of the peer. - PeerIdMismatch { - /// The information about the other connection. - obtained: TConnInfo, - } -} - -impl fmt::Display for NetworkReachError -where - TTransErr: fmt::Display, - TConnInfo: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - NetworkReachError::Transport(err) => write!(f, "{}", err), - NetworkReachError::PeerIdMismatch { obtained } => { - write!(f, "Peer ID mismatch, obtained: {:?}", obtained) - }, - } - } -} - -impl error::Error for NetworkReachError -where - TTransErr: error::Error + 'static, - TConnInfo: fmt::Debug, -{ - fn source(&self) -> Option<&(dyn error::Error + 'static)> { - match self { - NetworkReachError::Transport(err) => Some(err), - NetworkReachError::PeerIdMismatch { .. } => None, - } - } -} - -/// Error that can happen when dialing a node with an unknown peer ID. -#[derive(Debug)] -pub enum UnknownPeerDialErr { - /// Error in the transport layer. - Transport(TransportError), - /// The negotiated `PeerId` is the same as the local node. 
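The `PeerState` reported in `DialError` tells the caller whether more dialing is still in flight or the peer is now fully unreachable. A hedged sketch of how a consumer might branch on it; the enum is re-modelled locally, and only the variant semantics come from the code above:

```rust
use std::num::NonZeroUsize;

enum PeerState {
    Connected,
    Dialing { num_pending_addresses: NonZeroUsize },
    NotConnected,
}

fn on_dial_error(state: PeerState) {
    match state {
        // Another connection to the peer already exists; nothing to do.
        PeerState::Connected => println!("still connected via another endpoint"),
        // More addresses remain; the network keeps dialing on its own.
        PeerState::Dialing { num_pending_addresses } =>
            println!("{} address(es) left to try", num_pending_addresses),
        // All addresses failed; the caller may schedule a retry.
        PeerState::NotConnected => println!("giving up on this peer"),
    }
}
```

Note how `NonZeroUsize` encodes the invariant directly in the type: the `Dialing` state cannot exist with zero pending addresses.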
- FoundLocalPeerId, -} - -impl fmt::Display for UnknownPeerDialErr -where TTransErr: fmt::Display -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - UnknownPeerDialErr::Transport(err) => write!(f, "{}", err), - UnknownPeerDialErr::FoundLocalPeerId => { - write!(f, "Unknown peer has same PeerId as us") - }, - } - } -} - -impl error::Error for UnknownPeerDialErr -where TTransErr: error::Error + 'static -{ - fn source(&self) -> Option<&(dyn error::Error + 'static)> { - match self { - UnknownPeerDialErr::Transport(err) => Some(err), - UnknownPeerDialErr::FoundLocalPeerId => None, - } - } -} - -/// Error that can happen on an incoming connection. -#[derive(Debug)] -pub enum IncomingError { - /// Error in the transport layer. - // TODO: just TTransError should be enough? - Transport(TransportError), - /// Denied the incoming connection because we're already connected to this peer as a dialer - /// and we have a higher priority than the remote. - DeniedLowerPriority, - /// The negotiated `PeerId` is the same as the local node. - FoundLocalPeerId, -} - -impl fmt::Display for IncomingError -where TTransErr: fmt::Display -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - IncomingError::Transport(err) => write!(f, "{}", err), - IncomingError::DeniedLowerPriority => { - write!(f, "Denied because of lower priority") - }, - IncomingError::FoundLocalPeerId => { - write!(f, "Incoming connection has same PeerId as us") - }, - } - } -} - -impl error::Error for IncomingError -where TTransErr: error::Error + 'static -{ - fn source(&self) -> Option<&(dyn error::Error + 'static)> { - match self { - IncomingError::Transport(err) => Some(err), - IncomingError::DeniedLowerPriority => None, - IncomingError::FoundLocalPeerId => None, - } - } -} - -/// A new connection arrived on a listener. -pub struct IncomingConnectionEvent<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> -where TTrans: Transport -{ - /// The listener who received the connection. - listener_id: ListenerId, - /// The produced upgrade. - upgrade: TTrans::ListenerUpgrade, - /// PeerId of the local node. - local_peer_id: TPeerId, - /// Local connection address. - local_addr: Multiaddr, - /// Address used to send back data to the remote. - send_back_addr: Multiaddr, - /// Reference to the `active_nodes` field of the `Network`. - active_nodes: &'a mut CollectionStream, THandlerErr, (), (TConnInfo, ConnectedPoint), TPeerId>, - /// Reference to the `other_reach_attempts` field of the `Network`. 
- other_reach_attempts: &'a mut Vec<(ReachAttemptId, ConnectedPoint)>, -} - -impl<'a, TTrans, TInEvent, TOutEvent, TMuxer, THandler, THandlerErr, TConnInfo, TPeerId> - IncomingConnectionEvent<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> -where - TTrans: Transport, - TTrans::Error: Send + 'static, - TTrans::ListenerUpgrade: Send + 'static, - THandler: IntoNodeHandler<(TConnInfo, ConnectedPoint)> + Send + 'static, - THandler::Handler: NodeHandler, InEvent = TInEvent, OutEvent = TOutEvent, Error = THandlerErr> + Send + 'static, - ::OutboundOpenInfo: Send + 'static, // TODO: shouldn't be necessary - THandlerErr: error::Error + Send + 'static, - TMuxer: StreamMuxer + Send + Sync + 'static, - TMuxer::OutboundSubstream: Send, - TMuxer::Substream: Send, - TInEvent: Send + 'static, - TOutEvent: Send + 'static, - TConnInfo: fmt::Debug + ConnectionInfo + Send + 'static, - TPeerId: Eq + Hash + Clone + Send + 'static, -{ - /// The ID of the listener with the incoming connection. - pub fn listener_id(&self) -> ListenerId { - self.listener_id - } - - /// Starts processing the incoming connection and sets the handler to use for it. - pub fn accept(self, handler: THandler) { - self.accept_with_builder(|_| handler) - } - - /// Same as `accept`, but accepts a closure that turns a `IncomingInfo` into a handler. - pub fn accept_with_builder(self, builder: TBuilder) - where TBuilder: FnOnce(IncomingInfo<'_>) -> THandler - { - let connected_point = self.to_connected_point(); - let handler = builder(self.info()); - let local_peer_id = self.local_peer_id; - let upgrade = self.upgrade - .map_err(|err| InternalReachErr::Transport(TransportError::Other(err))) - .and_then({ - let connected_point = connected_point.clone(); - move |(peer_id, muxer)| { - if *peer_id.peer_id() == local_peer_id { - future::ready(Err(InternalReachErr::FoundLocalPeerId)) - } else { - future::ready(Ok(((peer_id, connected_point), muxer))) - } - } - }); - let id = self.active_nodes.add_reach_attempt(upgrade, handler); - self.other_reach_attempts.push(( - id, - connected_point, - )); - } -} - -impl<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> - IncomingConnectionEvent<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> -where TTrans: Transport -{ - /// Returns the `IncomingInfo` corresponding to this incoming connection. - pub fn info(&self) -> IncomingInfo<'_> { - IncomingInfo { - local_addr: &self.local_addr, - send_back_addr: &self.send_back_addr, - } - } - - /// Local connection address. - pub fn local_addr(&self) -> &Multiaddr { - &self.local_addr - } - - /// Address used to send back data to the dialer. - pub fn send_back_addr(&self) -> &Multiaddr { - &self.send_back_addr - } - - /// Builds the `ConnectedPoint` corresponding to the incoming connection. - pub fn to_connected_point(&self) -> ConnectedPoint { - self.info().to_connected_point() - } -} - -/// Information about an incoming connection currently being negotiated. -#[derive(Debug, Copy, Clone)] -pub struct IncomingInfo<'a> { - /// Local connection address. - pub local_addr: &'a Multiaddr, - /// Stack of protocols used to send back data to the remote. - pub send_back_addr: &'a Multiaddr, -} - -impl<'a> IncomingInfo<'a> { - /// Builds the `ConnectedPoint` corresponding to the incoming connection. 
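`IncomingConnectionEvent::accept` above consumes the event and registers the pending upgrade as a new reach attempt; dropping the event refuses the connection. A minimal model of that accept-or-drop decision, with stand-in types (`Handler`, `IncomingEvent`) instead of the crate's generics:

```rust
struct Handler;

// Stand-in for the event: just the two addresses a policy would inspect.
struct IncomingEvent { local_addr: String, send_back_addr: String }

impl IncomingEvent {
    fn accept(self, _handler: Handler) {
        // Accepting starts the negotiation as a new reach attempt.
        println!("negotiating {} -> {}", self.send_back_addr, self.local_addr);
    }
}

fn on_incoming(ev: IncomingEvent) {
    // Policy hook: a real caller could inspect the addresses first
    // and simply drop `ev` to refuse the connection.
    ev.accept(Handler);
}
```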
- pub fn to_connected_point(&self) -> ConnectedPoint { - ConnectedPoint::Listener { - local_addr: self.local_addr.clone(), - send_back_addr: self.send_back_addr.clone(), - } - } -} - -impl<TTrans, TInEvent, TOutEvent, TMuxer, THandler, THandlerErr, TConnInfo, TPeerId> - Network<TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> -where - TTrans: Transport<Output = (TConnInfo, TMuxer)> + Clone, - TMuxer: StreamMuxer, - THandler: IntoNodeHandler<(TConnInfo, ConnectedPoint)> + Send + 'static, - THandler::Handler: NodeHandler<Substream = Substream<TMuxer>, InEvent = TInEvent, OutEvent = TOutEvent, Error = THandlerErr> + Send + 'static, - <THandler::Handler as NodeHandler>::OutboundOpenInfo: Send + 'static, // TODO: shouldn't be necessary - THandlerErr: error::Error + Send + 'static, - TConnInfo: fmt::Debug + ConnectionInfo<PeerId = TPeerId> + Send + 'static, - TPeerId: Eq + Hash + Clone, -{ - /// Creates a new node events stream. - pub fn new(transport: TTrans, local_peer_id: TPeerId, executor: Option<Box<dyn Executor>>) -> Self { - // TODO: with_capacity? - Network { - listeners: ListenersStream::new(transport), - active_nodes: CollectionStream::new(executor), - reach_attempts: ReachAttempts { - local_peer_id, - out_reach_attempts: Default::default(), - other_reach_attempts: Vec::new(), - connected_points: Default::default(), - }, - incoming_limit: None, - take_over_to_complete: None - } - } - - /// Creates a new node event stream with incoming connections limit. - pub fn new_with_incoming_limit(transport: TTrans, - local_peer_id: TPeerId, executor: Option<Box<dyn Executor>>, incoming_limit: Option<u32>) -> Self - { - Network { - incoming_limit, - listeners: ListenersStream::new(transport), - active_nodes: CollectionStream::new(executor), - reach_attempts: ReachAttempts { - local_peer_id, - out_reach_attempts: Default::default(), - other_reach_attempts: Vec::new(), - connected_points: Default::default(), - }, - take_over_to_complete: None - } - } - - /// Returns the transport passed when building this object. - pub fn transport(&self) -> &TTrans { - self.listeners.transport() - } - - /// Start listening on the given multiaddress. - pub fn listen_on(&mut self, addr: Multiaddr) -> Result<ListenerId, TransportError<TTrans::Error>> { - self.listeners.listen_on(addr) - } - - /// Remove a previously added listener. - /// - /// Returns `Ok(())` if a listener with this ID was in the list. - pub fn remove_listener(&mut self, id: ListenerId) -> Result<(), ()> { - self.listeners.remove_listener(id) - } - - /// Returns an iterator that produces the list of addresses we are listening on. - pub fn listen_addrs(&self) -> impl Iterator<Item = &Multiaddr> { - self.listeners.listen_addrs() - } - - /// Returns limit on incoming connections. - pub fn incoming_limit(&self) -> Option<u32> { - self.incoming_limit - } - - /// Call this function in order to know which address remotes should dial to - /// access your local node. - /// - /// When receiving an observed address on a tcp connection that we initiated, the observed - /// address contains our tcp dial port, not our tcp listen port. We know which port we are - /// listening on, so we can replace the port within the observed address. - /// - /// When receiving an observed address on a tcp connection that we did **not** initiate, the - /// observed address should contain our listening port. In case it differs from our listening - /// port there might be a proxy along the path. - /// - /// # Arguments - /// - /// * `observed_addr` - should be an address a remote observes you as, which can be obtained for - /// example with the identify protocol.
- /// - pub fn address_translation<'a>(&'a self, observed_addr: &'a Multiaddr) - -> impl Iterator + 'a - where - TMuxer: 'a, - THandler: 'a, - { - self.listen_addrs().flat_map(move |server| address_translation(server, observed_addr)) - } - - /// Returns the peer id of the local node. - /// - /// This is the same value as was passed to `new()`. - pub fn local_peer_id(&self) -> &TPeerId { - &self.reach_attempts.local_peer_id - } - - /// Dials a multiaddress without knowing the peer ID we're going to obtain. - /// - /// The second parameter is the handler to use if we manage to reach a node. - pub fn dial(&mut self, addr: Multiaddr, handler: THandler) -> Result<(), TransportError> - where - TTrans: Transport, - TTrans::Error: Send + 'static, - TTrans::Dial: Send + 'static, - TMuxer: Send + Sync + 'static, - TMuxer::OutboundSubstream: Send, - TInEvent: Send + 'static, - TOutEvent: Send + 'static, - TConnInfo: Send + 'static, - TPeerId: Send + 'static, - { - let local_peer_id = self.reach_attempts.local_peer_id.clone(); - let connected_point = ConnectedPoint::Dialer { address: addr.clone() }; - let future = self.transport().clone().dial(addr)? - .map_err(|err| InternalReachErr::Transport(TransportError::Other(err))) - .and_then({ - let connected_point = connected_point.clone(); - move |(peer_id, muxer)| { - if *peer_id.peer_id() == local_peer_id { - future::ready(Err(InternalReachErr::FoundLocalPeerId)) - } else { - future::ready(Ok(((peer_id, connected_point), muxer))) - } - } - }); - - let reach_id = self.active_nodes.add_reach_attempt(future, handler); - self.reach_attempts.other_reach_attempts.push((reach_id, connected_point)); - Ok(()) - } - - /// Returns the number of incoming connections that are currently in the process of being - /// negotiated. - /// - /// We don't know anything about these connections yet, so all we can do is know how many of - /// them we have. - #[deprecated(note = "Use incoming_negotiated().count() instead")] - pub fn num_incoming_negotiated(&self) -> usize { - self.reach_attempts.other_reach_attempts - .iter() - .filter(|&(_, endpoint)| endpoint.is_listener()) - .count() - } - - /// Returns the list of incoming connections that are currently in the process of being - /// negotiated. We don't know the `PeerId` of these nodes yet. - pub fn incoming_negotiated(&self) -> impl Iterator> { - self.reach_attempts - .other_reach_attempts - .iter() - .filter_map(|&(_, ref endpoint)| { - match endpoint { - ConnectedPoint::Listener { local_addr, send_back_addr } => { - Some(IncomingInfo { local_addr, send_back_addr }) - }, - ConnectedPoint::Dialer { .. } => None, - } - }) - } - - /// Sends an event to all nodes. - /// - /// This function is "atomic", in the sense that if `Poll::Pending` is returned then no event - /// has been sent to any node yet. - #[must_use] - pub fn poll_broadcast(&mut self, event: &TInEvent, cx: &mut Context) -> Poll<()> - where - TInEvent: Clone - { - self.active_nodes.poll_broadcast(event, cx) - } - - /// Returns a list of all the peers we are currently connected to. - /// - /// Calling `peer()` with each `PeerId` is guaranteed to produce a `PeerConnected`. - // TODO: ideally this would return a list of `PeerConnected` structs, but this is quite - // complicated to do in terms of implementation - pub fn connected_peers(&self) -> impl Iterator { - self.active_nodes.connections() - } - - /// Returns a list of all the nodes we are currently trying to reach. 
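The port-substitution idea behind `address_translation` can be shown with a toy model: keep the host part a remote observed and combine it with our own listening port. This sketch models `/ip4/<host>/tcp/<port>` addresses as plain `(host, port)` pairs; the real function operates on `Multiaddr` values, so the types here are assumptions for illustration only:

```rust
fn translate(listen: (&str, u16), observed: (&str, u16)) -> (String, u16) {
    // The remote sees our dialing port, which is ephemeral; the address
    // peers should dial is the observed host plus our listen port.
    (observed.0.to_string(), listen.1)
}

fn main() {
    let listen = ("192.168.1.2", 30333);   // what we bound locally
    let observed = ("203.0.113.7", 54021); // what the remote saw us as
    assert_eq!(translate(listen, observed), ("203.0.113.7".to_string(), 30333));
}
```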
- /// - /// Calling `peer()` with each `PeerId` is guaranteed to produce a `PeerPendingConnect` - // TODO: ideally this would return a list of `PeerPendingConnect` structs, but this is quite - // complicated to do in terms of implementation - pub fn pending_connection_peers(&self) -> impl Iterator { - self.reach_attempts - .out_reach_attempts - .keys() - .filter(move |p| !self.active_nodes.has_connection(p)) - } - - /// Returns the list of addresses we're currently dialing without knowing the `PeerId` of. - pub fn unknown_dials(&self) -> impl Iterator { - self.reach_attempts - .other_reach_attempts - .iter() - .filter_map(|&(_, ref endpoint)| { - match endpoint { - ConnectedPoint::Dialer { address } => Some(address), - ConnectedPoint::Listener { .. } => None, - } - }) - } - - /// Grants access to a struct that represents a peer. - pub fn peer(&mut self, peer_id: TPeerId) -> Peer<'_, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> { - if peer_id == self.reach_attempts.local_peer_id { - return Peer::LocalNode; - } - - // TODO: we do `peer_mut(...).is_some()` followed with `peer_mut(...).unwrap()`, otherwise - // the borrow checker yells at us. - - if self.active_nodes.peer_mut(&peer_id).is_some() { - return Peer::Connected(PeerConnected { - active_nodes: &mut self.active_nodes, - peer_id, - connected_points: &mut self.reach_attempts.connected_points, - out_reach_attempts: &mut self.reach_attempts.out_reach_attempts, - }); - } - - // The state of `connected_points` always follows `self.active_nodes`. - debug_assert!(!self.reach_attempts.connected_points.contains_key(&peer_id)); - - if self.reach_attempts.out_reach_attempts.get_mut(&peer_id).is_some() { - return Peer::PendingConnect(PeerPendingConnect { - attempt: match self.reach_attempts.out_reach_attempts.entry(peer_id.clone()) { - Entry::Occupied(e) => e, - Entry::Vacant(_) => panic!("we checked for Some just above"), - }, - active_nodes: &mut self.active_nodes, - }); - } - - Peer::NotConnected(PeerNotConnected { - nodes: self, - peer_id, - }) - } - - /// Starts dialing out a multiaddress. `rest` is the list of multiaddresses to attempt if - /// `first` fails. - /// - /// It is a logic error to call this method if we already have an outgoing attempt to the - /// given peer. 
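`peer()` collapses all bookkeeping into one of four mutually exclusive states, which callers are expected to match on exhaustively. A hedged sketch of that consumption pattern, with the enum re-modelled locally using unit payloads for brevity:

```rust
enum Peer {
    Connected,
    PendingConnect,
    NotConnected,
    LocalNode,
}

fn describe(peer: Peer) -> &'static str {
    match peer {
        Peer::Connected => "already connected; events can be sent",
        Peer::PendingConnect => "dial in progress; more addresses may be queued",
        Peer::NotConnected => "no connection; a dial may be started",
        Peer::LocalNode => "refers to ourselves; dialing is a logic error",
    }
}
```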
- fn start_dial_out(&mut self, peer_id: TPeerId, handler: THandler, first: Multiaddr, rest: Vec) - where - TTrans: Transport, - TTrans::Dial: Send + 'static, - TTrans::Error: Send + 'static, - TMuxer: Send + Sync + 'static, - TMuxer::OutboundSubstream: Send, - TInEvent: Send + 'static, - TOutEvent: Send + 'static, - TPeerId: Send + 'static, - { - let reach_id = match self.transport().clone().dial(first.clone()) { - Ok(fut) => { - let expected_peer_id = peer_id.clone(); - let connected_point = ConnectedPoint::Dialer { address: first.clone() }; - let fut = fut - .map_err(|err| InternalReachErr::Transport(TransportError::Other(err))) - .and_then(move |(actual_conn_info, muxer)| { - if *actual_conn_info.peer_id() == expected_peer_id { - future::ready(Ok(((actual_conn_info, connected_point), muxer))) - } else { - future::ready(Err(InternalReachErr::PeerIdMismatch { obtained: actual_conn_info })) - } - }); - self.active_nodes.add_reach_attempt(fut, handler) - }, - Err(err) => { - let fut = future::err(InternalReachErr::Transport(err)); - self.active_nodes.add_reach_attempt(fut, handler) - }, - }; - - let former = self.reach_attempts.out_reach_attempts.insert( - peer_id, - OutReachAttempt { - id: reach_id, - cur_attempted: first, - next_attempts: rest, - }, - ); - - debug_assert!(former.is_none()); - } - - /// Provides an API similar to `Stream`, except that it cannot error. - pub fn poll<'a>(&'a mut self, cx: &mut Context) -> Poll> - where - TTrans: Transport, - TTrans::Error: Send + 'static, - TTrans::Dial: Send + 'static, - TTrans::ListenerUpgrade: Send + 'static, - TMuxer: Send + Sync + 'static, - TMuxer::OutboundSubstream: Send, - TInEvent: Send + 'static, - TOutEvent: Send + 'static, - THandler: IntoNodeHandler<(TConnInfo, ConnectedPoint)> + Send + 'static, - THandler::Handler: NodeHandler, InEvent = TInEvent, OutEvent = TOutEvent, Error = THandlerErr> + Send + 'static, - THandlerErr: error::Error + Send + 'static, - TConnInfo: Clone, - TPeerId: AsRef<[u8]> + Send + 'static, - { - // Start by polling the listeners for events, but only if the number - // of incoming connections does not exceed the limit. - match self.incoming_limit { - Some(x) if self.incoming_negotiated().count() >= (x as usize) - => (), - _ => { - match ListenersStream::poll(Pin::new(&mut self.listeners), cx) { - Poll::Pending => (), - Poll::Ready(ListenersEvent::Incoming { listener_id, upgrade, local_addr, send_back_addr }) => { - let event = IncomingConnectionEvent { - listener_id, - upgrade, - local_peer_id: self.reach_attempts.local_peer_id.clone(), - local_addr, - send_back_addr, - active_nodes: &mut self.active_nodes, - other_reach_attempts: &mut self.reach_attempts.other_reach_attempts, - }; - return Poll::Ready(NetworkEvent::IncomingConnection(event)); - } - Poll::Ready(ListenersEvent::NewAddress { listener_id, listen_addr }) => { - return Poll::Ready(NetworkEvent::NewListenerAddress { listener_id, listen_addr }) - } - Poll::Ready(ListenersEvent::AddressExpired { listener_id, listen_addr }) => { - return Poll::Ready(NetworkEvent::ExpiredListenerAddress { listener_id, listen_addr }) - } - Poll::Ready(ListenersEvent::Closed { listener_id, reason }) => { - return Poll::Ready(NetworkEvent::ListenerClosed { listener_id, reason }) - } - Poll::Ready(ListenersEvent::Error { listener_id, error }) => { - return Poll::Ready(NetworkEvent::ListenerError { listener_id, error }) - } - } - } - } - - // Attempt to deliver any pending take over messages. 
- if let Some((id, interrupted)) = self.take_over_to_complete.take() { - if let Some(mut peer) = self.active_nodes.peer_mut(&id) { - if let Poll::Ready(()) = peer.poll_ready_take_over(cx) { - peer.start_take_over(interrupted); - } else { - self.take_over_to_complete = Some((id, interrupted)); - return Poll::Pending; - } - } - } - - // Poll the existing nodes. - let (action, out_event); - match self.active_nodes.poll(cx) { - Poll::Pending => return Poll::Pending, - Poll::Ready(CollectionEvent::NodeReached(reach_event)) => { - let (a, e) = handle_node_reached(&mut self.reach_attempts, reach_event); - action = a; - out_event = e; - } - Poll::Ready(CollectionEvent::ReachError { id, error, handler }) => { - let (a, e) = handle_reach_error(&mut self.reach_attempts, id, error, handler); - action = a; - out_event = e; - } - Poll::Ready(CollectionEvent::NodeClosed { - conn_info, - error, - .. - }) => { - let endpoint = self.reach_attempts.connected_points.remove(conn_info.peer_id()) - .expect("We insert into connected_points whenever a connection is \ - opened and remove only when a connection is closed; the \ - underlying API is guaranteed to always deliver a connection \ - closed message after it has been opened, and no two closed \ - messages; QED"); - action = Default::default(); - out_event = NetworkEvent::NodeClosed { - conn_info: conn_info.0, - endpoint, - error, - }; - } - Poll::Ready(CollectionEvent::NodeEvent { peer, event }) => { - action = Default::default(); - out_event = NetworkEvent::NodeEvent { conn_info: peer.info().0.clone(), event }; - } - } - - if let Some((peer_id, handler, first, rest)) = action.start_dial_out { - self.start_dial_out(peer_id, handler, first, rest); - } - - if let Some((peer_id, interrupt)) = action.take_over { - // TODO: improve proof or remove; this is too complicated right now - let interrupted = self.active_nodes - .interrupt(interrupt) - .expect("take_over is guaranteed to be gathered from `out_reach_attempts`; - we insert in out_reach_attempts only when we call \ - active_nodes.add_reach_attempt, and we remove only when we call \ - interrupt or when a reach attempt succeeds or errors; therefore the \ - out_reach_attempts should always be in sync with the actual \ - attempts; QED"); - let mut peer = self.active_nodes.peer_mut(&peer_id).unwrap(); - if let Poll::Ready(()) = peer.poll_ready_take_over(cx) { - peer.start_take_over(interrupted); - } else { - self.take_over_to_complete = Some((peer_id, interrupted)); - return Poll::Pending - } - } - - Poll::Ready(out_event) - } -} - -/// Internal struct indicating an action to perform on the network. -#[derive(Debug)] -#[must_use] -struct ActionItem { - start_dial_out: Option<(TPeerId, THandler, Multiaddr, Vec)>, - /// The `ReachAttemptId` should be interrupted, and the task for the given `PeerId` should take - /// over it. - take_over: Option<(TPeerId, ReachAttemptId)>, -} - -impl Default for ActionItem { - fn default() -> Self { - ActionItem { - start_dial_out: None, - take_over: None, - } - } -} - -/// Handles a node reached event from the collection. -/// -/// Returns an event to return from the stream. -/// -/// > **Note**: The event **must** have been produced by the collection of nodes, otherwise -/// > panics will likely happen. 
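`Network::poll` is a manual, non-`Stream` polling API: it takes a `Context` and returns one `NetworkEvent` at a time. A minimal sketch (assuming the `futures` crate, which this repository already depends on) of driving such an API from async code by wrapping it in `poll_fn`; the closure below is a stand-in for `network.poll(cx)`:

```rust
use futures::future::poll_fn;
use std::task::Poll;

async fn event_loop() {
    let mut remaining = 3;
    loop {
        // Stand-in for `Network::poll(cx)`: yields a few fake events,
        // then signals the end of the stream with `None`.
        let event = poll_fn(|_cx| {
            if remaining == 0 { return Poll::Ready(None); }
            remaining -= 1;
            Poll::Ready(Some(format!("event #{}", remaining)))
        }).await;
        match event {
            Some(ev) => println!("got {}", ev),
            None => break,
        }
    }
}

fn main() {
    futures::executor::block_on(event_loop());
}
```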
-fn handle_node_reached<'a, TTrans, TMuxer, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId>( - reach_attempts: &mut ReachAttempts<TPeerId>, - event: CollectionReachEvent<'_, TInEvent, TOutEvent, THandler, InternalReachErr<TTrans::Error, TConnInfo>, THandlerErr, (), (TConnInfo, ConnectedPoint), TPeerId>, -) -> (ActionItem<THandler, TPeerId>, NetworkEvent<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId>) -where - TTrans: Transport<Output = (TConnInfo, TMuxer)> + Clone, - TMuxer: StreamMuxer + Send + Sync + 'static, - TMuxer::OutboundSubstream: Send, - TInEvent: Send + 'static, - TOutEvent: Send + 'static, - TConnInfo: ConnectionInfo<PeerId = TPeerId> + Clone + Send + 'static, - TPeerId: Eq + Hash + AsRef<[u8]> + Clone, -{ - // We first start looking in the incoming attempts. While this makes the code less optimal, - // it also makes the logic easier. - if let Some(in_pos) = reach_attempts - .other_reach_attempts - .iter() - .position(|i| i.0 == event.reach_attempt_id()) - { - let (_, opened_endpoint) = reach_attempts.other_reach_attempts.swap_remove(in_pos); - let has_dial_prio = has_dial_prio(&reach_attempts.local_peer_id, event.peer_id()); - - // If we already have an active connection to this peer, a priority system comes into play. - // If we have a lower peer ID than the incoming one, we drop an incoming connection. - if event.would_replace() && has_dial_prio { - if let Some(ConnectedPoint::Dialer { .. }) = reach_attempts.connected_points.get(event.peer_id()) { - if let ConnectedPoint::Listener { local_addr, send_back_addr } = opened_endpoint { - return (Default::default(), NetworkEvent::IncomingConnectionError { - local_addr, - send_back_addr, - error: IncomingError::DeniedLowerPriority, - }); - } - } - } - - // Set the endpoint for this peer. - let closed_endpoint = reach_attempts.connected_points.insert(event.peer_id().clone(), opened_endpoint.clone()); - - // If we have dial priority, we keep the current outgoing attempt because it may already - // have succeeded without us knowing. It is possible that the remote has already closed - // its outgoing attempt because it sees our outgoing attempt as a success. - // However we cancel any further multiaddress to attempt in any situation. - let action = if has_dial_prio { - if let Some(attempt) = reach_attempts.out_reach_attempts.get_mut(&event.peer_id()) { - debug_assert_ne!(attempt.id, event.reach_attempt_id()); - attempt.next_attempts.clear(); - } - ActionItem::default() - } else { - if let Some(attempt) = reach_attempts.out_reach_attempts.remove(&event.peer_id()) { - debug_assert_ne!(attempt.id, event.reach_attempt_id()); - ActionItem { - take_over: Some((event.peer_id().clone(), attempt.id)), - .. Default::default() - } - } else { - ActionItem::default() - } - }; - - let (outcome, conn_info) = event.accept(()); - if let CollectionNodeAccept::ReplacedExisting(old_info, ()) = outcome { - let closed_endpoint = closed_endpoint - .expect("We insert into connected_points whenever a connection is opened and \ - remove only when a connection is closed; the underlying API is \ - guaranteed to always deliver a connection closed message after it has \ - been opened, and no two closed messages; QED"); - return (action, NetworkEvent::Replaced { - new_info: conn_info.0, - old_info: old_info.0, - endpoint: opened_endpoint, - closed_endpoint, - }); - } else { - return (action, NetworkEvent::Connected { - conn_info: conn_info.0, - endpoint: opened_endpoint - }); - } - } - - // Otherwise, try for outgoing attempts.
- let is_outgoing_and_ok = if let Some(attempt) = reach_attempts.out_reach_attempts.get(event.peer_id()) { - attempt.id == event.reach_attempt_id() - } else { - false - }; - - // We only remove the attempt from `out_reach_attempts` if it both matches the reach id - // and the expected peer id. - if is_outgoing_and_ok { - let attempt = reach_attempts.out_reach_attempts.remove(event.peer_id()) - .expect("is_outgoing_and_ok is true only if reach_attempts.out_reach_attempts.get(event.peer_id()) \ - returned Some"); - - let opened_endpoint = ConnectedPoint::Dialer { - address: attempt.cur_attempted, - }; - - let closed_endpoint = reach_attempts.connected_points - .insert(event.peer_id().clone(), opened_endpoint.clone()); - - let (outcome, conn_info) = event.accept(()); - if let CollectionNodeAccept::ReplacedExisting(old_info, ()) = outcome { - let closed_endpoint = closed_endpoint - .expect("We insert into connected_points whenever a connection is opened and \ - remove only when a connection is closed; the underlying API is guaranteed \ - to always deliver a connection closed message after it has been opened, \ - and no two closed messages; QED"); - return (Default::default(), NetworkEvent::Replaced { - new_info: conn_info.0, - old_info: old_info.0, - endpoint: opened_endpoint, - closed_endpoint, - }); - - } else { - return (Default::default(), NetworkEvent::Connected { - conn_info: conn_info.0, - endpoint: opened_endpoint - }); - } - } - - // We found no entry in either the outgoing or the incoming connections. - // TODO: improve proof or remove; this is too complicated right now - panic!("The API of collection guarantees that the id sent back in NodeReached (which is where \ - we call handle_node_reached) is one that was passed to add_reach_attempt. Whenever we \ - call add_reach_attempt, we also insert at the same time an entry either in \ - out_reach_attempts or in other_reach_attempts. It is therefore guaranteed that we \ - find back this ID in either of these two sets"); -} - -/// Returns true if `local` has dialing priority over `other`. -/// -/// This means that if `local` and `other` both dial each other, the connection from `local` should -/// be kept and the one from `other` will be dropped. -fn has_dial_prio<TPeerId>(local: &TPeerId, other: &TPeerId) -> bool -where - TPeerId: AsRef<[u8]>, -{ - local.as_ref() < other.as_ref() -} - -/// Handles a reach error event from the collection. -/// -/// Optionally returns an event to return from the stream. -/// -/// > **Note**: The event **must** have been produced by the collection of nodes, otherwise -/// > panics will likely happen. -fn handle_reach_error<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId>( - reach_attempts: &mut ReachAttempts<TPeerId>, - reach_id: ReachAttemptId, - error: InternalReachErr<TTrans::Error, TConnInfo>, - handler: THandler, -) -> (ActionItem<THandler, TPeerId>, NetworkEvent<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId>) -where - TTrans: Transport, - TConnInfo: ConnectionInfo<PeerId = TPeerId> + Send + 'static, - TPeerId: Eq + Hash + Clone, -{ - // Search for the attempt in `out_reach_attempts`.
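The `has_dial_prio` rule defined above is a pure byte-wise comparison, so for any two distinct peer ids exactly one side wins; this is exactly what the `local_prio_equivalence_relation` test further down in this diff asserts. A small worked example over raw byte slices:

```rust
// Same comparison as the deleted function, specialised to byte slices.
fn has_dial_prio(local: &[u8], other: &[u8]) -> bool {
    local < other // lexicographic byte-wise comparison
}

fn main() {
    let a = [1u8, 2, 3];
    let b = [1u8, 2, 4];
    // For distinct ids the relation is antisymmetric: one and only one
    // of the two directions has priority, so when two nodes dial each
    // other simultaneously they deterministically keep a single connection.
    assert!(has_dial_prio(&a, &b));
    assert!(!has_dial_prio(&b, &a));
}
```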
- // TODO: could be more optimal than iterating over everything - let out_reach_peer_id = reach_attempts - .out_reach_attempts - .iter() - .find(|(_, a)| a.id == reach_id) - .map(|(p, _)| p.clone()); - if let Some(peer_id) = out_reach_peer_id { - let attempt = reach_attempts.out_reach_attempts.remove(&peer_id) - .expect("out_reach_peer_id is a key that is grabbed from out_reach_attempts"); - - let num_remain = attempt.next_attempts.len(); - let failed_addr = attempt.cur_attempted.clone(); - - let new_state = if reach_attempts.connected_points.contains_key(&peer_id) { - PeerState::Connected - } else if num_remain == 0 { - PeerState::NotConnected - } else { - PeerState::Dialing { - num_pending_addresses: NonZeroUsize::new(num_remain) - .expect("We check that num_remain is not 0 right above; QED"), - } - }; - - let action = if !attempt.next_attempts.is_empty() { - let mut attempt = attempt; - let next_attempt = attempt.next_attempts.remove(0); - ActionItem { - start_dial_out: Some((peer_id.clone(), handler, next_attempt, attempt.next_attempts)), - .. Default::default() - } - } else { - Default::default() - }; - - let error = match error { - InternalReachErr::Transport(err) => NetworkReachError::Transport(err), - InternalReachErr::PeerIdMismatch { obtained } => { - NetworkReachError::PeerIdMismatch { obtained } - }, - InternalReachErr::FoundLocalPeerId => { - unreachable!("We only generate FoundLocalPeerId within dial() or accept(); neither \ - of these methods add an entry to out_reach_attempts; QED") - }, - }; - - return (action, NetworkEvent::DialError { - new_state, - peer_id, - multiaddr: failed_addr, - error, - }); - } - - // If this is not an outgoing reach attempt, check the incoming reach attempts. - if let Some(in_pos) = reach_attempts - .other_reach_attempts - .iter() - .position(|i| i.0 == reach_id) - { - let (_, endpoint) = reach_attempts.other_reach_attempts.swap_remove(in_pos); - match endpoint { - ConnectedPoint::Dialer { address } => { - let error = match error { - InternalReachErr::Transport(err) => UnknownPeerDialErr::Transport(err), - InternalReachErr::FoundLocalPeerId => UnknownPeerDialErr::FoundLocalPeerId, - InternalReachErr::PeerIdMismatch { .. } => { - unreachable!("We only generate PeerIdMismatch within start_dial_out(), - which doesn't add any entry in other_reach_attempts; QED") - }, - }; - return (Default::default(), NetworkEvent::UnknownPeerDialError { - multiaddr: address, - error, - handler, - }); - } - ConnectedPoint::Listener { local_addr, send_back_addr } => { - let error = match error { - InternalReachErr::Transport(err) => IncomingError::Transport(err), - InternalReachErr::FoundLocalPeerId => IncomingError::FoundLocalPeerId, - InternalReachErr::PeerIdMismatch { .. } => { - unreachable!("We only generate PeerIdMismatch within start_dial_out(), - which doesn't add any entry in other_reach_attempts; QED") - }, - }; - return (Default::default(), NetworkEvent::IncomingConnectionError { - local_addr, - send_back_addr, - error - }); - } - } - } - - // The id was neither in the outbound list nor the inbound list. - // TODO: improve proof or remove; this is too complicated right now - panic!("The API of collection guarantees that the id sent back in ReachError events \ - (which is where we call handle_reach_error) is one that was passed to \ - add_reach_attempt. Whenever we call add_reach_attempt, we also insert \ - at the same time an entry either in out_reach_attempts or in \ - other_reach_attempts. 
It is therefore guaranteed that we find back this ID in \ - either of these two sets"); -} - -/// State of a peer in the system. -pub enum Peer<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> -where - TTrans: Transport, -{ - /// We are connected to this peer. - Connected(PeerConnected<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId>), - - /// We are currently attempting to connect to this peer. - PendingConnect(PeerPendingConnect<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId>), - - /// We are not connected to this peer at all. - /// - /// > **Note**: It is however possible that a pending incoming connection is being negotiated - /// > and will connect to this peer, but we don't know it yet. - NotConnected(PeerNotConnected<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId>), - - /// The requested peer is the local node. - LocalNode, -} - -impl<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> fmt::Debug for - Peer<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> -where - TTrans: Transport, - TConnInfo: fmt::Debug + ConnectionInfo, - TPeerId: fmt::Debug + Eq + Hash, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - match *self { - Peer::Connected( PeerConnected { ref peer_id, ref connected_points, .. }) => { - f.debug_struct("Connected") - .field("peer_id", peer_id) - .field("connected_points", connected_points) - .finish() - } - Peer::PendingConnect( PeerPendingConnect { ref attempt, .. } ) => { - f.debug_struct("PendingConnect") - .field("attempt", attempt) - .finish() - } - Peer::NotConnected(PeerNotConnected { ref peer_id, .. }) => { - f.debug_struct("NotConnected") - .field("peer_id", peer_id) - .finish() - } - Peer::LocalNode => { - f.debug_struct("LocalNode") - .finish() - } - } - } -} - -// TODO: add other similar methods that wrap to the ones of `PeerNotConnected` -impl<'a, TTrans, TMuxer, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> - Peer<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> -where - TTrans: Transport + Clone, - TTrans::Error: Send + 'static, - TTrans::Dial: Send + 'static, - TMuxer: StreamMuxer + Send + Sync + 'static, - TMuxer::OutboundSubstream: Send, - TMuxer::Substream: Send, - TInEvent: Send + 'static, - TOutEvent: Send + 'static, - THandler: IntoNodeHandler<(TConnInfo, ConnectedPoint)> + Send + 'static, - THandler::Handler: NodeHandler, InEvent = TInEvent, OutEvent = TOutEvent, Error = THandlerErr> + Send + 'static, - ::OutboundOpenInfo: Send + 'static, // TODO: shouldn't be necessary - THandlerErr: error::Error + Send + 'static, - TConnInfo: fmt::Debug + ConnectionInfo + Send + 'static, - TPeerId: Eq + Hash + Clone + Send + 'static, -{ - /// If we are connected, returns the `PeerConnected`. - pub fn into_connected(self) -> Option> { - match self { - Peer::Connected(peer) => Some(peer), - _ => None, - } - } - - /// If a connection is pending, returns the `PeerPendingConnect`. - pub fn into_pending_connect(self) -> Option> { - match self { - Peer::PendingConnect(peer) => Some(peer), - _ => None, - } - } - - /// If we are not connected, returns the `PeerNotConnected`. - pub fn into_not_connected(self) -> Option> { - match self { - Peer::NotConnected(peer) => Some(peer), - _ => None, - } - } - - /// If we're not connected, opens a new connection to this peer using the given multiaddr. 
- /// - /// If we reach a peer but the `PeerId` doesn't correspond to the one we're expecting, then - /// the whole connection is immediately closed. - /// - /// Returns an error if we are `LocalNode`. - pub fn or_connect(self, addr: Multiaddr, handler: THandler) - -> Result, Self> - { - self.or_connect_with(move |_| addr, handler) - } - - /// If we're not connected, calls the function passed as parameter and opens a new connection - /// using the returned address. - /// - /// If we reach a peer but the `PeerId` doesn't correspond to the one we're expecting, then - /// the whole connection is immediately closed. - /// - /// Returns an error if we are `LocalNode`. - pub fn or_connect_with(self, addr: TFn, handler: THandler) - -> Result, Self> - where - TFn: FnOnce(&TPeerId) -> Multiaddr, - { - match self { - Peer::Connected(peer) => Ok(PeerPotentialConnect::Connected(peer)), - Peer::PendingConnect(peer) => Ok(PeerPotentialConnect::PendingConnect(peer)), - Peer::NotConnected(peer) => { - let addr = addr(&peer.peer_id); - Ok(PeerPotentialConnect::PendingConnect(peer.connect(addr, handler))) - }, - Peer::LocalNode => Err(Peer::LocalNode), - } - } -} - -/// Peer we are potentially going to connect to. -pub enum PeerPotentialConnect<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> -where - TTrans: Transport -{ - /// We are connected to this peer. - Connected(PeerConnected<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId>), - - /// We are currently attempting to connect to this peer. - PendingConnect(PeerPendingConnect<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId>), -} - -impl<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> - PeerPotentialConnect<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> -where - TTrans: Transport, - TConnInfo: ConnectionInfo, - TPeerId: Eq + Hash, -{ - /// Closes the connection or the connection attempt. - // TODO: consider returning a `PeerNotConnected` - pub fn close(self) { - match self { - PeerPotentialConnect::Connected(peer) => peer.close(), - PeerPotentialConnect::PendingConnect(peer) => peer.interrupt(), - } - } - - /// If we are connected, returns the `PeerConnected`. - pub fn into_connected(self) -> Option> { - match self { - PeerPotentialConnect::Connected(peer) => Some(peer), - _ => None, - } - } - - /// If a connection is pending, returns the `PeerPendingConnect`. - pub fn into_pending_connect(self) -> Option> { - match self { - PeerPotentialConnect::PendingConnect(peer) => Some(peer), - _ => None, - } - } -} - -/// Access to a peer we are connected to. -pub struct PeerConnected<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> -where TTrans: Transport, -{ - /// Reference to the `active_nodes` of the parent. - active_nodes: &'a mut CollectionStream, THandlerErr, (), (TConnInfo, ConnectedPoint), TPeerId>, - /// Reference to the `connected_points` field of the parent. - connected_points: &'a mut FnvHashMap, - /// Reference to the `out_reach_attempts` field of the parent. - out_reach_attempts: &'a mut FnvHashMap, - peer_id: TPeerId, -} - -impl<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> PeerConnected<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> -where - TTrans: Transport, - TConnInfo: ConnectionInfo, - TPeerId: Eq + Hash, -{ - /// Closes the connection to this node. - /// - /// No `NodeClosed` message will be generated for this node. 
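`or_connect` folds the four peer states into either "a connection exists or is being set up" or an error for the local node. A hedged sketch of that state collapse, with local stand-in types rather than the crate's generic structs:

```rust
enum Peer { Connected, PendingConnect, NotConnected, LocalNode }
enum PotentialConnect { Connected, PendingConnect }

fn or_connect(peer: Peer, addr: &str) -> Result<PotentialConnect, &'static str> {
    match peer {
        Peer::Connected => Ok(PotentialConnect::Connected),
        Peer::PendingConnect => Ok(PotentialConnect::PendingConnect),
        Peer::NotConnected => {
            // A real implementation starts dialing `addr` here.
            println!("dialing {}", addr);
            Ok(PotentialConnect::PendingConnect)
        }
        Peer::LocalNode => Err("cannot dial ourselves"),
    }
}

fn main() {
    assert!(or_connect(Peer::NotConnected, "/ip4/10.0.0.1/tcp/4001").is_ok());
    assert!(or_connect(Peer::LocalNode, "/ip4/10.0.0.1/tcp/4001").is_err());
}
```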
- // TODO: consider returning a `PeerNotConnected`; however this makes all the borrows things - // much more annoying to deal with - pub fn close(self) { - if let Some(reach_attempt) = self.out_reach_attempts.remove(&self.peer_id) { - self.active_nodes - .interrupt(reach_attempt.id) - .expect("Elements in out_reach_attempts are in sync with active_nodes; QED"); - } - - self.connected_points.remove(&self.peer_id); - self.active_nodes.peer_mut(&self.peer_id) - .expect("A PeerConnected is always created with a PeerId in active_nodes; QED") - .close(); - } - - /// Returns the connection info for this node. - // TODO: we would love to return a `&'a TConnInfo`, but this isn't possible because of lifetime - // issues; see the corresponding method in collection.rs module - // TODO: should take a `&self`, but the API in collection.rs requires &mut - pub fn connection_info(&mut self) -> TConnInfo - where - TConnInfo: Clone, - { - self.active_nodes.peer_mut(&self.peer_id) - .expect("A PeerConnected is always created with a PeerId in active_nodes; QED") - .info().0.clone() - } - - /// Returns the endpoint we're connected to. - pub fn endpoint(&self) -> &ConnectedPoint { - self.connected_points.get(&self.peer_id) - .expect("We insert into connected_points whenever a connection is opened and remove \ - only when a connection is closed; the underlying API is guaranteed to always \ - deliver a connection closed message after it has been opened, and no two \ - closed messages; QED") - } - - /// Sends an event to the handler of the node. - pub fn send_event(&'a mut self, event: TInEvent) -> impl Future + 'a { - let mut event = Some(event); - futures::future::poll_fn(move |cx| { - match self.poll_ready_event(cx) { - Poll::Ready(()) => { - self.start_send_event(event.take().expect("Future called after finished")); - Poll::Ready(()) - }, - Poll::Pending => Poll::Pending, - } - }) - } - - /// Begin sending an event to the node. Must be called only after a successful call to - /// `poll_ready_event`. - pub fn start_send_event(&mut self, event: TInEvent) { - self.active_nodes.peer_mut(&self.peer_id) - .expect("A PeerConnected is always created with a PeerId in active_nodes; QED") - .start_send_event(event) - } - - /// Make sure we are ready to accept an event to be sent with `start_send_event`. - pub fn poll_ready_event(&mut self, cx: &mut Context) -> Poll<()> { - self.active_nodes.peer_mut(&self.peer_id) - .expect("A PeerConnected is always created with a PeerId in active_nodes; QED") - .poll_ready_event(cx) - } -} - -/// Access to a peer we are attempting to connect to. -#[derive(Debug)] -pub struct PeerPendingConnect<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> -where - TTrans: Transport -{ - attempt: OccupiedEntry<'a, TPeerId, OutReachAttempt>, - active_nodes: &'a mut CollectionStream, THandlerErr, (), (TConnInfo, ConnectedPoint), TPeerId>, -} - -impl<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> - PeerPendingConnect<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> -where - TTrans: Transport, - TConnInfo: ConnectionInfo, - TPeerId: Eq + Hash, -{ - /// Interrupt this connection attempt. 
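The `send_event` method above packages the two-phase `poll_ready_event` / `start_send_event` protocol into a single future, so async callers can simply `.await` it. A self-contained sketch of that wrapping (assuming the `futures` crate; `PeerConnected` here is a stand-in, not the deleted struct):

```rust
use futures::future::poll_fn;
use std::task::{Context, Poll};

struct PeerConnected { ready: bool }

impl PeerConnected {
    fn poll_ready_event(&mut self, _cx: &mut Context<'_>) -> Poll<()> {
        if self.ready { Poll::Ready(()) } else { Poll::Pending }
    }
    fn start_send_event(&mut self, ev: &str) { println!("sent {}", ev); }

    async fn send_event(&mut self, ev: &str) {
        // Wait for readiness, then hand the event off, mirroring the
        // poll_fn-based implementation in the deleted code.
        poll_fn(|cx| self.poll_ready_event(cx)).await;
        self.start_send_event(ev);
    }
}

fn main() {
    let mut peer = PeerConnected { ready: true };
    futures::executor::block_on(peer.send_event("ping"));
}
```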
- // TODO: consider returning a PeerNotConnected; however that is a real pain in terms of - // borrows - pub fn interrupt(self) { - let attempt = self.attempt.remove(); - if self.active_nodes.interrupt(attempt.id).is_err() { - // TODO: improve proof or remove; this is too complicated right now - panic!("We retrieved this attempt.id from out_reach_attempts. We insert in \ - out_reach_attempts only at the same time as we call add_reach_attempt. \ - Whenever we receive a NodeReached, NodeReplaced or ReachError event, which \ - invalidate the attempt.id, we also remove the corresponding entry in \ - out_reach_attempts."); - } - } - - /// Returns the multiaddress we're currently trying to dial. - pub fn attempted_multiaddr(&self) -> &Multiaddr { - &self.attempt.get().cur_attempted - } - - /// Returns a list of the multiaddresses we're going to try if the current dialing fails. - pub fn pending_multiaddrs(&self) -> impl Iterator<Item = &Multiaddr> { - self.attempt.get().next_attempts.iter() - } - - /// Adds new multiaddrs to attempt if the current dialing fails. - /// - /// Doesn't do anything for multiaddresses that are already in the queue. - pub fn append_multiaddr_attempts(&mut self, addrs: impl IntoIterator<Item = Multiaddr>) { - for addr in addrs { - self.append_multiaddr_attempt(addr); - } - } - - /// Adds a new multiaddr to attempt if the current dialing fails. - /// - /// Doesn't do anything if that multiaddress is already in the queue. - pub fn append_multiaddr_attempt(&mut self, addr: Multiaddr) { - if self.attempt.get().next_attempts.iter().any(|a| a == &addr) { - return; - } - - self.attempt.get_mut().next_attempts.push(addr); - } -} - -/// Access to a peer we're not connected to. -pub struct PeerNotConnected<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> -where - TTrans: Transport, -{ - peer_id: TPeerId, - nodes: &'a mut Network<TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId>, -} - -impl<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> fmt::Debug for - PeerNotConnected<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> -where - TTrans: Transport, - TPeerId: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - f.debug_struct("PeerNotConnected") - .field("peer_id", &self.peer_id) - .finish() - } -} - -impl<'a, TTrans, TInEvent, TOutEvent, TMuxer, THandler, THandlerErr, TConnInfo, TPeerId> - PeerNotConnected<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> -where - TTrans: Transport<Output = (TConnInfo, TMuxer)> + Clone, - TTrans::Error: Send + 'static, - TTrans::Dial: Send + 'static, - TMuxer: StreamMuxer + Send + Sync + 'static, - TMuxer::OutboundSubstream: Send, - TMuxer::Substream: Send, - THandler: IntoNodeHandler<(TConnInfo, ConnectedPoint)> + Send + 'static, - THandler::Handler: NodeHandler<Substream = Substream<TMuxer>, InEvent = TInEvent, OutEvent = TOutEvent, Error = THandlerErr> + Send + 'static, - <THandler::Handler as NodeHandler>::OutboundOpenInfo: Send + 'static, // TODO: shouldn't be necessary - THandlerErr: error::Error + Send + 'static, - TInEvent: Send + 'static, - TOutEvent: Send + 'static, -{ - /// Attempts a new connection to this node using the given multiaddress. - /// - /// If we reach a peer but the `PeerId` doesn't correspond to the one we're expecting, then - /// the whole connection is immediately closed.
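The deduplication rule in `append_multiaddr_attempt` above is small enough to show in isolation: an address already queued is silently ignored. A standalone model over strings:

```rust
// Same check as the deleted method: scan the queue before pushing.
fn append_attempt(queue: &mut Vec<String>, addr: String) {
    if queue.iter().any(|a| *a == addr) {
        return; // already scheduled, keep the queue unchanged
    }
    queue.push(addr);
}

fn main() {
    let mut queue = vec!["/ip4/127.0.0.1/tcp/4001".to_string()];
    append_attempt(&mut queue, "/ip4/127.0.0.1/tcp/4001".to_string());
    append_attempt(&mut queue, "/ip4/127.0.0.1/tcp/4002".to_string());
    assert_eq!(queue.len(), 2); // the duplicate was ignored
}
```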
- pub fn connect(self, addr: Multiaddr, handler: THandler) - -> PeerPendingConnect<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> - where - TConnInfo: fmt::Debug + ConnectionInfo + Send + 'static, - TPeerId: Eq + Hash + Clone + Send + 'static, - { - self.connect_inner(handler, addr, Vec::new()) - } - - /// Attempts a new connection to this node using the given multiaddresses. - /// - /// The multiaddresses passed as parameter will be tried one by one. - /// - /// Returns an error if the iterator is empty. - /// - /// If we reach a peer but the `PeerId` doesn't correspond to the one we're expecting, then - /// the whole connection is immediately closed. - pub fn connect_iter(self, addrs: TIter, handler: THandler) - -> Result, Self> - where - TIter: IntoIterator, - TConnInfo: fmt::Debug + ConnectionInfo + Send + 'static, - TPeerId: Eq + Hash + Clone + Send + 'static, - { - let mut addrs = addrs.into_iter(); - let first = match addrs.next() { - Some(f) => f, - None => return Err(self) - }; - let rest = addrs.collect(); - Ok(self.connect_inner(handler, first, rest)) - } - - /// Moves the given node to a connected state using the given connection info and muxer. - /// - /// No `Connected` event is generated for this action. - /// - /// # Panic - /// - /// Panics if `conn_info.peer_id()` is not the current peer. - /// - pub fn inject_connection(self, conn_info: TConnInfo, connected_point: ConnectedPoint, muxer: TMuxer, handler: THandler::Handler) - -> PeerConnected<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> - where - TConnInfo: fmt::Debug + ConnectionInfo + Clone + Send + 'static, - TPeerId: Eq + Hash + Clone, - { - if conn_info.peer_id() != &self.peer_id { - panic!("Mismatch between conn_info PeerId and request PeerId"); - } - - match self.nodes.active_nodes.add_connection((conn_info, connected_point), (), muxer, handler) { - CollectionNodeAccept::NewEntry => {}, - CollectionNodeAccept::ReplacedExisting { .. } => - unreachable!("We can only build a PeerNotConnected if we don't have this peer in \ - the collection yet"), - } - - PeerConnected { - active_nodes: &mut self.nodes.active_nodes, - connected_points: &mut self.nodes.reach_attempts.connected_points, - out_reach_attempts: &mut self.nodes.reach_attempts.out_reach_attempts, - peer_id: self.peer_id, - } - } - - /// Inner implementation of `connect`. - fn connect_inner(self, handler: THandler, first: Multiaddr, rest: Vec) - -> PeerPendingConnect<'a, TTrans, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo, TPeerId> - where - TConnInfo: fmt::Debug + ConnectionInfo + Send + 'static, - TPeerId: Eq + Hash + Clone + Send + 'static, - { - self.nodes.start_dial_out(self.peer_id.clone(), handler, first, rest); - PeerPendingConnect { - attempt: match self.nodes.reach_attempts.out_reach_attempts.entry(self.peer_id) { - Entry::Occupied(e) => e, - Entry::Vacant(_) => { - panic!("We called out_reach_attempts.insert with this peer id just above") - }, - }, - active_nodes: &mut self.nodes.active_nodes, - } - } -} diff --git a/core/src/nodes/network/tests.rs b/core/src/nodes/network/tests.rs deleted file mode 100644 index c4f307bb..00000000 --- a/core/src/nodes/network/tests.rs +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. 
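`connect_iter` above rejects an empty iterator and otherwise splits the addresses into the one dialed immediately and the fallbacks queued for later. A hedged sketch of just that first/rest split, using `String` in place of `Multiaddr`:

```rust
fn split_addrs<I: IntoIterator<Item = String>>(addrs: I) -> Result<(String, Vec<String>), ()> {
    let mut iter = addrs.into_iter();
    let first = iter.next().ok_or(())?; // no addresses: report an error
    Ok((first, iter.collect()))         // remaining addresses become fallbacks
}

fn main() {
    let (first, rest) = split_addrs(vec![
        "/ip4/10.0.0.1/tcp/4001".to_string(),
        "/ip4/10.0.0.2/tcp/4001".to_string(),
    ]).unwrap();
    assert_eq!(first, "/ip4/10.0.0.1/tcp/4001");
    assert_eq!(rest.len(), 1);
    assert!(split_addrs(Vec::<String>::new()).is_err());
}
```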
-// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -#![cfg(test)] - -use super::*; - -#[test] -fn local_prio_equivalence_relation() { - for _ in 0..1000 { - let a = PeerId::random(); - let b = PeerId::random(); - assert_ne!(has_dial_prio(&a, &b), has_dial_prio(&b, &a)); - } -} diff --git a/core/src/nodes/tasks.rs b/core/src/nodes/tasks.rs deleted file mode 100644 index 5d72fa22..00000000 --- a/core/src/nodes/tasks.rs +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Management of tasks handling nodes. -//! -//! The core type is a [`task::Task`], which implements [`futures::Future`] -//! and connects and handles a node. A task receives and sends messages -//! ([`task::FromTaskMessage`], [`task::ToTaskMessage`]) to the outside. -//! -//! A set of tasks is managed by a [`Manager`] which creates tasks when a -//! node should be connected to (cf. [`Manager::add_reach_attempt`]) or -//! an existing connection to a node should be driven forward (cf. -//! [`Manager::add_connection`]). Tasks can be referred to by [`TaskId`] -//! and messages can be sent to individual tasks or all (cf. -//! [`Manager::poll_broadcast`]). Messages produced by tasks can be -//! retrieved by polling the manager (cf. [`Manager::poll`]). - -mod error; -mod manager; -mod task; - -pub use error::Error; -pub use manager::{ClosedTask, TaskEntry, Manager, Event}; - -/// Task identifier.
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] -pub struct TaskId(usize); - diff --git a/core/src/nodes/tasks/error.rs b/core/src/nodes/tasks/error.rs deleted file mode 100644 index 311fc1af..00000000 --- a/core/src/nodes/tasks/error.rs +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -use crate::nodes::handled_node::HandledNodeError; -use std::{fmt, error}; - -/// Error that can happen in a task. -#[derive(Debug)] -pub enum Error { - /// An error happened while we were trying to reach the node. - Reach(R), - /// An error happened after the node has been reached. - Node(HandledNodeError) -} - -impl fmt::Display for Error -where - R: fmt::Display, - H: fmt::Display -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Error::Reach(err) => write!(f, "reach error: {}", err), - Error::Node(err) => write!(f, "node error: {}", err) - } - } -} - -impl error::Error for Error -where - R: error::Error + 'static, - H: error::Error + 'static -{ - fn source(&self) -> Option<&(dyn error::Error + 'static)> { - match self { - Error::Reach(err) => Some(err), - Error::Node(err) => Some(err) - } - } -} - diff --git a/core/src/nodes/tasks/manager.rs b/core/src/nodes/tasks/manager.rs deleted file mode 100644 index 47c0a63c..00000000 --- a/core/src/nodes/tasks/manager.rs +++ /dev/null @@ -1,463 +0,0 @@ -// Copyright 2018 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -use crate::{ - Executor, PeerId, - muxing::StreamMuxer, - nodes::{ - handled_node::{HandledNode, IntoNodeHandler, NodeHandler}, - node::Substream - } -}; -use fnv::FnvHashMap; -use futures::{prelude::*, channel::mpsc, stream::FuturesUnordered}; -use std::{collections::hash_map::{Entry, OccupiedEntry}, error, fmt, pin::Pin, task::Context, task::Poll}; -use super::{TaskId, task::{Task, FromTaskMessage, ToTaskMessage}, Error}; - -// Implementor notes -// ================= -// -// This collection of nodes spawns a `Task` for each individual node to process. -// This means that events happen asynchronously at the same time as the -// `Manager` is being polled. -// -// In order to make the API non-racy and avoid issues, we completely separate -// the state in the `Manager` from the states that the `Task` can access. -// They are only allowed to exchange messages. The state in the `Manager` is -// therefore delayed compared to the tasks, and is updated only when `poll()` -// is called. -// -// The only thing that we must be careful about is substreams, as they are -// "detached" from the state of the `Manager` and allowed to process -// concurrently. This is why there is no "substream closed" event being -// reported, as it could potentially create confusions and race conditions in -// the user's code. See similar comments in the documentation of `NodeStream`. -// - -/// Implementation of [`Stream`] that handles a collection of nodes. -pub struct Manager { - /// Collection of managed tasks. - /// - /// Closing the sender interrupts the task. It is possible that we receive - /// messages from tasks that used to be in this collection but no longer - /// are, in which case we should ignore them. - tasks: FnvHashMap>, - - /// Identifier for the next task to spawn. - next_task_id: TaskId, - - /// Custom executor where we spawn the nodes' tasks. If `None`, then we push tasks to the - /// `local_spawns` list instead. - executor: Option>, - - /// If no executor is available, we move tasks to this set, and futures are polled on the - /// current thread instead. - local_spawns: FuturesUnordered + Send>>>, - - /// Sender to emit events to the outside. Meant to be cloned and sent to tasks. - events_tx: mpsc::Sender<(FromTaskMessage, TaskId)>, - - /// Receiver side for the events. - events_rx: mpsc::Receiver<(FromTaskMessage, TaskId)> -} - -impl fmt::Debug for Manager -where - T: fmt::Debug -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_map() - .entries(self.tasks.iter().map(|(id, task)| (id, &task.user_data))) - .finish() - } -} - -/// Information about a running task. -/// -/// Contains the sender to deliver event messages to the task, and -/// the associated user data. -struct TaskInfo { - /// channel endpoint to send messages to the task - sender: mpsc::Sender>, - /// task associated data - user_data: T, -} - -/// Event produced by the [`Manager`]. -#[derive(Debug)] -pub enum Event<'a, I, O, H, E, HE, T, C = PeerId> { - /// A task has been closed. - /// - /// This happens once the node handler closes or an error happens. - TaskClosed { - /// The task that has been closed. - task: ClosedTask, - /// What happened. 
- result: Error, - /// If the task closed before reaching the node, this contains - /// the handler that was passed to `add_reach_attempt`. - handler: Option - }, - - /// A task has successfully connected to a node. - NodeReached { - /// The task that succeeded. - task: TaskEntry<'a, I, T>, - /// Identifier of the node. - conn_info: C - }, - - /// A task has produced an event. - NodeEvent { - /// The task that produced the event. - task: TaskEntry<'a, I, T>, - /// The produced event. - event: O - } -} - -impl Manager { - /// Creates a new task manager. If `Some` is passed, uses the given executor to spawn tasks. - /// Otherwise, background tasks are executed locally when you call `poll`. - pub fn new(executor: Option>) -> Self { - let (tx, rx) = mpsc::channel(1); - Self { - tasks: FnvHashMap::default(), - next_task_id: TaskId(0), - executor, - local_spawns: FuturesUnordered::new(), - events_tx: tx, - events_rx: rx - } - } - - /// Adds to the manager a future that tries to reach a node. - /// - /// This method spawns a task dedicated to resolving this future and - /// processing the node's events. - pub fn add_reach_attempt(&mut self, future: F, user_data: T, handler: H) -> TaskId - where - F: Future> + Send + 'static, - H: IntoNodeHandler + Send + 'static, - H::Handler: NodeHandler, InEvent = I, OutEvent = O, Error = HE> + Send + 'static, - E: error::Error + Send + 'static, - HE: error::Error + Send + 'static, - I: Send + 'static, - O: Send + 'static, - ::OutboundOpenInfo: Send + 'static, - M: StreamMuxer + Send + Sync + 'static, - M::OutboundSubstream: Send + 'static, - C: Send + 'static - { - let task_id = self.next_task_id; - self.next_task_id.0 += 1; - - let (tx, rx) = mpsc::channel(4); - self.tasks.insert(task_id, TaskInfo { sender: tx, user_data }); - - let task = Box::pin(Task::new(task_id, self.events_tx.clone(), rx, future, handler)); - if let Some(executor) = &self.executor { - executor.exec(task as Pin>) - } else { - self.local_spawns.push(task); - } - task_id - } - - /// Adds an existing connection to a node to the collection. - /// - /// This method spawns a task dedicated to processing the node's events. - /// - /// No `NodeReached` event will be emitted for this task, since the node has already been - /// reached. - pub fn add_connection(&mut self, user_data: T, muxer: M, handler: Handler) -> TaskId - where - H: IntoNodeHandler + Send + 'static, - Handler: NodeHandler, InEvent = I, OutEvent = O, Error = HE> + Send + 'static, - E: error::Error + Send + 'static, - HE: error::Error + Send + 'static, - I: Send + 'static, - O: Send + 'static, - ::OutboundOpenInfo: Send + 'static, - M: StreamMuxer + Send + Sync + 'static, - M::OutboundSubstream: Send + 'static, - C: Send + 'static - { - let task_id = self.next_task_id; - self.next_task_id.0 += 1; - - let (tx, rx) = mpsc::channel(4); - self.tasks.insert(task_id, TaskInfo { sender: tx, user_data }); - - let task: Task>>, _, _, _, _, _, _> = - Task::node(task_id, self.events_tx.clone(), rx, HandledNode::new(muxer, handler)); - - if let Some(executor) = &self.executor { - executor.exec(Box::pin(task)) - } else { - self.local_spawns.push(Box::pin(task)); - } - - task_id - } - - /// Sends a message to all the tasks, including the pending ones. - /// - /// This function is "atomic", in the sense that if `Poll::Pending` is returned then no event - /// has been sent to any node yet. 
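// [Editor's note] The "atomic" broadcast documented above is a two-phase
// channel pattern: `poll_ready` every sender first and return `Poll::Pending`
// if any of them lacks capacity; only then `start_send` to all of them, so
// either every task receives the event or none does. A self-contained sketch
// over plain `futures::channel::mpsc` senders; the function and variable
// names are illustrative, not this module's API.

use futures::channel::mpsc;
use std::task::{Context, Poll};

fn broadcast<T: Clone>(
    senders: &mut [mpsc::Sender<T>],
    event: &T,
    cx: &mut Context<'_>,
) -> Poll<()> {
    // Phase 1: reserve a slot in every channel before sending anything.
    for tx in senders.iter_mut() {
        if tx.poll_ready(cx).is_pending() {
            return Poll::Pending;
        }
    }
    // Phase 2: capacity was reserved just above, so a "full" error cannot
    // occur here; a disconnected receiver means the task is gone and the
    // error is deliberately ignored.
    for tx in senders.iter_mut() {
        let _ = tx.start_send(event.clone());
    }
    Poll::Ready(())
}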
- #[must_use] - pub fn poll_broadcast(&mut self, event: &I, cx: &mut Context) -> Poll<()> - where - I: Clone - { - for task in self.tasks.values_mut() { - if let Poll::Pending = task.sender.poll_ready(cx) { - return Poll::Pending; - } - } - - for task in self.tasks.values_mut() { - let msg = ToTaskMessage::HandlerEvent(event.clone()); - match task.sender.start_send(msg) { - Ok(()) => {}, - Err(ref err) if err.is_full() => - panic!("poll_ready returned Poll::Ready just above; qed"), - Err(_) => {}, - } - } - - Poll::Ready(()) - } - - /// Grants access to an object that allows controlling a task of the collection. - /// - /// Returns `None` if the task id is invalid. - pub fn task(&mut self, id: TaskId) -> Option> { - match self.tasks.entry(id) { - Entry::Occupied(inner) => Some(TaskEntry { inner }), - Entry::Vacant(_) => None, - } - } - - /// Returns a list of all the active tasks. - pub fn tasks<'a>(&'a self) -> impl Iterator + 'a { - self.tasks.keys().cloned() - } - - /// Provides an API similar to `Stream`, except that it cannot produce an error. - pub fn poll(&mut self, cx: &mut Context) -> Poll> { - // Advance the content of `local_spawns`. - while let Poll::Ready(Some(_)) = Stream::poll_next(Pin::new(&mut self.local_spawns), cx) {} - - let (message, task_id) = loop { - match Stream::poll_next(Pin::new(&mut self.events_rx), cx) { - Poll::Ready(Some((message, task_id))) => { - // If the task id is no longer in `self.tasks`, that means that the user called - // `close()` on this task earlier. Therefore no new event should be generated - // for this task. - if self.tasks.contains_key(&task_id) { - break (message, task_id) - } - } - Poll::Pending => return Poll::Pending, - Poll::Ready(None) => unreachable!("sender and receiver have same lifetime"), - } - }; - - Poll::Ready(match message { - FromTaskMessage::NodeEvent(event) => - Event::NodeEvent { - task: match self.tasks.entry(task_id) { - Entry::Occupied(inner) => TaskEntry { inner }, - Entry::Vacant(_) => panic!("poll_inner only returns valid TaskIds; QED") - }, - event - }, - FromTaskMessage::NodeReached(conn_info) => - Event::NodeReached { - task: match self.tasks.entry(task_id) { - Entry::Occupied(inner) => TaskEntry { inner }, - Entry::Vacant(_) => panic!("poll_inner only returns valid TaskIds; QED") - }, - conn_info - }, - FromTaskMessage::TaskClosed(result, handler) => { - let entry = self.tasks.remove(&task_id) - .expect("poll_inner only returns valid TaskIds; QED"); - Event::TaskClosed { - task: ClosedTask::new(task_id, entry.sender, entry.user_data), - result, - handler - } - } - }) - } -} - -/// Access to a task in the collection. -pub struct TaskEntry<'a, E, T> { - inner: OccupiedEntry<'a, TaskId, TaskInfo> -} - -impl<'a, E, T> TaskEntry<'a, E, T> { - /// Begin sending an event to the given node. Must be called only after a successful call to - /// `poll_ready_event`. - pub fn start_send_event(&mut self, event: E) { - let msg = ToTaskMessage::HandlerEvent(event); - self.start_send_event_msg(msg); - } - - /// Make sure we are ready to accept an event to be sent with `start_send_event`. - pub fn poll_ready_event(&mut self, cx: &mut Context) -> Poll<()> { - self.poll_ready_event_msg(cx) - } - - /// Returns the user data associated with the task. - pub fn user_data(&self) -> &T { - &self.inner.get().user_data - } - - /// Returns the user data associated with the task. - pub fn user_data_mut(&mut self) -> &mut T { - &mut self.inner.get_mut().user_data - } - - /// Returns the task id. 
- pub fn id(&self) -> TaskId { - *self.inner.key() - } - - /// Closes the task. Returns the user data. - /// - /// No further event will be generated for this task, but the connection inside the task will - /// continue to run until the `ClosedTask` is destroyed. - pub fn close(self) -> ClosedTask { - let id = *self.inner.key(); - let task = self.inner.remove(); - ClosedTask::new(id, task.sender, task.user_data) - } - - /// Gives ownership of a closed task to this task. - /// As soon as our task (`self`) has some acknowledgment from the remote - /// that its connection is alive, it will close the connection held by `t`. - /// - /// Must be called only after a successful call to `poll_ready_take_over`. - pub fn start_take_over(&mut self, t: ClosedTask) { - self.start_send_event_msg(ToTaskMessage::TakeOver(t.sender)); - } - - /// Make sure we are ready to take over with `start_take_over`. - pub fn poll_ready_take_over(&mut self, cx: &mut Context) -> Poll<()> { - self.poll_ready_event_msg(cx) - } - - /// Sends a message to the task. Must be called only after a successful call to - /// `poll_ready_event`. - /// - /// The API mimics that of [`futures::Sink`]. - fn start_send_event_msg(&mut self, msg: ToTaskMessage) { - // It is possible that the sender is closed if the background task has already finished - // but the local state hasn't been updated yet because we haven't been polled in the - // meanwhile. - match self.inner.get_mut().sender.start_send(msg) { - Ok(()) => {}, - Err(ref err) if err.is_full() => {}, // TODO: somehow report to user? - Err(_) => {}, - } - } - - /// Wait until we have space to send an event using `start_send_event_msg`. - fn poll_ready_event_msg(&mut self, cx: &mut Context) -> Poll<()> { - // It is possible that the sender is closed if the background task has already finished - // but the local state hasn't been updated yet because we haven't been polled in the - // meanwhile. - let task = self.inner.get_mut(); - task.sender.poll_ready(cx).map(|_| ()) - } -} - -impl fmt::Debug for TaskEntry<'_, E, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_tuple("TaskEntry") - .field(&self.id()) - .field(self.user_data()) - .finish() - } -} - -/// Task after it has been closed. - /// - /// The connection to the remote is potentially still going on, but no new - /// event for this task will be received. -pub struct ClosedTask { - /// Identifier of the task that closed. - /// - /// No longer corresponds to anything, but can be reported to the user. - id: TaskId, - - /// The channel to the task. - /// - /// The task will continue to work for as long as this channel is alive, - /// but events produced by it are ignored. - sender: mpsc::Sender>, - - /// The data provided by the user. - user_data: T -} - -impl ClosedTask { - /// Create a new `ClosedTask` value. - fn new(id: TaskId, sender: mpsc::Sender>, user_data: T) -> Self { - Self { id, sender, user_data } - } - - /// Returns the task id. - /// - /// Note that this task is no longer managed and therefore calling - /// `Manager::task()` with this ID will fail. - pub fn id(&self) -> TaskId { - self.id - } - - /// Returns the user data associated with the task. - pub fn user_data(&self) -> &T { - &self.user_data - } - - /// Returns a mutable reference to the user data associated with the task. - pub fn user_data_mut(&mut self) -> &mut T { - &mut self.user_data - } - - /// Finish destroying the task and yield the user data. - /// This closes the connection to the remote.
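// [Editor's note] The two send helpers above deliberately swallow channel
// errors: a "full" error only means the caller skipped `poll_ready`, and a
// closed channel means the background task already finished and the local
// state will catch up on the next poll. A self-contained sketch of that
// best-effort send; the function name is illustrative.

use futures::channel::mpsc;

fn best_effort_send<T>(sender: &mut mpsc::Sender<T>, msg: T) {
    match sender.start_send(msg) {
        Ok(()) => {}
        // No capacity: callers are expected to have called `poll_ready` first.
        Err(ref e) if e.is_full() => {}
        // Receiver dropped: the task is already gone; ignore the message.
        Err(_) => {}
    }
}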
- pub fn into_user_data(self) -> T { - self.user_data - } -} - -impl fmt::Debug for ClosedTask { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_tuple("ClosedTask") - .field(&self.id) - .field(&self.user_data) - .finish() - } -} diff --git a/core/src/nodes/tasks/task.rs b/core/src/nodes/tasks/task.rs deleted file mode 100644 index 24c4a280..00000000 --- a/core/src/nodes/tasks/task.rs +++ /dev/null @@ -1,336 +0,0 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -use crate::{ - muxing::StreamMuxer, - nodes::{ - handled_node::{HandledNode, IntoNodeHandler, NodeHandler}, - node::{Close, Substream} - } -}; -use futures::{prelude::*, channel::mpsc, stream}; -use smallvec::SmallVec; -use std::{pin::Pin, task::Context, task::Poll}; -use super::{TaskId, Error}; - -/// Message to transmit from the public API to a task. -#[derive(Debug)] -pub enum ToTaskMessage { - /// An event to transmit to the node handler. - HandlerEvent(T), - /// When received, stores the parameter inside the task and keeps it alive - /// until we have an acknowledgment that the remote has accepted our handshake. - TakeOver(mpsc::Sender>) -} - -/// Message to transmit from a task to the public API. -#[derive(Debug)] -pub enum FromTaskMessage { - /// A connection to a node has succeeded. - NodeReached(C), - /// The task closed. - TaskClosed(Error, Option), - /// An event from the node. - NodeEvent(T) -} - -/// Implementation of [`Future`] that handles a single node. -pub struct Task -where - M: StreamMuxer, - H: IntoNodeHandler, - H::Handler: NodeHandler> -{ - /// The ID of this task. - id: TaskId, - - /// Sender to transmit messages to the outside. - sender: mpsc::Sender<(FromTaskMessage::Error, C>, TaskId)>, - - /// Receiver of messages from the outside. - receiver: stream::Fuse>>, - - /// Inner state of this `Task`. - state: State, - - /// Channels to keep alive for as long as we don't have an acknowledgment from the remote. - taken_over: SmallVec<[mpsc::Sender>; 1]> -} - -impl Task -where - M: StreamMuxer, - H: IntoNodeHandler, - H::Handler: NodeHandler> -{ - /// Create a new task to connect and handle some node.
- pub fn new ( - i: TaskId, - s: mpsc::Sender<(FromTaskMessage::Error, C>, TaskId)>, - r: mpsc::Receiver>, - f: F, - h: H - ) -> Self { - Task { - id: i, - sender: s, - receiver: r.fuse(), - state: State::Future { future: Box::pin(f), handler: h, events_buffer: Vec::new() }, - taken_over: SmallVec::new() - } - } - - /// Create a task for an existing node we are already connected to. - pub fn node ( - i: TaskId, - s: mpsc::Sender<(FromTaskMessage::Error, C>, TaskId)>, - r: mpsc::Receiver>, - n: HandledNode - ) -> Self { - Task { - id: i, - sender: s, - receiver: r.fuse(), - state: State::Node(n), - taken_over: SmallVec::new() - } - } -} - -/// State of the future. -enum State -where - M: StreamMuxer, - H: IntoNodeHandler, - H::Handler: NodeHandler> -{ - /// Future to resolve to connect to the node. - Future { - /// The future that will attempt to reach the node. - // TODO: don't pin this Future; this requires deeper changes though - future: Pin>, - /// The handler that will be used to build the `HandledNode`. - handler: H, - /// While we are dialing the future, we need to buffer the events received on - /// `receiver` so that they get delivered once dialing succeeds. We can't simply leave - /// events in `receiver` because we have to detect if it gets closed. - events_buffer: Vec - }, - - /// An event should be sent to the outside world. - SendEvent { - /// The node, if available. - node: Option>, - /// The actual event message to send. - event: FromTaskMessage::Error, C> - }, - - /// Fully functional node. - Node(HandledNode), - - /// Node closing. - Closing(Close), - - /// Interim state that can only be observed externally if the future - /// resolved to a value previously. - Undefined -} - -impl Unpin for Task -where - M: StreamMuxer, - H: IntoNodeHandler, - H::Handler: NodeHandler> -{ -} - -impl Future for Task -where - M: StreamMuxer, - F: Future>, - H: IntoNodeHandler, - H::Handler: NodeHandler, InEvent = I, OutEvent = O> -{ - type Output = (); - - // NOTE: It is imperative to always consume all incoming event messages - // first in order to not prevent the outside from making progress because - // they are blocked on the channel capacity. - fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<()> { - // We use a `this` because the compiler isn't smart enough to allow mutably borrowing - // multiple different fields from the `Pin` at the same time. - let this = &mut *self; - - 'poll: loop { - match std::mem::replace(&mut this.state, State::Undefined) { - State::Future { mut future, handler, mut events_buffer } => { - // If this.receiver is closed, we stop the task. - loop { - match Stream::poll_next(Pin::new(&mut this.receiver), cx) { - Poll::Pending => break, - Poll::Ready(None) => return Poll::Ready(()), - Poll::Ready(Some(ToTaskMessage::HandlerEvent(event))) => - events_buffer.push(event), - Poll::Ready(Some(ToTaskMessage::TakeOver(take_over))) => - this.taken_over.push(take_over), - } - } - // Check if dialing succeeded. 
- match Future::poll(Pin::new(&mut future), cx) { - Poll::Ready(Ok((conn_info, muxer))) => { - let mut node = HandledNode::new(muxer, handler.into_handler(&conn_info)); - for event in events_buffer { - node.inject_event(event) - } - this.state = State::SendEvent { - node: Some(node), - event: FromTaskMessage::NodeReached(conn_info) - } - } - Poll::Pending => { - this.state = State::Future { future, handler, events_buffer }; - return Poll::Pending - } - Poll::Ready(Err(e)) => { - let event = FromTaskMessage::TaskClosed(Error::Reach(e), Some(handler)); - this.state = State::SendEvent { node: None, event } - } - } - } - State::Node(mut node) => { - // Start by handling commands received from the outside of the task. - loop { - match Stream::poll_next(Pin::new(&mut this.receiver), cx) { - Poll::Pending => break, - Poll::Ready(Some(ToTaskMessage::HandlerEvent(event))) => - node.inject_event(event), - Poll::Ready(Some(ToTaskMessage::TakeOver(take_over))) => - this.taken_over.push(take_over), - Poll::Ready(None) => { - // Node closed by the external API; start closing. - this.state = State::Closing(node.close()); - continue 'poll - } - } - } - // Process the node. - loop { - if !this.taken_over.is_empty() && node.is_remote_acknowledged() { - this.taken_over.clear() - } - match HandledNode::poll(Pin::new(&mut node), cx) { - Poll::Pending => { - this.state = State::Node(node); - return Poll::Pending - } - Poll::Ready(Ok(event)) => { - this.state = State::SendEvent { - node: Some(node), - event: FromTaskMessage::NodeEvent(event) - }; - continue 'poll - } - Poll::Ready(Err(err)) => { - let event = FromTaskMessage::TaskClosed(Error::Node(err), None); - this.state = State::SendEvent { node: None, event }; - continue 'poll - } - } - } - } - // Deliver an event to the outside. - State::SendEvent { mut node, event } => { - loop { - match Stream::poll_next(Pin::new(&mut this.receiver), cx) { - Poll::Pending => break, - Poll::Ready(Some(ToTaskMessage::HandlerEvent(event))) => - if let Some(ref mut n) = node { - n.inject_event(event) - } - Poll::Ready(Some(ToTaskMessage::TakeOver(take_over))) => - this.taken_over.push(take_over), - Poll::Ready(None) => - // Node closed by the external API; start closing. - if let Some(n) = node { - this.state = State::Closing(n.close()); - continue 'poll - } else { - return Poll::Ready(()) // end task - } - } - } - // Check if this task is about to close. We pass the flag to - // the next state so it knows what to do. - let close = - if let FromTaskMessage::TaskClosed(..) = event { - true - } else { - false - }; - match this.sender.poll_ready(cx) { - Poll::Pending => { - self.state = State::SendEvent { node, event }; - return Poll::Pending - } - Poll::Ready(Ok(())) => { - // We assume that if `poll_ready` has succeeded, then sending the event - // will succeed as well. If it turns out that it didn't, we will detect - // the closing at the next loop iteration. - let _ = this.sender.start_send((event, this.id)); - if let Some(n) = node { - if close { - this.state = State::Closing(n.close()) - } else { - this.state = State::Node(n) - } - } else { - // Since we have no node we terminate this task. - assert!(close); - return Poll::Ready(()) - } - }, - Poll::Ready(Err(_)) => { - if let Some(n) = node { - this.state = State::Closing(n.close()); - continue 'poll - } - // We can not communicate to the outside and there is no - // node to handle, so this is the end of this task. 
- return Poll::Ready(()) - } - } - } - State::Closing(mut closing) => - match Future::poll(Pin::new(&mut closing), cx) { - Poll::Ready(_) => - return Poll::Ready(()), // end task - Poll::Pending => { - this.state = State::Closing(closing); - return Poll::Pending - } - } - // This happens if a previous poll has resolved the future. - // The API contract of futures is that we should not be polled again. - State::Undefined => panic!("`Task::poll()` called after completion.") - } - } - } -} - diff --git a/core/src/transport/memory.rs b/core/src/transport/memory.rs index 8546163c..2f27e126 100644 --- a/core/src/transport/memory.rs +++ b/core/src/transport/memory.rs @@ -279,8 +279,6 @@ impl> Into>> for Chan { #[cfg(test)] mod tests { use super::*; - use rand::Rng; - use std::io::Write; #[test] fn parse_memory_addr_works() { diff --git a/core/src/transport/upgrade.rs b/core/src/transport/upgrade.rs index fe5c9166..bd862dd8 100644 --- a/core/src/transport/upgrade.rs +++ b/core/src/transport/upgrade.rs @@ -68,7 +68,7 @@ use std::{error::Error, fmt, pin::Pin, task::Context, task::Poll}; /// namely a tuple of a [`ConnectionInfo`] (from the authentication upgrade) and a /// [`StreamMuxer`] (from the multiplexing upgrade). /// -/// [`Network`]: crate::nodes::Network +/// [`Network`]: crate::Network pub struct Builder { inner: T, version: upgrade::Version, diff --git a/core/tests/network_dial_error.rs b/core/tests/network_dial_error.rs index 6f4047cd..68f1386e 100644 --- a/core/tests/network_dial_error.rs +++ b/core/tests/network_dial_error.rs @@ -23,8 +23,15 @@ mod util; use futures::prelude::*; use libp2p_core::identity; use libp2p_core::multiaddr::multiaddr; -use libp2p_core::nodes::network::{Network, NetworkEvent, NetworkReachError, PeerState, UnknownPeerDialErr, IncomingError}; -use libp2p_core::{muxing::StreamMuxerBox, PeerId, Transport, upgrade}; +use libp2p_core::{ + Network, + PeerId, + Transport, + connection::PendingConnectionError, + muxing::StreamMuxerBox, + network::{NetworkEvent, peer::PeerState}, + upgrade, +}; use libp2p_swarm::{ NegotiatedSubstream, ProtocolsHandler, @@ -91,7 +98,7 @@ fn deny_incoming_connec() { .authenticate(libp2p_secio::SecioConfig::new(local_key)) .multiplex(libp2p_mplex::MplexConfig::new()) .map(|(conn_info, muxer), _| (conn_info, StreamMuxerBox::new(muxer))); - Network::new(transport, local_public_key.into(), None) + Network::new(transport, local_public_key.into(), Default::default()) }; let mut swarm2 = { @@ -102,7 +109,7 @@ fn deny_incoming_connec() { .authenticate(libp2p_secio::SecioConfig::new(local_key)) .multiplex(libp2p_mplex::MplexConfig::new()) .map(|(conn_info, muxer), _| (conn_info, StreamMuxerBox::new(muxer))); - Network::new(transport, local_public_key.into(), None) + Network::new(transport, local_public_key.into(), Default::default()) }; swarm1.listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap()).unwrap(); @@ -117,8 +124,9 @@ fn deny_incoming_connec() { swarm2 .peer(swarm1.local_peer_id().clone()) - .into_not_connected().unwrap() - .connect(address.clone(), TestHandler::default().into_node_handler_builder()); + .into_disconnected().unwrap() + .connect(address.clone(), Vec::new(), TestHandler::default().into_node_handler_builder()) + .unwrap(); async_std::task::block_on(future::poll_fn(|cx| -> Poll> { match swarm1.poll(cx) { @@ -129,10 +137,10 @@ fn deny_incoming_connec() { match swarm2.poll(cx) { Poll::Ready(NetworkEvent::DialError { - new_state: PeerState::NotConnected, + new_state: PeerState::Disconnected, peer_id, multiaddr, - error: 
NetworkReachError::Transport(_) + error: PendingConnectionError::Transport(_) }) => { assert_eq!(peer_id, *swarm1.local_peer_id()); assert_eq!(multiaddr, address); @@ -154,10 +162,10 @@ fn dial_self() { // Dialing the same address we're listening should result in three events: // // - The incoming connection notification (before we know the incoming peer ID). - // - The error about the incoming connection (once we've determined that it's our own ID). - // - The error about the dialing (once we've determined that it's our own ID). + // - The connection error for the dialing endpoint (once we've determined that it's our own ID). + // - The connection error for the listening endpoint (once we've determined that it's our own ID). // - // The last two items can happen in any order. + // The last two can happen in any order. let mut swarm = { let local_key = identity::Keypair::generate_ed25519(); @@ -172,12 +180,12 @@ fn dial_self() { util::CloseMuxer::new(mplex).map_ok(move |mplex| (peer, mplex)) }) .map(|(conn_info, muxer), _| (conn_info, StreamMuxerBox::new(muxer))); - Network::new(transport, local_public_key.into(), None) + Network::new(transport, local_public_key.into(), Default::default()) }; swarm.listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap()).unwrap(); - let (address, mut swarm) = async_std::task::block_on( + let (local_address, mut swarm) = async_std::task::block_on( future::lazy(move |cx| { if let Poll::Ready(NetworkEvent::NewListenerAddress { listen_addr, .. }) = swarm.poll(cx) { Ok::<_, void::Void>((listen_addr, swarm)) @@ -187,7 +195,7 @@ fn dial_self() { })) .unwrap(); - swarm.dial(address.clone(), TestHandler::default().into_node_handler_builder()).unwrap(); + swarm.dial(&local_address, TestHandler::default().into_node_handler_builder()).unwrap(); let mut got_dial_err = false; let mut got_inc_err = false; @@ -196,31 +204,29 @@ fn dial_self() { match swarm.poll(cx) { Poll::Ready(NetworkEvent::UnknownPeerDialError { multiaddr, - error: UnknownPeerDialErr::FoundLocalPeerId, - handler: _ + error: PendingConnectionError::InvalidPeerId { .. }, + .. }) => { - assert_eq!(multiaddr, address); assert!(!got_dial_err); + assert_eq!(multiaddr, local_address); got_dial_err = true; if got_inc_err { - return Poll::Ready(Ok(())); + return Poll::Ready(Ok(())) } }, Poll::Ready(NetworkEvent::IncomingConnectionError { - local_addr, - send_back_addr: _, - error: IncomingError::FoundLocalPeerId + local_addr, .. }) => { - assert_eq!(address, local_addr); assert!(!got_inc_err); + assert_eq!(local_addr, local_address); got_inc_err = true; if got_dial_err { - return Poll::Ready(Ok(())); + return Poll::Ready(Ok(())) } }, Poll::Ready(NetworkEvent::IncomingConnection(inc)) => { - assert_eq!(*inc.local_addr(), address); - inc.accept(TestHandler::default().into_node_handler_builder()); + assert_eq!(*inc.local_addr(), local_address); + inc.accept(TestHandler::default().into_node_handler_builder()).unwrap(); }, Poll::Ready(ev) => { panic!("Unexpected event: {:?}", ev) @@ -236,7 +242,7 @@ fn dial_self_by_id() { // Trying to dial self by passing the same `PeerId` shouldn't even be possible in the first // place. 
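// [Editor's note] The assertion below relies on a type-state style API: only
// a peer in the disconnected state yields a handle that can start a dial, and
// the local peer never does. A toy sketch of that idea; the types here are
// illustrative stand-ins, not the real `Peer` API.

enum Peer {
    /// The peer is ourselves; no dial can be constructed at all.
    Local,
    /// An established connection exists.
    Connected,
    /// A remote peer we are not connected to.
    Disconnected(Disconnected),
}

struct Disconnected;

impl Peer {
    /// Only a disconnected remote peer can begin a dial; for the local peer
    /// this returns `None`, which is exactly what the test below checks.
    fn into_disconnected(self) -> Option<Disconnected> {
        match self {
            Peer::Disconnected(d) => Some(d),
            _ => None,
        }
    }
}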
- let mut swarm: Network<_, _, _, NodeHandlerWrapperBuilder, _, _> = { + let mut swarm: Network<_, _, _, NodeHandlerWrapperBuilder> = { let local_key = identity::Keypair::generate_ed25519(); let local_public_key = local_key.public(); let transport = libp2p_tcp::TcpConfig::new() @@ -244,11 +250,11 @@ fn dial_self_by_id() { .authenticate(libp2p_secio::SecioConfig::new(local_key)) .multiplex(libp2p_mplex::MplexConfig::new()) .map(|(conn_info, muxer), _| (conn_info, StreamMuxerBox::new(muxer))); - Network::new(transport, local_public_key.into(), None) + Network::new(transport, local_public_key.into(), Default::default()) }; let peer_id = swarm.local_peer_id().clone(); - assert!(swarm.peer(peer_id).into_not_connected().is_none()); + assert!(swarm.peer(peer_id).into_disconnected().is_none()); } #[test] @@ -263,7 +269,7 @@ fn multiple_addresses_err() { .authenticate(libp2p_secio::SecioConfig::new(local_key)) .multiplex(libp2p_mplex::MplexConfig::new()) .map(|(conn_info, muxer), _| (conn_info, StreamMuxerBox::new(muxer))); - Network::new(transport, local_public_key.into(), None) + Network::new(transport, local_public_key.into(), Default::default()) }; let mut addresses = Vec::new(); @@ -275,10 +281,13 @@ fn multiple_addresses_err() { } addresses.shuffle(&mut rand::thread_rng()); + let first = addresses[0].clone(); + let rest = (&addresses[1..]).iter().cloned(); + let target = PeerId::random(); swarm.peer(target.clone()) - .into_not_connected().unwrap() - .connect_iter(addresses.clone(), TestHandler::default().into_node_handler_builder()) + .into_disconnected().unwrap() + .connect(first, rest, TestHandler::default().into_node_handler_builder()) .unwrap(); async_std::task::block_on(future::poll_fn(|cx| -> Poll> { @@ -288,13 +297,13 @@ fn multiple_addresses_err() { new_state, peer_id, multiaddr, - error: NetworkReachError::Transport(_) + error: PendingConnectionError::Transport(_) }) => { assert_eq!(peer_id, target); let expected = addresses.remove(0); assert_eq!(multiaddr, expected); if addresses.is_empty() { - assert_eq!(new_state, PeerState::NotConnected); + assert_eq!(new_state, PeerState::Disconnected); return Poll::Ready(Ok(())); } else { match new_state { diff --git a/core/tests/network_simult.rs b/core/tests/network_simult.rs deleted file mode 100644 index 78a60f31..00000000 --- a/core/tests/network_simult.rs +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. 
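// [Editor's note] With `connect_iter` removed, the `multiple_addresses_err`
// hunk above shows the new calling convention: the caller splits the address
// list into a first address plus an iterator of fallbacks. A self-contained
// sketch of that split, returning `None` for an empty list much as
// `connect_iter` used to return an error; the name and the use of `String`
// are stand-ins.

fn split_for_connect(addrs: Vec<String>) -> Option<(String, Vec<String>)> {
    let mut iter = addrs.into_iter();
    let first = iter.next()?; // an empty list means there is nothing to dial
    Some((first, iter.collect()))
}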
- -use futures::prelude::*; -use libp2p_core::{identity, muxing::StreamMuxerBox, upgrade, Transport}; -use libp2p_core::nodes::{Network, NetworkEvent, Peer}; -use libp2p_core::nodes::network::IncomingError; -use libp2p_swarm::{ - NegotiatedSubstream, - ProtocolsHandler, - KeepAlive, - SubstreamProtocol, - ProtocolsHandlerEvent, - ProtocolsHandlerUpgrErr, -}; -use std::{io, task::Context, task::Poll, time::Duration}; -use wasm_timer::Delay; - -#[derive(Default)] -struct TestHandler; - -impl ProtocolsHandler for TestHandler { - type InEvent = (); // TODO: cannot be Void (https://github.com/servo/rust-smallvec/issues/139) - type OutEvent = (); // TODO: cannot be Void (https://github.com/servo/rust-smallvec/issues/139) - type Error = io::Error; - type InboundProtocol = upgrade::DeniedUpgrade; - type OutboundProtocol = upgrade::DeniedUpgrade; - type OutboundOpenInfo = (); // TODO: cannot be Void (https://github.com/servo/rust-smallvec/issues/139) - - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(upgrade::DeniedUpgrade) - } - - fn inject_fully_negotiated_inbound( - &mut self, - _: >::Output - ) { panic!() } - - fn inject_fully_negotiated_outbound( - &mut self, - _: >::Output, - _: Self::OutboundOpenInfo - ) { panic!() } - - fn inject_event(&mut self, _: Self::InEvent) { - panic!() - } - - fn inject_dial_upgrade_error(&mut self, _: Self::OutboundOpenInfo, _: ProtocolsHandlerUpgrErr<>::Error>) { - - } - - fn connection_keep_alive(&self) -> KeepAlive { KeepAlive::Yes } - - fn poll(&mut self, _: &mut Context) -> Poll> { - Poll::Pending - } -} - -#[test] -fn raw_swarm_simultaneous_connect() { - // Checks whether two swarms dialing each other simultaneously properly works. - - // When two swarms A and B dial each other, the following can happen: - // - // - A and B both successfully open a dialing connection simultaneously, then either A or B - // (but not both) closes its dialing connection and get a `Replaced` event to replace the - // dialing connection with the listening one. The other one gets an `IncomingConnectionError`. - // - A successfully dials B; B doesn't have dialing priority and thus cancels its dialing - // attempt. If A receives B's dialing attempt, it gets an `IncomingConnectionError`. - // - A successfully dials B; B does have dialing priority and thus continues dialing; then B - // successfully dials A; A and B both get a `Replaced` event to replace the dialing - // connection with the listening one. - // - - // Important note: This test is meant to detect race conditions which don't seem to happen - // if we use the `MemoryTransport`. Using the TCP transport is important, - // despite the fact that it adds a dependency. - - for _ in 0 .. 
10 { - let mut swarm1 = { - let local_key = identity::Keypair::generate_ed25519(); - let local_public_key = local_key.public(); - let transport = libp2p_tcp::TcpConfig::new() - .upgrade(upgrade::Version::V1Lazy) - .authenticate(libp2p_secio::SecioConfig::new(local_key)) - .multiplex(libp2p_mplex::MplexConfig::new()) - .map(|(conn_info, muxer), _| (conn_info, StreamMuxerBox::new(muxer))); - Network::new(transport, local_public_key.into_peer_id(), None) - }; - - let mut swarm2 = { - let local_key = identity::Keypair::generate_ed25519(); - let local_public_key = local_key.public(); - let transport = libp2p_tcp::TcpConfig::new() - .upgrade(upgrade::Version::V1Lazy) - .authenticate(libp2p_secio::SecioConfig::new(local_key)) - .multiplex(libp2p_mplex::MplexConfig::new()) - .map(|(conn_info, muxer), _| (conn_info, StreamMuxerBox::new(muxer))); - Network::new(transport, local_public_key.into_peer_id(), None) - }; - - swarm1.listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap()).unwrap(); - swarm2.listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap()).unwrap(); - - let swarm1_listen_addr = future::poll_fn(|cx| { - if let Poll::Ready(NetworkEvent::NewListenerAddress { listen_addr, .. }) = swarm1.poll(cx) { - Poll::Ready(listen_addr) - } else { - panic!("Was expecting the listen address to be reported") - } - }) - .now_or_never() - .expect("listen address of swarm1"); - - let swarm2_listen_addr = future::poll_fn(|cx| { - if let Poll::Ready(NetworkEvent::NewListenerAddress { listen_addr, .. }) = swarm2.poll(cx) { - Poll::Ready(listen_addr) - } else { - panic!("Was expecting the listen address to be reported") - } - }) - .now_or_never() - .expect("listen address of swarm2"); - - #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] - enum Step { - Start, - Dialing, - Connected, - Replaced, - Denied - } - - loop { - let mut swarm1_step = Step::Start; - let mut swarm2_step = Step::Start; - - let mut swarm1_dial_start = Delay::new(Duration::new(0, rand::random::() % 50_000_000)); - let mut swarm2_dial_start = Delay::new(Duration::new(0, rand::random::() % 50_000_000)); - - let future = future::poll_fn(|cx| { - loop { - let mut swarm1_not_ready = false; - let mut swarm2_not_ready = false; - - // We add a lot of randomness. In a real-life situation the swarm also has to - // handle other nodes, which may delay the processing. - - if swarm1_step == Step::Start { - if swarm1_dial_start.poll_unpin(cx).is_ready() { - let handler = TestHandler::default().into_node_handler_builder(); - swarm1.peer(swarm2.local_peer_id().clone()) - .into_not_connected() - .unwrap() - .connect(swarm2_listen_addr.clone(), handler); - swarm1_step = Step::Dialing; - } else { - swarm1_not_ready = true - } - } - - if swarm2_step == Step::Start { - if swarm2_dial_start.poll_unpin(cx).is_ready() { - let handler = TestHandler::default().into_node_handler_builder(); - swarm2.peer(swarm1.local_peer_id().clone()) - .into_not_connected() - .unwrap() - .connect(swarm1_listen_addr.clone(), handler); - swarm2_step = Step::Dialing; - } else { - swarm2_not_ready = true - } - } - - if rand::random::() < 0.1 { - match swarm1.poll(cx) { - Poll::Ready(NetworkEvent::IncomingConnectionError { - error: IncomingError::DeniedLowerPriority, .. - }) => { - assert_eq!(swarm1_step, Step::Connected); - swarm1_step = Step::Denied - } - Poll::Ready(NetworkEvent::Connected { conn_info, .. 
}) => { - assert_eq!(conn_info, *swarm2.local_peer_id()); - if swarm1_step == Step::Start { - // The connection was established before - // swarm1 started dialing; discard the test run. - return Poll::Ready(false) - } - assert_eq!(swarm1_step, Step::Dialing); - swarm1_step = Step::Connected - } - Poll::Ready(NetworkEvent::Replaced { new_info, .. }) => { - assert_eq!(new_info, *swarm2.local_peer_id()); - assert_eq!(swarm1_step, Step::Connected); - swarm1_step = Step::Replaced - } - Poll::Ready(NetworkEvent::IncomingConnection(inc)) => { - inc.accept(TestHandler::default().into_node_handler_builder()) - } - Poll::Ready(ev) => panic!("swarm1: unexpected event: {:?}", ev), - Poll::Pending => swarm1_not_ready = true - } - } - - if rand::random::() < 0.1 { - match swarm2.poll(cx) { - Poll::Ready(NetworkEvent::IncomingConnectionError { - error: IncomingError::DeniedLowerPriority, .. - }) => { - assert_eq!(swarm2_step, Step::Connected); - swarm2_step = Step::Denied - } - Poll::Ready(NetworkEvent::Connected { conn_info, .. }) => { - assert_eq!(conn_info, *swarm1.local_peer_id()); - if swarm2_step == Step::Start { - // The connection was established before - // swarm2 started dialing; discard the test run. - return Poll::Ready(false) - } - assert_eq!(swarm2_step, Step::Dialing); - swarm2_step = Step::Connected - } - Poll::Ready(NetworkEvent::Replaced { new_info, .. }) => { - assert_eq!(new_info, *swarm1.local_peer_id()); - assert_eq!(swarm2_step, Step::Connected); - swarm2_step = Step::Replaced - } - Poll::Ready(NetworkEvent::IncomingConnection(inc)) => { - inc.accept(TestHandler::default().into_node_handler_builder()) - } - Poll::Ready(ev) => panic!("swarm2: unexpected event: {:?}", ev), - Poll::Pending => swarm2_not_ready = true - } - } - - match (swarm1_step, swarm2_step) { - | (Step::Connected, Step::Replaced) - | (Step::Connected, Step::Denied) - | (Step::Replaced, Step::Connected) - | (Step::Replaced, Step::Denied) - | (Step::Replaced, Step::Replaced) - | (Step::Denied, Step::Connected) - | (Step::Denied, Step::Replaced) => return Poll::Ready(true), - _else => () - } - - if swarm1_not_ready && swarm2_not_ready { - return Poll::Pending - } - } - }); - - if async_std::task::block_on(future) { - // The test exercised what we wanted to exercise: a simultaneous connect. - break - } - - // The test did not trigger a simultaneous connect; ensure the nodes - // are disconnected and re-run the test. 
- match swarm1.peer(swarm2.local_peer_id().clone()) { - Peer::Connected(p) => p.close(), - Peer::PendingConnect(p) => p.interrupt(), - x => panic!("Unexpected state for swarm1: {:?}", x) - } - match swarm2.peer(swarm1.local_peer_id().clone()) { - Peer::Connected(p) => p.close(), - Peer::PendingConnect(p) => p.interrupt(), - x => panic!("Unexpected state for swarm2: {:?}", x) - } - } - } -} - diff --git a/misc/core-derive/src/lib.rs b/misc/core-derive/src/lib.rs index 8afaffce..46a21fd3 100644 --- a/misc/core-derive/src/lib.rs +++ b/misc/core-derive/src/lib.rs @@ -56,8 +56,9 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { let protocols_handler = quote!{::libp2p::swarm::ProtocolsHandler}; let into_proto_select_ident = quote!{::libp2p::swarm::IntoProtocolsHandlerSelect}; let peer_id = quote!{::libp2p::core::PeerId}; + let connection_id = quote!{::libp2p::core::connection::ConnectionId}; let connected_point = quote!{::libp2p::core::ConnectedPoint}; - let listener_id = quote!{::libp2p::core::nodes::ListenerId}; + let listener_id = quote!{::libp2p::core::connection::ListenerId}; let poll_parameters = quote!{::libp2p::swarm::PollParameters}; @@ -172,32 +173,6 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { }) }; - // Build the list of statements to put in the body of `inject_replaced()`. - let inject_replaced_stmts = { - let num_fields = data_struct.fields.iter().filter(|f| !is_ignored(f)).count(); - data_struct.fields.iter().enumerate().filter_map(move |(field_n, field)| { - if is_ignored(&field) { - return None; - } - - Some(if field_n == num_fields - 1 { - match field.ident { - Some(ref i) => quote!{ self.#i.inject_replaced(peer_id, closed_endpoint, new_endpoint); }, - None => quote!{ self.#field_n.inject_replaced(peer_id, closed_endpoint, new_endpoint); }, - } - } else { - match field.ident { - Some(ref i) => quote!{ - self.#i.inject_replaced(peer_id.clone(), closed_endpoint.clone(), new_endpoint.clone()); - }, - None => quote!{ - self.#field_n.inject_replaced(peer_id.clone(), closed_endpoint.clone(), new_endpoint.clone()); - }, - } - }) - }) - }; - // Build the list of statements to put in the body of `inject_addr_reach_failure()`. let inject_addr_reach_failure_stmts = { data_struct.fields.iter().enumerate().filter_map(move |(field_n, field)| { @@ -294,10 +269,10 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { }) }; - // Build the list of variants to put in the body of `inject_node_event()`. + // Build the list of variants to put in the body of `inject_event()`. // // The event type is a construction of nested `#either_ident`s of the events of the children. - // We call `inject_node_event` on the corresponding child. + // We call `inject_event` on the corresponding child. 
let inject_node_event_stmts = data_struct.fields.iter().enumerate().filter(|f| !is_ignored(&f.1)).enumerate().map(|(enum_n, (field_n, field))| { let mut elem = if enum_n != 0 { quote!{ #either_ident::Second(ev) } @@ -310,8 +285,8 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { } Some(match field.ident { - Some(ref i) => quote!{ #elem => self.#i.inject_node_event(peer_id, ev) }, - None => quote!{ #elem => self.#field_n.inject_node_event(peer_id, ev) }, + Some(ref i) => quote!{ #elem => self.#i.inject_event(peer_id, connection_id, ev) }, + None => quote!{ #elem => self.#field_n.inject_event(peer_id, connection_id, ev) }, }) }); @@ -411,9 +386,10 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { std::task::Poll::Ready(#network_behaviour_action::DialPeer { peer_id }) => { return std::task::Poll::Ready(#network_behaviour_action::DialPeer { peer_id }); } - std::task::Poll::Ready(#network_behaviour_action::SendEvent { peer_id, event }) => { - return std::task::Poll::Ready(#network_behaviour_action::SendEvent { + std::task::Poll::Ready(#network_behaviour_action::NotifyHandler { peer_id, handler, event }) => { + return std::task::Poll::Ready(#network_behaviour_action::NotifyHandler { peer_id, + handler, event: #wrapped_event, }); } @@ -453,10 +429,6 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { #(#inject_disconnected_stmts);* } - fn inject_replaced(&mut self, peer_id: #peer_id, closed_endpoint: #connected_point, new_endpoint: #connected_point) { - #(#inject_replaced_stmts);* - } - fn inject_addr_reach_failure(&mut self, peer_id: Option<&#peer_id>, addr: &#multiaddr, error: &dyn std::error::Error) { #(#inject_addr_reach_failure_stmts);* } @@ -485,9 +457,10 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { #(#inject_listener_closed_stmts);* } - fn inject_node_event( + fn inject_event( &mut self, peer_id: #peer_id, + connection_id: #connection_id, event: <::Handler as #protocols_handler>::OutEvent ) { match event { diff --git a/protocols/floodsub/src/layer.rs b/protocols/floodsub/src/layer.rs index 323221c9..18e87c54 100644 --- a/protocols/floodsub/src/layer.rs +++ b/protocols/floodsub/src/layer.rs @@ -22,13 +22,14 @@ use crate::protocol::{FloodsubConfig, FloodsubMessage, FloodsubRpc, FloodsubSubs use crate::topic::Topic; use cuckoofilter::CuckooFilter; use fnv::FnvHashSet; -use libp2p_core::{ConnectedPoint, Multiaddr, PeerId}; +use libp2p_core::{ConnectedPoint, Multiaddr, PeerId, connection::ConnectionId}; use libp2p_swarm::{ NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler, - OneShotHandler + OneShotHandler, + NotifyHandler }; use rand; use smallvec::SmallVec; @@ -80,8 +81,9 @@ impl Floodsub { // Send our topics to this node if we're already connected to it. 
if self.connected_peers.contains_key(&peer_id) { for topic in self.subscribed_topics.iter().cloned() { - self.events.push_back(NetworkBehaviourAction::SendEvent { + self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: peer_id.clone(), + handler: NotifyHandler::Any, event: FloodsubRpc { messages: Vec::new(), subscriptions: vec![FloodsubSubscription { @@ -113,8 +115,9 @@ impl Floodsub { } for peer in self.connected_peers.keys() { - self.events.push_back(NetworkBehaviourAction::SendEvent { + self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: peer.clone(), + handler: NotifyHandler::Any, event: FloodsubRpc { messages: Vec::new(), subscriptions: vec![FloodsubSubscription { @@ -143,8 +146,9 @@ impl Floodsub { self.subscribed_topics.remove(pos); for peer in self.connected_peers.keys() { - self.events.push_back(NetworkBehaviourAction::SendEvent { + self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: peer.clone(), + handler: NotifyHandler::Any, event: FloodsubRpc { messages: Vec::new(), subscriptions: vec![FloodsubSubscription { @@ -208,8 +212,9 @@ impl Floodsub { continue; } - self.events.push_back(NetworkBehaviourAction::SendEvent { + self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: peer_id.clone(), + handler: NotifyHandler::Any, event: FloodsubRpc { subscriptions: Vec::new(), messages: vec![message.clone()], @@ -235,8 +240,9 @@ impl NetworkBehaviour for Floodsub { // We need to send our subscriptions to the newly-connected node. if self.target_peers.contains(&id) { for topic in self.subscribed_topics.iter().cloned() { - self.events.push_back(NetworkBehaviourAction::SendEvent { + self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: id.clone(), + handler: NotifyHandler::Any, event: FloodsubRpc { messages: Vec::new(), subscriptions: vec![FloodsubSubscription { @@ -262,9 +268,10 @@ impl NetworkBehaviour for Floodsub { } } - fn inject_node_event( + fn inject_event( &mut self, propagation_source: PeerId, + _connection: ConnectionId, event: InnerMessage, ) { // We ignore successful sends event. 
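// [Editor's note] `inject_event` now also receives the `ConnectionId` the
// event arrived on, which is what makes per-connection delivery possible;
// `NotifyHandler::Any`, used throughout this file, simply declines to choose
// a connection. A toy sketch of remembering the connection an event came from
// so a reply could target it; all types here are illustrative stand-ins, and
// the `One` variant is an assumption about the API shape, not something shown
// in this diff.

use std::collections::HashMap;

#[derive(Clone, Hash, PartialEq, Eq)]
struct PeerId(String);

#[derive(Copy, Clone)]
struct ConnectionId(usize);

enum Notify {
    /// Let the swarm pick any established connection to the peer.
    Any,
    /// Target the exact connection an earlier event arrived on.
    One(ConnectionId),
}

#[derive(Default)]
struct Behaviour {
    last_conn: HashMap<PeerId, ConnectionId>,
}

impl Behaviour {
    /// Record the connection on which an inbound event was received ...
    fn inject_event(&mut self, peer: PeerId, conn: ConnectionId) {
        self.last_conn.insert(peer, conn);
    }

    /// ... so that a later reply can be routed back on that same connection.
    fn reply_target(&self, peer: &PeerId) -> Notify {
        match self.last_conn.get(peer) {
            Some(c) => Notify::One(*c),
            None => Notify::Any,
        }
    }
}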
diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs
index fe43a97b..2e55571a 100644
--- a/protocols/gossipsub/src/behaviour.rs
+++ b/protocols/gossipsub/src/behaviour.rs
@@ -27,8 +27,14 @@ use crate::protocol::{
 };
 use crate::topic::{Topic, TopicHash};
 use futures::prelude::*;
-use libp2p_core::{ConnectedPoint, Multiaddr, PeerId};
-use libp2p_swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler};
+use libp2p_core::{ConnectedPoint, Multiaddr, PeerId, connection::ConnectionId};
+use libp2p_swarm::{
+    NetworkBehaviour,
+    NetworkBehaviourAction,
+    NotifyHandler,
+    PollParameters,
+    ProtocolsHandler
+};
 use log::{debug, error, info, trace, warn};
 use lru::LruCache;
 use rand;
@@ -146,8 +152,9 @@ impl Gossipsub {
             for peer in peer_list {
                 debug!("Sending SUBSCRIBE to peer: {:?}", peer);
-                self.events.push_back(NetworkBehaviourAction::SendEvent {
+                self.events.push_back(NetworkBehaviourAction::NotifyHandler {
                     peer_id: peer.clone(),
+                    handler: NotifyHandler::Any,
                     event: event.clone(),
                 });
             }
@@ -191,9 +198,10 @@ impl Gossipsub {
             for peer in peer_list {
                 debug!("Sending UNSUBSCRIBE to peer: {:?}", peer);
-                self.events.push_back(NetworkBehaviourAction::SendEvent {
+                self.events.push_back(NetworkBehaviourAction::NotifyHandler {
                     peer_id: peer.clone(),
                     event: event.clone(),
+                    handler: NotifyHandler::Any,
                 });
             }
         }
@@ -281,9 +289,10 @@ impl Gossipsub {
         // Send to peers we know are subscribed to the topic.
         for peer_id in recipient_peers.iter() {
             debug!("Sending message to peer: {:?}", peer_id);
-            self.events.push_back(NetworkBehaviourAction::SendEvent {
+            self.events.push_back(NetworkBehaviourAction::NotifyHandler {
                 peer_id: peer_id.clone(),
                 event: event.clone(),
+                handler: NotifyHandler::Any,
             });
         }
     }
@@ -461,8 +470,9 @@ impl Gossipsub {
             debug!("IWANT: Sending cached messages to peer: {:?}", peer_id);
             // Send the messages to the peer
             let message_list = cached_messages.into_iter().map(|entry| entry.1).collect();
-            self.events.push_back(NetworkBehaviourAction::SendEvent {
+            self.events.push_back(NetworkBehaviourAction::NotifyHandler {
                 peer_id: peer_id.clone(),
+                handler: NotifyHandler::Any,
                 event: Arc::new(GossipsubRpc {
                     subscriptions: Vec::new(),
                     messages: message_list,
@@ -508,8 +518,9 @@ impl Gossipsub {
                 "GRAFT: Not subscribed to topics - Sending PRUNE to peer: {:?}",
                 peer_id
             );
-            self.events.push_back(NetworkBehaviourAction::SendEvent {
+            self.events.push_back(NetworkBehaviourAction::NotifyHandler {
                 peer_id: peer_id.clone(),
+                handler: NotifyHandler::Any,
                 event: Arc::new(GossipsubRpc {
                     subscriptions: Vec::new(),
                     messages: Vec::new(),
@@ -851,8 +862,9 @@ impl Gossipsub {
             grafts.append(&mut prunes);

             // send the control messages
-            self.events.push_back(NetworkBehaviourAction::SendEvent {
+            self.events.push_back(NetworkBehaviourAction::NotifyHandler {
                 peer_id: peer.clone(),
+                handler: NotifyHandler::Any,
                 event: Arc::new(GossipsubRpc {
                     subscriptions: Vec::new(),
                     messages: Vec::new(),
@@ -869,8 +881,9 @@ impl Gossipsub {
                     topic_hash: topic_hash.clone(),
                 })
                 .collect();
-            self.events.push_back(NetworkBehaviourAction::SendEvent {
+            self.events.push_back(NetworkBehaviourAction::NotifyHandler {
                 peer_id: peer.clone(),
+                handler: NotifyHandler::Any,
                 event: Arc::new(GossipsubRpc {
                     subscriptions: Vec::new(),
                     messages: Vec::new(),
@@ -908,9 +921,10 @@ impl Gossipsub {
             for peer in recipient_peers.iter() {
                 debug!("Sending message: {:?} to peer {:?}", msg_id, peer);
-                self.events.push_back(NetworkBehaviourAction::SendEvent {
+                self.events.push_back(NetworkBehaviourAction::NotifyHandler {
                     peer_id: peer.clone(),
                     event: event.clone(),
+                    handler: NotifyHandler::Any,
                 });
             }
         }
@@ -970,8 +984,9 @@ impl Gossipsub {
     /// Takes each control action mapping and turns it into a message
     fn flush_control_pool(&mut self) {
         for (peer, controls) in self.control_pool.drain() {
-            self.events.push_back(NetworkBehaviourAction::SendEvent {
+            self.events.push_back(NetworkBehaviourAction::NotifyHandler {
                 peer_id: peer,
+                handler: NotifyHandler::Any,
                 event: Arc::new(GossipsubRpc {
                     subscriptions: Vec::new(),
                     messages: Vec::new(),
@@ -1010,8 +1025,9 @@ impl NetworkBehaviour for Gossipsub {

         if !subscriptions.is_empty() {
             // send our subscriptions to the peer
-            self.events.push_back(NetworkBehaviourAction::SendEvent {
+            self.events.push_back(NetworkBehaviourAction::NotifyHandler {
                 peer_id: id.clone(),
+                handler: NotifyHandler::Any,
                 event: Arc::new(GossipsubRpc {
                     messages: Vec::new(),
                     subscriptions,
@@ -1074,7 +1090,7 @@ impl NetworkBehaviour for Gossipsub {
         debug_assert!(was_in.is_some());
     }

-    fn inject_node_event(&mut self, propagation_source: PeerId, event: GossipsubRpc) {
+    fn inject_event(&mut self, propagation_source: PeerId, _: ConnectionId, event: GossipsubRpc) {
         // Handle subscriptions
         // Update connected peers topics
         self.handle_received_subscriptions(&event.subscriptions, &propagation_source);
@@ -1128,17 +1144,17 @@ impl NetworkBehaviour for Gossipsub {
         if let Some(event) = self.events.pop_front() {
-            // clone send event reference if others references are present
+            // Clone the event only if other references to it exist.
             match event {
-                NetworkBehaviourAction::SendEvent {
-                    peer_id,
-                    event: send_event,
+                NetworkBehaviourAction::NotifyHandler {
+                    peer_id, handler, event: send_event,
                 } => match Arc::try_unwrap(send_event) {
                     Ok(event) => {
-                        return Poll::Ready(NetworkBehaviourAction::SendEvent { peer_id, event });
+                        return Poll::Ready(NetworkBehaviourAction::NotifyHandler {
+                            peer_id, event, handler
+                        });
                     }
                     Err(event) => {
-                        return Poll::Ready(NetworkBehaviourAction::SendEvent {
-                            peer_id,
-                            event: (*event).clone(),
+                        return Poll::Ready(NetworkBehaviourAction::NotifyHandler {
+                            peer_id, event: (*event).clone(), handler
                         });
                     }
                 },
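The `Arc::try_unwrap` arm above is how gossipsub avoids cloning a broadcast payload on its last send: unwrap when the queue held the only reference, clone otherwise. The same pattern in isolation (a plain `String` payload, not the gossipsub types):

use std::sync::Arc;

// Minimal sketch: events are queued as `Arc<T>` so that broadcasting to
// many peers shares one allocation. On dispatch, `Arc::try_unwrap`
// recovers the owned value without a clone when this was the last
// reference; otherwise the payload is cloned out of the shared one.
fn dispatch<T: Clone>(event: Arc<T>) -> T {
    match Arc::try_unwrap(event) {
        Ok(owned) => owned,               // sole owner: no clone needed
        Err(shared) => (*shared).clone(), // still shared: clone the payload
    }
}

fn main() {
    let a = Arc::new(String::from("rpc"));
    let b = a.clone();
    assert_eq!(dispatch(a), "rpc"); // clones, because `b` still exists
    assert_eq!(dispatch(b), "rpc"); // moves, because it was the last Arc
}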
diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs
index 9a9320df..a3ca6ea5 100644
--- a/protocols/gossipsub/src/behaviour/tests.rs
+++ b/protocols/gossipsub/src/behaviour/tests.rs
@@ -100,7 +100,7 @@ mod tests {
         gs.events
             .iter()
             .fold(vec![], |mut collected_subscriptions, e| match e {
-                NetworkBehaviourAction::SendEvent { peer_id: _, event } => {
+                NetworkBehaviourAction::NotifyHandler { event, .. } => {
                     for s in &event.subscriptions {
                         match s.action {
                             GossipsubSubscriptionAction::Subscribe => {
@@ -163,7 +163,7 @@ mod tests {
         gs.events
             .iter()
             .fold(vec![], |mut collected_subscriptions, e| match e {
-                NetworkBehaviourAction::SendEvent { peer_id: _, event } => {
+                NetworkBehaviourAction::NotifyHandler { event, .. } => {
                     for s in &event.subscriptions {
                         match s.action {
                             GossipsubSubscriptionAction::Unsubscribe => {
@@ -328,7 +328,7 @@ mod tests {
             .events
             .iter()
             .fold(vec![], |mut collected_publish, e| match e {
-                NetworkBehaviourAction::SendEvent { peer_id: _, event } => {
+                NetworkBehaviourAction::NotifyHandler { event, .. } => {
                     for s in &event.messages {
                         collected_publish.push(s.clone());
                     }
@@ -394,7 +394,7 @@ mod tests {
             .events
             .iter()
             .fold(vec![], |mut collected_publish, e| match e {
-                NetworkBehaviourAction::SendEvent { peer_id: _, event } => {
+                NetworkBehaviourAction::NotifyHandler { event, .. } => {
                     for s in &event.messages {
                         collected_publish.push(s.clone());
                     }
@@ -437,10 +437,7 @@ mod tests {
             .events
             .iter()
             .filter(|e| match e {
-                NetworkBehaviourAction::SendEvent {
-                    peer_id: _,
-                    event: _,
-                } => true,
+                NetworkBehaviourAction::NotifyHandler { .. } => true,
                 _ => false,
             })
             .collect();
@@ -448,7 +445,7 @@ mod tests {
         // check that there are two subscriptions sent to each peer
         for sevent in send_events.clone() {
             match sevent {
-                NetworkBehaviourAction::SendEvent { peer_id: _, event } => {
+                NetworkBehaviourAction::NotifyHandler { event, .. } => {
                     assert!(
                         event.subscriptions.len() == 2,
                         "There should be two subscriptions sent to each peer (1 for each topic)."
@@ -625,7 +622,7 @@ mod tests {
             .events
             .iter()
             .fold(vec![], |mut collected_messages, e| match e {
-                NetworkBehaviourAction::SendEvent { peer_id: _, event } => {
+                NetworkBehaviourAction::NotifyHandler { event, .. } => {
                     for c in &event.messages {
                         collected_messages.push(c.clone())
                     }
@@ -664,7 +661,7 @@ mod tests {
-        // is the message is being sent?
+        // Is the message being sent?
         let message_exists = gs.events.iter().any(|e| match e {
-            NetworkBehaviourAction::SendEvent { peer_id: _, event } => {
+            NetworkBehaviourAction::NotifyHandler { event, .. } => {
                 event.messages.iter().any(|msg| id(msg) == msg_id)
             }
             _ => false,
diff --git a/protocols/gossipsub/tests/smoke.rs b/protocols/gossipsub/tests/smoke.rs
index bd6337c2..f16486e6 100644
--- a/protocols/gossipsub/tests/smoke.rs
+++ b/protocols/gossipsub/tests/smoke.rs
@@ -30,11 +30,13 @@ use std::{
 };

 use libp2p_core::{
+    Multiaddr,
+    Transport,
     identity,
     multiaddr::Protocol,
     muxing::StreamMuxerBox,
     transport::MemoryTransport,
-    upgrade, Multiaddr, Transport,
+    upgrade,
 };
 use libp2p_gossipsub::{Gossipsub, GossipsubConfig, GossipsubEvent, Topic};
 use libp2p_plaintext::PlainText2Config;
diff --git a/protocols/identify/src/identify.rs b/protocols/identify/src/identify.rs
index 756d04ff..fe78bc1b 100644
--- a/protocols/identify/src/identify.rs
+++ b/protocols/identify/src/identify.rs
@@ -26,6 +26,7 @@ use libp2p_core::{
     Multiaddr,
     PeerId,
     PublicKey,
+    connection::ConnectionId,
     upgrade::{ReadOneError, UpgradeError}
 };
 use libp2p_swarm::{
@@ -109,9 +110,10 @@ impl NetworkBehaviour for Identify {
         self.observed_addresses.remove(peer_id);
     }

-    fn inject_node_event(
+    fn inject_event(
         &mut self,
         peer_id: PeerId,
+        _connection: ConnectionId,
         event: <Self::ProtocolsHandler as ProtocolsHandler>::OutEvent,
     ) {
         match event {
diff --git a/protocols/kad/src/behaviour.rs b/protocols/kad/src/behaviour.rs
index cfdab025..0c04e5da 100644
--- a/protocols/kad/src/behaviour.rs
+++ b/protocols/kad/src/behaviour.rs
@@ -31,8 +31,14 @@ use crate::protocol::{KadConnectionType, KadPeer};
 use crate::query::{Query, QueryId, QueryPool, QueryConfig, QueryPoolState};
 use crate::record::{self, store::{self, RecordStore}, Record, ProviderRecord};
 use fnv::{FnvHashMap, FnvHashSet};
-use libp2p_core::{ConnectedPoint, Multiaddr, PeerId};
-use libp2p_swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler};
+use libp2p_core::{ConnectedPoint, Multiaddr, PeerId, connection::ConnectionId};
+use libp2p_swarm::{
+    NetworkBehaviour,
+    NetworkBehaviourAction,
+    NotifyHandler,
+    PollParameters,
+    ProtocolsHandler
+};
 use log::{info, debug, warn};
 use smallvec::SmallVec;
 use std::{borrow::{Borrow, Cow}, error, iter, time::Duration};
@@ -917,13 +923,20 @@ where
     }

     /// Processes a record received from a peer.
-    fn record_received(&mut self, source: PeerId, request_id: KademliaRequestId, mut record: Record) {
+    fn record_received(
+        &mut self,
+        source: PeerId,
+        connection: ConnectionId,
+        request_id: KademliaRequestId,
+        mut record: Record
+    ) {
         if record.publisher.as_ref() == Some(self.kbuckets.local_key().preimage()) {
             // If the (alleged) publisher is the local node, do nothing. The record of
             // the original publisher should never change as a result of replication
             // and the publisher is always assumed to have the "right" value.
-            self.queued_events.push_back(NetworkBehaviourAction::SendEvent {
+            self.queued_events.push_back(NetworkBehaviourAction::NotifyHandler {
                 peer_id: source,
+                handler: NotifyHandler::One(connection),
                 event: KademliaHandlerIn::PutRecordRes {
                     key: record.key,
                     value: record.value,
@@ -974,8 +987,9 @@ where
         match self.store.put(record.clone()) {
             Ok(()) => {
                 debug!("Record stored: {:?}; {} bytes", record.key, record.value.len());
-                self.queued_events.push_back(NetworkBehaviourAction::SendEvent {
+                self.queued_events.push_back(NetworkBehaviourAction::NotifyHandler {
                     peer_id: source,
+                    handler: NotifyHandler::One(connection),
                     event: KademliaHandlerIn::PutRecordRes {
                         key: record.key,
                         value: record.value,
@@ -985,8 +999,9 @@ where
             }
             Err(e) => {
                 info!("Record not stored: {:?}", e);
-                self.queued_events.push_back(NetworkBehaviourAction::SendEvent {
+                self.queued_events.push_back(NetworkBehaviourAction::NotifyHandler {
                     peer_id: source,
+                    handler: NotifyHandler::One(connection),
                     event: KademliaHandlerIn::Reset(request_id)
                 })
             }
@@ -1062,7 +1077,9 @@ where
             .position(|(p, _)| p == &peer)
             .map(|p| q.inner.pending_rpcs.remove(p)))
         {
-            self.queued_events.push_back(NetworkBehaviourAction::SendEvent { peer_id, event });
+            self.queued_events.push_back(NetworkBehaviourAction::NotifyHandler {
+                peer_id, event, handler: NotifyHandler::Any
+            });
         }

         // The remote's address can only be put into the routing table,
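Note how every Kademlia response above is now pinned to the connection the request came in on, via `NotifyHandler::One(connection)`. A toy sketch of that request/response correlation (stand-in types, not the Kademlia code):

use std::collections::VecDeque;

// Stand-in IDs and actions.
#[derive(Clone, Copy, Debug, PartialEq)]
struct ConnectionId(usize);

#[derive(Debug)]
enum NotifyHandler { One(ConnectionId), Any }

#[derive(Debug)]
struct Action { peer_id: u32, handler: NotifyHandler, event: String }

#[derive(Default)]
struct Behaviour { queued_events: VecDeque<Action> }

impl Behaviour {
    // A request handed to `inject_event` carries the connection it came
    // in on; the response is routed back to exactly that connection.
    fn inject_event(&mut self, source: u32, connection: ConnectionId, request: &str) {
        self.queued_events.push_back(Action {
            peer_id: source,
            handler: NotifyHandler::One(connection),
            event: format!("response to {request}"),
        });
    }
}

fn main() {
    let mut b = Behaviour::default();
    b.inject_event(7, ConnectionId(42), "GET_VALUE");
    println!("{:?}", b.queued_events);
}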
@@ -1133,30 +1150,18 @@ where
         self.connected_peers.remove(id);
     }

-    fn inject_replaced(&mut self, peer_id: PeerId, _old: ConnectedPoint, new_endpoint: ConnectedPoint) {
-        // We need to re-send the active queries.
-        for query in self.queries.iter() {
-            if query.is_waiting(&peer_id) {
-                self.queued_events.push_back(NetworkBehaviourAction::SendEvent {
-                    peer_id: peer_id.clone(),
-                    event: query.inner.info.to_request(query.id()),
-                });
-            }
-        }
-
-        if let Some(addrs) = self.kbuckets.entry(&kbucket::Key::new(peer_id)).value() {
-            if let ConnectedPoint::Dialer { address } = new_endpoint {
-                addrs.insert(address);
-            }
-        }
-    }
-
-    fn inject_node_event(&mut self, source: PeerId, event: KademliaHandlerEvent) {
+    fn inject_event(
+        &mut self,
+        source: PeerId,
+        connection: ConnectionId,
+        event: KademliaHandlerEvent
+    ) {
         match event {
             KademliaHandlerEvent::FindNodeReq { key, request_id } => {
                 let closer_peers = self.find_closest(&kbucket::Key::new(key), &source);
-                self.queued_events.push_back(NetworkBehaviourAction::SendEvent {
+                self.queued_events.push_back(NetworkBehaviourAction::NotifyHandler {
                     peer_id: source,
+                    handler: NotifyHandler::One(connection),
                     event: KademliaHandlerIn::FindNodeRes {
                         closer_peers,
                         request_id,
@@ -1174,8 +1179,9 @@ where
             KademliaHandlerEvent::GetProvidersReq { key, request_id } => {
                 let provider_peers = self.provider_peers(&key, &source);
                 let closer_peers = self.find_closest(&kbucket::Key::new(key), &source);
-                self.queued_events.push_back(NetworkBehaviourAction::SendEvent {
+                self.queued_events.push_back(NetworkBehaviourAction::NotifyHandler {
                     peer_id: source,
+                    handler: NotifyHandler::One(connection),
                     event: KademliaHandlerIn::GetProvidersRes {
                         closer_peers,
                         provider_peers,
@@ -1243,8 +1249,9 @@ where
                     Vec::new()
                 };

-                self.queued_events.push_back(NetworkBehaviourAction::SendEvent {
+                self.queued_events.push_back(NetworkBehaviourAction::NotifyHandler {
                     peer_id: source,
+                    handler: NotifyHandler::One(connection),
                     event: KademliaHandlerIn::GetRecordRes {
                         record,
                         closer_peers,
@@ -1292,7 +1299,7 @@ where
                 record,
                 request_id
             } => {
-                self.record_received(source, request_id, record);
+                self.record_received(source, connection, request_id, record);
             }

             KademliaHandlerEvent::PutRecordRes {
@@ -1397,8 +1404,8 @@ where
                     query.on_success(&peer_id, vec![])
                 }
                 if self.connected_peers.contains(&peer_id) {
-                    self.queued_events.push_back(NetworkBehaviourAction::SendEvent {
-                        peer_id, event
+                    self.queued_events.push_back(NetworkBehaviourAction::NotifyHandler {
+                        peer_id, event, handler: NotifyHandler::Any
                     });
                 } else if &peer_id != self.kbuckets.local_key().preimage() {
                     query.inner.pending_rpcs.push((peer_id.clone(), event));
diff --git a/protocols/kad/src/handler.rs b/protocols/kad/src/handler.rs
index bd6bd8a5..ac2430f0 100644
--- a/protocols/kad/src/handler.rs
+++ b/protocols/kad/src/handler.rs
@@ -258,7 +258,7 @@ impl From<ProtocolsHandlerUpgrErr<io::Error>> for KademliaHandlerQueryErr {
 }

 /// Event to send to the handler.
-#[derive(Debug)]
+#[derive(Debug, Clone)]
 pub enum KademliaHandlerIn {
     /// Resets the (sub)stream associated with the given request ID,
     /// thus signaling an error to the remote.
@@ -358,10 +358,7 @@ pub enum KademliaHandlerIn {

 /// Unique identifier for a request. Must be passed back in order to answer a request from
 /// the remote.
-///
-/// We don't implement `Clone` on purpose, in order to prevent users from answering the same
-/// request twice.
-#[derive(Debug, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq)]
 pub struct KademliaRequestId {
     /// Unique identifier for an incoming connection.
     connec_unique_id: UniqueConnecId,
diff --git a/protocols/mdns/src/behaviour.rs b/protocols/mdns/src/behaviour.rs
index 499a5e07..85685f72 100644
--- a/protocols/mdns/src/behaviour.rs
+++ b/protocols/mdns/src/behaviour.rs
@@ -20,7 +20,14 @@
 use crate::service::{MdnsService, MdnsPacket, build_query_response, build_service_discovery_response};
 use futures::prelude::*;
-use libp2p_core::{address_translation, ConnectedPoint, Multiaddr, PeerId, multiaddr::Protocol};
+use libp2p_core::{
+    ConnectedPoint,
+    Multiaddr,
+    PeerId,
+    address_translation,
+    connection::ConnectionId,
+    multiaddr::Protocol
+};
 use libp2p_swarm::{
     NetworkBehaviour,
     NetworkBehaviourAction,
@@ -196,9 +203,10 @@ impl NetworkBehaviour for Mdns {

     fn inject_disconnected(&mut self, _: &PeerId, _: ConnectedPoint) {}

-    fn inject_node_event(
+    fn inject_event(
         &mut self,
         _: PeerId,
+        _: ConnectionId,
         _ev: <Self::ProtocolsHandler as ProtocolsHandler>::OutEvent,
     ) {
         void::unreachable(_ev)
diff --git a/protocols/noise/src/lib.rs b/protocols/noise/src/lib.rs
index 1730160f..da9794fc 100644
--- a/protocols/noise/src/lib.rs
+++ b/protocols/noise/src/lib.rs
@@ -78,7 +78,7 @@ pub struct NoiseConfig {

 impl NoiseConfig {
     /// Turn the `NoiseConfig` into an authenticated upgrade for use
-    /// with a [`Network`](libp2p_core::nodes::Network).
+    /// with a [`Network`](libp2p_core::Network).
     pub fn into_authenticated(self) -> NoiseAuthenticated {
         NoiseAuthenticated { config: self }
     }
@@ -298,7 +298,7 @@ where
 /// On success, the upgrade yields the [`PeerId`] obtained from the
 /// `RemoteIdentity`. The output of this upgrade is thus directly suitable
 /// for creating an [`authenticated`](libp2p_core::transport::upgrade::Authenticate)
-/// transport for use with a [`Network`](libp2p_core::nodes::Network).
+/// transport for use with a [`Network`](libp2p_core::Network).
 #[derive(Clone)]
 pub struct NoiseAuthenticated {
     config: NoiseConfig
diff --git a/protocols/ping/src/lib.rs b/protocols/ping/src/lib.rs
index 024d6b0b..69126644 100644
--- a/protocols/ping/src/lib.rs
+++ b/protocols/ping/src/lib.rs
@@ -47,7 +47,7 @@ pub mod handler;
 pub use handler::{PingConfig, PingResult, PingSuccess, PingFailure};
 use handler::PingHandler;

-use libp2p_core::{ConnectedPoint, Multiaddr, PeerId};
+use libp2p_core::{ConnectedPoint, Multiaddr, PeerId, connection::ConnectionId};
 use libp2p_swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters};
 use std::{collections::VecDeque, task::Context, task::Poll};
 use void::Void;
@@ -104,7 +104,7 @@ impl NetworkBehaviour for Ping {

     fn inject_disconnected(&mut self, _: &PeerId, _: ConnectedPoint) {}

-    fn inject_node_event(&mut self, peer: PeerId, result: PingResult) {
+    fn inject_event(&mut self, peer: PeerId, _: ConnectionId, result: PingResult) {
         self.events.push_front(PingEvent { peer, result })
     }
diff --git a/swarm/src/behaviour.rs b/swarm/src/behaviour.rs
index 250020bd..97497b8a 100644
--- a/swarm/src/behaviour.rs
+++ b/swarm/src/behaviour.rs
@@ -19,7 +19,7 @@
 // DEALINGS IN THE SOFTWARE.

 use crate::protocols_handler::{IntoProtocolsHandler, ProtocolsHandler};
-use libp2p_core::{ConnectedPoint, Multiaddr, PeerId, nodes::ListenerId};
+use libp2p_core::{ConnectedPoint, Multiaddr, PeerId, connection::{ConnectionId, ListenerId}};
 use std::{error, task::Context, task::Poll};

 /// A behaviour for the network. Allows customizing the swarm.
@@ -60,7 +60,7 @@ pub trait NetworkBehaviour: Send + 'static {
     ///
     /// The network behaviour (ie. the implementation of this trait) and the handlers it has
     /// spawned (ie. the objects returned by `new_handler`) can communicate by passing messages.
-    /// Messages sent from the handler to the behaviour are injected with `inject_node_event`, and
-    /// the behaviour can send a message to the handler by making `poll` return `SendEvent`.
+    /// Messages sent from the handler to the behaviour are injected with `inject_event`, and
+    /// the behaviour can send a message to the handler by making `poll` return `NotifyHandler`.
     fn new_handler(&mut self) -> Self::ProtocolsHandler;
@@ -85,27 +85,15 @@ pub trait NetworkBehaviour: Send + 'static {
     /// or may not have been processed by the handler.
     fn inject_disconnected(&mut self, peer_id: &PeerId, endpoint: ConnectedPoint);

-    /// Indicates the behaviour that we replace the connection from the node with another.
-    ///
-    /// The handler that used to be dedicated to this node has been destroyed and replaced with a
-    /// new one. Any event that has been sent to it may or may not have been processed.
-    ///
-    /// The default implementation of this method calls `inject_disconnected` followed with
-    /// `inject_connected`. This is a logically safe way to implement this behaviour. However, you
-    /// may want to overwrite this method in the situations where this isn't appropriate.
-    fn inject_replaced(&mut self, peer_id: PeerId, closed_endpoint: ConnectedPoint, new_endpoint: ConnectedPoint) {
-        self.inject_disconnected(&peer_id, closed_endpoint);
-        self.inject_connected(peer_id, new_endpoint);
-    }
-
-    /// Informs the behaviour about an event generated by the handler dedicated to the peer identified by `peer_id`.
-    /// for the behaviour.
+    /// Informs the behaviour about an event generated by the handler dedicated to the peer identified by `peer_id`.
     ///
     /// The `peer_id` is guaranteed to be in a connected state. In other words, `inject_connected`
     /// has previously been called with this `PeerId`.
-    fn inject_node_event(
+    fn inject_event(
         &mut self,
         peer_id: PeerId,
+        connection: ConnectionId,
         event: <<Self::ProtocolsHandler as IntoProtocolsHandler>::Handler as ProtocolsHandler>::OutEvent
     );
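Since `inject_event` now receives a `ConnectionId`, a behaviour can track state per connection instead of per peer. A rough sketch of what that enables, with hypothetical stand-in types (not the real trait):

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct PeerId(u32);
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct ConnectionId(u32);

#[derive(Default)]
struct Behaviour {
    // Requests seen per (peer, connection), so a response can later be
    // routed back to the connection the request arrived on.
    inflight: HashMap<(PeerId, ConnectionId), Vec<String>>,
}

impl Behaviour {
    fn inject_event(&mut self, peer: PeerId, conn: ConnectionId, request: String) {
        self.inflight.entry((peer, conn)).or_default().push(request);
    }
}

fn main() {
    let mut b = Behaviour::default();
    b.inject_event(PeerId(1), ConnectionId(10), "ping".into());
    b.inject_event(PeerId(1), ConnectionId(11), "find".into());
    assert_eq!(b.inflight.len(), 2); // same peer, two connections
}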
@@ -218,18 +206,27 @@ pub enum NetworkBehaviourAction<TInEvent, TOutEvent> {
         peer_id: PeerId,
     },

-    /// Instructs the `Swarm` to send a message to the handler dedicated to the connection with the peer.
+    /// Instructs the `Swarm` to send an event to the handler dedicated to a
+    /// connection with a peer.
+    ///
+    /// If the `Swarm` is connected to the peer, the message is delivered to the
+    /// `ProtocolsHandler` instance identified by the peer ID and connection ID.
     ///
-    /// If the `Swarm` is connected to the peer, the message is delivered to the remote's
-    /// protocol handler. If there is no connection to the peer, the message is ignored.
-    /// To ensure delivery, the `NetworkBehaviour` must keep track of connected peers.
+    /// If the specified connection no longer exists, the event is silently dropped.
+    ///
+    /// Typically the connection ID given is the same as the one passed to
+    /// [`NetworkBehaviour::inject_event`], i.e. whenever the behaviour wishes to
+    /// respond to a request on the same connection (and possibly the same
+    /// substream, as per the implementation of `ProtocolsHandler`).
     ///
     /// Note that even if the peer is currently connected, connections can get closed
-    /// at any time and thus the message may not reach its destination.
-    SendEvent {
-        /// The peer to which to send the message.
+    /// at any time and thus the event may not reach a handler.
+    NotifyHandler {
+        /// The peer for whom a `ProtocolsHandler` should be notified.
         peer_id: PeerId,
-        /// The message to send.
+        /// Which handler(s) of the peer's connections to notify.
+        handler: NotifyHandler,
+        /// The event to send.
         event: TInEvent,
     },

@@ -244,3 +241,15 @@ pub enum NetworkBehaviourAction<TInEvent, TOutEvent> {
         address: Multiaddr,
     },
 }
+
+/// The options w.r.t. which connection handlers to notify of an event.
+#[derive(Debug, Clone)]
+pub enum NotifyHandler {
+    /// Notify a particular connection handler.
+    One(ConnectionId),
+    /// Notify an arbitrary connection handler.
+    Any,
+    /// Notify all connection handlers.
+    All
+}
+
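The three `NotifyHandler` variants map onto three delivery policies. A toy dispatcher over a peer's open connections makes the difference concrete (stand-in types, not the swarm's machinery):

#[derive(Debug, Clone, Copy, PartialEq)]
struct ConnectionId(usize);

#[derive(Debug, Clone, Copy)]
enum NotifyHandler {
    One(ConnectionId),
    Any,
    All,
}

// Which of the peer's connections should receive the event.
fn recipients(conns: &[ConnectionId], h: NotifyHandler) -> Vec<ConnectionId> {
    match h {
        NotifyHandler::One(id) => conns.iter().copied().filter(|c| *c == id).collect(),
        NotifyHandler::Any => conns.first().copied().into_iter().collect(),
        NotifyHandler::All => conns.to_vec(),
    }
}

fn main() {
    let conns = [ConnectionId(1), ConnectionId(2), ConnectionId(3)];
    assert_eq!(recipients(&conns, NotifyHandler::One(ConnectionId(2))).len(), 1);
    assert_eq!(recipients(&conns, NotifyHandler::Any).len(), 1);
    assert_eq!(recipients(&conns, NotifyHandler::All).len(), 3);
}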
diff --git a/swarm/src/lib.rs b/swarm/src/lib.rs
index 3eb0b860..01bffcce 100644
--- a/swarm/src/lib.rs
+++ b/swarm/src/lib.rs
@@ -64,7 +64,8 @@ pub use behaviour::{
     NetworkBehaviour,
     NetworkBehaviourAction,
     NetworkBehaviourEventProcess,
-    PollParameters
+    PollParameters,
+    NotifyHandler
 };
 pub use protocols_handler::{
     IntoProtocolsHandler,
@@ -78,30 +79,42 @@ pub use protocols_handler::{
     SubstreamProtocol
 };

-/// Substream for which a protocol has been chosen.
-///
-/// Implements the [`AsyncRead`](futures::io::AsyncRead) and
-/// [`AsyncWrite`](futures::io::AsyncWrite) traits.
-pub type NegotiatedSubstream = Negotiated<Substream<StreamMuxerBox>>;
-
-use protocols_handler::{NodeHandlerWrapperBuilder, NodeHandlerWrapperError};
-use futures::{prelude::*, executor::{ThreadPool, ThreadPoolBuilder}, stream::FusedStream};
+use protocols_handler::NodeHandlerWrapperBuilder;
+use futures::{
+    prelude::*,
+    executor::{ThreadPool, ThreadPoolBuilder},
+    stream::FusedStream,
+};
 use libp2p_core::{
-    Executor, Negotiated, Transport, Multiaddr, PeerId, ProtocolName,
+    Executor,
+    Transport,
+    Multiaddr,
+    Negotiated,
+    PeerId,
+    connection::{
+        ConnectionId,
+        ConnectionInfo,
+        EstablishedConnection,
+        IntoConnectionHandler,
+        ListenerId,
+        Substream
+    },
+    transport::{TransportError, boxed::Boxed as BoxTransport},
     muxing::{StreamMuxer, StreamMuxerBox},
-    nodes::{
-        ListenerId, Substream,
-        collection::ConnectionInfo,
-        network::{self, Network, NetworkEvent}
+    network::{
+        DialError,
+        Network,
+        NetworkInfo,
+        NetworkEvent,
+        NetworkConfig,
+        Peer,
+        peer::{ConnectedPeer, PeerState},
     },
-    transport::{
-        boxed::Boxed as BoxTransport,
-        TransportError,
-    }
+    upgrade::ProtocolName,
 };
 use registry::{Addresses, AddressIntoIter};
 use smallvec::SmallVec;
-use std::{error, fmt, io, ops::{Deref, DerefMut}, pin::Pin, task::{Context, Poll}};
+use std::{error, fmt, hash::Hash, io, ops::{Deref, DerefMut}, pin::Pin, task::{Context, Poll}};
 use std::collections::HashSet;
 use upgrade::UpgradeInfoSend as _;

@@ -111,10 +124,15 @@ pub type Swarm = ExpandedSwarm<
     <<<TBehaviour as NetworkBehaviour>::ProtocolsHandler as IntoProtocolsHandler>::Handler as ProtocolsHandler>::InEvent,
     <<<TBehaviour as NetworkBehaviour>::ProtocolsHandler as IntoProtocolsHandler>::Handler as ProtocolsHandler>::OutEvent,
     <TBehaviour as NetworkBehaviour>::ProtocolsHandler,
-    <<<TBehaviour as NetworkBehaviour>::ProtocolsHandler as IntoProtocolsHandler>::Handler as ProtocolsHandler>::Error,
     TConnInfo,
 >;

+/// Substream for which a protocol has been chosen.
+///
+/// Implements the [`AsyncRead`](futures::io::AsyncRead) and
+/// [`AsyncWrite`](futures::io::AsyncWrite) traits.
+pub type NegotiatedSubstream = Negotiated<Substream<StreamMuxerBox>>;
+
 /// Event generated by the `Swarm`.
 #[derive(Debug)]
 pub enum SwarmEvent {
@@ -143,13 +161,16 @@ pub enum SwarmEvent {
 }

 /// Contains the state of the network, plus the way it should behave.
-pub struct ExpandedSwarm<TBehaviour, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo = PeerId> {
+pub struct ExpandedSwarm<TBehaviour, TInEvent, TOutEvent, THandler, TConnInfo = PeerId>
+where
+    THandler: IntoProtocolsHandler,
+    TConnInfo: ConnectionInfo<PeerId = PeerId>,
+{
     network: Network<
         BoxTransport<(TConnInfo, StreamMuxerBox), io::Error>,
         TInEvent,
         TOutEvent,
         NodeHandlerWrapperBuilder<THandler>,
-        NodeHandlerWrapperError<THandlerErr>,
         TConnInfo,
         PeerId,
     >,
@@ -171,12 +192,17 @@ pub struct ExpandedSwarm<TBehaviour, TInEvent, TOutEvent, THandler, TConnInfo>
     banned_peers: HashSet<PeerId>,

-    /// Pending event message to be delivered.
-    send_event_to_complete: Option<(PeerId, TInEvent)>
+    /// Pending event to be delivered to connection handlers
+    /// (or dropped if the peer disconnected) before the `behaviour`
+    /// can be polled again.
+    pending_event: Option<(PeerId, PendingNotifyHandler, TInEvent)>
 }

-impl<TBehaviour, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo> Deref for
-    ExpandedSwarm<TBehaviour, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo>
+impl<TBehaviour, TInEvent, TOutEvent, THandler, TConnInfo> Deref for
+    ExpandedSwarm<TBehaviour, TInEvent, TOutEvent, THandler, TConnInfo>
+where
+    THandler: IntoProtocolsHandler,
+    TConnInfo: ConnectionInfo<PeerId = PeerId>,
 {
     type Target = TBehaviour;

@@ -185,28 +211,33 @@ impl Deref for ExpandedSwarm
     }
 }

-impl<TBehaviour, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo> DerefMut for
-    ExpandedSwarm<TBehaviour, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo>
+impl<TBehaviour, TInEvent, TOutEvent, THandler, TConnInfo> DerefMut for
+    ExpandedSwarm<TBehaviour, TInEvent, TOutEvent, THandler, TConnInfo>
+where
+    THandler: IntoProtocolsHandler,
+    TConnInfo: ConnectionInfo<PeerId = PeerId>,
 {
     fn deref_mut(&mut self) -> &mut Self::Target {
         &mut self.behaviour
     }
 }

-impl<TBehaviour, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo> Unpin for
-    ExpandedSwarm<TBehaviour, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo>
+impl<TBehaviour, TInEvent, TOutEvent, THandler, TConnInfo> Unpin for
+    ExpandedSwarm<TBehaviour, TInEvent, TOutEvent, THandler, TConnInfo>
+where
+    THandler: IntoProtocolsHandler,
+    TConnInfo: ConnectionInfo<PeerId = PeerId>,
 {
 }

-impl<TBehaviour, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo>
-    ExpandedSwarm<TBehaviour, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo>
+impl<TBehaviour, TInEvent, TOutEvent, THandler, TConnInfo>
+    ExpandedSwarm<TBehaviour, TInEvent, TOutEvent, THandler, TConnInfo>
 where TBehaviour: NetworkBehaviour<ProtocolsHandler = THandler>,
-      TInEvent: Send + 'static,
+      TInEvent: Clone + Send + 'static,
       TOutEvent: Send + 'static,
       TConnInfo: ConnectionInfo<PeerId = PeerId> + fmt::Debug + Clone + Send + 'static,
-      THandlerErr: error::Error + Send + 'static,
       THandler: IntoProtocolsHandler + Send + 'static,
-      THandler::Handler: ProtocolsHandler<InEvent = TInEvent, OutEvent = TOutEvent, Error = THandlerErr>,
+      THandler::Handler: ProtocolsHandler<InEvent = TInEvent, OutEvent = TOutEvent>,
 {
     /// Builds a new `Swarm`.
     pub fn new(transport: TTransport, behaviour: TBehaviour, local_peer_id: PeerId) -> Self
@@ -225,6 +256,11 @@ where TBehaviour: NetworkBehaviour,
             .build()
     }

+    /// Returns information about the [`Network`] underlying the `Swarm`.
+    pub fn network_info(me: &Self) -> NetworkInfo {
+        me.network.info()
+    }
+
     /// Starts listening on the given address.
     ///
     /// Returns an error if the address is not supported.
@@ -242,9 +278,9 @@ where TBehaviour: NetworkBehaviour,
     /// Tries to dial the given address.
     ///
     /// Returns an error if the address is not supported.
-    pub fn dial_addr(me: &mut Self, addr: Multiaddr) -> Result<(), TransportError<io::Error>> {
+    pub fn dial_addr(me: &mut Self, addr: Multiaddr) -> Result<(), DialError> {
         let handler = me.behaviour.new_handler();
-        me.network.dial(addr, handler.into_node_handler_builder())
+        me.network.dial(&addr, handler.into_node_handler_builder()).map(|_id| ())
     }

     /// Tries to reach the given peer using the elements in the topology.
@@ -254,16 +290,19 @@ where TBehaviour: NetworkBehaviour,
     pub fn dial(me: &mut Self, peer_id: PeerId) {
         let addrs = me.behaviour.addresses_of_peer(&peer_id);
         match me.network.peer(peer_id.clone()) {
-            network::Peer::NotConnected(peer) => {
-                let handler = me.behaviour.new_handler().into_node_handler_builder();
-                if peer.connect_iter(addrs, handler).is_err() {
-                    me.behaviour.inject_dial_failure(&peer_id);
+            Peer::Disconnected(peer) => {
+                let mut addrs = addrs.into_iter();
+                if let Some(first) = addrs.next() {
+                    let handler = me.behaviour.new_handler().into_node_handler_builder();
+                    if peer.connect(first, addrs, handler).is_err() {
+                        me.behaviour.inject_dial_failure(&peer_id);
+                    }
                 }
             },
-            network::Peer::PendingConnect(mut peer) => {
-                peer.append_multiaddr_attempts(addrs)
+            Peer::Dialing(mut peer) => {
+                peer.connection().add_addresses(addrs)
             },
-            network::Peer::Connected(_) | network::Peer::LocalNode => {}
+            Peer::Connected(_) | Peer::Local => {}
         }
     }

@@ -291,11 +330,12 @@ where TBehaviour: NetworkBehaviour,
         me.external_addrs.add(addr)
     }

-    /// Returns the connection info of a node, or `None` if we're not connected to it.
+    /// Returns the connection info for an arbitrary connection with the peer, or `None`
+    /// if there is no connection to that peer.
     // TODO: should take &self instead of &mut self, but the API in network requires &mut
     pub fn connection_info(me: &mut Self, peer_id: &PeerId) -> Option<TConnInfo> {
         if let Some(mut n) = me.network.peer(peer_id.clone()).into_connected() {
-            Some(n.connection_info().clone())
+            Some(n.some_connection().info().clone())
         } else {
             None
         }
@@ -308,7 +348,7 @@ where TBehaviour: NetworkBehaviour,
     pub fn ban_peer_id(me: &mut Self, peer_id: PeerId) {
         me.banned_peers.insert(peer_id.clone());
         if let Some(c) = me.network.peer(peer_id).into_connected() {
-            c.close();
+            c.disconnect();
         }
     }

@@ -349,55 +389,76 @@ where TBehaviour: NetworkBehaviour,
         loop {
             let mut network_not_ready = false;

+            // First let the network make progress.
             match this.network.poll(cx) {
                 Poll::Pending => network_not_ready = true,
-                Poll::Ready(NetworkEvent::NodeEvent { conn_info, event }) => {
-                    this.behaviour.inject_node_event(conn_info.peer_id().clone(), event);
+                Poll::Ready(NetworkEvent::ConnectionEvent { connection, event }) => {
+                    let peer = connection.peer_id().clone();
+                    let connection = connection.id();
+                    this.behaviour.inject_event(peer, connection, event);
                 },
-                Poll::Ready(NetworkEvent::Connected { conn_info, endpoint }) => {
-                    if this.banned_peers.contains(conn_info.peer_id()) {
-                        this.network.peer(conn_info.peer_id().clone())
+                Poll::Ready(NetworkEvent::ConnectionEstablished { connection, num_established }) => {
+                    let peer = connection.peer_id().clone();
+                    if this.banned_peers.contains(&peer) {
+                        this.network.peer(peer)
                             .into_connected()
                             .expect("the Network just notified us that we were connected; QED")
-                            .close();
+                            .disconnect();
+                    } else if num_established == 1 {
+                        let endpoint = connection.endpoint().clone();
+                        this.behaviour.inject_connected(peer.clone(), endpoint);
+                        return Poll::Ready(SwarmEvent::Connected(peer));
                     } else {
-                        this.behaviour.inject_connected(conn_info.peer_id().clone(), endpoint);
-                        return Poll::Ready(SwarmEvent::Connected(conn_info.peer_id().clone()));
+                        // For now, secondary connections are not explicitly reported to
+                        // the behaviour. A behaviour only becomes aware of such connections
+                        // via the events emitted by the connection handlers.
+                        log::trace!("Secondary connection established: {:?}; Total (peer): {}.",
+                            connection.connected(), num_established);
                     }
                 },
-                Poll::Ready(NetworkEvent::NodeClosed { conn_info, endpoint, error }) => {
-                    log::trace!("Connection {:?} with endpoint {:?} closed by {:?}",
-                        conn_info, endpoint, error);
-                    this.behaviour.inject_disconnected(conn_info.peer_id(), endpoint);
-                    return Poll::Ready(SwarmEvent::Disconnected(conn_info.peer_id().clone()));
-                },
-                Poll::Ready(NetworkEvent::Replaced { new_info, closed_endpoint, endpoint, .. }) => {
-                    this.behaviour.inject_replaced(new_info.peer_id().clone(), closed_endpoint, endpoint);
+                Poll::Ready(NetworkEvent::ConnectionError { connected, error, num_established }) => {
+                    log::debug!("Connection {:?} closed by {:?}", connected, error);
+                    if num_established == 0 {
+                        let peer = connected.peer_id().clone();
+                        let endpoint = connected.endpoint;
+                        this.behaviour.inject_disconnected(&peer, endpoint);
+                        return Poll::Ready(SwarmEvent::Disconnected(peer));
+                    }
                 },
                 Poll::Ready(NetworkEvent::IncomingConnection(incoming)) => {
                     let handler = this.behaviour.new_handler();
-                    incoming.accept(handler.into_node_handler_builder());
+                    if let Err(e) = incoming.accept(handler.into_node_handler_builder()) {
+                        log::warn!("Incoming connection rejected: {:?}", e);
+                    }
                 },
-                Poll::Ready(NetworkEvent::NewListenerAddress { listen_addr, .. }) => {
+                Poll::Ready(NetworkEvent::NewListenerAddress { listener_id, listen_addr }) => {
+                    log::debug!("Listener {:?}; New address: {:?}", listener_id, listen_addr);
                     if !this.listened_addrs.contains(&listen_addr) {
                         this.listened_addrs.push(listen_addr.clone())
                     }
                     this.behaviour.inject_new_listen_addr(&listen_addr);
                     return Poll::Ready(SwarmEvent::NewListenAddr(listen_addr));
                 }
-                Poll::Ready(NetworkEvent::ExpiredListenerAddress { listen_addr, .. }) => {
+                Poll::Ready(NetworkEvent::ExpiredListenerAddress { listener_id, listen_addr }) => {
+                    log::debug!("Listener {:?}; Expired address {:?}.", listener_id, listen_addr);
                     this.listened_addrs.retain(|a| a != &listen_addr);
                     this.behaviour.inject_expired_listen_addr(&listen_addr);
                     return Poll::Ready(SwarmEvent::ExpiredListenAddr(listen_addr));
                 }
-                Poll::Ready(NetworkEvent::ListenerClosed { listener_id, .. }) =>
-                    this.behaviour.inject_listener_closed(listener_id),
+                Poll::Ready(NetworkEvent::ListenerClosed { listener_id, reason }) => {
+                    log::debug!("Listener {:?}; Closed by {:?}.", listener_id, reason);
+                    this.behaviour.inject_listener_closed(listener_id);
+                }
                 Poll::Ready(NetworkEvent::ListenerError { listener_id, error }) =>
                     this.behaviour.inject_listener_error(listener_id, &error),
-                Poll::Ready(NetworkEvent::IncomingConnectionError { .. }) => {},
+                Poll::Ready(NetworkEvent::IncomingConnectionError { error, .. }) => {
+                    log::debug!("Incoming connection failed: {:?}", error);
+                },
                 Poll::Ready(NetworkEvent::DialError { peer_id, multiaddr, error, new_state }) => {
+                    log::debug!("Connection attempt to peer {:?} at address {:?} failed with {:?}",
+                        peer_id, multiaddr, error);
                     this.behaviour.inject_addr_reach_failure(Some(&peer_id), &multiaddr, &error);
-                    if let network::PeerState::NotConnected = new_state {
+                    if let PeerState::Disconnected = new_state {
                         this.behaviour.inject_dial_failure(&peer_id);
                     }
                     return Poll::Ready(SwarmEvent::UnreachableAddr {
@@ -407,6 +468,8 @@ where TBehaviour: NetworkBehaviour,
                     });
                 },
                 Poll::Ready(NetworkEvent::UnknownPeerDialError { multiaddr, error, .. }) => {
+                    log::debug!("Connection attempt to address {:?} of unknown peer failed with {:?}",
+                        multiaddr, error);
                     this.behaviour.inject_addr_reach_failure(None, &multiaddr, &error);
                     return Poll::Ready(SwarmEvent::UnreachableAddr {
                         peer_id: None,
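The `num_established` checks above collapse many connections into the single logical session a behaviour sees: `inject_connected` fires only for the first established connection, `inject_disconnected` only when the last one goes away. The bookkeeping in isolation (toy peer IDs, not the swarm code):

use std::collections::HashMap;

#[derive(Default)]
struct Tracker {
    // Established connections per peer.
    established: HashMap<u32, usize>,
}

impl Tracker {
    // Returns true if the behaviour should see `inject_connected`.
    fn connection_established(&mut self, peer: u32) -> bool {
        let n = self.established.entry(peer).or_insert(0);
        *n += 1;
        *n == 1 // first connection only
    }

    // Returns true if the behaviour should see `inject_disconnected`.
    fn connection_closed(&mut self, peer: u32) -> bool {
        match self.established.get_mut(&peer) {
            Some(n) if *n > 1 => { *n -= 1; false }
            Some(_) => { self.established.remove(&peer); true } // last one
            None => false,
        }
    }
}

fn main() {
    let mut t = Tracker::default();
    assert!(t.connection_established(1));  // first connection: notify
    assert!(!t.connection_established(1)); // second: silent
    assert!(!t.connection_closed(1));      // one still open: silent
    assert!(t.connection_closed(1));       // last one: notify
}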
@@ -416,19 +479,41 @@ where TBehaviour: NetworkBehaviour,
                     });
                 },
             }

-            // Try to deliver pending event.
-            if let Some((id, pending)) = this.send_event_to_complete.take() {
-                if let Some(mut peer) = this.network.peer(id.clone()).into_connected() {
-                    match peer.poll_ready_event(cx) {
-                        Poll::Ready(()) => peer.start_send_event(pending),
-                        Poll::Pending => {
-                            this.send_event_to_complete = Some((id, pending));
-                            return Poll::Pending
-                        },
+            // After the network has had a chance to make progress, try to deliver
+            // the pending event emitted by the behaviour in the previous iteration
+            // to the connection handler(s). The pending event must be delivered
+            // before polling the behaviour again. If the targeted peer has
+            // meanwhile disconnected, the event is discarded.
+            if let Some((peer_id, handler, event)) = this.pending_event.take() {
+                if let Some(mut peer) = this.network.peer(peer_id.clone()).into_connected() {
+                    match handler {
+                        PendingNotifyHandler::One(conn_id) =>
+                            if let Some(mut conn) = peer.connection(conn_id) {
+                                if let Some(event) = notify_one(&mut conn, event, cx) {
+                                    this.pending_event = Some((peer_id, handler, event));
+                                    return Poll::Pending
+                                }
+                            },
+                        PendingNotifyHandler::Any(ids) => {
+                            if let Some((event, ids)) = notify_any(ids, &mut peer, event, cx) {
+                                let handler = PendingNotifyHandler::Any(ids);
+                                this.pending_event = Some((peer_id, handler, event));
+                                return Poll::Pending
+                            }
+                        }
+                        PendingNotifyHandler::All(ids) => {
+                            if let Some((event, ids)) = notify_all(ids, &mut peer, event, cx) {
+                                let handler = PendingNotifyHandler::All(ids);
+                                this.pending_event = Some((peer_id, handler, event));
+                                return Poll::Pending
+                            }
+                        }
                     }
                 }
             }
+
+            debug_assert!(this.pending_event.is_none());
+
             let behaviour_poll = {
                 let mut parameters = SwarmPollParameters {
                     local_peer_id: &mut this.network.local_peer_id(),
@@ -456,14 +541,34 @@ where TBehaviour: NetworkBehaviour,
                         return Poll::Ready(SwarmEvent::StartConnect(peer_id))
                     }
                 },
-                Poll::Ready(NetworkBehaviourAction::SendEvent { peer_id, event }) => {
+                Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => {
                     if let Some(mut peer) = this.network.peer(peer_id.clone()).into_connected() {
-                        if let Poll::Ready(()) = peer.poll_ready_event(cx) {
-                            peer.start_send_event(event);
-                        } else {
-                            debug_assert!(this.send_event_to_complete.is_none());
-                            this.send_event_to_complete = Some((peer_id, event));
-                            return Poll::Pending;
+                        match handler {
+                            NotifyHandler::One(connection) => {
+                                if let Some(mut conn) = peer.connection(connection) {
+                                    if let Some(event) = notify_one(&mut conn, event, cx) {
+                                        let handler = PendingNotifyHandler::One(connection);
+                                        this.pending_event = Some((peer_id, handler, event));
+                                        return Poll::Pending
+                                    }
+                                }
+                            }
+                            NotifyHandler::Any => {
+                                let ids = peer.connections().into_ids().collect();
+                                if let Some((event, ids)) = notify_any(ids, &mut peer, event, cx) {
+                                    let handler = PendingNotifyHandler::Any(ids);
+                                    this.pending_event = Some((peer_id, handler, event));
+                                    return Poll::Pending
+                                }
+                            }
+                            NotifyHandler::All => {
+                                let ids = peer.connections().into_ids().collect();
+                                if let Some((event, ids)) = notify_all(ids, &mut peer, event, cx) {
+                                    let handler = PendingNotifyHandler::All(ids);
+                                    this.pending_event = Some((peer_id, handler, event));
+                                    return Poll::Pending
+                                }
+                            }
                         }
                     }
                 },
@@ -480,14 +585,156 @@ where TBehaviour: NetworkBehaviour,
     }
 }

-impl<TBehaviour, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo> Stream for
-    ExpandedSwarm<TBehaviour, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo>
+/// Connections to notify of a pending event.
+///
+/// The connection IDs to notify of an event are captured at the time
+/// the behaviour emits the event, in order not to forward the event
+/// to new connections which the behaviour may not have been aware of
+/// at the time it issued the request for sending it.
+enum PendingNotifyHandler {
+    One(ConnectionId),
+    Any(SmallVec<[ConnectionId; 10]>),
+    All(SmallVec<[ConnectionId; 10]>),
+}
+
+/// Notify a single connection of an event.
+///
+/// Returns `Some` with the given event if the connection is not currently
+/// ready to receive another event, in which case the current task is
+/// scheduled to be woken up.
+///
+/// Returns `None` if the connection is closing or the event has been
+/// successfully sent, in either case the event is consumed.
+fn notify_one<'a, TInEvent, TConnInfo, TPeerId>(
+    conn: &mut EstablishedConnection<'a, TInEvent, TConnInfo, TPeerId>,
+    event: TInEvent,
+    cx: &mut Context,
+) -> Option<TInEvent>
+where
+    TPeerId: Eq + std::hash::Hash + Clone,
+    TConnInfo: ConnectionInfo<PeerId = TPeerId>
+{
+    match conn.poll_ready_notify_handler(cx) {
+        Poll::Pending => Some(event),
+        Poll::Ready(Err(())) => None, // connection is closing
+        Poll::Ready(Ok(())) => {
+            // Can now only fail if the connection is closing.
+            let _ = conn.notify_handler(event);
+            None
+        }
+    }
+}
+
+/// Notify any one of a given list of connections of a peer of an event.
+///
+/// Returns `Some` with the given event and a new list of connections if
+/// none of the given connections was able to receive the event but at
+/// least one of them is not closing, in which case the current task
+/// is scheduled to be woken up. The returned connections are those which
+/// may still become ready to receive another event.
+///
+/// Returns `None` if either all connections are closing or the event
+/// was successfully sent to a handler, in either case the event is consumed.
+fn notify_any<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>(
+    ids: SmallVec<[ConnectionId; 10]>,
+    peer: &mut ConnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>,
+    event: TInEvent,
+    cx: &mut Context,
+) -> Option<(TInEvent, SmallVec<[ConnectionId; 10]>)>
+where
+    TTrans: Transport,
+    THandler: IntoConnectionHandler,
+    TPeerId: Eq + Hash + Clone,
+    TConnInfo: ConnectionInfo<PeerId = TPeerId>
+{
+    let mut pending = SmallVec::new();
+    let mut event = Some(event); // (1)
+    for id in ids.into_iter() {
+        if let Some(mut conn) = peer.connection(id) {
+            match conn.poll_ready_notify_handler(cx) {
+                Poll::Pending => pending.push(id),
+                Poll::Ready(Err(())) => {} // connection is closing
+                Poll::Ready(Ok(())) => {
+                    let e = event.take().expect("by (1),(2)");
+                    if let Err(e) = conn.notify_handler(e) {
+                        event = Some(e) // (2)
+                    } else {
+                        break
+                    }
+                }
+            }
+        }
+    }
+
+    event.and_then(|e|
+        if !pending.is_empty() {
+            Some((e, pending))
+        } else {
+            None
+        })
+}
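Both notify helpers above share one contract: if no handler is ready, the event is handed back to the caller, parked in `pending_event`, and retried on the next poll. Stripped of connections and wakers, the take-and-put-back pattern looks like this (toy sketch):

// `try_send` stands in for a handler that may refuse an event; on
// refusal it must return the event unchanged so nothing is lost.
fn flush<T>(slot: &mut Option<T>, try_send: impl FnOnce(T) -> Result<(), T>) -> bool {
    if let Some(event) = slot.take() {
        if let Err(event) = try_send(event) {
            *slot = Some(event); // not ready: park it and retry later
            return false;
        }
    }
    true // nothing pending (any more)
}

fn main() {
    let mut slot = Some("hello");
    // First consumer is "busy" and hands the event back.
    assert!(!flush(&mut slot, |e| Err(e)));
    assert_eq!(slot, Some("hello"));
    // Second attempt succeeds and the slot is drained.
    assert!(flush(&mut slot, |_| Ok(())));
    assert_eq!(slot, None);
}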
+/// Notify all of the given connections of a peer of an event.
+///
+/// Returns `Some` with the given event and a new list of connections if
+/// at least one of the given connections is not currently able to receive the event
+/// but is not closing, in which case the current task is scheduled to be woken up.
+/// The returned connections are those which are not closing.
+///
+/// Returns `None` if all connections are either closing or the event
+/// was successfully sent to all handlers whose connections are not closing,
+/// in either case the event is consumed.
+fn notify_all<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>(
+    ids: SmallVec<[ConnectionId; 10]>,
+    peer: &mut ConnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>,
+    event: TInEvent,
+    cx: &mut Context,
+) -> Option<(TInEvent, SmallVec<[ConnectionId; 10]>)>
+where
+    TTrans: Transport,
+    TInEvent: Clone,
+    THandler: IntoConnectionHandler,
+    TPeerId: Eq + Hash + Clone,
+    TConnInfo: ConnectionInfo<PeerId = TPeerId>
+{
+    if ids.len() == 1 {
+        if let Some(mut conn) = peer.connection(ids[0]) {
+            return notify_one(&mut conn, event, cx).map(|e| (e, ids))
+        }
+    }
+
+    {
+        let mut pending = SmallVec::new();
+        for id in ids.iter() {
+            if let Some(mut conn) = peer.connection(*id) { // (*)
+                if conn.poll_ready_notify_handler(cx).is_pending() {
+                    pending.push(*id)
+                }
+            }
+        }
+        if !pending.is_empty() {
+            return Some((event, pending))
+        }
+    }
+
+    for id in ids.into_iter() {
+        if let Some(mut conn) = peer.connection(id) {
+            // All connections were ready. Can now only fail due
+            // to a connection suddenly closing, which we ignore.
+            let _ = conn.notify_handler(event.clone());
+        }
+    }
+
+    None
+}
+
+impl<TBehaviour, TInEvent, TOutEvent, THandler, TConnInfo> Stream for
+    ExpandedSwarm<TBehaviour, TInEvent, TOutEvent, THandler, TConnInfo>
 where TBehaviour: NetworkBehaviour<ProtocolsHandler = THandler>,
-      THandlerErr: error::Error + Send + 'static,
       THandler: IntoProtocolsHandler + Send + 'static,
-      TInEvent: Send + 'static,
+      TInEvent: Clone + Send + 'static,
       TOutEvent: Send + 'static,
-      THandler::Handler: ProtocolsHandler<InEvent = TInEvent, OutEvent = TOutEvent, Error = THandlerErr>,
+      THandler::Handler: ProtocolsHandler<InEvent = TInEvent, OutEvent = TOutEvent>,
       TConnInfo: ConnectionInfo<PeerId = PeerId> + fmt::Debug + Clone + Send + 'static,
 {
     type Item = TBehaviour::OutEvent;

@@ -503,14 +750,13 @@ where TBehaviour: NetworkBehaviour,
 }

-/// the stream of behaviour events never terminates, so we can implement fused for it
-impl<TBehaviour, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo> FusedStream for
-    ExpandedSwarm<TBehaviour, TInEvent, TOutEvent, THandler, THandlerErr, TConnInfo>
+/// The stream of behaviour events never terminates, so we can implement fused for it.
+impl<TBehaviour, TInEvent, TOutEvent, THandler, TConnInfo> FusedStream for
+    ExpandedSwarm<TBehaviour, TInEvent, TOutEvent, THandler, TConnInfo>
 where TBehaviour: NetworkBehaviour<ProtocolsHandler = THandler>,
-      THandlerErr: error::Error + Send + 'static,
       THandler: IntoProtocolsHandler + Send + 'static,
-      TInEvent: Send + 'static,
+      TInEvent: Clone + Send + 'static,
       TOutEvent: Send + 'static,
-      THandler::Handler: ProtocolsHandler<InEvent = TInEvent, OutEvent = TOutEvent, Error = THandlerErr>,
+      THandler::Handler: ProtocolsHandler<InEvent = TInEvent, OutEvent = TOutEvent>,
       TConnInfo: ConnectionInfo<PeerId = PeerId> + fmt::Debug + Clone + Send + 'static,
 {
     fn is_terminated(&self) -> bool {
@@ -550,11 +796,10 @@ impl<'a> PollParameters for SwarmPollParameters<'a> {
 }

 pub struct SwarmBuilder<TBehaviour, TConnInfo> {
-    incoming_limit: Option<u32>,
-    executor: Option<Box<dyn Executor + Send>>,
     local_peer_id: PeerId,
     transport: BoxTransport<(TConnInfo, StreamMuxerBox), io::Error>,
     behaviour: TBehaviour,
+    network: NetworkConfig,
 }

 impl<TBehaviour, TConnInfo> SwarmBuilder<TBehaviour, TConnInfo>
 where TBehaviour: NetworkBehaviour,
@@ -579,16 +824,15 @@ where TBehaviour: NetworkBehaviour,
             .boxed();

         SwarmBuilder {
-            incoming_limit: None,
             local_peer_id,
-            executor: None,
             transport,
             behaviour,
+            network: NetworkConfig::default(),
         }
     }

-    pub fn incoming_limit(mut self, incoming_limit: Option<u32>) -> Self {
-        self.incoming_limit = incoming_limit;
+    pub fn incoming_limit(mut self, incoming_limit: usize) -> Self {
+        self.network.set_pending_incoming_limit(incoming_limit);
         self
     }

@@ -596,7 +840,7 @@ where TBehaviour: NetworkBehaviour,
     ///
-    /// By default, uses a threads pool.
+    /// By default, a thread pool is used.
     pub fn executor(mut self, executor: impl Executor + Send + 'static) -> Self {
-        self.executor = Some(Box::new(executor));
+        self.network.set_executor(Box::new(executor));
         self
     }

@@ -608,7 +852,7 @@ where TBehaviour: NetworkBehaviour,
             (self.0)(f)
         }
     }
-        self.executor = Some(Box::new(SpawnImpl(executor)));
+        self.network.set_executor(Box::new(SpawnImpl(executor)));
         self
     }

@@ -621,7 +865,9 @@ where TBehaviour: NetworkBehaviour,
             .map(|info| info.protocol_name().to_vec())
             .collect();

-        let executor = self.executor.or_else(|| {
+        // If no executor has been explicitly configured, try to set up
+        // a thread pool.
+        if self.network.executor().is_none() {
             struct PoolWrapper(ThreadPool);
             impl Executor for PoolWrapper {
                 fn exec(&self, f: Pin<Box<dyn Future<Output = ()> + Send>>) {
@@ -629,18 +875,20 @@ where TBehaviour: NetworkBehaviour,
                 }
             }

-            ThreadPoolBuilder::new()
+            if let Some(executor) = ThreadPoolBuilder::new()
                 .name_prefix("libp2p-task-")
                 .create()
                 .ok()
                 .map(|tp| Box::new(PoolWrapper(tp)) as Box<_>)
-        });
+            {
+                self.network.set_executor(Box::new(executor));
+            }
+        }

-        let network = Network::new_with_incoming_limit(
+        let network = Network::new(
             self.transport,
             self.local_peer_id,
-            executor,
-            self.incoming_limit
+            self.network,
         );

         ExpandedSwarm {
@@ -650,7 +898,7 @@ where TBehaviour: NetworkBehaviour,
             listened_addrs: SmallVec::new(),
             external_addrs: Addresses::default(),
             banned_peers: HashSet::new(),
-            send_event_to_complete: None
+            pending_event: None
         }
     }
 }

@@ -676,7 +924,7 @@ impl NetworkBehaviour for DummyBehaviour {

     fn inject_disconnected(&mut self, _: &PeerId, _: libp2p_core::ConnectedPoint) {}

-    fn inject_node_event(&mut self, _: PeerId,
+    fn inject_event(&mut self, _: PeerId, _: ConnectionId,
         _: <Self::ProtocolsHandler as ProtocolsHandler>::OutEvent) {}

     fn poll(&mut self, _: &mut Context, _: &mut impl PollParameters) ->
@@ -707,8 +955,9 @@ mod tests {
     fn test_build_swarm() {
         let id = get_random_id();
         let transport = DummyTransport::<(PeerId, Multiplex)>::new();
-        let swarm = SwarmBuilder::new(transport, DummyBehaviour {}, id.into())
-            .incoming_limit(Some(4)).build();
+        let behaviour = DummyBehaviour {};
+        let swarm = SwarmBuilder::new(transport, behaviour, id.into())
+            .incoming_limit(4).build();

         assert_eq!(swarm.network.incoming_limit(), Some(4));
     }
diff --git a/swarm/src/protocols_handler.rs b/swarm/src/protocols_handler.rs
index b689cd9d..4be0d76c 100644
--- a/swarm/src/protocols_handler.rs
+++ b/swarm/src/protocols_handler.rs
@@ -220,19 +220,6 @@ pub trait ProtocolsHandler: Send + 'static {
     {
         IntoProtocolsHandler::into_node_handler_builder(self)
     }
-
-    /// Builds an implementation of `NodeHandler` that handles this protocol exclusively.
-    ///
-    /// > **Note**: This is a shortcut for `self.into_node_handler_builder().build()`.
-    #[inline]
-    #[deprecated(note = "Use into_node_handler_builder instead")]
-    fn into_node_handler(self) -> NodeHandlerWrapper<Self>
-    where
-        Self: Sized,
-    {
-        #![allow(deprecated)]
-        self.into_node_handler_builder().build()
-    }
 }

 /// Configuration of inbound or outbound substream protocol(s)
diff --git a/swarm/src/protocols_handler/node_handler.rs b/swarm/src/protocols_handler/node_handler.rs
index 9814c71a..3191ca9e 100644
--- a/swarm/src/protocols_handler/node_handler.rs
+++ b/swarm/src/protocols_handler/node_handler.rs
@@ -29,12 +29,17 @@ use crate::protocols_handler::{
 use futures::prelude::*;
 use libp2p_core::{
-    ConnectedPoint, PeerId,
+    ConnectionInfo,
+    Connected,
+    connection::{
+        ConnectionHandler,
+        ConnectionHandlerEvent,
+        IntoConnectionHandler,
+        Substream,
+        SubstreamEndpoint,
+    },
     muxing::StreamMuxerBox,
-    nodes::Substream,
-    nodes::collection::ConnectionInfo,
-    nodes::handled_node::{IntoNodeHandler, NodeHandler, NodeHandlerEndpoint, NodeHandlerEvent},
     upgrade::{self, InboundUpgradeApply, OutboundUpgradeApply}
 };
 use std::{error, fmt, pin::Pin, task::Context, task::Poll, time::Duration};
@@ -51,31 +56,14 @@ where
 TIntoProtoHandler: IntoProtocolsHandler
 {
     /// Builds a `NodeHandlerWrapperBuilder`.
-    #[inline]
     pub(crate) fn new(handler: TIntoProtoHandler) -> Self {
         NodeHandlerWrapperBuilder {
             handler,
         }
     }
-
-    /// Builds the `NodeHandlerWrapper`.
-    #[deprecated(note = "Pass the NodeHandlerWrapperBuilder directly")]
-    #[inline]
-    pub fn build(self) -> NodeHandlerWrapper<TIntoProtoHandler>
-    where TIntoProtoHandler: ProtocolsHandler
-    {
-        NodeHandlerWrapper {
-            handler: self.handler,
-            negotiating_in: Vec::new(),
-            negotiating_out: Vec::new(),
-            queued_dial_upgrades: Vec::new(),
-            unique_dial_upgrade_id: 0,
-            shutdown: Shutdown::None,
-        }
-    }
 }

-impl<TIntoProtoHandler, TProtoHandler, TConnInfo> IntoNodeHandler<(TConnInfo, ConnectedPoint)>
+impl<TIntoProtoHandler, TProtoHandler, TConnInfo> IntoConnectionHandler<TConnInfo>
     for NodeHandlerWrapperBuilder<TIntoProtoHandler>
 where
     TIntoProtoHandler: IntoProtocolsHandler<Handler = TProtoHandler>,
@@ -84,9 +72,9 @@ where
 {
     type Handler = NodeHandlerWrapper<TIntoProtoHandler::Handler>;

-    fn into_handler(self, remote_info: &(TConnInfo, ConnectedPoint)) -> Self::Handler {
+    fn into_handler(self, connected: &Connected<TConnInfo>) -> Self::Handler {
         NodeHandlerWrapper {
-            handler: self.handler.into_handler(&remote_info.0.peer_id(), &remote_info.1),
+            handler: self.handler.into_handler(connected.peer_id(), &connected.endpoint),
             negotiating_in: Vec::new(),
             negotiating_out: Vec::new(),
             queued_dial_upgrades: Vec::new(),
@@ -96,6 +84,7 @@ where
     }
 }

-/// Wraps around an implementation of `ProtocolsHandler`, and implements `NodeHandler`.
+/// A `ConnectionHandler` for an underlying `ProtocolsHandler`.
+///
+/// Wraps around an implementation of `ProtocolsHandler`, and implements `ConnectionHandler`.
 // TODO: add a caching system for protocols that are supported or not
 pub struct NodeHandlerWrapper<TProtoHandler>
@@ -181,7 +170,7 @@ where
     }
 }

-impl<TProtoHandler> NodeHandler for NodeHandlerWrapper<TProtoHandler>
+impl<TProtoHandler> ConnectionHandler for NodeHandlerWrapper<TProtoHandler>
 where
     TProtoHandler: ProtocolsHandler,
 {
@@ -196,17 +185,17 @@ where
     fn inject_substream(
         &mut self,
         substream: Self::Substream,
-        endpoint: NodeHandlerEndpoint<Self::OutboundOpenInfo>,
+        endpoint: SubstreamEndpoint<Self::OutboundOpenInfo>,
     ) {
         match endpoint {
-            NodeHandlerEndpoint::Listener => {
+            SubstreamEndpoint::Listener => {
                 let protocol = self.handler.listen_protocol();
                 let timeout = protocol.timeout().clone();
                 let upgrade = upgrade::apply_inbound(substream, SendWrapper(protocol.into_upgrade().1));
                 let timeout = Delay::new(timeout);
                 self.negotiating_in.push((upgrade, timeout));
             }
-            NodeHandlerEndpoint::Dialer((upgrade_id, user_data, timeout)) => {
+            SubstreamEndpoint::Dialer((upgrade_id, user_data, timeout)) => {
                 let pos = match self
                     .queued_dial_upgrades
                     .iter()
@@ -227,12 +216,13 @@ where
         }
     }

-    #[inline]
     fn inject_event(&mut self, event: Self::InEvent) {
         self.handler.inject_event(event);
     }

-    fn poll(&mut self, cx: &mut Context) -> Poll<Result<NodeHandlerEvent<Self::OutboundOpenInfo, Self::OutEvent>, Self::Error>> {
+    fn poll(&mut self, cx: &mut Context) -> Poll<
+        Result<ConnectionHandlerEvent<Self::OutboundOpenInfo, Self::OutEvent>, Self::Error>
+    > {
         // Continue negotiation of newly-opened substreams on the listening side.
         // We remove each element from `negotiating_in` one by one and add them back if not ready.
         for n in (0..self.negotiating_in.len()).rev() {
@@ -300,7 +290,7 @@ where

         match poll_result {
             Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => {
-                return Poll::Ready(Ok(NodeHandlerEvent::Custom(event)));
+                return Poll::Ready(Ok(ConnectionHandlerEvent::Custom(event)));
             }
             Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
                 protocol,
@@ -312,7 +302,7 @@ where
                 let (version, upgrade) = protocol.into_upgrade();
                 self.queued_dial_upgrades.push((id, (version, SendWrapper(upgrade))));
                 return Poll::Ready(Ok(
-                    NodeHandlerEvent::OutboundSubstreamRequest((id, info, timeout)),
+                    ConnectionHandlerEvent::OutboundSubstreamRequest((id, info, timeout)),
                 ));
             }
             Poll::Ready(ProtocolsHandlerEvent::Close(err)) => return Poll::Ready(Err(err.into())),
diff --git a/swarm/src/toggle.rs b/swarm/src/toggle.rs
index 3161d181..f38e8b06 100644
--- a/swarm/src/toggle.rs
+++ b/swarm/src/toggle.rs
@@ -33,6 +33,7 @@ use libp2p_core::{
     ConnectedPoint,
     PeerId,
     Multiaddr,
+    connection::ConnectionId,
     either::EitherOutput,
     upgrade::{DeniedUpgrade, EitherUpgrade}
 };
@@ -87,19 +88,14 @@ where
         }
     }

-    fn inject_replaced(&mut self, peer_id: PeerId, closed_endpoint: ConnectedPoint, new_endpoint: ConnectedPoint) {
-        if let Some(inner) = self.inner.as_mut() {
-            inner.inject_replaced(peer_id, closed_endpoint, new_endpoint)
-        }
-    }
-
-    fn inject_node_event(
+    fn inject_event(
         &mut self,
         peer_id: PeerId,
+        connection: ConnectionId,
         event: <<Self::ProtocolsHandler as IntoProtocolsHandler>::Handler as ProtocolsHandler>::OutEvent
     ) {
         if let Some(inner) = self.inner.as_mut() {
-            inner.inject_node_event(peer_id, event);
+            inner.inject_event(peer_id, connection, event);
         }
     }
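Finally, the toggle wrapper above delegates each callback to an optional inner behaviour. The shape of that pattern on its own, with a toy trait standing in for `NetworkBehaviour` (not the real trait):

// Stand-in trait; the real NetworkBehaviour has many more callbacks.
trait Behaviour {
    fn inject_event(&mut self, peer_id: u32, connection: u32, event: String);
}

// A behaviour that can be switched off at construction time: `None`
// swallows all input, `Some` forwards it unchanged.
struct Toggle<B> {
    inner: Option<B>,
}

impl<B: Behaviour> Behaviour for Toggle<B> {
    fn inject_event(&mut self, peer_id: u32, connection: u32, event: String) {
        if let Some(inner) = self.inner.as_mut() {
            inner.inject_event(peer_id, connection, event);
        }
    }
}

struct Logger;
impl Behaviour for Logger {
    fn inject_event(&mut self, peer_id: u32, connection: u32, event: String) {
        println!("peer {peer_id}, connection {connection}: {event}");
    }
}

fn main() {
    let mut off: Toggle<Logger> = Toggle { inner: None };
    off.inject_event(1, 1, "dropped".into());
    let mut on = Toggle { inner: Some(Logger) };
    on.inject_event(1, 1, "forwarded".into());
}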