From faf6f1f9eac859b9e8a0c1f9e2b6dd2cc5490b34 Mon Sep 17 00:00:00 2001 From: Sfxdx <35330335+IntegralTeam@users.noreply.github.com> Date: Tue, 4 Jun 2019 18:35:33 +0700 Subject: [PATCH 01/16] Merge `Notifier` and `TransactionsPoolNotifier` (#10591) * Merge `Notifier` and `TransactionsPoolNotifier` * fix tests --- ethcore/light/src/transaction_queue.rs | 37 +++++---- ethcore/src/miner/miner.rs | 12 +-- miner/src/pool/listener.rs | 100 +++++++++---------------- miner/src/pool/queue.rs | 16 ++-- parity/rpc_apis.rs | 26 ++----- parity/run.rs | 23 +++--- rpc/src/v1/impls/eth_pubsub.rs | 43 +++++++---- rpc/src/v1/tests/mocked/eth_pubsub.rs | 20 +++-- 8 files changed, 138 insertions(+), 139 deletions(-) diff --git a/ethcore/light/src/transaction_queue.rs b/ethcore/light/src/transaction_queue.rs index 2040eabe299..52defd654f8 100644 --- a/ethcore/light/src/transaction_queue.rs +++ b/ethcore/light/src/transaction_queue.rs @@ -129,15 +129,13 @@ pub enum ImportDestination { Future, } -type Listener = Box; - /// Light transaction queue. See module docs for more details. #[derive(Default)] pub struct TransactionQueue { by_account: HashMap, by_hash: H256FastMap, - listeners: Vec, - tx_statuses_listeners: Vec>>>, + pending_listeners: Vec>>>, + full_listeners: Vec>>>, } impl fmt::Debug for TransactionQueue { @@ -145,7 +143,8 @@ impl fmt::Debug for TransactionQueue { fmt.debug_struct("TransactionQueue") .field("by_account", &self.by_account) .field("by_hash", &self.by_hash) - .field("listeners", &self.listeners.len()) + .field("pending_listeners", &self.pending_listeners.len()) + .field("full_listeners", &self.pending_listeners.len()) .finish() } } @@ -360,30 +359,40 @@ impl TransactionQueue { } /// Add a transaction queue listener. 
- pub fn add_listener(&mut self, f: Listener) { - self.listeners.push(f); + pub fn pending_transactions_receiver(&mut self) -> mpsc::UnboundedReceiver>> { + let (sender, receiver) = mpsc::unbounded(); + self.pending_listeners.push(sender); + receiver } /// Add a transaction queue listener. - pub fn tx_statuses_receiver(&mut self) -> mpsc::UnboundedReceiver>> { + pub fn full_transactions_receiver(&mut self) -> mpsc::UnboundedReceiver>> { let (sender, receiver) = mpsc::unbounded(); - self.tx_statuses_listeners.push(sender); + self.full_listeners.push(sender); receiver } /// Notifies all listeners about new pending transaction. fn notify(&mut self, hashes: &[H256], status: TxStatus) { - for listener in &self.listeners { - listener(hashes) + if status == TxStatus::Added { + let to_pending_send: Arc> = Arc::new( + hashes + .into_iter() + .map(|hash| hash.clone()) + .collect() + ); + self.pending_listeners.retain(|listener| listener.unbounded_send(to_pending_send.clone()).is_ok()); + } - let to_send: Arc> = Arc::new( + let to_full_send: Arc> = Arc::new( hashes .into_iter() - .map(|hash| (hash.clone(), status)).collect() + .map(|hash| (hash.clone(), status)) + .collect() ); - self.tx_statuses_listeners.retain(| listener| listener.unbounded_send(to_send.clone()).is_ok()); + self.full_listeners.retain(|listener| listener.unbounded_send(to_full_send.clone()).is_ok()); } } diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index 0c3f94acd44..67ef9e6181c 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -260,14 +260,16 @@ impl Miner { } /// Set a callback to be notified about imported transactions' hashes. 
- pub fn add_transactions_listener(&self, f: Box) { - self.transaction_queue.add_listener(f); + pub fn pending_transactions_receiver(&self) -> mpsc::UnboundedReceiver>> { + let (sender, receiver) = mpsc::unbounded(); + self.transaction_queue.add_pending_listener(sender); + receiver } - /// Set a callback to be notified - pub fn tx_pool_receiver(&self) -> mpsc::UnboundedReceiver>> { + /// Set a callback to be notified about imported transactions' hashes. + pub fn full_transactions_receiver(&self) -> mpsc::UnboundedReceiver>> { let (sender, receiver) = mpsc::unbounded(); - self.transaction_queue.add_tx_pool_listener(sender); + self.transaction_queue.add_full_listener(sender); receiver } diff --git a/miner/src/pool/listener.rs b/miner/src/pool/listener.rs index 464f73be190..9983059048c 100644 --- a/miner/src/pool/listener.rs +++ b/miner/src/pool/listener.rs @@ -26,50 +26,6 @@ use txpool::{self, VerifiedTransaction}; use pool::VerifiedTransaction as Transaction; use pool::TxStatus; -type Listener = Box; - -/// Manages notifications to pending transaction listeners. -#[derive(Default)] -pub struct Notifier { - listeners: Vec, - pending: Vec, -} - -impl fmt::Debug for Notifier { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("Notifier") - .field("listeners", &self.listeners.len()) - .field("pending", &self.pending) - .finish() - } -} - -impl Notifier { - /// Add new listener to receive notifications. - pub fn add(&mut self, f: Listener) { - self.listeners.push(f) - } - - /// Notify listeners about all currently pending transactions. - pub fn notify(&mut self) { - if self.pending.is_empty() { - return; - } - - for l in &self.listeners { - (l)(&self.pending); - } - - self.pending.clear(); - } -} - -impl txpool::Listener for Notifier { - fn added(&mut self, tx: &Arc, _old: Option<&Arc>) { - self.pending.push(*tx.hash()); - } -} - /// Transaction pool logger. 
#[derive(Default, Debug)] pub struct Logger; @@ -121,14 +77,20 @@ impl txpool::Listener for Logger { /// Transactions pool notifier #[derive(Default)] pub struct TransactionsPoolNotifier { - listeners: Vec>>>, + full_listeners: Vec>>>, + pending_listeners: Vec>>>, tx_statuses: Vec<(H256, TxStatus)>, } impl TransactionsPoolNotifier { - /// Add new listener to receive notifications. - pub fn add(&mut self, f: mpsc::UnboundedSender>>) { - self.listeners.push(f); + /// Add new full listener to receive notifications. + pub fn add_full_listener(&mut self, f: mpsc::UnboundedSender>>) { + self.full_listeners.push(f); + } + + /// Add new pending listener to receive notifications. + pub fn add_pending_listener(&mut self, f: mpsc::UnboundedSender>>) { + self.pending_listeners.push(f); } /// Notify listeners about all currently transactions. @@ -137,16 +99,25 @@ impl TransactionsPoolNotifier { return; } - let to_send = Arc::new(std::mem::replace(&mut self.tx_statuses, Vec::new())); - self.listeners - .retain(|listener| listener.unbounded_send(to_send.clone()).is_ok()); + let to_pending_send: Arc> = Arc::new( + self.tx_statuses.clone() + .into_iter() + .map(|(hash, _)| hash) + .collect() + ); + self.pending_listeners.retain(|listener| listener.unbounded_send(to_pending_send.clone()).is_ok()); + + let to_full_send = Arc::new(std::mem::replace(&mut self.tx_statuses, Vec::new())); + self.full_listeners + .retain(|listener| listener.unbounded_send(to_full_send.clone()).is_ok()); } } impl fmt::Debug for TransactionsPoolNotifier { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_struct("TransactionsPoolNotifier") - .field("listeners", &self.listeners.len()) + .field("full_listeners", &self.full_listeners.len()) + .field("pending_listeners", &self.pending_listeners.len()) .finish() } } @@ -180,33 +151,36 @@ impl txpool::Listener for TransactionsPoolNotifier { #[cfg(test)] mod tests { use super::*; - use parking_lot::Mutex; use types::transaction; use 
txpool::Listener; + use futures::{Stream, Future}; use ethereum_types::Address; #[test] fn should_notify_listeners() { // given - let received = Arc::new(Mutex::new(vec![])); - let r = received.clone(); - let listener = Box::new(move |hashes: &[H256]| { - *r.lock() = hashes.iter().map(|x| *x).collect(); - }); + let (full_sender, full_receiver) = mpsc::unbounded(); + let (pending_sender, pending_receiver) = mpsc::unbounded(); - let mut tx_listener = Notifier::default(); - tx_listener.add(listener); + let mut tx_listener = TransactionsPoolNotifier::default(); + tx_listener.add_full_listener(full_sender); + tx_listener.add_pending_listener(pending_sender); // when let tx = new_tx(); tx_listener.added(&tx, None); - assert_eq!(*received.lock(), vec![]); // then tx_listener.notify(); + let (full_res , _full_receiver)= full_receiver.into_future().wait().unwrap(); + let (pending_res , _pending_receiver)= pending_receiver.into_future().wait().unwrap(); + assert_eq!( + full_res, + Some(Arc::new(vec![(serde_json::from_str::("\"0x13aff4201ac1dc49daf6a7cf07b558ed956511acbaabf9502bdacc353953766d\"").unwrap(), TxStatus::Added)])) + ); assert_eq!( - *received.lock(), - vec!["13aff4201ac1dc49daf6a7cf07b558ed956511acbaabf9502bdacc353953766d".parse().unwrap()] + pending_res, + Some(Arc::new(vec![serde_json::from_str::("\"0x13aff4201ac1dc49daf6a7cf07b558ed956511acbaabf9502bdacc353953766d\"").unwrap()])) ); } diff --git a/miner/src/pool/queue.rs b/miner/src/pool/queue.rs index 2d8046bd958..8d804e9683a 100644 --- a/miner/src/pool/queue.rs +++ b/miner/src/pool/queue.rs @@ -33,7 +33,7 @@ use pool::{ }; use pool::local_transactions::LocalTransactionsList; -type Listener = (LocalTransactionsList, (listener::Notifier, (listener::Logger, listener::TransactionsPoolNotifier))); +type Listener = (LocalTransactionsList, (listener::TransactionsPoolNotifier, listener::Logger)); type Pool = txpool::Pool; /// Max cache time in milliseconds for pending transactions. 
@@ -305,8 +305,6 @@ impl TransactionQueue { // Notify about imported transactions. (self.pool.write().listener_mut().1).0.notify(); - ((self.pool.write().listener_mut().1).1).1.notify(); - if results.iter().any(|r| r.is_ok()) { self.cached_pending.write().clear(); } @@ -499,7 +497,7 @@ impl TransactionQueue { /// removes them from the pool. /// That method should be used if invalid transactions are detected /// or you want to cancel a transaction. - pub fn remove<'a, T: IntoIterator>( + pub fn remove<'a, T: IntoIterator>( &self, hashes: T, is_invalid: bool, @@ -571,16 +569,16 @@ impl TransactionQueue { self.pool.read().listener().0.all_transactions().iter().map(|(a, b)| (*a, b.clone())).collect() } - /// Add a callback to be notified about all transactions entering the pool. - pub fn add_listener(&self, f: Box) { + /// Add a listener to be notified about all transactions the pool + pub fn add_pending_listener(&self, f: mpsc::UnboundedSender>>) { let mut pool = self.pool.write(); - (pool.listener_mut().1).0.add(f); + (pool.listener_mut().1).0.add_pending_listener(f); } /// Add a listener to be notified about all transactions the pool - pub fn add_tx_pool_listener(&self, f: mpsc::UnboundedSender>>) { + pub fn add_full_listener(&self, f: mpsc::UnboundedSender>>) { let mut pool = self.pool.write(); - ((pool.listener_mut().1).1).1.add(f); + (pool.listener_mut().1).0.add_full_listener(f); } /// Check if pending set is cached. 
diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index 4693f18ab16..66d376c03fa 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -329,8 +329,9 @@ impl FullDependencies { } Api::EthPubSub => { if !for_generic_pubsub { + let pool_receiver = self.miner.pending_transactions_receiver(); let mut client = - EthPubSubClient::new(self.client.clone(), self.executor.clone()); + EthPubSubClient::new(self.client.clone(), self.executor.clone(), pool_receiver); let weak_client = Arc::downgrade(&self.client); client.add_sync_notifier(self.sync.sync_notification(), move |state| { @@ -345,14 +346,6 @@ impl FullDependencies { }) }); - let h = client.handler(); - self.miner - .add_transactions_listener(Box::new(move |hashes| { - if let Some(h) = h.upgrade() { - h.notify_new_transactions(hashes); - } - })); - if let Some(h) = client.handler().upgrade() { self.client.add_notify(h); } @@ -361,7 +354,7 @@ impl FullDependencies { } Api::ParityTransactionsPool => { if !for_generic_pubsub { - let receiver = self.miner.tx_pool_receiver(); + let receiver = self.miner.full_transactions_receiver(); let client = TransactionsPoolClient::new(self.executor.clone(), receiver); handler.extend_with(TransactionsPoolClient::to_delegate(client)); } @@ -583,6 +576,8 @@ impl LightDependencies { } } Api::EthPubSub => { + let receiver = self.transaction_queue.write().pending_transactions_receiver(); + let mut client = EthPubSubClient::light( self.client.clone(), self.on_demand.clone(), @@ -590,6 +585,7 @@ impl LightDependencies { self.cache.clone(), self.executor.clone(), self.gas_price_percentile, + receiver ); let weak_client = Arc::downgrade(&self.client); @@ -607,19 +603,11 @@ impl LightDependencies { }); self.client.add_listener(client.handler() as Weak<_>); - let h = client.handler(); - self.transaction_queue - .write() - .add_listener(Box::new(move |transactions| { - if let Some(h) = h.upgrade() { - h.notify_new_transactions(transactions); - } - })); 
handler.extend_with(EthPubSub::to_delegate(client)); } Api::ParityTransactionsPool => { if !for_generic_pubsub { - let receiver = self.transaction_queue.write().tx_statuses_receiver(); + let receiver = self.transaction_queue.write().full_transactions_receiver(); let client = TransactionsPoolClient::new(self.executor.clone(), receiver); handler.extend_with(TransactionsPoolClient::to_delegate(client)); } diff --git a/parity/run.rs b/parity/run.rs index 5858dcf5ee8..c21399290b8 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -30,7 +30,7 @@ use ethcore::verification::queue::VerifierSettings; use ethcore_logger::{Config as LogConfig, RotatingLogger}; use ethcore_service::ClientService; use ethereum_types::Address; -use futures::IntoFuture; +use futures::{IntoFuture, Stream}; use hash_fetch::{self, fetch}; use informant::{Informant, LightNodeInformantData, FullNodeInformantData}; use journaldb::Algorithm; @@ -668,14 +668,19 @@ fn execute_impl(cmd: RunCmd, logger: Arc, on_client_rq: // Propagate transactions as soon as they are imported. let tx = ::parking_lot::Mutex::new(priority_tasks); let is_ready = Arc::new(atomic::AtomicBool::new(true)); - miner.add_transactions_listener(Box::new(move |_hashes| { - // we want to have only one PendingTransactions task in the queue. - if is_ready.compare_and_swap(true, false, atomic::Ordering::SeqCst) { - let task = ::sync::PriorityTask::PropagateTransactions(Instant::now(), is_ready.clone()); - // we ignore error cause it means that we are closing - let _ = tx.lock().send(task); - } - })); + let executor = runtime.executor(); + let pool_receiver = miner.pending_transactions_receiver(); + executor.spawn( + pool_receiver.for_each(move |_hashes| { + // we want to have only one PendingTransactions task in the queue. 
+ if is_ready.compare_and_swap(true, false, atomic::Ordering::SeqCst) { + let task = ::sync::PriorityTask::PropagateTransactions(Instant::now(), is_ready.clone()); + // we ignore error cause it means that we are closing + let _ = tx.lock().send(task); + } + Ok(()) + }) + ); // provider not added to a notification center is effectively disabled // TODO [debris] refactor it later on diff --git a/rpc/src/v1/impls/eth_pubsub.rs b/rpc/src/v1/impls/eth_pubsub.rs index 019cbd39fb5..acb0d5f0e25 100644 --- a/rpc/src/v1/impls/eth_pubsub.rs +++ b/rpc/src/v1/impls/eth_pubsub.rs @@ -20,7 +20,7 @@ use std::sync::{Arc, Weak}; use std::collections::BTreeMap; use jsonrpc_core::{BoxFuture, Result, Error}; -use jsonrpc_core::futures::{self, Future, IntoFuture, Stream}; +use jsonrpc_core::futures::{self, Future, IntoFuture, Stream, sync::mpsc}; use jsonrpc_pubsub::typed::{Sink, Subscriber}; use jsonrpc_pubsub::SubscriptionId; @@ -80,23 +80,39 @@ impl EthPubSubClient } } -impl EthPubSubClient { +impl EthPubSubClient + where + C: 'static + Send + Sync { + /// Creates new `EthPubSubClient`. 
- pub fn new(client: Arc, executor: Executor) -> Self { + pub fn new(client: Arc, executor: Executor, pool_receiver: mpsc::UnboundedReceiver>>) -> Self { let heads_subscribers = Arc::new(RwLock::new(Subscribers::default())); let logs_subscribers = Arc::new(RwLock::new(Subscribers::default())); let transactions_subscribers = Arc::new(RwLock::new(Subscribers::default())); let sync_subscribers = Arc::new(RwLock::new(Subscribers::default())); + let handler = Arc::new(ChainNotificationHandler { + client, + executor, + heads_subscribers: heads_subscribers.clone(), + logs_subscribers: logs_subscribers.clone(), + transactions_subscribers: transactions_subscribers.clone(), + sync_subscribers: sync_subscribers.clone(), + }); + let handler2 = Arc::downgrade(&handler); + + handler.executor.spawn(pool_receiver + .for_each(move |hashes| { + if let Some(handler2) = handler2.upgrade() { + handler2.notify_new_transactions(&hashes.to_vec()); + return Ok(()) + } + Err(()) + }) + ); + EthPubSubClient { - handler: Arc::new(ChainNotificationHandler { - client, - executor, - heads_subscribers: heads_subscribers.clone(), - logs_subscribers: logs_subscribers.clone(), - transactions_subscribers: transactions_subscribers.clone(), - sync_subscribers: sync_subscribers.clone(), - }), + handler, sync_subscribers, heads_subscribers, logs_subscribers, @@ -123,6 +139,7 @@ where cache: Arc>, executor: Executor, gas_price_percentile: usize, + pool_receiver: mpsc::UnboundedReceiver>> ) -> Self { let fetch = LightFetch { client, @@ -131,7 +148,7 @@ where cache, gas_price_percentile, }; - EthPubSubClient::new(Arc::new(fetch), executor) + EthPubSubClient::new(Arc::new(fetch), executor, pool_receiver) } } @@ -205,7 +222,7 @@ impl ChainNotificationHandler { } /// Notify all subscribers about new transaction hashes. 
- pub fn notify_new_transactions(&self, hashes: &[H256]) { + fn notify_new_transactions(&self, hashes: &[H256]) { for subscriber in self.transactions_subscribers.read().values() { for hash in hashes { Self::notify(&self.executor, subscriber, pubsub::Result::TransactionHash(*hash)); diff --git a/rpc/src/v1/tests/mocked/eth_pubsub.rs b/rpc/src/v1/tests/mocked/eth_pubsub.rs index 071e0eaced3..1336f4e154e 100644 --- a/rpc/src/v1/tests/mocked/eth_pubsub.rs +++ b/rpc/src/v1/tests/mocked/eth_pubsub.rs @@ -17,7 +17,7 @@ use std::sync::Arc; use jsonrpc_core::MetaIoHandler; -use jsonrpc_core::futures::{self, Stream, Future}; +use jsonrpc_core::futures::{self, Stream, Future, sync::mpsc}; use jsonrpc_pubsub::Session; use std::time::Duration; @@ -40,7 +40,9 @@ fn should_subscribe_to_new_heads() { let h2 = client.block_hash_delta_minus(2); let h1 = client.block_hash_delta_minus(3); - let pubsub = EthPubSubClient::new(Arc::new(client), el.executor()); + let (_, pool_receiver) = mpsc::unbounded(); + + let pubsub = EthPubSubClient::new(Arc::new(client), el.executor(), pool_receiver); let handler = pubsub.handler().upgrade().unwrap(); let pubsub = pubsub.to_delegate(); @@ -112,7 +114,9 @@ fn should_subscribe_to_logs() { } ]); - let pubsub = EthPubSubClient::new(Arc::new(client), el.executor()); + let (_, pool_receiver) = mpsc::unbounded(); + + let pubsub = EthPubSubClient::new(Arc::new(client), el.executor(), pool_receiver); let handler = pubsub.handler().upgrade().unwrap(); let pubsub = pubsub.to_delegate(); @@ -159,8 +163,9 @@ fn should_subscribe_to_pending_transactions() { let el = Runtime::with_thread_count(1); let client = TestBlockChainClient::new(); - let pubsub = EthPubSubClient::new(Arc::new(client), el.executor()); - let handler = pubsub.handler().upgrade().unwrap(); + let (pool_sender, pool_receiver) = mpsc::unbounded(); + + let pubsub = EthPubSubClient::new(Arc::new(client), el.executor(), pool_receiver); let pubsub = pubsub.to_delegate(); let mut io = 
MetaIoHandler::default(); @@ -181,7 +186,7 @@ fn should_subscribe_to_pending_transactions() { assert_eq!(io.handle_request_sync(request, metadata.clone()), Some(response.to_owned())); // Send new transactions - handler.notify_new_transactions(&[H256::from_low_u64_be(5), H256::from_low_u64_be(7)]); + pool_sender.unbounded_send(Arc::new(vec![H256::from_low_u64_be(5), H256::from_low_u64_be(7)])).unwrap(); let (res, receiver) = receiver.into_future().wait().unwrap(); let response = r#"{"jsonrpc":"2.0","method":"eth_subscription","params":{"result":"0x0000000000000000000000000000000000000000000000000000000000000005","subscription":"0x43ca64edf03768e1"}}"#; @@ -205,7 +210,8 @@ fn eth_subscribe_syncing() { // given let el = Runtime::with_thread_count(1); let client = TestBlockChainClient::new(); - let pubsub = EthPubSubClient::new(Arc::new(client), el.executor()); + let (_, pool_receiver) = mpsc::unbounded(); + let pubsub = EthPubSubClient::new(Arc::new(client), el.executor(), pool_receiver); let pubsub = pubsub.to_delegate(); let mut io = MetaIoHandler::default(); From 44161874ffb4959c35f60a358e648e1830ea58ed Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Tue, 4 Jun 2019 19:01:18 +0200 Subject: [PATCH 02/16] enable lto for release builds (#10717) --- Cargo.toml | 5 +++++ scripts/gitlab/test-linux.sh | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 5a6c93d6493..7b75eda49a4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -119,8 +119,13 @@ name = "parity" [profile.dev] +[profile.test] +lto = false +opt-level = 3 # makes tests slower to compile, but faster to run + [profile.release] debug = false +lto = true [workspace] # This should only list projects that are not diff --git a/scripts/gitlab/test-linux.sh b/scripts/gitlab/test-linux.sh index 2854508bb56..6a3a1f0ac80 100755 --- a/scripts/gitlab/test-linux.sh +++ b/scripts/gitlab/test-linux.sh @@ -6,7 +6,7 @@ set -e # fail on any error set -u # treat unset variables as error 
FEATURES="json-tests,ci-skip-tests" -OPTIONS="--release" +OPTIONS="" #use nproc `linux only THREADS=$(nproc) From f7dae48c176ea27af42d03de088e6c5f8dcdda0e Mon Sep 17 00:00:00 2001 From: David Date: Wed, 5 Jun 2019 11:57:09 +0200 Subject: [PATCH 03/16] Revert "enable lto for release builds (#10717)" (#10721) This reverts commit 44161874ffb4959c35f60a358e648e1830ea58ed. --- Cargo.toml | 5 ----- scripts/gitlab/test-linux.sh | 2 +- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 7b75eda49a4..5a6c93d6493 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -119,13 +119,8 @@ name = "parity" [profile.dev] -[profile.test] -lto = false -opt-level = 3 # makes tests slower to compile, but faster to run - [profile.release] debug = false -lto = true [workspace] # This should only list projects that are not diff --git a/scripts/gitlab/test-linux.sh b/scripts/gitlab/test-linux.sh index 6a3a1f0ac80..2854508bb56 100755 --- a/scripts/gitlab/test-linux.sh +++ b/scripts/gitlab/test-linux.sh @@ -6,7 +6,7 @@ set -e # fail on any error set -u # treat unset variables as error FEATURES="json-tests,ci-skip-tests" -OPTIONS="" +OPTIONS="--release" #use nproc `linux only THREADS=$(nproc) From 6be45367e94c7e8f78504d3607aacee00eeea35a Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 5 Jun 2019 15:04:00 +0300 Subject: [PATCH 04/16] SecretStore: expose restore_key_public in HTTP API (#10241) --- secret-store/src/key_server.rs | 20 +++ .../key_version_negotiation_session.rs | 162 +++++++++++++----- .../servers_set_change_session.rs | 2 +- .../admin_sessions/share_add_session.rs | 16 +- .../src/key_server_cluster/message.rs | 23 ++- secret-store/src/key_storage.rs | 3 +- secret-store/src/listener/http_listener.rs | 22 ++- secret-store/src/listener/mod.rs | 4 + secret-store/src/traits.rs | 4 + 9 files changed, 189 insertions(+), 67 deletions(-) diff --git a/secret-store/src/key_server.rs b/secret-store/src/key_server.rs index 77b32d623ba..5418ec00a3c 
100644 --- a/secret-store/src/key_server.rs +++ b/secret-store/src/key_server.rs @@ -78,6 +78,22 @@ impl ServerKeyGenerator for KeyServerImpl { .expect("when wait is called without timeout it always returns Some; qed") .map_err(Into::into) } + + fn restore_key_public(&self, key_id: &ServerKeyId, author: &Requester) -> Result { + // recover requestor' public key from signature + let address = author.address(key_id).map_err(Error::InsufficientRequesterData)?; + + // negotiate key version && retrieve common key data + let negotiation_session = self.data.lock().cluster.new_key_version_negotiation_session(*key_id)?; + negotiation_session.wait() + .and_then(|_| negotiation_session.common_key_data()) + .and_then(|key_share| if key_share.author == address { + Ok(key_share.public) + } else { + Err(Error::AccessDenied) + }) + .map_err(Into::into) + } } impl DocumentKeyServer for KeyServerImpl { @@ -237,6 +253,10 @@ pub mod tests { fn generate_key(&self, _key_id: &ServerKeyId, _author: &Requester, _threshold: usize) -> Result { unimplemented!("test-only") } + + fn restore_key_public(&self, _key_id: &ServerKeyId, _author: &Requester) -> Result { + unimplemented!("test-only") + } } impl DocumentKeyServer for DummyKeyServer { diff --git a/secret-store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs b/secret-store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs index 34d7bde715d..e2d115bcf23 100644 --- a/secret-store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs +++ b/secret-store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs @@ -26,7 +26,7 @@ use key_server_cluster::decryption_session::SessionImpl as DecryptionSession; use key_server_cluster::signing_session_ecdsa::SessionImpl as EcdsaSigningSession; use key_server_cluster::signing_session_schnorr::SessionImpl as SchnorrSigningSession; use key_server_cluster::message::{Message, KeyVersionNegotiationMessage, RequestKeyVersions, 
- KeyVersions, KeyVersionsError, FailedKeyVersionContinueAction}; + KeyVersions, KeyVersionsError, FailedKeyVersionContinueAction, CommonKeyData}; use key_server_cluster::admin_sessions::ShareChangeSessionMeta; // TODO [Opt]: change sessions so that versions are sent by chunks. @@ -97,8 +97,8 @@ struct SessionData { pub state: SessionState, /// Initialization confirmations. pub confirmations: Option>, - /// Key threshold. - pub threshold: Option, + /// Common key data that nodes have agreed upon. + pub key_share: Option, /// { Version => Nodes } pub versions: Option>>, /// Session result. @@ -167,12 +167,11 @@ pub struct LargestSupportResultComputer; impl SessionImpl where T: SessionTransport { /// Create new session. pub fn new(params: SessionParams) -> Self { - let threshold = params.key_share.as_ref().map(|key_share| key_share.threshold); SessionImpl { core: SessionCore { meta: params.meta, sub_session: params.sub_session, - key_share: params.key_share, + key_share: params.key_share.clone(), result_computer: params.result_computer, transport: params.transport, nonce: params.nonce, @@ -181,7 +180,12 @@ impl SessionImpl where T: SessionTransport { data: Mutex::new(SessionData { state: SessionState::WaitingForInitialization, confirmations: None, - threshold: threshold, + key_share: params.key_share.map(|key_share| DocumentKeyShare { + threshold: key_share.threshold, + author: key_share.author, + public: key_share.public, + ..Default::default() + }), versions: None, result: None, continue_with: None, @@ -195,12 +199,6 @@ impl SessionImpl where T: SessionTransport { &self.core.meta } - /// Return key threshold. - pub fn key_threshold(&self) -> Result { - self.data.lock().threshold.clone() - .ok_or(Error::InvalidStateForRequest) - } - /// Return result computer reference. pub fn version_holders(&self, version: &H256) -> Result, Error> { Ok(self.data.lock().versions.as_ref().ok_or(Error::InvalidStateForRequest)? 
@@ -229,6 +227,12 @@ impl SessionImpl where T: SessionTransport { .expect("wait_session returns Some if called without timeout; qed") } + /// Retrieve common key data (author, threshold, public), if available. + pub fn common_key_data(&self) -> Result { + self.data.lock().key_share.clone() + .ok_or(Error::InvalidStateForRequest) + } + /// Initialize session. pub fn initialize(&self, connected_nodes: BTreeSet) -> Result<(), Error> { // check state @@ -322,7 +326,11 @@ impl SessionImpl where T: SessionTransport { session: self.core.meta.id.clone().into(), sub_session: self.core.sub_session.clone().into(), session_nonce: self.core.nonce, - threshold: self.core.key_share.as_ref().map(|key_share| key_share.threshold), + key_common: self.core.key_share.as_ref().map(|key_share| CommonKeyData { + threshold: key_share.threshold, + author: key_share.author.into(), + public: key_share.public.into(), + }), versions: self.core.key_share.as_ref().map(|key_share| key_share.versions.iter().rev() .filter(|v| v.id_numbers.contains_key(sender)) @@ -357,12 +365,25 @@ impl SessionImpl where T: SessionTransport { // remember versions that sender have { - match message.threshold.clone() { - Some(threshold) if data.threshold.is_none() => { - data.threshold = Some(threshold); + match message.key_common.as_ref() { + Some(key_common) if data.key_share.is_none() => { + data.key_share = Some(DocumentKeyShare { + threshold: key_common.threshold, + author: key_common.author.clone().into(), + public: key_common.public.clone().into(), + ..Default::default() + }); + }, + Some(key_common) => { + let prev_key_share = data.key_share.as_ref() + .expect("data.key_share.is_none() is matched by previous branch; qed"); + if prev_key_share.threshold != key_common.threshold || + prev_key_share.author.as_bytes() != key_common.author.as_bytes() || + prev_key_share.public.as_bytes() != key_common.public.as_bytes() + { + return Err(Error::InvalidMessage); + } }, - Some(threshold) if data.threshold.as_ref() == 
Some(&threshold) => (), - Some(_) => return Err(Error::InvalidMessage), None if message.versions.is_empty() => (), None => return Err(Error::InvalidMessage), } @@ -388,7 +409,8 @@ impl SessionImpl where T: SessionTransport { let reason = "this field is filled on master node when initializing; try_complete is only called on initialized master node; qed"; let confirmations = data.confirmations.as_ref().expect(reason); let versions = data.versions.as_ref().expect(reason); - if let Some(result) = core.result_computer.compute_result(data.threshold.clone(), confirmations, versions) { + let threshold = data.key_share.as_ref().map(|key_share| key_share.threshold); + if let Some(result) = core.result_computer.compute_result(threshold, confirmations, versions) { // when the master node processing decryption service request, it starts with a key version negotiation session // if the negotiation fails, only master node knows about it // => if the error is fatal, only the master will know about it and report it to the contract && the request will never be rejected @@ -590,7 +612,7 @@ impl SessionResultComputer for LargestSupportResultComputer { mod tests { use std::sync::Arc; use std::collections::{VecDeque, BTreeMap, BTreeSet}; - use ethereum_types::{H512, Address}; + use ethereum_types::{H512, H160, Address}; use ethkey::public_to_address; use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage, DocumentKeyShare, DocumentKeyShareVersion}; @@ -600,7 +622,10 @@ mod tests { use key_server_cluster::cluster_sessions::ClusterSession; use key_server_cluster::admin_sessions::ShareChangeSessionMeta; use key_server_cluster::decryption_session::create_default_decryption_session; - use key_server_cluster::message::{Message, KeyVersionNegotiationMessage, RequestKeyVersions, KeyVersions}; + use key_server_cluster::message::{ + Message, KeyVersionNegotiationMessage, RequestKeyVersions, + CommonKeyData, KeyVersions, + }; use super::{ SessionImpl, SessionTransport, 
SessionParams, FastestResultComputer, LargestSupportResultComputer, SessionResultComputer, SessionState, ContinueAction, FailedContinueAction, @@ -759,7 +784,11 @@ mod tests { session: Default::default(), sub_session: math::generate_random_scalar().unwrap().into(), session_nonce: 0, - threshold: Some(10), + key_common: Some(CommonKeyData { + threshold: 10, + author: Default::default(), + public: Default::default(), + }), versions: Vec::new(), })), Err(Error::InvalidStateForRequest)); } @@ -775,7 +804,12 @@ mod tests { session: Default::default(), sub_session: math::generate_random_scalar().unwrap().into(), session_nonce: 0, - threshold: Some(0), + key_common: Some(CommonKeyData { + threshold: 0, + author: Default::default(), + public: Default::default(), + }), + versions: vec![version_id.clone().into()] })), Ok(())); assert_eq!(ml.session(0).data.lock().state, SessionState::Finished); @@ -784,32 +818,61 @@ mod tests { session: Default::default(), sub_session: math::generate_random_scalar().unwrap().into(), session_nonce: 0, - threshold: Some(0), + key_common: Some(CommonKeyData { + threshold: 0, + author: Default::default(), + public: Default::default(), + }), + versions: vec![version_id.clone().into()] })), Ok(())); assert_eq!(ml.session(0).data.lock().state, SessionState::Finished); } #[test] - fn negotiation_fails_if_wrong_threshold_sent() { - let ml = MessageLoop::empty(3); - ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap(); + fn negotiation_fails_if_wrong_common_data_sent() { + fn run_test(key_common: CommonKeyData) { + let ml = MessageLoop::empty(3); + ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap(); + + let version_id = (*math::generate_random_scalar().unwrap()).clone(); + assert_eq!(ml.session(0).process_message(ml.node_id(1), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions { + session: Default::default(), + sub_session: math::generate_random_scalar().unwrap().into(), + session_nonce: 0, + key_common: 
Some(CommonKeyData { + threshold: 1, + author: Default::default(), + public: Default::default(), + }), + versions: vec![version_id.clone().into()] + })), Ok(())); + assert_eq!(ml.session(0).process_message(ml.node_id(2), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions { + session: Default::default(), + sub_session: math::generate_random_scalar().unwrap().into(), + session_nonce: 0, + key_common: Some(key_common), + versions: vec![version_id.clone().into()] + })), Err(Error::InvalidMessage)); + } + + run_test(CommonKeyData { + threshold: 2, + author: Default::default(), + public: Default::default(), + }); - let version_id = (*math::generate_random_scalar().unwrap()).clone(); - assert_eq!(ml.session(0).process_message(ml.node_id(1), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions { - session: Default::default(), - sub_session: math::generate_random_scalar().unwrap().into(), - session_nonce: 0, - threshold: Some(1), - versions: vec![version_id.clone().into()] - })), Ok(())); - assert_eq!(ml.session(0).process_message(ml.node_id(2), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions { - session: Default::default(), - sub_session: math::generate_random_scalar().unwrap().into(), - session_nonce: 0, - threshold: Some(2), - versions: vec![version_id.clone().into()] - })), Err(Error::InvalidMessage)); + run_test(CommonKeyData { + threshold: 1, + author: H160::from_low_u64_be(1).into(), + public: Default::default(), + }); + + run_test(CommonKeyData { + threshold: 1, + author: H160::from_low_u64_be(2).into(), + public: Default::default(), + }); } #[test] @@ -822,7 +885,7 @@ mod tests { session: Default::default(), sub_session: math::generate_random_scalar().unwrap().into(), session_nonce: 0, - threshold: None, + key_common: None, versions: vec![version_id.clone().into()] })), Err(Error::InvalidMessage)); } @@ -832,9 +895,9 @@ mod tests { let nodes = MessageLoop::prepare_nodes(2); let version_id = (*math::generate_random_scalar().unwrap()).clone(); 
nodes.values().nth(0).unwrap().insert(Default::default(), DocumentKeyShare { - author: Default::default(), + author: H160::from_low_u64_be(2), threshold: 1, - public: Default::default(), + public: H512::from_low_u64_be(3), common_point: None, encrypted_point: None, versions: vec![DocumentKeyShareVersion { @@ -848,8 +911,13 @@ mod tests { // we can't be sure that node has given key version because previous ShareAdd session could fail assert!(ml.session(0).data.lock().state != SessionState::Finished); - // check that upon completion, threshold is known - assert_eq!(ml.session(0).key_threshold(), Ok(1)); + // check that upon completion, commmon key data is known + assert_eq!(ml.session(0).common_key_data(), Ok(DocumentKeyShare { + author: H160::from_low_u64_be(2), + threshold: 1, + public: H512::from_low_u64_be(3), + ..Default::default() + })); } #[test] diff --git a/secret-store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs b/secret-store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs index 18c63587959..a0d4acdc125 100644 --- a/secret-store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs +++ b/secret-store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs @@ -800,7 +800,7 @@ impl SessionImpl { .wait()? 
.expect("initialize_share_change_session is only called on share change master; negotiation session completes with some on master; qed"); let selected_version_holders = negotiation_session.version_holders(&selected_version)?; - let selected_version_threshold = negotiation_session.key_threshold()?; + let selected_version_threshold = negotiation_session.common_key_data()?.threshold; // prepare session change plan && check if something needs to be changed let old_nodes_set = selected_version_holders; diff --git a/secret-store/src/key_server_cluster/admin_sessions/share_add_session.rs b/secret-store/src/key_server_cluster/admin_sessions/share_add_session.rs index e2af7bc7fa2..b5195a62939 100644 --- a/secret-store/src/key_server_cluster/admin_sessions/share_add_session.rs +++ b/secret-store/src/key_server_cluster/admin_sessions/share_add_session.rs @@ -25,7 +25,7 @@ use key_server_cluster::cluster_sessions::ClusterSession; use key_server_cluster::math; use key_server_cluster::message::{Message, ShareAddMessage, ShareAddConsensusMessage, ConsensusMessageOfShareAdd, InitializeConsensusSessionOfShareAdd, KeyShareCommon, NewKeysDissemination, ShareAddError, - ConfirmConsensusInitialization}; + ConfirmConsensusInitialization, CommonKeyData}; use key_server_cluster::jobs::job_session::JobTransport; use key_server_cluster::jobs::dummy_job::{DummyJob, DummyJobTransport}; use key_server_cluster::jobs::servers_set_change_access_job::{ServersSetChangeAccessJob, ServersSetChangeAccessRequest}; @@ -469,9 +469,9 @@ impl SessionImpl where T: SessionTransport { // update data data.state = SessionState::WaitingForKeysDissemination; data.new_key_share = Some(NewKeyShare { - threshold: message.threshold, - author: message.author.clone().into(), - joint_public: message.joint_public.clone().into(), + threshold: message.key_common.threshold, + author: message.key_common.author.clone().into(), + joint_public: message.key_common.public.clone().into(), common_point: 
message.common_point.clone().map(Into::into), encrypted_point: message.encrypted_point.clone().map(Into::into), }); @@ -645,9 +645,11 @@ impl SessionImpl where T: SessionTransport { core.transport.send(new_node, ShareAddMessage::KeyShareCommon(KeyShareCommon { session: core.meta.id.clone().into(), session_nonce: core.nonce, - threshold: old_key_share.threshold, - author: old_key_share.author.clone().into(), - joint_public: old_key_share.public.clone().into(), + key_common: CommonKeyData { + threshold: old_key_share.threshold, + author: old_key_share.author.into(), + public: old_key_share.public.into(), + }, common_point: old_key_share.common_point.clone().map(Into::into), encrypted_point: old_key_share.encrypted_point.clone().map(Into::into), id_numbers: old_key_version.id_numbers.iter() diff --git a/secret-store/src/key_server_cluster/message.rs b/secret-store/src/key_server_cluster/message.rs index 4b850ec3d3b..98520564fe8 100644 --- a/secret-store/src/key_server_cluster/message.rs +++ b/secret-store/src/key_server_cluster/message.rs @@ -970,12 +970,8 @@ pub struct KeyShareCommon { pub session: MessageSessionId, /// Session-level nonce. pub session_nonce: u64, - /// Key threshold. - pub threshold: usize, - /// Author of key share entry. - pub author: SerializableAddress, - /// Joint public. - pub joint_public: SerializablePublic, + /// Common key data. + pub key_common: CommonKeyData, /// Common (shared) encryption point. pub common_point: Option, /// Encrypted point. @@ -1026,12 +1022,23 @@ pub struct KeyVersions { pub sub_session: SerializableSecret, /// Session-level nonce. pub session_nonce: u64, - /// Key threshold. - pub threshold: Option, + /// Common key data, shared by all versions. + pub key_common: Option, /// Key versions. pub versions: Vec, } +/// Common key data. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct CommonKeyData { + /// Key threshold. + pub threshold: usize, + /// Author of the key entry. 
+ pub author: SerializableAddress, + /// Joint public. + pub public: SerializablePublic, +} + /// When key versions error has occured. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct KeyVersionsError { diff --git a/secret-store/src/key_storage.rs b/secret-store/src/key_storage.rs index 183235d1c13..a75c94de62d 100644 --- a/secret-store/src/key_storage.rs +++ b/secret-store/src/key_storage.rs @@ -34,8 +34,7 @@ type CurrentSerializableDocumentKeyShare = SerializableDocumentKeyShareV3; type CurrentSerializableDocumentKeyVersion = SerializableDocumentKeyShareVersionV3; /// Encrypted key share, stored by key storage on the single key server. -#[derive(Debug, Clone, PartialEq)] -#[cfg_attr(test, derive(Default))] +#[derive(Debug, Default, Clone, PartialEq)] pub struct DocumentKeyShare { /// Author of the entry. pub author: Address, diff --git a/secret-store/src/listener/http_listener.rs b/secret-store/src/listener/http_listener.rs index e3b0bf595df..7c5113204c4 100644 --- a/secret-store/src/listener/http_listener.rs +++ b/secret-store/src/listener/http_listener.rs @@ -40,6 +40,7 @@ use jsonrpc_server_utils::cors::{self, AllowCors, AccessControlAllowOrigin}; /// To generate server key: POST /shadow/{server_key_id}/{signature}/{threshold} /// To store pregenerated encrypted document key: POST /shadow/{server_key_id}/{signature}/{common_point}/{encrypted_key} /// To generate server && document key: POST /{server_key_id}/{signature}/{threshold} +/// To get public portion of server key: GET /server/{server_key_id}/{signature} /// To get document key: GET /{server_key_id}/{signature} /// To get document key shadow: GET /shadow/{server_key_id}/{signature} /// To generate Schnorr signature with server key: GET /schnorr/{server_key_id}/{signature}/{message_hash} @@ -64,6 +65,8 @@ enum Request { StoreDocumentKey(ServerKeyId, RequestSignature, Public, Public), /// Generate encryption key. 
GenerateDocumentKey(ServerKeyId, RequestSignature, usize), + /// Request public portion of server key. + GetServerKey(ServerKeyId, RequestSignature), /// Request encryption key of given document for given requestor. GetDocumentKey(ServerKeyId, RequestSignature), /// Request shadow of encryption key of given document for given requestor. @@ -155,6 +158,15 @@ impl KeyServerHttpHandler { err })) }, + Request::GetServerKey(document, signature) => { + return_server_public_key(&req_uri, cors, self.handler.key_server.upgrade() + .map(|key_server| key_server.restore_key_public(&document, &signature.into())) + .unwrap_or(Err(Error::Internal("KeyServer is already destroyed".into()))) + .map_err(|err| { + warn!(target: "secretstore", "GetServerKey request {} has failed with: {}", req_uri, err); + err + })) + }, Request::GetDocumentKey(document, signature) => { return_document_key(&req_uri, cors, self.handler.key_server.upgrade() .map(|key_server| key_server.restore_document_key(&document, &signature.into())) @@ -361,8 +373,8 @@ fn parse_request(method: &HttpMethod, uri_path: &str, body: &[u8]) -> Request { return parse_admin_request(method, path, body); } - let (prefix, args_offset) = if &path[0] == "shadow" || &path[0] == "schnorr" || &path[0] == "ecdsa" - { (&*path[0], 1) } else { ("", 0) }; + let is_known_prefix = &path[0] == "shadow" || &path[0] == "schnorr" || &path[0] == "ecdsa" || &path[0] == "server"; + let (prefix, args_offset) = if is_known_prefix { (&*path[0], 1) } else { ("", 0) }; let args_count = path.len() - args_offset; if args_count < 2 || path[args_offset].is_empty() || path[args_offset + 1].is_empty() { return Request::Invalid; @@ -388,6 +400,8 @@ fn parse_request(method: &HttpMethod, uri_path: &str, body: &[u8]) -> Request { Request::StoreDocumentKey(document, signature, common_point, encrypted_key), ("", 3, &HttpMethod::POST, Some(Ok(threshold)), _, _, _) => Request::GenerateDocumentKey(document, signature, threshold), + ("server", 2, &HttpMethod::GET, _, 
_, _, _) => + Request::GetServerKey(document, signature), ("", 2, &HttpMethod::GET, _, _, _, _) => Request::GetDocumentKey(document, signature), ("shadow", 2, &HttpMethod::GET, _, _, _, _) => @@ -466,6 +480,10 @@ mod tests { Request::GenerateDocumentKey(H256::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(), "a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(), 2)); + // GET /server/{server_key_id}/{signature} => get public portion of server key + assert_eq!(parse_request(&HttpMethod::GET, "/server/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()), + Request::GetServerKey(H256::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(), + "a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap())); // GET /{server_key_id}/{signature} => get document key assert_eq!(parse_request(&HttpMethod::GET, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()), Request::GetDocumentKey(H256::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(), diff --git a/secret-store/src/listener/mod.rs b/secret-store/src/listener/mod.rs index b28375d8ec2..0fde173c88e 100644 --- a/secret-store/src/listener/mod.rs +++ b/secret-store/src/listener/mod.rs @@ -75,6 +75,10 @@ impl ServerKeyGenerator for Listener { fn generate_key(&self, key_id: &ServerKeyId, author: &Requester, threshold: usize) -> Result { self.key_server.generate_key(key_id, author, threshold) } + + fn 
restore_key_public(&self, key_id: &ServerKeyId, author: &Requester) -> Result { + self.key_server.restore_key_public(key_id, author) + } } impl DocumentKeyServer for Listener { diff --git a/secret-store/src/traits.rs b/secret-store/src/traits.rs index fdfa0589795..e12c75e5ddd 100644 --- a/secret-store/src/traits.rs +++ b/secret-store/src/traits.rs @@ -40,6 +40,10 @@ pub trait ServerKeyGenerator { /// `threshold + 1` is the minimal number of nodes, required to restore private key. /// Result is a public portion of SK. fn generate_key(&self, key_id: &ServerKeyId, author: &Requester, threshold: usize) -> Result; + /// Retrieve public portion of previously generated SK. + /// `key_id` is identifier of previously generated SK. + /// `author` is the same author, that has created the server key. + fn restore_key_public(&self, key_id: &ServerKeyId, author: &Requester) -> Result; } /// Document key (DK) server. From eed630a002bbebed3a3097127f2483213ff52079 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 5 Jun 2019 15:58:11 +0300 Subject: [PATCH 05/16] removed secret_store folder (#10722) --- .../key_server_cluster/cluster_connections.rs | 176 ------ .../cluster_connections_net.rs | 539 ------------------ .../cluster_message_processor.rs | 357 ------------ 3 files changed, 1072 deletions(-) delete mode 100644 secret_store/src/key_server_cluster/cluster_connections.rs delete mode 100644 secret_store/src/key_server_cluster/cluster_connections_net.rs delete mode 100644 secret_store/src/key_server_cluster/cluster_message_processor.rs diff --git a/secret_store/src/key_server_cluster/cluster_connections.rs b/secret_store/src/key_server_cluster/cluster_connections.rs deleted file mode 100644 index b484e6d8e0b..00000000000 --- a/secret_store/src/key_server_cluster/cluster_connections.rs +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. 
- -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use std::collections::BTreeSet; -use std::sync::Arc; -use key_server_cluster::{Error, NodeId}; -use key_server_cluster::message::Message; - -/// Connection to the single node. Provides basic information about connected node and -/// allows sending messages to this node. -pub trait Connection: Send + Sync { - /// Is this inbound connection? This only matters when both nodes are simultaneously establishing - /// two connections to each other. The agreement is that the inbound connection from the node with - /// lower NodeId is used and the other connection is closed. - fn is_inbound(&self) -> bool; - /// Returns id of the connected node. - fn node_id(&self) -> &NodeId; - /// Returns 'address' of the node to use in traces. - fn node_address(&self) -> String; - /// Send message to the connected node. - fn send_message(&self, message: Message); -} - -/// Connections manager. Responsible for keeping us connected to all required nodes. -pub trait ConnectionManager: 'static + Send + Sync { - /// Returns shared reference to connections provider. - fn provider(&self) -> Arc; - /// Try to reach all disconnected nodes immediately. This method is exposed mostly for - /// tests, where all 'nodes' are starting listening for incoming connections first and - /// only after this, they're actually start connecting to each other. - fn connect(&self); -} - -/// Connections provider. 
Holds all active connections and the set of nodes that we need to -/// connect to. At any moment connection could be lost and the set of connected/disconnected -/// nodes could change (at behalf of the connection manager). -/// Clone operation should be cheap (Arc). -pub trait ConnectionProvider: Send + Sync { - /// Returns the set of currently connected nodes. Error is returned when our node is - /// not a part of the cluster ('isolated' node). - fn connected_nodes(&self) -> Result, Error>; - /// Returns the set of currently disconnected nodes. - fn disconnected_nodes(&self) -> BTreeSet; - /// Returns the reference to the active node connection or None if the node is not connected. - fn connection(&self, node: &NodeId) -> Option>; -} - -#[cfg(test)] -pub mod tests { - use std::collections::{BTreeSet, VecDeque}; - use std::sync::Arc; - use std::sync::atomic::{AtomicBool, Ordering}; - use parking_lot::Mutex; - use key_server_cluster::{Error, NodeId}; - use key_server_cluster::message::Message; - use super::{ConnectionManager, Connection, ConnectionProvider}; - - /// Shared messages queue. - pub type MessagesQueue = Arc>>; - - /// Single node connections. - pub struct TestConnections { - node: NodeId, - is_isolated: AtomicBool, - connected_nodes: Mutex>, - disconnected_nodes: Mutex>, - messages: MessagesQueue, - } - - /// Single connection. 
- pub struct TestConnection { - from: NodeId, - to: NodeId, - messages: MessagesQueue, - } - - impl TestConnections { - pub fn isolate(&self) { - let connected_nodes = ::std::mem::replace(&mut *self.connected_nodes.lock(), Default::default()); - self.is_isolated.store(true, Ordering::Relaxed); - self.disconnected_nodes.lock().extend(connected_nodes) - } - - pub fn disconnect(&self, node: NodeId) { - self.connected_nodes.lock().remove(&node); - self.disconnected_nodes.lock().insert(node); - } - - pub fn exclude(&self, node: NodeId) { - self.connected_nodes.lock().remove(&node); - self.disconnected_nodes.lock().remove(&node); - } - - pub fn include(&self, node: NodeId) { - self.connected_nodes.lock().insert(node); - } - } - - impl ConnectionManager for Arc { - fn provider(&self) -> Arc { - self.clone() - } - - fn connect(&self) {} - } - - impl ConnectionProvider for TestConnections { - fn connected_nodes(&self) -> Result, Error> { - match self.is_isolated.load(Ordering::Relaxed) { - false => Ok(self.connected_nodes.lock().clone()), - true => Err(Error::NodeDisconnected), - } - } - - fn disconnected_nodes(&self) -> BTreeSet { - self.disconnected_nodes.lock().clone() - } - - fn connection(&self, node: &NodeId) -> Option> { - match self.connected_nodes.lock().contains(node) { - true => Some(Arc::new(TestConnection { - from: self.node, - to: *node, - messages: self.messages.clone(), - })), - false => None, - } - } - } - - impl Connection for TestConnection { - fn is_inbound(&self) -> bool { - false - } - - fn node_id(&self) -> &NodeId { - &self.to - } - - fn node_address(&self) -> String { - format!("{}", self.to) - } - - fn send_message(&self, message: Message) { - self.messages.lock().push_back((self.from, self.to, message)) - } - } - - pub fn new_test_connections( - messages: MessagesQueue, - node: NodeId, - mut nodes: BTreeSet - ) -> Arc { - let is_isolated = !nodes.remove(&node); - Arc::new(TestConnections { - node, - is_isolated: AtomicBool::new(is_isolated), - 
connected_nodes: Mutex::new(nodes), - disconnected_nodes: Default::default(), - messages, - }) - } -} diff --git a/secret_store/src/key_server_cluster/cluster_connections_net.rs b/secret_store/src/key_server_cluster/cluster_connections_net.rs deleted file mode 100644 index bda7f7dd283..00000000000 --- a/secret_store/src/key_server_cluster/cluster_connections_net.rs +++ /dev/null @@ -1,539 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . 
- -use std::collections::{BTreeMap, BTreeSet}; -use std::collections::btree_map::Entry; -use std::io; -use std::net::{SocketAddr, IpAddr}; -use std::sync::Arc; -use std::time::{Duration, Instant}; -use futures::{future, Future, Stream}; -use parking_lot::{Mutex, RwLock}; -use tokio::net::{TcpListener, TcpStream}; -use tokio::timer::{Interval, timeout::Error as TimeoutError}; -use tokio_io::IoFuture; -use ethkey::KeyPair; -use parity_runtime::Executor; -use key_server_cluster::{Error, NodeId, ClusterConfiguration, NodeKeyPair}; -use key_server_cluster::cluster_connections::{ConnectionProvider, Connection, ConnectionManager}; -use key_server_cluster::connection_trigger::{Maintain, ConnectionTrigger}; -use key_server_cluster::cluster_message_processor::MessageProcessor; -use key_server_cluster::io::{DeadlineStatus, ReadMessage, SharedTcpStream, - read_encrypted_message, WriteMessage, write_encrypted_message}; -use key_server_cluster::message::{self, ClusterMessage, Message}; -use key_server_cluster::net::{accept_connection as io_accept_connection, - connect as io_connect, Connection as IoConnection}; - -/// Empty future. -pub type BoxedEmptyFuture = Box + Send>; - -/// Maintain interval (seconds). Every MAINTAIN_INTERVAL seconds node: -/// 1) checks if connected nodes are responding to KeepAlive messages -/// 2) tries to connect to disconnected nodes -/// 3) checks if enc/dec sessions are time-outed -const MAINTAIN_INTERVAL: u64 = 10; - -/// When no messages have been received from node within KEEP_ALIVE_SEND_INTERVAL seconds, -/// we must send KeepAlive message to the node to check if it still responds to messages. -const KEEP_ALIVE_SEND_INTERVAL: Duration = Duration::from_secs(30); -/// When no messages have been received from node within KEEP_ALIVE_DISCONNECT_INTERVAL seconds, -/// we must treat this node as non-responding && disconnect from it. 
-const KEEP_ALIVE_DISCONNECT_INTERVAL: Duration = Duration::from_secs(60); - -/// Network connection manager configuration. -pub struct NetConnectionsManagerConfig { - /// Allow connecting to 'higher' nodes. - pub allow_connecting_to_higher_nodes: bool, - /// Interface to listen to. - pub listen_address: (String, u16), - /// True if we should autostart key servers set change session when servers set changes? - /// This will only work when servers set is configured using KeyServerSet contract. - pub auto_migrate_enabled: bool, -} - -/// Network connections manager. -pub struct NetConnectionsManager { - /// Address we're listening for incoming connections. - listen_address: SocketAddr, - /// Shared cluster connections data reference. - data: Arc, -} - -/// Network connections data. Shared among NetConnectionsManager and spawned futures. -struct NetConnectionsData { - /// Allow connecting to 'higher' nodes. - allow_connecting_to_higher_nodes: bool, - /// Reference to tokio task executor. - executor: Executor, - /// Key pair of this node. - self_key_pair: Arc, - /// Network messages processor. - message_processor: Arc, - /// Connections trigger. - trigger: Mutex>, - /// Mutable connection data. - container: Arc>, -} - -/// Network connections container. This is the only mutable data of NetConnectionsManager. -/// The set of nodes is mutated by the connection trigger and the connections set is also -/// mutated by spawned futures. -pub struct NetConnectionsContainer { - /// Is this node isolated from cluster? - pub is_isolated: bool, - /// Current key servers set. - pub nodes: BTreeMap, - /// Active connections to key servers. - pub connections: BTreeMap>, -} - -/// Network connection to single key server node. -pub struct NetConnection { - executor: Executor, - /// Id of the peer node. - node_id: NodeId, - /// Address of the peer node. - node_address: SocketAddr, - /// Is this inbound (true) or outbound (false) connection? 
- is_inbound: bool, - /// Key pair that is used to encrypt connection' messages. - key: KeyPair, - /// Last message time. - last_message_time: RwLock, - /// Underlying TCP stream. - stream: SharedTcpStream, -} - -impl NetConnectionsManager { - /// Create new network connections manager. - pub fn new( - executor: Executor, - message_processor: Arc, - trigger: Box, - container: Arc>, - config: &ClusterConfiguration, - net_config: NetConnectionsManagerConfig, - ) -> Result { - let listen_address = make_socket_address( - &net_config.listen_address.0, - net_config.listen_address.1)?; - - Ok(NetConnectionsManager { - listen_address, - data: Arc::new(NetConnectionsData { - allow_connecting_to_higher_nodes: net_config.allow_connecting_to_higher_nodes, - executor, - message_processor, - self_key_pair: config.self_key_pair.clone(), - trigger: Mutex::new(trigger), - container, - }), - }) - } - - /// Start listening for connections and schedule connections maintenance. - pub fn start(&self) -> Result<(), Error> { - net_listen(&self.listen_address, self.data.clone())?; - net_schedule_maintain(self.data.clone()); - Ok(()) - } -} - -impl ConnectionManager for NetConnectionsManager { - fn provider(&self) -> Arc { - self.data.container.clone() - } - - fn connect(&self) { - net_connect_disconnected(self.data.clone()); - } -} - -impl ConnectionProvider for RwLock { - fn connected_nodes(&self) -> Result, Error> { - let connections = self.read(); - if connections.is_isolated { - return Err(Error::NodeDisconnected); - } - - Ok(connections.connections.keys().cloned().collect()) - } - - fn disconnected_nodes(&self) -> BTreeSet { - let connections = self.read(); - connections.nodes.keys() - .filter(|node_id| !connections.connections.contains_key(node_id)) - .cloned() - .collect() - } - - fn connection(&self, node: &NodeId) -> Option> { - match self.read().connections.get(node).cloned() { - Some(connection) => Some(connection), - None => None, - } - } -} - -impl NetConnection { - /// Create 
new connection. - pub fn new(executor: Executor, is_inbound: bool, connection: IoConnection) -> NetConnection { - NetConnection { - executor, - node_id: connection.node_id, - node_address: connection.address, - is_inbound: is_inbound, - stream: connection.stream, - key: connection.key, - last_message_time: RwLock::new(Instant::now()), - } - } - - /// Get last message time. - pub fn last_message_time(&self) -> Instant { - *self.last_message_time.read() - } - - /// Update last message time - pub fn set_last_message_time(&self, last_message_time: Instant) { - *self.last_message_time.write() = last_message_time - } - - /// Returns future that sends encrypted message over this connection. - pub fn send_message_future(&self, message: Message) -> WriteMessage { - write_encrypted_message(self.stream.clone(), &self.key, message) - } - - /// Returns future that reads encrypted message from this connection. - pub fn read_message_future(&self) -> ReadMessage { - read_encrypted_message(self.stream.clone(), self.key.clone()) - } -} - -impl Connection for NetConnection { - fn is_inbound(&self) -> bool { - self.is_inbound - } - - fn node_id(&self) -> &NodeId { - &self.node_id - } - - fn node_address(&self) -> String { - format!("{}", self.node_address) - } - - fn send_message(&self, message: Message) { - execute(&self.executor, self.send_message_future(message).then(|_| Ok(()))); - } -} - -impl NetConnectionsData { - /// Executes closure for each active connection. - pub fn active_connections(&self) -> Vec> { - self.container.read().connections.values().cloned().collect() - } - - /// Executes closure for each disconnected node. - pub fn disconnected_nodes(&self) -> Vec<(NodeId, SocketAddr)> { - let container = self.container.read(); - container.nodes.iter() - .filter(|(node_id, _)| !container.connections.contains_key(node_id)) - .map(|(node_id, addr)| (*node_id, *addr)) - .collect() - } - - /// Try to insert new connection. Returns true if connection has been inserted. 
- /// Returns false (and ignores connections) if: - /// - we do not expect connection from this node - /// - we are already connected to the node and existing connection 'supersede' - /// new connection by agreement - pub fn insert(&self, connection: Arc) -> bool { - let node = *connection.node_id(); - let mut container = self.container.write(); - if !container.nodes.contains_key(&node) { - trace!(target: "secretstore_net", "{}: ignoring unknown connection from {} at {}", - self.self_key_pair.public(), node, connection.node_address()); - return false; - } - - if container.connections.contains_key(&node) { - // we have already connected to the same node - // the agreement is that node with lower id must establish connection to node with higher id - if (*self.self_key_pair.public() < node && connection.is_inbound()) - || (*self.self_key_pair.public() > node && !connection.is_inbound()) { - return false; - } - } - - trace!(target: "secretstore_net", - "{}: inserting connection to {} at {}. Connected to {} of {} nodes", - self.self_key_pair.public(), node, connection.node_address(), - container.connections.len() + 1, container.nodes.len()); - container.connections.insert(node, connection); - - true - } - - /// Tries to remove connection. Returns true if connection has been removed. - /// Returns false if we do not know this connection. - pub fn remove(&self, connection: &NetConnection) -> bool { - let node_id = *connection.node_id(); - let is_inbound = connection.is_inbound(); - let mut container = self.container.write(); - if let Entry::Occupied(entry) = container.connections.entry(node_id) { - if entry.get().is_inbound() != is_inbound { - return false; - } - - trace!(target: "secretstore_net", "{}: removing connection to {} at {}", - self.self_key_pair.public(), node_id, entry.get().node_address()); - entry.remove_entry(); - - true - } else { - false - } - } -} - -/// Listen incoming connections. 
-fn net_listen( - listen_address: &SocketAddr, - data: Arc, -) -> Result<(), Error> { - execute(&data.executor, net_listen_future(listen_address, data.clone())?); - Ok(()) -} - -/// Listen incoming connections future. -fn net_listen_future( - listen_address: &SocketAddr, - data: Arc, -) -> Result { - Ok(Box::new(TcpListener::bind(listen_address)? - .incoming() - .and_then(move |stream| { - net_accept_connection(data.clone(), stream); - Ok(()) - }) - .for_each(|_| Ok(())) - .then(|_| future::ok(())))) -} - -/// Accept incoming connection. -fn net_accept_connection( - data: Arc, - stream: TcpStream, -) { - execute(&data.executor, net_accept_connection_future(data.clone(), stream)); -} - -/// Accept incoming connection future. -fn net_accept_connection_future(data: Arc, stream: TcpStream) -> BoxedEmptyFuture { - Box::new(io_accept_connection(stream, data.self_key_pair.clone()) - .then(move |result| net_process_connection_result(data, None, result)) - .then(|_| future::ok(()))) -} - -/// Connect to remote node. -fn net_connect( - data: Arc, - remote: SocketAddr, -) { - execute(&data.executor, net_connect_future(data.clone(), remote)); -} - -/// Connect to remote node future. -fn net_connect_future( - data: Arc, - remote: SocketAddr, -) -> BoxedEmptyFuture { - let disconnected_nodes = data.container.disconnected_nodes(); - Box::new(io_connect(&remote, data.self_key_pair.clone(), disconnected_nodes) - .then(move |result| net_process_connection_result(data, Some(remote), result)) - .then(|_| future::ok(()))) -} - -/// Process network connection result. 
-fn net_process_connection_result( - data: Arc, - outbound_addr: Option, - result: Result>, TimeoutError>, -) -> IoFuture> { - match result { - Ok(DeadlineStatus::Meet(Ok(connection))) => { - let connection = Arc::new(NetConnection::new(data.executor.clone(), outbound_addr.is_none(), connection)); - if data.insert(connection.clone()) { - let maintain_action = data.trigger.lock().on_connection_established(connection.node_id()); - maintain_connection_trigger(data.clone(), maintain_action); - - return net_process_connection_messages(data, connection); - } - }, - Ok(DeadlineStatus::Meet(Err(err))) => { - warn!(target: "secretstore_net", "{}: protocol error '{}' when establishing {} connection{}", - data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" }, - outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default()); - }, - Ok(DeadlineStatus::Timeout) => { - warn!(target: "secretstore_net", "{}: timeout when establishing {} connection{}", - data.self_key_pair.public(), if outbound_addr.is_some() { "outbound" } else { "inbound" }, - outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default()); - }, - Err(err) => { - warn!(target: "secretstore_net", "{}: network error '{}' when establishing {} connection{}", - data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" }, - outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default()); - }, - } - - Box::new(future::ok(Ok(()))) -} - -/// Process connection messages. 
-fn net_process_connection_messages( - data: Arc, - connection: Arc, -) -> IoFuture> { - Box::new(connection - .read_message_future() - .then(move |result| - match result { - Ok((_, Ok(message))) => { - connection.set_last_message_time(Instant::now()); - data.message_processor.process_connection_message(connection.clone(), message); - // continue serving connection - let process_messages_future = net_process_connection_messages( - data.clone(), connection).then(|_| Ok(())); - execute(&data.executor, process_messages_future); - Box::new(future::ok(Ok(()))) - }, - Ok((_, Err(err))) => { - warn!(target: "secretstore_net", "{}: protocol error '{}' when reading message from node {}", - data.self_key_pair.public(), err, connection.node_id()); - // continue serving connection - let process_messages_future = net_process_connection_messages( - data.clone(), connection).then(|_| Ok(())); - execute(&data.executor, process_messages_future); - Box::new(future::ok(Err(err))) - }, - Err(err) => { - let node_id = *connection.node_id(); - warn!(target: "secretstore_net", "{}: network error '{}' when reading message from node {}", - data.self_key_pair.public(), err, node_id); - // close connection - if data.remove(&*connection) { - let maintain_action = data.trigger.lock().on_connection_closed(&node_id); - maintain_connection_trigger(data, maintain_action); - } - Box::new(future::err(err)) - }, - } - )) -} - -/// Schedule connections. maintain. -fn net_schedule_maintain(data: Arc) { - let closure_data = data.clone(); - execute(&data.executor, Interval::new_interval(Duration::new(MAINTAIN_INTERVAL, 0)) - .and_then(move |_| Ok(net_maintain(closure_data.clone()))) - .for_each(|_| Ok(())) - .then(|_| future::ok(()))); -} - -/// Maintain network connections. 
-fn net_maintain(data: Arc) { - trace!(target: "secretstore_net", "{}: executing maintain procedures", data.self_key_pair.public()); - - update_nodes_set(data.clone()); - data.message_processor.maintain_sessions(); - net_keep_alive(data.clone()); - net_connect_disconnected(data); -} - -/// Send keep alive messages to remote nodes. -fn net_keep_alive(data: Arc) { - let now = Instant::now(); - let active_connections = data.active_connections(); - for connection in active_connections { - let last_message_diff = now - connection.last_message_time(); - if last_message_diff > KEEP_ALIVE_DISCONNECT_INTERVAL { - warn!(target: "secretstore_net", "{}: keep alive timeout for node {}", - data.self_key_pair.public(), connection.node_id()); - - let node_id = *connection.node_id(); - if data.remove(&*connection) { - let maintain_action = data.trigger.lock().on_connection_closed(&node_id); - maintain_connection_trigger(data.clone(), maintain_action); - } - data.message_processor.process_disconnect(&node_id); - } - else if last_message_diff > KEEP_ALIVE_SEND_INTERVAL { - connection.send_message(Message::Cluster(ClusterMessage::KeepAlive(message::KeepAlive {}))); - } - } -} - -/// Connect disconnected nodes. -fn net_connect_disconnected(data: Arc) { - let disconnected_nodes = data.disconnected_nodes(); - for (node_id, address) in disconnected_nodes { - if data.allow_connecting_to_higher_nodes || *data.self_key_pair.public() < node_id { - net_connect(data.clone(), address); - } - } -} - -/// Schedule future execution. -fn execute + Send + 'static>(executor: &Executor, f: F) { - if let Err(err) = future::Executor::execute(executor, Box::new(f)) { - error!("Secret store runtime unable to spawn task. Runtime is shutting down. ({:?})", err); - } -} - -/// Try to update active nodes set from connection trigger. 
-fn update_nodes_set(data: Arc) { - let maintain_action = data.trigger.lock().on_maintain(); - maintain_connection_trigger(data, maintain_action); -} - -/// Execute maintain procedures of connections trigger. -fn maintain_connection_trigger(data: Arc, maintain_action: Option) { - if maintain_action == Some(Maintain::SessionAndConnections) || maintain_action == Some(Maintain::Session) { - let session_params = data.trigger.lock().maintain_session(); - if let Some(session_params) = session_params { - let session = data.message_processor.start_servers_set_change_session(session_params); - match session { - Ok(_) => trace!(target: "secretstore_net", "{}: started auto-migrate session", - data.self_key_pair.public()), - Err(err) => trace!(target: "secretstore_net", "{}: failed to start auto-migrate session with: {}", - data.self_key_pair.public(), err), - } - } - } - if maintain_action == Some(Maintain::SessionAndConnections) || maintain_action == Some(Maintain::Connections) { - let mut trigger = data.trigger.lock(); - let mut data = data.container.write(); - trigger.maintain_connections(&mut *data); - } -} - -/// Compose SocketAddr from configuration' address and port. -fn make_socket_address(address: &str, port: u16) -> Result { - let ip_address: IpAddr = address.parse().map_err(|_| Error::InvalidNodeAddress)?; - Ok(SocketAddr::new(ip_address, port)) -} diff --git a/secret_store/src/key_server_cluster/cluster_message_processor.rs b/secret_store/src/key_server_cluster/cluster_message_processor.rs deleted file mode 100644 index b4ba5ef03b2..00000000000 --- a/secret_store/src/key_server_cluster/cluster_message_processor.rs +++ /dev/null @@ -1,357 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. 
- -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use std::sync::Arc; -use key_server_cluster::{Error, NodeId, NodeKeyPair}; -use key_server_cluster::cluster::{ServersSetChangeParams, new_servers_set_change_session}; -use key_server_cluster::cluster_sessions::{AdminSession}; -use key_server_cluster::cluster_connections::{ConnectionProvider, Connection}; -use key_server_cluster::cluster_sessions::{ClusterSession, ClusterSessions, ClusterSessionsContainer, - create_cluster_view}; -use key_server_cluster::cluster_sessions_creator::{ClusterSessionCreator, IntoSessionId}; -use key_server_cluster::message::{self, Message, ClusterMessage}; -use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSession, - IsolatedSessionTransport as KeyVersionNegotiationSessionTransport, ContinueAction}; -use key_server_cluster::connection_trigger::ServersSetChangeSessionCreatorConnector; - -/// Something that is able to process signals/messages from other nodes. -pub trait MessageProcessor: Send + Sync { - /// Process disconnect from the remote node. - fn process_disconnect(&self, node: &NodeId); - /// Process single message from the connection. - fn process_connection_message(&self, connection: Arc, message: Message); - - /// Start servers set change session. This is typically used by ConnectionManager when - /// it detects that auto-migration session needs to be started. 
- fn start_servers_set_change_session(&self, params: ServersSetChangeParams) -> Result, Error>; - /// Try to continue session after key version negotiation session is completed. - fn try_continue_session( - &self, - session: Option>> - ); - /// Maintain active sessions. Typically called by the ConnectionManager at some intervals. - /// Should cancel stalled sessions and send keep-alive messages for sessions that support it. - fn maintain_sessions(&self); -} - -/// Bridge between ConnectionManager and ClusterSessions. -pub struct SessionsMessageProcessor { - self_key_pair: Arc, - servers_set_change_creator_connector: Arc, - sessions: Arc, - connections: Arc, -} - -impl SessionsMessageProcessor { - /// Create new instance of SessionsMessageProcessor. - pub fn new( - self_key_pair: Arc, - servers_set_change_creator_connector: Arc, - sessions: Arc, - connections: Arc, - ) -> Self { - SessionsMessageProcessor { - self_key_pair, - servers_set_change_creator_connector, - sessions, - connections, - } - } - - /// Process single session message from connection. 
- fn process_message, D>( - &self, - sessions: &ClusterSessionsContainer, - connection: Arc, - mut message: Message, - ) -> Option> - where - Message: IntoSessionId - { - // get or create new session, if required - let mut sender = *connection.node_id(); - let session = self.prepare_session(sessions, &sender, &message); - // send error if session is not found, or failed to create - let session = match session { - Ok(session) => session, - Err(error) => { - // this is new session => it is not yet in container - warn!(target: "secretstore_net", - "{}: {} session read error '{}' when requested for session from node {}", - self.self_key_pair.public(), S::type_name(), error, sender); - if !message.is_error_message() { - let qed = "session_id only fails for cluster messages; - only session messages are passed to process_message; - qed"; - let session_id = message.into_session_id().expect(qed); - let session_nonce = message.session_nonce().expect(qed); - - connection.send_message(SC::make_error_message(session_id, session_nonce, error)); - } - return None; - }, - }; - - let session_id = session.id(); - let mut is_queued_message = false; - loop { - let message_result = session.on_message(&sender, &message); - match message_result { - Ok(_) => { - // if session is completed => stop - if session.is_finished() { - info!(target: "secretstore_net", - "{}: {} session completed", self.self_key_pair.public(), S::type_name()); - sessions.remove(&session_id); - return Some(session); - } - - // try to dequeue message - match sessions.dequeue_message(&session_id) { - Some((msg_sender, msg)) => { - is_queued_message = true; - sender = msg_sender; - message = msg; - }, - None => return Some(session), - } - }, - Err(Error::TooEarlyForRequest) => { - sessions.enqueue_message(&session_id, sender, message, is_queued_message); - return Some(session); - }, - Err(err) => { - warn!( - target: "secretstore_net", - "{}: {} session error '{}' when processing message {} from node {}", - 
self.self_key_pair.public(), - S::type_name(), - err, - message, - sender); - session.on_session_error(self.self_key_pair.public(), err); - sessions.remove(&session_id); - return Some(session); - }, - } - } - } - - /// Get or insert new session. - fn prepare_session, D>( - &self, - sessions: &ClusterSessionsContainer, - sender: &NodeId, - message: &Message - ) -> Result, Error> - where - Message: IntoSessionId - { - fn requires_all_connections(message: &Message) -> bool { - match *message { - Message::Generation(_) => true, - Message::ShareAdd(_) => true, - Message::ServersSetChange(_) => true, - _ => false, - } - } - - // get or create new session, if required - let session_id = message.into_session_id() - .expect("into_session_id fails for cluster messages only; - only session messages are passed to prepare_session; - qed"); - let is_initialization_message = message.is_initialization_message(); - let is_delegation_message = message.is_delegation_message(); - match is_initialization_message || is_delegation_message { - false => sessions.get(&session_id, true).ok_or(Error::NoActiveSessionWithId), - true => { - let creation_data = SC::creation_data_from_message(&message)?; - let master = if is_initialization_message { - *sender - } else { - *self.self_key_pair.public() - }; - let cluster = create_cluster_view( - self.self_key_pair.clone(), - self.connections.clone(), - requires_all_connections(&message))?; - - let nonce = Some(message.session_nonce().ok_or(Error::InvalidMessage)?); - let exclusive = message.is_exclusive_session_message(); - sessions.insert(cluster, master, session_id, nonce, exclusive, creation_data) - }, - } - } - - /// Process single cluster message from the connection. 
- fn process_cluster_message(&self, connection: Arc, message: ClusterMessage) { - match message { - ClusterMessage::KeepAlive(_) => { - let msg = Message::Cluster(ClusterMessage::KeepAliveResponse(message::KeepAliveResponse { - session_id: None, - })); - connection.send_message(msg) - }, - ClusterMessage::KeepAliveResponse(msg) => if let Some(session_id) = msg.session_id { - self.sessions.on_session_keep_alive(connection.node_id(), session_id.into()); - }, - _ => warn!(target: "secretstore_net", "{}: received unexpected message {} from node {} at {}", - self.self_key_pair.public(), message, connection.node_id(), connection.node_address()), - } - } -} - -impl MessageProcessor for SessionsMessageProcessor { - fn process_disconnect(&self, node: &NodeId) { - self.sessions.on_connection_timeout(node); - } - - fn process_connection_message(&self, connection: Arc, message: Message) { - trace!(target: "secretstore_net", "{}: received message {} from {}", - self.self_key_pair.public(), message, connection.node_id()); - - // error is ignored as we only process errors on session level - match message { - Message::Generation(message) => self - .process_message(&self.sessions.generation_sessions, connection, Message::Generation(message)) - .map(|_| ()).unwrap_or_default(), - Message::Encryption(message) => self - .process_message(&self.sessions.encryption_sessions, connection, Message::Encryption(message)) - .map(|_| ()).unwrap_or_default(), - Message::Decryption(message) => self - .process_message(&self.sessions.decryption_sessions, connection, Message::Decryption(message)) - .map(|_| ()).unwrap_or_default(), - Message::SchnorrSigning(message) => self - .process_message(&self.sessions.schnorr_signing_sessions, connection, Message::SchnorrSigning(message)) - .map(|_| ()).unwrap_or_default(), - Message::EcdsaSigning(message) => self - .process_message(&self.sessions.ecdsa_signing_sessions, connection, Message::EcdsaSigning(message)) - .map(|_| ()).unwrap_or_default(), - 
Message::ServersSetChange(message) => { - let message = Message::ServersSetChange(message); - let is_initialization_message = message.is_initialization_message(); - let session = self.process_message(&self.sessions.admin_sessions, connection, message); - if is_initialization_message { - if let Some(session) = session { - self.servers_set_change_creator_connector - .set_key_servers_set_change_session(session.clone()); - } - } - }, - Message::KeyVersionNegotiation(message) => { - let session = self.process_message( - &self.sessions.negotiation_sessions, connection, Message::KeyVersionNegotiation(message)); - self.try_continue_session(session); - }, - Message::ShareAdd(message) => self.process_message( - &self.sessions.admin_sessions, connection, Message::ShareAdd(message)) - .map(|_| ()).unwrap_or_default(), - Message::Cluster(message) => self.process_cluster_message(connection, message), - } - } - - fn try_continue_session( - &self, - session: Option>> - ) { - if let Some(session) = session { - let meta = session.meta(); - let is_master_node = meta.self_node_id == meta.master_node_id; - if is_master_node && session.is_finished() { - self.sessions.negotiation_sessions.remove(&session.id()); - match session.wait() { - Ok(Some((version, master))) => match session.take_continue_action() { - Some(ContinueAction::Decrypt( - session, origin, is_shadow_decryption, is_broadcast_decryption - )) => { - let initialization_error = if self.self_key_pair.public() == &master { - session.initialize( - origin, version, is_shadow_decryption, is_broadcast_decryption) - } else { - session.delegate( - master, origin, version, is_shadow_decryption, is_broadcast_decryption) - }; - - if let Err(error) = initialization_error { - session.on_session_error(&meta.self_node_id, error); - self.sessions.decryption_sessions.remove(&session.id()); - } - }, - Some(ContinueAction::SchnorrSign(session, message_hash)) => { - let initialization_error = if self.self_key_pair.public() == &master { - 
session.initialize(version, message_hash) - } else { - session.delegate(master, version, message_hash) - }; - - if let Err(error) = initialization_error { - session.on_session_error(&meta.self_node_id, error); - self.sessions.schnorr_signing_sessions.remove(&session.id()); - } - }, - Some(ContinueAction::EcdsaSign(session, message_hash)) => { - let initialization_error = if self.self_key_pair.public() == &master { - session.initialize(version, message_hash) - } else { - session.delegate(master, version, message_hash) - }; - - if let Err(error) = initialization_error { - session.on_session_error(&meta.self_node_id, error); - self.sessions.ecdsa_signing_sessions.remove(&session.id()); - } - }, - None => (), - }, - Ok(None) => unreachable!("is_master_node; session is finished; - negotiation version always finished with result on master; - qed"), - Err(error) => match session.take_continue_action() { - Some(ContinueAction::Decrypt(session, _, _, _)) => { - session.on_session_error(&meta.self_node_id, error); - self.sessions.decryption_sessions.remove(&session.id()); - }, - Some(ContinueAction::SchnorrSign(session, _)) => { - session.on_session_error(&meta.self_node_id, error); - self.sessions.schnorr_signing_sessions.remove(&session.id()); - }, - Some(ContinueAction::EcdsaSign(session, _)) => { - session.on_session_error(&meta.self_node_id, error); - self.sessions.ecdsa_signing_sessions.remove(&session.id()); - }, - None => (), - }, - } - } - } - } - - fn maintain_sessions(&self) { - self.sessions.stop_stalled_sessions(); - self.sessions.sessions_keep_alive(); - } - - fn start_servers_set_change_session(&self, params: ServersSetChangeParams) -> Result, Error> { - new_servers_set_change_session( - self.self_key_pair.clone(), - &*self.sessions, - self.connections.clone(), - self.servers_set_change_creator_connector.clone(), - params, - ) - } -} From 9de1afeeb68d4d5a8717ff8ecc0e9238e52905b6 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 6 Jun 2019 13:35:06 
+0300 Subject: [PATCH 06/16] SecretStore: non-blocking wait of session completion (#10303) * make SS sessions return future * fix grumbles * do not create unused Condvar in production mode --- secret-store/src/key_server.rs | 384 +++++++++++++----- .../key_version_negotiation_session.rs | 42 +- .../servers_set_change_session.rs | 46 ++- .../admin_sessions/share_add_session.rs | 29 +- .../admin_sessions/share_change_session.rs | 2 +- .../client_sessions/decryption_session.rs | 48 ++- .../client_sessions/encryption_session.rs | 39 +- .../client_sessions/generation_session.rs | 50 ++- .../client_sessions/signing_session_ecdsa.rs | 36 +- .../signing_session_schnorr.rs | 48 ++- .../src/key_server_cluster/cluster.rs | 274 ++++++++++--- .../cluster_message_processor.rs | 24 +- .../key_server_cluster/cluster_sessions.rs | 118 +++++- .../cluster_sessions_creator.rs | 145 +++++-- .../connection_trigger_with_migration.rs | 7 +- secret-store/src/key_server_cluster/mod.rs | 2 +- secret-store/src/listener/http_listener.rs | 236 +++++------ secret-store/src/listener/mod.rs | 62 ++- .../src/listener/service_contract_listener.rs | 8 +- secret-store/src/traits.rs | 62 ++- 20 files changed, 1133 insertions(+), 529 deletions(-) diff --git a/secret-store/src/key_server.rs b/secret-store/src/key_server.rs index 5418ec00a3c..f93f92d5a43 100644 --- a/secret-store/src/key_server.rs +++ b/secret-store/src/key_server.rs @@ -16,14 +16,15 @@ use std::collections::BTreeSet; use std::sync::Arc; +use futures::{future::{err, result}, Future}; use parking_lot::Mutex; use crypto::DEFAULT_MAC; -use ethkey::crypto; +use ethkey::{crypto, public_to_address}; use parity_runtime::Executor; use super::acl_storage::AclStorage; use super::key_storage::KeyStorage; use super::key_server_set::KeyServerSet; -use key_server_cluster::{math, new_network_cluster}; +use key_server_cluster::{math, new_network_cluster, ClusterSession, WaitableSession}; use traits::{AdminSessionsServer, ServerKeyGenerator, 
DocumentKeyServer, MessageSigner, KeyServer, NodeKeyPair}; use types::{Error, Public, RequestSignature, Requester, ServerKeyId, EncryptedDocumentKey, EncryptedDocumentKeyShadow, ClusterConfiguration, MessageHash, EncryptedMessageSignature, NodeId}; @@ -58,132 +59,212 @@ impl KeyServerImpl { impl KeyServer for KeyServerImpl {} impl AdminSessionsServer for KeyServerImpl { - fn change_servers_set(&self, old_set_signature: RequestSignature, new_set_signature: RequestSignature, new_servers_set: BTreeSet) -> Result<(), Error> { - let servers_set_change_session = self.data.lock().cluster - .new_servers_set_change_session(None, None, new_servers_set, old_set_signature, new_set_signature)?; - servers_set_change_session.as_servers_set_change() - .expect("new_servers_set_change_session creates servers_set_change_session; qed") - .wait().map_err(Into::into) + fn change_servers_set( + &self, + old_set_signature: RequestSignature, + new_set_signature: RequestSignature, + new_servers_set: BTreeSet, + ) -> Box + Send> { + return_session(self.data.lock().cluster + .new_servers_set_change_session(None, None, new_servers_set, old_set_signature, new_set_signature)) } } impl ServerKeyGenerator for KeyServerImpl { - fn generate_key(&self, key_id: &ServerKeyId, author: &Requester, threshold: usize) -> Result { - // recover requestor' public key from signature - let address = author.address(key_id).map_err(Error::InsufficientRequesterData)?; + fn generate_key( + &self, + key_id: ServerKeyId, + author: Requester, + threshold: usize, + ) -> Box + Send> { + // recover requestor' address key from signature + let address = author.address(&key_id).map_err(Error::InsufficientRequesterData); // generate server key - let generation_session = self.data.lock().cluster.new_generation_session(key_id.clone(), None, address, threshold)?; - generation_session.wait(None) - .expect("when wait is called without timeout it always returns Some; qed") - .map_err(Into::into) + 
return_session(address.and_then(|address| self.data.lock().cluster + .new_generation_session(key_id, None, address, threshold))) } - fn restore_key_public(&self, key_id: &ServerKeyId, author: &Requester) -> Result { + fn restore_key_public( + &self, + key_id: ServerKeyId, + author: Requester, + ) -> Box + Send> { // recover requestor' public key from signature - let address = author.address(key_id).map_err(Error::InsufficientRequesterData)?; + let session_and_address = author + .address(&key_id) + .map_err(Error::InsufficientRequesterData) + .and_then(|address| self.data.lock().cluster.new_key_version_negotiation_session(key_id) + .map(|session| (session, address))); + let (session, address) = match session_and_address { + Ok((session, address)) => (session, address), + Err(error) => return Box::new(err(error)), + }; // negotiate key version && retrieve common key data - let negotiation_session = self.data.lock().cluster.new_key_version_negotiation_session(*key_id)?; - negotiation_session.wait() - .and_then(|_| negotiation_session.common_key_data()) - .and_then(|key_share| if key_share.author == address { + let core_session = session.session.clone(); + Box::new(session.into_wait_future() + .and_then(move |_| core_session.common_key_data() + .map(|key_share| (key_share, address))) + .and_then(|(key_share, address)| if key_share.author == address { Ok(key_share.public) } else { Err(Error::AccessDenied) - }) - .map_err(Into::into) + })) } } impl DocumentKeyServer for KeyServerImpl { - fn store_document_key(&self, key_id: &ServerKeyId, author: &Requester, common_point: Public, encrypted_document_key: Public) -> Result<(), Error> { + fn store_document_key( + &self, + key_id: ServerKeyId, + author: Requester, + common_point: Public, + encrypted_document_key: Public, + ) -> Box + Send> { // store encrypted key - let encryption_session = self.data.lock().cluster.new_encryption_session(key_id.clone(), - author.clone(), common_point, encrypted_document_key)?; - 
encryption_session.wait(None).map_err(Into::into) + return_session(self.data.lock().cluster.new_encryption_session(key_id, + author.clone(), common_point, encrypted_document_key)) } - fn generate_document_key(&self, key_id: &ServerKeyId, author: &Requester, threshold: usize) -> Result { + fn generate_document_key( + &self, + key_id: ServerKeyId, + author: Requester, + threshold: usize, + ) -> Box + Send> { // recover requestor' public key from signature - let public = author.public(key_id).map_err(Error::InsufficientRequesterData)?; + let public = result(author.public(&key_id).map_err(Error::InsufficientRequesterData)); // generate server key - let server_key = self.generate_key(key_id, author, threshold)?; + let data = self.data.clone(); + let server_key = public.and_then(move |public| { + let data = data.lock(); + let session = data.cluster.new_generation_session(key_id, None, public_to_address(&public), threshold); + result(session.map(|session| (public, session))) + }) + .and_then(|(public, session)| session.into_wait_future().map(move |server_key| (public, server_key))); // generate random document key - let document_key = math::generate_random_point()?; - let encrypted_document_key = math::encrypt_secret(&document_key, &server_key)?; + let document_key = server_key.and_then(|(public, server_key)| + result(math::generate_random_point() + .and_then(|document_key| math::encrypt_secret(&document_key, &server_key) + .map(|encrypted_document_key| (public, document_key, encrypted_document_key)))) + ); // store document key in the storage - self.store_document_key(key_id, author, encrypted_document_key.common_point, encrypted_document_key.encrypted_point)?; + let data = self.data.clone(); + let stored_document_key = document_key.and_then(move |(public, document_key, encrypted_document_key)| { + let data = data.lock(); + let session = data.cluster.new_encryption_session(key_id, + author.clone(), encrypted_document_key.common_point, 
encrypted_document_key.encrypted_point); + result(session.map(|session| (public, document_key, session))) + }) + .and_then(|(public, document_key, session)| session.into_wait_future().map(move |_| (public, document_key))); // encrypt document key with requestor public key - let document_key = crypto::ecies::encrypt(&public, &DEFAULT_MAC, document_key.as_bytes()) - .map_err(|err| Error::Internal(format!("Error encrypting document key: {}", err)))?; - Ok(document_key) + let encrypted_document_key = stored_document_key + .and_then(|(public, document_key)| crypto::ecies::encrypt(&public, &DEFAULT_MAC, document_key.as_bytes()) + .map_err(|err| Error::Internal(format!("Error encrypting document key: {}", err)))); + + Box::new(encrypted_document_key) } - fn restore_document_key(&self, key_id: &ServerKeyId, requester: &Requester) -> Result { + fn restore_document_key( + &self, + key_id: ServerKeyId, + requester: Requester, + ) -> Box + Send> { // recover requestor' public key from signature - let public = requester.public(key_id).map_err(Error::InsufficientRequesterData)?; + let public = result(requester.public(&key_id).map_err(Error::InsufficientRequesterData)); // decrypt document key - let decryption_session = self.data.lock().cluster.new_decryption_session(key_id.clone(), - None, requester.clone(), None, false, false)?; - let document_key = decryption_session.wait(None) - .expect("when wait is called without timeout it always returns Some; qed")? 
- .decrypted_secret; + let data = self.data.clone(); + let stored_document_key = public.and_then(move |public| { + let data = data.lock(); + let session = data.cluster.new_decryption_session(key_id, None, requester.clone(), None, false, false); + result(session.map(|session| (public, session))) + }) + .and_then(|(public, session)| session.into_wait_future().map(move |document_key| (public, document_key))); // encrypt document key with requestor public key - let document_key = crypto::ecies::encrypt(&public, &DEFAULT_MAC, document_key.as_bytes()) - .map_err(|err| Error::Internal(format!("Error encrypting document key: {}", err)))?; - Ok(document_key) + let encrypted_document_key = stored_document_key + .and_then(|(public, document_key)| + crypto::ecies::encrypt(&public, &DEFAULT_MAC, document_key.decrypted_secret.as_bytes()) + .map_err(|err| Error::Internal(format!("Error encrypting document key: {}", err)))); + + Box::new(encrypted_document_key) } - fn restore_document_key_shadow(&self, key_id: &ServerKeyId, requester: &Requester) -> Result { - let decryption_session = self.data.lock().cluster.new_decryption_session(key_id.clone(), - None, requester.clone(), None, true, false)?; - decryption_session.wait(None) - .expect("when wait is called without timeout it always returns Some; qed") - .map_err(Into::into) + fn restore_document_key_shadow( + &self, + key_id: ServerKeyId, + requester: Requester, + ) -> Box + Send> { + return_session(self.data.lock().cluster.new_decryption_session(key_id, + None, requester.clone(), None, true, false)) } } impl MessageSigner for KeyServerImpl { - fn sign_message_schnorr(&self, key_id: &ServerKeyId, requester: &Requester, message: MessageHash) -> Result { + fn sign_message_schnorr( + &self, + key_id: ServerKeyId, + requester: Requester, + message: MessageHash, + ) -> Box + Send> { // recover requestor' public key from signature - let public = requester.public(key_id).map_err(Error::InsufficientRequesterData)?; + let public = 
result(requester.public(&key_id).map_err(Error::InsufficientRequesterData)); // sign message - let signing_session = self.data.lock().cluster.new_schnorr_signing_session(key_id.clone(), - requester.clone().into(), None, message)?; - let message_signature = signing_session.wait()?; + let data = self.data.clone(); + let signature = public.and_then(move |public| { + let data = data.lock(); + let session = data.cluster.new_schnorr_signing_session(key_id, requester.clone().into(), None, message); + result(session.map(|session| (public, session))) + }) + .and_then(|(public, session)| session.into_wait_future().map(move |signature| (public, signature))); // compose two message signature components into single one - let mut combined_signature = [0; 64]; - combined_signature[..32].clone_from_slice(message_signature.0.as_bytes()); - combined_signature[32..].clone_from_slice(message_signature.1.as_bytes()); - - // encrypt combined signature with requestor public key - let message_signature = crypto::ecies::encrypt(&public, &DEFAULT_MAC, &combined_signature) - .map_err(|err| Error::Internal(format!("Error encrypting message signature: {}", err)))?; - Ok(message_signature) + let combined_signature = signature.map(|(public, signature)| { + let mut combined_signature = [0; 64]; + combined_signature[..32].clone_from_slice(signature.0.as_bytes()); + combined_signature[32..].clone_from_slice(signature.1.as_bytes()); + (public, combined_signature) + }); + + // encrypt signature with requestor public key + let encrypted_signature = combined_signature + .and_then(|(public, combined_signature)| crypto::ecies::encrypt(&public, &DEFAULT_MAC, &combined_signature) + .map_err(|err| Error::Internal(format!("Error encrypting message signature: {}", err)))); + + Box::new(encrypted_signature) } - fn sign_message_ecdsa(&self, key_id: &ServerKeyId, requester: &Requester, message: MessageHash) -> Result { + fn sign_message_ecdsa( + &self, + key_id: ServerKeyId, + requester: Requester, + message: 
MessageHash, + ) -> Box + Send> { // recover requestor' public key from signature - let public = requester.public(key_id).map_err(Error::InsufficientRequesterData)?; + let public = result(requester.public(&key_id).map_err(Error::InsufficientRequesterData)); // sign message - let signing_session = self.data.lock().cluster.new_ecdsa_signing_session(key_id.clone(), - requester.clone().into(), None, message)?; - let message_signature = signing_session.wait()?; + let data = self.data.clone(); + let signature = public.and_then(move |public| { + let data = data.lock(); + let session = data.cluster.new_ecdsa_signing_session(key_id, requester.clone().into(), None, message); + result(session.map(|session| (public, session))) + }) + .and_then(|(public, session)| session.into_wait_future().map(move |signature| (public, signature))); // encrypt combined signature with requestor public key - let message_signature = crypto::ecies::encrypt(&public, &DEFAULT_MAC, &*message_signature) - .map_err(|err| Error::Internal(format!("Error encrypting message signature: {}", err)))?; - Ok(message_signature) + let encrypted_signature = signature + .and_then(|(public, signature)| crypto::ecies::encrypt(&public, &DEFAULT_MAC, &*signature) + .map_err(|err| Error::Internal(format!("Error encrypting message signature: {}", err)))); + + Box::new(encrypted_signature) } } @@ -215,6 +296,15 @@ impl KeyServerCore { } } +fn return_session( + session: Result, Error>, +) -> Box + Send> { + match session { + Ok(session) => Box::new(session.into_wait_future()), + Err(error) => Box::new(err(error)) + } +} + #[cfg(test)] pub mod tests { use std::collections::BTreeSet; @@ -222,6 +312,7 @@ pub mod tests { use std::sync::Arc; use std::net::SocketAddr; use std::collections::BTreeMap; + use futures::Future; use crypto::DEFAULT_MAC; use ethkey::{self, crypto, Secret, Random, Generator, verify_public}; use acl_storage::DummyAclStorage; @@ -244,45 +335,88 @@ pub mod tests { impl KeyServer for DummyKeyServer {} impl 
AdminSessionsServer for DummyKeyServer { - fn change_servers_set(&self, _old_set_signature: RequestSignature, _new_set_signature: RequestSignature, _new_servers_set: BTreeSet) -> Result<(), Error> { + fn change_servers_set( + &self, + _old_set_signature: RequestSignature, + _new_set_signature: RequestSignature, + _new_servers_set: BTreeSet, + ) -> Box + Send> { unimplemented!("test-only") } } impl ServerKeyGenerator for DummyKeyServer { - fn generate_key(&self, _key_id: &ServerKeyId, _author: &Requester, _threshold: usize) -> Result { + fn generate_key( + &self, + _key_id: ServerKeyId, + _author: Requester, + _threshold: usize, + ) -> Box + Send> { unimplemented!("test-only") } - fn restore_key_public(&self, _key_id: &ServerKeyId, _author: &Requester) -> Result { + fn restore_key_public( + &self, + _key_id: ServerKeyId, + _author: Requester, + ) -> Box + Send> { unimplemented!("test-only") } } impl DocumentKeyServer for DummyKeyServer { - fn store_document_key(&self, _key_id: &ServerKeyId, _author: &Requester, _common_point: Public, _encrypted_document_key: Public) -> Result<(), Error> { + fn store_document_key( + &self, + _key_id: ServerKeyId, + _author: Requester, + _common_point: Public, + _encrypted_document_key: Public, + ) -> Box + Send> { unimplemented!("test-only") } - fn generate_document_key(&self, _key_id: &ServerKeyId, _author: &Requester, _threshold: usize) -> Result { + fn generate_document_key( + &self, + _key_id: ServerKeyId, + _author: Requester, + _threshold: usize, + ) -> Box + Send> { unimplemented!("test-only") } - fn restore_document_key(&self, _key_id: &ServerKeyId, _requester: &Requester) -> Result { + fn restore_document_key( + &self, + _key_id: ServerKeyId, + _requester: Requester, + ) -> Box + Send> { unimplemented!("test-only") } - fn restore_document_key_shadow(&self, _key_id: &ServerKeyId, _requester: &Requester) -> Result { + fn restore_document_key_shadow( + &self, + _key_id: ServerKeyId, + _requester: Requester, + ) -> Box + Send> { 
unimplemented!("test-only") } } impl MessageSigner for DummyKeyServer { - fn sign_message_schnorr(&self, _key_id: &ServerKeyId, _requester: &Requester, _message: MessageHash) -> Result { + fn sign_message_schnorr( + &self, + _key_id: ServerKeyId, + _requester: Requester, + _message: MessageHash, + ) -> Box + Send> { unimplemented!("test-only") } - fn sign_message_ecdsa(&self, _key_id: &ServerKeyId, _requester: &Requester, _message: MessageHash) -> Result { + fn sign_message_ecdsa( + &self, + _key_id: ServerKeyId, + _requester: Requester, + _message: MessageHash, + ) -> Box + Send> { unimplemented!("test-only") } } @@ -355,13 +489,20 @@ pub mod tests { let threshold = 0; let document = Random.generate().unwrap().secret().clone(); let secret = Random.generate().unwrap().secret().clone(); - let signature = ethkey::sign(&secret, &document).unwrap(); - let generated_key = key_servers[0].generate_document_key(&document, &signature.clone().into(), threshold).unwrap(); + let signature: Requester = ethkey::sign(&secret, &document).unwrap().into(); + let generated_key = key_servers[0].generate_document_key( + *document, + signature.clone(), + threshold, + ).wait().unwrap(); let generated_key = crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &generated_key).unwrap(); // now let's try to retrieve key back for key_server in key_servers.iter() { - let retrieved_key = key_server.restore_document_key(&document, &signature.clone().into()).unwrap(); + let retrieved_key = key_server.restore_document_key( + *document, + signature.clone(), + ).wait().unwrap(); let retrieved_key = crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &retrieved_key).unwrap(); assert_eq!(retrieved_key, generated_key); } @@ -378,13 +519,20 @@ pub mod tests { // generate document key let document = Random.generate().unwrap().secret().clone(); let secret = Random.generate().unwrap().secret().clone(); - let signature = ethkey::sign(&secret, &document).unwrap(); - let generated_key = 
key_servers[0].generate_document_key(&document, &signature.clone().into(), *threshold).unwrap(); + let signature: Requester = ethkey::sign(&secret, &document).unwrap().into(); + let generated_key = key_servers[0].generate_document_key( + *document, + signature.clone(), + *threshold, + ).wait().unwrap(); let generated_key = crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &generated_key).unwrap(); // now let's try to retrieve key back for (i, key_server) in key_servers.iter().enumerate() { - let retrieved_key = key_server.restore_document_key(&document, &signature.clone().into()).unwrap(); + let retrieved_key = key_server.restore_document_key( + *document, + signature.clone(), + ).wait().unwrap(); let retrieved_key = crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &retrieved_key).unwrap(); assert_eq!(retrieved_key, generated_key); @@ -406,20 +554,24 @@ pub mod tests { // generate server key let server_key_id = Random.generate().unwrap().secret().clone(); let requestor_secret = Random.generate().unwrap().secret().clone(); - let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap(); - let server_public = key_servers[0].generate_key(&server_key_id, &signature.clone().into(), *threshold).unwrap(); + let signature: Requester = ethkey::sign(&requestor_secret, &server_key_id).unwrap().into(); + let server_public = key_servers[0].generate_key( + *server_key_id, + signature.clone(), + *threshold, + ).wait().unwrap(); // generate document key (this is done by KS client so that document key is unknown to any KS) let generated_key = Random.generate().unwrap().public().clone(); let encrypted_document_key = math::encrypt_secret(&generated_key, &server_public).unwrap(); // store document key - key_servers[0].store_document_key(&server_key_id, &signature.clone().into(), - encrypted_document_key.common_point, encrypted_document_key.encrypted_point).unwrap(); + key_servers[0].store_document_key(*server_key_id, signature.clone(), + encrypted_document_key.common_point, 
encrypted_document_key.encrypted_point).wait().unwrap(); // now let's try to retrieve key back for key_server in key_servers.iter() { - let retrieved_key = key_server.restore_document_key(&server_key_id, &signature.clone().into()).unwrap(); + let retrieved_key = key_server.restore_document_key(*server_key_id, signature.clone()).wait().unwrap(); let retrieved_key = crypto::ecies::decrypt(&requestor_secret, &DEFAULT_MAC, &retrieved_key).unwrap(); let retrieved_key = Public::from_slice(&retrieved_key); assert_eq!(retrieved_key, generated_key); @@ -438,12 +590,20 @@ pub mod tests { // generate server key let server_key_id = Random.generate().unwrap().secret().clone(); let requestor_secret = Random.generate().unwrap().secret().clone(); - let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap(); - let server_public = key_servers[0].generate_key(&server_key_id, &signature.clone().into(), *threshold).unwrap(); + let signature: Requester = ethkey::sign(&requestor_secret, &server_key_id).unwrap().into(); + let server_public = key_servers[0].generate_key( + *server_key_id, + signature.clone(), + *threshold, + ).wait().unwrap(); // sign message let message_hash = H256::from_low_u64_be(42); - let combined_signature = key_servers[0].sign_message_schnorr(&server_key_id, &signature.into(), message_hash.clone()).unwrap(); + let combined_signature = key_servers[0].sign_message_schnorr( + *server_key_id, + signature, + message_hash, + ).wait().unwrap(); let combined_signature = crypto::ecies::decrypt(&requestor_secret, &DEFAULT_MAC, &combined_signature).unwrap(); let signature_c = Secret::from_slice(&combined_signature[..32]).unwrap(); let signature_s = Secret::from_slice(&combined_signature[32..]).unwrap(); @@ -463,15 +623,19 @@ pub mod tests { let threshold = 0; let document = Random.generate().unwrap().secret().clone(); let secret = Random.generate().unwrap().secret().clone(); - let signature = ethkey::sign(&secret, &document).unwrap(); - let generated_key = 
key_servers[0].generate_document_key(&document, &signature.clone().into(), threshold).unwrap(); + let signature: Requester = ethkey::sign(&secret, &document).unwrap().into(); + let generated_key = key_servers[0].generate_document_key( + *document, + signature.clone(), + threshold, + ).wait().unwrap(); let generated_key = crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &generated_key).unwrap(); // remove key from node0 key_storages[0].remove(&document).unwrap(); // now let's try to retrieve key back by requesting it from node0, so that session must be delegated - let retrieved_key = key_servers[0].restore_document_key(&document, &signature.into()).unwrap(); + let retrieved_key = key_servers[0].restore_document_key(*document, signature).wait().unwrap(); let retrieved_key = crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &retrieved_key).unwrap(); assert_eq!(retrieved_key, generated_key); drop(runtime); @@ -486,15 +650,19 @@ pub mod tests { // generate server key let server_key_id = Random.generate().unwrap().secret().clone(); let requestor_secret = Random.generate().unwrap().secret().clone(); - let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap(); - let server_public = key_servers[0].generate_key(&server_key_id, &signature.clone().into(), threshold).unwrap(); + let signature: Requester = ethkey::sign(&requestor_secret, &server_key_id).unwrap().into(); + let server_public = key_servers[0].generate_key(*server_key_id, signature.clone(), threshold).wait().unwrap(); // remove key from node0 key_storages[0].remove(&server_key_id).unwrap(); // sign message let message_hash = H256::from_low_u64_be(42); - let combined_signature = key_servers[0].sign_message_schnorr(&server_key_id, &signature.into(), message_hash.clone()).unwrap(); + let combined_signature = key_servers[0].sign_message_schnorr( + *server_key_id, + signature, + message_hash, + ).wait().unwrap(); let combined_signature = crypto::ecies::decrypt(&requestor_secret, &DEFAULT_MAC, 
&combined_signature).unwrap(); let signature_c = Secret::from_slice(&combined_signature[..32]).unwrap(); let signature_s = Secret::from_slice(&combined_signature[32..]).unwrap(); @@ -514,14 +682,22 @@ pub mod tests { let server_key_id = Random.generate().unwrap().secret().clone(); let requestor_secret = Random.generate().unwrap().secret().clone(); let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap(); - let server_public = key_servers[0].generate_key(&server_key_id, &signature.clone().into(), threshold).unwrap(); + let server_public = key_servers[0].generate_key( + *server_key_id, + signature.clone().into(), + threshold, + ).wait().unwrap(); // remove key from node0 key_storages[0].remove(&server_key_id).unwrap(); // sign message let message_hash = H256::random(); - let signature = key_servers[0].sign_message_ecdsa(&server_key_id, &signature.into(), message_hash.clone()).unwrap(); + let signature = key_servers[0].sign_message_ecdsa( + *server_key_id, + signature.clone().into(), + message_hash, + ).wait().unwrap(); let signature = crypto::ecies::decrypt(&requestor_secret, &DEFAULT_MAC, &signature).unwrap(); let signature = H520::from_slice(&signature[0..65]); diff --git a/secret-store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs b/secret-store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs index e2d115bcf23..8db04632647 100644 --- a/secret-store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs +++ b/secret-store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs @@ -18,10 +18,11 @@ use std::sync::Arc; use std::collections::{BTreeSet, BTreeMap}; use ethereum_types::{Address, H256}; use ethkey::Secret; -use parking_lot::{Mutex, Condvar}; +use futures::Oneshot; +use parking_lot::Mutex; use key_server_cluster::{Error, SessionId, NodeId, DocumentKeyShare}; use key_server_cluster::cluster::Cluster; -use 
key_server_cluster::cluster_sessions::{SessionIdWithSubSession, ClusterSession}; +use key_server_cluster::cluster_sessions::{SessionIdWithSubSession, ClusterSession, CompletionSignal}; use key_server_cluster::decryption_session::SessionImpl as DecryptionSession; use key_server_cluster::signing_session_ecdsa::SessionImpl as EcdsaSigningSession; use key_server_cluster::signing_session_schnorr::SessionImpl as SchnorrSigningSession; @@ -87,8 +88,8 @@ struct SessionCore { pub transport: T, /// Session nonce. pub nonce: u64, - /// SessionImpl completion condvar. - pub completed: Condvar, + /// Session completion signal. + pub completed: CompletionSignal>, } /// Mutable session data. @@ -166,8 +167,9 @@ pub struct LargestSupportResultComputer; impl SessionImpl where T: SessionTransport { /// Create new session. - pub fn new(params: SessionParams) -> Self { - SessionImpl { + pub fn new(params: SessionParams) -> (Self, Oneshot, Error>>) { + let (completed, oneshot) = CompletionSignal::new(); + (SessionImpl { core: SessionCore { meta: params.meta, sub_session: params.sub_session, @@ -175,7 +177,7 @@ impl SessionImpl where T: SessionTransport { result_computer: params.result_computer, transport: params.transport, nonce: params.nonce, - completed: Condvar::new(), + completed, }, data: Mutex::new(SessionData { state: SessionState::WaitingForInitialization, @@ -191,7 +193,7 @@ impl SessionImpl where T: SessionTransport { continue_with: None, failed_continue_with: None, }) - } + }, oneshot) } /// Return session meta. @@ -221,10 +223,9 @@ impl SessionImpl where T: SessionTransport { self.data.lock().failed_continue_with.take() } - /// Wait for session completion. - pub fn wait(&self) -> Result, Error> { - Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone()) - .expect("wait_session returns Some if called without timeout; qed") + /// Return session completion result (if available). 
+ pub fn result(&self) -> Option, Error>> { + self.data.lock().result.clone() } /// Retrieve common key data (author, threshold, public), if available. @@ -344,7 +345,7 @@ impl SessionImpl where T: SessionTransport { // update state data.state = SessionState::Finished; data.result = Some(Ok(None)); - self.core.completed.notify_all(); + self.core.completed.send(Ok(None)); Ok(()) } @@ -450,15 +451,18 @@ impl SessionImpl where T: SessionTransport { } } + let result = result.map(Some); data.state = SessionState::Finished; - data.result = Some(result.map(Some)); - core.completed.notify_all(); + data.result = Some(result.clone()); + core.completed.send(result); } } } impl ClusterSession for SessionImpl where T: SessionTransport { type Id = SessionIdWithSubSession; + type CreationData = (); + type SuccessfulResult = Option<(H256, NodeId)>; fn type_name() -> &'static str { "version negotiation" @@ -482,7 +486,7 @@ impl ClusterSession for SessionImpl where T: SessionTransport { warn!(target: "secretstore_net", "{}: key version negotiation session failed with timeout", self.core.meta.self_node_id); data.result = Some(Err(Error::ConsensusTemporaryUnreachable)); - self.core.completed.notify_all(); + self.core.completed.send(Err(Error::ConsensusTemporaryUnreachable)); } } } @@ -510,8 +514,8 @@ impl ClusterSession for SessionImpl where T: SessionTransport { self.core.meta.self_node_id, error, node); data.state = SessionState::Finished; - data.result = Some(Err(error)); - self.core.completed.notify_all(); + data.result = Some(Err(error.clone())); + self.core.completed.send(Err(error)); } fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { @@ -698,7 +702,7 @@ mod tests { cluster: cluster, }, nonce: 0, - }), + }).0, }) }).collect(), queue: VecDeque::new(), diff --git a/secret-store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs b/secret-store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs index 
a0d4acdc125..299d02121f1 100644 --- a/secret-store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs +++ b/secret-store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs @@ -17,13 +17,14 @@ use std::sync::Arc; use std::collections::{BTreeSet, BTreeMap}; use std::collections::btree_map::Entry; -use parking_lot::{Mutex, Condvar}; +use futures::Oneshot; +use parking_lot::Mutex; use ethereum_types::H256; use ethkey::{Public, Signature}; use key_server_cluster::{Error, NodeId, SessionId, KeyStorage}; use key_server_cluster::math; use key_server_cluster::cluster::Cluster; -use key_server_cluster::cluster_sessions::ClusterSession; +use key_server_cluster::cluster_sessions::{ClusterSession, CompletionSignal}; use key_server_cluster::message::{Message, ServersSetChangeMessage, ConsensusMessageWithServersSet, InitializeConsensusSessionWithServersSet, ServersSetChangeConsensusMessage, ConfirmConsensusInitialization, UnknownSessionsRequest, UnknownSessions, @@ -93,8 +94,8 @@ struct SessionCore { pub admin_public: Public, /// Migration id (if this session is a part of auto-migration process). pub migration_id: Option, - /// SessionImpl completion condvar. - pub completed: Condvar, + /// Session completion signal. + pub completed: CompletionSignal<()>, } /// Servers set change consensus session type. @@ -182,8 +183,9 @@ struct ServersSetChangeKeyVersionNegotiationTransport { impl SessionImpl { /// Create new servers set change session. 
- pub fn new(params: SessionParams) -> Result { - Ok(SessionImpl { + pub fn new(params: SessionParams) -> Result<(Self, Oneshot>), Error> { + let (completed, oneshot) = CompletionSignal::new(); + Ok((SessionImpl { core: SessionCore { meta: params.meta, cluster: params.cluster, @@ -192,7 +194,7 @@ impl SessionImpl { all_nodes_set: params.all_nodes_set, admin_public: params.admin_public, migration_id: params.migration_id, - completed: Condvar::new(), + completed, }, data: Mutex::new(SessionData { state: SessionState::EstablishingConsensus, @@ -205,7 +207,7 @@ impl SessionImpl { active_key_sessions: BTreeMap::new(), result: None, }), - }) + }, oneshot)) } /// Get session id. @@ -218,10 +220,9 @@ impl SessionImpl { self.core.migration_id.as_ref() } - /// Wait for session completion. - pub fn wait(&self) -> Result<(), Error> { - Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone()) - .expect("wait_session returns Some if called without timeout; qed") + /// Return session completion result (if available). + pub fn result(&self) -> Option> { + self.data.lock().result.clone() } /// Initialize servers set change session on master node. 
@@ -423,7 +424,7 @@ impl SessionImpl { &KeyVersionNegotiationMessage::RequestKeyVersions(ref message) if sender == &self.core.meta.master_node_id => { let key_id = message.session.clone().into(); let key_share = self.core.key_storage.get(&key_id)?; - let negotiation_session = KeyVersionNegotiationSessionImpl::new(KeyVersionNegotiationSessionParams { + let (negotiation_session, _) = KeyVersionNegotiationSessionImpl::new(KeyVersionNegotiationSessionParams { meta: ShareChangeSessionMeta { id: key_id.clone(), self_node_id: self.core.meta.self_node_id.clone(), @@ -671,7 +672,7 @@ impl SessionImpl { } data.state = SessionState::Finished; - self.core.completed.notify_all(); + self.core.completed.send(Ok(())); Ok(()) } @@ -741,7 +742,7 @@ impl SessionImpl { }; let key_share = core.key_storage.get(&key_id)?; - let negotiation_session = KeyVersionNegotiationSessionImpl::new(KeyVersionNegotiationSessionParams { + let (negotiation_session, _) = KeyVersionNegotiationSessionImpl::new(KeyVersionNegotiationSessionParams { meta: ShareChangeSessionMeta { id: key_id, self_node_id: core.meta.self_node_id.clone(), @@ -797,7 +798,8 @@ impl SessionImpl { let negotiation_session = data.negotiation_sessions.remove(&key_id) .expect("share change session is only initialized when negotiation is completed; qed"); let (selected_version, selected_master) = negotiation_session - .wait()? + .result() + .expect("share change session is only initialized when negotiation is completed; qed")? 
.expect("initialize_share_change_session is only called on share change master; negotiation session completes with some on master; qed"); let selected_version_holders = negotiation_session.version_holders(&selected_version)?; let selected_version_threshold = negotiation_session.common_key_data()?.threshold; @@ -882,7 +884,7 @@ impl SessionImpl { if data.result.is_some() && data.active_key_sessions.len() == 0 { data.state = SessionState::Finished; - core.completed.notify_all(); + core.completed.send(Ok(())); } Ok(()) @@ -907,7 +909,7 @@ impl SessionImpl { data.state = SessionState::Finished; data.result = Some(Ok(())); - core.completed.notify_all(); + core.completed.send(Ok(())); Ok(()) } @@ -915,6 +917,8 @@ impl SessionImpl { impl ClusterSession for SessionImpl { type Id = SessionId; + type CreationData = (); // never used directly + type SuccessfulResult = (); fn type_name() -> &'static str { "servers set change" @@ -954,8 +958,8 @@ impl ClusterSession for SessionImpl { self.core.meta.self_node_id, error, node); data.state = SessionState::Finished; - data.result = Some(Err(error)); - self.core.completed.notify_all(); + data.result = Some(Err(error.clone())); + self.core.completed.send(Err(error)); } fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { @@ -1109,7 +1113,7 @@ pub mod tests { nonce: 1, admin_public: admin_public, migration_id: None, - }).unwrap() + }).unwrap().0 } } diff --git a/secret-store/src/key_server_cluster/admin_sessions/share_add_session.rs b/secret-store/src/key_server_cluster/admin_sessions/share_add_session.rs index b5195a62939..ef7882d68c0 100644 --- a/secret-store/src/key_server_cluster/admin_sessions/share_add_session.rs +++ b/secret-store/src/key_server_cluster/admin_sessions/share_add_session.rs @@ -18,10 +18,11 @@ use std::sync::Arc; use std::collections::{BTreeSet, BTreeMap}; use ethereum_types::{H256, Address}; use ethkey::{Public, Secret, Signature}; -use parking_lot::{Mutex, Condvar}; +use 
futures::Oneshot; +use parking_lot::Mutex; use key_server_cluster::{Error, SessionId, NodeId, DocumentKeyShare, DocumentKeyShareVersion, KeyStorage}; use key_server_cluster::cluster::Cluster; -use key_server_cluster::cluster_sessions::ClusterSession; +use key_server_cluster::cluster_sessions::{ClusterSession, CompletionSignal}; use key_server_cluster::math; use key_server_cluster::message::{Message, ShareAddMessage, ShareAddConsensusMessage, ConsensusMessageOfShareAdd, InitializeConsensusSessionOfShareAdd, KeyShareCommon, NewKeysDissemination, ShareAddError, @@ -71,8 +72,8 @@ struct SessionCore { pub key_storage: Arc, /// Administrator public key. pub admin_public: Option, - /// SessionImpl completion condvar. - pub completed: Condvar, + /// Session completion signal. + pub completed: CompletionSignal<()>, } /// Share add consensus session type. @@ -158,10 +159,10 @@ pub struct IsolatedSessionTransport { impl SessionImpl where T: SessionTransport { /// Create new share addition session. - pub fn new(params: SessionParams) -> Result { + pub fn new(params: SessionParams) -> Result<(Self, Oneshot>), Error> { let key_share = params.key_storage.get(¶ms.meta.id)?; - - Ok(SessionImpl { + let (completed, oneshot) = CompletionSignal::new(); + Ok((SessionImpl { core: SessionCore { meta: params.meta, nonce: params.nonce, @@ -169,7 +170,7 @@ impl SessionImpl where T: SessionTransport { transport: params.transport, key_storage: params.key_storage, admin_public: params.admin_public, - completed: Condvar::new(), + completed, }, data: Mutex::new(SessionData { state: SessionState::ConsensusEstablishing, @@ -181,7 +182,7 @@ impl SessionImpl where T: SessionTransport { secret_subshares: None, result: None, }), - }) + }, oneshot)) } /// Set pre-established consensus data. 
@@ -752,7 +753,7 @@ impl SessionImpl where T: SessionTransport { // signal session completion data.state = SessionState::Finished; data.result = Some(Ok(())); - core.completed.notify_all(); + core.completed.send(Ok(())); Ok(()) } @@ -760,6 +761,8 @@ impl SessionImpl where T: SessionTransport { impl ClusterSession for SessionImpl where T: SessionTransport { type Id = SessionId; + type CreationData = (); // never used directly + type SuccessfulResult = (); fn type_name() -> &'static str { "share add" @@ -801,8 +804,8 @@ impl ClusterSession for SessionImpl where T: SessionTransport { self.core.meta.self_node_id, error, node); data.state = SessionState::Finished; - data.result = Some(Err(error)); - self.core.completed.notify_all(); + data.result = Some(Err(error.clone())); + self.core.completed.send(Err(error)); } fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { @@ -914,7 +917,7 @@ pub mod tests { key_storage, admin_public: Some(admin_public), nonce: 1, - }).unwrap() + }).unwrap().0 } } diff --git a/secret-store/src/key_server_cluster/admin_sessions/share_change_session.rs b/secret-store/src/key_server_cluster/admin_sessions/share_change_session.rs index bd2bed2d61d..d6af236d4b2 100644 --- a/secret-store/src/key_server_cluster/admin_sessions/share_change_session.rs +++ b/secret-store/src/key_server_cluster/admin_sessions/share_change_session.rs @@ -166,7 +166,7 @@ impl ShareChangeSession { let consensus_group = self.consensus_group.take().ok_or(Error::InvalidStateForRequest)?; let version_holders = self.version_holders.take().ok_or(Error::InvalidStateForRequest)?; let new_nodes_map = self.new_nodes_map.take().ok_or(Error::InvalidStateForRequest)?; - let share_add_session = ShareAddSessionImpl::new(ShareAddSessionParams { + let (share_add_session, _) = ShareAddSessionImpl::new(ShareAddSessionParams { meta: self.meta.clone(), nonce: self.nonce, transport: ShareChangeTransport::new(self.session_id, self.nonce, self.cluster.clone()), diff 
--git a/secret-store/src/key_server_cluster/client_sessions/decryption_session.rs b/secret-store/src/key_server_cluster/client_sessions/decryption_session.rs index b47c81b98ce..1bda0bc3305 100644 --- a/secret-store/src/key_server_cluster/client_sessions/decryption_session.rs +++ b/secret-store/src/key_server_cluster/client_sessions/decryption_session.rs @@ -16,14 +16,14 @@ use std::collections::{BTreeSet, BTreeMap}; use std::sync::Arc; -use std::time; -use parking_lot::{Mutex, Condvar}; +use futures::Oneshot; +use parking_lot::Mutex; use ethereum_types::{Address, H256}; use ethkey::Secret; use key_server_cluster::{Error, AclStorage, DocumentKeyShare, NodeId, SessionId, Requester, EncryptedDocumentKeyShadow, SessionMeta}; use key_server_cluster::cluster::Cluster; -use key_server_cluster::cluster_sessions::{SessionIdWithSubSession, ClusterSession}; +use key_server_cluster::cluster_sessions::{SessionIdWithSubSession, ClusterSession, CompletionSignal}; use key_server_cluster::message::{Message, DecryptionMessage, DecryptionConsensusMessage, RequestPartialDecryption, PartialDecryption, DecryptionSessionError, DecryptionSessionCompleted, ConsensusMessage, InitializeConsensusSession, ConfirmConsensusInitialization, DecryptionSessionDelegation, DecryptionSessionDelegationCompleted}; @@ -59,8 +59,8 @@ struct SessionCore { pub cluster: Arc, /// Session-level nonce. pub nonce: u64, - /// SessionImpl completion condvar. - pub completed: Condvar, + /// Session completion signal. + pub completed: CompletionSignal, } /// Decryption consensus session type. @@ -147,7 +147,10 @@ enum DelegationStatus { impl SessionImpl { /// Create new decryption session. 
- pub fn new(params: SessionParams, requester: Option) -> Result { + pub fn new( + params: SessionParams, + requester: Option, + ) -> Result<(Self, Oneshot>), Error> { debug_assert_eq!(params.meta.threshold, params.key_share.as_ref().map(|ks| ks.threshold).unwrap_or_default()); // check that common_point and encrypted_point are already set @@ -175,14 +178,15 @@ impl SessionImpl { consensus_transport: consensus_transport, })?; - Ok(SessionImpl { + let (completed, oneshot) = CompletionSignal::new(); + Ok((SessionImpl { core: SessionCore { meta: params.meta, access_key: params.access_key, key_share: params.key_share, cluster: params.cluster, nonce: params.nonce, - completed: Condvar::new(), + completed, }, data: Mutex::new(SessionData { version: None, @@ -194,7 +198,7 @@ impl SessionImpl { delegation_status: None, result: None, }), - }) + }, oneshot)) } /// Get this node id. @@ -209,7 +213,7 @@ impl SessionImpl { &self.core.access_key } - /// Get session state. + /// Get session state (tests only). #[cfg(test)] pub fn state(&self) -> ConsensusSessionState { self.data.lock().consensus_session.state() @@ -231,9 +235,9 @@ impl SessionImpl { self.data.lock().origin.clone() } - /// Wait for session completion. - pub fn wait(&self, timeout: Option) -> Option> { - Self::wait_session(&self.core.completed, &self.data, timeout, |data| data.result.clone()) + /// Get session completion result (if available). + pub fn result(&self) -> Option> { + self.data.lock().result.clone() } /// Get broadcasted shadows. 
@@ -667,13 +671,15 @@ impl SessionImpl { }; } - data.result = Some(result); - core.completed.notify_all(); + data.result = Some(result.clone()); + core.completed.send(result); } } impl ClusterSession for SessionImpl { type Id = SessionIdWithSubSession; + type CreationData = Requester; + type SuccessfulResult = EncryptedDocumentKeyShadow; fn type_name() -> &'static str { "decryption" @@ -832,7 +838,7 @@ pub fn create_default_decryption_session() -> Arc { acl_storage: Arc::new(DummyAclStorage::default()), cluster: Arc::new(DummyCluster::new(Default::default())), nonce: 0, - }, Some(Requester::Public(H512::from_low_u64_be(2)))).unwrap()) + }, Some(Requester::Public(H512::from_low_u64_be(2)))).unwrap().0) } #[cfg(test)] @@ -915,7 +921,7 @@ mod tests { acl_storage: acl_storages[i].clone(), cluster: clusters[i].clone(), nonce: 0, - }, if i == 0 { signature.clone().map(Into::into) } else { None }).unwrap()).collect(); + }, if i == 0 { signature.clone().map(Into::into) } else { None }).unwrap().0).collect(); (requester, clusters, acl_storages, sessions) } @@ -1014,7 +1020,9 @@ mod tests { acl_storage: Arc::new(DummyAclStorage::default()), cluster: Arc::new(DummyCluster::new(self_node_id.clone())), nonce: 0, - }, Some(Requester::Signature(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()))).unwrap(); + }, Some(Requester::Signature( + ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap() + ))).unwrap().0; assert_eq!(session.initialize(Default::default(), Default::default(), false, false), Err(Error::InvalidMessage)); } @@ -1049,7 +1057,9 @@ mod tests { acl_storage: Arc::new(DummyAclStorage::default()), cluster: Arc::new(DummyCluster::new(self_node_id.clone())), nonce: 0, - }, Some(Requester::Signature(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()))).unwrap(); + }, Some(Requester::Signature( + ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap() + 
))).unwrap().0; assert_eq!(session.initialize(Default::default(), Default::default(), false, false), Err(Error::ConsensusUnreachable)); } diff --git a/secret-store/src/key_server_cluster/client_sessions/encryption_session.rs b/secret-store/src/key_server_cluster/client_sessions/encryption_session.rs index a3eabc35c61..bdead62e58c 100644 --- a/secret-store/src/key_server_cluster/client_sessions/encryption_session.rs +++ b/secret-store/src/key_server_cluster/client_sessions/encryption_session.rs @@ -16,15 +16,15 @@ use std::collections::BTreeMap; use std::fmt::{Debug, Formatter, Error as FmtError}; -use std::time; use std::sync::Arc; -use parking_lot::{Condvar, Mutex}; +use futures::Oneshot; +use parking_lot::Mutex; use ethereum_types::Address; use ethkey::Public; use key_server_cluster::{Error, NodeId, SessionId, Requester, KeyStorage, DocumentKeyShare, ServerKeyId}; use key_server_cluster::cluster::Cluster; -use key_server_cluster::cluster_sessions::ClusterSession; +use key_server_cluster::cluster_sessions::{ClusterSession, CompletionSignal}; use key_server_cluster::message::{Message, EncryptionMessage, InitializeEncryptionSession, ConfirmEncryptionInitialization, EncryptionSessionError}; @@ -49,8 +49,8 @@ pub struct SessionImpl { cluster: Arc, /// Session nonce. nonce: u64, - /// SessionImpl completion condvar. - completed: Condvar, + /// Session completion signal. + completed: CompletionSignal<()>, /// Mutable session data. data: Mutex, } @@ -108,23 +108,24 @@ pub enum SessionState { impl SessionImpl { /// Create new encryption session. 
- pub fn new(params: SessionParams) -> Result { + pub fn new(params: SessionParams) -> Result<(Self, Oneshot>), Error> { check_encrypted_data(params.encrypted_data.as_ref())?; - Ok(SessionImpl { + let (completed, oneshot) = CompletionSignal::new(); + Ok((SessionImpl { id: params.id, self_node_id: params.self_node_id, encrypted_data: params.encrypted_data, key_storage: params.key_storage, cluster: params.cluster, nonce: params.nonce, - completed: Condvar::new(), + completed, data: Mutex::new(SessionData { state: SessionState::WaitingForInitialization, nodes: BTreeMap::new(), result: None, }), - }) + }, oneshot)) } /// Get this node Id. @@ -132,12 +133,6 @@ impl SessionImpl { &self.self_node_id } - /// Wait for session completion. - pub fn wait(&self, timeout: Option) -> Result<(), Error> { - Self::wait_session(&self.completed, &self.data, timeout, |data| data.result.clone()) - .expect("wait_session returns Some if called without timeout; qed") - } - /// Start new session initialization. This must be called on master node. 
pub fn initialize(&self, requester: Requester, common_point: Public, encrypted_point: Public) -> Result<(), Error> { let mut data = self.data.lock(); @@ -175,7 +170,7 @@ impl SessionImpl { } else { data.state = SessionState::Finished; data.result = Some(Ok(())); - self.completed.notify_all(); + self.completed.send(Ok(())); Ok(()) } @@ -230,7 +225,7 @@ impl SessionImpl { // update state data.state = SessionState::Finished; data.result = Some(Ok(())); - self.completed.notify_all(); + self.completed.send(Ok(())); Ok(()) } @@ -238,6 +233,8 @@ impl SessionImpl { impl ClusterSession for SessionImpl { type Id = SessionId; + type CreationData = (); + type SuccessfulResult = (); fn type_name() -> &'static str { "encryption" @@ -260,7 +257,7 @@ impl ClusterSession for SessionImpl { data.state = SessionState::Failed; data.result = Some(Err(Error::NodeDisconnected)); - self.completed.notify_all(); + self.completed.send(Err(Error::NodeDisconnected)); } fn on_session_timeout(&self) { @@ -270,7 +267,7 @@ impl ClusterSession for SessionImpl { data.state = SessionState::Failed; data.result = Some(Err(Error::NodeDisconnected)); - self.completed.notify_all(); + self.completed.send(Err(Error::NodeDisconnected)); } fn on_session_error(&self, node: &NodeId, error: Error) { @@ -290,8 +287,8 @@ impl ClusterSession for SessionImpl { warn!("{}: encryption session failed with error: {} from {}", self.node(), error, node); data.state = SessionState::Failed; - data.result = Some(Err(error)); - self.completed.notify_all(); + data.result = Some(Err(error.clone())); + self.completed.send(Err(error)); } fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { diff --git a/secret-store/src/key_server_cluster/client_sessions/generation_session.rs b/secret-store/src/key_server_cluster/client_sessions/generation_session.rs index 0fa805f5718..02d33f5dbee 100644 --- a/secret-store/src/key_server_cluster/client_sessions/generation_session.rs +++ 
b/secret-store/src/key_server_cluster/client_sessions/generation_session.rs @@ -16,15 +16,15 @@ use std::collections::{BTreeSet, BTreeMap, VecDeque}; use std::fmt::{Debug, Formatter, Error as FmtError}; -use std::time::Duration; use std::sync::Arc; -use parking_lot::{Condvar, Mutex}; +use futures::Oneshot; +use parking_lot::Mutex; use ethereum_types::Address; use ethkey::{Public, Secret}; use key_server_cluster::{Error, NodeId, SessionId, KeyStorage, DocumentKeyShare, DocumentKeyShareVersion}; use key_server_cluster::math; use key_server_cluster::cluster::Cluster; -use key_server_cluster::cluster_sessions::ClusterSession; +use key_server_cluster::cluster_sessions::{ClusterSession, CompletionSignal}; use key_server_cluster::message::{Message, GenerationMessage, InitializeSession, ConfirmInitialization, CompleteInitialization, KeysDissemination, PublicKeyShare, SessionError, SessionCompleted}; @@ -47,10 +47,10 @@ pub struct SessionImpl { cluster: Arc, /// Session-level nonce. nonce: u64, - /// SessionImpl completion condvar. - completed: Condvar, /// Mutable session data. data: Mutex, + /// Session completion signal. + completed: CompletionSignal, } /// SessionImpl creation parameters @@ -204,8 +204,9 @@ impl From> for InitializationNodes { impl SessionImpl { /// Create new generation session. 
- pub fn new(params: SessionParams) -> Self { - SessionImpl { + pub fn new(params: SessionParams) -> (Self, Oneshot>) { + let (completed, oneshot) = CompletionSignal::new(); + (SessionImpl { id: params.id, self_node_id: params.self_node_id, key_storage: params.key_storage, @@ -213,7 +214,7 @@ impl SessionImpl { // when nonce.is_nonce(), generation session is wrapped // => nonce is checked somewhere else && we can pass any value nonce: params.nonce.unwrap_or_default(), - completed: Condvar::new(), + completed, data: Mutex::new(SessionData { state: SessionState::WaitingForInitialization, simulate_faulty_behaviour: false, @@ -230,7 +231,7 @@ impl SessionImpl { key_share: None, joint_public_and_secret: None, }), - } + }, oneshot) } /// Get this node Id. @@ -259,10 +260,10 @@ impl SessionImpl { self.data.lock().origin.clone() } - /// Wait for session completion. - pub fn wait(&self, timeout: Option) -> Option> { - Self::wait_session(&self.completed, &self.data, timeout, |data| data.joint_public_and_secret.clone() - .map(|r| r.map(|r| r.0.clone()))) + /// Get session completion result (if available). + pub fn result(&self) -> Option> { + self.data.lock().joint_public_and_secret.clone() + .map(|r| r.map(|r| r.0.clone())) } /// Get generated public and secret (if any). 
@@ -328,8 +329,12 @@ impl SessionImpl { self.verify_keys()?; self.complete_generation()?; - self.data.lock().state = SessionState::Finished; - self.completed.notify_all(); + let mut data = self.data.lock(); + let result = data.joint_public_and_secret.clone() + .expect("session is instantly completed on a single node; qed") + .map(|(p, _, _)| p); + data.state = SessionState::Finished; + self.completed.send(result); Ok(()) } @@ -619,8 +624,11 @@ impl SessionImpl { } // we have received enough confirmations => complete session + let result = data.joint_public_and_secret.clone() + .expect("we're on master node; we have received last completion confirmation; qed") + .map(|(p, _, _)| p); data.state = SessionState::Finished; - self.completed.notify_all(); + self.completed.send(result); Ok(()) } @@ -813,6 +821,8 @@ impl SessionImpl { impl ClusterSession for SessionImpl { type Id = SessionId; + type CreationData = (); + type SuccessfulResult = Public; fn type_name() -> &'static str { "generation" @@ -838,7 +848,7 @@ impl ClusterSession for SessionImpl { data.state = SessionState::Failed; data.key_share = Some(Err(Error::NodeDisconnected)); data.joint_public_and_secret = Some(Err(Error::NodeDisconnected)); - self.completed.notify_all(); + self.completed.send(Err(Error::NodeDisconnected)); } fn on_session_timeout(&self) { @@ -849,7 +859,7 @@ impl ClusterSession for SessionImpl { data.state = SessionState::Failed; data.key_share = Some(Err(Error::NodeDisconnected)); data.joint_public_and_secret = Some(Err(Error::NodeDisconnected)); - self.completed.notify_all(); + self.completed.send(Err(Error::NodeDisconnected)); } fn on_session_error(&self, node: &NodeId, error: Error) { @@ -867,8 +877,8 @@ impl ClusterSession for SessionImpl { let mut data = self.data.lock(); data.state = SessionState::Failed; data.key_share = Some(Err(error.clone())); - data.joint_public_and_secret = Some(Err(error)); - self.completed.notify_all(); + data.joint_public_and_secret = Some(Err(error.clone())); 
+ self.completed.send(Err(error)); } fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { diff --git a/secret-store/src/key_server_cluster/client_sessions/signing_session_ecdsa.rs b/secret-store/src/key_server_cluster/client_sessions/signing_session_ecdsa.rs index fe3bd4f1143..d3c801af6e4 100644 --- a/secret-store/src/key_server_cluster/client_sessions/signing_session_ecdsa.rs +++ b/secret-store/src/key_server_cluster/client_sessions/signing_session_ecdsa.rs @@ -17,12 +17,13 @@ use std::collections::{BTreeSet, BTreeMap}; use std::collections::btree_map::Entry; use std::sync::Arc; -use parking_lot::{Mutex, Condvar}; +use futures::Oneshot; +use parking_lot::Mutex; use ethkey::{Public, Secret, Signature, sign}; use ethereum_types::H256; use key_server_cluster::{Error, NodeId, SessionId, SessionMeta, AclStorage, DocumentKeyShare, Requester}; use key_server_cluster::cluster::{Cluster}; -use key_server_cluster::cluster_sessions::{SessionIdWithSubSession, ClusterSession}; +use key_server_cluster::cluster_sessions::{SessionIdWithSubSession, ClusterSession, CompletionSignal}; use key_server_cluster::generation_session::{SessionImpl as GenerationSession, SessionParams as GenerationSessionParams, SessionState as GenerationSessionState}; use key_server_cluster::math; @@ -58,8 +59,8 @@ struct SessionCore { pub cluster: Arc, /// Session-level nonce. pub nonce: u64, - /// SessionImpl completion condvar. - pub completed: Condvar, + /// Session completion signal. + pub completed: CompletionSignal, } /// Signing consensus session type. @@ -170,7 +171,10 @@ enum DelegationStatus { impl SessionImpl { /// Create new signing session. 
- pub fn new(params: SessionParams, requester: Option) -> Result { + pub fn new( + params: SessionParams, + requester: Option, + ) -> Result<(Self, Oneshot>), Error> { debug_assert_eq!(params.meta.threshold, params.key_share.as_ref().map(|ks| ks.threshold).unwrap_or_default()); let consensus_transport = SigningConsensusTransport { @@ -197,14 +201,15 @@ impl SessionImpl { consensus_transport: consensus_transport, })?; - Ok(SessionImpl { + let (completed, oneshot) = CompletionSignal::new(); + Ok((SessionImpl { core: SessionCore { meta: params.meta, access_key: params.access_key, key_share: params.key_share, cluster: params.cluster, nonce: params.nonce, - completed: Condvar::new(), + completed, }, data: Mutex::new(SessionData { state: SessionState::ConsensusEstablishing, @@ -218,10 +223,11 @@ impl SessionImpl { delegation_status: None, result: None, }), - }) + }, oneshot)) } /// Wait for session completion. + #[cfg(test)] pub fn wait(&self) -> Result { Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone()) .expect("wait_session returns Some if called without timeout; qed") @@ -251,7 +257,6 @@ impl SessionImpl { })))?; data.delegation_status = Some(DelegationStatus::DelegatedTo(master)); Ok(()) - } /// Initialize signing session on master node. @@ -284,8 +289,9 @@ impl SessionImpl { // consensus established => threshold is 0 => we can generate signature on this node if data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished { - data.result = Some(sign(&key_version.secret_share, &message_hash).map_err(Into::into)); - self.core.completed.notify_all(); + let result = sign(&key_version.secret_share, &message_hash).map_err(Into::into); + data.result = Some(result.clone()); + self.core.completed.send(result); } Ok(()) @@ -797,7 +803,7 @@ impl SessionImpl { map: map_message, }), nonce: None, - }) + }).0 } /// Set signing session result. 
@@ -820,8 +826,8 @@ impl SessionImpl { }; } - data.result = Some(result); - core.completed.notify_all(); + data.result = Some(result.clone()); + core.completed.send(result); } /// Check if all nonces are generated. @@ -883,6 +889,8 @@ impl SessionImpl { impl ClusterSession for SessionImpl { type Id = SessionIdWithSubSession; + type CreationData = Requester; + type SuccessfulResult = Signature; fn type_name() -> &'static str { "ecdsa_signing" diff --git a/secret-store/src/key_server_cluster/client_sessions/signing_session_schnorr.rs b/secret-store/src/key_server_cluster/client_sessions/signing_session_schnorr.rs index b0881dfff07..ff901fc1581 100644 --- a/secret-store/src/key_server_cluster/client_sessions/signing_session_schnorr.rs +++ b/secret-store/src/key_server_cluster/client_sessions/signing_session_schnorr.rs @@ -16,12 +16,13 @@ use std::collections::BTreeSet; use std::sync::Arc; -use parking_lot::{Mutex, Condvar}; +use futures::Oneshot; +use parking_lot::Mutex; use ethkey::{Public, Secret}; use ethereum_types::H256; use key_server_cluster::{Error, NodeId, SessionId, Requester, SessionMeta, AclStorage, DocumentKeyShare}; use key_server_cluster::cluster::{Cluster}; -use key_server_cluster::cluster_sessions::{SessionIdWithSubSession, ClusterSession}; +use key_server_cluster::cluster_sessions::{SessionIdWithSubSession, ClusterSession, CompletionSignal}; use key_server_cluster::generation_session::{SessionImpl as GenerationSession, SessionParams as GenerationSessionParams, SessionState as GenerationSessionState}; use key_server_cluster::message::{Message, SchnorrSigningMessage, SchnorrSigningConsensusMessage, SchnorrSigningGenerationMessage, @@ -59,8 +60,8 @@ struct SessionCore { pub cluster: Arc, /// Session-level nonce. pub nonce: u64, - /// SessionImpl completion condvar. - pub completed: Condvar, + /// SessionImpl completion signal. + pub completed: CompletionSignal<(Secret, Secret)>, } /// Signing consensus session type. 
@@ -160,7 +161,10 @@ enum DelegationStatus { impl SessionImpl { /// Create new signing session. - pub fn new(params: SessionParams, requester: Option) -> Result { + pub fn new( + params: SessionParams, + requester: Option, + ) -> Result<(Self, Oneshot>), Error> { debug_assert_eq!(params.meta.threshold, params.key_share.as_ref().map(|ks| ks.threshold).unwrap_or_default()); let consensus_transport = SigningConsensusTransport { @@ -179,14 +183,15 @@ impl SessionImpl { consensus_transport: consensus_transport, })?; - Ok(SessionImpl { + let (completed, oneshot) = CompletionSignal::new(); + Ok((SessionImpl { core: SessionCore { meta: params.meta, access_key: params.access_key, key_share: params.key_share, cluster: params.cluster, nonce: params.nonce, - completed: Condvar::new(), + completed, }, data: Mutex::new(SessionData { state: SessionState::ConsensusEstablishing, @@ -197,21 +202,22 @@ impl SessionImpl { delegation_status: None, result: None, }), - }) - } - - /// Get session state. - #[cfg(test)] - pub fn state(&self) -> SessionState { - self.data.lock().state + }, oneshot)) } /// Wait for session completion. + #[cfg(test)] pub fn wait(&self) -> Result<(Secret, Secret), Error> { Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone()) .expect("wait_session returns Some if called without timeout; qed") } + /// Get session state (tests only). + #[cfg(test)] + pub fn state(&self) -> SessionState { + self.data.lock().state + } + /// Delegate session to other node. 
pub fn delegate(&self, master: NodeId, version: H256, message_hash: H256) -> Result<(), Error> { if self.core.meta.master_node_id != self.core.meta.self_node_id { @@ -277,7 +283,7 @@ impl SessionImpl { other_nodes_ids: BTreeSet::new() }), nonce: None, - }); + }).0; generation_session.initialize(Default::default(), Default::default(), false, 0, vec![self.core.meta.self_node_id.clone()].into_iter().collect::>().into())?; debug_assert_eq!(generation_session.state(), GenerationSessionState::Finished); @@ -405,7 +411,7 @@ impl SessionImpl { other_nodes_ids: other_consensus_group_nodes, }), nonce: None, - }); + }).0; generation_session.initialize(Default::default(), Default::default(), false, key_share.threshold, consensus_group.into())?; data.generation_session = Some(generation_session); @@ -445,7 +451,7 @@ impl SessionImpl { other_nodes_ids: other_consensus_group_nodes }), nonce: None, - }); + }).0; data.generation_session = Some(generation_session); data.state = SessionState::SessionKeyGeneration; } @@ -617,13 +623,15 @@ impl SessionImpl { }; } - data.result = Some(result); - core.completed.notify_all(); + data.result = Some(result.clone()); + core.completed.send(result); } } impl ClusterSession for SessionImpl { type Id = SessionIdWithSubSession; + type CreationData = Requester; + type SuccessfulResult = (Secret, Secret); fn type_name() -> &'static str { "signing" @@ -850,7 +858,7 @@ mod tests { acl_storage: Arc::new(DummyAclStorage::default()), cluster: self.0.cluster(0).view().unwrap(), nonce: 0, - }, requester).unwrap() + }, requester).unwrap().0 } pub fn init_with_version(self, key_version: Option) -> Result<(Self, Public, H256), Error> { diff --git a/secret-store/src/key_server_cluster/cluster.rs b/secret-store/src/key_server_cluster/cluster.rs index a8416a8f700..c6ffd446ee0 100644 --- a/secret-store/src/key_server_cluster/cluster.rs +++ b/secret-store/src/key_server_cluster/cluster.rs @@ -21,8 +21,8 @@ use ethkey::{Public, Signature, Random, Generator}; use 
ethereum_types::{Address, H256}; use parity_runtime::Executor; use key_server_cluster::{Error, NodeId, SessionId, Requester, AclStorage, KeyStorage, KeyServerSet, NodeKeyPair}; -use key_server_cluster::cluster_sessions::{ClusterSession, AdminSession, ClusterSessions, SessionIdWithSubSession, - ClusterSessionsContainer, SERVERS_SET_CHANGE_SESSION_ID, create_cluster_view, +use key_server_cluster::cluster_sessions::{WaitableSession, ClusterSession, AdminSession, ClusterSessions, + SessionIdWithSubSession, ClusterSessionsContainer, SERVERS_SET_CHANGE_SESSION_ID, create_cluster_view, AdminSessionCreationData, ClusterSessionsListener}; use key_server_cluster::cluster_sessions_creator::ClusterSessionCreator; use key_server_cluster::cluster_connections::{ConnectionProvider, ConnectionManager}; @@ -47,19 +47,61 @@ use key_server_cluster::cluster_connections::tests::{MessagesQueue, TestConnecti /// Cluster interface for external clients. pub trait ClusterClient: Send + Sync { /// Start new generation session. - fn new_generation_session(&self, session_id: SessionId, origin: Option
, author: Address, threshold: usize) -> Result, Error>; + fn new_generation_session( + &self, + session_id: SessionId, + origin: Option
, + author: Address, + threshold: usize, + ) -> Result, Error>; /// Start new encryption session. - fn new_encryption_session(&self, session_id: SessionId, author: Requester, common_point: Public, encrypted_point: Public) -> Result, Error>; + fn new_encryption_session( + &self, + session_id: SessionId, + author: Requester, + common_point: Public, + encrypted_point: Public, + ) -> Result, Error>; /// Start new decryption session. - fn new_decryption_session(&self, session_id: SessionId, origin: Option
, requester: Requester, version: Option, is_shadow_decryption: bool, is_broadcast_decryption: bool) -> Result, Error>; + fn new_decryption_session( + &self, + session_id: SessionId, + origin: Option
, + requester: Requester, + version: Option, + is_shadow_decryption: bool, + is_broadcast_decryption: bool, + ) -> Result, Error>; /// Start new Schnorr signing session. - fn new_schnorr_signing_session(&self, session_id: SessionId, requester: Requester, version: Option, message_hash: H256) -> Result, Error>; + fn new_schnorr_signing_session( + &self, + session_id: SessionId, + requester: Requester, + version: Option, + message_hash: H256, + ) -> Result, Error>; /// Start new ECDSA session. - fn new_ecdsa_signing_session(&self, session_id: SessionId, requester: Requester, version: Option, message_hash: H256) -> Result, Error>; + fn new_ecdsa_signing_session( + &self, + session_id: SessionId, + requester: Requester, + version: Option, + message_hash: H256, + ) -> Result, Error>; /// Start new key version negotiation session. - fn new_key_version_negotiation_session(&self, session_id: SessionId) -> Result>, Error>; + fn new_key_version_negotiation_session( + &self, + session_id: SessionId, + ) -> Result>, Error>; /// Start new servers set change session. - fn new_servers_set_change_session(&self, session_id: Option, migration_id: Option, new_nodes_set: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Result, Error>; + fn new_servers_set_change_session( + &self, + session_id: Option, + migration_id: Option, + new_nodes_set: BTreeSet, + old_set_signature: Signature, + new_set_signature: Signature, + ) -> Result, Error>; /// Listen for new generation sessions. 
fn add_generation_listener(&self, listener: Arc>); @@ -324,7 +366,10 @@ impl ClusterClientImpl { } } - fn create_key_version_negotiation_session(&self, session_id: SessionId) -> Result>, Error> { + fn create_key_version_negotiation_session( + &self, + session_id: SessionId, + ) -> Result>, Error> { let mut connected_nodes = self.data.connections.provider().connected_nodes()?; connected_nodes.insert(self.data.self_key_pair.public().clone()); @@ -332,10 +377,10 @@ impl ClusterClientImpl { let session_id = SessionIdWithSubSession::new(session_id, access_key); let cluster = create_cluster_view(self.data.self_key_pair.clone(), self.data.connections.provider(), false)?; let session = self.data.sessions.negotiation_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id.clone(), None, false, None)?; - match session.initialize(connected_nodes) { + match session.session.initialize(connected_nodes) { Ok(()) => Ok(session), Err(error) => { - self.data.sessions.negotiation_sessions.remove(&session.id()); + self.data.sessions.negotiation_sessions.remove(&session.session.id()); Err(error) } } @@ -343,29 +388,49 @@ impl ClusterClientImpl { } impl ClusterClient for ClusterClientImpl { - fn new_generation_session(&self, session_id: SessionId, origin: Option
, author: Address, threshold: usize) -> Result, Error> { + fn new_generation_session( + &self, + session_id: SessionId, + origin: Option
, + author: Address, + threshold: usize, + ) -> Result, Error> { let mut connected_nodes = self.data.connections.provider().connected_nodes()?; connected_nodes.insert(self.data.self_key_pair.public().clone()); let cluster = create_cluster_view(self.data.self_key_pair.clone(), self.data.connections.provider(), true)?; let session = self.data.sessions.generation_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id, None, false, None)?; process_initialization_result( - session.initialize(origin, author, false, threshold, connected_nodes.into()), + session.session.initialize(origin, author, false, threshold, connected_nodes.into()), session, &self.data.sessions.generation_sessions) } - fn new_encryption_session(&self, session_id: SessionId, requester: Requester, common_point: Public, encrypted_point: Public) -> Result, Error> { + fn new_encryption_session( + &self, + session_id: SessionId, + requester: Requester, + common_point: Public, + encrypted_point: Public, + ) -> Result, Error> { let mut connected_nodes = self.data.connections.provider().connected_nodes()?; connected_nodes.insert(self.data.self_key_pair.public().clone()); let cluster = create_cluster_view(self.data.self_key_pair.clone(), self.data.connections.provider(), true)?; let session = self.data.sessions.encryption_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id, None, false, None)?; process_initialization_result( - session.initialize(requester, common_point, encrypted_point), + session.session.initialize(requester, common_point, encrypted_point), session, &self.data.sessions.encryption_sessions) } - fn new_decryption_session(&self, session_id: SessionId, origin: Option
, requester: Requester, version: Option, is_shadow_decryption: bool, is_broadcast_decryption: bool) -> Result, Error> { + fn new_decryption_session( + &self, + session_id: SessionId, + origin: Option
, + requester: Requester, + version: Option, + is_shadow_decryption: bool, + is_broadcast_decryption: bool, + ) -> Result, Error> { let mut connected_nodes = self.data.connections.provider().connected_nodes()?; connected_nodes.insert(self.data.self_key_pair.public().clone()); @@ -376,12 +441,18 @@ impl ClusterClient for ClusterClientImpl { session_id.clone(), None, false, Some(requester))?; let initialization_result = match version { - Some(version) => session.initialize(origin, version, is_shadow_decryption, is_broadcast_decryption), + Some(version) => session.session.initialize(origin, version, is_shadow_decryption, is_broadcast_decryption), None => { self.create_key_version_negotiation_session(session_id.id.clone()) .map(|version_session| { - version_session.set_continue_action(ContinueAction::Decrypt(session.clone(), origin, is_shadow_decryption, is_broadcast_decryption)); - self.data.message_processor.try_continue_session(Some(version_session)); + let continue_action = ContinueAction::Decrypt( + session.session.clone(), + origin, + is_shadow_decryption, + is_broadcast_decryption, + ); + version_session.session.set_continue_action(continue_action); + self.data.message_processor.try_continue_session(Some(version_session.session)); }) }, }; @@ -391,7 +462,13 @@ impl ClusterClient for ClusterClientImpl { session, &self.data.sessions.decryption_sessions) } - fn new_schnorr_signing_session(&self, session_id: SessionId, requester: Requester, version: Option, message_hash: H256) -> Result, Error> { + fn new_schnorr_signing_session( + &self, + session_id: SessionId, + requester: Requester, + version: Option, + message_hash: H256, + ) -> Result, Error> { let mut connected_nodes = self.data.connections.provider().connected_nodes()?; connected_nodes.insert(self.data.self_key_pair.public().clone()); @@ -401,12 +478,13 @@ impl ClusterClient for ClusterClientImpl { let session = self.data.sessions.schnorr_signing_sessions.insert(cluster, 
self.data.self_key_pair.public().clone(), session_id.clone(), None, false, Some(requester))?; let initialization_result = match version { - Some(version) => session.initialize(version, message_hash), + Some(version) => session.session.initialize(version, message_hash), None => { self.create_key_version_negotiation_session(session_id.id.clone()) .map(|version_session| { - version_session.set_continue_action(ContinueAction::SchnorrSign(session.clone(), message_hash)); - self.data.message_processor.try_continue_session(Some(version_session)); + let continue_action = ContinueAction::SchnorrSign(session.session.clone(), message_hash); + version_session.session.set_continue_action(continue_action); + self.data.message_processor.try_continue_session(Some(version_session.session)); }) }, }; @@ -416,7 +494,13 @@ impl ClusterClient for ClusterClientImpl { session, &self.data.sessions.schnorr_signing_sessions) } - fn new_ecdsa_signing_session(&self, session_id: SessionId, requester: Requester, version: Option, message_hash: H256) -> Result, Error> { + fn new_ecdsa_signing_session( + &self, + session_id: SessionId, + requester: Requester, + version: Option, + message_hash: H256, + ) -> Result, Error> { let mut connected_nodes = self.data.connections.provider().connected_nodes()?; connected_nodes.insert(self.data.self_key_pair.public().clone()); @@ -426,12 +510,13 @@ impl ClusterClient for ClusterClientImpl { let session = self.data.sessions.ecdsa_signing_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id.clone(), None, false, Some(requester))?; let initialization_result = match version { - Some(version) => session.initialize(version, message_hash), + Some(version) => session.session.initialize(version, message_hash), None => { self.create_key_version_negotiation_session(session_id.id.clone()) .map(|version_session| { - version_session.set_continue_action(ContinueAction::EcdsaSign(session.clone(), message_hash)); - 
self.data.message_processor.try_continue_session(Some(version_session)); + let continue_action = ContinueAction::EcdsaSign(session.session.clone(), message_hash); + version_session.session.set_continue_action(continue_action); + self.data.message_processor.try_continue_session(Some(version_session.session)); }) }, }; @@ -441,12 +526,21 @@ impl ClusterClient for ClusterClientImpl { session, &self.data.sessions.ecdsa_signing_sessions) } - fn new_key_version_negotiation_session(&self, session_id: SessionId) -> Result>, Error> { - let session = self.create_key_version_negotiation_session(session_id)?; - Ok(session) + fn new_key_version_negotiation_session( + &self, + session_id: SessionId, + ) -> Result>, Error> { + self.create_key_version_negotiation_session(session_id) } - fn new_servers_set_change_session(&self, session_id: Option, migration_id: Option, new_nodes_set: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Result, Error> { + fn new_servers_set_change_session( + &self, + session_id: Option, + migration_id: Option, + new_nodes_set: BTreeSet, + old_set_signature: Signature, + new_set_signature: Signature, + ) -> Result, Error> { new_servers_set_change_session( self.data.self_key_pair.clone(), &self.data.sessions, @@ -508,7 +602,7 @@ pub fn new_servers_set_change_session( connections: Arc, servers_set_change_creator_connector: Arc, params: ServersSetChangeParams, -) -> Result, Error> { +) -> Result, Error> { let session_id = match params.session_id { Some(session_id) if session_id == *SERVERS_SET_CHANGE_SESSION_ID => session_id, Some(_) => return Err(Error::InvalidMessage), @@ -519,11 +613,11 @@ pub fn new_servers_set_change_session( let creation_data = AdminSessionCreationData::ServersSetChange(params.migration_id, params.new_nodes_set.clone()); let session = sessions.admin_sessions .insert(cluster, *self_key_pair.public(), session_id, None, true, Some(creation_data))?; - let initialization_result = 
session.as_servers_set_change().expect("servers set change session is created; qed") + let initialization_result = session.session.as_servers_set_change().expect("servers set change session is created; qed") .initialize(params.new_nodes_set, params.old_set_signature, params.new_set_signature); if initialization_result.is_ok() { - servers_set_change_creator_connector.set_key_servers_set_change_session(session.clone()); + servers_set_change_creator_connector.set_key_servers_set_change_session(session.session.clone()); } process_initialization_result( @@ -531,23 +625,23 @@ pub fn new_servers_set_change_session( session, &sessions.admin_sessions) } -fn process_initialization_result( +fn process_initialization_result( result: Result<(), Error>, - session: Arc, - sessions: &ClusterSessionsContainer -) -> Result, Error> + session: WaitableSession, + sessions: &ClusterSessionsContainer +) -> Result, Error> where S: ClusterSession, - SC: ClusterSessionCreator + SC: ClusterSessionCreator { match result { - Ok(()) if session.is_finished() => { - sessions.remove(&session.id()); + Ok(()) if session.session.is_finished() => { + sessions.remove(&session.session.id()); Ok(session) }, Ok(()) => Ok(session), Err(error) => { - sessions.remove(&session.id()); + sessions.remove(&session.session.id()); Err(error) }, } @@ -558,6 +652,7 @@ pub mod tests { use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; use std::collections::{BTreeMap, BTreeSet, VecDeque}; + use futures::Future; use parking_lot::{Mutex, RwLock}; use ethereum_types::{Address, H256}; use ethkey::{Random, Generator, Public, Signature, sign}; @@ -567,7 +662,8 @@ pub mod tests { use key_server_cluster::cluster::{new_test_cluster, Cluster, ClusterCore, ClusterConfiguration, ClusterClient}; use key_server_cluster::cluster_connections::ConnectionManager; use key_server_cluster::cluster_connections::tests::{MessagesQueue, TestConnections}; - use key_server_cluster::cluster_sessions::{ClusterSession, 
ClusterSessions, AdminSession, ClusterSessionsListener}; + use key_server_cluster::cluster_sessions::{WaitableSession, ClusterSession, ClusterSessions, AdminSession, + ClusterSessionsListener}; use key_server_cluster::generation_session::{SessionImpl as GenerationSession, SessionState as GenerationSessionState}; use key_server_cluster::decryption_session::{SessionImpl as DecryptionSession}; @@ -595,17 +691,71 @@ pub mod tests { } impl ClusterClient for DummyClusterClient { - fn new_generation_session(&self, _session_id: SessionId, _origin: Option
, _author: Address, _threshold: usize) -> Result, Error> { + fn new_generation_session( + &self, + _session_id: SessionId, + _origin: Option
, + _author: Address, + _threshold: usize, + ) -> Result, Error> { self.generation_requests_count.fetch_add(1, Ordering::Relaxed); Err(Error::Internal("test-error".into())) } - fn new_encryption_session(&self, _session_id: SessionId, _requester: Requester, _common_point: Public, _encrypted_point: Public) -> Result, Error> { unimplemented!("test-only") } - fn new_decryption_session(&self, _session_id: SessionId, _origin: Option
, _requester: Requester, _version: Option, _is_shadow_decryption: bool, _is_broadcast_session: bool) -> Result, Error> { unimplemented!("test-only") } - fn new_schnorr_signing_session(&self, _session_id: SessionId, _requester: Requester, _version: Option, _message_hash: H256) -> Result, Error> { unimplemented!("test-only") } - fn new_ecdsa_signing_session(&self, _session_id: SessionId, _requester: Requester, _version: Option, _message_hash: H256) -> Result, Error> { unimplemented!("test-only") } + fn new_encryption_session( + &self, + _session_id: SessionId, + _requester: Requester, + _common_point: Public, + _encrypted_point: Public, + ) -> Result, Error> { + unimplemented!("test-only") + } + fn new_decryption_session( + &self, + _session_id: SessionId, + _origin: Option
, + _requester: Requester, + _version: Option, + _is_shadow_decryption: bool, + _is_broadcast_session: bool, + ) -> Result, Error> { + unimplemented!("test-only") + } + fn new_schnorr_signing_session( + &self, + _session_id: SessionId, + _requester: Requester, + _version: Option, + _message_hash: H256, + ) -> Result, Error> { + unimplemented!("test-only") + } + fn new_ecdsa_signing_session( + &self, + _session_id: SessionId, + _requester: Requester, + _version: Option, + _message_hash: H256, + ) -> Result, Error> { + unimplemented!("test-only") + } - fn new_key_version_negotiation_session(&self, _session_id: SessionId) -> Result>, Error> { unimplemented!("test-only") } - fn new_servers_set_change_session(&self, _session_id: Option, _migration_id: Option, _new_nodes_set: BTreeSet, _old_set_signature: Signature, _new_set_signature: Signature) -> Result, Error> { unimplemented!("test-only") } + fn new_key_version_negotiation_session( + &self, + _session_id: SessionId, + ) -> Result>, Error> { + unimplemented!("test-only") + } + fn new_servers_set_change_session( + &self, + _session_id: Option, + _migration_id: Option, + _new_nodes_set: BTreeSet, + _old_set_signature: Signature, + _new_set_signature: Signature, + ) -> Result, Error> { + unimplemented!("test-only") + } fn add_generation_listener(&self, _listener: Arc>) {} fn add_decryption_listener(&self, _listener: Arc>) {} @@ -897,7 +1047,7 @@ pub mod tests { // start && wait for generation session to fail let session = ml.cluster(0).client() - .new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap(); + .new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap().session; ml.loop_until(|| session.joint_public_and_secret().is_some() && ml.cluster(0).client().generation_session(&SessionId::default()).is_none()); assert!(session.joint_public_and_secret().unwrap().is_err()); @@ -924,7 +1074,7 @@ pub mod tests { // start && wait for generation 
session to fail let session = ml.cluster(0).client() - .new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap(); + .new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap().session; ml.loop_until(|| session.joint_public_and_secret().is_some() && ml.cluster(0).client().generation_session(&SessionId::default()).is_none()); assert!(session.joint_public_and_secret().unwrap().is_err()); @@ -949,7 +1099,7 @@ pub mod tests { // start && wait for generation session to complete let session = ml.cluster(0).client() - .new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap(); + .new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap().session; ml.loop_until(|| (session.state() == GenerationSessionState::Finished || session.state() == GenerationSessionState::Failed) && ml.cluster(0).client().generation_session(&SessionId::default()).is_none()); @@ -1017,7 +1167,7 @@ pub mod tests { // start && wait for generation session to complete let session = ml.cluster(0).client(). 
- new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap(); + new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap().session; ml.loop_until(|| (session.state() == GenerationSessionState::Finished || session.state() == GenerationSessionState::Failed) && ml.cluster(0).client().generation_session(&SessionId::default()).is_none()); @@ -1035,7 +1185,7 @@ pub mod tests { ml.loop_until(|| session.is_finished() && (0..3).all(|i| ml.cluster(i).data.sessions.schnorr_signing_sessions.is_empty())); - session0.wait().unwrap(); + session0.into_wait_future().wait().unwrap(); // and try to sign message with generated key using node that has no key share let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); @@ -1045,7 +1195,7 @@ pub mod tests { ml.loop_until(|| session.is_finished() && (0..3).all(|i| ml.cluster(i).data.sessions.schnorr_signing_sessions.is_empty())); - session2.wait().unwrap(); + session2.into_wait_future().wait().unwrap(); // now remove share from node1 ml.cluster(1).data.config.key_storage.remove(&Default::default()).unwrap(); @@ -1057,7 +1207,7 @@ pub mod tests { let session = ml.cluster(0).data.sessions.schnorr_signing_sessions.first().unwrap(); ml.loop_until(|| session.is_finished()); - session1.wait().unwrap_err(); + session1.into_wait_future().wait().unwrap_err(); } #[test] @@ -1067,7 +1217,7 @@ pub mod tests { // start && wait for generation session to complete let session = ml.cluster(0).client() - .new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap(); + .new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap().session; ml.loop_until(|| (session.state() == GenerationSessionState::Finished || session.state() == GenerationSessionState::Failed) && ml.cluster(0).client().generation_session(&SessionId::default()).is_none()); @@ -1085,7 +1235,7 @@ pub mod tests { 
ml.loop_until(|| session.is_finished() && (0..3).all(|i| ml.cluster(i).data.sessions.ecdsa_signing_sessions.is_empty())); - session0.wait().unwrap(); + session0.into_wait_future().wait().unwrap(); // and try to sign message with generated key using node that has no key share let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); @@ -1094,7 +1244,7 @@ pub mod tests { let session = ml.cluster(2).data.sessions.ecdsa_signing_sessions.first().unwrap(); ml.loop_until(|| session.is_finished() && (0..3).all(|i| ml.cluster(i).data.sessions.ecdsa_signing_sessions.is_empty())); - session2.wait().unwrap(); + session2.into_wait_future().wait().unwrap(); // now remove share from node1 ml.cluster(1).data.config.key_storage.remove(&Default::default()).unwrap(); @@ -1105,6 +1255,6 @@ pub mod tests { .new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()).unwrap(); let session = ml.cluster(0).data.sessions.ecdsa_signing_sessions.first().unwrap(); ml.loop_until(|| session.is_finished()); - session1.wait().unwrap_err(); + session1.into_wait_future().wait().unwrap_err(); } } diff --git a/secret-store/src/key_server_cluster/cluster_message_processor.rs b/secret-store/src/key_server_cluster/cluster_message_processor.rs index b4ba5ef03b2..0624d50b13b 100644 --- a/secret-store/src/key_server_cluster/cluster_message_processor.rs +++ b/secret-store/src/key_server_cluster/cluster_message_processor.rs @@ -72,9 +72,9 @@ impl SessionsMessageProcessor { } /// Process single session message from connection. - fn process_message, D>( + fn process_message>( &self, - sessions: &ClusterSessionsContainer, + sessions: &ClusterSessionsContainer, connection: Arc, mut message: Message, ) -> Option> @@ -151,9 +151,9 @@ impl SessionsMessageProcessor { } /// Get or insert new session. 
- fn prepare_session, D>( + fn prepare_session>( &self, - sessions: &ClusterSessionsContainer, + sessions: &ClusterSessionsContainer, sender: &NodeId, message: &Message ) -> Result, Error> @@ -192,7 +192,7 @@ impl SessionsMessageProcessor { let nonce = Some(message.session_nonce().ok_or(Error::InvalidMessage)?); let exclusive = message.is_exclusive_session_message(); - sessions.insert(cluster, master, session_id, nonce, exclusive, creation_data) + sessions.insert(cluster, master, session_id, nonce, exclusive, creation_data).map(|s| s.session) }, } } @@ -273,8 +273,8 @@ impl MessageProcessor for SessionsMessageProcessor { let is_master_node = meta.self_node_id == meta.master_node_id; if is_master_node && session.is_finished() { self.sessions.negotiation_sessions.remove(&session.id()); - match session.wait() { - Ok(Some((version, master))) => match session.take_continue_action() { + match session.result() { + Some(Ok(Some((version, master)))) => match session.take_continue_action() { Some(ContinueAction::Decrypt( session, origin, is_shadow_decryption, is_broadcast_decryption )) => { @@ -317,10 +317,7 @@ impl MessageProcessor for SessionsMessageProcessor { }, None => (), }, - Ok(None) => unreachable!("is_master_node; session is finished; - negotiation version always finished with result on master; - qed"), - Err(error) => match session.take_continue_action() { + Some(Err(error)) => match session.take_continue_action() { Some(ContinueAction::Decrypt(session, _, _, _)) => { session.on_session_error(&meta.self_node_id, error); self.sessions.decryption_sessions.remove(&session.id()); @@ -335,6 +332,9 @@ impl MessageProcessor for SessionsMessageProcessor { }, None => (), }, + None | Some(Ok(None)) => unreachable!("is_master_node; session is finished; + negotiation version always finished with result on master; + qed"), } } } @@ -352,6 +352,6 @@ impl MessageProcessor for SessionsMessageProcessor { self.connections.clone(), self.servers_set_change_creator_connector.clone(), 
params, - ) + ).map(|s| s.session) } } diff --git a/secret-store/src/key_server_cluster/cluster_sessions.rs b/secret-store/src/key_server_cluster/cluster_sessions.rs index 53eec133467..888499202db 100644 --- a/secret-store/src/key_server_cluster/cluster_sessions.rs +++ b/secret-store/src/key_server_cluster/cluster_sessions.rs @@ -18,10 +18,11 @@ use std::time::{Duration, Instant}; use std::sync::{Arc, Weak}; use std::sync::atomic::AtomicBool; use std::collections::{VecDeque, BTreeMap, BTreeSet}; +use futures::{oneshot, Oneshot, Complete, Future}; use parking_lot::{Mutex, RwLock, Condvar}; use ethereum_types::H256; use ethkey::Secret; -use key_server_cluster::{Error, NodeId, SessionId, Requester, NodeKeyPair}; +use key_server_cluster::{Error, NodeId, SessionId, NodeKeyPair}; use key_server_cluster::cluster::{Cluster, ClusterConfiguration, ClusterView}; use key_server_cluster::cluster_connections::ConnectionProvider; use key_server_cluster::connection_trigger::ServersSetChangeSessionCreatorConnector; @@ -68,6 +69,10 @@ pub struct SessionIdWithSubSession { pub trait ClusterSession { /// Session identifier type. type Id: ::std::fmt::Debug + Ord + Clone; + /// Session creation data type. + type CreationData; + /// Session (successful) result type. + type SuccessfulResult: Send + 'static; /// Session type name. fn type_name() -> &'static str; @@ -85,15 +90,22 @@ pub trait ClusterSession { fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error>; /// 'Wait for session completion' helper. 
- fn wait_session Option>>(completion_event: &Condvar, session_data: &Mutex, timeout: Option, result_reader: F) -> Option> { + #[cfg(test)] + fn wait_session Option>>( + completion: &CompletionSignal, + session_data: &Mutex, + timeout: Option, + result_reader: F + ) -> Option> { let mut locked_data = session_data.lock(); match result_reader(&locked_data) { Some(result) => Some(result), None => { + let completion_condvar = completion.completion_condvar.as_ref().expect("created in test mode"); match timeout { - None => completion_event.wait(&mut locked_data), + None => completion_condvar.wait(&mut locked_data), Some(timeout) => { - completion_event.wait_for(&mut locked_data, timeout); + completion_condvar.wait_for(&mut locked_data, timeout); }, } @@ -103,6 +115,23 @@ pub trait ClusterSession { } } +/// Waitable cluster session. +pub struct WaitableSession { + /// Session handle. + pub session: Arc, + /// Session result oneshot. + pub oneshot: Oneshot>, +} + +/// Session completion signal. +pub struct CompletionSignal { + /// Completion future. + pub completion_future: Mutex>>>, + + /// Completion condvar. + pub completion_condvar: Option, +} + /// Administrative session. pub enum AdminSession { /// Share add session. @@ -122,19 +151,22 @@ pub enum AdminSessionCreationData { /// Active sessions on this cluster. pub struct ClusterSessions { /// Key generation sessions. - pub generation_sessions: ClusterSessionsContainer, + pub generation_sessions: ClusterSessionsContainer, /// Encryption sessions. - pub encryption_sessions: ClusterSessionsContainer, + pub encryption_sessions: ClusterSessionsContainer, /// Decryption sessions. - pub decryption_sessions: ClusterSessionsContainer, + pub decryption_sessions: ClusterSessionsContainer, /// Schnorr signing sessions. - pub schnorr_signing_sessions: ClusterSessionsContainer, + pub schnorr_signing_sessions: ClusterSessionsContainer, /// ECDSA signing sessions. 
- pub ecdsa_signing_sessions: ClusterSessionsContainer, + pub ecdsa_signing_sessions: ClusterSessionsContainer, /// Key version negotiation sessions. - pub negotiation_sessions: ClusterSessionsContainer, KeyVersionNegotiationSessionCreator, ()>, + pub negotiation_sessions: ClusterSessionsContainer< + KeyVersionNegotiationSessionImpl, + KeyVersionNegotiationSessionCreator + >, /// Administrative sessions. - pub admin_sessions: ClusterSessionsContainer, + pub admin_sessions: ClusterSessionsContainer, /// Self node id. self_node_id: NodeId, /// Creator core. @@ -150,7 +182,7 @@ pub trait ClusterSessionsListener: Send + Sync { } /// Active sessions container. -pub struct ClusterSessionsContainer, D> { +pub struct ClusterSessionsContainer> { /// Sessions creator. pub creator: SC, /// Active sessions. @@ -161,8 +193,6 @@ pub struct ClusterSessionsContainer>, /// Do not actually remove sessions. preserve_sessions: bool, - /// Phantom data. - _pd: ::std::marker::PhantomData, } /// Session and its message queue. 
@@ -279,7 +309,7 @@ impl ClusterSessions { } } -impl ClusterSessionsContainer where S: ClusterSession, SC: ClusterSessionCreator { +impl ClusterSessionsContainer where S: ClusterSession, SC: ClusterSessionCreator { pub fn new(creator: SC, container_state: Arc>) -> Self { ClusterSessionsContainer { creator: creator, @@ -287,7 +317,6 @@ impl ClusterSessionsContainer where S: ClusterSession, SC: C listeners: Mutex::new(Vec::new()), container_state: container_state, preserve_sessions: false, - _pd: Default::default(), } } @@ -316,7 +345,15 @@ impl ClusterSessionsContainer where S: ClusterSession, SC: C self.sessions.read().values().nth(0).map(|s| s.session.clone()) } - pub fn insert(&self, cluster: Arc, master: NodeId, session_id: S::Id, session_nonce: Option, is_exclusive_session: bool, creation_data: Option) -> Result, Error> { + pub fn insert( + &self, + cluster: Arc, + master: NodeId, + session_id: S::Id, + session_nonce: Option, + is_exclusive_session: bool, + creation_data: Option, + ) -> Result, Error> { let mut sessions = self.sessions.write(); if sessions.contains_key(&session_id) { return Err(Error::DuplicateSessionId); @@ -335,11 +372,11 @@ impl ClusterSessionsContainer where S: ClusterSession, SC: C cluster_view: cluster, last_keep_alive_time: Instant::now(), last_message_time: Instant::now(), - session: session.clone(), + session: session.session.clone(), queue: VecDeque::new(), }; sessions.insert(session_id, queued_session); - self.notify_listeners(|l| l.on_session_inserted(session.clone())); + self.notify_listeners(|l| l.on_session_inserted(session.session.clone())); Ok(session) } @@ -419,7 +456,12 @@ impl ClusterSessionsContainer where S: ClusterSession, SC: C } } -impl ClusterSessionsContainer where S: ClusterSession, SC: ClusterSessionCreator, SessionId: From { +impl ClusterSessionsContainer + where + S: ClusterSession, + SC: ClusterSessionCreator, + SessionId: From, +{ pub fn send_keep_alive(&self, session_id: &S::Id, self_node_id: &NodeId) { if let 
Some(session) = self.sessions.write().get_mut(session_id) { let now = Instant::now(); @@ -521,6 +563,8 @@ impl AdminSession { impl ClusterSession for AdminSession { type Id = SessionId; + type CreationData = AdminSessionCreationData; + type SuccessfulResult = (); fn type_name() -> &'static str { "admin" @@ -569,6 +613,40 @@ impl ClusterSession for AdminSession { } } +impl WaitableSession { + pub fn new(session: S, oneshot: Oneshot>) -> Self { + WaitableSession { + session: Arc::new(session), + oneshot, + } + } + + pub fn into_wait_future(self) -> Box + Send> { + Box::new(self.oneshot + .map_err(|e| Error::Internal(e.to_string())) + .and_then(|res| res)) + } +} + +impl CompletionSignal { + pub fn new() -> (Self, Oneshot>) { + let (complete, oneshot) = oneshot(); + let completion_condvar = if cfg!(test) { Some(Condvar::new()) } else { None }; + (CompletionSignal { + completion_future: Mutex::new(Some(complete)), + completion_condvar, + }, oneshot) + } + + pub fn send(&self, result: Result) { + let completion_future = ::std::mem::replace(&mut *self.completion_future.lock(), None); + completion_future.map(|c| c.send(result)); + if let Some(ref completion_condvar) = self.completion_condvar { + completion_condvar.notify_all(); + } + } +} + pub fn create_cluster_view(self_key_pair: Arc, connections: Arc, requires_all_connections: bool) -> Result, Error> { let mut connected_nodes = connections.connected_nodes()?; let disconnected_nodes = connections.disconnected_nodes(); diff --git a/secret-store/src/key_server_cluster/cluster_sessions_creator.rs b/secret-store/src/key_server_cluster/cluster_sessions_creator.rs index 23a3657c42b..20d007d6f4b 100644 --- a/secret-store/src/key_server_cluster/cluster_sessions_creator.rs +++ b/secret-store/src/key_server_cluster/cluster_sessions_creator.rs @@ -22,7 +22,8 @@ use ethkey::Public; use key_server_cluster::{Error, NodeId, SessionId, Requester, AclStorage, KeyStorage, DocumentKeyShare, SessionMeta}; use 
key_server_cluster::cluster::{Cluster, ClusterConfiguration}; use key_server_cluster::connection_trigger::ServersSetChangeSessionCreatorConnector; -use key_server_cluster::cluster_sessions::{ClusterSession, SessionIdWithSubSession, AdminSession, AdminSessionCreationData}; +use key_server_cluster::cluster_sessions::{WaitableSession, ClusterSession, SessionIdWithSubSession, + AdminSession, AdminSessionCreationData}; use key_server_cluster::message::{self, Message, DecryptionMessage, SchnorrSigningMessage, ConsensusMessageOfShareAdd, ShareAddMessage, ServersSetChangeMessage, ConsensusMessage, ConsensusMessageWithServersSet, EcdsaSigningMessage}; use key_server_cluster::generation_session::{SessionImpl as GenerationSessionImpl, SessionParams as GenerationSessionParams}; @@ -43,9 +44,9 @@ use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVers use key_server_cluster::admin_sessions::ShareChangeSessionMeta; /// Generic cluster session creator. -pub trait ClusterSessionCreator { +pub trait ClusterSessionCreator { /// Get creation data from message. - fn creation_data_from_message(_message: &Message) -> Result, Error> { + fn creation_data_from_message(_message: &Message) -> Result, Error> { Ok(None) } @@ -53,7 +54,14 @@ pub trait ClusterSessionCreator { fn make_error_message(sid: S::Id, nonce: u64, err: Error) -> Message; /// Create cluster session. - fn create(&self, cluster: Arc, master: NodeId, nonce: Option, id: S::Id, creation_data: Option) -> Result, Error>; + fn create( + &self, + cluster: Arc, + master: NodeId, + nonce: Option, + id: S::Id, + creation_data: Option, + ) -> Result, Error>; } /// Message with session id. 
@@ -134,7 +142,7 @@ impl GenerationSessionCreator { } } -impl ClusterSessionCreator for GenerationSessionCreator { +impl ClusterSessionCreator for GenerationSessionCreator { fn make_error_message(sid: SessionId, nonce: u64, err: Error) -> Message { message::Message::Generation(message::GenerationMessage::SessionError(message::SessionError { session: sid.into(), @@ -143,27 +151,33 @@ impl ClusterSessionCreator for GenerationSessionCreat })) } - fn create(&self, cluster: Arc, master: NodeId, nonce: Option, id: SessionId, _creation_data: Option<()>) -> Result, Error> { + fn create( + &self, + cluster: Arc, + master: NodeId, + nonce: Option, + id: SessionId, + _creation_data: Option<()>, + ) -> Result, Error> { // check that there's no finished encryption session with the same id if self.core.key_storage.contains(&id) { return Err(Error::ServerKeyAlreadyGenerated); } let nonce = self.core.check_session_nonce(&master, nonce)?; - Ok(GenerationSessionImpl::new(GenerationSessionParams { + let (session, oneshot) = GenerationSessionImpl::new(GenerationSessionParams { id: id.clone(), self_node_id: self.core.self_node_id.clone(), key_storage: Some(self.core.key_storage.clone()), cluster: cluster, nonce: Some(nonce), - })) - .map(|session| { - if self.make_faulty_generation_sessions.load(Ordering::Relaxed) { - session.simulate_faulty_behaviour(); - } - session - }) - .map(Arc::new) + }); + + if self.make_faulty_generation_sessions.load(Ordering::Relaxed) { + session.simulate_faulty_behaviour(); + } + + Ok(WaitableSession::new(session, oneshot)) } } @@ -173,7 +187,7 @@ pub struct EncryptionSessionCreator { pub core: Arc, } -impl ClusterSessionCreator for EncryptionSessionCreator { +impl ClusterSessionCreator for EncryptionSessionCreator { fn make_error_message(sid: SessionId, nonce: u64, err: Error) -> Message { message::Message::Encryption(message::EncryptionMessage::EncryptionSessionError(message::EncryptionSessionError { session: sid.into(), @@ -182,17 +196,26 @@ impl 
ClusterSessionCreator for EncryptionSessionCreat })) } - fn create(&self, cluster: Arc, master: NodeId, nonce: Option, id: SessionId, _creation_data: Option<()>) -> Result, Error> { + fn create( + &self, + cluster: Arc, + master: NodeId, + nonce: Option, + id: SessionId, + _creation_data: Option<()>, + ) -> Result, Error> { let encrypted_data = self.core.read_key_share(&id)?; let nonce = self.core.check_session_nonce(&master, nonce)?; - Ok(Arc::new(EncryptionSessionImpl::new(EncryptionSessionParams { + let (session, oneshot) = EncryptionSessionImpl::new(EncryptionSessionParams { id: id, self_node_id: self.core.self_node_id.clone(), encrypted_data: encrypted_data, key_storage: self.core.key_storage.clone(), cluster: cluster, nonce: nonce, - })?)) + })?; + + Ok(WaitableSession::new(session, oneshot)) } } @@ -202,7 +225,7 @@ pub struct DecryptionSessionCreator { pub core: Arc, } -impl ClusterSessionCreator for DecryptionSessionCreator { +impl ClusterSessionCreator for DecryptionSessionCreator { fn creation_data_from_message(message: &Message) -> Result, Error> { match *message { Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(ref message)) => match &message.message { @@ -223,10 +246,17 @@ impl ClusterSessionCreator for DecryptionSessi })) } - fn create(&self, cluster: Arc, master: NodeId, nonce: Option, id: SessionIdWithSubSession, requester: Option) -> Result, Error> { + fn create( + &self, + cluster: Arc, + master: NodeId, + nonce: Option, + id: SessionIdWithSubSession, + requester: Option, + ) -> Result, Error> { let encrypted_data = self.core.read_key_share(&id.id)?; let nonce = self.core.check_session_nonce(&master, nonce)?; - Ok(Arc::new(DecryptionSessionImpl::new(DecryptionSessionParams { + let (session, oneshot) = DecryptionSessionImpl::new(DecryptionSessionParams { meta: SessionMeta { id: id.id, self_node_id: self.core.self_node_id.clone(), @@ -240,7 +270,9 @@ impl ClusterSessionCreator for DecryptionSessi acl_storage: 
self.core.acl_storage.clone(), cluster: cluster, nonce: nonce, - }, requester)?)) + }, requester)?; + + Ok(WaitableSession::new(session, oneshot)) } } @@ -250,7 +282,7 @@ pub struct SchnorrSigningSessionCreator { pub core: Arc, } -impl ClusterSessionCreator for SchnorrSigningSessionCreator { +impl ClusterSessionCreator for SchnorrSigningSessionCreator { fn creation_data_from_message(message: &Message) -> Result, Error> { match *message { Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningConsensusMessage(ref message)) => match &message.message { @@ -271,10 +303,17 @@ impl ClusterSessionCreator for SchnorrSign })) } - fn create(&self, cluster: Arc, master: NodeId, nonce: Option, id: SessionIdWithSubSession, requester: Option) -> Result, Error> { + fn create( + &self, + cluster: Arc, + master: NodeId, + nonce: Option, + id: SessionIdWithSubSession, + requester: Option, + ) -> Result, Error> { let encrypted_data = self.core.read_key_share(&id.id)?; let nonce = self.core.check_session_nonce(&master, nonce)?; - Ok(Arc::new(SchnorrSigningSessionImpl::new(SchnorrSigningSessionParams { + let (session, oneshot) = SchnorrSigningSessionImpl::new(SchnorrSigningSessionParams { meta: SessionMeta { id: id.id, self_node_id: self.core.self_node_id.clone(), @@ -288,7 +327,8 @@ impl ClusterSessionCreator for SchnorrSign acl_storage: self.core.acl_storage.clone(), cluster: cluster, nonce: nonce, - }, requester)?)) + }, requester)?; + Ok(WaitableSession::new(session, oneshot)) } } @@ -298,7 +338,7 @@ pub struct EcdsaSigningSessionCreator { pub core: Arc, } -impl ClusterSessionCreator for EcdsaSigningSessionCreator { +impl ClusterSessionCreator for EcdsaSigningSessionCreator { fn creation_data_from_message(message: &Message) -> Result, Error> { match *message { Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningConsensusMessage(ref message)) => match &message.message { @@ -319,10 +359,10 @@ impl ClusterSessionCreator for EcdsaSigningS })) } - fn create(&self, cluster: Arc, 
master: NodeId, nonce: Option, id: SessionIdWithSubSession, requester: Option) -> Result, Error> { + fn create(&self, cluster: Arc, master: NodeId, nonce: Option, id: SessionIdWithSubSession, requester: Option) -> Result, Error> { let encrypted_data = self.core.read_key_share(&id.id)?; let nonce = self.core.check_session_nonce(&master, nonce)?; - Ok(Arc::new(EcdsaSigningSessionImpl::new(EcdsaSigningSessionParams { + let (session, oneshot) = EcdsaSigningSessionImpl::new(EcdsaSigningSessionParams { meta: SessionMeta { id: id.id, self_node_id: self.core.self_node_id.clone(), @@ -336,7 +376,9 @@ impl ClusterSessionCreator for EcdsaSigningS acl_storage: self.core.acl_storage.clone(), cluster: cluster, nonce: nonce, - }, requester)?)) + }, requester)?; + + Ok(WaitableSession::new(session, oneshot)) } } @@ -346,7 +388,7 @@ pub struct KeyVersionNegotiationSessionCreator { pub core: Arc, } -impl ClusterSessionCreator, ()> for KeyVersionNegotiationSessionCreator { +impl ClusterSessionCreator> for KeyVersionNegotiationSessionCreator { fn make_error_message(sid: SessionIdWithSubSession, nonce: u64, err: Error) -> Message { message::Message::KeyVersionNegotiation(message::KeyVersionNegotiationMessage::KeyVersionsError(message::KeyVersionsError { session: sid.id.into(), @@ -359,14 +401,21 @@ impl ClusterSessionCreator, master: NodeId, nonce: Option, id: SessionIdWithSubSession, _creation_data: Option<()>) -> Result>, Error> { + fn create( + &self, + cluster: Arc, + master: NodeId, + nonce: Option, + id: SessionIdWithSubSession, + _creation_data: Option<()>, + ) -> Result>, Error> { let configured_nodes_count = cluster.configured_nodes_count(); let connected_nodes_count = cluster.connected_nodes_count(); let encrypted_data = self.core.read_key_share(&id.id)?; let nonce = self.core.check_session_nonce(&master, nonce)?; let computer = Arc::new(FastestResultKeyVersionsResultComputer::new(self.core.self_node_id.clone(), encrypted_data.as_ref(), configured_nodes_count, 
configured_nodes_count)); - Ok(Arc::new(KeyVersionNegotiationSessionImpl::new(KeyVersionNegotiationSessionParams { + let (session, oneshot) = KeyVersionNegotiationSessionImpl::new(KeyVersionNegotiationSessionParams { meta: ShareChangeSessionMeta { id: id.id.clone(), self_node_id: self.core.self_node_id.clone(), @@ -384,7 +433,8 @@ impl ClusterSessionCreator, } -impl ClusterSessionCreator for AdminSessionCreator { +impl ClusterSessionCreator for AdminSessionCreator { fn creation_data_from_message(message: &Message) -> Result, Error> { match *message { Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref message)) => match &message.message { @@ -424,11 +474,18 @@ impl ClusterSessionCreator for AdminSess })) } - fn create(&self, cluster: Arc, master: NodeId, nonce: Option, id: SessionId, creation_data: Option) -> Result, Error> { + fn create( + &self, + cluster: Arc, + master: NodeId, + nonce: Option, + id: SessionId, + creation_data: Option, + ) -> Result, Error> { let nonce = self.core.check_session_nonce(&master, nonce)?; - Ok(Arc::new(match creation_data { + match creation_data { Some(AdminSessionCreationData::ShareAdd(version)) => { - AdminSession::ShareAdd(ShareAddSessionImpl::new(ShareAddSessionParams { + let (session, oneshot) = ShareAddSessionImpl::new(ShareAddSessionParams { meta: ShareChangeSessionMeta { id: id.clone(), self_node_id: self.core.self_node_id.clone(), @@ -440,13 +497,14 @@ impl ClusterSessionCreator for AdminSess key_storage: self.core.key_storage.clone(), nonce: nonce, admin_public: Some(self.admin_public.clone().ok_or(Error::AccessDenied)?), - })?) 
+ })?; + Ok(WaitableSession::new(AdminSession::ShareAdd(session), oneshot)) }, Some(AdminSessionCreationData::ServersSetChange(migration_id, new_nodes_set)) => { let admin_public = self.servers_set_change_session_creator_connector.admin_public(migration_id.as_ref(), new_nodes_set) .map_err(|_| Error::AccessDenied)?; - AdminSession::ServersSetChange(ServersSetChangeSessionImpl::new(ServersSetChangeSessionParams { + let (session, oneshot) = ServersSetChangeSessionImpl::new(ServersSetChangeSessionParams { meta: ShareChangeSessionMeta { id: id.clone(), self_node_id: self.core.self_node_id.clone(), @@ -460,10 +518,11 @@ impl ClusterSessionCreator for AdminSess all_nodes_set: cluster.nodes(), admin_public: admin_public, migration_id: migration_id, - })?) + })?; + Ok(WaitableSession::new(AdminSession::ServersSetChange(session), oneshot)) }, None => unreachable!("expected to call with non-empty creation data; qed"), - })) + } } } diff --git a/secret-store/src/key_server_cluster/connection_trigger_with_migration.rs b/secret-store/src/key_server_cluster/connection_trigger_with_migration.rs index ba11ea60c85..b4dcfad634c 100644 --- a/secret-store/src/key_server_cluster/connection_trigger_with_migration.rs +++ b/secret-store/src/key_server_cluster/connection_trigger_with_migration.rs @@ -324,9 +324,10 @@ fn session_state(session: Option>) -> SessionState { session .and_then(|s| match s.as_servers_set_change() { Some(s) if !s.is_finished() => Some(SessionState::Active(s.migration_id().cloned())), - Some(s) => match s.wait() { - Ok(_) => Some(SessionState::Finished(s.migration_id().cloned())), - Err(_) => Some(SessionState::Failed(s.migration_id().cloned())), + Some(s) => match s.result() { + Some(Ok(_)) => Some(SessionState::Finished(s.migration_id().cloned())), + Some(Err(_)) => Some(SessionState::Failed(s.migration_id().cloned())), + None => unreachable!("s.is_finished() == true; when session is finished, result is available; qed"), }, None => None, }) diff --git 
a/secret-store/src/key_server_cluster/mod.rs b/secret-store/src/key_server_cluster/mod.rs index fc46e10318d..c1c91ef8a53 100644 --- a/secret-store/src/key_server_cluster/mod.rs +++ b/secret-store/src/key_server_cluster/mod.rs @@ -25,7 +25,7 @@ pub use super::serialization::{SerializableSignature, SerializableH256, Serializ SerializableRequester, SerializableMessageHash, SerializableAddress}; pub use self::cluster::{new_network_cluster, ClusterCore, ClusterConfiguration, ClusterClient}; pub use self::cluster_connections_net::NetConnectionsManagerConfig; -pub use self::cluster_sessions::{ClusterSession, ClusterSessionsListener}; +pub use self::cluster_sessions::{ClusterSession, ClusterSessionsListener, WaitableSession}; #[cfg(test)] pub use self::cluster::tests::DummyClusterClient; diff --git a/secret-store/src/listener/http_listener.rs b/secret-store/src/listener/http_listener.rs index 7c5113204c4..5b037add7f9 100644 --- a/secret-store/src/listener/http_listener.rs +++ b/secret-store/src/listener/http_listener.rs @@ -16,6 +16,7 @@ use std::collections::BTreeSet; use std::sync::{Arc, Weak}; +use futures::future::{ok, result}; use hyper::{self, Uri, Request as HttpRequest, Response as HttpResponse, Method as HttpMethod, StatusCode as HttpStatusCode, Body, header::{self, HeaderValue}, @@ -129,95 +130,86 @@ impl KeyServerHttpListener { } impl KeyServerHttpHandler { - fn process(self, req_method: HttpMethod, req_uri: Uri, path: &str, req_body: &[u8], cors: AllowCors) -> HttpResponse { + fn key_server(&self) -> Result, Error> { + self.handler.key_server.upgrade() + .ok_or_else(|| Error::Internal("KeyServer is already destroyed".into())) + } + + fn process( + self, + req_method: HttpMethod, + req_uri: Uri, + path: &str, + req_body: &[u8], + cors: AllowCors, + ) -> Box, Error=hyper::Error> + Send> { match parse_request(&req_method, &path, &req_body) { - Request::GenerateServerKey(document, signature, threshold) => { - return_server_public_key(&req_uri, cors, 
self.handler.key_server.upgrade() - .map(|key_server| key_server.generate_key(&document, &signature.into(), threshold)) - .unwrap_or(Err(Error::Internal("KeyServer is already destroyed".into()))) - .map_err(|err| { - warn!(target: "secretstore", "GenerateServerKey request {} has failed with: {}", req_uri, err); - err - })) - }, - Request::StoreDocumentKey(document, signature, common_point, encrypted_document_key) => { - return_empty(&req_uri, cors, self.handler.key_server.upgrade() - .map(|key_server| key_server.store_document_key(&document, &signature.into(), common_point, encrypted_document_key)) - .unwrap_or(Err(Error::Internal("KeyServer is already destroyed".into()))) - .map_err(|err| { - warn!(target: "secretstore", "StoreDocumentKey request {} has failed with: {}", req_uri, err); - err - })) - }, - Request::GenerateDocumentKey(document, signature, threshold) => { - return_document_key(&req_uri, cors, self.handler.key_server.upgrade() - .map(|key_server| key_server.generate_document_key(&document, &signature.into(), threshold)) - .unwrap_or(Err(Error::Internal("KeyServer is already destroyed".into()))) - .map_err(|err| { - warn!(target: "secretstore", "GenerateDocumentKey request {} has failed with: {}", req_uri, err); - err - })) - }, - Request::GetServerKey(document, signature) => { - return_server_public_key(&req_uri, cors, self.handler.key_server.upgrade() - .map(|key_server| key_server.restore_key_public(&document, &signature.into())) - .unwrap_or(Err(Error::Internal("KeyServer is already destroyed".into()))) - .map_err(|err| { - warn!(target: "secretstore", "GetServerKey request {} has failed with: {}", req_uri, err); - err - })) - }, - Request::GetDocumentKey(document, signature) => { - return_document_key(&req_uri, cors, self.handler.key_server.upgrade() - .map(|key_server| key_server.restore_document_key(&document, &signature.into())) - .unwrap_or(Err(Error::Internal("KeyServer is already destroyed".into()))) - .map_err(|err| { - warn!(target: 
"secretstore", "GetDocumentKey request {} has failed with: {}", req_uri, err); - err - })) - }, - Request::GetDocumentKeyShadow(document, signature) => { - return_document_key_shadow(&req_uri, cors, self.handler.key_server.upgrade() - .map(|key_server| key_server.restore_document_key_shadow(&document, &signature.into())) - .unwrap_or(Err(Error::Internal("KeyServer is already destroyed".into()))) - .map_err(|err| { - warn!(target: "secretstore", "GetDocumentKeyShadow request {} has failed with: {}", req_uri, err); - err - })) - }, - Request::SchnorrSignMessage(document, signature, message_hash) => { - return_message_signature(&req_uri, cors, self.handler.key_server.upgrade() - .map(|key_server| key_server.sign_message_schnorr(&document, &signature.into(), message_hash)) - .unwrap_or(Err(Error::Internal("KeyServer is already destroyed".into()))) - .map_err(|err| { - warn!(target: "secretstore", "SchnorrSignMessage request {} has failed with: {}", req_uri, err); - err - })) - }, - Request::EcdsaSignMessage(document, signature, message_hash) => { - return_message_signature(&req_uri, cors, self.handler.key_server.upgrade() - .map(|key_server| key_server.sign_message_ecdsa(&document, &signature.into(), message_hash)) - .unwrap_or(Err(Error::Internal("KeyServer is already destroyed".into()))) - .map_err(|err| { - warn!(target: "secretstore", "EcdsaSignMessage request {} has failed with: {}", req_uri, err); - err - })) - }, - Request::ChangeServersSet(old_set_signature, new_set_signature, new_servers_set) => { - return_empty(&req_uri, cors, self.handler.key_server.upgrade() - .map(|key_server| key_server.change_servers_set(old_set_signature, new_set_signature, new_servers_set)) - .unwrap_or(Err(Error::Internal("KeyServer is already destroyed".into()))) - .map_err(|err| { - warn!(target: "secretstore", "ChangeServersSet request {} has failed with: {}", req_uri, err); - err - })) - }, + Request::GenerateServerKey(document, signature, threshold) => + 
Box::new(result(self.key_server()) + .and_then(move |key_server| key_server.generate_key(document, signature.into(), threshold)) + .then(move |result| ok(return_server_public_key("GenerateServerKey", &req_uri, cors, result)))), + Request::StoreDocumentKey(document, signature, common_point, encrypted_document_key) => + Box::new(result(self.key_server()) + .and_then(move |key_server| key_server.store_document_key( + document, + signature.into(), + common_point, + encrypted_document_key, + )) + .then(move |result| ok(return_empty("StoreDocumentKey", &req_uri, cors, result)))), + Request::GenerateDocumentKey(document, signature, threshold) => + Box::new(result(self.key_server()) + .and_then(move |key_server| key_server.generate_document_key( + document, + signature.into(), + threshold, + )) + .then(move |result| ok(return_document_key("GenerateDocumentKey", &req_uri, cors, result)))), + Request::GetServerKey(document, signature) => + Box::new(result(self.key_server()) + .and_then(move |key_server| key_server.restore_key_public( + document, + signature.into(), + )) + .then(move |result| ok(return_server_public_key("GetServerKey", &req_uri, cors, result)))), + Request::GetDocumentKey(document, signature) => + Box::new(result(self.key_server()) + .and_then(move |key_server| key_server.restore_document_key(document, signature.into())) + .then(move |result| ok(return_document_key("GetDocumentKey", &req_uri, cors, result)))), + Request::GetDocumentKeyShadow(document, signature) => + Box::new(result(self.key_server()) + .and_then(move |key_server| key_server.restore_document_key_shadow(document, signature.into())) + .then(move |result| ok(return_document_key_shadow("GetDocumentKeyShadow", &req_uri, cors, result)))), + Request::SchnorrSignMessage(document, signature, message_hash) => + Box::new(result(self.key_server()) + .and_then(move |key_server| key_server.sign_message_schnorr( + document, + signature.into(), + message_hash, + )) + .then(move |result| 
ok(return_message_signature("SchnorrSignMessage", &req_uri, cors, result)))), + Request::EcdsaSignMessage(document, signature, message_hash) => + Box::new(result(self.key_server()) + .and_then(move |key_server| key_server.sign_message_ecdsa( + document, + signature.into(), + message_hash, + )) + .then(move |result| ok(return_message_signature("EcdsaSignMessage", &req_uri, cors, result)))), + Request::ChangeServersSet(old_set_signature, new_set_signature, new_servers_set) => + Box::new(result(self.key_server()) + .and_then(move |key_server| key_server.change_servers_set( + old_set_signature, + new_set_signature, + new_servers_set, + )) + .then(move |result| ok(return_empty("ChangeServersSet", &req_uri, cors, result)))), Request::Invalid => { warn!(target: "secretstore", "Ignoring invalid {}-request {}", req_method, req_uri); - HttpResponse::builder() + Box::new(ok(HttpResponse::builder() .status(HttpStatusCode::BAD_REQUEST) .body(Body::empty()) - .expect("Nothing to parse, cannot fail; qed") + .expect("Nothing to parse, cannot fail; qed"))) }, } } @@ -239,61 +231,74 @@ impl Service for KeyServerHttpHandler { AllowCors::Invalid => { warn!(target: "secretstore", "Ignoring {}-request {} with unauthorized Origin header", req.method(), req.uri()); Box::new(future::ok(HttpResponse::builder() - .status(HttpStatusCode::NOT_FOUND) - .body(Body::empty()) - .expect("Nothing to parse, cannot fail; qed") - )) + .status(HttpStatusCode::NOT_FOUND) + .body(Body::empty()) + .expect("Nothing to parse, cannot fail; qed"))) }, _ => { let req_method = req.method().clone(); let req_uri = req.uri().clone(); + let path = req_uri.path().to_string(); // We cannot consume Self because of the Service trait requirement. 
let this = self.clone(); - Box::new(req.into_body().concat2().map(move |body| { - let path = req_uri.path().to_string(); - if path.starts_with("/") { - this.process(req_method, req_uri, &path, &body, cors) - } else { - warn!(target: "secretstore", "Ignoring invalid {}-request {}", req_method, req_uri); - HttpResponse::builder() - .status(HttpStatusCode::NOT_FOUND) - .body(Body::empty()) - .expect("Nothing to parse, cannot fail; qed") - } - })) + Box::new(req.into_body().concat2() + .and_then(move |body| this.process(req_method, req_uri, &path, &body, cors))) } } } } -fn return_empty(req_uri: &Uri, cors: AllowCors, empty: Result<(), Error>) -> HttpResponse { - return_bytes::(req_uri, cors, empty.map(|_| None)) +fn return_empty(req_type: &str, req_uri: &Uri, cors: AllowCors, empty: Result<(), Error>) -> HttpResponse { + return_bytes::(req_type, req_uri, cors, empty.map(|_| None)) } -fn return_server_public_key(req_uri: &Uri, cors: AllowCors, server_public: Result) -> HttpResponse { - return_bytes(req_uri, cors, server_public.map(|k| Some(SerializablePublic(k)))) +fn return_server_public_key( + req_type: &str, + req_uri: &Uri, + cors: AllowCors, + server_public: Result, +) -> HttpResponse { + return_bytes(req_type, req_uri, cors, server_public.map(|k| Some(SerializablePublic(k)))) } -fn return_message_signature(req_uri: &Uri, cors: AllowCors, signature: Result) -> HttpResponse { - return_bytes(req_uri, cors, signature.map(|s| Some(SerializableBytes(s)))) +fn return_message_signature( + req_type: &str, + req_uri: &Uri, + cors: AllowCors, + signature: Result, +) -> HttpResponse { + return_bytes(req_type, req_uri, cors, signature.map(|s| Some(SerializableBytes(s)))) } -fn return_document_key(req_uri: &Uri, cors: AllowCors, document_key: Result) -> HttpResponse { - return_bytes(req_uri, cors, document_key.map(|k| Some(SerializableBytes(k)))) +fn return_document_key( + req_type: &str, + req_uri: &Uri, + cors: AllowCors, + document_key: Result, +) -> HttpResponse { + 
return_bytes(req_type, req_uri, cors, document_key.map(|k| Some(SerializableBytes(k)))) } -fn return_document_key_shadow(req_uri: &Uri, cors: AllowCors, document_key_shadow: Result) - -> HttpResponse -{ - return_bytes(req_uri, cors, document_key_shadow.map(|k| Some(SerializableEncryptedDocumentKeyShadow { +fn return_document_key_shadow( + req_type: &str, + req_uri: &Uri, + cors: AllowCors, + document_key_shadow: Result, +) -> HttpResponse { + return_bytes(req_type, req_uri, cors, document_key_shadow.map(|k| Some(SerializableEncryptedDocumentKeyShadow { decrypted_secret: k.decrypted_secret.into(), common_point: k.common_point.expect("always filled when requesting document_key_shadow; qed").into(), decrypt_shadows: k.decrypt_shadows.expect("always filled when requesting document_key_shadow; qed").into_iter().map(Into::into).collect() }))) } -fn return_bytes(req_uri: &Uri, cors: AllowCors, result: Result, Error>) -> HttpResponse { +fn return_bytes( + req_type: &str, + req_uri: &Uri, + cors: AllowCors, + result: Result, Error>, +) -> HttpResponse { match result { Ok(Some(result)) => match serde_json::to_vec(&result) { Ok(result) => { @@ -321,7 +326,10 @@ fn return_bytes(req_uri: &Uri, cors: AllowCors return_error(err), + Err(err) => { + warn!(target: "secretstore", "{} request {} has failed with: {}", req_type, req_uri, err); + return_error(err) + }, } } diff --git a/secret-store/src/listener/mod.rs b/secret-store/src/listener/mod.rs index 0fde173c88e..f260c28ed80 100644 --- a/secret-store/src/listener/mod.rs +++ b/secret-store/src/listener/mod.rs @@ -22,6 +22,7 @@ mod tasks_queue; use std::collections::BTreeSet; use std::sync::Arc; +use futures::Future; use traits::{ServerKeyGenerator, DocumentKeyServer, MessageSigner, AdminSessionsServer, KeyServer}; use types::{Error, Public, MessageHash, EncryptedMessageSignature, RequestSignature, ServerKeyId, EncryptedDocumentKey, EncryptedDocumentKeyShadow, NodeId, Requester}; @@ -72,45 +73,88 @@ impl Listener { impl KeyServer 
for Listener {} impl ServerKeyGenerator for Listener { - fn generate_key(&self, key_id: &ServerKeyId, author: &Requester, threshold: usize) -> Result { + fn generate_key( + &self, + key_id: ServerKeyId, + author: Requester, + threshold: usize, + ) -> Box + Send> { self.key_server.generate_key(key_id, author, threshold) } - fn restore_key_public(&self, key_id: &ServerKeyId, author: &Requester) -> Result { + fn restore_key_public( + &self, + key_id: ServerKeyId, + author: Requester, + ) -> Box + Send> { self.key_server.restore_key_public(key_id, author) } } impl DocumentKeyServer for Listener { - fn store_document_key(&self, key_id: &ServerKeyId, author: &Requester, common_point: Public, encrypted_document_key: Public) -> Result<(), Error> { + fn store_document_key( + &self, + key_id: ServerKeyId, + author: Requester, + common_point: Public, + encrypted_document_key: Public, + ) -> Box + Send> { self.key_server.store_document_key(key_id, author, common_point, encrypted_document_key) } - fn generate_document_key(&self, key_id: &ServerKeyId, author: &Requester, threshold: usize) -> Result { + fn generate_document_key( + &self, + key_id: ServerKeyId, + author: Requester, + threshold: usize, + ) -> Box + Send> { self.key_server.generate_document_key(key_id, author, threshold) } - fn restore_document_key(&self, key_id: &ServerKeyId, requester: &Requester) -> Result { + fn restore_document_key( + &self, + key_id: ServerKeyId, + requester: Requester, + ) -> Box + Send> { self.key_server.restore_document_key(key_id, requester) } - fn restore_document_key_shadow(&self, key_id: &ServerKeyId, requester: &Requester) -> Result { + fn restore_document_key_shadow( + &self, + key_id: ServerKeyId, + requester: Requester, + ) -> Box + Send> { self.key_server.restore_document_key_shadow(key_id, requester) } } impl MessageSigner for Listener { - fn sign_message_schnorr(&self, key_id: &ServerKeyId, requester: &Requester, message: MessageHash) -> Result { + fn sign_message_schnorr( + 
&self, + key_id: ServerKeyId, + requester: Requester, + message: MessageHash, + ) -> Box + Send> { self.key_server.sign_message_schnorr(key_id, requester, message) } - fn sign_message_ecdsa(&self, key_id: &ServerKeyId, requester: &Requester, message: MessageHash) -> Result { + fn sign_message_ecdsa( + &self, + key_id: ServerKeyId, + requester: Requester, + message: MessageHash, + ) -> Box + Send> { self.key_server.sign_message_ecdsa(key_id, requester, message) } } impl AdminSessionsServer for Listener { - fn change_servers_set(&self, old_set_signature: RequestSignature, new_set_signature: RequestSignature, new_servers_set: BTreeSet) -> Result<(), Error> { + fn change_servers_set( + &self, + old_set_signature: RequestSignature, + new_set_signature: RequestSignature, + new_servers_set: BTreeSet, + ) -> Box + Send> { self.key_server.change_servers_set(old_set_signature, new_set_signature, new_servers_set) } } diff --git a/secret-store/src/listener/service_contract_listener.rs b/secret-store/src/listener/service_contract_listener.rs index db4253e25a9..1b6a6afdfef 100644 --- a/secret-store/src/listener/service_contract_listener.rs +++ b/secret-store/src/listener/service_contract_listener.rs @@ -467,7 +467,7 @@ impl ClusterSessionsListener for ServiceContractListener { // ignore result - the only thing that we can do is to log the error let server_key_id = session.id(); if let Some(origin) = session.origin() { - if let Some(generation_result) = session.wait(Some(Default::default())) { + if let Some(generation_result) = session.result() { let generation_result = generation_result.map(Some).map_err(Into::into); let _ = Self::process_server_key_generation_result(&self.data, origin, &server_key_id, generation_result); } @@ -484,7 +484,7 @@ impl ClusterSessionsListener for ServiceContractListener { let session_id = session.id(); let server_key_id = session_id.id; if let (Some(requester), Some(origin)) = (session.requester().and_then(|r| r.address(&server_key_id).ok()), 
session.origin()) { - if let Some(retrieval_result) = session.wait(Some(Default::default())) { + if let Some(retrieval_result) = session.result() { let retrieval_result = retrieval_result.map(|key_shadow| session.broadcast_shadows() .and_then(|broadcast_shadows| @@ -509,8 +509,8 @@ impl ClusterSessionsListener error.clone(), + let error = match session.result() { + Some(Err(ref error)) if !error.is_non_fatal() => error.clone(), _ => return, }; diff --git a/secret-store/src/traits.rs b/secret-store/src/traits.rs index e12c75e5ddd..ed44e2503dc 100644 --- a/secret-store/src/traits.rs +++ b/secret-store/src/traits.rs @@ -15,6 +15,7 @@ // along with Parity Ethereum. If not, see . use std::collections::BTreeSet; +use futures::Future; use ethkey::{KeyPair, Signature, Error as EthKeyError}; use ethereum_types::{H256, Address}; use types::{Error, Public, ServerKeyId, MessageHash, EncryptedMessageSignature, RequestSignature, Requester, @@ -39,11 +40,20 @@ pub trait ServerKeyGenerator { /// `author` is the author of key entry. /// `threshold + 1` is the minimal number of nodes, required to restore private key. /// Result is a public portion of SK. - fn generate_key(&self, key_id: &ServerKeyId, author: &Requester, threshold: usize) -> Result; + fn generate_key( + &self, + key_id: ServerKeyId, + author: Requester, + threshold: usize, + ) -> Box + Send>; /// Retrieve public portion of previously generated SK. /// `key_id` is identifier of previously generated SK. /// `author` is the same author, that has created the server key. - fn restore_key_public(&self, key_id: &ServerKeyId, author: &Requester) -> Result; + fn restore_key_public( + &self, + key_id: ServerKeyId, + author: Requester, + ) -> Box + Send>; } /// Document key (DK) server. @@ -54,20 +64,35 @@ pub trait DocumentKeyServer: ServerKeyGenerator { /// `common_point` is a result of `k * T` expression, where `T` is generation point and `k` is random scalar in EC field. 
/// `encrypted_document_key` is a result of `M + k * y` expression, where `M` is unencrypted document key (point on EC), /// `k` is the same scalar used in `common_point` calculation and `y` is previously generated public part of SK. - fn store_document_key(&self, key_id: &ServerKeyId, author: &Requester, common_point: Public, encrypted_document_key: Public) -> Result<(), Error>; + fn store_document_key( + &self, + key_id: ServerKeyId, + author: Requester, + common_point: Public, + encrypted_document_key: Public, + ) -> Box + Send>; /// Generate and store both SK and DK. This is a shortcut for consequent calls of `generate_key` and `store_document_key`. /// The only difference is that DK is generated by DocumentKeyServer (which might be considered unsafe). /// `key_id` is the caller-provided identifier of generated SK. /// `author` is the author of server && document key entry. /// `threshold + 1` is the minimal number of nodes, required to restore private key. /// Result is a DK, encrypted with caller public key. - fn generate_document_key(&self, key_id: &ServerKeyId, author: &Requester, threshold: usize) -> Result; + fn generate_document_key( + &self, + key_id: ServerKeyId, + author: Requester, + threshold: usize, + ) -> Box + Send>; /// Restore previously stored DK. /// DK is decrypted on the key server (which might be considered unsafe), and then encrypted with caller public key. /// `key_id` is identifier of previously generated SK. /// `requester` is the one who requests access to document key. Caller must be on ACL for this function to succeed. /// Result is a DK, encrypted with caller public key. - fn restore_document_key(&self, key_id: &ServerKeyId, requester: &Requester) -> Result; + fn restore_document_key( + &self, + key_id: ServerKeyId, + requester: Requester, + ) -> Box + Send>; /// Restore previously stored DK. 
/// To decrypt DK on client: /// 1) use requestor secret key to decrypt secret coefficients from result.decrypt_shadows @@ -75,7 +100,11 @@ pub trait DocumentKeyServer: ServerKeyGenerator { /// 3) calculate decrypt_shadow_point: decrypt_shadows_sum * result.common_point /// 4) calculate decrypted_secret: result.decrypted_secret + decrypt_shadow_point /// Result is a DK shadow. - fn restore_document_key_shadow(&self, key_id: &ServerKeyId, requester: &Requester) -> Result; + fn restore_document_key_shadow( + &self, + key_id: ServerKeyId, + requester: Requester, + ) -> Box + Send>; } /// Message signer. @@ -85,14 +114,24 @@ pub trait MessageSigner: ServerKeyGenerator { /// `requester` is the one who requests access to server key private. /// `message` is the message to be signed. /// Result is a signed message, encrypted with caller public key. - fn sign_message_schnorr(&self, key_id: &ServerKeyId, requester: &Requester, message: MessageHash) -> Result; + fn sign_message_schnorr( + &self, + key_id: ServerKeyId, + requester: Requester, + message: MessageHash, + ) -> Box + Send>; /// Generate ECDSA signature for message with previously generated SK. /// WARNING: only possible when SK was generated using t <= 2 * N. /// `key_id` is the caller-provided identifier of generated SK. /// `signature` is `key_id`, signed with caller public key. /// `message` is the message to be signed. /// Result is a signed message, encrypted with caller public key. - fn sign_message_ecdsa(&self, key_id: &ServerKeyId, signature: &Requester, message: MessageHash) -> Result; + fn sign_message_ecdsa( + &self, + key_id: ServerKeyId, + signature: Requester, + message: MessageHash, + ) -> Box + Send>; } /// Administrative sessions server. @@ -101,7 +140,12 @@ pub trait AdminSessionsServer { /// And old nodes (i.e. cluster nodes except new_servers_set) have clear databases. /// WARNING: newly generated keys will be distributed among all cluster nodes. 
So this session /// must be followed with cluster nodes change (either via contract, or config files). - fn change_servers_set(&self, old_set_signature: RequestSignature, new_set_signature: RequestSignature, new_servers_set: BTreeSet) -> Result<(), Error>; + fn change_servers_set( + &self, + old_set_signature: RequestSignature, + new_set_signature: RequestSignature, + new_servers_set: BTreeSet, + ) -> Box + Send>; } /// Key server. From 7827cc048e60bdb48216fdfcaeb0140e03f6d782 Mon Sep 17 00:00:00 2001 From: David Date: Fri, 7 Jun 2019 11:25:45 +0200 Subject: [PATCH 07/16] Use RUSTFLAGS to set the optimization level (#10719) * Use RUSTFLAGS to set the optimization level Cargo has a [quirk]() in how configuration settings are propagated when `cargo test` runs: local code respect the settings in `[profile.test]` but all dependencies use the `[profile.dev]` settings. Here we force `opt-level=3` for all dependencies. * Remove unused profile settings * Maybe like this? * Turn off incremental compilation * Remove colors; try again with overflow-checks on * Use quiet CI machine * Turn overflow checking back on * Be explicit about what options we use * Remove "quiet machine" override --- Cargo.toml | 3 +-- scripts/gitlab/test-linux.sh | 3 ++- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 5a6c93d6493..1a5e32d1011 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -117,10 +117,9 @@ path = "parity/lib.rs" path = "parity/main.rs" name = "parity" -[profile.dev] - [profile.release] debug = false +lto = true [workspace] # This should only list projects that are not diff --git a/scripts/gitlab/test-linux.sh b/scripts/gitlab/test-linux.sh index 2854508bb56..aac007ea63e 100755 --- a/scripts/gitlab/test-linux.sh +++ b/scripts/gitlab/test-linux.sh @@ -14,4 +14,5 @@ rustup default $1 rustup show echo "________Running Parity Full Test Suite________" -time cargo test $OPTIONS --features "$FEATURES" --locked --all --target $CARGO_TARGET --verbose 
--color=always -- --test-threads $THREADS +# Why are we using RUSTFLAGS? See https://github.com/paritytech/parity-ethereum/pull/10719 +CARGO_INCREMENTAL=0 RUSTFLAGS="-C opt-level=3 -C overflow-checks=on -C debuginfo=2" time cargo test $OPTIONS --features "$FEATURES" --locked --all --target $CARGO_TARGET --verbose --color=never -- --test-threads $THREADS From 083dcc369bac4ec105a2d65364c4731187e66095 Mon Sep 17 00:00:00 2001 From: David Date: Mon, 10 Jun 2019 12:10:26 +0200 Subject: [PATCH 08/16] Refactor Clique stepping (#10691) * Use Drop to shutdown stepper thread Make period == 0 an error and remove the Option from step_service * Remove StepService Remove StepService and spawn the stepping thread in `Clique::new()`. Don't store the thread handle and instead trust the `AtomicBool` to signal shutdown time. Don't check for `period > 0`: we assume a valid chainspec file. * Don't shutdown the stepper thread at all, just let it run until exit Also: fix a few warnings and tests * Put kvdb_memorydb back * Warn&exit when engine is dropped Don't sleep too long! 
* Don't delay stepping thread * Better formatting --- ethcore/src/engines/clique/mod.rs | 55 ++++++++------- ethcore/src/engines/clique/step_service.rs | 80 ---------------------- ethcore/src/engines/mod.rs | 3 - 3 files changed, 27 insertions(+), 111 deletions(-) delete mode 100644 ethcore/src/engines/clique/step_service.rs diff --git a/ethcore/src/engines/clique/mod.rs b/ethcore/src/engines/clique/mod.rs index 180237b01b3..5884ab05d3a 100644 --- a/ethcore/src/engines/clique/mod.rs +++ b/ethcore/src/engines/clique/mod.rs @@ -64,7 +64,7 @@ use std::collections::VecDeque; use std::sync::{Arc, Weak}; use std::thread; use std::time; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use std::time::{Instant, Duration, SystemTime, UNIX_EPOCH}; use block::ExecutedBlock; use client::{BlockId, EngineClient}; @@ -89,11 +89,9 @@ use time_utils::CheckedSystemTime; use self::block_state::CliqueBlockState; use self::params::CliqueParams; -use self::step_service::StepService; mod params; mod block_state; -mod step_service; mod util; // TODO(niklasad1): extract tester types into a separate mod to be shared in the code base @@ -168,7 +166,6 @@ pub struct Clique { block_state_by_hash: RwLock>, proposals: RwLock>, signer: RwLock>>, - step_service: Option, } #[cfg(test)] @@ -181,13 +178,15 @@ pub struct Clique { pub block_state_by_hash: RwLock>, pub proposals: RwLock>, pub signer: RwLock>>, - pub step_service: Option, } impl Clique { /// Initialize Clique engine from empty state. 
pub fn new(params: CliqueParams, machine: EthereumMachine) -> Result, Error> { - let mut engine = Clique { + /// Step Clique at most every 2 seconds + const SEALING_FREQ: Duration = Duration::from_secs(2); + + let engine = Clique { epoch_length: params.epoch, period: params.period, client: Default::default(), @@ -195,19 +194,29 @@ impl Clique { proposals: Default::default(), signer: Default::default(), machine, - step_service: None, }; - if params.period > 0 { - engine.step_service = Some(StepService::new()); - let engine = Arc::new(engine); - let weak_eng = Arc::downgrade(&engine); - if let Some(step_service) = &engine.step_service { - step_service.start(weak_eng); - } - Ok(engine) - } else { - Ok(Arc::new(engine)) - } + let engine = Arc::new(engine); + let weak_eng = Arc::downgrade(&engine); + + thread::Builder::new().name("StepService".into()) + .spawn(move || { + loop { + let next_step_at = Instant::now() + SEALING_FREQ; + trace!(target: "miner", "StepService: triggering sealing"); + if let Some(eng) = weak_eng.upgrade() { + eng.step() + } else { + warn!(target: "shutdown", "StepService: engine is dropped; exiting."); + break; + } + + let now = Instant::now(); + if now < next_step_at { + thread::sleep(next_step_at - now); + } + } + })?; + Ok(engine) } #[cfg(test)] @@ -225,7 +234,6 @@ impl Clique { proposals: Default::default(), signer: Default::default(), machine: Spec::new_test_machine(), - step_service: None, } } @@ -348,15 +356,6 @@ impl Clique { } } -impl Drop for Clique { - fn drop(&mut self) { - if let Some(step_service) = &self.step_service { - trace!(target: "shutdown", "Clique; stopping step service"); - step_service.stop(); - } - } -} - impl Engine for Clique { fn name(&self) -> &str { "Clique" } diff --git a/ethcore/src/engines/clique/step_service.rs b/ethcore/src/engines/clique/step_service.rs deleted file mode 100644 index a7c977953ff..00000000000 --- a/ethcore/src/engines/clique/step_service.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 
2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity Ethereum. - -// Parity Ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Ethereum. If not, see . - - -use std::sync::Weak; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::time::Duration; -use std::thread; -use std::sync::Arc; -use parking_lot::RwLock; - -use engines::Engine; -use machine::Machine; - -/// Service that is managing the engine -pub struct StepService { - shutdown: Arc, - thread: RwLock>>, -} - -impl StepService { - /// Create a new StepService without spawning a sealing thread. - pub fn new() -> Self { - let shutdown = Arc::new(AtomicBool::new(false)); - StepService { shutdown, thread: RwLock::new(None) } - } - - /// Start the StepService: spawns a thread that loops and triggers a sealing operation every 2sec. - pub fn start(&self, engine: Weak>) { - let shutdown = self.shutdown.clone(); - - let thr = thread::Builder::new() - .name("CliqueStepService".into()) - .spawn(move || { - // startup delay. - thread::sleep(Duration::from_secs(5)); - - loop { - // see if we are in shutdown. 
- if shutdown.load(Ordering::Acquire) { - trace!(target: "shutdown", "CliqueStepService: received shutdown signal!"); - break; - } - - trace!(target: "miner", "CliqueStepService: triggering sealing"); - - // Try sealing - engine.upgrade().map(|x| x.step()); - - // Yield - thread::sleep(Duration::from_millis(2000)); - } - trace!(target: "shutdown", "CliqueStepService: exited loop, shutdown."); - }).expect("CliqueStepService thread failed"); - - *self.thread.write() = Some(thr); - } - - /// Stop the `StepService` - pub fn stop(&self) { - trace!(target: "shutdown", "CliqueStepService: signalling shutting to stepping thread."); - self.shutdown.store(true, Ordering::Release); - if let Some(t) = self.thread.write().take() { - t.join().expect("CliqueStepService thread panicked!"); - } - } -} diff --git a/ethcore/src/engines/mod.rs b/ethcore/src/engines/mod.rs index ec0e5b020fa..d51e31ea17a 100644 --- a/ethcore/src/engines/mod.rs +++ b/ethcore/src/engines/mod.rs @@ -437,9 +437,6 @@ pub trait Engine: Sync + Send { /// Trigger next step of the consensus engine. fn step(&self) {} - /// Stops any services that the may hold the Engine and makes it safe to drop. - fn stop(&mut self) {} - /// Create a factory for building snapshot chunks and restoring from them. /// Returning `None` indicates that this engine doesn't support snapshot creation. 
fn snapshot_components(&self) -> Option> { From 5da8da68cc21edb7416f5b575e2bfda8a5dd88cf Mon Sep 17 00:00:00 2001 From: David Date: Mon, 10 Jun 2019 18:58:14 +0200 Subject: [PATCH 09/16] Stop breaking out of loop if a non-canonical hash is found (#10729) * Stop breaking out of loop if a non-canonical hash is found * include expected hash in log msg * More logging * Scope * Syntax * Log in blank RollingFinality Escalate bad proposer to warning * More readable code * Extensive logging on unexpected non-canonical hash * Wording --- ethcore/blockchain/src/blockchain.rs | 24 +++++++++++++++---- .../src/engines/authority_round/finality.rs | 12 ++++++++-- ethcore/src/engines/authority_round/mod.rs | 8 +++---- ethcore/src/verification/verifier.rs | 2 +- 4 files changed, 34 insertions(+), 12 deletions(-) diff --git a/ethcore/blockchain/src/blockchain.rs b/ethcore/blockchain/src/blockchain.rs index b528334cad3..7cf6b12dd5c 100644 --- a/ethcore/blockchain/src/blockchain.rs +++ b/ethcore/blockchain/src/blockchain.rs @@ -963,6 +963,7 @@ impl BlockChain { /// Iterate over all epoch transitions. /// This will only return transitions within the canonical chain. pub fn epoch_transitions(&self) -> EpochTransitionIter { + trace!(target: "blockchain", "Iterating over all epoch transitions"); let iter = self.db.key_value().iter_from_prefix(db::COL_EXTRA, &EPOCH_KEY_PREFIX[..]); EpochTransitionIter { chain: self, @@ -988,7 +989,9 @@ impl BlockChain { pub fn epoch_transition_for(&self, parent_hash: H256) -> Option { // slow path: loop back block by block for hash in self.ancestry_iter(parent_hash)? { + trace!(target: "blockchain", "Got parent hash {} from ancestry_iter", hash); let details = self.block_details(&hash)?; + trace!(target: "blockchain", "Block #{}: Got block details", details.number); // look for transition in database. 
if let Some(transition) = self.epoch_transition(details.number, hash) { @@ -1000,11 +1003,22 @@ impl BlockChain { // // if `block_hash` is canonical it will only return transitions up to // the parent. - if self.block_hash(details.number)? == hash { - return self.epoch_transitions() - .map(|(_, t)| t) - .take_while(|t| t.block_number <= details.number) - .last() + match self.block_hash(details.number) { + Some(h) if h == hash => { + return self.epoch_transitions() + .map(|(_, t)| t) + .take_while(|t| t.block_number <= details.number) + .last() + }, + Some(h) => { + warn!(target: "blockchain", "Block #{}: Found non-canonical block hash {} (expected {})", details.number, h, hash); + + trace!(target: "blockchain", "Block #{} Mismatched hashes. Ancestor {} != Own {} – Own block #{}", details.number, hash, h, self.block_number(&h).unwrap_or_default() ); + trace!(target: "blockchain", " Ancestor {}: #{:#?}", hash, self.block_details(&hash)); + trace!(target: "blockchain", " Own {}: #{:#?}", h, self.block_details(&h)); + + }, + None => trace!(target: "blockchain", "Block #{}: hash {} not found in cache or DB", details.number, hash), } } diff --git a/ethcore/src/engines/authority_round/finality.rs b/ethcore/src/engines/authority_round/finality.rs index af57278c9f3..e88458d81aa 100644 --- a/ethcore/src/engines/authority_round/finality.rs +++ b/ethcore/src/engines/authority_round/finality.rs @@ -39,6 +39,7 @@ pub struct RollingFinality { impl RollingFinality { /// Create a blank finality checker under the given validator set. pub fn blank(signers: Vec
) -> Self { + trace!(target: "finality", "Instantiating blank RollingFinality with {} signers: {:?}", signers.len(), signers); RollingFinality { headers: VecDeque::new(), signers: SimpleList::new(signers), @@ -110,7 +111,14 @@ impl RollingFinality { /// Returns a list of all newly finalized headers. // TODO: optimize with smallvec. pub fn push_hash(&mut self, head: H256, signers: Vec
) -> Result, UnknownValidator> { - if signers.iter().any(|s| !self.signers.contains(s)) { return Err(UnknownValidator) } + // TODO: seems bad to iterate over signers twice like this. + // Can do the work in a single loop and call `clear()` if an unknown validator was found? + for their_signer in signers.iter() { + if !self.signers.contains(their_signer) { + warn!(target: "finality", "Unknown validator: {}", their_signer); + return Err(UnknownValidator) + } + } for signer in signers.iter() { *self.sign_count.entry(*signer).or_insert(0) += 1; @@ -141,7 +149,7 @@ impl RollingFinality { } } - trace!(target: "finality", "Blocks finalized by {:?}: {:?}", head, newly_finalized); + trace!(target: "finality", "{} Blocks finalized by {:?}: {:?}", newly_finalized.len(), head, newly_finalized); self.last_pushed = Some(head); Ok(newly_finalized) diff --git a/ethcore/src/engines/authority_round/mod.rs b/ethcore/src/engines/authority_round/mod.rs index 0942c0881ba..8bf838d2dc5 100644 --- a/ethcore/src/engines/authority_round/mod.rs +++ b/ethcore/src/engines/authority_round/mod.rs @@ -247,7 +247,7 @@ impl EpochManager { None => { // this really should never happen unless the block passed // hasn't got a parent in the database. - debug!(target: "engine", "No genesis transition found."); + warn!(target: "engine", "No genesis transition found."); return false; } }; @@ -280,8 +280,8 @@ impl EpochManager { true } - // note new epoch hash. this will force the next block to re-load - // the epoch set + // Note new epoch hash. This will force the next block to re-load + // the epoch set. // TODO: optimize and don't require re-loading after epoch change. 
fn note_new_epoch(&mut self) { self.force = true; @@ -614,7 +614,7 @@ fn verify_external(header: &Header, validators: &ValidatorSet, empty_steps_trans }; if is_invalid_proposer { - trace!(target: "engine", "verify_block_external: bad proposer for step: {}", header_step); + warn!(target: "engine", "verify_block_external: bad proposer for step: {}", header_step); Err(EngineError::NotProposer(Mismatch { expected: correct_proposer, found: *header.author() }))? } else { Ok(()) diff --git a/ethcore/src/verification/verifier.rs b/ethcore/src/verification/verifier.rs index 76eb60b9a18..f7221dae81d 100644 --- a/ethcore/src/verification/verifier.rs +++ b/ethcore/src/verification/verifier.rs @@ -38,6 +38,6 @@ pub trait Verifier: Send + Sync /// Do a final verification check for an enacted header vs its expected counterpart. fn verify_block_final(&self, expected: &Header, got: &Header) -> Result<(), Error>; - /// Verify a block, inspecing external state. + /// Verify a block, inspecting external state. 
fn verify_block_external(&self, header: &Header, engine: &EthEngine) -> Result<(), Error>; } From bf4fa658f37a97fcaad9f4b2f7f800fa71220326 Mon Sep 17 00:00:00 2001 From: Talha Cross <47772477+soc1c@users.noreply.github.com> Date: Tue, 11 Jun 2019 19:55:05 +0200 Subject: [PATCH 10/16] ethcore: enable ECIP-1054 for classic (#10731) * config: enable atlantis on ethereum classic * config: enable atlantis on morden classic * config: enable atlantis on morden classic * config: enable atlantis on kotti classic * ethcore: move kotti fork block to 0xAEF49 * ethcore: move morden fork block to 0x4829BA * ethcore: move classic fork block to 0x81B320 * remove trailing comma * remove trailing comma * fix chainspec * ethcore: move classic fork block to 0x7fffffffffffffff --- ethcore/res/ethereum/classic.json | 120 ++++++++++++++++++++++++++---- ethcore/res/ethereum/kotti.json | 57 ++++++++++++-- ethcore/res/ethereum/morden.json | 74 +++++++++++++++--- 3 files changed, 222 insertions(+), 29 deletions(-) diff --git a/ethcore/res/ethereum/classic.json b/ethcore/res/ethereum/classic.json index 393129c12f0..682b752991e 100644 --- a/ethcore/res/ethereum/classic.json +++ b/ethcore/res/ethereum/classic.json @@ -8,11 +8,12 @@ "difficultyBoundDivisor": "0x0800", "durationLimit": "0x0d", "blockReward": "0x4563918244F40000", - "homesteadTransition": 1150000, - "ecip1010PauseTransition": 3000000, - "ecip1010ContinueTransition": 5000000, - "ecip1017EraRounds": 5000000, - "bombDefuseTransition": 5900000 + "homesteadTransition": "0x118c30", + "ecip1010PauseTransition": "0x2dc6c0", + "ecip1010ContinueTransition": "0x4c4b40", + "ecip1017EraRounds": "0x4c4b40", + "eip100bTransition": "0x7fffffffffffffff", + "bombDefuseTransition": "0x5a06e0" } } }, @@ -26,11 +27,17 @@ "chainID": "0x3d", "forkBlock": "0x1d4c00", "forkCanonHash": "0x94365e3a8c0b35089c1d1195081fe7489b528a84b22199c916180db8b28ade7f", - "eip150Transition": 2500000, - "eip160Transition": 3000000, + "eip150Transition": "0x2625a0", + 
"eip160Transition": "0x2dc6c0", "eip161abcTransition": "0x7fffffffffffffff", "eip161dTransition": "0x7fffffffffffffff", - "eip155Transition": 3000000 + "eip155Transition": "0x2dc6c0", + "maxCodeSize": "0x6000", + "maxCodeSizeTransition": "0x7fffffffffffffff", + "eip140Transition": "0x7fffffffffffffff", + "eip211Transition": "0x7fffffffffffffff", + "eip214Transition": "0x7fffffffffffffff", + "eip658Transition": "0x7fffffffffffffff" }, "genesis": { "seal": { @@ -3835,7 +3842,7 @@ "0xc32fd5318214071a41cd8e98499b2b65942c5837c686a06b536146fd0bf294bf", "0xac390c012eecd83fa8f4cc77a59992914b5c95af36b28747e07adea13228acbc" ] - }, + }, "nodes": [ "enode://efd48ad0879eeb7f9cb5e50f33f7bc21e805a72e90361f145baaa22dd75d111e7cd9c93f1b7060dcb30aa1b3e620269336dbf32339fea4c18925a4c15fe642df@18.205.66.229:30303", "enode://5fbfb426fbb46f8b8c1bd3dd140f5b511da558cd37d60844b525909ab82e13a25ee722293c829e52cb65c2305b1637fa9a2ea4d6634a224d5f400bfe244ac0de@162.243.55.45:30303", @@ -3851,10 +3858,97 @@ "enode://5cd218959f8263bc3721d7789070806b0adff1a0ed3f95ec886fb469f9362c7507e3b32b256550b9a7964a23a938e8d42d45a0c34b332bfebc54b29081e83b93@35.187.57.94:30303" ], "accounts": { - "0000000000000000000000000000000000000001": { "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } }, - "0000000000000000000000000000000000000002": { "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } }, - "0000000000000000000000000000000000000003": { "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } }, - "0000000000000000000000000000000000000004": { "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }, + "0x0000000000000000000000000000000000000001": { + "builtin": { + "name": "ecrecover", + "pricing": { + "linear": { + "base": 3000, + "word": 0 + } + } + } + }, + "0x0000000000000000000000000000000000000002": { + "builtin": { + "name": "sha256", + "pricing": { + "linear": { + 
"base": 60, + "word": 12 + } + } + } + }, + "0x0000000000000000000000000000000000000003": { + "builtin": { + "name": "ripemd160", + "pricing": { + "linear": { + "base": 600, + "word": 120 + } + } + } + }, + "0x0000000000000000000000000000000000000004": { + "builtin": { + "name": "identity", + "pricing": { + "linear": { + "base": 15, + "word": 3 + } + } + } + }, + "0x0000000000000000000000000000000000000005": { + "builtin": { + "name": "modexp", + "activate_at": "0x7fffffffffffffff", + "pricing": { + "modexp": { + "divisor": 20 + } + } + } + }, + "0x0000000000000000000000000000000000000006": { + "builtin": { + "name": "alt_bn128_add", + "activate_at": "0x7fffffffffffffff", + "pricing": { + "linear": { + "base": 500, + "word": 0 + } + } + } + }, + "0x0000000000000000000000000000000000000007": { + "builtin": { + "name": "alt_bn128_mul", + "activate_at": "0x7fffffffffffffff", + "pricing": { + "linear": { + "base": 40000, + "word": 0 + } + } + } + }, + "0x0000000000000000000000000000000000000008": { + "builtin": { + "name": "alt_bn128_pairing", + "activate_at": "0x7fffffffffffffff", + "pricing": { + "alt_bn128_pairing": { + "base": 100000, + "pair": 80000 + } + } + } + }, "3282791d6fd713f1e94f4bfd565eaa78b3a0599d": { "balance": "1337000000000000000000" }, diff --git a/ethcore/res/ethereum/kotti.json b/ethcore/res/ethereum/kotti.json index cf531f6a5d9..690269625b0 100644 --- a/ethcore/res/ethereum/kotti.json +++ b/ethcore/res/ethereum/kotti.json @@ -12,12 +12,18 @@ "params": { "accountStartNonce": "0x0", "chainID": "0x6", + "eip140Transition": "0xaef49", "eip150Transition": "0x0", "eip155Transition": "0x0", "eip160Transition": "0x0", - "eip161abcTransition": "0x7fffffffffffffff", - "eip161dTransition": "0x7fffffffffffffff", + "eip161abcTransition": "0xaef49", + "eip161dTransition": "0xaef49", + "eip211Transition": "0xaef49", + "eip214Transition": "0xaef49", + "eip658Transition": "0xaef49", "gasLimitBoundDivisor": "0x400", + "maxCodeSize": "0x6000", + 
"maxCodeSizeTransition": "0xaef49", "maximumExtraDataSize": "0xffff", "minGasLimit": "0x1388", "networkID": "0x6" @@ -95,16 +101,55 @@ } }, "0x0000000000000000000000000000000000000005": { - "balance": "0x1" + "balance": "0x1", + "builtin": { + "name": "modexp", + "activate_at": "0xaef49", + "pricing": { + "modexp": { + "divisor": 20 + } + } + } }, "0x0000000000000000000000000000000000000006": { - "balance": "0x1" + "balance": "0x1", + "builtin": { + "name": "alt_bn128_add", + "activate_at": "0xaef49", + "pricing": { + "linear": { + "base": 500, + "word": 0 + } + } + } }, "0x0000000000000000000000000000000000000007": { - "balance": "0x1" + "balance": "0x1", + "builtin": { + "name": "alt_bn128_mul", + "activate_at": "0xaef49", + "pricing": { + "linear": { + "base": 40000, + "word": 0 + } + } + } }, "0x0000000000000000000000000000000000000008": { - "balance": "0x1" + "balance": "0x1", + "builtin": { + "name": "alt_bn128_pairing", + "activate_at": "0xaef49", + "pricing": { + "alt_bn128_pairing": { + "base": 100000, + "pair": 80000 + } + } + } }, "0x0000000000000000000000000000000000000009": { "balance": "0x1" diff --git a/ethcore/res/ethereum/morden.json b/ethcore/res/ethereum/morden.json index e626152adc9..5ea0f211432 100644 --- a/ethcore/res/ethereum/morden.json +++ b/ethcore/res/ethereum/morden.json @@ -8,11 +8,12 @@ "difficultyBoundDivisor": "0x0800", "durationLimit": "0x0d", "blockReward": "0x4563918244F40000", - "homesteadTransition": 494000, - "ecip1010PauseTransition": 1915000, - "ecip1010ContinueTransition": 3415000, - "ecip1017EraRounds": 2000000, - "bombDefuseTransition": 2300000 + "homesteadTransition": "0x789b0", + "ecip1010PauseTransition": "0x1d3878", + "ecip1010ContinueTransition": "0x341bd8", + "ecip1017EraRounds": "0x1e8480", + "eip100bTransition": "0x4829ba", + "bombDefuseTransition": "0x231860" } } }, @@ -26,11 +27,17 @@ "chainID": "0x3e", "forkBlock": "0x1b34d8", "forkCanonHash": "0xf376243aeff1f256d970714c3de9fd78fa4e63cf63e32a51fe1169e375d98145", 
- "eip150Transition": 1783000, - "eip160Transition": 1915000, - "eip161abcTransition": "0x7fffffffffffffff", - "eip161dTransition": "0x7fffffffffffffff", - "eip155Transition": 1915000 + "eip150Transition": "0x1b34d8", + "eip160Transition": "0x1d3878", + "eip161abcTransition": "0x4829ba", + "eip161dTransition": "0x4829ba", + "eip155Transition": "0x1d3878", + "maxCodeSize": "0x6000", + "maxCodeSizeTransition": "0x4829ba", + "eip140Transition": "0x4829ba", + "eip211Transition": "0x4829ba", + "eip214Transition": "0x4829ba", + "eip658Transition": "0x4829ba" }, "genesis": { "seal": { @@ -68,6 +75,53 @@ "0000000000000000000000000000000000000002": { "balance": "1", "nonce": "1048576", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } }, "0000000000000000000000000000000000000003": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } }, "0000000000000000000000000000000000000004": { "balance": "1", "nonce": "1048576", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }, + "0x0000000000000000000000000000000000000005": { + "builtin": { + "name": "modexp", + "activate_at": "0x4829ba", + "pricing": { + "modexp": { + "divisor": 20 + } + } + } + }, + "0x0000000000000000000000000000000000000006": { + "builtin": { + "name": "alt_bn128_add", + "activate_at": "0x4829ba", + "pricing": { + "linear": { + "base": 500, + "word": 0 + } + } + } + }, + "0x0000000000000000000000000000000000000007": { + "builtin": { + "name": "alt_bn128_mul", + "activate_at": "0x4829ba", + "pricing": { + "linear": { + "base": 40000, + "word": 0 + } + } + } + }, + "0x0000000000000000000000000000000000000008": { + "builtin": { + "name": "alt_bn128_pairing", + "activate_at": "0x4829ba", + "pricing": { + "alt_bn128_pairing": { + "base": 100000, + "pair": 80000 + } + } + } + }, "102e61f5d8f9bc71d0ad4a084df4e65e05ce0e1c": { "balance": 
"1606938044258990275541962092341162602522202993782792835301376", "nonce": "1048576" } }, "hardcodedSync": { From 1c076af5ee16dce5ce942245373db427b4b96fd4 Mon Sep 17 00:00:00 2001 From: Seun LanLege Date: Wed, 12 Jun 2019 09:42:16 +0100 Subject: [PATCH 11/16] DevP2p: Get node IP address and udp port from Socket, if not included in PING packet (#10705) * get node IP address and udp port from Socket, if not included in PING packet * prevent bootnodes from being added to host nodes * code corrections * code corrections * code corrections * code corrections * docs * code corrections * code corrections * Apply suggestions from code review Co-Authored-By: David --- util/network-devp2p/src/discovery.rs | 29 ++++++++++++++++++++------- util/network-devp2p/src/node_table.rs | 14 +++++++++---- 2 files changed, 32 insertions(+), 11 deletions(-) diff --git a/util/network-devp2p/src/discovery.rs b/util/network-devp2p/src/discovery.rs index 2777505b3a9..bd5cd09a639 100644 --- a/util/network-devp2p/src/discovery.rs +++ b/util/network-devp2p/src/discovery.rs @@ -169,7 +169,6 @@ pub struct Discovery<'a> { discovery_id: NodeId, discovery_nodes: HashSet, node_buckets: Vec, - // Sometimes we don't want to add nodes to the NodeTable, but still want to // keep track of them to avoid excessive pinging (happens when an unknown node sends // a discovery request to us -- the node might be on a different net). 
@@ -258,7 +257,7 @@ impl<'a> Discovery<'a> { Ok(()) => None, Err(BucketError::Ourselves) => None, Err(BucketError::NotInTheBucket{node_entry, bucket_distance}) => Some((node_entry, bucket_distance)) - }.map(|(node_entry, bucket_distance)| { + }.and_then(|(node_entry, bucket_distance)| { trace!(target: "discovery", "Adding a new node {:?} into our bucket {}", &node_entry, bucket_distance); let mut added = HashMap::with_capacity(1); @@ -266,7 +265,7 @@ impl<'a> Discovery<'a> { let node_to_ping = { let bucket = &mut self.node_buckets[bucket_distance]; - bucket.nodes.push_front(BucketEntry::new(node_entry)); + bucket.nodes.push_front(BucketEntry::new(node_entry.clone())); if bucket.nodes.len() > BUCKET_SIZE { select_bucket_ping(bucket.nodes.iter()) } else { @@ -276,7 +275,12 @@ impl<'a> Discovery<'a> { if let Some(node) = node_to_ping { self.try_ping(node, PingReason::Default); }; - TableUpdates{added, removed: HashSet::new()} + + if node_entry.endpoint.is_valid_sync_node() { + Some(TableUpdates { added, removed: HashSet::new() }) + } else { + None + } }) } @@ -519,7 +523,18 @@ impl<'a> Discovery<'a> { fn on_ping(&mut self, rlp: &Rlp, node_id: &NodeId, from: &SocketAddr, echo_hash: &[u8]) -> Result, Error> { trace!(target: "discovery", "Got Ping from {:?}", &from); - let ping_from = NodeEndpoint::from_rlp(&rlp.at(1)?)?; + let ping_from = if let Ok(node_endpoint) = NodeEndpoint::from_rlp(&rlp.at(1)?) { + node_endpoint + } else { + let mut address = from.clone(); + // address here is the node's tcp port. If we are unable to get the `NodeEndpoint` from the `ping_from` + // rlp field then this is most likely a BootNode, set the tcp port to 0 because it can not be used for syncing. 
+ address.set_port(0); + NodeEndpoint { + address, + udp_port: from.port() + } + }; let ping_to = NodeEndpoint::from_rlp(&rlp.at(2)?)?; let timestamp: u64 = rlp.val_at(3)?; self.check_timestamp(timestamp)?; @@ -541,7 +556,7 @@ impl<'a> Discovery<'a> { self.send_packet(PACKET_PONG, from, &response.drain())?; let entry = NodeEntry { id: *node_id, endpoint: pong_to.clone() }; - if !entry.endpoint.is_valid() { + if !entry.endpoint.is_valid_discovery_node() { debug!(target: "discovery", "Got bad address: {:?}", entry); } else if !self.is_allowed(&entry) { debug!(target: "discovery", "Address not allowed: {:?}", entry); @@ -729,7 +744,7 @@ impl<'a> Discovery<'a> { trace!(target: "discovery", "Got {} Neighbours from {:?}", results_count, &from); for r in rlp.at(0)?.iter() { let endpoint = NodeEndpoint::from_rlp(&r)?; - if !endpoint.is_valid() { + if !endpoint.is_valid_discovery_node() { debug!(target: "discovery", "Bad address: {:?}", endpoint); continue; } diff --git a/util/network-devp2p/src/node_table.rs b/util/network-devp2p/src/node_table.rs index 739babfe0a7..0e39c43ce65 100644 --- a/util/network-devp2p/src/node_table.rs +++ b/util/network-devp2p/src/node_table.rs @@ -106,10 +106,16 @@ impl NodeEndpoint { self.to_rlp(rlp); } - /// Validates that the port is not 0 and address IP is specified - pub fn is_valid(&self) -> bool { - self.udp_port != 0 && self.address.port() != 0 && - match self.address { + /// Validates that the tcp port is not 0 and that the node is a valid discovery node (i.e. `is_valid_discovery_node()` is true). + /// Sync happens over tcp. + pub fn is_valid_sync_node(&self) -> bool { + self.is_valid_discovery_node() && self.address.port() != 0 + } + + /// Validates that the udp port is not 0 and address IP is specified. + /// Peer discovery happens over udp. 
+ pub fn is_valid_discovery_node(&self) -> bool { + self.udp_port != 0 && match self.address { SocketAddr::V4(a) => !a.ip().is_unspecified(), SocketAddr::V6(a) => !a.ip().is_unspecified() } From 78a1d8b7b43064ff2ec353a8fd8bd112bc57a432 Mon Sep 17 00:00:00 2001 From: TriplEight Date: Fri, 14 Jun 2019 13:00:57 +0200 Subject: [PATCH 12/16] fix docker tags for publishing (#10741) --- scripts/docker/hub/publish-docker.sh | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/scripts/docker/hub/publish-docker.sh b/scripts/docker/hub/publish-docker.sh index 6602d55c23a..84feedb2817 100755 --- a/scripts/docker/hub/publish-docker.sh +++ b/scripts/docker/hub/publish-docker.sh @@ -3,7 +3,9 @@ set -e # fail on any error VERSION=$(cat ./tools/VERSION) +TRACK=$(cat ./tools/TRACK) echo "Parity Ethereum version = ${VERSION}" +echo "Parity Ethereum track = ${TRACK}" test "$Docker_Hub_User_Parity" -a "$Docker_Hub_Pass_Parity" \ || ( echo "no docker credentials provided"; exit 1 ) @@ -44,6 +46,14 @@ case "${SCHEDULE_TAG:-${CI_COMMIT_REF_NAME}}" in --file tools/Dockerfile .; docker push "parity/parity:${VERSION}-${CI_COMMIT_REF_NAME}"; docker push "parity/parity:stable";; + v[0-9]*.[0-9]*) + echo "Docker TAG - 'parity/parity:${VERSION}-${TRACK}'" + docker build --no-cache \ + --build-arg VCS_REF="${CI_COMMIT_SHA}" \ + --build-arg BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M:%SZ')" \ + --tag "parity/parity:${VERSION}-${TRACK}" \ + --file tools/Dockerfile .; + docker push "parity/parity:${VERSION}-${TRACK}";; *) echo "Docker TAG - 'parity/parity:${VERSION}-${CI_COMMIT_REF_NAME}'" docker build --no-cache \ From dbdb57a8c00148df1018db9e7649b12335c303c2 Mon Sep 17 00:00:00 2001 From: David Date: Fri, 14 Jun 2019 18:48:35 +0200 Subject: [PATCH 13/16] Fix deprectation warnings on nightly (#10746) * Run `cargo fix` on ethcore * Add note about funky import needed-but-not-needed * Fix benches * cargo fix blockchain --- ethcore/benches/builtin.rs | 11 ++--- ethcore/blockchain/src/blockchain.rs 
| 22 +++++----- ethcore/src/account_db.rs | 36 ++++++++-------- ethcore/src/block.rs | 22 +++++----- ethcore/src/builtin.rs | 28 ++++++------- ethcore/src/client/ancient_import.rs | 8 ++-- ethcore/src/client/client.rs | 34 +++++++-------- ethcore/src/client/io_message.rs | 2 +- ethcore/src/client/test_client.rs | 6 +-- ethcore/src/client/trace.rs | 4 +- ethcore/src/client/traits.rs | 12 +++--- ethcore/src/engines/authority_round/mod.rs | 42 +++++++++---------- ethcore/src/engines/basic_authority.rs | 12 +++--- ethcore/src/engines/clique/mod.rs | 14 +++---- ethcore/src/engines/mod.rs | 24 +++++------ ethcore/src/engines/null_engine.rs | 2 +- ethcore/src/engines/signer.rs | 2 +- ethcore/src/engines/validator_set/contract.rs | 4 +- ethcore/src/engines/validator_set/mod.rs | 4 +- ethcore/src/engines/validator_set/multi.rs | 14 +++---- .../engines/validator_set/safe_contract.rs | 4 +- .../src/engines/validator_set/simple_list.rs | 4 +- ethcore/src/error.rs | 4 +- ethcore/src/ethereum/ethash.rs | 2 +- ethcore/src/executive.rs | 4 +- ethcore/src/factory.rs | 2 +- ethcore/src/lib.rs | 4 ++ ethcore/src/machine/impls.rs | 6 +-- ethcore/src/miner/miner.rs | 6 +-- ethcore/src/miner/pool_client.rs | 8 ++-- ethcore/src/pod_account.rs | 2 +- ethcore/src/snapshot/account.rs | 2 +- ethcore/src/snapshot/consensus/authority.rs | 16 +++---- ethcore/src/snapshot/consensus/mod.rs | 10 ++--- ethcore/src/snapshot/consensus/work.rs | 12 +++--- ethcore/src/snapshot/error.rs | 2 +- ethcore/src/snapshot/mod.rs | 20 ++++----- ethcore/src/snapshot/service.rs | 24 +++++------ ethcore/src/snapshot/tests/helpers.rs | 10 ++--- ethcore/src/snapshot/watcher.rs | 4 +- ethcore/src/spec/spec.rs | 4 +- ethcore/src/state/account.rs | 16 +++---- ethcore/src/state/backend.rs | 26 ++++++------ ethcore/src/state/mod.rs | 4 +- ethcore/src/state_db.rs | 16 +++---- ethcore/src/test_helpers.rs | 16 +++---- ethcore/src/trace/db.rs | 4 +- ethcore/src/verification/canon_verifier.rs | 4 +- 
ethcore/src/verification/mod.rs | 2 +- ethcore/src/verification/noop_verifier.rs | 4 +- ethcore/src/verification/queue/kind.rs | 12 +++--- ethcore/src/verification/queue/mod.rs | 6 +-- ethcore/src/verification/verification.rs | 22 +++++----- ethcore/src/verification/verifier.rs | 4 +- 54 files changed, 295 insertions(+), 294 deletions(-) diff --git a/ethcore/benches/builtin.rs b/ethcore/benches/builtin.rs index d7ed483dd03..974284bb014 100644 --- a/ethcore/benches/builtin.rs +++ b/ethcore/benches/builtin.rs @@ -29,7 +29,7 @@ use criterion::{Criterion, Bencher}; use bytes::BytesRef; use ethcore::builtin::Builtin; use ethcore::machine::EthereumMachine; -use ethereum_types::U256; +use ethereum_types::H160; use ethcore::ethereum::new_byzantium_test_machine; use rustc_hex::FromHex; @@ -46,8 +46,9 @@ struct BuiltinBenchmark<'a> { impl<'a> BuiltinBenchmark<'a> { fn new(builtin_address: &'static str, input: &str, expected: &str) -> BuiltinBenchmark<'a> { let builtins = BYZANTIUM_MACHINE.builtins(); - - let builtin = builtins.get(&builtin_address.into()).unwrap().clone(); + use std::str::FromStr; + let addr = H160::from_str(builtin_address).unwrap(); + let builtin = builtins.get(&addr).unwrap().clone(); let input = FromHex::from_hex(input).unwrap(); let expected = FromHex::from_hex(expected).unwrap(); @@ -56,10 +57,6 @@ impl<'a> BuiltinBenchmark<'a> { } } - fn gas_cost(&self) -> U256 { - self.builtin.cost(&self.input) - } - fn run(&self, b: &mut Bencher) { let mut output = vec![0; self.expected.len()]; diff --git a/ethcore/blockchain/src/blockchain.rs b/ethcore/blockchain/src/blockchain.rs index 7cf6b12dd5c..afebacad9b4 100644 --- a/ethcore/blockchain/src/blockchain.rs +++ b/ethcore/blockchain/src/blockchain.rs @@ -57,7 +57,7 @@ use crate::{CacheSize, ImportRoute, Config}; /// Database backing `BlockChain`. pub trait BlockChainDB: Send + Sync { /// Generic key value store. - fn key_value(&self) -> &Arc; + fn key_value(&self) -> &Arc; /// Header blooms database. 
fn blooms(&self) -> &blooms_db::Database; @@ -85,7 +85,7 @@ pub trait BlockChainDB: Send + Sync { /// predefined config. pub trait BlockChainDBHandler: Send + Sync { /// Open the predefined key-value database. - fn open(&self, path: &Path) -> io::Result>; + fn open(&self, path: &Path) -> io::Result>; } /// Interface for querying blocks by hash and by number. @@ -228,7 +228,7 @@ pub struct BlockChain { transaction_addresses: RwLock>, block_receipts: RwLock>, - db: Arc, + db: Arc, cache_man: Mutex>, @@ -481,7 +481,7 @@ impl<'a> Iterator for AncestryWithMetadataIter<'a> { /// Returns epoch transitions. pub struct EpochTransitionIter<'a> { chain: &'a BlockChain, - prefix_iter: Box, Box<[u8]>)> + 'a>, + prefix_iter: Box, Box<[u8]>)> + 'a>, } impl<'a> Iterator for EpochTransitionIter<'a> { @@ -521,7 +521,7 @@ impl<'a> Iterator for EpochTransitionIter<'a> { impl BlockChain { /// Create new instance of blockchain from given Genesis. - pub fn new(config: Config, genesis: &[u8], db: Arc) -> BlockChain { + pub fn new(config: Config, genesis: &[u8], db: Arc) -> BlockChain { // 400 is the average size of the key let cache_man = CacheManager::new(config.pref_cache_size, config.max_cache_size, 400); @@ -1592,11 +1592,11 @@ mod tests { _trace_blooms_dir: TempDir, blooms: blooms_db::Database, trace_blooms: blooms_db::Database, - key_value: Arc, + key_value: Arc, } impl BlockChainDB for TestBlockChainDB { - fn key_value(&self) -> &Arc { + fn key_value(&self) -> &Arc { &self.key_value } @@ -1610,7 +1610,7 @@ mod tests { } /// Creates new test instance of `BlockChainDB` - pub fn new_db() -> Arc { + pub fn new_db() -> Arc { let blooms_dir = TempDir::new("").unwrap(); let trace_blooms_dir = TempDir::new("").unwrap(); @@ -1625,15 +1625,15 @@ mod tests { Arc::new(db) } - fn new_chain(genesis: encoded::Block, db: Arc) -> BlockChain { + fn new_chain(genesis: encoded::Block, db: Arc) -> BlockChain { BlockChain::new(Config::default(), genesis.raw(), db) } - fn insert_block(db: &Arc, bc: 
&BlockChain, block: encoded::Block, receipts: Vec) -> ImportRoute { + fn insert_block(db: &Arc, bc: &BlockChain, block: encoded::Block, receipts: Vec) -> ImportRoute { insert_block_commit(db, bc, block, receipts, true) } - fn insert_block_commit(db: &Arc, bc: &BlockChain, block: encoded::Block, receipts: Vec, commit: bool) -> ImportRoute { + fn insert_block_commit(db: &Arc, bc: &BlockChain, block: encoded::Block, receipts: Vec, commit: bool) -> ImportRoute { let mut batch = db.key_value().transaction(); let res = insert_block_batch(&mut batch, bc, block, receipts); db.key_value().write(batch).unwrap(); diff --git a/ethcore/src/account_db.rs b/ethcore/src/account_db.rs index fcc6f74c6cd..bf1380908f8 100644 --- a/ethcore/src/account_db.rs +++ b/ethcore/src/account_db.rs @@ -57,7 +57,7 @@ impl Default for Factory { impl Factory { /// Create a read-only accountdb. /// This will panic when write operations are called. - pub fn readonly<'db>(&self, db: &'db HashDB, address_hash: H256) -> Box + 'db> { + pub fn readonly<'db>(&self, db: &'db dyn HashDB, address_hash: H256) -> Box + 'db> { match *self { Factory::Mangled => Box::new(AccountDB::from_hash(db, address_hash)), Factory::Plain => Box::new(Wrapping(db)), @@ -65,7 +65,7 @@ impl Factory { } /// Create a new mutable hashdb. - pub fn create<'db>(&self, db: &'db mut HashDB, address_hash: H256) -> Box + 'db> { + pub fn create<'db>(&self, db: &'db mut dyn HashDB, address_hash: H256) -> Box + 'db> { match *self { Factory::Mangled => Box::new(AccountDBMut::from_hash(db, address_hash)), Factory::Plain => Box::new(WrappingMut(db)), @@ -77,19 +77,19 @@ impl Factory { /// DB backend wrapper for Account trie /// Transforms trie node keys for the database pub struct AccountDB<'db> { - db: &'db HashDB, + db: &'db dyn HashDB, address_hash: H256, } impl<'db> AccountDB<'db> { /// Create a new AccountDB from an address. 
#[cfg(test)] - pub fn new(db: &'db HashDB, address: &Address) -> Self { + pub fn new(db: &'db dyn HashDB, address: &Address) -> Self { Self::from_hash(db, keccak(address)) } /// Create a new AcountDB from an address' hash. - pub fn from_hash(db: &'db HashDB, address_hash: H256) -> Self { + pub fn from_hash(db: &'db dyn HashDB, address_hash: H256) -> Self { AccountDB { db: db, address_hash: address_hash, @@ -98,8 +98,8 @@ impl<'db> AccountDB<'db> { } impl<'db> AsHashDB for AccountDB<'db> { - fn as_hash_db(&self) -> &HashDB { self } - fn as_hash_db_mut(&mut self) -> &mut HashDB { self } + fn as_hash_db(&self) -> &dyn HashDB { self } + fn as_hash_db_mut(&mut self) -> &mut dyn HashDB { self } } impl<'db> HashDB for AccountDB<'db> { @@ -132,19 +132,19 @@ impl<'db> HashDB for AccountDB<'db> { /// DB backend wrapper for Account trie pub struct AccountDBMut<'db> { - db: &'db mut HashDB, + db: &'db mut dyn HashDB, address_hash: H256, } impl<'db> AccountDBMut<'db> { /// Create a new AccountDB from an address. #[cfg(test)] - pub fn new(db: &'db mut HashDB, address: &Address) -> Self { + pub fn new(db: &'db mut dyn HashDB, address: &Address) -> Self { Self::from_hash(db, keccak(address)) } /// Create a new AcountDB from an address' hash. 
- pub fn from_hash(db: &'db mut HashDB, address_hash: H256) -> Self { + pub fn from_hash(db: &'db mut dyn HashDB, address_hash: H256) -> Self { AccountDBMut { db: db, address_hash: address_hash, @@ -200,15 +200,15 @@ impl<'db> HashDB for AccountDBMut<'db>{ } impl<'db> AsHashDB for AccountDBMut<'db> { - fn as_hash_db(&self) -> &HashDB { self } - fn as_hash_db_mut(&mut self) -> &mut HashDB { self } + fn as_hash_db(&self) -> &dyn HashDB { self } + fn as_hash_db_mut(&mut self) -> &mut dyn HashDB { self } } -struct Wrapping<'db>(&'db HashDB); +struct Wrapping<'db>(&'db dyn HashDB); impl<'db> AsHashDB for Wrapping<'db> { - fn as_hash_db(&self) -> &HashDB { self } - fn as_hash_db_mut(&mut self) -> &mut HashDB { self } + fn as_hash_db(&self) -> &dyn HashDB { self } + fn as_hash_db_mut(&mut self) -> &mut dyn HashDB { self } } impl<'db> HashDB for Wrapping<'db> { @@ -239,10 +239,10 @@ impl<'db> HashDB for Wrapping<'db> { } } -struct WrappingMut<'db>(&'db mut HashDB); +struct WrappingMut<'db>(&'db mut dyn HashDB); impl<'db> AsHashDB for WrappingMut<'db> { - fn as_hash_db(&self) -> &HashDB { self } - fn as_hash_db_mut(&mut self) -> &mut HashDB { self } + fn as_hash_db(&self) -> &dyn HashDB { self } + fn as_hash_db_mut(&mut self) -> &mut dyn HashDB { self } } impl<'db> HashDB for WrappingMut<'db>{ diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index fcecc53ab88..4a75034f025 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -61,7 +61,7 @@ use types::receipt::{Receipt, TransactionOutcome}; /// maintain the system `state()`. We also archive execution receipts in preparation for later block creation. pub struct OpenBlock<'x> { block: ExecutedBlock, - engine: &'x EthEngine, + engine: &'x dyn EthEngine, } /// Just like `OpenBlock`, except that we've applied `Engine::on_close_block`, finished up the non-seal header fields, @@ -163,7 +163,7 @@ pub trait Drain { impl<'x> OpenBlock<'x> { /// Create a new `OpenBlock` ready for transaction pushing. 
pub fn new<'a, I: IntoIterator>( - engine: &'x EthEngine, + engine: &'x dyn EthEngine, factories: Factories, tracing: bool, db: StateDB, @@ -374,7 +374,7 @@ impl ClosedBlock { } /// Given an engine reference, reopen the `ClosedBlock` into an `OpenBlock`. - pub fn reopen(self, engine: &EthEngine) -> OpenBlock { + pub fn reopen(self, engine: &dyn EthEngine) -> OpenBlock { // revert rewards (i.e. set state back at last transaction's state). let mut block = self.block; block.state = self.unclosed_state; @@ -404,7 +404,7 @@ impl LockedBlock { /// Provide a valid seal in order to turn this into a `SealedBlock`. /// /// NOTE: This does not check the validity of `seal` with the engine. - pub fn seal(self, engine: &EthEngine, seal: Vec) -> Result { + pub fn seal(self, engine: &dyn EthEngine, seal: Vec) -> Result { let expected_seal_fields = engine.seal_fields(&self.header); let mut s = self; if seal.len() != expected_seal_fields { @@ -429,7 +429,7 @@ impl LockedBlock { /// TODO(https://github.com/paritytech/parity-ethereum/issues/10407): This is currently only used in POW chain call paths, we should really merge it with seal() above. pub fn try_seal( self, - engine: &EthEngine, + engine: &dyn EthEngine, seal: Vec, ) -> Result { let mut s = self; @@ -472,14 +472,14 @@ pub(crate) fn enact( header: Header, transactions: Vec, uncles: Vec
, - engine: &EthEngine, + engine: &dyn EthEngine, tracing: bool, db: StateDB, parent: &Header, last_hashes: Arc, factories: Factories, is_epoch_begin: bool, - ancestry: &mut Iterator, + ancestry: &mut dyn Iterator, ) -> Result { // For trace log let trace_state = if log_enabled!(target: "enact", ::log::Level::Trace) { @@ -525,14 +525,14 @@ pub(crate) fn enact( /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header pub fn enact_verified( block: PreverifiedBlock, - engine: &EthEngine, + engine: &dyn EthEngine, tracing: bool, db: StateDB, parent: &Header, last_hashes: Arc, factories: Factories, is_epoch_begin: bool, - ancestry: &mut Iterator, + ancestry: &mut dyn Iterator, ) -> Result { enact( @@ -570,7 +570,7 @@ mod tests { /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header fn enact_bytes( block_bytes: Vec, - engine: &EthEngine, + engine: &dyn EthEngine, tracing: bool, db: StateDB, parent: &Header, @@ -623,7 +623,7 @@ mod tests { /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. 
Seal the block aferwards fn enact_and_seal( block_bytes: Vec, - engine: &EthEngine, + engine: &dyn EthEngine, tracing: bool, db: StateDB, parent: &Header, diff --git a/ethcore/src/builtin.rs b/ethcore/src/builtin.rs index b9d79fc3d2b..76d00ba5269 100644 --- a/ethcore/src/builtin.rs +++ b/ethcore/src/builtin.rs @@ -157,8 +157,8 @@ impl ModexpPricer { /// /// Unless `is_active` is true, pub struct Builtin { - pricer: Box, - native: Box, + pricer: Box, + native: Box, activate_at: u64, } @@ -177,7 +177,7 @@ impl Builtin { impl From for Builtin { fn from(b: ethjson::spec::Builtin) -> Self { - let pricer: Box = match b.pricing { + let pricer: Box = match b.pricing { ethjson::spec::Pricing::Linear(linear) => { Box::new(Linear { base: linear.base, @@ -211,16 +211,16 @@ impl From for Builtin { } /// Ethereum built-in factory. -pub fn ethereum_builtin(name: &str) -> Box { +pub fn ethereum_builtin(name: &str) -> Box { match name { - "identity" => Box::new(Identity) as Box, - "ecrecover" => Box::new(EcRecover) as Box, - "sha256" => Box::new(Sha256) as Box, - "ripemd160" => Box::new(Ripemd160) as Box, - "modexp" => Box::new(ModexpImpl) as Box, - "alt_bn128_add" => Box::new(Bn128AddImpl) as Box, - "alt_bn128_mul" => Box::new(Bn128MulImpl) as Box, - "alt_bn128_pairing" => Box::new(Bn128PairingImpl) as Box, + "identity" => Box::new(Identity) as Box, + "ecrecover" => Box::new(EcRecover) as Box, + "sha256" => Box::new(Sha256) as Box, + "ripemd160" => Box::new(Ripemd160) as Box, + "modexp" => Box::new(ModexpImpl) as Box, + "alt_bn128_add" => Box::new(Bn128AddImpl) as Box, + "alt_bn128_mul" => Box::new(Bn128MulImpl) as Box, + "alt_bn128_pairing" => Box::new(Bn128PairingImpl) as Box, _ => panic!("invalid builtin name: {}", name), } } @@ -1008,7 +1008,7 @@ mod tests { fn is_active() { let pricer = Box::new(Linear { base: 10, word: 20} ); let b = Builtin { - pricer: pricer as Box, + pricer: pricer as Box, native: ethereum_builtin("identity"), activate_at: 100_000, }; @@ -1022,7 +1022,7 
@@ mod tests { fn from_named_linear() { let pricer = Box::new(Linear { base: 10, word: 20 }); let b = Builtin { - pricer: pricer as Box, + pricer: pricer as Box, native: ethereum_builtin("identity"), activate_at: 1, }; diff --git a/ethcore/src/client/ancient_import.rs b/ethcore/src/client/ancient_import.rs index 2a0a970cd69..46cd519a7aa 100644 --- a/ethcore/src/client/ancient_import.rs +++ b/ethcore/src/client/ancient_import.rs @@ -32,13 +32,13 @@ const HEAVY_VERIFY_RATE: f32 = 0.02; /// Ancient block verifier: import an ancient sequence of blocks in order from a starting /// epoch. pub struct AncientVerifier { - cur_verifier: RwLock>>>, - engine: Arc, + cur_verifier: RwLock>>>, + engine: Arc, } impl AncientVerifier { /// Create a new ancient block verifier with the given engine. - pub fn new(engine: Arc) -> Self { + pub fn new(engine: Arc) -> Self { AncientVerifier { cur_verifier: RwLock::new(None), engine, @@ -87,7 +87,7 @@ impl AncientVerifier { } fn initial_verifier(&self, header: &Header, chain: &BlockChain) - -> Result>, ::error::Error> + -> Result>, ::error::Error> { trace!(target: "client", "Initializing ancient block restoration."); let current_epoch_data = chain.epoch_transitions() diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index d22cff0942d..f0a2cb3e185 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -153,7 +153,7 @@ struct Importer { pub import_lock: Mutex<()>, // FIXME Maybe wrap the whole `Importer` instead? 
/// Used to verify blocks - pub verifier: Box>, + pub verifier: Box>, /// Queue containing pending blocks pub block_queue: BlockQueue, @@ -165,7 +165,7 @@ struct Importer { pub ancient_verifier: AncientVerifier, /// Ethereum engine to be used during import - pub engine: Arc, + pub engine: Arc, /// A lru cache of recently detected bad blocks pub bad_blocks: bad_blocks::BadBlocks, @@ -187,7 +187,7 @@ pub struct Client { chain: RwLock>, tracedb: RwLock>, - engine: Arc, + engine: Arc, /// Client configuration config: ClientConfig, @@ -196,7 +196,7 @@ pub struct Client { pruning: journaldb::Algorithm, /// Client uses this to store blocks, traces, etc. - db: RwLock>, + db: RwLock>, state_db: RwLock, @@ -210,7 +210,7 @@ pub struct Client { io_channel: RwLock>, /// List of actors to be notified on certain chain events - notify: RwLock>>, + notify: RwLock>>, /// Queued transactions from IO queue_transactions: IoChannelQueue, @@ -232,12 +232,12 @@ pub struct Client { history: u64, /// An action to be done if a mode/spec_name change happens - on_user_defaults_change: Mutex) + 'static + Send>>>, + on_user_defaults_change: Mutex) + 'static + Send>>>, registrar_address: Option
, /// A closure to call when we want to restart the client - exit_handler: Mutex>>, + exit_handler: Mutex>>, importer: Importer, } @@ -245,7 +245,7 @@ pub struct Client { impl Importer { pub fn new( config: &ClientConfig, - engine: Arc, + engine: Arc, message_channel: IoChannel, miner: Arc, ) -> Result { @@ -449,7 +449,7 @@ impl Importer { /// /// The block is guaranteed to be the next best blocks in the /// first block sequence. Does no sealing or transaction validation. - fn import_old_block(&self, unverified: Unverified, receipts_bytes: &[u8], db: &KeyValueDB, chain: &BlockChain) -> EthcoreResult<()> { + fn import_old_block(&self, unverified: Unverified, receipts_bytes: &[u8], db: &dyn KeyValueDB, chain: &BlockChain) -> EthcoreResult<()> { let receipts = ::rlp::decode_list(receipts_bytes); let _import_lock = self.import_lock.lock(); @@ -702,7 +702,7 @@ impl Client { pub fn new( config: ClientConfig, spec: &Spec, - db: Arc, + db: Arc, miner: Arc, message_channel: IoChannel, ) -> Result, ::error::Error> { @@ -844,7 +844,7 @@ impl Client { } /// Adds an actor to be notified on certain events - pub fn add_notify(&self, target: Arc) { + pub fn add_notify(&self, target: Arc) { self.notify.write().push(Arc::downgrade(&target)); } @@ -857,11 +857,11 @@ impl Client { } /// Returns engine reference. - pub fn engine(&self) -> &EthEngine { + pub fn engine(&self) -> &dyn EthEngine { &*self.engine } - fn notify(&self, f: F) where F: Fn(&ChainNotify) { + fn notify(&self, f: F) where F: Fn(&dyn ChainNotify) { for np in &*self.notify.read() { if let Some(n) = np.upgrade() { f(&*n); @@ -1071,7 +1071,7 @@ impl Client { } /// Get a copy of the best block's state. 
- pub fn state(&self) -> Box { + pub fn state(&self) -> Box { Box::new(self.latest_state()) as Box<_> } @@ -1648,7 +1648,7 @@ impl Call for Client { } impl EngineInfo for Client { - fn engine(&self) -> &EthEngine { + fn engine(&self) -> &dyn EthEngine { Client::engine(self) } } @@ -1668,7 +1668,7 @@ impl BlockChainClient for Client { Ok(self.replay_block_transactions(block, analytics)?.nth(address.index).expect(PROOF).1) } - fn replay_block_transactions(&self, block: BlockId, analytics: CallAnalytics) -> Result>, CallError> { + fn replay_block_transactions(&self, block: BlockId, analytics: CallAnalytics) -> Result>, CallError> { let mut env_info = self.env_info(block).ok_or(CallError::StatePruned)?; let body = self.block_body(block).ok_or(CallError::StatePruned)?; let mut state = self.state_at_beginning(block).ok_or(CallError::StatePruned)?; @@ -2495,7 +2495,7 @@ impl super::traits::EngineClient for Client { self.chain.read().epoch_transition_for(parent_hash) } - fn as_full_client(&self) -> Option<&BlockChainClient> { Some(self) } + fn as_full_client(&self) -> Option<&dyn BlockChainClient> { Some(self) } fn block_number(&self, id: BlockId) -> Option { BlockChainClient::block_number(self, id) diff --git a/ethcore/src/client/io_message.rs b/ethcore/src/client/io_message.rs index 92e2d3e258e..5fcf8f9ff8d 100644 --- a/ethcore/src/client/io_message.rs +++ b/ethcore/src/client/io_message.rs @@ -47,7 +47,7 @@ impl ClientIoMessage { } /// A function to invoke in the client thread. 
-pub struct Callback(pub Box); +pub struct Callback(pub Box); impl fmt::Debug for Callback { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index c9c81ecb858..1b28ca2cdbd 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -627,7 +627,7 @@ impl StateClient for TestBlockChainClient { } impl EngineInfo for TestBlockChainClient { - fn engine(&self) -> &EthEngine { + fn engine(&self) -> &dyn EthEngine { unimplemented!() } } @@ -661,7 +661,7 @@ impl BlockChainClient for TestBlockChainClient { } } - fn replay_block_transactions(&self, _block: BlockId, _analytics: CallAnalytics) -> Result>, CallError> { + fn replay_block_transactions(&self, _block: BlockId, _analytics: CallAnalytics) -> Result>, CallError> { Ok(Box::new( self.traces .read() @@ -955,7 +955,7 @@ impl super::traits::EngineClient for TestBlockChainClient { None } - fn as_full_client(&self) -> Option<&BlockChainClient> { Some(self) } + fn as_full_client(&self) -> Option<&dyn BlockChainClient> { Some(self) } fn block_number(&self, id: BlockId) -> Option { BlockChainClient::block_number(self, id) diff --git a/ethcore/src/client/trace.rs b/ethcore/src/client/trace.rs index 73563a1d0f6..7be957b33ec 100644 --- a/ethcore/src/client/trace.rs +++ b/ethcore/src/client/trace.rs @@ -23,11 +23,11 @@ use types::BlockNumber; impl TraceDatabaseExtras for BlockChain { fn block_hash(&self, block_number: BlockNumber) -> Option { - (self as &BlockProvider).block_hash(block_number) + (self as &dyn BlockProvider).block_hash(block_number) } fn transaction_hash(&self, block_number: BlockNumber, tx_position: usize) -> Option { - (self as &BlockProvider).block_hash(block_number) + (self as &dyn BlockProvider).block_hash(block_number) .and_then(|block_hash| { let tx_address = TransactionAddress { block_hash: block_hash, diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs index 
bcfa7417c4b..9d0b27fc709 100644 --- a/ethcore/src/client/traits.rs +++ b/ethcore/src/client/traits.rs @@ -55,7 +55,7 @@ use verification::queue::kind::blocks::Unverified; /// State information to be used during client query pub enum StateOrBlock { /// State to be used, may be pending - State(Box), + State(Box), /// Id of an existing block from a chain to get state from Block(BlockId) @@ -67,8 +67,8 @@ impl From for StateOrBlock { } } -impl From> for StateOrBlock { - fn from(info: Box) -> StateOrBlock { +impl From> for StateOrBlock { + fn from(info: Box) -> StateOrBlock { StateOrBlock::State(info) } } @@ -184,7 +184,7 @@ pub trait Call { /// Provides `engine` method pub trait EngineInfo { /// Get underlying engine object - fn engine(&self) -> &EthEngine; + fn engine(&self) -> &dyn EthEngine; } /// IO operations that should off-load heavy work to another thread. @@ -306,7 +306,7 @@ pub trait BlockChainClient : Sync + Send + AccountData + BlockChain + CallContra fn replay(&self, t: TransactionId, analytics: CallAnalytics) -> Result; /// Replays all the transactions in a given block for inspection. - fn replay_block_transactions(&self, block: BlockId, analytics: CallAnalytics) -> Result>, CallError>; + fn replay_block_transactions(&self, block: BlockId, analytics: CallAnalytics) -> Result>, CallError>; /// Returns traces matching given filter. fn filter_traces(&self, filter: TraceFilter) -> Option>; @@ -441,7 +441,7 @@ pub trait EngineClient: Sync + Send + ChainInfo { fn epoch_transition_for(&self, parent_hash: H256) -> Option<::engines::EpochTransition>; /// Attempt to cast the engine client to a full client. - fn as_full_client(&self) -> Option<&BlockChainClient>; + fn as_full_client(&self) -> Option<&dyn BlockChainClient>; /// Get a block number by ID. 
fn block_number(&self, id: BlockId) -> Option; diff --git a/ethcore/src/engines/authority_round/mod.rs b/ethcore/src/engines/authority_round/mod.rs index 8bf838d2dc5..6f2723bba71 100644 --- a/ethcore/src/engines/authority_round/mod.rs +++ b/ethcore/src/engines/authority_round/mod.rs @@ -63,7 +63,7 @@ pub struct AuthorityRoundParams { /// Starting step, pub start_step: Option, /// Valid validators. - pub validators: Box, + pub validators: Box, /// Chain score validation transition block. pub validate_score_transition: u64, /// Monotonic step validation transition block. @@ -222,9 +222,9 @@ impl EpochManager { // Zooms to the epoch after the header with the given hash. Returns true if succeeded, false otherwise. fn zoom_to_after( &mut self, - client: &EngineClient, + client: &dyn EngineClient, machine: &EthereumMachine, - validators: &ValidatorSet, + validators: &dyn ValidatorSet, hash: H256 ) -> bool { let last_was_parent = self.finality_checker.subchain_head() == Some(hash); @@ -324,7 +324,7 @@ impl EmptyStep { EmptyStep { signature, step, parent_hash } } - fn verify(&self, validators: &ValidatorSet) -> Result { + fn verify(&self, validators: &dyn ValidatorSet) -> Result { let message = keccak(empty_step_rlp(self.step, &self.parent_hash)); let correct_proposer = step_proposer(validators, &self.parent_hash, self.step); @@ -419,9 +419,9 @@ struct PermissionedStep { pub struct AuthorityRound { transition_service: IoService<()>, step: Arc, - client: Arc>>>, - signer: RwLock>>, - validators: Box, + client: Arc>>>, + signer: RwLock>>, + validators: Box, validate_score_transition: u64, validate_step_transition: u64, empty_steps: Mutex>, @@ -563,13 +563,13 @@ fn header_empty_steps_signers(header: &Header, empty_steps_transition: u64) -> R } } -fn step_proposer(validators: &ValidatorSet, bh: &H256, step: u64) -> Address { +fn step_proposer(validators: &dyn ValidatorSet, bh: &H256, step: u64) -> Address { let proposer = validators.get(bh, step as usize); trace!(target: 
"engine", "Fetched proposer for step {}: {}", step, proposer); proposer } -fn is_step_proposer(validators: &ValidatorSet, bh: &H256, step: u64, address: &Address) -> bool { +fn is_step_proposer(validators: &dyn ValidatorSet, bh: &H256, step: u64, address: &Address) -> bool { step_proposer(validators, bh, step) == *address } @@ -597,7 +597,7 @@ fn verify_timestamp(step: &Step, header_step: u64) -> Result<(), BlockError> { } } -fn verify_external(header: &Header, validators: &ValidatorSet, empty_steps_transition: u64) -> Result<(), Error> { +fn verify_external(header: &Header, validators: &dyn ValidatorSet, empty_steps_transition: u64) -> Result<(), Error> { let header_step = header_step(header, empty_steps_transition)?; let proposer_signature = header_signature(header, empty_steps_transition)?; @@ -716,7 +716,7 @@ impl AuthorityRound { // fetch correct validator set for epoch at header, taking into account // finality of previous transitions. - fn epoch_set<'a>(&'a self, header: &Header) -> Result<(CowLike, BlockNumber), Error> { + fn epoch_set<'a>(&'a self, header: &Header) -> Result<(CowLike, BlockNumber), Error> { Ok(if self.immediate_transitions { (CowLike::Borrowed(&*self.validators), header.number()) } else { @@ -802,7 +802,7 @@ impl AuthorityRound { } } - fn report_skipped(&self, header: &Header, current_step: u64, parent_step: u64, validators: &ValidatorSet, set_number: u64) { + fn report_skipped(&self, header: &Header, current_step: u64, parent_step: u64, validators: &dyn ValidatorSet, set_number: u64) { // we're building on top of the genesis block so don't report any skipped steps if header.number() == 1 { return; @@ -825,7 +825,7 @@ impl AuthorityRound { } // Returns the hashes of all ancestor blocks that are finalized by the given `chain_head`. 
- fn build_finality(&self, chain_head: &Header, ancestry: &mut Iterator) -> Vec { + fn build_finality(&self, chain_head: &Header, ancestry: &mut dyn Iterator) -> Vec { if self.immediate_transitions { return Vec::new() } let client = match self.client.read().as_ref().and_then(|weak| weak.upgrade()) { @@ -894,7 +894,7 @@ fn unix_now() -> Duration { struct TransitionHandler { step: Arc, - client: Arc>>>, + client: Arc>>>, } const ENGINE_TIMEOUT_TOKEN: TimerToken = 23; @@ -1198,7 +1198,7 @@ impl Engine for AuthorityRound { &self, block: &mut ExecutedBlock, epoch_begin: bool, - _ancestry: &mut Iterator, + _ancestry: &mut dyn Iterator, ) -> Result<(), Error> { // with immediate transitions, we don't use the epoch mechanism anyway. // the genesis is always considered an epoch, but we ignore it intentionally. @@ -1555,12 +1555,12 @@ impl Engine for AuthorityRound { } } - fn register_client(&self, client: Weak) { + fn register_client(&self, client: Weak) { *self.client.write() = Some(client.clone()); self.validators.register_client(client); } - fn set_signer(&self, signer: Box) { + fn set_signer(&self, signer: Box) { *self.signer.write() = Some(signer); } @@ -1572,7 +1572,7 @@ impl Engine for AuthorityRound { ) } - fn snapshot_components(&self) -> Option> { + fn snapshot_components(&self) -> Option> { if self.immediate_transitions { None } else { @@ -1584,7 +1584,7 @@ impl Engine for AuthorityRound { super::total_difficulty_fork_choice(new, current) } - fn ancestry_actions(&self, header: &Header, ancestry: &mut Iterator) -> Vec { + fn ancestry_actions(&self, header: &Header, ancestry: &mut dyn Iterator) -> Vec { let finalized = self.build_finality( header, &mut ancestry.take_while(|e| !e.is_finalized).map(|e| e.header), @@ -1908,14 +1908,14 @@ mod tests { (spec, tap, accounts) } - fn empty_step(engine: &EthEngine, step: u64, parent_hash: &H256) -> EmptyStep { + fn empty_step(engine: &dyn EthEngine, step: u64, parent_hash: &H256) -> EmptyStep { let empty_step_rlp = 
super::empty_step_rlp(step, parent_hash); let signature = engine.sign(keccak(&empty_step_rlp)).unwrap().into(); let parent_hash = parent_hash.clone(); EmptyStep { step, signature, parent_hash } } - fn sealed_empty_step(engine: &EthEngine, step: u64, parent_hash: &H256) -> SealedEmptyStep { + fn sealed_empty_step(engine: &dyn EthEngine, step: u64, parent_hash: &H256) -> SealedEmptyStep { let empty_step_rlp = super::empty_step_rlp(step, parent_hash); let signature = engine.sign(keccak(&empty_step_rlp)).unwrap().into(); SealedEmptyStep { signature, step } diff --git a/ethcore/src/engines/basic_authority.rs b/ethcore/src/engines/basic_authority.rs index 21652f0a12c..098c3f558a1 100644 --- a/ethcore/src/engines/basic_authority.rs +++ b/ethcore/src/engines/basic_authority.rs @@ -55,7 +55,7 @@ impl super::EpochVerifier for EpochVerifier { } } -fn verify_external(header: &Header, validators: &ValidatorSet) -> Result<(), Error> { +fn verify_external(header: &Header, validators: &dyn ValidatorSet) -> Result<(), Error> { use rlp::Rlp; // Check if the signature belongs to a validator, can depend on parent state. @@ -75,8 +75,8 @@ fn verify_external(header: &Header, validators: &ValidatorSet) -> Result<(), Err /// Engine using `BasicAuthority`, trivial proof-of-authority consensus. 
pub struct BasicAuthority { machine: EthereumMachine, - signer: RwLock>>, - validators: Box, + signer: RwLock>>, + validators: Box, } impl BasicAuthority { @@ -189,11 +189,11 @@ impl Engine for BasicAuthority { } } - fn register_client(&self, client: Weak) { + fn register_client(&self, client: Weak) { self.validators.register_client(client); } - fn set_signer(&self, signer: Box) { + fn set_signer(&self, signer: Box) { *self.signer.write() = Some(signer); } @@ -205,7 +205,7 @@ impl Engine for BasicAuthority { ) } - fn snapshot_components(&self) -> Option> { + fn snapshot_components(&self) -> Option> { None } diff --git a/ethcore/src/engines/clique/mod.rs b/ethcore/src/engines/clique/mod.rs index 5884ab05d3a..052f0a578ce 100644 --- a/ethcore/src/engines/clique/mod.rs +++ b/ethcore/src/engines/clique/mod.rs @@ -162,10 +162,10 @@ pub struct Clique { epoch_length: u64, period: u64, machine: EthereumMachine, - client: RwLock>>, + client: RwLock>>, block_state_by_hash: RwLock>, proposals: RwLock>, - signer: RwLock>>, + signer: RwLock>>, } #[cfg(test)] @@ -174,10 +174,10 @@ pub struct Clique { pub epoch_length: u64, pub period: u64, pub machine: EthereumMachine, - pub client: RwLock>>, + pub client: RwLock>>, pub block_state_by_hash: RwLock>, pub proposals: RwLock>, - pub signer: RwLock>>, + pub signer: RwLock>>, } impl Clique { @@ -370,7 +370,7 @@ impl Engine for Clique { &self, _block: &mut ExecutedBlock, _epoch_begin: bool, - _ancestry: &mut Iterator, + _ancestry: &mut dyn Iterator, ) -> Result<(), Error> { Ok(()) } @@ -736,12 +736,12 @@ impl Engine for Clique { } } - fn set_signer(&self, signer: Box) { + fn set_signer(&self, signer: Box) { trace!(target: "engine", "set_signer: {}", signer.address()); *self.signer.write() = Some(signer); } - fn register_client(&self, client: Weak) { + fn register_client(&self, client: Weak) { *self.client.write() = Some(client.clone()); } diff --git a/ethcore/src/engines/mod.rs b/ethcore/src/engines/mod.rs index d51e31ea17a..e0db2181e9d 
100644 --- a/ethcore/src/engines/mod.rs +++ b/ethcore/src/engines/mod.rs @@ -175,10 +175,10 @@ pub enum SealingState { } /// A system-calling closure. Enacts calls on a block's state from the system address. -pub type SystemCall<'a> = FnMut(Address, Vec) -> Result, String> + 'a; +pub type SystemCall<'a> = dyn FnMut(Address, Vec) -> Result, String> + 'a; /// A system-calling closure. Enacts calls on a block's state with code either from an on-chain contract, or hard-coded EVM or WASM (if enabled on-chain) codes. -pub type SystemOrCodeCall<'a> = FnMut(SystemOrCodeCallKind, Vec) -> Result, String> + 'a; +pub type SystemOrCodeCall<'a> = dyn FnMut(SystemOrCodeCallKind, Vec) -> Result, String> + 'a; /// Kind of SystemOrCodeCall, this is either an on-chain address, or code. #[derive(PartialEq, Debug, Clone)] @@ -220,10 +220,10 @@ pub fn default_system_or_code_call<'a>(machine: &'a ::machine::EthereumMachine, } /// Type alias for a function we can get headers by hash through. -pub type Headers<'a, H> = Fn(H256) -> Option + 'a; +pub type Headers<'a, H> = dyn Fn(H256) -> Option + 'a; /// Type alias for a function we can query pending transitions by block hash through. -pub type PendingTransitionStore<'a> = Fn(H256) -> Option + 'a; +pub type PendingTransitionStore<'a> = dyn Fn(H256) -> Option + 'a; /// Proof dependent on state. pub trait StateDependentProof: Send + Sync { @@ -240,16 +240,16 @@ pub enum Proof { /// Known proof (extracted from signal) Known(Vec), /// State dependent proof. - WithState(Arc>), + WithState(Arc>), } /// Generated epoch verifier. pub enum ConstructedVerifier<'a, M: Machine> { /// Fully trusted verifier. - Trusted(Box>), + Trusted(Box>), /// Verifier unconfirmed. Check whether given finality proof finalizes given hash /// under previous epoch. - Unconfirmed(Box>, &'a [u8], H256), + Unconfirmed(Box>, &'a [u8], H256), /// Error constructing verifier. 
Err(Error), } @@ -257,7 +257,7 @@ pub enum ConstructedVerifier<'a, M: Machine> { impl<'a, M: Machine> ConstructedVerifier<'a, M> { /// Convert to a result, indicating that any necessary confirmation has been done /// already. - pub fn known_confirmed(self) -> Result>, Error> { + pub fn known_confirmed(self) -> Result>, Error> { match self { ConstructedVerifier::Trusted(v) | ConstructedVerifier::Unconfirmed(v, _, _) => Ok(v), ConstructedVerifier::Err(e) => Err(e), @@ -303,7 +303,7 @@ pub trait Engine: Sync + Send { &self, _block: &mut ExecutedBlock, _epoch_begin: bool, - _ancestry: &mut Iterator, + _ancestry: &mut dyn Iterator, ) -> Result<(), M::Error> { Ok(()) } @@ -426,7 +426,7 @@ pub trait Engine: Sync + Send { fn handle_message(&self, _message: &[u8]) -> Result<(), EngineError> { Err(EngineError::UnexpectedMessage) } /// Register a component which signs consensus messages. - fn set_signer(&self, _signer: Box) {} + fn set_signer(&self, _signer: Box) {} /// Sign using the EngineSigner, to be used for consensus tx signing. fn sign(&self, _hash: H256) -> Result { unimplemented!() } @@ -439,7 +439,7 @@ pub trait Engine: Sync + Send { /// Create a factory for building snapshot chunks and restoring from them. /// Returning `None` indicates that this engine doesn't support snapshot creation. - fn snapshot_components(&self) -> Option> { + fn snapshot_components(&self) -> Option> { None } @@ -463,7 +463,7 @@ pub trait Engine: Sync + Send { /// Gather all ancestry actions. Called at the last stage when a block is committed. The Engine must guarantee that /// the ancestry exists. 
- fn ancestry_actions(&self, _header: &Header, _ancestry: &mut Iterator) -> Vec { + fn ancestry_actions(&self, _header: &Header, _ancestry: &mut dyn Iterator) -> Vec { Vec::new() } diff --git a/ethcore/src/engines/null_engine.rs b/ethcore/src/engines/null_engine.rs index 27138985ad6..42cb9b5b40e 100644 --- a/ethcore/src/engines/null_engine.rs +++ b/ethcore/src/engines/null_engine.rs @@ -99,7 +99,7 @@ impl Engine for NullEngine { Ok(()) } - fn snapshot_components(&self) -> Option> { + fn snapshot_components(&self) -> Option> { Some(Box::new(::snapshot::PowSnapshot::new(10000, 10000))) } diff --git a/ethcore/src/engines/signer.rs b/ethcore/src/engines/signer.rs index 67066e74d7a..196c535fe76 100644 --- a/ethcore/src/engines/signer.rs +++ b/ethcore/src/engines/signer.rs @@ -29,7 +29,7 @@ pub trait EngineSigner: Send + Sync { } /// Creates a new `EngineSigner` from given key pair. -pub fn from_keypair(keypair: ethkey::KeyPair) -> Box { +pub fn from_keypair(keypair: ethkey::KeyPair) -> Box { Box::new(Signer(keypair)) } diff --git a/ethcore/src/engines/validator_set/contract.rs b/ethcore/src/engines/validator_set/contract.rs index dbf92ce4b59..5560447c447 100644 --- a/ethcore/src/engines/validator_set/contract.rs +++ b/ethcore/src/engines/validator_set/contract.rs @@ -37,7 +37,7 @@ use_contract!(validator_report, "res/contracts/validator_report.json"); pub struct ValidatorContract { contract_address: Address, validators: ValidatorSafeContract, - client: RwLock>>, // TODO [keorn]: remove + client: RwLock>>, // TODO [keorn]: remove } impl ValidatorContract { @@ -125,7 +125,7 @@ impl ValidatorSet for ValidatorContract { } } - fn register_client(&self, client: Weak) { + fn register_client(&self, client: Weak) { self.validators.register_client(client.clone()); *self.client.write() = Some(client); } diff --git a/ethcore/src/engines/validator_set/mod.rs b/ethcore/src/engines/validator_set/mod.rs index 915a3f9a15d..8d31937091a 100644 --- 
a/ethcore/src/engines/validator_set/mod.rs +++ b/ethcore/src/engines/validator_set/mod.rs @@ -44,7 +44,7 @@ use self::multi::Multi; use super::SystemCall; /// Creates a validator set from spec. -pub fn new_validator_set(spec: ValidatorSpec) -> Box { +pub fn new_validator_set(spec: ValidatorSpec) -> Box { match spec { ValidatorSpec::List(list) => Box::new(SimpleList::new(list.into_iter().map(Into::into).collect())), ValidatorSpec::SafeContract(address) => Box::new(ValidatorSafeContract::new(address.into())), @@ -141,5 +141,5 @@ pub trait ValidatorSet: Send + Sync + 'static { /// Notifies about benign misbehaviour. fn report_benign(&self, _validator: &Address, _set_block: BlockNumber, _block: BlockNumber) {} /// Allows blockchain state access. - fn register_client(&self, _client: Weak) {} + fn register_client(&self, _client: Weak) {} } diff --git a/ethcore/src/engines/validator_set/multi.rs b/ethcore/src/engines/validator_set/multi.rs index b9ef6774784..1d115bf5d33 100644 --- a/ethcore/src/engines/validator_set/multi.rs +++ b/ethcore/src/engines/validator_set/multi.rs @@ -30,15 +30,15 @@ use client::EngineClient; use machine::{AuxiliaryData, Call, EthereumMachine}; use super::{SystemCall, ValidatorSet}; -type BlockNumberLookup = Box Result + Send + Sync + 'static>; +type BlockNumberLookup = Box Result + Send + Sync + 'static>; pub struct Multi { - sets: BTreeMap>, + sets: BTreeMap>, block_number: RwLock, } impl Multi { - pub fn new(set_map: BTreeMap>) -> Self { + pub fn new(set_map: BTreeMap>) -> Self { assert!(set_map.get(&0u64).is_some(), "ValidatorSet has to be specified from block 0."); Multi { sets: set_map, @@ -46,7 +46,7 @@ impl Multi { } } - fn correct_set(&self, id: BlockId) -> Option<&ValidatorSet> { + fn correct_set(&self, id: BlockId) -> Option<&dyn ValidatorSet> { match self.block_number.read()(id).map(|parent_block| self.correct_set_by_number(parent_block)) { Ok((_, set)) => Some(set), Err(e) => { @@ -58,7 +58,7 @@ impl Multi { // get correct set by 
block number, along with block number at which // this set was activated. - fn correct_set_by_number(&self, parent_block: BlockNumber) -> (BlockNumber, &ValidatorSet) { + fn correct_set_by_number(&self, parent_block: BlockNumber) -> (BlockNumber, &dyn ValidatorSet) { let (block, set) = self.sets.iter() .rev() .find(|&(block, _)| *block <= parent_block + 1) @@ -134,7 +134,7 @@ impl ValidatorSet for Multi { self.correct_set_by_number(set_block).1.report_benign(validator, set_block, block); } - fn register_client(&self, client: Weak) { + fn register_client(&self, client: Weak) { for set in self.sets.values() { set.register_client(client.clone()); } @@ -215,7 +215,7 @@ mod tests { fn transition_to_fixed_list_instant() { use super::super::SimpleList; - let mut map: BTreeMap<_, Box> = BTreeMap::new(); + let mut map: BTreeMap<_, Box> = BTreeMap::new(); let list1: Vec<_> = (0..10).map(|_| Address::random()).collect(); let list2 = { let mut list = list1.clone(); diff --git a/ethcore/src/engines/validator_set/safe_contract.rs b/ethcore/src/engines/validator_set/safe_contract.rs index c210bcc00d6..e8db31423bb 100644 --- a/ethcore/src/engines/validator_set/safe_contract.rs +++ b/ethcore/src/engines/validator_set/safe_contract.rs @@ -75,7 +75,7 @@ impl ::engines::StateDependentProof for StateProof { pub struct ValidatorSafeContract { contract_address: Address, validators: RwLock>, - client: RwLock>>, // TODO [keorn]: remove + client: RwLock>>, // TODO [keorn]: remove } // first proof is just a state proof call of `getValidators` at header's state. 
@@ -431,7 +431,7 @@ impl ValidatorSet for ValidatorSafeContract { })) } - fn register_client(&self, client: Weak) { + fn register_client(&self, client: Weak) { trace!(target: "engine", "Setting up contract caller."); *self.client.write() = Some(client); } diff --git a/ethcore/src/engines/validator_set/simple_list.rs b/ethcore/src/engines/validator_set/simple_list.rs index 0a0294be966..ea1a1200ea7 100644 --- a/ethcore/src/engines/validator_set/simple_list.rs +++ b/ethcore/src/engines/validator_set/simple_list.rs @@ -105,8 +105,8 @@ impl ValidatorSet for SimpleList { } } -impl AsRef for SimpleList { - fn as_ref(&self) -> &ValidatorSet { +impl AsRef for SimpleList { + fn as_ref(&self) -> &dyn ValidatorSet { self } } diff --git a/ethcore/src/error.rs b/ethcore/src/error.rs index 5e4229a204f..477f67b26b1 100644 --- a/ethcore/src/error.rs +++ b/ethcore/src/error.rs @@ -162,7 +162,7 @@ pub enum QueueError { } impl error::Error for QueueError { - fn source(&self) -> Option<&(error::Error + 'static)> { + fn source(&self) -> Option<&(dyn error::Error + 'static)> { match self { QueueError::Channel(e) => Some(e), _ => None, @@ -264,7 +264,7 @@ pub enum Error { } impl error::Error for Error { - fn source(&self) -> Option<&(error::Error + 'static)> { + fn source(&self) -> Option<&(dyn error::Error + 'static)> { match self { Error::Io(e) => Some(e), Error::StdIo(e) => Some(e), diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index f2f0d000364..4fcf7f098f2 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -377,7 +377,7 @@ impl Engine for Arc { engines::ConstructedVerifier::Trusted(Box::new(self.clone())) } - fn snapshot_components(&self) -> Option> { + fn snapshot_components(&self) -> Option> { Some(Box::new(::snapshot::PowSnapshot::new(SNAPSHOT_BLOCKS, MAX_SNAPSHOT_BLOCKS))) } diff --git a/ethcore/src/executive.rs b/ethcore/src/executive.rs index 2d96db0705d..e14a43787ec 100644 --- a/ethcore/src/executive.rs +++ 
b/ethcore/src/executive.rs @@ -201,8 +201,8 @@ enum CallCreateExecutiveKind { CallBuiltin(ActionParams), ExecCall(ActionParams, Substate), ExecCreate(ActionParams, Substate), - ResumeCall(OriginInfo, Box, Substate), - ResumeCreate(OriginInfo, Box, Substate), + ResumeCall(OriginInfo, Box, Substate), + ResumeCreate(OriginInfo, Box, Substate), } /// Executive for a raw call/create action. diff --git a/ethcore/src/factory.rs b/ethcore/src/factory.rs index 06b77da9aa1..574cbeeb39d 100644 --- a/ethcore/src/factory.rs +++ b/ethcore/src/factory.rs @@ -31,7 +31,7 @@ pub struct VmFactory { } impl VmFactory { - pub fn create(&self, params: ActionParams, schedule: &Schedule, depth: usize) -> Box { + pub fn create(&self, params: ActionParams, schedule: &Schedule, depth: usize) -> Box { if schedule.wasm.is_some() && params.code.as_ref().map_or(false, |code| code.len() > 4 && &code[0..4] == WASM_MAGIC_NUMBER) { Box::new(WasmInterpreter::new(params)) } else { diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 315cd0bb5ed..87a1b4a0b01 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -82,7 +82,11 @@ extern crate journaldb; extern crate keccak_hash as hash; extern crate keccak_hasher; extern crate kvdb; +// Note: in `ethcore` this is only used by tests, so without `#[cfg(test)]` there's a warning. +// However, when building `parity-ethereum` this is needed. So there's something funny going on +// here. extern crate kvdb_memorydb; + extern crate len_caching_lock; extern crate lru_cache; extern crate memory_cache; diff --git a/ethcore/src/machine/impls.rs b/ethcore/src/machine/impls.rs index 4962f1ea25f..ec14f1f95c9 100644 --- a/ethcore/src/machine/impls.rs +++ b/ethcore/src/machine/impls.rs @@ -67,7 +67,7 @@ impl From<::ethjson::spec::EthashParams> for EthashExtensions { } /// Special rules to be applied to the schedule. 
-pub type ScheduleCreationRules = Fn(&mut Schedule, BlockNumber) + Sync + Send; +pub type ScheduleCreationRules = dyn Fn(&mut Schedule, BlockNumber) + Sync + Send; /// An ethereum-like state machine. pub struct EthereumMachine { @@ -415,7 +415,7 @@ pub struct AuxiliaryData<'a> { /// Type alias for a function we can make calls through synchronously. /// Returns the call result and state proof for each call. -pub type Call<'a> = Fn(Address, Vec) -> Result<(Vec, Vec>), String> + 'a; +pub type Call<'a> = dyn Fn(Address, Vec) -> Result<(Vec, Vec>), String> + 'a; /// Request for auxiliary data of a block. #[derive(Debug, Clone, Copy, PartialEq)] @@ -429,7 +429,7 @@ pub enum AuxiliaryRequest { } impl super::Machine for EthereumMachine { - type EngineClient = ::client::EngineClient; + type EngineClient = dyn (::client::EngineClient); type Error = Error; diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index 67ef9e6181c..0bd40d1c8d1 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -203,7 +203,7 @@ pub enum Author { /// Sealing block is external and we only need a reward beneficiary (i.e. PoW) External(Address), /// Sealing is done internally, we need a way to create signatures to seal block (i.e. 
PoA) - Sealer(Box), + Sealer(Box), } impl Author { @@ -245,8 +245,8 @@ pub struct Miner { options: MinerOptions, // TODO [ToDr] Arc is only required because of price updater transaction_queue: Arc, - engine: Arc, - accounts: Arc, + engine: Arc, + accounts: Arc, io_channel: RwLock>>, service_transaction_checker: Option, } diff --git a/ethcore/src/miner/pool_client.rs b/ethcore/src/miner/pool_client.rs index 60e93dee8ac..98431b45852 100644 --- a/ethcore/src/miner/pool_client.rs +++ b/ethcore/src/miner/pool_client.rs @@ -72,8 +72,8 @@ impl NonceCache { pub struct PoolClient<'a, C: 'a> { chain: &'a C, cached_nonces: CachedNonceClient<'a, C>, - engine: &'a EthEngine, - accounts: &'a LocalAccounts, + engine: &'a dyn EthEngine, + accounts: &'a dyn LocalAccounts, best_block_header: Header, service_transaction_checker: Option<&'a ServiceTransactionChecker>, } @@ -98,8 +98,8 @@ impl<'a, C: 'a> PoolClient<'a, C> where pub fn new( chain: &'a C, cache: &'a NonceCache, - engine: &'a EthEngine, - accounts: &'a LocalAccounts, + engine: &'a dyn EthEngine, + accounts: &'a dyn LocalAccounts, service_transaction_checker: Option<&'a ServiceTransactionChecker>, ) -> Self { let best_block_header = chain.best_block_header(); diff --git a/ethcore/src/pod_account.rs b/ethcore/src/pod_account.rs index c02074f95f9..5bdb05f49d0 100644 --- a/ethcore/src/pod_account.rs +++ b/ethcore/src/pod_account.rs @@ -79,7 +79,7 @@ impl PodAccount { } /// Place additional data into given hash DB. - pub fn insert_additional(&self, db: &mut HashDB, factory: &TrieFactory) { + pub fn insert_additional(&self, db: &mut dyn HashDB, factory: &TrieFactory) { match self.code { Some(ref c) if !c.is_empty() => { db.insert(c); } _ => {} diff --git a/ethcore/src/snapshot/account.rs b/ethcore/src/snapshot/account.rs index d18ecacbc3b..f968705b469 100644 --- a/ethcore/src/snapshot/account.rs +++ b/ethcore/src/snapshot/account.rs @@ -66,7 +66,7 @@ impl CodeState { // account address hash, account properties and the storage. 
Each item contains at most `max_storage_items` // storage records split according to snapshot format definition. pub fn to_fat_rlps(account_hash: &H256, acc: &BasicAccount, acct_db: &AccountDB, used_code: &mut HashSet, first_chunk_size: usize, max_chunk_size: usize) -> Result, Error> { - let db = &(acct_db as &HashDB<_,_>); + let db = &(acct_db as &dyn HashDB<_,_>); let db = TrieDB::new(db, &acc.storage_root)?; let mut chunks = Vec::new(); let mut db_iter = db.iter()?; diff --git a/ethcore/src/snapshot/consensus/authority.rs b/ethcore/src/snapshot/consensus/authority.rs index 4423e074019..08a1f8afea0 100644 --- a/ethcore/src/snapshot/consensus/authority.rs +++ b/ethcore/src/snapshot/consensus/authority.rs @@ -127,9 +127,9 @@ impl SnapshotComponents for PoaSnapshot { fn rebuilder( &self, chain: BlockChain, - db: Arc, + db: Arc, manifest: &ManifestData, - ) -> Result, ::error::Error> { + ) -> Result, ::error::Error> { Ok(Box::new(ChunkRebuilder { manifest: manifest.clone(), warp_target: None, @@ -164,14 +164,14 @@ struct ChunkRebuilder { manifest: ManifestData, warp_target: Option
, chain: BlockChain, - db: Arc, + db: Arc, had_genesis: bool, // sorted vectors of unverified first blocks in a chunk // and epoch data from last blocks in chunks. // verification for these will be done at the end. unverified_firsts: Vec<(Header, Bytes, H256)>, - last_epochs: Vec<(Header, Box>)>, + last_epochs: Vec<(Header, Box>)>, } // verified data. @@ -183,9 +183,9 @@ struct Verified { impl ChunkRebuilder { fn verify_transition( &mut self, - last_verifier: &mut Option>>, + last_verifier: &mut Option>>, transition_rlp: Rlp, - engine: &EthEngine, + engine: &dyn EthEngine, ) -> Result { use engines::ConstructedVerifier; @@ -241,7 +241,7 @@ impl Rebuilder for ChunkRebuilder { fn feed( &mut self, chunk: &[u8], - engine: &EthEngine, + engine: &dyn EthEngine, abort_flag: &AtomicBool, ) -> Result<(), ::error::Error> { let rlp = Rlp::new(chunk); @@ -349,7 +349,7 @@ impl Rebuilder for ChunkRebuilder { Ok(()) } - fn finalize(&mut self, _engine: &EthEngine) -> Result<(), ::error::Error> { + fn finalize(&mut self, _engine: &dyn EthEngine) -> Result<(), ::error::Error> { if !self.had_genesis { return Err(Error::WrongChunkFormat("No genesis transition included.".into()).into()); } diff --git a/ethcore/src/snapshot/consensus/mod.rs b/ethcore/src/snapshot/consensus/mod.rs index 907e9c520bb..670700c10cf 100644 --- a/ethcore/src/snapshot/consensus/mod.rs +++ b/ethcore/src/snapshot/consensus/mod.rs @@ -33,7 +33,7 @@ pub use self::authority::*; pub use self::work::*; /// A sink for produced chunks. -pub type ChunkSink<'a> = FnMut(&[u8]) -> ::std::io::Result<()> + 'a; +pub type ChunkSink<'a> = dyn FnMut(&[u8]) -> ::std::io::Result<()> + 'a; /// Components necessary for snapshot creation and restoration. 
pub trait SnapshotComponents: Send { @@ -63,9 +63,9 @@ pub trait SnapshotComponents: Send { fn rebuilder( &self, chain: BlockChain, - db: Arc, + db: Arc, manifest: &ManifestData, - ) -> Result, ::error::Error>; + ) -> Result, ::error::Error>; /// Minimum supported snapshot version number. fn min_supported_version(&self) -> u64; @@ -83,7 +83,7 @@ pub trait Rebuilder: Send { fn feed( &mut self, chunk: &[u8], - engine: &EthEngine, + engine: &dyn EthEngine, abort_flag: &AtomicBool, ) -> Result<(), ::error::Error>; @@ -92,5 +92,5 @@ pub trait Rebuilder: Send { /// /// This should apply the necessary "glue" between chunks, /// and verify against the restored state. - fn finalize(&mut self, engine: &EthEngine) -> Result<(), ::error::Error>; + fn finalize(&mut self, engine: &dyn EthEngine) -> Result<(), ::error::Error>; } diff --git a/ethcore/src/snapshot/consensus/work.rs b/ethcore/src/snapshot/consensus/work.rs index a3d4da1c2ef..201d528d140 100644 --- a/ethcore/src/snapshot/consensus/work.rs +++ b/ethcore/src/snapshot/consensus/work.rs @@ -81,9 +81,9 @@ impl SnapshotComponents for PowSnapshot { fn rebuilder( &self, chain: BlockChain, - db: Arc, + db: Arc, manifest: &ManifestData, - ) -> Result, ::error::Error> { + ) -> Result, ::error::Error> { PowRebuilder::new(chain, db.key_value().clone(), manifest, self.max_restore_blocks).map(|r| Box::new(r) as Box<_>) } @@ -194,7 +194,7 @@ impl<'a> PowWorker<'a> { /// After all chunks have been submitted, we "glue" the chunks together. pub struct PowRebuilder { chain: BlockChain, - db: Arc, + db: Arc, rng: OsRng, disconnected: Vec<(u64, H256)>, best_number: u64, @@ -206,7 +206,7 @@ pub struct PowRebuilder { impl PowRebuilder { /// Create a new PowRebuilder. 
- fn new(chain: BlockChain, db: Arc, manifest: &ManifestData, snapshot_blocks: u64) -> Result { + fn new(chain: BlockChain, db: Arc, manifest: &ManifestData, snapshot_blocks: u64) -> Result { Ok(PowRebuilder { chain: chain, db: db, @@ -224,7 +224,7 @@ impl PowRebuilder { impl Rebuilder for PowRebuilder { /// Feed the rebuilder an uncompressed block chunk. /// Returns the number of blocks fed or any errors. - fn feed(&mut self, chunk: &[u8], engine: &EthEngine, abort_flag: &AtomicBool) -> Result<(), ::error::Error> { + fn feed(&mut self, chunk: &[u8], engine: &dyn EthEngine, abort_flag: &AtomicBool) -> Result<(), ::error::Error> { use snapshot::verify_old_block; use ethereum_types::U256; use triehash::ordered_trie_root; @@ -298,7 +298,7 @@ impl Rebuilder for PowRebuilder { } /// Glue together any disconnected chunks and check that the chain is complete. - fn finalize(&mut self, _: &EthEngine) -> Result<(), ::error::Error> { + fn finalize(&mut self, _: &dyn EthEngine) -> Result<(), ::error::Error> { let mut batch = self.db.transaction(); for (first_num, first_hash) in self.disconnected.drain(..) { diff --git a/ethcore/src/snapshot/error.rs b/ethcore/src/snapshot/error.rs index 0eba4725569..91b6dc67cc8 100644 --- a/ethcore/src/snapshot/error.rs +++ b/ethcore/src/snapshot/error.rs @@ -71,7 +71,7 @@ pub enum Error { } impl error::Error for Error { - fn source(&self) -> Option<&(error::Error + 'static)> { + fn source(&self) -> Option<&(dyn error::Error + 'static)> { match self { Error::Trie(e) => Some(e), Error::Decoder(e) => Some(e), diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index 155182ed516..84905f580f7 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -148,10 +148,10 @@ impl Progress { } /// Take a snapshot using the given blockchain, starting block hash, and database, writing into the given writer. 
pub fn take_snapshot( - engine: &EthEngine, + engine: &dyn EthEngine, chain: &BlockChain, block_at: H256, - state_db: &HashDB, + state_db: &dyn HashDB, writer: W, p: &Progress, processing_threads: usize, @@ -228,7 +228,7 @@ pub fn take_snapshot( /// Secondary chunks are engine-specific, but they intend to corroborate the state data /// in the state chunks. /// Returns a list of chunk hashes, with the first having the blocks furthest from the genesis. -pub fn chunk_secondary<'a>(mut chunker: Box, chain: &'a BlockChain, start_hash: H256, writer: &Mutex, progress: &'a Progress) -> Result, Error> { +pub fn chunk_secondary<'a>(mut chunker: Box, chain: &'a BlockChain, start_hash: H256, writer: &Mutex, progress: &'a Progress) -> Result, Error> { let mut chunk_hashes = Vec::new(); let mut snappy_buffer = vec![0; snappy::max_compressed_len(PREFERRED_CHUNK_SIZE)]; @@ -266,7 +266,7 @@ struct StateChunker<'a> { rlps: Vec, cur_size: usize, snappy_buffer: Vec, - writer: &'a Mutex, + writer: &'a Mutex, progress: &'a Progress, } @@ -321,7 +321,7 @@ impl<'a> StateChunker<'a> { /// /// Returns a list of hashes of chunks created, or any error it may /// have encountered. -pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: &Mutex, progress: &'a Progress, part: Option) -> Result, Error> { +pub fn chunk_state<'a>(db: &dyn HashDB, root: &H256, writer: &Mutex, progress: &'a Progress, part: Option) -> Result, Error> { let account_trie = TrieDB::new(&db, &root)?; let mut chunker = StateChunker { @@ -383,7 +383,7 @@ pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: /// Used to rebuild the state trie piece by piece. pub struct StateRebuilder { - db: Box, + db: Box, state_root: H256, known_code: HashMap, // code hashes mapped to first account with this code. missing_code: HashMap>, // maps code hashes to lists of accounts missing that code. @@ -393,7 +393,7 @@ pub struct StateRebuilder { impl StateRebuilder { /// Create a new state rebuilder to write into the given backing DB. 
- pub fn new(db: Arc, pruning: Algorithm) -> Self { + pub fn new(db: Arc, pruning: Algorithm) -> Self { StateRebuilder { db: journaldb::new(db.clone(), pruning, ::db::COL_STATE), state_root: KECCAK_NULL_RLP, @@ -468,7 +468,7 @@ impl StateRebuilder { /// Finalize the restoration. Check for accounts missing code and make a dummy /// journal entry. /// Once all chunks have been fed, there should be nothing missing. - pub fn finalize(mut self, era: u64, id: H256) -> Result, ::error::Error> { + pub fn finalize(mut self, era: u64, id: H256) -> Result, ::error::Error> { let missing = self.missing_code.keys().cloned().collect::>(); if !missing.is_empty() { return Err(Error::MissingCode(missing).into()) } @@ -493,7 +493,7 @@ struct RebuiltStatus { // rebuild a set of accounts and their storage. // returns a status detailing newly-loaded code and accounts missing code. fn rebuild_accounts( - db: &mut HashDB, + db: &mut dyn HashDB, account_fat_rlps: Rlp, out_chunk: &mut [(H256, Bytes)], known_code: &HashMap, @@ -560,7 +560,7 @@ const POW_VERIFY_RATE: f32 = 0.02; /// Verify an old block with the given header, engine, blockchain, body. If `always` is set, it will perform /// the fullest verification possible. If not, it will take a random sample to determine whether it will /// do heavy or light verification. 
-pub fn verify_old_block(rng: &mut OsRng, header: &Header, engine: &EthEngine, chain: &BlockChain, always: bool) -> Result<(), ::error::Error> { +pub fn verify_old_block(rng: &mut OsRng, header: &Header, engine: &dyn EthEngine, chain: &BlockChain, always: bool) -> Result<(), ::error::Error> { engine.verify_block_basic(header)?; if always || rng.gen::() <= POW_VERIFY_RATE { diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs index 8120f47b81f..0c26f095e26 100644 --- a/ethcore/src/snapshot/service.rs +++ b/ethcore/src/snapshot/service.rs @@ -76,22 +76,22 @@ struct Restoration { state_chunks_left: HashSet, block_chunks_left: HashSet, state: StateRebuilder, - secondary: Box, + secondary: Box, writer: Option, snappy_buffer: Bytes, final_state_root: H256, guard: Guard, - db: Arc, + db: Arc, } struct RestorationParams<'a> { manifest: ManifestData, // manifest to base restoration on. pruning: Algorithm, // pruning algorithm for the database. - db: Arc, // database + db: Arc, // database writer: Option, // writer for recovered snapshot. genesis: &'a [u8], // genesis block of the chain. guard: Guard, // guard for the restoration directory. - engine: &'a EthEngine, + engine: &'a dyn EthEngine, } impl Restoration { @@ -149,7 +149,7 @@ impl Restoration { } // feeds a block chunk - fn feed_blocks(&mut self, hash: H256, chunk: &[u8], engine: &EthEngine, flag: &AtomicBool) -> Result<(), Error> { + fn feed_blocks(&mut self, hash: H256, chunk: &[u8], engine: &dyn EthEngine, flag: &AtomicBool) -> Result<(), Error> { if self.block_chunks_left.contains(&hash) { let expected_len = snappy::decompressed_len(chunk)?; if expected_len > MAX_CHUNK_SIZE { @@ -170,7 +170,7 @@ impl Restoration { } // finish up restoration. 
- fn finalize(mut self, engine: &EthEngine) -> Result<(), Error> { + fn finalize(mut self, engine: &dyn EthEngine) -> Result<(), Error> { use trie::TrieError; if !self.is_done() { return Ok(()) } @@ -211,37 +211,37 @@ pub trait SnapshotClient: BlockChainClient + BlockInfo + DatabaseRestore {} /// Snapshot service parameters. pub struct ServiceParams { /// The consensus engine this is built on. - pub engine: Arc, + pub engine: Arc, /// The chain's genesis block. pub genesis_block: Bytes, /// State pruning algorithm. pub pruning: Algorithm, /// Handler for opening a restoration DB. - pub restoration_db_handler: Box, + pub restoration_db_handler: Box, /// Async IO channel for sending messages. pub channel: Channel, /// The directory to put snapshots in. /// Usually "/snapshot" pub snapshot_root: PathBuf, /// A handle for database restoration. - pub client: Arc, + pub client: Arc, } /// `SnapshotService` implementation. /// This controls taking snapshots and restoring from them. pub struct Service { restoration: Mutex>, - restoration_db_handler: Box, + restoration_db_handler: Box, snapshot_root: PathBuf, io_channel: Mutex, pruning: Algorithm, status: Mutex, reader: RwLock>, - engine: Arc, + engine: Arc, genesis_block: Bytes, state_chunks: AtomicUsize, block_chunks: AtomicUsize, - client: Arc, + client: Arc, progress: super::Progress, taking_snapshot: AtomicBool, restoring_snapshot: AtomicBool, diff --git a/ethcore/src/snapshot/tests/helpers.rs b/ethcore/src/snapshot/tests/helpers.rs index a70c7b7e272..96c5c51b023 100644 --- a/ethcore/src/snapshot/tests/helpers.rs +++ b/ethcore/src/snapshot/tests/helpers.rs @@ -62,7 +62,7 @@ impl StateProducer { /// Tick the state producer. This alters the state, writing new data into /// the database. - pub fn tick(&mut self, rng: &mut R, db: &mut HashDB) { + pub fn tick(&mut self, rng: &mut R, db: &mut dyn HashDB) { // modify existing accounts. 
let mut accounts_to_modify: Vec<_> = { let trie = TrieDB::new(&db, &self.state_root).unwrap(); @@ -132,7 +132,7 @@ pub fn fill_storage(mut db: AccountDBMut, root: &mut H256, seed: &mut H256) { /// Take a snapshot from the given client into a temporary file. /// Return a snapshot reader for it. -pub fn snap(client: &Client) -> (Box, TempDir) { +pub fn snap(client: &Client) -> (Box, TempDir) { use types::ids::BlockId; let tempdir = TempDir::new("").unwrap(); @@ -151,9 +151,9 @@ pub fn snap(client: &Client) -> (Box, TempDir) { /// Restore a snapshot into a given database. This will read chunks from the given reader /// write into the given database. pub fn restore( - db: Arc, - engine: &EthEngine, - reader: &SnapshotReader, + db: Arc, + engine: &dyn EthEngine, + reader: &dyn SnapshotReader, genesis: &[u8], ) -> Result<(), ::error::Error> { use std::sync::atomic::AtomicBool; diff --git a/ethcore/src/snapshot/watcher.rs b/ethcore/src/snapshot/watcher.rs index 9df21c3cd83..06df0c9cc42 100644 --- a/ethcore/src/snapshot/watcher.rs +++ b/ethcore/src/snapshot/watcher.rs @@ -72,8 +72,8 @@ impl Broadcast for Mutex> { /// A `ChainNotify` implementation which will trigger a snapshot event /// at certain block numbers. pub struct Watcher { - oracle: Box, - broadcast: Box, + oracle: Box, + broadcast: Box, period: u64, history: u64, } diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index fd00dd56f08..87822226127 100644 --- a/ethcore/src/spec/spec.rs +++ b/ethcore/src/spec/spec.rs @@ -382,7 +382,7 @@ pub struct Spec { /// User friendly spec name pub name: String, /// What engine are we using for this? - pub engine: Arc, + pub engine: Arc, /// Name of the subdir inside the main data dir to use for chain data and settings. 
pub data_dir: String, @@ -601,7 +601,7 @@ impl Spec { engine_spec: ethjson::spec::Engine, params: CommonParams, builtins: BTreeMap, - ) -> Arc { + ) -> Arc { let machine = Self::machine(&engine_spec, params, builtins); match engine_spec { diff --git a/ethcore/src/state/account.rs b/ethcore/src/state/account.rs index 36260ca2cdc..5daaa139839 100644 --- a/ethcore/src/state/account.rs +++ b/ethcore/src/state/account.rs @@ -217,7 +217,7 @@ impl Account { /// Get (and cache) the contents of the trie's storage at `key`. /// Takes modified storage into account. - pub fn storage_at(&self, db: &HashDB, key: &H256) -> TrieResult { + pub fn storage_at(&self, db: &dyn HashDB, key: &H256) -> TrieResult { if let Some(value) = self.cached_storage_at(key) { return Ok(value); } @@ -230,7 +230,7 @@ impl Account { /// Get (and cache) the contents of the trie's storage at `key`. /// Does not take modified storage into account. - pub fn original_storage_at(&self, db: &HashDB, key: &H256) -> TrieResult { + pub fn original_storage_at(&self, db: &dyn HashDB, key: &H256) -> TrieResult { if let Some(value) = self.cached_original_storage_at(key) { return Ok(value); } @@ -252,7 +252,7 @@ impl Account { } } - fn get_and_cache_storage(storage_root: &H256, storage_cache: &mut LruCache, db: &HashDB, key: &H256) -> TrieResult { + fn get_and_cache_storage(storage_root: &H256, storage_cache: &mut LruCache, db: &dyn HashDB, key: &H256) -> TrieResult { let db = SecTrieDB::new(&db, storage_root)?; let panicky_decoder = |bytes:&[u8]| ::rlp::decode(&bytes).expect("decoding db value failed"); let item: U256 = db.get_with(key.as_bytes(), panicky_decoder)?.unwrap_or_else(U256::zero); @@ -358,7 +358,7 @@ impl Account { /// Provide a database to get `code_hash`. Should not be called if it is a contract without code. Returns the cached code, if successful. 
#[must_use] - pub fn cache_code(&mut self, db: &HashDB) -> Option> { + pub fn cache_code(&mut self, db: &dyn HashDB) -> Option> { // TODO: fill out self.code_cache; trace!("Account::cache_code: ic={}; self.code_hash={:?}, self.code_cache={}", self.is_cached(), self.code_hash, self.code_cache.pretty()); @@ -388,7 +388,7 @@ impl Account { /// Provide a database to get `code_size`. Should not be called if it is a contract without code. Returns whether /// the cache succeeds. #[must_use] - pub fn cache_code_size(&mut self, db: &HashDB) -> bool { + pub fn cache_code_size(&mut self, db: &dyn HashDB) -> bool { // TODO: fill out self.code_cache; trace!("Account::cache_code_size: ic={}; self.code_hash={:?}, self.code_cache={}", self.is_cached(), self.code_hash, self.code_cache.pretty()); self.code_size.is_some() || @@ -482,7 +482,7 @@ impl Account { } /// Commit the `storage_changes` to the backing DB and update `storage_root`. - pub fn commit_storage(&mut self, trie_factory: &TrieFactory, db: &mut HashDB) -> TrieResult<()> { + pub fn commit_storage(&mut self, trie_factory: &TrieFactory, db: &mut dyn HashDB) -> TrieResult<()> { let mut t = trie_factory.from_existing(db, &mut self.storage_root)?; for (k, v) in self.storage_changes.drain() { // cast key and value to trait type, @@ -499,7 +499,7 @@ impl Account { } /// Commit any unsaved code. `code_hash` will always return the hash of the `code_cache` after this. - pub fn commit_code(&mut self, db: &mut HashDB) { + pub fn commit_code(&mut self, db: &mut dyn HashDB) { trace!("Commiting code of {:?} - {:?}, {:?}", self, self.code_filth == Filth::Dirty, self.code_cache.is_empty()); match (self.code_filth == Filth::Dirty, self.code_cache.is_empty()) { (true, true) => { @@ -588,7 +588,7 @@ impl Account { /// trie. /// `storage_key` is the hash of the desired storage key, meaning /// this will only work correctly under a secure trie. 
- pub fn prove_storage(&self, db: &HashDB, storage_key: H256) -> TrieResult<(Vec, H256)> { + pub fn prove_storage(&self, db: &dyn HashDB, storage_key: H256) -> TrieResult<(Vec, H256)> { let mut recorder = Recorder::new(); let trie = TrieDB::new(&db, &self.storage_root)?; diff --git a/ethcore/src/state/backend.rs b/ethcore/src/state/backend.rs index 11e73edb3ab..9d9a2a9b388 100644 --- a/ethcore/src/state/backend.rs +++ b/ethcore/src/state/backend.rs @@ -36,10 +36,10 @@ use journaldb::AsKeyedHashDB; /// State backend. See module docs for more details. pub trait Backend: Send { /// Treat the backend as a read-only hashdb. - fn as_hash_db(&self) -> &HashDB; + fn as_hash_db(&self) -> &dyn HashDB; /// Treat the backend as a writeable hashdb. - fn as_hash_db_mut(&mut self) -> &mut HashDB; + fn as_hash_db_mut(&mut self) -> &mut dyn HashDB; /// Add an account entry to the cache. fn add_to_account_cache(&mut self, addr: Address, data: Option, modified: bool); @@ -114,13 +114,13 @@ impl HashDB for ProofCheck { } impl AsHashDB for ProofCheck { - fn as_hash_db(&self) -> &HashDB { self } - fn as_hash_db_mut(&mut self) -> &mut HashDB { self } + fn as_hash_db(&self) -> &dyn HashDB { self } + fn as_hash_db_mut(&mut self) -> &mut dyn HashDB { self } } impl Backend for ProofCheck { - fn as_hash_db(&self) -> &HashDB { self } - fn as_hash_db_mut(&mut self) -> &mut HashDB { self } + fn as_hash_db(&self) -> &dyn HashDB { self } + fn as_hash_db_mut(&mut self) -> &mut dyn HashDB { self } fn add_to_account_cache(&mut self, _addr: Address, _data: Option, _modified: bool) {} fn cache_code(&self, _hash: H256, _code: Arc>) {} fn get_cached_account(&self, _addr: &Address) -> Option> { None } @@ -146,12 +146,12 @@ pub struct Proving { } impl AsKeyedHashDB for Proving { - fn as_keyed_hash_db(&self) -> &journaldb::KeyedHashDB { self } + fn as_keyed_hash_db(&self) -> &dyn journaldb::KeyedHashDB { self } } impl + Send + Sync> AsHashDB for Proving { - fn as_hash_db(&self) -> &HashDB { self } - fn 
as_hash_db_mut(&mut self) -> &mut HashDB { self } + fn as_hash_db(&self) -> &dyn HashDB { self } + fn as_hash_db_mut(&mut self) -> &mut dyn HashDB { self } } impl journaldb::KeyedHashDB for Proving { @@ -194,9 +194,9 @@ impl + Send + Sync> HashDB + Send + Sync> Backend for Proving { - fn as_hash_db(&self) -> &HashDB { self } + fn as_hash_db(&self) -> &dyn HashDB { self } - fn as_hash_db_mut(&mut self) -> &mut HashDB { self } + fn as_hash_db_mut(&mut self) -> &mut dyn HashDB { self } fn add_to_account_cache(&mut self, _: Address, _: Option, _: bool) { } @@ -248,11 +248,11 @@ impl + Clone> Clone for Proving { pub struct Basic(pub H); impl + Send + Sync> Backend for Basic { - fn as_hash_db(&self) -> &HashDB { + fn as_hash_db(&self) -> &dyn HashDB { self.0.as_hash_db() } - fn as_hash_db_mut(&mut self) -> &mut HashDB { + fn as_hash_db_mut(&mut self) -> &mut dyn HashDB { self.0.as_hash_db_mut() } diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index d0d57287dce..95e31489c84 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -638,7 +638,7 @@ impl State { &self, address: &Address, key: &H256, f_cached_at: FCachedStorageAt, f_at: FStorageAt, ) -> TrieResult where FCachedStorageAt: Fn(&Account, &H256) -> Option, - FStorageAt: Fn(&Account, &HashDB, &H256) -> TrieResult + FStorageAt: Fn(&Account, &dyn HashDB, &H256) -> TrieResult { // Storage key search and update works like this: // 1. If there's an entry for the account in the local cache check for the key and return it if found. @@ -1101,7 +1101,7 @@ impl State { /// Load required account data from the databases. Returns whether the cache succeeds. 
#[must_use] - fn update_account_cache(require: RequireCache, account: &mut Account, state_db: &B, db: &HashDB) -> bool { + fn update_account_cache(require: RequireCache, account: &mut Account, state_db: &B, db: &dyn HashDB) -> bool { if let RequireCache::None = require { return true; } diff --git a/ethcore/src/state_db.rs b/ethcore/src/state_db.rs index 2613411e4e0..132677ab5cf 100644 --- a/ethcore/src/state_db.rs +++ b/ethcore/src/state_db.rs @@ -107,7 +107,7 @@ struct BlockChanges { /// `StateDB` is propagated into the global cache. pub struct StateDB { /// Backing database. - db: Box, + db: Box, /// Shared canonical state cache. account_cache: Arc>, /// DB Code cache. Maps code hashes to shared bytes. @@ -132,7 +132,7 @@ impl StateDB { /// of the LRU cache in bytes. Actual used memory may (read: will) be higher due to bookkeeping. // TODO: make the cache size actually accurate by moving the account storage cache // into the `AccountCache` structure as its own `LruCache<(Address, H256), H256>`. 
- pub fn new(db: Box, cache_size: usize) -> StateDB { + pub fn new(db: Box, cache_size: usize) -> StateDB { let bloom = Self::load_bloom(&**db.backing()); let acc_cache_size = cache_size * ACCOUNT_CACHE_RATIO / 100; let code_cache_size = cache_size - acc_cache_size; @@ -156,7 +156,7 @@ impl StateDB { /// Loads accounts bloom from the database /// This bloom is used to handle request for the non-existant account fast - pub fn load_bloom(db: &KeyValueDB) -> Bloom { + pub fn load_bloom(db: &dyn KeyValueDB) -> Bloom { let hash_count_entry = db.get(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY) .expect("Low-level database error"); @@ -313,12 +313,12 @@ impl StateDB { } /// Conversion method to interpret self as `HashDB` reference - pub fn as_hash_db(&self) -> &HashDB { + pub fn as_hash_db(&self) -> &dyn HashDB { self.db.as_hash_db() } /// Conversion method to interpret self as mutable `HashDB` reference - pub fn as_hash_db_mut(&mut self) -> &mut HashDB { + pub fn as_hash_db_mut(&mut self) -> &mut dyn HashDB { self.db.as_hash_db_mut() } @@ -368,7 +368,7 @@ impl StateDB { } /// Returns underlying `JournalDB`. 
- pub fn journal_db(&self) -> &JournalDB { + pub fn journal_db(&self) -> &dyn JournalDB { &*self.db } @@ -407,9 +407,9 @@ impl StateDB { } impl state::Backend for StateDB { - fn as_hash_db(&self) -> &HashDB { self.db.as_hash_db() } + fn as_hash_db(&self) -> &dyn HashDB { self.db.as_hash_db() } - fn as_hash_db_mut(&mut self) -> &mut HashDB { + fn as_hash_db_mut(&mut self) -> &mut dyn HashDB { self.db.as_hash_db_mut() } diff --git a/ethcore/src/test_helpers.rs b/ethcore/src/test_helpers.rs index e59a41d34b4..6bdaeadf9bd 100644 --- a/ethcore/src/test_helpers.rs +++ b/ethcore/src/test_helpers.rs @@ -267,11 +267,11 @@ struct TestBlockChainDB { _trace_blooms_dir: TempDir, blooms: blooms_db::Database, trace_blooms: blooms_db::Database, - key_value: Arc, + key_value: Arc, } impl BlockChainDB for TestBlockChainDB { - fn key_value(&self) -> &Arc { + fn key_value(&self) -> &Arc { &self.key_value } @@ -285,7 +285,7 @@ impl BlockChainDB for TestBlockChainDB { } /// Creates new test instance of `BlockChainDB` -pub fn new_db() -> Arc { +pub fn new_db() -> Arc { let blooms_dir = TempDir::new("").unwrap(); let trace_blooms_dir = TempDir::new("").unwrap(); @@ -301,7 +301,7 @@ pub fn new_db() -> Arc { } /// Creates a new temporary `BlockChainDB` on FS -pub fn new_temp_db(tempdir: &Path) -> Arc { +pub fn new_temp_db(tempdir: &Path) -> Arc { let blooms_dir = TempDir::new("").unwrap(); let trace_blooms_dir = TempDir::new("").unwrap(); let key_value_dir = tempdir.join("key_value"); @@ -321,7 +321,7 @@ pub fn new_temp_db(tempdir: &Path) -> Arc { } /// Creates new instance of KeyValueDBHandler -pub fn restoration_db_handler(config: kvdb_rocksdb::DatabaseConfig) -> Box { +pub fn restoration_db_handler(config: kvdb_rocksdb::DatabaseConfig) -> Box { struct RestorationDBHandler { config: kvdb_rocksdb::DatabaseConfig, } @@ -329,11 +329,11 @@ pub fn restoration_db_handler(config: kvdb_rocksdb::DatabaseConfig) -> Box, + key_value: Arc, } impl BlockChainDB for RestorationDB { - fn key_value(&self) 
-> &Arc { + fn key_value(&self) -> &Arc { &self.key_value } @@ -347,7 +347,7 @@ pub fn restoration_db_handler(config: kvdb_rocksdb::DatabaseConfig) -> Box io::Result> { + fn open(&self, db_path: &Path) -> io::Result> { let key_value = Arc::new(kvdb_rocksdb::Database::open(&self.config, &db_path.to_string_lossy())?); let blooms_path = db_path.join("blooms"); let trace_blooms_path = db_path.join("trace_blooms"); diff --git a/ethcore/src/trace/db.rs b/ethcore/src/trace/db.rs index 2ee521a4ad6..4c8c2cd7f0e 100644 --- a/ethcore/src/trace/db.rs +++ b/ethcore/src/trace/db.rs @@ -64,7 +64,7 @@ pub struct TraceDB where T: DatabaseExtras { /// hashes of cached traces cache_manager: RwLock>, /// db - db: Arc, + db: Arc, /// tracing enabled enabled: bool, /// extras @@ -73,7 +73,7 @@ pub struct TraceDB where T: DatabaseExtras { impl TraceDB where T: DatabaseExtras { /// Creates new instance of `TraceDB`. - pub fn new(config: Config, db: Arc, extras: Arc) -> Self { + pub fn new(config: Config, db: Arc, extras: Arc) -> Self { let mut batch = DBTransaction::new(); let genesis = extras.block_hash(0) .expect("Genesis block is always inserted upon extras db creation qed"); diff --git a/ethcore/src/verification/canon_verifier.rs b/ethcore/src/verification/canon_verifier.rs index 03a1c7155f8..76f37c19df0 100644 --- a/ethcore/src/verification/canon_verifier.rs +++ b/ethcore/src/verification/canon_verifier.rs @@ -32,7 +32,7 @@ impl Verifier for CanonVerifier { &self, header: &Header, parent: &Header, - engine: &EthEngine, + engine: &dyn EthEngine, do_full: Option>, ) -> Result<(), Error> { verification::verify_block_family(header, parent, engine, do_full) @@ -42,7 +42,7 @@ impl Verifier for CanonVerifier { verification::verify_block_final(expected, got) } - fn verify_block_external(&self, header: &Header, engine: &EthEngine) -> Result<(), Error> { + fn verify_block_external(&self, header: &Header, engine: &dyn EthEngine) -> Result<(), Error> { engine.verify_block_external(header) } } 
diff --git a/ethcore/src/verification/mod.rs b/ethcore/src/verification/mod.rs index 5546bd60c91..37c721ab1b8 100644 --- a/ethcore/src/verification/mod.rs +++ b/ethcore/src/verification/mod.rs @@ -44,7 +44,7 @@ pub enum VerifierType { } /// Create a new verifier based on type. -pub fn new(v: VerifierType) -> Box> { +pub fn new(v: VerifierType) -> Box> { match v { VerifierType::Canon | VerifierType::CanonNoSeal => Box::new(CanonVerifier), VerifierType::Noop => Box::new(NoopVerifier), diff --git a/ethcore/src/verification/noop_verifier.rs b/ethcore/src/verification/noop_verifier.rs index d68f1eb8856..3b646ed9e40 100644 --- a/ethcore/src/verification/noop_verifier.rs +++ b/ethcore/src/verification/noop_verifier.rs @@ -32,7 +32,7 @@ impl Verifier for NoopVerifier { &self, _: &Header, _t: &Header, - _: &EthEngine, + _: &dyn EthEngine, _: Option> ) -> Result<(), Error> { Ok(()) @@ -42,7 +42,7 @@ impl Verifier for NoopVerifier { Ok(()) } - fn verify_block_external(&self, _header: &Header, _engine: &EthEngine) -> Result<(), Error> { + fn verify_block_external(&self, _header: &Header, _engine: &dyn EthEngine) -> Result<(), Error> { Ok(()) } } diff --git a/ethcore/src/verification/queue/kind.rs b/ethcore/src/verification/queue/kind.rs index d8f4dd4034c..ed96ebc9d0e 100644 --- a/ethcore/src/verification/queue/kind.rs +++ b/ethcore/src/verification/queue/kind.rs @@ -58,10 +58,10 @@ pub trait Kind: 'static + Sized + Send + Sync { type Verified: Sized + Send + BlockLike + HeapSizeOf; /// Attempt to create the `Unverified` item from the input. - fn create(input: Self::Input, engine: &EthEngine, check_seal: bool) -> Result; + fn create(input: Self::Input, engine: &dyn EthEngine, check_seal: bool) -> Result; /// Attempt to verify the `Unverified` item using the given engine. 
- fn verify(unverified: Self::Unverified, engine: &EthEngine, check_seal: bool) -> Result; + fn verify(unverified: Self::Unverified, engine: &dyn EthEngine, check_seal: bool) -> Result; } /// The blocks verification module. @@ -86,7 +86,7 @@ pub mod blocks { type Unverified = Unverified; type Verified = PreverifiedBlock; - fn create(input: Self::Input, engine: &EthEngine, check_seal: bool) -> Result { + fn create(input: Self::Input, engine: &dyn EthEngine, check_seal: bool) -> Result { match verify_block_basic(&input, engine, check_seal) { Ok(()) => Ok(input), Err(Error::Block(BlockError::TemporarilyInvalid(oob))) => { @@ -100,7 +100,7 @@ pub mod blocks { } } - fn verify(un: Self::Unverified, engine: &EthEngine, check_seal: bool) -> Result { + fn verify(un: Self::Unverified, engine: &dyn EthEngine, check_seal: bool) -> Result { let hash = un.hash(); match verify_block_unordered(un, engine, check_seal) { Ok(verified) => Ok(verified), @@ -209,14 +209,14 @@ pub mod headers { type Unverified = Header; type Verified = Header; - fn create(input: Self::Input, engine: &EthEngine, check_seal: bool) -> Result { + fn create(input: Self::Input, engine: &dyn EthEngine, check_seal: bool) -> Result { match verify_header_params(&input, engine, true, check_seal) { Ok(_) => Ok(input), Err(err) => Err((input, err)) } } - fn verify(unverified: Self::Unverified, engine: &EthEngine, check_seal: bool) -> Result { + fn verify(unverified: Self::Unverified, engine: &dyn EthEngine, check_seal: bool) -> Result { match check_seal { true => engine.verify_block_unordered(&unverified,).map(|_| unverified), false => Ok(unverified), diff --git a/ethcore/src/verification/queue/mod.rs b/ethcore/src/verification/queue/mod.rs index 79801019d3f..3f3484ddc44 100644 --- a/ethcore/src/verification/queue/mod.rs +++ b/ethcore/src/verification/queue/mod.rs @@ -138,7 +138,7 @@ struct Sizes { /// A queue of items to be verified. Sits between network or other I/O and the `BlockChain`. 
/// Keeps them in the same order as inserted, minus invalid items. pub struct VerificationQueue { - engine: Arc, + engine: Arc, more_to_verify: Arc, verification: Arc>, deleting: Arc, @@ -206,7 +206,7 @@ struct Verification { impl VerificationQueue { /// Creates a new queue instance. - pub fn new(config: Config, engine: Arc, message_channel: IoChannel, check_seal: bool) -> Self { + pub fn new(config: Config, engine: Arc, message_channel: IoChannel, check_seal: bool) -> Self { let verification = Arc::new(Verification { unverified: LenCachingMutex::new(VecDeque::new()), verifying: LenCachingMutex::new(VecDeque::new()), @@ -293,7 +293,7 @@ impl VerificationQueue { fn verify( verification: Arc>, - engine: Arc, + engine: Arc, wait: Arc, ready: Arc, empty: Arc, diff --git a/ethcore/src/verification/verification.rs b/ethcore/src/verification/verification.rs index eff7ae406cc..fa458803cd5 100644 --- a/ethcore/src/verification/verification.rs +++ b/ethcore/src/verification/verification.rs @@ -64,7 +64,7 @@ impl HeapSizeOf for PreverifiedBlock { } /// Phase 1 quick block verification. Only does checks that are cheap. Operates on a single block -pub fn verify_block_basic(block: &Unverified, engine: &EthEngine, check_seal: bool) -> Result<(), Error> { +pub fn verify_block_basic(block: &Unverified, engine: &dyn EthEngine, check_seal: bool) -> Result<(), Error> { verify_header_params(&block.header, engine, true, check_seal)?; verify_block_integrity(block)?; @@ -89,7 +89,7 @@ pub fn verify_block_basic(block: &Unverified, engine: &EthEngine, check_seal: bo /// Phase 2 verification. Perform costly checks such as transaction signatures and block nonce for ethash. 
/// Still operates on a individual block /// Returns a `PreverifiedBlock` structure populated with transactions -pub fn verify_block_unordered(block: Unverified, engine: &EthEngine, check_seal: bool) -> Result { +pub fn verify_block_unordered(block: Unverified, engine: &dyn EthEngine, check_seal: bool) -> Result { let header = block.header; if check_seal { engine.verify_block_unordered(&header)?; @@ -131,14 +131,14 @@ pub struct FullFamilyParams<'a, C: BlockInfo + CallContract + 'a> { pub block: &'a PreverifiedBlock, /// Block provider to use during verification - pub block_provider: &'a BlockProvider, + pub block_provider: &'a dyn BlockProvider, /// Engine client to use during verification pub client: &'a C, } /// Phase 3 verification. Check block information against parent and uncles. -pub fn verify_block_family(header: &Header, parent: &Header, engine: &EthEngine, do_full: Option>) -> Result<(), Error> { +pub fn verify_block_family(header: &Header, parent: &Header, engine: &dyn EthEngine, do_full: Option>) -> Result<(), Error> { // TODO: verify timestamp verify_parent(&header, &parent, engine)?; engine.verify_block_family(&header, &parent)?; @@ -159,7 +159,7 @@ pub fn verify_block_family(header: &Header, parent: Ok(()) } -fn verify_uncles(block: &PreverifiedBlock, bc: &BlockProvider, engine: &EthEngine) -> Result<(), Error> { +fn verify_uncles(block: &PreverifiedBlock, bc: &dyn BlockProvider, engine: &dyn EthEngine) -> Result<(), Error> { let header = &block.header; let num_uncles = block.uncles.len(); let max_uncles = engine.maximum_uncle_count(header.number()); @@ -267,7 +267,7 @@ pub fn verify_block_final(expected: &Header, got: &Header) -> Result<(), Error> } /// Check basic header parameters. 
-pub fn verify_header_params(header: &Header, engine: &EthEngine, is_full: bool, check_seal: bool) -> Result<(), Error> { +pub fn verify_header_params(header: &Header, engine: &dyn EthEngine, is_full: bool, check_seal: bool) -> Result<(), Error> { if check_seal { let expected_seal_fields = engine.seal_fields(header); if header.seal().len() != expected_seal_fields { @@ -326,7 +326,7 @@ pub fn verify_header_params(header: &Header, engine: &EthEngine, is_full: bool, } /// Check header parameters agains parent header. -fn verify_parent(header: &Header, parent: &Header, engine: &EthEngine) -> Result<(), Error> { +fn verify_parent(header: &Header, parent: &Header, engine: &dyn EthEngine) -> Result<(), Error> { assert!(header.parent_hash().is_zero() || &parent.hash() == header.parent_hash(), "Parent hash should already have been verified; qed"); @@ -516,12 +516,12 @@ mod tests { } } - fn basic_test(bytes: &[u8], engine: &EthEngine) -> Result<(), Error> { + fn basic_test(bytes: &[u8], engine: &dyn EthEngine) -> Result<(), Error> { let unverified = Unverified::from_rlp(bytes.to_vec())?; verify_block_basic(&unverified, engine, true) } - fn family_test(bytes: &[u8], engine: &EthEngine, bc: &BC) -> Result<(), Error> where BC: BlockProvider { + fn family_test(bytes: &[u8], engine: &dyn EthEngine, bc: &BC) -> Result<(), Error> where BC: BlockProvider { let block = Unverified::from_rlp(bytes.to_vec()).unwrap(); let header = block.header; let transactions: Vec<_> = block.transactions @@ -547,13 +547,13 @@ mod tests { let full_params = FullFamilyParams { block: &block, - block_provider: bc as &BlockProvider, + block_provider: bc as &dyn BlockProvider, client: &client, }; verify_block_family(&block.header, &parent, engine, Some(full_params)) } - fn unordered_test(bytes: &[u8], engine: &EthEngine) -> Result<(), Error> { + fn unordered_test(bytes: &[u8], engine: &dyn EthEngine) -> Result<(), Error> { let un = Unverified::from_rlp(bytes.to_vec())?; verify_block_unordered(un, engine, 
false)?; Ok(()) diff --git a/ethcore/src/verification/verifier.rs b/ethcore/src/verification/verifier.rs index f7221dae81d..309e596aa78 100644 --- a/ethcore/src/verification/verifier.rs +++ b/ethcore/src/verification/verifier.rs @@ -32,12 +32,12 @@ pub trait Verifier: Send + Sync &self, header: &Header, parent: &Header, - engine: &EthEngine, + engine: &dyn EthEngine, do_full: Option> ) -> Result<(), Error>; /// Do a final verification check for an enacted header vs its expected counterpart. fn verify_block_final(&self, expected: &Header, got: &Header) -> Result<(), Error>; /// Verify a block, inspecting external state. - fn verify_block_external(&self, header: &Header, engine: &EthEngine) -> Result<(), Error>; + fn verify_block_external(&self, header: &Header, engine: &dyn EthEngine) -> Result<(), Error>; } From bf55db4c7e3b4298597aec868713428a18d53e4d Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Mon, 17 Jun 2019 07:44:59 +0100 Subject: [PATCH 14/16] Die error_chain, die (#10747) * Replace error chain for network error * Fix usages and add manual From impls * OnDemand Error and remove remaining dependencies * Die error_chain, die. 
* DIE * Hasta la vista, baby --- Cargo.lock | 5 +- ethcore/light/Cargo.toml | 1 + ethcore/light/src/client/header_chain.rs | 4 +- ethcore/light/src/lib.rs | 3 +- ethcore/light/src/on_demand/mod.rs | 43 +++-- ethcore/private-tx/src/lib.rs | 4 - ethcore/service/Cargo.toml | 2 +- ethcore/service/src/error.rs | 26 +-- ethcore/service/src/lib.rs | 5 +- ethcore/service/src/service.rs | 4 +- ethcore/src/lib.rs | 4 - ethcore/sync/src/api.rs | 6 +- ethcore/sync/src/blocks.rs | 8 +- ethcore/sync/src/lib.rs | 2 +- miner/src/lib.rs | 2 - miner/src/pool/verifier.rs | 22 +-- parity/configuration.rs | 2 +- parity/helpers.rs | 2 +- rpc/src/v1/helpers/errors.rs | 17 +- util/network-devp2p/src/connection.rs | 12 +- util/network-devp2p/src/discovery.rs | 8 +- util/network-devp2p/src/handshake.rs | 10 +- util/network-devp2p/src/host.rs | 8 +- util/network-devp2p/src/lib.rs | 2 - util/network-devp2p/src/node_table.rs | 18 +-- util/network-devp2p/src/session.rs | 18 +-- util/network/Cargo.toml | 2 +- util/network/src/error.rs | 191 ++++++++++++----------- util/network/src/lib.rs | 6 +- 29 files changed, 213 insertions(+), 224 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c3a00e53ff4..1d884f5037e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1013,6 +1013,7 @@ version = "1.12.0" dependencies = [ "bincode 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "common-types 0.1.0", + "derive_more 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)", "error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.12.0", "ethcore-blockchain 0.1.0", @@ -1106,7 +1107,7 @@ name = "ethcore-network" version = "1.12.0" dependencies = [ "assert_matches 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", + "derive_more 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-io 1.12.0", "ethereum-types 0.6.0 
(registry+https://github.com/rust-lang/crates.io-index)", "ethkey 0.3.0", @@ -1243,7 +1244,7 @@ name = "ethcore-service" version = "0.1.0" dependencies = [ "ansi_term 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)", - "error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", + "derive_more 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.12.0", "ethcore-blockchain 0.1.0", "ethcore-db 0.1.0", diff --git a/ethcore/light/Cargo.toml b/ethcore/light/Cargo.toml index 8f76651d03c..fe0901dd3c5 100644 --- a/ethcore/light/Cargo.toml +++ b/ethcore/light/Cargo.toml @@ -10,6 +10,7 @@ authors = ["Parity Technologies "] log = "0.4" parity-bytes = "0.1" common-types = { path = "../types" } +derive_more = "0.14.0" ethcore = { path = ".."} ethcore-db = { path = "../db" } ethcore-blockchain = { path = "../blockchain" } diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 2bcf53cb9f7..be52185bc53 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -260,7 +260,7 @@ impl HeaderChain { let best_block = { let era = match candidates.get(&curr.best_num) { Some(era) => era, - None => bail!("Database corrupt: highest block referenced but no data."), + None => return Err("Database corrupt: highest block referenced but no data.".into()), }; let best = &era.candidates[0]; @@ -582,7 +582,7 @@ impl HeaderChain { } else { let msg = format!("header of block #{} not found in DB ; database in an \ inconsistent state", h_num); - bail!(msg); + return Err(msg.into()); }; let decoded = header.decode().expect("decoding db value failed"); diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index 31deecf3123..5221830af90 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -86,8 +86,7 @@ extern crate keccak_hash as hash; extern crate triehash_ethereum as triehash; extern crate kvdb; extern crate memory_cache; 
-#[macro_use] -extern crate error_chain; +extern crate derive_more; #[cfg(test)] extern crate kvdb_memorydb; diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 7d1f4fabf8e..b54257c2794 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -66,32 +66,31 @@ pub const DEFAULT_NUM_CONSECUTIVE_FAILED_REQUESTS: usize = 1; /// OnDemand related errors pub mod error { - // Silence: `use of deprecated item 'std::error::Error::cause': replaced by Error::source, which can support downcasting` - // https://github.com/paritytech/parity-ethereum/issues/10302 - #![allow(deprecated)] - use futures::sync::oneshot::Canceled; - error_chain! { - - foreign_links { - ChannelCanceled(Canceled) #[doc = "Canceled oneshot channel"]; - } - - errors { - #[doc = "Timeout bad response"] - BadResponse(err: String) { - description("Max response evaluation time exceeded") - display("{}", err) - } + /// OnDemand Error + #[derive(Debug, derive_more::Display, derive_more::From)] + pub enum Error { + /// Canceled oneshot channel + ChannelCanceled(Canceled), + /// Timeout bad response + BadResponse(String), + /// OnDemand requests limit exceeded + #[display(fmt = "OnDemand request maximum backoff iterations exceeded")] + RequestLimit, + } - #[doc = "OnDemand requests limit exceeded"] - RequestLimit { - description("OnDemand request maximum backoff iterations exceeded") - display("OnDemand request maximum backoff iterations exceeded") + impl std::error::Error for Error { + fn source(&self) -> Option<&(std::error::Error + 'static)> { + match self { + Error::ChannelCanceled(err) => Some(err), + _ => None, } } } + + /// OnDemand Result + pub type Result = std::result::Result; } /// Public interface for performing network requests `OnDemand` @@ -272,7 +271,7 @@ impl Pending { response_err ); - let err = self::error::ErrorKind::BadResponse(err); + let err = self::error::Error::BadResponse(err); if 
self.sender.send(Err(err.into())).is_err() { debug!(target: "on_demand", "Dropped oneshot channel receiver on no response"); } @@ -280,7 +279,7 @@ impl Pending { // returning a peer discovery timeout during query attempts fn request_limit_reached(self) { - let err = self::error::ErrorKind::RequestLimit; + let err = self::error::Error::RequestLimit; if self.sender.send(Err(err.into())).is_err() { debug!(target: "on_demand", "Dropped oneshot channel receiver on time out"); } diff --git a/ethcore/private-tx/src/lib.rs b/ethcore/private-tx/src/lib.rs index b5d36d35967..b9de9e16735 100644 --- a/ethcore/private-tx/src/lib.rs +++ b/ethcore/private-tx/src/lib.rs @@ -16,10 +16,6 @@ //! Private transactions module. -// Recursion limit required because of -// error_chain foreign_links. -#![recursion_limit="256"] - mod encryptor; mod key_server_keys; mod private_transactions; diff --git a/ethcore/service/Cargo.toml b/ethcore/service/Cargo.toml index 218d4f475d5..be9c3d45377 100644 --- a/ethcore/service/Cargo.toml +++ b/ethcore/service/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] [dependencies] ansi_term = "0.10" -error-chain = { version = "0.12", default-features = false } +derive_more = "0.14.0" ethcore = { path = ".." } ethcore-blockchain = { path = "../blockchain" } ethcore-io = { path = "../../util/io" } diff --git a/ethcore/service/src/error.rs b/ethcore/service/src/error.rs index c73cb0dfc16..b2787e97861 100644 --- a/ethcore/service/src/error.rs +++ b/ethcore/service/src/error.rs @@ -14,18 +14,26 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -// Silence: `use of deprecated item 'std::error::Error::cause': replaced by Error::source, which can support downcasting` -// https://github.com/paritytech/parity-ethereum/issues/10302 -#![allow(deprecated)] - use ethcore; use io; use ethcore_private_tx; -error_chain! 
{ - foreign_links { - Ethcore(ethcore::error::Error); - IoError(io::IoError); - PrivateTransactions(ethcore_private_tx::Error); +#[derive(Debug, derive_more::Display, derive_more::From)] +pub enum Error { + /// Ethcore Error + Ethcore(ethcore::error::Error), + /// Io Error + IoError(io::IoError), + /// Private Transactions Error + PrivateTransactions(ethcore_private_tx::Error), +} + +impl std::error::Error for Error { + fn source(&self) -> Option<&(std::error::Error + 'static)> { + match self { + Error::Ethcore(err) => Some(err), + Error::IoError(err) => Some(err), + Error::PrivateTransactions(err) => Some(err), + } } } diff --git a/ethcore/service/src/lib.rs b/ethcore/service/src/lib.rs index 7828fff8b44..67292e1d8da 100644 --- a/ethcore/service/src/lib.rs +++ b/ethcore/service/src/lib.rs @@ -23,8 +23,7 @@ extern crate ethcore_sync as sync; extern crate ethereum_types; extern crate kvdb; -#[macro_use] -extern crate error_chain; +extern crate derive_more; #[macro_use] extern crate log; #[macro_use] @@ -42,5 +41,5 @@ mod stop_guard; #[cfg(test)] extern crate kvdb_rocksdb; -pub use error::{Error, ErrorKind}; +pub use error::Error; pub use service::{ClientService, PrivateTxService}; diff --git a/ethcore/service/src/service.rs b/ethcore/service/src/service.rs index c16a071892b..10909f98507 100644 --- a/ethcore/service/src/service.rs +++ b/ethcore/service/src/service.rs @@ -59,7 +59,7 @@ impl PrivateTxHandler for PrivateTxService { Ok(import_result) => Ok(import_result), Err(err) => { warn!(target: "privatetx", "Unable to import private transaction packet: {}", err); - bail!(err.to_string()) + return Err(err.to_string()) } } } @@ -69,7 +69,7 @@ impl PrivateTxHandler for PrivateTxService { Ok(import_result) => Ok(import_result), Err(err) => { warn!(target: "privatetx", "Unable to import signed private transaction packet: {}", err); - bail!(err.to_string()) + return Err(err.to_string()) } } } diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 
87a1b4a0b01..a5f3adf5891 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -54,10 +54,6 @@ //! cargo build --release //! ``` -// Recursion limit required because of -// error_chain foreign_links. -#![recursion_limit="128"] - extern crate ansi_term; extern crate bn; extern crate byteorder; diff --git a/ethcore/sync/src/api.rs b/ethcore/sync/src/api.rs index ddb7542b749..7aa9e71ae0a 100644 --- a/ethcore/sync/src/api.rs +++ b/ethcore/sync/src/api.rs @@ -22,7 +22,7 @@ use std::time::Duration; use bytes::Bytes; use devp2p::NetworkService; use network::{NetworkProtocolHandler, NetworkContext, PeerId, ProtocolId, - NetworkConfiguration as BasicNetworkConfiguration, NonReservedPeerMode, Error, ErrorKind, + NetworkConfiguration as BasicNetworkConfiguration, NonReservedPeerMode, Error, ConnectionFilter}; use network::client_version::ClientVersion; @@ -593,7 +593,7 @@ impl ChainNotify for EthSync { match self.network.start() { Err((err, listen_address)) => { match err.into() { - ErrorKind::Io(ref e) if e.kind() == io::ErrorKind::AddrInUse => { + Error::Io(ref e) if e.kind() == io::ErrorKind::AddrInUse => { warn!("Network port {:?} is already in use, make sure that another instance of an Ethereum client is not running or change the port using the --port option.", listen_address.expect("Listen address is not set.")) }, err => warn!("Error starting network: {}", err), @@ -983,7 +983,7 @@ impl ManageNetwork for LightSync { match self.network.start() { Err((err, listen_address)) => { match err.into() { - ErrorKind::Io(ref e) if e.kind() == io::ErrorKind::AddrInUse => { + Error::Io(ref e) if e.kind() == io::ErrorKind::AddrInUse => { warn!("Network port {:?} is already in use, make sure that another instance of an Ethereum client is not running or change the port using the --port option.", listen_address.expect("Listen address is not set.")) }, err => warn!("Error starting network: {}", err), diff --git a/ethcore/sync/src/blocks.rs b/ethcore/sync/src/blocks.rs index 
7d9ad428a93..2ce2be33348 100644 --- a/ethcore/sync/src/blocks.rs +++ b/ethcore/sync/src/blocks.rs @@ -435,13 +435,13 @@ impl BlockCollection { }, None => { warn!("Got body with no header {}", h); - Err(network::ErrorKind::BadProtocol.into()) + Err(network::Error::BadProtocol) } } } None => { trace!(target: "sync", "Ignored unknown/stale block body. tx_root = {:?}, uncles = {:?}", header_id.transactions_root, header_id.uncles); - Err(network::ErrorKind::BadProtocol.into()) + Err(network::Error::BadProtocol) } } } @@ -463,7 +463,7 @@ impl BlockCollection { }, None => { warn!("Got receipt with no header {}", h); - return Err(network::ErrorKind::BadProtocol.into()) + return Err(network::Error::BadProtocol) } } } @@ -471,7 +471,7 @@ impl BlockCollection { }, hash_map::Entry::Vacant(_) => { trace!(target: "sync", "Ignored unknown/stale block receipt {:?}", receipt_root); - Err(network::ErrorKind::BadProtocol.into()) + Err(network::Error::BadProtocol) } } } diff --git a/ethcore/sync/src/lib.rs b/ethcore/sync/src/lib.rs index aecf8fbb82c..8ed8befc87a 100644 --- a/ethcore/sync/src/lib.rs +++ b/ethcore/sync/src/lib.rs @@ -76,5 +76,5 @@ mod api; pub use api::*; pub use chain::{SyncStatus, SyncState}; pub use devp2p::validate_node_url; -pub use network::{NonReservedPeerMode, Error, ErrorKind, ConnectionFilter, ConnectionDirection}; +pub use network::{NonReservedPeerMode, Error, ConnectionFilter, ConnectionDirection}; pub use private_tx::{PrivateTxHandler, NoopPrivateTxHandler, SimplePrivateTxHandler}; diff --git a/miner/src/lib.rs b/miner/src/lib.rs index 5babec09871..99f7a293b55 100644 --- a/miner/src/lib.rs +++ b/miner/src/lib.rs @@ -41,8 +41,6 @@ extern crate ethabi_contract; #[macro_use] extern crate ethabi_derive; #[macro_use] -extern crate error_chain; -#[macro_use] extern crate log; #[macro_use] extern crate serde_derive; diff --git a/miner/src/pool/verifier.rs b/miner/src/pool/verifier.rs index 1fded37630e..5c8273dd577 100644 --- a/miner/src/pool/verifier.rs +++ 
b/miner/src/pool/verifier.rs @@ -168,7 +168,7 @@ impl txpool::Verifier for Verifier txpool::Verifier for Verifier txpool::Verifier for Verifier txpool::Verifier for Verifier txpool::Verifier for Verifier txpool::Verifier for Verifier signed.into(), Err(err) => { debug!(target: "txqueue", "[{:?}] Rejected tx {:?}", hash, err); - bail!(err) + return Err(err) }, }, Transaction::Local(tx) => tx, @@ -256,7 +256,7 @@ impl txpool::Verifier for Verifier txpool::Verifier for Verifier txpool::Verifier for Verifier txpool::Verifier for Verifier txpool::Verifier for Verifier continue, - Some(sync::ErrorKind::AddressResolve(_)) => return Err(format!("Failed to resolve hostname of a boot node: {}", line)), + Some(sync::Error::AddressResolve(_)) => return Err(format!("Failed to resolve hostname of a boot node: {}", line)), Some(_) => return Err(format!("Invalid node address format given for a boot node: {}", line)), } } diff --git a/parity/helpers.rs b/parity/helpers.rs index 2f0046f228d..61947d6bf53 100644 --- a/parity/helpers.rs +++ b/parity/helpers.rs @@ -186,7 +186,7 @@ pub fn to_bootnodes(bootnodes: &Option) -> Result, String> { Some(ref x) if !x.is_empty() => x.split(',').map(|s| { match validate_node_url(s).map(Into::into) { None => Ok(s.to_owned()), - Some(sync::ErrorKind::AddressResolve(_)) => Err(format!("Failed to resolve hostname of a boot node: {}", s)), + Some(sync::Error::AddressResolve(_)) => Err(format!("Failed to resolve hostname of a boot node: {}", s)), Some(_) => Err(format!("Invalid node address format given for a boot node: {}", s)), } }).collect(), diff --git a/rpc/src/v1/helpers/errors.rs b/rpc/src/v1/helpers/errors.rs index 023f7df3ccc..3365c5490a0 100644 --- a/rpc/src/v1/helpers/errors.rs +++ b/rpc/src/v1/helpers/errors.rs @@ -25,7 +25,7 @@ use rlp::DecoderError; use types::transaction::Error as TransactionError; use ethcore_private_tx::Error as PrivateTransactionError; use vm::Error as VMError; -use light::on_demand::error::{Error as OnDemandError, 
ErrorKind as OnDemandErrorKind}; +use light::on_demand::error::{Error as OnDemandError}; use ethcore::client::BlockChainClient; use types::blockchain_info::BlockChainInfo; use v1::types::BlockNumber; @@ -555,10 +555,9 @@ pub fn filter_block_not_found(id: BlockId) -> Error { pub fn on_demand_error(err: OnDemandError) -> Error { match err { - OnDemandError(OnDemandErrorKind::ChannelCanceled(e), _) => on_demand_cancel(e), - OnDemandError(OnDemandErrorKind::RequestLimit, _) => timeout_new_peer(&err), - OnDemandError(OnDemandErrorKind::BadResponse(_), _) => max_attempts_reached(&err), - _ => on_demand_others(&err), + OnDemandError::ChannelCanceled(e) => on_demand_cancel(e), + OnDemandError::RequestLimit => timeout_new_peer(&err), + OnDemandError::BadResponse(_) => max_attempts_reached(&err), } } @@ -583,14 +582,6 @@ pub fn timeout_new_peer(err: &OnDemandError) -> Error { } } -pub fn on_demand_others(err: &OnDemandError) -> Error { - Error { - code: ErrorCode::ServerError(codes::UNKNOWN_ERROR), - message: err.to_string(), - data: None, - } -} - pub fn status_error(has_peers: bool) -> Error { if has_peers { no_work() diff --git a/util/network-devp2p/src/connection.rs b/util/network-devp2p/src/connection.rs index 424e7e17619..218fa924adf 100644 --- a/util/network-devp2p/src/connection.rs +++ b/util/network-devp2p/src/connection.rs @@ -37,7 +37,7 @@ use tiny_keccak::Keccak; use ethkey::crypto; use handshake::Handshake; use io::{IoContext, StreamToken}; -use network::{Error, ErrorKind}; +use network::Error; const ENCRYPTED_HEADER_LEN: usize = 32; const RECEIVE_PAYLOAD: Duration = Duration::from_secs(30); @@ -358,7 +358,7 @@ impl EncryptedConnection { let mut header = RlpStream::new(); let len = payload.len(); if len > MAX_PAYLOAD_SIZE { - bail!(ErrorKind::OversizedPacket); + return Err(Error::OversizedPacket); } header.append_raw(&[(len >> 16) as u8, (len >> 8) as u8, len as u8], 1); header.append_raw(&[0xc2u8, 0x80u8, 0x80u8], 1); @@ -386,14 +386,14 @@ impl 
EncryptedConnection { /// Decrypt and authenticate an incoming packet header. Prepare for receiving payload. fn read_header(&mut self, header: &[u8]) -> Result<(), Error> { if header.len() != ENCRYPTED_HEADER_LEN { - return Err(ErrorKind::Auth.into()); + return Err(Error::Auth); } EncryptedConnection::update_mac(&mut self.ingress_mac, &mut self.mac_encoder, &header[0..16]); let mac = &header[16..]; let mut expected = H256::zero(); self.ingress_mac.clone().finalize(expected.as_bytes_mut()); if mac != &expected[0..16] { - return Err(ErrorKind::Auth.into()); + return Err(Error::Auth); } let mut hdec = H128::default(); @@ -422,7 +422,7 @@ impl EncryptedConnection { let padding = (16 - (self.payload_len % 16)) % 16; let full_length = self.payload_len + padding + 16; if payload.len() != full_length { - return Err(ErrorKind::Auth.into()); + return Err(Error::Auth); } self.ingress_mac.update(&payload[0..payload.len() - 16]); EncryptedConnection::update_mac(&mut self.ingress_mac, &mut self.mac_encoder, &[0u8; 0]); @@ -430,7 +430,7 @@ impl EncryptedConnection { let mut expected = H128::default(); self.ingress_mac.clone().finalize(expected.as_bytes_mut()); if mac != &expected[..] 
{ - return Err(ErrorKind::Auth.into()); + return Err(Error::Auth); } let mut packet = vec![0u8; self.payload_len]; diff --git a/util/network-devp2p/src/discovery.rs b/util/network-devp2p/src/discovery.rs index bd5cd09a639..e196a2f1db5 100644 --- a/util/network-devp2p/src/discovery.rs +++ b/util/network-devp2p/src/discovery.rs @@ -27,7 +27,7 @@ use parity_bytes::Bytes; use rlp::{Rlp, RlpStream}; use ethkey::{KeyPair, recover, Secret, sign}; -use network::{Error, ErrorKind}; +use network::Error; use network::IpFilter; use node_table::*; use PROTOCOL_VERSION; @@ -482,12 +482,12 @@ impl<'a> Discovery<'a> { pub fn on_packet(&mut self, packet: &[u8], from: SocketAddr) -> Result, Error> { // validate packet if packet.len() < 32 + 65 + 4 + 1 { - return Err(ErrorKind::BadProtocol.into()); + return Err(Error::BadProtocol); } let hash_signed = keccak(&packet[32..]); if hash_signed[..] != packet[0..32] { - return Err(ErrorKind::BadProtocol.into()); + return Err(Error::BadProtocol); } let signed = &packet[(32 + 65)..]; @@ -512,7 +512,7 @@ impl<'a> Discovery<'a> { let secs_since_epoch = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(); if self.check_timestamps && timestamp < secs_since_epoch { debug!(target: "discovery", "Expired packet"); - return Err(ErrorKind::Expired.into()); + return Err(Error::Expired); } Ok(()) } diff --git a/util/network-devp2p/src/handshake.rs b/util/network-devp2p/src/handshake.rs index aee73d0e8b6..69c2ba22d7a 100644 --- a/util/network-devp2p/src/handshake.rs +++ b/util/network-devp2p/src/handshake.rs @@ -28,7 +28,7 @@ use ethkey::{Generator, KeyPair, Public, Random, recover, Secret, sign}; use ethkey::crypto::{ecdh, ecies}; use host::HostInfo; use io::{IoContext, StreamToken}; -use network::{Error, ErrorKind}; +use network::Error; use node_table::NodeId; #[derive(PartialEq, Eq, Debug)] @@ -166,7 +166,7 @@ impl Handshake { trace!(target: "network", "Received handshake auth from {:?}", self.connection.remote_addr_str()); if 
data.len() != V4_AUTH_PACKET_SIZE { debug!(target: "network", "Wrong auth packet size"); - return Err(ErrorKind::BadProtocol.into()); + return Err(Error::BadProtocol); } self.auth_cipher = data.to_vec(); match ecies::decrypt(secret, &[], data) { @@ -183,7 +183,7 @@ impl Handshake { let total = ((u16::from(data[0]) << 8 | (u16::from(data[1]))) as usize) + 2; if total < V4_AUTH_PACKET_SIZE { debug!(target: "network", "Wrong EIP8 auth packet size"); - return Err(ErrorKind::BadProtocol.into()); + return Err(Error::BadProtocol); } let rest = total - data.len(); self.state = HandshakeState::ReadingAuthEip8; @@ -212,7 +212,7 @@ impl Handshake { trace!(target: "network", "Received handshake ack from {:?}", self.connection.remote_addr_str()); if data.len() != V4_ACK_PACKET_SIZE { debug!(target: "network", "Wrong ack packet size"); - return Err(ErrorKind::BadProtocol.into()); + return Err(Error::BadProtocol); } self.ack_cipher = data.to_vec(); match ecies::decrypt(secret, &[], data) { @@ -226,7 +226,7 @@ impl Handshake { let total = (((u16::from(data[0])) << 8 | (u16::from(data[1]))) as usize) + 2; if total < V4_ACK_PACKET_SIZE { debug!(target: "network", "Wrong EIP8 ack packet size"); - return Err(ErrorKind::BadProtocol.into()); + return Err(Error::BadProtocol); } let rest = total - data.len(); self.state = HandshakeState::ReadingAckEip8; diff --git a/util/network-devp2p/src/host.rs b/util/network-devp2p/src/host.rs index cf9f6ac5b38..be50929b6cc 100644 --- a/util/network-devp2p/src/host.rs +++ b/util/network-devp2p/src/host.rs @@ -44,7 +44,7 @@ use io::*; use ip_utils::{map_external_address, select_public_address}; use network::{NetworkConfiguration, NetworkIoMessage, PacketId, PeerId, ProtocolId}; use network::{NetworkContext as NetworkContextTrait, NonReservedPeerMode}; -use network::{DisconnectReason, Error, ErrorKind, NetworkProtocolHandler, SessionInfo}; +use network::{DisconnectReason, Error, NetworkProtocolHandler, SessionInfo}; use network::{ConnectionDirection, 
ConnectionFilter}; use network::client_version::ClientVersion; use node_table::*; @@ -157,7 +157,7 @@ impl<'s> NetworkContextTrait for NetworkContext<'s> { fn respond(&self, packet_id: PacketId, data: Vec) -> Result<(), Error> { assert!(self.session.is_some(), "Respond called without network context"); - self.session_id.map_or_else(|| Err(ErrorKind::Expired.into()), |id| self.send(id, packet_id, data)) + self.session_id.map_or_else(|| Err(Error::Expired), |id| self.send(id, packet_id, data)) } fn disable_peer(&self, peer: PeerId) { @@ -719,8 +719,8 @@ impl Host { Err(e) => { let s = session.lock(); trace!(target: "network", "Session read error: {}:{:?} ({:?}) {:?}", token, s.id(), s.remote_addr(), e); - match *e.kind() { - ErrorKind::Disconnect(DisconnectReason::IncompatibleProtocol) | ErrorKind::Disconnect(DisconnectReason::UselessPeer) => { + match e { + Error::Disconnect(DisconnectReason::IncompatibleProtocol) | Error::Disconnect(DisconnectReason::UselessPeer) => { if let Some(id) = s.id() { if !self.reserved_nodes.read().contains(id) { let mut nodes = self.nodes.write(); diff --git a/util/network-devp2p/src/lib.rs b/util/network-devp2p/src/lib.rs index a0ded7d6bf6..b9417341a19 100644 --- a/util/network-devp2p/src/lib.rs +++ b/util/network-devp2p/src/lib.rs @@ -68,8 +68,6 @@ extern crate bytes; extern crate crypto as rcrypto; #[cfg(test)] extern crate env_logger; -#[macro_use] -extern crate error_chain; extern crate ethcore_io as io; extern crate ethcore_network as network; extern crate ethereum_types; diff --git a/util/network-devp2p/src/node_table.rs b/util/network-devp2p/src/node_table.rs index 0e39c43ce65..b65b5bd0a65 100644 --- a/util/network-devp2p/src/node_table.rs +++ b/util/network-devp2p/src/node_table.rs @@ -31,7 +31,7 @@ use serde_json; use discovery::{NodeEntry, TableUpdates}; use ip_utils::*; -use network::{AllowIP, Error, ErrorKind, IpFilter}; +use network::{AllowIP, Error, IpFilter}; /// Node public key pub type NodeId = H512; @@ -133,8 +133,8 @@ 
impl FromStr for NodeEndpoint { address: a, udp_port: a.port() }), - Ok(None) => bail!(ErrorKind::AddressResolve(None)), - Err(_) => Err(ErrorKind::AddressParse.into()) // always an io::Error of InvalidInput kind + Ok(None) => return Err(Error::AddressResolve(None.into())), + Err(_) => Err(Error::AddressParse) // always an io::Error of InvalidInput kind } } } @@ -216,7 +216,7 @@ impl FromStr for Node { type Err = Error; fn from_str(s: &str) -> Result { let (id, endpoint) = if s.len() > 136 && &s[0..8] == "enode://" && &s[136..137] == "@" { - (s[8..136].parse().map_err(|_| ErrorKind::InvalidNodeId)?, NodeEndpoint::from_str(&s[137..])?) + (s[8..136].parse().map_err(|_| Error::InvalidNodeId)?, NodeEndpoint::from_str(&s[137..])?) } else { (NodeId::default(), NodeEndpoint::from_str(s)?) @@ -629,21 +629,21 @@ mod tests { fn endpoint_parse_empty_ip_string_returns_error() { let endpoint = NodeEndpoint::from_str(""); assert!(endpoint.is_err()); - assert_matches!(endpoint.unwrap_err().kind(), &ErrorKind::AddressParse); + assert_matches!(endpoint.unwrap_err(), Error::AddressParse); } #[test] fn endpoint_parse_invalid_ip_string_returns_error() { let endpoint = NodeEndpoint::from_str("beef"); assert!(endpoint.is_err()); - assert_matches!(endpoint.unwrap_err().kind(), &ErrorKind::AddressParse); + assert_matches!(endpoint.unwrap_err(), Error::AddressParse); } #[test] fn endpoint_parse_valid_ip_without_port_returns_error() { let endpoint = NodeEndpoint::from_str("123.123.123.123"); assert!(endpoint.is_err()); - assert_matches!(endpoint.unwrap_err().kind(), &ErrorKind::AddressParse); + assert_matches!(endpoint.unwrap_err(), Error::AddressParse); let endpoint = NodeEndpoint::from_str("123.123.123.123:123"); assert!(endpoint.is_ok()) } @@ -668,11 +668,11 @@ mod tests { fn node_parse_fails_for_invalid_urls() { let node = Node::from_str("foo"); assert!(node.is_err()); - assert_matches!(node.unwrap_err().kind(), &ErrorKind::AddressParse); + assert_matches!(node.unwrap_err(), 
Error::AddressParse); let node = Node::from_str("enode://foo@bar"); assert!(node.is_err()); - assert_matches!(node.unwrap_err().kind(), &ErrorKind::AddressParse); + assert_matches!(node.unwrap_err(), Error::AddressParse); } #[test] diff --git a/util/network-devp2p/src/session.rs b/util/network-devp2p/src/session.rs index 8087342a15a..6776f04183b 100644 --- a/util/network-devp2p/src/session.rs +++ b/util/network-devp2p/src/session.rs @@ -30,7 +30,7 @@ use connection::{Connection, EncryptedConnection, MAX_PAYLOAD_SIZE, Packet}; use handshake::Handshake; use host::*; use io::{IoContext, StreamToken}; -use network::{DisconnectReason, Error, ErrorKind, PeerCapabilityInfo, ProtocolId, SessionInfo}; +use network::{DisconnectReason, Error, PeerCapabilityInfo, ProtocolId, SessionInfo}; use network::client_version::ClientVersion; use network::SessionCapabilityInfo; use node_table::NodeId; @@ -256,10 +256,10 @@ impl Session { where Message: Send + Sync + Clone { if protocol.is_some() && (self.info.capabilities.is_empty() || !self.had_hello) { debug!(target: "network", "Sending to unconfirmed session {}, protocol: {:?}, packet: {}", self.token(), protocol.as_ref().map(|p| str::from_utf8(&p[..]).unwrap_or("??")), packet_id); - bail!(ErrorKind::BadProtocol); + return Err(Error::BadProtocol); } if self.expired() { - return Err(ErrorKind::Expired.into()); + return Err(Error::Expired); } let mut i = 0usize; let pid = match protocol { @@ -281,7 +281,7 @@ impl Session { let mut payload = data; // create a reference with local lifetime if self.compression { if payload.len() > MAX_PAYLOAD_SIZE { - bail!(ErrorKind::OversizedPacket); + return Err(Error::OversizedPacket); } let len = snappy::compress_into(&payload, &mut compressed); trace!(target: "network", "compressed {} to {}", payload.len(), len); @@ -331,16 +331,16 @@ impl Session { fn read_packet(&mut self, io: &IoContext, packet: &Packet, host: &HostInfo) -> Result where Message: Send + Sync + Clone { if packet.data.len() < 2 { - 
return Err(ErrorKind::BadProtocol.into()); + return Err(Error::BadProtocol); } let packet_id = packet.data[0]; if packet_id != PACKET_HELLO && packet_id != PACKET_DISCONNECT && !self.had_hello { - return Err(ErrorKind::BadProtocol.into()); + return Err(Error::BadProtocol); } let data = if self.compression { let compressed = &packet.data[1..]; if snappy::decompressed_len(&compressed)? > MAX_PAYLOAD_SIZE { - bail!(ErrorKind::OversizedPacket); + return Err(Error::OversizedPacket); } snappy::decompress(&compressed)? } else { @@ -358,7 +358,7 @@ impl Session { if self.had_hello { debug!(target:"network", "Disconnected: {}: {:?}", self.token(), DisconnectReason::from_u8(reason)); } - Err(ErrorKind::Disconnect(DisconnectReason::from_u8(reason)).into()) + Err(Error::Disconnect(DisconnectReason::from_u8(reason))) } PACKET_PING => { self.send_pong(io)?; @@ -500,7 +500,7 @@ impl Session { rlp.append(&(reason as u32)); self.send_packet(io, None, PACKET_DISCONNECT, &rlp.drain()).ok(); } - ErrorKind::Disconnect(reason).into() + Error::Disconnect(reason) } fn send(&mut self, io: &IoContext, data: &[u8]) -> Result<(), Error> where Message: Send + Sync + Clone { diff --git a/util/network/Cargo.toml b/util/network/Cargo.toml index 018152c72f0..55c15d7d181 100644 --- a/util/network/Cargo.toml +++ b/util/network/Cargo.toml @@ -7,7 +7,7 @@ version = "1.12.0" authors = ["Parity Technologies "] [dependencies] -error-chain = { version = "0.12", default-features = false } +derive_more = "0.14.0" parity-crypto = "0.4.0" ethcore-io = { path = "../io" } ethereum-types = "0.6.0" diff --git a/util/network/src/error.rs b/util/network/src/error.rs index bd48830c1bb..81e3b78b887 100644 --- a/util/network/src/error.rs +++ b/util/network/src/error.rs @@ -14,11 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
-// Silence: `use of deprecated item 'std::error::Error::cause': replaced by Error::source, which can support downcasting` -// https://github.com/paritytech/parity-ethereum/issues/10302 -#![allow(deprecated)] - -use std::{io, net, fmt}; +use std::{error, io, net, fmt}; use libc::{ENFILE, EMFILE}; use io::IoError; use {rlp, ethkey, crypto, snappy}; @@ -85,118 +81,127 @@ impl fmt::Display for DisconnectReason { } } -error_chain! { - foreign_links { - SocketIo(IoError) #[doc = "Socket IO error."]; - Decompression(snappy::InvalidInput) #[doc = "Decompression error."]; - Rlp(rlp::DecoderError) #[doc = "Rlp decoder error."]; - } - - errors { - #[doc = "Error concerning the network address parsing subsystem."] - AddressParse { - description("Failed to parse network address"), - display("Failed to parse network address"), - } - - #[doc = "Error concerning the network address resolution subsystem."] - AddressResolve(err: Option) { - description("Failed to resolve network address"), - display("Failed to resolve network address {}", err.as_ref().map_or("".to_string(), |e| e.to_string())), - } - - #[doc = "Authentication failure"] - Auth { - description("Authentication failure"), - display("Authentication failure"), - } - - #[doc = "Unrecognised protocol"] - BadProtocol { - description("Bad protocol"), - display("Bad protocol"), - } - - #[doc = "Expired message"] - Expired { - description("Expired message"), - display("Expired message"), - } +/// Network error +#[derive(Debug, derive_more::Display)] +pub enum Error { + /// Socket IO error. + SocketIo(IoError), + /// Decompression error. + Decompression(snappy::InvalidInput), + /// Rlp decoder error. + Rlp(rlp::DecoderError), + /// Error concerning the network address parsing subsystem. + #[display(fmt = "Failed to parse network address")] + AddressParse, + /// Error concerning the network address resolution subsystem. 
+ #[display(fmt = "Failed to resolve network address {}", _0)] + AddressResolve(AddressResolveError), + /// Authentication failure + #[display(fmt = "Authentication failure")] + Auth, + /// Unrecognised protocol + #[display(fmt = "Bad protocol")] + BadProtocol, + /// Expired message + #[display(fmt = "Expired message")] + Expired, + /// Peer not found + #[display(fmt = "Peer not found")] + PeerNotFound, + /// Peer is disconnected + #[display(fmt = "Peer disconnected: {}", _0)] + Disconnect(DisconnectReason), + /// Invalid node id + #[display(fmt = "Invalid node id")] + InvalidNodeId, + /// Packet size is over the protocol limit + #[display(fmt = "Packet is too large")] + OversizedPacket, + /// Reached system resource limits for this process + #[display(fmt = "Too many open files in this process. Check your resource limits and restart parity")] + ProcessTooManyFiles, + /// Reached system wide resource limits + #[display(fmt = "Too many open files on system. Consider closing some processes/release some file handlers or increase the system-wide resource limits and restart parity.")] + SystemTooManyFiles, + /// An unknown IO error occurred. 
+ #[display(fmt = "Unexpected IO error: {}", _0)] + Io(io::Error), +} - #[doc = "Peer not found"] - PeerNotFound { - description("Peer not found"), - display("Peer not found"), - } +/// Wraps io::Error for Display impl +#[derive(Debug)] +pub struct AddressResolveError(Option); - #[doc = "Peer is disconnected"] - Disconnect(reason: DisconnectReason) { - description("Peer disconnected"), - display("Peer disconnected: {}", reason), - } +impl fmt::Display for AddressResolveError { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + write!(f, "{}", self.0.as_ref().map_or("".to_string(), |e| e.to_string())) + } +} - #[doc = "Invalid node id"] - InvalidNodeId { - description("Invalid node id"), - display("Invalid node id"), - } +impl From> for AddressResolveError { + fn from(err: Option) -> Self { + AddressResolveError(err) + } +} - #[doc = "Packet size is over the protocol limit"] - OversizedPacket { - description("Packet is too large"), - display("Packet is too large"), +impl error::Error for Error { + fn source(&self) -> Option<&(error::Error + 'static)> { + match self { + Error::Decompression(e) => Some(e), + Error::Rlp(e) => Some(e), + _ => None, } + } +} - #[doc = "Reached system resource limits for this process"] - ProcessTooManyFiles { - description("Too many open files in process."), - display("Too many open files in this process. Check your resource limits and restart parity"), - } +impl From for Error { + fn from(err: IoError) -> Self { + Error::SocketIo(err) + } +} - #[doc = "Reached system wide resource limits"] - SystemTooManyFiles { - description("Too many open files on system."), - display("Too many open files on system. 
Consider closing some processes/release some file handlers or increas the system-wide resource limits and restart parity."), - } +impl From for Error { + fn from(err: snappy::InvalidInput) -> Self { + Error::Decompression(err) + } +} - #[doc = "An unknown IO error occurred."] - Io(err: io::Error) { - description("IO Error"), - display("Unexpected IO error: {}", err), - } +impl From for Error { + fn from(err: rlp::DecoderError) -> Self { + Error::Rlp(err) } } impl From for Error { fn from(err: io::Error) -> Self { match err.raw_os_error() { - Some(ENFILE) => ErrorKind::ProcessTooManyFiles.into(), - Some(EMFILE) => ErrorKind::SystemTooManyFiles.into(), - _ => Error::from_kind(ErrorKind::Io(err)) + Some(ENFILE) => Error::ProcessTooManyFiles, + Some(EMFILE) => Error::SystemTooManyFiles, + _ => Error::Io(err) } } } impl From for Error { fn from(_err: ethkey::Error) -> Self { - ErrorKind::Auth.into() + Error::Auth } } impl From for Error { fn from(_err: ethkey::crypto::Error) -> Self { - ErrorKind::Auth.into() + Error::Auth } } impl From for Error { fn from(_err: crypto::error::SymmError) -> Self { - ErrorKind::Auth.into() + Error::Auth } } impl From for Error { - fn from(_err: net::AddrParseError) -> Self { ErrorKind::AddressParse.into() } + fn from(_err: net::AddrParseError) -> Self { Error::AddressParse } } #[test] @@ -208,13 +213,13 @@ fn test_errors() { } assert_eq!(DisconnectReason::Unknown, r); - match *>::from(rlp::DecoderError::RlpIsTooBig).kind() { - ErrorKind::Rlp(_) => {}, + match >::from(rlp::DecoderError::RlpIsTooBig) { + Error::Rlp(_) => {}, _ => panic!("Unexpected error"), } - match *>::from(ethkey::crypto::Error::InvalidMessage).kind() { - ErrorKind::Auth => {}, + match >::from(ethkey::crypto::Error::InvalidMessage) { + Error::Auth => {}, _ => panic!("Unexpected error"), } } @@ -226,18 +231,18 @@ fn test_io_errors() { assert_matches!( >::from( io::Error::from_raw_os_error(ENFILE) - ).kind(), - ErrorKind::ProcessTooManyFiles); + ), + 
Error::ProcessTooManyFiles); assert_matches!( >::from( io::Error::from_raw_os_error(EMFILE) - ).kind(), - ErrorKind::SystemTooManyFiles); + ), + Error::SystemTooManyFiles); assert_matches!( >::from( io::Error::from_raw_os_error(0) - ).kind(), - ErrorKind::Io(_)); + ), + Error::Io(_)); } diff --git a/util/network/src/lib.rs b/util/network/src/lib.rs index 9e6f71fdd16..be47fa36755 100644 --- a/util/network/src/lib.rs +++ b/util/network/src/lib.rs @@ -32,9 +32,7 @@ extern crate serde_derive; #[cfg(test)] #[macro_use] extern crate assert_matches; - -#[macro_use] -extern crate error_chain; +extern crate derive_more; #[macro_use] extern crate lazy_static; @@ -46,7 +44,7 @@ mod error; pub use connection_filter::{ConnectionFilter, ConnectionDirection}; pub use io::TimerToken; -pub use error::{Error, ErrorKind, DisconnectReason}; +pub use error::{Error, DisconnectReason}; use client_version::ClientVersion; use std::cmp::Ordering; From 20248c443ba537896e7c101cc425ab8ae6ead463 Mon Sep 17 00:00:00 2001 From: David Date: Mon, 17 Jun 2019 11:12:48 +0200 Subject: [PATCH 15/16] Use fewer threads for snapshotting (#10752) * Use fewer threads for snapshotting When taking a snapshot the current default number of threads is equal to half the number of **logical** CPUs in the system. On HT enabled CPUs this value seems a bit high, e.g. 6 snapshotting threads on a 6/12 core/hyperthread CPU. Maybe a better default value is half the number of physical cores? 
* fix test --- ethcore/src/snapshot/mod.rs | 2 +- parity/configuration.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index 84905f580f7..d2fa115db88 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -107,7 +107,7 @@ impl Default for SnapshotConfiguration { fn default() -> Self { SnapshotConfiguration { no_periodic: false, - processing_threads: ::std::cmp::max(1, num_cpus::get() / 2), + processing_threads: ::std::cmp::max(1, num_cpus::get_physical() / 2), } } } diff --git a/parity/configuration.rs b/parity/configuration.rs index e5c950b5c5d..0d6c2d86fe0 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -934,7 +934,7 @@ impl Configuration { no_periodic: self.args.flag_no_periodic_snapshot, processing_threads: match self.args.arg_snapshot_threads { Some(threads) if threads > 0 => threads, - _ => ::std::cmp::max(1, num_cpus::get() / 2), + _ => ::std::cmp::max(1, num_cpus::get_physical() / 2), }, }; From 35c607f6beaf6ae4ef0cf29ced7b6aeb25dae6a1 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Mon, 17 Jun 2019 13:22:53 +0200 Subject: [PATCH 16/16] updater: fix static id hashes initialization (#10755) * updater: fix static id hashes initialization * Update updater/src/updater.rs --- updater/src/updater.rs | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/updater/src/updater.rs b/updater/src/updater.rs index 27daac4edb8..6e84f8281e9 100644 --- a/updater/src/updater.rs +++ b/updater/src/updater.rs @@ -159,7 +159,7 @@ pub struct Updater H256 { + let mut bytes = s.as_bytes().to_vec(); + bytes.resize(H256::len_bytes(), 0); + H256::from_slice(&bytes) } /// Client trait for getting latest release information from operations contract. 
@@ -1253,4 +1262,11 @@ pub mod tests { // and since our update policy requires consensus, the client should be disabled assert!(client.is_disabled()); } + + #[test] + fn static_hashes_do_not_panic() { + let client_id_hash: H256 = *CLIENT_ID_HASH; + assert_eq!(&format!("{:x}", client_id_hash), "7061726974790000000000000000000000000000000000000000000000000000"); + let _: H256 = *PLATFORM_ID_HASH; + } }