Skip to content
This repository has been archived by the owner on Nov 6, 2020. It is now read-only.

Add tokio runtime to ethcore io worker #9979

Merged
merged 2 commits into from
Dec 3, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 2 additions & 0 deletions util/io/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -16,3 +16,5 @@ slab = "0.4"
num_cpus = "1.8"
timer = "0.2"
time = "0.1"
tokio = "0.1"
futures = "0.1"
2 changes: 2 additions & 0 deletions util/io/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,8 @@ extern crate num_cpus;
extern crate timer;
extern crate fnv;
extern crate time;
extern crate tokio;
extern crate futures;

#[cfg(feature = "mio")]
mod service_mio;
Expand Down
50 changes: 24 additions & 26 deletions util/io/src/worker.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,11 +14,13 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use futures::future::{self, Loop};
use std::sync::Arc;
use std::thread::{JoinHandle, self};
use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering};
use deque;
use service_mio::{HandlerId, IoChannel, IoContext};
use tokio::{self};
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I guess it is safer to keep this `self` for backward compatibility, but it has not been needed since Rust 1.30.

use IoHandler;
use LOCAL_STACK_SIZE;

Expand Down Expand Up @@ -69,37 +71,33 @@ impl Worker {
worker.thread = Some(thread::Builder::new().stack_size(STACK_SIZE).name(format!("IO Worker #{}", index)).spawn(
move || {
LOCAL_STACK_SIZE.with(|val| val.set(STACK_SIZE));
Worker::work_loop(stealer, channel.clone(), wait, wait_mutex.clone(), deleting)
let ini = (stealer, channel.clone(), wait, wait_mutex.clone(), deleting);
let future = future::loop_fn(ini, |(stealer, channel, wait, wait_mutex, deleting)| {
{
let mut lock = wait_mutex.lock();
if deleting.load(AtomicOrdering::Acquire) {
return Ok(Loop::Break(()));
}
wait.wait(&mut lock);
}

while !deleting.load(AtomicOrdering::Acquire) {
match stealer.steal() {
deque::Steal::Data(work) => Worker::do_work(work, channel.clone()),
deque::Steal::Retry => {},
deque::Steal::Empty => break,
}
}
Ok(Loop::Continue((stealer, channel, wait, wait_mutex, deleting)))
});
if let Err(()) = tokio::runtime::current_thread::block_on_all(future) {
error!(target: "ioworker", "error while executing future")
}
})
.expect("Error creating worker thread"));
worker
}

/// Blocking loop run by each IO worker thread: sleep on the condvar until
/// notified, then drain all pending work from the shared deque.
///
/// Exits (returning) only once `deleting` is set to `true`, which the owner
/// signals before joining the thread.
// NOTE(review): `wait_mutex.lock()` returns a guard directly (no Result) —
// presumably parking_lot primitives; confirm against the crate's imports.
fn work_loop<Message>(stealer: deque::Stealer<Work<Message>>,
channel: IoChannel<Message>,
wait: Arc<Condvar>,
wait_mutex: Arc<Mutex<()>>,
deleting: Arc<AtomicBool>)
where Message: Send + Sync + 'static {
loop {
{
// Park until new work is signalled. Check the shutdown flag while
// holding the lock so a shutdown signalled before we wait is not missed.
let mut lock = wait_mutex.lock();
if deleting.load(AtomicOrdering::Acquire) {
return;
}
wait.wait(&mut lock);
}

// Drain the deque; re-check the shutdown flag between items so a
// shutdown request is honoured promptly even with a backlog of work.
while !deleting.load(AtomicOrdering::Acquire) {
match stealer.steal() {
deque::Steal::Data(work) => Worker::do_work(work, channel.clone()),
// Steal contention: another worker raced us — retry the steal.
deque::Steal::Retry => {},
// Deque exhausted: go back to waiting on the condvar.
deque::Steal::Empty => break,
}
}
}
}

fn do_work<Message>(work: Work<Message>, channel: IoChannel<Message>) where Message: Send + Sync + 'static {
match work.work_type {
WorkType::Readable => {
Expand Down