diff --git a/tokio/src/runtime/basic_scheduler.rs b/tokio/src/runtime/basic_scheduler.rs
index d873fcf4699..872d0d5b897 100644
--- a/tokio/src/runtime/basic_scheduler.rs
+++ b/tokio/src/runtime/basic_scheduler.rs
@@ -3,12 +3,10 @@ use crate::loom::sync::atomic::AtomicBool;
 use crate::loom::sync::Mutex;
 use crate::park::{Park, Unpark};
 use crate::runtime::context::EnterGuard;
-use crate::runtime::driver::Driver;
 use crate::runtime::stats::{RuntimeStats, WorkerStatsBatcher};
 use crate::runtime::task::{self, JoinHandle, OwnedTasks, Schedule, Task};
 use crate::runtime::Callback;
 use crate::sync::notify::Notify;
-use crate::util::atomic_cell::AtomicCell;
 use crate::util::{waker_ref, Wake, WakerRef};
 
 use std::cell::RefCell;
@@ -21,12 +19,13 @@ use std::task::Poll::{Pending, Ready};
 use std::time::Duration;
 
 /// Executes tasks on the current thread
-pub(crate) struct BasicScheduler {
-    /// Core scheduler data is acquired by a thread entering `block_on`.
-    core: AtomicCell<Core>,
+pub(crate) struct BasicScheduler<P: Park> {
+    /// Inner state guarded by a mutex that is shared
+    /// between all `block_on` calls.
+    inner: Mutex<Option<Inner<P>>>,
 
     /// Notifier for waking up other threads to steal the
-    /// driver.
+    /// parker.
     notify: Notify,
 
     /// Sendable task spawner
@@ -39,11 +38,15 @@ pub(crate) struct BasicScheduler {
     context_guard: Option<EnterGuard>,
 }
 
-/// Data required for executing the scheduler. The struct is passed around to
-/// a function that will perform the scheduling work and acts as a capability token.
-struct Core {
+/// The inner scheduler that owns the task queue and the main parker P.
+struct Inner<P: Park> {
     /// Scheduler run queue
-    tasks: VecDeque<task::Notified<Arc<Shared>>>,
+    ///
+    /// When the scheduler is executed, the queue is removed from `self` and
+    /// moved into `Context`.
+    ///
+    /// This indirection is to allow `BasicScheduler` to be `Send`.
+    tasks: Option<Tasks>,
 
     /// Sendable task spawner
     spawner: Spawner,
@@ -51,10 +54,13 @@ struct Core {
     /// Current tick
     tick: u8,
 
-    /// Runtime driver
-    ///
-    /// The driver is removed before starting to park the thread
-    driver: Option<Driver>,
+    /// Thread park handle
+    park: P,
+
+    /// Callback for a worker parking itself
+    before_park: Option<Callback>,
+    /// Callback for a worker unparking itself
+    after_unpark: Option<Callback>,
 
     /// Stats batcher
     stats: WorkerStatsBatcher,
@@ -65,6 +71,13 @@ pub(crate) struct Spawner {
     shared: Arc<Shared>,
 }
 
+struct Tasks {
+    /// Local run queue.
+    ///
+    /// Tasks notified from the current thread are pushed into this queue.
+    queue: VecDeque<task::Notified<Arc<Shared>>>,
+}
+
 /// A remote scheduler entry.
 ///
 /// These are filled in by remote threads sending instructions to the scheduler.
@@ -87,29 +100,22 @@ struct Shared {
     owned: OwnedTasks<Arc<Shared>>,
 
     /// Unpark the blocked thread.
-    unpark: <Driver as Park>::Unpark,
+    unpark: Box<dyn Unpark>,
 
     /// Indicates whether the blocked on thread was woken.
     woken: AtomicBool,
 
-    /// Callback for a worker parking itself
-    before_park: Option<Callback>,
-
-    /// Callback for a worker unparking itself
-    after_unpark: Option<Callback>,
-
     /// Keeps track of various runtime stats.
     stats: RuntimeStats,
 }
 
 /// Thread-local context.
 struct Context {
-    /// Handle to the spawner
-    spawner: Spawner,
+    /// Shared scheduler state
+    shared: Arc<Shared>,
 
-    /// Scheduler core, enabling the holder of `Context` to execute the
-    /// scheduler.
-    core: RefCell<Option<Box<Core>>>,
+    /// Local queue
+    tasks: RefCell<Tasks>,
 }
 
 /// Initial queue capacity.
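The `Mutex<Option<Inner<P>>>` slot plus the `Notify` field implement a hand-off: a thread entering `block_on` tries to steal the scheduler state, and a loser of that race waits until the winner puts it back. A minimal, self-contained sketch of the pattern, with a placeholder `State` and `std::sync::Condvar` standing in for `Inner<P>` and tokio's internal `Notify` (an illustration of the idea, not the runtime's actual code):

```rust
use std::sync::{Arc, Condvar, Mutex};

struct State; // stand-in for `Inner<P>`

struct Handoff {
    slot: Mutex<Option<State>>,
    available: Condvar,
}

impl Handoff {
    /// Steal the state if present, otherwise wait until another
    /// thread returns it and signals us.
    fn acquire(&self) -> State {
        let mut slot = self.slot.lock().unwrap();
        loop {
            if let Some(state) = slot.take() {
                return state;
            }
            slot = self.available.wait(slot).unwrap();
        }
    }

    /// Return the state and wake one waiting thread.
    fn release(&self, state: State) {
        *self.slot.lock().unwrap() = Some(state);
        self.available.notify_one();
    }
}

fn main() {
    let h = Arc::new(Handoff {
        slot: Mutex::new(Some(State)),
        available: Condvar::new(),
    });
    let h2 = Arc::clone(&h);
    let t = std::thread::spawn(move || {
        let state = h2.acquire();
        h2.release(state);
    });
    let state = h.acquire();
    h.release(state);
    t.join().unwrap();
}
```

The real `block_on` differs in one important way: a thread that fails to steal `Inner` does not block the thread outright; it waits on `notify.notified()` while still polling the `block_on` future, so it can finish if the future completes first.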

@@ -127,36 +133,38 @@ const REMOTE_FIRST_INTERVAL: u8 = 31;
 // Tracks the current BasicScheduler.
 scoped_thread_local!(static CURRENT: Context);
 
-impl BasicScheduler {
+impl<P: Park> BasicScheduler<P> {
     pub(crate) fn new(
-        driver: Driver,
+        park: P,
         before_park: Option<Callback>,
         after_unpark: Option<Callback>,
-    ) -> BasicScheduler {
-        let unpark = driver.unpark();
+    ) -> BasicScheduler<P> {
+        let unpark = Box::new(park.unpark());
 
         let spawner = Spawner {
             shared: Arc::new(Shared {
                 queue: Mutex::new(Some(VecDeque::with_capacity(INITIAL_CAPACITY))),
                 owned: OwnedTasks::new(),
-                unpark,
+                unpark: unpark as Box<dyn Unpark>,
                 woken: AtomicBool::new(false),
-                before_park,
-                after_unpark,
                 stats: RuntimeStats::new(1),
             }),
         };
 
-        let core = AtomicCell::new(Some(Box::new(Core {
-            tasks: VecDeque::with_capacity(INITIAL_CAPACITY),
+        let inner = Mutex::new(Some(Inner {
+            tasks: Some(Tasks {
+                queue: VecDeque::with_capacity(INITIAL_CAPACITY),
+            }),
             spawner: spawner.clone(),
             tick: 0,
-            driver: Some(driver),
+            park,
+            before_park,
+            after_unpark,
             stats: WorkerStatsBatcher::new(0),
-        })));
+        }));
 
         BasicScheduler {
-            core,
+            inner,
             notify: Notify::new(),
             spawner,
             context_guard: None,
@@ -170,12 +178,12 @@ impl BasicScheduler {
     pub(crate) fn block_on<F: Future>(&self, future: F) -> F::Output {
         pin!(future);
 
-        // Attempt to steal the scheduler core and block_on the future if we can
-        // there, otherwise, lets select on a notification that the core is
-        // available or the future is complete.
+        // Attempt to steal the dedicated parker and block_on the future there if
+        // we can; otherwise, select on a notification that the parker is available
+        // or the future is complete.
         loop {
-            if let Some(core) = self.take_core() {
-                return core.block_on(future);
+            if let Some(inner) = &mut self.take_inner() {
+                return inner.block_on(future);
             } else {
                 let mut enter = crate::runtime::enter(false);
 
@@ -202,14 +210,11 @@ impl BasicScheduler {
         }
     }
 
-    fn take_core(&self) -> Option<CoreGuard<'_>> {
-        let core = self.core.take()?;
+    fn take_inner(&self) -> Option<InnerGuard<'_, P>> {
+        let inner = self.inner.lock().take()?;
 
-        Some(CoreGuard {
-            context: Context {
-                spawner: self.spawner.clone(),
-                core: RefCell::new(Some(core)),
-            },
+        Some(InnerGuard {
+            inner: Some(inner),
             basic_scheduler: self,
         })
     }
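For reference, the behavior this hand-off supports: several threads may call `block_on` on the same current-thread runtime, and whichever thread steals `Inner` drives all tasks while the others wait on `notify`. A small usage sketch against tokio's public 1.x API (assumes the `rt` feature is enabled):

```rust
use std::sync::Arc;
use std::thread;

fn main() {
    let rt = Arc::new(
        tokio::runtime::Builder::new_current_thread()
            .build()
            .unwrap(),
    );

    let handles: Vec<_> = (0..2)
        .map(|i| {
            let rt = rt.clone();
            // Whichever thread takes `Inner` first drives the scheduler;
            // the other waits until it is put back.
            thread::spawn(move || rt.block_on(async move { i * 2 }))
        })
        .collect();

    for h in handles {
        h.join().unwrap();
    }
}
```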

@@ -219,109 +224,156 @@ impl BasicScheduler {
     }
 }
 
-impl Context {
-    /// Execute the closure with the given scheduler core stored in the
-    /// thread-local context.
-    fn run_task<R>(&self, mut core: Box<Core>, f: impl FnOnce() -> R) -> (Box<Core>, R) {
-        core.stats.incr_poll_count();
-        self.enter(core, || crate::coop::budget(f))
-    }
+impl<P: Park> Inner<P> {
+    /// Blocks on the provided future and drives the runtime's driver.
+    fn block_on<F: Future>(&mut self, future: F) -> F::Output {
+        enter(self, |scheduler, context| {
+            let _enter = crate::runtime::enter(false);
+            let waker = scheduler.spawner.waker_ref();
+            let mut cx = std::task::Context::from_waker(&waker);
 
-    /// Blocks the current thread until an event is received by the driver,
-    /// including I/O events, timer events, ...
-    fn park(&self, mut core: Box<Core>) -> Box<Core> {
-        let mut driver = core.driver.take().expect("driver missing");
-
-        if let Some(f) = &self.spawner.shared.before_park {
-            // Incorrect lint, the closures are actually different types so `f`
-            // cannot be passed as an argument to `enter`.
-            #[allow(clippy::redundant_closure)]
-            let (c, _) = self.enter(core, || f());
-            core = c;
-        }
+            pin!(future);
 
-        // This check will fail if `before_park` spawns a task for us to run
-        // instead of parking the thread
-        if core.tasks.is_empty() {
-            // Park until the thread is signaled
-            core.stats.about_to_park();
-            core.stats.submit(&core.spawner.shared.stats);
+            'outer: loop {
+                if scheduler.spawner.reset_woken() {
+                    scheduler.stats.incr_poll_count();
+                    if let Ready(v) = crate::coop::budget(|| future.as_mut().poll(&mut cx)) {
+                        return v;
+                    }
+                }
 
-            let (c, _) = self.enter(core, || {
-                driver.park().expect("failed to park");
-            });
+                for _ in 0..MAX_TASKS_PER_TICK {
+                    // Get and increment the current tick
+                    let tick = scheduler.tick;
+                    scheduler.tick = scheduler.tick.wrapping_add(1);
 
-            core = c;
-            core.stats.returned_from_park();
-        }
+                    let entry = if tick % REMOTE_FIRST_INTERVAL == 0 {
+                        scheduler.spawner.pop().or_else(|| {
+                            context
+                                .tasks
+                                .borrow_mut()
+                                .queue
+                                .pop_front()
+                                .map(RemoteMsg::Schedule)
+                        })
+                    } else {
+                        context
+                            .tasks
+                            .borrow_mut()
+                            .queue
+                            .pop_front()
+                            .map(RemoteMsg::Schedule)
+                            .or_else(|| scheduler.spawner.pop())
+                    };
 
-        if let Some(f) = &self.spawner.shared.after_unpark {
-            // Incorrect lint, the closures are actually different types so `f`
-            // cannot be passed as an argument to `enter`.
-            #[allow(clippy::redundant_closure)]
-            let (c, _) = self.enter(core, || f());
-            core = c;
-        }
+                    let entry = match entry {
+                        Some(entry) => entry,
+                        None => {
+                            if let Some(f) = &scheduler.before_park {
+                                f();
+                            }
+                            // This check will fail if `before_park` spawns a task for us to run
+                            // instead of parking the thread
+                            if context.tasks.borrow_mut().queue.is_empty() {
+                                // Park until the thread is signaled
+                                scheduler.stats.about_to_park();
+                                scheduler.stats.submit(&scheduler.spawner.shared.stats);
+                                scheduler.park.park().expect("failed to park");
+                                scheduler.stats.returned_from_park();
+                            }
+                            if let Some(f) = &scheduler.after_unpark {
+                                f();
+                            }
 
-        core.driver = Some(driver);
-        core
-    }
+                            // Try polling the `block_on` future next
+                            continue 'outer;
+                        }
+                    };
 
-    /// Checks the driver for new events without blocking the thread.
-    fn park_yield(&self, mut core: Box<Core>) -> Box<Core> {
-        let mut driver = core.driver.take().expect("driver missing");
+                    match entry {
+                        RemoteMsg::Schedule(task) => {
+                            scheduler.stats.incr_poll_count();
+                            let task = context.shared.owned.assert_owner(task);
+                            crate::coop::budget(|| task.run())
+                        }
+                    }
+                }
 
-        core.stats.submit(&core.spawner.shared.stats);
-        let (mut core, _) = self.enter(core, || {
-            driver
-                .park_timeout(Duration::from_millis(0))
-                .expect("failed to park");
-        });
+                // Yield to the park; this drives the timer and pulls any
+                // pending I/O events.
+                scheduler.stats.submit(&scheduler.spawner.shared.stats);
+                scheduler
+                    .park
+                    .park_timeout(Duration::from_millis(0))
+                    .expect("failed to park");
+            }
+        })
+    }
+}
 
-        core.driver = Some(driver);
-        core
-    }
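The `tick % REMOTE_FIRST_INTERVAL == 0` branch in `block_on` above is a fairness device: the local queue is normally drained first, but every 31st tick the remote queue gets priority so tasks woken from other threads cannot be starved by a busy local queue. A stripped-down sketch of just that selection logic (plain `VecDeque`s and a `u32` task id stand in for the scheduler's queue types):

```rust
use std::collections::VecDeque;

const REMOTE_FIRST_INTERVAL: u8 = 31;

/// Mirrors the pop order in `Inner::block_on`: local-first normally,
/// remote-first on every 31st tick.
fn next_task(tick: u8, local: &mut VecDeque<u32>, remote: &mut VecDeque<u32>) -> Option<u32> {
    if tick % REMOTE_FIRST_INTERVAL == 0 {
        remote.pop_front().or_else(|| local.pop_front())
    } else {
        local.pop_front().or_else(|| remote.pop_front())
    }
}

fn main() {
    let mut local: VecDeque<u32> = (0..3).collect();
    let mut remote: VecDeque<u32> = (100..103).collect();

    let mut tick: u8 = 0;
    while let Some(task) = next_task(tick, &mut local, &mut remote) {
        println!("tick {}: task {}", tick, task);
        tick = tick.wrapping_add(1);
    }
}
```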

+/// Enters the scheduler context. This sets the queue and other necessary
+/// scheduler state in the thread-local.
+fn enter<F, R, P>(scheduler: &mut Inner<P>, f: F) -> R
+where
+    F: FnOnce(&mut Inner<P>, &Context) -> R,
+    P: Park,
+{
+    // Ensures the run queue is placed back in the `BasicScheduler` instance
+    // once `block_on` returns.
+    struct Guard<'a, P: Park> {
+        context: Option<Context>,
+        scheduler: &'a mut Inner<P>,
+    }
 
-    fn enter<R>(&self, core: Box<Core>, f: impl FnOnce() -> R) -> (Box<Core>, R) {
-        // Store the scheduler core in the thread-local context
-        //
-        // A drop-guard is employed at a higher level.
-        *self.core.borrow_mut() = Some(core);
+    impl<P: Park> Drop for Guard<'_, P> {
+        fn drop(&mut self) {
+            let Context { tasks, .. } = self.context.take().expect("context missing");
+            self.scheduler.tasks = Some(tasks.into_inner());
+        }
+    }
 
-        // Execute the closure while tracking the execution budget
-        let ret = f();
+    // Remove `tasks` from `self` and place it in a `Context`.
+    let tasks = scheduler.tasks.take().expect("invalid state");
 
-        // Take the scheduler core back
-        let core = self.core.borrow_mut().take().expect("core missing");
-        (core, ret)
-    }
+    let guard = Guard {
+        context: Some(Context {
+            shared: scheduler.spawner.shared.clone(),
+            tasks: RefCell::new(tasks),
+        }),
+        scheduler,
+    };
+
+    let context = guard.context.as_ref().unwrap();
+    let scheduler = &mut *guard.scheduler;
+
+    CURRENT.set(context, || f(scheduler, context))
 }
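The `Guard` inside `enter` is what makes the hand-off panic-safe: `tasks` is moved out of the scheduler, and `Drop` unconditionally moves it back, even if `f` unwinds. The same shape in miniature, generic over any state `T` (the pattern only, not the scheduler's exact types):

```rust
/// Takes state out of a slot and is guaranteed, even on panic,
/// to put it back when dropped.
struct RestoreGuard<'a, T> {
    state: Option<T>,
    slot: &'a mut Option<T>,
}

impl<T> Drop for RestoreGuard<'_, T> {
    fn drop(&mut self) {
        *self.slot = self.state.take();
    }
}

fn with_state<T, R>(slot: &mut Option<T>, f: impl FnOnce(&mut T) -> R) -> R {
    let state = slot.take().expect("state missing");
    let mut guard = RestoreGuard { state: Some(state), slot };
    f(guard.state.as_mut().unwrap())
}

fn main() {
    let mut slot = Some(vec![1, 2, 3]);
    let sum: i32 = with_state(&mut slot, |v| v.iter().sum());
    assert_eq!(sum, 6);
    assert!(slot.is_some()); // the guard returned the state
}
```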

-impl Drop for BasicScheduler {
+impl<P: Park> Drop for BasicScheduler<P> {
     fn drop(&mut self) {
         // Avoid a double panic if we are currently panicking and
         // the lock may be poisoned.
-        let core = match self.take_core() {
-            Some(core) => core,
+        let mut inner = match self.inner.lock().take() {
+            Some(inner) => inner,
             None if std::thread::panicking() => return,
-            None => panic!("Oh no! We never placed the Core back, this is a bug!"),
+            None => panic!("Oh no! We never placed the Inner state back, this is a bug!"),
         };
 
-        core.enter(|mut core, context| {
+        enter(&mut inner, |scheduler, context| {
             // Drain the OwnedTasks collection. This call also closes the
             // collection, ensuring that no tasks are ever pushed after this
             // call returns.
-            context.spawner.shared.owned.close_and_shutdown_all();
+            context.shared.owned.close_and_shutdown_all();
 
             // Drain local queue
             // We already shut down every task, so we just need to drop the task.
-            while let Some(task) = core.tasks.pop_front() {
+            for task in context.tasks.borrow_mut().queue.drain(..) {
                 drop(task);
             }
 
             // Drain remote queue and set it to None
-            let remote_queue = core.spawner.shared.queue.lock().take();
+            let remote_queue = scheduler.spawner.shared.queue.lock().take();
 
             // Using `Option::take` to replace the shared queue with `None`.
             // We already shut down every task, so we just need to drop the task.
@@ -335,14 +387,12 @@ impl Drop for BasicScheduler {
             }
         }
 
-            assert!(context.spawner.shared.owned.is_empty());
-
-            (core, ())
+            assert!(context.shared.owned.is_empty());
         });
     }
 }
 
-impl fmt::Debug for BasicScheduler {
+impl<P: Park> fmt::Debug for BasicScheduler<P> {
     fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
         fmt.debug_struct("BasicScheduler").finish()
     }
@@ -405,13 +455,8 @@ impl Schedule for Arc<Shared> {
     fn schedule(&self, task: task::Notified<Self>) {
         CURRENT.with(|maybe_cx| match maybe_cx {
-            Some(cx) if Arc::ptr_eq(self, &cx.spawner.shared) => {
-                cx.core
-                    .borrow_mut()
-                    .as_mut()
-                    .expect("core missing")
-                    .tasks
-                    .push_back(task);
+            Some(cx) if Arc::ptr_eq(self, &cx.shared) => {
+                cx.tasks.borrow_mut().queue.push_back(task);
             }
             _ => {
                 // If the queue is None, then the runtime has shut down. We
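In `schedule` above, the fast path applies only when the waking thread is the thread currently running this same scheduler: `CURRENT.with` reads the thread-local context and `Arc::ptr_eq` confirms it belongs to this runtime rather than some other `BasicScheduler`. A reduced sketch of that dispatch, with a `u32` task and a plain `thread_local!` standing in for the scoped thread-local (the real remote path also unparks the scheduler thread):

```rust
use std::cell::RefCell;
use std::collections::VecDeque;
use std::sync::{Arc, Mutex};

struct Shared {
    remote: Mutex<VecDeque<u32>>,
}

thread_local! {
    // Set while a scheduler runs on this thread: its shared handle
    // plus its local (unsynchronized) run queue.
    static CURRENT: RefCell<Option<(Arc<Shared>, VecDeque<u32>)>> =
        RefCell::new(None);
}

fn schedule(shared: &Arc<Shared>, task: u32) {
    CURRENT.with(|cx| match &mut *cx.borrow_mut() {
        // Same scheduler, same thread: lock-free local push.
        Some((cur, local)) if Arc::ptr_eq(shared, cur) => local.push_back(task),
        // Another thread or another scheduler: mutex-protected remote queue.
        _ => shared.remote.lock().unwrap().push_back(task),
    });
}

fn main() {
    let shared = Arc::new(Shared { remote: Mutex::new(VecDeque::new()) });
    schedule(&shared, 7); // no context on this thread: remote path
    assert_eq!(shared.remote.lock().unwrap().pop_front(), Some(7));
}
```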

@@ -439,107 +484,35 @@ impl Wake for Shared {
     }
 }
 
-// ===== CoreGuard =====
+// ===== InnerGuard =====
 
-/// Used to ensure we always place the `Core` value back into its slot in
-/// `BasicScheduler`, even if the future panics.
-struct CoreGuard<'a> {
-    context: Context,
-    basic_scheduler: &'a BasicScheduler,
+/// Used to ensure we always place the Inner value
+/// back into its slot in `BasicScheduler`, even if the
+/// future panics.
+struct InnerGuard<'a, P: Park> {
+    inner: Option<Inner<P>>,
+    basic_scheduler: &'a BasicScheduler<P>,
 }
 
-impl CoreGuard<'_> {
-    fn block_on<F: Future>(self, future: F) -> F::Output {
-        self.enter(|mut core, context| {
-            let _enter = crate::runtime::enter(false);
-            let waker = context.spawner.waker_ref();
-            let mut cx = std::task::Context::from_waker(&waker);
-
-            pin!(future);
-
-            'outer: loop {
-                if core.spawner.reset_woken() {
-                    let (c, res) = context.run_task(core, || future.as_mut().poll(&mut cx));
-
-                    core = c;
-
-                    if let Ready(v) = res {
-                        return (core, v);
-                    }
-                }
-
-                for _ in 0..MAX_TASKS_PER_TICK {
-                    // Get and increment the current tick
-                    let tick = core.tick;
-                    core.tick = core.tick.wrapping_add(1);
-
-                    let entry = if tick % REMOTE_FIRST_INTERVAL == 0 {
-                        core.spawner
-                            .pop()
-                            .or_else(|| core.tasks.pop_front().map(RemoteMsg::Schedule))
-                    } else {
-                        core.tasks
-                            .pop_front()
-                            .map(RemoteMsg::Schedule)
-                            .or_else(|| core.spawner.pop())
-                    };
-
-                    let entry = match entry {
-                        Some(entry) => entry,
-                        None => {
-                            core = context.park(core);
-
-                            // Try polling the `block_on` future next
-                            continue 'outer;
-                        }
-                    };
-
-                    match entry {
-                        RemoteMsg::Schedule(task) => {
-                            let task = context.spawner.shared.owned.assert_owner(task);
-
-                            let (c, _) = context.run_task(core, || {
-                                task.run();
-                            });
-
-                            core = c;
-                        }
-                    }
-                }
-
-                // Yield to the driver, this drives the timer and pulls any
-                // pending I/O events.
-                core = context.park_yield(core);
-            }
-        })
-    }
-
-    /// Enters the scheduler context. This sets the queue and other necessary
-    /// scheduler state in the thread-local.
-    fn enter<F, R>(self, f: F) -> R
-    where
-        F: FnOnce(Box<Core>, &Context) -> (Box<Core>, R),
-    {
-        // Remove `core` from `context` to pass into the closure.
-        let core = self.context.core.borrow_mut().take().expect("core missing");
-
-        // Call the closure and place `core` back
-        let (core, ret) = CURRENT.set(&self.context, || f(core, &self.context));
-
-        *self.context.core.borrow_mut() = Some(core);
-
-        ret
+impl<P: Park> InnerGuard<'_, P> {
+    fn block_on<F: Future>(&mut self, future: F) -> F::Output {
+        // The only time inner gets set to `None` is if we have dropped
+        // already so this unwrap is safe.
+        self.inner.as_mut().unwrap().block_on(future)
     }
 }
 
-impl Drop for CoreGuard<'_> {
+impl<P: Park> Drop for InnerGuard<'_, P> {
     fn drop(&mut self) {
-        if let Some(core) = self.context.core.borrow_mut().take() {
+        if let Some(scheduler) = self.inner.take() {
+            let mut lock = self.basic_scheduler.inner.lock();
+
             // Replace old scheduler back into the state to allow
             // other threads to pick it up and drive it.
-            self.basic_scheduler.core.set(core);
+            lock.replace(scheduler);
 
-            // Wake up other possible threads that could steal the driver.
+            // Wake up other possible threads that could steal
+            // the dedicated parker P.
             self.basic_scheduler.notify.notify_one()
         }
     }
diff --git a/tokio/src/runtime/mod.rs b/tokio/src/runtime/mod.rs
index e77c5e3a0f8..847dd5972e1 100644
--- a/tokio/src/runtime/mod.rs
+++ b/tokio/src/runtime/mod.rs
@@ -283,7 +283,7 @@ cfg_rt! {
 #[derive(Debug)]
 enum Kind {
     /// Execute all tasks on the current-thread.
-    CurrentThread(BasicScheduler),
+    CurrentThread(BasicScheduler<driver::Driver>),
 
     /// Execute tasks across multiple threads.
     #[cfg(feature = "rt-multi-thread")]
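Making the scheduler generic over `P: Park` again is what lets `runtime/mod.rs` instantiate it as `BasicScheduler<driver::Driver>` while tests can substitute a trivial parker. Tokio's internal traits look roughly like the sketch below; `CondvarPark` is a hypothetical stand-in, not a type in the tree, and the real trait also has a `shutdown` method omitted here:

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::time::Duration;

// Approximation of the internal `Park`/`Unpark` traits.
trait Unpark: Send + Sync + 'static {
    fn unpark(&self);
}

trait Park {
    type Unpark: Unpark;
    type Error: std::fmt::Debug;

    fn unpark(&self) -> Self::Unpark;
    fn park(&mut self) -> Result<(), Self::Error>;
    fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error>;
}

#[derive(Default)]
struct ParkState {
    notified: Mutex<bool>,
    cv: Condvar,
}

// Hypothetical condvar-backed parker, usable where no I/O driver is needed.
struct CondvarPark(Arc<ParkState>);
struct CondvarUnpark(Arc<ParkState>);

impl Unpark for CondvarUnpark {
    fn unpark(&self) {
        *self.0.notified.lock().unwrap() = true;
        self.0.cv.notify_one();
    }
}

impl Park for CondvarPark {
    type Unpark = CondvarUnpark;
    type Error = std::convert::Infallible;

    fn unpark(&self) -> CondvarUnpark {
        CondvarUnpark(self.0.clone())
    }

    fn park(&mut self) -> Result<(), Self::Error> {
        let mut notified = self.0.notified.lock().unwrap();
        while !*notified {
            notified = self.0.cv.wait(notified).unwrap();
        }
        *notified = false;
        Ok(())
    }

    fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> {
        let mut notified = self.0.notified.lock().unwrap();
        if !*notified {
            let (lock, _) = self.0.cv.wait_timeout(notified, duration).unwrap();
            notified = lock;
        }
        *notified = false;
        Ok(())
    }
}

fn main() {
    let mut park = CondvarPark(Arc::new(ParkState::default()));
    let unpark = park.unpark();
    std::thread::spawn(move || unpark.unpark());
    park.park().unwrap();
}
```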
diff --git a/tokio/src/runtime/tests/loom_basic_scheduler.rs b/tokio/src/runtime/tests/loom_basic_scheduler.rs
index a772603f711..d2894b9b27e 100644
--- a/tokio/src/runtime/tests/loom_basic_scheduler.rs
+++ b/tokio/src/runtime/tests/loom_basic_scheduler.rs
@@ -34,22 +34,20 @@ fn assert_at_most_num_polls(rt: Arc<Runtime>, at_most_polls: usize) {
 #[test]
 fn block_on_num_polls() {
     loom::model(|| {
-        // we expect at most 4 number of polls because there are three points at
-        // which we poll the future and an opportunity for a false-positive.. At
-        // any of these points it can be ready:
+        // we expect at most 3 polls because there are three
+        // points at which we poll the future. At any of these
+        // points it can be ready:
         //
-        // - when we fail to steal the parker and we block on a notification
-        //   that it is available.
+        // - when we fail to steal the parker and we block on a
+        //   notification that it is available.
         //
         // - when we steal the parker and we schedule the future
         //
-        // - when the future is woken up and we have ran the max number of tasks
-        //   for the current tick or there are no more tasks to run.
+        // - when the future is woken up and we have run the max
+        //   number of tasks for the current tick or there are no
+        //   more tasks to run.
         //
-        // - a thread is notified that the parker is available but a third
-        //   thread acquires it before the notified thread can.
-        //
-        let at_most = 4;
+        let at_most = 3;
 
         let rt1 = Arc::new(Builder::new_current_thread().build().unwrap());
         let rt2 = rt1.clone();
diff --git a/tokio/src/util/atomic_cell.rs b/tokio/src/runtime/thread_pool/atomic_cell.rs
similarity index 77%
rename from tokio/src/util/atomic_cell.rs
rename to tokio/src/runtime/thread_pool/atomic_cell.rs
index 07e37303a7b..98847e6ffa1 100644
--- a/tokio/src/util/atomic_cell.rs
+++ b/tokio/src/runtime/thread_pool/atomic_cell.rs
@@ -3,7 +3,7 @@ use crate::loom::sync::atomic::AtomicPtr;
 use std::ptr;
 use std::sync::atomic::Ordering::AcqRel;
 
-pub(crate) struct AtomicCell<T> {
+pub(super) struct AtomicCell<T> {
     data: AtomicPtr<T>,
 }
 
@@ -11,22 +11,22 @@ unsafe impl<T: Send> Send for AtomicCell<T> {}
 unsafe impl<T: Send> Sync for AtomicCell<T> {}
 
 impl<T> AtomicCell<T> {
-    pub(crate) fn new(data: Option<Box<T>>) -> AtomicCell<T> {
+    pub(super) fn new(data: Option<Box<T>>) -> AtomicCell<T> {
         AtomicCell {
             data: AtomicPtr::new(to_raw(data)),
         }
     }
 
-    pub(crate) fn swap(&self, val: Option<Box<T>>) -> Option<Box<T>> {
+    pub(super) fn swap(&self, val: Option<Box<T>>) -> Option<Box<T>> {
         let old = self.data.swap(to_raw(val), AcqRel);
         from_raw(old)
     }
 
-    pub(crate) fn set(&self, val: Box<T>) {
+    pub(super) fn set(&self, val: Box<T>) {
         let _ = self.swap(Some(val));
     }
 
-    pub(crate) fn take(&self) -> Option<Box<T>> {
+    pub(super) fn take(&self) -> Option<Box<T>> {
         self.swap(None)
     }
 }
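For context on the moved file: `AtomicCell<T>` is a lock-free single-slot cell that stores an optional boxed value as a raw pointer and exchanges it with a single `AcqRel` swap; this is what the worker hand-off in `worker.rs` relies on. A condensed, self-contained version, including `to_raw`/`from_raw` helpers like those in the real file but not shown in the hunk above (the `Send`/`Sync` impls and module scoping are omitted here):

```rust
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering::AcqRel};

struct AtomicCell<T> {
    data: AtomicPtr<T>,
}

impl<T> AtomicCell<T> {
    fn new(data: Option<Box<T>>) -> AtomicCell<T> {
        AtomicCell {
            data: AtomicPtr::new(to_raw(data)),
        }
    }

    /// Atomically exchange the contents; of two racing `swap(None)`
    /// calls, exactly one observes the stored value.
    fn swap(&self, val: Option<Box<T>>) -> Option<Box<T>> {
        let old = self.data.swap(to_raw(val), AcqRel);
        from_raw(old)
    }
}

fn to_raw<T>(data: Option<Box<T>>) -> *mut T {
    data.map(Box::into_raw).unwrap_or(ptr::null_mut())
}

fn from_raw<T>(val: *mut T) -> Option<Box<T>> {
    if val.is_null() {
        None
    } else {
        Some(unsafe { Box::from_raw(val) })
    }
}

fn main() {
    let cell = AtomicCell::new(Some(Box::new(1u32)));
    assert_eq!(cell.swap(None).map(|b| *b), Some(1));
    assert!(cell.swap(None).is_none());
}
```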
diff --git a/tokio/src/runtime/thread_pool/mod.rs b/tokio/src/runtime/thread_pool/mod.rs
index 3e1ce448215..82e34c78d28 100644
--- a/tokio/src/runtime/thread_pool/mod.rs
+++ b/tokio/src/runtime/thread_pool/mod.rs
@@ -1,5 +1,8 @@
 //! Threadpool
 
+mod atomic_cell;
+use atomic_cell::AtomicCell;
+
 mod idle;
 use self::idle::Idle;
 
diff --git a/tokio/src/runtime/thread_pool/worker.rs b/tokio/src/runtime/thread_pool/worker.rs
index 27d0d5e7d32..ae8efe6724f 100644
--- a/tokio/src/runtime/thread_pool/worker.rs
+++ b/tokio/src/runtime/thread_pool/worker.rs
@@ -66,9 +66,8 @@ use crate::runtime::enter::EnterContext;
 use crate::runtime::park::{Parker, Unparker};
 use crate::runtime::stats::{RuntimeStats, WorkerStatsBatcher};
 use crate::runtime::task::{Inject, JoinHandle, OwnedTasks};
-use crate::runtime::thread_pool::Idle;
+use crate::runtime::thread_pool::{AtomicCell, Idle};
 use crate::runtime::{queue, task, Callback};
-use crate::util::atomic_cell::AtomicCell;
 use crate::util::FastRand;
 
 use std::cell::RefCell;
diff --git a/tokio/src/util/mod.rs b/tokio/src/util/mod.rs
index f0a79a7cca9..df30f2b86a9 100644
--- a/tokio/src/util/mod.rs
+++ b/tokio/src/util/mod.rs
@@ -3,9 +3,6 @@ cfg_io_driver! {
     pub(crate) mod slab;
 }
 
-#[cfg(feature = "rt")]
-pub(crate) mod atomic_cell;
-
 #[cfg(any(
     // io driver uses `WakeList` directly
     feature = "net",