sync: add back RwLockWriteGuard::map and RwLockWriteGuard::try_map (#…
Darksonn authored Feb 13, 2021
1 parent 7c6a1c4 commit e3f2dcf
Showing 5 changed files with 577 additions and 263 deletions.
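The methods this commit restores, `RwLockWriteGuard::map` and `RwLockWriteGuard::try_map`, return the new `RwLockMappedWriteGuard` type exported below. A minimal usage sketch, modeled on the read-guard examples further down in this diff (not code from the commit itself):

```rust
use tokio::sync::{RwLock, RwLockWriteGuard};

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct Foo(u32);

#[tokio::main]
async fn main() {
    let lock = RwLock::new(Foo(1));

    {
        // `map` narrows the write guard to one field of the locked data and
        // returns an `RwLockMappedWriteGuard` that still holds write access.
        let mut field = RwLockWriteGuard::map(lock.write().await, |f| &mut f.0);
        *field = 2;
    }

    assert_eq!(Foo(2), *lock.read().await);
}
```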
5 changes: 4 additions & 1 deletion tokio/src/sync/mod.rs
@@ -450,7 +450,10 @@ cfg_sync! {
pub use semaphore::{Semaphore, SemaphorePermit, OwnedSemaphorePermit};

mod rwlock;
pub use rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard};
pub use rwlock::RwLock;
pub use rwlock::read_guard::RwLockReadGuard;
pub use rwlock::write_guard::RwLockWriteGuard;
pub use rwlock::write_guard_mapped::RwLockMappedWriteGuard;

mod task;
pub(crate) use task::AtomicWaker;
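With the re-exports above in place, the fallible variant can be used along these lines; a sketch assuming the shape `RwLockWriteGuard::try_map(guard, f)` returning `Result<RwLockMappedWriteGuard<_, U>, RwLockWriteGuard<_, T>>` as described by this commit:

```rust
use tokio::sync::{RwLock, RwLockMappedWriteGuard, RwLockWriteGuard};

#[tokio::main]
async fn main() {
    let lock = RwLock::new(vec![1u32, 2, 3]);

    let guard = lock.write().await;
    // `try_map` succeeds only if the closure returns `Some`; otherwise the
    // original write guard is handed back in the `Err` variant.
    let mapped: Result<RwLockMappedWriteGuard<'_, u32>, _> =
        RwLockWriteGuard::try_map(guard, |v| v.first_mut());
    match mapped {
        Ok(mut first) => *first = 10,
        Err(_guard) => unreachable!("the vector is non-empty"),
    }

    assert_eq!(*lock.read().await, vec![10, 2, 3]);
}
```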
273 changes: 11 additions & 262 deletions tokio/src/sync/rwlock.rs
@@ -1,10 +1,14 @@
use crate::sync::batch_semaphore::{Semaphore, TryAcquireError};
use crate::sync::mutex::TryLockError;
use std::cell::UnsafeCell;
use std::fmt;
use std::marker;
use std::mem;
use std::ops;

pub(crate) mod read_guard;
pub(crate) mod write_guard;
pub(crate) mod write_guard_mapped;
pub(crate) use read_guard::RwLockReadGuard;
pub(crate) use write_guard::RwLockWriteGuard;
pub(crate) use write_guard_mapped::RwLockMappedWriteGuard;

#[cfg(not(loom))]
const MAX_READS: usize = 32;
@@ -80,240 +84,6 @@ pub struct RwLock<T: ?Sized> {
c: UnsafeCell<T>,
}

/// RAII structure used to release the shared read access of a lock when
/// dropped.
///
/// This structure is created by the [`read`] method on
/// [`RwLock`].
///
/// [`read`]: method@RwLock::read
/// [`RwLock`]: struct@RwLock
pub struct RwLockReadGuard<'a, T: ?Sized> {
s: &'a Semaphore,
data: *const T,
marker: marker::PhantomData<&'a T>,
}

impl<'a, T> RwLockReadGuard<'a, T> {
/// Make a new `RwLockReadGuard` for a component of the locked data.
///
/// This operation cannot fail as the `RwLockReadGuard` passed in already
/// locked the data.
///
/// This is an associated function that needs to be
/// used as `RwLockReadGuard::map(...)`. A method would interfere with
/// methods of the same name on the contents of the locked data.
///
/// This is an asynchronous version of [`RwLockReadGuard::map`] from the
/// [`parking_lot` crate].
///
/// [`RwLockReadGuard::map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockReadGuard.html#method.map
/// [`parking_lot` crate]: https://crates.io/crates/parking_lot
///
/// # Examples
///
/// ```
/// use tokio::sync::{RwLock, RwLockReadGuard};
///
/// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
/// struct Foo(u32);
///
/// # #[tokio::main]
/// # async fn main() {
/// let lock = RwLock::new(Foo(1));
///
/// let guard = lock.read().await;
/// let guard = RwLockReadGuard::map(guard, |f| &f.0);
///
/// assert_eq!(1, *guard);
/// # }
/// ```
#[inline]
pub fn map<F, U: ?Sized>(this: Self, f: F) -> RwLockReadGuard<'a, U>
where
F: FnOnce(&T) -> &U,
{
let data = f(&*this) as *const U;
let s = this.s;
// NB: Forget to avoid drop impl from being called.
mem::forget(this);
RwLockReadGuard {
s,
data,
marker: marker::PhantomData,
}
}

/// Attempts to make a new [`RwLockReadGuard`] for a component of the
/// locked data. The original guard is returned if the closure returns
/// `None`.
///
/// This operation cannot fail as the `RwLockReadGuard` passed in already
/// locked the data.
///
/// This is an associated function that needs to be used as
/// `RwLockReadGuard::try_map(..)`. A method would interfere with methods of the
/// same name on the contents of the locked data.
///
/// This is an asynchronous version of [`RwLockReadGuard::try_map`] from the
/// [`parking_lot` crate].
///
/// [`RwLockReadGuard::try_map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockReadGuard.html#method.try_map
/// [`parking_lot` crate]: https://crates.io/crates/parking_lot
///
/// # Examples
///
/// ```
/// use tokio::sync::{RwLock, RwLockReadGuard};
///
/// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
/// struct Foo(u32);
///
/// # #[tokio::main]
/// # async fn main() {
/// let lock = RwLock::new(Foo(1));
///
/// let guard = lock.read().await;
/// let guard = RwLockReadGuard::try_map(guard, |f| Some(&f.0)).expect("should not fail");
///
/// assert_eq!(1, *guard);
/// # }
/// ```
#[inline]
pub fn try_map<F, U: ?Sized>(this: Self, f: F) -> Result<RwLockReadGuard<'a, U>, Self>
where
F: FnOnce(&T) -> Option<&U>,
{
let data = match f(&*this) {
Some(data) => data as *const U,
None => return Err(this),
};
let s = this.s;
// NB: Forget to avoid drop impl from being called.
mem::forget(this);
Ok(RwLockReadGuard {
s,
data,
marker: marker::PhantomData,
})
}
}

impl<'a, T: ?Sized> fmt::Debug for RwLockReadGuard<'a, T>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}

impl<'a, T: ?Sized> fmt::Display for RwLockReadGuard<'a, T>
where
T: fmt::Display,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}

impl<'a, T: ?Sized> Drop for RwLockReadGuard<'a, T> {
fn drop(&mut self) {
self.s.release(1);
}
}

/// RAII structure used to release the exclusive write access of a lock when
/// dropped.
///
/// This structure is created by the [`write`] method
/// on [`RwLock`].
///
/// [`write`]: method@RwLock::write
/// [`RwLock`]: struct@RwLock
pub struct RwLockWriteGuard<'a, T: ?Sized> {
s: &'a Semaphore,
data: *mut T,
marker: marker::PhantomData<&'a mut T>,
}

impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> {
/// Atomically downgrades a write lock into a read lock without allowing
/// any writers to take exclusive access of the lock in the meantime.
///
/// **Note:** This won't *necessarily* allow any additional readers to acquire
/// locks, since [`RwLock`] is fair and it is possible that a writer is next
/// in line.
///
/// Returns an RAII guard which will drop this read access of the `RwLock`
/// when dropped.
///
/// # Examples
///
/// ```
/// # use tokio::sync::RwLock;
/// # use std::sync::Arc;
/// #
/// # #[tokio::main]
/// # async fn main() {
/// let lock = Arc::new(RwLock::new(1));
///
/// let n = lock.write().await;
///
/// let cloned_lock = lock.clone();
/// let handle = tokio::spawn(async move {
/// *cloned_lock.write().await = 2;
/// });
///
/// let n = n.downgrade();
/// assert_eq!(*n, 1, "downgrade is atomic");
///
/// drop(n);
/// handle.await.unwrap();
/// assert_eq!(*lock.read().await, 2, "second writer obtained write lock");
/// # }
/// ```
///
/// [`RwLock`]: struct@RwLock
pub fn downgrade(self) -> RwLockReadGuard<'a, T> {
let RwLockWriteGuard { s, data, .. } = self;

// Release all but one of the permits held by the write guard
s.release(MAX_READS - 1);
// NB: Forget to avoid drop impl from being called.
mem::forget(self);
RwLockReadGuard {
s,
data,
marker: marker::PhantomData,
}
}
}

impl<'a, T: ?Sized> fmt::Debug for RwLockWriteGuard<'a, T>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}

impl<'a, T: ?Sized> fmt::Display for RwLockWriteGuard<'a, T>
where
T: fmt::Display,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}

impl<'a, T: ?Sized> Drop for RwLockWriteGuard<'a, T> {
fn drop(&mut self) {
self.s.release(MAX_READS);
}
}

#[test]
#[cfg(not(loom))]
fn bounds() {
@@ -351,11 +121,13 @@ unsafe impl<T> Sync for RwLock<T> where T: ?Sized + Send + Sync {}
unsafe impl<T> Send for RwLockReadGuard<'_, T> where T: ?Sized + Sync {}
unsafe impl<T> Sync for RwLockReadGuard<'_, T> where T: ?Sized + Send + Sync {}
unsafe impl<T> Sync for RwLockWriteGuard<'_, T> where T: ?Sized + Send + Sync {}
unsafe impl<T> Sync for RwLockMappedWriteGuard<'_, T> where T: ?Sized + Send + Sync {}
// Safety: Stores a raw pointer to `T`, so if `T` is `Sync`, the lock guard over
// `T` is `Send` - but since this also provides mutable access, we need to
// make sure that `T` is `Send` since its value can be sent across thread
// boundaries.
unsafe impl<T> Send for RwLockWriteGuard<'_, T> where T: ?Sized + Send + Sync {}
unsafe impl<T> Send for RwLockMappedWriteGuard<'_, T> where T: ?Sized + Send + Sync {}
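A rough sketch (not part of the commit) of the auto-trait bounds these impls, together with the `bounds` test shown earlier in this diff, are intended to provide: every guard is `Send` and `Sync` whenever the protected value is.

```rust
use tokio::sync::{RwLockMappedWriteGuard, RwLockReadGuard, RwLockWriteGuard};

fn require_send<T: Send>() {}
fn require_sync<T: Sync>() {}

// Compiles only if the guards are `Send`/`Sync` for a `Send + Sync` payload
// such as `u32`, which is what the unsafe impls above assert.
fn _assert_guard_bounds<'a>() {
    require_send::<RwLockReadGuard<'a, u32>>();
    require_sync::<RwLockReadGuard<'a, u32>>();
    require_send::<RwLockWriteGuard<'a, u32>>();
    require_sync::<RwLockWriteGuard<'a, u32>>();
    require_send::<RwLockMappedWriteGuard<'a, u32>>();
    require_sync::<RwLockMappedWriteGuard<'a, u32>>();
}

fn main() {}
```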

impl<T: ?Sized> RwLock<T> {
/// Creates a new instance of an `RwLock<T>` which is unlocked.
@@ -437,7 +209,6 @@ impl<T: ?Sized> RwLock<T> {
/// drop(n);
///}
/// ```
///
pub async fn read(&self) -> RwLockReadGuard<'_, T> {
self.s.acquire(1).await.unwrap_or_else(|_| {
// The semaphore was closed. but, we never explicitly close it, and we have a
@@ -500,8 +271,8 @@ impl<T: ?Sized> RwLock<T> {
/// Locks this `RwLock` with exclusive write access, causing the current
/// task to yield until the lock has been acquired.
///
/// The calling task will yield while other writers or readers
/// currently have access to the lock.
/// The calling task will yield while other writers or readers currently
/// have access to the lock.
///
/// Returns an RAII guard which will drop the write access of this `RwLock`
/// when dropped.
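The write-lock behaviour this doc comment describes looks like the following in practice; a minimal sketch under the standard tokio API, not taken from the diff:

```rust
use tokio::sync::RwLock;

#[tokio::main]
async fn main() {
    let lock = RwLock::new(1);

    // `write` yields until no other readers or writers hold the lock, then
    // returns an RAII guard with exclusive access to the data.
    let mut n = lock.write().await;
    *n = 2;

    // Dropping the guard releases write access again.
    drop(n);
    assert_eq!(*lock.read().await, 2);
}
```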
@@ -602,28 +373,6 @@ impl<T: ?Sized> RwLock<T> {
}
}

impl<T: ?Sized> ops::Deref for RwLockReadGuard<'_, T> {
type Target = T;

fn deref(&self) -> &T {
unsafe { &*self.data }
}
}

impl<T: ?Sized> ops::Deref for RwLockWriteGuard<'_, T> {
type Target = T;

fn deref(&self) -> &T {
unsafe { &*self.data }
}
}

impl<T: ?Sized> ops::DerefMut for RwLockWriteGuard<'_, T> {
fn deref_mut(&mut self) -> &mut T {
unsafe { &mut *self.data }
}
}

impl<T> From<T> for RwLock<T> {
fn from(s: T) -> Self {
Self::new(s)