From ee619d3446af5b5bda28b1f3c8b66d377f174b3d Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Mon, 14 Oct 2024 21:36:37 +0200 Subject: [PATCH 1/3] add test ensuring a moved mutex deadlocks --- .../fail/concurrency/mutex-leak-move-deadlock.rs | 16 ++++++++++++++++ .../concurrency/mutex-leak-move-deadlock.stderr | 16 ++++++++++++++++ 2 files changed, 32 insertions(+) create mode 100644 tests/fail/concurrency/mutex-leak-move-deadlock.rs create mode 100644 tests/fail/concurrency/mutex-leak-move-deadlock.stderr diff --git a/tests/fail/concurrency/mutex-leak-move-deadlock.rs b/tests/fail/concurrency/mutex-leak-move-deadlock.rs new file mode 100644 index 0000000000..b996fcaf45 --- /dev/null +++ b/tests/fail/concurrency/mutex-leak-move-deadlock.rs @@ -0,0 +1,16 @@ +//@error-in-other-file: deadlock +//@normalize-stderr-test: "src/sys/.*\.rs" -> "$$FILE" +//@normalize-stderr-test: "LL \| .*" -> "LL | $$CODE" +//@normalize-stderr-test: "\| +\^+" -> "| ^" +//@normalize-stderr-test: "\n *= note:.*" -> "" +use std::mem; +use std::sync::Mutex; + +fn main() { + let m = Mutex::new(0); + mem::forget(m.lock()); + // Move the lock while it is "held" (really: leaked) + let m2 = m; + // Now try to acquire the lock again. + let _guard = m2.lock(); +} diff --git a/tests/fail/concurrency/mutex-leak-move-deadlock.stderr b/tests/fail/concurrency/mutex-leak-move-deadlock.stderr new file mode 100644 index 0000000000..0ca8b3558d --- /dev/null +++ b/tests/fail/concurrency/mutex-leak-move-deadlock.stderr @@ -0,0 +1,16 @@ +error: deadlock: the evaluated program deadlocked + --> RUSTLIB/std/$FILE:LL:CC + | +LL | $CODE + | ^ the evaluated program deadlocked + | +note: inside `main` + --> tests/fail/concurrency/mutex-leak-move-deadlock.rs:LL:CC + | +LL | $CODE + | ^ + +note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace + +error: aborting due to 1 previous error + From 5947a489ee3dd5ab0bc7d8e2b3a9bb3c8b9c867a Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Mon, 14 Oct 2024 21:59:05 +0200 Subject: [PATCH 2/3] move lazy_sync helper methods to be with InterpCx --- src/concurrency/sync.rs | 155 +++++++++++++++++++++-------------- src/shims/unix/macos/sync.rs | 12 +-- src/shims/unix/sync.rs | 12 +-- src/shims/windows/sync.rs | 3 +- 4 files changed, 103 insertions(+), 79 deletions(-) diff --git a/src/concurrency/sync.rs b/src/concurrency/sync.rs index 2c6a7bf0f5..e7e6c100cf 100644 --- a/src/concurrency/sync.rs +++ b/src/concurrency/sync.rs @@ -193,75 +193,104 @@ impl<'tcx> AllocExtra<'tcx> { /// If `init` is set to this, we consider the primitive initialized. pub const LAZY_INIT_COOKIE: u32 = 0xcafe_affe; -/// Helper for lazily initialized `alloc_extra.sync` data: -/// this forces an immediate init. -pub fn lazy_sync_init<'tcx, T: 'static + Copy>( - ecx: &mut MiriInterpCx<'tcx>, - primitive: &MPlaceTy<'tcx>, - init_offset: Size, - data: T, -) -> InterpResult<'tcx> { - let (alloc, offset, _) = ecx.ptr_get_alloc_id(primitive.ptr(), 0)?; - let (alloc_extra, _machine) = ecx.get_alloc_extra_mut(alloc)?; - alloc_extra.sync.insert(offset, Box::new(data)); - // Mark this as "initialized". - let init_field = primitive.offset(init_offset, ecx.machine.layouts.u32, ecx)?; - ecx.write_scalar_atomic( - Scalar::from_u32(LAZY_INIT_COOKIE), - &init_field, - AtomicWriteOrd::Relaxed, - )?; - interp_ok(()) -} - -/// Helper for lazily initialized `alloc_extra.sync` data: -/// Checks if the primitive is initialized, and return its associated data if so. 
-/// Otherwise, calls `new_data` to initialize the primitive.
-pub fn lazy_sync_get_data<'tcx, T: 'static + Copy>(
-    ecx: &mut MiriInterpCx<'tcx>,
-    primitive: &MPlaceTy<'tcx>,
-    init_offset: Size,
-    name: &str,
-    new_data: impl FnOnce(&mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, T>,
-) -> InterpResult<'tcx, T> {
-    // Check if this is already initialized. Needs to be atomic because we can race with another
-    // thread initializing. Needs to be an RMW operation to ensure we read the *latest* value.
-    // So we just try to replace MUTEX_INIT_COOKIE with itself.
-    let init_cookie = Scalar::from_u32(LAZY_INIT_COOKIE);
-    let init_field = primitive.offset(init_offset, ecx.machine.layouts.u32, ecx)?;
-    let (_init, success) = ecx
-        .atomic_compare_exchange_scalar(
-            &init_field,
-            &ImmTy::from_scalar(init_cookie, ecx.machine.layouts.u32),
-            init_cookie,
-            AtomicRwOrd::Relaxed,
-            AtomicReadOrd::Relaxed,
-            /* can_fail_spuriously */ false,
-        )?
-        .to_scalar_pair();
-
-    if success.to_bool()? {
-        // If it is initialized, it must be found in the "sync primitive" table,
-        // or else it has been moved illegally.
-        let (alloc, offset, _) = ecx.ptr_get_alloc_id(primitive.ptr(), 0)?;
-        let alloc_extra = ecx.get_alloc_extra(alloc)?;
-        let data = alloc_extra
-            .get_sync::<T>(offset)
-            .ok_or_else(|| err_ub_format!("`{name}` can't be moved after first use"))?;
-        interp_ok(*data)
-    } else {
-        let data = new_data(ecx)?;
-        lazy_sync_init(ecx, primitive, init_offset, data)?;
-        interp_ok(data)
-    }
-}
-
 // Public interface to synchronization primitives. Please note that in most
 // cases, the function calls are infallible and it is the client's (shim
 // implementation's) responsibility to detect and deal with erroneous
 // situations.
 impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
 pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
+    /// Helper for lazily initialized `alloc_extra.sync` data:
+    /// this forces an immediate init.
+    fn lazy_sync_init<T: 'static + Copy>(
+        &mut self,
+        primitive: &MPlaceTy<'tcx>,
+        init_offset: Size,
+        data: T,
+    ) -> InterpResult<'tcx> {
+        let this = self.eval_context_mut();
+
+        let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
+        let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?;
+        alloc_extra.sync.insert(offset, Box::new(data));
+        // Mark this as "initialized".
+        let init_field = primitive.offset(init_offset, this.machine.layouts.u32, this)?;
+        this.write_scalar_atomic(
+            Scalar::from_u32(LAZY_INIT_COOKIE),
+            &init_field,
+            AtomicWriteOrd::Relaxed,
+        )?;
+        interp_ok(())
+    }
+
+    /// Helper for lazily initialized `alloc_extra.sync` data:
+    /// Checks if the primitive is initialized, and returns its associated data if so.
+    /// Otherwise, calls `new_data` to initialize the primitive.
+    fn lazy_sync_get_data<T: 'static + Copy>(
+        &mut self,
+        primitive: &MPlaceTy<'tcx>,
+        init_offset: Size,
+        name: &str,
+        new_data: impl FnOnce(&mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, T>,
+    ) -> InterpResult<'tcx, T> {
+        let this = self.eval_context_mut();
+
+        // Check if this is already initialized. Needs to be atomic because we can race with another
+        // thread initializing. Needs to be an RMW operation to ensure we read the *latest* value.
+        // So we just try to replace LAZY_INIT_COOKIE with itself.
+        let init_cookie = Scalar::from_u32(LAZY_INIT_COOKIE);
+        let init_field = primitive.offset(init_offset, this.machine.layouts.u32, this)?;
+        let (_init, success) = this
+            .atomic_compare_exchange_scalar(
+                &init_field,
+                &ImmTy::from_scalar(init_cookie, this.machine.layouts.u32),
+                init_cookie,
+                AtomicRwOrd::Relaxed,
+                AtomicReadOrd::Relaxed,
+                /* can_fail_spuriously */ false,
+            )?
+            .to_scalar_pair();
+
+        if success.to_bool()? {
+            // If it is initialized, it must be found in the "sync primitive" table,
+            // or else it has been moved illegally.
+            let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
+            let alloc_extra = this.get_alloc_extra(alloc)?;
+            let data = alloc_extra
+                .get_sync::<T>(offset)
+                .ok_or_else(|| err_ub_format!("`{name}` can't be moved after first use"))?;
+            interp_ok(*data)
+        } else {
+            let data = new_data(this)?;
+            this.lazy_sync_init(primitive, init_offset, data)?;
+            interp_ok(data)
+        }
+    }
+
+    /// Get the synchronization primitive associated with the given pointer,
+    /// or initialize a new one.
+    fn get_sync_or_init<'a, T: 'static>(
+        &'a mut self,
+        ptr: Pointer,
+        new: impl FnOnce(&'a mut MiriMachine<'tcx>) -> InterpResult<'tcx, T>,
+    ) -> InterpResult<'tcx, &'a T>
+    where
+        'tcx: 'a,
+    {
+        let this = self.eval_context_mut();
+        // Ensure there is memory behind this pointer, so that this allocation
+        // is truly the only place where the data could be stored.
+        this.check_ptr_access(ptr, Size::from_bytes(1), CheckInAllocMsg::InboundsTest)?;
+
+        let (alloc, offset, _) = this.ptr_get_alloc_id(ptr, 0)?;
+        let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc)?;
+        // Due to borrow checker reasons, we have to do the lookup twice.
+        if alloc_extra.get_sync::<T>(offset).is_none() {
+            let new = new(machine)?;
+            alloc_extra.sync.insert(offset, Box::new(new));
+        }
+        interp_ok(alloc_extra.get_sync::<T>(offset).unwrap())
+    }
+
     #[inline]
     /// Get the id of the thread that currently owns this lock.
     fn mutex_get_owner(&mut self, id: MutexId) -> ThreadId {
diff --git a/src/shims/unix/macos/sync.rs b/src/shims/unix/macos/sync.rs
index cd5b0ed1d0..25e1ff42fb 100644
--- a/src/shims/unix/macos/sync.rs
+++ b/src/shims/unix/macos/sync.rs
@@ -23,15 +23,11 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
         let lock = this.deref_pointer(lock_ptr)?;
         // We store the mutex ID in the `sync` metadata. This means that when the lock is moved,
         // that's just implicitly creating a new lock at the new location.
-        let (alloc, offset, _) = this.ptr_get_alloc_id(lock.ptr(), 0)?;
-        let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc)?;
-        if let Some(data) = alloc_extra.get_sync::<MacOsUnfairLock>(offset) {
-            interp_ok(data.id)
-        } else {
+        let data = this.get_sync_or_init(lock.ptr(), |machine| {
             let id = machine.sync.mutex_create();
-            alloc_extra.sync.insert(offset, Box::new(MacOsUnfairLock { id }));
-            interp_ok(id)
-        }
+            interp_ok(MacOsUnfairLock { id })
+        })?;
+        interp_ok(data.id)
     }
 }
 
diff --git a/src/shims/unix/sync.rs b/src/shims/unix/sync.rs
index 913f53adbb..8eb874a456 100644
--- a/src/shims/unix/sync.rs
+++ b/src/shims/unix/sync.rs
@@ -2,7 +2,7 @@ use std::sync::atomic::{AtomicBool, Ordering};
 
 use rustc_target::abi::Size;
 
-use crate::concurrency::sync::{LAZY_INIT_COOKIE, lazy_sync_get_data, lazy_sync_init};
+use crate::concurrency::sync::LAZY_INIT_COOKIE;
 use crate::*;
 
 /// Do a bytewise comparison of the two places, using relaxed atomic reads.
This is used to check if @@ -176,7 +176,7 @@ fn mutex_create<'tcx>( let mutex = ecx.deref_pointer(mutex_ptr)?; let id = ecx.machine.sync.mutex_create(); let data = PthreadMutex { id, kind }; - lazy_sync_init(ecx, &mutex, mutex_init_offset(ecx)?, data)?; + ecx.lazy_sync_init(&mutex, mutex_init_offset(ecx)?, data)?; interp_ok(data) } @@ -189,7 +189,7 @@ fn mutex_get_data<'tcx, 'a>( mutex_ptr: &OpTy<'tcx>, ) -> InterpResult<'tcx, PthreadMutex> { let mutex = ecx.deref_pointer(mutex_ptr)?; - lazy_sync_get_data(ecx, &mutex, mutex_init_offset(ecx)?, "pthread_mutex_t", |ecx| { + ecx.lazy_sync_get_data(&mutex, mutex_init_offset(ecx)?, "pthread_mutex_t", |ecx| { let kind = mutex_kind_from_static_initializer(ecx, &mutex)?; let id = ecx.machine.sync.mutex_create(); interp_ok(PthreadMutex { id, kind }) @@ -261,7 +261,7 @@ fn rwlock_get_data<'tcx>( rwlock_ptr: &OpTy<'tcx>, ) -> InterpResult<'tcx, PthreadRwLock> { let rwlock = ecx.deref_pointer(rwlock_ptr)?; - lazy_sync_get_data(ecx, &rwlock, rwlock_init_offset(ecx)?, "pthread_rwlock_t", |ecx| { + ecx.lazy_sync_get_data(&rwlock, rwlock_init_offset(ecx)?, "pthread_rwlock_t", |ecx| { if !bytewise_equal_atomic_relaxed( ecx, &rwlock, @@ -377,7 +377,7 @@ fn cond_create<'tcx>( let cond = ecx.deref_pointer(cond_ptr)?; let id = ecx.machine.sync.condvar_create(); let data = PthreadCondvar { id, clock }; - lazy_sync_init(ecx, &cond, cond_init_offset(ecx)?, data)?; + ecx.lazy_sync_init(&cond, cond_init_offset(ecx)?, data)?; interp_ok(data) } @@ -386,7 +386,7 @@ fn cond_get_data<'tcx>( cond_ptr: &OpTy<'tcx>, ) -> InterpResult<'tcx, PthreadCondvar> { let cond = ecx.deref_pointer(cond_ptr)?; - lazy_sync_get_data(ecx, &cond, cond_init_offset(ecx)?, "pthread_cond_t", |ecx| { + ecx.lazy_sync_get_data(&cond, cond_init_offset(ecx)?, "pthread_cond_t", |ecx| { if !bytewise_equal_atomic_relaxed( ecx, &cond, diff --git a/src/shims/windows/sync.rs b/src/shims/windows/sync.rs index 8771bb4a8a..3701f479e5 100644 --- a/src/shims/windows/sync.rs +++ b/src/shims/windows/sync.rs @@ -3,7 +3,6 @@ use std::time::Duration; use rustc_target::abi::Size; use crate::concurrency::init_once::InitOnceStatus; -use crate::concurrency::sync::lazy_sync_get_data; use crate::*; #[derive(Copy, Clone)] @@ -25,7 +24,7 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> { let init_once = this.deref_pointer(init_once_ptr)?; let init_offset = Size::ZERO; - lazy_sync_get_data(this, &init_once, init_offset, "INIT_ONCE", |this| { + this.lazy_sync_get_data(&init_once, init_offset, "INIT_ONCE", |this| { // TODO: check that this is still all-zero. 
let id = this.machine.sync.init_once_create(); interp_ok(WindowsInitOnce { id }) From 9a4cd35bf0befd2cad2259e9d6fcd99b2aa7e4e0 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Mon, 14 Oct 2024 22:43:41 +0200 Subject: [PATCH 3/3] ensure that a macOS os_unfair_lock that is moved while being held is not implicitly unlocked --- src/concurrency/sync.rs | 21 ++-- src/concurrency/thread.rs | 2 +- src/shims/unix/macos/sync.rs | 98 ++++++++++++++++--- src/shims/unix/sync.rs | 71 ++++++++------ src/shims/windows/sync.rs | 15 ++- .../apple_os_unfair_lock_move_deadlock.rs | 13 +++ .../apple_os_unfair_lock_move_deadlock.stderr | 13 +++ .../concurrency/apple-os-unfair-lock.rs | 4 +- 8 files changed, 178 insertions(+), 59 deletions(-) create mode 100644 tests/fail-dep/concurrency/apple_os_unfair_lock_move_deadlock.rs create mode 100644 tests/fail-dep/concurrency/apple_os_unfair_lock_move_deadlock.stderr diff --git a/src/concurrency/sync.rs b/src/concurrency/sync.rs index e7e6c100cf..199aedfa6d 100644 --- a/src/concurrency/sync.rs +++ b/src/concurrency/sync.rs @@ -223,13 +223,15 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { } /// Helper for lazily initialized `alloc_extra.sync` data: - /// Checks if the primitive is initialized, and return its associated data if so. - /// Otherwise, calls `new_data` to initialize the primitive. + /// Checks if the primitive is initialized: + /// - If yes, fetches the data from `alloc_extra.sync`, or calls `missing_data` if that fails + /// and stores that in `alloc_extra.sync`. + /// - Otherwise, calls `new_data` to initialize the primitive. fn lazy_sync_get_data( &mut self, primitive: &MPlaceTy<'tcx>, init_offset: Size, - name: &str, + missing_data: impl FnOnce() -> InterpResult<'tcx, T>, new_data: impl FnOnce(&mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, T>, ) -> InterpResult<'tcx, T> { let this = self.eval_context_mut(); @@ -254,11 +256,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { // If it is initialized, it must be found in the "sync primitive" table, // or else it has been moved illegally. let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?; - let alloc_extra = this.get_alloc_extra(alloc)?; - let data = alloc_extra - .get_sync::(offset) - .ok_or_else(|| err_ub_format!("`{name}` can't be moved after first use"))?; - interp_ok(*data) + let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?; + if let Some(data) = alloc_extra.get_sync::(offset) { + interp_ok(*data) + } else { + let data = missing_data()?; + alloc_extra.sync.insert(offset, Box::new(data)); + interp_ok(data) + } } else { let data = new_data(this)?; this.lazy_sync_init(primitive, init_offset, data)?; diff --git a/src/concurrency/thread.rs b/src/concurrency/thread.rs index 3c5fb74fb7..3946cb5ee5 100644 --- a/src/concurrency/thread.rs +++ b/src/concurrency/thread.rs @@ -59,7 +59,7 @@ macro_rules! callback { @unblock = |$this:ident| $unblock:block ) => { callback!( - @capture<$tcx, $($lft),*> { $($name: $type),+ } + @capture<$tcx, $($lft),*> { $($name: $type),* } @unblock = |$this| $unblock @timeout = |_this| { unreachable!( diff --git a/src/shims/unix/macos/sync.rs b/src/shims/unix/macos/sync.rs index 25e1ff42fb..1df1202442 100644 --- a/src/shims/unix/macos/sync.rs +++ b/src/shims/unix/macos/sync.rs @@ -10,24 +10,42 @@ //! and we do not detect copying of the lock, but macOS doesn't guarantee anything //! in that case either. 
+use rustc_target::abi::Size;
+
 use crate::*;
 
-struct MacOsUnfairLock {
-    id: MutexId,
+#[derive(Copy, Clone)]
+enum MacOsUnfairLock {
+    Poisoned,
+    Active { id: MutexId },
 }
 
 impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
 trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
-    fn os_unfair_lock_getid(&mut self, lock_ptr: &OpTy<'tcx>) -> InterpResult<'tcx, MutexId> {
+    fn os_unfair_lock_get_data(
+        &mut self,
+        lock_ptr: &OpTy<'tcx>,
+    ) -> InterpResult<'tcx, MacOsUnfairLock> {
         let this = self.eval_context_mut();
         let lock = this.deref_pointer(lock_ptr)?;
-        // We store the mutex ID in the `sync` metadata. This means that when the lock is moved,
-        // that's just implicitly creating a new lock at the new location.
-        let data = this.get_sync_or_init(lock.ptr(), |machine| {
-            let id = machine.sync.mutex_create();
-            interp_ok(MacOsUnfairLock { id })
-        })?;
-        interp_ok(data.id)
+        this.lazy_sync_get_data(
+            &lock,
+            Size::ZERO, // offset for init tracking
+            || {
+                // If we get here, due to how we reset things to zero in `os_unfair_lock_unlock`,
+                // this means the lock was moved while locked. This can happen with a `std` lock,
+                // but then any future attempt to unlock will just deadlock. In practice, terrible
+                // things can probably happen if you swap two locked locks, since they'd wake up
+                // from the wrong queue... we just won't catch all UB of this library API then (we
+                // would need to store some unique identifier in-memory for this, instead of a static
+                // LAZY_INIT_COOKIE). This can't be hit via `std::sync::Mutex`.
+                interp_ok(MacOsUnfairLock::Poisoned)
+            },
+            |ecx| {
+                let id = ecx.machine.sync.mutex_create();
+                interp_ok(MacOsUnfairLock::Active { id })
+            },
+        )
     }
 }
 
@@ -36,7 +54,21 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
     fn os_unfair_lock_lock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
 
-        let id = this.os_unfair_lock_getid(lock_op)?;
+        let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
+            // Trying to get a poisoned lock. Just block forever...
+            this.block_thread(
+                BlockReason::Sleep,
+                None,
+                callback!(
+                    @capture<'tcx> {}
+                    @unblock = |_this| {
+                        panic!("we shouldn't wake up ever")
+                    }
+                ),
+            );
+            return interp_ok(());
+        };
+
         if this.mutex_is_locked(id) {
             if this.mutex_get_owner(id) == this.active_thread() {
                 // Matching the current macOS implementation: abort on reentrant locking.
@@ -60,7 +92,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
 
-        let id = this.os_unfair_lock_getid(lock_op)?;
+        let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
+            // Trying to get a poisoned lock. That never works.
+            this.write_scalar(Scalar::from_bool(false), dest)?;
+            return interp_ok(());
+        };
+
         if this.mutex_is_locked(id) {
             // Contrary to the blocking lock function, this does not check for
             // reentrancy.
@@ -76,7 +113,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
     fn os_unfair_lock_unlock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
 
-        let id = this.os_unfair_lock_getid(lock_op)?;
+        let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
+            // The lock is poisoned, who knows who owns it... we'll pretend: someone else.
+            throw_machine_stop!(TerminationInfo::Abort(
+                "attempted to unlock an os_unfair_lock not owned by the current thread".to_owned()
+            ));
+        };
+
+        // Now, unlock.
         if this.mutex_unlock(id)?.is_none() {
             // Matching the current macOS implementation: abort.
             throw_machine_stop!(TerminationInfo::Abort(
@@ -84,32 +128,56 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
             ));
         }
 
+        // If the lock is not locked by anyone now, it went quiet.
+        // Reset to zero so that it can be moved and initialized again for the next phase.
+        if !this.mutex_is_locked(id) {
+            let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
+            this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
+        }
+
         interp_ok(())
     }
 
     fn os_unfair_lock_assert_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
 
-        let id = this.os_unfair_lock_getid(lock_op)?;
+        let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
+            // The lock is poisoned, who knows who owns it... we'll pretend: someone else.
+            throw_machine_stop!(TerminationInfo::Abort(
+                "called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
+            ));
+        };
         if !this.mutex_is_locked(id) || this.mutex_get_owner(id) != this.active_thread() {
             throw_machine_stop!(TerminationInfo::Abort(
                 "called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
            ));
         }
 
+        // The lock is definitely not quiet since we are the owner.
+
         interp_ok(())
     }
 
     fn os_unfair_lock_assert_not_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
 
-        let id = this.os_unfair_lock_getid(lock_op)?;
+        let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
+            // The lock is poisoned, who knows who owns it... we'll pretend: someone else.
+            return interp_ok(());
+        };
         if this.mutex_is_locked(id) && this.mutex_get_owner(id) == this.active_thread() {
             throw_machine_stop!(TerminationInfo::Abort(
                 "called os_unfair_lock_assert_not_owner on an os_unfair_lock owned by the current thread".to_owned()
             ));
         }
 
+        // If the lock is not locked by anyone now, it went quiet.
+        // Reset to zero so that it can be moved and initialized again for the next phase.
+        if !this.mutex_is_locked(id) {
+            let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
+            this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
+        }
+
         interp_ok(())
     }
 }
diff --git a/src/shims/unix/sync.rs b/src/shims/unix/sync.rs
index 8eb874a456..a4beaa47ba 100644
--- a/src/shims/unix/sync.rs
+++ b/src/shims/unix/sync.rs
@@ -189,11 +189,16 @@ fn mutex_get_data<'tcx, 'a>(
     mutex_ptr: &OpTy<'tcx>,
 ) -> InterpResult<'tcx, PthreadMutex> {
     let mutex = ecx.deref_pointer(mutex_ptr)?;
-    ecx.lazy_sync_get_data(&mutex, mutex_init_offset(ecx)?, "pthread_mutex_t", |ecx| {
-        let kind = mutex_kind_from_static_initializer(ecx, &mutex)?;
-        let id = ecx.machine.sync.mutex_create();
-        interp_ok(PthreadMutex { id, kind })
-    })
+    ecx.lazy_sync_get_data(
+        &mutex,
+        mutex_init_offset(ecx)?,
+        || throw_ub_format!("`pthread_mutex_t` can't be moved after first use"),
+        |ecx| {
+            let kind = mutex_kind_from_static_initializer(ecx, &mutex)?;
+            let id = ecx.machine.sync.mutex_create();
+            interp_ok(PthreadMutex { id, kind })
+        },
+    )
 }
 
 /// Returns the kind of a static initializer.
@@ -261,17 +266,22 @@ fn rwlock_get_data<'tcx>( rwlock_ptr: &OpTy<'tcx>, ) -> InterpResult<'tcx, PthreadRwLock> { let rwlock = ecx.deref_pointer(rwlock_ptr)?; - ecx.lazy_sync_get_data(&rwlock, rwlock_init_offset(ecx)?, "pthread_rwlock_t", |ecx| { - if !bytewise_equal_atomic_relaxed( - ecx, - &rwlock, - &ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]), - )? { - throw_unsup_format!("unsupported static initializer used for `pthread_rwlock_t`"); - } - let id = ecx.machine.sync.rwlock_create(); - interp_ok(PthreadRwLock { id }) - }) + ecx.lazy_sync_get_data( + &rwlock, + rwlock_init_offset(ecx)?, + || throw_ub_format!("`pthread_rwlock_t` can't be moved after first use"), + |ecx| { + if !bytewise_equal_atomic_relaxed( + ecx, + &rwlock, + &ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]), + )? { + throw_unsup_format!("unsupported static initializer used for `pthread_rwlock_t`"); + } + let id = ecx.machine.sync.rwlock_create(); + interp_ok(PthreadRwLock { id }) + }, + ) } // # pthread_condattr_t @@ -386,18 +396,23 @@ fn cond_get_data<'tcx>( cond_ptr: &OpTy<'tcx>, ) -> InterpResult<'tcx, PthreadCondvar> { let cond = ecx.deref_pointer(cond_ptr)?; - ecx.lazy_sync_get_data(&cond, cond_init_offset(ecx)?, "pthread_cond_t", |ecx| { - if !bytewise_equal_atomic_relaxed( - ecx, - &cond, - &ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]), - )? { - throw_unsup_format!("unsupported static initializer used for `pthread_cond_t`"); - } - // This used the static initializer. The clock there is always CLOCK_REALTIME. - let id = ecx.machine.sync.condvar_create(); - interp_ok(PthreadCondvar { id, clock: ClockId::Realtime }) - }) + ecx.lazy_sync_get_data( + &cond, + cond_init_offset(ecx)?, + || throw_ub_format!("`pthread_cond_t` can't be moved after first use"), + |ecx| { + if !bytewise_equal_atomic_relaxed( + ecx, + &cond, + &ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]), + )? { + throw_unsup_format!("unsupported static initializer used for `pthread_cond_t`"); + } + // This used the static initializer. The clock there is always CLOCK_REALTIME. + let id = ecx.machine.sync.condvar_create(); + interp_ok(PthreadCondvar { id, clock: ClockId::Realtime }) + }, + ) } impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {} diff --git a/src/shims/windows/sync.rs b/src/shims/windows/sync.rs index 3701f479e5..f8861085fe 100644 --- a/src/shims/windows/sync.rs +++ b/src/shims/windows/sync.rs @@ -24,11 +24,16 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> { let init_once = this.deref_pointer(init_once_ptr)?; let init_offset = Size::ZERO; - this.lazy_sync_get_data(&init_once, init_offset, "INIT_ONCE", |this| { - // TODO: check that this is still all-zero. - let id = this.machine.sync.init_once_create(); - interp_ok(WindowsInitOnce { id }) - }) + this.lazy_sync_get_data( + &init_once, + init_offset, + || throw_ub_format!("`INIT_ONCE` can't be moved after first use"), + |this| { + // TODO: check that this is still all-zero. + let id = this.machine.sync.init_once_create(); + interp_ok(WindowsInitOnce { id }) + }, + ) } /// Returns `true` if we were succssful, `false` if we would block. 
diff --git a/tests/fail-dep/concurrency/apple_os_unfair_lock_move_deadlock.rs b/tests/fail-dep/concurrency/apple_os_unfair_lock_move_deadlock.rs new file mode 100644 index 0000000000..8406143933 --- /dev/null +++ b/tests/fail-dep/concurrency/apple_os_unfair_lock_move_deadlock.rs @@ -0,0 +1,13 @@ +//@only-target: darwin + +use std::cell::UnsafeCell; + +fn main() { + let lock = UnsafeCell::new(libc::OS_UNFAIR_LOCK_INIT); + + unsafe { libc::os_unfair_lock_lock(lock.get()) }; + let lock = lock; + // This needs to either error or deadlock. + unsafe { libc::os_unfair_lock_lock(lock.get()) }; + //~^ error: deadlock +} diff --git a/tests/fail-dep/concurrency/apple_os_unfair_lock_move_deadlock.stderr b/tests/fail-dep/concurrency/apple_os_unfair_lock_move_deadlock.stderr new file mode 100644 index 0000000000..f043c7074f --- /dev/null +++ b/tests/fail-dep/concurrency/apple_os_unfair_lock_move_deadlock.stderr @@ -0,0 +1,13 @@ +error: deadlock: the evaluated program deadlocked + --> tests/fail-dep/concurrency/apple_os_unfair_lock_move_deadlock.rs:LL:CC + | +LL | unsafe { libc::os_unfair_lock_lock(lock.get()) }; + | ^ the evaluated program deadlocked + | + = note: BACKTRACE: + = note: inside `main` at tests/fail-dep/concurrency/apple_os_unfair_lock_move_deadlock.rs:LL:CC + +note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace + +error: aborting due to 1 previous error + diff --git a/tests/pass-dep/concurrency/apple-os-unfair-lock.rs b/tests/pass-dep/concurrency/apple-os-unfair-lock.rs index 0fc432f24c..f5b64474f8 100644 --- a/tests/pass-dep/concurrency/apple-os-unfair-lock.rs +++ b/tests/pass-dep/concurrency/apple-os-unfair-lock.rs @@ -16,8 +16,8 @@ fn main() { // `os_unfair_lock`s can be moved and leaked. // In the real implementation, even moving it while locked is possible - // (and "forks" the lock, i.e. old and new location have independent wait queues); - // Miri behavior differs here and anyway none of this is documented. + // (and "forks" the lock, i.e. old and new location have independent wait queues). + // We only test the somewhat sane case of moving while unlocked that `std` plans to rely on. let lock = lock; let locked = unsafe { libc::os_unfair_lock_trylock(lock.get()) }; assert!(locked);