From 76bce58b7ae52890d83edc5486c1427c3ab3c546 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Wed, 7 Aug 2024 14:30:26 +0200 Subject: [PATCH 1/4] atomics: allow atomic and non-atomic reads to race --- library/core/src/sync/atomic.rs | 62 ++++++++++++++++++++------------- 1 file changed, 38 insertions(+), 24 deletions(-) diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs index b06a3bd4487d3..b3fe99f7f1a15 100644 --- a/library/core/src/sync/atomic.rs +++ b/library/core/src/sync/atomic.rs @@ -24,26 +24,37 @@ //! //! ## Memory model for atomic accesses //! -//! Rust atomics currently follow the same rules as [C++20 atomics][cpp], specifically `atomic_ref`. -//! Basically, creating a *shared reference* to one of the Rust atomic types corresponds to creating -//! an `atomic_ref` in C++; the `atomic_ref` is destroyed when the lifetime of the shared reference -//! ends. A Rust atomic type that is exclusively owned or behind a mutable reference does *not* -//! correspond to an “atomic object” in C++, since the underlying primitive can be mutably accessed, -//! for example with `get_mut`, to perform non-atomic operations. +//! Rust atomics currently follow the same rules as [C++20 atomics][cpp], specifically the rules +//! from the [`intro.races`][cpp-intro.races] section, without the "consume" memory ordering. Since +//! C++ uses an object-based memory model whereas Rust is access-based, a bit of translation work +//! has to be done to apply the C++ rules to Rust: whenever C++ talks about "the value of an +//! object", we understand that to mean the resulting bytes obtained when doing a read. When the C++ +//! standard talks about "the value of an atomic object", this refers to the result of doing an +//! atomic load (via the operations provided in this module). A "modification of an atomic object" +//! refers to an atomic store. +//! +//! The end result is *almost* equivalent to saying that creating a *shared reference* to one of the +//! 
Rust atomic types corresponds to creating an `atomic_ref` in C++, with the `atomic_ref` being +//! destroyed when the lifetime of the shared reference ends. The main difference is that Rust +//! permits concurrent atomic and non-atomic reads to the same memory as those cause no issue in the +//! C++ memory model, they are just forbidden in C++ because memory is partitioned into "atomic +//! objects" and "non-atomic objects" (with `atomic_ref` temporarily converting a non-atomic object +//! into an atomic object). +//! +//! That said, Rust *does* inherit the C++ limitation that non-synchronized atomic accesses may not +//! partially overlap: they must be either disjoint or access the exact same memory. This in +//! particular rules out non-synchronized differently-sized accesses to the same data. //! //! [cpp]: https://en.cppreference.com/w/cpp/atomic +//! [cpp-intro.races]: https://timsong-cpp.github.io/cppwp/n4868/intro.multithread#intro.races //! //! Each method takes an [`Ordering`] which represents the strength of -//! the memory barrier for that operation. These orderings are the -//! same as the [C++20 atomic orderings][1]. For more information see the [nomicon][2]. +//! the memory barrier for that operation. These orderings behave the +//! same as the corresponding [C++20 atomic orderings][1]. For more information see the [nomicon][2]. //! //! [1]: https://en.cppreference.com/w/cpp/atomic/memory_order //! [2]: ../../../nomicon/atomics.html //! -//! Since C++ does not support mixing atomic and non-atomic accesses, or non-synchronized -//! different-sized accesses to the same data, Rust does not support those operations either. -//! Note that both of those restrictions only apply if the accesses are non-synchronized. -//! //! ```rust,no_run undefined_behavior //! use std::sync::atomic::{AtomicU16, AtomicU8, Ordering}; //! use std::mem::transmute; @@ -52,27 +63,30 @@ //! let atomic = AtomicU16::new(0); //! //! thread::scope(|s| { -//! 
// This is UB: mixing atomic and non-atomic accesses -//! s.spawn(|| atomic.store(1, Ordering::Relaxed)); -//! s.spawn(|| unsafe { atomic.as_ptr().write(2) }); +//! // This is UB: conflicting concurrent accesses. +//! s.spawn(|| atomic.store(1, Ordering::Relaxed)); // atomic store +//! s.spawn(|| unsafe { atomic.as_ptr().write(2) }); // non-atomic write //! }); //! //! thread::scope(|s| { -//! // This is UB: even reads are not allowed to be mixed -//! s.spawn(|| atomic.load(Ordering::Relaxed)); -//! s.spawn(|| unsafe { atomic.as_ptr().read() }); +//! // This is fine: the accesses do not conflict (as none of them performs any modification). +//! // In C++ this would be disallowed since creating an `atomic_ref` precludes +//! // further non-atomic accesses, but Rust does not have that limitation. +//! s.spawn(|| atomic.load(Ordering::Relaxed)); // atomic load +//! s.spawn(|| unsafe { atomic.as_ptr().read() }); // non-atomic read //! }); //! //! thread::scope(|s| { //! // This is fine, `join` synchronizes the code in a way such that atomic -//! // and non-atomic accesses can't happen "at the same time" -//! let handle = s.spawn(|| atomic.store(1, Ordering::Relaxed)); -//! handle.join().unwrap(); -//! s.spawn(|| unsafe { atomic.as_ptr().write(2) }); +//! // and non-atomic accesses can't happen "at the same time". +//! let handle = s.spawn(|| atomic.store(1, Ordering::Relaxed)); // atomic store +//! handle.join().unwrap(); // synchronize +//! s.spawn(|| unsafe { atomic.as_ptr().write(2) }); // non-atomic write //! }); //! //! thread::scope(|s| { -//! // This is UB: using different-sized atomic accesses to the same data +//! // This is UB: using differently-sized atomic accesses to the same data. +//! // (It would be UB even if these are both loads.) //! s.spawn(|| atomic.store(1, Ordering::Relaxed)); //! s.spawn(|| unsafe { //! let differently_sized = transmute::<&AtomicU16, &AtomicU8>(&atomic); @@ -82,7 +96,7 @@ //! //! thread::scope(|s| { //! 
// This is fine, `join` synchronizes the code in a way such that -//! // differently-sized accesses can't happen "at the same time" +//! // differently-sized accesses can't happen "at the same time". //! let handle = s.spawn(|| atomic.store(1, Ordering::Relaxed)); //! handle.join().unwrap(); //! s.spawn(|| unsafe { From ed417f44f81f7096bfe135e7d84092f2a7fecaa6 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Wed, 7 Aug 2024 14:50:34 +0200 Subject: [PATCH 2/4] miri: no longer complain about read-read races --- src/tools/miri/src/concurrency/data_race.rs | 23 +++-------- .../tests/fail/data_race/read_read_race1.rs | 30 --------------- .../fail/data_race/read_read_race1.stderr | 22 ----------- .../tests/fail/data_race/read_read_race2.rs | 30 --------------- .../fail/data_race/read_read_race2.stderr | 22 ----------- .../miri/tests/pass/concurrency/data_race.rs | 38 ++++++++++++++++++- 6 files changed, 43 insertions(+), 122 deletions(-) delete mode 100644 src/tools/miri/tests/fail/data_race/read_read_race1.rs delete mode 100644 src/tools/miri/tests/fail/data_race/read_read_race1.stderr delete mode 100644 src/tools/miri/tests/fail/data_race/read_read_race2.rs delete mode 100644 src/tools/miri/tests/fail/data_race/read_read_race2.stderr diff --git a/src/tools/miri/src/concurrency/data_race.rs b/src/tools/miri/src/concurrency/data_race.rs index b5b43f589f699..558a032d0bcf5 100644 --- a/src/tools/miri/src/concurrency/data_race.rs +++ b/src/tools/miri/src/concurrency/data_race.rs @@ -499,7 +499,7 @@ impl MemoryCellClocks { Ok(()) } - /// Detect data-races with an atomic read, caused by a non-atomic access that does + /// Detect data-races with an atomic read, caused by a non-atomic write that does /// not happen-before the atomic-read. 
fn atomic_read_detect( &mut self, @@ -510,12 +510,8 @@ impl MemoryCellClocks { trace!("Atomic read with vectors: {:#?} :: {:#?}", self, thread_clocks); let atomic = self.atomic_access(thread_clocks, access_size)?; atomic.read_vector.set_at_index(&thread_clocks.clock, index); - // Make sure the last non-atomic write and all non-atomic reads were before this access. - if self.write_was_before(&thread_clocks.clock) && self.read <= thread_clocks.clock { - Ok(()) - } else { - Err(DataRace) - } + // Make sure the last non-atomic write was before this access. + if self.write_was_before(&thread_clocks.clock) { Ok(()) } else { Err(DataRace) } } /// Detect data-races with an atomic write, either with a non-atomic read or with @@ -552,11 +548,9 @@ impl MemoryCellClocks { } thread_clocks.clock.index_mut(index).set_read_type(read_type); if self.write_was_before(&thread_clocks.clock) { + // We must be ordered-after all atomic writes. let race_free = if let Some(atomic) = self.atomic() { - // We must be ordered-after all atomic accesses, reads and writes. - // This ensures we don't mix atomic and non-atomic accesses. atomic.write_vector <= thread_clocks.clock - && atomic.read_vector <= thread_clocks.clock } else { true }; @@ -957,9 +951,7 @@ impl VClockAlloc { let mut other_size = None; // if `Some`, this was a size-mismatch race let write_clock; let (other_access, other_thread, other_clock) = - // First check the atomic-nonatomic cases. If it looks like multiple - // cases apply, this one should take precedence, else it might look like - // we are reporting races between two non-atomic reads. + // First check the atomic-nonatomic cases. 
if !access.is_atomic() && let Some(atomic) = mem_clocks.atomic() && let Some(idx) = Self::find_gt_index(&atomic.write_vector, &active_clocks.clock) @@ -1007,10 +999,7 @@ impl VClockAlloc { assert!(!involves_non_atomic); Some("overlapping unsynchronized atomic accesses must use the same access size") } else if access.is_read() && other_access.is_read() { - assert!(involves_non_atomic); - Some( - "overlapping atomic and non-atomic accesses must be synchronized, even if both are read-only", - ) + panic!("there should be no same-size read-read races") } else { None }; diff --git a/src/tools/miri/tests/fail/data_race/read_read_race1.rs b/src/tools/miri/tests/fail/data_race/read_read_race1.rs deleted file mode 100644 index 02aa4e4b716e3..0000000000000 --- a/src/tools/miri/tests/fail/data_race/read_read_race1.rs +++ /dev/null @@ -1,30 +0,0 @@ -//@compile-flags: -Zmiri-preemption-rate=0.0 -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 - -use std::sync::atomic::{AtomicU16, Ordering}; -use std::thread; - -// Make sure races between atomic and non-atomic reads are detected. -// This seems harmless but C++ does not allow them, so we can't allow them for now either. -// This test coverse the case where the non-atomic access come first. -fn main() { - let a = AtomicU16::new(0); - - thread::scope(|s| { - s.spawn(|| { - let ptr = &a as *const AtomicU16 as *mut u16; - unsafe { ptr.read() }; - }); - s.spawn(|| { - thread::yield_now(); - - // We also put a non-atomic access here, but that should *not* be reported. - let ptr = &a as *const AtomicU16 as *mut u16; - unsafe { ptr.read() }; - // Then do the atomic access. 
- a.load(Ordering::SeqCst); - //~^ ERROR: Data race detected between (1) non-atomic read on thread `unnamed-1` and (2) atomic load on thread `unnamed-2` - }); - }); -} diff --git a/src/tools/miri/tests/fail/data_race/read_read_race1.stderr b/src/tools/miri/tests/fail/data_race/read_read_race1.stderr deleted file mode 100644 index e97c4a4fdcb37..0000000000000 --- a/src/tools/miri/tests/fail/data_race/read_read_race1.stderr +++ /dev/null @@ -1,22 +0,0 @@ -error: Undefined Behavior: Data race detected between (1) non-atomic read on thread `unnamed-ID` and (2) atomic load on thread `unnamed-ID` at ALLOC. (2) just happened here - --> tests/fail/data_race/read_read_race1.rs:LL:CC - | -LL | a.load(Ordering::SeqCst); - | ^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between (1) non-atomic read on thread `unnamed-ID` and (2) atomic load on thread `unnamed-ID` at ALLOC. (2) just happened here - | -help: and (1) occurred earlier here - --> tests/fail/data_race/read_read_race1.rs:LL:CC - | -LL | unsafe { ptr.read() }; - | ^^^^^^^^^^ - = help: overlapping atomic and non-atomic accesses must be synchronized, even if both are read-only - = help: see https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#memory-model-for-atomic-accesses for more information about the Rust memory model - = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior - = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information - = note: BACKTRACE (of the first span) on thread `unnamed-ID`: - = note: inside closure at tests/fail/data_race/read_read_race1.rs:LL:CC - -note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace - -error: aborting due to 1 previous error - diff --git a/src/tools/miri/tests/fail/data_race/read_read_race2.rs b/src/tools/miri/tests/fail/data_race/read_read_race2.rs deleted file mode 100644 index 3b94c9143f3b2..0000000000000 --- 
a/src/tools/miri/tests/fail/data_race/read_read_race2.rs +++ /dev/null @@ -1,30 +0,0 @@ -//@compile-flags: -Zmiri-preemption-rate=0.0 -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 - -use std::sync::atomic::{AtomicU16, Ordering}; -use std::thread; - -// Make sure races between atomic and non-atomic reads are detected. -// This seems harmless but C++ does not allow them, so we can't allow them for now either. -// This test coverse the case where the atomic access come first. -fn main() { - let a = AtomicU16::new(0); - - thread::scope(|s| { - s.spawn(|| { - // We also put a non-atomic access here, but that should *not* be reported. - let ptr = &a as *const AtomicU16 as *mut u16; - unsafe { ptr.read() }; - // Then do the atomic access. - a.load(Ordering::SeqCst); - }); - s.spawn(|| { - thread::yield_now(); - - let ptr = &a as *const AtomicU16 as *mut u16; - unsafe { ptr.read() }; - //~^ ERROR: Data race detected between (1) atomic load on thread `unnamed-1` and (2) non-atomic read on thread `unnamed-2` - }); - }); -} diff --git a/src/tools/miri/tests/fail/data_race/read_read_race2.stderr b/src/tools/miri/tests/fail/data_race/read_read_race2.stderr deleted file mode 100644 index d64032db7b32c..0000000000000 --- a/src/tools/miri/tests/fail/data_race/read_read_race2.stderr +++ /dev/null @@ -1,22 +0,0 @@ -error: Undefined Behavior: Data race detected between (1) atomic load on thread `unnamed-ID` and (2) non-atomic read on thread `unnamed-ID` at ALLOC. (2) just happened here - --> tests/fail/data_race/read_read_race2.rs:LL:CC - | -LL | unsafe { ptr.read() }; - | ^^^^^^^^^^ Data race detected between (1) atomic load on thread `unnamed-ID` and (2) non-atomic read on thread `unnamed-ID` at ALLOC. 
(2) just happened here - | -help: and (1) occurred earlier here - --> tests/fail/data_race/read_read_race2.rs:LL:CC - | -LL | a.load(Ordering::SeqCst); - | ^^^^^^^^^^^^^^^^^^^^^^^^ - = help: overlapping atomic and non-atomic accesses must be synchronized, even if both are read-only - = help: see https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#memory-model-for-atomic-accesses for more information about the Rust memory model - = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior - = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information - = note: BACKTRACE (of the first span) on thread `unnamed-ID`: - = note: inside closure at tests/fail/data_race/read_read_race2.rs:LL:CC - -note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace - -error: aborting due to 1 previous error - diff --git a/src/tools/miri/tests/pass/concurrency/data_race.rs b/src/tools/miri/tests/pass/concurrency/data_race.rs index 34380dfa504d5..adf5a37891f06 100644 --- a/src/tools/miri/tests/pass/concurrency/data_race.rs +++ b/src/tools/miri/tests/pass/concurrency/data_race.rs @@ -1,7 +1,7 @@ //@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 use std::sync::atomic::*; -use std::thread::spawn; +use std::thread::{self, spawn}; #[derive(Copy, Clone)] struct EvilSend(pub T); @@ -143,10 +143,46 @@ fn test_local_variable_lazy_write() { assert_eq!(val, 127); } +// This test covers the case where the non-atomic access comes first. +fn test_read_read_race1() { + let a = AtomicU16::new(0); + + thread::scope(|s| { + s.spawn(|| { + let ptr = &a as *const AtomicU16 as *mut u16; + unsafe { ptr.read() }; + }); + s.spawn(|| { + thread::yield_now(); + + a.load(Ordering::SeqCst); + }); + }); +} + +// This test covers the case where the atomic access comes first. 
+fn test_read_read_race2() { + let a = AtomicU16::new(0); + + thread::scope(|s| { + s.spawn(|| { + a.load(Ordering::SeqCst); + }); + s.spawn(|| { + thread::yield_now(); + + let ptr = &a as *const AtomicU16 as *mut u16; + unsafe { ptr.read() }; + }); + }); +} + pub fn main() { test_fence_sync(); test_multiple_reads(); test_rmw_no_block(); test_simple_release(); test_local_variable_lazy_write(); + test_read_read_race1(); + test_read_read_race2(); } From 6ca5e29e49811ccee4e8843c419ec45d7656aff6 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Sat, 10 Aug 2024 19:47:22 +0200 Subject: [PATCH 3/4] allow mixed-size atomic reads --- library/core/src/sync/atomic.rs | 26 +++++++----- src/tools/miri/src/concurrency/data_race.rs | 40 ++++++++++++------ .../tests/fail/data_race/mixed_size_read.rs | 28 ------------- ...ze_read_read_write.match_first_load.stderr | 22 ++++++++++ ...e_read_read_write.match_second_load.stderr | 22 ++++++++++ .../data_race/mixed_size_read_read_write.rs | 39 ++++++++++++++++++ .../mixed_size_read_write.read_write.stderr | 22 ++++++++++ .../fail/data_race/mixed_size_read_write.rs | 39 ++++++++++++++++++ ...> mixed_size_read_write.write_read.stderr} | 14 +++---- ...ize_write.rs => mixed_size_write_write.rs} | 0 ...e.stderr => mixed_size_write_write.stderr} | 6 +-- .../fail/weak_memory/racing_mixed_size.rs | 41 ------------------- .../weak_memory/racing_mixed_size_read.rs | 39 ------------------ .../weak_memory/racing_mixed_size_read.stderr | 22 ---------- .../miri/tests/pass/concurrency/data_race.rs | 21 ++++++++++ 15 files changed, 218 insertions(+), 163 deletions(-) delete mode 100644 src/tools/miri/tests/fail/data_race/mixed_size_read.rs create mode 100644 src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.match_first_load.stderr create mode 100644 src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.match_second_load.stderr create mode 100644 src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.rs create mode 100644 
src/tools/miri/tests/fail/data_race/mixed_size_read_write.read_write.stderr create mode 100644 src/tools/miri/tests/fail/data_race/mixed_size_read_write.rs rename src/tools/miri/tests/fail/data_race/{mixed_size_read.stderr => mixed_size_read_write.write_read.stderr} (66%) rename src/tools/miri/tests/fail/data_race/{mixed_size_write.rs => mixed_size_write_write.rs} (100%) rename src/tools/miri/tests/fail/data_race/{mixed_size_write.stderr => mixed_size_write_write.stderr} (90%) delete mode 100644 src/tools/miri/tests/fail/weak_memory/racing_mixed_size.rs delete mode 100644 src/tools/miri/tests/fail/weak_memory/racing_mixed_size_read.rs delete mode 100644 src/tools/miri/tests/fail/weak_memory/racing_mixed_size_read.stderr diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs index b3fe99f7f1a15..2357fb932b494 100644 --- a/library/core/src/sync/atomic.rs +++ b/library/core/src/sync/atomic.rs @@ -33,6 +33,12 @@ //! atomic load (via the operations provided in this module). A "modification of an atomic object" //! refers to an atomic store. //! +//! The most important aspect of this model is that conflicting non-synchronized accesses are +//! Undefined Behavior unless both accesses are atomic. Here, accesses are *conflicting* if they +//! affect overlapping regions of memory and at least one of them is a write. They are +//! *non-synchronized* if neither of them *happens-before* the other, according to the +//! happens-before order of the memory model. +//! //! The end result is *almost* equivalent to saying that creating a *shared reference* to one of the //! Rust atomic types corresponds to creating an `atomic_ref` in C++, with the `atomic_ref` being //! destroyed when the lifetime of the shared reference ends. The main difference is that Rust @@ -41,9 +47,10 @@ //! objects" and "non-atomic objects" (with `atomic_ref` temporarily converting a non-atomic object //! into an atomic object). //! -//! 
That said, Rust *does* inherit the C++ limitation that non-synchronized atomic accesses may not -//! partially overlap: they must be either disjoint or access the exact same memory. This in -//! particular rules out non-synchronized differently-sized accesses to the same data. +//! That said, Rust *does* inherit the C++ limitation that non-synchronized conflicting atomic +//! accesses may not partially overlap: they must be either disjoint or access the exact same +//! memory. This in particular rules out non-synchronized differently-sized atomic accesses to the +//! same data unless all accesses are reads. //! //! [cpp]: https://en.cppreference.com/w/cpp/atomic //! [cpp-intro.races]: https://timsong-cpp.github.io/cppwp/n4868/intro.multithread#intro.races @@ -63,7 +70,7 @@ //! let atomic = AtomicU16::new(0); //! //! thread::scope(|s| { -//! // This is UB: conflicting concurrent accesses. +//! // This is UB: conflicting non-synchronized accesses, at least one of which is non-atomic. //! s.spawn(|| atomic.store(1, Ordering::Relaxed)); // atomic store //! s.spawn(|| unsafe { atomic.as_ptr().write(2) }); // non-atomic write //! }); @@ -77,16 +84,15 @@ //! }); //! //! thread::scope(|s| { -//! // This is fine, `join` synchronizes the code in a way such that atomic -//! // and non-atomic accesses can't happen "at the same time". +//! // This is fine: `join` synchronizes the code in a way such that the atomic +//! // store happens-before the non-atomic write. //! let handle = s.spawn(|| atomic.store(1, Ordering::Relaxed)); // atomic store //! handle.join().unwrap(); // synchronize //! s.spawn(|| unsafe { atomic.as_ptr().write(2) }); // non-atomic write //! }); //! //! thread::scope(|s| { -//! // This is UB: using differently-sized atomic accesses to the same data. -//! // (It would be UB even if these are both loads.) +//! // This is UB: non-synchronized conflicting differently-sized atomic accesses. //! s.spawn(|| atomic.store(1, Ordering::Relaxed)); //! 
s.spawn(|| unsafe { //! let differently_sized = transmute::<&AtomicU16, &AtomicU8>(&atomic); @@ -95,8 +101,8 @@ //! }); //! //! thread::scope(|s| { -//! // This is fine, `join` synchronizes the code in a way such that -//! // differently-sized accesses can't happen "at the same time". +//! // This is fine: `join` synchronizes the code in a way such that +//! // the 1-byte store happens-before the 2-byte store. +//! let handle = s.spawn(|| atomic.store(1, Ordering::Relaxed)); //! handle.join().unwrap(); //! s.spawn(|| unsafe { diff --git a/src/tools/miri/src/concurrency/data_race.rs b/src/tools/miri/src/concurrency/data_race.rs index 558a032d0bcf5..812602c398074 100644 --- a/src/tools/miri/src/concurrency/data_race.rs +++ b/src/tools/miri/src/concurrency/data_race.rs @@ -191,7 +191,8 @@ struct AtomicMemoryCellClocks { /// The size of accesses to this atomic location. /// We use this to detect non-synchronized mixed-size accesses. Since all accesses must be /// aligned to their size, this is sufficient to detect imperfectly overlapping accesses. - size: Size, + /// `None` indicates that we saw multiple different sizes, which is okay as long as all accesses are reads. + size: Option, } #[derive(Copy, Clone, PartialEq, Eq, Debug)] @@ -265,6 +266,14 @@ impl AccessType { let mut msg = String::new(); if let Some(size) = size { + if size == Size::ZERO { + // In this case there were multiple read accesses with different sizes and then a write. + // We will be reporting *one* of the other reads, but we don't have enough information + // to determine which one had which size. + assert!(self == AccessType::AtomicLoad); + assert!(ty.is_none()); + return format!("multiple differently-sized atomic loads, including one load"); + } msg.push_str(&format!("{}-byte {}", size.bytes(), msg)) } @@ -305,8 +314,7 @@ impl AccessType { } } -/// Memory Cell vector clock metadata -/// for data-race detection. +/// Per-byte vector clock metadata for data-race detection. 
#[derive(Clone, PartialEq, Eq, Debug)] struct MemoryCellClocks { /// The vector-clock timestamp and the thread that did the last non-atomic write. We don't need @@ -325,8 +333,8 @@ struct MemoryCellClocks { read: VClock, /// Atomic access, acquire, release sequence tracking clocks. - /// For non-atomic memory in the common case this - /// value is set to None. + /// For non-atomic memory this value is set to None. + /// For atomic memory, each byte carries this information. atomic_ops: Option>, } @@ -336,7 +344,7 @@ impl AtomicMemoryCellClocks { read_vector: Default::default(), write_vector: Default::default(), sync_vector: Default::default(), - size, + size: Some(size), } } } @@ -383,17 +391,23 @@ impl MemoryCellClocks { &mut self, thread_clocks: &ThreadClockSet, size: Size, + write: bool, ) -> Result<&mut AtomicMemoryCellClocks, DataRace> { match self.atomic_ops { Some(ref mut atomic) => { // We are good if the size is the same or all atomic accesses are before our current time. - if atomic.size == size { + if atomic.size == Some(size) { Ok(atomic) } else if atomic.read_vector <= thread_clocks.clock && atomic.write_vector <= thread_clocks.clock { - // This is now the new size that must be used for accesses here. - atomic.size = size; + // We are fully ordered after all previous accesses, so we can change the size. + atomic.size = Some(size); + Ok(atomic) + } else if !write && atomic.write_vector <= thread_clocks.clock { + // This is a read, and it is ordered after the last write. It's okay for the + // sizes to mismatch, as long as no writes with a different size occur later. 
+ atomic.size = None; Ok(atomic) } else { Err(DataRace) @@ -508,7 +522,7 @@ impl MemoryCellClocks { access_size: Size, ) -> Result<(), DataRace> { trace!("Atomic read with vectors: {:#?} :: {:#?}", self, thread_clocks); - let atomic = self.atomic_access(thread_clocks, access_size)?; + let atomic = self.atomic_access(thread_clocks, access_size, /*write*/ false)?; atomic.read_vector.set_at_index(&thread_clocks.clock, index); // Make sure the last non-atomic write was before this access. if self.write_was_before(&thread_clocks.clock) { Ok(()) } else { Err(DataRace) } @@ -523,7 +537,7 @@ impl MemoryCellClocks { access_size: Size, ) -> Result<(), DataRace> { trace!("Atomic write with vectors: {:#?} :: {:#?}", self, thread_clocks); - let atomic = self.atomic_access(thread_clocks, access_size)?; + let atomic = self.atomic_access(thread_clocks, access_size, /*write*/ true)?; atomic.write_vector.set_at_index(&thread_clocks.clock, index); // Make sure the last non-atomic write and all non-atomic reads were before this access. if self.write_was_before(&thread_clocks.clock) && self.read <= thread_clocks.clock { @@ -969,10 +983,10 @@ impl VClockAlloc { } else if let Some(idx) = Self::find_gt_index(&mem_clocks.read, &active_clocks.clock) { (AccessType::NaRead(mem_clocks.read[idx].read_type()), idx, &mem_clocks.read) // Finally, mixed-size races. - } else if access.is_atomic() && let Some(atomic) = mem_clocks.atomic() && atomic.size != access_size { + } else if access.is_atomic() && let Some(atomic) = mem_clocks.atomic() && atomic.size != Some(access_size) { // This is only a race if we are not synchronized with all atomic accesses, so find // the one we are not synchronized with. 
- other_size = Some(atomic.size); + other_size = Some(atomic.size.unwrap_or(Size::ZERO)); if let Some(idx) = Self::find_gt_index(&atomic.write_vector, &active_clocks.clock) { (AccessType::AtomicStore, idx, &atomic.write_vector) diff --git a/src/tools/miri/tests/fail/data_race/mixed_size_read.rs b/src/tools/miri/tests/fail/data_race/mixed_size_read.rs deleted file mode 100644 index 828b47f0a6576..0000000000000 --- a/src/tools/miri/tests/fail/data_race/mixed_size_read.rs +++ /dev/null @@ -1,28 +0,0 @@ -//@compile-flags: -Zmiri-preemption-rate=0.0 -Zmiri-disable-weak-memory-emulation -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 - -use std::sync::atomic::{AtomicU8, AtomicU16, Ordering}; -use std::thread; - -fn convert(a: &AtomicU16) -> &[AtomicU8; 2] { - unsafe { std::mem::transmute(a) } -} - -// We can't allow mixed-size accesses; they are not possible in C++ and even -// Intel says you shouldn't do it. -fn main() { - let a = AtomicU16::new(0); - let a16 = &a; - let a8 = convert(a16); - - thread::scope(|s| { - s.spawn(|| { - a16.load(Ordering::SeqCst); - }); - s.spawn(|| { - a8[0].load(Ordering::SeqCst); - //~^ ERROR: Race condition detected between (1) 2-byte atomic load on thread `unnamed-1` and (2) 1-byte atomic load on thread `unnamed-2` - }); - }); -} diff --git a/src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.match_first_load.stderr b/src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.match_first_load.stderr new file mode 100644 index 0000000000000..b829627c00abc --- /dev/null +++ b/src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.match_first_load.stderr @@ -0,0 +1,22 @@ +error: Undefined Behavior: Race condition detected between (1) multiple differently-sized atomic loads, including one load on thread `unnamed-ID` and (2) 2-byte atomic store on thread `unnamed-ID` at ALLOC. 
(2) just happened here + --> tests/fail/data_race/mixed_size_read_read_write.rs:LL:CC + | +LL | a16.store(0, Ordering::SeqCst); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Race condition detected between (1) multiple differently-sized atomic loads, including one load on thread `unnamed-ID` and (2) 2-byte atomic store on thread `unnamed-ID` at ALLOC. (2) just happened here + | +help: and (1) occurred earlier here + --> tests/fail/data_race/mixed_size_read_read_write.rs:LL:CC + | +LL | a16.load(Ordering::SeqCst); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ + = help: overlapping unsynchronized atomic accesses must use the same access size + = help: see https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#memory-model-for-atomic-accesses for more information about the Rust memory model + = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior + = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information + = note: BACKTRACE (of the first span) on thread `unnamed-ID`: + = note: inside closure at tests/fail/data_race/mixed_size_read_read_write.rs:LL:CC + +note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace + +error: aborting due to 1 previous error + diff --git a/src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.match_second_load.stderr b/src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.match_second_load.stderr new file mode 100644 index 0000000000000..6bc38b14cb2ec --- /dev/null +++ b/src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.match_second_load.stderr @@ -0,0 +1,22 @@ +error: Undefined Behavior: Race condition detected between (1) multiple differently-sized atomic loads, including one load on thread `unnamed-ID` and (2) 1-byte atomic store on thread `unnamed-ID` at ALLOC. 
(2) just happened here + --> tests/fail/data_race/mixed_size_read_read_write.rs:LL:CC + | +LL | a8[0].store(0, Ordering::SeqCst); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Race condition detected between (1) multiple differently-sized atomic loads, including one load on thread `unnamed-ID` and (2) 1-byte atomic store on thread `unnamed-ID` at ALLOC. (2) just happened here + | +help: and (1) occurred earlier here + --> tests/fail/data_race/mixed_size_read_read_write.rs:LL:CC + | +LL | a16.load(Ordering::SeqCst); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ + = help: overlapping unsynchronized atomic accesses must use the same access size + = help: see https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#memory-model-for-atomic-accesses for more information about the Rust memory model + = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior + = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information + = note: BACKTRACE (of the first span) on thread `unnamed-ID`: + = note: inside closure at tests/fail/data_race/mixed_size_read_read_write.rs:LL:CC + +note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace + +error: aborting due to 1 previous error + diff --git a/src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.rs b/src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.rs new file mode 100644 index 0000000000000..ece5bd31274de --- /dev/null +++ b/src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.rs @@ -0,0 +1,39 @@ +//@compile-flags: -Zmiri-preemption-rate=0.0 -Zmiri-disable-weak-memory-emulation +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +// Two variants: the atomic store matches the size of the first or second atomic load. 
+//@revisions: match_first_load match_second_load + +use std::sync::atomic::{AtomicU16, AtomicU8, Ordering}; +use std::thread; + +fn convert(a: &AtomicU16) -> &[AtomicU8; 2] { + unsafe { std::mem::transmute(a) } +} + +// We can't allow mixed-size accesses; they are not possible in C++ and even +// Intel says you shouldn't do it. +fn main() { + let a = AtomicU16::new(0); + let a16 = &a; + let a8 = convert(a16); + + thread::scope(|s| { + s.spawn(|| { + a16.load(Ordering::SeqCst); + }); + s.spawn(|| { + a8[0].load(Ordering::SeqCst); + }); + s.spawn(|| { + thread::yield_now(); // make sure this happens last + if cfg!(match_first_load) { + a16.store(0, Ordering::SeqCst); + //~[match_first_load]^ ERROR: Race condition detected between (1) multiple differently-sized atomic loads, including one load on thread `unnamed-1` and (2) 2-byte atomic store on thread `unnamed-3` + } else { + a8[0].store(0, Ordering::SeqCst); + //~[match_second_load]^ ERROR: Race condition detected between (1) multiple differently-sized atomic loads, including one load on thread `unnamed-1` and (2) 1-byte atomic store on thread `unnamed-3` + } + }); + }); +} diff --git a/src/tools/miri/tests/fail/data_race/mixed_size_read_write.read_write.stderr b/src/tools/miri/tests/fail/data_race/mixed_size_read_write.read_write.stderr new file mode 100644 index 0000000000000..6f8dbd38ca841 --- /dev/null +++ b/src/tools/miri/tests/fail/data_race/mixed_size_read_write.read_write.stderr @@ -0,0 +1,22 @@ +error: Undefined Behavior: Race condition detected between (1) 1-byte atomic load on thread `unnamed-ID` and (2) 2-byte atomic store on thread `unnamed-ID` at ALLOC. (2) just happened here + --> tests/fail/data_race/mixed_size_read_write.rs:LL:CC + | +LL | a16.store(1, Ordering::SeqCst); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Race condition detected between (1) 1-byte atomic load on thread `unnamed-ID` and (2) 2-byte atomic store on thread `unnamed-ID` at ALLOC. 
(2) just happened here + | +help: and (1) occurred earlier here + --> tests/fail/data_race/mixed_size_read_write.rs:LL:CC + | +LL | a8[0].load(Ordering::SeqCst); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = help: overlapping unsynchronized atomic accesses must use the same access size + = help: see https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#memory-model-for-atomic-accesses for more information about the Rust memory model + = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior + = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information + = note: BACKTRACE (of the first span) on thread `unnamed-ID`: + = note: inside closure at tests/fail/data_race/mixed_size_read_write.rs:LL:CC + +note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace + +error: aborting due to 1 previous error + diff --git a/src/tools/miri/tests/fail/data_race/mixed_size_read_write.rs b/src/tools/miri/tests/fail/data_race/mixed_size_read_write.rs new file mode 100644 index 0000000000000..acc12427b3fdc --- /dev/null +++ b/src/tools/miri/tests/fail/data_race/mixed_size_read_write.rs @@ -0,0 +1,39 @@ +//@compile-flags: -Zmiri-preemption-rate=0.0 -Zmiri-disable-weak-memory-emulation +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +// Two revisions, depending on which access goes first. +//@revisions: read_write write_read + +use std::sync::atomic::{AtomicU16, AtomicU8, Ordering}; +use std::thread; + +fn convert(a: &AtomicU16) -> &[AtomicU8; 2] { + unsafe { std::mem::transmute(a) } +} + +// We can't allow mixed-size accesses; they are not possible in C++ and even +// Intel says you shouldn't do it. 
+fn main() { + let a = AtomicU16::new(0); + let a16 = &a; + let a8 = convert(a16); + + thread::scope(|s| { + s.spawn(|| { + if cfg!(read_write) { + // Let the other one go first. + thread::yield_now(); + } + a16.store(1, Ordering::SeqCst); + //~[read_write]^ ERROR: Race condition detected between (1) 1-byte atomic load on thread `unnamed-2` and (2) 2-byte atomic store on thread `unnamed-1` + }); + s.spawn(|| { + if cfg!(write_read) { + // Let the other one go first. + thread::yield_now(); + } + a8[0].load(Ordering::SeqCst); + //~[write_read]^ ERROR: Race condition detected between (1) 2-byte atomic store on thread `unnamed-1` and (2) 1-byte atomic load on thread `unnamed-2` + }); + }); +} diff --git a/src/tools/miri/tests/fail/data_race/mixed_size_read.stderr b/src/tools/miri/tests/fail/data_race/mixed_size_read_write.write_read.stderr similarity index 66% rename from src/tools/miri/tests/fail/data_race/mixed_size_read.stderr rename to src/tools/miri/tests/fail/data_race/mixed_size_read_write.write_read.stderr index 31a798a89b133..990d2058bca97 100644 --- a/src/tools/miri/tests/fail/data_race/mixed_size_read.stderr +++ b/src/tools/miri/tests/fail/data_race/mixed_size_read_write.write_read.stderr @@ -1,20 +1,20 @@ -error: Undefined Behavior: Race condition detected between (1) 2-byte atomic load on thread `unnamed-ID` and (2) 1-byte atomic load on thread `unnamed-ID` at ALLOC. (2) just happened here - --> tests/fail/data_race/mixed_size_read.rs:LL:CC +error: Undefined Behavior: Race condition detected between (1) 2-byte atomic store on thread `unnamed-ID` and (2) 1-byte atomic load on thread `unnamed-ID` at ALLOC. (2) just happened here + --> tests/fail/data_race/mixed_size_read_write.rs:LL:CC | LL | a8[0].load(Ordering::SeqCst); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Race condition detected between (1) 2-byte atomic load on thread `unnamed-ID` and (2) 1-byte atomic load on thread `unnamed-ID` at ALLOC. 
(2) just happened here + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Race condition detected between (1) 2-byte atomic store on thread `unnamed-ID` and (2) 1-byte atomic load on thread `unnamed-ID` at ALLOC. (2) just happened here | help: and (1) occurred earlier here - --> tests/fail/data_race/mixed_size_read.rs:LL:CC + --> tests/fail/data_race/mixed_size_read_write.rs:LL:CC | -LL | a16.load(Ordering::SeqCst); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | a16.store(1, Ordering::SeqCst); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ = help: overlapping unsynchronized atomic accesses must use the same access size = help: see https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#memory-model-for-atomic-accesses for more information about the Rust memory model = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information = note: BACKTRACE (of the first span) on thread `unnamed-ID`: - = note: inside closure at tests/fail/data_race/mixed_size_read.rs:LL:CC + = note: inside closure at tests/fail/data_race/mixed_size_read_write.rs:LL:CC note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace diff --git a/src/tools/miri/tests/fail/data_race/mixed_size_write.rs b/src/tools/miri/tests/fail/data_race/mixed_size_write_write.rs similarity index 100% rename from src/tools/miri/tests/fail/data_race/mixed_size_write.rs rename to src/tools/miri/tests/fail/data_race/mixed_size_write_write.rs diff --git a/src/tools/miri/tests/fail/data_race/mixed_size_write.stderr b/src/tools/miri/tests/fail/data_race/mixed_size_write_write.stderr similarity index 90% rename from src/tools/miri/tests/fail/data_race/mixed_size_write.stderr rename to src/tools/miri/tests/fail/data_race/mixed_size_write_write.stderr index c30b48c1f32b0..1f22413bc5f93 100644 --- a/src/tools/miri/tests/fail/data_race/mixed_size_write.stderr +++ 
b/src/tools/miri/tests/fail/data_race/mixed_size_write_write.stderr @@ -1,11 +1,11 @@ error: Undefined Behavior: Race condition detected between (1) 2-byte atomic store on thread `unnamed-ID` and (2) 1-byte atomic store on thread `unnamed-ID` at ALLOC. (2) just happened here - --> tests/fail/data_race/mixed_size_write.rs:LL:CC + --> tests/fail/data_race/mixed_size_write_write.rs:LL:CC | LL | a8[0].store(1, Ordering::SeqCst); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Race condition detected between (1) 2-byte atomic store on thread `unnamed-ID` and (2) 1-byte atomic store on thread `unnamed-ID` at ALLOC. (2) just happened here | help: and (1) occurred earlier here - --> tests/fail/data_race/mixed_size_write.rs:LL:CC + --> tests/fail/data_race/mixed_size_write_write.rs:LL:CC | LL | a16.store(1, Ordering::SeqCst); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -14,7 +14,7 @@ LL | a16.store(1, Ordering::SeqCst); = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information = note: BACKTRACE (of the first span) on thread `unnamed-ID`: - = note: inside closure at tests/fail/data_race/mixed_size_write.rs:LL:CC + = note: inside closure at tests/fail/data_race/mixed_size_write_write.rs:LL:CC note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace diff --git a/src/tools/miri/tests/fail/weak_memory/racing_mixed_size.rs b/src/tools/miri/tests/fail/weak_memory/racing_mixed_size.rs deleted file mode 100644 index 1193dddc57784..0000000000000 --- a/src/tools/miri/tests/fail/weak_memory/racing_mixed_size.rs +++ /dev/null @@ -1,41 +0,0 @@ -// We want to control preemption here. -// Avoid accidental synchronization via address reuse. 
-//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-cross-thread-rate=0 - -#![feature(core_intrinsics)] - -use std::ptr; -use std::sync::atomic::AtomicU32; -use std::sync::atomic::Ordering::*; -use std::thread::spawn; - -fn static_atomic_u32(val: u32) -> &'static AtomicU32 { - let ret = Box::leak(Box::new(AtomicU32::new(val))); - ret -} - -fn split_u32_ptr(dword: *const u32) -> *const [u16; 2] { - unsafe { std::mem::transmute::<*const u32, *const [u16; 2]>(dword) } -} - -// Wine's SRWLock implementation does this, which is definitely undefined in C++ memory model -// https://github.com/wine-mirror/wine/blob/303f8042f9db508adaca02ef21f8de4992cb9c03/dlls/ntdll/sync.c#L543-L566 -// It probably works just fine on x86, but Intel does document this as "don't do it!" -pub fn main() { - let x = static_atomic_u32(0); - let j1 = spawn(move || { - x.store(1, Relaxed); - }); - - let j2 = spawn(move || { - let x_ptr = x as *const AtomicU32 as *const u32; - let x_split = split_u32_ptr(x_ptr); - unsafe { - let hi = ptr::addr_of!((*x_split)[0]); - std::intrinsics::atomic_load_relaxed(hi); //~ ERROR: (1) 4-byte atomic store on thread `unnamed-1` and (2) 2-byte atomic load - } - }); - - j1.join().unwrap(); - j2.join().unwrap(); -} diff --git a/src/tools/miri/tests/fail/weak_memory/racing_mixed_size_read.rs b/src/tools/miri/tests/fail/weak_memory/racing_mixed_size_read.rs deleted file mode 100644 index 0a0e372f1f3d2..0000000000000 --- a/src/tools/miri/tests/fail/weak_memory/racing_mixed_size_read.rs +++ /dev/null @@ -1,39 +0,0 @@ -// We want to control preemption here. -// Avoid accidental synchronization via address reuse. 
-//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-cross-thread-rate=0 - -use std::sync::atomic::Ordering::*; -use std::sync::atomic::{AtomicU16, AtomicU32}; -use std::thread::spawn; - -fn static_atomic(val: u32) -> &'static AtomicU32 { - let ret = Box::leak(Box::new(AtomicU32::new(val))); - ret -} - -fn split_u32_ptr(dword: *const u32) -> *const [u16; 2] { - unsafe { std::mem::transmute::<*const u32, *const [u16; 2]>(dword) } -} - -// Racing mixed size reads may cause two loads to read-from -// the same store but observe different values, which doesn't make -// sense under the formal model so we forbid this. -pub fn main() { - let x = static_atomic(0); - - let j1 = spawn(move || { - x.load(Relaxed); - }); - - let j2 = spawn(move || { - let x_ptr = x as *const AtomicU32 as *const u32; - let x_split = split_u32_ptr(x_ptr); - unsafe { - let hi = x_split as *const u16 as *const AtomicU16; - (*hi).load(Relaxed); //~ ERROR: (1) 4-byte atomic load on thread `unnamed-1` and (2) 2-byte atomic load - } - }); - - j1.join().unwrap(); - j2.join().unwrap(); -} diff --git a/src/tools/miri/tests/fail/weak_memory/racing_mixed_size_read.stderr b/src/tools/miri/tests/fail/weak_memory/racing_mixed_size_read.stderr deleted file mode 100644 index 9e6a6e80418cf..0000000000000 --- a/src/tools/miri/tests/fail/weak_memory/racing_mixed_size_read.stderr +++ /dev/null @@ -1,22 +0,0 @@ -error: Undefined Behavior: Race condition detected between (1) 4-byte atomic load on thread `unnamed-ID` and (2) 2-byte atomic load on thread `unnamed-ID` at ALLOC. (2) just happened here - --> tests/fail/weak_memory/racing_mixed_size_read.rs:LL:CC - | -LL | (*hi).load(Relaxed); - | ^^^^^^^^^^^^^^^^^^^ Race condition detected between (1) 4-byte atomic load on thread `unnamed-ID` and (2) 2-byte atomic load on thread `unnamed-ID` at ALLOC. 
(2) just happened here - | -help: and (1) occurred earlier here - --> tests/fail/weak_memory/racing_mixed_size_read.rs:LL:CC - | -LL | x.load(Relaxed); - | ^^^^^^^^^^^^^^^ - = help: overlapping unsynchronized atomic accesses must use the same access size - = help: see https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#memory-model-for-atomic-accesses for more information about the Rust memory model - = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior - = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information - = note: BACKTRACE (of the first span) on thread `unnamed-ID`: - = note: inside closure at tests/fail/weak_memory/racing_mixed_size_read.rs:LL:CC - -note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace - -error: aborting due to 1 previous error - diff --git a/src/tools/miri/tests/pass/concurrency/data_race.rs b/src/tools/miri/tests/pass/concurrency/data_race.rs index adf5a37891f06..0918b94ed31e0 100644 --- a/src/tools/miri/tests/pass/concurrency/data_race.rs +++ b/src/tools/miri/tests/pass/concurrency/data_race.rs @@ -177,6 +177,26 @@ fn test_read_read_race2() { }); } +fn mixed_size_read_read() { + fn convert(a: &AtomicU16) -> &[AtomicU8; 2] { + unsafe { std::mem::transmute(a) } + } + + let a = AtomicU16::new(0); + let a16 = &a; + let a8 = convert(a16); + + // Just two different-sized atomic reads without any writes are fine. 
+    thread::scope(|s| {
+        s.spawn(|| {
+            a16.load(Ordering::SeqCst);
+        });
+        s.spawn(|| {
+            a8[0].load(Ordering::SeqCst);
+        });
+    });
+}
+
 pub fn main() {
     test_fence_sync();
     test_multiple_reads();
@@ -185,4 +205,5 @@ pub fn main() {
     test_local_variable_lazy_write();
     test_read_read_race1();
     test_read_read_race2();
+    mixed_size_read_read();
 }

From 96be76bf53b2a9472f03786ccc3b291196e5a77d Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Mon, 12 Aug 2024 10:34:05 +0200
Subject: [PATCH 4/4] Further clarification for atomic and UnsafeCell docs:

- UnsafeCell: mention the term "data race", and reference the data race
  definition
- atomic: failing RMWs are just reads, reorder and reword docs
---
 library/core/src/cell.rs        | 10 +++--
 library/core/src/sync/atomic.rs | 37 +++++++++----------
 .../miri/tests/pass/concurrency/data_race.rs | 17 +++++++++
 3 files changed, 41 insertions(+), 23 deletions(-)

diff --git a/library/core/src/cell.rs b/library/core/src/cell.rs
index a3e91bc067090..c99f1aece4f69 100644
--- a/library/core/src/cell.rs
+++ b/library/core/src/cell.rs
@@ -1895,11 +1895,17 @@ impl fmt::Display for RefMut<'_, T> {
 /// uniqueness guarantee for mutable references is unaffected. There is *no* legal way to obtain
 /// aliasing `&mut`, not even with `UnsafeCell`.
 ///
+/// `UnsafeCell` does nothing to avoid data races; they are still undefined behavior. If multiple
+/// threads have access to the same `UnsafeCell`, they must follow the usual rules of the
+/// [concurrent memory model]: conflicting non-synchronized accesses must be done via the APIs in
+/// [`core::sync::atomic`].
+///
 /// The `UnsafeCell` API itself is technically very simple: [`.get()`] gives you a raw pointer
 /// `*mut T` to its contents. It is up to _you_ as the abstraction designer to use that raw pointer
 /// correctly.
/// /// [`.get()`]: `UnsafeCell::get` +/// [concurrent memory model]: ../sync/atomic/index.html#memory-model-for-atomic-accesses /// /// The precise Rust aliasing rules are somewhat in flux, but the main points are not contentious: /// @@ -1922,10 +1928,6 @@ impl fmt::Display for RefMut<'_, T> { /// live memory and the compiler is allowed to insert spurious reads if it can prove that this /// memory has not yet been deallocated. /// -/// - At all times, you must avoid data races. If multiple threads have access to -/// the same `UnsafeCell`, then any writes must have a proper happens-before relation to all other -/// accesses (or use atomics). -/// /// To assist with proper design, the following scenarios are explicitly declared legal /// for single-threaded code: /// diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs index 2357fb932b494..be85f5071299d 100644 --- a/library/core/src/sync/atomic.rs +++ b/library/core/src/sync/atomic.rs @@ -33,12 +33,6 @@ //! atomic load (via the operations provided in this module). A "modification of an atomic object" //! refers to an atomic store. //! -//! The most important aspect of this model is that conflicting non-synchronized accesses are -//! Undefined Behavior unless both accesses are atomic. Here, accesses are *conflicting* if they -//! affect overlapping regions of memory and at least one of them is a write. They are -//! *non-synchronized* if neither of them *happens-before* the other, according to the -//! happens-before order of the memory model. -//! //! The end result is *almost* equivalent to saying that creating a *shared reference* to one of the //! Rust atomic types corresponds to creating an `atomic_ref` in C++, with the `atomic_ref` being //! destroyed when the lifetime of the shared reference ends. The main difference is that Rust @@ -47,20 +41,25 @@ //! objects" and "non-atomic objects" (with `atomic_ref` temporarily converting a non-atomic object //! into an atomic object). //! 
-//! That said, Rust *does* inherit the C++ limitation that non-synchronized conflicting atomic
-//! accesses may not partially overlap: they must be either disjoint or access the exact same
-//! memory. This in particular rules out non-synchronized differently-sized atomic accesses to the
-//! same data unless all accesses are reads.
+//! The most important aspect of this model is that *data races* are undefined behavior. A data race
+//! is defined as conflicting non-synchronized accesses where at least one of the accesses is
+//! non-atomic. Here, accesses are *conflicting* if they affect overlapping regions of memory and at
+//! least one of them is a write. They are *non-synchronized* if neither of them *happens-before*
+//! the other, according to the happens-before order of the memory model.
 //!
-//! [cpp]: https://en.cppreference.com/w/cpp/atomic
-//! [cpp-intro.races]: https://timsong-cpp.github.io/cppwp/n4868/intro.multithread#intro.races
+//! The other possible cause of undefined behavior in the memory model is mixed-size accesses: Rust
+//! inherits the C++ limitation that non-synchronized conflicting atomic accesses may not partially
+//! overlap. In other words, every pair of non-synchronized atomic accesses must be either disjoint,
+//! access the exact same memory (including using the same access size), or both be reads.
 //!
-//! Each method takes an [`Ordering`] which represents the strength of
-//! the memory barrier for that operation. These orderings behave the
-//! same as the corresponding [C++20 atomic orderings][1]. For more information see the [nomicon][2].
+//! Each atomic access takes an [`Ordering`] which defines how the operation interacts with the
+//! happens-before order. These orderings behave the same as the corresponding [C++20 atomic
+//! orderings][cpp_memory_order]. For more information, see the [nomicon].
 //!
-//! [1]: https://en.cppreference.com/w/cpp/atomic/memory_order
-//! [2]: ../../../nomicon/atomics.html
+//! 
[cpp]: https://en.cppreference.com/w/cpp/atomic +//! [cpp-intro.races]: https://timsong-cpp.github.io/cppwp/n4868/intro.multithread#intro.races +//! [cpp_memory_order]: https://en.cppreference.com/w/cpp/atomic/memory_order +//! [nomicon]: ../../../nomicon/atomics.html //! //! ```rust,no_run undefined_behavior //! use std::sync::atomic::{AtomicU16, AtomicU8, Ordering}; @@ -157,7 +156,7 @@ //! //! # Atomic accesses to read-only memory //! -//! In general, *all* atomic accesses on read-only memory are Undefined Behavior. For instance, attempting +//! In general, *all* atomic accesses on read-only memory are undefined behavior. For instance, attempting //! to do a `compare_exchange` that will definitely fail (making it conceptually a read-only //! operation) can still cause a segmentation fault if the underlying memory page is mapped read-only. Since //! atomic `load`s might be implemented using compare-exchange operations, even a `load` can fault @@ -173,7 +172,7 @@ //! //! As an exception from the general rule stated above, "sufficiently small" atomic loads with //! `Ordering::Relaxed` are implemented in a way that works on read-only memory, and are hence not -//! Undefined Behavior. The exact size limit for what makes a load "sufficiently small" varies +//! undefined behavior. The exact size limit for what makes a load "sufficiently small" varies //! depending on the target: //! //! | `target_arch` | Size limit | diff --git a/src/tools/miri/tests/pass/concurrency/data_race.rs b/src/tools/miri/tests/pass/concurrency/data_race.rs index 0918b94ed31e0..d16de0ae8e232 100644 --- a/src/tools/miri/tests/pass/concurrency/data_race.rs +++ b/src/tools/miri/tests/pass/concurrency/data_race.rs @@ -197,6 +197,22 @@ fn mixed_size_read_read() { }); } +fn failing_rmw_is_read() { + let a = AtomicUsize::new(0); + thread::scope(|s| { + s.spawn(|| unsafe { + // Non-atomic read. + let _val = *(&a as *const AtomicUsize).cast::(); + }); + + s.spawn(|| { + // RMW that will fail. 
+ // This is not considered a write, so there is no data race here. + a.compare_exchange(1, 2, Ordering::SeqCst, Ordering::SeqCst).unwrap_err(); + }); + }); +} + pub fn main() { test_fence_sync(); test_multiple_reads(); @@ -206,4 +222,5 @@ pub fn main() { test_read_read_race1(); test_read_read_race2(); mixed_size_read_read(); + failing_rmw_is_read(); }