diff --git a/library/core/src/cell.rs b/library/core/src/cell.rs index a3e91bc067090..c99f1aece4f69 100644 --- a/library/core/src/cell.rs +++ b/library/core/src/cell.rs @@ -1895,11 +1895,17 @@ impl fmt::Display for RefMut<'_, T> { /// uniqueness guarantee for mutable references is unaffected. There is *no* legal way to obtain /// aliasing `&mut`, not even with `UnsafeCell`. /// +/// `UnsafeCell` does nothing to avoid data races; they are still undefined behavior. If multiple +/// threads have access to the same `UnsafeCell`, they must follow the usual rules of the +/// [concurrent memory model]: conflicting non-synchronized accesses must be done via the APIs in +/// [`core::sync::atomic`]. +/// /// The `UnsafeCell` API itself is technically very simple: [`.get()`] gives you a raw pointer /// `*mut T` to its contents. It is up to _you_ as the abstraction designer to use that raw pointer /// correctly. /// /// [`.get()`]: `UnsafeCell::get` +/// [concurrent memory model]: ../sync/atomic/index.html#memory-model-for-atomic-accesses /// /// The precise Rust aliasing rules are somewhat in flux, but the main points are not contentious: /// @@ -1922,10 +1928,6 @@ impl fmt::Display for RefMut<'_, T> { /// live memory and the compiler is allowed to insert spurious reads if it can prove that this /// memory has not yet been deallocated. /// -/// - At all times, you must avoid data races. If multiple threads have access to -/// the same `UnsafeCell`, then any writes must have a proper happens-before relation to all other -/// accesses (or use atomics). -/// /// To assist with proper design, the following scenarios are explicitly declared legal /// for single-threaded code: /// diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs index b06a3bd4487d3..be85f5071299d 100644 --- a/library/core/src/sync/atomic.rs +++ b/library/core/src/sync/atomic.rs @@ -24,25 +24,42 @@ //! //! ## Memory model for atomic accesses //! -//! Rust atomics currently follow the same rules as [C++20 atomics][cpp], specifically `atomic_ref`. -//! Basically, creating a *shared reference* to one of the Rust atomic types corresponds to creating -//! an `atomic_ref` in C++; the `atomic_ref` is destroyed when the lifetime of the shared reference -//! ends. A Rust atomic type that is exclusively owned or behind a mutable reference does *not* -//! correspond to an “atomic object” in C++, since the underlying primitive can be mutably accessed, -//! for example with `get_mut`, to perform non-atomic operations. +//! Rust atomics currently follow the same rules as [C++20 atomics][cpp], specifically the rules +//! from the [`intro.races`][cpp-intro.races] section, without the "consume" memory ordering. Since +//! C++ uses an object-based memory model whereas Rust is access-based, a bit of translation work +//! has to be done to apply the C++ rules to Rust: whenever C++ talks about "the value of an +//! object", we understand that to mean the resulting bytes obtained when doing a read. When the C++ +//! standard talks about "the value of an atomic object", this refers to the result of doing an +//! atomic load (via the operations provided in this module). A "modification of an atomic object" +//! refers to an atomic store. //! -//! [cpp]: https://en.cppreference.com/w/cpp/atomic +//! The end result is *almost* equivalent to saying that creating a *shared reference* to one of the +//! Rust atomic types corresponds to creating an `atomic_ref` in C++, with the `atomic_ref` being +//! 
destroyed when the lifetime of the shared reference ends. The main difference is that Rust +//! permits concurrent atomic and non-atomic reads to the same memory as those cause no issue in the +//! C++ memory model, they are just forbidden in C++ because memory is partitioned into "atomic +//! objects" and "non-atomic objects" (with `atomic_ref` temporarily converting a non-atomic object +//! into an atomic object). +//! +//! The most important aspect of this model is that *data races* are undefined behavior. A data race +//! is defined as conflicting non-synchronized accesses where at least one of the accesses is +//! non-atomic. Here, accesses are *conflicting* if they affect overlapping regions of memory and at +//! least one of them is a write. They are *non-synchronized* if neither of them *happens-before* +//! the other, according to the happens-before order of the memory model. //! -//! Each method takes an [`Ordering`] which represents the strength of -//! the memory barrier for that operation. These orderings are the -//! same as the [C++20 atomic orderings][1]. For more information see the [nomicon][2]. +//! The other possible cause of undefined behavior in the memory model are mixed-size accesses: Rust +//! inherits the C++ limitation that non-synchronized conflicting atomic accesses may not partially +//! overlap. In other words, every pair of non-synchronized atomic accesses must be either disjoint, +//! access the exact same memory (including using the same access size), or both be reads. //! -//! [1]: https://en.cppreference.com/w/cpp/atomic/memory_order -//! [2]: ../../../nomicon/atomics.html +//! Each atomic access takes an [`Ordering`] which defines how the operation interacts with the +//! happens-before order. These orderings behave the same as the corresponding [C++20 atomic +//! orderings][cpp_memory_order]. For more information, see the [nomicon]. //! -//! Since C++ does not support mixing atomic and non-atomic accesses, or non-synchronized -//! different-sized accesses to the same data, Rust does not support those operations either. -//! Note that both of those restrictions only apply if the accesses are non-synchronized. +//! [cpp]: https://en.cppreference.com/w/cpp/atomic +//! [cpp-intro.races]: https://timsong-cpp.github.io/cppwp/n4868/intro.multithread#intro.races +//! [cpp_memory_order]: https://en.cppreference.com/w/cpp/atomic/memory_order +//! [nomicon]: ../../../nomicon/atomics.html //! //! ```rust,no_run undefined_behavior //! use std::sync::atomic::{AtomicU16, AtomicU8, Ordering}; @@ -52,27 +69,29 @@ //! let atomic = AtomicU16::new(0); //! //! thread::scope(|s| { -//! // This is UB: mixing atomic and non-atomic accesses -//! s.spawn(|| atomic.store(1, Ordering::Relaxed)); -//! s.spawn(|| unsafe { atomic.as_ptr().write(2) }); +//! // This is UB: conflicting non-synchronized accesses, at least one of which is non-atomic. +//! s.spawn(|| atomic.store(1, Ordering::Relaxed)); // atomic store +//! s.spawn(|| unsafe { atomic.as_ptr().write(2) }); // non-atomic write //! }); //! //! thread::scope(|s| { -//! // This is UB: even reads are not allowed to be mixed -//! s.spawn(|| atomic.load(Ordering::Relaxed)); -//! s.spawn(|| unsafe { atomic.as_ptr().read() }); +//! // This is fine: the accesses do not conflict (as none of them performs any modification). +//! // In C++ this would be disallowed since creating an `atomic_ref` precludes +//! // further non-atomic accesses, but Rust does not have that limitation. +//! 
s.spawn(|| atomic.load(Ordering::Relaxed)); // atomic load +//! s.spawn(|| unsafe { atomic.as_ptr().read() }); // non-atomic read //! }); //! //! thread::scope(|s| { -//! // This is fine, `join` synchronizes the code in a way such that atomic -//! // and non-atomic accesses can't happen "at the same time" -//! let handle = s.spawn(|| atomic.store(1, Ordering::Relaxed)); -//! handle.join().unwrap(); -//! s.spawn(|| unsafe { atomic.as_ptr().write(2) }); +//! // This is fine: `join` synchronizes the code in a way such that the atomic +//! // store happens-before the non-atomic write. +//! let handle = s.spawn(|| atomic.store(1, Ordering::Relaxed)); // atomic store +//! handle.join().unwrap(); // synchronize +//! s.spawn(|| unsafe { atomic.as_ptr().write(2) }); // non-atomic write //! }); //! //! thread::scope(|s| { -//! // This is UB: using different-sized atomic accesses to the same data +//! // This is UB: non-synchronized conflicting differently-sized atomic accesses. //! s.spawn(|| atomic.store(1, Ordering::Relaxed)); //! s.spawn(|| unsafe { //! let differently_sized = transmute::<&AtomicU16, &AtomicU8>(&atomic); @@ -81,8 +100,8 @@ //! }); //! //! thread::scope(|s| { -//! // This is fine, `join` synchronizes the code in a way such that -//! // differently-sized accesses can't happen "at the same time" +//! // This is fine: `join` synchronizes the code in a way such that +//! // the 1-byte store happens-before the 2-byte store. //! let handle = s.spawn(|| atomic.store(1, Ordering::Relaxed)); //! handle.join().unwrap(); //! s.spawn(|| unsafe { @@ -137,7 +156,7 @@ //! //! # Atomic accesses to read-only memory //! -//! In general, *all* atomic accesses on read-only memory are Undefined Behavior. For instance, attempting +//! In general, *all* atomic accesses on read-only memory are undefined behavior. For instance, attempting //! to do a `compare_exchange` that will definitely fail (making it conceptually a read-only //! operation) can still cause a segmentation fault if the underlying memory page is mapped read-only. Since //! atomic `load`s might be implemented using compare-exchange operations, even a `load` can fault @@ -153,7 +172,7 @@ //! //! As an exception from the general rule stated above, "sufficiently small" atomic loads with //! `Ordering::Relaxed` are implemented in a way that works on read-only memory, and are hence not -//! Undefined Behavior. The exact size limit for what makes a load "sufficiently small" varies +//! undefined behavior. The exact size limit for what makes a load "sufficiently small" varies //! depending on the target: //! //! | `target_arch` | Size limit | diff --git a/src/tools/miri/src/concurrency/data_race.rs b/src/tools/miri/src/concurrency/data_race.rs index b5b43f589f699..812602c398074 100644 --- a/src/tools/miri/src/concurrency/data_race.rs +++ b/src/tools/miri/src/concurrency/data_race.rs @@ -191,7 +191,8 @@ struct AtomicMemoryCellClocks { /// The size of accesses to this atomic location. /// We use this to detect non-synchronized mixed-size accesses. Since all accesses must be /// aligned to their size, this is sufficient to detect imperfectly overlapping accesses. - size: Size, + /// `None` indicates that we saw multiple different sizes, which is okay as long as all accesses are reads. 
+    size: Option<Size>,
 }
 
 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
@@ -265,6 +266,14 @@ impl AccessType {
         let mut msg = String::new();
 
         if let Some(size) = size {
+            if size == Size::ZERO {
+                // In this case there were multiple read accesses with different sizes and then a write.
+                // We will be reporting *one* of the other reads, but we don't have enough information
+                // to determine which one had which size.
+                assert!(self == AccessType::AtomicLoad);
+                assert!(ty.is_none());
+                return format!("multiple differently-sized atomic loads, including one load");
+            }
             msg.push_str(&format!("{}-byte {}", size.bytes(), msg))
         }
 
@@ -305,8 +314,7 @@ impl AccessType {
     }
 }
 
-/// Memory Cell vector clock metadata
-/// for data-race detection.
+/// Per-byte vector clock metadata for data-race detection.
 #[derive(Clone, PartialEq, Eq, Debug)]
 struct MemoryCellClocks {
     /// The vector-clock timestamp and the thread that did the last non-atomic write. We don't need
@@ -325,8 +333,8 @@ struct MemoryCellClocks {
     read: VClock,
 
     /// Atomic access, acquire, release sequence tracking clocks.
-    /// For non-atomic memory in the common case this
-    /// value is set to None.
+    /// For non-atomic memory this value is set to None.
+    /// For atomic memory, each byte carries this information.
     atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
 }
 
@@ -336,7 +344,7 @@ impl AtomicMemoryCellClocks {
             read_vector: Default::default(),
             write_vector: Default::default(),
             sync_vector: Default::default(),
-            size,
+            size: Some(size),
         }
     }
 }
@@ -383,17 +391,23 @@ impl MemoryCellClocks {
         &mut self,
         thread_clocks: &ThreadClockSet,
         size: Size,
+        write: bool,
     ) -> Result<&mut AtomicMemoryCellClocks, DataRace> {
         match self.atomic_ops {
             Some(ref mut atomic) => {
                 // We are good if the size is the same or all atomic accesses are before our current time.
-                if atomic.size == size {
+                if atomic.size == Some(size) {
                     Ok(atomic)
                 } else if atomic.read_vector <= thread_clocks.clock
                     && atomic.write_vector <= thread_clocks.clock
                 {
-                    // This is now the new size that must be used for accesses here.
-                    atomic.size = size;
+                    // We are fully ordered after all previous accesses, so we can change the size.
+                    atomic.size = Some(size);
+                    Ok(atomic)
+                } else if !write && atomic.write_vector <= thread_clocks.clock {
+                    // This is a read, and it is ordered after the last write. It's okay for the
+                    // sizes to mismatch, as long as no writes with a different size occur later.
+                    atomic.size = None;
                     Ok(atomic)
                 } else {
                     Err(DataRace)
@@ -499,7 +513,7 @@ impl MemoryCellClocks {
         Ok(())
     }
 
-    /// Detect data-races with an atomic read, caused by a non-atomic access that does
+    /// Detect data-races with an atomic read, caused by a non-atomic write that does
    /// not happen-before the atomic-read.
     fn atomic_read_detect(
         &mut self,
@@ -508,14 +522,10 @@ impl MemoryCellClocks {
         access_size: Size,
     ) -> Result<(), DataRace> {
         trace!("Atomic read with vectors: {:#?} :: {:#?}", self, thread_clocks);
-        let atomic = self.atomic_access(thread_clocks, access_size)?;
+        let atomic = self.atomic_access(thread_clocks, access_size, /*write*/ false)?;
         atomic.read_vector.set_at_index(&thread_clocks.clock, index);
-        // Make sure the last non-atomic write and all non-atomic reads were before this access.
-        if self.write_was_before(&thread_clocks.clock) && self.read <= thread_clocks.clock {
-            Ok(())
-        } else {
-            Err(DataRace)
-        }
+        // Make sure the last non-atomic write was before this access.
+ if self.write_was_before(&thread_clocks.clock) { Ok(()) } else { Err(DataRace) } } /// Detect data-races with an atomic write, either with a non-atomic read or with @@ -527,7 +537,7 @@ impl MemoryCellClocks { access_size: Size, ) -> Result<(), DataRace> { trace!("Atomic write with vectors: {:#?} :: {:#?}", self, thread_clocks); - let atomic = self.atomic_access(thread_clocks, access_size)?; + let atomic = self.atomic_access(thread_clocks, access_size, /*write*/ true)?; atomic.write_vector.set_at_index(&thread_clocks.clock, index); // Make sure the last non-atomic write and all non-atomic reads were before this access. if self.write_was_before(&thread_clocks.clock) && self.read <= thread_clocks.clock { @@ -552,11 +562,9 @@ impl MemoryCellClocks { } thread_clocks.clock.index_mut(index).set_read_type(read_type); if self.write_was_before(&thread_clocks.clock) { + // We must be ordered-after all atomic writes. let race_free = if let Some(atomic) = self.atomic() { - // We must be ordered-after all atomic accesses, reads and writes. - // This ensures we don't mix atomic and non-atomic accesses. atomic.write_vector <= thread_clocks.clock - && atomic.read_vector <= thread_clocks.clock } else { true }; @@ -957,9 +965,7 @@ impl VClockAlloc { let mut other_size = None; // if `Some`, this was a size-mismatch race let write_clock; let (other_access, other_thread, other_clock) = - // First check the atomic-nonatomic cases. If it looks like multiple - // cases apply, this one should take precedence, else it might look like - // we are reporting races between two non-atomic reads. + // First check the atomic-nonatomic cases. if !access.is_atomic() && let Some(atomic) = mem_clocks.atomic() && let Some(idx) = Self::find_gt_index(&atomic.write_vector, &active_clocks.clock) @@ -977,10 +983,10 @@ impl VClockAlloc { } else if let Some(idx) = Self::find_gt_index(&mem_clocks.read, &active_clocks.clock) { (AccessType::NaRead(mem_clocks.read[idx].read_type()), idx, &mem_clocks.read) // Finally, mixed-size races. - } else if access.is_atomic() && let Some(atomic) = mem_clocks.atomic() && atomic.size != access_size { + } else if access.is_atomic() && let Some(atomic) = mem_clocks.atomic() && atomic.size != Some(access_size) { // This is only a race if we are not synchronized with all atomic accesses, so find // the one we are not synchronized with. - other_size = Some(atomic.size); + other_size = Some(atomic.size.unwrap_or(Size::ZERO)); if let Some(idx) = Self::find_gt_index(&atomic.write_vector, &active_clocks.clock) { (AccessType::AtomicStore, idx, &atomic.write_vector) @@ -1007,10 +1013,7 @@ impl VClockAlloc { assert!(!involves_non_atomic); Some("overlapping unsynchronized atomic accesses must use the same access size") } else if access.is_read() && other_access.is_read() { - assert!(involves_non_atomic); - Some( - "overlapping atomic and non-atomic accesses must be synchronized, even if both are read-only", - ) + panic!("there should be no same-size read-read races") } else { None }; diff --git a/src/tools/miri/tests/fail/data_race/mixed_size_read.rs b/src/tools/miri/tests/fail/data_race/mixed_size_read.rs deleted file mode 100644 index 828b47f0a6576..0000000000000 --- a/src/tools/miri/tests/fail/data_race/mixed_size_read.rs +++ /dev/null @@ -1,28 +0,0 @@ -//@compile-flags: -Zmiri-preemption-rate=0.0 -Zmiri-disable-weak-memory-emulation -// Avoid accidental synchronization via address reuse inside `thread::spawn`. 
-//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 - -use std::sync::atomic::{AtomicU8, AtomicU16, Ordering}; -use std::thread; - -fn convert(a: &AtomicU16) -> &[AtomicU8; 2] { - unsafe { std::mem::transmute(a) } -} - -// We can't allow mixed-size accesses; they are not possible in C++ and even -// Intel says you shouldn't do it. -fn main() { - let a = AtomicU16::new(0); - let a16 = &a; - let a8 = convert(a16); - - thread::scope(|s| { - s.spawn(|| { - a16.load(Ordering::SeqCst); - }); - s.spawn(|| { - a8[0].load(Ordering::SeqCst); - //~^ ERROR: Race condition detected between (1) 2-byte atomic load on thread `unnamed-1` and (2) 1-byte atomic load on thread `unnamed-2` - }); - }); -} diff --git a/src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.match_first_load.stderr b/src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.match_first_load.stderr new file mode 100644 index 0000000000000..b829627c00abc --- /dev/null +++ b/src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.match_first_load.stderr @@ -0,0 +1,22 @@ +error: Undefined Behavior: Race condition detected between (1) multiple differently-sized atomic loads, including one load on thread `unnamed-ID` and (2) 2-byte atomic store on thread `unnamed-ID` at ALLOC. (2) just happened here + --> tests/fail/data_race/mixed_size_read_read_write.rs:LL:CC + | +LL | a16.store(0, Ordering::SeqCst); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Race condition detected between (1) multiple differently-sized atomic loads, including one load on thread `unnamed-ID` and (2) 2-byte atomic store on thread `unnamed-ID` at ALLOC. (2) just happened here + | +help: and (1) occurred earlier here + --> tests/fail/data_race/mixed_size_read_read_write.rs:LL:CC + | +LL | a16.load(Ordering::SeqCst); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ + = help: overlapping unsynchronized atomic accesses must use the same access size + = help: see https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#memory-model-for-atomic-accesses for more information about the Rust memory model + = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior + = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information + = note: BACKTRACE (of the first span) on thread `unnamed-ID`: + = note: inside closure at tests/fail/data_race/mixed_size_read_read_write.rs:LL:CC + +note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace + +error: aborting due to 1 previous error + diff --git a/src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.match_second_load.stderr b/src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.match_second_load.stderr new file mode 100644 index 0000000000000..6bc38b14cb2ec --- /dev/null +++ b/src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.match_second_load.stderr @@ -0,0 +1,22 @@ +error: Undefined Behavior: Race condition detected between (1) multiple differently-sized atomic loads, including one load on thread `unnamed-ID` and (2) 1-byte atomic store on thread `unnamed-ID` at ALLOC. (2) just happened here + --> tests/fail/data_race/mixed_size_read_read_write.rs:LL:CC + | +LL | a8[0].store(0, Ordering::SeqCst); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Race condition detected between (1) multiple differently-sized atomic loads, including one load on thread `unnamed-ID` and (2) 1-byte atomic store on thread `unnamed-ID` at ALLOC. 
(2) just happened here + | +help: and (1) occurred earlier here + --> tests/fail/data_race/mixed_size_read_read_write.rs:LL:CC + | +LL | a16.load(Ordering::SeqCst); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ + = help: overlapping unsynchronized atomic accesses must use the same access size + = help: see https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#memory-model-for-atomic-accesses for more information about the Rust memory model + = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior + = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information + = note: BACKTRACE (of the first span) on thread `unnamed-ID`: + = note: inside closure at tests/fail/data_race/mixed_size_read_read_write.rs:LL:CC + +note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace + +error: aborting due to 1 previous error + diff --git a/src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.rs b/src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.rs new file mode 100644 index 0000000000000..ece5bd31274de --- /dev/null +++ b/src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.rs @@ -0,0 +1,39 @@ +//@compile-flags: -Zmiri-preemption-rate=0.0 -Zmiri-disable-weak-memory-emulation +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +// Two variants: the atomic store matches the size of the first or second atomic load. +//@revisions: match_first_load match_second_load + +use std::sync::atomic::{AtomicU16, AtomicU8, Ordering}; +use std::thread; + +fn convert(a: &AtomicU16) -> &[AtomicU8; 2] { + unsafe { std::mem::transmute(a) } +} + +// We can't allow mixed-size accesses; they are not possible in C++ and even +// Intel says you shouldn't do it. +fn main() { + let a = AtomicU16::new(0); + let a16 = &a; + let a8 = convert(a16); + + thread::scope(|s| { + s.spawn(|| { + a16.load(Ordering::SeqCst); + }); + s.spawn(|| { + a8[0].load(Ordering::SeqCst); + }); + s.spawn(|| { + thread::yield_now(); // make sure this happens last + if cfg!(match_first_load) { + a16.store(0, Ordering::SeqCst); + //~[match_first_load]^ ERROR: Race condition detected between (1) multiple differently-sized atomic loads, including one load on thread `unnamed-1` and (2) 2-byte atomic store on thread `unnamed-3` + } else { + a8[0].store(0, Ordering::SeqCst); + //~[match_second_load]^ ERROR: Race condition detected between (1) multiple differently-sized atomic loads, including one load on thread `unnamed-1` and (2) 1-byte atomic store on thread `unnamed-3` + } + }); + }); +} diff --git a/src/tools/miri/tests/fail/data_race/mixed_size_read_write.read_write.stderr b/src/tools/miri/tests/fail/data_race/mixed_size_read_write.read_write.stderr new file mode 100644 index 0000000000000..6f8dbd38ca841 --- /dev/null +++ b/src/tools/miri/tests/fail/data_race/mixed_size_read_write.read_write.stderr @@ -0,0 +1,22 @@ +error: Undefined Behavior: Race condition detected between (1) 1-byte atomic load on thread `unnamed-ID` and (2) 2-byte atomic store on thread `unnamed-ID` at ALLOC. (2) just happened here + --> tests/fail/data_race/mixed_size_read_write.rs:LL:CC + | +LL | a16.store(1, Ordering::SeqCst); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Race condition detected between (1) 1-byte atomic load on thread `unnamed-ID` and (2) 2-byte atomic store on thread `unnamed-ID` at ALLOC. 
(2) just happened here + | +help: and (1) occurred earlier here + --> tests/fail/data_race/mixed_size_read_write.rs:LL:CC + | +LL | a8[0].load(Ordering::SeqCst); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = help: overlapping unsynchronized atomic accesses must use the same access size + = help: see https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#memory-model-for-atomic-accesses for more information about the Rust memory model + = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior + = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information + = note: BACKTRACE (of the first span) on thread `unnamed-ID`: + = note: inside closure at tests/fail/data_race/mixed_size_read_write.rs:LL:CC + +note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace + +error: aborting due to 1 previous error + diff --git a/src/tools/miri/tests/fail/data_race/mixed_size_read_write.rs b/src/tools/miri/tests/fail/data_race/mixed_size_read_write.rs new file mode 100644 index 0000000000000..acc12427b3fdc --- /dev/null +++ b/src/tools/miri/tests/fail/data_race/mixed_size_read_write.rs @@ -0,0 +1,39 @@ +//@compile-flags: -Zmiri-preemption-rate=0.0 -Zmiri-disable-weak-memory-emulation +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +// Two revisions, depending on which access goes first. +//@revisions: read_write write_read + +use std::sync::atomic::{AtomicU16, AtomicU8, Ordering}; +use std::thread; + +fn convert(a: &AtomicU16) -> &[AtomicU8; 2] { + unsafe { std::mem::transmute(a) } +} + +// We can't allow mixed-size accesses; they are not possible in C++ and even +// Intel says you shouldn't do it. +fn main() { + let a = AtomicU16::new(0); + let a16 = &a; + let a8 = convert(a16); + + thread::scope(|s| { + s.spawn(|| { + if cfg!(read_write) { + // Let the other one go first. + thread::yield_now(); + } + a16.store(1, Ordering::SeqCst); + //~[read_write]^ ERROR: Race condition detected between (1) 1-byte atomic load on thread `unnamed-2` and (2) 2-byte atomic store on thread `unnamed-1` + }); + s.spawn(|| { + if cfg!(write_read) { + // Let the other one go first. + thread::yield_now(); + } + a8[0].load(Ordering::SeqCst); + //~[write_read]^ ERROR: Race condition detected between (1) 2-byte atomic store on thread `unnamed-1` and (2) 1-byte atomic load on thread `unnamed-2` + }); + }); +} diff --git a/src/tools/miri/tests/fail/data_race/mixed_size_read.stderr b/src/tools/miri/tests/fail/data_race/mixed_size_read_write.write_read.stderr similarity index 66% rename from src/tools/miri/tests/fail/data_race/mixed_size_read.stderr rename to src/tools/miri/tests/fail/data_race/mixed_size_read_write.write_read.stderr index 31a798a89b133..990d2058bca97 100644 --- a/src/tools/miri/tests/fail/data_race/mixed_size_read.stderr +++ b/src/tools/miri/tests/fail/data_race/mixed_size_read_write.write_read.stderr @@ -1,20 +1,20 @@ -error: Undefined Behavior: Race condition detected between (1) 2-byte atomic load on thread `unnamed-ID` and (2) 1-byte atomic load on thread `unnamed-ID` at ALLOC. (2) just happened here - --> tests/fail/data_race/mixed_size_read.rs:LL:CC +error: Undefined Behavior: Race condition detected between (1) 2-byte atomic store on thread `unnamed-ID` and (2) 1-byte atomic load on thread `unnamed-ID` at ALLOC. 
(2) just happened here + --> tests/fail/data_race/mixed_size_read_write.rs:LL:CC | LL | a8[0].load(Ordering::SeqCst); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Race condition detected between (1) 2-byte atomic load on thread `unnamed-ID` and (2) 1-byte atomic load on thread `unnamed-ID` at ALLOC. (2) just happened here + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Race condition detected between (1) 2-byte atomic store on thread `unnamed-ID` and (2) 1-byte atomic load on thread `unnamed-ID` at ALLOC. (2) just happened here | help: and (1) occurred earlier here - --> tests/fail/data_race/mixed_size_read.rs:LL:CC + --> tests/fail/data_race/mixed_size_read_write.rs:LL:CC | -LL | a16.load(Ordering::SeqCst); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | a16.store(1, Ordering::SeqCst); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ = help: overlapping unsynchronized atomic accesses must use the same access size = help: see https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#memory-model-for-atomic-accesses for more information about the Rust memory model = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information = note: BACKTRACE (of the first span) on thread `unnamed-ID`: - = note: inside closure at tests/fail/data_race/mixed_size_read.rs:LL:CC + = note: inside closure at tests/fail/data_race/mixed_size_read_write.rs:LL:CC note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace diff --git a/src/tools/miri/tests/fail/data_race/mixed_size_write.rs b/src/tools/miri/tests/fail/data_race/mixed_size_write_write.rs similarity index 100% rename from src/tools/miri/tests/fail/data_race/mixed_size_write.rs rename to src/tools/miri/tests/fail/data_race/mixed_size_write_write.rs diff --git a/src/tools/miri/tests/fail/data_race/mixed_size_write.stderr b/src/tools/miri/tests/fail/data_race/mixed_size_write_write.stderr similarity index 90% rename from src/tools/miri/tests/fail/data_race/mixed_size_write.stderr rename to src/tools/miri/tests/fail/data_race/mixed_size_write_write.stderr index c30b48c1f32b0..1f22413bc5f93 100644 --- a/src/tools/miri/tests/fail/data_race/mixed_size_write.stderr +++ b/src/tools/miri/tests/fail/data_race/mixed_size_write_write.stderr @@ -1,11 +1,11 @@ error: Undefined Behavior: Race condition detected between (1) 2-byte atomic store on thread `unnamed-ID` and (2) 1-byte atomic store on thread `unnamed-ID` at ALLOC. (2) just happened here - --> tests/fail/data_race/mixed_size_write.rs:LL:CC + --> tests/fail/data_race/mixed_size_write_write.rs:LL:CC | LL | a8[0].store(1, Ordering::SeqCst); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Race condition detected between (1) 2-byte atomic store on thread `unnamed-ID` and (2) 1-byte atomic store on thread `unnamed-ID` at ALLOC. 
(2) just happened here | help: and (1) occurred earlier here - --> tests/fail/data_race/mixed_size_write.rs:LL:CC + --> tests/fail/data_race/mixed_size_write_write.rs:LL:CC | LL | a16.store(1, Ordering::SeqCst); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -14,7 +14,7 @@ LL | a16.store(1, Ordering::SeqCst); = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information = note: BACKTRACE (of the first span) on thread `unnamed-ID`: - = note: inside closure at tests/fail/data_race/mixed_size_write.rs:LL:CC + = note: inside closure at tests/fail/data_race/mixed_size_write_write.rs:LL:CC note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace diff --git a/src/tools/miri/tests/fail/data_race/read_read_race1.rs b/src/tools/miri/tests/fail/data_race/read_read_race1.rs deleted file mode 100644 index 02aa4e4b716e3..0000000000000 --- a/src/tools/miri/tests/fail/data_race/read_read_race1.rs +++ /dev/null @@ -1,30 +0,0 @@ -//@compile-flags: -Zmiri-preemption-rate=0.0 -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 - -use std::sync::atomic::{AtomicU16, Ordering}; -use std::thread; - -// Make sure races between atomic and non-atomic reads are detected. -// This seems harmless but C++ does not allow them, so we can't allow them for now either. -// This test coverse the case where the non-atomic access come first. -fn main() { - let a = AtomicU16::new(0); - - thread::scope(|s| { - s.spawn(|| { - let ptr = &a as *const AtomicU16 as *mut u16; - unsafe { ptr.read() }; - }); - s.spawn(|| { - thread::yield_now(); - - // We also put a non-atomic access here, but that should *not* be reported. - let ptr = &a as *const AtomicU16 as *mut u16; - unsafe { ptr.read() }; - // Then do the atomic access. - a.load(Ordering::SeqCst); - //~^ ERROR: Data race detected between (1) non-atomic read on thread `unnamed-1` and (2) atomic load on thread `unnamed-2` - }); - }); -} diff --git a/src/tools/miri/tests/fail/data_race/read_read_race1.stderr b/src/tools/miri/tests/fail/data_race/read_read_race1.stderr deleted file mode 100644 index e97c4a4fdcb37..0000000000000 --- a/src/tools/miri/tests/fail/data_race/read_read_race1.stderr +++ /dev/null @@ -1,22 +0,0 @@ -error: Undefined Behavior: Data race detected between (1) non-atomic read on thread `unnamed-ID` and (2) atomic load on thread `unnamed-ID` at ALLOC. (2) just happened here - --> tests/fail/data_race/read_read_race1.rs:LL:CC - | -LL | a.load(Ordering::SeqCst); - | ^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between (1) non-atomic read on thread `unnamed-ID` and (2) atomic load on thread `unnamed-ID` at ALLOC. 
(2) just happened here - | -help: and (1) occurred earlier here - --> tests/fail/data_race/read_read_race1.rs:LL:CC - | -LL | unsafe { ptr.read() }; - | ^^^^^^^^^^ - = help: overlapping atomic and non-atomic accesses must be synchronized, even if both are read-only - = help: see https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#memory-model-for-atomic-accesses for more information about the Rust memory model - = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior - = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information - = note: BACKTRACE (of the first span) on thread `unnamed-ID`: - = note: inside closure at tests/fail/data_race/read_read_race1.rs:LL:CC - -note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace - -error: aborting due to 1 previous error - diff --git a/src/tools/miri/tests/fail/data_race/read_read_race2.rs b/src/tools/miri/tests/fail/data_race/read_read_race2.rs deleted file mode 100644 index 3b94c9143f3b2..0000000000000 --- a/src/tools/miri/tests/fail/data_race/read_read_race2.rs +++ /dev/null @@ -1,30 +0,0 @@ -//@compile-flags: -Zmiri-preemption-rate=0.0 -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 - -use std::sync::atomic::{AtomicU16, Ordering}; -use std::thread; - -// Make sure races between atomic and non-atomic reads are detected. -// This seems harmless but C++ does not allow them, so we can't allow them for now either. -// This test coverse the case where the atomic access come first. -fn main() { - let a = AtomicU16::new(0); - - thread::scope(|s| { - s.spawn(|| { - // We also put a non-atomic access here, but that should *not* be reported. - let ptr = &a as *const AtomicU16 as *mut u16; - unsafe { ptr.read() }; - // Then do the atomic access. - a.load(Ordering::SeqCst); - }); - s.spawn(|| { - thread::yield_now(); - - let ptr = &a as *const AtomicU16 as *mut u16; - unsafe { ptr.read() }; - //~^ ERROR: Data race detected between (1) atomic load on thread `unnamed-1` and (2) non-atomic read on thread `unnamed-2` - }); - }); -} diff --git a/src/tools/miri/tests/fail/data_race/read_read_race2.stderr b/src/tools/miri/tests/fail/data_race/read_read_race2.stderr deleted file mode 100644 index d64032db7b32c..0000000000000 --- a/src/tools/miri/tests/fail/data_race/read_read_race2.stderr +++ /dev/null @@ -1,22 +0,0 @@ -error: Undefined Behavior: Data race detected between (1) atomic load on thread `unnamed-ID` and (2) non-atomic read on thread `unnamed-ID` at ALLOC. (2) just happened here - --> tests/fail/data_race/read_read_race2.rs:LL:CC - | -LL | unsafe { ptr.read() }; - | ^^^^^^^^^^ Data race detected between (1) atomic load on thread `unnamed-ID` and (2) non-atomic read on thread `unnamed-ID` at ALLOC. 
(2) just happened here - | -help: and (1) occurred earlier here - --> tests/fail/data_race/read_read_race2.rs:LL:CC - | -LL | a.load(Ordering::SeqCst); - | ^^^^^^^^^^^^^^^^^^^^^^^^ - = help: overlapping atomic and non-atomic accesses must be synchronized, even if both are read-only - = help: see https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#memory-model-for-atomic-accesses for more information about the Rust memory model - = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior - = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information - = note: BACKTRACE (of the first span) on thread `unnamed-ID`: - = note: inside closure at tests/fail/data_race/read_read_race2.rs:LL:CC - -note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace - -error: aborting due to 1 previous error - diff --git a/src/tools/miri/tests/fail/weak_memory/racing_mixed_size.rs b/src/tools/miri/tests/fail/weak_memory/racing_mixed_size.rs deleted file mode 100644 index 1193dddc57784..0000000000000 --- a/src/tools/miri/tests/fail/weak_memory/racing_mixed_size.rs +++ /dev/null @@ -1,41 +0,0 @@ -// We want to control preemption here. -// Avoid accidental synchronization via address reuse. -//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-cross-thread-rate=0 - -#![feature(core_intrinsics)] - -use std::ptr; -use std::sync::atomic::AtomicU32; -use std::sync::atomic::Ordering::*; -use std::thread::spawn; - -fn static_atomic_u32(val: u32) -> &'static AtomicU32 { - let ret = Box::leak(Box::new(AtomicU32::new(val))); - ret -} - -fn split_u32_ptr(dword: *const u32) -> *const [u16; 2] { - unsafe { std::mem::transmute::<*const u32, *const [u16; 2]>(dword) } -} - -// Wine's SRWLock implementation does this, which is definitely undefined in C++ memory model -// https://github.com/wine-mirror/wine/blob/303f8042f9db508adaca02ef21f8de4992cb9c03/dlls/ntdll/sync.c#L543-L566 -// It probably works just fine on x86, but Intel does document this as "don't do it!" -pub fn main() { - let x = static_atomic_u32(0); - let j1 = spawn(move || { - x.store(1, Relaxed); - }); - - let j2 = spawn(move || { - let x_ptr = x as *const AtomicU32 as *const u32; - let x_split = split_u32_ptr(x_ptr); - unsafe { - let hi = ptr::addr_of!((*x_split)[0]); - std::intrinsics::atomic_load_relaxed(hi); //~ ERROR: (1) 4-byte atomic store on thread `unnamed-1` and (2) 2-byte atomic load - } - }); - - j1.join().unwrap(); - j2.join().unwrap(); -} diff --git a/src/tools/miri/tests/fail/weak_memory/racing_mixed_size_read.rs b/src/tools/miri/tests/fail/weak_memory/racing_mixed_size_read.rs deleted file mode 100644 index 0a0e372f1f3d2..0000000000000 --- a/src/tools/miri/tests/fail/weak_memory/racing_mixed_size_read.rs +++ /dev/null @@ -1,39 +0,0 @@ -// We want to control preemption here. -// Avoid accidental synchronization via address reuse. 
-//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-cross-thread-rate=0 - -use std::sync::atomic::Ordering::*; -use std::sync::atomic::{AtomicU16, AtomicU32}; -use std::thread::spawn; - -fn static_atomic(val: u32) -> &'static AtomicU32 { - let ret = Box::leak(Box::new(AtomicU32::new(val))); - ret -} - -fn split_u32_ptr(dword: *const u32) -> *const [u16; 2] { - unsafe { std::mem::transmute::<*const u32, *const [u16; 2]>(dword) } -} - -// Racing mixed size reads may cause two loads to read-from -// the same store but observe different values, which doesn't make -// sense under the formal model so we forbid this. -pub fn main() { - let x = static_atomic(0); - - let j1 = spawn(move || { - x.load(Relaxed); - }); - - let j2 = spawn(move || { - let x_ptr = x as *const AtomicU32 as *const u32; - let x_split = split_u32_ptr(x_ptr); - unsafe { - let hi = x_split as *const u16 as *const AtomicU16; - (*hi).load(Relaxed); //~ ERROR: (1) 4-byte atomic load on thread `unnamed-1` and (2) 2-byte atomic load - } - }); - - j1.join().unwrap(); - j2.join().unwrap(); -} diff --git a/src/tools/miri/tests/fail/weak_memory/racing_mixed_size_read.stderr b/src/tools/miri/tests/fail/weak_memory/racing_mixed_size_read.stderr deleted file mode 100644 index 9e6a6e80418cf..0000000000000 --- a/src/tools/miri/tests/fail/weak_memory/racing_mixed_size_read.stderr +++ /dev/null @@ -1,22 +0,0 @@ -error: Undefined Behavior: Race condition detected between (1) 4-byte atomic load on thread `unnamed-ID` and (2) 2-byte atomic load on thread `unnamed-ID` at ALLOC. (2) just happened here - --> tests/fail/weak_memory/racing_mixed_size_read.rs:LL:CC - | -LL | (*hi).load(Relaxed); - | ^^^^^^^^^^^^^^^^^^^ Race condition detected between (1) 4-byte atomic load on thread `unnamed-ID` and (2) 2-byte atomic load on thread `unnamed-ID` at ALLOC. (2) just happened here - | -help: and (1) occurred earlier here - --> tests/fail/weak_memory/racing_mixed_size_read.rs:LL:CC - | -LL | x.load(Relaxed); - | ^^^^^^^^^^^^^^^ - = help: overlapping unsynchronized atomic accesses must use the same access size - = help: see https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#memory-model-for-atomic-accesses for more information about the Rust memory model - = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior - = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information - = note: BACKTRACE (of the first span) on thread `unnamed-ID`: - = note: inside closure at tests/fail/weak_memory/racing_mixed_size_read.rs:LL:CC - -note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace - -error: aborting due to 1 previous error - diff --git a/src/tools/miri/tests/pass/concurrency/data_race.rs b/src/tools/miri/tests/pass/concurrency/data_race.rs index 34380dfa504d5..d16de0ae8e232 100644 --- a/src/tools/miri/tests/pass/concurrency/data_race.rs +++ b/src/tools/miri/tests/pass/concurrency/data_race.rs @@ -1,7 +1,7 @@ //@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 use std::sync::atomic::*; -use std::thread::spawn; +use std::thread::{self, spawn}; #[derive(Copy, Clone)] struct EvilSend(pub T); @@ -143,10 +143,84 @@ fn test_local_variable_lazy_write() { assert_eq!(val, 127); } +// This test coverse the case where the non-atomic access come first. 
+fn test_read_read_race1() {
+    let a = AtomicU16::new(0);
+
+    thread::scope(|s| {
+        s.spawn(|| {
+            let ptr = &a as *const AtomicU16 as *mut u16;
+            unsafe { ptr.read() };
+        });
+        s.spawn(|| {
+            thread::yield_now();
+
+            a.load(Ordering::SeqCst);
+        });
+    });
+}
+
+// This test covers the case where the atomic access comes first.
+fn test_read_read_race2() {
+    let a = AtomicU16::new(0);
+
+    thread::scope(|s| {
+        s.spawn(|| {
+            a.load(Ordering::SeqCst);
+        });
+        s.spawn(|| {
+            thread::yield_now();
+
+            let ptr = &a as *const AtomicU16 as *mut u16;
+            unsafe { ptr.read() };
+        });
+    });
+}
+
+fn mixed_size_read_read() {
+    fn convert(a: &AtomicU16) -> &[AtomicU8; 2] {
+        unsafe { std::mem::transmute(a) }
+    }
+
+    let a = AtomicU16::new(0);
+    let a16 = &a;
+    let a8 = convert(a16);
+
+    // Just two different-sized atomic reads without any writes are fine.
+    thread::scope(|s| {
+        s.spawn(|| {
+            a16.load(Ordering::SeqCst);
+        });
+        s.spawn(|| {
+            a8[0].load(Ordering::SeqCst);
+        });
+    });
+}
+
+fn failing_rmw_is_read() {
+    let a = AtomicUsize::new(0);
+    thread::scope(|s| {
+        s.spawn(|| unsafe {
+            // Non-atomic read.
+            let _val = *(&a as *const AtomicUsize).cast::<usize>();
+        });
+
+        s.spawn(|| {
+            // RMW that will fail.
+            // This is not considered a write, so there is no data race here.
+            a.compare_exchange(1, 2, Ordering::SeqCst, Ordering::SeqCst).unwrap_err();
+        });
+    });
+}
+
 pub fn main() {
     test_fence_sync();
     test_multiple_reads();
     test_rmw_no_block();
     test_simple_release();
     test_local_variable_lazy_write();
+    test_read_read_race1();
+    test_read_read_race2();
+    mixed_size_read_read();
+    failing_rmw_is_read();
 }
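
Note on the `atomic_access` change above: the new size-tracking behavior amounts to a small state machine over `Option<Size>`. The following standalone sketch is illustrative only — the names, the boolean "ordered after" parameters, and the `Outcome` type are simplifications invented for this example; Miri itself decides ordering by comparing the per-location `read_vector`/`write_vector` against the accessing thread's vector clock rather than taking booleans.

// Illustrative sketch only (hypothetical names): a simplified model of the size-tracking
// rule in `MemoryCellClocks::atomic_access`. Miri uses vector clocks, not booleans.
#[derive(Clone, Copy, PartialEq, Eq)]
struct Size(u64);

struct AtomicState {
    /// `Some(s)`: all relevant atomic accesses so far used size `s`.
    /// `None`: several different sizes were seen, but only by reads.
    size: Option<Size>,
}

#[derive(Debug, PartialEq)]
enum Outcome {
    Ok,
    DataRace,
}

/// `ordered_after_all`: this access happens-after every earlier atomic access.
/// `ordered_after_writes`: this access happens-after every earlier atomic write.
fn check_atomic_access(
    state: &mut AtomicState,
    size: Size,
    is_write: bool,
    ordered_after_all: bool,
    ordered_after_writes: bool,
) -> Outcome {
    if state.size == Some(size) {
        // Same size as every earlier access: always allowed.
        Outcome::Ok
    } else if ordered_after_all {
        // Fully synchronized with all earlier accesses: the new size takes over.
        state.size = Some(size);
        Outcome::Ok
    } else if !is_write && ordered_after_writes {
        // A read that is only ordered after earlier writes: mixed-size *loads* are fine.
        state.size = None;
        Outcome::Ok
    } else {
        // Unsynchronized mixed-size access involving a write.
        Outcome::DataRace
    }
}

fn main() {
    let mut state = AtomicState { size: Some(Size(2)) };
    // A 1-byte load ordered only after earlier writes: allowed, sizes are now "mixed".
    assert_eq!(check_atomic_access(&mut state, Size(1), false, false, true), Outcome::Ok);
    assert!(state.size.is_none());
    // An unsynchronized 2-byte store afterwards is a race.
    assert_eq!(check_atomic_access(&mut state, Size(2), true, false, false), Outcome::DataRace);
}

Once `size` collapses to `None`, only loads of differing sizes have been observed; if a conflicting write shows up later, the diagnostic path added above reports it via the `Size::ZERO` sentinel as "multiple differently-sized atomic loads".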