diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs
index f0597f295b3f0..03a4695f9e199 100644
--- a/library/alloc/src/lib.rs
+++ b/library/alloc/src/lib.rs
@@ -124,6 +124,7 @@
 #![feature(extend_one_unchecked)]
 #![feature(fmt_internals)]
 #![feature(fn_traits)]
+#![feature(generic_atomic)]
 #![feature(hasher_prefixfree_extras)]
 #![feature(inplace_iteration)]
 #![feature(iter_advance_by)]
diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs
index 4d4e84bfda587..57b8686c05f27 100644
--- a/library/alloc/src/sync.rs
+++ b/library/alloc/src/sync.rs
@@ -24,8 +24,8 @@ use core::pin::{Pin, PinCoerceUnsized};
 use core::ptr::{self, NonNull};
 #[cfg(not(no_global_oom_handling))]
 use core::slice::from_raw_parts_mut;
-use core::sync::atomic;
 use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
+use core::sync::atomic::{self, Atomic};
 use core::{borrow, fmt, hint};
 
 #[cfg(not(no_global_oom_handling))]
@@ -346,12 +346,12 @@ impl<T: ?Sized> fmt::Debug for Weak<T> {
 // inner types.
 #[repr(C)]
 struct ArcInner<T: ?Sized> {
-    strong: atomic::AtomicUsize,
+    strong: Atomic<usize>,
 
     // the value usize::MAX acts as a sentinel for temporarily "locking" the
     // ability to upgrade weak pointers or downgrade strong ones; this is used
     // to avoid races in `make_mut` and `get_mut`.
-    weak: atomic::AtomicUsize,
+    weak: Atomic<usize>,
 
     data: T,
 }
@@ -2706,8 +2706,8 @@ impl<T: ?Sized> Weak<T> {
 /// Helper type to allow accessing the reference counts without
 /// making any assertions about the data field.
 struct WeakInner<'a> {
-    weak: &'a atomic::AtomicUsize,
-    strong: &'a atomic::AtomicUsize,
+    weak: &'a Atomic<usize>,
+    strong: &'a Atomic<usize>,
 }
 
 impl<T: ?Sized> Weak<T> {
diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
index b06a3bd4487d3..88989e7cc60f2 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
@@ -227,6 +227,100 @@ use crate::cell::UnsafeCell;
 use crate::hint::spin_loop;
 use crate::{fmt, intrinsics};
 
+trait Sealed {}
+
+/// A marker trait for primitive types which can be modified atomically.
+///
+/// This is an implementation detail for <code>[Atomic]\<T></code> which may disappear or be replaced at any time.
+///
+/// # Safety
+///
+/// Types implementing this trait must be primitives that can be modified atomically.
+///
+/// The associated `Self::AtomicInner` type must have the same size and bit validity as `Self`,
+/// but may have a higher alignment requirement, so the following `transmute`s are sound:
+///
+/// - `&mut Self::AtomicInner` as `&mut Self`
+/// - `Self` as `Self::AtomicInner` or the reverse
+#[unstable(
+    feature = "atomic_internals",
+    reason = "implementation detail which may disappear or be replaced at any time",
+    issue = "none"
+)]
+#[allow(private_bounds)]
+pub unsafe trait AtomicPrimitive: Sized + Copy + Sealed {
+    #[doc(hidden)]
+    type AtomicInner: Sized;
+}
+
+macro impl_atomic_primitive(
+    $Atom:ident $(<$T:ident>)? ($Primitive:ty),
+    size($size:literal),
+    align($align:literal) $(,)?
+) {
+    impl $(<$T>)? Sealed for $Primitive {}
+
+    #[unstable(
+        feature = "atomic_internals",
+        reason = "implementation detail which may disappear or be replaced at any time",
+        issue = "none"
+    )]
+    #[cfg(target_has_atomic_load_store = $size)]
+    unsafe impl $(<$T>)? AtomicPrimitive for $Primitive {
+        type AtomicInner = $Atom $(<$T>)?;
+    }
+}
+
+impl_atomic_primitive!(AtomicBool(bool), size("8"), align(1));
+impl_atomic_primitive!(AtomicI8(i8), size("8"), align(1));
+impl_atomic_primitive!(AtomicU8(u8), size("8"), align(1));
+impl_atomic_primitive!(AtomicI16(i16), size("16"), align(2));
+impl_atomic_primitive!(AtomicU16(u16), size("16"), align(2));
+impl_atomic_primitive!(AtomicI32(i32), size("32"), align(4));
+impl_atomic_primitive!(AtomicU32(u32), size("32"), align(4));
+impl_atomic_primitive!(AtomicI64(i64), size("64"), align(8));
+impl_atomic_primitive!(AtomicU64(u64), size("64"), align(8));
+impl_atomic_primitive!(AtomicI128(i128), size("128"), align(16));
+impl_atomic_primitive!(AtomicU128(u128), size("128"), align(16));
+
+#[cfg(target_pointer_width = "16")]
+impl_atomic_primitive!(AtomicIsize(isize), size("ptr"), align(2));
+#[cfg(target_pointer_width = "32")]
+impl_atomic_primitive!(AtomicIsize(isize), size("ptr"), align(4));
+#[cfg(target_pointer_width = "64")]
+impl_atomic_primitive!(AtomicIsize(isize), size("ptr"), align(8));
+
+#[cfg(target_pointer_width = "16")]
+impl_atomic_primitive!(AtomicUsize(usize), size("ptr"), align(2));
+#[cfg(target_pointer_width = "32")]
+impl_atomic_primitive!(AtomicUsize(usize), size("ptr"), align(4));
+#[cfg(target_pointer_width = "64")]
+impl_atomic_primitive!(AtomicUsize(usize), size("ptr"), align(8));
+
+#[cfg(target_pointer_width = "16")]
+impl_atomic_primitive!(AtomicPtr<T>(*mut T), size("ptr"), align(2));
+#[cfg(target_pointer_width = "32")]
+impl_atomic_primitive!(AtomicPtr<T>(*mut T), size("ptr"), align(4));
+#[cfg(target_pointer_width = "64")]
+impl_atomic_primitive!(AtomicPtr<T>(*mut T), size("ptr"), align(8));
+
+/// A memory location which can be safely modified from multiple threads.
+///
+/// This has the same size and bit validity as the underlying type `T`. However,
+/// the alignment of this type is always equal to its size, even on targets where
+/// `T` has alignment less than its size.
+///
+/// For more about the differences between atomic types and non-atomic types as
+/// well as information about the portability of this type, please see the
+/// [module-level documentation].
+///
+/// **Note:** This type is only available on platforms that support atomic loads
+/// and stores of `T`.
+///
+/// [module-level documentation]: crate::sync::atomic
+#[unstable(feature = "generic_atomic", issue = "130539")]
+pub type Atomic<T> = <T as AtomicPrimitive>::AtomicInner;
+
 // Some architectures don't have byte-sized atomics, which results in LLVM
 // emulating them using a LL/SC loop. However for AtomicBool we can take
 // advantage of the fact that it only ever contains 0 or 1 and use atomic OR/AND
diff --git a/library/std/src/alloc.rs b/library/std/src/alloc.rs
index 5d51d6a0c78a8..d60624b652a69 100644
--- a/library/std/src/alloc.rs
+++ b/library/std/src/alloc.rs
@@ -57,7 +57,7 @@
 #![stable(feature = "alloc_module", since = "1.28.0")]
 
 use core::ptr::NonNull;
-use core::sync::atomic::{AtomicPtr, Ordering};
+use core::sync::atomic::{Atomic, AtomicPtr, Ordering};
 use core::{hint, mem, ptr};
 
 #[stable(feature = "alloc_module", since = "1.28.0")]
@@ -287,7 +287,7 @@ unsafe impl Allocator for System {
     }
 }
 
-static HOOK: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut());
+static HOOK: Atomic<*mut ()> = AtomicPtr::new(ptr::null_mut());
 
 /// Registers a custom allocation error hook, replacing any that was previously registered.
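
An editorial aside, not part of the diff: a minimal sketch of what the new alias gives you, assuming a nightly toolchain with the `generic_atomic` feature added above. Because `Atomic<T>` is only a type alias, the existing constructors and method surface are unchanged.

```rust
#![feature(generic_atomic)] // nightly-only; feature name taken from this diff

use std::sync::atomic::{Atomic, AtomicUsize, Ordering};

// `Atomic<usize>` resolves to `AtomicUsize`, so a `static` can be declared with
// the generic name and still be initialized with the concrete constructor.
static COUNTER: Atomic<usize> = AtomicUsize::new(0);

fn main() {
    COUNTER.fetch_add(1, Ordering::Relaxed);

    // The alias can also be used directly; `Atomic<u8>` is just `AtomicU8`.
    let byte: Atomic<u8> = Atomic::<u8>::new(7);
    assert_eq!(byte.load(Ordering::Relaxed), 7);
    assert_eq!(COUNTER.load(Ordering::Relaxed), 1);
}
```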
/// diff --git a/library/std/src/backtrace.rs b/library/std/src/backtrace.rs index 7df9a8a14b00c..8945baaf1e64a 100644 --- a/library/std/src/backtrace.rs +++ b/library/std/src/backtrace.rs @@ -91,8 +91,8 @@ mod tests; use crate::backtrace_rs::{self, BytesOrWideString}; use crate::ffi::c_void; use crate::panic::UnwindSafe; -use crate::sync::atomic::AtomicU8; use crate::sync::atomic::Ordering::Relaxed; +use crate::sync::atomic::{Atomic, AtomicU8}; use crate::sync::LazyLock; use crate::sys::backtrace::{lock, output_filename, set_image_base}; use crate::{env, fmt}; @@ -254,7 +254,7 @@ impl Backtrace { // Cache the result of reading the environment variables to make // backtrace captures speedy, because otherwise reading environment // variables every time can be somewhat slow. - static ENABLED: AtomicU8 = AtomicU8::new(0); + static ENABLED: Atomic = AtomicU8::new(0); match ENABLED.load(Relaxed) { 0 => {} 1 => return false, diff --git a/library/std/src/io/stdio.rs b/library/std/src/io/stdio.rs index 6de069a518e3d..6fbdaeca39743 100644 --- a/library/std/src/io/stdio.rs +++ b/library/std/src/io/stdio.rs @@ -11,7 +11,7 @@ use crate::io::{ self, BorrowedCursor, BufReader, IoSlice, IoSliceMut, LineWriter, Lines, SpecReadByte, }; use crate::panic::{RefUnwindSafe, UnwindSafe}; -use crate::sync::atomic::{AtomicBool, Ordering}; +use crate::sync::atomic::{Atomic, AtomicBool, Ordering}; use crate::sync::{Arc, Mutex, MutexGuard, OnceLock, ReentrantLock, ReentrantLockGuard}; use crate::sys::stdio; use crate::thread::AccessError; @@ -37,7 +37,7 @@ thread_local! { /// have a consistent order between set_output_capture and print_to *within /// the same thread*. Within the same thread, things always have a perfectly /// consistent order. So Ordering::Relaxed is fine. -static OUTPUT_CAPTURE_USED: AtomicBool = AtomicBool::new(false); +static OUTPUT_CAPTURE_USED: Atomic = AtomicBool::new(false); /// A handle to a raw instance of the standard input stream of this process. /// diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs index e4ba5e47613b2..0126a0a8ea6e5 100644 --- a/library/std/src/lib.rs +++ b/library/std/src/lib.rs @@ -334,6 +334,7 @@ #![feature(float_minimum_maximum)] #![feature(float_next_up_down)] #![feature(fmt_internals)] +#![feature(generic_atomic)] #![feature(hasher_prefixfree_extras)] #![feature(hashmap_internals)] #![feature(ip)] diff --git a/library/std/src/os/uefi/env.rs b/library/std/src/os/uefi/env.rs index cf8ae697e389d..ab5406e605c6b 100644 --- a/library/std/src/os/uefi/env.rs +++ b/library/std/src/os/uefi/env.rs @@ -4,13 +4,13 @@ use crate::ffi::c_void; use crate::ptr::NonNull; -use crate::sync::atomic::{AtomicBool, AtomicPtr, Ordering}; +use crate::sync::atomic::{Atomic, AtomicBool, AtomicPtr, Ordering}; -static SYSTEM_TABLE: AtomicPtr = AtomicPtr::new(crate::ptr::null_mut()); -static IMAGE_HANDLE: AtomicPtr = AtomicPtr::new(crate::ptr::null_mut()); +static SYSTEM_TABLE: Atomic<*mut c_void> = AtomicPtr::new(crate::ptr::null_mut()); +static IMAGE_HANDLE: Atomic<*mut c_void> = AtomicPtr::new(crate::ptr::null_mut()); // Flag to check if BootServices are still valid. // Start with assuming that they are not available -static BOOT_SERVICES_FLAG: AtomicBool = AtomicBool::new(false); +static BOOT_SERVICES_FLAG: Atomic = AtomicBool::new(false); /// Initializes the global System Table and Image Handle pointers. 
/// diff --git a/library/std/src/os/xous/services.rs b/library/std/src/os/xous/services.rs index ddf0236f5ad74..51aa4b139f1de 100644 --- a/library/std/src/os/xous/services.rs +++ b/library/std/src/os/xous/services.rs @@ -1,4 +1,4 @@ -use core::sync::atomic::{AtomicU32, Ordering}; +use core::sync::atomic::{Atomic, AtomicU32, Ordering}; use crate::os::xous::ffi::Connection; @@ -106,7 +106,7 @@ pub fn try_connect(name: &str) -> Option { ns::try_connect_with_name(name) } -static NAME_SERVER_CONNECTION: AtomicU32 = AtomicU32::new(0); +static NAME_SERVER_CONNECTION: Atomic = AtomicU32::new(0); /// Returns a `Connection` to the name server. If the name server has not been started, /// then this call will block until the name server has been started. The `Connection` diff --git a/library/std/src/os/xous/services/dns.rs b/library/std/src/os/xous/services/dns.rs index 0288164839360..010e4fd33238c 100644 --- a/library/std/src/os/xous/services/dns.rs +++ b/library/std/src/os/xous/services/dns.rs @@ -17,7 +17,7 @@ impl Into for DnsLendMut { /// Returns a `Connection` to the DNS lookup server. This server is used for /// querying domain name values. pub(crate) fn dns_server() -> Connection { - static DNS_CONNECTION: AtomicU32 = AtomicU32::new(0); + static DNS_CONNECTION: Atomic = AtomicU32::new(0); let cid = DNS_CONNECTION.load(Ordering::Relaxed); if cid != 0 { return cid.into(); diff --git a/library/std/src/os/xous/services/log.rs b/library/std/src/os/xous/services/log.rs index 1661011ca64b1..0b5b103211aa1 100644 --- a/library/std/src/os/xous/services/log.rs +++ b/library/std/src/os/xous/services/log.rs @@ -1,4 +1,4 @@ -use core::sync::atomic::{AtomicU32, Ordering}; +use core::sync::atomic::{Atomic, AtomicU32, Ordering}; use crate::os::xous::ffi::Connection; @@ -64,7 +64,7 @@ impl Into for LogLend { /// running. It is safe to call this multiple times, because the address is /// shared among all threads in a process. pub(crate) fn log_server() -> Connection { - static LOG_SERVER_CONNECTION: AtomicU32 = AtomicU32::new(0); + static LOG_SERVER_CONNECTION: Atomic = AtomicU32::new(0); let cid = LOG_SERVER_CONNECTION.load(Ordering::Relaxed); if cid != 0 { diff --git a/library/std/src/os/xous/services/net.rs b/library/std/src/os/xous/services/net.rs index 83acc7961b377..464945e5f7ef2 100644 --- a/library/std/src/os/xous/services/net.rs +++ b/library/std/src/os/xous/services/net.rs @@ -84,7 +84,7 @@ impl<'a> Into<[usize; 5]> for NetBlockingScalar { /// Returns a `Connection` to the Network server. This server provides all /// OS-level networking functions. pub(crate) fn net_server() -> Connection { - static NET_CONNECTION: AtomicU32 = AtomicU32::new(0); + static NET_CONNECTION: Atomic = AtomicU32::new(0); let cid = NET_CONNECTION.load(Ordering::Relaxed); if cid != 0 { return cid.into(); diff --git a/library/std/src/os/xous/services/systime.rs b/library/std/src/os/xous/services/systime.rs index 079ede7aa86c7..6253a559f37ba 100644 --- a/library/std/src/os/xous/services/systime.rs +++ b/library/std/src/os/xous/services/systime.rs @@ -1,4 +1,4 @@ -use core::sync::atomic::{AtomicU32, Ordering}; +use core::sync::atomic::{Atomic, AtomicU32, Ordering}; use crate::os::xous::ffi::{connect, Connection}; @@ -17,7 +17,7 @@ impl Into<[usize; 5]> for SystimeScalar { /// Returns a `Connection` to the systime server. This server is used for reporting the /// realtime clock. 
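
An editorial aside, not part of the diff: the Xous `*_server()` helpers touched above all share one shape, a `static` connection ID read with `Ordering::Relaxed` and filled in on first use. A self-contained sketch of that pattern on stable Rust, with a hypothetical `connect_to_name_server()` standing in for the real Xous call:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

// Hypothetical stand-in for the Xous name-server lookup; the real helpers treat
// a connection ID of 0 as "not connected yet".
fn connect_to_name_server() -> u32 {
    42
}

/// Returns the cached connection ID, establishing it on first use.
fn server_connection() -> u32 {
    // In the diff this static is now written `Atomic<u32>` instead of `AtomicU32`.
    static CONNECTION: AtomicU32 = AtomicU32::new(0);

    let cid = CONNECTION.load(Ordering::Relaxed);
    if cid != 0 {
        return cid;
    }
    let cid = connect_to_name_server();
    CONNECTION.store(cid, Ordering::Relaxed);
    cid
}

fn main() {
    assert_eq!(server_connection(), server_connection());
}
```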
pub(crate) fn systime_server() -> Connection { - static SYSTIME_SERVER_CONNECTION: AtomicU32 = AtomicU32::new(0); + static SYSTIME_SERVER_CONNECTION: Atomic = AtomicU32::new(0); let cid = SYSTIME_SERVER_CONNECTION.load(Ordering::Relaxed); if cid != 0 { return cid.into(); diff --git a/library/std/src/os/xous/services/ticktimer.rs b/library/std/src/os/xous/services/ticktimer.rs index 66ade6da65cd3..bf51ecde8e5bc 100644 --- a/library/std/src/os/xous/services/ticktimer.rs +++ b/library/std/src/os/xous/services/ticktimer.rs @@ -1,4 +1,4 @@ -use core::sync::atomic::{AtomicU32, Ordering}; +use core::sync::atomic::{Atomic, AtomicU32, Ordering}; use crate::os::xous::ffi::Connection; @@ -31,7 +31,7 @@ impl Into<[usize; 5]> for TicktimerScalar { /// Returns a `Connection` to the ticktimer server. This server is used for synchronization /// primitives such as sleep, Mutex, and Condvar. pub(crate) fn ticktimer_server() -> Connection { - static TICKTIMER_SERVER_CONNECTION: AtomicU32 = AtomicU32::new(0); + static TICKTIMER_SERVER_CONNECTION: Atomic = AtomicU32::new(0); let cid = TICKTIMER_SERVER_CONNECTION.load(Ordering::Relaxed); if cid != 0 { return cid.into(); diff --git a/library/std/src/panic.rs b/library/std/src/panic.rs index 541cf42ab47e6..b936632aa7fbe 100644 --- a/library/std/src/panic.rs +++ b/library/std/src/panic.rs @@ -3,7 +3,7 @@ #![stable(feature = "std_panic", since = "1.9.0")] use crate::any::Any; -use crate::sync::atomic::{AtomicU8, Ordering}; +use crate::sync::atomic::{Atomic, AtomicU8, Ordering}; use crate::sync::{Condvar, Mutex, RwLock}; use crate::thread::Result; use crate::{collections, fmt, panicking}; @@ -456,7 +456,7 @@ impl BacktraceStyle { // that backtrace. // // Internally stores equivalent of an Option. -static SHOULD_CAPTURE: AtomicU8 = AtomicU8::new(0); +static SHOULD_CAPTURE: Atomic = AtomicU8::new(0); /// Configures whether the default panic hook will capture and display a /// backtrace. diff --git a/library/std/src/panicking.rs b/library/std/src/panicking.rs index 336e34d7b95e5..1ae2113a38926 100644 --- a/library/std/src/panicking.rs +++ b/library/std/src/panicking.rs @@ -21,7 +21,7 @@ use crate::any::Any; use crate::io::try_set_output_capture; use crate::mem::{self, ManuallyDrop}; use crate::panic::{BacktraceStyle, PanicHookInfo}; -use crate::sync::atomic::{AtomicBool, Ordering}; +use crate::sync::atomic::{Atomic, AtomicBool, Ordering}; use crate::sync::{PoisonError, RwLock}; use crate::sys::stdio::panic_output; use crate::sys::{backtrace, dbg}; @@ -257,7 +257,7 @@ fn default_hook(info: &PanicHookInfo<'_>) { let mut lock = backtrace::lock(); let _ = writeln!(err, "thread '{name}' panicked at {location}:\n{msg}"); - static FIRST_PANIC: AtomicBool = AtomicBool::new(true); + static FIRST_PANIC: Atomic = AtomicBool::new(true); match backtrace { // SAFETY: we took out a lock just a second ago. @@ -342,7 +342,7 @@ pub mod panic_count { #[unstable(feature = "update_panic_count", issue = "none")] pub mod panic_count { use crate::cell::Cell; - use crate::sync::atomic::{AtomicUsize, Ordering}; + use crate::sync::atomic::{Atomic, AtomicUsize, Ordering}; const ALWAYS_ABORT_FLAG: usize = 1 << (usize::BITS - 1); @@ -384,7 +384,7 @@ pub mod panic_count { // // Stealing a bit is fine because it just amounts to assuming that each // panicking thread consumes at least 2 bytes of address space. 
- static GLOBAL_PANIC_COUNT: AtomicUsize = AtomicUsize::new(0); + static GLOBAL_PANIC_COUNT: Atomic = AtomicUsize::new(0); // Increases the global and local panic count, and returns whether an // immediate abort is required. diff --git a/library/std/src/sync/mpmc/array.rs b/library/std/src/sync/mpmc/array.rs index 34acd9c9a943b..9df57251add84 100644 --- a/library/std/src/sync/mpmc/array.rs +++ b/library/std/src/sync/mpmc/array.rs @@ -16,13 +16,13 @@ use super::waker::SyncWaker; use crate::cell::UnsafeCell; use crate::mem::MaybeUninit; use crate::ptr; -use crate::sync::atomic::{self, AtomicUsize, Ordering}; +use crate::sync::atomic::{self, Atomic, AtomicUsize, Ordering}; use crate::time::Instant; /// A slot in a channel. struct Slot { /// The current stamp. - stamp: AtomicUsize, + stamp: Atomic, /// The message in this slot. Either read out in `read` or dropped through /// `discard_all_messages`. @@ -55,7 +55,7 @@ pub(crate) struct Channel { /// represent the lap. The mark bit in the head is always zero. /// /// Messages are popped from the head of the channel. - head: CachePadded, + head: CachePadded>, /// The tail of the channel. /// @@ -64,7 +64,7 @@ pub(crate) struct Channel { /// represent the lap. The mark bit indicates that the channel is disconnected. /// /// Messages are pushed into the tail of the channel. - tail: CachePadded, + tail: CachePadded>, /// The buffer holding slots. buffer: Box<[Slot]>, diff --git a/library/std/src/sync/mpmc/context.rs b/library/std/src/sync/mpmc/context.rs index 8db3c9896eb77..2ec7fed5bbcb9 100644 --- a/library/std/src/sync/mpmc/context.rs +++ b/library/std/src/sync/mpmc/context.rs @@ -4,7 +4,7 @@ use super::select::Selected; use super::waker::current_thread_id; use crate::cell::Cell; use crate::ptr; -use crate::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; +use crate::sync::atomic::{Atomic, AtomicPtr, AtomicUsize, Ordering}; use crate::sync::Arc; use crate::thread::{self, Thread}; use crate::time::Instant; @@ -19,10 +19,10 @@ pub struct Context { #[derive(Debug)] struct Inner { /// Selected operation. - select: AtomicUsize, + select: Atomic, /// A slot into which another thread may store a pointer to its `Packet`. - packet: AtomicPtr<()>, + packet: Atomic<*mut ()>, /// Thread handle. thread: Thread, diff --git a/library/std/src/sync/mpmc/counter.rs b/library/std/src/sync/mpmc/counter.rs index d1bfe612f536f..efa6af1148334 100644 --- a/library/std/src/sync/mpmc/counter.rs +++ b/library/std/src/sync/mpmc/counter.rs @@ -1,16 +1,16 @@ -use crate::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; +use crate::sync::atomic::{Atomic, AtomicBool, AtomicUsize, Ordering}; use crate::{ops, process}; /// Reference counter internals. struct Counter { /// The number of senders associated with the channel. - senders: AtomicUsize, + senders: Atomic, /// The number of receivers associated with the channel. - receivers: AtomicUsize, + receivers: Atomic, /// Set to `true` if the last sender or the last receiver reference deallocates the channel. - destroy: AtomicBool, + destroy: Atomic, /// The internal channel. 
chan: C, diff --git a/library/std/src/sync/mpmc/list.rs b/library/std/src/sync/mpmc/list.rs index 88a8c75f7c8b9..b21ebed34b853 100644 --- a/library/std/src/sync/mpmc/list.rs +++ b/library/std/src/sync/mpmc/list.rs @@ -9,7 +9,7 @@ use crate::cell::UnsafeCell; use crate::marker::PhantomData; use crate::mem::MaybeUninit; use crate::ptr; -use crate::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering}; +use crate::sync::atomic::{self, Atomic, AtomicPtr, AtomicUsize, Ordering}; use crate::time::Instant; // Bits indicating the state of a slot: @@ -37,7 +37,7 @@ struct Slot { msg: UnsafeCell>, /// The state of the slot. - state: AtomicUsize, + state: Atomic, } impl Slot { @@ -55,7 +55,7 @@ impl Slot { /// Each block in the list can hold up to `BLOCK_CAP` messages. struct Block { /// The next block in the linked list. - next: AtomicPtr>, + next: Atomic<*mut Block>, /// Slots for messages. slots: [Slot; BLOCK_CAP], @@ -65,11 +65,11 @@ impl Block { /// Creates an empty block. fn new() -> Block { // SAFETY: This is safe because: - // [1] `Block::next` (AtomicPtr) may be safely zero initialized. + // [1] `Block::next` (Atomic<*mut _>) may be safely zero initialized. // [2] `Block::slots` (Array) may be safely zero initialized because of [3, 4]. // [3] `Slot::msg` (UnsafeCell) may be safely zero initialized because it // holds a MaybeUninit. - // [4] `Slot::state` (AtomicUsize) may be safely zero initialized. + // [4] `Slot::state` (Atomic) may be safely zero initialized. unsafe { MaybeUninit::zeroed().assume_init() } } @@ -110,10 +110,10 @@ impl Block { #[derive(Debug)] struct Position { /// The index in the channel. - index: AtomicUsize, + index: Atomic, /// The block in the linked list. - block: AtomicPtr>, + block: Atomic<*mut Block>, } /// The token type for the list flavor. diff --git a/library/std/src/sync/mpmc/waker.rs b/library/std/src/sync/mpmc/waker.rs index fb877887f9c9d..703d0a4d72823 100644 --- a/library/std/src/sync/mpmc/waker.rs +++ b/library/std/src/sync/mpmc/waker.rs @@ -3,7 +3,7 @@ use super::context::Context; use super::select::{Operation, Selected}; use crate::ptr; -use crate::sync::atomic::{AtomicBool, Ordering}; +use crate::sync::atomic::{Atomic, AtomicBool, Ordering}; use crate::sync::Mutex; /// Represents a thread blocked on a specific channel operation. @@ -137,7 +137,7 @@ pub(crate) struct SyncWaker { inner: Mutex, /// `true` if the waker is empty. - is_empty: AtomicBool, + is_empty: Atomic, } impl SyncWaker { diff --git a/library/std/src/sync/mpmc/zero.rs b/library/std/src/sync/mpmc/zero.rs index 2b82eeda3d5fb..d6cf01d4f20fd 100644 --- a/library/std/src/sync/mpmc/zero.rs +++ b/library/std/src/sync/mpmc/zero.rs @@ -9,7 +9,7 @@ use super::utils::Backoff; use super::waker::Waker; use crate::cell::UnsafeCell; use crate::marker::PhantomData; -use crate::sync::atomic::{AtomicBool, Ordering}; +use crate::sync::atomic::{Atomic, AtomicBool, Ordering}; use crate::sync::Mutex; use crate::time::Instant; use crate::{fmt, ptr}; @@ -35,7 +35,7 @@ struct Packet { on_stack: bool, /// Equals `true` once the packet is ready for reading or writing. - ready: AtomicBool, + ready: Atomic, /// The message. 
msg: UnsafeCell>, diff --git a/library/std/src/sync/poison.rs b/library/std/src/sync/poison.rs index da66a088e51b1..1f7c2a12977b4 100644 --- a/library/std/src/sync/poison.rs +++ b/library/std/src/sync/poison.rs @@ -1,13 +1,13 @@ use crate::error::Error; use crate::fmt; #[cfg(panic = "unwind")] -use crate::sync::atomic::{AtomicBool, Ordering}; +use crate::sync::atomic::{Atomic, AtomicBool, Ordering}; #[cfg(panic = "unwind")] use crate::thread; pub struct Flag { #[cfg(panic = "unwind")] - failed: AtomicBool, + failed: Atomic, } // Note that the Ordering uses to access the `failed` field of `Flag` below is diff --git a/library/std/src/sys/alloc/sgx.rs b/library/std/src/sys/alloc/sgx.rs index fca9d087e5bfc..c8d4ed2638958 100644 --- a/library/std/src/sys/alloc/sgx.rs +++ b/library/std/src/sys/alloc/sgx.rs @@ -1,6 +1,6 @@ use crate::alloc::{GlobalAlloc, Layout, System}; use crate::ptr; -use crate::sync::atomic::{AtomicBool, Ordering}; +use crate::sync::atomic::{Atomic, AtomicBool, Ordering}; use crate::sys::pal::abi::mem as sgx_mem; use crate::sys::pal::waitqueue::SpinMutex; @@ -20,7 +20,7 @@ struct Sgx; unsafe impl dlmalloc::Allocator for Sgx { /// Allocs system resources fn alloc(&self, _size: usize) -> (*mut u8, usize, u32) { - static INIT: AtomicBool = AtomicBool::new(false); + static INIT: Atomic = AtomicBool::new(false); // No ordering requirement since this function is protected by the global lock. if !INIT.swap(true, Ordering::Relaxed) { diff --git a/library/std/src/sys/alloc/wasm.rs b/library/std/src/sys/alloc/wasm.rs index a308fafc68b15..c39b4db7910ea 100644 --- a/library/std/src/sys/alloc/wasm.rs +++ b/library/std/src/sys/alloc/wasm.rs @@ -60,10 +60,10 @@ unsafe impl GlobalAlloc for System { #[cfg(target_feature = "atomics")] mod lock { - use crate::sync::atomic::AtomicI32; use crate::sync::atomic::Ordering::{Acquire, Release}; + use crate::sync::atomic::{Atomic, AtomicI32}; - static LOCKED: AtomicI32 = AtomicI32::new(0); + static LOCKED: Atomic = AtomicI32::new(0); pub struct DropLock; diff --git a/library/std/src/sys/alloc/windows.rs b/library/std/src/sys/alloc/windows.rs index e91956966aa73..89dc32c6e6782 100644 --- a/library/std/src/sys/alloc/windows.rs +++ b/library/std/src/sys/alloc/windows.rs @@ -3,7 +3,7 @@ use crate::alloc::{GlobalAlloc, Layout, System}; use crate::ffi::c_void; use crate::mem::MaybeUninit; use crate::ptr; -use crate::sync::atomic::{AtomicPtr, Ordering}; +use crate::sync::atomic::{Atomic, AtomicPtr, Ordering}; use crate::sys::c; #[cfg(test)] @@ -83,7 +83,7 @@ windows_targets::link!("kernel32.dll" "system" fn HeapFree(hheap: c::HANDLE, dwf // Cached handle to the default heap of the current process. // Either a non-null handle returned by `GetProcessHeap`, or null when not yet initialized or `GetProcessHeap` failed. -static HEAP: AtomicPtr = AtomicPtr::new(ptr::null_mut()); +static HEAP: Atomic<*mut c_void> = AtomicPtr::new(ptr::null_mut()); // Get a handle to the default heap of the current process, or null if the operation fails. 
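
An editorial aside, not part of the diff: the `HEAP` static above is another recurring shape, a pointer-sized handle cached in an atomic where null means "not initialized yet". A stable-Rust sketch of that caching idea, with a hypothetical `acquire_handle()` in place of the real `GetProcessHeap` call:

```rust
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

// Hypothetical stand-in for an OS call that hands out a process-wide handle.
fn acquire_handle() -> *mut u8 {
    Box::into_raw(Box::new(0u8))
}

// Null means "not initialized"; any other value is the cached handle.
// In the diff this kind of static is now spelled `Atomic<*mut c_void>`.
static HANDLE: AtomicPtr<u8> = AtomicPtr::new(ptr::null_mut());

fn handle() -> *mut u8 {
    let cached = HANDLE.load(Ordering::Acquire);
    if !cached.is_null() {
        return cached;
    }
    let new = acquire_handle();
    // If another thread initialized the handle first, use its value
    // (a real implementation would also release the handle we just acquired).
    match HANDLE.compare_exchange(ptr::null_mut(), new, Ordering::Release, Ordering::Acquire) {
        Ok(_) => new,
        Err(existing) => existing,
    }
}

fn main() {
    assert_eq!(handle(), handle());
}
```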
// If this operation is successful, `HEAP` will be successfully initialized and contain diff --git a/library/std/src/sys/alloc/xous.rs b/library/std/src/sys/alloc/xous.rs index 9ea43445d0206..1525e226645ee 100644 --- a/library/std/src/sys/alloc/xous.rs +++ b/library/std/src/sys/alloc/xous.rs @@ -46,10 +46,10 @@ unsafe impl GlobalAlloc for System { } mod lock { - use crate::sync::atomic::AtomicI32; use crate::sync::atomic::Ordering::{Acquire, Release}; + use crate::sync::atomic::{Atomic, AtomicI32}; - static LOCKED: AtomicI32 = AtomicI32::new(0); + static LOCKED: Atomic = AtomicI32::new(0); pub struct DropLock; diff --git a/library/std/src/sys/pal/hermit/args.rs b/library/std/src/sys/pal/hermit/args.rs index 51afe3434aedc..d40bc84c8341b 100644 --- a/library/std/src/sys/pal/hermit/args.rs +++ b/library/std/src/sys/pal/hermit/args.rs @@ -1,11 +1,11 @@ use crate::ffi::{c_char, CStr, OsString}; use crate::os::hermit::ffi::OsStringExt; use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release}; -use crate::sync::atomic::{AtomicIsize, AtomicPtr}; +use crate::sync::atomic::{Atomic, AtomicIsize, AtomicPtr}; use crate::{fmt, ptr, vec}; -static ARGC: AtomicIsize = AtomicIsize::new(0); -static ARGV: AtomicPtr<*const u8> = AtomicPtr::new(ptr::null_mut()); +static ARGC: Atomic = AtomicIsize::new(0); +static ARGV: Atomic<*mut *const u8> = AtomicPtr::new(ptr::null_mut()); /// One-time global initialization. pub unsafe fn init(argc: isize, argv: *const *const u8) { diff --git a/library/std/src/sys/pal/hermit/futex.rs b/library/std/src/sys/pal/hermit/futex.rs index 21c5facd52fbd..3033d7e4723a6 100644 --- a/library/std/src/sys/pal/hermit/futex.rs +++ b/library/std/src/sys/pal/hermit/futex.rs @@ -1,14 +1,14 @@ use super::hermit_abi; use crate::ptr::null; -use crate::sync::atomic::AtomicU32; +use crate::sync::atomic::Atomic; use crate::time::Duration; /// An atomic for use as a futex that is at least 8-bits but may be larger. -pub type SmallAtomic = AtomicU32; +pub type SmallAtomic = Atomic; /// Must be the underlying type of SmallAtomic pub type SmallPrimitive = u32; -pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option) -> bool { +pub fn futex_wait(futex: &Atomic, expected: u32, timeout: Option) -> bool { // Calculate the timeout as a relative timespec. // // Overflows are rounded up to an infinite timeout (None). @@ -32,12 +32,12 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option) - } #[inline] -pub fn futex_wake(futex: &AtomicU32) -> bool { +pub fn futex_wake(futex: &Atomic) -> bool { unsafe { hermit_abi::futex_wake(futex.as_ptr(), 1) > 0 } } #[inline] -pub fn futex_wake_all(futex: &AtomicU32) { +pub fn futex_wake_all(futex: &Atomic) { unsafe { hermit_abi::futex_wake(futex.as_ptr(), i32::MAX); } diff --git a/library/std/src/sys/pal/itron/spin.rs b/library/std/src/sys/pal/itron/spin.rs index 6a9a7c72deb7d..bc4f83260bbd0 100644 --- a/library/std/src/sys/pal/itron/spin.rs +++ b/library/std/src/sys/pal/itron/spin.rs @@ -1,12 +1,12 @@ use super::abi; use crate::cell::UnsafeCell; use crate::mem::MaybeUninit; -use crate::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; +use crate::sync::atomic::{Atomic, AtomicBool, AtomicUsize, Ordering}; /// A mutex implemented by `dis_dsp` (for intra-core synchronization) and a /// spinlock (for inter-core synchronization). pub struct SpinMutex { - locked: AtomicBool, + locked: Atomic, data: UnsafeCell, } @@ -19,7 +19,7 @@ impl SpinMutex { /// Acquire a lock. 
#[inline] pub fn with_locked(&self, f: impl FnOnce(&mut T) -> R) -> R { - struct SpinMutexGuard<'a>(&'a AtomicBool); + struct SpinMutexGuard<'a>(&'a Atomic); impl Drop for SpinMutexGuard<'_> { #[inline] @@ -50,7 +50,7 @@ impl SpinMutex { /// It's assumed that `0` is not a valid ID, and all kernel /// object IDs fall into range `1..=usize::MAX`. pub struct SpinIdOnceCell { - id: AtomicUsize, + id: Atomic, spin: SpinMutex<()>, extra: UnsafeCell>, } diff --git a/library/std/src/sys/pal/itron/thread.rs b/library/std/src/sys/pal/itron/thread.rs index 01e69afa99e13..a734ad7f34ed2 100644 --- a/library/std/src/sys/pal/itron/thread.rs +++ b/library/std/src/sys/pal/itron/thread.rs @@ -9,7 +9,7 @@ use crate::ffi::CStr; use crate::mem::ManuallyDrop; use crate::num::NonZero; use crate::ptr::NonNull; -use crate::sync::atomic::{AtomicUsize, Ordering}; +use crate::sync::atomic::{Atomic, AtomicUsize, Ordering}; use crate::time::Duration; use crate::{hint, io}; @@ -64,7 +64,7 @@ struct ThreadInner { /// '--> JOIN_FINALIZE ---' /// (-1) /// - lifecycle: AtomicUsize, + lifecycle: Atomic, } // Safety: The only `!Sync` field, `ThreadInner::start`, is only touched by diff --git a/library/std/src/sys/pal/sgx/abi/mod.rs b/library/std/src/sys/pal/sgx/abi/mod.rs index d8836452e7555..9bec03752c487 100644 --- a/library/std/src/sys/pal/sgx/abi/mod.rs +++ b/library/std/src/sys/pal/sgx/abi/mod.rs @@ -1,7 +1,7 @@ #![cfg_attr(test, allow(unused))] // RT initialization logic is not compiled for test use core::arch::global_asm; -use core::sync::atomic::{AtomicUsize, Ordering}; +use core::sync::atomic::{Atomic, AtomicUsize, Ordering}; use crate::io::Write; @@ -31,7 +31,7 @@ unsafe extern "C" fn tcs_init(secondary: bool) { const BUSY: usize = 1; const DONE: usize = 2; // Three-state spin-lock - static RELOC_STATE: AtomicUsize = AtomicUsize::new(UNINIT); + static RELOC_STATE: Atomic = AtomicUsize::new(UNINIT); if secondary && RELOC_STATE.load(Ordering::Relaxed) != DONE { rtabort!("Entered secondary TCS before main TCS!") diff --git a/library/std/src/sys/pal/sgx/abi/tls/mod.rs b/library/std/src/sys/pal/sgx/abi/tls/mod.rs index 34fc2f20d2214..2885a4bc141dc 100644 --- a/library/std/src/sys/pal/sgx/abi/tls/mod.rs +++ b/library/std/src/sys/pal/sgx/abi/tls/mod.rs @@ -3,7 +3,7 @@ mod sync_bitset; use self::sync_bitset::*; use crate::cell::Cell; use crate::num::NonZero; -use crate::sync::atomic::{AtomicUsize, Ordering}; +use crate::sync::atomic::{Atomic, AtomicUsize, Ordering}; use crate::{mem, ptr}; #[cfg(target_pointer_width = "64")] @@ -14,13 +14,9 @@ const TLS_KEYS_BITSET_SIZE: usize = (TLS_KEYS + (USIZE_BITS - 1)) / USIZE_BITS; #[cfg_attr(test, linkage = "available_externally")] #[export_name = "_ZN16__rust_internals3std3sys3sgx3abi3tls14TLS_KEY_IN_USEE"] static TLS_KEY_IN_USE: SyncBitset = SYNC_BITSET_INIT; -macro_rules! 
dup { - ((* $($exp:tt)*) $($val:tt)*) => (dup!( ($($exp)*) $($val)* $($val)* )); - (() $($val:tt)*) => ([$($val),*]) -} #[cfg_attr(test, linkage = "available_externally")] #[export_name = "_ZN16__rust_internals3std3sys3sgx3abi3tls14TLS_DESTRUCTORE"] -static TLS_DESTRUCTOR: [AtomicUsize; TLS_KEYS] = dup!((* * * * * * *) (AtomicUsize::new(0))); +static TLS_DESTRUCTOR: [Atomic; TLS_KEYS] = [const { AtomicUsize::new(0) }; TLS_KEYS]; extern "C" { fn get_tls_ptr() -> *const u8; @@ -82,7 +78,7 @@ impl<'a> Drop for ActiveTls<'a> { impl Tls { pub fn new() -> Tls { - Tls { data: dup!((* * * * * * *) (Cell::new(ptr::null_mut()))) } + Tls { data: [const { Cell::new(ptr::null_mut()) }; TLS_KEYS] } } pub unsafe fn activate(&self) -> ActiveTls<'_> { diff --git a/library/std/src/sys/pal/sgx/abi/tls/sync_bitset.rs b/library/std/src/sys/pal/sgx/abi/tls/sync_bitset.rs index 4eeff8f6ef773..9087168589aa5 100644 --- a/library/std/src/sys/pal/sgx/abi/tls/sync_bitset.rs +++ b/library/std/src/sys/pal/sgx/abi/tls/sync_bitset.rs @@ -4,10 +4,10 @@ mod tests; use super::{TLS_KEYS_BITSET_SIZE, USIZE_BITS}; use crate::iter::{Enumerate, Peekable}; use crate::slice::Iter; -use crate::sync::atomic::{AtomicUsize, Ordering}; +use crate::sync::atomic::{Atomic, AtomicUsize, Ordering}; /// A bitset that can be used synchronously. -pub(super) struct SyncBitset([AtomicUsize; TLS_KEYS_BITSET_SIZE]); +pub(super) struct SyncBitset([Atomic; TLS_KEYS_BITSET_SIZE]); pub(super) const SYNC_BITSET_INIT: SyncBitset = SyncBitset([AtomicUsize::new(0), AtomicUsize::new(0)]); @@ -58,7 +58,7 @@ impl SyncBitset { } pub(super) struct SyncBitsetIter<'a> { - iter: Peekable>>, + iter: Peekable>>>, elem_idx: usize, } diff --git a/library/std/src/sys/pal/sgx/args.rs b/library/std/src/sys/pal/sgx/args.rs index a72a041da6cc9..9890a7c8ed62b 100644 --- a/library/std/src/sys/pal/sgx/args.rs +++ b/library/std/src/sys/pal/sgx/args.rs @@ -1,14 +1,14 @@ use super::abi::usercalls::alloc; use super::abi::usercalls::raw::ByteBuffer; use crate::ffi::OsString; -use crate::sync::atomic::{AtomicUsize, Ordering}; +use crate::sync::atomic::{Atomic, AtomicUsize, Ordering}; use crate::sys::os_str::Buf; use crate::sys_common::FromInner; use crate::{fmt, slice}; #[cfg_attr(test, linkage = "available_externally")] #[export_name = "_ZN16__rust_internals3std3sys3sgx4args4ARGSE"] -static ARGS: AtomicUsize = AtomicUsize::new(0); +static ARGS: Atomic = AtomicUsize::new(0); type ArgsStore = Vec; #[cfg_attr(test, allow(dead_code))] diff --git a/library/std/src/sys/pal/sgx/mod.rs b/library/std/src/sys/pal/sgx/mod.rs index 8d29b2ec6193e..76f186a164658 100644 --- a/library/std/src/sys/pal/sgx/mod.rs +++ b/library/std/src/sys/pal/sgx/mod.rs @@ -6,7 +6,7 @@ #![allow(fuzzy_provenance_casts)] // FIXME: this entire module systematically confuses pointers and integers use crate::io::ErrorKind; -use crate::sync::atomic::{AtomicBool, Ordering}; +use crate::sync::atomic::{Atomic, AtomicBool, Ordering}; pub mod abi; pub mod args; @@ -57,7 +57,7 @@ pub fn unsupported_err() -> crate::io::Error { /// what happens when `SGX_INEFFECTIVE_ERROR` is set to `true`. If it is /// `false`, the behavior is the same as `unsupported`. 
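
One non-mechanical change in the SGX TLS hunk above is worth calling out: the `dup!` helper macro is deleted and the arrays are initialized with inline-const repetition instead. A small stable-Rust sketch of that initializer shape (the `TLS_KEYS` value here is made up for illustration):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Illustrative size only; the real `TLS_KEYS` constant lives in the SGX TLS module.
const TLS_KEYS: usize = 128;

// `[const { ... }; N]` repetition works for non-`Copy` element types such as atomics,
// which is why it can replace the old `dup!` macro.
static TLS_DESTRUCTOR: [AtomicUsize; TLS_KEYS] = [const { AtomicUsize::new(0) }; TLS_KEYS];

fn main() {
    TLS_DESTRUCTOR[3].store(0x1234, Ordering::Relaxed);
    assert_eq!(TLS_DESTRUCTOR[3].load(Ordering::Relaxed), 0x1234);
}
```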
pub fn sgx_ineffective(v: T) -> crate::io::Result { - static SGX_INEFFECTIVE_ERROR: AtomicBool = AtomicBool::new(false); + static SGX_INEFFECTIVE_ERROR: Atomic = AtomicBool::new(false); if SGX_INEFFECTIVE_ERROR.load(Ordering::Relaxed) { Err(crate::io::const_io_error!( ErrorKind::Uncategorized, diff --git a/library/std/src/sys/pal/sgx/os.rs b/library/std/src/sys/pal/sgx/os.rs index 46af710aa396c..53ed8593f23f1 100644 --- a/library/std/src/sys/pal/sgx/os.rs +++ b/library/std/src/sys/pal/sgx/os.rs @@ -5,7 +5,7 @@ use crate::error::Error as StdError; use crate::ffi::{OsStr, OsString}; use crate::marker::PhantomData; use crate::path::{self, PathBuf}; -use crate::sync::atomic::{AtomicUsize, Ordering}; +use crate::sync::atomic::{Atomic, AtomicUsize, Ordering}; use crate::sync::{Mutex, Once}; use crate::sys::{decode_error_kind, sgx_ineffective, unsupported}; use crate::{fmt, io, str, vec}; @@ -75,7 +75,7 @@ pub fn current_exe() -> io::Result { #[cfg_attr(test, linkage = "available_externally")] #[export_name = "_ZN16__rust_internals3std3sys3sgx2os3ENVE"] -static ENV: AtomicUsize = AtomicUsize::new(0); +static ENV: Atomic = AtomicUsize::new(0); #[cfg_attr(test, linkage = "available_externally")] #[export_name = "_ZN16__rust_internals3std3sys3sgx2os8ENV_INITE"] static ENV_INIT: Once = Once::new(); diff --git a/library/std/src/sys/pal/sgx/waitqueue/spin_mutex.rs b/library/std/src/sys/pal/sgx/waitqueue/spin_mutex.rs index f6e851ccaddfa..73c7a101d601d 100644 --- a/library/std/src/sys/pal/sgx/waitqueue/spin_mutex.rs +++ b/library/std/src/sys/pal/sgx/waitqueue/spin_mutex.rs @@ -7,12 +7,12 @@ mod tests; use crate::cell::UnsafeCell; use crate::hint; use crate::ops::{Deref, DerefMut}; -use crate::sync::atomic::{AtomicBool, Ordering}; +use crate::sync::atomic::{Atomic, AtomicBool, Ordering}; #[derive(Default)] pub struct SpinMutex { value: UnsafeCell, - lock: AtomicBool, + lock: Atomic, } unsafe impl Send for SpinMutex {} diff --git a/library/std/src/sys/pal/uefi/helpers.rs b/library/std/src/sys/pal/uefi/helpers.rs index 4031d33ba8066..1f42635ad5a2d 100644 --- a/library/std/src/sys/pal/uefi/helpers.rs +++ b/library/std/src/sys/pal/uefi/helpers.rs @@ -20,7 +20,7 @@ use crate::os::uefi::ffi::{OsStrExt, OsStringExt}; use crate::os::uefi::{self}; use crate::ptr::NonNull; use crate::slice; -use crate::sync::atomic::{AtomicPtr, Ordering}; +use crate::sync::atomic::{Atomic, AtomicPtr, Ordering}; use crate::sys_common::wstr::WStrUnits; type BootInstallMultipleProtocolInterfaces = @@ -198,7 +198,7 @@ pub(crate) fn device_path_to_text(path: NonNull) -> io::R Ok(path) } - static LAST_VALID_HANDLE: AtomicPtr = + static LAST_VALID_HANDLE: Atomic<*mut crate::ffi::c_void> = AtomicPtr::new(crate::ptr::null_mut()); if let Some(handle) = NonNull::new(LAST_VALID_HANDLE.load(Ordering::Acquire)) { diff --git a/library/std/src/sys/pal/uefi/mod.rs b/library/std/src/sys/pal/uefi/mod.rs index ac22f4ded8855..24b103fcc0d1e 100644 --- a/library/std/src/sys/pal/uefi/mod.rs +++ b/library/std/src/sys/pal/uefi/mod.rs @@ -38,9 +38,9 @@ pub type RawOsError = usize; use crate::io as std_io; use crate::os::uefi; use crate::ptr::NonNull; -use crate::sync::atomic::{AtomicPtr, Ordering}; +use crate::sync::atomic::{Atomic, AtomicPtr, Ordering}; -static EXIT_BOOT_SERVICE_EVENT: AtomicPtr = +static EXIT_BOOT_SERVICE_EVENT: Atomic<*mut crate::ffi::c_void> = AtomicPtr::new(crate::ptr::null_mut()); /// # SAFETY diff --git a/library/std/src/sys/pal/uefi/time.rs b/library/std/src/sys/pal/uefi/time.rs index 495ff2dc930ed..48e771d1b05d5 100644 --- 
a/library/std/src/sys/pal/uefi/time.rs +++ b/library/std/src/sys/pal/uefi/time.rs @@ -121,7 +121,7 @@ pub(crate) mod instant_internal { use super::*; use crate::mem::MaybeUninit; use crate::ptr::NonNull; - use crate::sync::atomic::{AtomicPtr, Ordering}; + use crate::sync::atomic::{Atomic, AtomicPtr, Ordering}; use crate::sys_common::mul_div_u64; const NS_PER_SEC: u64 = 1_000_000_000; @@ -142,7 +142,7 @@ pub(crate) mod instant_internal { Some(mul_div_u64(ts, NS_PER_SEC, freq)) } - static LAST_VALID_HANDLE: AtomicPtr = + static LAST_VALID_HANDLE: Atomic<*mut crate::ffi::c_void> = AtomicPtr::new(crate::ptr::null_mut()); if let Some(handle) = NonNull::new(LAST_VALID_HANDLE.load(Ordering::Acquire)) { diff --git a/library/std/src/sys/pal/unix/args.rs b/library/std/src/sys/pal/unix/args.rs index a943e3a581a83..fcda7b02fe2a9 100644 --- a/library/std/src/sys/pal/unix/args.rs +++ b/library/std/src/sys/pal/unix/args.rs @@ -117,7 +117,7 @@ impl DoubleEndedIterator for Args { mod imp { use crate::ffi::c_char; use crate::ptr; - use crate::sync::atomic::{AtomicIsize, AtomicPtr, Ordering}; + use crate::sync::atomic::{Atomic, AtomicIsize, AtomicPtr, Ordering}; // The system-provided argc and argv, which we store in static memory // here so that we can defer the work of parsing them until its actually @@ -125,8 +125,8 @@ mod imp { // // Note that we never mutate argv/argc, the argv array, or the argv // strings, which allows the code in this file to be very simple. - static ARGC: AtomicIsize = AtomicIsize::new(0); - static ARGV: AtomicPtr<*const u8> = AtomicPtr::new(ptr::null_mut()); + static ARGC: Atomic = AtomicIsize::new(0); + static ARGV: Atomic<*mut *const u8> = AtomicPtr::new(ptr::null_mut()); unsafe fn really_init(argc: isize, argv: *const *const u8) { // These don't need to be ordered with each other or other stores, diff --git a/library/std/src/sys/pal/unix/fs.rs b/library/std/src/sys/pal/unix/fs.rs index d09cee2e89c5b..39f99e5692089 100644 --- a/library/std/src/sys/pal/unix/fs.rs +++ b/library/std/src/sys/pal/unix/fs.rs @@ -149,14 +149,14 @@ cfg_has_statx! {{ flags: i32, mask: u32, ) -> Option> { - use crate::sync::atomic::{AtomicU8, Ordering}; + use crate::sync::atomic::{Atomic, AtomicU8, Ordering}; // Linux kernel prior to 4.11 or glibc prior to glibc 2.28 don't support `statx`. // We check for it on first failure and remember availability to avoid having to // do it again. #[repr(u8)] enum STATX_STATE{ Unknown = 0, Present, Unavailable } - static STATX_SAVED_STATE: AtomicU8 = AtomicU8::new(STATX_STATE::Unknown as u8); + static STATX_SAVED_STATE: Atomic = AtomicU8::new(STATX_STATE::Unknown as u8); syscall! { fn statx( diff --git a/library/std/src/sys/pal/unix/futex.rs b/library/std/src/sys/pal/unix/futex.rs index cc725045c4810..294ae974f8553 100644 --- a/library/std/src/sys/pal/unix/futex.rs +++ b/library/std/src/sys/pal/unix/futex.rs @@ -8,7 +8,7 @@ target_os = "fuchsia", ))] -use crate::sync::atomic::AtomicU32; +use crate::sync::atomic::{Atomic, AtomicU32}; use crate::time::Duration; /// An atomic for use as a futex that is at least 8-bits but may be larger. @@ -22,7 +22,7 @@ pub type SmallPrimitive = u32; /// /// Returns false on timeout, and true in all other cases. 
#[cfg(any(target_os = "linux", target_os = "android", target_os = "freebsd"))] -pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option) -> bool { +pub fn futex_wait(futex: &Atomic, expected: u32, timeout: Option) -> bool { use super::time::Timespec; use crate::ptr::null; use crate::sync::atomic::Ordering::Relaxed; @@ -55,7 +55,7 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option) - let umtx_timeout_ptr = umtx_timeout.as_ref().map_or(null(), |t| t as *const _); let umtx_timeout_size = umtx_timeout.as_ref().map_or(0, |t| crate::mem::size_of_val(t)); libc::_umtx_op( - futex as *const AtomicU32 as *mut _, + futex as *const Atomic as *mut _, libc::UMTX_OP_WAIT_UINT_PRIVATE, expected as libc::c_ulong, crate::ptr::without_provenance_mut(umtx_timeout_size), @@ -66,7 +66,7 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option) - // absolute time rather than a relative time. libc::syscall( libc::SYS_futex, - futex as *const AtomicU32, + futex as *const Atomic, libc::FUTEX_WAIT_BITSET | libc::FUTEX_PRIVATE_FLAG, expected, timespec.as_ref().map_or(null(), |t| t as *const libc::timespec), @@ -94,16 +94,16 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option) - /// /// On some platforms, this always returns false. #[cfg(any(target_os = "linux", target_os = "android"))] -pub fn futex_wake(futex: &AtomicU32) -> bool { - let ptr = futex as *const AtomicU32; +pub fn futex_wake(futex: &Atomic) -> bool { + let ptr = futex as *const Atomic; let op = libc::FUTEX_WAKE | libc::FUTEX_PRIVATE_FLAG; unsafe { libc::syscall(libc::SYS_futex, ptr, op, 1) > 0 } } /// Wakes up all threads that are waiting on `futex_wait` on this futex. #[cfg(any(target_os = "linux", target_os = "android"))] -pub fn futex_wake_all(futex: &AtomicU32) { - let ptr = futex as *const AtomicU32; +pub fn futex_wake_all(futex: &Atomic) { + let ptr = futex as *const Atomic; let op = libc::FUTEX_WAKE | libc::FUTEX_PRIVATE_FLAG; unsafe { libc::syscall(libc::SYS_futex, ptr, op, i32::MAX); @@ -112,11 +112,11 @@ pub fn futex_wake_all(futex: &AtomicU32) { // FreeBSD doesn't tell us how many threads are woken up, so this always returns false. 
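
An editorial aside, not part of the diff: the futex wrappers above hand the atomic to the kernel by casting the reference to a raw pointer, which lines up with the `AtomicPrimitive` safety docs earlier in this diff (same size and bit validity as the primitive). A minimal sketch of the same address relationship using only stable APIs:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

// `AtomicU32` (and therefore `Atomic<u32>`) occupies the same bytes as a `u32`,
// so its address can be handed to futex-style syscalls as a raw pointer.
fn futex_address(futex: &AtomicU32) -> *mut u32 {
    futex.as_ptr()
}

fn main() {
    let word = AtomicU32::new(0);
    word.store(1, Ordering::Release);
    let p = futex_address(&word);
    // Single-threaded here, so reading through the raw pointer is sound and
    // observes the value just stored.
    assert_eq!(unsafe { p.read() }, 1);
}
```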
#[cfg(target_os = "freebsd")] -pub fn futex_wake(futex: &AtomicU32) -> bool { +pub fn futex_wake(futex: &Atomic) -> bool { use crate::ptr::null_mut; unsafe { libc::_umtx_op( - futex as *const AtomicU32 as *mut _, + futex as *const Atomic as *mut _, libc::UMTX_OP_WAKE_PRIVATE, 1, null_mut(), @@ -127,11 +127,11 @@ pub fn futex_wake(futex: &AtomicU32) -> bool { } #[cfg(target_os = "freebsd")] -pub fn futex_wake_all(futex: &AtomicU32) { +pub fn futex_wake_all(futex: &Atomic) { use crate::ptr::null_mut; unsafe { libc::_umtx_op( - futex as *const AtomicU32 as *mut _, + futex as *const Atomic as *mut _, libc::UMTX_OP_WAKE_PRIVATE, i32::MAX as libc::c_ulong, null_mut(), @@ -141,7 +141,7 @@ pub fn futex_wake_all(futex: &AtomicU32) { } #[cfg(target_os = "openbsd")] -pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option) -> bool { +pub fn futex_wait(futex: &Atomic, expected: u32, timeout: Option) -> bool { use super::time::Timespec; use crate::ptr::{null, null_mut}; @@ -152,7 +152,7 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option) - let r = unsafe { libc::futex( - futex as *const AtomicU32 as *mut u32, + futex as *const Atomic as *mut u32, libc::FUTEX_WAIT, expected as i32, timespec.as_ref().map_or(null(), |t| t as *const libc::timespec), @@ -164,20 +164,25 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option) - } #[cfg(target_os = "openbsd")] -pub fn futex_wake(futex: &AtomicU32) -> bool { +pub fn futex_wake(futex: &Atomic) -> bool { use crate::ptr::{null, null_mut}; unsafe { - libc::futex(futex as *const AtomicU32 as *mut u32, libc::FUTEX_WAKE, 1, null(), null_mut()) - > 0 + libc::futex( + futex as *const Atomic as *mut u32, + libc::FUTEX_WAKE, + 1, + null(), + null_mut(), + ) > 0 } } #[cfg(target_os = "openbsd")] -pub fn futex_wake_all(futex: &AtomicU32) { +pub fn futex_wake_all(futex: &Atomic) { use crate::ptr::{null, null_mut}; unsafe { libc::futex( - futex as *const AtomicU32 as *mut u32, + futex as *const Atomic as *mut u32, libc::FUTEX_WAKE, i32::MAX, null(), @@ -187,7 +192,7 @@ pub fn futex_wake_all(futex: &AtomicU32) { } #[cfg(target_os = "dragonfly")] -pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option) -> bool { +pub fn futex_wait(futex: &Atomic, expected: u32, timeout: Option) -> bool { // A timeout of 0 means infinite. // We round smaller timeouts up to 1 millisecond. // Overflows are rounded up to an infinite timeout. @@ -195,7 +200,7 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option) - timeout.and_then(|d| Some(i32::try_from(d.as_millis()).ok()?.max(1))).unwrap_or(0); let r = unsafe { - libc::umtx_sleep(futex as *const AtomicU32 as *const i32, expected as i32, timeout_ms) + libc::umtx_sleep(futex as *const Atomic as *const i32, expected as i32, timeout_ms) }; r == 0 || super::os::errno() != libc::ETIMEDOUT @@ -203,28 +208,28 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option) - // DragonflyBSD doesn't tell us how many threads are woken up, so this always returns false. 
#[cfg(target_os = "dragonfly")] -pub fn futex_wake(futex: &AtomicU32) -> bool { - unsafe { libc::umtx_wakeup(futex as *const AtomicU32 as *const i32, 1) }; +pub fn futex_wake(futex: &Atomic) -> bool { + unsafe { libc::umtx_wakeup(futex as *const Atomic as *const i32, 1) }; false } #[cfg(target_os = "dragonfly")] -pub fn futex_wake_all(futex: &AtomicU32) { - unsafe { libc::umtx_wakeup(futex as *const AtomicU32 as *const i32, i32::MAX) }; +pub fn futex_wake_all(futex: &Atomic) { + unsafe { libc::umtx_wakeup(futex as *const Atomic as *const i32, i32::MAX) }; } #[cfg(target_os = "emscripten")] extern "C" { - fn emscripten_futex_wake(addr: *const AtomicU32, count: libc::c_int) -> libc::c_int; + fn emscripten_futex_wake(addr: *const Atomic, count: libc::c_int) -> libc::c_int; fn emscripten_futex_wait( - addr: *const AtomicU32, + addr: *const Atomic, val: libc::c_uint, max_wait_ms: libc::c_double, ) -> libc::c_int; } #[cfg(target_os = "emscripten")] -pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option) -> bool { +pub fn futex_wait(futex: &Atomic, expected: u32, timeout: Option) -> bool { unsafe { emscripten_futex_wait( futex, @@ -235,18 +240,18 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option) - } #[cfg(target_os = "emscripten")] -pub fn futex_wake(futex: &AtomicU32) -> bool { +pub fn futex_wake(futex: &Atomic) -> bool { unsafe { emscripten_futex_wake(futex, 1) > 0 } } #[cfg(target_os = "emscripten")] -pub fn futex_wake_all(futex: &AtomicU32) { +pub fn futex_wake_all(futex: &Atomic) { unsafe { emscripten_futex_wake(futex, i32::MAX) }; } #[cfg(target_os = "fuchsia")] pub mod zircon { - pub type zx_futex_t = crate::sync::atomic::AtomicU32; + pub type zx_futex_t = crate::sync::atomic::Atomic; pub type zx_handle_t = u32; pub type zx_status_t = i32; pub type zx_time_t = i64; @@ -277,7 +282,7 @@ pub mod zircon { } #[cfg(target_os = "fuchsia")] -pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option) -> bool { +pub fn futex_wait(futex: &Atomic, expected: u32, timeout: Option) -> bool { // Sleep forever if the timeout is longer than fits in a i64. let deadline = timeout .and_then(|d| { @@ -295,12 +300,12 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option) - // Fuchsia doesn't tell us how many threads are woken up, so this always returns false. 
#[cfg(target_os = "fuchsia")] -pub fn futex_wake(futex: &AtomicU32) -> bool { +pub fn futex_wake(futex: &Atomic) -> bool { unsafe { zircon::zx_futex_wake(futex, 1) }; false } #[cfg(target_os = "fuchsia")] -pub fn futex_wake_all(futex: &AtomicU32) { +pub fn futex_wake_all(futex: &Atomic) { unsafe { zircon::zx_futex_wake(futex, u32::MAX) }; } diff --git a/library/std/src/sys/pal/unix/kernel_copy.rs b/library/std/src/sys/pal/unix/kernel_copy.rs index a671383cb7957..0e2bbcf5fa5e7 100644 --- a/library/std/src/sys/pal/unix/kernel_copy.rs +++ b/library/std/src/sys/pal/unix/kernel_copy.rs @@ -63,7 +63,7 @@ use crate::os::unix::net::UnixStream; use crate::pipe::{PipeReader, PipeWriter}; use crate::process::{ChildStderr, ChildStdin, ChildStdout}; use crate::ptr; -use crate::sync::atomic::{AtomicBool, AtomicU8, Ordering}; +use crate::sync::atomic::{Atomic, AtomicBool, AtomicU8, Ordering}; use crate::sys::cvt; use crate::sys::weak::syscall; @@ -584,7 +584,7 @@ pub(super) fn copy_regular_files(reader: RawFd, writer: RawFd, max_len: u64) -> // Kernel prior to 4.5 don't have copy_file_range // We store the availability in a global to avoid unnecessary syscalls - static HAS_COPY_FILE_RANGE: AtomicU8 = AtomicU8::new(NOT_PROBED); + static HAS_COPY_FILE_RANGE: Atomic = AtomicU8::new(NOT_PROBED); let mut have_probed = match HAS_COPY_FILE_RANGE.load(Ordering::Relaxed) { NOT_PROBED => false, @@ -709,8 +709,8 @@ enum SpliceMode { /// performs splice or sendfile between file descriptors /// Does _not_ fall back to a generic copy loop. fn sendfile_splice(mode: SpliceMode, reader: RawFd, writer: RawFd, len: u64) -> CopyResult { - static HAS_SENDFILE: AtomicBool = AtomicBool::new(true); - static HAS_SPLICE: AtomicBool = AtomicBool::new(true); + static HAS_SENDFILE: Atomic = AtomicBool::new(true); + static HAS_SPLICE: Atomic = AtomicBool::new(true); // Android builds use feature level 14, but the libc wrapper for splice is // gated on feature level 21+, so we have to invoke the syscall directly. diff --git a/library/std/src/sys/pal/unix/mod.rs b/library/std/src/sys/pal/unix/mod.rs index e8428eccb1691..1da5253d6c68f 100644 --- a/library/std/src/sys/pal/unix/mod.rs +++ b/library/std/src/sys/pal/unix/mod.rs @@ -214,7 +214,7 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) { target_os = "vxworks", target_os = "vita", )))] -static ON_BROKEN_PIPE_FLAG_USED: crate::sync::atomic::AtomicBool = +static ON_BROKEN_PIPE_FLAG_USED: crate::sync::atomic::Atomic = crate::sync::atomic::AtomicBool::new(false); #[cfg(not(any( diff --git a/library/std/src/sys/pal/unix/rand.rs b/library/std/src/sys/pal/unix/rand.rs index cc0852aab4396..14af5269c76d6 100644 --- a/library/std/src/sys/pal/unix/rand.rs +++ b/library/std/src/sys/pal/unix/rand.rs @@ -78,7 +78,7 @@ cfg_if::cfg_if! { ))] mod imp { use crate::io::{Error, Result}; - use crate::sync::atomic::{AtomicBool, Ordering}; + use crate::sync::atomic::{Atomic, AtomicBool, Ordering}; use crate::sys::os::errno; #[cfg(any(target_os = "linux", target_os = "android"))] @@ -98,7 +98,7 @@ mod imp { // This provides the best quality random numbers available at the given moment // without ever blocking, and is preferable to falling back to /dev/urandom. 
- static GRND_INSECURE_AVAILABLE: AtomicBool = AtomicBool::new(true); + static GRND_INSECURE_AVAILABLE: Atomic = AtomicBool::new(true); if GRND_INSECURE_AVAILABLE.load(Ordering::Relaxed) { let ret = unsafe { getrandom(buf.as_mut_ptr().cast(), buf.len(), libc::GRND_INSECURE) }; if ret == -1 && errno() as libc::c_int == libc::EINVAL { @@ -125,7 +125,7 @@ mod imp { } pub fn syscall(v: &mut [u8]) -> Option> { - static GETRANDOM_UNAVAILABLE: AtomicBool = AtomicBool::new(false); + static GETRANDOM_UNAVAILABLE: Atomic = AtomicBool::new(false); if GETRANDOM_UNAVAILABLE.load(Ordering::Relaxed) { return None; @@ -275,13 +275,13 @@ mod imp { #[cfg(target_os = "vxworks")] mod imp { - use core::sync::atomic::AtomicBool; use core::sync::atomic::Ordering::Relaxed; + use core::sync::atomic::{Atomic, AtomicBool}; use crate::io::{Error, Result}; pub fn syscall(v: &mut [u8]) -> Result<()> { - static RNG_INIT: AtomicBool = AtomicBool::new(false); + static RNG_INIT: Atomic = AtomicBool::new(false); while !RNG_INIT.load(Relaxed) { let ret = unsafe { libc::randSecure() }; if ret < 0 { diff --git a/library/std/src/sys/pal/unix/stack_overflow.rs b/library/std/src/sys/pal/unix/stack_overflow.rs index 728ce8d60f639..7f6d3a89d0ac1 100644 --- a/library/std/src/sys/pal/unix/stack_overflow.rs +++ b/library/std/src/sys/pal/unix/stack_overflow.rs @@ -49,7 +49,7 @@ mod imp { use super::Handler; use crate::cell::Cell; use crate::ops::Range; - use crate::sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering}; + use crate::sync::atomic::{Atomic, AtomicBool, AtomicPtr, AtomicUsize, Ordering}; use crate::sync::OnceLock; use crate::sys::pal::unix::os; use crate::{io, mem, ptr, thread}; @@ -118,9 +118,9 @@ mod imp { } } - static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0); - static MAIN_ALTSTACK: AtomicPtr = AtomicPtr::new(ptr::null_mut()); - static NEED_ALTSTACK: AtomicBool = AtomicBool::new(false); + static PAGE_SIZE: Atomic = AtomicUsize::new(0); + static MAIN_ALTSTACK: Atomic<*mut libc::c_void> = AtomicPtr::new(ptr::null_mut()); + static NEED_ALTSTACK: Atomic = AtomicBool::new(false); /// # Safety /// Must be called only once diff --git a/library/std/src/sys/pal/unix/weak.rs b/library/std/src/sys/pal/unix/weak.rs index 35762f5a53b5b..26dffa139b04e 100644 --- a/library/std/src/sys/pal/unix/weak.rs +++ b/library/std/src/sys/pal/unix/weak.rs @@ -23,7 +23,7 @@ use crate::ffi::CStr; use crate::marker::PhantomData; -use crate::sync::atomic::{self, AtomicPtr, Ordering}; +use crate::sync::atomic::{self, Atomic, AtomicPtr, Ordering}; use crate::{mem, ptr}; // We can use true weak linkage on ELF targets. @@ -73,7 +73,7 @@ pub(crate) macro dlsym { } pub(crate) struct DlsymWeak { name: &'static str, - func: AtomicPtr, + func: Atomic<*mut libc::c_void>, _marker: PhantomData, } diff --git a/library/std/src/sys/pal/wasm/atomics/futex.rs b/library/std/src/sys/pal/wasm/atomics/futex.rs index 42913a99ee9d6..93376e2ecf9d4 100644 --- a/library/std/src/sys/pal/wasm/atomics/futex.rs +++ b/library/std/src/sys/pal/wasm/atomics/futex.rs @@ -3,11 +3,11 @@ use core::arch::wasm32 as wasm; #[cfg(target_arch = "wasm64")] use core::arch::wasm64 as wasm; -use crate::sync::atomic::AtomicU32; +use crate::sync::atomic::Atomic; use crate::time::Duration; /// An atomic for use as a futex that is at least 8-bits but may be larger. 
-pub type SmallAtomic = AtomicU32; +pub type SmallAtomic = Atomic; /// Must be the underlying type of SmallAtomic pub type SmallPrimitive = u32; @@ -16,11 +16,14 @@ pub type SmallPrimitive = u32; /// Returns directly if the futex doesn't hold the expected value. /// /// Returns false on timeout, and true in all other cases. -pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option) -> bool { +pub fn futex_wait(futex: &Atomic, expected: u32, timeout: Option) -> bool { let timeout = timeout.and_then(|t| t.as_nanos().try_into().ok()).unwrap_or(-1); unsafe { - wasm::memory_atomic_wait32(futex as *const AtomicU32 as *mut i32, expected as i32, timeout) - < 2 + wasm::memory_atomic_wait32( + futex as *const Atomic as *mut i32, + expected as i32, + timeout, + ) < 2 } } @@ -28,13 +31,13 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option) - /// /// Returns true if this actually woke up such a thread, /// or false if no thread was waiting on this futex. -pub fn futex_wake(futex: &AtomicU32) -> bool { - unsafe { wasm::memory_atomic_notify(futex as *const AtomicU32 as *mut i32, 1) > 0 } +pub fn futex_wake(futex: &Atomic) -> bool { + unsafe { wasm::memory_atomic_notify(futex as *const Atomic as *mut i32, 1) > 0 } } /// Wakes up all threads that are waiting on `futex_wait` on this futex. -pub fn futex_wake_all(futex: &AtomicU32) { +pub fn futex_wake_all(futex: &Atomic) { unsafe { - wasm::memory_atomic_notify(futex as *const AtomicU32 as *mut i32, i32::MAX as u32); + wasm::memory_atomic_notify(futex as *const Atomic as *mut i32, i32::MAX as u32); } } diff --git a/library/std/src/sys/pal/windows/compat.rs b/library/std/src/sys/pal/windows/compat.rs index 75232dfc0b0f1..705a7d6b4cd3b 100644 --- a/library/std/src/sys/pal/windows/compat.rs +++ b/library/std/src/sys/pal/windows/compat.rs @@ -145,7 +145,7 @@ macro_rules! compat_fn_with_fallback { use super::*; use crate::mem; use crate::ffi::CStr; - use crate::sync::atomic::{AtomicPtr, Ordering}; + use crate::sync::atomic::{Atomic, AtomicPtr, Ordering}; use crate::sys::compat::Module; type F = unsafe extern "system" fn($($argtype),*) -> $rettype; @@ -155,7 +155,7 @@ macro_rules! compat_fn_with_fallback { /// When that is called it attempts to load the requested symbol. /// If it succeeds, `PTR` is set to the address of that symbol. /// If it fails, then `PTR` is set to `fallback`. - static PTR: AtomicPtr = AtomicPtr::new(load as *mut _); + static PTR: Atomic<*mut c_void> = AtomicPtr::new(load as *mut _); unsafe extern "system" fn load($($argname: $argtype),*) -> $rettype { unsafe { @@ -213,9 +213,9 @@ macro_rules! 
diff --git a/library/std/src/sys/pal/windows/pipe.rs b/library/std/src/sys/pal/windows/pipe.rs
index 7d1b5aca1d5fe..b277e640a641c 100644
--- a/library/std/src/sys/pal/windows/pipe.rs
+++ b/library/std/src/sys/pal/windows/pipe.rs
@@ -2,8 +2,8 @@ use crate::ffi::OsStr;
 use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut};
 use crate::os::windows::prelude::*;
 use crate::path::Path;
-use crate::sync::atomic::AtomicUsize;
 use crate::sync::atomic::Ordering::Relaxed;
+use crate::sync::atomic::{Atomic, AtomicUsize};
 use crate::sys::fs::{File, OpenOptions};
 use crate::sys::handle::Handle;
 use crate::sys::pal::windows::api::{self, WinError};
@@ -208,7 +208,7 @@ pub fn spawn_pipe_relay(
 }

 fn random_number() -> usize {
-    static N: AtomicUsize = AtomicUsize::new(0);
+    static N: Atomic<usize> = AtomicUsize::new(0);
     loop {
         if N.load(Relaxed) != 0 {
             return N.fetch_add(1, Relaxed);
diff --git a/library/std/src/sys/pal/windows/time.rs b/library/std/src/sys/pal/windows/time.rs
index d9010e3996109..68126bd8d2fa0 100644
--- a/library/std/src/sys/pal/windows/time.rs
+++ b/library/std/src/sys/pal/windows/time.rs
@@ -164,7 +164,7 @@ fn intervals2dur(intervals: u64) -> Duration {
 mod perf_counter {
     use super::NANOS_PER_SEC;
-    use crate::sync::atomic::{AtomicU64, Ordering};
+    use crate::sync::atomic::{Atomic, AtomicU64, Ordering};
     use crate::sys::{c, cvt};
     use crate::sys_common::mul_div_u64;
     use crate::time::Duration;
@@ -199,7 +199,7 @@ mod perf_counter {
         // uninitialized. Storing this as a single `AtomicU64` allows us to use
         // `Relaxed` operations, as we are only interested in the effects on a
         // single memory location.
-        static FREQUENCY: AtomicU64 = AtomicU64::new(0);
+        static FREQUENCY: Atomic<u64> = AtomicU64::new(0);

         let cached = FREQUENCY.load(Ordering::Relaxed);
         // If a previous thread has filled in this global state, use that.
diff --git a/library/std/src/sys/pal/xous/net/tcplistener.rs b/library/std/src/sys/pal/xous/net/tcplistener.rs
index ddfb289162b69..bba4af22d8ac8 100644
--- a/library/std/src/sys/pal/xous/net/tcplistener.rs
+++ b/library/std/src/sys/pal/xous/net/tcplistener.rs
@@ -18,10 +18,10 @@ macro_rules! unimpl {

 #[derive(Clone)]
 pub struct TcpListener {
-    fd: Arc<AtomicU16>,
+    fd: Arc<Atomic<u16>>,
     local: SocketAddr,
-    handle_count: Arc<AtomicUsize>,
-    nonblocking: Arc<AtomicBool>,
+    handle_count: Arc<Atomic<usize>>,
+    nonblocking: Arc<Atomic<bool>>,
 }

 impl TcpListener {
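The xous socket types share counters across clones through `Arc<Atomic<usize>>`. A compact sketch of that sharing pattern (nightly, `generic_atomic`; the `Listener` type here is illustrative):

```rust
// Sketch only: shared handle counting across clones, mirroring the `handle_count`
// fields above. Not the xous implementation.
#![feature(generic_atomic)]

use std::sync::Arc;
use std::sync::atomic::{Atomic, AtomicUsize, Ordering::Relaxed};

#[derive(Clone)]
struct Listener {
    handle_count: Arc<Atomic<usize>>,
}

fn main() {
    let listener = Listener { handle_count: Arc::new(AtomicUsize::new(1)) };
    let clone = listener.clone();
    clone.handle_count.fetch_add(1, Relaxed);
    // Both handles observe the same counter.
    assert_eq!(listener.handle_count.load(Relaxed), 2);
}
```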
diff --git a/library/std/src/sys/pal/xous/net/tcpstream.rs b/library/std/src/sys/pal/xous/net/tcpstream.rs
index 03442cf2fcdfd..3a5c01cb22fbb 100644
--- a/library/std/src/sys/pal/xous/net/tcpstream.rs
+++ b/library/std/src/sys/pal/xous/net/tcpstream.rs
@@ -29,11 +29,11 @@ pub struct TcpStream {
     remote_port: u16,
     peer_addr: SocketAddr,
     // milliseconds
-    read_timeout: Arc<AtomicU32>,
+    read_timeout: Arc<Atomic<u32>>,
     // milliseconds
-    write_timeout: Arc<AtomicU32>,
-    handle_count: Arc<AtomicUsize>,
-    nonblocking: Arc<AtomicBool>,
+    write_timeout: Arc<Atomic<u32>>,
+    handle_count: Arc<Atomic<usize>>,
+    nonblocking: Arc<Atomic<bool>>,
 }

 fn sockaddr_to_buf(duration: Duration, addr: &SocketAddr, buf: &mut [u8]) {
diff --git a/library/std/src/sys/pal/xous/net/udp.rs b/library/std/src/sys/pal/xous/net/udp.rs
index de5133280ba9d..15265564575e8 100644
--- a/library/std/src/sys/pal/xous/net/udp.rs
+++ b/library/std/src/sys/pal/xous/net/udp.rs
@@ -27,7 +27,7 @@ pub struct UdpSocket {
     read_timeout: Cell<u64>,
     // in milliseconds. The setting applies only to `send` calls after the timeout is set.
     write_timeout: Cell<u64>,
-    handle_count: Arc<AtomicUsize>,
+    handle_count: Arc<Atomic<usize>>,
     nonblocking: Cell<bool>,
 }
diff --git a/library/std/src/sys/sync/condvar/futex.rs b/library/std/src/sys/sync/condvar/futex.rs
index 39cd97c01ea32..b573fc69fca64 100644
--- a/library/std/src/sys/sync/condvar/futex.rs
+++ b/library/std/src/sys/sync/condvar/futex.rs
@@ -1,5 +1,5 @@
-use crate::sync::atomic::AtomicU32;
 use crate::sync::atomic::Ordering::Relaxed;
+use crate::sync::atomic::{Atomic, AtomicU32};
 use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all};
 use crate::sys::sync::Mutex;
 use crate::time::Duration;
@@ -8,7 +8,7 @@ pub struct Condvar {
     // The value of this atomic is simply incremented on every notification.
     // This is used by `.wait()` to not miss any notifications after
     // unlocking the mutex and before waiting for notifications.
-    futex: AtomicU32,
+    futex: Atomic<u32>,
 }

 impl Condvar {
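The comment above describes the counter trick: every notification bumps the futex word, and a waiter re-checks the value it read before blocking so nothing is lost between unlocking the mutex and going to sleep. A sketch of just that state handling, with the blocking itself elided (nightly, `generic_atomic`):

```rust
// Sketch only: the notification counter, without the actual futex wait/wake calls
// or the associated mutex.
#![feature(generic_atomic)]

use std::sync::atomic::{Atomic, AtomicU32, Ordering::Relaxed};

static FUTEX: Atomic<u32> = AtomicU32::new(0);

fn notify_one() {
    FUTEX.fetch_add(1, Relaxed);
}

// Real code would pass `seen` to `futex_wait`, which blocks only while the word still
// equals `seen`; a counter that already moved on means no notification was missed.
fn would_block(seen: u32) -> bool {
    FUTEX.load(Relaxed) == seen
}

fn main() {
    let seen = FUTEX.load(Relaxed);
    notify_one();
    assert!(!would_block(seen));
}
```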
diff --git a/library/std/src/sys/sync/condvar/pthread.rs b/library/std/src/sys/sync/condvar/pthread.rs
index 2b4bdfe415c80..758a806ec481d 100644
--- a/library/std/src/sys/sync/condvar/pthread.rs
+++ b/library/std/src/sys/sync/condvar/pthread.rs
@@ -1,7 +1,7 @@
 use crate::cell::UnsafeCell;
 use crate::ptr;
-use crate::sync::atomic::AtomicPtr;
 use crate::sync::atomic::Ordering::Relaxed;
+use crate::sync::atomic::{Atomic, AtomicPtr};
 use crate::sys::sync::{mutex, Mutex};
 #[cfg(not(target_os = "nto"))]
 use crate::sys::time::TIMESPEC_MAX;
@@ -14,7 +14,7 @@ struct AllocatedCondvar(UnsafeCell<libc::pthread_cond_t>);

 pub struct Condvar {
     inner: LazyBox<AllocatedCondvar>,
-    mutex: AtomicPtr<libc::pthread_mutex_t>,
+    mutex: Atomic<*mut libc::pthread_mutex_t>,
 }

 #[inline]
diff --git a/library/std/src/sys/sync/condvar/teeos.rs b/library/std/src/sys/sync/condvar/teeos.rs
index 943867cd76169..71e2d15bcac4f 100644
--- a/library/std/src/sys/sync/condvar/teeos.rs
+++ b/library/std/src/sys/sync/condvar/teeos.rs
@@ -1,7 +1,7 @@
 use crate::cell::UnsafeCell;
 use crate::ptr;
-use crate::sync::atomic::AtomicPtr;
 use crate::sync::atomic::Ordering::Relaxed;
+use crate::sync::atomic::{Atomic, AtomicPtr};
 use crate::sys::sync::mutex::{self, Mutex};
 use crate::sys::time::TIMESPEC_MAX;
 use crate::sys_common::lazy_box::{LazyBox, LazyInit};
@@ -19,7 +19,7 @@ struct AllocatedCondvar(UnsafeCell<libc::pthread_cond_t>);

 pub struct Condvar {
     inner: LazyBox<AllocatedCondvar>,
-    mutex: AtomicPtr<libc::pthread_mutex_t>,
+    mutex: Atomic<*mut libc::pthread_mutex_t>,
 }

 #[inline]
diff --git a/library/std/src/sys/sync/condvar/xous.rs b/library/std/src/sys/sync/condvar/xous.rs
index 007383479a100..3cec1c0064671 100644
--- a/library/std/src/sys/sync/condvar/xous.rs
+++ b/library/std/src/sys/sync/condvar/xous.rs
@@ -1,4 +1,4 @@
-use core::sync::atomic::{AtomicUsize, Ordering};
+use core::sync::atomic::{Atomic, AtomicUsize, Ordering};

 use crate::os::xous::ffi::{blocking_scalar, scalar};
 use crate::os::xous::services::{ticktimer_server, TicktimerScalar};
@@ -11,8 +11,8 @@ use crate::time::Duration;
 const NOTIFY_TRIES: usize = 3;

 pub struct Condvar {
-    counter: AtomicUsize,
-    timed_out: AtomicUsize,
+    counter: Atomic<usize>,
+    timed_out: Atomic<usize>,
 }

 unsafe impl Send for Condvar {}
diff --git a/library/std/src/sys/sync/mutex/fuchsia.rs b/library/std/src/sys/sync/mutex/fuchsia.rs
index 81a6361a83a49..f53c550b34623 100644
--- a/library/std/src/sys/sync/mutex/fuchsia.rs
+++ b/library/std/src/sys/sync/mutex/fuchsia.rs
@@ -37,8 +37,8 @@
 //!
 //! [mutex in Fuchsia's libsync]: https://cs.opensource.google/fuchsia/fuchsia/+/main:zircon/system/ulib/sync/mutex.c

-use crate::sync::atomic::AtomicU32;
 use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release};
+use crate::sync::atomic::{Atomic, AtomicU32};
 use crate::sys::futex::zircon::{
     zx_futex_wait, zx_futex_wake_single_owner, zx_handle_t, zx_thread_self, ZX_ERR_BAD_HANDLE,
     ZX_ERR_BAD_STATE, ZX_ERR_INVALID_ARGS, ZX_ERR_TIMED_OUT, ZX_ERR_WRONG_TYPE, ZX_OK,
@@ -52,7 +52,7 @@ const CONTESTED_BIT: u32 = 1;
 const UNLOCKED: u32 = 0;

 pub struct Mutex {
-    futex: AtomicU32,
+    futex: Atomic<u32>,
 }

 #[inline]
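A futex-based mutex keeps its whole state in one `Atomic<u32>` word, so an uncontended lock is a single compare-exchange. The sketch below shows that shape only; it is not Fuchsia's algorithm, which additionally records the owning thread's handle in the futex word (nightly, `generic_atomic`):

```rust
// Sketch only; illustrative constants, not Fuchsia's encoding.
#![feature(generic_atomic)]

use std::sync::atomic::{Atomic, AtomicU32, Ordering::{Acquire, Release}};

const UNLOCKED: u32 = 0;
const LOCKED: u32 = 1;

struct RawMutex {
    futex: Atomic<u32>,
}

impl RawMutex {
    const fn new() -> Self {
        Self { futex: AtomicU32::new(UNLOCKED) }
    }

    fn try_lock(&self) -> bool {
        // Uncontended path: one compare-exchange on the state word.
        self.futex.compare_exchange(UNLOCKED, LOCKED, Acquire, Acquire).is_ok()
    }

    fn unlock(&self) {
        // A real implementation would also wake a waiter via the futex here.
        self.futex.store(UNLOCKED, Release);
    }
}

fn main() {
    let m = RawMutex::new();
    assert!(m.try_lock());
    assert!(!m.try_lock());
    m.unlock();
    assert!(m.try_lock());
}
```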
diff --git a/library/std/src/sys/sync/mutex/xous.rs b/library/std/src/sys/sync/mutex/xous.rs
index 63efa5a0210ab..f42d7457c70f9 100644
--- a/library/std/src/sys/sync/mutex/xous.rs
+++ b/library/std/src/sys/sync/mutex/xous.rs
@@ -1,7 +1,7 @@
 use crate::os::xous::ffi::{blocking_scalar, do_yield};
 use crate::os::xous::services::{ticktimer_server, TicktimerScalar};
 use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release};
-use crate::sync::atomic::{AtomicBool, AtomicUsize};
+use crate::sync::atomic::{Atomic, AtomicBool, AtomicUsize};

 pub struct Mutex {
     /// The "locked" value indicates how many threads are waiting on this
@@ -14,12 +14,12 @@ pub struct Mutex {
     /// for a lock, or it is locked for long periods of time. Rather than
     /// spinning, these locks send a Message to the ticktimer server
     /// requesting that they be woken up when a lock is unlocked.
-    locked: AtomicUsize,
+    locked: Atomic<usize>,

     /// Whether this Mutex ever was contended, and therefore made a trip
     /// to the ticktimer server. If this was never set, then we were never
     /// on the slow path and can skip deregistering the mutex.
-    contended: AtomicBool,
+    contended: Atomic<bool>,
 }

 impl Mutex {
diff --git a/library/std/src/sys/sync/once/futex.rs b/library/std/src/sys/sync/once/futex.rs
index 25588a4217b62..ff37eff616b13 100644
--- a/library/std/src/sys/sync/once/futex.rs
+++ b/library/std/src/sys/sync/once/futex.rs
@@ -1,7 +1,7 @@
 use crate::cell::Cell;
 use crate::sync as public;
-use crate::sync::atomic::AtomicU32;
 use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release};
+use crate::sync::atomic::{Atomic, AtomicU32};
 use crate::sync::once::ExclusiveState;
 use crate::sys::futex::{futex_wait, futex_wake_all};
@@ -49,7 +49,7 @@ impl OnceState {
 }

 struct CompletionGuard<'a> {
-    state_and_queued: &'a AtomicU32,
+    state_and_queued: &'a Atomic<u32>,
     set_state_on_drop_to: u32,
 }
diff --git a/library/std/src/sys/sync/once/queue.rs b/library/std/src/sys/sync/once/queue.rs
index 17abaf0bf267b..faefc411387f6 100644
--- a/library/std/src/sys/sync/once/queue.rs
+++ b/library/std/src/sys/sync/once/queue.rs
@@ -57,7 +57,7 @@
 use crate::cell::Cell;
 use crate::sync::atomic::Ordering::{AcqRel, Acquire, Release};
-use crate::sync::atomic::{AtomicBool, AtomicPtr};
+use crate::sync::atomic::{Atomic, AtomicBool, AtomicPtr};
 use crate::sync::once::ExclusiveState;
 use crate::thread::{self, Thread};
 use crate::{fmt, ptr, sync as public};
@@ -65,7 +65,7 @@ use crate::{fmt, ptr, sync as public};
 type StateAndQueue = *mut ();

 pub struct Once {
-    state_and_queue: AtomicPtr<()>,
+    state_and_queue: Atomic<*mut ()>,
 }

 pub struct OnceState {
@@ -94,7 +94,7 @@ const QUEUE_MASK: usize = !STATE_MASK;
 #[repr(align(4))] // Ensure the two lower bits are free to use as state bits.
 struct Waiter {
     thread: Cell<Option<Thread>>,
-    signaled: AtomicBool,
+    signaled: Atomic<bool>,
     next: Cell<*const Waiter>,
 }

@@ -102,7 +102,7 @@ struct Waiter {
 // Every node is a struct on the stack of a waiting thread.
 // Will wake up the waiters when it gets dropped, i.e. also on panic.
 struct WaiterQueue<'a> {
-    state_and_queue: &'a AtomicPtr<()>,
+    state_and_queue: &'a Atomic<*mut ()>,
     set_state_on_drop_to: StateAndQueue,
 }

@@ -233,7 +233,7 @@ impl Once {
 }

 fn wait(
-    state_and_queue: &AtomicPtr<()>,
+    state_and_queue: &Atomic<*mut ()>,
     mut current: StateAndQueue,
     return_on_poisoned: bool,
 ) -> StateAndQueue {
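The queue-based `Once` packs its state into the two low bits of an `Atomic<*mut ()>`, leaving the rest as a 4-byte-aligned pointer to the waiter queue. A sketch of that tagging with the strict-provenance helpers (nightly, `generic_atomic`; constants and values are illustrative):

```rust
// Sketch only: tagging the two low bits of a queue pointer, in the spirit of
// `state_and_queue` above.
#![feature(generic_atomic)]

use std::ptr::without_provenance_mut;
use std::sync::atomic::{Atomic, AtomicPtr, Ordering::Relaxed};

const STATE_MASK: usize = 0b11;

static STATE_AND_QUEUE: Atomic<*mut ()> = AtomicPtr::new(without_provenance_mut(0));

fn state(p: *mut ()) -> usize {
    p.addr() & STATE_MASK
}

fn main() {
    // Set the state bits to 2 while leaving the pointer part (here: null) untouched.
    let old = STATE_AND_QUEUE.load(Relaxed);
    let tagged = old.map_addr(|a| (a & !STATE_MASK) | 2);
    STATE_AND_QUEUE.store(tagged, Relaxed);
    assert_eq!(state(STATE_AND_QUEUE.load(Relaxed)), 2);
}
```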
diff --git a/library/std/src/sys/sync/rwlock/futex.rs b/library/std/src/sys/sync/rwlock/futex.rs
index 75ecc2ab5c52f..587b71dd58efd 100644
--- a/library/std/src/sys/sync/rwlock/futex.rs
+++ b/library/std/src/sys/sync/rwlock/futex.rs
@@ -1,5 +1,5 @@
-use crate::sync::atomic::AtomicU32;
 use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release};
+use crate::sync::atomic::{Atomic, AtomicU32};
 use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all};

 pub struct RwLock {
@@ -10,10 +10,10 @@ pub struct RwLock {
     // 0x3FFF_FFFF: Write locked
     // Bit 30: Readers are waiting on this futex.
     // Bit 31: Writers are waiting on the writer_notify futex.
-    state: AtomicU32,
+    state: Atomic<u32>,
     // The 'condition variable' to notify writers through.
     // Incremented on every signal.
-    writer_notify: AtomicU32,
+    writer_notify: Atomic<u32>,
 }

 const READ_LOCKED: u32 = 1;
diff --git a/library/std/src/sys/sync/rwlock/queue.rs b/library/std/src/sys/sync/rwlock/queue.rs
index 458c16516bbe1..4c149e627e48d 100644
--- a/library/std/src/sys/sync/rwlock/queue.rs
+++ b/library/std/src/sys/sync/rwlock/queue.rs
@@ -112,7 +112,7 @@ use crate::hint::spin_loop;
 use crate::mem;
 use crate::ptr::{self, null_mut, without_provenance_mut, NonNull};
 use crate::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release};
-use crate::sync::atomic::{AtomicBool, AtomicPtr};
+use crate::sync::atomic::{Atomic, AtomicBool, AtomicPtr};
 use crate::thread::{self, Thread};

 // Locking uses exponential backoff. `SPIN_COUNT` indicates how many times the
@@ -121,7 +121,7 @@ use crate::thread::{self, Thread};
 const SPIN_COUNT: usize = 7;

 type State = *mut ();
-type AtomicState = AtomicPtr<()>;
+type AtomicState = Atomic<*mut ()>;

 const UNLOCKED: State = without_provenance_mut(0);
 const LOCKED: usize = 1;
@@ -157,7 +157,7 @@ unsafe fn to_node(state: State) -> NonNull<Node> {
 }

 /// An atomic node pointer with relaxed operations.
-struct AtomicLink(AtomicPtr<Node>);
+struct AtomicLink(Atomic<*mut Node>);

 impl AtomicLink {
     fn new(v: Option<NonNull<Node>>) -> AtomicLink {
@@ -180,7 +180,7 @@ struct Node {
     tail: AtomicLink,
     write: bool,
     thread: OnceCell<Thread>,
-    completed: AtomicBool,
+    completed: Atomic<bool>,
 }

 impl Node {
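The futex rwlock's comments above spell out the bit layout of its `state` word: a reader count in the low 30 bits (saturated to 0x3FFF_FFFF when write-locked) plus two waiter flags. A sketch of reading and updating that layout (nightly, `generic_atomic`; helper names are illustrative, not the rwlock's real API):

```rust
// Sketch only: the reader-count-plus-flags layout described above.
#![feature(generic_atomic)]

use std::sync::atomic::{Atomic, AtomicU32, Ordering::Relaxed};

const READ_LOCKED: u32 = 1;
const MASK: u32 = (1 << 30) - 1; // low 30 bits: reader count / write-locked marker
const WRITE_LOCKED: u32 = MASK;
const READERS_WAITING: u32 = 1 << 30;

static STATE: Atomic<u32> = AtomicU32::new(0);

fn is_write_locked(state: u32) -> bool {
    state & MASK == WRITE_LOCKED
}

fn main() {
    STATE.fetch_add(READ_LOCKED, Relaxed); // one reader acquires
    STATE.fetch_or(READERS_WAITING, Relaxed); // someone queues behind it
    let s = STATE.load(Relaxed);
    assert_eq!(s & MASK, 1);
    assert!(!is_write_locked(s));
}
```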
diff --git a/library/std/src/sys/sync/thread_parking/darwin.rs b/library/std/src/sys/sync/thread_parking/darwin.rs
index 96e3d23c332c4..2c83c50247a49 100644
--- a/library/std/src/sys/sync/thread_parking/darwin.rs
+++ b/library/std/src/sys/sync/thread_parking/darwin.rs
@@ -13,8 +13,8 @@
 #![allow(non_camel_case_types)]

 use crate::pin::Pin;
-use crate::sync::atomic::AtomicI8;
 use crate::sync::atomic::Ordering::{Acquire, Release};
+use crate::sync::atomic::{Atomic, AtomicI8};
 use crate::time::Duration;

 type dispatch_semaphore_t = *mut crate::ffi::c_void;
@@ -38,7 +38,7 @@ const PARKED: i8 = -1;

 pub struct Parker {
     semaphore: dispatch_semaphore_t,
-    state: AtomicI8,
+    state: Atomic<i8>,
 }

 unsafe impl Sync for Parker {}
diff --git a/library/std/src/sys/sync/thread_parking/id.rs b/library/std/src/sys/sync/thread_parking/id.rs
index a7b07b509dfd8..5833883565bff 100644
--- a/library/std/src/sys/sync/thread_parking/id.rs
+++ b/library/std/src/sys/sync/thread_parking/id.rs
@@ -10,12 +10,12 @@
 use crate::cell::UnsafeCell;
 use crate::pin::Pin;
 use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release};
-use crate::sync::atomic::{fence, AtomicI8};
+use crate::sync::atomic::{fence, Atomic, AtomicI8};
 use crate::sys::thread_parking::{current, park, park_timeout, unpark, ThreadId};
 use crate::time::Duration;

 pub struct Parker {
-    state: AtomicI8,
+    state: Atomic<i8>,
     tid: UnsafeCell<Option<ThreadId>>,
 }
diff --git a/library/std/src/sys/sync/thread_parking/pthread.rs b/library/std/src/sys/sync/thread_parking/pthread.rs
index c64600e9e29c3..035270d81880f 100644
--- a/library/std/src/sys/sync/thread_parking/pthread.rs
+++ b/library/std/src/sys/sync/thread_parking/pthread.rs
@@ -4,8 +4,8 @@ use crate::cell::UnsafeCell;
 use crate::marker::PhantomPinned;
 use crate::pin::Pin;
 use crate::ptr::addr_of_mut;
-use crate::sync::atomic::AtomicUsize;
 use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release};
+use crate::sync::atomic::{Atomic, AtomicUsize};
 #[cfg(not(target_os = "nto"))]
 use crate::sys::time::TIMESPEC_MAX;
 #[cfg(target_os = "nto")]
@@ -84,7 +84,7 @@ unsafe fn wait_timeout(
 }

 pub struct Parker {
-    state: AtomicUsize,
+    state: Atomic<usize>,
     lock: UnsafeCell<libc::pthread_mutex_t>,
     cvar: UnsafeCell<libc::pthread_cond_t>,
     // The `pthread` primitives require a stable address, so make this struct `!Unpin`.
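All of these parkers drive the same three-state protocol (EMPTY, PARKED, NOTIFIED) through a small atomic word such as `Atomic<i8>`. The sketch below shows only the token handling; the actual blocking and waking halves, and the PARKED (-1) state, are elided (nightly, `generic_atomic`):

```rust
// Sketch only: the shared parker state word, not any platform's real parker.
#![feature(generic_atomic)]

use std::sync::atomic::{Atomic, AtomicI8, Ordering::{Acquire, Release}};

const EMPTY: i8 = 0;
const NOTIFIED: i8 = 1;

struct ParkerState {
    state: Atomic<i8>,
}

impl ParkerState {
    const fn new() -> Self {
        Self { state: AtomicI8::new(EMPTY) }
    }

    /// Returns true if a token was already available, so `park` would not block.
    fn consume_token(&self) -> bool {
        self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Acquire).is_ok()
    }

    fn unpark(&self) {
        // A real parker wakes the thread if the previous state was PARKED.
        let _previous = self.state.swap(NOTIFIED, Release);
    }
}

fn main() {
    let parker = ParkerState::new();
    assert!(!parker.consume_token());
    parker.unpark();
    assert!(parker.consume_token());
}
```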
diff --git a/library/std/src/sys/sync/thread_parking/windows7.rs b/library/std/src/sys/sync/thread_parking/windows7.rs
index 1000b63b6d01a..2246eb0840a3a 100644
--- a/library/std/src/sys/sync/thread_parking/windows7.rs
+++ b/library/std/src/sys/sync/thread_parking/windows7.rs
@@ -60,13 +60,13 @@ use core::ffi::c_void;

 use crate::pin::Pin;
-use crate::sync::atomic::AtomicI8;
 use crate::sync::atomic::Ordering::{Acquire, Release};
+use crate::sync::atomic::{Atomic, AtomicI8};
 use crate::sys::{c, dur2timeout};
 use crate::time::Duration;

 pub struct Parker {
-    state: AtomicI8,
+    state: Atomic<i8>,
 }

 const PARKED: i8 = -1;
@@ -186,8 +186,8 @@ impl Parker {
 mod keyed_events {
     use core::pin::Pin;
     use core::ptr;
-    use core::sync::atomic::AtomicPtr;
     use core::sync::atomic::Ordering::{Acquire, Relaxed};
+    use core::sync::atomic::{Atomic, AtomicPtr};
     use core::time::Duration;

     use super::{Parker, EMPTY, NOTIFIED};
@@ -244,7 +244,7 @@ mod keyed_events {
     fn keyed_event_handle() -> c::HANDLE {
         const INVALID: c::HANDLE = ptr::without_provenance_mut(!0);
-        static HANDLE: AtomicPtr<crate::ffi::c_void> = AtomicPtr::new(INVALID);
+        static HANDLE: Atomic<*mut crate::ffi::c_void> = AtomicPtr::new(INVALID);
         match HANDLE.load(Relaxed) {
             INVALID => {
                 let mut handle = c::INVALID_HANDLE_VALUE;
diff --git a/library/std/src/sys/sync/thread_parking/xous.rs b/library/std/src/sys/sync/thread_parking/xous.rs
index 64b6f731f2377..18b0ce6152c73 100644
--- a/library/std/src/sys/sync/thread_parking/xous.rs
+++ b/library/std/src/sys/sync/thread_parking/xous.rs
@@ -2,8 +2,8 @@ use crate::os::xous::ffi::{blocking_scalar, scalar};
 use crate::os::xous::services::{ticktimer_server, TicktimerScalar};
 use crate::pin::Pin;
 use crate::ptr;
-use crate::sync::atomic::AtomicI8;
 use crate::sync::atomic::Ordering::{Acquire, Release};
+use crate::sync::atomic::{Atomic, AtomicI8};
 use crate::time::Duration;

 const NOTIFIED: i8 = 1;
@@ -11,7 +11,7 @@ const EMPTY: i8 = 0;
 const PARKED: i8 = -1;

 pub struct Parker {
-    state: AtomicI8,
+    state: Atomic<i8>,
 }

 impl Parker {
diff --git a/library/std/src/sys/thread_local/key/xous.rs b/library/std/src/sys/thread_local/key/xous.rs
index 4fb2fdcc61925..824599e9c0ca9 100644
--- a/library/std/src/sys/thread_local/key/xous.rs
+++ b/library/std/src/sys/thread_local/key/xous.rs
@@ -52,19 +52,19 @@ const TLS_MEMORY_SIZE: usize = 4096;
 /// TLS keys start at `1`. Index `0` is unused
 #[cfg(not(test))]
 #[export_name = "_ZN16__rust_internals3std3sys4xous16thread_local_key13TLS_KEY_INDEXE"]
-static TLS_KEY_INDEX: AtomicUsize = AtomicUsize::new(1);
+static TLS_KEY_INDEX: Atomic<usize> = AtomicUsize::new(1);

 #[cfg(not(test))]
 #[export_name = "_ZN16__rust_internals3std3sys4xous16thread_local_key9DTORSE"]
-static DTORS: AtomicPtr<Node> = AtomicPtr::new(ptr::null_mut());
+static DTORS: Atomic<*mut Node> = AtomicPtr::new(ptr::null_mut());

 #[cfg(test)]
 extern "Rust" {
     #[link_name = "_ZN16__rust_internals3std3sys4xous16thread_local_key13TLS_KEY_INDEXE"]
-    static TLS_KEY_INDEX: AtomicUsize;
+    static TLS_KEY_INDEX: Atomic<usize>;
     #[link_name = "_ZN16__rust_internals3std3sys4xous16thread_local_key9DTORSE"]
-    static DTORS: AtomicPtr<Node>;
+    static DTORS: Atomic<*mut Node>;
 }

 fn tls_ptr_addr() -> *mut *mut u8 {
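`DTORS` above is the head of an intrusive destructor list behind an `Atomic<*mut Node>`. A Treiber-style push onto such a list looks roughly like this (nightly, `generic_atomic`; this `Node` layout is illustrative, not the xous one, and nodes are intentionally leaked to keep the example short):

```rust
// Sketch only: lock-free push onto a singly linked list headed by an atomic pointer.
#![feature(generic_atomic)]

use std::ptr;
use std::sync::atomic::{Atomic, AtomicPtr, Ordering::{Acquire, Relaxed, Release}};

struct Node {
    key: usize,
    next: *mut Node,
}

static DTORS: Atomic<*mut Node> = AtomicPtr::new(ptr::null_mut());

fn push(key: usize) {
    let node = Box::into_raw(Box::new(Node { key, next: ptr::null_mut() }));
    let mut head = DTORS.load(Relaxed);
    loop {
        unsafe { (*node).next = head };
        match DTORS.compare_exchange(head, node, Release, Acquire) {
            Ok(_) => break,
            Err(new_head) => head = new_head,
        }
    }
}

fn main() {
    push(1);
    push(2);
    // Walk the list: most recently pushed first.
    let mut cur = DTORS.load(Acquire);
    let mut keys = Vec::new();
    while !cur.is_null() {
        unsafe {
            keys.push((*cur).key);
            cur = (*cur).next;
        }
    }
    assert_eq!(keys, [2, 1]);
}
```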
diff --git a/library/std/src/sys_common/lazy_box.rs b/library/std/src/sys_common/lazy_box.rs
index b45b05f63baaa..dcf54ecc655f6 100644
--- a/library/std/src/sys_common/lazy_box.rs
+++ b/library/std/src/sys_common/lazy_box.rs
@@ -5,11 +5,11 @@ use crate::marker::PhantomData;
 use crate::ops::{Deref, DerefMut};
 use crate::ptr::null_mut;
-use crate::sync::atomic::AtomicPtr;
 use crate::sync::atomic::Ordering::{AcqRel, Acquire};
+use crate::sync::atomic::{Atomic, AtomicPtr};

 pub(crate) struct LazyBox<T: LazyInit> {
-    ptr: AtomicPtr<T>,
+    ptr: Atomic<*mut T>,
     _phantom: PhantomData<T>,
 }
diff --git a/library/std/src/thread/mod.rs b/library/std/src/thread/mod.rs
index 33cb7e0bcca82..2aebb39052f54 100644
--- a/library/std/src/thread/mod.rs
+++ b/library/std/src/thread/mod.rs
@@ -166,7 +166,7 @@ use crate::mem::{self, forget, ManuallyDrop};
 use crate::num::NonZero;
 use crate::pin::Pin;
 use crate::ptr::addr_of_mut;
-use crate::sync::atomic::{AtomicUsize, Ordering};
+use crate::sync::atomic::{Atomic, AtomicUsize, Ordering};
 use crate::sync::Arc;
 use crate::sys::sync::Parker;
 use crate::sys::thread as imp;
@@ -455,7 +455,7 @@ impl Builder {
         let Builder { name, stack_size } = self;

         let stack_size = stack_size.unwrap_or_else(|| {
-            static MIN: AtomicUsize = AtomicUsize::new(0);
+            static MIN: Atomic<usize> = AtomicUsize::new(0);

             match MIN.load(Ordering::Relaxed) {
                 0 => {}
@@ -1222,9 +1222,9 @@ impl ThreadId {
         cfg_if::cfg_if! {
             if #[cfg(target_has_atomic = "64")] {
-                use crate::sync::atomic::AtomicU64;
+                use crate::sync::atomic::{Atomic, AtomicU64};

-                static COUNTER: AtomicU64 = AtomicU64::new(0);
+                static COUNTER: Atomic<u64> = AtomicU64::new(0);

                 let mut last = COUNTER.load(Ordering::Relaxed);
                 loop {
diff --git a/library/std/src/thread/scoped.rs b/library/std/src/thread/scoped.rs
index ba27c9220aea5..91701ea1e96fc 100644
--- a/library/std/src/thread/scoped.rs
+++ b/library/std/src/thread/scoped.rs
@@ -1,7 +1,7 @@
 use super::{current, park, Builder, JoinInner, Result, Thread};
 use crate::marker::PhantomData;
 use crate::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
-use crate::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use crate::sync::atomic::{Atomic, AtomicBool, AtomicUsize, Ordering};
 use crate::sync::Arc;
 use crate::{fmt, io};
@@ -35,8 +35,8 @@ pub struct Scope<'scope, 'env: 'scope> {
 pub struct ScopedJoinHandle<'scope, T>(JoinInner<'scope, T>);

 pub(super) struct ScopeData {
-    num_running_threads: AtomicUsize,
-    a_thread_panicked: AtomicBool,
+    num_running_threads: Atomic<usize>,
+    a_thread_panicked: Atomic<bool>,
     main_thread: Thread,
 }
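Taken together, the pattern in this diff is mechanical: type annotations switch to `Atomic<T>`, while constructors and call sites keep using the concrete atomics, which works because `Atomic<T>` is a type alias for the corresponding concrete type. A small end-to-end sketch (nightly, `#![feature(generic_atomic)]`):

```rust
// Sketch only: `Atomic<usize>` and `AtomicUsize` are the same type, so annotations can
// change without touching any call sites.
#![feature(generic_atomic)]

use std::sync::atomic::{Atomic, AtomicUsize, Ordering::Relaxed};

static A: Atomic<usize> = AtomicUsize::new(0);
static B: AtomicUsize = AtomicUsize::new(0);

fn bump(counter: &Atomic<usize>) -> usize {
    counter.fetch_add(1, Relaxed) + 1
}

fn main() {
    assert_eq!(bump(&A), 1);
    // `&B` has type `&AtomicUsize`, which is exactly `&Atomic<usize>`.
    assert_eq!(bump(&B), 1);
}
```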