Auto merge of rust-lang#127912 - joboet:tls_dtor_thread_current, r=cuviper

std: make `thread::current` available in all `thread_local!` destructors

... and thereby allow the panic runtime to always print the right thread name.

This works by modifying the TLS destructor system to schedule a runtime cleanup function after all other TLS destructors registered by `std` have run. Unfortunately, this doesn't affect foreign TLS destructors; `thread::current` will still panic there.

Additionally, the thread ID returned by `current_id` will now always be available, even inside the global allocator, and will not change during the lifetime of one thread (this was previously the case with key-based TLS).

The mechanisms I added for this (`local_pointer` and `thread_cleanup`) will also allow finally fixing rust-lang#111272 by moving the signal stack to a similar runtime-cleanup TLS variable.
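As a quick illustration of what this change enables (a hypothetical example, not part of this commit): a `thread_local!` value whose destructor asks for the current thread's name now works on threads spawned through `std`:

```rust
use std::thread;

struct PrintNameOnDrop;

impl Drop for PrintNameOnDrop {
    fn drop(&mut self) {
        // After this change, `thread::current` is available inside
        // `thread_local!` destructors registered by `std`, so this
        // no longer panics and reports the real thread name.
        println!("TLS destructor on thread {:?}", thread::current().name());
    }
}

thread_local! {
    static GUARD: PrintNameOnDrop = PrintNameOnDrop;
}

fn main() {
    thread::Builder::new()
        .name("worker".into())
        .spawn(|| GUARD.with(|_| ()))
        .unwrap()
        .join()
        .unwrap();
}
```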
bors committed Oct 3, 2024
2 parents ad9c494 + a71ba0c commit fd1f8aa
Showing 21 changed files with 536 additions and 139 deletions.
34 changes: 25 additions & 9 deletions library/std/src/rt.rs
@@ -21,9 +21,10 @@ pub use crate::panicking::{begin_panic, panic_count};
pub use core::panicking::{panic_display, panic_fmt};

#[rustfmt::skip]
use crate::any::Any;
use crate::sync::Once;
use crate::sys;
use crate::thread::{self, Thread};
use crate::{mem, panic, sys};

// Prints to the "panic output", depending on the platform this may be:
// - the standard error output
@@ -66,6 +67,11 @@ macro_rules! rtunwrap {
};
}

fn handle_rt_panic(e: Box<dyn Any + Send>) {
mem::forget(e);
rtabort!("initialization or cleanup bug");
}

// One-time runtime initialization.
// Runs before `main`.
// SAFETY: must be called only once during runtime initialization.
@@ -101,6 +107,20 @@ unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) {
thread::set_current(thread);
}

/// Clean up the thread-local runtime state. This *should* be run after all other
/// code managed by the Rust runtime, but will not cause UB if that condition is
/// not fulfilled. Also note that this function is not guaranteed to be run, but
/// skipping it will cause leaks and therefore is to be avoided.
pub(crate) fn thread_cleanup() {
// This function is run in situations where unwinding leads to an abort
// (think `extern "C"` functions). Abort here instead so that we can
// print a nice message.
panic::catch_unwind(|| {
crate::thread::drop_current();
})
.unwrap_or_else(handle_rt_panic);
}

// One-time runtime cleanup.
// Runs after `main` or at program exit.
// NOTE: this is not guaranteed to run, for example when the program aborts.
@@ -123,11 +143,6 @@ fn lang_start_internal(
argv: *const *const u8,
sigpipe: u8,
) -> Result<isize, !> {
use crate::{mem, panic};
let rt_abort = move |e| {
mem::forget(e);
rtabort!("initialization or cleanup bug");
};
// Guard against the code called by this function from unwinding outside of the Rust-controlled
// code, which is UB. This is a requirement imposed by a combination of how the
// `#[lang="start"]` attribute is implemented as well as by the implementation of the panicking
@@ -139,16 +154,17 @@
// prevent std from accidentally introducing a panic to these functions. Another is from
// user code from `main` or, more nefariously, as described in e.g. issue #86030.
// SAFETY: Only called once during runtime initialization.
panic::catch_unwind(move || unsafe { init(argc, argv, sigpipe) }).map_err(rt_abort)?;
panic::catch_unwind(move || unsafe { init(argc, argv, sigpipe) })
.unwrap_or_else(handle_rt_panic);
let ret_code = panic::catch_unwind(move || panic::catch_unwind(main).unwrap_or(101) as isize)
.map_err(move |e| {
mem::forget(e);
rtabort!("drop of the panic payload panicked");
});
panic::catch_unwind(cleanup).map_err(rt_abort)?;
panic::catch_unwind(cleanup).unwrap_or_else(handle_rt_panic);
// Guard against multiple threads calling `libc::exit` concurrently.
// See the documentation for `unique_thread_exit` for more information.
panic::catch_unwind(|| crate::sys::exit_guard::unique_thread_exit()).map_err(rt_abort)?;
panic::catch_unwind(crate::sys::exit_guard::unique_thread_exit).unwrap_or_else(handle_rt_panic);
ret_code
}

6 changes: 5 additions & 1 deletion library/std/src/sys/pal/hermit/mod.rs
@@ -92,7 +92,11 @@ pub unsafe extern "C" fn runtime_entry(
unsafe {
crate::sys::thread_local::destructors::run();
}
unsafe { hermit_abi::exit(result) }
crate::rt::thread_cleanup();

unsafe {
hermit_abi::exit(result);
}
}

#[inline]
1 change: 1 addition & 0 deletions library/std/src/sys/pal/hermit/thread.rs
@@ -53,6 +53,7 @@ impl Thread {

// run all destructors
crate::sys::thread_local::destructors::run();
crate::rt::thread_cleanup();
}
}
}
6 changes: 4 additions & 2 deletions library/std/src/sys/sync/rwlock/queue.rs
@@ -113,7 +113,7 @@ use crate::mem;
use crate::ptr::{self, NonNull, null_mut, without_provenance_mut};
use crate::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release};
use crate::sync::atomic::{AtomicBool, AtomicPtr};
use crate::thread::{self, Thread};
use crate::thread::{self, Thread, ThreadId};

// Locking uses exponential backoff. `SPIN_COUNT` indicates how many times the
// locking operation will be retried.
@@ -200,7 +200,9 @@ impl Node {
fn prepare(&mut self) {
// Fall back to creating an unnamed `Thread` handle to allow locking in
// TLS destructors.
self.thread.get_or_init(|| thread::try_current().unwrap_or_else(Thread::new_unnamed));
self.thread.get_or_init(|| {
thread::try_current().unwrap_or_else(|| Thread::new_unnamed(ThreadId::new()))
});
self.completed = AtomicBool::new(false);
}

1 change: 1 addition & 0 deletions library/std/src/sys/thread_local/guard/apple.rs
@@ -26,6 +26,7 @@ pub fn enable() {
unsafe extern "C" fn run_dtors(_: *mut u8) {
unsafe {
destructors::run();
crate::rt::thread_cleanup();
}
}
}
39 changes: 38 additions & 1 deletion library/std/src/sys/thread_local/guard/key.rs
@@ -3,10 +3,12 @@
//! that will run all native TLS destructors in the destructor list.

use crate::ptr;
use crate::sys::thread_local::destructors;
use crate::sys::thread_local::key::{LazyKey, set};

#[cfg(target_thread_local)]
pub fn enable() {
use crate::sys::thread_local::destructors;

static DTORS: LazyKey = LazyKey::new(Some(run));

// Setting the key value to something other than NULL will result in the
@@ -18,6 +20,41 @@ pub fn enable() {
unsafe extern "C" fn run(_: *mut u8) {
unsafe {
destructors::run();
// On platforms with `__cxa_thread_atexit_impl`, `destructors::run`
// does nothing on newer systems as the TLS destructors are
// registered with the system. But because all of those platforms
// call the destructors of TLS keys after the registered ones, this
// function will still be run last (at the time of writing).
crate::rt::thread_cleanup();
}
}
}

/// On platforms with key-based TLS, the system runs the destructors for us.
/// We still have to make sure that [`crate::rt::thread_cleanup`] is called,
/// however. This is done by deferring the execution of a TLS destructor to
/// the next round of destruction inside the TLS destructors.
#[cfg(not(target_thread_local))]
pub fn enable() {
const DEFER: *mut u8 = ptr::without_provenance_mut(1);
const RUN: *mut u8 = ptr::without_provenance_mut(2);

static CLEANUP: LazyKey = LazyKey::new(Some(run));

unsafe { set(CLEANUP.force(), DEFER) }

unsafe extern "C" fn run(state: *mut u8) {
if state == DEFER {
// Make sure that this function is run again in the next round of
        // TLS destruction. If there is no further round, there will be leaks,
// but that's okay, `thread_cleanup` is not guaranteed to be called.
unsafe { set(CLEANUP.force(), RUN) }
} else {
debug_assert_eq!(state, RUN);
// If the state is still RUN in the next round of TLS destruction,
// it means that no other TLS destructors defined by this runtime
// have been run, as they would have set the state to DEFER.
crate::rt::thread_cleanup();
}
}
}
5 changes: 4 additions & 1 deletion library/std/src/sys/thread_local/guard/solid.rs
@@ -19,6 +19,9 @@ pub fn enable() {
}

unsafe extern "C" fn tls_dtor(_unused: *mut u8) {
unsafe { destructors::run() };
unsafe {
destructors::run();
crate::rt::thread_cleanup();
}
}
}
8 changes: 4 additions & 4 deletions library/std/src/sys/thread_local/guard/windows.rs
@@ -80,13 +80,13 @@ pub static CALLBACK: unsafe extern "system" fn(*mut c_void, u32, *mut c_void) =

unsafe extern "system" fn tls_callback(_h: *mut c_void, dw_reason: u32, _pv: *mut c_void) {
if dw_reason == c::DLL_THREAD_DETACH || dw_reason == c::DLL_PROCESS_DETACH {
#[cfg(target_thread_local)]
unsafe {
#[cfg(target_thread_local)]
super::super::destructors::run();
}
#[cfg(not(target_thread_local))]
unsafe {
#[cfg(not(target_thread_local))]
super::super::key::run_dtors();

crate::rt::thread_cleanup();
}
}
}
2 changes: 2 additions & 0 deletions library/std/src/sys/thread_local/key/xous.rs
@@ -212,4 +212,6 @@ unsafe fn run_dtors() {
unsafe { cur = (*cur).next };
}
}

crate::rt::thread_cleanup();
}
48 changes: 31 additions & 17 deletions library/std/src/sys/thread_local/mod.rs
@@ -31,12 +31,15 @@ cfg_if::cfg_if! {
))] {
mod statik;
pub use statik::{EagerStorage, LazyStorage, thread_local_inner};
pub(crate) use statik::{LocalPointer, local_pointer};
} else if #[cfg(target_thread_local)] {
mod native;
pub use native::{EagerStorage, LazyStorage, thread_local_inner};
pub(crate) use native::{LocalPointer, local_pointer};
} else {
mod os;
pub use os::{Storage, thread_local_inner};
pub(crate) use os::{LocalPointer, local_pointer};
}
}

@@ -72,36 +75,47 @@ pub(crate) mod destructors {
}

/// This module provides a way to schedule the execution of the destructor list
/// on systems without a per-variable destructor system.
mod guard {
/// and the [runtime cleanup](crate::rt::thread_cleanup) function. Calling `enable`
/// should ensure that these functions are called at the right times.
pub(crate) mod guard {
cfg_if::cfg_if! {
if #[cfg(all(target_thread_local, target_vendor = "apple"))] {
mod apple;
pub(super) use apple::enable;
pub(crate) use apple::enable;
} else if #[cfg(target_os = "windows")] {
mod windows;
pub(super) use windows::enable;
pub(crate) use windows::enable;
} else if #[cfg(any(
all(target_family = "wasm", target_feature = "atomics"),
target_family = "wasm",
target_os = "uefi",
target_os = "zkvm",
))] {
pub(super) fn enable() {
// FIXME: Right now there is no concept of "thread exit", but
// this is likely going to show up at some point in the form of
// an exported symbol that the wasm runtime is going to be
// expected to call. For now we just leak everything, but if
// such a function starts to exist it will probably need to
// iterate the destructor list with this function:
pub(crate) fn enable() {
// FIXME: Right now there is no concept of "thread exit" on
// wasm, but this is likely going to show up at some point in
// the form of an exported symbol that the wasm runtime is going
// to be expected to call. For now we just leak everything, but
// if such a function starts to exist it will probably need to
// iterate the destructor list with these functions:
#[cfg(all(target_family = "wasm", target_feature = "atomics"))]
#[allow(unused)]
use super::destructors::run;
#[allow(unused)]
use crate::rt::thread_cleanup;
}
} else if #[cfg(target_os = "hermit")] {
pub(super) fn enable() {}
} else if #[cfg(any(
target_os = "hermit",
target_os = "xous",
))] {
// `std` is the only runtime, so it just calls the destructor functions
// itself when the time comes.
pub(crate) fn enable() {}
} else if #[cfg(target_os = "solid_asp3")] {
mod solid;
pub(super) use solid::enable;
} else if #[cfg(all(target_thread_local, not(target_family = "wasm")))] {
pub(crate) use solid::enable;
} else {
mod key;
pub(super) use key::enable;
pub(crate) use key::enable;
}
}
}
31 changes: 31 additions & 0 deletions library/std/src/sys/thread_local/native/mod.rs
@@ -29,6 +29,9 @@
//! eliminates the `Destroyed` state for these values, which can allow more niche
//! optimizations to occur for the `State` enum. For `Drop` types, `()` is used.

use crate::cell::Cell;
use crate::ptr;

mod eager;
mod lazy;

@@ -107,3 +110,31 @@ pub macro thread_local_inner {
$crate::thread::local_impl::thread_local_inner!(@key $t, $($init)*);
},
}

#[rustc_macro_transparency = "semitransparent"]
pub(crate) macro local_pointer {
() => {},
($vis:vis static $name:ident; $($rest:tt)*) => {
#[thread_local]
$vis static $name: $crate::sys::thread_local::LocalPointer = $crate::sys::thread_local::LocalPointer::__new();
$crate::sys::thread_local::local_pointer! { $($rest)* }
},
}

pub(crate) struct LocalPointer {
p: Cell<*mut ()>,
}

impl LocalPointer {
pub const fn __new() -> LocalPointer {
LocalPointer { p: Cell::new(ptr::null_mut()) }
}

pub fn get(&self) -> *mut () {
self.p.get()
}

pub fn set(&self, p: *mut ()) {
self.p.set(p)
}
}
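For orientation, here is a rough stable-Rust analogue of the `LocalPointer` API introduced above (a sketch only; the `SLOT` name and the `thread_local!`-based storage are stand-ins — inside `std`, `local_pointer!` instead expands to a `#[thread_local]` cell on native-TLS targets and to an OS TLS key otherwise):

```rust
use std::cell::Cell;
use std::ptr;

// A single pointer-sized per-thread slot with `get`/`set`, mirroring the
// `LocalPointer` interface defined in the diff above.
thread_local! {
    static SLOT: Cell<*mut ()> = const { Cell::new(ptr::null_mut()) };
}

fn main() {
    let mut value = 42i32;
    // Store a pointer for this thread and read it back.
    SLOT.with(|slot| slot.set(&mut value as *mut i32 as *mut ()));
    SLOT.with(|slot| assert!(!slot.get().is_null()));
}
```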
34 changes: 32 additions & 2 deletions library/std/src/sys/thread_local/os.rs
@@ -1,8 +1,8 @@
use super::abort_on_dtor_unwind;
use super::key::{Key, LazyKey, get, set};
use super::{abort_on_dtor_unwind, guard};
use crate::cell::Cell;
use crate::marker::PhantomData;
use crate::ptr;
use crate::sys::thread_local::key::{Key, LazyKey, get, set};

#[doc(hidden)]
#[allow_internal_unstable(thread_local_internals)]
@@ -138,5 +138,35 @@ unsafe extern "C" fn destroy_value<T: 'static>(ptr: *mut u8) {
drop(ptr);
// SAFETY: `key` is the TLS key `ptr` was stored under.
unsafe { set(key, ptr::null_mut()) };
// Make sure that the runtime cleanup will be performed
// after the next round of TLS destruction.
guard::enable();
});
}

#[rustc_macro_transparency = "semitransparent"]
pub(crate) macro local_pointer {
() => {},
($vis:vis static $name:ident; $($rest:tt)*) => {
$vis static $name: $crate::sys::thread_local::LocalPointer = $crate::sys::thread_local::LocalPointer::__new();
$crate::sys::thread_local::local_pointer! { $($rest)* }
},
}

pub(crate) struct LocalPointer {
key: LazyKey,
}

impl LocalPointer {
pub const fn __new() -> LocalPointer {
LocalPointer { key: LazyKey::new(None) }
}

pub fn get(&'static self) -> *mut () {
unsafe { get(self.key.force()) as *mut () }
}

pub fn set(&'static self, p: *mut ()) {
unsafe { set(self.key.force(), p as *mut u8) }
}
}