Rollup of 8 pull requests #115767

Merged 19 commits on Sep 11, 2023.
Commits
c18da3c  Add regression test for LLVM 17-rc3 miscompile (Sep 6, 2023)
fe61471  Extract parallel operations in `rustc_data_structures::sync` into a n… (Zoxc, Sep 4, 2023)
ddd8878  Address feedback (Sep 6, 2023)
086cf34  Don't ICE when computing ctype's repr_nullable_ptr for possibly-unsiz… (compiler-errors, Sep 7, 2023)
254e13d  fix homogeneous_aggregate not ignoring some 1-ZST (RalfJung, Sep 10, 2023)
0ed2914  Rename after_parsing callback to after_crate_root_parsing (bjorn3, Sep 10, 2023)
90e9053  Deprecate the pre_configure query (bjorn3, Sep 10, 2023)
2eca717  Remove EarlyErrorHandler argument from after_analysis callback (bjorn3, Sep 10, 2023)
c2e7900  Allow loading the SMIR for constants and statics (oli-obk, Sep 11, 2023)
b99ace4  Add a test for #108030 (DianQK, Sep 11, 2023)
5dd01cc  Update books (rustbot, Sep 11, 2023)
f3cc59b  Rollup merge of #115548 - Zoxc:parallel-extract, r=wesleywiser (matthiaskrgr, Sep 11, 2023)
7a4904c  Rollup merge of #115591 - djkoloski:issue_115385, r=cuviper (matthiaskrgr, Sep 11, 2023)
5a2b589  Rollup merge of #115631 - compiler-errors:ctypes-unsized, r=davidtwco (matthiaskrgr, Sep 11, 2023)
279e257  Rollup merge of #115708 - RalfJung:homogeneous, r=davidtwco (matthiaskrgr, Sep 11, 2023)
c943ec2  Rollup merge of #115730 - bjorn3:some_driver_refactors, r=compiler-er… (matthiaskrgr, Sep 11, 2023)
2a087be  Rollup merge of #115749 - oli-obk:smir_consts, r=compiler-errors (matthiaskrgr, Sep 11, 2023)
48a1033  Rollup merge of #115757 - DianQK:lto-linkage-used-attr, r=wesleywiser (matthiaskrgr, Sep 11, 2023)
059231f  Rollup merge of #115761 - rustbot:docs-update, r=ehuss (matthiaskrgr, Sep 11, 2023)
176 changes: 5 additions & 171 deletions compiler/rustc_data_structures/src/sync.rs
@@ -41,19 +41,21 @@
//! [^2] `MTLockRef` is a typedef.

pub use crate::marker::*;
use parking_lot::Mutex;
use std::any::Any;
use std::collections::HashMap;
use std::hash::{BuildHasher, Hash};
use std::ops::{Deref, DerefMut};
use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};

mod lock;
pub use lock::{Lock, LockGuard, Mode};

mod worker_local;
pub use worker_local::{Registry, WorkerLocal};

mod parallel;
#[cfg(parallel_compiler)]
pub use parallel::scope;
pub use parallel::{join, par_for_each_in, par_map, parallel_guard};

pub use std::sync::atomic::Ordering;
pub use std::sync::atomic::Ordering::SeqCst;

@@ -107,37 +109,6 @@ mod mode {

pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};

/// A guard used to hold panics that occur during a parallel section, to later be unwound.
/// This is used for the parallel compiler to prevent fatal errors from non-deterministically
/// hiding errors by ensuring that everything in the section has completed executing before
/// continuing with unwinding. It's also used for the non-parallel code to ensure error message
/// output matches the parallel compiler for testing purposes.
pub struct ParallelGuard {
panic: Mutex<Option<Box<dyn Any + std::marker::Send + 'static>>>,
}

impl ParallelGuard {
pub fn run<R>(&self, f: impl FnOnce() -> R) -> Option<R> {
catch_unwind(AssertUnwindSafe(f))
.map_err(|err| {
*self.panic.lock() = Some(err);
})
.ok()
}
}

/// Gives the closure access to a fresh `ParallelGuard` and, after the closure returns,
/// resumes unwinding any panic that the guard caught.
#[inline]
pub fn parallel_guard<R>(f: impl FnOnce(&ParallelGuard) -> R) -> R {
let guard = ParallelGuard { panic: Mutex::new(None) };
let ret = f(&guard);
if let Some(panic) = guard.panic.into_inner() {
resume_unwind(panic);
}
ret
}
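
// Illustrative usage sketch, not part of this pull request's diff: how a caller
// might drive `parallel_guard`. The `work_a`/`work_b` helpers are hypothetical
// stand-ins for arbitrary compiler tasks.
fn parallel_guard_usage_sketch() -> (usize, usize) {
    fn work_a() -> usize { 1 }
    fn work_b() -> usize { 2 }

    let (a, b) = parallel_guard(|guard| {
        // Both closures run to completion even if one of them panics; a caught
        // panic is only resumed after the outer closure returns.
        (guard.run(work_a), guard.run(work_b))
    });
    // `ParallelGuard::run` returns `None` only for a closure that panicked, so
    // if control reaches this point both results are present.
    (a.unwrap(), b.unwrap())
}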

cfg_if! {
if #[cfg(not(parallel_compiler))] {
use std::ops::Add;
@@ -229,44 +200,6 @@ cfg_if! {
pub type AtomicU32 = Atomic<u32>;
pub type AtomicU64 = Atomic<u64>;

pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
where A: FnOnce() -> RA,
B: FnOnce() -> RB
{
let (a, b) = parallel_guard(|guard| {
let a = guard.run(oper_a);
let b = guard.run(oper_b);
(a, b)
});
(a.unwrap(), b.unwrap())
}

#[macro_export]
macro_rules! parallel {
($($blocks:block),*) => {{
$crate::sync::parallel_guard(|guard| {
$(guard.run(|| $blocks);)*
});
}}
}

pub fn par_for_each_in<T: IntoIterator>(t: T, mut for_each: impl FnMut(T::Item) + Sync + Send) {
parallel_guard(|guard| {
t.into_iter().for_each(|i| {
guard.run(|| for_each(i));
});
})
}

pub fn par_map<T: IntoIterator, R, C: FromIterator<R>>(
t: T,
mut map: impl FnMut(<<T as IntoIterator>::IntoIter as Iterator>::Item) -> R,
) -> C {
parallel_guard(|guard| {
t.into_iter().filter_map(|i| guard.run(|| map(i))).collect()
})
}

pub use std::rc::Rc as Lrc;
pub use std::rc::Weak as Weak;
pub use std::cell::Ref as ReadGuard;
@@ -372,105 +305,6 @@ cfg_if! {

use std::thread;

#[inline]
pub fn join<A, B, RA: DynSend, RB: DynSend>(oper_a: A, oper_b: B) -> (RA, RB)
where
A: FnOnce() -> RA + DynSend,
B: FnOnce() -> RB + DynSend,
{
if mode::is_dyn_thread_safe() {
let oper_a = FromDyn::from(oper_a);
let oper_b = FromDyn::from(oper_b);
let (a, b) = rayon::join(move || FromDyn::from(oper_a.into_inner()()), move || FromDyn::from(oper_b.into_inner()()));
(a.into_inner(), b.into_inner())
} else {
let (a, b) = parallel_guard(|guard| {
let a = guard.run(oper_a);
let b = guard.run(oper_b);
(a, b)
});
(a.unwrap(), b.unwrap())
}
}
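
        // Illustrative usage sketch, not part of this pull request's diff: `join`
        // has the same shape for callers in both branches of the `cfg_if!`. The
        // task names are hypothetical.
        fn join_usage_sketch() -> (u32, u32) {
            fn check_crate() -> u32 { 0 }
            fn lint_crate() -> u32 { 0 }

            // Runs on two Rayon workers when dynamic thread safety is enabled
            // (e.g. via `-Zthreads`), and sequentially under a `ParallelGuard`
            // otherwise.
            join(check_crate, lint_crate)
        }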

// This function only works when `mode::is_dyn_thread_safe()`.
pub fn scope<'scope, OP, R>(op: OP) -> R
where
OP: FnOnce(&rayon::Scope<'scope>) -> R + DynSend,
R: DynSend,
{
let op = FromDyn::from(op);
rayon::scope(|s| FromDyn::from(op.into_inner()(s))).into_inner()
}

/// Runs a list of blocks in parallel. The first block is executed immediately on
/// the current thread. Use that for the longest running block.
#[macro_export]
macro_rules! parallel {
(impl $fblock:block [$($c:expr,)*] [$block:expr $(, $rest:expr)*]) => {
parallel!(impl $fblock [$block, $($c,)*] [$($rest),*])
};
(impl $fblock:block [$($blocks:expr,)*] []) => {
::rustc_data_structures::sync::scope(|s| {
$(let block = rustc_data_structures::sync::FromDyn::from(|| $blocks);
s.spawn(move |_| block.into_inner()());)*
(|| $fblock)();
});
};
($fblock:block, $($blocks:block),*) => {
if rustc_data_structures::sync::is_dyn_thread_safe() {
// Reverse the order of the later blocks since Rayon executes them in reverse order
// when using a single thread. This ensures the execution order matches that
// of a single threaded rustc.
parallel!(impl $fblock [] [$($blocks),*]);
} else {
$crate::sync::parallel_guard(|guard| {
guard.run(|| $fblock);
$(guard.run(|| $blocks);)*
});
}
};
}
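
        // Illustrative usage sketch, not part of this pull request's diff, written
        // as it would appear at a call site in a crate that depends on
        // `rustc_data_structures`. The pass names are hypothetical. Per the doc
        // comment above, the first block should be the longest-running one, since
        // it executes on the current thread.
        fn parallel_macro_usage_sketch() {
            fn expensive_pass() {}
            fn cheap_pass_a() {}
            fn cheap_pass_b() {}

            // Expands to a Rayon `scope` when dynamic thread safety is enabled,
            // and to guarded sequential execution otherwise.
            parallel!(
                { expensive_pass() },
                { cheap_pass_a() },
                { cheap_pass_b() }
            );
        }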

use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelIterator};

pub fn par_for_each_in<I, T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>>(
t: T,
for_each: impl Fn(I) + DynSync + DynSend
) {
parallel_guard(|guard| {
if mode::is_dyn_thread_safe() {
let for_each = FromDyn::from(for_each);
t.into_par_iter().for_each(|i| {
guard.run(|| for_each(i));
});
} else {
t.into_iter().for_each(|i| {
guard.run(|| for_each(i));
});
}
});
}

pub fn par_map<
I,
T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>,
R: std::marker::Send,
C: FromIterator<R> + FromParallelIterator<R>
>(
t: T,
map: impl Fn(I) -> R + DynSync + DynSend
) -> C {
parallel_guard(|guard| {
if mode::is_dyn_thread_safe() {
let map = FromDyn::from(map);
t.into_par_iter().filter_map(|i| guard.run(|| map(i))).collect()
} else {
t.into_iter().filter_map(|i| guard.run(|| map(i))).collect()
}
})
}
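
        // Illustrative usage sketch, not part of this pull request's diff: the
        // items and the per-item work are hypothetical.
        fn par_iter_usage_sketch() {
            let items: Vec<u32> = vec![1, 2, 3];

            // Visits every item, in parallel when dynamic thread safety is
            // enabled; a panic in the closure is deferred until the loop finishes.
            par_for_each_in(&items, |i| {
                let _ = i * 2;
            });

            // Same guard behaviour, but collecting results: results from closures
            // that panicked are skipped, and the panic is resumed once the whole
            // map completes.
            let doubled: Vec<u32> = par_map(items, |i| i * 2);
            assert_eq!(doubled, vec![2, 4, 6]);
        }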

/// This makes locks panic if they are already held.
/// It is only useful when you are running in a single thread
const ERROR_CHECKING: bool = false;