riscv32: Support 64-bit atomics (Zacas extension)
taiki-e committed Sep 18, 2024
1 parent f1238b1 commit 00722a4
Showing 5 changed files with 521 additions and 8 deletions.
15 changes: 11 additions & 4 deletions .github/workflows/ci.yml
@@ -253,6 +253,10 @@ jobs:
# target: powerpc64le-unknown-linux-gnu
- rust: nightly
target: powerpc64le-unknown-linux-gnu
- rust: nightly-2024-02-13 # Rust 1.78, LLVM 17
target: riscv32gc-unknown-linux-gnu
- rust: nightly-2024-07-31 # Rust 1.82, LLVM 18
target: riscv32gc-unknown-linux-gnu
- rust: nightly
target: riscv32gc-unknown-linux-gnu
- rust: nightly-2021-08-21 # Rust 1.56, LLVM 12
@@ -317,6 +321,9 @@ jobs:
# TODO: LLVM bug: Undefined temporary symbol error when building std.
- run: printf 'RELEASE=--release\n' >>"${GITHUB_ENV}"
if: startsWith(matrix.target, 'mips-') || startsWith(matrix.target, 'mipsel-')
# for serde
- run: printf '%s\n' "RUSTFLAGS=${RUSTFLAGS} --cfg no_diagnostic_namespace" >>"${GITHUB_ENV}"
if: matrix.rust == 'nightly-2024-02-13'

- run: tools/test.sh -vv ${TARGET:-} ${DOCTEST_XCOMPILE:-} ${BUILD_STD:-} ${RELEASE:-}
# We test doctest only once with the default build conditions because doctest is slow. Both api-test
@@ -388,21 +395,21 @@ jobs:
RUSTDOCFLAGS: ${{ env.RUSTDOCFLAGS }} -C target-cpu=pwr8
RUSTFLAGS: ${{ env.RUSTFLAGS }} -C target-cpu=pwr8
if: startsWith(matrix.target, 'powerpc64-')
# riscv64 +zabha
# riscv +zabha
- run: tools/test.sh -vv --tests ${TARGET:-} ${BUILD_STD:-} ${RELEASE:-}
env:
RUSTDOCFLAGS: ${{ env.RUSTDOCFLAGS }} -C target-feature=+zabha
RUSTFLAGS: ${{ env.RUSTFLAGS }} -C target-feature=+zabha
QEMU_CPU: max
# TODO: cranelift doesn't support cfg(target_feature): https://github.com/rust-lang/rustc_codegen_cranelift/issues/1400
if: startsWith(matrix.target, 'riscv64') && !contains(matrix.flags, 'codegen-backend=cranelift')
# riscv64 +experimental-zacas
if: startsWith(matrix.target, 'riscv') && !contains(matrix.flags, 'codegen-backend=cranelift')
# riscv +experimental-zacas
- run: tools/test.sh -vv --tests ${TARGET:-} ${BUILD_STD:-} ${RELEASE:-}
env:
RUSTDOCFLAGS: ${{ env.RUSTDOCFLAGS }} -C target-feature=+experimental-zacas
RUSTFLAGS: ${{ env.RUSTFLAGS }} -C target-feature=+experimental-zacas
# TODO: cranelift doesn't support cfg(target_feature): https://github.com/rust-lang/rustc_codegen_cranelift/issues/1400
if: startsWith(matrix.target, 'riscv64') && !contains(matrix.flags, 'codegen-backend=cranelift')
if: startsWith(matrix.target, 'riscv') && !contains(matrix.flags, 'codegen-backend=cranelift')
# s390x z196 (arch9)
- run: tools/test.sh -vv --tests ${TARGET:-} ${BUILD_STD:-} ${RELEASE:-}
env:
2 changes: 0 additions & 2 deletions src/imp/atomic128/riscv64.rs
@@ -18,8 +18,6 @@ Generated asm:
- riscv64 (+experimental-zacas) https://godbolt.org/z/crdhjKPdq
*/

// TODO: 64-bit atomic using amocas.d for riscv32

include!("macros.rs");

// TODO
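The TODO removed above is what this commit implements: on RV32, the Zacas extension's amocas.d instruction performs a 64-bit compare-and-swap using even/odd register pairs. Below is a minimal hypothetical sketch of the idea, not the crate's actual code; it assumes the crate is built with the Zacas feature enabled (e.g. -C target-feature=+experimental-zacas, as in the CI step above), and the a2/a3 and a4/a5 register pairs are chosen purely for illustration:

// Hypothetical sketch of a sequentially consistent 64-bit CAS on riscv32
// via Zacas. On RV32, amocas.d takes even/odd register pairs: (a2, a3)
// holds the expected value and receives the value previously in memory,
// and (a4, a5) holds the desired value.
#[cfg(target_arch = "riscv32")]
unsafe fn cas64_seqcst(dst: *mut u64, old: u64, new: u64) -> u64 {
    let mut prev_lo = old as u32;
    let mut prev_hi = (old >> 32) as u32;
    core::arch::asm!(
        "amocas.d.aqrl a2, a4, ({dst})",
        dst = in(reg) dst,
        inout("a2") prev_lo,
        inout("a3") prev_hi,
        in("a4") new as u32,
        in("a5") (new >> 32) as u32,
        options(nostack),
    );
    (prev_lo as u64) | ((prev_hi as u64) << 32)
}

A 64-bit atomic load can be derived from the same instruction by passing equal expected and desired values (e.g. zero): the CAS then either stores back what was already in memory or fails without storing, and in both cases it returns the current value. That is presumably how the new atomic64 backend below builds its other operations on riscv32.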
237 changes: 237 additions & 0 deletions src/imp/atomic64/macros.rs
@@ -0,0 +1,237 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT

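// Expands to a 64-bit atomic integer type `$atomic_type` wrapping `$int_type`.
// The `atomic_*` free functions and the `is_lock_free` / `IS_ALWAYS_LOCK_FREE`
// items used below are expected to be provided by the file that includes this
// macro; `$atomic_max` / `$atomic_min` select the signed or unsigned variants
// of the max/min helpers.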
macro_rules! atomic64 {
($atomic_type:ident, $int_type:ident, $atomic_max:ident, $atomic_min:ident) => {
#[repr(C, align(8))]
pub(crate) struct $atomic_type {
v: core::cell::UnsafeCell<$int_type>,
}

// Send is implicitly implemented.
// SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock.
unsafe impl Sync for $atomic_type {}

impl_default_no_fetch_ops!($atomic_type, $int_type);
impl_default_bit_opts!($atomic_type, $int_type);
impl $atomic_type {
#[inline]
pub(crate) const fn new(v: $int_type) -> Self {
Self { v: core::cell::UnsafeCell::new(v) }
}

#[inline]
pub(crate) fn is_lock_free() -> bool {
is_lock_free()
}
pub(crate) const IS_ALWAYS_LOCK_FREE: bool = IS_ALWAYS_LOCK_FREE;

#[inline]
pub(crate) fn get_mut(&mut self) -> &mut $int_type {
// SAFETY: the mutable reference guarantees unique ownership.
// (UnsafeCell::get_mut requires Rust 1.50)
unsafe { &mut *self.v.get() }
}

#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn load(&self, order: Ordering) -> $int_type {
crate::utils::assert_load_ordering(order);
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe {
atomic_load(self.v.get().cast::<u64>(), order) as $int_type
}
}

#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn store(&self, val: $int_type, order: Ordering) {
crate::utils::assert_store_ordering(order);
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe {
atomic_store(self.v.get().cast::<u64>(), val as u64, order)
}
}

#[inline]
pub(crate) fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe {
atomic_swap(self.v.get().cast::<u64>(), val as u64, order) as $int_type
}
}

#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn compare_exchange(
&self,
current: $int_type,
new: $int_type,
success: Ordering,
failure: Ordering,
) -> Result<$int_type, $int_type> {
crate::utils::assert_compare_exchange_ordering(success, failure);
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe {
match atomic_compare_exchange(
self.v.get().cast::<u64>(),
current as u64,
new as u64,
success,
failure,
) {
Ok(v) => Ok(v as $int_type),
Err(v) => Err(v as $int_type),
}
}
}

#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn compare_exchange_weak(
&self,
current: $int_type,
new: $int_type,
success: Ordering,
failure: Ordering,
) -> Result<$int_type, $int_type> {
crate::utils::assert_compare_exchange_ordering(success, failure);
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe {
match atomic_compare_exchange_weak(
self.v.get().cast::<u64>(),
current as u64,
new as u64,
success,
failure,
) {
Ok(v) => Ok(v as $int_type),
Err(v) => Err(v as $int_type),
}
}
}

#[inline]
pub(crate) fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe {
atomic_add(self.v.get().cast::<u64>(), val as u64, order) as $int_type
}
}

#[inline]
pub(crate) fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe {
atomic_sub(self.v.get().cast::<u64>(), val as u64, order) as $int_type
}
}

#[inline]
pub(crate) fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe {
atomic_and(self.v.get().cast::<u64>(), val as u64, order) as $int_type
}
}

#[inline]
pub(crate) fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe {
atomic_nand(self.v.get().cast::<u64>(), val as u64, order) as $int_type
}
}

#[inline]
pub(crate) fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe {
atomic_or(self.v.get().cast::<u64>(), val as u64, order) as $int_type
}
}

#[inline]
pub(crate) fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe {
atomic_xor(self.v.get().cast::<u64>(), val as u64, order) as $int_type
}
}

#[inline]
pub(crate) fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe {
$atomic_max(self.v.get().cast::<u64>(), val as u64, order) as $int_type
}
}

#[inline]
pub(crate) fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe {
$atomic_min(self.v.get().cast::<u64>(), val as u64, order) as $int_type
}
}

#[inline]
pub(crate) fn fetch_not(&self, order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe {
atomic_not(self.v.get().cast::<u64>(), order) as $int_type
}
}
#[inline]
pub(crate) fn not(&self, order: Ordering) {
self.fetch_not(order);
}

#[inline]
pub(crate) fn fetch_neg(&self, order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe {
atomic_neg(self.v.get().cast::<u64>(), order) as $int_type
}
}
#[inline]
pub(crate) fn neg(&self, order: Ordering) {
self.fetch_neg(order);
}

#[inline]
pub(crate) const fn as_ptr(&self) -> *mut $int_type {
self.v.get()
}
}
};
}
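Elsewhere in the commit, in the implementation files not shown above, this macro is presumably invoked once per 64-bit integer type, along these lines (hypothetical call sites; the helper names mirror the macro's parameters):

atomic64!(AtomicI64, i64, atomic_max, atomic_min);
atomic64!(AtomicU64, u64, atomic_umax, atomic_umin);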