Move definition of AtomicBool to crate root
This is defined the same way in all backends.
taiki-e committed Jul 27, 2023
1 parent ecbc3d0 commit 50a40b9
Showing 5 changed files with 79 additions and 318 deletions.
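
As the commit message says, every backend defined AtomicBool the same way, so the definition moves up to the crate root and the per-backend copies below are deleted. The new crate-root definition itself is not part of the hunks loaded here, but the removed code suggests its likely shape: a one-byte wrapper that delegates every operation to the backend's AtomicU8. The following is only a rough sketch of that pattern, using core's AtomicU8 as a stand-in for the backend type; none of it is taken verbatim from this commit.

use core::cell::UnsafeCell;
use core::sync::atomic::{AtomicU8, Ordering};

// Sketch only: in portable-atomic the delegation target would be the selected
// backend's AtomicU8, not core's.
#[repr(C, align(1))]
pub struct AtomicBool {
    v: UnsafeCell<u8>,
}

// SAFETY: data races are prevented by the atomic operations used below.
unsafe impl Sync for AtomicBool {}

impl AtomicBool {
    pub const fn new(v: bool) -> Self {
        Self { v: UnsafeCell::new(v as u8) }
    }

    fn as_atomic_u8(&self) -> &AtomicU8 {
        // SAFETY: both types are a single byte inside an UnsafeCell with the
        // same alignment, so the reference cast is sound.
        unsafe { &*(self as *const AtomicBool).cast::<AtomicU8>() }
    }

    pub fn load(&self, order: Ordering) -> bool {
        self.as_atomic_u8().load(order) != 0
    }

    pub fn store(&self, val: bool, order: Ordering) {
        self.as_atomic_u8().store(val as u8, order);
    }
}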
src/imp/core_atomic.rs: 0 additions & 113 deletions
@@ -18,119 +18,6 @@ struct NoRefUnwindSafe(UnsafeCell<()>);
// SAFETY: this is a marker type and we'll never access the value.
unsafe impl Sync for NoRefUnwindSafe {}

#[cfg(not(portable_atomic_no_atomic_load_store))]
#[repr(transparent)]
pub(crate) struct AtomicBool {
    inner: core::sync::atomic::AtomicBool,
    // Prevent RefUnwindSafe from being propagated from the std atomic type.
    _marker: PhantomData<NoRefUnwindSafe>,
}
#[cfg(not(portable_atomic_no_atomic_load_store))]
impl AtomicBool {
    #[inline]
    pub(crate) const fn new(v: bool) -> Self {
        Self { inner: core::sync::atomic::AtomicBool::new(v), _marker: PhantomData }
    }
    #[inline]
    pub(crate) fn is_lock_free() -> bool {
        Self::is_always_lock_free()
    }
    #[inline]
    pub(crate) const fn is_always_lock_free() -> bool {
        true
    }
    #[inline]
    pub(crate) fn get_mut(&mut self) -> &mut bool {
        self.inner.get_mut()
    }
    #[inline]
    pub(crate) fn into_inner(self) -> bool {
        self.inner.into_inner()
    }
    #[inline]
    #[cfg_attr(
        any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
        track_caller
    )]
    pub(crate) fn load(&self, order: Ordering) -> bool {
        crate::utils::assert_load_ordering(order); // for track_caller (compiler can omit double check)
        self.inner.load(order)
    }
    #[inline]
    #[cfg_attr(
        any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
        track_caller
    )]
    pub(crate) fn store(&self, val: bool, order: Ordering) {
        crate::utils::assert_store_ordering(order); // for track_caller (compiler can omit double check)
        self.inner.store(val, order);
    }
    const_fn! {
        const_if: #[cfg(not(portable_atomic_no_const_raw_ptr_deref))];
        #[inline]
        pub(crate) const fn as_ptr(&self) -> *mut bool {
            // SAFETY: Self is #[repr(C)] and internally UnsafeCell<u8>.
            // See also https://github.com/rust-lang/rust/pull/66705 and
            // https://github.com/rust-lang/rust/issues/66136#issuecomment-557867116.
            unsafe {
                (*(self as *const Self as *const UnsafeCell<u8>)).get() as *mut bool
            }
        }
    }
}
#[cfg(not(portable_atomic_no_atomic_load_store))]
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_cas)))]
#[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
impl_default_no_fetch_ops!(AtomicBool, bool);
#[cfg(not(portable_atomic_no_atomic_load_store))]
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_cas)))]
#[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
impl AtomicBool {
    #[inline]
    #[cfg_attr(
        any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
        track_caller
    )]
    pub(crate) fn compare_exchange(
        &self,
        current: bool,
        new: bool,
        success: Ordering,
        failure: Ordering,
    ) -> Result<bool, bool> {
        crate::utils::assert_compare_exchange_ordering(success, failure); // for track_caller (compiler can omit double check)
        #[cfg(portable_atomic_no_stronger_failure_ordering)]
        let success = crate::utils::upgrade_success_ordering(success, failure);
        self.inner.compare_exchange(current, new, success, failure)
    }
    #[inline]
    #[cfg_attr(
        any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
        track_caller
    )]
    pub(crate) fn compare_exchange_weak(
        &self,
        current: bool,
        new: bool,
        success: Ordering,
        failure: Ordering,
    ) -> Result<bool, bool> {
        crate::utils::assert_compare_exchange_ordering(success, failure); // for track_caller (compiler can omit double check)
        #[cfg(portable_atomic_no_stronger_failure_ordering)]
        let success = crate::utils::upgrade_success_ordering(success, failure);
        self.inner.compare_exchange_weak(current, new, success, failure)
    }
}
#[cfg(not(portable_atomic_no_atomic_load_store))]
impl core::ops::Deref for AtomicBool {
    type Target = core::sync::atomic::AtomicBool;
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

#[repr(transparent)]
pub(crate) struct AtomicPtr<T> {
    inner: core::sync::atomic::AtomicPtr<T>,
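A note on the load/store wrappers removed above: they assert the ordering before delegating so that, combined with #[track_caller], a panic for an invalid ordering (for example a Release load) points at the caller rather than inside the wrapper. Below is a minimal sketch of what such an assertion helper can look like; it is an illustration, not the crate's actual utils code.

use core::sync::atomic::Ordering;

#[inline]
#[track_caller]
fn assert_load_ordering(order: Ordering) {
    match order {
        Ordering::Acquire | Ordering::Relaxed | Ordering::SeqCst => {}
        Ordering::Release => panic!("there is no such thing as a release load"),
        Ordering::AcqRel => panic!("there is no such thing as an acquire-release load"),
        // Ordering is #[non_exhaustive], so a catch-all arm is required.
        _ => panic!("invalid memory ordering"),
    }
}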
src/imp/interrupt/mod.rs: 0 additions & 119 deletions
@@ -100,124 +100,6 @@ where
    r
}

#[repr(C, align(1))]
pub(crate) struct AtomicBool {
    v: UnsafeCell<u8>,
}

// Send is implicitly implemented.
// SAFETY: any data races are prevented by disabling interrupts or
// atomic intrinsics (see module-level comments).
unsafe impl Sync for AtomicBool {}

impl AtomicBool {
    #[inline]
    pub(crate) const fn new(v: bool) -> Self {
        Self { v: UnsafeCell::new(v as u8) }
    }

    #[inline]
    pub(crate) fn is_lock_free() -> bool {
        Self::is_always_lock_free()
    }
    #[inline]
    pub(crate) const fn is_always_lock_free() -> bool {
        IS_ALWAYS_LOCK_FREE
    }

    #[inline]
    pub(crate) fn get_mut(&mut self) -> &mut bool {
        // SAFETY: the mutable reference guarantees unique ownership.
        unsafe { &mut *(self.v.get() as *mut bool) }
    }

    #[inline]
    pub(crate) fn into_inner(self) -> bool {
        self.v.into_inner() != 0
    }

    #[inline]
    #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
    pub(crate) fn load(&self, order: Ordering) -> bool {
        self.as_atomic_u8().load(order) != 0
    }

    #[inline]
    #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
    pub(crate) fn store(&self, val: bool, order: Ordering) {
        self.as_atomic_u8().store(val as u8, order);
    }

    #[inline]
    pub(crate) fn swap(&self, val: bool, order: Ordering) -> bool {
        self.as_atomic_u8().swap(val as u8, order) != 0
    }

    #[inline]
    #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
    pub(crate) fn compare_exchange(
        &self,
        current: bool,
        new: bool,
        success: Ordering,
        failure: Ordering,
    ) -> Result<bool, bool> {
        match self.as_atomic_u8().compare_exchange(current as u8, new as u8, success, failure) {
            Ok(x) => Ok(x != 0),
            Err(x) => Err(x != 0),
        }
    }

    #[inline]
    #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
    pub(crate) fn compare_exchange_weak(
        &self,
        current: bool,
        new: bool,
        success: Ordering,
        failure: Ordering,
    ) -> Result<bool, bool> {
        self.compare_exchange(current, new, success, failure)
    }

    #[inline]
    pub(crate) fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        self.as_atomic_u8().fetch_and(val as u8, order) != 0
    }
    #[inline]
    pub(crate) fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        self.as_atomic_u8().fetch_or(val as u8, order) != 0
    }
    #[inline]
    pub(crate) fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        self.as_atomic_u8().fetch_xor(val as u8, order) != 0
    }

    #[inline]
    pub(crate) fn and(&self, val: bool, order: Ordering) {
        self.as_atomic_u8().and(val as u8, order);
    }
    #[inline]
    pub(crate) fn or(&self, val: bool, order: Ordering) {
        self.as_atomic_u8().or(val as u8, order);
    }
    #[inline]
    pub(crate) fn xor(&self, val: bool, order: Ordering) {
        self.as_atomic_u8().xor(val as u8, order);
    }

    #[inline]
    pub(crate) const fn as_ptr(&self) -> *mut bool {
        self.v.get() as *mut bool
    }

    #[inline]
    fn as_atomic_u8(&self) -> &AtomicU8 {
        // SAFETY: AtomicBool and AtomicU8 have the same layout, and both
        // access the underlying byte in the same way.
        unsafe { &*(self as *const AtomicBool).cast::<AtomicU8>() }
    }
}

#[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
#[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
#[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
@@ -732,7 +614,6 @@ atomic_int!(load_store_critical_session, AtomicU128, u128, 16);
mod tests {
    use super::*;

    test_atomic_bool_single_thread!();
    test_atomic_ptr_single_thread!();
    test_atomic_int_single_thread!(i8);
    test_atomic_int_single_thread!(u8);
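Every backend version of AtomicBool exposed the same surface, which is what makes a single crate-root definition possible. A small usage sketch against that shared API, assuming the public type mirrors std's AtomicBool as portable-atomic's documentation describes:

use portable_atomic::{AtomicBool, Ordering};

static FLAG: AtomicBool = AtomicBool::new(false);

fn main() {
    // Claim the flag exactly once; later attempts fail.
    let claimed = FLAG
        .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
        .is_ok();
    assert!(claimed);
    assert!(FLAG.load(Ordering::Acquire));
}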
src/imp/mod.rs: 3 additions & 3 deletions
@@ -222,15 +222,15 @@ pub(crate) mod float;
    )))
)]
pub(crate) use self::core_atomic::{
    AtomicBool, AtomicI16, AtomicI8, AtomicIsize, AtomicPtr, AtomicU16, AtomicU8, AtomicUsize,
    AtomicI16, AtomicI8, AtomicIsize, AtomicPtr, AtomicU16, AtomicU8, AtomicUsize,
};
// RISC-V without A-extension
#[cfg(not(any(portable_atomic_unsafe_assume_single_core, feature = "critical-section")))]
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(portable_atomic_no_atomic_cas))]
#[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(not(target_has_atomic = "ptr")))]
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
pub(crate) use self::riscv::{
    AtomicBool, AtomicI16, AtomicI8, AtomicIsize, AtomicPtr, AtomicU16, AtomicU8, AtomicUsize,
    AtomicI16, AtomicI8, AtomicIsize, AtomicPtr, AtomicU16, AtomicU8, AtomicUsize,
};
// no core Atomic{Isize,Usize,Bool,Ptr}/Atomic{I,U}{8,16} & assume single core => critical section based fallback
#[cfg(any(
@@ -242,7 +242,7 @@ pub(crate) use self::riscv::{
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(portable_atomic_no_atomic_cas))]
#[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(not(target_has_atomic = "ptr")))]
pub(crate) use self::interrupt::{
    AtomicBool, AtomicI16, AtomicI8, AtomicIsize, AtomicPtr, AtomicU16, AtomicU8, AtomicUsize,
    AtomicI16, AtomicI8, AtomicIsize, AtomicPtr, AtomicU16, AtomicU8, AtomicUsize,
};
// bpf
#[cfg(all(
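The re-export lists above drop AtomicBool because it no longer comes from any backend; the cfg cascade that selects a backend and re-exports its integer atomics is unchanged. A simplified, self-contained illustration of that selection pattern follows (the real crate keys it off probed cfgs such as portable_atomic_no_atomic_cas and target_has_atomic = "ptr"; the names below are invented for the sketch):

// One backend is compiled per target; the rest are cfg'd out.
#[cfg(target_has_atomic = "8")]
mod core_atomic {
    pub(crate) use core::sync::atomic::AtomicU8;
}
#[cfg(not(target_has_atomic = "8"))]
mod fallback {
    // A platform-specific implementation would live here.
    pub(crate) struct AtomicU8 {}
}

// Callers elsewhere in the crate see a single AtomicU8 regardless of backend.
#[cfg(target_has_atomic = "8")]
pub(crate) use self::core_atomic::AtomicU8;
#[cfg(not(target_has_atomic = "8"))]
pub(crate) use self::fallback::AtomicU8;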
src/imp/riscv.rs: 0 additions & 63 deletions
@@ -12,68 +12,6 @@
use core::arch::asm;
use core::{cell::UnsafeCell, sync::atomic::Ordering};

#[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
#[repr(transparent)]
pub(crate) struct AtomicBool {
    v: UnsafeCell<u8>,
}

#[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
// Send is implicitly implemented.
// SAFETY: any data races are prevented by atomic operations.
unsafe impl Sync for AtomicBool {}

#[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
impl AtomicBool {
    #[inline]
    pub(crate) const fn new(v: bool) -> Self {
        Self { v: UnsafeCell::new(v as u8) }
    }

    #[inline]
    pub(crate) fn is_lock_free() -> bool {
        Self::is_always_lock_free()
    }
    #[inline]
    pub(crate) const fn is_always_lock_free() -> bool {
        true
    }

    #[inline]
    pub(crate) fn get_mut(&mut self) -> &mut bool {
        // SAFETY: the mutable reference guarantees unique ownership.
        unsafe { &mut *self.v.get().cast::<bool>() }
    }

    #[inline]
    pub(crate) fn into_inner(self) -> bool {
        self.v.into_inner() != 0
    }

    #[inline]
    pub(crate) const fn as_ptr(&self) -> *mut bool {
        self.v.get().cast::<bool>()
    }

    #[inline]
    #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
    pub(crate) fn load(&self, order: Ordering) -> bool {
        self.as_atomic_u8().load(order) != 0
    }

    #[inline]
    #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
    pub(crate) fn store(&self, val: bool, order: Ordering) {
        self.as_atomic_u8().store(val as u8, order);
    }

    #[inline]
    fn as_atomic_u8(&self) -> &AtomicU8 {
        // SAFETY: AtomicBool and AtomicU8 have the same layout, and both
        // access the underlying byte in the same way.
        unsafe { &*(self as *const AtomicBool).cast::<AtomicU8>() }
    }
}

macro_rules! atomic {
    ($([$($generics:tt)*])? $atomic_type:ident, $value_type:ty, $asm_suffix:tt) => {
        #[repr(transparent)]
@@ -226,7 +164,6 @@ atomic!([T] AtomicPtr, *mut T, "d");
mod tests {
    use super::*;

    test_atomic_bool_load_store!();
    test_atomic_ptr_load_store!();
    test_atomic_int_load_store!(i8);
    test_atomic_int_load_store!(u8);