Directly use raw pointers in AtomicPtr store/load #77611

Merged · 4 commits · Dec 9, 2020
4 changes: 2 additions & 2 deletions compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -146,12 +146,12 @@ macro atomic_minmax($fx:expr, $cc:expr, <$T:ident> ($ptr:ident, $src:ident) -> $

 macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
     match $ty.kind() {
-        ty::Uint(_) | ty::Int(_) => {}
+        ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
         _ => {
             $fx.tcx.sess.span_err(
                 $span,
                 &format!(
-                    "`{}` intrinsic: expected basic integer type, found `{:?}`",
+                    "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
                     $intrinsic, $ty
                 ),
             );
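At the source level, this relaxed check means an atomic intrinsic instantiated at a raw pointer type, as `AtomicPtr<T>` now is, passes validation in the cranelift backend instead of tripping the "expected basic integer type" error. A minimal illustration (my example, not code from this PR):

use std::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    // Monomorphizes the atomic load intrinsic at `*mut i32`, which
    // `validate_atomic_type` now accepts via the `ty::RawPtr(..)` arm.
    let mut value = 42i32;
    let atom = AtomicPtr::new(&mut value as *mut i32);
    let p = atom.load(Ordering::SeqCst);
    assert_eq!(unsafe { *p }, 42);
}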
55 changes: 41 additions & 14 deletions compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
Original file line number Diff line number Diff line change
@@ -437,16 +437,20 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 match split[1] {
                     "cxchg" | "cxchgweak" => {
                         let ty = substs.type_at(0);
-                        if int_type_width_signed(ty, bx.tcx()).is_some() {
+                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                             let weak = split[1] == "cxchgweak";
-                            let pair = bx.atomic_cmpxchg(
-                                args[0].immediate(),
-                                args[1].immediate(),
-                                args[2].immediate(),
-                                order,
-                                failorder,
-                                weak,
-                            );
+                            let mut dst = args[0].immediate();
+                            let mut cmp = args[1].immediate();
+                            let mut src = args[2].immediate();
+                            if ty.is_unsafe_ptr() {
+                                // Some platforms do not support atomic operations on pointers,
+                                // so we cast to integer first.
+                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
+                                dst = bx.pointercast(dst, ptr_llty);
+                                cmp = bx.ptrtoint(cmp, bx.type_isize());
+                                src = bx.ptrtoint(src, bx.type_isize());
+                            }
+                            let pair = bx.atomic_cmpxchg(dst, cmp, src, order, failorder, weak);
                             let val = bx.extract_value(pair, 0);
                             let success = bx.extract_value(pair, 1);
                             let val = bx.from_immediate(val);
@@ -464,19 +468,42 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

"load" => {
let ty = substs.type_at(0);
if int_type_width_signed(ty, bx.tcx()).is_some() {
let size = bx.layout_of(ty).size;
bx.atomic_load(args[0].immediate(), order, size)
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
let layout = bx.layout_of(ty);
let size = layout.size;
let mut source = args[0].immediate();
if ty.is_unsafe_ptr() {
// Some platforms do not support atomic operations on pointers,
// so we cast to integer first...
let ptr_llty = bx.type_ptr_to(bx.type_isize());
source = bx.pointercast(source, ptr_llty);
}
let result = bx.atomic_load(source, order, size);
if ty.is_unsafe_ptr() {
// ... and then cast the result back to a pointer
bx.inttoptr(result, bx.backend_type(layout))
} else {
result
}
} else {
return invalid_monomorphization(ty);
}
}
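The load case needs the extra `inttoptr` on the way out. A user-level equivalent of what the emitted code does (illustration only):

use std::sync::atomic::{AtomicUsize, Ordering};

// Atomic pointer load via an integer atomic: load a usize, then
// reinterpret it as a pointer (mirrors the `inttoptr` above).
fn load_ptr(atom: &AtomicUsize, order: Ordering) -> *mut u8 {
    atom.load(order) as *mut u8
}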

"store" => {
let ty = substs.type_at(0);
if int_type_width_signed(ty, bx.tcx()).is_some() {
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
let size = bx.layout_of(ty).size;
bx.atomic_store(args[1].immediate(), args[0].immediate(), order, size);
let mut val = args[1].immediate();
let mut ptr = args[0].immediate();
if ty.is_unsafe_ptr() {
// Some platforms do not support atomic operations on pointers,
// so we cast to integer first.
let ptr_llty = bx.type_ptr_to(bx.type_isize());
ptr = bx.pointercast(ptr, ptr_llty);
val = bx.ptrtoint(val, bx.type_isize());
}
bx.atomic_store(val, ptr, order, size);
return;
} else {
return invalid_monomorphization(ty);
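And the store direction, a `ptrtoint` followed by an ordinary integer store (illustration only):

use std::sync::atomic::{AtomicUsize, Ordering};

// Atomic pointer store via an integer atomic: cast the pointer value
// to usize, then perform an integer atomic store.
fn store_ptr(atom: &AtomicUsize, ptr: *mut u8, order: Ordering) {
    atom.store(ptr as usize, order);
}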
31 changes: 30 additions & 1 deletion library/core/src/sync/atomic.rs
@@ -966,8 +966,16 @@ impl<T> AtomicPtr<T> {
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn load(&self, order: Ordering) -> *mut T {
+        #[cfg(not(bootstrap))]
         // SAFETY: data races are prevented by atomic intrinsics.
-        unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T }
+        unsafe {
+            atomic_load(self.p.get(), order)
+        }
+        #[cfg(bootstrap)]
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe {
+            atomic_load(self.p.get() as *mut usize, order) as *mut T
+        }
     }
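`#[cfg(bootstrap)]` selects the code seen by the previous-stage (beta) compiler, whose intrinsics do not yet accept raw pointer types; the old cast-through-`usize` path survives there until the next beta bump, after which it can be deleted. Either path gives the same observable behavior. A quick usage sketch (my example):

use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

static SLOT: AtomicPtr<u8> = AtomicPtr::new(ptr::null_mut());

// With the `not(bootstrap)` path, this load is a genuinely pointer-typed
// atomic operation all the way down to the backend.
fn read() -> *mut u8 {
    SLOT.load(Ordering::Acquire)
}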

/// Stores a value into the pointer.
@@ -994,6 +1002,12 @@ impl<T> AtomicPtr<T> {
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn store(&self, ptr: *mut T, order: Ordering) {
+        #[cfg(not(bootstrap))]
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe {
+            atomic_store(self.p.get(), ptr, order);
+        }
+        #[cfg(bootstrap)]
         // SAFETY: data races are prevented by atomic intrinsics.
         unsafe {
             atomic_store(self.p.get() as *mut usize, ptr as usize, order);
@@ -1105,6 +1119,7 @@ impl<T> AtomicPtr<T> {
         success: Ordering,
         failure: Ordering,
     ) -> Result<*mut T, *mut T> {
+        #[cfg(bootstrap)]
         // SAFETY: data races are prevented by atomic intrinsics.
         unsafe {
             let res = atomic_compare_exchange(
@@ -1119,6 +1134,11 @@
                 Err(x) => Err(x as *mut T),
             }
         }
+        #[cfg(not(bootstrap))]
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe {
+            atomic_compare_exchange(self.p.get(), current, new, success, failure)
+        }
     }

/// Stores a value into the pointer if the current value is the same as the `current` value.
@@ -1165,6 +1185,7 @@ impl<T> AtomicPtr<T> {
         success: Ordering,
         failure: Ordering,
     ) -> Result<*mut T, *mut T> {
+        #[cfg(bootstrap)]
         // SAFETY: data races are prevented by atomic intrinsics.
         unsafe {
             let res = atomic_compare_exchange_weak(
@@ -1179,6 +1200,14 @@
                 Err(x) => Err(x as *mut T),
             }
         }
+        #[cfg(not(bootstrap))]
+        // SAFETY: This intrinsic is unsafe because it operates on a raw pointer
+        // but we know for sure that the pointer is valid (we just got it from
+        // an `UnsafeCell` that we have by reference) and the atomic operation
+        // itself allows us to safely mutate the `UnsafeCell` contents.
Review comment (Member):
This doesn't match the safety comment for the #[cfg(bootstrap)] case.

Reply (Contributor, Author):
I know, but I felt like it wasn't worth updating that, too, since it'll go away with the next beta anyway.

+        unsafe {
+            atomic_compare_exchange_weak(self.p.get(), current, new, success, failure)
+        }
     }
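For context, the canonical retry loop that exercises `compare_exchange_weak` on an `AtomicPtr` (usage illustration, not part of this diff):

use std::sync::atomic::{AtomicPtr, Ordering};

// Unconditionally swap in `new`, tolerating the spurious failures that
// `compare_exchange_weak` is permitted to report; returns the old pointer.
fn swap_via_cas<T>(slot: &AtomicPtr<T>, new: *mut T) -> *mut T {
    let mut cur = slot.load(Ordering::Relaxed);
    loop {
        match slot.compare_exchange_weak(cur, new, Ordering::AcqRel, Ordering::Acquire) {
            Ok(prev) => return prev,
            Err(actual) => cur = actual,
        }
    }
}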

/// Fetches the value, and applies a function to it that returns an optional
13 changes: 0 additions & 13 deletions library/std/src/sys/windows/thread_local_key.rs
@@ -110,16 +110,6 @@ struct Node {
     next: *mut Node,
 }
 
-#[cfg(miri)]
-extern "Rust" {
-    /// Miri-provided extern function to mark the block `ptr` points to as a "root"
-    /// for some static memory. This memory and everything reachable by it is not
-    /// considered leaking even if it still exists when the program terminates.
-    ///
-    /// `ptr` has to point to the beginning of an allocated block.
-    fn miri_static_root(ptr: *const u8);
-}
-
 unsafe fn register_dtor(key: Key, dtor: Dtor) {
     let mut node = Box::new(Node { key, dtor, next: ptr::null_mut() });
 
@@ -128,9 +118,6 @@ unsafe fn register_dtor(key: Key, dtor: Dtor) {
         node.next = head;
         match DTORS.compare_exchange(head, &mut *node, SeqCst, SeqCst) {
             Ok(_) => {
-                #[cfg(miri)]
-                miri_static_root(&*node as *const _ as *const u8);
-
                 mem::forget(node);
                 return;
             }
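Presumably the `miri_static_root` hack is removable because the list head is now stored as a real pointer inside the `AtomicPtr`, so Miri's leak checker can trace the `DTORS` list through the static itself. The surrounding code is the classic lock-free stack prepend; a self-contained sketch of the same pattern (hypothetical `Node` and `push`, not this file's types):

use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering::SeqCst};

struct Node {
    value: u32,
    next: *mut Node,
}

static HEAD: AtomicPtr<Node> = AtomicPtr::new(ptr::null_mut());

// Lock-free prepend, as in `register_dtor`: point the new node at the
// current head and CAS it in, retrying if another thread won the race.
fn push(value: u32) {
    let node = Box::into_raw(Box::new(Node { value, next: ptr::null_mut() }));
    let mut head = HEAD.load(SeqCst);
    loop {
        unsafe { (*node).next = head };
        match HEAD.compare_exchange(head, node, SeqCst, SeqCst) {
            Ok(_) => break,
            Err(h) => head = h,
        }
    }
}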