Auto merge of #1804 - RalfJung:ptrless-allocs, r=RalfJung
update for Memory API changes

The Miri side of rust-lang/rust#85376.
bors committed May 19, 2021
2 parents cda0b07 + e4a2715 commit 1ae39d9
Showing 12 changed files with 117 additions and 97 deletions.
2 changes: 1 addition & 1 deletion rust-version
@@ -1 +1 @@
-3e99439f4dacc8ba0d2ca48d221694362d587927
+3e827cc21e0734edd26170e8d1481f0d66a1426b
4 changes: 2 additions & 2 deletions src/data_race.rs
@@ -720,7 +720,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         if let Some(data_race) = &mut this.memory.extra.data_race {
             if data_race.multi_threaded.get() {
                 let alloc_meta =
-                    this.memory.get_raw_mut(ptr.alloc_id)?.extra.data_race.as_mut().unwrap();
+                    this.memory.get_alloc_extra_mut(ptr.alloc_id)?.data_race.as_mut().unwrap();
                 alloc_meta.reset_clocks(ptr.offset, size);
             }
         }
@@ -1024,7 +1024,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
                 let place_ptr = place.ptr.assert_ptr();
                 let size = place.layout.size;
                 let alloc_meta =
-                    &this.memory.get_raw(place_ptr.alloc_id)?.extra.data_race.as_ref().unwrap();
+                    &this.memory.get_alloc_extra(place_ptr.alloc_id)?.data_race.as_ref().unwrap();
                 log::trace!(
                     "Atomic op({}) with ordering {:?} on memory({:?}, offset={}, size={})",
                     description,
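The change above swaps `get_raw_mut(alloc_id)?.extra` for a dedicated `get_alloc_extra_mut(alloc_id)?` accessor; `stacked_borrows.rs` below gets the same treatment on the read-only path. A minimal sketch of the idea behind such an accessor, using toy stand-in types rather than the real rustc `Memory`:

    use std::collections::HashMap;

    type AllocId = u64;

    #[derive(Default)]
    struct AllocExtra { data_race: Option<Vec<u32>> } // stand-in for per-allocation metadata

    struct Allocation { bytes: Vec<u8>, extra: AllocExtra }

    struct Memory { allocs: HashMap<AllocId, Allocation> }

    impl Memory {
        // Old style: hand out the whole allocation, caller digs out `.extra`.
        fn get_raw_mut(&mut self, id: AllocId) -> Result<&mut Allocation, String> {
            self.allocs.get_mut(&id).ok_or_else(|| format!("dangling alloc {}", id))
        }
        // New style: hand out only the extra data; the bytes stay encapsulated.
        fn get_alloc_extra_mut(&mut self, id: AllocId) -> Result<&mut AllocExtra, String> {
            self.get_raw_mut(id).map(|a| &mut a.extra)
        }
    }

    fn main() {
        let mut mem = Memory { allocs: HashMap::new() };
        mem.allocs.insert(1, Allocation { bytes: vec![0; 8], extra: AllocExtra::default() });
        // The caller no longer needs to know that the extra data hangs off the raw allocation.
        mem.get_alloc_extra_mut(1).unwrap().data_race = Some(vec![0; 8]);
    }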
8 changes: 4 additions & 4 deletions src/diagnostics.rs
@@ -136,13 +136,13 @@ pub fn report_error<'tcx, 'mir>(
 
     // Extra output to help debug specific issues.
     match e.kind() {
-        UndefinedBehavior(UndefinedBehaviorInfo::InvalidUninitBytes(Some(access))) => {
+        UndefinedBehavior(UndefinedBehaviorInfo::InvalidUninitBytes(Some((alloc_id, access)))) => {
            eprintln!(
                "Uninitialized read occurred at offsets 0x{:x}..0x{:x} into this allocation:",
-                access.uninit_ptr.offset.bytes(),
-                access.uninit_ptr.offset.bytes() + access.uninit_size.bytes(),
+                access.uninit_offset.bytes(),
+                access.uninit_offset.bytes() + access.uninit_size.bytes(),
            );
-            eprintln!("{:?}", ecx.memory.dump_alloc(access.uninit_ptr.alloc_id));
+            eprintln!("{:?}", ecx.memory.dump_alloc(*alloc_id));
        }
         _ => {}
     }
15 changes: 10 additions & 5 deletions src/helpers.rs
@@ -8,7 +8,7 @@ use log::trace;
 use rustc_hir::def_id::{DefId, CRATE_DEF_INDEX};
 use rustc_middle::mir;
 use rustc_middle::ty::{self, layout::TyAndLayout, List, TyCtxt};
-use rustc_target::abi::{FieldsShape, LayoutOf, Size, Variants};
+use rustc_target::abi::{Align, FieldsShape, LayoutOf, Size, Variants};
 use rustc_target::spec::abi::Abi;
 
 use rand::RngCore;
@@ -577,10 +577,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         let ptr = this.force_ptr(sptr)?; // We need to read at least 1 byte, so we can eagerly get a ptr.
 
         // Step 1: determine the length.
-        let alloc = this.memory.get_raw(ptr.alloc_id)?;
         let mut len = Size::ZERO;
         loop {
-            let byte = alloc.read_scalar(this, ptr.offset(len, this)?, size1)?.to_u8()?;
+            // FIXME: We are re-getting the allocation each time around the loop.
+            // Would be nice if we could somehow "extend" an existing AllocRange.
+            let alloc = this.memory.get(ptr.offset(len, this)?.into(), size1, Align::ONE)?.unwrap(); // not a ZST, so we will get a result
+            let byte = alloc.read_scalar(alloc_range(Size::ZERO, size1))?.to_u8()?;
            if byte == 0 {
                break;
            } else {
@@ -595,12 +597,15 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     fn read_wide_str(&self, sptr: Scalar<Tag>) -> InterpResult<'tcx, Vec<u16>> {
         let this = self.eval_context_ref();
         let size2 = Size::from_bytes(2);
+        let align2 = Align::from_bytes(2).unwrap();
 
         let mut ptr = this.force_ptr(sptr)?; // We need to read at least 1 wchar, so we can eagerly get a ptr.
         let mut wchars = Vec::new();
-        let alloc = this.memory.get_raw(ptr.alloc_id)?;
         loop {
-            let wchar = alloc.read_scalar(this, ptr, size2)?.to_u16()?;
+            // FIXME: We are re-getting the allocation each time around the loop.
+            // Would be nice if we could somehow "extend" an existing AllocRange.
+            let alloc = this.memory.get(ptr.into(), size2, align2)?.unwrap(); // not a ZST, so we will get a result
+            let wchar = alloc.read_scalar(alloc_range(Size::ZERO, size2))?.to_u16()?;
            if wchar == 0 {
                break;
            } else {
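Both loops above now fetch a small, bounds- and alignment-checked view of the allocation on every iteration and read at a relative `alloc_range` instead of an absolute pointer (hence the FIXME about re-getting the allocation). A rough, self-contained model of that access pattern, with toy types standing in for the real `Memory::get`/`alloc_range` API:

    // Toy model: a "range view" into an allocation, handed out only after a bounds check.
    struct AllocRef<'a> { data: &'a [u8] }

    impl<'a> AllocRef<'a> {
        fn read_u8(&self, offset: usize) -> u8 { self.data[offset] }
    }

    struct Memory { bytes: Vec<u8> }

    impl Memory {
        // Check `offset..offset+size` up front and return a view of exactly that range.
        fn get(&self, offset: usize, size: usize) -> Result<AllocRef<'_>, String> {
            let end = offset.checked_add(size).ok_or("overflow")?;
            self.bytes.get(offset..end).map(|data| AllocRef { data }).ok_or_else(|| "out of bounds".to_string())
        }
    }

    // Read a C string byte by byte, re-getting a 1-byte view each iteration,
    // mirroring the structure of `read_c_str` after this commit.
    fn read_c_str(mem: &Memory, mut offset: usize) -> Result<Vec<u8>, String> {
        let mut out = Vec::new();
        loop {
            let byte = mem.get(offset, 1)?.read_u8(0);
            if byte == 0 { return Ok(out); }
            out.push(byte);
            offset += 1;
        }
    }

    fn main() {
        let mem = Memory { bytes: b"hello\0world".to_vec() };
        assert_eq!(read_c_str(&mem, 0).unwrap(), b"hello".to_vec());
    }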
104 changes: 48 additions & 56 deletions src/machine.rs
@@ -506,15 +506,57 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
     }
 
     #[inline(always)]
-    fn before_deallocation(
-        memory_extra: &mut Self::MemoryExtra,
-        id: AllocId,
+    fn memory_read(
+        _memory_extra: &Self::MemoryExtra,
+        alloc: &Allocation<Tag, AllocExtra>,
+        ptr: Pointer<Tag>,
+        size: Size,
     ) -> InterpResult<'tcx> {
-        if Some(id) == memory_extra.tracked_alloc_id {
-            register_diagnostic(NonHaltingDiagnostic::FreedAlloc(id));
+        if let Some(data_race) = &alloc.extra.data_race {
+            data_race.read(ptr, size)?;
         }
+        if let Some(stacked_borrows) = &alloc.extra.stacked_borrows {
+            stacked_borrows.memory_read(ptr, size)
+        } else {
+            Ok(())
+        }
+    }
 
-        Ok(())
+    #[inline(always)]
+    fn memory_written(
+        _memory_extra: &mut Self::MemoryExtra,
+        alloc: &mut Allocation<Tag, AllocExtra>,
+        ptr: Pointer<Tag>,
+        size: Size,
+    ) -> InterpResult<'tcx> {
+        if let Some(data_race) = &mut alloc.extra.data_race {
+            data_race.write(ptr, size)?;
+        }
+        if let Some(stacked_borrows) = &mut alloc.extra.stacked_borrows {
+            stacked_borrows.memory_written(ptr, size)
+        } else {
+            Ok(())
+        }
+    }
+
+    #[inline(always)]
+    fn memory_deallocated(
+        memory_extra: &mut Self::MemoryExtra,
+        alloc: &mut Allocation<Tag, AllocExtra>,
+        ptr: Pointer<Tag>,
+    ) -> InterpResult<'tcx> {
+        let size = alloc.size();
+        if Some(ptr.alloc_id) == memory_extra.tracked_alloc_id {
+            register_diagnostic(NonHaltingDiagnostic::FreedAlloc(ptr.alloc_id));
+        }
+        if let Some(data_race) = &mut alloc.extra.data_race {
+            data_race.deallocate(ptr, size)?;
+        }
+        if let Some(stacked_borrows) = &mut alloc.extra.stacked_borrows {
+            stacked_borrows.memory_deallocated(ptr, size)
+        } else {
+            Ok(())
+        }
     }
 
     fn after_static_mem_initialized(
@@ -601,53 +643,3 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
         intptrcast::GlobalState::ptr_to_int(ptr, memory)
     }
 }
-
-impl AllocationExtra<Tag> for AllocExtra {
-    #[inline(always)]
-    fn memory_read<'tcx>(
-        alloc: &Allocation<Tag, AllocExtra>,
-        ptr: Pointer<Tag>,
-        size: Size,
-    ) -> InterpResult<'tcx> {
-        if let Some(data_race) = &alloc.extra.data_race {
-            data_race.read(ptr, size)?;
-        }
-        if let Some(stacked_borrows) = &alloc.extra.stacked_borrows {
-            stacked_borrows.memory_read(ptr, size)
-        } else {
-            Ok(())
-        }
-    }
-
-    #[inline(always)]
-    fn memory_written<'tcx>(
-        alloc: &mut Allocation<Tag, AllocExtra>,
-        ptr: Pointer<Tag>,
-        size: Size,
-    ) -> InterpResult<'tcx> {
-        if let Some(data_race) = &mut alloc.extra.data_race {
-            data_race.write(ptr, size)?;
-        }
-        if let Some(stacked_borrows) = &mut alloc.extra.stacked_borrows {
-            stacked_borrows.memory_written(ptr, size)
-        } else {
-            Ok(())
-        }
-    }
-
-    #[inline(always)]
-    fn memory_deallocated<'tcx>(
-        alloc: &mut Allocation<Tag, AllocExtra>,
-        ptr: Pointer<Tag>,
-        size: Size,
-    ) -> InterpResult<'tcx> {
-        if let Some(data_race) = &mut alloc.extra.data_race {
-            data_race.deallocate(ptr, size)?;
-        }
-        if let Some(stacked_borrows) = &mut alloc.extra.stacked_borrows {
-            stacked_borrows.memory_deallocated(ptr, size)
-        } else {
-            Ok(())
-        }
-    }
-}
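The structural change in this file is that the per-access hooks move from an `AllocationExtra` impl onto the `Machine` trait, and the deallocation hook now receives the allocation itself, so it can ask for the size via `alloc.size()` rather than taking it as a parameter. A toy model of that design move, with made-up types standing in for the rustc ones:

    // Toy model of the hook relocation: before, hooks hung off the per-allocation
    // extra data; after, they are methods of the machine and receive the allocation.

    struct Allocation { bytes: Vec<u8>, extra: AllocExtra }
    impl Allocation {
        fn size(&self) -> usize { self.bytes.len() }
    }

    #[derive(Default)]
    struct AllocExtra { tracked: bool }

    trait Machine {
        // New-style hooks: the machine sees both its global state and the allocation.
        fn memory_read(&self, alloc: &Allocation, offset: usize, size: usize);
        fn memory_deallocated(&mut self, alloc: &mut Allocation);
    }

    struct Logger;

    impl Machine for Logger {
        fn memory_read(&self, alloc: &Allocation, offset: usize, size: usize) {
            if alloc.extra.tracked {
                println!("read of {size} bytes at offset {offset}");
            }
        }
        fn memory_deallocated(&mut self, alloc: &mut Allocation) {
            // The size no longer needs to be passed in: the hook can ask the allocation.
            println!("freed {} bytes", alloc.size());
        }
    }

    fn main() {
        let mut alloc = Allocation { bytes: vec![0; 16], extra: AllocExtra { tracked: true } };
        let mut m = Logger;
        m.memory_read(&alloc, 0, 4);
        m.memory_deallocated(&mut alloc);
    }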
36 changes: 31 additions & 5 deletions src/shims/intrinsics.rs
@@ -574,7 +574,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
         // be 8-aligned).
         let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
-        this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
+        this.memory.check_ptr_access_align(
+            place.ptr,
+            place.layout.size,
+            align,
+            CheckInAllocMsg::MemoryAccessTest,
+        )?;
+        // Perform regular access.
         this.write_scalar(val, dest)?;
         Ok(())
     }
@@ -594,7 +600,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
         // be 8-aligned).
         let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
-        this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
+        this.memory.check_ptr_access_align(
+            place.ptr,
+            place.layout.size,
+            align,
+            CheckInAllocMsg::MemoryAccessTest,
+        )?;
 
         // Perform atomic store
         this.write_scalar_atomic(val, &place, atomic)?;
@@ -644,7 +655,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
         // be 8-aligned).
         let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
-        this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
+        this.memory.check_ptr_access_align(
+            place.ptr,
+            place.layout.size,
+            align,
+            CheckInAllocMsg::MemoryAccessTest,
+        )?;
 
         match atomic_op {
             AtomicOp::Min => {
@@ -681,7 +697,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
         // be 8-aligned).
         let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
-        this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
+        this.memory.check_ptr_access_align(
+            place.ptr,
+            place.layout.size,
+            align,
+            CheckInAllocMsg::MemoryAccessTest,
+        )?;
 
         let old = this.atomic_exchange_scalar(&place, new, atomic)?;
         this.write_scalar(old, dest)?; // old value is returned
@@ -707,7 +728,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
         // be 8-aligned).
         let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
-        this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
+        this.memory.check_ptr_access_align(
+            place.ptr,
+            place.layout.size,
+            align,
+            CheckInAllocMsg::MemoryAccessTest,
+        )?;
 
         let old = this.atomic_compare_exchange_scalar(
             &place,
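Every atomic shim above repeats the same check: the operand must be aligned to its size, so an 8-byte atomic must be 8-aligned even on a 32-bit target where a plain `u64` may be less aligned. A small self-contained sketch of that invariant, using plain integers rather than the real `check_ptr_access_align` call (which one could imagine factoring into a shared helper):

    // Atomic operations require "natural" alignment: the address must be a
    // multiple of the access size, regardless of the wrapped type's own alignment.
    fn check_atomic_align(addr: u64, size: u64) -> Result<(), String> {
        assert!(size.is_power_of_two(), "atomic sizes are powers of two");
        if addr % size == 0 {
            Ok(())
        } else {
            Err(format!("address {addr:#x} is not aligned to {size} bytes"))
        }
    }

    fn main() {
        // 8-byte atomic at an 8-aligned address: fine.
        assert!(check_atomic_align(0x1000, 8).is_ok());
        // 8-byte atomic at a merely 4-aligned address: rejected,
        // even though a plain u64 load might be tolerated there.
        assert!(check_atomic_align(0x1004, 8).is_err());
    }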
17 changes: 7 additions & 10 deletions src/shims/os_str.rs
@@ -9,7 +9,7 @@ use std::os::unix::ffi::{OsStrExt, OsStringExt};
 #[cfg(windows)]
 use std::os::windows::ffi::{OsStrExt, OsStringExt};
 
-use rustc_target::abi::{LayoutOf, Size};
+use rustc_target::abi::{Align, LayoutOf, Size};
 
 use crate::*;
 
@@ -144,17 +144,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         // Store the UTF-16 string.
         let size2 = Size::from_bytes(2);
         let this = self.eval_context_mut();
-        let tcx = &*this.tcx;
-        let ptr = this.force_ptr(sptr)?; // we need to write at least the 0 terminator
-        let alloc = this.memory.get_raw_mut(ptr.alloc_id)?;
+        let mut alloc = this
+            .memory
+            .get_mut(sptr, Size::from_bytes(string_length), Align::from_bytes(2).unwrap())?
+            .unwrap(); // not a ZST, so we will get a result
         for (offset, wchar) in u16_vec.into_iter().chain(iter::once(0x0000)).enumerate() {
             let offset = u64::try_from(offset).unwrap();
-            alloc.write_scalar(
-                tcx,
-                ptr.offset(size2 * offset, tcx)?,
-                Scalar::from_u16(wchar).into(),
-                size2,
-            )?;
+            alloc
+                .write_scalar(alloc_range(size2 * offset, size2), Scalar::from_u16(wchar).into())?;
         }
         Ok((true, string_length - 1))
     }
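The UTF-16 write now addresses the destination with `alloc_range(offset, size)` relative to a region obtained once up front, instead of absolute pointers plus a `tcx` handle. A rough, self-contained model of range-based writes, with a toy `AllocRange` rather than the rustc one:

    // Toy model: writes go through an (offset, size) range relative to a buffer
    // obtained once up front, instead of absolute pointers.
    #[derive(Clone, Copy)]
    struct AllocRange { start: usize, size: usize }

    fn alloc_range(start: usize, size: usize) -> AllocRange {
        AllocRange { start, size }
    }

    struct AllocRefMut<'a> { data: &'a mut [u8] }

    impl<'a> AllocRefMut<'a> {
        fn write_u16(&mut self, range: AllocRange, value: u16) {
            assert_eq!(range.size, 2);
            self.data[range.start..range.start + 2].copy_from_slice(&value.to_le_bytes());
        }
    }

    fn main() {
        let mut buf = vec![0u8; 8];
        let mut alloc = AllocRefMut { data: &mut buf };
        // Write a NUL-terminated UTF-16 string, two bytes per code unit.
        for (i, wchar) in "hi".encode_utf16().chain(std::iter::once(0)).enumerate() {
            alloc.write_u16(alloc_range(2 * i, 2), wchar);
        }
        assert_eq!(buf, [b'h', 0, b'i', 0, 0, 0, 0, 0]);
    }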
10 changes: 6 additions & 4 deletions src/shims/posix/fs.rs
@@ -677,10 +677,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         trace!("Reading from FD {}, size {}", fd, count);
 
         // Check that the *entire* buffer is actually valid memory.
-        this.memory.check_ptr_access(
+        this.memory.check_ptr_access_align(
             buf,
             Size::from_bytes(count),
-            Align::from_bytes(1).unwrap(),
+            Align::ONE,
+            CheckInAllocMsg::MemoryAccessTest,
         )?;
 
         // We cap the number of read bytes to the largest value that we are able to fit in both the
@@ -722,10 +723,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         // Isolation check is done via `FileDescriptor` trait.
 
         // Check that the *entire* buffer is actually valid memory.
-        this.memory.check_ptr_access(
+        this.memory.check_ptr_access_align(
             buf,
             Size::from_bytes(count),
-            Align::from_bytes(1).unwrap(),
+            Align::ONE,
+            CheckInAllocMsg::MemoryAccessTest,
         )?;
 
         // We cap the number of written bytes to the largest value that we are able to fit in both the
3 changes: 2 additions & 1 deletion src/shims/posix/linux/sync.rs
@@ -82,10 +82,11 @@ pub fn futex<'tcx>(
             // Check the pointer for alignment and validity.
             // The API requires `addr` to be a 4-byte aligned pointer, and will
             // use the 4 bytes at the given address as an (atomic) i32.
-            this.memory.check_ptr_access(
+            this.memory.check_ptr_access_align(
                 addr.to_scalar()?,
                 Size::from_bytes(4),
                 Align::from_bytes(4).unwrap(),
+                CheckInAllocMsg::MemoryAccessTest,
             )?;
             // Read an `i32` through the pointer, regardless of any wrapper types.
             // It's not uncommon for `addr` to be passed as another type than `*mut i32`, such as `*const AtomicI32`.
2 changes: 1 addition & 1 deletion src/stacked_borrows.rs
@@ -561,7 +561,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         );
 
         // Get the allocation. It might not be mutable, so we cannot use `get_mut`.
-        let extra = &this.memory.get_raw(ptr.alloc_id)?.extra;
+        let extra = this.memory.get_alloc_extra(ptr.alloc_id)?;
         let stacked_borrows =
             extra.stacked_borrows.as_ref().expect("we should have Stacked Borrows data");
         // Update the stacks.
3 changes: 2 additions & 1 deletion tests/compile-fail/data_race/alloc_read_race.rs
@@ -1,4 +1,5 @@
 // ignore-windows: Concurrency on Windows is not supported yet.
+#![feature(new_uninit)]
 
 use std::thread::spawn;
 use std::ptr::null_mut;
@@ -29,7 +30,7 @@ pub fn main() {
         // Uses relaxed semantics to not generate
         // a release sequence.
         let pointer = &*ptr.0;
-        pointer.store(Box::into_raw(Box::new(MaybeUninit::uninit())), Ordering::Relaxed);
+        pointer.store(Box::into_raw(Box::new_uninit()), Ordering::Relaxed);
     });
 
     let j2 = spawn(move || {
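The test now builds its uninitialized box with `Box::new_uninit()` (gated behind `#![feature(new_uninit)]` on the nightly of that era) instead of `Box::new(MaybeUninit::uninit())`. Both produce a `Box<MaybeUninit<T>>`, but `Box::new` moves a `MaybeUninit` value into the fresh allocation, i.e. it writes to it, while `Box::new_uninit` leaves the allocation untouched, which is presumably what this allocation/read race test wants. A tiny illustration:

    use std::mem::MaybeUninit;

    fn main() {
        // Moves a MaybeUninit<u64> into a fresh heap allocation,
        // so the new allocation is written to once.
        let a: Box<MaybeUninit<u64>> = Box::new(MaybeUninit::uninit());

        // Allocates without ever writing to the new allocation.
        // (On the 2021-era nightly this test targets, this required
        // `#![feature(new_uninit)]`; it has since been stabilized.)
        let b: Box<MaybeUninit<u64>> = Box::new_uninit();

        // Either way, the contents stay uninitialized until explicitly written.
        drop((a, b));
    }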
(diff for the remaining changed file was not loaded)
