Safely keep track of pending extensions to the Heap
This makes Heap::extend correct and fully infallible. To make this work, a
slight change to the usable size of the last Hole was needed.

Significant changes:
- pending_extend added to HoleList, tracks partial extension amounts
- for odd-sized heaps or extensions, the last Hole will only utilize space up
  to the highest usize-aligned address (see the sketch after this list)
- the above change forces top to always be aligned to usize
- the correctness of `extend` relies on an aligned top pointer, so a debug
  assertion is added to catch any future code changes
- Heap::size() reports usable size for allocations (updated some tests
  accordingly)
- pub fn Heap::top() reports the top-most pointer of memory claimed by the Heap,
  even if those last bytes aren't usable yet (the private field HoleList.top is
  still always aligned)
- documented best values to give to extend
- removed ExtendError and try_extend
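
To make the truncation concrete, here is a minimal standalone sketch (not part
of the commit; align_down_size is re-implemented locally and the sizes mirror
the ones used in the tests) of how a heap size splits into a usable,
usize-aligned portion plus a pending remainder:

    fn align_down_size(size: usize, align: usize) -> usize {
        // `align` must be a power of two, which holds for align_of::<usize>()
        size & !(align - 1)
    }

    fn main() {
        let align = core::mem::align_of::<usize>(); // 8 on typical 64-bit targets

        // 1000 (the size used by the `new_heap` test) is already aligned
        assert_eq!(align_down_size(1000, align), 1000);

        // a 17-byte heap only uses 16 bytes; 1 byte is kept in pending_extend
        let usable = align_down_size(17, align);
        assert_eq!(usable, 16);
        assert_eq!(17 - usable, 1);
    }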
evanrichter committed Sep 3, 2022
1 parent c7e3428 commit ede19c8
Showing 3 changed files with 99 additions and 109 deletions.
94 changes: 50 additions & 44 deletions src/hole.rs
@@ -4,7 +4,7 @@ use core::mem::{align_of, size_of};
 use core::ptr::null_mut;
 use core::ptr::NonNull;
 
-use crate::{align_up_size, ExtendError};
+use crate::{align_down_size, align_up_size};
 
 use super::align_up;

@@ -13,6 +13,7 @@ pub struct HoleList {
     pub(crate) first: Hole, // dummy
     pub(crate) bottom: *mut u8,
     pub(crate) top: *mut u8,
+    pub(crate) pending_extend: u8,
 }
 
 pub(crate) struct Cursor {
@@ -278,6 +279,7 @@ impl HoleList {
             },
             bottom: null_mut(),
             top: null_mut(),
+            pending_extend: 0,
         }
     }

@@ -328,6 +330,11 @@ impl HoleList {
     /// alignment of the `hole_addr` pointer, the minimum size is between
     /// `2 * size_of::<usize>` and `3 * size_of::<usize>`.
     ///
+    /// The usable size for allocations will be truncated to the nearest
+    /// alignment of `align_of::<usize>`. Any extra bytes left at the end
+    /// will be reclaimed once sufficient additional space is given to
+    /// [`extend`][crate::Heap::extend].
+    ///
     /// # Safety
     ///
     /// This function is unsafe because it creates a hole at the given `hole_addr`.
@@ -338,7 +345,8 @@ impl HoleList {
         assert!(hole_size >= size_of::<Hole>());
 
         let aligned_hole_addr = align_up(hole_addr, align_of::<Hole>());
-        let aligned_hole_size = hole_size - ((aligned_hole_addr as usize) - (hole_addr as usize));
+        let requested_hole_size = hole_size - ((aligned_hole_addr as usize) - (hole_addr as usize));
+        let aligned_hole_size = align_down_size(requested_hole_size, align_of::<Hole>());
         assert!(aligned_hole_size >= size_of::<Hole>());
 
         let ptr = aligned_hole_addr as *mut Hole;
@@ -349,15 +357,17 @@ impl HoleList {
 
         assert_eq!(
             hole_addr.wrapping_add(hole_size),
-            aligned_hole_addr.wrapping_add(aligned_hole_size)
+            aligned_hole_addr.wrapping_add(requested_hole_size)
         );
 
         HoleList {
             first: Hole {
                 size: 0,
                 next: Some(NonNull::new_unchecked(ptr)),
             },
             bottom: aligned_hole_addr,
-            top: hole_addr.wrapping_add(hole_size),
+            top: aligned_hole_addr.wrapping_add(aligned_hole_size),
+            pending_extend: (requested_hole_size - aligned_hole_size) as u8,
         }
     }

@@ -443,51 +453,40 @@ impl HoleList {
         })
     }
 
-    // amount previously extended but not big enough to be claimed by a Hole yet
-    pub(crate) fn pending_extend(&self) -> usize {
-        // this field is not used by anything else
-        self.first.size
-    }
-
-    pub(crate) unsafe fn try_extend(&mut self, by: usize) -> Result<(), ExtendError> {
+    pub(crate) unsafe fn extend(&mut self, by: usize) {
         let top = self.top;
-        if top.is_null() {
-            // this is the only case where I don't think will need to make/init a new heap...
-            // since we can't do that from here, maybe we just add this to the
-            // documentation to not call .extend() on uninitialized Heaps
-            unreachable!();
-        }
-
-        // join this extend request with any pending (but not yet acted on) extensions
-        let by = self.pending_extend() + by;
 
         let dead_space = top.align_offset(align_of::<Hole>());
-        let minimum_extend = dead_space + Self::min_size();
-
-        if by < minimum_extend {
-            self.first.size = by;
+        debug_assert_eq!(
+            0, dead_space,
+            "dead space detected during extend: {} bytes. This means top was unaligned",
+            dead_space
+        );
 
-            #[cfg(test)]
-            println!("pending extend size: {} bytes", self.pending_extend());
+        debug_assert!(
+            (self.pending_extend as usize) < Self::min_size(),
+            "pending extend was larger than expected"
+        );
 
-            return Ok(());
-        }
+        // join this extend request with any pending (but not yet acted on) extension
+        let extend_by = self.pending_extend as usize + by;
 
-        #[cfg(test)]
-        if dead_space > 0 {
-            println!("dead space created during extend: {} bytes", dead_space);
+        let minimum_extend = Self::min_size();
+        if extend_by < minimum_extend {
+            self.pending_extend = extend_by as u8;
+            return;
         }
 
-        let aligned_top = align_up(top, align_of::<Hole>());
-        let layout = Layout::from_size_align(by - dead_space, 1).unwrap();
+        // only extend up to another valid boundary
+        let new_hole_size = align_down_size(extend_by, align_of::<Hole>());
+        let layout = Layout::from_size_align(new_hole_size, 1).unwrap();
 
-        self.deallocate(NonNull::new_unchecked(aligned_top as *mut u8), layout);
-        self.top = top.add(by);
+        // instantiate the hole by forcing a deallocation on the new memory
+        self.deallocate(NonNull::new_unchecked(top as *mut u8), layout);
+        self.top = top.add(new_hole_size);
 
-        // reset pending resizes
-        self.first.size = 0;
-
-        Ok(())
+        // save extra bytes given to extend that weren't aligned to the hole size
+        self.pending_extend = (extend_by - new_hole_size) as u8;
     }
 }

@@ -566,7 +565,10 @@ impl Cursor {
         // Does hole overlap node?
        assert!(
            hole_u8.wrapping_add(hole_size) <= node_u8,
-            "Freed node aliases existing hole! Bad free?",
+            "Freed node ({:?}) aliases existing hole ({:?}[{}])! Bad free?",
+            node_u8,
+            hole_u8,
+            hole_size,
        );
 
         // All good! Let's insert that after.
@@ -692,7 +694,8 @@ fn deallocate(list: &mut HoleList, addr: *mut u8, size: usize) {
 #[cfg(test)]
 pub mod test {
     use super::HoleList;
-    use crate::Heap;
+    use crate::{align_down_size, Heap};
+    use core::mem::size_of;
     use std::{alloc::Layout, convert::TryInto, mem::MaybeUninit, prelude::v1::*, ptr::NonNull};
 
     #[repr(align(128))]
@@ -715,8 +718,8 @@ pub mod test {
         let assumed_location = data.as_mut_ptr().cast();
 
         let heap = Heap::from_slice(data);
-        assert!(heap.bottom() == assumed_location);
-        assert!(heap.size() == HEAP_SIZE);
+        assert_eq!(heap.bottom(), assumed_location);
+        assert_eq!(heap.size(), align_down_size(HEAP_SIZE, size_of::<usize>()));
        heap
    }

@@ -727,7 +730,10 @@
         // This is the "dummy" node
         assert_eq!(curs.previous().size, 0);
         // This is the "full" heap
-        assert_eq!(curs.current().size, 1000);
+        assert_eq!(
+            curs.current().size,
+            align_down_size(1000, size_of::<usize>())
+        );
         // There is no other hole
         assert!(curs.next().is_none());
     }
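
As a reading aid for the new extend above, its bookkeeping can be modeled with
plain integers. This is a hypothetical sketch, not the crate's API: Model,
ALIGN, and MIN_SIZE stand in for the real HoleList, align_of::<usize>(), and
HoleList::min_size():

    const ALIGN: usize = core::mem::align_of::<usize>();
    const MIN_SIZE: usize = 2 * core::mem::size_of::<usize>();

    struct Model {
        top: usize,            // usize-aligned top of the last hole
        pending_extend: usize, // bytes owned by the heap but not yet usable
    }

    impl Model {
        fn extend(&mut self, by: usize) {
            // join this request with any pending extension
            let extend_by = self.pending_extend + by;
            if extend_by < MIN_SIZE {
                // too small to hold hole metadata: remember it for later
                self.pending_extend = extend_by;
                return;
            }
            // only grow up to the next usize-aligned boundary
            let new_hole_size = extend_by & !(ALIGN - 1);
            self.top += new_hole_size;
            self.pending_extend = extend_by - new_hole_size;
        }
    }

    fn main() {
        let mut m = Model { top: 0, pending_extend: 0 };
        m.extend(1); // mirrors small_heap_extension: too small, stays pending
        assert_eq!(m.pending_extend, 1);
        m.extend(16); // 17 bytes total: 16 become a hole, 1 stays pending
        assert_eq!((m.top, m.pending_extend), (16, 1));
    }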
78 changes: 36 additions & 42 deletions src/lib.rs
@@ -67,6 +67,11 @@ impl Heap {
     /// alignment of the `hole_addr` pointer, the minimum size is between
     /// `2 * size_of::<usize>` and `3 * size_of::<usize>`.
     ///
+    /// The usable size for allocations will be truncated to the nearest
+    /// alignment of `align_of::<usize>`. Any extra bytes left at the end
+    /// will be reclaimed once sufficient additional space is given to
+    /// [`extend`][Heap::extend].
+    ///
     /// # Safety
     ///
     /// This function must be called at most once and must only be used on an
@@ -94,6 +99,11 @@ impl Heap {
     /// deallocation (e.g. a simple bump allocator). Then the overlaid linked-list-allocator can
     /// provide memory reclamation.
     ///
+    /// The usable size for allocations will be truncated to the nearest
+    /// alignment of `align_of::<usize>`. Any extra bytes left at the end
+    /// will be reclaimed once sufficient additional space is given to
+    /// [`extend`][Heap::extend].
+    ///
     /// # Panics
     ///
     /// This method panics if the heap is already initialized.
@@ -126,6 +136,11 @@ impl Heap {
     /// alignment of the `hole_addr` pointer, the minimum size is between
     /// `2 * size_of::<usize>` and `3 * size_of::<usize>`.
     ///
+    /// The usable size for allocations will be truncated to the nearest
+    /// alignment of `align_of::<usize>`. Any extra bytes left at the end
+    /// will be reclaimed once sufficient additional space is given to
+    /// [`extend`][Heap::extend].
+    ///
     /// # Safety
     ///
     /// The bottom address must be valid and the memory in the
@@ -197,13 +212,21 @@ impl Heap {
     }
 
     /// Returns the size of the heap.
+    ///
+    /// This is the size the heap is using for allocations, not necessarily the
+    /// total amount of bytes given to the heap. To determine the exact memory
+    /// boundaries, use [`bottom`][Self::bottom] and [`top`][Self::top].
     pub fn size(&self) -> usize {
-        (self.top() as usize) - (self.bottom() as usize)
+        unsafe { self.holes.top.offset_from(self.holes.bottom) as usize }
     }
 
-    /// Return the top address of the heap
+    /// Return the top address of the heap.
+    ///
+    /// Note: The heap may choose to not use bytes at the end for allocations
+    /// until there is enough room for metadata, but it still retains ownership
+    /// over memory from [`bottom`][Self::bottom] to the address returned.
     pub fn top(&self) -> *mut u8 {
-        self.holes.top
+        unsafe { self.holes.top.add(self.holes.pending_extend as usize) }
     }
 
     /// Returns the size of the used part of the heap
@@ -218,53 +241,24 @@
 
     /// Extends the size of the heap by creating a new hole at the end.
     ///
-    /// Panics when the heap extension fails. Use [`try_extend`] to handle potential errors.
+    /// Small extensions are not guaranteed to grow the usable size of
+    /// the heap. In order to grow the Heap most effectively, extend by
+    /// at least `2 * size_of::<usize>`, keeping the amount a multiple of
+    /// `size_of::<usize>`.
     ///
     /// # Safety
     ///
-    /// The amount of data given in `by` MUST exist directly after the original
-    /// range of data provided when constructing the [Heap]. The additional data
-    /// must have the same lifetime of the original range of data.
-    pub unsafe fn extend(&mut self, by: usize) {
-        self.holes.try_extend(by).expect("heap extension failed");
-    }
-
-    /// Tries to extend the size of the heap by creating a new hole at the end.
-    ///
-    /// # Safety
+    /// Even if this operation doesn't increase the [usable size][`Self::size`]
+    /// by exactly `by` bytes, those bytes are still owned by the Heap for
+    /// later use.
+    ///
     /// The amount of data given in `by` MUST exist directly after the original
     /// range of data provided when constructing the [Heap]. The additional data
     /// must have the same lifetime of the original range of data.
-    pub unsafe fn try_extend(&mut self, by: usize) -> Result<(), ExtendError> {
-        self.holes.try_extend(by)
-    }
-}
-
-#[derive(Debug, PartialEq, Eq, Clone, Copy)]
-pub enum ExtendError {
-    /// The given extension size was not large enough to store the necessary metadata.
-    SizeTooSmall,
-    /// The given extension size was must be a multiple of 16.
-    OddSize,
-    /// The heap size must be a multiple of 16, otherwise extension is not possible.
-    OddHeapSize,
-    /// Extending an empty heap is not possible.
-    EmptyHeap,
-}
-
-impl core::fmt::Display for ExtendError {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        f.write_str(match self {
-            ExtendError::SizeTooSmall => {
-                "heap extension size was not large enough to store the necessary metadata"
-            }
-            ExtendError::OddSize => "heap extension size is not a multiple of 16",
-            ExtendError::OddHeapSize => {
-                "heap extension not possible because heap size is not a multiple of 16"
-            }
-            ExtendError::EmptyHeap => "tried to extend an emtpy heap",
-        })
+    /// Calling this method on an uninitialized Heap is undefined behavior.
+    pub unsafe fn extend(&mut self, by: usize) {
+        self.holes.extend(by);
     }
 }

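
Putting the new extend documentation into practice, a usage sketch follows
(illustrative only; the backing buffer and sizes are invented, in the style of
the crate's tests):

    use core::mem::size_of;
    use linked_list_allocator::Heap;

    fn main() {
        // 256 bytes of usize-aligned backing storage
        static mut BACKING: [u64; 32] = [0; 32];
        unsafe {
            // give the Heap the first 128 bytes...
            let mut heap = Heap::new(BACKING.as_mut_ptr().cast(), 128);
            // ...then grow it in place, keeping the amount a multiple of
            // size_of::<usize>() so no bytes are left pending
            heap.extend(8 * size_of::<usize>());
            assert_eq!(heap.size(), 128 + 8 * size_of::<usize>());
        }
    }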
36 changes: 13 additions & 23 deletions src/test.rs
@@ -23,8 +23,8 @@ fn new_heap() -> Heap {
     let assumed_location = data.as_mut_ptr().cast();
 
     let heap = Heap::from_slice(data);
-    assert!(heap.bottom() == assumed_location);
-    assert!(heap.size() == HEAP_SIZE);
+    assert_eq!(heap.bottom(), assumed_location);
+    assert_eq!(heap.size(), align_down_size(HEAP_SIZE, size_of::<usize>()));
     heap
 }

@@ -37,8 +37,8 @@ fn new_max_heap() -> Heap {
 
     // Unsafe so that we have provenance over the whole allocation.
     let heap = unsafe { Heap::new(start_ptr, HEAP_SIZE) };
-    assert!(heap.bottom() == start_ptr);
-    assert!(heap.size() == HEAP_SIZE);
+    assert_eq!(heap.bottom(), start_ptr);
+    assert_eq!(heap.size(), HEAP_SIZE);
     heap
 }

@@ -196,11 +196,12 @@ fn allocate_many_size_aligns() {
     const STRATS: Range<usize> = 0..4;
 
     let mut heap = new_heap();
-    assert_eq!(heap.size(), 1000);
+    let aligned_heap_size = align_down_size(1000, size_of::<usize>());
+    assert_eq!(heap.size(), aligned_heap_size);
 
     heap.holes.debug();
 
-    let max_alloc = Layout::from_size_align(1000, 1).unwrap();
+    let max_alloc = Layout::from_size_align(aligned_heap_size, 1).unwrap();
     let full = heap.allocate_first_fit(max_alloc).unwrap();
     unsafe {
         heap.deallocate(full, max_alloc);
@@ -507,7 +508,7 @@ fn small_heap_extension() {
     unsafe {
         let mut heap = Heap::new(HEAP.as_mut_ptr().cast(), 32);
         heap.extend(1);
-        assert_eq!(1, heap.holes.pending_extend());
+        assert_eq!(1, heap.holes.pending_extend);
     }
 }

@@ -519,20 +520,8 @@
     unsafe {
         let mut heap = Heap::new(HEAP.as_mut_ptr().cast(), 16);
         heap.extend(17);
-        assert_eq!(0, heap.holes.pending_extend());
-        assert_eq!(16 + 17, heap.size());
-    }
-}
-
-/// Ensures that heap extension fails when trying to extend an empty heap.
-///
-/// Empty heaps always have a start pointer of 0.
-#[test]
-#[should_panic]
-fn extend_empty() {
-    unsafe {
-        let mut heap = Heap::empty();
-        heap.extend(16);
+        assert_eq!(1, heap.holes.pending_extend);
+        assert_eq!(16 + 16, heap.size());
     }
 }

@@ -546,10 +535,11 @@ fn extend_odd_size() {
     static mut HEAP: [u64; 5] = [0; 5];
     unsafe {
         let mut heap = Heap::new(HEAP.as_mut_ptr().cast(), 17);
+        assert_eq!(1, heap.holes.pending_extend);
         heap.extend(16);
-        assert_eq!(16, heap.holes.pending_extend());
+        assert_eq!(1, heap.holes.pending_extend);
         heap.extend(15);
-        assert_eq!(0, heap.holes.pending_extend());
+        assert_eq!(0, heap.holes.pending_extend);
         assert_eq!(17 + 16 + 15, heap.size());
     }
 }
