
Improve the dealloc and realloc functions to more aggressively reuse memory #35

Merged (2 commits, Oct 30, 2019)
src/collections/raw_vec.rs — 9 additions, 0 deletions

@@ -639,6 +639,15 @@ impl<'a, T> RawVec<'a, T> {

```rust
    }
}

impl<'a, T> Drop for RawVec<'a, T> {
    /// Frees the memory owned by the RawVec *without* trying to Drop its contents.
    fn drop(&mut self) {
        unsafe {
            self.dealloc_buffer();
        }
    }
}

// We need to guarantee the following:
// * We don't ever allocate `> isize::MAX` byte-size objects
// * We don't overflow `usize::MAX` and actually allocate too little
```
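A quick illustration of what this buys (my example, not code from this PR; it assumes the `collections` feature and that nothing else allocates in between): once `RawVec` frees its buffer on drop, dropping the most recently allocated vector lets the bump allocator hand the same bytes right back.

```rust
use bumpalo::{collections::Vec, Bump};

fn main() {
    let b = Bump::new();
    let v: Vec<i32> = Vec::with_capacity_in(4, &b);
    let p = v.as_ptr();

    // RawVec::drop now calls dealloc_buffer, which forwards to Bump's dealloc.
    drop(v);

    // The freed buffer was the last allocation, so an allocation of the same
    // size and alignment reuses exactly the same bytes.
    let w: Vec<i32> = Vec::with_capacity_in(4, &b);
    assert_eq!(w.as_ptr(), p);
}
```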
src/lib.rs — 46 additions, 27 deletions

@@ -779,6 +779,16 @@ impl Bump {
```rust
            ));
        }
    }

    #[inline]
    unsafe fn is_last_allocation(&self, ptr: NonNull<u8>, layout: Layout) -> bool {
        let old_size = layout.size();
        let footer = self.current_chunk_footer.get();
        let footer = footer.as_ref();
        let footer_ptr = footer.ptr.get();
        let footer_usize = footer_ptr.as_ptr() as usize;
        footer_usize.checked_sub(old_size) == Some(ptr.as_ptr() as usize)
    }
}

/// An iterator over each chunk of allocated memory that
```
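Why this check identifies the most recent allocation: the bump pointer always sits just past the newest allocation, so `ptr + size == bump_ptr` holds for that allocation and no other. Here is a minimal, self-contained sketch of the idea (a hypothetical `MiniBump` over plain indices, not bumpalo's real types):

```rust
/// A toy upward-bumping allocator over a byte buffer, using indices as "pointers".
struct MiniBump {
    buf: Vec<u8>, // backing storage for a single chunk
    next: usize,  // index of the next free byte: the bump pointer
}

impl MiniBump {
    fn new(capacity: usize) -> Self {
        MiniBump { buf: vec![0; capacity], next: 0 }
    }

    /// Bump-allocate `size` bytes (alignment ignored in this sketch).
    fn alloc(&mut self, size: usize) -> Option<usize> {
        let start = self.next;
        let end = start.checked_add(size)?;
        if end > self.buf.len() {
            return None;
        }
        self.next = end;
        Some(start)
    }

    /// Mirrors `is_last_allocation`: true iff the allocation ends exactly
    /// where the bump pointer currently points.
    fn is_last_allocation(&self, start: usize, size: usize) -> bool {
        self.next.checked_sub(size) == Some(start)
    }

    /// Mirrors the new `dealloc`: roll the bump pointer back for the last
    /// allocation; anything else stays leaked until a reset.
    fn dealloc(&mut self, start: usize, size: usize) {
        if self.is_last_allocation(start, size) {
            self.next = start;
        }
    }
}

fn main() {
    let mut b = MiniBump::new(64);
    let a = b.alloc(16).unwrap();
    let c = b.alloc(8).unwrap();

    b.dealloc(a, 16); // not the last allocation: leaked, bump pointer unchanged
    assert_eq!(b.next, 24);

    b.dealloc(c, 8); // the last allocation: its bytes are reclaimed
    assert_eq!(b.next, 16);
    assert_eq!(b.alloc(8), Some(16)); // and handed out again
}
```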
@@ -833,8 +843,14 @@ unsafe impl<'a> alloc::Alloc for &'a Bump {
```diff
         Ok(self.alloc_layout(layout))
     }

-    #[inline(always)]
-    unsafe fn dealloc(&mut self, _ptr: NonNull<u8>, _layout: Layout) {}
+    #[inline]
+    unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
+        // If the pointer is the last allocation we made, we can reuse the bytes,
+        // otherwise they are simply leaked -- at least until somebody calls reset().
+        if self.is_last_allocation(ptr, layout) {
+            self.current_chunk_footer.get().as_ref().ptr.set(ptr);
+        }
+    }
```

Review comments on this hunk:

Owner: Can you replace the `#[inline(always)]` with just `#[inline]` now that this isn't a no-op? Thanks.

Contributor (author): Done.
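The flip side (my illustration, not code from this PR; it assumes the `collections` feature): deallocating anything that is *not* the most recent allocation reclaims nothing, so those bytes stay occupied until `reset()`.

```rust
use bumpalo::{collections::Vec, Bump};

fn main() {
    let b = Bump::new();
    let v1: Vec<i32> = Vec::with_capacity_in(4, &b);
    let _v2: Vec<i32> = Vec::with_capacity_in(4, &b);
    let p1 = v1.as_ptr();

    // v1 is no longer the last allocation, so dropping it leaks its bytes
    // inside the current chunk.
    drop(v1);

    // A new allocation is bumped past _v2 instead of reusing v1's bytes.
    let v3: Vec<i32> = Vec::with_capacity_in(4, &b);
    assert_ne!(v3.as_ptr(), p1);
}
```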

```diff
     #[inline]
     unsafe fn realloc(
```

@@ -845,35 +861,38 @@

```diff
     ) -> Result<NonNull<u8>, alloc::AllocErr> {
         let old_size = layout.size();

-        // Shrinking allocations. Do nothing.
-        if new_size < old_size {
-            return Ok(ptr);
-        }
-
-        // See if the `ptr` is the last allocation we made. If so, we can likely
-        // realloc in place by just bumping a bit further!
-        //
-        // Note that we align-up the bump pointer on new allocation requests
-        // (not eagerly) so if `ptr` was the result of our last allocation, then
-        // the bump pointer is still pointing just after it.
-        let footer = self.current_chunk_footer.get();
-        let footer = footer.as_ref();
-        let footer_ptr = footer.ptr.get().as_ptr() as usize;
-        if footer_ptr.checked_sub(old_size) == Some(ptr.as_ptr() as usize) {
-            let delta = layout_from_size_align(new_size - old_size, 1);
-            if self.try_alloc_layout_fast(delta).is_some() {
-                return Ok(ptr);
-            }
-        }
-
-        // Otherwise, fall back on alloc + copy + dealloc.
-        let new_layout = layout_from_size_align(new_size, layout.align());
-        let result = self.alloc(new_layout);
-        if let Ok(new_ptr) = result {
-            ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), cmp::min(old_size, new_size));
-            self.dealloc(ptr, layout);
-        }
-        result
+        if new_size <= old_size {
+            // Shrinking an allocation is easy: just give the same pointer back.
+            // If it's the last allocation, we can reclaim some of the bytes,
+            // otherwise they are simply leaked.
+            if self.is_last_allocation(ptr, layout) {
+                let new_end = NonNull::new_unchecked(ptr.as_ptr().add(new_size));
+                self.current_chunk_footer.get().as_ref().ptr.set(new_end);
+            }
+
+            Ok(ptr)
+        } else {
+            // Increasing an allocation is harder. If it is the last allocation, we
+            // can *try* increasing it, but we might not have enough space in the
+            // current chunk. If this does not work, we must fall back to alloc + copy.
+            if self.is_last_allocation(ptr, layout) {
+                // Since the old pointer is already aligned, we can simply request the
+                // remaining bytes without any alignment.
+                let delta = new_size - old_size;
+                if self
+                    .try_alloc_layout_fast(layout_from_size_align(delta, 1))
+                    .is_some()
+                {
+                    return Ok(ptr);
+                }
+            }
+
+            // Fallback: alloc + copy.
+            let new_layout = layout_from_size_align(new_size, layout.align());
+            let new_ptr = self.alloc_layout(new_layout);
+            ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), old_size);
+            Ok(new_ptr)
+        }
     }
 }
```
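What this means in practice (a usage sketch of mine, assuming the `collections` feature and that nothing else allocates in between; the test below exercises the same paths): growing or shrinking the most recent vector now adjusts the bump pointer instead of copying.

```rust
use bumpalo::{collections::Vec, Bump};

fn main() {
    let b = Bump::with_capacity(1024);
    let mut v: Vec<i32> = Vec::with_capacity_in(10, &b);
    for i in 0..10 {
        v.push(i);
    }
    let p = v.as_ptr();

    // Growing: v owns the last allocation, so realloc just bumps further
    // and the buffer stays where it is.
    v.reserve(100);
    assert!(v.capacity() >= 110);
    assert_eq!(v.as_ptr(), p);

    // Shrinking: the same pointer comes back and the tail bytes are reclaimed.
    v.shrink_to_fit();
    assert_eq!(v.as_ptr(), p);
}
```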

tests/vec.rs — 121 additions, 0 deletions

@@ -1,5 +1,7 @@

```rust
#![cfg(feature = "collections")]
use bumpalo::{collections::Vec, Bump};
use std::cell::Cell;
use std::mem;

#[test]
fn push_a_bunch_of_items() {
```

@@ -9,3 +11,122 @@ fn push_a_bunch_of_items() {

```rust
        v.push(x);
    }
}
```

```rust
#[test]
#[allow(clippy::cognitive_complexity)]
fn test_realloc() {
    let b = Bump::with_capacity(1000);
    let v1_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
    let v2_data = [41, 42, 43, 44, 45, 46, 47, 48, 49, 50];
    let mut v1 = bumpalo::vec![in &b; 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
    let v1_ptr = v1.as_ptr();
    assert!(10 <= v1.capacity() && v1.capacity() < 20);

    // Shift the size up and down a few times and see that the buffer stays in place.
    v1.reserve(100);
    assert!(v1.capacity() >= 100);
    assert_eq!(v1.as_ptr(), v1_ptr);
    assert_eq!(v1, v1_data);

    v1.shrink_to_fit();
    assert!(10 <= v1.capacity() && v1.capacity() < 20);
    assert_eq!(v1.as_ptr(), v1_ptr);
    assert_eq!(v1, v1_data);

    v1.reserve(100);
    assert!(v1.capacity() >= 100);
    assert_eq!(v1.as_ptr(), v1_ptr);
    assert_eq!(v1, v1_data);

    v1.shrink_to_fit();
    assert!(10 <= v1.capacity() && v1.capacity() < 20);
    assert_eq!(v1.as_ptr(), v1_ptr);
    assert_eq!(v1, v1_data);

    // Allocate just after our buffer, to see that it blocks in-place expansion.
    let mut v2 = bumpalo::vec![in &b; 41, 42, 43, 44, 45, 46, 47, 48, 49, 50];
    let v2_ptr = v2.as_ptr();
    assert!(10 <= v2.capacity() && v2.capacity() < 20);
    assert_eq!(unsafe { v1_ptr.add(v1.capacity()) }, v2_ptr);

    v1.reserve(100);
    assert!(v1.capacity() >= 100);
    assert_ne!(v1.as_ptr(), v1_ptr);
    assert_eq!(v1, v1_data);

    // Our chunk is now [old, dead v1] [current v2] [current v1].
    let v1_ptr = v1.as_ptr();
    assert_eq!(unsafe { v2_ptr.add(v2.capacity()) }, v1_ptr);

    // See that we can still shrink at the new location as well.
    v1.shrink_to_fit();
    assert!(10 <= v1.capacity() && v1.capacity() < 20);
    assert_eq!(v1.as_ptr(), v1_ptr);
    assert_eq!(v1, v1_data);

    // And see that we get a new chunk if we expand too much.
    v1.reserve(10_000);
    assert!(v1.capacity() >= 10_000);
    assert_ne!(v1.as_ptr(), v1_ptr);
    assert_eq!(v1, v1_data);

    // See that we can deallocate and re-use the existing memory.
    let v1_ptr = v1.as_ptr();
    mem::drop(v1);

    let mut v1 = bumpalo::vec![in &b; 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
    assert!(10 <= v1.capacity() && v1.capacity() < 20);
    assert_eq!(v1.as_ptr(), v1_ptr);

    // At this point, the old chunk is [old, dead v1] [current v2] [old, dead v1].
    // See that we can still shrink buffers that are not at the end without moving them.
    v2.truncate(5);
    v2.shrink_to_fit();
    assert!(v2.capacity() < 10);
    assert_eq!(v2.as_ptr(), v2_ptr);
    assert_eq!(v2, &v2_data[..5]);

    // However, we cannot increase their size back up again without moving.
    v2.extend_from_slice(&[46, 47, 48, 49, 50]);
    assert!(10 <= v2.capacity() && v2.capacity() < 20);
    assert_ne!(v2.as_ptr(), v2_ptr);
    assert_eq!(v2, v2_data);
    let v2_ptr = v2.as_ptr();

    // At this point, our new chunk should be [current v1] [current v2].
    assert_eq!(unsafe { v1_ptr.add(v1.capacity()) }, v2_ptr);

    // If we free v2, we should be able to extend v1 in place.
    mem::drop(v2);
    v1.reserve(100);
    assert!(v1.capacity() >= 100);
    assert_eq!(v1.as_ptr(), v1_ptr);
}
```

```rust
#[test]
fn recursive_vecs() {
    // The purpose of this test is to check that data structures with
    // self-references are allowed without causing a compile error
    // because of the dropck.
    let b = Bump::new();

    struct Node<'a> {
        myself: Cell<Option<&'a Node<'a>>>,
        edges: Cell<Vec<'a, &'a Node<'a>>>,
    }

    let node1: &Node = b.alloc(Node {
        myself: Cell::new(None),
        edges: Cell::new(Vec::new_in(&b)),
    });
    let node2: &Node = b.alloc(Node {
        myself: Cell::new(None),
        edges: Cell::new(Vec::new_in(&b)),
    });

    node1.myself.set(Some(node1));
    node1.edges.set(bumpalo::vec![in &b; node1, node1, node2]);

    node2.myself.set(Some(node2));
    node2.edges.set(bumpalo::vec![in &b; node1, node2]);
}
```