Skip to content

Commit

Permalink
Fix bug with reclaiming pages and refactor code.
Browse files Browse the repository at this point in the history
  • Loading branch information
colesbury committed Mar 5, 2024
1 parent 89d8cd6 commit 1645d3a
Show file tree
Hide file tree
Showing 3 changed files with 35 additions and 45 deletions.
6 changes: 6 additions & 0 deletions Objects/mimalloc/page.c
Original file line number Diff line number Diff line change
Expand Up @@ -460,6 +460,9 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {

mi_page_set_has_aligned(page, false);

// any previous QSBR goals are no longer valid because we reused the page
_PyMem_mi_page_clear_qsbr(page);

// don't retire too often..
// (or we end up retiring and re-allocating most of the time)
// NOTE: refine this more: we should not retire if this
Expand Down Expand Up @@ -496,6 +499,9 @@ void _mi_heap_collect_retired(mi_heap_t* heap, bool force) {
if (mi_page_all_free(page)) {
page->retire_expire--;
if (force || page->retire_expire == 0) {
#ifdef Py_GIL_DISABLED
mi_assert_internal(page->qsbr_goal == 0);
#endif
_PyMem_mi_page_maybe_free(page, pq, force);
}
else {
Expand Down
8 changes: 7 additions & 1 deletion Objects/mimalloc/segment.c
Original file line number Diff line number Diff line change
Expand Up @@ -1278,6 +1278,9 @@ static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, s
// if this page is all free now, free it without adding to any queues (yet)
mi_assert_internal(page->next == NULL && page->prev==NULL);
_mi_stat_decrease(&tld->stats->pages_abandoned, 1);
#ifdef Py_GIL_DISABLED
page->qsbr_goal = 0;
#endif
segment->abandoned--;
slice = mi_segment_page_clear(page, tld); // re-assign slice due to coalesce!
mi_assert_internal(!mi_slice_is_used(slice));
Expand Down Expand Up @@ -1350,13 +1353,16 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
_mi_page_free_collect(page, false); // ensure used count is up to date
if (mi_page_all_free(page) && _PyMem_mi_page_is_safe_to_free(page)) {
// if everything free by now, free the page
#ifdef Py_GIL_DISABLED
page->qsbr_goal = 0;
#endif
slice = mi_segment_page_clear(page, tld); // set slice again due to coalesceing
}
else {
// otherwise reclaim it into the heap
_mi_page_reclaim(target_heap, page);
if (requested_block_size == page->xblock_size && mi_page_has_any_available(page) &&
heap == target_heap) {
requested_block_size <= MI_MEDIUM_OBJ_SIZE_MAX && heap == target_heap) {
if (right_page_reclaimed != NULL) { *right_page_reclaimed = true; }
}
}
Expand Down
66 changes: 22 additions & 44 deletions Objects/obmalloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -104,65 +104,42 @@ _PyMem_mi_page_clear_qsbr(mi_page_t *page)
#endif
}

// Check if an empty, newly reclaimed page is safe to free now.
// It is safe when either no QSBR goal is pending for the page, or the
// pending goal has already been reached by this thread's QSBR state.
static bool
_PyMem_mi_page_is_safe_to_free(mi_page_t *page)
{
    assert(mi_page_all_free(page));
#ifdef Py_GIL_DISABLED
    // The page must not already be queued for deferred freeing.
    assert(page->qsbr_node.next == NULL);
    if (page->use_qsbr && page->qsbr_goal != 0) {
        _PyThreadStateImpl *tstate = (_PyThreadStateImpl *)_PyThreadState_GET();
        if (tstate == NULL) {
            // No thread state: cannot poll QSBR, so be conservative.
            return false;
        }
        return _Py_qbsr_goal_reached(tstate->qsbr, page->qsbr_goal);
    }
#endif
    return true;
}

#ifdef Py_GIL_DISABLED
// Defer freeing of an all-free page until QSBR says it is safe.
// The page remains usable for allocation while it waits on the list.
static void
enqueue_page_qsbr(mi_page_t *page, bool reclaimed)
{
    assert(mi_page_all_free(page));
    assert(reclaimed || page->qsbr_goal == 0);

    _PyThreadStateImpl *ts = (_PyThreadStateImpl *)PyThreadState_GET();

    // A reclaimed empty abandoned page may already carry a goal; keep it.
    // Otherwise record a fresh deferred-advance goal for this thread.
    if (page->qsbr_goal == 0) {
        page->qsbr_goal = _Py_qsbr_deferred_advance(ts->qsbr);
    }
    page->retire_expire = 0;

    llist_insert_tail(&ts->mimalloc.page_list, &page->qsbr_node);
}
#endif

static bool
_PyMem_mi_page_maybe_free(mi_page_t *page, mi_page_queue_t *pq, bool force)
{
#ifdef Py_GIL_DISABLED
if (!_PyMem_mi_page_is_safe_to_free(page)) {
// The page may already be in the QSBR linked list if we allocated from
// it after all blocks were freed.
if (page->qsbr_node.next != NULL) {
llist_remove(&page->qsbr_node);
page->qsbr_goal = 0;
assert(mi_page_all_free(page));
if (page->use_qsbr) {
_PyThreadStateImpl *tstate = (_PyThreadStateImpl *)PyThreadState_GET();
if (page->qsbr_goal != 0 && _Py_qbsr_goal_reached(tstate->qsbr, page->qsbr_goal)) {
_PyMem_mi_page_clear_qsbr(page);
_mi_page_free(page, pq, force);
return true;
}
enqueue_page_qsbr(page, false);

_PyMem_mi_page_clear_qsbr(page);
page->retire_expire = 0;
page->qsbr_goal = _Py_qsbr_deferred_advance(tstate->qsbr);
llist_insert_tail(&tstate->mimalloc.page_list, &page->qsbr_node);
return false;
}
#endif
Expand All @@ -177,7 +154,10 @@ _PyMem_mi_page_reclaimed(mi_page_t *page)
assert(page->qsbr_node.next == NULL);
if (page->qsbr_goal != 0) {
if (mi_page_all_free(page)) {
enqueue_page_qsbr(page, true);
assert(page->qsbr_node.next == NULL);
_PyThreadStateImpl *tstate = (_PyThreadStateImpl *)PyThreadState_GET();
page->retire_expire = 0;
llist_insert_tail(&tstate->mimalloc.page_list, &page->qsbr_node);
}
else {
page->qsbr_goal = 0;
Expand Down Expand Up @@ -205,17 +185,15 @@ _PyMem_mi_heap_collect_qsbr(mi_heap_t *heap)
mi_page_t *page = llist_data(node, mi_page_t, qsbr_node);
if (!mi_page_all_free(page)) {
// We allocated from this page some point after the delayed free
page->qsbr_goal = 0;
llist_remove(node);
_PyMem_mi_page_clear_qsbr(page);
continue;
}

if (!_Py_qsbr_poll(tstate->qsbr, page->qsbr_goal)) {
return;
}

page->qsbr_goal = 0;
llist_remove(node);
_PyMem_mi_page_clear_qsbr(page);
_mi_page_free(page, mi_page_queue_of(page), false);
}
#endif
Expand Down

0 comments on commit 1645d3a

Please sign in to comment.