diff --git a/Objects/mimalloc/page.c b/Objects/mimalloc/page.c
index 5536a611a90f01..25ecd6ec7d7983 100644
--- a/Objects/mimalloc/page.c
+++ b/Objects/mimalloc/page.c
@@ -460,6 +460,9 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
 
   mi_page_set_has_aligned(page, false);
 
+  // any previous QSBR goals are no longer valid because we reused the page
+  _PyMem_mi_page_clear_qsbr(page);
+
   // don't retire too often..
   // (or we end up retiring and re-allocating most of the time)
   // NOTE: refine this more: we should not retire if this
@@ -496,6 +499,9 @@ void _mi_heap_collect_retired(mi_heap_t* heap, bool force) {
     if (mi_page_all_free(page)) {
       page->retire_expire--;
       if (force || page->retire_expire == 0) {
+#ifdef Py_GIL_DISABLED
+        mi_assert_internal(page->qsbr_goal == 0);
+#endif
         _PyMem_mi_page_maybe_free(page, pq, force);
       }
       else {
diff --git a/Objects/mimalloc/segment.c b/Objects/mimalloc/segment.c
index 3277c32c3093b7..08b156433653a4 100644
--- a/Objects/mimalloc/segment.c
+++ b/Objects/mimalloc/segment.c
@@ -1278,6 +1278,9 @@ static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, s
       // if this page is all free now, free it without adding to any queues (yet)
       mi_assert_internal(page->next == NULL && page->prev==NULL);
       _mi_stat_decrease(&tld->stats->pages_abandoned, 1);
+#ifdef Py_GIL_DISABLED
+      page->qsbr_goal = 0;
+#endif
       segment->abandoned--;
       slice = mi_segment_page_clear(page, tld); // re-assign slice due to coalesce!
       mi_assert_internal(!mi_slice_is_used(slice));
@@ -1350,13 +1353,16 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
       _mi_page_free_collect(page, false); // ensure used count is up to date
       if (mi_page_all_free(page) && _PyMem_mi_page_is_safe_to_free(page)) {
         // if everything free by now, free the page
+#ifdef Py_GIL_DISABLED
+        page->qsbr_goal = 0;
+#endif
         slice = mi_segment_page_clear(page, tld); // set slice again due to coalesceing
       }
       else {
         // otherwise reclaim it into the heap
         _mi_page_reclaim(target_heap, page);
         if (requested_block_size == page->xblock_size && mi_page_has_any_available(page) &&
-            heap == target_heap) {
+            requested_block_size <= MI_MEDIUM_OBJ_SIZE_MAX && heap == target_heap) {
           if (right_page_reclaimed != NULL) { *right_page_reclaimed = true; }
         }
       }
diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c
index 395ae99c8688b7..e7813807674abe 100644
--- a/Objects/obmalloc.c
+++ b/Objects/obmalloc.c
@@ -104,65 +104,42 @@ _PyMem_mi_page_clear_qsbr(mi_page_t *page)
 #endif
 }
 
-// Is the empty page safe to free? It's safe if there was a QSBR goal set and
-// the goal has been reached.
+// Check if an empty, newly reclaimed page is safe to free now.
 static bool
 _PyMem_mi_page_is_safe_to_free(mi_page_t *page)
 {
     assert(mi_page_all_free(page));
 #ifdef Py_GIL_DISABLED
-    if (page->use_qsbr) {
-        if (page->qsbr_goal == 0) {
-            // No QSBR goal set, so we can't safely free the page yet.
-            return false;
-        }
+    assert(page->qsbr_node.next == NULL);
+    if (page->use_qsbr && page->qsbr_goal != 0) {
         _PyThreadStateImpl *tstate = (_PyThreadStateImpl *)_PyThreadState_GET();
         if (tstate == NULL) {
             return false;
         }
-        if (!_Py_qbsr_goal_reached(tstate->qsbr, page->qsbr_goal)) {
-            return false;
-        }
-        _PyMem_mi_page_clear_qsbr(page);
-        return true;
+        return _Py_qbsr_goal_reached(tstate->qsbr, page->qsbr_goal);
     }
 #endif
     return true;
-}
-
-#ifdef Py_GIL_DISABLED
-// Enqueue a page to be freed later when it's safe to do so (using QSBR).
-// Note that we may still allocate from the page.
-static void
-enqueue_page_qsbr(mi_page_t *page, bool reclaimed)
-{
-    assert(mi_page_all_free(page));
-    assert(reclaimed || page->qsbr_goal == 0);
-
-    _PyThreadStateImpl *tstate = (_PyThreadStateImpl *)PyThreadState_GET();
-    page->retire_expire = 0;
-
-    // The goal may be set if we are reclaiming an empty abandoned page
-    if (page->qsbr_goal == 0) {
-        page->qsbr_goal = _Py_qsbr_deferred_advance(tstate->qsbr);
-    }
-    llist_insert_tail(&tstate->mimalloc.page_list, &page->qsbr_node);
 }
-#endif
 
 static bool
 _PyMem_mi_page_maybe_free(mi_page_t *page, mi_page_queue_t *pq, bool force)
 {
 #ifdef Py_GIL_DISABLED
-    if (!_PyMem_mi_page_is_safe_to_free(page)) {
-        // The page may already be in the QSBR linked list if we allocated from
-        // it after all blocks were freed.
-        if (page->qsbr_node.next != NULL) {
-            llist_remove(&page->qsbr_node);
-            page->qsbr_goal = 0;
+    assert(mi_page_all_free(page));
+    if (page->use_qsbr) {
+        _PyThreadStateImpl *tstate = (_PyThreadStateImpl *)PyThreadState_GET();
+        if (page->qsbr_goal != 0 && _Py_qbsr_goal_reached(tstate->qsbr, page->qsbr_goal)) {
+            _PyMem_mi_page_clear_qsbr(page);
+            _mi_page_free(page, pq, force);
+            return true;
         }
-        enqueue_page_qsbr(page, false);
+
+        _PyMem_mi_page_clear_qsbr(page);
+        page->retire_expire = 0;
+        page->qsbr_goal = _Py_qsbr_deferred_advance(tstate->qsbr);
+        llist_insert_tail(&tstate->mimalloc.page_list, &page->qsbr_node);
         return false;
     }
 #endif
@@ -177,7 +154,10 @@ _PyMem_mi_page_reclaimed(mi_page_t *page)
     assert(page->qsbr_node.next == NULL);
     if (page->qsbr_goal != 0) {
         if (mi_page_all_free(page)) {
-            enqueue_page_qsbr(page, true);
+            assert(page->qsbr_node.next == NULL);
+            _PyThreadStateImpl *tstate = (_PyThreadStateImpl *)PyThreadState_GET();
+            page->retire_expire = 0;
+            llist_insert_tail(&tstate->mimalloc.page_list, &page->qsbr_node);
         }
         else {
             page->qsbr_goal = 0;
@@ -205,8 +185,7 @@ _PyMem_mi_heap_collect_qsbr(mi_heap_t *heap)
         mi_page_t *page = llist_data(node, mi_page_t, qsbr_node);
         if (!mi_page_all_free(page)) {
            // We allocated from this page some point after the delayed free
-            page->qsbr_goal = 0;
-            llist_remove(node);
+            _PyMem_mi_page_clear_qsbr(page);
            continue;
        }
 
@@ -214,8 +193,7 @@
             return;
         }
 
-        page->qsbr_goal = 0;
-        llist_remove(node);
+        _PyMem_mi_page_clear_qsbr(page);
         _mi_page_free(page, mi_page_queue_of(page), false);
     }
 #endif
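
For readers following the Objects/obmalloc.c hunk: the rewritten _PyMem_mi_page_maybe_free() folds the old _PyMem_mi_page_is_safe_to_free() / enqueue_page_qsbr() split into a single decision. If the empty page's recorded QSBR goal has already been reached, the page is freed on the spot; otherwise any stale goal and list membership are cleared, a fresh goal is recorded with _Py_qsbr_deferred_advance(), and the page is queued on the owning thread's page_list for a later _PyMem_mi_heap_collect_qsbr() sweep. The standalone C sketch below models that flow under simplified assumptions; qsbr_state, dpage, goal_reached(), and deferred_advance() are hypothetical stand-ins invented for illustration, not CPython's real QSBR or mimalloc structures.

/*
 * Minimal sketch of the deferred-free pattern that the new
 * _PyMem_mi_page_maybe_free() implements.  All types and helpers here are
 * simplified, hypothetical stand-ins, not CPython's real structures.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t shared_seq;    /* global sequence, advanced when a free is deferred */
    uint64_t min_observed;  /* oldest sequence any thread might still observe */
} qsbr_state;

typedef struct {
    uint64_t goal;   /* 0 means "no deferred-free goal recorded" */
    bool queued;     /* stand-in for membership in the per-thread page_list */
} dpage;

/* Analogue of _Py_qbsr_goal_reached(): freeing is safe once every thread
 * has passed a quiescent state at least as recent as the goal. */
static bool
goal_reached(const qsbr_state *q, uint64_t goal)
{
    return q->min_observed >= goal;
}

/* Analogue of _Py_qsbr_deferred_advance(): pick a goal in the future. */
static uint64_t
deferred_advance(qsbr_state *q)
{
    return ++q->shared_seq;
}

/* The decision the patch centralizes: free the empty page immediately if
 * its goal was reached, else record a new goal and queue the page for a
 * later collect pass. */
static bool
maybe_free(qsbr_state *q, dpage *p)
{
    if (p->goal != 0 && goal_reached(q, p->goal)) {
        p->goal = 0;
        p->queued = false;
        /* ... the real code calls _mi_page_free() here ... */
        return true;
    }
    p->goal = deferred_advance(q);
    p->queued = true;   /* retried by the next collect-qsbr sweep */
    return false;
}

int
main(void)
{
    qsbr_state q = {0, 0};
    dpage p = {0, false};
    printf("first attempt freed: %d\n", maybe_free(&q, &p));   /* 0: deferred */
    q.min_observed = q.shared_seq;  /* all threads passed a quiescent state */
    printf("second attempt freed: %d\n", maybe_free(&q, &p));  /* 1: freed */
    return 0;
}

This also illustrates why _mi_page_retire() now clears any previous goal when a page is reused: as the diff's comment notes, a stale goal from an earlier life of the page is no longer valid, and leaving it set could otherwise let a later free happen before readers of the reused page have quiesced.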