diff --git a/include/mimalloc/internal.h b/include/mimalloc/internal.h
index 2161142c..2954eabd 100644
--- a/include/mimalloc/internal.h
+++ b/include/mimalloc/internal.h
@@ -184,12 +184,13 @@ size_t _mi_bin_size(uint8_t bin); // for stats
 uint8_t _mi_bin(size_t size); // for stats
 
 // "heap.c"
-void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id);
+void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool noreclaim, uint8_t tag);
 void _mi_heap_destroy_pages(mi_heap_t* heap);
 void _mi_heap_collect_abandon(mi_heap_t* heap);
 void _mi_heap_set_default_direct(mi_heap_t* heap);
 bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid);
 void _mi_heap_unsafe_destroy_all(void);
+mi_heap_t* _mi_heap_by_tag(mi_heap_t* heap, uint8_t tag);
 
 // "stats.c"
 void _mi_stats_done(mi_stats_t* stats);
@@ -508,6 +509,7 @@ static inline mi_heap_t* mi_page_heap(const mi_page_t* page) {
 static inline void mi_page_set_heap(mi_page_t* page, mi_heap_t* heap) {
   mi_assert_internal(mi_page_thread_free_flag(page) != MI_DELAYED_FREEING);
   mi_atomic_store_release(&page->xheap,(uintptr_t)heap);
+  if (heap != NULL) { page->heap_tag = heap->tag; }
 }
 
 // Thread free flag helpers
diff --git a/include/mimalloc/types.h b/include/mimalloc/types.h
index e99742ac..ed326c69 100644
--- a/include/mimalloc/types.h
+++ b/include/mimalloc/types.h
@@ -307,6 +307,7 @@ typedef struct mi_page_s {
   mi_block_t* local_free;     // list of deferred free blocks by this thread (migrates to `free`)
   uint16_t used;              // number of blocks in use (including blocks in `thread_free`)
   uint8_t block_size_shift;   // if not zero, then `(1 << block_size_shift) == block_size` (only used for fast path in `free.c:_mi_page_ptr_unalign`)
+  uint8_t heap_tag;           // tag of the owning heap, used to separate heaps by object type
   // padding
   size_t block_size;          // size available in each block (always `>0`)
   uint8_t* page_start;        // start of the page area containing the blocks
@@ -481,6 +482,7 @@ struct mi_heap_s {
   size_t page_retired_max;    // largest retired index into the `pages` array.
   mi_heap_t* next;            // list of heaps per thread
   bool no_reclaim;            // `true` if this heap should not reclaim abandoned pages
+  uint8_t tag;                // custom tag, can be used to separate heaps by object type
   mi_page_t* pages_free_direct[MI_PAGES_DIRECT]; // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size.
  mi_page_queue_t pages[MI_BIN_FULL + 1];       // queue of pages for each size class (or "bin")
 };
diff --git a/src/heap.c b/src/heap.c
index 8817887d..f6f23549 100644
--- a/src/heap.c
+++ b/src/heap.c
@@ -204,11 +204,13 @@ mi_heap_t* mi_heap_get_backing(void) {
   return bheap;
 }
 
-void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id) {
+void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool noreclaim, uint8_t tag) {
   _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(mi_heap_t));
   heap->tld = tld;
-  heap->thread_id = _mi_thread_id();
-  heap->arena_id = arena_id;
+  heap->thread_id  = _mi_thread_id();
+  heap->arena_id   = arena_id;
+  heap->no_reclaim = noreclaim;
+  heap->tag        = tag;
   if (heap == tld->heap_backing) {
     _mi_random_init(&heap->random);
   }
@@ -218,18 +220,17 @@ void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id) {
   heap->cookie = _mi_heap_random_next(heap) | 1;
   heap->keys[0] = _mi_heap_random_next(heap);
   heap->keys[1] = _mi_heap_random_next(heap);
+  // push on the thread local heaps list
+  heap->next = heap->tld->heaps;
+  heap->tld->heaps = heap;
 }
 
 mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
   mi_heap_t* bheap = mi_heap_get_backing();
   mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t);  // todo: OS allocate in secure mode?
   if (heap == NULL) return NULL;
-  _mi_heap_init(heap, bheap->tld, arena_id);
-  // don't reclaim abandoned pages or otherwise destroy is unsafe
-  heap->no_reclaim = true;
-  // push on the thread local heaps list
-  heap->next = heap->tld->heaps;
-  heap->tld->heaps = heap;
+  // don't reclaim abandoned pages (otherwise `mi_heap_destroy` is unsafe)
+  _mi_heap_init(heap, bheap->tld, arena_id, true /* no reclaim */, 0 /* default tag */);
   return heap;
 }
 
@@ -287,6 +288,18 @@ static void mi_heap_free(mi_heap_t* heap) {
   mi_free(heap);
 }
 
+// return a heap on the same thread as `heap` specialized for the given tag (if one exists)
+mi_heap_t* _mi_heap_by_tag(mi_heap_t* heap, uint8_t tag) {
+  if (heap->tag == tag) {
+    return heap;
+  }
+  for (mi_heap_t* curr = heap->tld->heaps; curr != NULL; curr = curr->next) {
+    if (curr->tag == tag) {
+      return curr;
+    }
+  }
+  return NULL;
+}
 
 /* -----------------------------------------------------------
   Heap destroy
diff --git a/src/init.c b/src/init.c
index 9d8632e4..62bb69dd 100644
--- a/src/init.c
+++ b/src/init.c
@@ -25,6 +25,7 @@ const mi_page_t _mi_page_empty = {
   NULL,       // local_free
   0,          // used
   0,          // block size shift
+  0,          // heap tag
   0,          // block_size
   NULL,       // page_start
   #if (MI_PADDING || MI_ENCODE_FREELIST)
@@ -108,7 +109,8 @@ mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
   0,                // page count
   MI_BIN_FULL, 0,   // page retired min/max
   NULL,             // next
-  false,
+  false,            // can reclaim
+  0,                // tag
   MI_SMALL_PAGES_EMPTY,
   MI_PAGE_QUEUES_EMPTY
 };
@@ -146,6 +148,7 @@ mi_heap_t _mi_heap_main = {
   MI_BIN_FULL, 0,   // page retired min/max
   NULL,             // next heap
   false,            // can reclaim
+  0,                // tag
   MI_SMALL_PAGES_EMPTY,
   MI_PAGE_QUEUES_EMPTY
 };
@@ -281,7 +284,7 @@ static bool _mi_thread_heap_init(void) {
     mi_tld_t* tld = &td->tld;
     mi_heap_t* heap = &td->heap;
     _mi_tld_init(tld, heap);  // must be before `_mi_heap_init`
-    _mi_heap_init(heap, tld, _mi_arena_id_none());
+    _mi_heap_init(heap, tld, _mi_arena_id_none(), false /* can reclaim */, 0 /* default tag */);
     _mi_heap_set_default_direct(heap);
   }
   return false;
@@ -291,7 +294,7 @@ static bool _mi_thread_heap_init(void) {
 void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap) {
   _mi_memzero_aligned(tld,sizeof(mi_tld_t));
   tld->heap_backing = bheap;
-  tld->heaps = bheap;
+  tld->heaps = NULL;
   tld->segments.stats = &tld->stats;
   tld->segments.os = &tld->os;
   tld->os.stats = &tld->stats;
diff --git a/src/segment.c b/src/segment.c
index cfd6c1a3..fc13d2e7 100644
--- a/src/segment.c
+++ b/src/segment.c
@@ -714,6 +714,7 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_seg
   // zero the page data, but not the segment fields and capacity, page start, and block_size (for page size calculations)
   size_t block_size = page->block_size;
   uint8_t block_size_shift = page->block_size_shift;
+  uint8_t heap_tag = page->heap_tag;
   uint8_t* page_start = page->page_start;
   uint16_t capacity = page->capacity;
   uint16_t reserved = page->reserved;
@@ -723,6 +724,7 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_seg
   page->reserved = reserved;
   page->block_size = block_size;
   page->block_size_shift = block_size_shift;
+  page->heap_tag = heap_tag;
   page->page_start = page_start;
   segment->used--;
 
@@ -898,7 +900,12 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
       mi_assert(page->next == NULL);
       _mi_stat_decrease(&tld->stats->pages_abandoned, 1);
       // set the heap again and allow heap thread delayed free again.
-      mi_page_set_heap(page, heap);
+      mi_heap_t* target_heap = _mi_heap_by_tag(heap, page->heap_tag);  // allow custom heaps to separate objects
+      if (target_heap == NULL) {
+        target_heap = heap;
+        _mi_error_message(EINVAL, "page with tag %u cannot be reclaimed by a heap with the same tag on this thread (reclaiming into a heap with tag %u instead)\n", page->heap_tag, heap->tag);
+      }
+      mi_page_set_heap(page, target_heap);
       _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, true); // override never (after heap is set)
       _mi_page_free_collect(page, false); // ensure used count is up to date
       if (mi_page_all_free(page)) {
@@ -907,8 +914,8 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
       }
       else {
         // otherwise reclaim it into the heap
-        _mi_page_reclaim(heap, page);
-        if (requested_block_size == mi_page_block_size(page) && mi_page_has_any_available(page)) {
+        _mi_page_reclaim(target_heap, page);
+        if (requested_block_size == mi_page_block_size(page) && mi_page_has_any_available(page) && heap == target_heap) {
          if (right_page_reclaimed != NULL) { *right_page_reclaimed = true; }
        }
      }
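
For illustration only (this helper is hypothetical, not part of the patch): a minimal sketch of how a thread-local heap carrying a custom tag could be created through the extended `_mi_heap_init` signature, mirroring `mi_heap_new_in_arena` above. The patch itself only threads the tag through heap initialization and segment reclamation; it adds no public API for tagged heaps.

#include "mimalloc.h"
#include "mimalloc/internal.h"

// Hypothetical helper (not in this patch): create a no-reclaim thread-local
// heap with a custom tag. Pages allocated from it record heap_tag == tag via
// mi_page_set_heap(), so if the owning thread abandons them, the reclaim path
// in mi_segment_reclaim() routes them back to a heap with the same tag through
// _mi_heap_by_tag() instead of mixing them into the default (tag 0) heap.
static mi_heap_t* my_heap_new_tagged(uint8_t tag) {
  mi_heap_t* bheap = mi_heap_get_backing();
  mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t);
  if (heap == NULL) return NULL;
  _mi_heap_init(heap, bheap->tld, _mi_arena_id_none(), true /* no reclaim */, tag);
  return heap;
}

Allocation itself is unchanged under this sketch: blocks would come from `mi_heap_malloc(heap, size)` as usual, and the tag only influences which heap an abandoned page is reclaimed into.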