From 672b6f4a69f3b10eea7f76471a785ab6b691d3aa Mon Sep 17 00:00:00 2001 From: Dino Viehland Date: Tue, 26 Sep 2023 17:41:58 -0700 Subject: [PATCH] Fix mimalloc formatting Summary: Test Plan: Reviewers: Subscribers: Tasks: Tags: --- Include/mimalloc/mimalloc.h | 14 +-- Include/mimalloc/mimalloc/atomic.h | 2 +- Include/mimalloc/mimalloc/internal.h | 2 +- Include/mimalloc/mimalloc/prim.h | 14 +-- Include/mimalloc/mimalloc/track.h | 4 +- Include/mimalloc/mimalloc/types.h | 22 ++-- Objects/mimalloc/alloc-aligned.c | 6 +- Objects/mimalloc/alloc.c | 14 +-- Objects/mimalloc/arena.c | 95 +++++++++--------- Objects/mimalloc/bitmap.c | 16 +-- Objects/mimalloc/bitmap.h | 4 +- Objects/mimalloc/os.c | 44 ++++---- Objects/mimalloc/page.c | 24 ++--- Objects/mimalloc/prim/unix/prim.c | 46 ++++----- Objects/mimalloc/prim/wasi/prim.c | 8 +- Objects/mimalloc/prim/windows/prim.c | 14 +-- Objects/mimalloc/random.c | 2 +- Objects/mimalloc/segment-map.c | 4 +- Objects/mimalloc/segment.c | 144 +++++++++++++-------------- Objects/mimalloc/stats.c | 2 +- 20 files changed, 239 insertions(+), 242 deletions(-) diff --git a/Include/mimalloc/mimalloc.h b/Include/mimalloc/mimalloc.h index 894ea4ca1e6eacc..2cdad95b17f8f46 100644 --- a/Include/mimalloc/mimalloc.h +++ b/Include/mimalloc/mimalloc.h @@ -340,18 +340,18 @@ typedef enum mi_option_e { mi_option_deprecated_segment_cache, mi_option_deprecated_page_reset, mi_option_abandoned_page_purge, // immediately purge delayed purges on thread termination - mi_option_deprecated_segment_reset, - mi_option_eager_commit_delay, + mi_option_deprecated_segment_reset, + mi_option_eager_commit_delay, mi_option_purge_delay, // memory purging is delayed by N milli seconds; use 0 for immediate purging or -1 for no purging at all. mi_option_use_numa_nodes, // 0 = use all available numa nodes, otherwise use at most N nodes. mi_option_limit_os_alloc, // 1 = do not use OS memory for allocation (but only programmatically reserved arenas) mi_option_os_tag, // tag used for OS logging (macOS only for now) mi_option_max_errors, // issue at most N error messages mi_option_max_warnings, // issue at most N warning messages - mi_option_max_segment_reclaim, + mi_option_max_segment_reclaim, mi_option_destroy_on_exit, // if set, release all memory on exit; sometimes used for dynamic unloading but can be unsafe. mi_option_arena_reserve, // initial memory size in KiB for arena reservation (1GiB on 64-bit) - mi_option_arena_purge_mult, + mi_option_arena_purge_mult, mi_option_purge_extend_delay, _mi_option_last, // legacy option names @@ -521,7 +521,7 @@ template struct _mi_heap_stl_allocator_common : publi protected: std::shared_ptr heap; template friend struct _mi_heap_stl_allocator_common; - + _mi_heap_stl_allocator_common() { mi_heap_t* hp = mi_heap_new(); this->heap.reset(hp, (_mi_destroy ? 
&heap_destroy : &heap_delete)); /* calls heap_delete/destroy when the refcount drops to zero */ @@ -538,7 +538,7 @@ template struct _mi_heap_stl_allocator_common : publi template struct mi_heap_stl_allocator : public _mi_heap_stl_allocator_common { using typename _mi_heap_stl_allocator_common::size_type; mi_heap_stl_allocator() : _mi_heap_stl_allocator_common() { } // creates fresh heap that is deleted when the destructor is called - mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common(hp) { } // no delete nor destroy on the passed in heap + mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common(hp) { } // no delete nor destroy on the passed in heap template mi_heap_stl_allocator(const mi_heap_stl_allocator& x) mi_attr_noexcept : _mi_heap_stl_allocator_common(x) { } mi_heap_stl_allocator select_on_container_copy_construction() const { return *this; } @@ -555,7 +555,7 @@ template bool operator!=(const mi_heap_stl_allocator& x, template struct mi_heap_destroy_stl_allocator : public _mi_heap_stl_allocator_common { using typename _mi_heap_stl_allocator_common::size_type; mi_heap_destroy_stl_allocator() : _mi_heap_stl_allocator_common() { } // creates fresh heap that is destroyed when the destructor is called - mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common(hp) { } // no delete nor destroy on the passed in heap + mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common(hp) { } // no delete nor destroy on the passed in heap template mi_heap_destroy_stl_allocator(const mi_heap_destroy_stl_allocator& x) mi_attr_noexcept : _mi_heap_stl_allocator_common(x) { } mi_heap_destroy_stl_allocator select_on_container_copy_construction() const { return *this; } diff --git a/Include/mimalloc/mimalloc/atomic.h b/Include/mimalloc/mimalloc/atomic.h index fe418fab314a407..c6b8146ffdb0490 100644 --- a/Include/mimalloc/mimalloc/atomic.h +++ b/Include/mimalloc/mimalloc/atomic.h @@ -300,7 +300,7 @@ typedef _Atomic(uintptr_t) mi_atomic_once_t; // Returns true only on the first invocation static inline bool mi_atomic_once( mi_atomic_once_t* once ) { - if (mi_atomic_load_relaxed(once) != 0) return false; // quick test + if (mi_atomic_load_relaxed(once) != 0) return false; // quick test uintptr_t expected = 0; return mi_atomic_cas_strong_acq_rel(once, &expected, (uintptr_t)1); // try to set to 1 } diff --git a/Include/mimalloc/mimalloc/internal.h b/Include/mimalloc/mimalloc/internal.h index 27d213e29306c86..822aeefe35cd1b4 100644 --- a/Include/mimalloc/mimalloc/internal.h +++ b/Include/mimalloc/mimalloc/internal.h @@ -89,7 +89,7 @@ void _mi_thread_abandon(mi_tld_t *tld); // os.c void _mi_os_init(void); // called from process init -void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats); +void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats); void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats); void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* stats); diff --git a/Include/mimalloc/mimalloc/prim.h b/Include/mimalloc/mimalloc/prim.h index 9e560696fa47831..4b9e4dc4194d776 100644 --- a/Include/mimalloc/mimalloc/prim.h +++ b/Include/mimalloc/mimalloc/prim.h @@ -35,10 +35,10 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config ); // Free OS memory int _mi_prim_free(void* addr, size_t size ); - + // Allocate OS memory. Return NULL on error. // The `try_alignment` is just a hint and the returned pointer does not have to be aligned. 
-// If `commit` is false, the virtual memory range only needs to be reserved (with no access) +// If `commit` is false, the virtual memory range only needs to be reserved (with no access) // which will later be committed explicitly using `_mi_prim_commit`. // `is_zero` is set to true if the memory was zero initialized (as on most OS's) // pre: !commit => !allow_large @@ -82,11 +82,11 @@ mi_msecs_t _mi_prim_clock_now(void); typedef struct mi_process_info_s { mi_msecs_t elapsed; mi_msecs_t utime; - mi_msecs_t stime; - size_t current_rss; - size_t peak_rss; + mi_msecs_t stime; + size_t current_rss; + size_t peak_rss; size_t current_commit; - size_t peak_commit; + size_t peak_commit; size_t page_faults; } mi_process_info_t; @@ -117,7 +117,7 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap); //------------------------------------------------------------------- // Thread id: `_mi_prim_thread_id()` -// +// // Getting the thread id should be performant as it is called in the // fast path of `_mi_free` and we specialize for various platforms as // inlined definitions. Regular code should call `init.c:_mi_thread_id()`. diff --git a/Include/mimalloc/mimalloc/track.h b/Include/mimalloc/mimalloc/track.h index 9545f7507ac99f2..fa1a048d846a9cf 100644 --- a/Include/mimalloc/mimalloc/track.h +++ b/Include/mimalloc/mimalloc/track.h @@ -34,7 +34,7 @@ The corresponding `mi_track_free` still uses the block start pointer and origina The `mi_track_resize` is currently unused but could be called on reallocations within a block. `mi_track_init` is called at program start. -The following macros are for tools like asan and valgrind to track whether memory is +The following macros are for tools like asan and valgrind to track whether memory is defined, undefined, or not accessible at all: #define mi_track_mem_defined(p,size) @@ -94,7 +94,7 @@ defined, undefined, or not accessible at all: // no tracking #define MI_TRACK_ENABLED 0 -#define MI_TRACK_HEAP_DESTROY 0 +#define MI_TRACK_HEAP_DESTROY 0 #define MI_TRACK_TOOL "none" #define mi_track_malloc_size(p,reqsize,size,zero) diff --git a/Include/mimalloc/mimalloc/types.h b/Include/mimalloc/mimalloc/types.h index 993a1637fe2cee6..3a149ca971dcb55 100644 --- a/Include/mimalloc/mimalloc/types.h +++ b/Include/mimalloc/mimalloc/types.h @@ -183,7 +183,7 @@ typedef int32_t mi_ssize_t; #define MI_SMALL_OBJ_SIZE_MAX (MI_SMALL_PAGE_SIZE/4) // 8KiB on 64-bit #define MI_MEDIUM_OBJ_SIZE_MAX (MI_MEDIUM_PAGE_SIZE/4) // 128KiB on 64-bit -#define MI_MEDIUM_OBJ_WSIZE_MAX (MI_MEDIUM_OBJ_SIZE_MAX/MI_INTPTR_SIZE) +#define MI_MEDIUM_OBJ_WSIZE_MAX (MI_MEDIUM_OBJ_SIZE_MAX/MI_INTPTR_SIZE) #define MI_LARGE_OBJ_SIZE_MAX (MI_SEGMENT_SIZE/2) // 32MiB on 64-bit #define MI_LARGE_OBJ_WSIZE_MAX (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE) @@ -201,10 +201,10 @@ typedef int32_t mi_ssize_t; #define MI_HUGE_BLOCK_SIZE ((uint32_t)(2*MI_GiB)) // blocks up to this size are always allocated aligned -#define MI_MAX_ALIGN_GUARANTEE (8*MI_MAX_ALIGN_SIZE) +#define MI_MAX_ALIGN_GUARANTEE (8*MI_MAX_ALIGN_SIZE) -// Alignments over MI_ALIGNMENT_MAX are allocated in dedicated huge page segments -#define MI_ALIGNMENT_MAX (MI_SEGMENT_SIZE >> 1) +// Alignments over MI_ALIGNMENT_MAX are allocated in dedicated huge page segments +#define MI_ALIGNMENT_MAX (MI_SEGMENT_SIZE >> 1) // ------------------------------------------------------ @@ -293,7 +293,7 @@ typedef uintptr_t mi_thread_free_t; typedef struct mi_page_s { // "owned" by the segment uint32_t slice_count; // slices in this page (0 if not a page) - uint32_t 
slice_offset; // distance from the actual page data slice (0 if a page) + uint32_t slice_offset; // distance from the actual page data slice (0 if a page) uint8_t is_committed : 1; // `true` if the page virtual memory is committed uint8_t is_zero_init : 1; // `true` if the page was initially zero initialized uint8_t tag : 4; // heap tag (mi_heap_tag_t) @@ -349,7 +349,7 @@ typedef enum mi_segment_kind_e { // A segment holds a commit mask where a bit is set if // the corresponding MI_COMMIT_SIZE area is committed. // The MI_COMMIT_SIZE must be a multiple of the slice -// size. If it is equal we have the most fine grained +// size. If it is equal we have the most fine grained // decommit (but setting it higher can be more efficient). // The MI_MINIMAL_COMMIT_SIZE is the minimal amount that will // be committed in one go which can be set higher than @@ -357,9 +357,9 @@ typedef enum mi_segment_kind_e { // is still tracked in fine-grained MI_COMMIT_SIZE chunks) // ------------------------------------------------------ -#define MI_MINIMAL_COMMIT_SIZE (1*MI_SEGMENT_SLICE_SIZE) +#define MI_MINIMAL_COMMIT_SIZE (1*MI_SEGMENT_SLICE_SIZE) #define MI_COMMIT_SIZE (MI_SEGMENT_SLICE_SIZE) // 64KiB -#define MI_COMMIT_MASK_BITS (MI_SEGMENT_SIZE / MI_COMMIT_SIZE) +#define MI_COMMIT_MASK_BITS (MI_SEGMENT_SIZE / MI_COMMIT_SIZE) #define MI_COMMIT_MASK_FIELD_BITS MI_SIZE_BITS #define MI_COMMIT_MASK_FIELD_COUNT (MI_COMMIT_MASK_BITS / MI_COMMIT_MASK_FIELD_BITS) @@ -432,11 +432,11 @@ typedef struct mi_segment_s { // from here is zero initialized struct mi_segment_s* next; // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`) - + size_t abandoned; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`) size_t abandoned_visits; // count how often this segment is visited in the abandoned list (to force reclaim it it is too long) size_t used; // count of pages in use - uintptr_t cookie; // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie` + uintptr_t cookie; // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie` size_t segment_slices; // for huge segments this may be different from `MI_SLICES_PER_SEGMENT` size_t segment_info_slices; // initial slices we are using segment info and possible guard pages. @@ -507,7 +507,7 @@ struct mi_heap_s { mi_page_queue_t pages[MI_BIN_FULL + 1]; // queue of pages for each size class (or "bin") _Atomic(mi_block_t*) thread_delayed_free; mi_threadid_t thread_id; // thread this heap belongs too - mi_arena_id_t arena_id; // arena id if the heap belongs to a specific arena (or 0) + mi_arena_id_t arena_id; // arena id if the heap belongs to a specific arena (or 0) uintptr_t cookie; // random cookie to verify pointers (see `_mi_ptr_cookie`) uintptr_t keys[2]; // two random keys used to encode the `thread_delayed_free` list mi_random_ctx_t random; // random number context used for secure allocation diff --git a/Objects/mimalloc/alloc-aligned.c b/Objects/mimalloc/alloc-aligned.c index 1cd809f15d613c8..4c15f4043ec117f 100644 --- a/Objects/mimalloc/alloc-aligned.c +++ b/Objects/mimalloc/alloc-aligned.c @@ -47,7 +47,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t* oversize = (size <= MI_SMALL_SIZE_MAX ? 
MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size); p = _mi_heap_malloc_zero_ex(heap, oversize, false, alignment); // the page block size should be large enough to align in the single huge page block // zero afterwards as only the area from the aligned_p may be committed! - if (p == NULL) return NULL; + if (p == NULL) return NULL; } else { // otherwise over-allocate @@ -73,7 +73,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t* mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0); mi_assert_internal(mi_usable_size(aligned_p)>=size); mi_assert_internal(mi_usable_size(p) == mi_usable_size(aligned_p)+adjust); - + // now zero the block if needed if (alignment > MI_ALIGNMENT_MAX) { // for the tracker, on huge aligned allocations only from the start of the large block is defined @@ -85,7 +85,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t* if (p != aligned_p) { mi_track_align(p,aligned_p,adjust,mi_usable_size(aligned_p)); - } + } return aligned_p; } diff --git a/Objects/mimalloc/alloc.c b/Objects/mimalloc/alloc.c index ce8b9739d1d9594..686acb1e1988038 100644 --- a/Objects/mimalloc/alloc.c +++ b/Objects/mimalloc/alloc.c @@ -70,7 +70,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz } else { _mi_memzero_aligned(block, page->xblock_size - MI_PADDING_SIZE); - } + } } #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN @@ -126,7 +126,7 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, if (size == 0) { size = sizeof(void*); } #endif mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE); - void* const p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero); + void* const p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero); mi_track_malloc(p,size,zero); #if MI_STAT>1 if (p != NULL) { @@ -359,7 +359,7 @@ static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) { // only maintain stats for smaller objects if requested #if (MI_STAT>0) static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) { - #if (MI_STAT < 2) + #if (MI_STAT < 2) MI_UNUSED(block); #endif mi_heap_t* const heap = mi_heap_get_default(); @@ -367,7 +367,7 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) { #if (MI_STAT>1) const size_t usize = mi_page_usable_size_of(page, block); mi_heap_stat_decrease(heap, malloc, usize); - #endif + #endif if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) { mi_heap_stat_decrease(heap, normal, bsize); #if (MI_STAT > 1) @@ -379,7 +379,7 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) { } else { mi_heap_stat_decrease(heap, huge, bsize); - } + } } #else static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) { @@ -418,7 +418,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc // that is safe as these are constant and the page won't be freed (as the block is not freed yet). 
mi_check_padding(page, block); _mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection - + // huge page segments are always abandoned and can be freed immediately mi_segment_t* segment = _mi_page_segment(page); if (segment->kind == MI_SEGMENT_HUGE) { @@ -434,7 +434,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc _mi_segment_huge_page_reset(segment, page, block); #endif } - + #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN // note: when tracking, cannot use mi_usable_size with multi-threading if (segment->kind != MI_SEGMENT_HUGE) { // not for huge segments as we just reset the content _mi_debug_fill(page, block, MI_DEBUG_FREED, mi_usable_size(block)); diff --git a/Objects/mimalloc/arena.c b/Objects/mimalloc/arena.c index 17b33cbe1988b4c..aa9e3ebf6dfb94b 100644 --- a/Objects/mimalloc/arena.c +++ b/Objects/mimalloc/arena.c @@ -13,7 +13,7 @@ threads and need to be accessed using atomic operations. Arenas are used to for huge OS page (1GiB) reservations or for reserving OS memory upfront which can be improve performance or is sometimes needed -on embedded devices. We can also employ this with WASI or `sbrk` systems +on embedded devices. We can also employ this with WASI or `sbrk` systems to reserve large arenas upfront and be able to reuse the memory more effectively. The arena allocation needs to be thread safe and we use an atomic bitmap to allocate. @@ -48,13 +48,13 @@ typedef struct mi_arena_s { size_t meta_size; // size of the arena structure itself (including its bitmaps) mi_memid_t meta_memid; // memid of the arena structure itself (OS or static allocation) int numa_node; // associated NUMA node - bool exclusive; // only allow allocations if specifically for this arena + bool exclusive; // only allow allocations if specifically for this arena bool is_large; // memory area consists of large- or huge OS pages (always committed) _Atomic(size_t) search_idx; // optimization to start the search for free blocks - _Atomic(mi_msecs_t) purge_expire; // expiration time when blocks should be decommitted from `blocks_decommit`. + _Atomic(mi_msecs_t) purge_expire; // expiration time when blocks should be decommitted from `blocks_decommit`. mi_bitmap_field_t* blocks_dirty; // are the blocks potentially non-zero? mi_bitmap_field_t* blocks_committed; // are the blocks committed? (can be NULL for memory that cannot be decommitted) - mi_bitmap_field_t* blocks_purge; // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted) + mi_bitmap_field_t* blocks_purge; // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted) mi_bitmap_field_t blocks_inuse[1]; // in-place bitmap of in-use blocks (of size `field_count`) } mi_arena_t; @@ -103,7 +103,7 @@ bool _mi_arena_memid_is_os_allocated(mi_memid_t memid) { } /* ----------------------------------------------------------- - Arena allocations get a (currently) 16-bit memory id where the + Arena allocations get a (currently) 16-bit memory id where the lower 8 bits are the arena id, and the upper bits the block index. 
----------------------------------------------------------- */ @@ -205,7 +205,7 @@ static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index { size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx); // start from last search; ok to be relaxed as the exact start does not matter if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx)) { - mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx)); // start search from found location next time around + mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx)); // start search from found location next time around return true; }; return false; @@ -225,7 +225,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar mi_bitmap_index_t bitmap_index; if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index)) return NULL; - // claimed it! + // claimed it! void* p = mi_arena_block_start(arena, bitmap_index); *memid = mi_memid_create_arena(arena->id, arena->exclusive, bitmap_index); memid->is_pinned = arena->memid.is_pinned; @@ -265,21 +265,21 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar // no need to commit, but check if already fully committed memid->initially_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index); } - + return p; } // allocate in a speficic arena -static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment, - bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld ) +static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment, + bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld ) { MI_UNUSED_RELEASE(alignment); mi_assert_internal(alignment <= MI_SEGMENT_ALIGN); - const size_t bcount = mi_block_count_of_size(size); + const size_t bcount = mi_block_count_of_size(size); const size_t arena_index = mi_arena_id_index(arena_id); mi_assert_internal(arena_index < mi_atomic_load_relaxed(&mi_arena_count)); mi_assert_internal(size <= mi_arena_block_size(bcount)); - + // Check arena suitability mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_index]); if (arena == NULL) return NULL; @@ -299,7 +299,7 @@ static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_no // allocate from an arena with fallback to the OS -static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, size_t alignment, +static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, size_t alignment, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld ) { @@ -307,9 +307,9 @@ static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, siz mi_assert_internal(alignment <= MI_SEGMENT_ALIGN); const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count); if mi_likely(max_arena == 0) return NULL; - + if (req_arena_id != _mi_arena_id_none()) { - // try a specific arena if requested + // try a specific arena if requested if (mi_arena_id_index(req_arena_id) < max_arena) { void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld); if (p != NULL) return p; @@ -317,7 +317,7 @@ static mi_decl_noinline void* 
mi_arena_try_alloc(int numa_node, size_t size, siz } else { // try numa affine allocation - for (size_t i = 0; i < max_arena; i++) { + for (size_t i = 0; i < max_arena; i++) { void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld); if (p != NULL) return p; } @@ -345,22 +345,22 @@ static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t re size_t arena_reserve = mi_option_get_size(mi_option_arena_reserve); if (arena_reserve == 0) return false; - if (!_mi_os_has_virtual_reserve()) { + if (!_mi_os_has_virtual_reserve()) { arena_reserve = arena_reserve/4; // be conservative if virtual reserve is not supported (for some embedded systems for example) } arena_reserve = _mi_align_up(arena_reserve, MI_ARENA_BLOCK_SIZE); if (arena_count >= 8 && arena_count <= 128) { arena_reserve = ((size_t)1<<(arena_count/8)) * arena_reserve; // scale up the arena sizes exponentially - } + } if (arena_reserve < req_size) return false; // should be able to at least handle the current allocation size - + // commit eagerly? bool arena_commit = false; if (mi_option_get(mi_option_arena_eager_commit) == 2) { arena_commit = _mi_os_has_overcommit(); } else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; } return (mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive */, arena_id) == 0); -} +} void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, @@ -375,9 +375,9 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data) if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) { void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld); - if (p != NULL) return p; + if (p != NULL) return p; - // otherwise, try to first eagerly reserve a new arena + // otherwise, try to first eagerly reserve a new arena if (req_arena_id == _mi_arena_id_none()) { mi_arena_id_t arena_id = 0; if (mi_arena_reserve(size, allow_large, req_arena_id, &arena_id)) { @@ -394,14 +394,14 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset errno = ENOMEM; return NULL; } - + // finally, fall back to the OS if (align_offset > 0) { return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid, tld->stats); } else { return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, tld->stats); - } + } } void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld) @@ -437,22 +437,22 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_assert_internal(arena->blocks_purge != NULL); mi_assert_internal(!arena->memid.is_pinned); const size_t size = mi_arena_block_size(blocks); - void* const p = mi_arena_block_start(arena, bitmap_idx); + void* const p = mi_arena_block_start(arena, bitmap_idx); bool needs_recommit; if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx)) { // all blocks are committed, we can purge freely needs_recommit = _mi_os_purge(p, size, stats); } else { - // some blocks are not committed -- this can happen when a partially committed block is freed + // some blocks are not committed -- this can 
happen when a partially committed block is freed // in `_mi_arena_free` and it is conservatively marked as uncommitted but still scheduled for a purge - // we need to ensure we do not try to reset (as that may be invalid for uncommitted memory), + // we need to ensure we do not try to reset (as that may be invalid for uncommitted memory), // and also undo the decommit stats (as it was already adjusted) mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits)); needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, stats); _mi_stat_increase(&stats->committed, size); } - + // clear the purged blocks _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx); // update committed bitmap @@ -470,7 +470,7 @@ static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t if (_mi_preloading() || delay == 0) { // decommit directly - mi_arena_purge(arena, bitmap_idx, blocks, stats); + mi_arena_purge(arena, bitmap_idx, blocks, stats); } else { // schedule decommit @@ -512,7 +512,7 @@ static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, } // returns true if anything was purged -static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi_stats_t* stats) +static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi_stats_t* stats) { if (arena->memid.is_pinned || arena->blocks_purge == NULL) return false; mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire); @@ -521,10 +521,10 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi // reset expire (if not already set concurrently) mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire, 0); - + // potential purges scheduled, walk through the bitmap bool any_purged = false; - bool full_purge = true; + bool full_purge = true; for (size_t i = 0; i < arena->field_count; i++) { size_t purge = mi_atomic_load_relaxed(&arena->blocks_purge[i]); if (purge != 0) { @@ -575,7 +575,7 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats ) // allow only one thread to purge at a time static mi_atomic_guard_t purge_guard; - mi_atomic_guard(&purge_guard) + mi_atomic_guard(&purge_guard) { mi_msecs_t now = _mi_clock_now(); size_t max_purge_count = (visit_all ? 
max_arena : 1); @@ -588,7 +588,7 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats ) } } } - } + } } @@ -602,7 +602,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi if (p==NULL) return; if (size==0) return; const bool all_committed = (committed_size == size); - + if (mi_memkind_is_os(memid.memkind)) { // was a direct OS allocation, pass through if (!all_committed && committed_size > 0) { @@ -620,7 +620,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t,&mi_arenas[arena_idx]); mi_assert_internal(arena != NULL); const size_t blocks = mi_block_count_of_size(size); - + // checks if (arena == NULL) { _mi_error_message(EINVAL, "trying to free from non-existent arena: %p, size %zu, memid: 0x%zx\n", p, size, memid); @@ -642,7 +642,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi else { mi_assert_internal(arena->blocks_committed != NULL); mi_assert_internal(arena->blocks_purge != NULL); - + if (!all_committed) { // mark the entire range as no longer committed (so we recommit the full range when re-using) _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx); @@ -657,9 +657,9 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi // works (as we should never reset decommitted parts). } // (delay) purge the entire range - mi_arena_schedule_purge(arena, bitmap_idx, blocks, stats); + mi_arena_schedule_purge(arena, bitmap_idx, blocks, stats); } - + // and make it available to others again bool all_inuse = _mi_bitmap_unclaim_across(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx); if (!all_inuse) { @@ -684,9 +684,9 @@ static void mi_arenas_unsafe_destroy(void) { for (size_t i = 0; i < max_arena; i++) { mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]); if (arena != NULL) { - if (arena->start != NULL && mi_memkind_is_os(arena->memid.memkind)) { + if (arena->start != NULL && mi_memkind_is_os(arena->memid.memkind)) { mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], NULL); - _mi_os_free(arena->start, mi_arena_size(arena), arena->memid, &_mi_stats_main); + _mi_os_free(arena->start, mi_arena_size(arena), arena->memid, &_mi_stats_main); } else { new_max_arena = i; @@ -709,7 +709,7 @@ void _mi_arena_collect(bool force_purge, mi_stats_t* stats) { // for dynamic libraries that are unloaded and need to release all their allocated memory. void _mi_arena_unsafe_destroy_all(mi_stats_t* stats) { mi_arenas_unsafe_destroy(); - _mi_arena_collect(true /* force purge */, stats); // purge non-owned arenas + _mi_arena_collect(true /* force purge */, stats); // purge non-owned arenas } // Is a pointer inside any of our arenas? 
@@ -717,8 +717,8 @@ bool _mi_arena_contains(const void* p) { const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count); for (size_t i = 0; i < max_arena; i++) { mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]); - if (arena != NULL && arena->start <= (const uint8_t*)p && arena->start + mi_arena_block_size(arena->block_count) > (const uint8_t*)p) { - return true; + if (arena != NULL && arena->start <= (const uint8_t*)p && arena->start + mi_arena_block_size(arena->block_count) > (const uint8_t*)p) { + return true; } } return false; @@ -762,7 +762,7 @@ static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int mi_memid_t meta_memid; mi_arena_t* arena = (mi_arena_t*)mi_arena_meta_zalloc(asize, &meta_memid, &_mi_stats_main); // TODO: can we avoid allocating from the OS? if (arena == NULL) return false; - + // already zero'd due to os_alloc // _mi_memzero(arena, asize); arena->id = _mi_arena_id_none(); @@ -779,12 +779,12 @@ static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int arena->search_idx = 0; arena->blocks_dirty = &arena->blocks_inuse[fields]; // just after inuse bitmap arena->blocks_committed = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[2*fields]); // just after dirty bitmap - arena->blocks_purge = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[3*fields]); // just after committed bitmap + arena->blocks_purge = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[3*fields]); // just after committed bitmap // initialize committed bitmap? if (arena->blocks_committed != NULL && arena->memid.initially_committed) { memset((void*)arena->blocks_committed, 0xFF, fields*sizeof(mi_bitmap_field_t)); // cast to void* to avoid atomic warning } - + // and claim leftover blocks if needed (so we never allocate there) ptrdiff_t post = (fields * MI_BITMAP_FIELD_BITS) - bcount; mi_assert_internal(post >= 0); @@ -933,4 +933,3 @@ int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserv if (err==0 && pages_reserved!=NULL) *pages_reserved = pages; return err; } - diff --git a/Objects/mimalloc/bitmap.c b/Objects/mimalloc/bitmap.c index a13dbe15bf33e84..ec3c755822dac1c 100644 --- a/Objects/mimalloc/bitmap.c +++ b/Objects/mimalloc/bitmap.c @@ -109,15 +109,15 @@ bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fiel } // Like _mi_bitmap_try_find_from_claim but with an extra predicate that must be fullfilled -bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap_fields, - const size_t start_field_idx, const size_t count, - mi_bitmap_pred_fun_t pred_fun, void* pred_arg, +bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap_fields, + const size_t start_field_idx, const size_t count, + mi_bitmap_pred_fun_t pred_fun, void* pred_arg, mi_bitmap_index_t* bitmap_idx) { size_t idx = start_field_idx; for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) { if (idx >= bitmap_fields) idx = 0; // wrap if (_mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx)) { - if (pred_fun == NULL || pred_fun(*bitmap_idx, pred_arg)) { + if (pred_fun == NULL || pred_fun(*bitmap_idx, pred_arg)) { return true; } // predicate returned false, unclaim and look further @@ -164,7 +164,7 @@ static bool mi_bitmap_is_claimedx(mi_bitmap_t bitmap, size_t bitmap_fields, size return ((field & mask) == mask); } -// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically. 
+// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically. // Returns `true` if successful when all previous `count` bits were 0. bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) { const size_t idx = mi_bitmap_index_field(bitmap_idx); @@ -172,9 +172,9 @@ bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count const size_t mask = mi_bitmap_mask_(count, bitidx); mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields); size_t expected = mi_atomic_load_relaxed(&bitmap[idx]); - do { + do { if ((expected & mask) != 0) return false; - } + } while (!mi_atomic_cas_strong_acq_rel(&bitmap[idx], &expected, expected | mask)); mi_assert_internal((expected & mask) == 0); return true; @@ -212,7 +212,7 @@ static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bit if (initial == 0) return false; if (initial >= count) return _mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx); // no need to cross fields (this case won't happen for us) if (_mi_divide_up(count - initial, MI_BITMAP_FIELD_BITS) >= (bitmap_fields - idx)) return false; // not enough entries - + // scan ahead size_t found = initial; size_t mask = 0; // mask bits for the final field diff --git a/Objects/mimalloc/bitmap.h b/Objects/mimalloc/bitmap.h index 0a765c714bae090..9ba15d5d6f09ea1 100644 --- a/Objects/mimalloc/bitmap.h +++ b/Objects/mimalloc/bitmap.h @@ -41,7 +41,7 @@ static inline mi_bitmap_index_t mi_bitmap_index_create(size_t idx, size_t bitidx } // Create a bit index. -static inline mi_bitmap_index_t mi_bitmap_index_create_from_bit(size_t full_bitidx) { +static inline mi_bitmap_index_t mi_bitmap_index_create_from_bit(size_t full_bitidx) { return mi_bitmap_index_create(full_bitidx / MI_BITMAP_FIELD_BITS, full_bitidx % MI_BITMAP_FIELD_BITS); } @@ -80,7 +80,7 @@ bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap // Returns `true` if all `count` bits were 1 previously. bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx); -// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically. +// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically. // Returns `true` if successful when all previous `count` bits were 0. 
bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx); diff --git a/Objects/mimalloc/os.c b/Objects/mimalloc/os.c index 804288e14b85bfd..94edfd4a21b5827 100644 --- a/Objects/mimalloc/os.c +++ b/Objects/mimalloc/os.c @@ -29,7 +29,7 @@ bool _mi_os_has_overcommit(void) { return mi_os_mem_config.has_overcommit; } -bool _mi_os_has_virtual_reserve(void) { +bool _mi_os_has_virtual_reserve(void) { return mi_os_mem_config.has_virtual_reserve; } @@ -173,7 +173,7 @@ void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t me } } else { - // nothing to do + // nothing to do mi_assert(memid.memkind < MI_MEM_OS); } } @@ -197,7 +197,7 @@ static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bo if (try_alignment == 0) { try_alignment = 1; } // avoid 0 to ensure there will be no divide by zero when aligning *is_zero = false; - void* p = NULL; + void* p = NULL; int err = _mi_prim_alloc(size, try_alignment, commit, allow_large, is_large, is_zero, &p); if (err != 0) { _mi_warning_message("unable to allocate OS memory (error: %d (0x%x), size: 0x%zx bytes, align: 0x%zx, commit: %d, allow large: %d)\n", err, err, size, try_alignment, commit, allow_large); @@ -205,14 +205,14 @@ static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bo mi_stat_counter_increase(stats->mmap_calls, 1); if (p != NULL) { _mi_stat_increase(&stats->reserved, size); - if (commit) { - _mi_stat_increase(&stats->committed, size); + if (commit) { + _mi_stat_increase(&stats->committed, size); // seems needed for asan (or `mimalloc-test-api` fails) #ifdef MI_TRACK_ASAN if (*is_zero) { mi_track_mem_defined(p,size); } else { mi_track_mem_undefined(p,size); } #endif - } + } } return p; } @@ -250,7 +250,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit // over-allocate uncommitted (virtual) memory p = mi_os_prim_alloc(over_size, 1 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, is_zero, stats); if (p == NULL) return NULL; - + // set p to the aligned part in the full region // note: this is dangerous on Windows as VirtualFree needs the actual base pointer // this is handled though by having the `base` field in the memid's @@ -266,7 +266,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit // overallocate... p = mi_os_prim_alloc(over_size, 1, commit, false, is_large, is_zero, stats); if (p == NULL) return NULL; - + // and selectively unmap parts around the over-allocated area. (noop on sbrk) void* aligned_p = mi_align_up_ptr(p, alignment); size_t pre_size = (uint8_t*)aligned_p - (uint8_t*)p; @@ -277,7 +277,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, commit, stats); } // we can return the aligned pointer on `mmap` (and sbrk) systems p = aligned_p; - *base = aligned_p; // since we freed the pre part, `*base == p`. + *base = aligned_p; // since we freed the pre part, `*base == p`. 
} } @@ -301,7 +301,7 @@ void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* tld_stats) { void* p = mi_os_prim_alloc(size, 0, true, false, &os_is_large, &os_is_zero, stats); if (p != NULL) { *memid = _mi_memid_create_os(true, os_is_zero, os_is_large); - } + } return p; } @@ -313,7 +313,7 @@ void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allo if (size == 0) return NULL; size = _mi_os_good_alloc_size(size); alignment = _mi_align_up(alignment, _mi_os_page_size()); - + bool os_is_large = false; bool os_is_zero = false; void* os_base = NULL; @@ -391,7 +391,7 @@ static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t* bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats) { MI_UNUSED(tld_stats); - mi_stats_t* stats = &_mi_stats_main; + mi_stats_t* stats = &_mi_stats_main; if (is_zero != NULL) { *is_zero = false; } _mi_stat_increase(&stats->committed, size); // use size for precise commit vs. decommit _mi_stat_counter_increase(&stats->commit_calls, 1); @@ -401,21 +401,21 @@ bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats void* start = mi_os_page_align_areax(false /* conservative? */, addr, size, &csize); if (csize == 0) return true; - // commit + // commit bool os_is_zero = false; - int err = _mi_prim_commit(start, csize, &os_is_zero); + int err = _mi_prim_commit(start, csize, &os_is_zero); if (err != 0) { _mi_warning_message("cannot commit OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize); return false; } - if (os_is_zero && is_zero != NULL) { + if (os_is_zero && is_zero != NULL) { *is_zero = true; mi_assert_expensive(mi_mem_is_zero(start, csize)); } // note: the following seems required for asan (otherwise `mimalloc-test-stress` fails) #ifdef MI_TRACK_ASAN if (os_is_zero) { mi_track_mem_defined(start,csize); } - else { mi_track_mem_undefined(start,csize); } + else { mi_track_mem_undefined(start,csize); } #endif return true; } @@ -429,11 +429,11 @@ static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, mi_ // page align size_t csize; void* start = mi_os_page_align_area_conservative(addr, size, &csize); - if (csize == 0) return true; + if (csize == 0) return true; // decommit *needs_recommit = true; - int err = _mi_prim_decommit(start,csize,needs_recommit); + int err = _mi_prim_decommit(start,csize,needs_recommit); if (err != 0) { _mi_warning_message("cannot decommit OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize); } @@ -451,7 +451,7 @@ bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* tld_stats) { // but may be used later again. This will release physical memory // pages and reduce swapping while keeping the memory committed. // We page align to a conservative area inside the range to reset. -bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) { +bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) { // page align conservatively within the range size_t csize; void* start = mi_os_page_align_area_conservative(addr, size, &csize); @@ -471,7 +471,7 @@ bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) { } -// either resets or decommits memory, returns true if the memory needs +// either resets or decommits memory, returns true if the memory needs // to be recommitted if it is to be re-used later on. 
bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats) { @@ -484,7 +484,7 @@ bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats) { bool needs_recommit = true; mi_os_decommit_ex(p, size, &needs_recommit, stats); - return needs_recommit; + return needs_recommit; } else { if (allow_reset) { // this can sometimes be not allowed if the range is not fully committed @@ -494,7 +494,7 @@ bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats) } } -// either resets or decommits memory, returns true if the memory needs +// either resets or decommits memory, returns true if the memory needs // to be recommitted if it is to be re-used later on. bool _mi_os_purge(void* p, size_t size, mi_stats_t * stats) { return _mi_os_purge_ex(p, size, true, stats); diff --git a/Objects/mimalloc/page.c b/Objects/mimalloc/page.c index 369c0262cc12062..423825a1128150f 100644 --- a/Objects/mimalloc/page.c +++ b/Objects/mimalloc/page.c @@ -125,9 +125,9 @@ bool _mi_page_is_valid(mi_page_t* page) { mi_assert_internal(!_mi_process_is_initialized || segment->thread_id==0 || segment->thread_id == mi_page_heap(page)->thread_id); #if MI_HUGE_PAGE_ABANDON - if (segment->kind != MI_SEGMENT_HUGE) + if (segment->kind != MI_SEGMENT_HUGE) #endif - { + { mi_page_queue_t* pq = mi_page_queue_of(page); mi_assert_internal(mi_page_queue_contains(pq, page)); mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_in_full(page)); @@ -441,7 +441,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept { mi_assert_internal(page != NULL); mi_assert_expensive(_mi_page_is_valid(page)); mi_assert_internal(mi_page_all_free(page)); - + mi_page_set_has_aligned(page, false); // don't retire too often.. @@ -454,7 +454,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept { if mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_queue_is_special(pq)) { // not too large && not full or huge queue? if (pq->last==page && pq->first==page) { // the only page in the queue? mi_stat_counter_increase(_mi_stats_main.page_no_retire,1); - page->retire_expire = 1 + (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4); + page->retire_expire = 1 + (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4); mi_heap_t* heap = mi_page_heap(page); mi_assert_internal(pq >= heap->pages); const size_t index = pq - heap->pages; @@ -608,7 +608,7 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co // allocations but this did not speed up any benchmark (due to an // extra test in malloc? or cache effects?) 
static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) { - MI_UNUSED(tld); + MI_UNUSED(tld); mi_assert_expensive(mi_page_is_valid_init(page)); #if (MI_SECURE<=2) mi_assert(page->free == NULL); @@ -720,7 +720,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p while (page != NULL) { mi_page_t* next = page->next; // remember next - #if MI_STAT + #if MI_STAT count++; #endif @@ -840,19 +840,19 @@ static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size, size_t mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size, page_alignment); if (page != NULL) { mi_assert_internal(mi_page_immediate_available(page)); - + if (is_huge) { mi_assert_internal(_mi_page_segment(page)->kind == MI_SEGMENT_HUGE); mi_assert_internal(_mi_page_segment(page)->used==1); #if MI_HUGE_PAGE_ABANDON mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue mi_page_set_heap(page, NULL); - #endif + #endif } else { mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE); } - + const size_t bsize = mi_page_usable_block_size(page); // note: not `mi_page_block_size` to account for padding if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { mi_heap_stat_increase(heap, large, bsize); @@ -871,7 +871,7 @@ static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size, size_t // Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed. static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignment) mi_attr_noexcept { // huge allocation? - const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size` + const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size` if mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE) || huge_alignment > 0) { if mi_unlikely(req_size > PTRDIFF_MAX) { // we don't allocate more than PTRDIFF_MAX (see ) _mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size); @@ -884,7 +884,7 @@ static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignme else { // otherwise find a page with free blocks in our size segregated queues #if MI_PADDING - mi_assert_internal(size >= MI_PADDING_SIZE); + mi_assert_internal(size >= MI_PADDING_SIZE); #endif return mi_find_free_page(heap, size); } @@ -900,7 +900,7 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_al // initialize if necessary if mi_unlikely(!mi_heap_is_initialized(heap)) { - heap = mi_heap_get_default(); // calls mi_thread_init + heap = mi_heap_get_default(); // calls mi_thread_init if mi_unlikely(!mi_heap_is_initialized(heap)) { return NULL; } } mi_assert_internal(mi_heap_is_initialized(heap)); diff --git a/Objects/mimalloc/prim/unix/prim.c b/Objects/mimalloc/prim/unix/prim.c index 314281fe891c465..298aeee0822634c 100644 --- a/Objects/mimalloc/prim/unix/prim.c +++ b/Objects/mimalloc/prim/unix/prim.c @@ -57,7 +57,7 @@ terms of the MIT license. A copy of the license can be found in the file //------------------------------------------------------------------------------------ // Use syscalls for some primitives to allow for libraries that override open/read/close etc. 
-// and do allocation themselves; using syscalls prevents recursion when mimalloc is +// and do allocation themselves; using syscalls prevents recursion when mimalloc is // still initializing (issue #713) //------------------------------------------------------------------------------------ @@ -120,7 +120,7 @@ static bool unix_detect_overcommit(void) { os_overcommit = (val != 0); } #else - // default: overcommit is true + // default: overcommit is true #endif return os_overcommit; } @@ -168,12 +168,12 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p size_t n = mi_bsr(try_alignment); if (((size_t)1 << n) == try_alignment && n >= 12 && n <= 30) { // alignment is a power of 2 and 4096 <= alignment <= 1GiB p = mmap(addr, size, protect_flags, flags | MAP_ALIGNED(n), fd, 0); - if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) { + if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) { int err = errno; _mi_warning_message("unable to directly request aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, addr); } if (p!=MAP_FAILED) return p; - // fall back to regular mmap + // fall back to regular mmap } } #elif defined(MAP_ALIGN) // Solaris @@ -189,7 +189,7 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p void* hint = _mi_os_get_aligned_hint(try_alignment, size); if (hint != NULL) { p = mmap(hint, size, protect_flags, flags, fd, 0); - if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) { + if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) { #if MI_TRACK_ENABLED // asan sometimes does not instrument errno correctly? int err = 0; #else @@ -198,7 +198,7 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p _mi_warning_message("unable to directly request hinted aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, hint); } if (p!=MAP_FAILED) return p; - // fall back to regular mmap + // fall back to regular mmap } } #endif @@ -327,9 +327,9 @@ int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_la mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0); mi_assert_internal(commit || !allow_large); mi_assert_internal(try_alignment > 0); - + *is_zero = true; - int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE); + int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE); *addr = unix_mmap(NULL, size, try_alignment, protect_flags, false, allow_large, is_large); return (*addr != NULL ? 0 : errno); } @@ -357,19 +357,19 @@ int _mi_prim_commit(void* start, size_t size, bool* is_zero) { // was either from mmap PROT_NONE, or from decommit MADV_DONTNEED, but // we sometimes call commit on a range with still partially committed // memory and `mprotect` does not zero the range. 
- *is_zero = false; + *is_zero = false; int err = mprotect(start, size, (PROT_READ | PROT_WRITE)); - if (err != 0) { - err = errno; + if (err != 0) { + err = errno; unix_mprotect_hint(err); } return err; } int _mi_prim_decommit(void* start, size_t size, bool* needs_recommit) { - int err = 0; + int err = 0; // decommit: use MADV_DONTNEED as it decreases rss immediately (unlike MADV_FREE) - err = unix_madvise(start, size, MADV_DONTNEED); + err = unix_madvise(start, size, MADV_DONTNEED); #if !MI_DEBUG && !MI_SECURE *needs_recommit = false; #else @@ -381,15 +381,15 @@ int _mi_prim_decommit(void* start, size_t size, bool* needs_recommit) { *needs_recommit = true; const int fd = unix_mmap_fd(); void* p = mmap(start, size, PROT_NONE, (MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE), fd, 0); - if (p != start) { err = errno; } + if (p != start) { err = errno; } */ return err; } int _mi_prim_reset(void* start, size_t size) { - // We try to use `MADV_FREE` as that is the fastest. A drawback though is that it + // We try to use `MADV_FREE` as that is the fastest. A drawback though is that it // will not reduce the `rss` stats in tools like `top` even though the memory is available - // to other processes. With the default `MIMALLOC_PURGE_DECOMMITS=1` we ensure that by + // to other processes. With the default `MIMALLOC_PURGE_DECOMMITS=1` we ensure that by // default `MADV_DONTNEED` is used though. #if defined(MADV_FREE) static _Atomic(size_t) advice = MI_ATOMIC_VAR_INIT(MADV_FREE); @@ -409,7 +409,7 @@ int _mi_prim_reset(void* start, size_t size) { int _mi_prim_protect(void* start, size_t size, bool protect) { int err = mprotect(start, size, protect ? PROT_NONE : (PROT_READ | PROT_WRITE)); - if (err != 0) { err = errno; } + if (err != 0) { err = errno; } unix_mprotect_hint(err); return err; } @@ -450,7 +450,7 @@ int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bo if (err != 0) { err = errno; _mi_warning_message("failed to bind huge (1GiB) pages to numa node %d (error: %d (0x%x))\n", numa_node, err, err); - } + } } return (*addr != NULL ? 0 : errno); } @@ -567,9 +567,9 @@ mi_msecs_t _mi_prim_clock_now(void) { // low resolution timer mi_msecs_t _mi_prim_clock_now(void) { #if !defined(CLOCKS_PER_SEC) || (CLOCKS_PER_SEC == 1000) || (CLOCKS_PER_SEC == 0) - return (mi_msecs_t)clock(); + return (mi_msecs_t)clock(); #elif (CLOCKS_PER_SEC < 1000) - return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC); + return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC); #else return (mi_msecs_t)clock() / ((mi_msecs_t)CLOCKS_PER_SEC / 1000); #endif @@ -609,7 +609,7 @@ void _mi_prim_process_info(mi_process_info_t* pinfo) pinfo->stime = timeval_secs(&rusage.ru_stime); #if !defined(__HAIKU__) pinfo->page_faults = rusage.ru_majflt; -#endif +#endif #if defined(__HAIKU__) // Haiku does not have (yet?) 
a way to // get these stats per process @@ -750,7 +750,7 @@ bool _mi_prim_random_buf(void* buf, size_t buf_len) { #elif defined(__ANDROID__) || defined(__DragonFly__) || \ defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || \ - defined(__sun) + defined(__sun) #include bool _mi_prim_random_buf(void* buf, size_t buf_len) { @@ -842,7 +842,7 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { } } -#else +#else void _mi_prim_thread_init_auto_done(void) { // nothing diff --git a/Objects/mimalloc/prim/wasi/prim.c b/Objects/mimalloc/prim/wasi/prim.c index 50511f0b5bf0b47..25e64fa3c40bf5d 100644 --- a/Objects/mimalloc/prim/wasi/prim.c +++ b/Objects/mimalloc/prim/wasi/prim.c @@ -19,7 +19,7 @@ terms of the MIT license. A copy of the license can be found in the file void _mi_prim_mem_init( mi_os_mem_config_t* config ) { config->page_size = 64*MI_KiB; // WebAssembly has a fixed page size: 64KiB config->alloc_granularity = 16; - config->has_overcommit = false; + config->has_overcommit = false; config->must_free_whole = true; config->has_virtual_reserve = false; } @@ -129,7 +129,7 @@ int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_la //--------------------------------------------- int _mi_prim_commit(void* addr, size_t size, bool* is_zero) { - MI_UNUSED(addr); MI_UNUSED(size); + MI_UNUSED(addr); MI_UNUSED(size); *is_zero = false; return 0; } @@ -194,9 +194,9 @@ mi_msecs_t _mi_prim_clock_now(void) { // low resolution timer mi_msecs_t _mi_prim_clock_now(void) { #if !defined(CLOCKS_PER_SEC) || (CLOCKS_PER_SEC == 1000) || (CLOCKS_PER_SEC == 0) - return (mi_msecs_t)clock(); + return (mi_msecs_t)clock(); #elif (CLOCKS_PER_SEC < 1000) - return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC); + return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC); #else return (mi_msecs_t)clock() / ((mi_msecs_t)CLOCKS_PER_SEC / 1000); #endif diff --git a/Objects/mimalloc/prim/windows/prim.c b/Objects/mimalloc/prim/windows/prim.c index e6b61079299cbdd..a038277ad21cb04 100644 --- a/Objects/mimalloc/prim/windows/prim.c +++ b/Objects/mimalloc/prim/windows/prim.c @@ -276,7 +276,7 @@ int _mi_prim_commit(void* addr, size_t size, bool* is_zero) { return 0; } -int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit) { +int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit) { BOOL ok = VirtualFree(addr, size, MEM_DECOMMIT); *needs_recommit = true; // for safety, assume always decommitted even in the case of an error. return (ok ? 
0 : (int)GetLastError()); @@ -451,7 +451,7 @@ void _mi_prim_process_info(mi_process_info_t* pinfo) GetProcessTimes(GetCurrentProcess(), &ct, &et, &st, &ut); pinfo->utime = filetime_msecs(&ut); pinfo->stime = filetime_msecs(&st); - + // load psapi on demand if (pGetProcessMemoryInfo == NULL) { HINSTANCE hDll = LoadLibrary(TEXT("psapi.dll")); @@ -465,7 +465,7 @@ void _mi_prim_process_info(mi_process_info_t* pinfo) memset(&info, 0, sizeof(info)); if (pGetProcessMemoryInfo != NULL) { pGetProcessMemoryInfo(GetCurrentProcess(), &info, sizeof(info)); - } + } pinfo->current_rss = (size_t)info.WorkingSetSize; pinfo->peak_rss = (size_t)info.PeakWorkingSetSize; pinfo->current_commit = (size_t)info.PagefileUsage; @@ -477,7 +477,7 @@ void _mi_prim_process_info(mi_process_info_t* pinfo) // Output //---------------------------------------------------------------- -void _mi_prim_out_stderr( const char* msg ) +void _mi_prim_out_stderr( const char* msg ) { // on windows with redirection, the C runtime cannot handle locale dependent output // after the main thread closes so we use direct console output. @@ -560,7 +560,7 @@ bool _mi_prim_random_buf(void* buf, size_t buf_len) { } if (pBCryptGenRandom == NULL) return false; } - return (pBCryptGenRandom(NULL, (PUCHAR)buf, (ULONG)buf_len, BCRYPT_USE_SYSTEM_PREFERRED_RNG) >= 0); + return (pBCryptGenRandom(NULL, (PUCHAR)buf, (ULONG)buf_len, BCRYPT_USE_SYSTEM_PREFERRED_RNG) >= 0); } #endif // MI_USE_RTLGENRANDOM @@ -595,9 +595,9 @@ void _mi_prim_thread_init_auto_done(void) { } void _mi_prim_thread_done_auto_done(void) { - // call thread-done on all threads (except the main thread) to prevent + // call thread-done on all threads (except the main thread) to prevent // dangling callback pointer if statically linked with a DLL; Issue #208 - FlsFree(mi_fls_key); + FlsFree(mi_fls_key); } void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { diff --git a/Objects/mimalloc/random.c b/Objects/mimalloc/random.c index 4fc8b2f8fb0bc3a..2a18b5aa992dadb 100644 --- a/Objects/mimalloc/random.c +++ b/Objects/mimalloc/random.c @@ -160,7 +160,7 @@ If we cannot get good randomness, we fall back to weak randomness based on a tim uintptr_t _mi_os_random_weak(uintptr_t extra_seed) { uintptr_t x = (uintptr_t)&_mi_os_random_weak ^ extra_seed; // ASLR makes the address random - x ^= _mi_prim_clock_now(); + x ^= _mi_prim_clock_now(); // and do a few randomization steps uintptr_t max = ((x ^ (x >> 17)) & 0x0F) + 1; for (uintptr_t i = 0; i < max; i++) { diff --git a/Objects/mimalloc/segment-map.c b/Objects/mimalloc/segment-map.c index 4c2104bd8154d4b..3cd2127e56c1a78 100644 --- a/Objects/mimalloc/segment-map.c +++ b/Objects/mimalloc/segment-map.c @@ -102,8 +102,8 @@ static mi_segment_t* _mi_segment_of(const void* p) { uintptr_t lomask = mask; loindex = index; do { - loindex--; - lomask = mi_atomic_load_relaxed(&mi_segment_map[loindex]); + loindex--; + lomask = mi_atomic_load_relaxed(&mi_segment_map[loindex]); } while (lomask != 0 && loindex > 0); if (lomask == 0) return NULL; lobitidx = mi_bsr(lomask); // lomask != 0 diff --git a/Objects/mimalloc/segment.c b/Objects/mimalloc/segment.c index ad55af99fb65288..0c34a6d087fc367 100644 --- a/Objects/mimalloc/segment.c +++ b/Objects/mimalloc/segment.c @@ -17,7 +17,7 @@ static void mi_segment_try_purge(mi_segment_t* segment, bool force, mi_stats_t* // ------------------------------------------------------------------- -// commit mask +// commit mask // ------------------------------------------------------------------- static bool 
mi_commit_mask_all_set(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm) { @@ -326,7 +326,7 @@ static uint8_t* _mi_segment_page_start_from_slice(const mi_segment_t* segment, c uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size) { const mi_slice_t* slice = mi_page_to_slice((mi_page_t*)page); - uint8_t* p = _mi_segment_page_start_from_slice(segment, slice, page->xblock_size, page_size); + uint8_t* p = _mi_segment_page_start_from_slice(segment, slice, page->xblock_size, page_size); mi_assert_internal(page->xblock_size > 0 || _mi_ptr_page(p) == page); mi_assert_internal(_mi_ptr_segment(p) == segment); return p; @@ -337,7 +337,7 @@ static size_t mi_segment_calculate_slices(size_t required, size_t* pre_size, siz size_t page_size = _mi_os_page_size(); size_t isize = _mi_align_up(sizeof(mi_segment_t), page_size); size_t guardsize = 0; - + if (MI_SECURE>0) { // in secure mode, we set up a protected page in between the segment info // and the page data (and one at the end of the segment) @@ -350,7 +350,7 @@ static size_t mi_segment_calculate_slices(size_t required, size_t* pre_size, siz if (pre_size != NULL) *pre_size = isize; isize = _mi_align_up(isize + guardsize, MI_SEGMENT_SLICE_SIZE); if (info_slices != NULL) *info_slices = isize / MI_SEGMENT_SLICE_SIZE; - size_t segment_size = (required==0 ? MI_SEGMENT_SIZE : _mi_align_up( required + isize + guardsize, MI_SEGMENT_SLICE_SIZE) ); + size_t segment_size = (required==0 ? MI_SEGMENT_SIZE : _mi_align_up( required + isize + guardsize, MI_SEGMENT_SLICE_SIZE) ); mi_assert_internal(segment_size % MI_SEGMENT_SLICE_SIZE == 0); return (segment_size / MI_SEGMENT_SLICE_SIZE); } @@ -386,7 +386,7 @@ static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) { // purge delayed decommits now? (no, leave it to the arena) // mi_segment_try_purge(segment,true,tld->stats); - + const size_t size = mi_segment_size(segment); const size_t csize = _mi_commit_mask_committed_size(&segment->commit_mask, size); @@ -394,7 +394,7 @@ static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) { _mi_arena_free(segment, mi_segment_size(segment), csize, segment->memid, tld->stats); } -// called by threads that are terminating +// called by threads that are terminating void _mi_segment_thread_collect(mi_segments_tld_t* tld) { MI_UNUSED(tld); // nothing to do @@ -446,7 +446,7 @@ static void mi_segment_commit_mask(mi_segment_t* segment, bool conservative, uin size_t bitidx = start / MI_COMMIT_SIZE; mi_assert_internal(bitidx < MI_COMMIT_MASK_BITS); - + size_t bitcount = *full_size / MI_COMMIT_SIZE; // can be 0 if (bitidx + bitcount > MI_COMMIT_MASK_BITS) { _mi_warning_message("commit mask overflow: idx=%zu count=%zu start=%zx end=%zx p=0x%p size=%zu fullsize=%zu\n", bitidx, bitcount, start, end, p, size, *full_size); @@ -474,7 +474,7 @@ static bool mi_segment_commit(mi_segment_t* segment, uint8_t* p, size_t size, mi if (!_mi_os_commit(start, full_size, &is_zero, stats)) return false; mi_commit_mask_set(&segment->commit_mask, &mask); } - + // increase purge expiration when using part of delayed purges -- we assume more allocations are coming soon. 
if (mi_commit_mask_any_set(&segment->purge_mask, &mask)) { segment->purge_expire = _mi_clock_now() + mi_option_get(mi_option_purge_delay); @@ -493,7 +493,7 @@ static bool mi_segment_ensure_committed(mi_segment_t* segment, uint8_t* p, size_ return mi_segment_commit(segment, p, size, stats); } -static bool mi_segment_purge(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) { +static bool mi_segment_purge(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) { mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask)); if (!segment->allow_purge) return true; @@ -512,11 +512,11 @@ static bool mi_segment_purge(mi_segment_t* segment, uint8_t* p, size_t size, mi_ if (decommitted) { mi_commit_mask_t cmask; mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask); - _mi_stat_increase(&_mi_stats_main.committed, full_size - _mi_commit_mask_committed_size(&cmask, MI_SEGMENT_SIZE)); // adjust for double counting + _mi_stat_increase(&_mi_stats_main.committed, full_size - _mi_commit_mask_committed_size(&cmask, MI_SEGMENT_SIZE)); // adjust for double counting mi_commit_mask_clear(&segment->commit_mask, &mask); - } + } } - + // always clear any scheduled purges in our range mi_commit_mask_clear(&segment->purge_mask, &mask); return true; @@ -532,16 +532,16 @@ static void mi_segment_schedule_purge(mi_segment_t* segment, uint8_t* p, size_t // register for future purge in the purge mask uint8_t* start = NULL; size_t full_size = 0; - mi_commit_mask_t mask; + mi_commit_mask_t mask; mi_segment_commit_mask(segment, true /*conservative*/, p, size, &start, &full_size, &mask); if (mi_commit_mask_is_empty(&mask) || full_size==0) return; - + // update delayed commit - mi_assert_internal(segment->purge_expire > 0 || mi_commit_mask_is_empty(&segment->purge_mask)); + mi_assert_internal(segment->purge_expire > 0 || mi_commit_mask_is_empty(&segment->purge_mask)); mi_commit_mask_t cmask; mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask); // only purge what is committed; span_free may try to decommit more mi_commit_mask_set(&segment->purge_mask, &cmask); - mi_msecs_t now = _mi_clock_now(); + mi_msecs_t now = _mi_clock_now(); if (segment->purge_expire == 0) { // no previous purgess, initialize now segment->purge_expire = now + mi_option_get(mi_option_purge_delay); @@ -559,7 +559,7 @@ static void mi_segment_schedule_purge(mi_segment_t* segment, uint8_t* p, size_t // previous purge mask is not yet expired, increase the expiration by a bit. segment->purge_expire += mi_option_get(mi_option_purge_extend_delay); } - } + } } static void mi_segment_try_purge(mi_segment_t* segment, bool force, mi_stats_t* stats) { @@ -597,7 +597,7 @@ static bool mi_segment_is_abandoned(mi_segment_t* segment) { // note: can be called on abandoned segments static void mi_segment_span_free(mi_segment_t* segment, size_t slice_index, size_t slice_count, bool allow_purge, mi_segments_tld_t* tld) { mi_assert_internal(slice_index < segment->slice_entries); - mi_span_queue_t* sq = (segment->kind == MI_SEGMENT_HUGE || mi_segment_is_abandoned(segment) + mi_span_queue_t* sq = (segment->kind == MI_SEGMENT_HUGE || mi_segment_is_abandoned(segment) ? 
NULL : mi_span_queue_for(slice_count,tld)); if (slice_count==0) slice_count = 1; mi_assert_internal(slice_index + slice_count - 1 < segment->slice_entries); @@ -618,7 +618,7 @@ static void mi_segment_span_free(mi_segment_t* segment, size_t slice_index, size if (allow_purge) { mi_segment_schedule_purge(segment, mi_slice_start(slice), slice_count * MI_SEGMENT_SLICE_SIZE, tld->stats); } - + // and push it on the free page queue (if it was not a huge page) if (sq != NULL) mi_span_queue_push( sq, slice ); else slice->xblock_size = 0; // mark huge page as free anyways @@ -652,7 +652,7 @@ static mi_slice_t* mi_segment_span_free_coalesce(mi_slice_t* slice, mi_segments_ // issue #691: segment->used can be 0 if the huge page block was freed while abandoned (reclaim will get here in that case) mi_assert_internal((segment->used==0 && slice->xblock_size==0) || segment->used == 1); // decreased right after this call in `mi_segment_page_clear` slice->xblock_size = 0; // mark as free anyways - // we should mark the last slice `xblock_size=0` now to maintain invariants but we skip it to + // we should mark the last slice `xblock_size=0` now to maintain invariants but we skip it to // avoid a possible cache miss (and the segment is about to be freed) return slice; } @@ -714,7 +714,7 @@ static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_i size_t extra = slice_count-1; if (extra > MI_MAX_SLICE_OFFSET) extra = MI_MAX_SLICE_OFFSET; if (slice_index + extra >= segment->slice_entries) extra = segment->slice_entries - slice_index - 1; // huge objects may have more slices than avaiable entries in the segment->slices - + mi_slice_t* slice_next = slice + 1; for (size_t i = 1; i <= extra; i++, slice_next++) { slice_next->slice_offset = (uint32_t)(sizeof(mi_slice_t)*i); @@ -732,7 +732,7 @@ static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_i last->slice_count = 0; last->xblock_size = 1; } - + // and initialize the page page->is_committed = true; segment->used++; @@ -791,7 +791,7 @@ static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_aren ----------------------------------------------------------- */ static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment, bool eager_delayed, mi_arena_id_t req_arena_id, - size_t* psegment_slices, size_t* ppre_size, size_t* pinfo_slices, + size_t* psegment_slices, size_t* ppre_size, size_t* pinfo_slices, bool commit, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) { @@ -799,7 +799,7 @@ static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment bool allow_large = (!eager_delayed && (MI_SECURE == 0)); // only allow large OS pages once we are no longer lazy size_t align_offset = 0; size_t alignment = MI_SEGMENT_ALIGN; - + if (page_alignment > 0) { // mi_assert_internal(huge_page != NULL); mi_assert_internal(page_alignment >= MI_SEGMENT_ALIGN); @@ -817,21 +817,21 @@ static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment return NULL; // failed to allocate } - // ensure metadata part of the segment is committed - mi_commit_mask_t commit_mask; - if (memid.initially_committed) { - mi_commit_mask_create_full(&commit_mask); + // ensure metadata part of the segment is committed + mi_commit_mask_t commit_mask; + if (memid.initially_committed) { + mi_commit_mask_create_full(&commit_mask); } - else { + else { // at least commit the info slices const size_t commit_needed = _mi_divide_up((*pinfo_slices)*MI_SEGMENT_SLICE_SIZE, MI_COMMIT_SIZE); 
mi_assert_internal(commit_needed>0); - mi_commit_mask_create(0, commit_needed, &commit_mask); + mi_commit_mask_create(0, commit_needed, &commit_mask); mi_assert_internal(commit_needed*MI_COMMIT_SIZE >= (*pinfo_slices)*MI_SEGMENT_SLICE_SIZE); if (!_mi_os_commit(segment, commit_needed*MI_COMMIT_SIZE, NULL, tld->stats)) { _mi_arena_free(segment,segment_size,0,memid,tld->stats); return NULL; - } + } } mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0); @@ -843,7 +843,7 @@ static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment segment->purge_expire = 0; mi_commit_mask_create_empty(&segment->purge_mask); mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL); // tsan - + mi_segments_track_size((long)(segment_size), tld); _mi_segment_map_allocated_at(segment); return segment; @@ -854,32 +854,32 @@ static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page) { mi_assert_internal((required==0 && huge_page==NULL) || (required>0 && huge_page != NULL)); - + // calculate needed sizes first size_t info_slices; size_t pre_size; size_t segment_slices = mi_segment_calculate_slices(required, &pre_size, &info_slices); - + // Commit eagerly only if not the first N lazy segments (to reduce impact of many threads that allocate just a little) const bool eager_delay = (// !_mi_os_has_overcommit() && // never delay on overcommit systems _mi_current_thread_count() > 1 && // do not delay for the first N threads tld->count < (size_t)mi_option_get(mi_option_eager_commit_delay)); const bool eager = !eager_delay && mi_option_is_enabled(mi_option_eager_commit); - bool commit = eager || (required > 0); - - // Allocate the segment from the OS - mi_segment_t* segment = mi_segment_os_alloc(required, page_alignment, eager_delay, req_arena_id, + bool commit = eager || (required > 0); + + // Allocate the segment from the OS + mi_segment_t* segment = mi_segment_os_alloc(required, page_alignment, eager_delay, req_arena_id, &segment_slices, &pre_size, &info_slices, commit, tld, os_tld); if (segment == NULL) return NULL; - - // zero the segment info? -- not always needed as it may be zero initialized from the OS + + // zero the segment info? -- not always needed as it may be zero initialized from the OS if (!segment->memid.initially_zero) { ptrdiff_t ofs = offsetof(mi_segment_t, next); size_t prefix = offsetof(mi_segment_t, slices) - ofs; - size_t zsize = prefix + (sizeof(mi_slice_t) * (segment_slices + 1)); // one more + size_t zsize = prefix + (sizeof(mi_slice_t) * (segment_slices + 1)); // one more _mi_memzero((uint8_t*)segment + ofs, zsize); } - + // initialize the rest of the segment info const size_t slice_entries = (segment_slices > MI_SLICES_PER_SEGMENT ? MI_SLICES_PER_SEGMENT : segment_slices); segment->segment_slices = segment_slices; @@ -897,7 +897,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi if (MI_SECURE>0) { // in secure mode, we set up a protected page in between the segment info // and the page data, and at the end of the segment. 
- size_t os_pagesize = _mi_os_page_size(); + size_t os_pagesize = _mi_os_page_size(); mi_assert_internal(mi_segment_info_size(segment) - os_pagesize >= pre_size); _mi_os_protect((uint8_t*)segment + mi_segment_info_size(segment) - os_pagesize, os_pagesize); uint8_t* end = (uint8_t*)segment + mi_segment_size(segment) - os_pagesize; @@ -909,10 +909,10 @@ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi // reserve first slices for segment info mi_page_t* page0 = mi_segment_span_allocate(segment, 0, info_slices, tld); - mi_assert_internal(page0!=NULL); if (page0==NULL) return NULL; // cannot fail as we always commit in advance + mi_assert_internal(page0!=NULL); if (page0==NULL) return NULL; // cannot fail as we always commit in advance mi_assert_internal(segment->used == 1); segment->used = 0; // don't count our internal slices towards usage - + // initialize initial free pages if (segment->kind == MI_SEGMENT_NORMAL) { // not a huge page mi_assert_internal(huge_page==NULL); @@ -923,7 +923,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi mi_assert_internal(mi_commit_mask_is_empty(&segment->purge_mask)); mi_assert_internal(mi_commit_mask_is_full(&segment->commit_mask)); *huge_page = mi_segment_span_allocate(segment, info_slices, segment_slices - info_slices - guard_slices, tld); - mi_assert_internal(*huge_page != NULL); // cannot fail as we commit in advance + mi_assert_internal(*huge_page != NULL); // cannot fail as we commit in advance } mi_assert_expensive(mi_segment_is_valid(segment,tld)); @@ -977,7 +977,7 @@ static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld mi_assert_internal(mi_page_all_free(page)); mi_segment_t* segment = _mi_ptr_segment(page); mi_assert_internal(segment->used > 0); - + size_t inuse = page->capacity * mi_page_block_size(page); _mi_stat_decrease(&tld->stats->page_committed, inuse); _mi_stat_decrease(&tld->stats->pages, 1); @@ -985,7 +985,7 @@ static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld // reset the page memory to reduce memory pressure? if (segment->allow_decommit && mi_option_is_enabled(mi_option_deprecated_page_reset)) { size_t psize; - uint8_t* start = _mi_page_start(segment, page, &psize); + uint8_t* start = _mi_page_start(segment, page, &psize); _mi_os_reset(start, psize, tld->stats); } @@ -996,7 +996,7 @@ static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld page->xblock_size = 1; // and free it - mi_slice_t* slice = mi_segment_span_free_coalesce(mi_page_to_slice(page), tld); + mi_slice_t* slice = mi_segment_span_free_coalesce(mi_page_to_slice(page), tld); segment->used--; // cannot assert segment valid as it is called during reclaim // mi_assert_expensive(mi_segment_is_valid(segment, tld)); @@ -1219,7 +1219,7 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) { mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_segment_t, &segment->abandoned_next) == NULL); mi_assert_internal(segment->abandoned_visits == 0); mi_assert_expensive(mi_segment_is_valid(segment,tld)); - + // remove the free pages from the free page queues mi_slice_t* slice = &segment->slices[0]; const mi_slice_t* end = mi_segment_slices_end(segment); @@ -1234,8 +1234,8 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) { } // perform delayed decommits (forcing is much slower on mstress) - mi_segment_try_purge(segment, mi_option_is_enabled(mi_option_abandoned_page_purge) /* force? 
*/, tld->stats); - + mi_segment_try_purge(segment, mi_option_is_enabled(mi_option_abandoned_page_purge) /* force? */, tld->stats); + // all pages in the segment are abandoned; add it to the abandoned list _mi_stat_increase(&tld->stats->segments_abandoned, 1); mi_segments_track_size(-((long)mi_segment_size(segment)), tld); @@ -1252,7 +1252,7 @@ void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) { mi_segment_t* segment = _mi_page_segment(page); mi_assert_expensive(mi_segment_is_valid(segment,tld)); - segment->abandoned++; + segment->abandoned++; _mi_stat_increase(&tld->stats->pages_abandoned, 1); mi_assert_internal(segment->abandoned <= segment->used); @@ -1272,7 +1272,7 @@ static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, s mi_assert_internal(block_size < MI_HUGE_BLOCK_SIZE); mi_assert_internal(mi_segment_is_abandoned(segment)); bool has_page = false; - + // for all slices const mi_slice_t* end; mi_slice_t* slice = mi_slices_start_iterate(segment, &end); @@ -1284,7 +1284,7 @@ static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, s mi_page_t* const page = mi_slice_to_page(slice); _mi_page_free_collect(page, false); if (mi_page_all_free(page)) { - // if this page is all free now, free it without adding to any queues (yet) + // if this page is all free now, free it without adding to any queues (yet) mi_assert_internal(page->next == NULL && page->prev==NULL); _mi_stat_decrease(&tld->stats->pages_abandoned, 1); segment->abandoned--; @@ -1299,7 +1299,7 @@ static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, s // a page has available free blocks of the right size has_page = true; } - } + } } else { // empty span @@ -1332,7 +1332,7 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap, mi_segments_track_size((long)mi_segment_size(segment), tld); mi_assert_internal(segment->next == NULL); _mi_stat_decrease(&tld->stats->segments_abandoned, 1); - + // for all slices const mi_slice_t* end; mi_slice_t* slice = mi_slices_start_iterate(segment, &end); @@ -1413,12 +1413,12 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slice mi_segment_reclaim(segment, heap, 0, NULL, tld); } else if (has_page && is_suitable) { - // found a large enough free span, or a page of the right block_size with free space + // found a large enough free span, or a page of the right block_size with free space // we return the result of reclaim (which is usually `segment`) as it might free // the segment due to concurrent frees (in which case `NULL` is returned). return mi_segment_reclaim(segment, heap, block_size, reclaimed, tld); } - else if (segment->abandoned_visits > 3 && is_suitable) { + else if (segment->abandoned_visits > 3 && is_suitable) { // always reclaim on 3rd visit to limit the abandoned queue length. mi_segment_reclaim(segment, heap, 0, NULL, tld); } @@ -1437,7 +1437,7 @@ void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld) mi_segment_t* segment; int max_tries = (force ? 
16*1024 : 1024); // limit latency if (force) { - mi_abandoned_visited_revisit(); + mi_abandoned_visited_revisit(); } while ((max_tries-- > 0) && ((segment = mi_abandoned_pop()) != NULL)) { mi_segment_check_free(segment,0,0,heap->tag,tld); // try to free up pages (due to concurrent frees) @@ -1448,7 +1448,7 @@ void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld) mi_segment_reclaim(segment, heap, 0, NULL, tld); } else { - // otherwise, purge if needed and push on the visited list + // otherwise, purge if needed and push on the visited list // note: forced purge can be expensive if many threads are destroyed/created as in mstress. mi_segment_try_purge(segment, force, tld->stats); mi_abandoned_visited_push(segment); @@ -1464,7 +1464,7 @@ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_ { mi_assert_internal(block_size < MI_HUGE_BLOCK_SIZE); mi_assert_internal(block_size <= MI_LARGE_OBJ_SIZE_MAX); - + // 1. try to reclaim an abandoned segment bool reclaimed; mi_segment_t* segment = mi_segment_try_reclaim(heap, needed_slices, block_size, &reclaimed, tld); @@ -1478,7 +1478,7 @@ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_ return segment; } // 2. otherwise allocate a fresh segment - return mi_segment_alloc(0, 0, heap->arena_id, tld, os_tld, NULL); + return mi_segment_alloc(0, 0, heap->arena_id, tld, os_tld, NULL); } @@ -1499,7 +1499,7 @@ static mi_page_t* mi_segments_page_alloc(mi_heap_t* heap, mi_page_kind_t page_ki // no free page, allocate a new segment and try again if (mi_segment_reclaim_or_alloc(heap, slices_needed, block_size, tld, os_tld) == NULL) { // OOM or reclaimed a good page in the heap - return NULL; + return NULL; } else { // otherwise try again @@ -1524,27 +1524,27 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment, mi_segment_t* segment = mi_segment_alloc(size,page_alignment,req_arena_id,tld,os_tld,&page); if (segment == NULL || page==NULL) return NULL; mi_assert_internal(segment->used==1); - mi_assert_internal(mi_page_block_size(page) >= size); + mi_assert_internal(mi_page_block_size(page) >= size); #if MI_HUGE_PAGE_ABANDON segment->thread_id = 0; // huge segments are immediately abandoned - #endif + #endif // for huge pages we initialize the xblock_size as we may // overallocate to accommodate large alignments. size_t psize; uint8_t* start = _mi_segment_page_start(segment, page, &psize); page->xblock_size = (psize > MI_HUGE_BLOCK_SIZE ? 
MI_HUGE_BLOCK_SIZE : (uint32_t)psize); - + // decommit the part of the prefix of a page that will not be used; this can be quite large (close to MI_SEGMENT_SIZE) if (page_alignment > 0 && segment->allow_decommit) { uint8_t* aligned_p = (uint8_t*)_mi_align_up((uintptr_t)start, page_alignment); mi_assert_internal(_mi_is_aligned(aligned_p, page_alignment)); - mi_assert_internal(psize - (aligned_p - start) >= size); + mi_assert_internal(psize - (aligned_p - start) >= size); uint8_t* decommit_start = start + sizeof(mi_block_t); // for the free list ptrdiff_t decommit_size = aligned_p - decommit_start; - _mi_os_reset(decommit_start, decommit_size, &_mi_stats_main); // note: cannot use segment_decommit on huge segments + _mi_os_reset(decommit_start, decommit_size, &_mi_stats_main); // note: cannot use segment_decommit on huge segments } - + return page; } @@ -1616,11 +1616,9 @@ mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t pag page = mi_segments_page_alloc(heap,MI_PAGE_LARGE,block_size,block_size,tld, os_tld); } else { - page = mi_segment_huge_page_alloc(block_size,page_alignment,heap->arena_id,tld,os_tld); + page = mi_segment_huge_page_alloc(block_size,page_alignment,heap->arena_id,tld,os_tld); } mi_assert_internal(page == NULL || _mi_heap_memid_is_suitable(heap, _mi_page_segment(page)->memid)); mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld)); return page; } - - diff --git a/Objects/mimalloc/stats.c b/Objects/mimalloc/stats.c index 300956ce1302e9f..e7ea397ec4c384e 100644 --- a/Objects/mimalloc/stats.c +++ b/Objects/mimalloc/stats.c @@ -455,7 +455,7 @@ mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, s pinfo.page_faults = 0; _mi_prim_process_info(&pinfo); - + if (elapsed_msecs!=NULL) *elapsed_msecs = (pinfo.elapsed < 0 ? 0 : (pinfo.elapsed < (mi_msecs_t)PTRDIFF_MAX ? (size_t)pinfo.elapsed : PTRDIFF_MAX)); if (user_msecs!=NULL) *user_msecs = (pinfo.utime < 0 ? 0 : (pinfo.utime < (mi_msecs_t)PTRDIFF_MAX ? (size_t)pinfo.utime : PTRDIFF_MAX)); if (system_msecs!=NULL) *system_msecs = (pinfo.stime < 0 ? 0 : (pinfo.stime < (mi_msecs_t)PTRDIFF_MAX ? (size_t)pinfo.stime : PTRDIFF_MAX));
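The unix `_mi_prim_decommit` and `_mi_prim_reset` hunks above revolve around the choice between `MADV_DONTNEED` (rss drops immediately, the behaviour the comment ties to the default `MIMALLOC_PURGE_DECOMMITS=1`) and `MADV_FREE` (faster, but rss only shrinks under memory pressure). A minimal standalone sketch of that distinction, with made-up sizes and the same errno convention as the code in the diff, could look like the following; it is an illustration, not mimalloc code:

#define _DEFAULT_SOURCE                      // for MAP_ANONYMOUS / madvise on glibc
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static int purge(void* start, size_t size, int immediate) {
  int advice = MADV_DONTNEED;                // rss decreases right away
#if defined(MADV_FREE)
  if (!immediate) advice = MADV_FREE;        // lazier: pages reclaimed only under pressure
#endif
  int err = madvise(start, size, advice);
  return (err != 0 ? errno : 0);             // same errno convention as prim.c
}

int main(void) {
  const size_t size = 16 * (size_t)sysconf(_SC_PAGESIZE);
  void* p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) { perror("mmap"); return 1; }
  memset(p, 0xAB, size);                     // fault the pages in so rss actually rises
  int err = purge(p, size, 1 /* immediate */);
  printf("purge: %s\n", err == 0 ? "ok" : strerror(err));
  munmap(p, size);
  return 0;
}

On platforms where `MADV_FREE` is not defined, the sketch simply keeps using `MADV_DONTNEED`, which is also the conservative choice when an immediate rss drop matters.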
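The low-resolution timer fallback that appears in both the unix and wasi hunks converts `clock()` ticks to milliseconds with a different integer expression per `CLOCKS_PER_SEC` range; all three branches compute ticks * 1000 / CLOCKS_PER_SEC while avoiding overflow in integer arithmetic. As a sanity check, a small sketch that performs the same conversion directly (using a hypothetical `msecs_t` in place of `mi_msecs_t`):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

typedef int64_t msecs_t;                     // stands in for mi_msecs_t

static msecs_t clock_now_msecs(void) {
  // ticks * 1000 / CLOCKS_PER_SEC, the value all three preprocessor branches produce
  return (msecs_t)((double)clock() * 1000.0 / (double)CLOCKS_PER_SEC);
}

int main(void) {
  volatile unsigned long spin = 0;
  msecs_t t0 = clock_now_msecs();
  for (unsigned long i = 0; i < 50u * 1000 * 1000; i++) spin += i;   // burn some CPU time
  msecs_t t1 = clock_now_msecs();
  printf("~%lld ms of CPU time (spin=%lu)\n", (long long)(t1 - t0), (unsigned long)spin);
  return 0;
}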
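`_mi_os_random_weak` in random.c, touched above only for whitespace, derives a fallback seed from an ASLR-influenced code address xored with the clock and then stirs it a variable number of times. A loose sketch of that idea follows; the multiplier and the shuffle step are placeholders, not the shuffle mimalloc actually uses:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uintptr_t weak_random(uintptr_t extra_seed) {
  uintptr_t x = (uintptr_t)&weak_random ^ extra_seed;   // ASLR makes this differ per run
  x ^= (uintptr_t)time(NULL);                           // mix in a coarse clock value
  uintptr_t rounds = ((x ^ (x >> 17)) & 0x0F) + 1;      // 1..16 stirring rounds
  for (uintptr_t i = 0; i < rounds; i++) {
    x *= (uintptr_t)2654435769u;                        // placeholder multiplicative step
    x ^= (x >> 15);
  }
  return x;
}

int main(void) {
  printf("weak seed: 0x%zx\n", (size_t)weak_random(42));
  return 0;
}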
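Several segment.c hunks touch the `MI_SECURE` path that places a protected page between the segment metadata and the page data, plus one at the segment end. The mechanism is ordinary `mprotect(PROT_NONE)`; a self-contained sketch of a trailing guard page, again illustrative rather than mimalloc code:

#define _DEFAULT_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void) {
  const size_t page = (size_t)sysconf(_SC_PAGESIZE);
  const size_t size = 8 * page;
  unsigned char* p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) { perror("mmap"); return 1; }

  // Make the last page inaccessible so overruns fault instead of corrupting memory;
  // the errno handling mirrors _mi_prim_protect in the diff.
  int err = mprotect(p + size - page, page, PROT_NONE);
  if (err != 0) { err = errno; fprintf(stderr, "mprotect: %s\n", strerror(err)); }

  p[0] = 1;                       // usable part: fine
  // p[size - 1] = 1;             // would fault: the guard page is PROT_NONE
  printf("guard page installed at %p\n", (void*)(p + size - page));
  munmap(p, size);
  return 0;
}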
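The commit-mask code being reformatted in segment.c tracks which parts of a segment are committed by setting one bit per fixed-size chunk, then asking whether a byte range is fully covered. A toy version with a single 64-bit mask and an assumed 4 MiB segment (the real `mi_commit_mask_t`, `MI_COMMIT_SIZE`, and segment size all differ) shows the bookkeeping:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SEGMENT_SIZE  (4u << 20)                 // 4 MiB segment (assumed for the toy)
#define CHUNK_SIZE    (SEGMENT_SIZE / 64)        // 64 bits cover the whole segment

// Mark [offset, offset+size) as committed, rounding out to whole chunks.
static uint64_t mask_commit(uint64_t mask, size_t offset, size_t size) {
  size_t first = offset / CHUNK_SIZE;
  size_t last  = (offset + size + CHUNK_SIZE - 1) / CHUNK_SIZE;   // exclusive
  assert(last <= 64);
  for (size_t i = first; i < last; i++) mask |= ((uint64_t)1 << i);
  return mask;
}

// True if every chunk overlapping [offset, offset+size) is committed.
static int mask_all_set(uint64_t mask, size_t offset, size_t size) {
  size_t first = offset / CHUNK_SIZE;
  size_t last  = (offset + size + CHUNK_SIZE - 1) / CHUNK_SIZE;
  for (size_t i = first; i < last; i++) {
    if ((mask & ((uint64_t)1 << i)) == 0) return 0;
  }
  return 1;
}

int main(void) {
  uint64_t mask = 0;
  mask = mask_commit(mask, 0, 3 * CHUNK_SIZE / 2);   // commits chunks 0 and 1
  printf("chunk 1 committed: %d\n", mask_all_set(mask, CHUNK_SIZE, 1));
  printf("chunk 2 committed: %d\n", mask_all_set(mask, 2 * CHUNK_SIZE, 1));
  return 0;
}

The same shape explains the conservative/non-conservative distinction in `mi_segment_commit_mask`: committing rounds the range outward as above, while purging only what is certainly unused would round it inward instead.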