
Commit 0dbe76b

Committed Sep 27, 2023
Fix mimalloc formatting
Summary: Test Plan: Reviewers: Subscribers: Tasks: Tags:
1 parent 1517fdd commit 0dbe76b

File tree

20 files changed (+239, -242 lines)


Include/mimalloc/mimalloc.h

Lines changed: 7 additions & 7 deletions

@@ -340,18 +340,18 @@ typedef enum mi_option_e {
   mi_option_deprecated_segment_cache,
   mi_option_deprecated_page_reset,
   mi_option_abandoned_page_purge,    // immediately purge delayed purges on thread termination
-  mi_option_deprecated_segment_reset,
-  mi_option_eager_commit_delay,
+  mi_option_deprecated_segment_reset,
+  mi_option_eager_commit_delay,
   mi_option_purge_delay,             // memory purging is delayed by N milli seconds; use 0 for immediate purging or -1 for no purging at all.
   mi_option_use_numa_nodes,          // 0 = use all available numa nodes, otherwise use at most N nodes.
   mi_option_limit_os_alloc,          // 1 = do not use OS memory for allocation (but only programmatically reserved arenas)
   mi_option_os_tag,                  // tag used for OS logging (macOS only for now)
   mi_option_max_errors,              // issue at most N error messages
   mi_option_max_warnings,            // issue at most N warning messages
-  mi_option_max_segment_reclaim,
+  mi_option_max_segment_reclaim,
   mi_option_destroy_on_exit,         // if set, release all memory on exit; sometimes used for dynamic unloading but can be unsafe.
   mi_option_arena_reserve,           // initial memory size in KiB for arena reservation (1GiB on 64-bit)
-  mi_option_arena_purge_mult,
+  mi_option_arena_purge_mult,
   mi_option_purge_extend_delay,
   _mi_option_last,
   // legacy option names

@@ -521,7 +521,7 @@ template<class T, bool _mi_destroy> struct _mi_heap_stl_allocator_common : publi
 protected:
   std::shared_ptr<mi_heap_t> heap;
   template<class U, bool D> friend struct _mi_heap_stl_allocator_common;
-
+
   _mi_heap_stl_allocator_common() {
     mi_heap_t* hp = mi_heap_new();
     this->heap.reset(hp, (_mi_destroy ? &heap_destroy : &heap_delete));  /* calls heap_delete/destroy when the refcount drops to zero */

@@ -538,7 +538,7 @@ template<class T, bool _mi_destroy> struct _mi_heap_stl_allocator_common : publi
 template<class T> struct mi_heap_stl_allocator : public _mi_heap_stl_allocator_common<T, false> {
   using typename _mi_heap_stl_allocator_common<T, false>::size_type;
   mi_heap_stl_allocator() : _mi_heap_stl_allocator_common<T, false>() { }  // creates fresh heap that is deleted when the destructor is called
-  mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, false>(hp) { }  // no delete nor destroy on the passed in heap
+  mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, false>(hp) { }  // no delete nor destroy on the passed in heap
   template<class U> mi_heap_stl_allocator(const mi_heap_stl_allocator<U>& x) mi_attr_noexcept : _mi_heap_stl_allocator_common<T, false>(x) { }

   mi_heap_stl_allocator select_on_container_copy_construction() const { return *this; }

@@ -555,7 +555,7 @@ template<class T1, class T2> bool operator!=(const mi_heap_stl_allocator<T1>& x,
 template<class T> struct mi_heap_destroy_stl_allocator : public _mi_heap_stl_allocator_common<T, true> {
   using typename _mi_heap_stl_allocator_common<T, true>::size_type;
   mi_heap_destroy_stl_allocator() : _mi_heap_stl_allocator_common<T, true>() { }  // creates fresh heap that is destroyed when the destructor is called
-  mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, true>(hp) { }  // no delete nor destroy on the passed in heap
+  mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, true>(hp) { }  // no delete nor destroy on the passed in heap
   template<class U> mi_heap_destroy_stl_allocator(const mi_heap_destroy_stl_allocator<U>& x) mi_attr_noexcept : _mi_heap_stl_allocator_common<T, true>(x) { }

   mi_heap_destroy_stl_allocator select_on_container_copy_construction() const { return *this; }
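Editor's note on the hunks above: `mi_heap_stl_allocator` wires a private mimalloc heap into standard containers, with the heap reference-counted through the `std::shared_ptr<mi_heap_t>` member shown in the diff. A minimal usage sketch (container choice and sizes are illustrative):

  #include <mimalloc.h>
  #include <vector>

  int main() {
    mi_heap_stl_allocator<int> alloc;   // fresh heap, deleted when the last copy of `alloc` goes away
    std::vector<int, mi_heap_stl_allocator<int>> v(alloc);
    for (int i = 0; i < 1000; i++) v.push_back(i);   // allocations land in the private heap
    return 0;
  }

The `mi_heap_destroy_stl_allocator` variant instead destroys its heap when the last copy dies, releasing all its blocks at once without per-object frees.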

Include/mimalloc/mimalloc/atomic.h

Lines changed: 1 addition & 1 deletion

@@ -300,7 +300,7 @@ typedef _Atomic(uintptr_t) mi_atomic_once_t;

 // Returns true only on the first invocation
 static inline bool mi_atomic_once( mi_atomic_once_t* once ) {
-  if (mi_atomic_load_relaxed(once) != 0) return false;  // quick test
+  if (mi_atomic_load_relaxed(once) != 0) return false;  // quick test
   uintptr_t expected = 0;
   return mi_atomic_cas_strong_acq_rel(once, &expected, (uintptr_t)1);  // try to set to 1
 }
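`mi_atomic_once` gives at-most-once execution: the relaxed load is a fast path for callers arriving after the flag is set, and the acquire-release CAS lets exactly one caller flip the flag from 0 to 1. A usage sketch (the `do_process_init` helper is hypothetical):

  static mi_atomic_once_t once;   // zero-initialized

  static void ensure_init(void) {
    if (mi_atomic_once(&once)) {
      do_process_init();   // hypothetical: runs in exactly one thread, even under races
    }
  }

Note this is not a barrier: a racing caller that receives false may proceed before the winning thread finishes its initialization, so the guarded work must tolerate that.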

Include/mimalloc/mimalloc/internal.h

Lines changed: 1 addition & 1 deletion

@@ -89,7 +89,7 @@ void _mi_thread_abandon(mi_tld_t *tld);

 // os.c
 void  _mi_os_init(void);  // called from process init
-void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats);
+void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats);
 void  _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats);
 void  _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* stats);

Include/mimalloc/mimalloc/prim.h

Lines changed: 7 additions & 7 deletions

@@ -35,10 +35,10 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config );

 // Free OS memory
 int _mi_prim_free(void* addr, size_t size );
-
+
 // Allocate OS memory. Return NULL on error.
 // The `try_alignment` is just a hint and the returned pointer does not have to be aligned.
-// If `commit` is false, the virtual memory range only needs to be reserved (with no access)
+// If `commit` is false, the virtual memory range only needs to be reserved (with no access)
 // which will later be committed explicitly using `_mi_prim_commit`.
 // `is_zero` is set to true if the memory was zero initialized (as on most OS's)
 // pre: !commit => !allow_large

@@ -82,11 +82,11 @@ mi_msecs_t _mi_prim_clock_now(void);
 typedef struct mi_process_info_s {
   mi_msecs_t elapsed;
   mi_msecs_t utime;
-  mi_msecs_t stime;
-  size_t     current_rss;
-  size_t     peak_rss;
+  mi_msecs_t stime;
+  size_t     current_rss;
+  size_t     peak_rss;
   size_t     current_commit;
-  size_t     peak_commit;
+  size_t     peak_commit;
   size_t     page_faults;
 } mi_process_info_t;

@@ -117,7 +117,7 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap);

 //-------------------------------------------------------------------
 // Thread id: `_mi_prim_thread_id()`
-//
+//
 // Getting the thread id should be performant as it is called in the
 // fast path of `_mi_free` and we specialize for various platforms as
 // inlined definitions. Regular code should call `init.c:_mi_thread_id()`.
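One cheap, portable way to get such a thread id, in the spirit of the comment above, is the address of a thread-local variable, which is unique per live thread and costs only a TLS address computation. This is a sketch of the generic-fallback idea only, not the specialized per-platform definitions:

  #include <stdint.h>

  static inline uintptr_t generic_thread_id(void) {
    static _Thread_local int probe;   // distinct address in every thread
    return (uintptr_t)&probe;
  }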

Include/mimalloc/mimalloc/track.h

Lines changed: 2 additions & 2 deletions

@@ -34,7 +34,7 @@ The corresponding `mi_track_free` still uses the block start pointer and origina
 The `mi_track_resize` is currently unused but could be called on reallocations within a block.
 `mi_track_init` is called at program start.

-The following macros are for tools like asan and valgrind to track whether memory is
+The following macros are for tools like asan and valgrind to track whether memory is
 defined, undefined, or not accessible at all:

 #define mi_track_mem_defined(p,size)

@@ -94,7 +94,7 @@ defined, undefined, or not accessible at all:
 // no tracking

 #define MI_TRACK_ENABLED      0
-#define MI_TRACK_HEAP_DESTROY 0
+#define MI_TRACK_HEAP_DESTROY 0
 #define MI_TRACK_TOOL         "none"

 #define mi_track_malloc_size(p,reqsize,size,zero)

Include/mimalloc/mimalloc/types.h

Lines changed: 11 additions & 11 deletions

@@ -183,7 +183,7 @@ typedef int32_t mi_ssize_t;

 #define MI_SMALL_OBJ_SIZE_MAX   (MI_SMALL_PAGE_SIZE/4)   // 8KiB on 64-bit
 #define MI_MEDIUM_OBJ_SIZE_MAX  (MI_MEDIUM_PAGE_SIZE/4)  // 128KiB on 64-bit
-#define MI_MEDIUM_OBJ_WSIZE_MAX (MI_MEDIUM_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
+#define MI_MEDIUM_OBJ_WSIZE_MAX (MI_MEDIUM_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
 #define MI_LARGE_OBJ_SIZE_MAX   (MI_SEGMENT_SIZE/2)      // 32MiB on 64-bit
 #define MI_LARGE_OBJ_WSIZE_MAX  (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE)

@@ -201,10 +201,10 @@ typedef int32_t mi_ssize_t;
 #define MI_HUGE_BLOCK_SIZE ((uint32_t)(2*MI_GiB))

 // blocks up to this size are always allocated aligned
-#define MI_MAX_ALIGN_GUARANTEE (8*MI_MAX_ALIGN_SIZE)
+#define MI_MAX_ALIGN_GUARANTEE (8*MI_MAX_ALIGN_SIZE)

-// Alignments over MI_ALIGNMENT_MAX are allocated in dedicated huge page segments
-#define MI_ALIGNMENT_MAX       (MI_SEGMENT_SIZE >> 1)
+// Alignments over MI_ALIGNMENT_MAX are allocated in dedicated huge page segments
+#define MI_ALIGNMENT_MAX       (MI_SEGMENT_SIZE >> 1)


 // ------------------------------------------------------

@@ -293,7 +293,7 @@ typedef uintptr_t mi_thread_free_t;
 typedef struct mi_page_s {
   // "owned" by the segment
   uint32_t slice_count;       // slices in this page (0 if not a page)
-  uint32_t slice_offset;      // distance from the actual page data slice (0 if a page)
+  uint32_t slice_offset;      // distance from the actual page data slice (0 if a page)
   uint8_t  is_committed : 1;  // `true` if the page virtual memory is committed
   uint8_t  is_zero_init : 1;  // `true` if the page was initially zero initialized
   uint8_t  tag : 4;           // heap tag (mi_heap_tag_t)

@@ -349,17 +349,17 @@ typedef enum mi_segment_kind_e {
 // A segment holds a commit mask where a bit is set if
 // the corresponding MI_COMMIT_SIZE area is committed.
 // The MI_COMMIT_SIZE must be a multiple of the slice
-// size. If it is equal we have the most fine grained
+// size. If it is equal we have the most fine grained
 // decommit (but setting it higher can be more efficient).
 // The MI_MINIMAL_COMMIT_SIZE is the minimal amount that will
 // be committed in one go which can be set higher than
 // MI_COMMIT_SIZE for efficiency (while the decommit mask
 // is still tracked in fine-grained MI_COMMIT_SIZE chunks)
 // ------------------------------------------------------

-#define MI_MINIMAL_COMMIT_SIZE     (1*MI_SEGMENT_SLICE_SIZE)
+#define MI_MINIMAL_COMMIT_SIZE     (1*MI_SEGMENT_SLICE_SIZE)
 #define MI_COMMIT_SIZE             (MI_SEGMENT_SLICE_SIZE)  // 64KiB
-#define MI_COMMIT_MASK_BITS        (MI_SEGMENT_SIZE / MI_COMMIT_SIZE)
+#define MI_COMMIT_MASK_BITS        (MI_SEGMENT_SIZE / MI_COMMIT_SIZE)
 #define MI_COMMIT_MASK_FIELD_BITS  MI_SIZE_BITS
 #define MI_COMMIT_MASK_FIELD_COUNT (MI_COMMIT_MASK_BITS / MI_COMMIT_MASK_FIELD_BITS)

@@ -432,11 +432,11 @@ typedef struct mi_segment_s {

   // from here is zero initialized
   struct mi_segment_s* next;  // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`)
-
+
   size_t abandoned;           // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
   size_t abandoned_visits;    // count how often this segment is visited in the abandoned list (to force reclaim if it is too long)
   size_t used;                // count of pages in use
-  uintptr_t cookie;           // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`
+  uintptr_t cookie;           // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`

   size_t segment_slices;      // for huge segments this may be different from `MI_SLICES_PER_SEGMENT`
   size_t segment_info_slices; // initial slices we are using for segment info and possible guard pages.

@@ -507,7 +507,7 @@ struct mi_heap_s {
   mi_page_queue_t pages[MI_BIN_FULL + 1];  // queue of pages for each size class (or "bin")
   _Atomic(mi_block_t*) thread_delayed_free;
   mi_threadid_t thread_id;                 // thread this heap belongs to
-  mi_arena_id_t arena_id;                  // arena id if the heap belongs to a specific arena (or 0)
+  mi_arena_id_t arena_id;                  // arena id if the heap belongs to a specific arena (or 0)
   uintptr_t cookie;                        // random cookie to verify pointers (see `_mi_ptr_cookie`)
   uintptr_t keys[2];                       // two random keys used to encode the `thread_delayed_free` list
   mi_random_ctx_t random;                  // random number context used for secure allocation
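Plugging in the 64-bit defaults implied by the comments above (MI_SEGMENT_SIZE = 64MiB, since MI_LARGE_OBJ_SIZE_MAX = MI_SEGMENT_SIZE/2 is 32MiB; MI_COMMIT_SIZE = 64KiB; MI_SIZE_BITS = 64), the commit-mask sizes work out as below. This is an illustrative check using stand-in macros, not the real headers:

  #include <assert.h>
  #define KiB (1024ULL)
  #define MiB (1024ULL*KiB)
  #define SEGMENT_SIZE (64*MiB)   // assumed 64-bit default
  #define COMMIT_SIZE  (64*KiB)   // one slice
  static_assert(SEGMENT_SIZE / COMMIT_SIZE == 1024, "1024 commit-mask bits per segment");
  static_assert((SEGMENT_SIZE / COMMIT_SIZE) / 64 == 16, "16 size_t fields in the commit mask");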

Objects/mimalloc/alloc-aligned.c

Lines changed: 3 additions & 3 deletions

@@ -47,7 +47,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
     oversize = (size <= MI_SMALL_SIZE_MAX ? MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size);
     p = _mi_heap_malloc_zero_ex(heap, oversize, false, alignment);  // the page block size should be large enough to align in the single huge page block
     // zero afterwards as only the area from the aligned_p may be committed!
-    if (p == NULL) return NULL;
+    if (p == NULL) return NULL;
   }
   else {
     // otherwise over-allocate

@@ -73,7 +73,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
   mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
   mi_assert_internal(mi_usable_size(aligned_p)>=size);
   mi_assert_internal(mi_usable_size(p) == mi_usable_size(aligned_p)+adjust);
-
+
   // now zero the block if needed
   if (alignment > MI_ALIGNMENT_MAX) {
     // for the tracker, on huge aligned allocations only from the start of the large block is defined

@@ -85,7 +85,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*

   if (p != aligned_p) {
     mi_track_align(p,aligned_p,adjust,mi_usable_size(aligned_p));
-  }
+  }
   return aligned_p;
 }

Objects/mimalloc/alloc.c

Lines changed: 7 additions & 7 deletions

@@ -70,7 +70,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
   }
   else {
     _mi_memzero_aligned(block, page->xblock_size - MI_PADDING_SIZE);
-  }
+  }
 }

 #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN

@@ -126,7 +126,7 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
   if (size == 0) { size = sizeof(void*); }
   #endif
   mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
-  void* const p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
+  void* const p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
   mi_track_malloc(p,size,zero);
   #if MI_STAT>1
   if (p != NULL) {

@@ -359,15 +359,15 @@ static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
 // only maintain stats for smaller objects if requested
 #if (MI_STAT>0)
 static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
-  #if (MI_STAT < 2)
+  #if (MI_STAT < 2)
   MI_UNUSED(block);
   #endif
   mi_heap_t* const heap = mi_heap_get_default();
   const size_t bsize = mi_page_usable_block_size(page);
   #if (MI_STAT>1)
   const size_t usize = mi_page_usable_size_of(page, block);
   mi_heap_stat_decrease(heap, malloc, usize);
-  #endif
+  #endif
   if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
     mi_heap_stat_decrease(heap, normal, bsize);
     #if (MI_STAT > 1)

@@ -379,7 +379,7 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
   }
   else {
     mi_heap_stat_decrease(heap, huge, bsize);
-  }
+  }
 }
 #else
 static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {

@@ -418,7 +418,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
   // that is safe as these are constant and the page won't be freed (as the block is not freed yet).
   mi_check_padding(page, block);
   _mi_padding_shrink(page, block, sizeof(mi_block_t));  // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
-
+
   // huge page segments are always abandoned and can be freed immediately
   mi_segment_t* segment = _mi_page_segment(page);
   if (segment->kind == MI_SEGMENT_HUGE) {

@@ -434,7 +434,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
     _mi_segment_huge_page_reset(segment, page, block);
     #endif
   }
-
+
   #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN  // note: when tracking, cannot use mi_usable_size with multi-threading
   if (segment->kind != MI_SEGMENT_HUGE) {  // not for huge segments as we just reset the content
     _mi_debug_fill(page, block, MI_DEBUG_FREED, mi_usable_size(block));

Objects/mimalloc/arena.c

Lines changed: 47 additions & 48 deletions
Large diffs are not rendered by default.

Objects/mimalloc/bitmap.c

Lines changed: 8 additions & 8 deletions

@@ -109,15 +109,15 @@ bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fiel
 }

 // Like _mi_bitmap_try_find_from_claim but with an extra predicate that must be fulfilled
-bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap_fields,
-            const size_t start_field_idx, const size_t count,
-            mi_bitmap_pred_fun_t pred_fun, void* pred_arg,
+bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap_fields,
+            const size_t start_field_idx, const size_t count,
+            mi_bitmap_pred_fun_t pred_fun, void* pred_arg,
             mi_bitmap_index_t* bitmap_idx) {
   size_t idx = start_field_idx;
   for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) {
     if (idx >= bitmap_fields) idx = 0;  // wrap
     if (_mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx)) {
-      if (pred_fun == NULL || pred_fun(*bitmap_idx, pred_arg)) {
+      if (pred_fun == NULL || pred_fun(*bitmap_idx, pred_arg)) {
         return true;
       }
       // predicate returned false, unclaim and look further

@@ -164,17 +164,17 @@ static bool mi_bitmap_is_claimedx(mi_bitmap_t bitmap, size_t bitmap_fields, size
   return ((field & mask) == mask);
 }

-// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically.
+// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically.
 // Returns `true` if successful when all previous `count` bits were 0.
 bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
   const size_t idx = mi_bitmap_index_field(bitmap_idx);
   const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
   const size_t mask = mi_bitmap_mask_(count, bitidx);
   mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields);
   size_t expected = mi_atomic_load_relaxed(&bitmap[idx]);
-  do {
+  do {
     if ((expected & mask) != 0) return false;
-  }
+  }
   while (!mi_atomic_cas_strong_acq_rel(&bitmap[idx], &expected, expected | mask));
   mi_assert_internal((expected & mask) == 0);
   return true;

@@ -212,7 +212,7 @@ static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bit
   if (initial == 0) return false;
   if (initial >= count) return _mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx);  // no need to cross fields (this case won't happen for us)
   if (_mi_divide_up(count - initial, MI_BITMAP_FIELD_BITS) >= (bitmap_fields - idx)) return false;  // not enough entries
-
+
   // scan ahead
   size_t found = initial;
   size_t mask = 0;  // mask bits for the final field

Objects/mimalloc/bitmap.h

Lines changed: 2 additions & 2 deletions

@@ -41,7 +41,7 @@ static inline mi_bitmap_index_t mi_bitmap_index_create(size_t idx, size_t bitidx
 }

 // Create a bit index.
-static inline mi_bitmap_index_t mi_bitmap_index_create_from_bit(size_t full_bitidx) {
+static inline mi_bitmap_index_t mi_bitmap_index_create_from_bit(size_t full_bitidx) {
   return mi_bitmap_index_create(full_bitidx / MI_BITMAP_FIELD_BITS, full_bitidx % MI_BITMAP_FIELD_BITS);
 }

@@ -80,7 +80,7 @@ bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap
 // Returns `true` if all `count` bits were 1 previously.
 bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);

-// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically.
+// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically.
 // Returns `true` if successful when all previous `count` bits were 0.
 bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
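A quick worked example for `mi_bitmap_index_create_from_bit`, assuming 64-bit fields (MI_BITMAP_FIELD_BITS == 64): a full bit index of 70 maps to field 70/64 = 1 at bit 70%64 = 6.

  mi_bitmap_index_t idx = mi_bitmap_index_create_from_bit(70);
  // mi_bitmap_index_field(idx)        == 1
  // mi_bitmap_index_bit_in_field(idx) == 6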

Objects/mimalloc/os.c

Lines changed: 22 additions & 22 deletions

@@ -29,7 +29,7 @@ bool _mi_os_has_overcommit(void) {
   return mi_os_mem_config.has_overcommit;
 }

-bool _mi_os_has_virtual_reserve(void) {
+bool _mi_os_has_virtual_reserve(void) {
   return mi_os_mem_config.has_virtual_reserve;
 }

@@ -173,7 +173,7 @@ void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t me
     }
   }
   else {
-    // nothing to do
+    // nothing to do
     mi_assert(memid.memkind < MI_MEM_OS);
   }
 }

@@ -197,22 +197,22 @@ static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bo
   if (try_alignment == 0) { try_alignment = 1; }  // avoid 0 to ensure there will be no divide by zero when aligning

   *is_zero = false;
-  void* p = NULL;
+  void* p = NULL;
   int err = _mi_prim_alloc(size, try_alignment, commit, allow_large, is_large, is_zero, &p);
   if (err != 0) {
     _mi_warning_message("unable to allocate OS memory (error: %d (0x%x), size: 0x%zx bytes, align: 0x%zx, commit: %d, allow large: %d)\n", err, err, size, try_alignment, commit, allow_large);
   }
   mi_stat_counter_increase(stats->mmap_calls, 1);
   if (p != NULL) {
     _mi_stat_increase(&stats->reserved, size);
-    if (commit) {
-      _mi_stat_increase(&stats->committed, size);
+    if (commit) {
+      _mi_stat_increase(&stats->committed, size);
       // seems needed for asan (or `mimalloc-test-api` fails)
       #ifdef MI_TRACK_ASAN
       if (*is_zero) { mi_track_mem_defined(p,size); }
       else { mi_track_mem_undefined(p,size); }
       #endif
-    }
+    }
   }
   return p;
 }

@@ -250,7 +250,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
     // over-allocate uncommitted (virtual) memory
     p = mi_os_prim_alloc(over_size, 1 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, is_zero, stats);
     if (p == NULL) return NULL;
-
+
     // set p to the aligned part in the full region
     // note: this is dangerous on Windows as VirtualFree needs the actual base pointer
     // this is handled though by having the `base` field in the memid's

@@ -266,7 +266,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
     // overallocate...
     p = mi_os_prim_alloc(over_size, 1, commit, false, is_large, is_zero, stats);
     if (p == NULL) return NULL;
-
+
     // and selectively unmap parts around the over-allocated area. (noop on sbrk)
     void* aligned_p = mi_align_up_ptr(p, alignment);
     size_t pre_size = (uint8_t*)aligned_p - (uint8_t*)p;

@@ -277,7 +277,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
     if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, commit, stats); }
     // we can return the aligned pointer on `mmap` (and sbrk) systems
     p = aligned_p;
-    *base = aligned_p;  // since we freed the pre part, `*base == p`.
+    *base = aligned_p;  // since we freed the pre part, `*base == p`.
   }
 }

@@ -301,7 +301,7 @@ void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* tld_stats) {
   void* p = mi_os_prim_alloc(size, 0, true, false, &os_is_large, &os_is_zero, stats);
   if (p != NULL) {
     *memid = _mi_memid_create_os(true, os_is_zero, os_is_large);
-  }
+  }
   return p;
 }

@@ -313,7 +313,7 @@ void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allo
   if (size == 0) return NULL;
   size = _mi_os_good_alloc_size(size);
   alignment = _mi_align_up(alignment, _mi_os_page_size());
-
+
   bool os_is_large = false;
   bool os_is_zero = false;
   void* os_base = NULL;

@@ -391,7 +391,7 @@ static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t*

 bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats) {
   MI_UNUSED(tld_stats);
-  mi_stats_t* stats = &_mi_stats_main;
+  mi_stats_t* stats = &_mi_stats_main;
   if (is_zero != NULL) { *is_zero = false; }
   _mi_stat_increase(&stats->committed, size);  // use size for precise commit vs. decommit
   _mi_stat_counter_increase(&stats->commit_calls, 1);

@@ -401,21 +401,21 @@ bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats
   void* start = mi_os_page_align_areax(false /* conservative? */, addr, size, &csize);
   if (csize == 0) return true;

-  // commit
+  // commit
   bool os_is_zero = false;
-  int err = _mi_prim_commit(start, csize, &os_is_zero);
+  int err = _mi_prim_commit(start, csize, &os_is_zero);
   if (err != 0) {
     _mi_warning_message("cannot commit OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize);
     return false;
   }
-  if (os_is_zero && is_zero != NULL) {
+  if (os_is_zero && is_zero != NULL) {
     *is_zero = true;
     mi_assert_expensive(mi_mem_is_zero(start, csize));
   }
   // note: the following seems required for asan (otherwise `mimalloc-test-stress` fails)
   #ifdef MI_TRACK_ASAN
   if (os_is_zero) { mi_track_mem_defined(start,csize); }
-  else { mi_track_mem_undefined(start,csize); }
+  else { mi_track_mem_undefined(start,csize); }
   #endif
   return true;
 }

@@ -429,11 +429,11 @@ static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, mi_
   // page align
   size_t csize;
   void* start = mi_os_page_align_area_conservative(addr, size, &csize);
-  if (csize == 0) return true;
+  if (csize == 0) return true;

   // decommit
   *needs_recommit = true;
-  int err = _mi_prim_decommit(start,csize,needs_recommit);
+  int err = _mi_prim_decommit(start,csize,needs_recommit);
   if (err != 0) {
     _mi_warning_message("cannot decommit OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize);
   }

@@ -451,7 +451,7 @@ bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* tld_stats) {
 // but may be used later again. This will release physical memory
 // pages and reduce swapping while keeping the memory committed.
 // We page align to a conservative area inside the range to reset.
-bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
+bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
   // page align conservatively within the range
   size_t csize;
   void* start = mi_os_page_align_area_conservative(addr, size, &csize);

@@ -471,7 +471,7 @@ bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
 }


-// either resets or decommits memory, returns true if the memory needs
+// either resets or decommits memory, returns true if the memory needs
 // to be recommitted if it is to be re-used later on.
 bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats)
 {

@@ -484,7 +484,7 @@ bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats)
   {
     bool needs_recommit = true;
     mi_os_decommit_ex(p, size, &needs_recommit, stats);
-    return needs_recommit;
+    return needs_recommit;
   }
   else {
     if (allow_reset) {  // this can sometimes be not allowed if the range is not fully committed

@@ -494,7 +494,7 @@ bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats)
   }
 }

-// either resets or decommits memory, returns true if the memory needs
+// either resets or decommits memory, returns true if the memory needs
 // to be recommitted if it is to be re-used later on.
 bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats) {
   return _mi_os_purge_ex(p, size, true, stats);
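Taken together, the purge path picks between two strategies: decommit returns pages to the OS (so they must be recommitted before reuse), while reset keeps the range committed and only marks its contents discardable. A condensed sketch of the decision in `_mi_os_purge_ex` above, not verbatim code; `mi_option_purge_decommits` is assumed to be the option the unix primitives refer to via MIMALLOC_PURGE_DECOMMITS:

  bool purge_sketch(void* p, size_t size, bool allow_reset, mi_stats_t* stats) {
    if (mi_option_is_enabled(mi_option_purge_decommits)) {  // assumed default
      bool needs_recommit = true;
      mi_os_decommit_ex(p, size, &needs_recommit, stats);   // give pages back to the OS
      return needs_recommit;                                // caller must recommit before reuse
    }
    if (allow_reset) {   // reset can be disallowed when the range is not fully committed
      _mi_os_reset(p, size, stats);
    }
    return false;        // still committed; reusable as-is
  }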

Objects/mimalloc/page.c

Lines changed: 12 additions & 12 deletions

@@ -125,9 +125,9 @@ bool _mi_page_is_valid(mi_page_t* page) {

     mi_assert_internal(!_mi_process_is_initialized || segment->thread_id==0 || segment->thread_id == mi_page_heap(page)->thread_id);
     #if MI_HUGE_PAGE_ABANDON
-    if (segment->kind != MI_SEGMENT_HUGE)
+    if (segment->kind != MI_SEGMENT_HUGE)
     #endif
-    {
+    {
       mi_page_queue_t* pq = mi_page_queue_of(page);
       mi_assert_internal(mi_page_queue_contains(pq, page));
       mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_in_full(page));

@@ -441,7 +441,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
   mi_assert_internal(page != NULL);
   mi_assert_expensive(_mi_page_is_valid(page));
   mi_assert_internal(mi_page_all_free(page));
-
+
   mi_page_set_has_aligned(page, false);

   // don't retire too often..

@@ -454,7 +454,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
   if mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_queue_is_special(pq)) {  // not too large && not full or huge queue?
     if (pq->last==page && pq->first==page) {  // the only page in the queue?
       mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
-      page->retire_expire = 1 + (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
+      page->retire_expire = 1 + (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
       mi_heap_t* heap = mi_page_heap(page);
       mi_assert_internal(pq >= heap->pages);
       const size_t index = pq - heap->pages;

@@ -608,7 +608,7 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co
 // allocations but this did not speed up any benchmark (due to an
 // extra test in malloc? or cache effects?)
 static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) {
-  MI_UNUSED(tld);
+  MI_UNUSED(tld);
   mi_assert_expensive(mi_page_is_valid_init(page));
   #if (MI_SECURE<=2)
   mi_assert(page->free == NULL);

@@ -720,7 +720,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
   while (page != NULL)
   {
     mi_page_t* next = page->next;  // remember next
-    #if MI_STAT
+    #if MI_STAT
     count++;
     #endif

@@ -840,19 +840,19 @@ static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size, size_t
   mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size, page_alignment);
   if (page != NULL) {
     mi_assert_internal(mi_page_immediate_available(page));
-
+
     if (is_huge) {
       mi_assert_internal(_mi_page_segment(page)->kind == MI_SEGMENT_HUGE);
       mi_assert_internal(_mi_page_segment(page)->used==1);
       #if MI_HUGE_PAGE_ABANDON
       mi_assert_internal(_mi_page_segment(page)->thread_id==0);  // abandoned, not in the huge queue
       mi_page_set_heap(page, NULL);
-      #endif
+      #endif
     }
     else {
       mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
     }
-
+
     const size_t bsize = mi_page_usable_block_size(page);  // note: not `mi_page_block_size` to account for padding
     if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
       mi_heap_stat_increase(heap, large, bsize);

@@ -871,7 +871,7 @@ static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size, size_t
 // Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
 static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignment) mi_attr_noexcept {
   // huge allocation?
-  const size_t req_size = size - MI_PADDING_SIZE;  // correct for padding_size in case of an overflow on `size`
+  const size_t req_size = size - MI_PADDING_SIZE;  // correct for padding_size in case of an overflow on `size`
   if mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE) || huge_alignment > 0) {
     if mi_unlikely(req_size > PTRDIFF_MAX) {  // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
       _mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size);

@@ -884,7 +884,7 @@ static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignme
   else {
     // otherwise find a page with free blocks in our size segregated queues
     #if MI_PADDING
-    mi_assert_internal(size >= MI_PADDING_SIZE);
+    mi_assert_internal(size >= MI_PADDING_SIZE);
     #endif
     return mi_find_free_page(heap, size);
   }

@@ -900,7 +900,7 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_al

   // initialize if necessary
   if mi_unlikely(!mi_heap_is_initialized(heap)) {
-    heap = mi_heap_get_default();  // calls mi_thread_init
+    heap = mi_heap_get_default();  // calls mi_thread_init
     if mi_unlikely(!mi_heap_is_initialized(heap)) { return NULL; }
   }
   mi_assert_internal(mi_heap_is_initialized(heap));

Objects/mimalloc/prim/unix/prim.c

Lines changed: 23 additions & 23 deletions

@@ -57,7 +57,7 @@ terms of the MIT license. A copy of the license can be found in the file

 //------------------------------------------------------------------------------------
 // Use syscalls for some primitives to allow for libraries that override open/read/close etc.
-// and do allocation themselves; using syscalls prevents recursion when mimalloc is
+// and do allocation themselves; using syscalls prevents recursion when mimalloc is
 // still initializing (issue #713)
 //------------------------------------------------------------------------------------

@@ -120,7 +120,7 @@ static bool unix_detect_overcommit(void) {
     os_overcommit = (val != 0);
   }
   #else
-  // default: overcommit is true
+  // default: overcommit is true
   #endif
   return os_overcommit;
 }

@@ -168,12 +168,12 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p
     size_t n = mi_bsr(try_alignment);
     if (((size_t)1 << n) == try_alignment && n >= 12 && n <= 30) {  // alignment is a power of 2 and 4096 <= alignment <= 1GiB
       p = mmap(addr, size, protect_flags, flags | MAP_ALIGNED(n), fd, 0);
-      if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) {
+      if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) {
        int err = errno;
        _mi_warning_message("unable to directly request aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, addr);
       }
       if (p!=MAP_FAILED) return p;
-      // fall back to regular mmap
+      // fall back to regular mmap
     }
   }
   #elif defined(MAP_ALIGN)  // Solaris

@@ -189,7 +189,7 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p
     void* hint = _mi_os_get_aligned_hint(try_alignment, size);
     if (hint != NULL) {
       p = mmap(hint, size, protect_flags, flags, fd, 0);
-      if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) {
+      if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) {
         #if MI_TRACK_ENABLED  // asan sometimes does not instrument errno correctly?
         int err = 0;
         #else

@@ -198,7 +198,7 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p
         _mi_warning_message("unable to directly request hinted aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, hint);
       }
       if (p!=MAP_FAILED) return p;
-      // fall back to regular mmap
+      // fall back to regular mmap
     }
   }
   #endif

@@ -327,9 +327,9 @@ int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_la
   mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
   mi_assert_internal(commit || !allow_large);
   mi_assert_internal(try_alignment > 0);
-
+
   *is_zero = true;
-  int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE);
+  int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE);
   *addr = unix_mmap(NULL, size, try_alignment, protect_flags, false, allow_large, is_large);
   return (*addr != NULL ? 0 : errno);
 }

@@ -357,19 +357,19 @@ int _mi_prim_commit(void* start, size_t size, bool* is_zero) {
   // was either from mmap PROT_NONE, or from decommit MADV_DONTNEED, but
   // we sometimes call commit on a range with still partially committed
   // memory and `mprotect` does not zero the range.
-  *is_zero = false;
+  *is_zero = false;
   int err = mprotect(start, size, (PROT_READ | PROT_WRITE));
-  if (err != 0) {
-    err = errno;
+  if (err != 0) {
+    err = errno;
     unix_mprotect_hint(err);
   }
   return err;
 }

 int _mi_prim_decommit(void* start, size_t size, bool* needs_recommit) {
-  int err = 0;
+  int err = 0;
   // decommit: use MADV_DONTNEED as it decreases rss immediately (unlike MADV_FREE)
-  err = unix_madvise(start, size, MADV_DONTNEED);
+  err = unix_madvise(start, size, MADV_DONTNEED);
   #if !MI_DEBUG && !MI_SECURE
   *needs_recommit = false;
   #else

@@ -381,15 +381,15 @@ int _mi_prim_decommit(void* start, size_t size, bool* needs_recommit) {
   *needs_recommit = true;
   const int fd = unix_mmap_fd();
   void* p = mmap(start, size, PROT_NONE, (MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE), fd, 0);
-  if (p != start) { err = errno; }
+  if (p != start) { err = errno; }
   */
   return err;
 }

 int _mi_prim_reset(void* start, size_t size) {
-  // We try to use `MADV_FREE` as that is the fastest. A drawback though is that it
+  // We try to use `MADV_FREE` as that is the fastest. A drawback though is that it
   // will not reduce the `rss` stats in tools like `top` even though the memory is available
-  // to other processes. With the default `MIMALLOC_PURGE_DECOMMITS=1` we ensure that by
+  // to other processes. With the default `MIMALLOC_PURGE_DECOMMITS=1` we ensure that by
   // default `MADV_DONTNEED` is used though.
   #if defined(MADV_FREE)
   static _Atomic(size_t) advice = MI_ATOMIC_VAR_INIT(MADV_FREE);

@@ -409,7 +409,7 @@ int _mi_prim_reset(void* start, size_t size) {

 int _mi_prim_protect(void* start, size_t size, bool protect) {
   int err = mprotect(start, size, protect ? PROT_NONE : (PROT_READ | PROT_WRITE));
-  if (err != 0) { err = errno; }
+  if (err != 0) { err = errno; }
   unix_mprotect_hint(err);
   return err;
 }

@@ -450,7 +450,7 @@ int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bo
     if (err != 0) {
       err = errno;
       _mi_warning_message("failed to bind huge (1GiB) pages to numa node %d (error: %d (0x%x))\n", numa_node, err, err);
-    }
+    }
   }
   return (*addr != NULL ? 0 : errno);
 }

@@ -567,9 +567,9 @@ mi_msecs_t _mi_prim_clock_now(void) {
 // low resolution timer
 mi_msecs_t _mi_prim_clock_now(void) {
   #if !defined(CLOCKS_PER_SEC) || (CLOCKS_PER_SEC == 1000) || (CLOCKS_PER_SEC == 0)
-  return (mi_msecs_t)clock();
+  return (mi_msecs_t)clock();
   #elif (CLOCKS_PER_SEC < 1000)
-  return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC);
+  return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC);
   #else
   return (mi_msecs_t)clock() / ((mi_msecs_t)CLOCKS_PER_SEC / 1000);
   #endif

@@ -609,7 +609,7 @@ void _mi_prim_process_info(mi_process_info_t* pinfo)
   pinfo->stime = timeval_secs(&rusage.ru_stime);
   #if !defined(__HAIKU__)
   pinfo->page_faults = rusage.ru_majflt;
-  #endif
+  #endif
   #if defined(__HAIKU__)
   // Haiku does not have (yet?) a way to
   // get these stats per process

@@ -750,7 +750,7 @@ bool _mi_prim_random_buf(void* buf, size_t buf_len) {

 #elif defined(__ANDROID__) || defined(__DragonFly__) || \
       defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || \
-      defined(__sun)
+      defined(__sun)

 #include <stdlib.h>
 bool _mi_prim_random_buf(void* buf, size_t buf_len) {

@@ -842,7 +842,7 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
   }
 }

-#else
+#else

 void _mi_prim_thread_init_auto_done(void) {
   // nothing
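The `_mi_prim_reset` hunk above keeps its chosen advice in an atomic so that a kernel rejecting MADV_FREE makes the process downgrade to MADV_DONTNEED once and remember it. A standalone sketch of that pattern (not mimalloc's verbatim code):

  #include <sys/mman.h>
  #include <errno.h>
  #include <stdatomic.h>

  static int reset_memory(void* start, size_t size) {
  #if defined(MADV_FREE)
    static _Atomic int advice = MADV_FREE;   // optimistic default
    int adv = atomic_load(&advice);
    int err = (madvise(start, size, adv) == 0 ? 0 : errno);
    if (err == EINVAL && adv == MADV_FREE) {
      // kernel without MADV_FREE support: remember, then retry once
      atomic_store(&advice, MADV_DONTNEED);
      err = (madvise(start, size, MADV_DONTNEED) == 0 ? 0 : errno);
    }
    return err;
  #else
    return (madvise(start, size, MADV_DONTNEED) == 0 ? 0 : errno);
  #endif
  }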

Objects/mimalloc/prim/wasi/prim.c

Lines changed: 4 additions & 4 deletions

@@ -19,7 +19,7 @@ terms of the MIT license. A copy of the license can be found in the file
 void _mi_prim_mem_init( mi_os_mem_config_t* config ) {
   config->page_size = 64*MI_KiB;  // WebAssembly has a fixed page size: 64KiB
   config->alloc_granularity = 16;
-  config->has_overcommit = false;
+  config->has_overcommit = false;
   config->must_free_whole = true;
   config->has_virtual_reserve = false;
 }

@@ -129,7 +129,7 @@ int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_la
 //---------------------------------------------

 int _mi_prim_commit(void* addr, size_t size, bool* is_zero) {
-  MI_UNUSED(addr); MI_UNUSED(size);
+  MI_UNUSED(addr); MI_UNUSED(size);
   *is_zero = false;
   return 0;
 }

@@ -194,9 +194,9 @@ mi_msecs_t _mi_prim_clock_now(void) {
 // low resolution timer
 mi_msecs_t _mi_prim_clock_now(void) {
   #if !defined(CLOCKS_PER_SEC) || (CLOCKS_PER_SEC == 1000) || (CLOCKS_PER_SEC == 0)
-  return (mi_msecs_t)clock();
+  return (mi_msecs_t)clock();
   #elif (CLOCKS_PER_SEC < 1000)
-  return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC);
+  return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC);
   #else
   return (mi_msecs_t)clock() / ((mi_msecs_t)CLOCKS_PER_SEC / 1000);
   #endif

Objects/mimalloc/prim/windows/prim.c

Lines changed: 7 additions & 7 deletions

@@ -276,7 +276,7 @@ int _mi_prim_commit(void* addr, size_t size, bool* is_zero) {
   return 0;
 }

-int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit) {
+int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit) {
   BOOL ok = VirtualFree(addr, size, MEM_DECOMMIT);
   *needs_recommit = true;  // for safety, assume always decommitted even in the case of an error.
   return (ok ? 0 : (int)GetLastError());

@@ -451,7 +451,7 @@ void _mi_prim_process_info(mi_process_info_t* pinfo)
   GetProcessTimes(GetCurrentProcess(), &ct, &et, &st, &ut);
   pinfo->utime = filetime_msecs(&ut);
   pinfo->stime = filetime_msecs(&st);
-
+
   // load psapi on demand
   if (pGetProcessMemoryInfo == NULL) {
     HINSTANCE hDll = LoadLibrary(TEXT("psapi.dll"));

@@ -465,7 +465,7 @@ void _mi_prim_process_info(mi_process_info_t* pinfo)
   memset(&info, 0, sizeof(info));
   if (pGetProcessMemoryInfo != NULL) {
     pGetProcessMemoryInfo(GetCurrentProcess(), &info, sizeof(info));
-  }
+  }
   pinfo->current_rss    = (size_t)info.WorkingSetSize;
   pinfo->peak_rss       = (size_t)info.PeakWorkingSetSize;
   pinfo->current_commit = (size_t)info.PagefileUsage;

@@ -477,7 +477,7 @@ void _mi_prim_process_info(mi_process_info_t* pinfo)
 // Output
 //----------------------------------------------------------------

-void _mi_prim_out_stderr( const char* msg )
+void _mi_prim_out_stderr( const char* msg )
 {
   // on windows with redirection, the C runtime cannot handle locale dependent output
   // after the main thread closes so we use direct console output.

@@ -560,7 +560,7 @@ bool _mi_prim_random_buf(void* buf, size_t buf_len) {
     }
     if (pBCryptGenRandom == NULL) return false;
   }
-  return (pBCryptGenRandom(NULL, (PUCHAR)buf, (ULONG)buf_len, BCRYPT_USE_SYSTEM_PREFERRED_RNG) >= 0);
+  return (pBCryptGenRandom(NULL, (PUCHAR)buf, (ULONG)buf_len, BCRYPT_USE_SYSTEM_PREFERRED_RNG) >= 0);
 }

 #endif  // MI_USE_RTLGENRANDOM

@@ -595,9 +595,9 @@ void _mi_prim_thread_init_auto_done(void) {
 }

 void _mi_prim_thread_done_auto_done(void) {
-  // call thread-done on all threads (except the main thread) to prevent
+  // call thread-done on all threads (except the main thread) to prevent
   // dangling callback pointer if statically linked with a DLL; Issue #208
-  FlsFree(mi_fls_key);
+  FlsFree(mi_fls_key);
 }

 void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {

Objects/mimalloc/random.c

Lines changed: 1 addition & 1 deletion

@@ -160,7 +160,7 @@ If we cannot get good randomness, we fall back to weak randomness based on a tim

 uintptr_t _mi_os_random_weak(uintptr_t extra_seed) {
   uintptr_t x = (uintptr_t)&_mi_os_random_weak ^ extra_seed;  // ASLR makes the address random
-  x ^= _mi_prim_clock_now();
+  x ^= _mi_prim_clock_now();
   // and do a few randomization steps
   uintptr_t max = ((x ^ (x >> 17)) & 0x0F) + 1;
   for (uintptr_t i = 0; i < max; i++) {

Objects/mimalloc/segment-map.c

Lines changed: 2 additions & 2 deletions

@@ -102,8 +102,8 @@ static mi_segment_t* _mi_segment_of(const void* p) {
       uintptr_t lomask = mask;
       loindex = index;
       do {
-        loindex--;
-        lomask = mi_atomic_load_relaxed(&mi_segment_map[loindex]);
+        loindex--;
+        lomask = mi_atomic_load_relaxed(&mi_segment_map[loindex]);
       } while (lomask != 0 && loindex > 0);
       if (lomask == 0) return NULL;
       lobitidx = mi_bsr(lomask);  // lomask != 0

Objects/mimalloc/segment.c

Lines changed: 71 additions & 73 deletions
Large diffs are not rendered by default.

Objects/mimalloc/stats.c

Lines changed: 1 addition & 1 deletion

@@ -455,7 +455,7 @@ mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, s
   pinfo.page_faults = 0;

   _mi_prim_process_info(&pinfo);
-
+
   if (elapsed_msecs!=NULL) *elapsed_msecs = (pinfo.elapsed < 0 ? 0 : (pinfo.elapsed < (mi_msecs_t)PTRDIFF_MAX ? (size_t)pinfo.elapsed : PTRDIFF_MAX));
   if (user_msecs!=NULL)    *user_msecs    = (pinfo.utime < 0 ? 0 : (pinfo.utime < (mi_msecs_t)PTRDIFF_MAX ? (size_t)pinfo.utime : PTRDIFF_MAX));
   if (system_msecs!=NULL)  *system_msecs  = (pinfo.stime < 0 ? 0 : (pinfo.stime < (mi_msecs_t)PTRDIFF_MAX ? (size_t)pinfo.stime : PTRDIFF_MAX));
