Commit e3b8492e authored by antirez

Revert "Jemalloc updated to 4.4.0."

This reverts commit 36c1acc2.
parent 238cebdd
@@ -15,21 +15,12 @@ huge_node_get(const void *ptr)
} }
static bool static bool
huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node) huge_node_set(const void *ptr, extent_node_t *node)
{ {
assert(extent_node_addr_get(node) == ptr); assert(extent_node_addr_get(node) == ptr);
assert(!extent_node_achunk_get(node)); assert(!extent_node_achunk_get(node));
return (chunk_register(tsdn, ptr, node)); return (chunk_register(ptr, node));
}
static void
huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
{
bool err;
err = huge_node_set(tsdn, ptr, node);
assert(!err);
} }
static void static void
@@ -40,39 +31,39 @@ huge_node_unset(const void *ptr, const extent_node_t *node)
} }
void * void *
huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
tcache_t *tcache)
{ {
size_t usize;
assert(usize == s2u(usize)); usize = s2u(size);
if (usize == 0) {
/* size_t overflow. */
return (NULL);
}
return (huge_palloc(tsdn, arena, usize, chunksize, zero)); return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
} }
void * void *
huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
bool zero) bool zero, tcache_t *tcache)
{ {
void *ret; void *ret;
size_t ausize; size_t usize;
arena_t *iarena;
extent_node_t *node; extent_node_t *node;
size_t sn;
bool is_zeroed; bool is_zeroed;
/* Allocate one or more contiguous chunks for this request. */ /* Allocate one or more contiguous chunks for this request. */
assert(!tsdn_null(tsdn) || arena != NULL); usize = sa2u(size, alignment);
if (unlikely(usize == 0))
ausize = sa2u(usize, alignment);
if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
return (NULL); return (NULL);
assert(ausize >= chunksize); assert(usize >= chunksize);
/* Allocate an extent node with which to track the chunk. */ /* Allocate an extent node with which to track the chunk. */
iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) : node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
a0get(); CACHELINE, false, tcache, true, arena);
node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)),
CACHELINE, false, NULL, true, iarena);
if (node == NULL) if (node == NULL)
return (NULL); return (NULL);
@@ -81,35 +72,33 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
* it is possible to make correct junk/zero fill decisions below. * it is possible to make correct junk/zero fill decisions below.
*/ */
is_zeroed = zero; is_zeroed = zero;
if (likely(!tsdn_null(tsdn))) arena = arena_choose(tsd, arena);
arena = arena_choose(tsdn_tsd(tsdn), arena); if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn, size, alignment, &is_zeroed)) == NULL) {
arena, usize, alignment, &sn, &is_zeroed)) == NULL) { idalloctm(tsd, node, tcache, true);
idalloctm(tsdn, node, NULL, true, true);
return (NULL); return (NULL);
} }
extent_node_init(node, arena, ret, usize, sn, is_zeroed, true); extent_node_init(node, arena, ret, size, is_zeroed, true);
if (huge_node_set(tsdn, ret, node)) { if (huge_node_set(ret, node)) {
arena_chunk_dalloc_huge(tsdn, arena, ret, usize, sn); arena_chunk_dalloc_huge(arena, ret, size);
idalloctm(tsdn, node, NULL, true, true); idalloctm(tsd, node, tcache, true);
return (NULL); return (NULL);
} }
/* Insert node into huge. */ /* Insert node into huge. */
malloc_mutex_lock(tsdn, &arena->huge_mtx); malloc_mutex_lock(&arena->huge_mtx);
ql_elm_new(node, ql_link); ql_elm_new(node, ql_link);
ql_tail_insert(&arena->huge, node, ql_link); ql_tail_insert(&arena->huge, node, ql_link);
malloc_mutex_unlock(tsdn, &arena->huge_mtx); malloc_mutex_unlock(&arena->huge_mtx);
if (zero || (config_fill && unlikely(opt_zero))) { if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed) if (!is_zeroed)
memset(ret, 0, usize); memset(ret, 0, size);
} else if (config_fill && unlikely(opt_junk_alloc)) } else if (config_fill && unlikely(opt_junk_alloc))
memset(ret, JEMALLOC_ALLOC_JUNK, usize); memset(ret, 0xa5, size);
arena_decay_tick(tsdn, arena);
return (ret); return (ret);
} }
@@ -127,7 +116,7 @@ huge_dalloc_junk(void *ptr, size_t usize)
* unmapped. * unmapped.
*/ */
if (!config_munmap || (have_dss && chunk_in_dss(ptr))) if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
memset(ptr, JEMALLOC_FREE_JUNK, usize); memset(ptr, 0x5a, usize);
} }
} }
#ifdef JEMALLOC_JET #ifdef JEMALLOC_JET
@@ -137,8 +126,8 @@ huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif #endif
static void static void
huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize, huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
size_t usize_min, size_t usize_max, bool zero) size_t usize_max, bool zero)
{ {
size_t usize, usize_next; size_t usize, usize_next;
extent_node_t *node; extent_node_t *node;
@@ -162,28 +151,24 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
if (oldsize > usize) { if (oldsize > usize) {
size_t sdiff = oldsize - usize; size_t sdiff = oldsize - usize;
if (config_fill && unlikely(opt_junk_free)) { if (config_fill && unlikely(opt_junk_free)) {
memset((void *)((uintptr_t)ptr + usize), memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
JEMALLOC_FREE_JUNK, sdiff);
post_zeroed = false; post_zeroed = false;
} else { } else {
post_zeroed = !chunk_purge_wrapper(tsdn, arena, post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
&chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize, ptr, CHUNK_CEILING(oldsize), usize, sdiff);
sdiff);
} }
} else } else
post_zeroed = pre_zeroed; post_zeroed = pre_zeroed;
malloc_mutex_lock(tsdn, &arena->huge_mtx); malloc_mutex_lock(&arena->huge_mtx);
/* Update the size of the huge allocation. */ /* Update the size of the huge allocation. */
huge_node_unset(ptr, node);
assert(extent_node_size_get(node) != usize); assert(extent_node_size_get(node) != usize);
extent_node_size_set(node, usize); extent_node_size_set(node, usize);
huge_node_reset(tsdn, ptr, node);
/* Update zeroed. */ /* Update zeroed. */
extent_node_zeroed_set(node, post_zeroed); extent_node_zeroed_set(node, post_zeroed);
malloc_mutex_unlock(tsdn, &arena->huge_mtx); malloc_mutex_unlock(&arena->huge_mtx);
arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize); arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);
/* Fill if necessary (growing). */ /* Fill if necessary (growing). */
if (oldsize < usize) { if (oldsize < usize) {
@@ -193,15 +178,14 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
usize - oldsize); usize - oldsize);
} }
} else if (config_fill && unlikely(opt_junk_alloc)) { } else if (config_fill && unlikely(opt_junk_alloc)) {
memset((void *)((uintptr_t)ptr + oldsize), memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
JEMALLOC_ALLOC_JUNK, usize - oldsize); oldsize);
} }
} }
} }
static bool static bool
huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize, huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
size_t usize)
{ {
extent_node_t *node; extent_node_t *node;
arena_t *arena; arena_t *arena;
@@ -212,7 +196,7 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
node = huge_node_get(ptr); node = huge_node_get(ptr);
arena = extent_node_arena_get(node); arena = extent_node_arena_get(node);
pre_zeroed = extent_node_zeroed_get(node); pre_zeroed = extent_node_zeroed_get(node);
chunk_hooks = chunk_hooks_get(tsdn, arena); chunk_hooks = chunk_hooks_get(arena);
assert(oldsize > usize); assert(oldsize > usize);
@@ -229,59 +213,53 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
sdiff); sdiff);
post_zeroed = false; post_zeroed = false;
} else { } else {
post_zeroed = !chunk_purge_wrapper(tsdn, arena, post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
&chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr + CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
usize), CHUNK_CEILING(oldsize), CHUNK_CEILING(oldsize),
CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff); CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
} }
} else } else
post_zeroed = pre_zeroed; post_zeroed = pre_zeroed;
malloc_mutex_lock(tsdn, &arena->huge_mtx); malloc_mutex_lock(&arena->huge_mtx);
/* Update the size of the huge allocation. */ /* Update the size of the huge allocation. */
huge_node_unset(ptr, node);
extent_node_size_set(node, usize); extent_node_size_set(node, usize);
huge_node_reset(tsdn, ptr, node);
/* Update zeroed. */ /* Update zeroed. */
extent_node_zeroed_set(node, post_zeroed); extent_node_zeroed_set(node, post_zeroed);
malloc_mutex_unlock(tsdn, &arena->huge_mtx); malloc_mutex_unlock(&arena->huge_mtx);
/* Zap the excess chunks. */ /* Zap the excess chunks. */
arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize, arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);
extent_node_sn_get(node));
return (false); return (false);
} }
static bool static bool
huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize, huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) {
size_t usize, bool zero) {
extent_node_t *node; extent_node_t *node;
arena_t *arena; arena_t *arena;
bool is_zeroed_subchunk, is_zeroed_chunk; bool is_zeroed_subchunk, is_zeroed_chunk;
node = huge_node_get(ptr); node = huge_node_get(ptr);
arena = extent_node_arena_get(node); arena = extent_node_arena_get(node);
malloc_mutex_lock(tsdn, &arena->huge_mtx); malloc_mutex_lock(&arena->huge_mtx);
is_zeroed_subchunk = extent_node_zeroed_get(node); is_zeroed_subchunk = extent_node_zeroed_get(node);
malloc_mutex_unlock(tsdn, &arena->huge_mtx); malloc_mutex_unlock(&arena->huge_mtx);
/* /*
* Use is_zeroed_chunk to detect whether the trailing memory is zeroed, * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
* update extent's zeroed field, and zero as necessary. * that it is possible to make correct junk/zero fill decisions below.
*/ */
is_zeroed_chunk = false; is_zeroed_chunk = zero;
if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize,
if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
&is_zeroed_chunk)) &is_zeroed_chunk))
return (true); return (true);
malloc_mutex_lock(tsdn, &arena->huge_mtx); malloc_mutex_lock(&arena->huge_mtx);
huge_node_unset(ptr, node); /* Update the size of the huge allocation. */
extent_node_size_set(node, usize); extent_node_size_set(node, usize);
extent_node_zeroed_set(node, extent_node_zeroed_get(node) && malloc_mutex_unlock(&arena->huge_mtx);
is_zeroed_chunk);
huge_node_reset(tsdn, ptr, node);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
if (zero || (config_fill && unlikely(opt_zero))) { if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed_subchunk) { if (!is_zeroed_subchunk) {
@@ -294,21 +272,19 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
CHUNK_CEILING(oldsize)); CHUNK_CEILING(oldsize));
} }
} else if (config_fill && unlikely(opt_junk_alloc)) { } else if (config_fill && unlikely(opt_junk_alloc)) {
memset((void *)((uintptr_t)ptr + oldsize), JEMALLOC_ALLOC_JUNK, memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
usize - oldsize); oldsize);
} }
return (false); return (false);
} }
bool bool
huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min, huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
size_t usize_max, bool zero) size_t usize_max, bool zero)
{ {
assert(s2u(oldsize) == oldsize); assert(s2u(oldsize) == oldsize);
/* The following should have been caught by callers. */
assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);
/* Both allocations must be huge to avoid a move. */ /* Both allocations must be huge to avoid a move. */
if (oldsize < chunksize || usize_max < chunksize) if (oldsize < chunksize || usize_max < chunksize)
@@ -316,18 +292,13 @@ huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) { if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
/* Attempt to expand the allocation in-place. */ /* Attempt to expand the allocation in-place. */
if (!huge_ralloc_no_move_expand(tsdn, ptr, oldsize, usize_max, if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max, zero))
zero)) {
arena_decay_tick(tsdn, huge_aalloc(ptr));
return (false); return (false);
}
/* Try again, this time with usize_min. */ /* Try again, this time with usize_min. */
if (usize_min < usize_max && CHUNK_CEILING(usize_min) > if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsdn, CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr,
ptr, oldsize, usize_min, zero)) { oldsize, usize_min, zero))
arena_decay_tick(tsdn, huge_aalloc(ptr));
return (false); return (false);
}
} }
/* /*
@@ -336,46 +307,36 @@ huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
*/ */
if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min) if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) { && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
huge_ralloc_no_move_similar(tsdn, ptr, oldsize, usize_min, huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
usize_max, zero); zero);
arena_decay_tick(tsdn, huge_aalloc(ptr));
return (false); return (false);
} }
/* Attempt to shrink the allocation in-place. */ /* Attempt to shrink the allocation in-place. */
if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) { if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max))
if (!huge_ralloc_no_move_shrink(tsdn, ptr, oldsize, return (huge_ralloc_no_move_shrink(ptr, oldsize, usize_max));
usize_max)) {
arena_decay_tick(tsdn, huge_aalloc(ptr));
return (false);
}
}
return (true); return (true);
} }
static void * static void *
huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
size_t alignment, bool zero) size_t alignment, bool zero, tcache_t *tcache)
{ {
if (alignment <= chunksize) if (alignment <= chunksize)
return (huge_malloc(tsdn, arena, usize, zero)); return (huge_malloc(tsd, arena, usize, zero, tcache));
return (huge_palloc(tsdn, arena, usize, alignment, zero)); return (huge_palloc(tsd, arena, usize, alignment, zero, tcache));
} }
void * void *
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
size_t usize, size_t alignment, bool zero, tcache_t *tcache) size_t alignment, bool zero, tcache_t *tcache)
{ {
void *ret; void *ret;
size_t copysize; size_t copysize;
/* The following should have been caught by callers. */
assert(usize > 0 && usize <= HUGE_MAXCLASS);
/* Try to avoid moving the allocation. */ /* Try to avoid moving the allocation. */
if (!huge_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, usize, if (!huge_ralloc_no_move(ptr, oldsize, usize, usize, zero))
zero))
return (ptr); return (ptr);
/* /*
@@ -383,19 +344,19 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
* different size class. In that case, fall back to allocating new * different size class. In that case, fall back to allocating new
* space and copying. * space and copying.
*/ */
ret = huge_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, alignment, ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero,
zero); tcache);
if (ret == NULL) if (ret == NULL)
return (NULL); return (NULL);
copysize = (usize < oldsize) ? usize : oldsize; copysize = (usize < oldsize) ? usize : oldsize;
memcpy(ret, ptr, copysize); memcpy(ret, ptr, copysize);
isqalloc(tsd, ptr, oldsize, tcache, true); isqalloc(tsd, ptr, oldsize, tcache);
return (ret); return (ret);
} }
void void
huge_dalloc(tsdn_t *tsdn, void *ptr) huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{ {
extent_node_t *node; extent_node_t *node;
arena_t *arena; arena_t *arena;
@@ -403,18 +364,15 @@ huge_dalloc(tsdn_t *tsdn, void *ptr)
node = huge_node_get(ptr); node = huge_node_get(ptr);
arena = extent_node_arena_get(node); arena = extent_node_arena_get(node);
huge_node_unset(ptr, node); huge_node_unset(ptr, node);
malloc_mutex_lock(tsdn, &arena->huge_mtx); malloc_mutex_lock(&arena->huge_mtx);
ql_remove(&arena->huge, node, ql_link); ql_remove(&arena->huge, node, ql_link);
malloc_mutex_unlock(tsdn, &arena->huge_mtx); malloc_mutex_unlock(&arena->huge_mtx);
huge_dalloc_junk(extent_node_addr_get(node), huge_dalloc_junk(extent_node_addr_get(node),
extent_node_size_get(node)); extent_node_size_get(node));
arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node), arena_chunk_dalloc_huge(extent_node_arena_get(node),
extent_node_addr_get(node), extent_node_size_get(node), extent_node_addr_get(node), extent_node_size_get(node));
extent_node_sn_get(node)); idalloctm(tsd, node, tcache, true);
idalloctm(tsdn, node, NULL, true, true);
arena_decay_tick(tsdn, arena);
} }
arena_t * arena_t *
@@ -425,7 +383,7 @@ huge_aalloc(const void *ptr)
} }
size_t size_t
huge_salloc(tsdn_t *tsdn, const void *ptr) huge_salloc(const void *ptr)
{ {
size_t size; size_t size;
extent_node_t *node; extent_node_t *node;
@@ -433,15 +391,15 @@ huge_salloc(tsdn_t *tsdn, const void *ptr)
node = huge_node_get(ptr); node = huge_node_get(ptr);
arena = extent_node_arena_get(node); arena = extent_node_arena_get(node);
malloc_mutex_lock(tsdn, &arena->huge_mtx); malloc_mutex_lock(&arena->huge_mtx);
size = extent_node_size_get(node); size = extent_node_size_get(node);
malloc_mutex_unlock(tsdn, &arena->huge_mtx); malloc_mutex_unlock(&arena->huge_mtx);
return (size); return (size);
} }
prof_tctx_t * prof_tctx_t *
huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr) huge_prof_tctx_get(const void *ptr)
{ {
prof_tctx_t *tctx; prof_tctx_t *tctx;
extent_node_t *node; extent_node_t *node;
@@ -449,29 +407,29 @@ huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
node = huge_node_get(ptr); node = huge_node_get(ptr);
arena = extent_node_arena_get(node); arena = extent_node_arena_get(node);
malloc_mutex_lock(tsdn, &arena->huge_mtx); malloc_mutex_lock(&arena->huge_mtx);
tctx = extent_node_prof_tctx_get(node); tctx = extent_node_prof_tctx_get(node);
malloc_mutex_unlock(tsdn, &arena->huge_mtx); malloc_mutex_unlock(&arena->huge_mtx);
return (tctx); return (tctx);
} }
void void
huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{ {
extent_node_t *node; extent_node_t *node;
arena_t *arena; arena_t *arena;
node = huge_node_get(ptr); node = huge_node_get(ptr);
arena = extent_node_arena_get(node); arena = extent_node_arena_get(node);
malloc_mutex_lock(tsdn, &arena->huge_mtx); malloc_mutex_lock(&arena->huge_mtx);
extent_node_prof_tctx_set(node, tctx); extent_node_prof_tctx_set(node, tctx);
malloc_mutex_unlock(tsdn, &arena->huge_mtx); malloc_mutex_unlock(&arena->huge_mtx);
} }
void void
huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr) huge_prof_tctx_reset(const void *ptr)
{ {
huge_prof_tctx_set(tsdn, ptr, (prof_tctx_t *)(uintptr_t)1U); huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
} }
@@ -5,11 +5,7 @@
/* Data. */ /* Data. */
/* Runtime configuration options. */ /* Runtime configuration options. */
const char *je_malloc_conf const char *je_malloc_conf JEMALLOC_ATTR(weak);
#ifndef _WIN32
JEMALLOC_ATTR(weak)
#endif
;
bool opt_abort = bool opt_abort =
#ifdef JEMALLOC_DEBUG #ifdef JEMALLOC_DEBUG
true true
@@ -44,14 +40,14 @@ bool opt_redzone = false;
bool opt_utrace = false; bool opt_utrace = false;
bool opt_xmalloc = false; bool opt_xmalloc = false;
bool opt_zero = false; bool opt_zero = false;
unsigned opt_narenas = 0; size_t opt_narenas = 0;
/* Initialized to true if the process is running inside Valgrind. */ /* Initialized to true if the process is running inside Valgrind. */
bool in_valgrind; bool in_valgrind;
unsigned ncpus; unsigned ncpus;
/* Protects arenas initialization. */ /* Protects arenas initialization (arenas, narenas_total). */
static malloc_mutex_t arenas_lock; static malloc_mutex_t arenas_lock;
/* /*
* Arenas that are used to service external requests. Not all elements of the * Arenas that are used to service external requests. Not all elements of the
@@ -61,10 +57,10 @@ static malloc_mutex_t arenas_lock;
* arenas. arenas[narenas_auto..narenas_total) are only used if the application * arenas. arenas[narenas_auto..narenas_total) are only used if the application
* takes some action to create them and allocate from them. * takes some action to create them and allocate from them.
*/ */
arena_t **arenas; static arena_t **arenas;
static unsigned narenas_total; /* Use narenas_total_*(). */ static unsigned narenas_total;
static arena_t *a0; /* arenas[0]; read-only after initialization. */ static arena_t *a0; /* arenas[0]; read-only after initialization. */
unsigned narenas_auto; /* Read-only after initialization. */ static unsigned narenas_auto; /* Read-only after initialization. */
typedef enum { typedef enum {
malloc_init_uninitialized = 3, malloc_init_uninitialized = 3,
@@ -74,37 +70,9 @@ typedef enum {
} malloc_init_t; } malloc_init_t;
static malloc_init_t malloc_init_state = malloc_init_uninitialized; static malloc_init_t malloc_init_state = malloc_init_uninitialized;
/* False should be the common case. Set to true to trigger initialization. */
static bool malloc_slow = true;
/* When malloc_slow is true, set the corresponding bits for sanity check. */
enum {
flag_opt_junk_alloc = (1U),
flag_opt_junk_free = (1U << 1),
flag_opt_quarantine = (1U << 2),
flag_opt_zero = (1U << 3),
flag_opt_utrace = (1U << 4),
flag_in_valgrind = (1U << 5),
flag_opt_xmalloc = (1U << 6)
};
static uint8_t malloc_slow_flags;
JEMALLOC_ALIGNED(CACHELINE)
const size_t pind2sz_tab[NPSIZES] = {
#define PSZ_yes(lg_grp, ndelta, lg_delta) \
(((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))),
#define PSZ_no(lg_grp, ndelta, lg_delta)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
PSZ_##psz(lg_grp, ndelta, lg_delta)
SIZE_CLASSES
#undef PSZ_yes
#undef PSZ_no
#undef SC
};
JEMALLOC_ALIGNED(CACHELINE) JEMALLOC_ALIGNED(CACHELINE)
const size_t index2size_tab[NSIZES] = { const size_t index2size_tab[NSIZES] = {
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \ #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)), ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
SIZE_CLASSES SIZE_CLASSES
#undef SC #undef SC
@@ -176,7 +144,7 @@ const uint8_t size2index_tab[] = {
#define S2B_11(i) S2B_10(i) S2B_10(i) #define S2B_11(i) S2B_10(i) S2B_10(i)
#endif #endif
#define S2B_no(i) #define S2B_no(i)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \ #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
S2B_##lg_delta_lookup(index) S2B_##lg_delta_lookup(index)
SIZE_CLASSES SIZE_CLASSES
#undef S2B_3 #undef S2B_3
@@ -227,7 +195,7 @@ _init_init_lock(void)
* really only matters early in the process creation, before any * really only matters early in the process creation, before any
* separate thread normally starts doing anything. */ * separate thread normally starts doing anything. */
if (!init_lock_initialized) if (!init_lock_initialized)
malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT); malloc_mutex_init(&init_lock);
init_lock_initialized = true; init_lock_initialized = true;
} }
@@ -322,10 +290,18 @@ malloc_init(void)
} }
/* /*
* The a0*() functions are used instead of i{d,}alloc() in situations that * The a0*() functions are used instead of i[mcd]alloc() in situations that
* cannot tolerate TLS variable access. * cannot tolerate TLS variable access.
*/ */
arena_t *
a0get(void)
{
assert(a0 != NULL);
return (a0);
}
static void * static void *
a0ialloc(size_t size, bool zero, bool is_metadata) a0ialloc(size_t size, bool zero, bool is_metadata)
{ {
@@ -333,22 +309,14 @@ a0ialloc(size_t size, bool zero, bool is_metadata)
if (unlikely(malloc_init_a0())) if (unlikely(malloc_init_a0()))
return (NULL); return (NULL);
return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL, return (iallocztm(NULL, size, zero, false, is_metadata, a0get()));
is_metadata, arena_get(TSDN_NULL, 0, true), true));
} }
static void static void
a0idalloc(void *ptr, bool is_metadata) a0idalloc(void *ptr, bool is_metadata)
{ {
idalloctm(TSDN_NULL, ptr, false, is_metadata, true); idalloctm(NULL, ptr, false, is_metadata);
}
arena_t *
a0get(void)
{
return (a0);
} }
void * void *
@@ -405,228 +373,224 @@ bootstrap_free(void *ptr)
a0idalloc(ptr, false); a0idalloc(ptr, false);
} }
static void
arena_set(unsigned ind, arena_t *arena)
{
atomic_write_p((void **)&arenas[ind], arena);
}
static void
narenas_total_set(unsigned narenas)
{
atomic_write_u(&narenas_total, narenas);
}
static void
narenas_total_inc(void)
{
atomic_add_u(&narenas_total, 1);
}
unsigned
narenas_total_get(void)
{
return (atomic_read_u(&narenas_total));
}
/* Create a new arena and insert it into the arenas array at index ind. */ /* Create a new arena and insert it into the arenas array at index ind. */
static arena_t * static arena_t *
arena_init_locked(tsdn_t *tsdn, unsigned ind) arena_init_locked(unsigned ind)
{ {
arena_t *arena; arena_t *arena;
assert(ind <= narenas_total_get()); /* Expand arenas if necessary. */
assert(ind <= narenas_total);
if (ind > MALLOCX_ARENA_MAX) if (ind > MALLOCX_ARENA_MAX)
return (NULL); return (NULL);
if (ind == narenas_total_get()) if (ind == narenas_total) {
narenas_total_inc(); unsigned narenas_new = narenas_total + 1;
arena_t **arenas_new =
(arena_t **)a0malloc(CACHELINE_CEILING(narenas_new *
sizeof(arena_t *)));
if (arenas_new == NULL)
return (NULL);
memcpy(arenas_new, arenas, narenas_total * sizeof(arena_t *));
arenas_new[ind] = NULL;
/*
* Deallocate only if arenas came from a0malloc() (not
* base_alloc()).
*/
if (narenas_total != narenas_auto)
a0dalloc(arenas);
arenas = arenas_new;
narenas_total = narenas_new;
}
/* /*
* Another thread may have already initialized arenas[ind] if it's an * Another thread may have already initialized arenas[ind] if it's an
* auto arena. * auto arena.
*/ */
arena = arena_get(tsdn, ind, false); arena = arenas[ind];
if (arena != NULL) { if (arena != NULL) {
assert(ind < narenas_auto); assert(ind < narenas_auto);
return (arena); return (arena);
} }
/* Actually initialize the arena. */ /* Actually initialize the arena. */
arena = arena_new(tsdn, ind); arena = arenas[ind] = arena_new(ind);
arena_set(ind, arena);
return (arena); return (arena);
} }
arena_t * arena_t *
arena_init(tsdn_t *tsdn, unsigned ind) arena_init(unsigned ind)
{ {
arena_t *arena; arena_t *arena;
malloc_mutex_lock(tsdn, &arenas_lock); malloc_mutex_lock(&arenas_lock);
arena = arena_init_locked(tsdn, ind); arena = arena_init_locked(ind);
malloc_mutex_unlock(tsdn, &arenas_lock); malloc_mutex_unlock(&arenas_lock);
return (arena); return (arena);
} }
unsigned
narenas_total_get(void)
{
unsigned narenas;
malloc_mutex_lock(&arenas_lock);
narenas = narenas_total;
malloc_mutex_unlock(&arenas_lock);
return (narenas);
}
static void static void
arena_bind(tsd_t *tsd, unsigned ind, bool internal) arena_bind_locked(tsd_t *tsd, unsigned ind)
{ {
arena_t *arena; arena_t *arena;
if (!tsd_nominal(tsd)) arena = arenas[ind];
return; arena->nthreads++;
arena = arena_get(tsd_tsdn(tsd), ind, false);
arena_nthreads_inc(arena, internal);
if (internal) if (tsd_nominal(tsd))
tsd_iarena_set(tsd, arena);
else
tsd_arena_set(tsd, arena); tsd_arena_set(tsd, arena);
} }
static void
arena_bind(tsd_t *tsd, unsigned ind)
{
malloc_mutex_lock(&arenas_lock);
arena_bind_locked(tsd, ind);
malloc_mutex_unlock(&arenas_lock);
}
void void
arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
{ {
arena_t *oldarena, *newarena; arena_t *oldarena, *newarena;
oldarena = arena_get(tsd_tsdn(tsd), oldind, false); malloc_mutex_lock(&arenas_lock);
newarena = arena_get(tsd_tsdn(tsd), newind, false); oldarena = arenas[oldind];
arena_nthreads_dec(oldarena, false); newarena = arenas[newind];
arena_nthreads_inc(newarena, false); oldarena->nthreads--;
newarena->nthreads++;
malloc_mutex_unlock(&arenas_lock);
tsd_arena_set(tsd, newarena); tsd_arena_set(tsd, newarena);
} }
unsigned
arena_nbound(unsigned ind)
{
unsigned nthreads;
malloc_mutex_lock(&arenas_lock);
nthreads = arenas[ind]->nthreads;
malloc_mutex_unlock(&arenas_lock);
return (nthreads);
}
static void static void
arena_unbind(tsd_t *tsd, unsigned ind, bool internal) arena_unbind(tsd_t *tsd, unsigned ind)
{ {
arena_t *arena; arena_t *arena;
arena = arena_get(tsd_tsdn(tsd), ind, false); malloc_mutex_lock(&arenas_lock);
arena_nthreads_dec(arena, internal); arena = arenas[ind];
if (internal) arena->nthreads--;
tsd_iarena_set(tsd, NULL); malloc_mutex_unlock(&arenas_lock);
else tsd_arena_set(tsd, NULL);
tsd_arena_set(tsd, NULL);
} }
arena_tdata_t * arena_t *
arena_tdata_get_hard(tsd_t *tsd, unsigned ind) arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
{ {
arena_tdata_t *tdata, *arenas_tdata_old; arena_t *arena;
arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); arena_t **arenas_cache = tsd_arenas_cache_get(tsd);
unsigned narenas_tdata_old, i; unsigned narenas_cache = tsd_narenas_cache_get(tsd);
unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
unsigned narenas_actual = narenas_total_get(); unsigned narenas_actual = narenas_total_get();
/* /* Deallocate old cache if it's too small. */
* Dissociate old tdata array (and set up for deallocation upon return) if (arenas_cache != NULL && narenas_cache < narenas_actual) {
* if it's too small. a0dalloc(arenas_cache);
*/ arenas_cache = NULL;
if (arenas_tdata != NULL && narenas_tdata < narenas_actual) { narenas_cache = 0;
arenas_tdata_old = arenas_tdata; tsd_arenas_cache_set(tsd, arenas_cache);
narenas_tdata_old = narenas_tdata; tsd_narenas_cache_set(tsd, narenas_cache);
arenas_tdata = NULL; }
narenas_tdata = 0;
tsd_arenas_tdata_set(tsd, arenas_tdata); /* Allocate cache if it's missing. */
tsd_narenas_tdata_set(tsd, narenas_tdata); if (arenas_cache == NULL) {
} else { bool *arenas_cache_bypassp = tsd_arenas_cache_bypassp_get(tsd);
arenas_tdata_old = NULL; assert(ind < narenas_actual || !init_if_missing);
narenas_tdata_old = 0; narenas_cache = (ind < narenas_actual) ? narenas_actual : ind+1;
}
if (tsd_nominal(tsd) && !*arenas_cache_bypassp) {
/* Allocate tdata array if it's missing. */ *arenas_cache_bypassp = true;
if (arenas_tdata == NULL) { arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) *
bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd); narenas_cache);
narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1; *arenas_cache_bypassp = false;
if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
*arenas_tdata_bypassp = true;
arenas_tdata = (arena_tdata_t *)a0malloc(
sizeof(arena_tdata_t) * narenas_tdata);
*arenas_tdata_bypassp = false;
} }
if (arenas_tdata == NULL) { if (arenas_cache == NULL) {
tdata = NULL; /*
goto label_return; * This function must always tell the truth, even if
* it's slow, so don't let OOM, thread cleanup (note
* tsd_nominal check), nor recursive allocation
* avoidance (note arenas_cache_bypass check) get in the
* way.
*/
if (ind >= narenas_actual)
return (NULL);
malloc_mutex_lock(&arenas_lock);
arena = arenas[ind];
malloc_mutex_unlock(&arenas_lock);
return (arena);
} }
assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp); assert(tsd_nominal(tsd) && !*arenas_cache_bypassp);
tsd_arenas_tdata_set(tsd, arenas_tdata); tsd_arenas_cache_set(tsd, arenas_cache);
tsd_narenas_tdata_set(tsd, narenas_tdata); tsd_narenas_cache_set(tsd, narenas_cache);
} }
/* /*
* Copy to tdata array. It's possible that the actual number of arenas * Copy to cache. It's possible that the actual number of arenas has
* has increased since narenas_total_get() was called above, but that * increased since narenas_total_get() was called above, but that causes
* causes no correctness issues unless two threads concurrently execute * no correctness issues unless two threads concurrently execute the
* the arenas.extend mallctl, which we trust mallctl synchronization to * arenas.extend mallctl, which we trust mallctl synchronization to
* prevent. * prevent.
*/ */
malloc_mutex_lock(&arenas_lock);
/* Copy/initialize tickers. */ memcpy(arenas_cache, arenas, sizeof(arena_t *) * narenas_actual);
for (i = 0; i < narenas_actual; i++) { malloc_mutex_unlock(&arenas_lock);
if (i < narenas_tdata_old) { if (narenas_cache > narenas_actual) {
ticker_copy(&arenas_tdata[i].decay_ticker, memset(&arenas_cache[narenas_actual], 0, sizeof(arena_t *) *
&arenas_tdata_old[i].decay_ticker); (narenas_cache - narenas_actual));
} else { }
ticker_init(&arenas_tdata[i].decay_ticker,
DECAY_NTICKS_PER_UPDATE); /* Read the refreshed cache, and init the arena if necessary. */
} arena = arenas_cache[ind];
} if (init_if_missing && arena == NULL)
if (narenas_tdata > narenas_actual) { arena = arenas_cache[ind] = arena_init(ind);
memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t) return (arena);
* (narenas_tdata - narenas_actual));
}
/* Read the refreshed tdata array. */
tdata = &arenas_tdata[ind];
label_return:
if (arenas_tdata_old != NULL)
a0dalloc(arenas_tdata_old);
return (tdata);
} }
/* Slow path, called only by arena_choose(). */ /* Slow path, called only by arena_choose(). */
arena_t * arena_t *
arena_choose_hard(tsd_t *tsd, bool internal) arena_choose_hard(tsd_t *tsd)
{ {
arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL); arena_t *ret;
if (narenas_auto > 1) { if (narenas_auto > 1) {
unsigned i, j, choose[2], first_null; unsigned i, choose, first_null;
/*
* Determine binding for both non-internal and internal
* allocation.
*
* choose[0]: For application allocation.
* choose[1]: For internal metadata allocation.
*/
for (j = 0; j < 2; j++)
choose[j] = 0;
choose = 0;
first_null = narenas_auto; first_null = narenas_auto;
malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock); malloc_mutex_lock(&arenas_lock);
assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL); assert(a0get() != NULL);
for (i = 1; i < narenas_auto; i++) { for (i = 1; i < narenas_auto; i++) {
if (arena_get(tsd_tsdn(tsd), i, false) != NULL) { if (arenas[i] != NULL) {
/* /*
* Choose the first arena that has the lowest * Choose the first arena that has the lowest
* number of threads assigned to it. * number of threads assigned to it.
*/ */
for (j = 0; j < 2; j++) { if (arenas[i]->nthreads <
if (arena_nthreads_get(arena_get( arenas[choose]->nthreads)
tsd_tsdn(tsd), i, false), !!j) < choose = i;
arena_nthreads_get(arena_get(
tsd_tsdn(tsd), choose[j], false),
!!j))
choose[j] = i;
}
} else if (first_null == narenas_auto) { } else if (first_null == narenas_auto) {
/* /*
* Record the index of the first uninitialized * Record the index of the first uninitialized
@@ -641,40 +605,27 @@ arena_choose_hard(tsd_t *tsd, bool internal)
} }
} }
for (j = 0; j < 2; j++) { if (arenas[choose]->nthreads == 0
if (arena_nthreads_get(arena_get(tsd_tsdn(tsd), || first_null == narenas_auto) {
choose[j], false), !!j) == 0 || first_null == /*
narenas_auto) { * Use an unloaded arena, or the least loaded arena if
/* * all arenas are already initialized.
* Use an unloaded arena, or the least loaded */
* arena if all arenas are already initialized. ret = arenas[choose];
*/ } else {
if (!!j == internal) { /* Initialize a new arena. */
ret = arena_get(tsd_tsdn(tsd), choose = first_null;
choose[j], false); ret = arena_init_locked(choose);
} if (ret == NULL) {
} else { malloc_mutex_unlock(&arenas_lock);
arena_t *arena; return (NULL);
/* Initialize a new arena. */
choose[j] = first_null;
arena = arena_init_locked(tsd_tsdn(tsd),
choose[j]);
if (arena == NULL) {
malloc_mutex_unlock(tsd_tsdn(tsd),
&arenas_lock);
return (NULL);
}
if (!!j == internal)
ret = arena;
} }
arena_bind(tsd, choose[j], !!j);
} }
malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); arena_bind_locked(tsd, choose);
malloc_mutex_unlock(&arenas_lock);
} else { } else {
ret = arena_get(tsd_tsdn(tsd), 0, false); ret = a0get();
arena_bind(tsd, 0, false); arena_bind(tsd, 0);
arena_bind(tsd, 0, true);
} }
return (ret); return (ret);
@@ -694,16 +645,6 @@ thread_deallocated_cleanup(tsd_t *tsd)
/* Do nothing. */ /* Do nothing. */
} }
void
iarena_cleanup(tsd_t *tsd)
{
arena_t *iarena;
iarena = tsd_iarena_get(tsd);
if (iarena != NULL)
arena_unbind(tsd, iarena->ind, true);
}
void void
arena_cleanup(tsd_t *tsd) arena_cleanup(tsd_t *tsd)
{ {
@@ -711,33 +652,30 @@ arena_cleanup(tsd_t *tsd)
arena = tsd_arena_get(tsd); arena = tsd_arena_get(tsd);
if (arena != NULL) if (arena != NULL)
arena_unbind(tsd, arena->ind, false); arena_unbind(tsd, arena->ind);
} }
void void
arenas_tdata_cleanup(tsd_t *tsd) arenas_cache_cleanup(tsd_t *tsd)
{ {
arena_tdata_t *arenas_tdata; arena_t **arenas_cache;
/* Prevent tsd->arenas_tdata from being (re)created. */ arenas_cache = tsd_arenas_cache_get(tsd);
*tsd_arenas_tdata_bypassp_get(tsd) = true; if (arenas_cache != NULL) {
tsd_arenas_cache_set(tsd, NULL);
arenas_tdata = tsd_arenas_tdata_get(tsd); a0dalloc(arenas_cache);
if (arenas_tdata != NULL) {
tsd_arenas_tdata_set(tsd, NULL);
a0dalloc(arenas_tdata);
} }
} }
void void
narenas_tdata_cleanup(tsd_t *tsd) narenas_cache_cleanup(tsd_t *tsd)
{ {
/* Do nothing. */ /* Do nothing. */
} }
void void
arenas_tdata_bypass_cleanup(tsd_t *tsd) arenas_cache_bypass_cleanup(tsd_t *tsd)
{ {
/* Do nothing. */ /* Do nothing. */
@@ -748,11 +686,8 @@ stats_print_atexit(void)
{ {
if (config_tcache && config_stats) { if (config_tcache && config_stats) {
tsdn_t *tsdn;
unsigned narenas, i; unsigned narenas, i;
tsdn = tsdn_fetch();
/* /*
* Merge stats from extant threads. This is racy, since * Merge stats from extant threads. This is racy, since
* individual threads do not lock when recording tcache stats * individual threads do not lock when recording tcache stats
@@ -761,7 +696,7 @@ stats_print_atexit(void)
* continue to allocate. * continue to allocate.
*/ */
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena = arena_get(tsdn, i, false); arena_t *arena = arenas[i];
if (arena != NULL) { if (arena != NULL) {
tcache_t *tcache; tcache_t *tcache;
@@ -771,11 +706,11 @@ stats_print_atexit(void)
* and bin locks in the opposite order, * and bin locks in the opposite order,
* deadlocks may result. * deadlocks may result.
*/ */
malloc_mutex_lock(tsdn, &arena->lock); malloc_mutex_lock(&arena->lock);
ql_foreach(tcache, &arena->tcache_ql, link) { ql_foreach(tcache, &arena->tcache_ql, link) {
tcache_stats_merge(tsdn, tcache, arena); tcache_stats_merge(tcache, arena);
} }
malloc_mutex_unlock(tsdn, &arena->lock); malloc_mutex_unlock(&arena->lock);
} }
} }
} }
@@ -812,20 +747,6 @@ malloc_ncpus(void)
SYSTEM_INFO si; SYSTEM_INFO si;
GetSystemInfo(&si); GetSystemInfo(&si);
result = si.dwNumberOfProcessors; result = si.dwNumberOfProcessors;
#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
/*
* glibc >= 2.6 has the CPU_COUNT macro.
*
* glibc's sysconf() uses isspace(). glibc allocates for the first time
* *before* setting up the isspace tables. Therefore we need a
* different method to get the number of CPUs.
*/
{
cpu_set_t set;
pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
result = CPU_COUNT(&set);
}
#else #else
result = sysconf(_SC_NPROCESSORS_ONLN); result = sysconf(_SC_NPROCESSORS_ONLN);
#endif #endif
@@ -917,26 +838,6 @@ malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
(int)vlen, v); (int)vlen, v);
} }
static void
malloc_slow_flag_init(void)
{
/*
* Combine the runtime options into malloc_slow for fast path. Called
* after processing all the options.
*/
malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
| (opt_junk_free ? flag_opt_junk_free : 0)
| (opt_quarantine ? flag_opt_quarantine : 0)
| (opt_zero ? flag_opt_zero : 0)
| (opt_utrace ? flag_opt_utrace : 0)
| (opt_xmalloc ? flag_opt_xmalloc : 0);
if (config_valgrind)
malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);
malloc_slow = (malloc_slow_flags != 0);
}
static void static void
malloc_conf_init(void) malloc_conf_init(void)
{ {
@@ -963,13 +864,10 @@ malloc_conf_init(void)
opt_tcache = false; opt_tcache = false;
} }
for (i = 0; i < 4; i++) { for (i = 0; i < 3; i++) {
/* Get runtime configuration. */ /* Get runtime configuration. */
switch (i) { switch (i) {
case 0: case 0:
opts = config_malloc_conf;
break;
case 1:
if (je_malloc_conf != NULL) { if (je_malloc_conf != NULL) {
/* /*
* Use options that were compiled into the * Use options that were compiled into the
@@ -982,8 +880,8 @@ malloc_conf_init(void)
opts = buf; opts = buf;
} }
break; break;
case 2: { case 1: {
ssize_t linklen = 0; int linklen = 0;
#ifndef _WIN32 #ifndef _WIN32
int saved_errno = errno; int saved_errno = errno;
const char *linkname = const char *linkname =
@@ -1009,7 +907,7 @@ malloc_conf_init(void)
buf[linklen] = '\0'; buf[linklen] = '\0';
opts = buf; opts = buf;
break; break;
} case 3: { } case 2: {
const char *envname = const char *envname =
#ifdef JEMALLOC_PREFIX #ifdef JEMALLOC_PREFIX
JEMALLOC_CPREFIX"MALLOC_CONF" JEMALLOC_CPREFIX"MALLOC_CONF"
@@ -1056,11 +954,7 @@ malloc_conf_init(void)
if (cont) \ if (cont) \
continue; \ continue; \
} }
#define CONF_MIN_no(um, min) false #define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
#define CONF_MIN_yes(um, min) ((um) < (min))
#define CONF_MAX_no(um, max) false
#define CONF_MAX_yes(um, max) ((um) > (max))
#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
if (CONF_MATCH(n)) { \ if (CONF_MATCH(n)) { \
uintmax_t um; \ uintmax_t um; \
char *end; \ char *end; \
@@ -1073,35 +967,24 @@ malloc_conf_init(void)
"Invalid conf value", \ "Invalid conf value", \
k, klen, v, vlen); \ k, klen, v, vlen); \
} else if (clip) { \ } else if (clip) { \
if (CONF_MIN_##check_min(um, \ if ((min) != 0 && um < (min)) \
(min))) \ o = (min); \
o = (t)(min); \ else if (um > (max)) \
else if (CONF_MAX_##check_max( \ o = (max); \
um, (max))) \
o = (t)(max); \
else \ else \
o = (t)um; \ o = um; \
} else { \ } else { \
if (CONF_MIN_##check_min(um, \ if (((min) != 0 && um < (min)) \
(min)) || \ || um > (max)) { \
CONF_MAX_##check_max(um, \
(max))) { \
malloc_conf_error( \ malloc_conf_error( \
"Out-of-range " \ "Out-of-range " \
"conf value", \ "conf value", \
k, klen, v, vlen); \ k, klen, v, vlen); \
} else \ } else \
o = (t)um; \ o = um; \
} \ } \
continue; \ continue; \
} }
#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
clip) \
CONF_HANDLE_T_U(unsigned, o, n, min, max, \
check_min, check_max, clip)
#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
CONF_HANDLE_T_U(size_t, o, n, min, max, \
check_min, check_max, clip)
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \ #define CONF_HANDLE_SSIZE_T(o, n, min, max) \
if (CONF_MATCH(n)) { \ if (CONF_MATCH(n)) { \
long l; \ long l; \
@@ -1144,7 +1027,7 @@ malloc_conf_init(void)
*/ */
CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE + CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1), LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
(sizeof(size_t) << 3) - 1, yes, yes, true) (sizeof(size_t) << 3) - 1, true)
if (strncmp("dss", k, klen) == 0) { if (strncmp("dss", k, klen) == 0) {
int i; int i;
bool match = false; bool match = false;
@@ -1169,47 +1052,17 @@ malloc_conf_init(void)
} }
continue; continue;
} }
CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
UINT_MAX, yes, no, false) SIZE_T_MAX, false)
if (strncmp("purge", k, klen) == 0) {
int i;
bool match = false;
for (i = 0; i < purge_mode_limit; i++) {
if (strncmp(purge_mode_names[i], v,
vlen) == 0) {
opt_purge = (purge_mode_t)i;
match = true;
break;
}
}
if (!match) {
malloc_conf_error("Invalid conf value",
k, klen, v, vlen);
}
continue;
}
CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult", CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
-1, (sizeof(size_t) << 3) - 1) -1, (sizeof(size_t) << 3) - 1)
CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
NSTIME_SEC_MAX);
CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true) CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
if (config_fill) { if (config_fill) {
if (CONF_MATCH("junk")) { if (CONF_MATCH("junk")) {
if (CONF_MATCH_VALUE("true")) { if (CONF_MATCH_VALUE("true")) {
if (config_valgrind && opt_junk = "true";
unlikely(in_valgrind)) { opt_junk_alloc = opt_junk_free =
malloc_conf_error( true;
"Deallocation-time "
"junk filling cannot "
"be enabled while "
"running inside "
"Valgrind", k, klen, v,
vlen);
} else {
opt_junk = "true";
opt_junk_alloc = true;
opt_junk_free = true;
}
} else if (CONF_MATCH_VALUE("false")) { } else if (CONF_MATCH_VALUE("false")) {
opt_junk = "false"; opt_junk = "false";
opt_junk_alloc = opt_junk_free = opt_junk_alloc = opt_junk_free =
@@ -1219,20 +1072,9 @@ malloc_conf_init(void)
opt_junk_alloc = true; opt_junk_alloc = true;
opt_junk_free = false; opt_junk_free = false;
} else if (CONF_MATCH_VALUE("free")) { } else if (CONF_MATCH_VALUE("free")) {
if (config_valgrind && opt_junk = "free";
unlikely(in_valgrind)) { opt_junk_alloc = false;
malloc_conf_error( opt_junk_free = true;
"Deallocation-time "
"junk filling cannot "
"be enabled while "
"running inside "
"Valgrind", k, klen, v,
vlen);
} else {
opt_junk = "free";
opt_junk_alloc = false;
opt_junk_free = true;
}
} else { } else {
malloc_conf_error( malloc_conf_error(
"Invalid conf value", k, "Invalid conf value", k,
@@ -1241,7 +1083,7 @@ malloc_conf_init(void)
continue; continue;
} }
CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine", CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
0, SIZE_T_MAX, no, no, false) 0, SIZE_T_MAX, false)
CONF_HANDLE_BOOL(opt_redzone, "redzone", true) CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
CONF_HANDLE_BOOL(opt_zero, "zero", true) CONF_HANDLE_BOOL(opt_zero, "zero", true)
} }
@@ -1278,8 +1120,8 @@ malloc_conf_init(void)
CONF_HANDLE_BOOL(opt_prof_thread_active_init, CONF_HANDLE_BOOL(opt_prof_thread_active_init,
"prof_thread_active_init", true) "prof_thread_active_init", true)
CONF_HANDLE_SIZE_T(opt_lg_prof_sample, CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
"lg_prof_sample", 0, (sizeof(uint64_t) << 3) "lg_prof_sample", 0,
- 1, no, yes, true) (sizeof(uint64_t) << 3) - 1, true)
CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum", CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
true) true)
CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
@@ -1295,14 +1137,7 @@ malloc_conf_init(void)
malloc_conf_error("Invalid conf pair", k, klen, v, malloc_conf_error("Invalid conf pair", k, klen, v,
vlen); vlen);
#undef CONF_MATCH #undef CONF_MATCH
#undef CONF_MATCH_VALUE
#undef CONF_HANDLE_BOOL #undef CONF_HANDLE_BOOL
#undef CONF_MIN_no
#undef CONF_MIN_yes
#undef CONF_MAX_no
#undef CONF_MAX_yes
#undef CONF_HANDLE_T_U
#undef CONF_HANDLE_UNSIGNED
#undef CONF_HANDLE_SIZE_T #undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T #undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P #undef CONF_HANDLE_CHAR_P
@@ -1310,6 +1145,7 @@ malloc_conf_init(void)
} }
} }
/* init_lock must be held. */
static bool static bool
malloc_init_hard_needed(void) malloc_init_hard_needed(void)
{ {
@@ -1325,14 +1161,11 @@ malloc_init_hard_needed(void)
} }
#ifdef JEMALLOC_THREADED_INIT #ifdef JEMALLOC_THREADED_INIT
if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
spin_t spinner;
/* Busy-wait until the initializing thread completes. */ /* Busy-wait until the initializing thread completes. */
spin_init(&spinner);
do { do {
malloc_mutex_unlock(TSDN_NULL, &init_lock); malloc_mutex_unlock(&init_lock);
spin_adaptive(&spinner); CPU_SPINWAIT;
malloc_mutex_lock(TSDN_NULL, &init_lock); malloc_mutex_lock(&init_lock);
} while (!malloc_initialized()); } while (!malloc_initialized());
return (false); return (false);
} }
@@ -1340,8 +1173,9 @@ malloc_init_hard_needed(void)
return (true); return (true);
} }
/* init_lock must be held. */
static bool static bool
malloc_init_hard_a0_locked() malloc_init_hard_a0_locked(void)
{ {
malloc_initializer = INITIALIZER; malloc_initializer = INITIALIZER;
@@ -1357,7 +1191,6 @@ malloc_init_hard_a0_locked()
abort(); abort();
} }
} }
pages_boot();
if (base_boot()) if (base_boot())
return (true); return (true);
if (chunk_boot()) if (chunk_boot())
@@ -1366,28 +1199,26 @@ malloc_init_hard_a0_locked()
return (true); return (true);
if (config_prof) if (config_prof)
prof_boot1(); prof_boot1();
arena_boot(); if (arena_boot())
if (config_tcache && tcache_boot(TSDN_NULL)) return (true);
if (config_tcache && tcache_boot())
return (true); return (true);
if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS)) if (malloc_mutex_init(&arenas_lock))
return (true); return (true);
/* /*
* Create enough scaffolding to allow recursive allocation in * Create enough scaffolding to allow recursive allocation in
* malloc_ncpus(). * malloc_ncpus().
*/ */
narenas_auto = 1; narenas_total = narenas_auto = 1;
narenas_total_set(narenas_auto);
arenas = &a0; arenas = &a0;
memset(arenas, 0, sizeof(arena_t *) * narenas_auto); memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
/* /*
* Initialize one arena here. The rest are lazily created in * Initialize one arena here. The rest are lazily created in
* arena_choose_hard(). * arena_choose_hard().
*/ */
if (arena_init(TSDN_NULL, 0) == NULL) if (arena_init(0) == NULL)
return (true); return (true);
malloc_init_state = malloc_init_a0_initialized; malloc_init_state = malloc_init_a0_initialized;
return (false); return (false);
} }
@@ -1396,42 +1227,45 @@ malloc_init_hard_a0(void)
{ {
bool ret; bool ret;
malloc_mutex_lock(TSDN_NULL, &init_lock); malloc_mutex_lock(&init_lock);
ret = malloc_init_hard_a0_locked(); ret = malloc_init_hard_a0_locked();
malloc_mutex_unlock(TSDN_NULL, &init_lock); malloc_mutex_unlock(&init_lock);
return (ret); return (ret);
} }
/* Initialize data structures which may trigger recursive allocation. */ /*
static bool * Initialize data structures which may trigger recursive allocation.
*
* init_lock must be held.
*/
static void
malloc_init_hard_recursible(void) malloc_init_hard_recursible(void)
{ {
malloc_init_state = malloc_init_recursible; malloc_init_state = malloc_init_recursible;
malloc_mutex_unlock(&init_lock);
ncpus = malloc_ncpus(); ncpus = malloc_ncpus();
#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \ #if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
&& !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \ && !defined(_WIN32) && !defined(__native_client__))
!defined(__native_client__)) /* LinuxThreads's pthread_atfork() allocates. */
/* LinuxThreads' pthread_atfork() allocates. */
if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
jemalloc_postfork_child) != 0) { jemalloc_postfork_child) != 0) {
malloc_write("<jemalloc>: Error in pthread_atfork()\n"); malloc_write("<jemalloc>: Error in pthread_atfork()\n");
if (opt_abort) if (opt_abort)
abort(); abort();
return (true);
} }
#endif #endif
malloc_mutex_lock(&init_lock);
return (false);
} }
/* init_lock must be held. */
static bool static bool
malloc_init_hard_finish(tsdn_t *tsdn) malloc_init_hard_finish(void)
{ {
if (malloc_mutex_boot()) if (mutex_boot())
return (true); return (true);
if (opt_narenas == 0) { if (opt_narenas == 0) {
@@ -1446,69 +1280,68 @@ malloc_init_hard_finish(tsdn_t *tsdn)
} }
narenas_auto = opt_narenas; narenas_auto = opt_narenas;
/* /*
* Limit the number of arenas to the indexing range of MALLOCX_ARENA(). * Make sure that the arenas array can be allocated. In practice, this
* limit is enough to allow the allocator to function, but the ctl
* machinery will fail to allocate memory at far lower limits.
*/ */
if (narenas_auto > MALLOCX_ARENA_MAX) { if (narenas_auto > chunksize / sizeof(arena_t *)) {
narenas_auto = MALLOCX_ARENA_MAX; narenas_auto = chunksize / sizeof(arena_t *);
malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n", malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
narenas_auto); narenas_auto);
} }
narenas_total_set(narenas_auto); narenas_total = narenas_auto;
/* Allocate and initialize arenas. */ /* Allocate and initialize arenas. */
arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) * arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
(MALLOCX_ARENA_MAX+1));
if (arenas == NULL) if (arenas == NULL)
return (true); return (true);
/*
* Zero the array. In practice, this should always be pre-zeroed,
* since it was just mmap()ed, but let's be sure.
*/
memset(arenas, 0, sizeof(arena_t *) * narenas_total);
/* Copy the pointer to the one arena that was already initialized. */ /* Copy the pointer to the one arena that was already initialized. */
arena_set(0, a0); arenas[0] = a0;
malloc_init_state = malloc_init_initialized; malloc_init_state = malloc_init_initialized;
malloc_slow_flag_init();
return (false); return (false);
} }
static bool static bool
malloc_init_hard(void) malloc_init_hard(void)
{ {
tsd_t *tsd;
#if defined(_WIN32) && _WIN32_WINNT < 0x0600 #if defined(_WIN32) && _WIN32_WINNT < 0x0600
_init_init_lock(); _init_init_lock();
#endif #endif
malloc_mutex_lock(TSDN_NULL, &init_lock); malloc_mutex_lock(&init_lock);
if (!malloc_init_hard_needed()) { if (!malloc_init_hard_needed()) {
malloc_mutex_unlock(TSDN_NULL, &init_lock); malloc_mutex_unlock(&init_lock);
return (false); return (false);
} }
if (malloc_init_state != malloc_init_a0_initialized && if (malloc_init_state != malloc_init_a0_initialized &&
malloc_init_hard_a0_locked()) { malloc_init_hard_a0_locked()) {
malloc_mutex_unlock(TSDN_NULL, &init_lock); malloc_mutex_unlock(&init_lock);
return (true); return (true);
} }
if (malloc_tsd_boot0()) {
malloc_mutex_unlock(TSDN_NULL, &init_lock); malloc_mutex_unlock(&init_lock);
/* Recursive allocation relies on functional tsd. */
tsd = malloc_tsd_boot0();
if (tsd == NULL)
return (true); return (true);
if (malloc_init_hard_recursible()) }
return (true); if (config_prof && prof_boot2()) {
malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); malloc_mutex_unlock(&init_lock);
if (config_prof && prof_boot2(tsd)) {
malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
return (true); return (true);
} }
if (malloc_init_hard_finish(tsd_tsdn(tsd))) { malloc_init_hard_recursible();
malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
if (malloc_init_hard_finish()) {
malloc_mutex_unlock(&init_lock);
return (true); return (true);
} }
malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); malloc_mutex_unlock(&init_lock);
malloc_tsd_boot1(); malloc_tsd_boot1();
return (false); return (false);
} }
...@@ -1522,104 +1355,61 @@ malloc_init_hard(void) ...@@ -1522,104 +1355,61 @@ malloc_init_hard(void)
*/ */
static void * static void *
ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero, imalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
prof_tctx_t *tctx, bool slow_path)
{ {
void *p; void *p;
if (tctx == NULL) if (tctx == NULL)
return (NULL); return (NULL);
if (usize <= SMALL_MAXCLASS) { if (usize <= SMALL_MAXCLASS) {
szind_t ind_large = size2index(LARGE_MINCLASS); p = imalloc(tsd, LARGE_MINCLASS);
p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path);
if (p == NULL) if (p == NULL)
return (NULL); return (NULL);
arena_prof_promoted(tsd_tsdn(tsd), p, usize); arena_prof_promoted(p, usize);
} else } else
p = ialloc(tsd, usize, ind, zero, slow_path); p = imalloc(tsd, usize);
return (p); return (p);
} }
JEMALLOC_ALWAYS_INLINE_C void * JEMALLOC_ALWAYS_INLINE_C void *
ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path) imalloc_prof(tsd_t *tsd, size_t usize)
{ {
void *p; void *p;
prof_tctx_t *tctx; prof_tctx_t *tctx;
tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
p = ialloc_prof_sample(tsd, usize, ind, zero, tctx, slow_path); p = imalloc_prof_sample(tsd, usize, tctx);
else else
p = ialloc(tsd, usize, ind, zero, slow_path); p = imalloc(tsd, usize);
if (unlikely(p == NULL)) { if (unlikely(p == NULL)) {
prof_alloc_rollback(tsd, tctx, true); prof_alloc_rollback(tsd, tctx, true);
return (NULL); return (NULL);
} }
prof_malloc(tsd_tsdn(tsd), p, usize, tctx); prof_malloc(p, usize, tctx);
return (p); return (p);
} }
/*
* ialloc_body() is inlined so that fast and slow paths are generated separately
* with statically known slow_path.
*
* This function guarantees that *tsdn is non-NULL on success.
*/
JEMALLOC_ALWAYS_INLINE_C void * JEMALLOC_ALWAYS_INLINE_C void *
ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize, imalloc_body(size_t size, tsd_t **tsd, size_t *usize)
bool slow_path)
{ {
tsd_t *tsd;
szind_t ind;
if (slow_path && unlikely(malloc_init())) {
*tsdn = NULL;
return (NULL);
}
tsd = tsd_fetch(); if (unlikely(malloc_init()))
*tsdn = tsd_tsdn(tsd);
witness_assert_lockless(tsd_tsdn(tsd));
ind = size2index(size);
if (unlikely(ind >= NSIZES))
return (NULL); return (NULL);
*tsd = tsd_fetch();
if (config_stats || (config_prof && opt_prof) || (slow_path && if (config_prof && opt_prof) {
config_valgrind && unlikely(in_valgrind))) { *usize = s2u(size);
*usize = index2size(ind); if (unlikely(*usize == 0))
assert(*usize > 0 && *usize <= HUGE_MAXCLASS); return (NULL);
return (imalloc_prof(*tsd, *usize));
} }
if (config_prof && opt_prof) if (config_stats || (config_valgrind && unlikely(in_valgrind)))
return (ialloc_prof(tsd, *usize, ind, zero, slow_path)); *usize = s2u(size);
return (imalloc(*tsd, size));
return (ialloc(tsd, size, ind, zero, slow_path));
}
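
The comment before ialloc_body() above describes a specialization trick: the body is force-inlined and slow_path is a compile-time constant at every call site, so the compiler emits separate fast and slow copies and the fast path carries none of the slow-path checks. A minimal standalone sketch of that pattern, assuming a GCC/Clang-style always-inline attribute (names here are illustrative, not jemalloc's):

/*
 * Illustrative sketch (not jemalloc's code) of the fast/slow path
 * specialization described above: slow_path is constant at each call
 * site, so each caller gets a copy with the branch folded away.
 */
#include <stdlib.h>

__attribute__((always_inline)) static inline void *
alloc_body(size_t size, int slow_path)
{
	if (slow_path) {
		/* Lazy initialization, tracing, error reporting, etc. */
		if (size == 0)
			size = 1;
	}
	return (malloc(size));
}

void *
alloc_fast(size_t size)
{
	return (alloc_body(size, 0));
}

void *
alloc_slow(size_t size)
{
	return (alloc_body(size, 1));
}
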
JEMALLOC_ALWAYS_INLINE_C void
ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func,
bool update_errno, bool slow_path)
{
assert(!tsdn_null(tsdn) || ret == NULL);
if (unlikely(ret == NULL)) {
if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) {
malloc_printf("<jemalloc>: Error in %s(): out of "
"memory\n", func);
abort();
}
if (update_errno)
set_errno(ENOMEM);
}
if (config_stats && likely(ret != NULL)) {
assert(usize == isalloc(tsdn, ret, config_prof));
*tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize;
}
witness_assert_lockless(tsdn);
} }
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
...@@ -1628,22 +1418,27 @@ JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) ...@@ -1628,22 +1418,27 @@ JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_malloc(size_t size) je_malloc(size_t size)
{ {
void *ret; void *ret;
tsdn_t *tsdn; tsd_t *tsd;
size_t usize JEMALLOC_CC_SILENCE_INIT(0); size_t usize JEMALLOC_CC_SILENCE_INIT(0);
if (size == 0) if (size == 0)
size = 1; size = 1;
if (likely(!malloc_slow)) { ret = imalloc_body(size, &tsd, &usize);
ret = ialloc_body(size, false, &tsdn, &usize, false); if (unlikely(ret == NULL)) {
ialloc_post_check(ret, tsdn, usize, "malloc", true, false); if (config_xmalloc && unlikely(opt_xmalloc)) {
} else { malloc_write("<jemalloc>: Error in malloc(): "
ret = ialloc_body(size, false, &tsdn, &usize, true); "out of memory\n");
ialloc_post_check(ret, tsdn, usize, "malloc", true, true); abort();
UTRACE(0, size, ret); }
JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false); set_errno(ENOMEM);
} }
if (config_stats && likely(ret != NULL)) {
assert(usize == isalloc(ret, config_prof));
*tsd_thread_allocatedp_get(tsd) += usize;
}
UTRACE(0, size, ret);
JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
return (ret); return (ret);
} }
...@@ -1660,7 +1455,7 @@ imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize, ...@@ -1660,7 +1455,7 @@ imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
p = ipalloc(tsd, LARGE_MINCLASS, alignment, false); p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
if (p == NULL) if (p == NULL)
return (NULL); return (NULL);
arena_prof_promoted(tsd_tsdn(tsd), p, usize); arena_prof_promoted(p, usize);
} else } else
p = ipalloc(tsd, usize, alignment, false); p = ipalloc(tsd, usize, alignment, false);
...@@ -1682,7 +1477,7 @@ imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize) ...@@ -1682,7 +1477,7 @@ imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
prof_alloc_rollback(tsd, tctx, true); prof_alloc_rollback(tsd, tctx, true);
return (NULL); return (NULL);
} }
prof_malloc(tsd_tsdn(tsd), p, usize, tctx); prof_malloc(p, usize, tctx);
return (p); return (p);
} }
...@@ -1699,12 +1494,10 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment) ...@@ -1699,12 +1494,10 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
assert(min_alignment != 0); assert(min_alignment != 0);
if (unlikely(malloc_init())) { if (unlikely(malloc_init())) {
tsd = NULL;
result = NULL; result = NULL;
goto label_oom; goto label_oom;
} }
tsd = tsd_fetch(); tsd = tsd_fetch();
witness_assert_lockless(tsd_tsdn(tsd));
if (size == 0) if (size == 0)
size = 1; size = 1;
...@@ -1722,7 +1515,7 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment) ...@@ -1722,7 +1515,7 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
} }
usize = sa2u(size, alignment); usize = sa2u(size, alignment);
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) { if (unlikely(usize == 0)) {
result = NULL; result = NULL;
goto label_oom; goto label_oom;
} }
...@@ -1739,13 +1532,10 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment) ...@@ -1739,13 +1532,10 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
ret = 0; ret = 0;
label_return: label_return:
if (config_stats && likely(result != NULL)) { if (config_stats && likely(result != NULL)) {
assert(usize == isalloc(tsd_tsdn(tsd), result, config_prof)); assert(usize == isalloc(result, config_prof));
*tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_allocatedp_get(tsd) += usize;
} }
UTRACE(0, size, result); UTRACE(0, size, result);
JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize,
false);
witness_assert_lockless(tsd_tsdn(tsd));
return (ret); return (ret);
label_oom: label_oom:
assert(result == NULL); assert(result == NULL);
...@@ -1755,7 +1545,6 @@ label_oom: ...@@ -1755,7 +1545,6 @@ label_oom:
abort(); abort();
} }
ret = ENOMEM; ret = ENOMEM;
witness_assert_lockless(tsd_tsdn(tsd));
goto label_return; goto label_return;
} }
...@@ -1763,10 +1552,9 @@ JEMALLOC_EXPORT int JEMALLOC_NOTHROW ...@@ -1763,10 +1552,9 @@ JEMALLOC_EXPORT int JEMALLOC_NOTHROW
JEMALLOC_ATTR(nonnull(1)) JEMALLOC_ATTR(nonnull(1))
je_posix_memalign(void **memptr, size_t alignment, size_t size) je_posix_memalign(void **memptr, size_t alignment, size_t size)
{ {
int ret; int ret = imemalign(memptr, alignment, size, sizeof(void *));
JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
ret = imemalign(memptr, alignment, size, sizeof(void *)); config_prof), false);
return (ret); return (ret);
} }
...@@ -1782,45 +1570,114 @@ je_aligned_alloc(size_t alignment, size_t size) ...@@ -1782,45 +1570,114 @@ je_aligned_alloc(size_t alignment, size_t size)
ret = NULL; ret = NULL;
set_errno(err); set_errno(err);
} }
JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
false);
return (ret); return (ret);
} }
static void *
icalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{
void *p;
if (tctx == NULL)
return (NULL);
if (usize <= SMALL_MAXCLASS) {
p = icalloc(tsd, LARGE_MINCLASS);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else
p = icalloc(tsd, usize);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
icalloc_prof(tsd_t *tsd, size_t usize)
{
void *p;
prof_tctx_t *tctx;
tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
p = icalloc_prof_sample(tsd, usize, tctx);
else
p = icalloc(tsd, usize);
if (unlikely(p == NULL)) {
prof_alloc_rollback(tsd, tctx, true);
return (NULL);
}
prof_malloc(p, usize, tctx);
return (p);
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW * void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
je_calloc(size_t num, size_t size) je_calloc(size_t num, size_t size)
{ {
void *ret; void *ret;
tsdn_t *tsdn; tsd_t *tsd;
size_t num_size; size_t num_size;
size_t usize JEMALLOC_CC_SILENCE_INIT(0); size_t usize JEMALLOC_CC_SILENCE_INIT(0);
if (unlikely(malloc_init())) {
num_size = 0;
ret = NULL;
goto label_return;
}
tsd = tsd_fetch();
num_size = num * size; num_size = num * size;
if (unlikely(num_size == 0)) { if (unlikely(num_size == 0)) {
if (num == 0 || size == 0) if (num == 0 || size == 0)
num_size = 1; num_size = 1;
else else {
num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */ ret = NULL;
goto label_return;
}
/* /*
* Try to avoid division here. We know that it isn't possible to * Try to avoid division here. We know that it isn't possible to
* overflow during multiplication if neither operand uses any of the * overflow during multiplication if neither operand uses any of the
* most significant half of the bits in a size_t. * most significant half of the bits in a size_t.
*/ */
} else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) << } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
2))) && (num_size / size != num))) 2))) && (num_size / size != num))) {
num_size = HUGE_MAXCLASS + 1; /* size_t overflow. */ /* size_t overflow. */
ret = NULL;
goto label_return;
}
if (likely(!malloc_slow)) { if (config_prof && opt_prof) {
ret = ialloc_body(num_size, true, &tsdn, &usize, false); usize = s2u(num_size);
ialloc_post_check(ret, tsdn, usize, "calloc", true, false); if (unlikely(usize == 0)) {
ret = NULL;
goto label_return;
}
ret = icalloc_prof(tsd, usize);
} else { } else {
ret = ialloc_body(num_size, true, &tsdn, &usize, true); if (config_stats || (config_valgrind && unlikely(in_valgrind)))
ialloc_post_check(ret, tsdn, usize, "calloc", true, true); usize = s2u(num_size);
UTRACE(0, num_size, ret); ret = icalloc(tsd, num_size);
JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true);
} }
label_return:
if (unlikely(ret == NULL)) {
if (config_xmalloc && unlikely(opt_xmalloc)) {
malloc_write("<jemalloc>: Error in calloc(): out of "
"memory\n");
abort();
}
set_errno(ENOMEM);
}
if (config_stats && likely(ret != NULL)) {
assert(usize == isalloc(ret, config_prof));
*tsd_thread_allocatedp_get(tsd) += usize;
}
UTRACE(0, num_size, ret);
JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
return (ret); return (ret);
} }
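
The overflow test in je_calloc above is worth unpacking: a division is only paid when the product could actually wrap, i.e. when num or size has bits set in the upper half of size_t. A standalone sketch of the same check (illustrative helper, not part of jemalloc):

/*
 * Standalone sketch of je_calloc's overflow test. If neither operand
 * uses the upper half of size_t's bits, num * size cannot wrap, so the
 * division is only needed in the rare high-bits case.
 */
#include <stddef.h>
#include <stdint.h>

static int
checked_mul(size_t num, size_t size, size_t *prod)
{
	const size_t high_half = SIZE_MAX << (sizeof(size_t) << 2);

	*prod = num * size;
	if (((num | size) & high_half) != 0 && size != 0 &&
	    *prod / size != num)
		return (1);	/* overflow */
	return (0);
}
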
...@@ -1836,7 +1693,7 @@ irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, ...@@ -1836,7 +1693,7 @@ irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false); p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
if (p == NULL) if (p == NULL)
return (NULL); return (NULL);
arena_prof_promoted(tsd_tsdn(tsd), p, usize); arena_prof_promoted(p, usize);
} else } else
p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
...@@ -1851,7 +1708,7 @@ irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize) ...@@ -1851,7 +1708,7 @@ irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
prof_tctx_t *old_tctx, *tctx; prof_tctx_t *old_tctx, *tctx;
prof_active = prof_active_get_unlocked(); prof_active = prof_active_get_unlocked();
old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr); old_tctx = prof_tctx_get(old_ptr);
tctx = prof_alloc_prep(tsd, usize, prof_active, true); tctx = prof_alloc_prep(tsd, usize, prof_active, true);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx); p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
...@@ -1868,41 +1725,32 @@ irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize) ...@@ -1868,41 +1725,32 @@ irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
} }
JEMALLOC_INLINE_C void JEMALLOC_INLINE_C void
ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) ifree(tsd_t *tsd, void *ptr, tcache_t *tcache)
{ {
size_t usize; size_t usize;
UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
witness_assert_lockless(tsd_tsdn(tsd));
assert(ptr != NULL); assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER); assert(malloc_initialized() || IS_INITIALIZER);
if (config_prof && opt_prof) { if (config_prof && opt_prof) {
usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); usize = isalloc(ptr, config_prof);
prof_free(tsd, ptr, usize); prof_free(tsd, ptr, usize);
} else if (config_stats || config_valgrind) } else if (config_stats || config_valgrind)
usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); usize = isalloc(ptr, config_prof);
if (config_stats) if (config_stats)
*tsd_thread_deallocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += usize;
if (config_valgrind && unlikely(in_valgrind))
if (likely(!slow_path)) rzsize = p2rz(ptr);
iqalloc(tsd, ptr, tcache, false); iqalloc(tsd, ptr, tcache);
else { JEMALLOC_VALGRIND_FREE(ptr, rzsize);
if (config_valgrind && unlikely(in_valgrind))
rzsize = p2rz(tsd_tsdn(tsd), ptr);
iqalloc(tsd, ptr, tcache, true);
JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
} }
JEMALLOC_INLINE_C void JEMALLOC_INLINE_C void
isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache)
{ {
UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
witness_assert_lockless(tsd_tsdn(tsd));
assert(ptr != NULL); assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER); assert(malloc_initialized() || IS_INITIALIZER);
...@@ -1911,8 +1759,8 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) ...@@ -1911,8 +1759,8 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
if (config_stats) if (config_stats)
*tsd_thread_deallocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += usize;
if (config_valgrind && unlikely(in_valgrind)) if (config_valgrind && unlikely(in_valgrind))
rzsize = p2rz(tsd_tsdn(tsd), ptr); rzsize = p2rz(ptr);
isqalloc(tsd, ptr, usize, tcache, slow_path); isqalloc(tsd, ptr, usize, tcache);
JEMALLOC_VALGRIND_FREE(ptr, rzsize); JEMALLOC_VALGRIND_FREE(ptr, rzsize);
} }
...@@ -1922,57 +1770,44 @@ JEMALLOC_ALLOC_SIZE(2) ...@@ -1922,57 +1770,44 @@ JEMALLOC_ALLOC_SIZE(2)
je_realloc(void *ptr, size_t size) je_realloc(void *ptr, size_t size)
{ {
void *ret; void *ret;
tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL);
size_t usize JEMALLOC_CC_SILENCE_INIT(0); size_t usize JEMALLOC_CC_SILENCE_INIT(0);
size_t old_usize = 0; size_t old_usize = 0;
UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
if (unlikely(size == 0)) { if (unlikely(size == 0)) {
if (ptr != NULL) { if (ptr != NULL) {
tsd_t *tsd;
/* realloc(ptr, 0) is equivalent to free(ptr). */ /* realloc(ptr, 0) is equivalent to free(ptr). */
UTRACE(ptr, 0, 0); UTRACE(ptr, 0, 0);
tsd = tsd_fetch(); tsd = tsd_fetch();
ifree(tsd, ptr, tcache_get(tsd, false), true); ifree(tsd, ptr, tcache_get(tsd, false));
return (NULL); return (NULL);
} }
size = 1; size = 1;
} }
if (likely(ptr != NULL)) { if (likely(ptr != NULL)) {
tsd_t *tsd;
assert(malloc_initialized() || IS_INITIALIZER); assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init(); malloc_thread_init();
tsd = tsd_fetch(); tsd = tsd_fetch();
witness_assert_lockless(tsd_tsdn(tsd)); old_usize = isalloc(ptr, config_prof);
if (config_valgrind && unlikely(in_valgrind))
old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
if (config_valgrind && unlikely(in_valgrind)) {
old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) :
u2rz(old_usize);
}
if (config_prof && opt_prof) { if (config_prof && opt_prof) {
usize = s2u(size); usize = s2u(size);
ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ? ret = unlikely(usize == 0) ? NULL : irealloc_prof(tsd,
NULL : irealloc_prof(tsd, ptr, old_usize, usize); ptr, old_usize, usize);
} else { } else {
if (config_stats || (config_valgrind && if (config_stats || (config_valgrind &&
unlikely(in_valgrind))) unlikely(in_valgrind)))
usize = s2u(size); usize = s2u(size);
ret = iralloc(tsd, ptr, old_usize, size, 0, false); ret = iralloc(tsd, ptr, old_usize, size, 0, false);
} }
tsdn = tsd_tsdn(tsd);
} else { } else {
/* realloc(NULL, size) is equivalent to malloc(size). */ /* realloc(NULL, size) is equivalent to malloc(size). */
if (likely(!malloc_slow)) ret = imalloc_body(size, &tsd, &usize);
ret = ialloc_body(size, false, &tsdn, &usize, false);
else
ret = ialloc_body(size, false, &tsdn, &usize, true);
assert(!tsdn_null(tsdn) || ret == NULL);
} }
if (unlikely(ret == NULL)) { if (unlikely(ret == NULL)) {
...@@ -1984,17 +1819,13 @@ je_realloc(void *ptr, size_t size) ...@@ -1984,17 +1819,13 @@ je_realloc(void *ptr, size_t size)
set_errno(ENOMEM); set_errno(ENOMEM);
} }
if (config_stats && likely(ret != NULL)) { if (config_stats && likely(ret != NULL)) {
tsd_t *tsd; assert(usize == isalloc(ret, config_prof));
assert(usize == isalloc(tsdn, ret, config_prof));
tsd = tsdn_tsd(tsdn);
*tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_allocatedp_get(tsd) += usize;
*tsd_thread_deallocatedp_get(tsd) += old_usize; *tsd_thread_deallocatedp_get(tsd) += old_usize;
} }
UTRACE(ptr, size, ret); UTRACE(ptr, size, ret);
JEMALLOC_VALGRIND_REALLOC(maybe, tsdn, ret, usize, maybe, ptr, JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
old_usize, old_rzsize, maybe, false); old_rzsize, true, false);
witness_assert_lockless(tsdn);
return (ret); return (ret);
} }
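
For reference, the two special cases handled at the top of je_realloc look like this from the caller's side (hypothetical snippet; the realloc(ptr, 0) behavior is what this implementation documents above, other allocators may differ):

/*
 * Caller-side view of the special cases commented above (hypothetical
 * snippet, not part of jemalloc).
 */
#include <stdlib.h>

void
realloc_edge_cases(void)
{
	void *p;

	/* realloc(NULL, size) is equivalent to malloc(size). */
	p = realloc(NULL, 64);

	/* realloc(ptr, 0) frees ptr and returns NULL here. */
	p = realloc(p, 0);
	(void)p;
}
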
...@@ -2005,12 +1836,7 @@ je_free(void *ptr) ...@@ -2005,12 +1836,7 @@ je_free(void *ptr)
UTRACE(ptr, 0, 0); UTRACE(ptr, 0, 0);
if (likely(ptr != NULL)) { if (likely(ptr != NULL)) {
tsd_t *tsd = tsd_fetch(); tsd_t *tsd = tsd_fetch();
witness_assert_lockless(tsd_tsdn(tsd)); ifree(tsd, ptr, tcache_get(tsd, false));
if (likely(!malloc_slow))
ifree(tsd, ptr, tcache_get(tsd, false), false);
else
ifree(tsd, ptr, tcache_get(tsd, false), true);
witness_assert_lockless(tsd_tsdn(tsd));
} }
} }
...@@ -2031,6 +1857,7 @@ je_memalign(size_t alignment, size_t size) ...@@ -2031,6 +1857,7 @@ je_memalign(size_t alignment, size_t size)
void *ret JEMALLOC_CC_SILENCE_INIT(NULL); void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
if (unlikely(imemalign(&ret, alignment, size, 1) != 0)) if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
ret = NULL; ret = NULL;
JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
return (ret); return (ret);
} }
#endif #endif
...@@ -2044,6 +1871,7 @@ je_valloc(size_t size) ...@@ -2044,6 +1871,7 @@ je_valloc(size_t size)
void *ret JEMALLOC_CC_SILENCE_INIT(NULL); void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
if (unlikely(imemalign(&ret, PAGE, size, 1) != 0)) if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
ret = NULL; ret = NULL;
JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
return (ret); return (ret);
} }
#endif #endif
...@@ -2073,29 +1901,6 @@ JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; ...@@ -2073,29 +1901,6 @@ JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
je_memalign; je_memalign;
# endif # endif
#ifdef CPU_COUNT
/*
* To enable static linking with glibc, the libc specific malloc interface must
* be implemented also, so none of glibc's malloc.o functions are added to the
* link.
*/
#define ALIAS(je_fn) __attribute__((alias (#je_fn), used))
/* To force macro expansion of je_ prefix before stringification. */
#define PREALIAS(je_fn) ALIAS(je_fn)
void *__libc_malloc(size_t size) PREALIAS(je_malloc);
void __libc_free(void* ptr) PREALIAS(je_free);
void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
void *__libc_valloc(size_t size) PREALIAS(je_valloc);
int __posix_memalign(void** r, size_t a, size_t s)
PREALIAS(je_posix_memalign);
#undef PREALIAS
#undef ALIAS
#endif
#endif #endif
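
The __libc_* block above leans on the compiler's alias attribute so that glibc's entry points resolve directly to the je_ definitions with no wrapper call. A toy example of that mechanism, assuming GCC or Clang on an ELF target (the names below are made up):

/*
 * Toy sketch of the alias technique used above (assumption: GCC/Clang,
 * ELF). Both symbols resolve to the same definition.
 */
#include <stdlib.h>

void *
my_malloc(size_t size)
{
	return (malloc(size));
}

void *libc_style_alias(size_t size)
    __attribute__((alias ("my_malloc"), used));
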
/* /*
...@@ -2107,7 +1912,7 @@ int __posix_memalign(void** r, size_t a, size_t s) ...@@ -2107,7 +1912,7 @@ int __posix_memalign(void** r, size_t a, size_t s)
*/ */
JEMALLOC_ALWAYS_INLINE_C bool JEMALLOC_ALWAYS_INLINE_C bool
imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize, imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena) size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
{ {
...@@ -2118,8 +1923,7 @@ imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize, ...@@ -2118,8 +1923,7 @@ imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
*alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
*usize = sa2u(size, *alignment); *usize = sa2u(size, *alignment);
} }
if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS)) assert(*usize != 0);
return (true);
*zero = MALLOCX_ZERO_GET(flags); *zero = MALLOCX_ZERO_GET(flags);
if ((flags & MALLOCX_TCACHE_MASK) != 0) { if ((flags & MALLOCX_TCACHE_MASK) != 0) {
if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
...@@ -2130,7 +1934,7 @@ imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize, ...@@ -2130,7 +1934,7 @@ imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
*tcache = tcache_get(tsd, true); *tcache = tcache_get(tsd, true);
if ((flags & MALLOCX_ARENA_MASK) != 0) { if ((flags & MALLOCX_ARENA_MASK) != 0) {
unsigned arena_ind = MALLOCX_ARENA_GET(flags); unsigned arena_ind = MALLOCX_ARENA_GET(flags);
*arena = arena_get(tsd_tsdn(tsd), arena_ind, true); *arena = arena_get(tsd, arena_ind, true, true);
if (unlikely(*arena == NULL)) if (unlikely(*arena == NULL))
return (true); return (true);
} else } else
...@@ -2138,44 +1942,59 @@ imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize, ...@@ -2138,44 +1942,59 @@ imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
return (false); return (false);
} }
JEMALLOC_ALWAYS_INLINE_C bool
imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
{
if (likely(flags == 0)) {
*usize = s2u(size);
assert(*usize != 0);
*alignment = 0;
*zero = false;
*tcache = tcache_get(tsd, true);
*arena = NULL;
return (false);
} else {
return (imallocx_flags_decode_hard(tsd, size, flags, usize,
alignment, zero, tcache, arena));
}
}
JEMALLOC_ALWAYS_INLINE_C void * JEMALLOC_ALWAYS_INLINE_C void *
imallocx_flags(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena, bool slow_path) tcache_t *tcache, arena_t *arena)
{ {
szind_t ind;
if (unlikely(alignment != 0)) if (unlikely(alignment != 0))
return (ipalloct(tsdn, usize, alignment, zero, tcache, arena)); return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
ind = size2index(usize); if (unlikely(zero))
assert(ind < NSIZES); return (icalloct(tsd, usize, tcache, arena));
return (iallocztm(tsdn, usize, ind, zero, tcache, false, arena, return (imalloct(tsd, usize, tcache, arena));
slow_path));
} }
static void * static void *
imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, imallocx_prof_sample(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena, bool slow_path) tcache_t *tcache, arena_t *arena)
{ {
void *p; void *p;
if (usize <= SMALL_MAXCLASS) { if (usize <= SMALL_MAXCLASS) {
assert(((alignment == 0) ? s2u(LARGE_MINCLASS) : assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS); sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
p = imallocx_flags(tsdn, LARGE_MINCLASS, alignment, zero, p = imallocx_flags(tsd, LARGE_MINCLASS, alignment, zero, tcache,
tcache, arena, slow_path); arena);
if (p == NULL) if (p == NULL)
return (NULL); return (NULL);
arena_prof_promoted(tsdn, p, usize); arena_prof_promoted(p, usize);
} else { } else
p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena, p = imallocx_flags(tsd, usize, alignment, zero, tcache, arena);
slow_path);
}
return (p); return (p);
} }
JEMALLOC_ALWAYS_INLINE_C void * JEMALLOC_ALWAYS_INLINE_C void *
imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path) imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
{ {
void *p; void *p;
size_t alignment; size_t alignment;
...@@ -2188,27 +2007,25 @@ imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path) ...@@ -2188,27 +2007,25 @@ imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path)
&zero, &tcache, &arena))) &zero, &tcache, &arena)))
return (NULL); return (NULL);
tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true); tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
if (likely((uintptr_t)tctx == (uintptr_t)1U)) { if (likely((uintptr_t)tctx == (uintptr_t)1U))
p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
tcache, arena, slow_path); else if ((uintptr_t)tctx > (uintptr_t)1U) {
} else if ((uintptr_t)tctx > (uintptr_t)1U) { p = imallocx_prof_sample(tsd, *usize, alignment, zero, tcache,
p = imallocx_prof_sample(tsd_tsdn(tsd), *usize, alignment, zero, arena);
tcache, arena, slow_path);
} else } else
p = NULL; p = NULL;
if (unlikely(p == NULL)) { if (unlikely(p == NULL)) {
prof_alloc_rollback(tsd, tctx, true); prof_alloc_rollback(tsd, tctx, true);
return (NULL); return (NULL);
} }
prof_malloc(tsd_tsdn(tsd), p, *usize, tctx); prof_malloc(p, *usize, tctx);
assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
return (p); return (p);
} }
JEMALLOC_ALWAYS_INLINE_C void * JEMALLOC_ALWAYS_INLINE_C void *
imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
bool slow_path)
{ {
void *p; void *p;
size_t alignment; size_t alignment;
...@@ -2216,53 +2033,18 @@ imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, ...@@ -2216,53 +2033,18 @@ imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize,
tcache_t *tcache; tcache_t *tcache;
arena_t *arena; arena_t *arena;
if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
&zero, &tcache, &arena)))
return (NULL);
p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, tcache,
arena, slow_path);
assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
return (p);
}
/* This function guarantees that *tsdn is non-NULL on success. */
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize,
bool slow_path)
{
tsd_t *tsd;
if (slow_path && unlikely(malloc_init())) {
*tsdn = NULL;
return (NULL);
}
tsd = tsd_fetch();
*tsdn = tsd_tsdn(tsd);
witness_assert_lockless(tsd_tsdn(tsd));
if (likely(flags == 0)) { if (likely(flags == 0)) {
szind_t ind = size2index(size); if (config_stats || (config_valgrind && unlikely(in_valgrind)))
if (unlikely(ind >= NSIZES)) *usize = s2u(size);
return (NULL); return (imalloc(tsd, size));
if (config_stats || (config_prof && opt_prof) || (slow_path &&
config_valgrind && unlikely(in_valgrind))) {
*usize = index2size(ind);
assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
}
if (config_prof && opt_prof) {
return (ialloc_prof(tsd, *usize, ind, false,
slow_path));
}
return (ialloc(tsd, size, ind, false, slow_path));
} }
if (config_prof && opt_prof) if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize,
return (imallocx_prof(tsd, size, flags, usize, slow_path)); &alignment, &zero, &tcache, &arena)))
return (NULL);
return (imallocx_no_prof(tsd, size, flags, usize, slow_path)); p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
return (p);
} }
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
...@@ -2270,24 +2052,37 @@ void JEMALLOC_NOTHROW * ...@@ -2270,24 +2052,37 @@ void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_mallocx(size_t size, int flags) je_mallocx(size_t size, int flags)
{ {
tsdn_t *tsdn; tsd_t *tsd;
void *p; void *p;
size_t usize; size_t usize;
assert(size != 0); assert(size != 0);
if (likely(!malloc_slow)) { if (unlikely(malloc_init()))
p = imallocx_body(size, flags, &tsdn, &usize, false); goto label_oom;
ialloc_post_check(p, tsdn, usize, "mallocx", false, false); tsd = tsd_fetch();
} else {
p = imallocx_body(size, flags, &tsdn, &usize, true);
ialloc_post_check(p, tsdn, usize, "mallocx", false, true);
UTRACE(0, size, p);
JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize,
MALLOCX_ZERO_GET(flags));
}
if (config_prof && opt_prof)
p = imallocx_prof(tsd, size, flags, &usize);
else
p = imallocx_no_prof(tsd, size, flags, &usize);
if (unlikely(p == NULL))
goto label_oom;
if (config_stats) {
assert(usize == isalloc(p, config_prof));
*tsd_thread_allocatedp_get(tsd) += usize;
}
UTRACE(0, size, p);
JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags));
return (p); return (p);
label_oom:
if (config_xmalloc && unlikely(opt_xmalloc)) {
malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
abort();
}
UTRACE(0, size, 0);
return (NULL);
} }
static void * static void *
...@@ -2304,7 +2099,7 @@ irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, ...@@ -2304,7 +2099,7 @@ irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
zero, tcache, arena); zero, tcache, arena);
if (p == NULL) if (p == NULL)
return (NULL); return (NULL);
arena_prof_promoted(tsd_tsdn(tsd), p, usize); arena_prof_promoted(p, usize);
} else { } else {
p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero, p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
tcache, arena); tcache, arena);
...@@ -2323,8 +2118,8 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, ...@@ -2323,8 +2118,8 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
prof_tctx_t *old_tctx, *tctx; prof_tctx_t *old_tctx, *tctx;
prof_active = prof_active_get_unlocked(); prof_active = prof_active_get_unlocked();
old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr); old_tctx = prof_tctx_get(old_ptr);
tctx = prof_alloc_prep(tsd, *usize, prof_active, false); tctx = prof_alloc_prep(tsd, *usize, prof_active, true);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize, p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
alignment, zero, tcache, arena, tctx); alignment, zero, tcache, arena, tctx);
...@@ -2333,7 +2128,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, ...@@ -2333,7 +2128,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
tcache, arena); tcache, arena);
} }
if (unlikely(p == NULL)) { if (unlikely(p == NULL)) {
prof_alloc_rollback(tsd, tctx, false); prof_alloc_rollback(tsd, tctx, true);
return (NULL); return (NULL);
} }
...@@ -2346,9 +2141,9 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, ...@@ -2346,9 +2141,9 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
* be the same as the current usize because of in-place large * be the same as the current usize because of in-place large
* reallocation. Therefore, query the actual value of usize. * reallocation. Therefore, query the actual value of usize.
*/ */
*usize = isalloc(tsd_tsdn(tsd), p, config_prof); *usize = isalloc(p, config_prof);
} }
prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr, prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr,
old_usize, old_tctx); old_usize, old_tctx);
return (p); return (p);
...@@ -2374,11 +2169,10 @@ je_rallocx(void *ptr, size_t size, int flags) ...@@ -2374,11 +2169,10 @@ je_rallocx(void *ptr, size_t size, int flags)
assert(malloc_initialized() || IS_INITIALIZER); assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init(); malloc_thread_init();
tsd = tsd_fetch(); tsd = tsd_fetch();
witness_assert_lockless(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
unsigned arena_ind = MALLOCX_ARENA_GET(flags); unsigned arena_ind = MALLOCX_ARENA_GET(flags);
arena = arena_get(tsd_tsdn(tsd), arena_ind, true); arena = arena_get(tsd, arena_ind, true, true);
if (unlikely(arena == NULL)) if (unlikely(arena == NULL))
goto label_oom; goto label_oom;
} else } else
...@@ -2392,14 +2186,13 @@ je_rallocx(void *ptr, size_t size, int flags) ...@@ -2392,14 +2186,13 @@ je_rallocx(void *ptr, size_t size, int flags)
} else } else
tcache = tcache_get(tsd, true); tcache = tcache_get(tsd, true);
old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); old_usize = isalloc(ptr, config_prof);
if (config_valgrind && unlikely(in_valgrind)) if (config_valgrind && unlikely(in_valgrind))
old_rzsize = u2rz(old_usize); old_rzsize = u2rz(old_usize);
if (config_prof && opt_prof) { if (config_prof && opt_prof) {
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) assert(usize != 0);
goto label_oom;
p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
zero, tcache, arena); zero, tcache, arena);
if (unlikely(p == NULL)) if (unlikely(p == NULL))
...@@ -2410,7 +2203,7 @@ je_rallocx(void *ptr, size_t size, int flags) ...@@ -2410,7 +2203,7 @@ je_rallocx(void *ptr, size_t size, int flags)
if (unlikely(p == NULL)) if (unlikely(p == NULL))
goto label_oom; goto label_oom;
if (config_stats || (config_valgrind && unlikely(in_valgrind))) if (config_stats || (config_valgrind && unlikely(in_valgrind)))
usize = isalloc(tsd_tsdn(tsd), p, config_prof); usize = isalloc(p, config_prof);
} }
assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
...@@ -2419,9 +2212,8 @@ je_rallocx(void *ptr, size_t size, int flags) ...@@ -2419,9 +2212,8 @@ je_rallocx(void *ptr, size_t size, int flags)
*tsd_thread_deallocatedp_get(tsd) += old_usize; *tsd_thread_deallocatedp_get(tsd) += old_usize;
} }
UTRACE(ptr, size, p); UTRACE(ptr, size, p);
JEMALLOC_VALGRIND_REALLOC(maybe, tsd_tsdn(tsd), p, usize, no, ptr, JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize,
old_usize, old_rzsize, no, zero); old_rzsize, false, zero);
witness_assert_lockless(tsd_tsdn(tsd));
return (p); return (p);
label_oom: label_oom:
if (config_xmalloc && unlikely(opt_xmalloc)) { if (config_xmalloc && unlikely(opt_xmalloc)) {
...@@ -2429,33 +2221,31 @@ label_oom: ...@@ -2429,33 +2221,31 @@ label_oom:
abort(); abort();
} }
UTRACE(ptr, size, 0); UTRACE(ptr, size, 0);
witness_assert_lockless(tsd_tsdn(tsd));
return (NULL); return (NULL);
} }
JEMALLOC_ALWAYS_INLINE_C size_t JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
size_t extra, size_t alignment, bool zero) size_t alignment, bool zero)
{ {
size_t usize; size_t usize;
if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) if (ixalloc(ptr, old_usize, size, extra, alignment, zero))
return (old_usize); return (old_usize);
usize = isalloc(tsdn, ptr, config_prof); usize = isalloc(ptr, config_prof);
return (usize); return (usize);
} }
static size_t static size_t
ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) size_t alignment, bool zero, prof_tctx_t *tctx)
{ {
size_t usize; size_t usize;
if (tctx == NULL) if (tctx == NULL)
return (old_usize); return (old_usize);
usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment, usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, zero);
zero);
return (usize); return (usize);
} }
...@@ -2469,36 +2259,23 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, ...@@ -2469,36 +2259,23 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
prof_tctx_t *old_tctx, *tctx; prof_tctx_t *old_tctx, *tctx;
prof_active = prof_active_get_unlocked(); prof_active = prof_active_get_unlocked();
old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr); old_tctx = prof_tctx_get(ptr);
/* /*
* usize isn't knowable before ixalloc() returns when extra is non-zero. * usize isn't knowable before ixalloc() returns when extra is non-zero.
* Therefore, compute its maximum possible value and use that in * Therefore, compute its maximum possible value and use that in
* prof_alloc_prep() to decide whether to capture a backtrace. * prof_alloc_prep() to decide whether to capture a backtrace.
* prof_realloc() will use the actual usize to decide whether to sample. * prof_realloc() will use the actual usize to decide whether to sample.
*/ */
if (alignment == 0) { usize_max = (alignment == 0) ? s2u(size+extra) : sa2u(size+extra,
usize_max = s2u(size+extra); alignment);
assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS); assert(usize_max != 0);
} else {
usize_max = sa2u(size+extra, alignment);
if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
/*
* usize_max is out of range, and chances are that
* allocation will fail, but use the maximum possible
* value and carry on with prof_alloc_prep(), just in
* case allocation succeeds.
*/
usize_max = HUGE_MAXCLASS;
}
}
tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize, usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
size, extra, alignment, zero, tctx); alignment, zero, tctx);
} else { } else {
usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
extra, alignment, zero); zero);
} }
if (usize == old_usize) { if (usize == old_usize) {
prof_alloc_rollback(tsd, tctx, false); prof_alloc_rollback(tsd, tctx, false);
...@@ -2525,25 +2302,18 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) ...@@ -2525,25 +2302,18 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
assert(malloc_initialized() || IS_INITIALIZER); assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init(); malloc_thread_init();
tsd = tsd_fetch(); tsd = tsd_fetch();
witness_assert_lockless(tsd_tsdn(tsd));
old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); old_usize = isalloc(ptr, config_prof);
/* /* Clamp extra if necessary to avoid (size + extra) overflow. */
* The API explicitly absolves itself of protecting against (size + if (unlikely(size + extra > HUGE_MAXCLASS)) {
* extra) numerical overflow, but we may need to clamp extra to avoid /* Check for size overflow. */
* exceeding HUGE_MAXCLASS. if (unlikely(size > HUGE_MAXCLASS)) {
* usize = old_usize;
* Ordinarily, size limit checking is handled deeper down, but here we goto label_not_resized;
* have to check as part of (size + extra) clamping, since we need the }
* clamped value in the above helper functions.
*/
if (unlikely(size > HUGE_MAXCLASS)) {
usize = old_usize;
goto label_not_resized;
}
if (unlikely(HUGE_MAXCLASS - size < extra))
extra = HUGE_MAXCLASS - size; extra = HUGE_MAXCLASS - size;
}
if (config_valgrind && unlikely(in_valgrind)) if (config_valgrind && unlikely(in_valgrind))
old_rzsize = u2rz(old_usize); old_rzsize = u2rz(old_usize);
...@@ -2552,8 +2322,8 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) ...@@ -2552,8 +2322,8 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
alignment, zero); alignment, zero);
} else { } else {
usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
extra, alignment, zero); zero);
} }
if (unlikely(usize == old_usize)) if (unlikely(usize == old_usize))
goto label_not_resized; goto label_not_resized;
...@@ -2562,11 +2332,10 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) ...@@ -2562,11 +2332,10 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
*tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_allocatedp_get(tsd) += usize;
*tsd_thread_deallocatedp_get(tsd) += old_usize; *tsd_thread_deallocatedp_get(tsd) += old_usize;
} }
JEMALLOC_VALGRIND_REALLOC(no, tsd_tsdn(tsd), ptr, usize, no, ptr, JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize,
old_usize, old_rzsize, no, zero); old_rzsize, false, zero);
label_not_resized: label_not_resized:
UTRACE(ptr, size, ptr); UTRACE(ptr, size, ptr);
witness_assert_lockless(tsd_tsdn(tsd));
return (usize); return (usize);
} }
...@@ -2575,20 +2344,15 @@ JEMALLOC_ATTR(pure) ...@@ -2575,20 +2344,15 @@ JEMALLOC_ATTR(pure)
je_sallocx(const void *ptr, int flags) je_sallocx(const void *ptr, int flags)
{ {
size_t usize; size_t usize;
tsdn_t *tsdn;
assert(malloc_initialized() || IS_INITIALIZER); assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init(); malloc_thread_init();
tsdn = tsdn_fetch();
witness_assert_lockless(tsdn);
if (config_ivsalloc) if (config_ivsalloc)
usize = ivsalloc(tsdn, ptr, config_prof); usize = ivsalloc(ptr, config_prof);
else else
usize = isalloc(tsdn, ptr, config_prof); usize = isalloc(ptr, config_prof);
witness_assert_lockless(tsdn);
return (usize); return (usize);
} }
...@@ -2602,7 +2366,6 @@ je_dallocx(void *ptr, int flags) ...@@ -2602,7 +2366,6 @@ je_dallocx(void *ptr, int flags)
assert(malloc_initialized() || IS_INITIALIZER); assert(malloc_initialized() || IS_INITIALIZER);
tsd = tsd_fetch(); tsd = tsd_fetch();
witness_assert_lockless(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
tcache = NULL; tcache = NULL;
...@@ -2612,25 +2375,19 @@ je_dallocx(void *ptr, int flags) ...@@ -2612,25 +2375,19 @@ je_dallocx(void *ptr, int flags)
tcache = tcache_get(tsd, false); tcache = tcache_get(tsd, false);
UTRACE(ptr, 0, 0); UTRACE(ptr, 0, 0);
if (likely(!malloc_slow)) ifree(tsd_fetch(), ptr, tcache);
ifree(tsd, ptr, tcache, false);
else
ifree(tsd, ptr, tcache, true);
witness_assert_lockless(tsd_tsdn(tsd));
} }
JEMALLOC_ALWAYS_INLINE_C size_t JEMALLOC_ALWAYS_INLINE_C size_t
inallocx(tsdn_t *tsdn, size_t size, int flags) inallocx(size_t size, int flags)
{ {
size_t usize; size_t usize;
witness_assert_lockless(tsdn);
if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
usize = s2u(size); usize = s2u(size);
else else
usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
witness_assert_lockless(tsdn); assert(usize != 0);
return (usize); return (usize);
} }
...@@ -2643,11 +2400,10 @@ je_sdallocx(void *ptr, size_t size, int flags) ...@@ -2643,11 +2400,10 @@ je_sdallocx(void *ptr, size_t size, int flags)
assert(ptr != NULL); assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER); assert(malloc_initialized() || IS_INITIALIZER);
tsd = tsd_fetch(); usize = inallocx(size, flags);
usize = inallocx(tsd_tsdn(tsd), size, flags); assert(usize == isalloc(ptr, config_prof));
assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof));
witness_assert_lockless(tsd_tsdn(tsd)); tsd = tsd_fetch();
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
tcache = NULL; tcache = NULL;
...@@ -2657,116 +2413,75 @@ je_sdallocx(void *ptr, size_t size, int flags) ...@@ -2657,116 +2413,75 @@ je_sdallocx(void *ptr, size_t size, int flags)
tcache = tcache_get(tsd, false); tcache = tcache_get(tsd, false);
UTRACE(ptr, 0, 0); UTRACE(ptr, 0, 0);
if (likely(!malloc_slow)) isfree(tsd, ptr, usize, tcache);
isfree(tsd, ptr, usize, tcache, false);
else
isfree(tsd, ptr, usize, tcache, true);
witness_assert_lockless(tsd_tsdn(tsd));
} }
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure) JEMALLOC_ATTR(pure)
je_nallocx(size_t size, int flags) je_nallocx(size_t size, int flags)
{ {
size_t usize;
tsdn_t *tsdn;
assert(size != 0); assert(size != 0);
if (unlikely(malloc_init())) if (unlikely(malloc_init()))
return (0); return (0);
tsdn = tsdn_fetch(); return (inallocx(size, flags));
witness_assert_lockless(tsdn);
usize = inallocx(tsdn, size, flags);
if (unlikely(usize > HUGE_MAXCLASS))
return (0);
witness_assert_lockless(tsdn);
return (usize);
} }
JEMALLOC_EXPORT int JEMALLOC_NOTHROW JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) size_t newlen)
{ {
int ret;
tsd_t *tsd;
if (unlikely(malloc_init())) if (unlikely(malloc_init()))
return (EAGAIN); return (EAGAIN);
tsd = tsd_fetch(); return (ctl_byname(name, oldp, oldlenp, newp, newlen));
witness_assert_lockless(tsd_tsdn(tsd));
ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
witness_assert_lockless(tsd_tsdn(tsd));
return (ret);
} }
JEMALLOC_EXPORT int JEMALLOC_NOTHROW JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{ {
int ret;
tsdn_t *tsdn;
if (unlikely(malloc_init())) if (unlikely(malloc_init()))
return (EAGAIN); return (EAGAIN);
tsdn = tsdn_fetch(); return (ctl_nametomib(name, mibp, miblenp));
witness_assert_lockless(tsdn);
ret = ctl_nametomib(tsdn, name, mibp, miblenp);
witness_assert_lockless(tsdn);
return (ret);
} }
JEMALLOC_EXPORT int JEMALLOC_NOTHROW JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen) void *newp, size_t newlen)
{ {
int ret;
tsd_t *tsd;
if (unlikely(malloc_init())) if (unlikely(malloc_init()))
return (EAGAIN); return (EAGAIN);
tsd = tsd_fetch(); return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
witness_assert_lockless(tsd_tsdn(tsd));
ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
witness_assert_lockless(tsd_tsdn(tsd));
return (ret);
} }
JEMALLOC_EXPORT void JEMALLOC_NOTHROW JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
const char *opts) const char *opts)
{ {
tsdn_t *tsdn;
tsdn = tsdn_fetch();
witness_assert_lockless(tsdn);
stats_print(write_cb, cbopaque, opts); stats_print(write_cb, cbopaque, opts);
witness_assert_lockless(tsdn);
} }
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{ {
size_t ret; size_t ret;
tsdn_t *tsdn;
assert(malloc_initialized() || IS_INITIALIZER); assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init(); malloc_thread_init();
tsdn = tsdn_fetch();
witness_assert_lockless(tsdn);
if (config_ivsalloc) if (config_ivsalloc)
ret = ivsalloc(tsdn, ptr, config_prof); ret = ivsalloc(ptr, config_prof);
else else
ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr, config_prof); ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof);
witness_assert_lockless(tsdn);
return (ret); return (ret);
} }
...@@ -2792,7 +2507,6 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) ...@@ -2792,7 +2507,6 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
* to trigger the deadlock described above, but doing so would involve forking * to trigger the deadlock described above, but doing so would involve forking
* via a library constructor that runs before jemalloc's runs. * via a library constructor that runs before jemalloc's runs.
*/ */
#ifndef JEMALLOC_JET
JEMALLOC_ATTR(constructor) JEMALLOC_ATTR(constructor)
static void static void
jemalloc_constructor(void) jemalloc_constructor(void)
...@@ -2800,7 +2514,6 @@ jemalloc_constructor(void) ...@@ -2800,7 +2514,6 @@ jemalloc_constructor(void)
malloc_init(); malloc_init();
} }
#endif
#ifndef JEMALLOC_MUTEX_INIT_CB #ifndef JEMALLOC_MUTEX_INIT_CB
void void
...@@ -2810,9 +2523,7 @@ JEMALLOC_EXPORT void ...@@ -2810,9 +2523,7 @@ JEMALLOC_EXPORT void
_malloc_prefork(void) _malloc_prefork(void)
#endif #endif
{ {
tsd_t *tsd; unsigned i;
unsigned i, j, narenas;
arena_t *arena;
#ifdef JEMALLOC_MUTEX_INIT_CB #ifdef JEMALLOC_MUTEX_INIT_CB
if (!malloc_initialized()) if (!malloc_initialized())
...@@ -2820,40 +2531,16 @@ _malloc_prefork(void) ...@@ -2820,40 +2531,16 @@ _malloc_prefork(void)
#endif #endif
assert(malloc_initialized()); assert(malloc_initialized());
tsd = tsd_fetch();
narenas = narenas_total_get();
witness_prefork(tsd);
/* Acquire all mutexes in a safe order. */ /* Acquire all mutexes in a safe order. */
ctl_prefork(tsd_tsdn(tsd)); ctl_prefork();
malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock); prof_prefork();
prof_prefork0(tsd_tsdn(tsd)); malloc_mutex_prefork(&arenas_lock);
for (i = 0; i < 3; i++) { for (i = 0; i < narenas_total; i++) {
for (j = 0; j < narenas; j++) { if (arenas[i] != NULL)
if ((arena = arena_get(tsd_tsdn(tsd), j, false)) != arena_prefork(arenas[i]);
NULL) {
switch (i) {
case 0:
arena_prefork0(tsd_tsdn(tsd), arena);
break;
case 1:
arena_prefork1(tsd_tsdn(tsd), arena);
break;
case 2:
arena_prefork2(tsd_tsdn(tsd), arena);
break;
default: not_reached();
}
}
}
}
base_prefork(tsd_tsdn(tsd));
for (i = 0; i < narenas; i++) {
if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
arena_prefork3(tsd_tsdn(tsd), arena);
} }
prof_prefork1(tsd_tsdn(tsd)); chunk_prefork();
base_prefork();
} }
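
The prefork/postfork handlers above follow the usual fork-safety protocol: acquire every lock before fork() so the child never inherits a lock held by a thread that does not exist in it, then release in the parent and re-initialize in the child. A single-lock sketch of that protocol (assumption: plain pthread mutexes; jemalloc applies the same idea to all of its locks in a fixed order):

/*
 * Generic fork-safety sketch, reduced to one lock (illustrative, not
 * jemalloc's code).
 */
#include <pthread.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void
prefork(void)
{
	/* Parent, before fork(): hold the lock so shared state cannot
	 * be copied into the child while half-updated. */
	pthread_mutex_lock(&state_lock);
}

static void
postfork_parent(void)
{
	/* Parent, after fork(): simply release it again. */
	pthread_mutex_unlock(&state_lock);
}

static void
postfork_child(void)
{
	/* Child: re-initialize, since the lock was copied while held. */
	pthread_mutex_init(&state_lock, NULL);
}

__attribute__((constructor)) static void
register_fork_handlers(void)
{
	pthread_atfork(prefork, postfork_parent, postfork_child);
}
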
#ifndef JEMALLOC_MUTEX_INIT_CB #ifndef JEMALLOC_MUTEX_INIT_CB
...@@ -2864,8 +2551,7 @@ JEMALLOC_EXPORT void ...@@ -2864,8 +2551,7 @@ JEMALLOC_EXPORT void
_malloc_postfork(void) _malloc_postfork(void)
#endif #endif
{ {
tsd_t *tsd; unsigned i;
unsigned i, narenas;
#ifdef JEMALLOC_MUTEX_INIT_CB #ifdef JEMALLOC_MUTEX_INIT_CB
if (!malloc_initialized()) if (!malloc_initialized())
...@@ -2873,44 +2559,35 @@ _malloc_postfork(void) ...@@ -2873,44 +2559,35 @@ _malloc_postfork(void)
#endif #endif
assert(malloc_initialized()); assert(malloc_initialized());
tsd = tsd_fetch();
witness_postfork_parent(tsd);
/* Release all mutexes, now that fork() has completed. */ /* Release all mutexes, now that fork() has completed. */
base_postfork_parent(tsd_tsdn(tsd)); base_postfork_parent();
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { chunk_postfork_parent();
arena_t *arena; for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) arena_postfork_parent(arenas[i]);
arena_postfork_parent(tsd_tsdn(tsd), arena);
} }
prof_postfork_parent(tsd_tsdn(tsd)); malloc_mutex_postfork_parent(&arenas_lock);
malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock); prof_postfork_parent();
ctl_postfork_parent(tsd_tsdn(tsd)); ctl_postfork_parent();
} }
void void
jemalloc_postfork_child(void) jemalloc_postfork_child(void)
{ {
tsd_t *tsd; unsigned i;
unsigned i, narenas;
assert(malloc_initialized()); assert(malloc_initialized());
tsd = tsd_fetch();
witness_postfork_child(tsd);
/* Release all mutexes, now that fork() has completed. */ /* Release all mutexes, now that fork() has completed. */
base_postfork_child(tsd_tsdn(tsd)); base_postfork_child();
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { chunk_postfork_child();
arena_t *arena; for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) arena_postfork_child(arenas[i]);
arena_postfork_child(tsd_tsdn(tsd), arena);
} }
prof_postfork_child(tsd_tsdn(tsd)); malloc_mutex_postfork_child(&arenas_lock);
malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock); prof_postfork_child();
ctl_postfork_child(tsd_tsdn(tsd)); ctl_postfork_child();
} }
/******************************************************************************/ /******************************************************************************/
...@@ -2930,10 +2607,9 @@ je_get_defrag_hint(void* ptr, int *bin_util, int *run_util) { ...@@ -2930,10 +2607,9 @@ je_get_defrag_hint(void* ptr, int *bin_util, int *run_util) {
if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) { /* indication that this is not a LARGE alloc */ if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) { /* indication that this is not a LARGE alloc */
arena_t *arena = extent_node_arena_get(&chunk->node); arena_t *arena = extent_node_arena_get(&chunk->node);
size_t rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); size_t rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
arena_run_t *run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run; arena_run_t *run = &arena_miscelm_get(chunk, rpages_ind)->run;
arena_bin_t *bin = &arena->bins[run->binind]; arena_bin_t *bin = &arena->bins[run->binind];
tsd_t *tsd = tsd_fetch(); malloc_mutex_lock(&bin->lock);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
/* runs that are in the same chunk as the current chunk are likely to be the next currun */ /* runs that are in the same chunk as the current chunk are likely to be the next currun */
if (chunk != (arena_chunk_t *)CHUNK_ADDR2BASE(bin->runcur)) { if (chunk != (arena_chunk_t *)CHUNK_ADDR2BASE(bin->runcur)) {
arena_bin_info_t *bin_info = &arena_bin_info[run->binind]; arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
...@@ -2942,7 +2618,7 @@ je_get_defrag_hint(void* ptr, int *bin_util, int *run_util) { ...@@ -2942,7 +2618,7 @@ je_get_defrag_hint(void* ptr, int *bin_util, int *run_util) {
*run_util = ((bin_info->nregs - run->nfree)<<16) / bin_info->nregs; *run_util = ((bin_info->nregs - run->nfree)<<16) / bin_info->nregs;
defrag = 1; defrag = 1;
} }
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); malloc_mutex_unlock(&bin->lock);
} }
} }
return defrag; return defrag;
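The bin_util and run_util hints above are 16.16-style fixed-point fractions: regions in use, scaled by 1<<16 and divided by the bin's region count, so the caller can compare utilisation without floating point. A worked example with assumed numbers (not taken from a live arena):

/* Worked example of the fixed-point utilisation computed above. */
static int example_run_util(void)
{
        unsigned nregs = 128;   /* regions per run for a hypothetical bin size class */
        unsigned nfree = 32;    /* free regions in the run that holds the pointer    */

        /* ((128 - 32) << 16) / 128 == 49152, i.e. 75% of 1 << 16. */
        return (int)(((nregs - nfree) << 16) / nregs);
}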
...@@ -69,7 +69,7 @@ JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, ...@@ -69,7 +69,7 @@ JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
#endif #endif
bool bool
malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank) malloc_mutex_init(malloc_mutex_t *mutex)
{ {
#ifdef _WIN32 #ifdef _WIN32
...@@ -80,8 +80,6 @@ malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank) ...@@ -80,8 +80,6 @@ malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
_CRT_SPINCOUNT)) _CRT_SPINCOUNT))
return (true); return (true);
# endif # endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
mutex->lock = OS_UNFAIR_LOCK_INIT;
#elif (defined(JEMALLOC_OSSPIN)) #elif (defined(JEMALLOC_OSSPIN))
mutex->lock = 0; mutex->lock = 0;
#elif (defined(JEMALLOC_MUTEX_INIT_CB)) #elif (defined(JEMALLOC_MUTEX_INIT_CB))
...@@ -105,34 +103,31 @@ malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank) ...@@ -105,34 +103,31 @@ malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
} }
pthread_mutexattr_destroy(&attr); pthread_mutexattr_destroy(&attr);
#endif #endif
if (config_debug)
witness_init(&mutex->witness, name, rank, NULL);
return (false); return (false);
} }
void void
malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) malloc_mutex_prefork(malloc_mutex_t *mutex)
{ {
malloc_mutex_lock(tsdn, mutex); malloc_mutex_lock(mutex);
} }
void void
malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) malloc_mutex_postfork_parent(malloc_mutex_t *mutex)
{ {
malloc_mutex_unlock(tsdn, mutex); malloc_mutex_unlock(mutex);
} }
void void
malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) malloc_mutex_postfork_child(malloc_mutex_t *mutex)
{ {
#ifdef JEMALLOC_MUTEX_INIT_CB #ifdef JEMALLOC_MUTEX_INIT_CB
malloc_mutex_unlock(tsdn, mutex); malloc_mutex_unlock(mutex);
#else #else
if (malloc_mutex_init(mutex, mutex->witness.name, if (malloc_mutex_init(mutex)) {
mutex->witness.rank)) {
malloc_printf("<jemalloc>: Error re-initializing mutex in " malloc_printf("<jemalloc>: Error re-initializing mutex in "
"child\n"); "child\n");
if (opt_abort) if (opt_abort)
...@@ -142,7 +137,7 @@ malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) ...@@ -142,7 +137,7 @@ malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex)
} }
bool bool
malloc_mutex_boot(void) mutex_boot(void)
{ {
#ifdef JEMALLOC_MUTEX_INIT_CB #ifdef JEMALLOC_MUTEX_INIT_CB
#include "jemalloc/internal/jemalloc_internal.h"
#define BILLION UINT64_C(1000000000)
void
nstime_init(nstime_t *time, uint64_t ns)
{
time->ns = ns;
}
void
nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec)
{
time->ns = sec * BILLION + nsec;
}
uint64_t
nstime_ns(const nstime_t *time)
{
return (time->ns);
}
uint64_t
nstime_sec(const nstime_t *time)
{
return (time->ns / BILLION);
}
uint64_t
nstime_nsec(const nstime_t *time)
{
return (time->ns % BILLION);
}
void
nstime_copy(nstime_t *time, const nstime_t *source)
{
*time = *source;
}
int
nstime_compare(const nstime_t *a, const nstime_t *b)
{
return ((a->ns > b->ns) - (a->ns < b->ns));
}
void
nstime_add(nstime_t *time, const nstime_t *addend)
{
assert(UINT64_MAX - time->ns >= addend->ns);
time->ns += addend->ns;
}
void
nstime_subtract(nstime_t *time, const nstime_t *subtrahend)
{
assert(nstime_compare(time, subtrahend) >= 0);
time->ns -= subtrahend->ns;
}
void
nstime_imultiply(nstime_t *time, uint64_t multiplier)
{
assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) <<
2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns));
time->ns *= multiplier;
}
void
nstime_idivide(nstime_t *time, uint64_t divisor)
{
assert(divisor != 0);
time->ns /= divisor;
}
uint64_t
nstime_divide(const nstime_t *time, const nstime_t *divisor)
{
assert(divisor->ns != 0);
return (time->ns / divisor->ns);
}
#ifdef _WIN32
# define NSTIME_MONOTONIC true
static void
nstime_get(nstime_t *time)
{
FILETIME ft;
uint64_t ticks_100ns;
GetSystemTimeAsFileTime(&ft);
ticks_100ns = (((uint64_t)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
nstime_init(time, ticks_100ns * 100);
}
#elif JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
# define NSTIME_MONOTONIC true
static void
nstime_get(nstime_t *time)
{
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
nstime_init2(time, ts.tv_sec, ts.tv_nsec);
}
#elif JEMALLOC_HAVE_CLOCK_MONOTONIC
# define NSTIME_MONOTONIC true
static void
nstime_get(nstime_t *time)
{
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
nstime_init2(time, ts.tv_sec, ts.tv_nsec);
}
#elif JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
# define NSTIME_MONOTONIC true
static void
nstime_get(nstime_t *time)
{
nstime_init(time, mach_absolute_time());
}
#else
# define NSTIME_MONOTONIC false
static void
nstime_get(nstime_t *time)
{
struct timeval tv;
gettimeofday(&tv, NULL);
nstime_init2(time, tv.tv_sec, tv.tv_usec * 1000);
}
#endif
#ifdef JEMALLOC_JET
#undef nstime_monotonic
#define nstime_monotonic JEMALLOC_N(n_nstime_monotonic)
#endif
bool
nstime_monotonic(void)
{
return (NSTIME_MONOTONIC);
#undef NSTIME_MONOTONIC
}
#ifdef JEMALLOC_JET
#undef nstime_monotonic
#define nstime_monotonic JEMALLOC_N(nstime_monotonic)
nstime_monotonic_t *nstime_monotonic = JEMALLOC_N(n_nstime_monotonic);
#endif
#ifdef JEMALLOC_JET
#undef nstime_update
#define nstime_update JEMALLOC_N(n_nstime_update)
#endif
bool
nstime_update(nstime_t *time)
{
nstime_t old_time;
nstime_copy(&old_time, time);
nstime_get(time);
/* Handle non-monotonic clocks. */
if (unlikely(nstime_compare(&old_time, time) > 0)) {
nstime_copy(time, &old_time);
return (true);
}
return (false);
}
#ifdef JEMALLOC_JET
#undef nstime_update
#define nstime_update JEMALLOC_N(nstime_update)
nstime_update_t *nstime_update = JEMALLOC_N(n_nstime_update);
#endif
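The nstime_t helpers removed above wrap a single uint64_t nanosecond counter; nstime_update() refreshes a timestamp from the best clock detected at configure time and refuses to move backwards. A minimal sketch of timing an interval with this API, assuming the jemalloc internal headers are on the include path:

/* Sketch only: nstime_t and its functions come from the internal headers. */
static uint64_t example_elapsed_ns(void)
{
        nstime_t start, now;

        nstime_init(&start, 0);
        nstime_update(&start);          /* take the starting timestamp         */

        /* ... the work being timed ... */

        nstime_copy(&now, &start);
        nstime_update(&now);            /* returns true if the clock went back */
        nstime_subtract(&now, &start);  /* now holds the elapsed interval      */
        return (nstime_ns(&now));
}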
#define JEMALLOC_PAGES_C_ #define JEMALLOC_PAGES_C_
#include "jemalloc/internal/jemalloc_internal.h" #include "jemalloc/internal/jemalloc_internal.h"
#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
#include <sys/sysctl.h>
#endif
/******************************************************************************/
/* Data. */
#ifndef _WIN32
# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
# define PAGES_PROT_DECOMMIT (PROT_NONE)
static int mmap_flags;
#endif
static bool os_overcommits;
/******************************************************************************/ /******************************************************************************/
void * void *
pages_map(void *addr, size_t size, bool *commit) pages_map(void *addr, size_t size)
{ {
void *ret; void *ret;
assert(size != 0); assert(size != 0);
if (os_overcommits)
*commit = true;
#ifdef _WIN32 #ifdef _WIN32
/* /*
* If VirtualAlloc can't allocate at the given address when one is * If VirtualAlloc can't allocate at the given address when one is
* given, it fails and returns NULL. * given, it fails and returns NULL.
*/ */
ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0), ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
PAGE_READWRITE); PAGE_READWRITE);
#else #else
/* /*
* We don't use MAP_FIXED here, because it can cause the *replacement* * We don't use MAP_FIXED here, because it can cause the *replacement*
* of existing mappings, and we only want to create new mappings. * of existing mappings, and we only want to create new mappings.
*/ */
{ ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; -1, 0);
ret = mmap(addr, size, prot, mmap_flags, -1, 0);
}
assert(ret != NULL); assert(ret != NULL);
if (ret == MAP_FAILED) if (ret == MAP_FAILED)
...@@ -87,8 +67,7 @@ pages_unmap(void *addr, size_t size) ...@@ -87,8 +67,7 @@ pages_unmap(void *addr, size_t size)
} }
void * void *
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size, pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
bool *commit)
{ {
void *ret = (void *)((uintptr_t)addr + leadsize); void *ret = (void *)((uintptr_t)addr + leadsize);
...@@ -98,7 +77,7 @@ pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size, ...@@ -98,7 +77,7 @@ pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
void *new_addr; void *new_addr;
pages_unmap(addr, alloc_size); pages_unmap(addr, alloc_size);
new_addr = pages_map(ret, size, commit); new_addr = pages_map(ret, size);
if (new_addr == ret) if (new_addr == ret)
return (ret); return (ret);
if (new_addr) if (new_addr)
...@@ -122,17 +101,17 @@ static bool ...@@ -122,17 +101,17 @@ static bool
pages_commit_impl(void *addr, size_t size, bool commit) pages_commit_impl(void *addr, size_t size, bool commit)
{ {
if (os_overcommits) #ifndef _WIN32
return (true); /*
* The following decommit/commit implementation is functional, but
#ifdef _WIN32 * always disabled because it doesn't add value beyond improved
return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT, * debugging (at the cost of extra system calls) on systems that
PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT))); * overcommit.
#else */
{ if (false) {
int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; int prot = commit ? (PROT_READ | PROT_WRITE) : PROT_NONE;
void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED, void *result = mmap(addr, size, prot, MAP_PRIVATE | MAP_ANON |
-1, 0); MAP_FIXED, -1, 0);
if (result == MAP_FAILED) if (result == MAP_FAILED)
return (true); return (true);
if (result != addr) { if (result != addr) {
...@@ -146,6 +125,7 @@ pages_commit_impl(void *addr, size_t size, bool commit) ...@@ -146,6 +125,7 @@ pages_commit_impl(void *addr, size_t size, bool commit)
return (false); return (false);
} }
#endif #endif
return (true);
} }
bool bool
...@@ -170,16 +150,15 @@ pages_purge(void *addr, size_t size) ...@@ -170,16 +150,15 @@ pages_purge(void *addr, size_t size)
#ifdef _WIN32 #ifdef _WIN32
VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE); VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
unzeroed = true; unzeroed = true;
#elif (defined(JEMALLOC_PURGE_MADVISE_FREE) || \ #elif defined(JEMALLOC_HAVE_MADVISE)
defined(JEMALLOC_PURGE_MADVISE_DONTNEED)) # ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
# if defined(JEMALLOC_PURGE_MADVISE_FREE)
# define JEMALLOC_MADV_PURGE MADV_FREE
# define JEMALLOC_MADV_ZEROS false
# elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED)
# define JEMALLOC_MADV_PURGE MADV_DONTNEED # define JEMALLOC_MADV_PURGE MADV_DONTNEED
# define JEMALLOC_MADV_ZEROS true # define JEMALLOC_MADV_ZEROS true
# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
# define JEMALLOC_MADV_PURGE MADV_FREE
# define JEMALLOC_MADV_ZEROS false
# else # else
# error No madvise(2) flag defined for purging unused dirty pages # error "No madvise(2) flag defined for purging unused dirty pages."
# endif # endif
int err = madvise(addr, size, JEMALLOC_MADV_PURGE); int err = madvise(addr, size, JEMALLOC_MADV_PURGE);
unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0); unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
...@@ -192,111 +171,3 @@ pages_purge(void *addr, size_t size) ...@@ -192,111 +171,3 @@ pages_purge(void *addr, size_t size)
return (unzeroed); return (unzeroed);
} }
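The JEMALLOC_MADV_ZEROS flag above encodes the key difference between the two purge advices: on Linux, MADV_DONTNEED guarantees that the next touch of the range reads back as zero pages, while MADV_FREE leaves the old contents in place until the kernel actually reclaims them, which is why the latter must report the range as unzeroed. A hedged sketch of the DONTNEED case:

#include <stddef.h>
#include <sys/mman.h>

/* Returns nonzero when the purged range may be treated as zeroed (Linux,
 * anonymous private mappings; illustrative only). */
static int example_purge_is_zeroed(void *addr, size_t len)
{
        return (madvise(addr, len, MADV_DONTNEED) == 0);
}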
bool
pages_huge(void *addr, size_t size)
{
assert(PAGE_ADDR2BASE(addr) == addr);
assert(PAGE_CEILING(size) == size);
#ifdef JEMALLOC_THP
return (madvise(addr, size, MADV_HUGEPAGE) != 0);
#else
return (false);
#endif
}
bool
pages_nohuge(void *addr, size_t size)
{
assert(PAGE_ADDR2BASE(addr) == addr);
assert(PAGE_CEILING(size) == size);
#ifdef JEMALLOC_THP
return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
#else
return (false);
#endif
}
#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
static bool
os_overcommits_sysctl(void)
{
int vm_overcommit;
size_t sz;
sz = sizeof(vm_overcommit);
if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0)
return (false); /* Error. */
return ((vm_overcommit & 0x3) == 0);
}
#endif
#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
/*
* Use syscall(2) rather than {open,read,close}(2) when possible to avoid
* reentry during bootstrapping if another library has interposed system call
* wrappers.
*/
static bool
os_overcommits_proc(void)
{
int fd;
char buf[1];
ssize_t nread;
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY);
#else
fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
#endif
if (fd == -1)
return (false); /* Error. */
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
#else
nread = read(fd, &buf, sizeof(buf));
#endif
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
syscall(SYS_close, fd);
#else
close(fd);
#endif
if (nread < 1)
return (false); /* Error. */
/*
* /proc/sys/vm/overcommit_memory meanings:
* 0: Heuristic overcommit.
* 1: Always overcommit.
* 2: Never overcommit.
*/
return (buf[0] == '0' || buf[0] == '1');
}
#endif
void
pages_boot(void)
{
#ifndef _WIN32
mmap_flags = MAP_PRIVATE | MAP_ANON;
#endif
#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
os_overcommits = os_overcommits_sysctl();
#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
os_overcommits = os_overcommits_proc();
# ifdef MAP_NORESERVE
if (os_overcommits)
mmap_flags |= MAP_NORESERVE;
# endif
#else
os_overcommits = false;
#endif
}
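pages_boot() above decides once at start-up whether the OS overcommits; on such systems pages_map() reports every mapping as committed and, where available, adds MAP_NORESERVE, so explicit commit/decommit becomes a no-op. An illustrative caller of that interface (inside jemalloc the chunk allocation path plays this role), assuming the internal pages.h declarations:

/* Map size bytes and make sure they are committed before use. */
static void *example_map_committed(size_t size)
{
        bool committed = false;         /* ask for reserved, not yet committed */
        void *p = pages_map(NULL, size, &committed);

        if (p != NULL && !committed) {
                /* Only reached when the OS does not overcommit. */
                if (pages_commit(p, size)) {    /* true means the commit failed */
                        pages_unmap(p, size);
                        return (NULL);
                }
        }
        return (p);
}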
#define JEMALLOC_PRNG_C_
#include "jemalloc/internal/jemalloc_internal.h"
...@@ -109,7 +109,7 @@ static char prof_dump_buf[ ...@@ -109,7 +109,7 @@ static char prof_dump_buf[
1 1
#endif #endif
]; ];
static size_t prof_dump_buf_end; static unsigned prof_dump_buf_end;
static int prof_dump_fd; static int prof_dump_fd;
/* Do not dump any profiles until bootstrapping is complete. */ /* Do not dump any profiles until bootstrapping is complete. */
...@@ -121,13 +121,13 @@ static bool prof_booted = false; ...@@ -121,13 +121,13 @@ static bool prof_booted = false;
* definition. * definition.
*/ */
static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx); static bool prof_tctx_should_destroy(prof_tctx_t *tctx);
static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx); static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, static bool prof_tdata_should_destroy(prof_tdata_t *tdata,
bool even_if_attached); bool even_if_attached);
static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
bool even_if_attached); bool even_if_attached);
static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name); static char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name);
/******************************************************************************/ /******************************************************************************/
/* Red-black trees. */ /* Red-black trees. */
...@@ -213,23 +213,22 @@ prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) ...@@ -213,23 +213,22 @@ prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
} }
if ((uintptr_t)tctx > (uintptr_t)1U) { if ((uintptr_t)tctx > (uintptr_t)1U) {
malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); malloc_mutex_lock(tctx->tdata->lock);
tctx->prepared = false; tctx->prepared = false;
if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) if (prof_tctx_should_destroy(tctx))
prof_tctx_destroy(tsd, tctx); prof_tctx_destroy(tsd, tctx);
else else
malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); malloc_mutex_unlock(tctx->tdata->lock);
} }
} }
void void
prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx)
prof_tctx_t *tctx)
{ {
prof_tctx_set(tsdn, ptr, usize, tctx); prof_tctx_set(ptr, usize, tctx);
malloc_mutex_lock(tsdn, tctx->tdata->lock); malloc_mutex_lock(tctx->tdata->lock);
tctx->cnts.curobjs++; tctx->cnts.curobjs++;
tctx->cnts.curbytes += usize; tctx->cnts.curbytes += usize;
if (opt_prof_accum) { if (opt_prof_accum) {
...@@ -237,23 +236,23 @@ prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, ...@@ -237,23 +236,23 @@ prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
tctx->cnts.accumbytes += usize; tctx->cnts.accumbytes += usize;
} }
tctx->prepared = false; tctx->prepared = false;
malloc_mutex_unlock(tsdn, tctx->tdata->lock); malloc_mutex_unlock(tctx->tdata->lock);
} }
void void
prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{ {
malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); malloc_mutex_lock(tctx->tdata->lock);
assert(tctx->cnts.curobjs > 0); assert(tctx->cnts.curobjs > 0);
assert(tctx->cnts.curbytes >= usize); assert(tctx->cnts.curbytes >= usize);
tctx->cnts.curobjs--; tctx->cnts.curobjs--;
tctx->cnts.curbytes -= usize; tctx->cnts.curbytes -= usize;
if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) if (prof_tctx_should_destroy(tctx))
prof_tctx_destroy(tsd, tctx); prof_tctx_destroy(tsd, tctx);
else else
malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); malloc_mutex_unlock(tctx->tdata->lock);
} }
void void
...@@ -278,7 +277,7 @@ prof_enter(tsd_t *tsd, prof_tdata_t *tdata) ...@@ -278,7 +277,7 @@ prof_enter(tsd_t *tsd, prof_tdata_t *tdata)
tdata->enq = true; tdata->enq = true;
} }
malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); malloc_mutex_lock(&bt2gctx_mtx);
} }
JEMALLOC_INLINE_C void JEMALLOC_INLINE_C void
...@@ -288,7 +287,7 @@ prof_leave(tsd_t *tsd, prof_tdata_t *tdata) ...@@ -288,7 +287,7 @@ prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
cassert(config_prof); cassert(config_prof);
assert(tdata == prof_tdata_get(tsd, false)); assert(tdata == prof_tdata_get(tsd, false));
malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); malloc_mutex_unlock(&bt2gctx_mtx);
if (tdata != NULL) { if (tdata != NULL) {
bool idump, gdump; bool idump, gdump;
...@@ -301,9 +300,9 @@ prof_leave(tsd_t *tsd, prof_tdata_t *tdata) ...@@ -301,9 +300,9 @@ prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
tdata->enq_gdump = false; tdata->enq_gdump = false;
if (idump) if (idump)
prof_idump(tsd_tsdn(tsd)); prof_idump();
if (gdump) if (gdump)
prof_gdump(tsd_tsdn(tsd)); prof_gdump();
} }
} }
...@@ -547,15 +546,14 @@ prof_tdata_mutex_choose(uint64_t thr_uid) ...@@ -547,15 +546,14 @@ prof_tdata_mutex_choose(uint64_t thr_uid)
} }
static prof_gctx_t * static prof_gctx_t *
prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) prof_gctx_create(tsd_t *tsd, prof_bt_t *bt)
{ {
/* /*
* Create a single allocation that has space for vec of length bt->len. * Create a single allocation that has space for vec of length bt->len.
*/ */
size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *)); prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, offsetof(prof_gctx_t,
prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size, vec) + (bt->len * sizeof(void *)), false, tcache_get(tsd, true),
size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), true, NULL);
true);
if (gctx == NULL) if (gctx == NULL)
return (NULL); return (NULL);
gctx->lock = prof_gctx_mutex_choose(); gctx->lock = prof_gctx_mutex_choose();
...@@ -587,7 +585,7 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, ...@@ -587,7 +585,7 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
* into this function. * into this function.
*/ */
prof_enter(tsd, tdata_self); prof_enter(tsd, tdata_self);
malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); malloc_mutex_lock(gctx->lock);
assert(gctx->nlimbo != 0); assert(gctx->nlimbo != 0);
if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) { if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
/* Remove gctx from bt2gctx. */ /* Remove gctx from bt2gctx. */
...@@ -595,25 +593,24 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, ...@@ -595,25 +593,24 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
not_reached(); not_reached();
prof_leave(tsd, tdata_self); prof_leave(tsd, tdata_self);
/* Destroy gctx. */ /* Destroy gctx. */
malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); malloc_mutex_unlock(gctx->lock);
idalloctm(tsd_tsdn(tsd), gctx, NULL, true, true); idalloctm(tsd, gctx, tcache_get(tsd, false), true);
} else { } else {
/* /*
* Compensate for increment in prof_tctx_destroy() or * Compensate for increment in prof_tctx_destroy() or
* prof_lookup(). * prof_lookup().
*/ */
gctx->nlimbo--; gctx->nlimbo--;
malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); malloc_mutex_unlock(gctx->lock);
prof_leave(tsd, tdata_self); prof_leave(tsd, tdata_self);
} }
} }
/* tctx->tdata->lock must be held. */
static bool static bool
prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) prof_tctx_should_destroy(prof_tctx_t *tctx)
{ {
malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
if (opt_prof_accum) if (opt_prof_accum)
return (false); return (false);
if (tctx->cnts.curobjs != 0) if (tctx->cnts.curobjs != 0)
...@@ -636,6 +633,7 @@ prof_gctx_should_destroy(prof_gctx_t *gctx) ...@@ -636,6 +633,7 @@ prof_gctx_should_destroy(prof_gctx_t *gctx)
return (true); return (true);
} }
/* tctx->tdata->lock is held upon entry, and released before return. */
static void static void
prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
{ {
...@@ -643,8 +641,6 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) ...@@ -643,8 +641,6 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
prof_gctx_t *gctx = tctx->gctx; prof_gctx_t *gctx = tctx->gctx;
bool destroy_tdata, destroy_tctx, destroy_gctx; bool destroy_tdata, destroy_tctx, destroy_gctx;
malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
assert(tctx->cnts.curobjs == 0); assert(tctx->cnts.curobjs == 0);
assert(tctx->cnts.curbytes == 0); assert(tctx->cnts.curbytes == 0);
assert(!opt_prof_accum); assert(!opt_prof_accum);
...@@ -652,10 +648,10 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) ...@@ -652,10 +648,10 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
assert(tctx->cnts.accumbytes == 0); assert(tctx->cnts.accumbytes == 0);
ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL); ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false); destroy_tdata = prof_tdata_should_destroy(tdata, false);
malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); malloc_mutex_unlock(tdata->lock);
malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); malloc_mutex_lock(gctx->lock);
switch (tctx->state) { switch (tctx->state) {
case prof_tctx_state_nominal: case prof_tctx_state_nominal:
tctx_tree_remove(&gctx->tctxs, tctx); tctx_tree_remove(&gctx->tctxs, tctx);
...@@ -695,19 +691,17 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) ...@@ -695,19 +691,17 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
destroy_tctx = false; destroy_tctx = false;
destroy_gctx = false; destroy_gctx = false;
} }
malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); malloc_mutex_unlock(gctx->lock);
if (destroy_gctx) { if (destroy_gctx) {
prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx, prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
tdata); tdata);
} }
malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);
if (destroy_tdata) if (destroy_tdata)
prof_tdata_destroy(tsd, tdata, false); prof_tdata_destroy(tsd, tdata, false);
if (destroy_tctx) if (destroy_tctx)
idalloctm(tsd_tsdn(tsd), tctx, NULL, true, true); idalloctm(tsd, tctx, tcache_get(tsd, false), true);
} }
static bool static bool
...@@ -727,7 +721,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, ...@@ -727,7 +721,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
prof_enter(tsd, tdata); prof_enter(tsd, tdata);
if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
/* bt has never been seen before. Insert it. */ /* bt has never been seen before. Insert it. */
gctx.p = prof_gctx_create(tsd_tsdn(tsd), bt); gctx.p = prof_gctx_create(tsd, bt);
if (gctx.v == NULL) { if (gctx.v == NULL) {
prof_leave(tsd, tdata); prof_leave(tsd, tdata);
return (true); return (true);
...@@ -736,7 +730,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, ...@@ -736,7 +730,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) { if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
/* OOM. */ /* OOM. */
prof_leave(tsd, tdata); prof_leave(tsd, tdata);
idalloctm(tsd_tsdn(tsd), gctx.v, NULL, true, true); idalloctm(tsd, gctx.v, tcache_get(tsd, false), true);
return (true); return (true);
} }
new_gctx = true; new_gctx = true;
...@@ -745,9 +739,9 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, ...@@ -745,9 +739,9 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
* Increment nlimbo, in order to avoid a race condition with * Increment nlimbo, in order to avoid a race condition with
* prof_tctx_destroy()/prof_gctx_try_destroy(). * prof_tctx_destroy()/prof_gctx_try_destroy().
*/ */
malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock); malloc_mutex_lock(gctx.p->lock);
gctx.p->nlimbo++; gctx.p->nlimbo++;
malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock); malloc_mutex_unlock(gctx.p->lock);
new_gctx = false; new_gctx = false;
} }
prof_leave(tsd, tdata); prof_leave(tsd, tdata);
...@@ -774,12 +768,13 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) ...@@ -774,12 +768,13 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
if (tdata == NULL) if (tdata == NULL)
return (NULL); return (NULL);
malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); malloc_mutex_lock(tdata->lock);
not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v); not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
if (!not_found) /* Note double negative! */ if (!not_found) /* Note double negative! */
ret.p->prepared = true; ret.p->prepared = true;
malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); malloc_mutex_unlock(tdata->lock);
if (not_found) { if (not_found) {
tcache_t *tcache;
void *btkey; void *btkey;
prof_gctx_t *gctx; prof_gctx_t *gctx;
bool new_gctx, error; bool new_gctx, error;
...@@ -793,9 +788,9 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) ...@@ -793,9 +788,9 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
return (NULL); return (NULL);
/* Link a prof_tctx_t into gctx for this thread. */ /* Link a prof_tctx_t into gctx for this thread. */
ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t), tcache = tcache_get(tsd, true);
size2index(sizeof(prof_tctx_t)), false, NULL, true, ret.v = iallocztm(tsd, sizeof(prof_tctx_t), false, tcache, true,
arena_ichoose(tsd, NULL), true); NULL);
if (ret.p == NULL) { if (ret.p == NULL) {
if (new_gctx) if (new_gctx)
prof_gctx_try_destroy(tsd, tdata, gctx, tdata); prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
...@@ -809,41 +804,41 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) ...@@ -809,41 +804,41 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
ret.p->tctx_uid = tdata->tctx_uid_next++; ret.p->tctx_uid = tdata->tctx_uid_next++;
ret.p->prepared = true; ret.p->prepared = true;
ret.p->state = prof_tctx_state_initializing; ret.p->state = prof_tctx_state_initializing;
malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); malloc_mutex_lock(tdata->lock);
error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v); error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); malloc_mutex_unlock(tdata->lock);
if (error) { if (error) {
if (new_gctx) if (new_gctx)
prof_gctx_try_destroy(tsd, tdata, gctx, tdata); prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
idalloctm(tsd_tsdn(tsd), ret.v, NULL, true, true); idalloctm(tsd, ret.v, tcache, true);
return (NULL); return (NULL);
} }
malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); malloc_mutex_lock(gctx->lock);
ret.p->state = prof_tctx_state_nominal; ret.p->state = prof_tctx_state_nominal;
tctx_tree_insert(&gctx->tctxs, ret.p); tctx_tree_insert(&gctx->tctxs, ret.p);
gctx->nlimbo--; gctx->nlimbo--;
malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); malloc_mutex_unlock(gctx->lock);
} }
return (ret.p); return (ret.p);
} }
/*
* The bodies of this function and prof_leakcheck() are compiled out unless heap
* profiling is enabled, so that it is possible to compile jemalloc with
* floating point support completely disabled. Avoiding floating point code is
* important on memory-constrained systems, but it also enables a workaround for
* versions of glibc that don't properly save/restore floating point registers
* during dynamic lazy symbol loading (which internally calls into whatever
* malloc implementation happens to be integrated into the application). Note
* that some compilers (e.g. gcc 4.8) may use floating point registers for fast
* memory moves, so jemalloc must be compiled with such optimizations disabled
* (e.g.
* -mno-sse) in order for the workaround to be complete.
*/
void void
prof_sample_threshold_update(prof_tdata_t *tdata) prof_sample_threshold_update(prof_tdata_t *tdata)
{ {
/*
* The body of this function is compiled out unless heap profiling is
* enabled, so that it is possible to compile jemalloc with floating
* point support completely disabled. Avoiding floating point code is
* important on memory-constrained systems, but it also enables a
* workaround for versions of glibc that don't properly save/restore
* floating point registers during dynamic lazy symbol loading (which
* internally calls into whatever malloc implementation happens to be
* integrated into the application). Note that some compilers (e.g.
* gcc 4.8) may use floating point registers for fast memory moves, so
* jemalloc must be compiled with such optimizations disabled (e.g.
* -mno-sse) in order for the workaround to be complete.
*/
#ifdef JEMALLOC_PROF #ifdef JEMALLOC_PROF
uint64_t r; uint64_t r;
double u; double u;
...@@ -874,7 +869,8 @@ prof_sample_threshold_update(prof_tdata_t *tdata) ...@@ -874,7 +869,8 @@ prof_sample_threshold_update(prof_tdata_t *tdata)
* pp 500 * pp 500
* (http://luc.devroye.org/rnbookindex.html) * (http://luc.devroye.org/rnbookindex.html)
*/ */
r = prng_lg_range_u64(&tdata->prng_state, 53); prng64(r, 53, tdata->prng_state, UINT64_C(6364136223846793005),
UINT64_C(1442695040888963407));
u = (double)r * (1.0/9007199254740992.0L); u = (double)r * (1.0/9007199254740992.0L);
tdata->bytes_until_sample = (uint64_t)(log(u) / tdata->bytes_until_sample = (uint64_t)(log(u) /
log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample)))) log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
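The draw above is inverse-transform sampling of a geometric distribution: with per-byte sampling probability p = 2^-lg_prof_sample and u uniform in (0,1), floor(log(u)/log(1-p)) + 1 has mean 1/p bytes between samples. A numeric sketch, with lg_prof_sample assumed at its upstream default of 19 (512 KiB mean interval):

#include <math.h>
#include <stdint.h>

/* u stands in for the 53-bit PRNG output mapped into (0,1). */
static uint64_t example_bytes_until_sample(double u, unsigned lg_prof_sample)
{
        double p = 1.0 / (double)((uint64_t)1 << lg_prof_sample);

        /* u == 0.5 with lg_prof_sample == 19 gives roughly 363,409 bytes;
         * averaged over uniform u the interval is 2^19 bytes. */
        return ((uint64_t)(log(u) / log(1.0 - p)) + (uint64_t)1U);
}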
...@@ -897,13 +893,11 @@ size_t ...@@ -897,13 +893,11 @@ size_t
prof_tdata_count(void) prof_tdata_count(void)
{ {
size_t tdata_count = 0; size_t tdata_count = 0;
tsdn_t *tsdn;
tsdn = tsdn_fetch(); malloc_mutex_lock(&tdatas_mtx);
malloc_mutex_lock(tsdn, &tdatas_mtx);
tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter, tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
(void *)&tdata_count); (void *)&tdata_count);
malloc_mutex_unlock(tsdn, &tdatas_mtx); malloc_mutex_unlock(&tdatas_mtx);
return (tdata_count); return (tdata_count);
} }
...@@ -922,9 +916,9 @@ prof_bt_count(void) ...@@ -922,9 +916,9 @@ prof_bt_count(void)
if (tdata == NULL) if (tdata == NULL)
return (0); return (0);
malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); malloc_mutex_lock(&bt2gctx_mtx);
bt_count = ckh_count(&bt2gctx); bt_count = ckh_count(&bt2gctx);
malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); malloc_mutex_unlock(&bt2gctx_mtx);
return (bt_count); return (bt_count);
} }
...@@ -994,7 +988,7 @@ prof_dump_close(bool propagate_err) ...@@ -994,7 +988,7 @@ prof_dump_close(bool propagate_err)
static bool static bool
prof_dump_write(bool propagate_err, const char *s) prof_dump_write(bool propagate_err, const char *s)
{ {
size_t i, slen, n; unsigned i, slen, n;
cassert(config_prof); cassert(config_prof);
...@@ -1037,21 +1031,20 @@ prof_dump_printf(bool propagate_err, const char *format, ...) ...@@ -1037,21 +1031,20 @@ prof_dump_printf(bool propagate_err, const char *format, ...)
return (ret); return (ret);
} }
/* tctx->tdata->lock is held. */
static void static void
prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) prof_tctx_merge_tdata(prof_tctx_t *tctx, prof_tdata_t *tdata)
{ {
malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); malloc_mutex_lock(tctx->gctx->lock);
malloc_mutex_lock(tsdn, tctx->gctx->lock);
switch (tctx->state) { switch (tctx->state) {
case prof_tctx_state_initializing: case prof_tctx_state_initializing:
malloc_mutex_unlock(tsdn, tctx->gctx->lock); malloc_mutex_unlock(tctx->gctx->lock);
return; return;
case prof_tctx_state_nominal: case prof_tctx_state_nominal:
tctx->state = prof_tctx_state_dumping; tctx->state = prof_tctx_state_dumping;
malloc_mutex_unlock(tsdn, tctx->gctx->lock); malloc_mutex_unlock(tctx->gctx->lock);
memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t)); memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
...@@ -1070,12 +1063,11 @@ prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) ...@@ -1070,12 +1063,11 @@ prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata)
} }
} }
/* gctx->lock is held. */
static void static void
prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) prof_tctx_merge_gctx(prof_tctx_t *tctx, prof_gctx_t *gctx)
{ {
malloc_mutex_assert_owner(tsdn, gctx->lock);
gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs; gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes; gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
if (opt_prof_accum) { if (opt_prof_accum) {
...@@ -1084,12 +1076,10 @@ prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) ...@@ -1084,12 +1076,10 @@ prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx)
} }
} }
/* tctx->gctx is held. */
static prof_tctx_t * static prof_tctx_t *
prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{ {
tsdn_t *tsdn = (tsdn_t *)arg;
malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
switch (tctx->state) { switch (tctx->state) {
case prof_tctx_state_nominal: case prof_tctx_state_nominal:
...@@ -1097,7 +1087,7 @@ prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) ...@@ -1097,7 +1087,7 @@ prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
break; break;
case prof_tctx_state_dumping: case prof_tctx_state_dumping:
case prof_tctx_state_purgatory: case prof_tctx_state_purgatory:
prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx); prof_tctx_merge_gctx(tctx, tctx->gctx);
break; break;
default: default:
not_reached(); not_reached();
...@@ -1106,18 +1096,11 @@ prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) ...@@ -1106,18 +1096,11 @@ prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
return (NULL); return (NULL);
} }
struct prof_tctx_dump_iter_arg_s { /* gctx->lock is held. */
tsdn_t *tsdn;
bool propagate_err;
};
static prof_tctx_t * static prof_tctx_t *
prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{ {
struct prof_tctx_dump_iter_arg_s *arg = bool propagate_err = *(bool *)arg;
(struct prof_tctx_dump_iter_arg_s *)opaque;
malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);
switch (tctx->state) { switch (tctx->state) {
case prof_tctx_state_initializing: case prof_tctx_state_initializing:
...@@ -1126,7 +1109,7 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) ...@@ -1126,7 +1109,7 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
break; break;
case prof_tctx_state_dumping: case prof_tctx_state_dumping:
case prof_tctx_state_purgatory: case prof_tctx_state_purgatory:
if (prof_dump_printf(arg->propagate_err, if (prof_dump_printf(propagate_err,
" t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": " " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
"%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs, "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs, tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
...@@ -1139,14 +1122,12 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) ...@@ -1139,14 +1122,12 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
return (NULL); return (NULL);
} }
/* tctx->gctx is held. */
static prof_tctx_t * static prof_tctx_t *
prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{ {
tsdn_t *tsdn = (tsdn_t *)arg;
prof_tctx_t *ret; prof_tctx_t *ret;
malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
switch (tctx->state) { switch (tctx->state) {
case prof_tctx_state_nominal: case prof_tctx_state_nominal:
/* New since dumping started; ignore. */ /* New since dumping started; ignore. */
...@@ -1167,12 +1148,12 @@ label_return: ...@@ -1167,12 +1148,12 @@ label_return:
} }
static void static void
prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) prof_dump_gctx_prep(prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
{ {
cassert(config_prof); cassert(config_prof);
malloc_mutex_lock(tsdn, gctx->lock); malloc_mutex_lock(gctx->lock);
/* /*
* Increment nlimbo so that gctx won't go away before dump. * Increment nlimbo so that gctx won't go away before dump.
...@@ -1184,26 +1165,19 @@ prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) ...@@ -1184,26 +1165,19 @@ prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t)); memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
malloc_mutex_unlock(tsdn, gctx->lock); malloc_mutex_unlock(gctx->lock);
} }
struct prof_gctx_merge_iter_arg_s {
tsdn_t *tsdn;
size_t leak_ngctx;
};
static prof_gctx_t * static prof_gctx_t *
prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
{ {
struct prof_gctx_merge_iter_arg_s *arg = size_t *leak_ngctx = (size_t *)arg;
(struct prof_gctx_merge_iter_arg_s *)opaque;
malloc_mutex_lock(arg->tsdn, gctx->lock); malloc_mutex_lock(gctx->lock);
tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, NULL);
(void *)arg->tsdn);
if (gctx->cnt_summed.curobjs != 0) if (gctx->cnt_summed.curobjs != 0)
arg->leak_ngctx++; (*leak_ngctx)++;
malloc_mutex_unlock(arg->tsdn, gctx->lock); malloc_mutex_unlock(gctx->lock);
return (NULL); return (NULL);
} }
...@@ -1222,7 +1196,7 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) ...@@ -1222,7 +1196,7 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
*/ */
while ((gctx = gctx_tree_first(gctxs)) != NULL) { while ((gctx = gctx_tree_first(gctxs)) != NULL) {
gctx_tree_remove(gctxs, gctx); gctx_tree_remove(gctxs, gctx);
malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); malloc_mutex_lock(gctx->lock);
{ {
prof_tctx_t *next; prof_tctx_t *next;
...@@ -1230,15 +1204,14 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) ...@@ -1230,15 +1204,14 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
do { do {
prof_tctx_t *to_destroy = prof_tctx_t *to_destroy =
tctx_tree_iter(&gctx->tctxs, next, tctx_tree_iter(&gctx->tctxs, next,
prof_tctx_finish_iter, prof_tctx_finish_iter, NULL);
(void *)tsd_tsdn(tsd));
if (to_destroy != NULL) { if (to_destroy != NULL) {
next = tctx_tree_next(&gctx->tctxs, next = tctx_tree_next(&gctx->tctxs,
to_destroy); to_destroy);
tctx_tree_remove(&gctx->tctxs, tctx_tree_remove(&gctx->tctxs,
to_destroy); to_destroy);
idalloctm(tsd_tsdn(tsd), to_destroy, idalloctm(tsd, to_destroy,
NULL, true, true); tcache_get(tsd, false), true);
} else } else
next = NULL; next = NULL;
} while (next != NULL); } while (next != NULL);
...@@ -1246,26 +1219,19 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) ...@@ -1246,26 +1219,19 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
gctx->nlimbo--; gctx->nlimbo--;
if (prof_gctx_should_destroy(gctx)) { if (prof_gctx_should_destroy(gctx)) {
gctx->nlimbo++; gctx->nlimbo++;
malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); malloc_mutex_unlock(gctx->lock);
prof_gctx_try_destroy(tsd, tdata, gctx, tdata); prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
} else } else
malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); malloc_mutex_unlock(gctx->lock);
} }
} }
struct prof_tdata_merge_iter_arg_s {
tsdn_t *tsdn;
prof_cnt_t cnt_all;
};
static prof_tdata_t * static prof_tdata_t *
prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
void *opaque)
{ {
struct prof_tdata_merge_iter_arg_s *arg = prof_cnt_t *cnt_all = (prof_cnt_t *)arg;
(struct prof_tdata_merge_iter_arg_s *)opaque;
malloc_mutex_lock(arg->tsdn, tdata->lock); malloc_mutex_lock(tdata->lock);
if (!tdata->expired) { if (!tdata->expired) {
size_t tabind; size_t tabind;
union { union {
...@@ -1277,17 +1243,17 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, ...@@ -1277,17 +1243,17 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t)); memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL, for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
&tctx.v);) &tctx.v);)
prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata); prof_tctx_merge_tdata(tctx.p, tdata);
arg->cnt_all.curobjs += tdata->cnt_summed.curobjs; cnt_all->curobjs += tdata->cnt_summed.curobjs;
arg->cnt_all.curbytes += tdata->cnt_summed.curbytes; cnt_all->curbytes += tdata->cnt_summed.curbytes;
if (opt_prof_accum) { if (opt_prof_accum) {
arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs; cnt_all->accumobjs += tdata->cnt_summed.accumobjs;
arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes; cnt_all->accumbytes += tdata->cnt_summed.accumbytes;
} }
} else } else
tdata->dumping = false; tdata->dumping = false;
malloc_mutex_unlock(arg->tsdn, tdata->lock); malloc_mutex_unlock(tdata->lock);
return (NULL); return (NULL);
} }
...@@ -1316,7 +1282,7 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) ...@@ -1316,7 +1282,7 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
#define prof_dump_header JEMALLOC_N(prof_dump_header_impl) #define prof_dump_header JEMALLOC_N(prof_dump_header_impl)
#endif #endif
static bool static bool
prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all) prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
{ {
bool ret; bool ret;
...@@ -1327,10 +1293,10 @@ prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all) ...@@ -1327,10 +1293,10 @@ prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all)
cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes))
return (true); return (true);
malloc_mutex_lock(tsdn, &tdatas_mtx); malloc_mutex_lock(&tdatas_mtx);
ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
(void *)&propagate_err) != NULL); (void *)&propagate_err) != NULL);
malloc_mutex_unlock(tsdn, &tdatas_mtx); malloc_mutex_unlock(&tdatas_mtx);
return (ret); return (ret);
} }
#ifdef JEMALLOC_JET #ifdef JEMALLOC_JET
...@@ -1339,16 +1305,15 @@ prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all) ...@@ -1339,16 +1305,15 @@ prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all)
prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl); prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
#endif #endif
/* gctx->lock is held. */
static bool static bool
prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt,
const prof_bt_t *bt, prof_gctx_tree_t *gctxs) prof_gctx_tree_t *gctxs)
{ {
bool ret; bool ret;
unsigned i; unsigned i;
struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;
cassert(config_prof); cassert(config_prof);
malloc_mutex_assert_owner(tsdn, gctx->lock);
/* Avoid dumping such gctx's that have no useful data. */ /* Avoid dumping such gctx's that have no useful data. */
if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) || if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
...@@ -1382,10 +1347,8 @@ prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, ...@@ -1382,10 +1347,8 @@ prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
goto label_return; goto label_return;
} }
prof_tctx_dump_iter_arg.tsdn = tsdn;
prof_tctx_dump_iter_arg.propagate_err = propagate_err;
if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
(void *)&prof_tctx_dump_iter_arg) != NULL) { (void *)&propagate_err) != NULL) {
ret = true; ret = true;
goto label_return; goto label_return;
} }
...@@ -1395,7 +1358,6 @@ label_return: ...@@ -1395,7 +1358,6 @@ label_return:
return (ret); return (ret);
} }
#ifndef _WIN32
JEMALLOC_FORMAT_PRINTF(1, 2) JEMALLOC_FORMAT_PRINTF(1, 2)
static int static int
prof_open_maps(const char *format, ...) prof_open_maps(const char *format, ...)
...@@ -1411,18 +1373,6 @@ prof_open_maps(const char *format, ...) ...@@ -1411,18 +1373,6 @@ prof_open_maps(const char *format, ...)
return (mfd); return (mfd);
} }
#endif
static int
prof_getpid(void)
{
#ifdef _WIN32
return (GetCurrentProcessId());
#else
return (getpid());
#endif
}
static bool static bool
prof_dump_maps(bool propagate_err) prof_dump_maps(bool propagate_err)
...@@ -1433,11 +1383,9 @@ prof_dump_maps(bool propagate_err) ...@@ -1433,11 +1383,9 @@ prof_dump_maps(bool propagate_err)
cassert(config_prof); cassert(config_prof);
#ifdef __FreeBSD__ #ifdef __FreeBSD__
mfd = prof_open_maps("/proc/curproc/map"); mfd = prof_open_maps("/proc/curproc/map");
#elif defined(_WIN32)
mfd = -1; // Not implemented
#else #else
{ {
int pid = prof_getpid(); int pid = getpid();
mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid); mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
if (mfd == -1) if (mfd == -1)
...@@ -1478,66 +1426,39 @@ label_return: ...@@ -1478,66 +1426,39 @@ label_return:
return (ret); return (ret);
} }
/*
* See prof_sample_threshold_update() comment for why the body of this function
* is conditionally compiled.
*/
static void static void
prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx, prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
const char *filename) const char *filename)
{ {
#ifdef JEMALLOC_PROF
/*
	 * Scaling is equivalent to AdjustSamples() in jeprof, but the result may
* differ slightly from what jeprof reports, because here we scale the
* summary values, whereas jeprof scales each context individually and
* reports the sums of the scaled values.
*/
if (cnt_all->curbytes != 0) { if (cnt_all->curbytes != 0) {
double sample_period = (double)((uint64_t)1 << lg_prof_sample); malloc_printf("<jemalloc>: Leak summary: %"FMTu64" byte%s, %"
double ratio = (((double)cnt_all->curbytes) / FMTu64" object%s, %zu context%s\n",
(double)cnt_all->curobjs) / sample_period; cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "",
double scale_factor = 1.0 / (1.0 - exp(-ratio)); cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "",
uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes) leak_ngctx, (leak_ngctx != 1) ? "s" : "");
* scale_factor);
uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
scale_factor);
malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
" byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
malloc_printf( malloc_printf(
"<jemalloc>: Run jeprof on \"%s\" for leak detail\n", "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
filename); filename);
} }
#endif
} }
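The scale_factor above undoes the sampling: with mean sample period s and mean sampled object size b, each sampled byte stands for roughly 1/(1 - e^(-b/s)) real bytes, which is why the printed totals are approximations rather than exact counts. A numeric sketch with assumed sample data:

#include <math.h>
#include <stdint.h>

static uint64_t example_leak_estimate(void)
{
        uint64_t curbytes = 1048576, curobjs = 4;   /* sampled: 1 MiB in 4 objects */
        double sample_period = (double)((uint64_t)1 << 19);  /* lg_prof_sample 19  */
        double ratio = ((double)curbytes / (double)curobjs) / sample_period;
        double scale_factor = 1.0 / (1.0 - exp(-ratio));

        /* ratio == 0.5, scale_factor ~= 2.54: roughly 2.5 MiB estimated live. */
        return ((uint64_t)round((double)curbytes * scale_factor));
}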
struct prof_gctx_dump_iter_arg_s {
tsdn_t *tsdn;
bool propagate_err;
};
static prof_gctx_t * static prof_gctx_t *
prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
{ {
prof_gctx_t *ret; prof_gctx_t *ret;
struct prof_gctx_dump_iter_arg_s *arg = bool propagate_err = *(bool *)arg;
(struct prof_gctx_dump_iter_arg_s *)opaque;
malloc_mutex_lock(arg->tsdn, gctx->lock); malloc_mutex_lock(gctx->lock);
if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt, if (prof_dump_gctx(propagate_err, gctx, &gctx->bt, gctxs)) {
gctxs)) {
ret = gctx; ret = gctx;
goto label_return; goto label_return;
} }
ret = NULL; ret = NULL;
label_return: label_return:
malloc_mutex_unlock(arg->tsdn, gctx->lock); malloc_mutex_unlock(gctx->lock);
return (ret); return (ret);
} }
...@@ -1545,14 +1466,13 @@ static bool ...@@ -1545,14 +1466,13 @@ static bool
prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck) prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
{ {
prof_tdata_t *tdata; prof_tdata_t *tdata;
struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; prof_cnt_t cnt_all;
size_t tabind; size_t tabind;
union { union {
prof_gctx_t *p; prof_gctx_t *p;
void *v; void *v;
} gctx; } gctx;
struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; size_t leak_ngctx;
struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
prof_gctx_tree_t gctxs; prof_gctx_tree_t gctxs;
cassert(config_prof); cassert(config_prof);
...@@ -1561,7 +1481,7 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck) ...@@ -1561,7 +1481,7 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
if (tdata == NULL) if (tdata == NULL)
return (true); return (true);
malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); malloc_mutex_lock(&prof_dump_mtx);
prof_enter(tsd, tdata); prof_enter(tsd, tdata);
/* /*
...@@ -1570,24 +1490,20 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck) ...@@ -1570,24 +1490,20 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
*/ */
gctx_tree_new(&gctxs); gctx_tree_new(&gctxs);
for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);)
prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, &gctxs); prof_dump_gctx_prep(gctx.p, &gctxs);
/* /*
* Iterate over tdatas, and for the non-expired ones snapshot their tctx * Iterate over tdatas, and for the non-expired ones snapshot their tctx
* stats and merge them into the associated gctx's. * stats and merge them into the associated gctx's.
*/ */
prof_tdata_merge_iter_arg.tsdn = tsd_tsdn(tsd); memset(&cnt_all, 0, sizeof(prof_cnt_t));
memset(&prof_tdata_merge_iter_arg.cnt_all, 0, sizeof(prof_cnt_t)); malloc_mutex_lock(&tdatas_mtx);
malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, (void *)&cnt_all);
tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, malloc_mutex_unlock(&tdatas_mtx);
(void *)&prof_tdata_merge_iter_arg);
malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
/* Merge tctx stats into gctx's. */ /* Merge tctx stats into gctx's. */
prof_gctx_merge_iter_arg.tsdn = tsd_tsdn(tsd); leak_ngctx = 0;
prof_gctx_merge_iter_arg.leak_ngctx = 0; gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, (void *)&leak_ngctx);
gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter,
(void *)&prof_gctx_merge_iter_arg);
prof_leave(tsd, tdata); prof_leave(tsd, tdata);
...@@ -1596,15 +1512,12 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck) ...@@ -1596,15 +1512,12 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
goto label_open_close_error; goto label_open_close_error;
/* Dump profile header. */ /* Dump profile header. */
if (prof_dump_header(tsd_tsdn(tsd), propagate_err, if (prof_dump_header(propagate_err, &cnt_all))
&prof_tdata_merge_iter_arg.cnt_all))
goto label_write_error; goto label_write_error;
/* Dump per gctx profile stats. */ /* Dump per gctx profile stats. */
prof_gctx_dump_iter_arg.tsdn = tsd_tsdn(tsd);
prof_gctx_dump_iter_arg.propagate_err = propagate_err;
if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter, if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter,
(void *)&prof_gctx_dump_iter_arg) != NULL) (void *)&propagate_err) != NULL)
goto label_write_error; goto label_write_error;
/* Dump /proc/<pid>/maps if possible. */ /* Dump /proc/<pid>/maps if possible. */
...@@ -1615,18 +1528,17 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck) ...@@ -1615,18 +1528,17 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
goto label_open_close_error; goto label_open_close_error;
prof_gctx_finish(tsd, &gctxs); prof_gctx_finish(tsd, &gctxs);
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); malloc_mutex_unlock(&prof_dump_mtx);
if (leakcheck)
prof_leakcheck(&cnt_all, leak_ngctx, filename);
if (leakcheck) {
prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
prof_gctx_merge_iter_arg.leak_ngctx, filename);
}
return (false); return (false);
label_write_error: label_write_error:
prof_dump_close(propagate_err); prof_dump_close(propagate_err);
label_open_close_error: label_open_close_error:
prof_gctx_finish(tsd, &gctxs); prof_gctx_finish(tsd, &gctxs);
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); malloc_mutex_unlock(&prof_dump_mtx);
return (true); return (true);
} }
...@@ -1642,12 +1554,12 @@ prof_dump_filename(char *filename, char v, uint64_t vseq) ...@@ -1642,12 +1554,12 @@ prof_dump_filename(char *filename, char v, uint64_t vseq)
/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */ /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
"%s.%d.%"FMTu64".%c%"FMTu64".heap", "%s.%d.%"FMTu64".%c%"FMTu64".heap",
opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq); opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq);
} else { } else {
/* "<prefix>.<pid>.<seq>.<v>.heap" */ /* "<prefix>.<pid>.<seq>.<v>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
"%s.%d.%"FMTu64".%c.heap", "%s.%d.%"FMTu64".%c.heap",
opt_prof_prefix, prof_getpid(), prof_dump_seq, v); opt_prof_prefix, (int)getpid(), prof_dump_seq, v);
} }
prof_dump_seq++; prof_dump_seq++;
} }
...@@ -1666,23 +1578,23 @@ prof_fdump(void) ...@@ -1666,23 +1578,23 @@ prof_fdump(void)
return; return;
tsd = tsd_fetch(); tsd = tsd_fetch();
malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'f', VSEQ_INVALID); prof_dump_filename(filename, 'f', VSEQ_INVALID);
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); malloc_mutex_unlock(&prof_dump_seq_mtx);
prof_dump(tsd, false, filename, opt_prof_leak); prof_dump(tsd, false, filename, opt_prof_leak);
} }
void void
prof_idump(tsdn_t *tsdn) prof_idump(void)
{ {
tsd_t *tsd; tsd_t *tsd;
prof_tdata_t *tdata; prof_tdata_t *tdata;
cassert(config_prof); cassert(config_prof);
if (!prof_booted || tsdn_null(tsdn)) if (!prof_booted)
return; return;
tsd = tsdn_tsd(tsdn); tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, false); tdata = prof_tdata_get(tsd, false);
if (tdata == NULL) if (tdata == NULL)
return; return;
...@@ -1693,48 +1605,50 @@ prof_idump(tsdn_t *tsdn) ...@@ -1693,48 +1605,50 @@ prof_idump(tsdn_t *tsdn)
if (opt_prof_prefix[0] != '\0') { if (opt_prof_prefix[0] != '\0') {
char filename[PATH_MAX + 1]; char filename[PATH_MAX + 1];
malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'i', prof_dump_iseq); prof_dump_filename(filename, 'i', prof_dump_iseq);
prof_dump_iseq++; prof_dump_iseq++;
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); malloc_mutex_unlock(&prof_dump_seq_mtx);
prof_dump(tsd, false, filename, false); prof_dump(tsd, false, filename, false);
} }
} }
bool bool
prof_mdump(tsd_t *tsd, const char *filename) prof_mdump(const char *filename)
{ {
tsd_t *tsd;
char filename_buf[DUMP_FILENAME_BUFSIZE]; char filename_buf[DUMP_FILENAME_BUFSIZE];
cassert(config_prof); cassert(config_prof);
if (!opt_prof || !prof_booted) if (!opt_prof || !prof_booted)
return (true); return (true);
tsd = tsd_fetch();
if (filename == NULL) { if (filename == NULL) {
/* No filename specified, so automatically generate one. */ /* No filename specified, so automatically generate one. */
if (opt_prof_prefix[0] == '\0') if (opt_prof_prefix[0] == '\0')
return (true); return (true);
malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename_buf, 'm', prof_dump_mseq); prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
prof_dump_mseq++; prof_dump_mseq++;
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); malloc_mutex_unlock(&prof_dump_seq_mtx);
filename = filename_buf; filename = filename_buf;
} }
return (prof_dump(tsd, true, filename, false)); return (prof_dump(tsd, true, filename, false));
} }
void void
prof_gdump(tsdn_t *tsdn) prof_gdump(void)
{ {
tsd_t *tsd; tsd_t *tsd;
prof_tdata_t *tdata; prof_tdata_t *tdata;
cassert(config_prof); cassert(config_prof);
if (!prof_booted || tsdn_null(tsdn)) if (!prof_booted)
return; return;
tsd = tsdn_tsd(tsdn); tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, false); tdata = prof_tdata_get(tsd, false);
if (tdata == NULL) if (tdata == NULL)
return; return;
...@@ -1745,10 +1659,10 @@ prof_gdump(tsdn_t *tsdn) ...@@ -1745,10 +1659,10 @@ prof_gdump(tsdn_t *tsdn)
if (opt_prof_prefix[0] != '\0') { if (opt_prof_prefix[0] != '\0') {
char filename[DUMP_FILENAME_BUFSIZE]; char filename[DUMP_FILENAME_BUFSIZE];
malloc_mutex_lock(tsdn, &prof_dump_seq_mtx); malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'u', prof_dump_useq); prof_dump_filename(filename, 'u', prof_dump_useq);
prof_dump_useq++; prof_dump_useq++;
malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx); malloc_mutex_unlock(&prof_dump_seq_mtx);
prof_dump(tsd, false, filename, false); prof_dump(tsd, false, filename, false);
} }
} }
...@@ -1777,14 +1691,14 @@ prof_bt_keycomp(const void *k1, const void *k2) ...@@ -1777,14 +1691,14 @@ prof_bt_keycomp(const void *k1, const void *k2)
} }
JEMALLOC_INLINE_C uint64_t JEMALLOC_INLINE_C uint64_t
prof_thr_uid_alloc(tsdn_t *tsdn) prof_thr_uid_alloc(void)
{ {
uint64_t thr_uid; uint64_t thr_uid;
malloc_mutex_lock(tsdn, &next_thr_uid_mtx); malloc_mutex_lock(&next_thr_uid_mtx);
thr_uid = next_thr_uid; thr_uid = next_thr_uid;
next_thr_uid++; next_thr_uid++;
malloc_mutex_unlock(tsdn, &next_thr_uid_mtx); malloc_mutex_unlock(&next_thr_uid_mtx);
return (thr_uid); return (thr_uid);
} }
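Apart from how the mutex is addressed, prof_thr_uid_alloc() is the same on both sides: a global counter incremented under next_thr_uid_mtx so every thread gets a unique id. A generic pthread sketch of that pattern (jemalloc uses its own malloc_mutex_t wrapper rather than pthread directly):

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t uid_mtx = PTHREAD_MUTEX_INITIALIZER;
static uint64_t next_uid;

/* Hand out a unique, monotonically increasing thread id. */
uint64_t
thr_uid_alloc(void)
{
	uint64_t uid;

	pthread_mutex_lock(&uid_mtx);
	uid = next_uid++;
	pthread_mutex_unlock(&uid_mtx);
	return (uid);
}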
...@@ -1794,13 +1708,14 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, ...@@ -1794,13 +1708,14 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
char *thread_name, bool active) char *thread_name, bool active)
{ {
prof_tdata_t *tdata; prof_tdata_t *tdata;
tcache_t *tcache;
cassert(config_prof); cassert(config_prof);
/* Initialize an empty cache for this thread. */ /* Initialize an empty cache for this thread. */
tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t), tcache = tcache_get(tsd, true);
size2index(sizeof(prof_tdata_t)), false, NULL, true, tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t), false,
arena_get(TSDN_NULL, 0, true), true); tcache, true, NULL);
if (tdata == NULL) if (tdata == NULL)
return (NULL); return (NULL);
...@@ -1812,9 +1727,9 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, ...@@ -1812,9 +1727,9 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
tdata->expired = false; tdata->expired = false;
tdata->tctx_uid_next = 0; tdata->tctx_uid_next = 0;
if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash, if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS,
prof_bt_keycomp)) { prof_bt_hash, prof_bt_keycomp)) {
idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true); idalloctm(tsd, tdata, tcache, true);
return (NULL); return (NULL);
} }
...@@ -1828,9 +1743,9 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, ...@@ -1828,9 +1743,9 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
tdata->dumping = false; tdata->dumping = false;
tdata->active = active; tdata->active = active;
malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); malloc_mutex_lock(&tdatas_mtx);
tdata_tree_insert(&tdatas, tdata); tdata_tree_insert(&tdatas, tdata);
malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); malloc_mutex_unlock(&tdatas_mtx);
return (tdata); return (tdata);
} }
...@@ -1839,12 +1754,13 @@ prof_tdata_t * ...@@ -1839,12 +1754,13 @@ prof_tdata_t *
prof_tdata_init(tsd_t *tsd) prof_tdata_init(tsd_t *tsd)
{ {
return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0, return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0, NULL,
NULL, prof_thread_active_init_get(tsd_tsdn(tsd)))); prof_thread_active_init_get()));
} }
/* tdata->lock must be held. */
static bool static bool
prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) prof_tdata_should_destroy(prof_tdata_t *tdata, bool even_if_attached)
{ {
if (tdata->attached && !even_if_attached) if (tdata->attached && !even_if_attached)
...@@ -1854,40 +1770,32 @@ prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) ...@@ -1854,40 +1770,32 @@ prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached)
return (true); return (true);
} }
static bool /* tdatas_mtx must be held. */
prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
bool even_if_attached)
{
malloc_mutex_assert_owner(tsdn, tdata->lock);
return (prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
}
static void static void
prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
bool even_if_attached) bool even_if_attached)
{ {
tcache_t *tcache;
malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx); assert(prof_tdata_should_destroy(tdata, even_if_attached));
assert(tsd_prof_tdata_get(tsd) != tdata);
tdata_tree_remove(&tdatas, tdata); tdata_tree_remove(&tdatas, tdata);
assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); tcache = tcache_get(tsd, false);
if (tdata->thread_name != NULL) if (tdata->thread_name != NULL)
idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true); idalloctm(tsd, tdata->thread_name, tcache, true);
ckh_delete(tsd, &tdata->bt2tctx); ckh_delete(tsd, &tdata->bt2tctx);
idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true); idalloctm(tsd, tdata, tcache, true);
} }
static void static void
prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached)
{ {
malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); malloc_mutex_lock(&tdatas_mtx);
prof_tdata_destroy_locked(tsd, tdata, even_if_attached); prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); malloc_mutex_unlock(&tdatas_mtx);
} }
static void static void
...@@ -1895,10 +1803,9 @@ prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) ...@@ -1895,10 +1803,9 @@ prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
{ {
bool destroy_tdata; bool destroy_tdata;
malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); malloc_mutex_lock(tdata->lock);
if (tdata->attached) { if (tdata->attached) {
destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, destroy_tdata = prof_tdata_should_destroy(tdata, true);
true);
/* /*
* Only detach if !destroy_tdata, because detaching would allow * Only detach if !destroy_tdata, because detaching would allow
* another thread to win the race to destroy tdata. * another thread to win the race to destroy tdata.
...@@ -1908,7 +1815,7 @@ prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) ...@@ -1908,7 +1815,7 @@ prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
tsd_prof_tdata_set(tsd, NULL); tsd_prof_tdata_set(tsd, NULL);
} else } else
destroy_tdata = false; destroy_tdata = false;
malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); malloc_mutex_unlock(tdata->lock);
if (destroy_tdata) if (destroy_tdata)
prof_tdata_destroy(tsd, tdata, true); prof_tdata_destroy(tsd, tdata, true);
} }
...@@ -1919,7 +1826,7 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) ...@@ -1919,7 +1826,7 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
uint64_t thr_uid = tdata->thr_uid; uint64_t thr_uid = tdata->thr_uid;
uint64_t thr_discrim = tdata->thr_discrim + 1; uint64_t thr_discrim = tdata->thr_discrim + 1;
char *thread_name = (tdata->thread_name != NULL) ? char *thread_name = (tdata->thread_name != NULL) ?
prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL; prof_thread_name_alloc(tsd, tdata->thread_name) : NULL;
bool active = tdata->active; bool active = tdata->active;
prof_tdata_detach(tsd, tdata); prof_tdata_detach(tsd, tdata);
...@@ -1928,18 +1835,18 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) ...@@ -1928,18 +1835,18 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
} }
static bool static bool
prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) prof_tdata_expire(prof_tdata_t *tdata)
{ {
bool destroy_tdata; bool destroy_tdata;
malloc_mutex_lock(tsdn, tdata->lock); malloc_mutex_lock(tdata->lock);
if (!tdata->expired) { if (!tdata->expired) {
tdata->expired = true; tdata->expired = true;
destroy_tdata = tdata->attached ? false : destroy_tdata = tdata->attached ? false :
prof_tdata_should_destroy(tsdn, tdata, false); prof_tdata_should_destroy(tdata, false);
} else } else
destroy_tdata = false; destroy_tdata = false;
malloc_mutex_unlock(tsdn, tdata->lock); malloc_mutex_unlock(tdata->lock);
return (destroy_tdata); return (destroy_tdata);
} }
...@@ -1947,9 +1854,8 @@ prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) ...@@ -1947,9 +1854,8 @@ prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata)
static prof_tdata_t * static prof_tdata_t *
prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{ {
tsdn_t *tsdn = (tsdn_t *)arg;
return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL); return (prof_tdata_expire(tdata) ? tdata : NULL);
} }
void void
...@@ -1959,15 +1865,15 @@ prof_reset(tsd_t *tsd, size_t lg_sample) ...@@ -1959,15 +1865,15 @@ prof_reset(tsd_t *tsd, size_t lg_sample)
assert(lg_sample < (sizeof(uint64_t) << 3)); assert(lg_sample < (sizeof(uint64_t) << 3));
malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); malloc_mutex_lock(&prof_dump_mtx);
malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); malloc_mutex_lock(&tdatas_mtx);
lg_prof_sample = lg_sample; lg_prof_sample = lg_sample;
next = NULL; next = NULL;
do { do {
prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next, prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
prof_tdata_reset_iter, (void *)tsd); prof_tdata_reset_iter, NULL);
if (to_destroy != NULL) { if (to_destroy != NULL) {
next = tdata_tree_next(&tdatas, to_destroy); next = tdata_tree_next(&tdatas, to_destroy);
prof_tdata_destroy_locked(tsd, to_destroy, false); prof_tdata_destroy_locked(tsd, to_destroy, false);
...@@ -1975,8 +1881,8 @@ prof_reset(tsd_t *tsd, size_t lg_sample) ...@@ -1975,8 +1881,8 @@ prof_reset(tsd_t *tsd, size_t lg_sample)
next = NULL; next = NULL;
} while (next != NULL); } while (next != NULL);
malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); malloc_mutex_unlock(&tdatas_mtx);
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); malloc_mutex_unlock(&prof_dump_mtx);
} }
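prof_reset() walks the tdata tree, lets the iterator callback flag expired entries, and then restarts iteration from the recorded successor, because destroying an entry invalidates the current walk. A much-simplified singly-linked-list version of that destroy-while-iterating loop (made-up node type, not jemalloc's red-black tree):

#include <stdbool.h>
#include <stdlib.h>

typedef struct node_s node_t;
struct node_s {
	node_t	*next;
	bool	 expired;
};

/* Remove and free every expired node, saving the successor first. */
static void
reset_list(node_t **head)
{
	node_t *prev = NULL;
	node_t *cur = *head;

	while (cur != NULL) {
		node_t *next = cur->next;	/* Save before destroying. */

		if (cur->expired) {
			if (prev == NULL)
				*head = next;
			else
				prev->next = next;
			free(cur);
		} else
			prev = cur;
		cur = next;
	}
}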
void void
...@@ -1993,33 +1899,35 @@ prof_tdata_cleanup(tsd_t *tsd) ...@@ -1993,33 +1899,35 @@ prof_tdata_cleanup(tsd_t *tsd)
} }
bool bool
prof_active_get(tsdn_t *tsdn) prof_active_get(void)
{ {
bool prof_active_current; bool prof_active_current;
malloc_mutex_lock(tsdn, &prof_active_mtx); malloc_mutex_lock(&prof_active_mtx);
prof_active_current = prof_active; prof_active_current = prof_active;
malloc_mutex_unlock(tsdn, &prof_active_mtx); malloc_mutex_unlock(&prof_active_mtx);
return (prof_active_current); return (prof_active_current);
} }
bool bool
prof_active_set(tsdn_t *tsdn, bool active) prof_active_set(bool active)
{ {
bool prof_active_old; bool prof_active_old;
malloc_mutex_lock(tsdn, &prof_active_mtx); malloc_mutex_lock(&prof_active_mtx);
prof_active_old = prof_active; prof_active_old = prof_active;
prof_active = active; prof_active = active;
malloc_mutex_unlock(tsdn, &prof_active_mtx); malloc_mutex_unlock(&prof_active_mtx);
return (prof_active_old); return (prof_active_old);
} }
const char * const char *
prof_thread_name_get(tsd_t *tsd) prof_thread_name_get(void)
{ {
tsd_t *tsd;
prof_tdata_t *tdata; prof_tdata_t *tdata;
tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, true); tdata = prof_tdata_get(tsd, true);
if (tdata == NULL) if (tdata == NULL)
return (""); return ("");
...@@ -2027,7 +1935,7 @@ prof_thread_name_get(tsd_t *tsd) ...@@ -2027,7 +1935,7 @@ prof_thread_name_get(tsd_t *tsd)
} }
static char * static char *
prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) prof_thread_name_alloc(tsd_t *tsd, const char *thread_name)
{ {
char *ret; char *ret;
size_t size; size_t size;
...@@ -2039,8 +1947,7 @@ prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) ...@@ -2039,8 +1947,7 @@ prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name)
if (size == 1) if (size == 1)
return (""); return ("");
ret = iallocztm(tsdn, size, size2index(size), false, NULL, true, ret = iallocztm(tsd, size, false, tcache_get(tsd, true), true, NULL);
arena_get(TSDN_NULL, 0, true), true);
if (ret == NULL) if (ret == NULL)
return (NULL); return (NULL);
memcpy(ret, thread_name, size); memcpy(ret, thread_name, size);
...@@ -2067,12 +1974,13 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name) ...@@ -2067,12 +1974,13 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name)
return (EFAULT); return (EFAULT);
} }
s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name); s = prof_thread_name_alloc(tsd, thread_name);
if (s == NULL) if (s == NULL)
return (EAGAIN); return (EAGAIN);
if (tdata->thread_name != NULL) { if (tdata->thread_name != NULL) {
idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true); idalloctm(tsd, tdata->thread_name, tcache_get(tsd, false),
true);
tdata->thread_name = NULL; tdata->thread_name = NULL;
} }
if (strlen(s) > 0) if (strlen(s) > 0)
...@@ -2081,10 +1989,12 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name) ...@@ -2081,10 +1989,12 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name)
} }
bool bool
prof_thread_active_get(tsd_t *tsd) prof_thread_active_get(void)
{ {
tsd_t *tsd;
prof_tdata_t *tdata; prof_tdata_t *tdata;
tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, true); tdata = prof_tdata_get(tsd, true);
if (tdata == NULL) if (tdata == NULL)
return (false); return (false);
...@@ -2092,10 +2002,12 @@ prof_thread_active_get(tsd_t *tsd) ...@@ -2092,10 +2002,12 @@ prof_thread_active_get(tsd_t *tsd)
} }
bool bool
prof_thread_active_set(tsd_t *tsd, bool active) prof_thread_active_set(bool active)
{ {
tsd_t *tsd;
prof_tdata_t *tdata; prof_tdata_t *tdata;
tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, true); tdata = prof_tdata_get(tsd, true);
if (tdata == NULL) if (tdata == NULL)
return (true); return (true);
...@@ -2104,48 +2016,48 @@ prof_thread_active_set(tsd_t *tsd, bool active) ...@@ -2104,48 +2016,48 @@ prof_thread_active_set(tsd_t *tsd, bool active)
} }
bool bool
prof_thread_active_init_get(tsdn_t *tsdn) prof_thread_active_init_get(void)
{ {
bool active_init; bool active_init;
malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); malloc_mutex_lock(&prof_thread_active_init_mtx);
active_init = prof_thread_active_init; active_init = prof_thread_active_init;
malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); malloc_mutex_unlock(&prof_thread_active_init_mtx);
return (active_init); return (active_init);
} }
bool bool
prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) prof_thread_active_init_set(bool active_init)
{ {
bool active_init_old; bool active_init_old;
malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); malloc_mutex_lock(&prof_thread_active_init_mtx);
active_init_old = prof_thread_active_init; active_init_old = prof_thread_active_init;
prof_thread_active_init = active_init; prof_thread_active_init = active_init;
malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); malloc_mutex_unlock(&prof_thread_active_init_mtx);
return (active_init_old); return (active_init_old);
} }
bool bool
prof_gdump_get(tsdn_t *tsdn) prof_gdump_get(void)
{ {
bool prof_gdump_current; bool prof_gdump_current;
malloc_mutex_lock(tsdn, &prof_gdump_mtx); malloc_mutex_lock(&prof_gdump_mtx);
prof_gdump_current = prof_gdump_val; prof_gdump_current = prof_gdump_val;
malloc_mutex_unlock(tsdn, &prof_gdump_mtx); malloc_mutex_unlock(&prof_gdump_mtx);
return (prof_gdump_current); return (prof_gdump_current);
} }
bool bool
prof_gdump_set(tsdn_t *tsdn, bool gdump) prof_gdump_set(bool gdump)
{ {
bool prof_gdump_old; bool prof_gdump_old;
malloc_mutex_lock(tsdn, &prof_gdump_mtx); malloc_mutex_lock(&prof_gdump_mtx);
prof_gdump_old = prof_gdump_val; prof_gdump_old = prof_gdump_val;
prof_gdump_val = gdump; prof_gdump_val = gdump;
malloc_mutex_unlock(tsdn, &prof_gdump_mtx); malloc_mutex_unlock(&prof_gdump_mtx);
return (prof_gdump_old); return (prof_gdump_old);
} }
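prof_active, prof_gdump_val, and prof_thread_active_init are all plain flags read and written under a dedicated mutex, with each setter returning the previous value so callers can restore it later. A generic pthread sketch of that getter/setter pair:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t flag_mtx = PTHREAD_MUTEX_INITIALIZER;
static bool flag;

bool
flag_get(void)
{
	bool cur;

	pthread_mutex_lock(&flag_mtx);
	cur = flag;
	pthread_mutex_unlock(&flag_mtx);
	return (cur);
}

/* Set the flag and report its previous value, as the prof_*_set() helpers do. */
bool
flag_set(bool val)
{
	bool old;

	pthread_mutex_lock(&flag_mtx);
	old = flag;
	flag = val;
	pthread_mutex_unlock(&flag_mtx);
	return (old);
}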
...@@ -2186,54 +2098,47 @@ prof_boot1(void) ...@@ -2186,54 +2098,47 @@ prof_boot1(void)
} }
bool bool
prof_boot2(tsd_t *tsd) prof_boot2(void)
{ {
cassert(config_prof); cassert(config_prof);
if (opt_prof) { if (opt_prof) {
tsd_t *tsd;
unsigned i; unsigned i;
lg_prof_sample = opt_lg_prof_sample; lg_prof_sample = opt_lg_prof_sample;
prof_active = opt_prof_active; prof_active = opt_prof_active;
if (malloc_mutex_init(&prof_active_mtx, "prof_active", if (malloc_mutex_init(&prof_active_mtx))
WITNESS_RANK_PROF_ACTIVE))
return (true); return (true);
prof_gdump_val = opt_prof_gdump; prof_gdump_val = opt_prof_gdump;
if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump", if (malloc_mutex_init(&prof_gdump_mtx))
WITNESS_RANK_PROF_GDUMP))
return (true); return (true);
prof_thread_active_init = opt_prof_thread_active_init; prof_thread_active_init = opt_prof_thread_active_init;
if (malloc_mutex_init(&prof_thread_active_init_mtx, if (malloc_mutex_init(&prof_thread_active_init_mtx))
"prof_thread_active_init",
WITNESS_RANK_PROF_THREAD_ACTIVE_INIT))
return (true); return (true);
tsd = tsd_fetch();
if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash, if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
prof_bt_keycomp)) prof_bt_keycomp))
return (true); return (true);
if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx", if (malloc_mutex_init(&bt2gctx_mtx))
WITNESS_RANK_PROF_BT2GCTX))
return (true); return (true);
tdata_tree_new(&tdatas); tdata_tree_new(&tdatas);
if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas", if (malloc_mutex_init(&tdatas_mtx))
WITNESS_RANK_PROF_TDATAS))
return (true); return (true);
next_thr_uid = 0; next_thr_uid = 0;
if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid", if (malloc_mutex_init(&next_thr_uid_mtx))
WITNESS_RANK_PROF_NEXT_THR_UID))
return (true); return (true);
if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq", if (malloc_mutex_init(&prof_dump_seq_mtx))
WITNESS_RANK_PROF_DUMP_SEQ))
return (true); return (true);
if (malloc_mutex_init(&prof_dump_mtx, "prof_dump", if (malloc_mutex_init(&prof_dump_mtx))
WITNESS_RANK_PROF_DUMP))
return (true); return (true);
if (opt_prof_final && opt_prof_prefix[0] != '\0' && if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
...@@ -2243,23 +2148,21 @@ prof_boot2(tsd_t *tsd) ...@@ -2243,23 +2148,21 @@ prof_boot2(tsd_t *tsd)
abort(); abort();
} }
gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), gctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS *
PROF_NCTX_LOCKS * sizeof(malloc_mutex_t)); sizeof(malloc_mutex_t));
if (gctx_locks == NULL) if (gctx_locks == NULL)
return (true); return (true);
for (i = 0; i < PROF_NCTX_LOCKS; i++) { for (i = 0; i < PROF_NCTX_LOCKS; i++) {
if (malloc_mutex_init(&gctx_locks[i], "prof_gctx", if (malloc_mutex_init(&gctx_locks[i]))
WITNESS_RANK_PROF_GCTX))
return (true); return (true);
} }
tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), tdata_locks = (malloc_mutex_t *)base_alloc(PROF_NTDATA_LOCKS *
PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t)); sizeof(malloc_mutex_t));
if (tdata_locks == NULL) if (tdata_locks == NULL)
return (true); return (true);
for (i = 0; i < PROF_NTDATA_LOCKS; i++) { for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
if (malloc_mutex_init(&tdata_locks[i], "prof_tdata", if (malloc_mutex_init(&tdata_locks[i]))
WITNESS_RANK_PROF_TDATA))
return (true); return (true);
} }
} }
...@@ -2278,77 +2181,56 @@ prof_boot2(tsd_t *tsd) ...@@ -2278,77 +2181,56 @@ prof_boot2(tsd_t *tsd)
} }
void void
prof_prefork0(tsdn_t *tsdn) prof_prefork(void)
{ {
if (opt_prof) { if (opt_prof) {
unsigned i; unsigned i;
malloc_mutex_prefork(tsdn, &prof_dump_mtx); malloc_mutex_prefork(&tdatas_mtx);
malloc_mutex_prefork(tsdn, &bt2gctx_mtx); malloc_mutex_prefork(&bt2gctx_mtx);
malloc_mutex_prefork(tsdn, &tdatas_mtx); malloc_mutex_prefork(&next_thr_uid_mtx);
for (i = 0; i < PROF_NTDATA_LOCKS; i++) malloc_mutex_prefork(&prof_dump_seq_mtx);
malloc_mutex_prefork(tsdn, &tdata_locks[i]);
for (i = 0; i < PROF_NCTX_LOCKS; i++) for (i = 0; i < PROF_NCTX_LOCKS; i++)
malloc_mutex_prefork(tsdn, &gctx_locks[i]); malloc_mutex_prefork(&gctx_locks[i]);
} for (i = 0; i < PROF_NTDATA_LOCKS; i++)
} malloc_mutex_prefork(&tdata_locks[i]);
void
prof_prefork1(tsdn_t *tsdn)
{
if (opt_prof) {
malloc_mutex_prefork(tsdn, &prof_active_mtx);
malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
} }
} }
void void
prof_postfork_parent(tsdn_t *tsdn) prof_postfork_parent(void)
{ {
if (opt_prof) { if (opt_prof) {
unsigned i; unsigned i;
malloc_mutex_postfork_parent(tsdn,
&prof_thread_active_init_mtx);
malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
for (i = 0; i < PROF_NTDATA_LOCKS; i++) for (i = 0; i < PROF_NTDATA_LOCKS; i++)
malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]); malloc_mutex_postfork_parent(&tdata_locks[i]);
malloc_mutex_postfork_parent(tsdn, &tdatas_mtx); for (i = 0; i < PROF_NCTX_LOCKS; i++)
malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx); malloc_mutex_postfork_parent(&gctx_locks[i]);
malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx); malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
malloc_mutex_postfork_parent(&next_thr_uid_mtx);
malloc_mutex_postfork_parent(&bt2gctx_mtx);
malloc_mutex_postfork_parent(&tdatas_mtx);
} }
} }
void void
prof_postfork_child(tsdn_t *tsdn) prof_postfork_child(void)
{ {
if (opt_prof) { if (opt_prof) {
unsigned i; unsigned i;
malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
for (i = 0; i < PROF_NTDATA_LOCKS; i++) for (i = 0; i < PROF_NTDATA_LOCKS; i++)
malloc_mutex_postfork_child(tsdn, &tdata_locks[i]); malloc_mutex_postfork_child(&tdata_locks[i]);
malloc_mutex_postfork_child(tsdn, &tdatas_mtx); for (i = 0; i < PROF_NCTX_LOCKS; i++)
malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx); malloc_mutex_postfork_child(&gctx_locks[i]);
malloc_mutex_postfork_child(tsdn, &prof_dump_mtx); malloc_mutex_postfork_child(&prof_dump_seq_mtx);
malloc_mutex_postfork_child(&next_thr_uid_mtx);
malloc_mutex_postfork_child(&bt2gctx_mtx);
malloc_mutex_postfork_child(&tdatas_mtx);
} }
} }
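Whichever signature they carry, the prefork/postfork hooks exist so that fork() never duplicates a profiling mutex while another thread holds it: every lock is acquired before the fork and released in both the parent and the child afterwards. A minimal pthread_atfork() sketch of that idiom, with two hypothetical locks standing in for the profiling mutexes:

#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

/* Acquire every lock, in a fixed order, before fork(). */
static void
prefork(void)
{
	pthread_mutex_lock(&lock_a);
	pthread_mutex_lock(&lock_b);
}

/* Release them again in reverse order, in both parent and child. */
static void
postfork(void)
{
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

void
install_fork_handlers(void)
{
	pthread_atfork(prefork, postfork, postfork);
}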
......
...@@ -13,22 +13,22 @@ ...@@ -13,22 +13,22 @@
/* Function prototypes for non-inline static functions. */ /* Function prototypes for non-inline static functions. */
static quarantine_t *quarantine_grow(tsd_t *tsd, quarantine_t *quarantine); static quarantine_t *quarantine_grow(tsd_t *tsd, quarantine_t *quarantine);
static void quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine); static void quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine);
static void quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine, static void quarantine_drain(tsd_t *tsd, quarantine_t *quarantine,
size_t upper_bound); size_t upper_bound);
/******************************************************************************/ /******************************************************************************/
static quarantine_t * static quarantine_t *
quarantine_init(tsdn_t *tsdn, size_t lg_maxobjs) quarantine_init(tsd_t *tsd, size_t lg_maxobjs)
{ {
quarantine_t *quarantine; quarantine_t *quarantine;
size_t size;
size = offsetof(quarantine_t, objs) + ((ZU(1) << lg_maxobjs) * assert(tsd_nominal(tsd));
sizeof(quarantine_obj_t));
quarantine = (quarantine_t *)iallocztm(tsdn, size, size2index(size), quarantine = (quarantine_t *)iallocztm(tsd, offsetof(quarantine_t, objs)
false, NULL, true, arena_get(TSDN_NULL, 0, true), true); + ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)), false,
tcache_get(tsd, true), true, NULL);
if (quarantine == NULL) if (quarantine == NULL)
return (NULL); return (NULL);
quarantine->curbytes = 0; quarantine->curbytes = 0;
...@@ -47,7 +47,7 @@ quarantine_alloc_hook_work(tsd_t *tsd) ...@@ -47,7 +47,7 @@ quarantine_alloc_hook_work(tsd_t *tsd)
if (!tsd_nominal(tsd)) if (!tsd_nominal(tsd))
return; return;
quarantine = quarantine_init(tsd_tsdn(tsd), LG_MAXOBJS_INIT); quarantine = quarantine_init(tsd, LG_MAXOBJS_INIT);
/* /*
* Check again whether quarantine has been initialized, because * Check again whether quarantine has been initialized, because
* quarantine_init() may have triggered recursive initialization. * quarantine_init() may have triggered recursive initialization.
...@@ -55,7 +55,7 @@ quarantine_alloc_hook_work(tsd_t *tsd) ...@@ -55,7 +55,7 @@ quarantine_alloc_hook_work(tsd_t *tsd)
if (tsd_quarantine_get(tsd) == NULL) if (tsd_quarantine_get(tsd) == NULL)
tsd_quarantine_set(tsd, quarantine); tsd_quarantine_set(tsd, quarantine);
else else
idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true); idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
} }
static quarantine_t * static quarantine_t *
...@@ -63,9 +63,9 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine) ...@@ -63,9 +63,9 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
{ {
quarantine_t *ret; quarantine_t *ret;
ret = quarantine_init(tsd_tsdn(tsd), quarantine->lg_maxobjs + 1); ret = quarantine_init(tsd, quarantine->lg_maxobjs + 1);
if (ret == NULL) { if (ret == NULL) {
quarantine_drain_one(tsd_tsdn(tsd), quarantine); quarantine_drain_one(tsd, quarantine);
return (quarantine); return (quarantine);
} }
...@@ -87,18 +87,18 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine) ...@@ -87,18 +87,18 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b * memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
sizeof(quarantine_obj_t)); sizeof(quarantine_obj_t));
} }
idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true); idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
tsd_quarantine_set(tsd, ret); tsd_quarantine_set(tsd, ret);
return (ret); return (ret);
} }
static void static void
quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine) quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine)
{ {
quarantine_obj_t *obj = &quarantine->objs[quarantine->first]; quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
assert(obj->usize == isalloc(tsdn, obj->ptr, config_prof)); assert(obj->usize == isalloc(obj->ptr, config_prof));
idalloctm(tsdn, obj->ptr, NULL, false, true); idalloctm(tsd, obj->ptr, NULL, false);
quarantine->curbytes -= obj->usize; quarantine->curbytes -= obj->usize;
quarantine->curobjs--; quarantine->curobjs--;
quarantine->first = (quarantine->first + 1) & ((ZU(1) << quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
...@@ -106,24 +106,24 @@ quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine) ...@@ -106,24 +106,24 @@ quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine)
} }
static void static void
quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine, size_t upper_bound) quarantine_drain(tsd_t *tsd, quarantine_t *quarantine, size_t upper_bound)
{ {
while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0) while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
quarantine_drain_one(tsdn, quarantine); quarantine_drain_one(tsd, quarantine);
} }
void void
quarantine(tsd_t *tsd, void *ptr) quarantine(tsd_t *tsd, void *ptr)
{ {
quarantine_t *quarantine; quarantine_t *quarantine;
size_t usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); size_t usize = isalloc(ptr, config_prof);
cassert(config_fill); cassert(config_fill);
assert(opt_quarantine); assert(opt_quarantine);
if ((quarantine = tsd_quarantine_get(tsd)) == NULL) { if ((quarantine = tsd_quarantine_get(tsd)) == NULL) {
idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true); idalloctm(tsd, ptr, NULL, false);
return; return;
} }
/* /*
...@@ -133,7 +133,7 @@ quarantine(tsd_t *tsd, void *ptr) ...@@ -133,7 +133,7 @@ quarantine(tsd_t *tsd, void *ptr)
if (quarantine->curbytes + usize > opt_quarantine) { if (quarantine->curbytes + usize > opt_quarantine) {
size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
- usize : 0; - usize : 0;
quarantine_drain(tsd_tsdn(tsd), quarantine, upper_bound); quarantine_drain(tsd, quarantine, upper_bound);
} }
/* Grow the quarantine ring buffer if it's full. */ /* Grow the quarantine ring buffer if it's full. */
if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs)) if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
...@@ -158,11 +158,11 @@ quarantine(tsd_t *tsd, void *ptr) ...@@ -158,11 +158,11 @@ quarantine(tsd_t *tsd, void *ptr)
&& usize <= SMALL_MAXCLASS) && usize <= SMALL_MAXCLASS)
arena_quarantine_junk_small(ptr, usize); arena_quarantine_junk_small(ptr, usize);
else else
memset(ptr, JEMALLOC_FREE_JUNK, usize); memset(ptr, 0x5a, usize);
} }
} else { } else {
assert(quarantine->curbytes == 0); assert(quarantine->curbytes == 0);
idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true); idalloctm(tsd, ptr, NULL, false);
} }
} }
...@@ -176,8 +176,8 @@ quarantine_cleanup(tsd_t *tsd) ...@@ -176,8 +176,8 @@ quarantine_cleanup(tsd_t *tsd)
quarantine = tsd_quarantine_get(tsd); quarantine = tsd_quarantine_get(tsd);
if (quarantine != NULL) { if (quarantine != NULL) {
quarantine_drain(tsd_tsdn(tsd), quarantine, 0); quarantine_drain(tsd, quarantine, 0);
idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true); idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
tsd_quarantine_set(tsd, NULL); tsd_quarantine_set(tsd, NULL);
} }
} }
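The quarantine is a fixed-capacity ring buffer of freed objects: objs holds the entries, first indexes the oldest one, and draining pops from the head until curbytes falls under the requested bound. A reduced sketch of that ring arithmetic, with simplified stand-in types for quarantine_t (the real code routes the free through idalloctm() rather than free()):

#include <stddef.h>
#include <stdlib.h>

typedef struct {
	void	*ptr;
	size_t	 usize;
} qobj_t;

typedef struct {
	size_t	 curbytes;
	size_t	 curobjs;
	size_t	 first;		/* Index of the oldest entry. */
	size_t	 lg_maxobjs;	/* Capacity is 1 << lg_maxobjs. */
	qobj_t	 objs[];
} quar_t;

/* Free the oldest quarantined object and advance the ring head. */
static void
drain_one(quar_t *q)
{
	qobj_t *obj = &q->objs[q->first];

	free(obj->ptr);
	q->curbytes -= obj->usize;
	q->curobjs--;
	q->first = (q->first + 1) & (((size_t)1 << q->lg_maxobjs) - 1);
}

/* Drain until at most upper_bound bytes remain quarantined. */
static void
drain(quar_t *q, size_t upper_bound)
{
	while (q->curbytes > upper_bound && q->curobjs > 0)
		drain_one(q);
}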
...@@ -15,8 +15,6 @@ rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc, ...@@ -15,8 +15,6 @@ rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
{ {
unsigned bits_in_leaf, height, i; unsigned bits_in_leaf, height, i;
assert(RTREE_HEIGHT_MAX == ((ZU(1) << (LG_SIZEOF_PTR+3)) /
RTREE_BITS_PER_LEVEL));
assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3)); assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));
bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL
...@@ -96,15 +94,12 @@ rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp) ...@@ -96,15 +94,12 @@ rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp)
rtree_node_elm_t *node; rtree_node_elm_t *node;
if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) { if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) {
spin_t spinner;
/* /*
* Another thread is already in the process of initializing. * Another thread is already in the process of initializing.
* Spin-wait until initialization is complete. * Spin-wait until initialization is complete.
*/ */
spin_init(&spinner);
do { do {
spin_adaptive(&spinner); CPU_SPINWAIT;
node = atomic_read_p((void **)elmp); node = atomic_read_p((void **)elmp);
} while (node == RTREE_NODE_INITIALIZING); } while (node == RTREE_NODE_INITIALIZING);
} else { } else {
...@@ -128,5 +123,5 @@ rtree_node_elm_t * ...@@ -128,5 +123,5 @@ rtree_node_elm_t *
rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level) rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
{ {
return (rtree_node_init(rtree, level+1, &elm->child)); return (rtree_node_init(rtree, level, &elm->child));
} }
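rtree_node_init() guarantees that a subtree node is allocated exactly once even when several threads fault on it concurrently: the CAS winner installs a sentinel, allocates, and publishes the node, while losers spin (via CPU_SPINWAIT or the spin_adaptive() helper being reverted here) until the sentinel disappears. A small C11-atomics sketch of the same once-initialization pattern, with an illustrative sentinel and node type:

#include <stdatomic.h>
#include <stdlib.h>

#define INITIALIZING	((void *)1)	/* Sentinel: init in progress. */

typedef struct node_s node_t;
struct node_s {
	node_t	*child[16];
};

/* Return *slot, allocating it exactly once across all threads. */
node_t *
node_init_once(node_t *_Atomic *slot)
{
	node_t *expected = NULL;
	node_t *n;

	if (atomic_compare_exchange_strong(slot, &expected,
	    (node_t *)INITIALIZING)) {
		/* Won the race: allocate and publish the node. */
		n = calloc(1, sizeof(*n));
		atomic_store(slot, n);
	} else {
		/* Lost the race: spin until the winner publishes. */
		do {
			n = atomic_load(slot);
		} while (n == (node_t *)INITIALIZING);
	}
	return (n);
}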
#define JEMALLOC_SPIN_C_
#include "jemalloc/internal/jemalloc_internal.h"
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
#define CTL_GET(n, v, t) do { \ #define CTL_GET(n, v, t) do { \
size_t sz = sizeof(t); \ size_t sz = sizeof(t); \
xmallctl(n, (void *)v, &sz, NULL, 0); \ xmallctl(n, v, &sz, NULL, 0); \
} while (0) } while (0)
#define CTL_M2_GET(n, i, v, t) do { \ #define CTL_M2_GET(n, i, v, t) do { \
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
size_t sz = sizeof(t); \ size_t sz = sizeof(t); \
xmallctlnametomib(n, mib, &miblen); \ xmallctlnametomib(n, mib, &miblen); \
mib[2] = (i); \ mib[2] = (i); \
xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0) } while (0)
#define CTL_M2_M4_GET(n, i, j, v, t) do { \ #define CTL_M2_M4_GET(n, i, j, v, t) do { \
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
xmallctlnametomib(n, mib, &miblen); \ xmallctlnametomib(n, mib, &miblen); \
mib[2] = (i); \ mib[2] = (i); \
mib[4] = (j); \ mib[4] = (j); \
xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0) } while (0)
/******************************************************************************/ /******************************************************************************/
...@@ -32,107 +32,86 @@ bool opt_stats_print = false; ...@@ -32,107 +32,86 @@ bool opt_stats_print = false;
size_t stats_cactive = 0; size_t stats_cactive = 0;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void stats_arena_bins_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i);
static void stats_arena_lruns_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i);
static void stats_arena_hchunks_print(
void (*write_cb)(void *, const char *), void *cbopaque, unsigned i);
static void stats_arena_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i, bool bins, bool large, bool huge);
/******************************************************************************/ /******************************************************************************/
static void static void
stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
bool json, bool large, bool huge, unsigned i) unsigned i)
{ {
size_t page; size_t page;
bool config_tcache, in_gap, in_gap_prev; bool config_tcache, in_gap;
unsigned nbins, j; unsigned nbins, j;
CTL_GET("arenas.page", &page, size_t); CTL_GET("arenas.page", &page, size_t);
CTL_GET("arenas.nbins", &nbins, unsigned); CTL_GET("config.tcache", &config_tcache, bool);
if (json) { if (config_tcache) {
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"bins\": [\n"); "bins: size ind allocated nmalloc"
" ndalloc nrequests curregs curruns regs"
" pgs util nfills nflushes newruns"
" reruns\n");
} else { } else {
CTL_GET("config.tcache", &config_tcache, bool); malloc_cprintf(write_cb, cbopaque,
if (config_tcache) { "bins: size ind allocated nmalloc"
malloc_cprintf(write_cb, cbopaque, " ndalloc nrequests curregs curruns regs"
"bins: size ind allocated nmalloc" " pgs util newruns reruns\n");
" ndalloc nrequests curregs"
" curruns regs pgs util nfills"
" nflushes newruns reruns\n");
} else {
malloc_cprintf(write_cb, cbopaque,
"bins: size ind allocated nmalloc"
" ndalloc nrequests curregs"
" curruns regs pgs util newruns"
" reruns\n");
}
} }
CTL_GET("arenas.nbins", &nbins, unsigned);
for (j = 0, in_gap = false; j < nbins; j++) { for (j = 0, in_gap = false; j < nbins; j++) {
uint64_t nruns; uint64_t nruns;
size_t reg_size, run_size, curregs;
size_t curruns;
uint32_t nregs;
uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
uint64_t nreruns;
CTL_M2_M4_GET("stats.arenas.0.bins.0.nruns", i, j, &nruns, CTL_M2_M4_GET("stats.arenas.0.bins.0.nruns", i, j, &nruns,
uint64_t); uint64_t);
in_gap_prev = in_gap; if (nruns == 0)
in_gap = (nruns == 0); in_gap = true;
else {
if (!json && in_gap_prev && !in_gap) { size_t reg_size, run_size, curregs, availregs, milli;
malloc_cprintf(write_cb, cbopaque, size_t curruns;
" ---\n"); uint32_t nregs;
} uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
uint64_t reruns;
CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t); char util[6]; /* "x.yyy". */
CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
CTL_M2_GET("arenas.bin.0.run_size", j, &run_size, size_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc,
uint64_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc,
uint64_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs,
size_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
&nrequests, uint64_t);
if (config_tcache) {
CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j,
&nfills, uint64_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j,
&nflushes, uint64_t);
}
CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j, &nreruns,
uint64_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j, &curruns,
size_t);
if (json) { if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t{\n"
"\t\t\t\t\t\t\"nmalloc\": %"FMTu64",\n"
"\t\t\t\t\t\t\"ndalloc\": %"FMTu64",\n"
"\t\t\t\t\t\t\"curregs\": %zu,\n"
"\t\t\t\t\t\t\"nrequests\": %"FMTu64",\n",
nmalloc,
ndalloc,
curregs,
nrequests);
if (config_tcache) {
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\t\"nfills\": %"FMTu64",\n" " ---\n");
"\t\t\t\t\t\t\"nflushes\": %"FMTu64",\n", in_gap = false;
nfills,
nflushes);
} }
malloc_cprintf(write_cb, cbopaque, CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
"\t\t\t\t\t\t\"nreruns\": %"FMTu64",\n" CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
"\t\t\t\t\t\t\"curruns\": %zu\n" CTL_M2_GET("arenas.bin.0.run_size", j, &run_size,
"\t\t\t\t\t}%s\n", size_t);
nreruns, CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j,
curruns, &nmalloc, uint64_t);
(j + 1 < nbins) ? "," : ""); CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j,
} else if (!in_gap) { &ndalloc, uint64_t);
size_t availregs, milli; CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j,
char util[6]; /* "x.yyy". */ &curregs, size_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
&nrequests, uint64_t);
if (config_tcache) {
CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i,
j, &nfills, uint64_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes",
i, j, &nflushes, uint64_t);
}
CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j,
&reruns, uint64_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j,
&curruns, size_t);
availregs = nregs * curruns; availregs = nregs * curruns;
milli = (availregs != 0) ? (1000 * curregs) / availregs milli = (availregs != 0) ? (1000 * curregs) / availregs
...@@ -159,7 +138,7 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, ...@@ -159,7 +138,7 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
reg_size, j, curregs * reg_size, nmalloc, reg_size, j, curregs * reg_size, nmalloc,
ndalloc, nrequests, curregs, curruns, nregs, ndalloc, nrequests, curregs, curruns, nregs,
run_size / page, util, nfills, nflushes, run_size / page, util, nfills, nflushes,
nruns, nreruns); nruns, reruns);
} else { } else {
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"%20zu %3u %12zu %12"FMTu64 "%20zu %3u %12zu %12"FMTu64
...@@ -168,38 +147,28 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, ...@@ -168,38 +147,28 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
" %12"FMTu64"\n", " %12"FMTu64"\n",
reg_size, j, curregs * reg_size, nmalloc, reg_size, j, curregs * reg_size, nmalloc,
ndalloc, nrequests, curregs, curruns, nregs, ndalloc, nrequests, curregs, curruns, nregs,
run_size / page, util, nruns, nreruns); run_size / page, util, nruns, reruns);
} }
} }
} }
if (json) { if (in_gap) {
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t]%s\n", (large || huge) ? "," : ""); " ---\n");
} else {
if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
" ---\n");
}
} }
} }
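The "util" column both versions print is region utilization in fixed point: curregs regions in use out of nregs * curruns available, scaled to thousandths and rendered as "x.yyy". A tiny sketch of that arithmetic (the edge-case formatting in the real code may differ slightly):

#include <stdio.h>

/* Format region utilization as "0.yyy", clamping full bins to "1". */
static void
format_util(char *buf, size_t len, size_t curregs, size_t nregs,
    size_t curruns)
{
	size_t availregs = nregs * curruns;
	size_t milli = (availregs != 0) ? (1000 * curregs) / availregs : 0;

	if (milli < 1000)
		snprintf(buf, len, "0.%03zu", milli);
	else
		snprintf(buf, len, "1");
}

int
main(void)
{
	char util[6];	/* "x.yyy" plus NUL. */

	format_util(util, sizeof(util), 7, 4, 4);	/* 7 of 16 regions. */
	puts(util);					/* Prints "0.437". */
	return (0);
}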
static void static void
stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque, stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
bool json, bool huge, unsigned i) unsigned i)
{ {
unsigned nbins, nlruns, j; unsigned nbins, nlruns, j;
bool in_gap, in_gap_prev; bool in_gap;
malloc_cprintf(write_cb, cbopaque,
"large: size ind allocated nmalloc ndalloc"
" nrequests curruns\n");
CTL_GET("arenas.nbins", &nbins, unsigned); CTL_GET("arenas.nbins", &nbins, unsigned);
CTL_GET("arenas.nlruns", &nlruns, unsigned); CTL_GET("arenas.nlruns", &nlruns, unsigned);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"lruns\": [\n");
} else {
malloc_cprintf(write_cb, cbopaque,
"large: size ind allocated nmalloc"
" ndalloc nrequests curruns\n");
}
for (j = 0, in_gap = false; j < nlruns; j++) { for (j = 0, in_gap = false; j < nlruns; j++) {
uint64_t nmalloc, ndalloc, nrequests; uint64_t nmalloc, ndalloc, nrequests;
size_t run_size, curruns; size_t run_size, curruns;
...@@ -210,25 +179,17 @@ stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque, ...@@ -210,25 +179,17 @@ stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
uint64_t); uint64_t);
CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j, CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j,
&nrequests, uint64_t); &nrequests, uint64_t);
in_gap_prev = in_gap; if (nrequests == 0)
in_gap = (nrequests == 0); in_gap = true;
else {
if (!json && in_gap_prev && !in_gap) { CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t);
malloc_cprintf(write_cb, cbopaque, CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j,
" ---\n"); &curruns, size_t);
} if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t); " ---\n");
CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j, &curruns, in_gap = false;
size_t); }
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t{\n"
"\t\t\t\t\t\t\"curruns\": %zu\n"
"\t\t\t\t\t}%s\n",
curruns,
(j + 1 < nlruns) ? "," : "");
} else if (!in_gap) {
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"%20zu %3u %12zu %12"FMTu64" %12"FMTu64 "%20zu %3u %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64" %12zu\n", " %12"FMTu64" %12zu\n",
...@@ -236,35 +197,25 @@ stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque, ...@@ -236,35 +197,25 @@ stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
ndalloc, nrequests, curruns); ndalloc, nrequests, curruns);
} }
} }
if (json) { if (in_gap) {
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t]%s\n", huge ? "," : ""); " ---\n");
} else {
if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
" ---\n");
}
} }
} }
static void static void
stats_arena_hchunks_print(void (*write_cb)(void *, const char *), stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
void *cbopaque, bool json, unsigned i) void *cbopaque, unsigned i)
{ {
unsigned nbins, nlruns, nhchunks, j; unsigned nbins, nlruns, nhchunks, j;
bool in_gap, in_gap_prev; bool in_gap;
malloc_cprintf(write_cb, cbopaque,
"huge: size ind allocated nmalloc ndalloc"
" nrequests curhchunks\n");
CTL_GET("arenas.nbins", &nbins, unsigned); CTL_GET("arenas.nbins", &nbins, unsigned);
CTL_GET("arenas.nlruns", &nlruns, unsigned); CTL_GET("arenas.nlruns", &nlruns, unsigned);
CTL_GET("arenas.nhchunks", &nhchunks, unsigned); CTL_GET("arenas.nhchunks", &nhchunks, unsigned);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"hchunks\": [\n");
} else {
malloc_cprintf(write_cb, cbopaque,
"huge: size ind allocated nmalloc"
" ndalloc nrequests curhchunks\n");
}
for (j = 0, in_gap = false; j < nhchunks; j++) { for (j = 0, in_gap = false; j < nhchunks; j++) {
uint64_t nmalloc, ndalloc, nrequests; uint64_t nmalloc, ndalloc, nrequests;
size_t hchunk_size, curhchunks; size_t hchunk_size, curhchunks;
...@@ -275,25 +226,18 @@ stats_arena_hchunks_print(void (*write_cb)(void *, const char *), ...@@ -275,25 +226,18 @@ stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
&ndalloc, uint64_t); &ndalloc, uint64_t);
CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nrequests", i, j, CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nrequests", i, j,
&nrequests, uint64_t); &nrequests, uint64_t);
in_gap_prev = in_gap; if (nrequests == 0)
in_gap = (nrequests == 0); in_gap = true;
else {
if (!json && in_gap_prev && !in_gap) { CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size,
malloc_cprintf(write_cb, cbopaque, size_t);
" ---\n"); CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i,
} j, &curhchunks, size_t);
if (in_gap) {
CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size, size_t); malloc_cprintf(write_cb, cbopaque,
CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i, j, " ---\n");
&curhchunks, size_t); in_gap = false;
if (json) { }
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t{\n"
"\t\t\t\t\t\t\"curhchunks\": %zu\n"
"\t\t\t\t\t}%s\n",
curhchunks,
(j + 1 < nhchunks) ? "," : "");
} else if (!in_gap) {
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"%20zu %3u %12zu %12"FMTu64" %12"FMTu64 "%20zu %3u %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64" %12zu\n", " %12"FMTu64" %12zu\n",
...@@ -302,25 +246,20 @@ stats_arena_hchunks_print(void (*write_cb)(void *, const char *), ...@@ -302,25 +246,20 @@ stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
nrequests, curhchunks); nrequests, curhchunks);
} }
} }
if (json) { if (in_gap) {
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t]\n"); " ---\n");
} else {
if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
" ---\n");
}
} }
} }
static void static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
bool json, unsigned i, bool bins, bool large, bool huge) unsigned i, bool bins, bool large, bool huge)
{ {
unsigned nthreads; unsigned nthreads;
const char *dss; const char *dss;
ssize_t lg_dirty_mult, decay_time; ssize_t lg_dirty_mult;
size_t page, pactive, pdirty, mapped, retained; size_t page, pactive, pdirty, mapped;
size_t metadata_mapped, metadata_allocated; size_t metadata_mapped, metadata_allocated;
uint64_t npurge, nmadvise, purged; uint64_t npurge, nmadvise, purged;
size_t small_allocated; size_t small_allocated;
...@@ -333,435 +272,240 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, ...@@ -333,435 +272,240 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_GET("arenas.page", &page, size_t); CTL_GET("arenas.page", &page, size_t);
CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned); CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned);
if (json) { malloc_cprintf(write_cb, cbopaque,
malloc_cprintf(write_cb, cbopaque, "assigned threads: %u\n", nthreads);
"\t\t\t\t\"nthreads\": %u,\n", nthreads);
} else {
malloc_cprintf(write_cb, cbopaque,
"assigned threads: %u\n", nthreads);
}
CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *); CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *);
if (json) { malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n",
malloc_cprintf(write_cb, cbopaque, dss);
"\t\t\t\t\"dss\": \"%s\",\n", dss);
} else {
malloc_cprintf(write_cb, cbopaque,
"dss allocation precedence: %s\n", dss);
}
CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t); CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t);
if (json) { if (lg_dirty_mult >= 0) {
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"lg_dirty_mult\": %zd,\n", lg_dirty_mult); "min active:dirty page ratio: %u:1\n",
(1U << lg_dirty_mult));
} else { } else {
if (opt_purge == purge_mode_ratio) {
if (lg_dirty_mult >= 0) {
malloc_cprintf(write_cb, cbopaque,
"min active:dirty page ratio: %u:1\n",
(1U << lg_dirty_mult));
} else {
malloc_cprintf(write_cb, cbopaque,
"min active:dirty page ratio: N/A\n");
}
}
}
CTL_M2_GET("stats.arenas.0.decay_time", i, &decay_time, ssize_t);
if (json) {
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"decay_time\": %zd,\n", decay_time); "min active:dirty page ratio: N/A\n");
} else {
if (opt_purge == purge_mode_decay) {
if (decay_time >= 0) {
malloc_cprintf(write_cb, cbopaque,
"decay time: %zd\n", decay_time);
} else {
malloc_cprintf(write_cb, cbopaque,
"decay time: N/A\n");
}
}
} }
CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t); CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t); CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t);
CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t); CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t);
CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t); CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t);
CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t); CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t);
if (json) { malloc_cprintf(write_cb, cbopaque,
malloc_cprintf(write_cb, cbopaque, "dirty pages: %zu:%zu active:dirty, %"FMTu64" sweep%s, %"FMTu64
"\t\t\t\t\"pactive\": %zu,\n", pactive); " madvise%s, %"FMTu64" purged\n", pactive, pdirty, npurge, npurge ==
malloc_cprintf(write_cb, cbopaque, 1 ? "" : "s", nmadvise, nmadvise == 1 ? "" : "s", purged);
"\t\t\t\t\"pdirty\": %zu,\n", pdirty);
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"npurge\": %"FMTu64",\n", npurge); " allocated nmalloc ndalloc"
malloc_cprintf(write_cb, cbopaque, " nrequests\n");
"\t\t\t\t\"nmadvise\": %"FMTu64",\n", nmadvise);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"purged\": %"FMTu64",\n", purged);
} else {
malloc_cprintf(write_cb, cbopaque,
"purging: dirty: %zu, sweeps: %"FMTu64", madvises: %"FMTu64
", purged: %"FMTu64"\n", pdirty, npurge, nmadvise, purged);
}
CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated, CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated,
size_t); size_t);
CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t); CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t); CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests, CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests,
uint64_t); uint64_t);
if (json) { malloc_cprintf(write_cb, cbopaque,
malloc_cprintf(write_cb, cbopaque, "small: %12zu %12"FMTu64" %12"FMTu64
"\t\t\t\t\"small\": {\n"); " %12"FMTu64"\n",
small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"allocated\": %zu,\n", small_allocated);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", small_nmalloc);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", small_ndalloc);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"nrequests\": %"FMTu64"\n", small_nrequests);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t},\n");
} else {
malloc_cprintf(write_cb, cbopaque,
" allocated nmalloc"
" ndalloc nrequests\n");
malloc_cprintf(write_cb, cbopaque,
"small: %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64"\n",
small_allocated, small_nmalloc, small_ndalloc,
small_nrequests);
}
CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated, CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated,
size_t); size_t);
CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t); CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t); CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests, CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests,
uint64_t); uint64_t);
if (json) { malloc_cprintf(write_cb, cbopaque,
malloc_cprintf(write_cb, cbopaque, "large: %12zu %12"FMTu64" %12"FMTu64
"\t\t\t\t\"large\": {\n"); " %12"FMTu64"\n",
large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"allocated\": %zu,\n", large_allocated);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", large_nmalloc);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", large_ndalloc);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"nrequests\": %"FMTu64"\n", large_nrequests);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t},\n");
} else {
malloc_cprintf(write_cb, cbopaque,
"large: %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64"\n",
large_allocated, large_nmalloc, large_ndalloc,
large_nrequests);
}
CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t); CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t);
CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t); CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t); CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.huge.nrequests", i, &huge_nrequests, CTL_M2_GET("stats.arenas.0.huge.nrequests", i, &huge_nrequests,
uint64_t); uint64_t);
if (json) { malloc_cprintf(write_cb, cbopaque,
malloc_cprintf(write_cb, cbopaque, "huge: %12zu %12"FMTu64" %12"FMTu64
"\t\t\t\t\"huge\": {\n"); " %12"FMTu64"\n",
huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"allocated\": %zu,\n", huge_allocated); "total: %12zu %12"FMTu64" %12"FMTu64
malloc_cprintf(write_cb, cbopaque, " %12"FMTu64"\n",
"\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", huge_nmalloc); small_allocated + large_allocated + huge_allocated,
malloc_cprintf(write_cb, cbopaque, small_nmalloc + large_nmalloc + huge_nmalloc,
"\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", huge_ndalloc); small_ndalloc + large_ndalloc + huge_ndalloc,
malloc_cprintf(write_cb, cbopaque, small_nrequests + large_nrequests + huge_nrequests);
"\t\t\t\t\t\"nrequests\": %"FMTu64"\n", huge_nrequests); malloc_cprintf(write_cb, cbopaque,
"active: %12zu\n", pactive * page);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t},\n");
} else {
malloc_cprintf(write_cb, cbopaque,
"huge: %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64"\n",
huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
malloc_cprintf(write_cb, cbopaque,
"total: %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64"\n",
small_allocated + large_allocated + huge_allocated,
small_nmalloc + large_nmalloc + huge_nmalloc,
small_ndalloc + large_ndalloc + huge_ndalloc,
small_nrequests + large_nrequests + huge_nrequests);
}
if (!json) {
malloc_cprintf(write_cb, cbopaque,
"active: %12zu\n", pactive * page);
}
CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t); CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t);
if (json) { malloc_cprintf(write_cb, cbopaque,
malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped);
"\t\t\t\t\"mapped\": %zu,\n", mapped);
} else {
malloc_cprintf(write_cb, cbopaque,
"mapped: %12zu\n", mapped);
}
CTL_M2_GET("stats.arenas.0.retained", i, &retained, size_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"retained\": %zu,\n", retained);
} else {
malloc_cprintf(write_cb, cbopaque,
"retained: %12zu\n", retained);
}
CTL_M2_GET("stats.arenas.0.metadata.mapped", i, &metadata_mapped, CTL_M2_GET("stats.arenas.0.metadata.mapped", i, &metadata_mapped,
size_t); size_t);
CTL_M2_GET("stats.arenas.0.metadata.allocated", i, &metadata_allocated, CTL_M2_GET("stats.arenas.0.metadata.allocated", i, &metadata_allocated,
size_t); size_t);
if (json) { malloc_cprintf(write_cb, cbopaque,
malloc_cprintf(write_cb, cbopaque, "metadata: mapped: %zu, allocated: %zu\n",
"\t\t\t\t\"metadata\": {\n"); metadata_mapped, metadata_allocated);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"mapped\": %zu,\n", metadata_mapped);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"allocated\": %zu\n", metadata_allocated);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t},\n");
} else {
malloc_cprintf(write_cb, cbopaque,
"metadata: mapped: %zu, allocated: %zu\n",
metadata_mapped, metadata_allocated);
}
if (bins) { if (bins)
stats_arena_bins_print(write_cb, cbopaque, json, large, huge, stats_arena_bins_print(write_cb, cbopaque, i);
i);
}
if (large) if (large)
stats_arena_lruns_print(write_cb, cbopaque, json, huge, i); stats_arena_lruns_print(write_cb, cbopaque, i);
if (huge) if (huge)
stats_arena_hchunks_print(write_cb, cbopaque, json, i); stats_arena_hchunks_print(write_cb, cbopaque, i);
} }
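/*
 * Editor's sketch (not part of this revert): the CTL_M2_GET() calls in the
 * function above substitute an arena index for the "0" component of names
 * such as "stats.arenas.0.mapped". From the public API the same thing is
 * done with the MIB interface, following the pattern in the jemalloc manual;
 * the arena index is written into mib[2].
 */
#include <stddef.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
print_arena_mapped(unsigned arena_ind)
{
	size_t mib[4], miblen = 4;
	size_t mapped, len = sizeof(mapped);

	if (mallctlnametomib("stats.arenas.0.mapped", mib, &miblen) != 0)
		return;
	mib[2] = (size_t)arena_ind;	/* select the arena of interest */
	if (mallctlbymib(mib, miblen, &mapped, &len, NULL, 0) == 0)
		printf("arena %u mapped: %zu\n", arena_ind, mapped);
}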
static void void
stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque, stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
bool json, bool merged, bool unmerged) const char *opts)
{ {
const char *cpv; int err;
bool bv; uint64_t epoch;
unsigned uv; size_t u64sz;
uint32_t u32v; bool general = true;
uint64_t u64v; bool merged = true;
ssize_t ssv; bool unmerged = true;
size_t sv, bsz, usz, ssz, sssz, cpsz; bool bins = true;
bool large = true;
bool huge = true;
bsz = sizeof(bool); /*
usz = sizeof(unsigned); * Refresh stats, in case mallctl() was called by the application.
ssz = sizeof(size_t); *
sssz = sizeof(ssize_t); * Check for OOM here, since refreshing the ctl cache can trigger
cpsz = sizeof(const char *); * allocation. In practice, none of the subsequent mallctl()-related
* calls in this function will cause OOM if this one succeeds.
* */
epoch = 1;
u64sz = sizeof(uint64_t);
err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
if (err != 0) {
if (err == EAGAIN) {
malloc_write("<jemalloc>: Memory allocation failure in "
"mallctl(\"epoch\", ...)\n");
return;
}
malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
"...)\n");
abort();
}
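/*
 * Editor's sketch (not part of this revert): the same "epoch" refresh as
 * above, issued through the public mallctl() API before reading any stats.*
 * values. The symbol is mallctl() in a default build; builds configured with
 * a "je_" prefix (as Redis does) expose it as je_mallctl().
 */
#include <stddef.h>
#include <stdint.h>
#include <jemalloc/jemalloc.h>

static int
refresh_jemalloc_stats(void)
{
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);

	/* Bumping the epoch makes subsequent stats.* reads current. */
	return (mallctl("epoch", &epoch, &sz, &epoch, sz));
}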
CTL_GET("version", &cpv, const char *); if (opts != NULL) {
if (json) { unsigned i;
malloc_cprintf(write_cb, cbopaque,
"\t\t\"version\": \"%s\",\n", cpv);
} else
malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
/* config. */ for (i = 0; opts[i] != '\0'; i++) {
#define CONFIG_WRITE_BOOL_JSON(n, c) \ switch (opts[i]) {
if (json) { \ case 'g':
CTL_GET("config."#n, &bv, bool); \ general = false;
malloc_cprintf(write_cb, cbopaque, \ break;
"\t\t\t\""#n"\": %s%s\n", bv ? "true" : "false", \ case 'm':
(c)); \ merged = false;
break;
case 'a':
unmerged = false;
break;
case 'b':
bins = false;
break;
case 'l':
large = false;
break;
case 'h':
huge = false;
break;
default:;
}
}
} }
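/*
 * Editor's sketch (not part of this revert): how the option characters
 * parsed above are used from the public API. Each character disables one
 * section; a NULL or empty opts string prints everything. The write_cb /
 * cbopaque pair is forwarded to every malloc_cprintf() call below; passing
 * NULL falls back to jemalloc's default writer.
 */
#include <jemalloc/jemalloc.h>

static void
print_arena_summaries_only(void)
{
	/*
	 * 'g' = skip general info, 'b' = skip per-bin tables,
	 * 'l' = skip large-class tables, 'h' = skip huge-class tables.
	 */
	malloc_stats_print(NULL, NULL, "gblh");
}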
if (json) { malloc_cprintf(write_cb, cbopaque,
malloc_cprintf(write_cb, cbopaque, "___ Begin jemalloc statistics ___\n");
"\t\t\"config\": {\n"); if (general) {
} const char *cpv;
bool bv;
unsigned uv;
ssize_t ssv;
size_t sv, bsz, ssz, sssz, cpsz;
CONFIG_WRITE_BOOL_JSON(cache_oblivious, ",") bsz = sizeof(bool);
ssz = sizeof(size_t);
sssz = sizeof(ssize_t);
cpsz = sizeof(const char *);
CTL_GET("config.debug", &bv, bool); CTL_GET("version", &cpv, const char *);
if (json) { malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
malloc_cprintf(write_cb, cbopaque, CTL_GET("config.debug", &bv, bool);
"\t\t\t\"debug\": %s,\n", bv ? "true" : "false");
} else {
malloc_cprintf(write_cb, cbopaque, "Assertions %s\n", malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
bv ? "enabled" : "disabled"); bv ? "enabled" : "disabled");
}
CONFIG_WRITE_BOOL_JSON(fill, ",")
CONFIG_WRITE_BOOL_JSON(lazy_lock, ",")
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"malloc_conf\": \"%s\",\n",
config_malloc_conf);
} else {
malloc_cprintf(write_cb, cbopaque,
"config.malloc_conf: \"%s\"\n", config_malloc_conf);
}
CONFIG_WRITE_BOOL_JSON(munmap, ",") #define OPT_WRITE_BOOL(n) \
CONFIG_WRITE_BOOL_JSON(prof, ",") if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0) { \
CONFIG_WRITE_BOOL_JSON(prof_libgcc, ",")
CONFIG_WRITE_BOOL_JSON(prof_libunwind, ",")
CONFIG_WRITE_BOOL_JSON(stats, ",")
CONFIG_WRITE_BOOL_JSON(tcache, ",")
CONFIG_WRITE_BOOL_JSON(tls, ",")
CONFIG_WRITE_BOOL_JSON(utrace, ",")
CONFIG_WRITE_BOOL_JSON(valgrind, ",")
CONFIG_WRITE_BOOL_JSON(xmalloc, "")
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t},\n");
}
#undef CONFIG_WRITE_BOOL_JSON
/* opt. */
#define OPT_WRITE_BOOL(n, c) \
if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0) { \
if (json) { \
malloc_cprintf(write_cb, cbopaque, \
"\t\t\t\""#n"\": %s%s\n", bv ? "true" : \
"false", (c)); \
} else { \
malloc_cprintf(write_cb, cbopaque, \ malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %s\n", bv ? "true" : "false"); \ " opt."#n": %s\n", bv ? "true" : "false"); \
} \ }
} #define OPT_WRITE_BOOL_MUTABLE(n, m) { \
#define OPT_WRITE_BOOL_MUTABLE(n, m, c) { \ bool bv2; \
bool bv2; \ if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0 && \
if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0 && \ je_mallctl(#m, &bv2, &bsz, NULL, 0) == 0) { \
je_mallctl(#m, &bv2, (void *)&bsz, NULL, 0) == 0) { \
if (json) { \
malloc_cprintf(write_cb, cbopaque, \
"\t\t\t\""#n"\": %s%s\n", bv ? "true" : \
"false", (c)); \
} else { \
malloc_cprintf(write_cb, cbopaque, \ malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %s ("#m": %s)\n", bv ? "true" \ " opt."#n": %s ("#m": %s)\n", bv ? "true" \
: "false", bv2 ? "true" : "false"); \ : "false", bv2 ? "true" : "false"); \
} \ } \
} \
} }
#define OPT_WRITE_UNSIGNED(n, c) \ #define OPT_WRITE_SIZE_T(n) \
if (je_mallctl("opt."#n, (void *)&uv, &usz, NULL, 0) == 0) { \ if (je_mallctl("opt."#n, &sv, &ssz, NULL, 0) == 0) { \
if (json) { \
malloc_cprintf(write_cb, cbopaque, \
"\t\t\t\""#n"\": %u%s\n", uv, (c)); \
} else { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %u\n", uv); \
} \
}
#define OPT_WRITE_SIZE_T(n, c) \
if (je_mallctl("opt."#n, (void *)&sv, &ssz, NULL, 0) == 0) { \
if (json) { \
malloc_cprintf(write_cb, cbopaque, \
"\t\t\t\""#n"\": %zu%s\n", sv, (c)); \
} else { \
malloc_cprintf(write_cb, cbopaque, \ malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zu\n", sv); \ " opt."#n": %zu\n", sv); \
} \ }
} #define OPT_WRITE_SSIZE_T(n) \
#define OPT_WRITE_SSIZE_T(n, c) \ if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0) { \
if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0) { \
if (json) { \
malloc_cprintf(write_cb, cbopaque, \
"\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \
} else { \
malloc_cprintf(write_cb, cbopaque, \ malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zd\n", ssv); \ " opt."#n": %zd\n", ssv); \
} \ }
} #define OPT_WRITE_SSIZE_T_MUTABLE(n, m) { \
#define OPT_WRITE_SSIZE_T_MUTABLE(n, m, c) { \ ssize_t ssv2; \
ssize_t ssv2; \ if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0 && \
if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0 && \ je_mallctl(#m, &ssv2, &sssz, NULL, 0) == 0) { \
je_mallctl(#m, (void *)&ssv2, &sssz, NULL, 0) == 0) { \
if (json) { \
malloc_cprintf(write_cb, cbopaque, \
"\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \
} else { \
malloc_cprintf(write_cb, cbopaque, \ malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zd ("#m": %zd)\n", \ " opt."#n": %zd ("#m": %zd)\n", \
ssv, ssv2); \ ssv, ssv2); \
} \ } \
} \
} }
#define OPT_WRITE_CHAR_P(n, c) \ #define OPT_WRITE_CHAR_P(n) \
if (je_mallctl("opt."#n, (void *)&cpv, &cpsz, NULL, 0) == 0) { \ if (je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0) == 0) { \
if (json) { \
malloc_cprintf(write_cb, cbopaque, \
"\t\t\t\""#n"\": \"%s\"%s\n", cpv, (c)); \
} else { \
malloc_cprintf(write_cb, cbopaque, \ malloc_cprintf(write_cb, cbopaque, \
" opt."#n": \"%s\"\n", cpv); \ " opt."#n": \"%s\"\n", cpv); \
} \ }
}
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\"opt\": {\n");
} else {
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"Run-time option settings:\n"); "Run-time option settings:\n");
} OPT_WRITE_BOOL(abort)
OPT_WRITE_BOOL(abort, ",") OPT_WRITE_SIZE_T(lg_chunk)
OPT_WRITE_SIZE_T(lg_chunk, ",") OPT_WRITE_CHAR_P(dss)
OPT_WRITE_CHAR_P(dss, ",") OPT_WRITE_SIZE_T(narenas)
OPT_WRITE_UNSIGNED(narenas, ",") OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult, arenas.lg_dirty_mult)
OPT_WRITE_CHAR_P(purge, ",") OPT_WRITE_BOOL(stats_print)
if (json || opt_purge == purge_mode_ratio) { OPT_WRITE_CHAR_P(junk)
OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult, OPT_WRITE_SIZE_T(quarantine)
arenas.lg_dirty_mult, ",") OPT_WRITE_BOOL(redzone)
} OPT_WRITE_BOOL(zero)
if (json || opt_purge == purge_mode_decay) { OPT_WRITE_BOOL(utrace)
OPT_WRITE_SSIZE_T_MUTABLE(decay_time, arenas.decay_time, ",") OPT_WRITE_BOOL(valgrind)
} OPT_WRITE_BOOL(xmalloc)
OPT_WRITE_CHAR_P(junk, ",") OPT_WRITE_BOOL(tcache)
OPT_WRITE_SIZE_T(quarantine, ",") OPT_WRITE_SSIZE_T(lg_tcache_max)
OPT_WRITE_BOOL(redzone, ",") OPT_WRITE_BOOL(prof)
OPT_WRITE_BOOL(zero, ",") OPT_WRITE_CHAR_P(prof_prefix)
OPT_WRITE_BOOL(utrace, ",") OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active)
OPT_WRITE_BOOL(xmalloc, ",") OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init,
OPT_WRITE_BOOL(tcache, ",") prof.thread_active_init)
OPT_WRITE_SSIZE_T(lg_tcache_max, ",") OPT_WRITE_SSIZE_T(lg_prof_sample)
OPT_WRITE_BOOL(prof, ",") OPT_WRITE_BOOL(prof_accum)
OPT_WRITE_CHAR_P(prof_prefix, ",") OPT_WRITE_SSIZE_T(lg_prof_interval)
OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active, ",") OPT_WRITE_BOOL(prof_gdump)
OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init, prof.thread_active_init, OPT_WRITE_BOOL(prof_final)
",") OPT_WRITE_BOOL(prof_leak)
OPT_WRITE_SSIZE_T_MUTABLE(lg_prof_sample, prof.lg_sample, ",")
OPT_WRITE_BOOL(prof_accum, ",")
OPT_WRITE_SSIZE_T(lg_prof_interval, ",")
OPT_WRITE_BOOL(prof_gdump, ",")
OPT_WRITE_BOOL(prof_final, ",")
OPT_WRITE_BOOL(prof_leak, ",")
/*
* stats_print is always emitted, so as long as stats_print comes last
* it's safe to unconditionally omit the comma here (rather than having
* to conditionally omit it elsewhere depending on configuration).
*/
OPT_WRITE_BOOL(stats_print, "")
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t},\n");
}
#undef OPT_WRITE_BOOL #undef OPT_WRITE_BOOL
#undef OPT_WRITE_BOOL_MUTABLE #undef OPT_WRITE_BOOL_MUTABLE
...@@ -769,386 +513,128 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque, ...@@ -769,386 +513,128 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
#undef OPT_WRITE_SSIZE_T #undef OPT_WRITE_SSIZE_T
#undef OPT_WRITE_CHAR_P #undef OPT_WRITE_CHAR_P
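/*
 * Editor's sketch (not part of this revert): what a single expansion of
 * OPT_WRITE_BOOL above boils down to, written out by hand for one option.
 * The read succeeds only when the option exists in the build (for example,
 * opt.prof requires --enable-prof), which is why the macros silently skip
 * options whose mallctl() lookup fails.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
print_opt_abort(void)
{
	bool bv;
	size_t bsz = sizeof(bool);

	if (mallctl("opt.abort", &bv, &bsz, NULL, 0) == 0)
		printf("  opt.abort: %s\n", bv ? "true" : "false");
}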
/* arenas. */ malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\"arenas\": {\n");
}
CTL_GET("arenas.narenas", &uv, unsigned); CTL_GET("arenas.narenas", &uv, unsigned);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"narenas\": %u,\n", uv);
} else
malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv); malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);
CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t); malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n",
if (json) { sizeof(void *));
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"lg_dirty_mult\": %zd,\n", ssv);
} else if (opt_purge == purge_mode_ratio) {
if (ssv >= 0) {
malloc_cprintf(write_cb, cbopaque,
"Min active:dirty page ratio per arena: "
"%u:1\n", (1U << ssv));
} else {
malloc_cprintf(write_cb, cbopaque,
"Min active:dirty page ratio per arena: "
"N/A\n");
}
}
CTL_GET("arenas.decay_time", &ssv, ssize_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"decay_time\": %zd,\n", ssv);
} else if (opt_purge == purge_mode_decay) {
malloc_cprintf(write_cb, cbopaque,
"Unused dirty page decay time: %zd%s\n",
ssv, (ssv < 0) ? " (no decay)" : "");
}
CTL_GET("arenas.quantum", &sv, size_t); CTL_GET("arenas.quantum", &sv, size_t);
if (json) { malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n",
malloc_cprintf(write_cb, cbopaque, sv);
"\t\t\t\"quantum\": %zu,\n", sv);
} else
malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);
CTL_GET("arenas.page", &sv, size_t); CTL_GET("arenas.page", &sv, size_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"page\": %zu,\n", sv);
} else
malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv); malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) { CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t);
if (json) { if (ssv >= 0) {
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"tcache_max\": %zu,\n", sv); "Min active:dirty page ratio per arena: %u:1\n",
(1U << ssv));
} else { } else {
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"Maximum thread-cached size class: %zu\n", sv); "Min active:dirty page ratio per arena: N/A\n");
} }
} if (je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0) == 0) {
if (json) {
unsigned nbins, nlruns, nhchunks, i;
CTL_GET("arenas.nbins", &nbins, unsigned);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"nbins\": %u,\n", nbins);
CTL_GET("arenas.nhbins", &uv, unsigned);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"nhbins\": %u,\n", uv);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"bin\": [\n");
for (i = 0; i < nbins; i++) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t{\n");
CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"size\": %zu,\n", sv);
CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"nregs\": %"FMTu32",\n", u32v);
CTL_M2_GET("arenas.bin.0.run_size", i, &sv, size_t);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"run_size\": %zu\n", sv);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t}%s\n", (i + 1 < nbins) ? "," : "");
}
malloc_cprintf(write_cb, cbopaque,
"\t\t\t],\n");
CTL_GET("arenas.nlruns", &nlruns, unsigned);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"nlruns\": %u,\n", nlruns);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"lrun\": [\n");
for (i = 0; i < nlruns; i++) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t{\n");
CTL_M2_GET("arenas.lrun.0.size", i, &sv, size_t);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"size\": %zu\n", sv);
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t}%s\n", (i + 1 < nlruns) ? "," : ""); "Maximum thread-cached size class: %zu\n", sv);
} }
malloc_cprintf(write_cb, cbopaque, if (je_mallctl("opt.prof", &bv, &bsz, NULL, 0) == 0 && bv) {
"\t\t\t],\n"); CTL_GET("prof.lg_sample", &sv, size_t);
CTL_GET("arenas.nhchunks", &nhchunks, unsigned);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"nhchunks\": %u,\n", nhchunks);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"hchunk\": [\n");
for (i = 0; i < nhchunks; i++) {
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t{\n"); "Average profile sample interval: %"FMTu64
" (2^%zu)\n", (((uint64_t)1U) << sv), sv);
CTL_M2_GET("arenas.hchunk.0.size", i, &sv, size_t); CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
malloc_cprintf(write_cb, cbopaque, if (ssv >= 0) {
"\t\t\t\t\t\"size\": %zu\n", sv); malloc_cprintf(write_cb, cbopaque,
"Average profile dump interval: %"FMTu64
malloc_cprintf(write_cb, cbopaque, " (2^%zd)\n",
"\t\t\t\t}%s\n", (i + 1 < nhchunks) ? "," : ""); (((uint64_t)1U) << ssv), ssv);
} else {
malloc_cprintf(write_cb, cbopaque,
"Average profile dump interval: N/A\n");
}
} }
CTL_GET("opt.lg_chunk", &sv, size_t);
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"\t\t\t]\n"); "Chunk size: %zu (2^%zu)\n", (ZU(1) << sv), sv);
malloc_cprintf(write_cb, cbopaque,
"\t\t},\n");
} }
/* prof. */ if (config_stats) {
if (json) { size_t *cactive;
malloc_cprintf(write_cb, cbopaque, size_t allocated, active, metadata, resident, mapped;
"\t\t\"prof\": {\n");
CTL_GET("prof.thread_active_init", &bv, bool);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"thread_active_init\": %s,\n", bv ? "true" :
"false");
CTL_GET("prof.active", &bv, bool);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"active\": %s,\n", bv ? "true" : "false");
CTL_GET("prof.gdump", &bv, bool);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"gdump\": %s,\n", bv ? "true" : "false");
CTL_GET("prof.interval", &u64v, uint64_t);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"interval\": %"FMTu64",\n", u64v);
CTL_GET("prof.lg_sample", &ssv, ssize_t);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"lg_sample\": %zd\n", ssv);
malloc_cprintf(write_cb, cbopaque,
"\t\t}%s\n", (config_stats || merged || unmerged) ? "," :
"");
}
}
static void
stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
bool json, bool merged, bool unmerged, bool bins, bool large, bool huge)
{
size_t *cactive;
size_t allocated, active, metadata, resident, mapped, retained;
CTL_GET("stats.cactive", &cactive, size_t *);
CTL_GET("stats.allocated", &allocated, size_t);
CTL_GET("stats.active", &active, size_t);
CTL_GET("stats.metadata", &metadata, size_t);
CTL_GET("stats.resident", &resident, size_t);
CTL_GET("stats.mapped", &mapped, size_t);
CTL_GET("stats.retained", &retained, size_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\"stats\": {\n");
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"cactive\": %zu,\n", atomic_read_z(cactive));
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"allocated\": %zu,\n", allocated);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"active\": %zu,\n", active);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"metadata\": %zu,\n", metadata);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"resident\": %zu,\n", resident);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"mapped\": %zu,\n", mapped);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"retained\": %zu\n", retained);
malloc_cprintf(write_cb, cbopaque, CTL_GET("stats.cactive", &cactive, size_t *);
"\t\t}%s\n", (merged || unmerged) ? "," : ""); CTL_GET("stats.allocated", &allocated, size_t);
} else { CTL_GET("stats.active", &active, size_t);
CTL_GET("stats.metadata", &metadata, size_t);
CTL_GET("stats.resident", &resident, size_t);
CTL_GET("stats.mapped", &mapped, size_t);
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"Allocated: %zu, active: %zu, metadata: %zu," "Allocated: %zu, active: %zu, metadata: %zu,"
" resident: %zu, mapped: %zu, retained: %zu\n", " resident: %zu, mapped: %zu\n",
allocated, active, metadata, resident, mapped, retained); allocated, active, metadata, resident, mapped);
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"Current active ceiling: %zu\n", "Current active ceiling: %zu\n",
atomic_read_z(cactive)); atomic_read_z(cactive));
}
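/*
 * Editor's sketch (not part of this revert): reading the same global totals
 * that the non-JSON branch above prints. An "epoch" bump (as in
 * stats_print()) should precede these reads so the values are current;
 * error handling is omitted for brevity.
 */
#include <stddef.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
print_global_totals(void)
{
	size_t allocated, active, resident, mapped;
	size_t len = sizeof(size_t);

	mallctl("stats.allocated", &allocated, &len, NULL, 0);
	mallctl("stats.active", &active, &len, NULL, 0);
	mallctl("stats.resident", &resident, &len, NULL, 0);
	mallctl("stats.mapped", &mapped, &len, NULL, 0);
	printf("allocated: %zu, active: %zu, resident: %zu, mapped: %zu\n",
	    allocated, active, resident, mapped);
}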
if (merged || unmerged) { if (merged) {
unsigned narenas; unsigned narenas;
if (json) { CTL_GET("arenas.narenas", &narenas, unsigned);
malloc_cprintf(write_cb, cbopaque, {
"\t\t\"stats.arenas\": {\n"); VARIABLE_ARRAY(bool, initialized, narenas);
} size_t isz;
unsigned i, ninitialized;
CTL_GET("arenas.narenas", &narenas, unsigned);
{ isz = sizeof(bool) * narenas;
VARIABLE_ARRAY(bool, initialized, narenas); xmallctl("arenas.initialized", initialized,
size_t isz; &isz, NULL, 0);
unsigned i, j, ninitialized; for (i = ninitialized = 0; i < narenas; i++) {
if (initialized[i])
isz = sizeof(bool) * narenas; ninitialized++;
xmallctl("arenas.initialized", (void *)initialized, }
&isz, NULL, 0);
for (i = ninitialized = 0; i < narenas; i++) {
if (initialized[i])
ninitialized++;
}
/* Merged stats. */ if (ninitialized > 1 || !unmerged) {
if (merged && (ninitialized > 1 || !unmerged)) { /* Print merged arena stats. */
/* Print merged arena stats. */
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"merged\": {\n");
} else {
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"\nMerged arenas stats:\n"); "\nMerged arenas stats:\n");
}
stats_arena_print(write_cb, cbopaque, json,
narenas, bins, large, huge);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t}%s\n", (ninitialized > 1) ?
"," : "");
}
}
/* Unmerged stats. */
for (i = j = 0; i < narenas; i++) {
if (initialized[i]) {
if (json) {
j++;
malloc_cprintf(write_cb,
cbopaque,
"\t\t\t\"%u\": {\n", i);
} else {
malloc_cprintf(write_cb,
cbopaque, "\narenas[%u]:\n",
i);
}
stats_arena_print(write_cb, cbopaque, stats_arena_print(write_cb, cbopaque,
json, i, bins, large, huge); narenas, bins, large, huge);
if (json) {
malloc_cprintf(write_cb,
cbopaque,
"\t\t\t}%s\n", (j <
ninitialized) ? "," : "");
}
} }
} }
} }
if (json) { if (unmerged) {
malloc_cprintf(write_cb, cbopaque, unsigned narenas;
"\t\t}\n");
}
}
}
void /* Print stats for each arena. */
stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
const char *opts)
{
int err;
uint64_t epoch;
size_t u64sz;
bool json = false;
bool general = true;
bool merged = true;
bool unmerged = true;
bool bins = true;
bool large = true;
bool huge = true;
/* CTL_GET("arenas.narenas", &narenas, unsigned);
* Refresh stats, in case mallctl() was called by the application. {
* VARIABLE_ARRAY(bool, initialized, narenas);
* Check for OOM here, since refreshing the ctl cache can trigger size_t isz;
* allocation. In practice, none of the subsequent mallctl()-related unsigned i;
* calls in this function will cause OOM if this one succeeds.
* */
epoch = 1;
u64sz = sizeof(uint64_t);
err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch,
sizeof(uint64_t));
if (err != 0) {
if (err == EAGAIN) {
malloc_write("<jemalloc>: Memory allocation failure in "
"mallctl(\"epoch\", ...)\n");
return;
}
malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
"...)\n");
abort();
}
if (opts != NULL) { isz = sizeof(bool) * narenas;
unsigned i; xmallctl("arenas.initialized", initialized,
&isz, NULL, 0);
for (i = 0; opts[i] != '\0'; i++) { for (i = 0; i < narenas; i++) {
switch (opts[i]) { if (initialized[i]) {
case 'J': malloc_cprintf(write_cb,
json = true; cbopaque,
break; "\narenas[%u]:\n", i);
case 'g': stats_arena_print(write_cb,
general = false; cbopaque, i, bins, large,
break; huge);
case 'm': }
merged = false; }
break;
case 'a':
unmerged = false;
break;
case 'b':
bins = false;
break;
case 'l':
large = false;
break;
case 'h':
huge = false;
break;
default:;
} }
} }
} }
malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n");
if (json) {
malloc_cprintf(write_cb, cbopaque,
"{\n"
"\t\"jemalloc\": {\n");
} else {
malloc_cprintf(write_cb, cbopaque,
"___ Begin jemalloc statistics ___\n");
}
if (general)
stats_general_print(write_cb, cbopaque, json, merged, unmerged);
if (config_stats) {
stats_print_helper(write_cb, cbopaque, json, merged, unmerged,
bins, large, huge);
}
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t}\n"
"}\n");
} else {
malloc_cprintf(write_cb, cbopaque,
"--- End jemalloc statistics ---\n");
}
} }
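/*
 * Editor's sketch (not part of this revert): supplying a write callback so
 * the report lands in a caller-owned buffer instead of stderr. Note that the
 * 'J' (JSON) option handled in the left-hand column exists only in the
 * jemalloc 4.4.0 code this commit removes; the restored stats_print() has no
 * JSON mode. Buffer management below is deliberately simplistic.
 */
#include <stddef.h>
#include <string.h>
#include <jemalloc/jemalloc.h>

struct stats_buf {
	char	data[65536];
	size_t	len;
};

static void
buf_write_cb(void *cbopaque, const char *s)
{
	struct stats_buf *b = cbopaque;
	size_t n = strlen(s);

	if (n > sizeof(b->data) - 1 - b->len)
		n = sizeof(b->data) - 1 - b->len;	/* truncate on overflow */
	memcpy(b->data + b->len, s, n);
	b->len += n;
	b->data[b->len] = '\0';
}

static void
capture_stats(struct stats_buf *b)
{
	b->len = 0;
	b->data[0] = '\0';
	malloc_stats_print(buf_write_cb, b, NULL);
}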
...@@ -10,7 +10,7 @@ ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT; ...@@ -10,7 +10,7 @@ ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
tcache_bin_info_t *tcache_bin_info; tcache_bin_info_t *tcache_bin_info;
static unsigned stack_nelms; /* Total stack elms per tcache. */ static unsigned stack_nelms; /* Total stack elms per tcache. */
unsigned nhbins; size_t nhbins;
size_t tcache_maxclass; size_t tcache_maxclass;
tcaches_t *tcaches; tcaches_t *tcaches;
...@@ -23,11 +23,10 @@ static tcaches_t *tcaches_avail; ...@@ -23,11 +23,10 @@ static tcaches_t *tcaches_avail;
/******************************************************************************/ /******************************************************************************/
size_t size_t tcache_salloc(const void *ptr)
tcache_salloc(tsdn_t *tsdn, const void *ptr)
{ {
return (arena_salloc(tsdn, ptr, false)); return (arena_salloc(ptr, false));
} }
void void
...@@ -68,19 +67,20 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache) ...@@ -68,19 +67,20 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
tcache->next_gc_bin++; tcache->next_gc_bin++;
if (tcache->next_gc_bin == nhbins) if (tcache->next_gc_bin == nhbins)
tcache->next_gc_bin = 0; tcache->next_gc_bin = 0;
tcache->ev_cnt = 0;
} }
void * void *
tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
tcache_bin_t *tbin, szind_t binind, bool *tcache_success) tcache_bin_t *tbin, szind_t binind)
{ {
void *ret; void *ret;
arena_tcache_fill_small(tsdn, arena, tbin, binind, config_prof ? arena_tcache_fill_small(arena, tbin, binind, config_prof ?
tcache->prof_accumbytes : 0); tcache->prof_accumbytes : 0);
if (config_prof) if (config_prof)
tcache->prof_accumbytes = 0; tcache->prof_accumbytes = 0;
ret = tcache_alloc_easy(tbin, tcache_success); ret = tcache_alloc_easy(tbin);
return (ret); return (ret);
} }
...@@ -102,18 +102,17 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, ...@@ -102,18 +102,17 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena bin associated with the first object. */ /* Lock the arena bin associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
*(tbin->avail - 1)); tbin->avail[0]);
arena_t *bin_arena = extent_node_arena_get(&chunk->node); arena_t *bin_arena = extent_node_arena_get(&chunk->node);
arena_bin_t *bin = &bin_arena->bins[binind]; arena_bin_t *bin = &bin_arena->bins[binind];
if (config_prof && bin_arena == arena) { if (config_prof && bin_arena == arena) {
if (arena_prof_accum(tsd_tsdn(tsd), arena, if (arena_prof_accum(arena, tcache->prof_accumbytes))
tcache->prof_accumbytes)) prof_idump();
prof_idump(tsd_tsdn(tsd));
tcache->prof_accumbytes = 0; tcache->prof_accumbytes = 0;
} }
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); malloc_mutex_lock(&bin->lock);
if (config_stats && bin_arena == arena) { if (config_stats && bin_arena == arena) {
assert(!merged_stats); assert(!merged_stats);
merged_stats = true; merged_stats = true;
...@@ -123,16 +122,16 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, ...@@ -123,16 +122,16 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
} }
ndeferred = 0; ndeferred = 0;
for (i = 0; i < nflush; i++) { for (i = 0; i < nflush; i++) {
ptr = *(tbin->avail - 1 - i); ptr = tbin->avail[i];
assert(ptr != NULL); assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (extent_node_arena_get(&chunk->node) == bin_arena) { if (extent_node_arena_get(&chunk->node) == bin_arena) {
size_t pageind = ((uintptr_t)ptr - size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE; (uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_bits_t *bitselm = arena_chunk_map_bits_t *bitselm =
arena_bitselm_get_mutable(chunk, pageind); arena_bitselm_get(chunk, pageind);
arena_dalloc_bin_junked_locked(tsd_tsdn(tsd), arena_dalloc_bin_junked_locked(bin_arena, chunk,
bin_arena, chunk, ptr, bitselm); ptr, bitselm);
} else { } else {
/* /*
* This object was allocated via a different * This object was allocated via a different
...@@ -140,12 +139,11 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, ...@@ -140,12 +139,11 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
* locked. Stash the object, so that it can be * locked. Stash the object, so that it can be
* handled in a future pass. * handled in a future pass.
*/ */
*(tbin->avail - 1 - ndeferred) = ptr; tbin->avail[ndeferred] = ptr;
ndeferred++; ndeferred++;
} }
} }
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); malloc_mutex_unlock(&bin->lock);
arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
} }
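/*
 * Editor's sketch (not part of this revert): the multi-pass "stash and
 * defer" pattern used by the flush loops above, reduced to plain C. The
 * types and names here are hypothetical; only the control flow mirrors
 * tcache_bin_flush_small()/_large(): each pass locks the owner of the first
 * remaining object, frees everything belonging to that owner, and stashes
 * the rest for a later pass.
 */
typedef struct { int owner; } item_t;

static void
flush_deferred(item_t **items, unsigned nitems)
{
	unsigned nflush, ndeferred, i;

	for (nflush = nitems; nflush > 0; nflush = ndeferred) {
		int owner = items[0]->owner;	/* lock this owner for the pass */
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			item_t *it = items[i];
			if (it->owner == owner) {
				/* ... release it under owner's lock ... */
			} else {
				/* Different owner: stash for a later pass. */
				items[ndeferred++] = it;
			}
		}
		/* ... unlock owner ... */
	}
}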
if (config_stats && !merged_stats) { if (config_stats && !merged_stats) {
/* /*
...@@ -153,15 +151,15 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, ...@@ -153,15 +151,15 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
* arena, so the stats didn't get merged. Manually do so now. * arena, so the stats didn't get merged. Manually do so now.
*/ */
arena_bin_t *bin = &arena->bins[binind]; arena_bin_t *bin = &arena->bins[binind];
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); malloc_mutex_lock(&bin->lock);
bin->stats.nflushes++; bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests; bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0; tbin->tstats.nrequests = 0;
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); malloc_mutex_unlock(&bin->lock);
} }
memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
sizeof(void *)); rem * sizeof(void *));
tbin->ncached = rem; tbin->ncached = rem;
if ((int)tbin->ncached < tbin->low_water) if ((int)tbin->ncached < tbin->low_water)
tbin->low_water = tbin->ncached; tbin->low_water = tbin->ncached;
...@@ -184,13 +182,13 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, ...@@ -184,13 +182,13 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena associated with the first object. */ /* Lock the arena associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
*(tbin->avail - 1)); tbin->avail[0]);
arena_t *locked_arena = extent_node_arena_get(&chunk->node); arena_t *locked_arena = extent_node_arena_get(&chunk->node);
UNUSED bool idump; UNUSED bool idump;
if (config_prof) if (config_prof)
idump = false; idump = false;
malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->lock); malloc_mutex_lock(&locked_arena->lock);
if ((config_prof || config_stats) && locked_arena == arena) { if ((config_prof || config_stats) && locked_arena == arena) {
if (config_prof) { if (config_prof) {
idump = arena_prof_accum_locked(arena, idump = arena_prof_accum_locked(arena,
...@@ -208,13 +206,13 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, ...@@ -208,13 +206,13 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
} }
ndeferred = 0; ndeferred = 0;
for (i = 0; i < nflush; i++) { for (i = 0; i < nflush; i++) {
ptr = *(tbin->avail - 1 - i); ptr = tbin->avail[i];
assert(ptr != NULL); assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (extent_node_arena_get(&chunk->node) == if (extent_node_arena_get(&chunk->node) ==
locked_arena) { locked_arena) {
arena_dalloc_large_junked_locked(tsd_tsdn(tsd), arena_dalloc_large_junked_locked(locked_arena,
locked_arena, chunk, ptr); chunk, ptr);
} else { } else {
/* /*
* This object was allocated via a different * This object was allocated via a different
...@@ -222,56 +220,62 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, ...@@ -222,56 +220,62 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
* Stash the object, so that it can be handled * Stash the object, so that it can be handled
* in a future pass. * in a future pass.
*/ */
*(tbin->avail - 1 - ndeferred) = ptr; tbin->avail[ndeferred] = ptr;
ndeferred++; ndeferred++;
} }
} }
malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock); malloc_mutex_unlock(&locked_arena->lock);
if (config_prof && idump) if (config_prof && idump)
prof_idump(tsd_tsdn(tsd)); prof_idump();
arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
ndeferred);
} }
if (config_stats && !merged_stats) { if (config_stats && !merged_stats) {
/* /*
* The flush loop didn't happen to flush to this thread's * The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now. * arena, so the stats didn't get merged. Manually do so now.
*/ */
malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock); malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests; arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[binind - NBINS].nrequests += arena->stats.lstats[binind - NBINS].nrequests +=
tbin->tstats.nrequests; tbin->tstats.nrequests;
tbin->tstats.nrequests = 0; tbin->tstats.nrequests = 0;
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock); malloc_mutex_unlock(&arena->lock);
} }
memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
sizeof(void *)); rem * sizeof(void *));
tbin->ncached = rem; tbin->ncached = rem;
if ((int)tbin->ncached < tbin->low_water) if ((int)tbin->ncached < tbin->low_water)
tbin->low_water = tbin->ncached; tbin->low_water = tbin->ncached;
} }
static void void
tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) tcache_arena_associate(tcache_t *tcache, arena_t *arena)
{ {
if (config_stats) { if (config_stats) {
/* Link into list of extant tcaches. */ /* Link into list of extant tcaches. */
malloc_mutex_lock(tsdn, &arena->lock); malloc_mutex_lock(&arena->lock);
ql_elm_new(tcache, link); ql_elm_new(tcache, link);
ql_tail_insert(&arena->tcache_ql, tcache, link); ql_tail_insert(&arena->tcache_ql, tcache, link);
malloc_mutex_unlock(tsdn, &arena->lock); malloc_mutex_unlock(&arena->lock);
} }
} }
static void void
tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena)
{
tcache_arena_dissociate(tcache, oldarena);
tcache_arena_associate(tcache, newarena);
}
void
tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
{ {
if (config_stats) { if (config_stats) {
/* Unlink from list of extant tcaches. */ /* Unlink from list of extant tcaches. */
malloc_mutex_lock(tsdn, &arena->lock); malloc_mutex_lock(&arena->lock);
if (config_debug) { if (config_debug) {
bool in_ql = false; bool in_ql = false;
tcache_t *iter; tcache_t *iter;
...@@ -284,20 +288,11 @@ tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) ...@@ -284,20 +288,11 @@ tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
assert(in_ql); assert(in_ql);
} }
ql_remove(&arena->tcache_ql, tcache, link); ql_remove(&arena->tcache_ql, tcache, link);
tcache_stats_merge(tsdn, tcache, arena); tcache_stats_merge(tcache, arena);
malloc_mutex_unlock(tsdn, &arena->lock); malloc_mutex_unlock(&arena->lock);
} }
} }
void
tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena,
arena_t *newarena)
{
tcache_arena_dissociate(tsdn, tcache, oldarena);
tcache_arena_associate(tsdn, tcache, newarena);
}
tcache_t * tcache_t *
tcache_get_hard(tsd_t *tsd) tcache_get_hard(tsd_t *tsd)
{ {
...@@ -311,11 +306,11 @@ tcache_get_hard(tsd_t *tsd) ...@@ -311,11 +306,11 @@ tcache_get_hard(tsd_t *tsd)
arena = arena_choose(tsd, NULL); arena = arena_choose(tsd, NULL);
if (unlikely(arena == NULL)) if (unlikely(arena == NULL))
return (NULL); return (NULL);
return (tcache_create(tsd_tsdn(tsd), arena)); return (tcache_create(tsd, arena));
} }
tcache_t * tcache_t *
tcache_create(tsdn_t *tsdn, arena_t *arena) tcache_create(tsd_t *tsd, arena_t *arena)
{ {
tcache_t *tcache; tcache_t *tcache;
size_t size, stack_offset; size_t size, stack_offset;
...@@ -329,26 +324,18 @@ tcache_create(tsdn_t *tsdn, arena_t *arena) ...@@ -329,26 +324,18 @@ tcache_create(tsdn_t *tsdn, arena_t *arena)
/* Avoid false cacheline sharing. */ /* Avoid false cacheline sharing. */
size = sa2u(size, CACHELINE); size = sa2u(size, CACHELINE);
tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true, tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, a0get());
arena_get(TSDN_NULL, 0, true));
if (tcache == NULL) if (tcache == NULL)
return (NULL); return (NULL);
tcache_arena_associate(tsdn, tcache, arena); tcache_arena_associate(tcache, arena);
ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0); assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
for (i = 0; i < nhbins; i++) { for (i = 0; i < nhbins; i++) {
tcache->tbins[i].lg_fill_div = 1; tcache->tbins[i].lg_fill_div = 1;
stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
/*
* avail points past the available space. Allocations will
* access the slots toward higher addresses (for the benefit of
* prefetch).
*/
tcache->tbins[i].avail = (void **)((uintptr_t)tcache + tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
(uintptr_t)stack_offset); (uintptr_t)stack_offset);
stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
} }
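/*
 * Editor's sketch (not part of this revert): the two tbin->avail layouts
 * visible in this diff, shown on a plain array. In the removed 4.4.0 code
 * avail points just past the stack, so slot i is reached as
 * *(avail - 1 - i) (the comment above cites prefetch as the reason); in the
 * restored code avail points at the base and slot i is simply avail[i].
 * Names here are hypothetical.
 */
#include <assert.h>
#include <stddef.h>

static void
demo_avail_layouts(void **slots, size_t ncached)
{
	size_t i;
	void **avail_new = slots + ncached;	/* 4.4.0: one past the end */
	void **avail_old = slots;		/* restored: the base */

	for (i = 0; i < ncached; i++) {
		/* Both expressions name the same slot, from opposite ends. */
		assert(*(avail_new - 1 - i) == avail_old[ncached - 1 - i]);
	}
}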
return (tcache); return (tcache);
...@@ -361,7 +348,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache) ...@@ -361,7 +348,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
unsigned i; unsigned i;
arena = arena_choose(tsd, NULL); arena = arena_choose(tsd, NULL);
tcache_arena_dissociate(tsd_tsdn(tsd), tcache, arena); tcache_arena_dissociate(tcache, arena);
for (i = 0; i < NBINS; i++) { for (i = 0; i < NBINS; i++) {
tcache_bin_t *tbin = &tcache->tbins[i]; tcache_bin_t *tbin = &tcache->tbins[i];
...@@ -369,9 +356,9 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache) ...@@ -369,9 +356,9 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
if (config_stats && tbin->tstats.nrequests != 0) { if (config_stats && tbin->tstats.nrequests != 0) {
arena_bin_t *bin = &arena->bins[i]; arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); malloc_mutex_lock(&bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests; bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); malloc_mutex_unlock(&bin->lock);
} }
} }
...@@ -380,19 +367,19 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache) ...@@ -380,19 +367,19 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
tcache_bin_flush_large(tsd, tbin, i, 0, tcache); tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
if (config_stats && tbin->tstats.nrequests != 0) { if (config_stats && tbin->tstats.nrequests != 0) {
malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock); malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests; arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[i - NBINS].nrequests += arena->stats.lstats[i - NBINS].nrequests +=
tbin->tstats.nrequests; tbin->tstats.nrequests;
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock); malloc_mutex_unlock(&arena->lock);
} }
} }
if (config_prof && tcache->prof_accumbytes > 0 && if (config_prof && tcache->prof_accumbytes > 0 &&
arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes)) arena_prof_accum(arena, tcache->prof_accumbytes))
prof_idump(tsd_tsdn(tsd)); prof_idump();
idalloctm(tsd_tsdn(tsd), tcache, NULL, true, true); idalloctm(tsd, tcache, false, true);
} }
void void
...@@ -416,22 +403,21 @@ tcache_enabled_cleanup(tsd_t *tsd) ...@@ -416,22 +403,21 @@ tcache_enabled_cleanup(tsd_t *tsd)
/* Do nothing. */ /* Do nothing. */
} }
/* Caller must own arena->lock. */
void void
tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{ {
unsigned i; unsigned i;
cassert(config_stats); cassert(config_stats);
malloc_mutex_assert_owner(tsdn, &arena->lock);
/* Merge and reset tcache stats. */ /* Merge and reset tcache stats. */
for (i = 0; i < NBINS; i++) { for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i]; arena_bin_t *bin = &arena->bins[i];
tcache_bin_t *tbin = &tcache->tbins[i]; tcache_bin_t *tbin = &tcache->tbins[i];
malloc_mutex_lock(tsdn, &bin->lock); malloc_mutex_lock(&bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests; bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(tsdn, &bin->lock); malloc_mutex_unlock(&bin->lock);
tbin->tstats.nrequests = 0; tbin->tstats.nrequests = 0;
} }
...@@ -447,12 +433,11 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) ...@@ -447,12 +433,11 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
bool bool
tcaches_create(tsd_t *tsd, unsigned *r_ind) tcaches_create(tsd_t *tsd, unsigned *r_ind)
{ {
arena_t *arena;
tcache_t *tcache; tcache_t *tcache;
tcaches_t *elm; tcaches_t *elm;
if (tcaches == NULL) { if (tcaches == NULL) {
tcaches = base_alloc(tsd_tsdn(tsd), sizeof(tcache_t *) * tcaches = base_alloc(sizeof(tcache_t *) *
(MALLOCX_TCACHE_MAX+1)); (MALLOCX_TCACHE_MAX+1));
if (tcaches == NULL) if (tcaches == NULL)
return (true); return (true);
...@@ -460,10 +445,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind) ...@@ -460,10 +445,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
return (true); return (true);
arena = arena_ichoose(tsd, NULL); tcache = tcache_create(tsd, a0get());
if (unlikely(arena == NULL))
return (true);
tcache = tcache_create(tsd_tsdn(tsd), arena);
if (tcache == NULL) if (tcache == NULL)
return (true); return (true);
...@@ -471,7 +453,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind) ...@@ -471,7 +453,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
elm = tcaches_avail; elm = tcaches_avail;
tcaches_avail = tcaches_avail->next; tcaches_avail = tcaches_avail->next;
elm->tcache = tcache; elm->tcache = tcache;
*r_ind = (unsigned)(elm - tcaches); *r_ind = elm - tcaches;
} else { } else {
elm = &tcaches[tcaches_past]; elm = &tcaches[tcaches_past];
elm->tcache = tcache; elm->tcache = tcache;
...@@ -509,7 +491,7 @@ tcaches_destroy(tsd_t *tsd, unsigned ind) ...@@ -509,7 +491,7 @@ tcaches_destroy(tsd_t *tsd, unsigned ind)
} }
bool bool
tcache_boot(tsdn_t *tsdn) tcache_boot(void)
{ {
unsigned i; unsigned i;
...@@ -517,17 +499,17 @@ tcache_boot(tsdn_t *tsdn) ...@@ -517,17 +499,17 @@ tcache_boot(tsdn_t *tsdn)
* If necessary, clamp opt_lg_tcache_max, now that large_maxclass is * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
* known. * known.
*/ */
if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < SMALL_MAXCLASS) if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
tcache_maxclass = SMALL_MAXCLASS; tcache_maxclass = SMALL_MAXCLASS;
else if ((ZU(1) << opt_lg_tcache_max) > large_maxclass) else if ((1U << opt_lg_tcache_max) > large_maxclass)
tcache_maxclass = large_maxclass; tcache_maxclass = large_maxclass;
else else
tcache_maxclass = (ZU(1) << opt_lg_tcache_max); tcache_maxclass = (1U << opt_lg_tcache_max);
nhbins = size2index(tcache_maxclass) + 1; nhbins = size2index(tcache_maxclass) + 1;
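/*
 * Editor's sketch (not part of this revert): the clamping above, written as
 * a pure function. opt_lg_tcache_max is a power-of-two exponent; the result
 * is forced into [SMALL_MAXCLASS, large_maxclass] so that every
 * tcache-able size class has a bin. The concrete bounds depend on the
 * build's page and chunk sizes, so no numbers are assumed here.
 */
#include <stddef.h>
#include <sys/types.h>

static size_t
clamp_tcache_maxclass(ssize_t lg_tcache_max, size_t small_maxclass,
    size_t large_maxclass)
{
	if (lg_tcache_max < 0 ||
	    ((size_t)1 << lg_tcache_max) < small_maxclass)
		return (small_maxclass);
	if (((size_t)1 << lg_tcache_max) > large_maxclass)
		return (large_maxclass);
	return ((size_t)1 << lg_tcache_max);
}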
/* Initialize tcache_bin_info. */ /* Initialize tcache_bin_info. */
tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, nhbins * tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
sizeof(tcache_bin_info_t)); sizeof(tcache_bin_info_t));
if (tcache_bin_info == NULL) if (tcache_bin_info == NULL)
return (true); return (true);
......
#define JEMALLOC_TICKER_C_
#include "jemalloc/internal/jemalloc_internal.h"
...@@ -77,7 +77,7 @@ tsd_cleanup(void *arg) ...@@ -77,7 +77,7 @@ tsd_cleanup(void *arg)
/* Do nothing. */ /* Do nothing. */
break; break;
case tsd_state_nominal: case tsd_state_nominal:
#define O(n, t) \ #define O(n, t) \
n##_cleanup(tsd); n##_cleanup(tsd);
MALLOC_TSD MALLOC_TSD
#undef O #undef O
...@@ -106,17 +106,15 @@ MALLOC_TSD ...@@ -106,17 +106,15 @@ MALLOC_TSD
} }
} }
tsd_t * bool
malloc_tsd_boot0(void) malloc_tsd_boot0(void)
{ {
tsd_t *tsd;
ncleanups = 0; ncleanups = 0;
if (tsd_boot0()) if (tsd_boot0())
return (NULL); return (true);
tsd = tsd_fetch(); *tsd_arenas_cache_bypassp_get(tsd_fetch()) = true;
*tsd_arenas_tdata_bypassp_get(tsd) = true; return (false);
return (tsd);
} }
void void
...@@ -124,7 +122,7 @@ malloc_tsd_boot1(void) ...@@ -124,7 +122,7 @@ malloc_tsd_boot1(void)
{ {
tsd_boot1(); tsd_boot1();
*tsd_arenas_tdata_bypassp_get(tsd_fetch()) = false; *tsd_arenas_cache_bypassp_get(tsd_fetch()) = false;
} }
#ifdef _WIN32 #ifdef _WIN32
...@@ -150,15 +148,13 @@ _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) ...@@ -150,15 +148,13 @@ _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
#ifdef _MSC_VER #ifdef _MSC_VER
# ifdef _M_IX86 # ifdef _M_IX86
# pragma comment(linker, "/INCLUDE:__tls_used") # pragma comment(linker, "/INCLUDE:__tls_used")
# pragma comment(linker, "/INCLUDE:_tls_callback")
# else # else
# pragma comment(linker, "/INCLUDE:_tls_used") # pragma comment(linker, "/INCLUDE:_tls_used")
# pragma comment(linker, "/INCLUDE:tls_callback")
# endif # endif
# pragma section(".CRT$XLY",long,read) # pragma section(".CRT$XLY",long,read)
#endif #endif
JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used) JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL, static BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
DWORD fdwReason, LPVOID lpvReserved) = _tls_callback; DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
#endif #endif
...@@ -171,10 +167,10 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) ...@@ -171,10 +167,10 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
tsd_init_block_t *iter; tsd_init_block_t *iter;
/* Check whether this thread has already inserted into the list. */ /* Check whether this thread has already inserted into the list. */
malloc_mutex_lock(TSDN_NULL, &head->lock); malloc_mutex_lock(&head->lock);
ql_foreach(iter, &head->blocks, link) { ql_foreach(iter, &head->blocks, link) {
if (iter->thread == self) { if (iter->thread == self) {
malloc_mutex_unlock(TSDN_NULL, &head->lock); malloc_mutex_unlock(&head->lock);
return (iter->data); return (iter->data);
} }
} }
...@@ -182,7 +178,7 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) ...@@ -182,7 +178,7 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
ql_elm_new(block, link); ql_elm_new(block, link);
block->thread = self; block->thread = self;
ql_tail_insert(&head->blocks, block, link); ql_tail_insert(&head->blocks, block, link);
malloc_mutex_unlock(TSDN_NULL, &head->lock); malloc_mutex_unlock(&head->lock);
return (NULL); return (NULL);
} }
...@@ -190,8 +186,8 @@ void ...@@ -190,8 +186,8 @@ void
tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
{ {
malloc_mutex_lock(TSDN_NULL, &head->lock); malloc_mutex_lock(&head->lock);
ql_remove(&head->blocks, block, link); ql_remove(&head->blocks, block, link);
malloc_mutex_unlock(TSDN_NULL, &head->lock); malloc_mutex_unlock(&head->lock);
} }
#endif #endif
/*
* Define simple versions of assertion macros that won't recurse in case
* of assertion failures in malloc_*printf().
*/
#define assert(e) do { \ #define assert(e) do { \
if (config_debug && !(e)) { \ if (config_debug && !(e)) { \
malloc_write("<jemalloc>: Failed assertion\n"); \ malloc_write("<jemalloc>: Failed assertion\n"); \
...@@ -14,7 +10,6 @@ ...@@ -14,7 +10,6 @@
malloc_write("<jemalloc>: Unreachable code reached\n"); \ malloc_write("<jemalloc>: Unreachable code reached\n"); \
abort(); \ abort(); \
} \ } \
unreachable(); \
} while (0) } while (0)
#define not_implemented() do { \ #define not_implemented() do { \
...@@ -49,19 +44,15 @@ static void ...@@ -49,19 +44,15 @@ static void
wrtmessage(void *cbopaque, const char *s) wrtmessage(void *cbopaque, const char *s)
{ {
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write) #ifdef SYS_write
/* /*
* Use syscall(2) rather than write(2) when possible in order to avoid * Use syscall(2) rather than write(2) when possible in order to avoid
* the possibility of memory allocation within libc. This is necessary * the possibility of memory allocation within libc. This is necessary
* on FreeBSD; most operating systems do not have this problem though. * on FreeBSD; most operating systems do not have this problem though.
*
* syscall() returns long or int, depending on platform, so capture the
* unused result in the widest plausible type to avoid compiler
* warnings.
*/ */
UNUSED long result = syscall(SYS_write, STDERR_FILENO, s, strlen(s)); UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
#else #else
UNUSED ssize_t result = write(STDERR_FILENO, s, strlen(s)); UNUSED int result = write(STDERR_FILENO, s, strlen(s));
#endif #endif
} }
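/*
 * Editor's sketch (not part of this revert): the allocation-free write
 * pattern used by wrtmessage() above, as a standalone POSIX helper. Where
 * SYS_write is available the raw syscall avoids any chance of libc
 * allocating (the comment above singles out FreeBSD); elsewhere plain
 * write(2) is used. The cast to void discards the result, as the UNUSED
 * locals do above.
 */
#define _GNU_SOURCE	/* for the syscall() prototype on glibc */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>	/* provides SYS_write where available */

static void
write_stderr_noalloc(const char *s)
{
#ifdef SYS_write
	(void)syscall(SYS_write, STDERR_FILENO, s, strlen(s));
#else
	(void)write(STDERR_FILENO, s, strlen(s));
#endif
}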
...@@ -91,7 +82,7 @@ buferror(int err, char *buf, size_t buflen) ...@@ -91,7 +82,7 @@ buferror(int err, char *buf, size_t buflen)
#ifdef _WIN32 #ifdef _WIN32
FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0, FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0,
(LPSTR)buf, (DWORD)buflen, NULL); (LPSTR)buf, buflen, NULL);
return (0); return (0);
#elif defined(__GLIBC__) && defined(_GNU_SOURCE) #elif defined(__GLIBC__) && defined(_GNU_SOURCE)
char *b = strerror_r(err, buf, buflen); char *b = strerror_r(err, buf, buflen);
...@@ -200,7 +191,7 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) ...@@ -200,7 +191,7 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
p++; p++;
} }
if (neg) if (neg)
ret = (uintmax_t)(-((intmax_t)ret)); ret = -ret;
if (p == ns) { if (p == ns) {
/* No conversion performed. */ /* No conversion performed. */
...@@ -315,9 +306,10 @@ x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) ...@@ -315,9 +306,10 @@ x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p)
return (s); return (s);
} }
size_t int
malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
{ {
int ret;
size_t i; size_t i;
const char *f; const char *f;
...@@ -408,8 +400,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) ...@@ -408,8 +400,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
int prec = -1; int prec = -1;
int width = -1; int width = -1;
unsigned char len = '?'; unsigned char len = '?';
char *s;
size_t slen;
f++; f++;
/* Flags. */ /* Flags. */
...@@ -500,6 +490,8 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) ...@@ -500,6 +490,8 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
} }
/* Conversion specifier. */ /* Conversion specifier. */
switch (*f) { switch (*f) {
char *s;
size_t slen;
case '%': case '%':
/* %% */ /* %% */
APPEND_C(*f); APPEND_C(*f);
...@@ -585,19 +577,20 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) ...@@ -585,19 +577,20 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
str[i] = '\0'; str[i] = '\0';
else else
str[size - 1] = '\0'; str[size - 1] = '\0';
ret = i;
#undef APPEND_C #undef APPEND_C
#undef APPEND_S #undef APPEND_S
#undef APPEND_PADDED_S #undef APPEND_PADDED_S
#undef GET_ARG_NUMERIC #undef GET_ARG_NUMERIC
return (i); return (ret);
} }
JEMALLOC_FORMAT_PRINTF(3, 4) JEMALLOC_FORMAT_PRINTF(3, 4)
size_t int
malloc_snprintf(char *str, size_t size, const char *format, ...) malloc_snprintf(char *str, size_t size, const char *format, ...)
{ {
size_t ret; int ret;
va_list ap; va_list ap;
va_start(ap, format); va_start(ap, format);
...@@ -655,12 +648,3 @@ malloc_printf(const char *format, ...) ...@@ -655,12 +648,3 @@ malloc_printf(const char *format, ...)
malloc_vcprintf(NULL, NULL, format, ap); malloc_vcprintf(NULL, NULL, format, ap);
va_end(ap); va_end(ap);
} }
/*
* Restore normal assertion macros, in order to make it possible to compile all
* C files as a single concatenation.
*/
#undef assert
#undef not_reached
#undef not_implemented
#include "jemalloc/internal/assert.h"
#define JEMALLOC_WITNESS_C_
#include "jemalloc/internal/jemalloc_internal.h"
void
witness_init(witness_t *witness, const char *name, witness_rank_t rank,
witness_comp_t *comp)
{
witness->name = name;
witness->rank = rank;
witness->comp = comp;
}
#ifdef JEMALLOC_JET
#undef witness_lock_error
#define witness_lock_error JEMALLOC_N(n_witness_lock_error)
#endif
void
witness_lock_error(const witness_list_t *witnesses, const witness_t *witness)
{
witness_t *w;
malloc_printf("<jemalloc>: Lock rank order reversal:");
ql_foreach(w, witnesses, link) {
malloc_printf(" %s(%u)", w->name, w->rank);
}
malloc_printf(" %s(%u)\n", witness->name, witness->rank);
abort();
}
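/*
 * Editor's sketch (not part of this revert): the invariant that
 * witness_lock_error() above reports. Each lock carries a rank, and a
 * thread is expected to acquire locks in increasing rank order; the real
 * implementation keeps the per-thread list of held witnesses that the
 * ql_foreach() above walks. This reduced version only tracks the highest
 * rank held and is purely illustrative.
 */
#include <stdbool.h>

static unsigned highest_rank_held;	/* per-thread in the real code */
static bool any_lock_held;

static bool
witness_would_reverse(unsigned rank_about_to_acquire)
{
	/* Acquiring a rank <= one already held is a lock-order reversal. */
	return (any_lock_held && rank_about_to_acquire <= highest_rank_held);
}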
#ifdef JEMALLOC_JET
#undef witness_lock_error
#define witness_lock_error JEMALLOC_N(witness_lock_error)
witness_lock_error_t *witness_lock_error = JEMALLOC_N(n_witness_lock_error);
#endif
#ifdef JEMALLOC_JET
#undef witness_owner_error
#define witness_owner_error JEMALLOC_N(n_witness_owner_error)
#endif
void
witness_owner_error(const witness_t *witness)
{
malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name,
witness->rank);
abort();
}
#ifdef JEMALLOC_JET
#undef witness_owner_error
#define witness_owner_error JEMALLOC_N(witness_owner_error)
witness_owner_error_t *witness_owner_error = JEMALLOC_N(n_witness_owner_error);
#endif
#ifdef JEMALLOC_JET
#undef witness_not_owner_error
#define witness_not_owner_error JEMALLOC_N(n_witness_not_owner_error)
#endif
void
witness_not_owner_error(const witness_t *witness)
{
malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name,
witness->rank);
abort();
}
#ifdef JEMALLOC_JET
#undef witness_not_owner_error
#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
witness_not_owner_error_t *witness_not_owner_error =
JEMALLOC_N(n_witness_not_owner_error);
#endif
#ifdef JEMALLOC_JET
#undef witness_lockless_error
#define witness_lockless_error JEMALLOC_N(n_witness_lockless_error)
#endif
void
witness_lockless_error(const witness_list_t *witnesses)
{
witness_t *w;
malloc_printf("<jemalloc>: Should not own any locks:");
ql_foreach(w, witnesses, link) {
malloc_printf(" %s(%u)", w->name, w->rank);
}
malloc_printf("\n");
abort();
}
#ifdef JEMALLOC_JET
#undef witness_lockless_error
#define witness_lockless_error JEMALLOC_N(witness_lockless_error)
witness_lockless_error_t *witness_lockless_error =
JEMALLOC_N(n_witness_lockless_error);
#endif
void
witnesses_cleanup(tsd_t *tsd)
{
witness_assert_lockless(tsd_tsdn(tsd));
/* Do nothing. */
}
void
witness_fork_cleanup(tsd_t *tsd)
{
/* Do nothing. */
}
void
witness_prefork(tsd_t *tsd)
{
tsd_witness_fork_set(tsd, true);
}
void
witness_postfork_parent(tsd_t *tsd)
{
tsd_witness_fork_set(tsd, false);
}
void
witness_postfork_child(tsd_t *tsd)
{
#ifndef JEMALLOC_MUTEX_INIT_CB
witness_list_t *witnesses;
witnesses = tsd_witnessesp_get(tsd);
ql_new(witnesses);
#endif
tsd_witness_fork_set(tsd, false);
}
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
#endif #endif
/* /*
* The malloc_default_purgeable_zone() function is only available on >= 10.6. * The malloc_default_purgeable_zone function is only available on >= 10.6.
* We need to check whether it is present at runtime, thus the weak_import. * We need to check whether it is present at runtime, thus the weak_import.
*/ */
extern malloc_zone_t *malloc_default_purgeable_zone(void) extern malloc_zone_t *malloc_default_purgeable_zone(void)
...@@ -13,9 +13,8 @@ JEMALLOC_ATTR(weak_import); ...@@ -13,9 +13,8 @@ JEMALLOC_ATTR(weak_import);
/******************************************************************************/ /******************************************************************************/
/* Data. */ /* Data. */
static malloc_zone_t *default_zone, *purgeable_zone; static malloc_zone_t zone;
static malloc_zone_t jemalloc_zone; static struct malloc_introspection_t zone_introspect;
static struct malloc_introspection_t jemalloc_zone_introspect;
/******************************************************************************/ /******************************************************************************/
/* Function prototypes for non-inline static functions. */ /* Function prototypes for non-inline static functions. */
...@@ -57,7 +56,7 @@ zone_size(malloc_zone_t *zone, void *ptr) ...@@ -57,7 +56,7 @@ zone_size(malloc_zone_t *zone, void *ptr)
* not work in practice, we must check all pointers to assure that they * not work in practice, we must check all pointers to assure that they
* reside within a mapped chunk before determining size. * reside within a mapped chunk before determining size.
*/ */
return (ivsalloc(tsdn_fetch(), ptr, config_prof)); return (ivsalloc(ptr, config_prof));
} }
static void * static void *
...@@ -88,7 +87,7 @@ static void ...@@ -88,7 +87,7 @@ static void
zone_free(malloc_zone_t *zone, void *ptr) zone_free(malloc_zone_t *zone, void *ptr)
{ {
if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0) { if (ivsalloc(ptr, config_prof) != 0) {
je_free(ptr); je_free(ptr);
return; return;
} }
...@@ -100,7 +99,7 @@ static void * ...@@ -100,7 +99,7 @@ static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{ {
if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0) if (ivsalloc(ptr, config_prof) != 0)
return (je_realloc(ptr, size)); return (je_realloc(ptr, size));
return (realloc(ptr, size)); return (realloc(ptr, size));
...@@ -122,11 +121,9 @@ zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) ...@@ -122,11 +121,9 @@ zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
static void static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{ {
size_t alloc_size;
alloc_size = ivsalloc(tsdn_fetch(), ptr, config_prof); if (ivsalloc(ptr, config_prof) != 0) {
if (alloc_size != 0) { assert(ivsalloc(ptr, config_prof) == size);
assert(alloc_size == size);
je_free(ptr); je_free(ptr);
return; return;
} }
...@@ -165,103 +162,89 @@ static void ...@@ -165,103 +162,89 @@ static void
zone_force_unlock(malloc_zone_t *zone) zone_force_unlock(malloc_zone_t *zone)
{ {
/*
* Call jemalloc_postfork_child() rather than
* jemalloc_postfork_parent(), because this function is executed by both
* parent and child. The parent can tolerate having state
* reinitialized, but the child cannot unlock mutexes that were locked
* by the parent.
*/
if (isthreaded) if (isthreaded)
jemalloc_postfork_child(); jemalloc_postfork_parent();
} }
static void JEMALLOC_ATTR(constructor)
zone_init(void) void
register_zone(void)
{ {
jemalloc_zone.size = (void *)zone_size; /*
jemalloc_zone.malloc = (void *)zone_malloc; * If something else replaced the system default zone allocator, don't
jemalloc_zone.calloc = (void *)zone_calloc; * register jemalloc's.
jemalloc_zone.valloc = (void *)zone_valloc; */
jemalloc_zone.free = (void *)zone_free; malloc_zone_t *default_zone = malloc_default_zone();
jemalloc_zone.realloc = (void *)zone_realloc; malloc_zone_t *purgeable_zone = NULL;
jemalloc_zone.destroy = (void *)zone_destroy; if (!default_zone->zone_name ||
jemalloc_zone.zone_name = "jemalloc_zone"; strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) {
jemalloc_zone.batch_malloc = NULL; return;
jemalloc_zone.batch_free = NULL; }
jemalloc_zone.introspect = &jemalloc_zone_introspect;
jemalloc_zone.version = JEMALLOC_ZONE_VERSION; zone.size = (void *)zone_size;
zone.malloc = (void *)zone_malloc;
zone.calloc = (void *)zone_calloc;
zone.valloc = (void *)zone_valloc;
zone.free = (void *)zone_free;
zone.realloc = (void *)zone_realloc;
zone.destroy = (void *)zone_destroy;
zone.zone_name = "jemalloc_zone";
zone.batch_malloc = NULL;
zone.batch_free = NULL;
zone.introspect = &zone_introspect;
zone.version = JEMALLOC_ZONE_VERSION;
#if (JEMALLOC_ZONE_VERSION >= 5) #if (JEMALLOC_ZONE_VERSION >= 5)
jemalloc_zone.memalign = zone_memalign; zone.memalign = zone_memalign;
#endif #endif
#if (JEMALLOC_ZONE_VERSION >= 6) #if (JEMALLOC_ZONE_VERSION >= 6)
jemalloc_zone.free_definite_size = zone_free_definite_size; zone.free_definite_size = zone_free_definite_size;
#endif #endif
#if (JEMALLOC_ZONE_VERSION >= 8) #if (JEMALLOC_ZONE_VERSION >= 8)
jemalloc_zone.pressure_relief = NULL; zone.pressure_relief = NULL;
#endif #endif
jemalloc_zone_introspect.enumerator = NULL; zone_introspect.enumerator = NULL;
jemalloc_zone_introspect.good_size = (void *)zone_good_size; zone_introspect.good_size = (void *)zone_good_size;
jemalloc_zone_introspect.check = NULL; zone_introspect.check = NULL;
jemalloc_zone_introspect.print = NULL; zone_introspect.print = NULL;
jemalloc_zone_introspect.log = NULL; zone_introspect.log = NULL;
jemalloc_zone_introspect.force_lock = (void *)zone_force_lock; zone_introspect.force_lock = (void *)zone_force_lock;
jemalloc_zone_introspect.force_unlock = (void *)zone_force_unlock; zone_introspect.force_unlock = (void *)zone_force_unlock;
jemalloc_zone_introspect.statistics = NULL; zone_introspect.statistics = NULL;
#if (JEMALLOC_ZONE_VERSION >= 6) #if (JEMALLOC_ZONE_VERSION >= 6)
jemalloc_zone_introspect.zone_locked = NULL; zone_introspect.zone_locked = NULL;
#endif #endif
#if (JEMALLOC_ZONE_VERSION >= 7) #if (JEMALLOC_ZONE_VERSION >= 7)
jemalloc_zone_introspect.enable_discharge_checking = NULL; zone_introspect.enable_discharge_checking = NULL;
jemalloc_zone_introspect.disable_discharge_checking = NULL; zone_introspect.disable_discharge_checking = NULL;
jemalloc_zone_introspect.discharge = NULL; zone_introspect.discharge = NULL;
# ifdef __BLOCKS__ #ifdef __BLOCKS__
jemalloc_zone_introspect.enumerate_discharged_pointers = NULL; zone_introspect.enumerate_discharged_pointers = NULL;
# else #else
jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL; zone_introspect.enumerate_unavailable_without_blocks = NULL;
# endif #endif
#endif #endif
}
static malloc_zone_t *
zone_default_get(void)
{
malloc_zone_t **zones = NULL;
unsigned int num_zones = 0;
/* /*
* On OSX 10.12, malloc_default_zone returns a special zone that is not * The default purgeable zone is created lazily by OSX's libc. It uses
* present in the list of registered zones. That zone uses a "lite zone" * the default zone when it is created for "small" allocations
* if one is present (apparently enabled when malloc stack logging is * (< 15 KiB), but assumes the default zone is a scalable_zone. This
* enabled), or the first registered zone otherwise. In practice this * obviously fails when the default zone is the jemalloc zone, so
* means unless malloc stack logging is enabled, the first registered * malloc_default_purgeable_zone is called beforehand so that the
* zone is the default. So get the list of zones to get the first one, * default purgeable zone is created when the default zone is still
* instead of relying on malloc_default_zone. * a scalable_zone. As purgeable zones only exist on >= 10.6, we need
* to check for the existence of malloc_default_purgeable_zone() at
* run time.
*/ */
if (KERN_SUCCESS != malloc_get_all_zones(0, NULL, if (malloc_default_purgeable_zone != NULL)
(vm_address_t**)&zones, &num_zones)) { purgeable_zone = malloc_default_purgeable_zone();
/*
* Reset the value in case the failure happened after it was
* set.
*/
num_zones = 0;
}
if (num_zones)
return (zones[0]);
return (malloc_default_zone());
}
/* As written, this function can only promote jemalloc_zone. */ /* Register the custom zone. At this point it won't be the default. */
static void malloc_zone_register(&zone);
zone_promote(void)
{
malloc_zone_t *zone;
do { do {
default_zone = malloc_default_zone();
/* /*
* Unregister and reregister the default zone. On OSX >= 10.6, * Unregister and reregister the default zone. On OSX >= 10.6,
* unregistering takes the last registered zone and places it * unregistering takes the last registered zone and places it
...@@ -272,7 +255,6 @@ zone_promote(void) ...@@ -272,7 +255,6 @@ zone_promote(void)
*/ */
malloc_zone_unregister(default_zone); malloc_zone_unregister(default_zone);
malloc_zone_register(default_zone); malloc_zone_register(default_zone);
/* /*
* On OSX 10.6, having the default purgeable zone appear before * On OSX 10.6, having the default purgeable zone appear before
* the default zone makes some things crash because it thinks it * the default zone makes some things crash because it thinks it
...@@ -284,47 +266,9 @@ zone_promote(void) ...@@ -284,47 +266,9 @@ zone_promote(void)
* above, i.e. the default zone. Registering it again then puts * above, i.e. the default zone. Registering it again then puts
* it at the end, obviously after the default zone. * it at the end, obviously after the default zone.
*/ */
if (purgeable_zone != NULL) { if (purgeable_zone) {
malloc_zone_unregister(purgeable_zone); malloc_zone_unregister(purgeable_zone);
malloc_zone_register(purgeable_zone); malloc_zone_register(purgeable_zone);
} }
} while (malloc_default_zone() != &zone);
zone = zone_default_get();
} while (zone != &jemalloc_zone);
}
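The do/while loop above keeps cycling the registered zones until the 10.12-aware lookup returns jemalloc's zone. A sketch of the resulting invariant, assuming the newer (left-hand) names from this file; the checking helper itself is illustrative only:

static void
assert_default_zone_is_jemalloc(void)
{
    /* zone_promote() only returns once the lookup yields jemalloc's zone. */
    assert(zone_default_get() == &jemalloc_zone);
}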
JEMALLOC_ATTR(constructor)
void
zone_register(void)
{
/*
* If something else replaced the system default zone allocator, don't
* register jemalloc's.
*/
default_zone = zone_default_get();
if (!default_zone->zone_name || strcmp(default_zone->zone_name,
"DefaultMallocZone") != 0)
return;
/*
* The default purgeable zone is created lazily by OSX's libc. It uses
* the default zone when it is created for "small" allocations
* (< 15 KiB), but assumes the default zone is a scalable_zone. This
* obviously fails when the default zone is the jemalloc zone, so
* malloc_default_purgeable_zone() is called beforehand so that the
* default purgeable zone is created when the default zone is still
* a scalable_zone. As purgeable zones only exist on >= 10.6, we need
* to check for the existence of malloc_default_purgeable_zone() at
* run time.
*/
purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL :
malloc_default_purgeable_zone();
/* Register the custom zone. At this point it won't be the default. */
zone_init();
malloc_zone_register(&jemalloc_zone);
/* Promote the custom zone to be default. */
zone_promote();
} }
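The purgeable-zone handling above relies on a weak import: malloc_default_purgeable_zone() only exists on OSX >= 10.6, so the symbol's address is tested at run time before it is called. A hedged restatement of that pattern as a standalone helper (the helper name is illustrative, not part of this diff):

extern malloc_zone_t *malloc_default_purgeable_zone(void)
JEMALLOC_ATTR(weak_import);

static malloc_zone_t *
purgeable_zone_if_available(void)
{
    /* On systems older than 10.6 the weak symbol resolves to NULL. */
    if (malloc_default_purgeable_zone == NULL)
        return (NULL);
    return (malloc_default_purgeable_zone());
}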
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#ifdef _WIN32 #ifdef _WIN32
# include "msvc_compat/strings.h" # include "msvc_compat/strings.h"
#endif #endif
#include <sys/time.h>
#ifdef _WIN32 #ifdef _WIN32
# include <windows.h> # include <windows.h>
...@@ -19,6 +20,39 @@ ...@@ -19,6 +20,39 @@
# include <pthread.h> # include <pthread.h>
#endif #endif
/******************************************************************************/
/*
* Define always-enabled assertion macros, so that test assertions execute even
* if assertions are disabled in the library code. These definitions must
* exist prior to including "jemalloc/internal/util.h".
*/
#define assert(e) do { \
if (!(e)) { \
malloc_printf( \
"<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
__FILE__, __LINE__, #e); \
abort(); \
} \
} while (0)
#define not_reached() do { \
malloc_printf( \
"<jemalloc>: %s:%d: Unreachable code reached\n", \
__FILE__, __LINE__); \
abort(); \
} while (0)
#define not_implemented() do { \
malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
__FILE__, __LINE__); \
abort(); \
} while (0)
#define assert_not_implemented(e) do { \
if (!(e)) \
not_implemented(); \
} while (0)
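With these macros defined before "jemalloc/internal/util.h" is pulled in, test assertions abort with a file/line message even when the library itself was configured with assertions disabled. A small, purely illustrative helper showing how the macros read in test code:

static size_t
checked_half(size_t n)
{
    assert(n > 0);
    if (n % 2 != 0)
        not_implemented();    /* Odd sizes are not handled by this sketch. */
    return (n / 2);
}

static void
use_checked_half(void)
{
    assert(checked_half(8) == 4);
}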
#include "test/jemalloc_test_defs.h" #include "test/jemalloc_test_defs.h"
#ifdef JEMALLOC_OSSPIN #ifdef JEMALLOC_OSSPIN
...@@ -53,14 +87,6 @@ ...@@ -53,14 +87,6 @@
# include "jemalloc/internal/jemalloc_internal_defs.h" # include "jemalloc/internal/jemalloc_internal_defs.h"
# include "jemalloc/internal/jemalloc_internal_macros.h" # include "jemalloc/internal/jemalloc_internal_macros.h"
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
true
#else
false
#endif
;
# define JEMALLOC_N(n) @private_namespace@##n # define JEMALLOC_N(n) @private_namespace@##n
# include "jemalloc/internal/private_namespace.h" # include "jemalloc/internal/private_namespace.h"
...@@ -68,7 +94,6 @@ static const bool config_debug = ...@@ -68,7 +94,6 @@ static const bool config_debug =
# define JEMALLOC_H_STRUCTS # define JEMALLOC_H_STRUCTS
# define JEMALLOC_H_EXTERNS # define JEMALLOC_H_EXTERNS
# define JEMALLOC_H_INLINES # define JEMALLOC_H_INLINES
# include "jemalloc/internal/nstime.h"
# include "jemalloc/internal/util.h" # include "jemalloc/internal/util.h"
# include "jemalloc/internal/qr.h" # include "jemalloc/internal/qr.h"
# include "jemalloc/internal/ql.h" # include "jemalloc/internal/ql.h"
...@@ -124,40 +149,3 @@ static const bool config_debug = ...@@ -124,40 +149,3 @@ static const bool config_debug =
#include "test/thd.h" #include "test/thd.h"
#define MEXP 19937 #define MEXP 19937
#include "test/SFMT.h" #include "test/SFMT.h"
/******************************************************************************/
/*
* Define always-enabled assertion macros, so that test assertions execute even
* if assertions are disabled in the library code.
*/
#undef assert
#undef not_reached
#undef not_implemented
#undef assert_not_implemented
#define assert(e) do { \
if (!(e)) { \
malloc_printf( \
"<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
__FILE__, __LINE__, #e); \
abort(); \
} \
} while (0)
#define not_reached() do { \
malloc_printf( \
"<jemalloc>: %s:%d: Unreachable code reached\n", \
__FILE__, __LINE__); \
abort(); \
} while (0)
#define not_implemented() do { \
malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
__FILE__, __LINE__); \
abort(); \
} while (0)
#define assert_not_implemented(e) do { \
if (!(e)) \
not_implemented(); \
} while (0)
...@@ -8,8 +8,6 @@ ...@@ -8,8 +8,6 @@
typedef struct { typedef struct {
#ifdef _WIN32 #ifdef _WIN32
CRITICAL_SECTION lock; CRITICAL_SECTION lock;
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
os_unfair_lock lock;
#elif (defined(JEMALLOC_OSSPIN)) #elif (defined(JEMALLOC_OSSPIN))
OSSpinLock lock; OSSpinLock lock;
#else #else
......
...@@ -311,9 +311,6 @@ label_test_end: \ ...@@ -311,9 +311,6 @@ label_test_end: \
#define test(...) \ #define test(...) \
p_test(__VA_ARGS__, NULL) p_test(__VA_ARGS__, NULL)
#define test_no_malloc_init(...) \
p_test_no_malloc_init(__VA_ARGS__, NULL)
#define test_skip_if(e) do { \ #define test_skip_if(e) do { \
if (e) { \ if (e) { \
test_skip("%s:%s:%d: Test skipped: (%s)", \ test_skip("%s:%s:%d: Test skipped: (%s)", \
...@@ -327,7 +324,6 @@ void test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); ...@@ -327,7 +324,6 @@ void test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
/* For private use by macros. */ /* For private use by macros. */
test_status_t p_test(test_t *t, ...); test_status_t p_test(test_t *t, ...);
test_status_t p_test_no_malloc_init(test_t *t, ...);
void p_test_init(const char *name); void p_test_init(const char *name);
void p_test_fini(void); void p_test_fini(void);
void p_test_fail(const char *prefix, const char *message); void p_test_fail(const char *prefix, const char *message);