Commit 7178cac0 authored by antirez

Revert "Jemalloc updated to 4.4.0."

This reverts commit 153f2f00.

Jemalloc 4.4.0 is apparently causing deadlocks in certain
systems. See for example https://github.com/antirez/redis/issues/3799.
As a cautionary step, we are reverting the commit and
releasing a new stable Redis version.
parent 33fad43c
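The diff below reverts the bundled jemalloc sources under deps/jemalloc to the previously shipped version; the hunks shown here come from jemalloc's src/huge.c and src/jemalloc.c and drop the tsdn_t-based internal API used by 4.4.0 in favor of the older tsd_t-based one. As a sanity check when picking up the new stable release, you can ask the allocator itself which jemalloc version a Redis build bundles. The sketch below is illustrative and not part of this commit; it assumes the bundled jemalloc is compiled with the "je_" symbol prefix (as Redis's deps build does) and uses jemalloc's standard mallctl("version") query.

/*
 * Illustrative only -- not part of this commit. Prints the jemalloc
 * version linked into the current binary via the mallctl interface.
 * Assumes the library was built with the "je_" symbol prefix; drop the
 * prefix for a stock jemalloc build. Link with the bundled libjemalloc.
 */
#include <stdio.h>
#include <string.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	const char *version;          /* points at a string owned by jemalloc */
	size_t len = sizeof(version);

	if (je_mallctl("version", &version, &len, NULL, 0) != 0) {
		fprintf(stderr, "mallctl(\"version\") failed\n");
		return 2;
	}
	printf("bundled jemalloc: %s\n", version);

	/* Non-zero exit status if the deadlock-prone 4.4.x line is still in use. */
	return strncmp(version, "4.4.", 4) == 0 ? 1 : 0;
}

On a running server the same information is visible in the mem_allocator field of INFO memory, which is usually the quicker check.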
@@ -15,21 +15,12 @@ huge_node_get(const void *ptr)
}
static bool
huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
huge_node_set(const void *ptr, extent_node_t *node)
{
assert(extent_node_addr_get(node) == ptr);
assert(!extent_node_achunk_get(node));
return (chunk_register(tsdn, ptr, node));
}
static void
huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
{
bool err;
err = huge_node_set(tsdn, ptr, node);
assert(!err);
return (chunk_register(ptr, node));
}
static void
@@ -40,39 +31,39 @@ huge_node_unset(const void *ptr, const extent_node_t *node)
}
void *
huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
tcache_t *tcache)
{
size_t usize;
assert(usize == s2u(usize));
usize = s2u(size);
if (usize == 0) {
/* size_t overflow. */
return (NULL);
}
return (huge_palloc(tsdn, arena, usize, chunksize, zero));
return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
}
void *
huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero)
huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
bool zero, tcache_t *tcache)
{
void *ret;
size_t ausize;
arena_t *iarena;
size_t usize;
extent_node_t *node;
size_t sn;
bool is_zeroed;
/* Allocate one or more contiguous chunks for this request. */
assert(!tsdn_null(tsdn) || arena != NULL);
ausize = sa2u(usize, alignment);
if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
usize = sa2u(size, alignment);
if (unlikely(usize == 0))
return (NULL);
assert(ausize >= chunksize);
assert(usize >= chunksize);
/* Allocate an extent node with which to track the chunk. */
iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) :
a0get();
node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)),
CACHELINE, false, NULL, true, iarena);
node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
CACHELINE, false, tcache, true, arena);
if (node == NULL)
return (NULL);
@@ -81,35 +72,33 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
if (likely(!tsdn_null(tsdn)))
arena = arena_choose(tsdn_tsd(tsdn), arena);
if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
arena, usize, alignment, &sn, &is_zeroed)) == NULL) {
idalloctm(tsdn, node, NULL, true, true);
arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
size, alignment, &is_zeroed)) == NULL) {
idalloctm(tsd, node, tcache, true);
return (NULL);
}
extent_node_init(node, arena, ret, usize, sn, is_zeroed, true);
extent_node_init(node, arena, ret, size, is_zeroed, true);
if (huge_node_set(tsdn, ret, node)) {
arena_chunk_dalloc_huge(tsdn, arena, ret, usize, sn);
idalloctm(tsdn, node, NULL, true, true);
if (huge_node_set(ret, node)) {
arena_chunk_dalloc_huge(arena, ret, size);
idalloctm(tsd, node, tcache, true);
return (NULL);
}
/* Insert node into huge. */
malloc_mutex_lock(tsdn, &arena->huge_mtx);
malloc_mutex_lock(&arena->huge_mtx);
ql_elm_new(node, ql_link);
ql_tail_insert(&arena->huge, node, ql_link);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
malloc_mutex_unlock(&arena->huge_mtx);
if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed)
memset(ret, 0, usize);
memset(ret, 0, size);
} else if (config_fill && unlikely(opt_junk_alloc))
memset(ret, JEMALLOC_ALLOC_JUNK, usize);
memset(ret, 0xa5, size);
arena_decay_tick(tsdn, arena);
return (ret);
}
@@ -127,7 +116,7 @@ huge_dalloc_junk(void *ptr, size_t usize)
* unmapped.
*/
if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
memset(ptr, JEMALLOC_FREE_JUNK, usize);
memset(ptr, 0x5a, usize);
}
}
#ifdef JEMALLOC_JET
@@ -137,8 +126,8 @@ huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif
static void
huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
size_t usize_min, size_t usize_max, bool zero)
huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
size_t usize_max, bool zero)
{
size_t usize, usize_next;
extent_node_t *node;
@@ -162,28 +151,24 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
if (oldsize > usize) {
size_t sdiff = oldsize - usize;
if (config_fill && unlikely(opt_junk_free)) {
memset((void *)((uintptr_t)ptr + usize),
JEMALLOC_FREE_JUNK, sdiff);
memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
post_zeroed = false;
} else {
post_zeroed = !chunk_purge_wrapper(tsdn, arena,
&chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize,
sdiff);
post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
ptr, CHUNK_CEILING(oldsize), usize, sdiff);
}
} else
post_zeroed = pre_zeroed;
malloc_mutex_lock(tsdn, &arena->huge_mtx);
malloc_mutex_lock(&arena->huge_mtx);
/* Update the size of the huge allocation. */
huge_node_unset(ptr, node);
assert(extent_node_size_get(node) != usize);
extent_node_size_set(node, usize);
huge_node_reset(tsdn, ptr, node);
/* Update zeroed. */
extent_node_zeroed_set(node, post_zeroed);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
malloc_mutex_unlock(&arena->huge_mtx);
arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize);
arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);
/* Fill if necessary (growing). */
if (oldsize < usize) {
@@ -193,15 +178,14 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
usize - oldsize);
}
} else if (config_fill && unlikely(opt_junk_alloc)) {
memset((void *)((uintptr_t)ptr + oldsize),
JEMALLOC_ALLOC_JUNK, usize - oldsize);
memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
oldsize);
}
}
}
static bool
huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
size_t usize)
huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
{
extent_node_t *node;
arena_t *arena;
@@ -212,7 +196,7 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
pre_zeroed = extent_node_zeroed_get(node);
chunk_hooks = chunk_hooks_get(tsdn, arena);
chunk_hooks = chunk_hooks_get(arena);
assert(oldsize > usize);
@@ -229,59 +213,53 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
sdiff);
post_zeroed = false;
} else {
post_zeroed = !chunk_purge_wrapper(tsdn, arena,
&chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr +
usize), CHUNK_CEILING(oldsize),
post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
CHUNK_CEILING(oldsize),
CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
}
} else
post_zeroed = pre_zeroed;
malloc_mutex_lock(tsdn, &arena->huge_mtx);
malloc_mutex_lock(&arena->huge_mtx);
/* Update the size of the huge allocation. */
huge_node_unset(ptr, node);
extent_node_size_set(node, usize);
huge_node_reset(tsdn, ptr, node);
/* Update zeroed. */
extent_node_zeroed_set(node, post_zeroed);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
malloc_mutex_unlock(&arena->huge_mtx);
/* Zap the excess chunks. */
arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize,
extent_node_sn_get(node));
arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);
return (false);
}
static bool
huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
size_t usize, bool zero) {
huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) {
extent_node_t *node;
arena_t *arena;
bool is_zeroed_subchunk, is_zeroed_chunk;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
malloc_mutex_lock(&arena->huge_mtx);
is_zeroed_subchunk = extent_node_zeroed_get(node);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
malloc_mutex_unlock(&arena->huge_mtx);
/*
* Use is_zeroed_chunk to detect whether the trailing memory is zeroed,
* update extent's zeroed field, and zero as necessary.
* Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
* that it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed_chunk = false;
if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize,
is_zeroed_chunk = zero;
if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
&is_zeroed_chunk))
return (true);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
huge_node_unset(ptr, node);
malloc_mutex_lock(&arena->huge_mtx);
/* Update the size of the huge allocation. */
extent_node_size_set(node, usize);
extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
is_zeroed_chunk);
huge_node_reset(tsdn, ptr, node);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
malloc_mutex_unlock(&arena->huge_mtx);
if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed_subchunk) {
@@ -294,21 +272,19 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
CHUNK_CEILING(oldsize));
}
} else if (config_fill && unlikely(opt_junk_alloc)) {
memset((void *)((uintptr_t)ptr + oldsize), JEMALLOC_ALLOC_JUNK,
usize - oldsize);
memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
oldsize);
}
return (false);
}
bool
huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
size_t usize_max, bool zero)
{
assert(s2u(oldsize) == oldsize);
/* The following should have been caught by callers. */
assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);
/* Both allocations must be huge to avoid a move. */
if (oldsize < chunksize || usize_max < chunksize)
@@ -316,18 +292,13 @@ huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
/* Attempt to expand the allocation in-place. */
if (!huge_ralloc_no_move_expand(tsdn, ptr, oldsize, usize_max,
zero)) {
arena_decay_tick(tsdn, huge_aalloc(ptr));
if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max, zero))
return (false);
}
/* Try again, this time with usize_min. */
if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsdn,
ptr, oldsize, usize_min, zero)) {
arena_decay_tick(tsdn, huge_aalloc(ptr));
CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr,
oldsize, usize_min, zero))
return (false);
}
}
/*
@@ -336,46 +307,36 @@ huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
*/
if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
huge_ralloc_no_move_similar(tsdn, ptr, oldsize, usize_min,
usize_max, zero);
arena_decay_tick(tsdn, huge_aalloc(ptr));
huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
zero);
return (false);
}
/* Attempt to shrink the allocation in-place. */
if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
if (!huge_ralloc_no_move_shrink(tsdn, ptr, oldsize,
usize_max)) {
arena_decay_tick(tsdn, huge_aalloc(ptr));
return (false);
}
}
if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max))
return (huge_ralloc_no_move_shrink(ptr, oldsize, usize_max));
return (true);
}
static void *
huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero)
huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache)
{
if (alignment <= chunksize)
return (huge_malloc(tsdn, arena, usize, zero));
return (huge_palloc(tsdn, arena, usize, alignment, zero));
return (huge_malloc(tsd, arena, usize, zero, tcache));
return (huge_palloc(tsd, arena, usize, alignment, zero, tcache));
}
void *
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
size_t usize, size_t alignment, bool zero, tcache_t *tcache)
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
size_t alignment, bool zero, tcache_t *tcache)
{
void *ret;
size_t copysize;
/* The following should have been caught by callers. */
assert(usize > 0 && usize <= HUGE_MAXCLASS);
/* Try to avoid moving the allocation. */
if (!huge_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, usize,
zero))
if (!huge_ralloc_no_move(ptr, oldsize, usize, usize, zero))
return (ptr);
/*
@@ -383,19 +344,19 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
* different size class. In that case, fall back to allocating new
* space and copying.
*/
ret = huge_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, alignment,
zero);
ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero,
tcache);
if (ret == NULL)
return (NULL);
copysize = (usize < oldsize) ? usize : oldsize;
memcpy(ret, ptr, copysize);
isqalloc(tsd, ptr, oldsize, tcache, true);
isqalloc(tsd, ptr, oldsize, tcache);
return (ret);
}
void
huge_dalloc(tsdn_t *tsdn, void *ptr)
huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
extent_node_t *node;
arena_t *arena;
@@ -403,18 +364,15 @@ huge_dalloc(tsdn_t *tsdn, void *ptr)
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
huge_node_unset(ptr, node);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
malloc_mutex_lock(&arena->huge_mtx);
ql_remove(&arena->huge, node, ql_link);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
malloc_mutex_unlock(&arena->huge_mtx);
huge_dalloc_junk(extent_node_addr_get(node),
extent_node_size_get(node));
arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
extent_node_addr_get(node), extent_node_size_get(node),
extent_node_sn_get(node));
idalloctm(tsdn, node, NULL, true, true);
arena_decay_tick(tsdn, arena);
arena_chunk_dalloc_huge(extent_node_arena_get(node),
extent_node_addr_get(node), extent_node_size_get(node));
idalloctm(tsd, node, tcache, true);
}
arena_t *
@@ -425,7 +383,7 @@ huge_aalloc(const void *ptr)
}
size_t
huge_salloc(tsdn_t *tsdn, const void *ptr)
huge_salloc(const void *ptr)
{
size_t size;
extent_node_t *node;
@@ -433,15 +391,15 @@ huge_salloc(tsdn_t *tsdn, const void *ptr)
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
malloc_mutex_lock(&arena->huge_mtx);
size = extent_node_size_get(node);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
malloc_mutex_unlock(&arena->huge_mtx);
return (size);
}
prof_tctx_t *
huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
huge_prof_tctx_get(const void *ptr)
{
prof_tctx_t *tctx;
extent_node_t *node;
@@ -449,29 +407,29 @@ huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
malloc_mutex_lock(&arena->huge_mtx);
tctx = extent_node_prof_tctx_get(node);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
malloc_mutex_unlock(&arena->huge_mtx);
return (tctx);
}
void
huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx)
huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
extent_node_t *node;
arena_t *arena;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
malloc_mutex_lock(&arena->huge_mtx);
extent_node_prof_tctx_set(node, tctx);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
malloc_mutex_unlock(&arena->huge_mtx);
}
void
huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr)
huge_prof_tctx_reset(const void *ptr)
{
huge_prof_tctx_set(tsdn, ptr, (prof_tctx_t *)(uintptr_t)1U);
huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
}
@@ -5,11 +5,7 @@
/* Data. */
/* Runtime configuration options. */
const char *je_malloc_conf
#ifndef _WIN32
JEMALLOC_ATTR(weak)
#endif
;
const char *je_malloc_conf JEMALLOC_ATTR(weak);
bool opt_abort =
#ifdef JEMALLOC_DEBUG
true
@@ -44,14 +40,14 @@ bool opt_redzone = false;
bool opt_utrace = false;
bool opt_xmalloc = false;
bool opt_zero = false;
unsigned opt_narenas = 0;
size_t opt_narenas = 0;
/* Initialized to true if the process is running inside Valgrind. */
bool in_valgrind;
unsigned ncpus;
/* Protects arenas initialization. */
/* Protects arenas initialization (arenas, narenas_total). */
static malloc_mutex_t arenas_lock;
/*
* Arenas that are used to service external requests. Not all elements of the
@@ -61,10 +57,10 @@ static malloc_mutex_t arenas_lock;
* arenas. arenas[narenas_auto..narenas_total) are only used if the application
* takes some action to create them and allocate from them.
*/
arena_t **arenas;
static unsigned narenas_total; /* Use narenas_total_*(). */
static arena_t **arenas;
static unsigned narenas_total;
static arena_t *a0; /* arenas[0]; read-only after initialization. */
unsigned narenas_auto; /* Read-only after initialization. */
static unsigned narenas_auto; /* Read-only after initialization. */
typedef enum {
malloc_init_uninitialized = 3,
@@ -74,37 +70,9 @@ typedef enum {
} malloc_init_t;
static malloc_init_t malloc_init_state = malloc_init_uninitialized;
/* False should be the common case. Set to true to trigger initialization. */
static bool malloc_slow = true;
/* When malloc_slow is true, set the corresponding bits for sanity check. */
enum {
flag_opt_junk_alloc = (1U),
flag_opt_junk_free = (1U << 1),
flag_opt_quarantine = (1U << 2),
flag_opt_zero = (1U << 3),
flag_opt_utrace = (1U << 4),
flag_in_valgrind = (1U << 5),
flag_opt_xmalloc = (1U << 6)
};
static uint8_t malloc_slow_flags;
JEMALLOC_ALIGNED(CACHELINE)
const size_t pind2sz_tab[NPSIZES] = {
#define PSZ_yes(lg_grp, ndelta, lg_delta) \
(((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))),
#define PSZ_no(lg_grp, ndelta, lg_delta)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
PSZ_##psz(lg_grp, ndelta, lg_delta)
SIZE_CLASSES
#undef PSZ_yes
#undef PSZ_no
#undef SC
};
JEMALLOC_ALIGNED(CACHELINE)
const size_t index2size_tab[NSIZES] = {
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
SIZE_CLASSES
#undef SC
@@ -176,7 +144,7 @@ const uint8_t size2index_tab[] = {
#define S2B_11(i) S2B_10(i) S2B_10(i)
#endif
#define S2B_no(i)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
S2B_##lg_delta_lookup(index)
SIZE_CLASSES
#undef S2B_3
@@ -227,7 +195,7 @@ _init_init_lock(void)
* really only matters early in the process creation, before any
* separate thread normally starts doing anything. */
if (!init_lock_initialized)
malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT);
malloc_mutex_init(&init_lock);
init_lock_initialized = true;
}
@@ -322,10 +290,18 @@ malloc_init(void)
}
/*
* The a0*() functions are used instead of i{d,}alloc() in situations that
* The a0*() functions are used instead of i[mcd]alloc() in situations that
* cannot tolerate TLS variable access.
*/
arena_t *
a0get(void)
{
assert(a0 != NULL);
return (a0);
}
static void *
a0ialloc(size_t size, bool zero, bool is_metadata)
{
@@ -333,22 +309,14 @@ a0ialloc(size_t size, bool zero, bool is_metadata)
if (unlikely(malloc_init_a0()))
return (NULL);
return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL,
is_metadata, arena_get(TSDN_NULL, 0, true), true));
return (iallocztm(NULL, size, zero, false, is_metadata, a0get()));
}
static void
a0idalloc(void *ptr, bool is_metadata)
{
idalloctm(TSDN_NULL, ptr, false, is_metadata, true);
}
arena_t *
a0get(void)
{
return (a0);
idalloctm(NULL, ptr, false, is_metadata);
}
void *
@@ -405,228 +373,224 @@ bootstrap_free(void *ptr)
a0idalloc(ptr, false);
}
static void
arena_set(unsigned ind, arena_t *arena)
{
atomic_write_p((void **)&arenas[ind], arena);
}
static void
narenas_total_set(unsigned narenas)
{
atomic_write_u(&narenas_total, narenas);
}
static void
narenas_total_inc(void)
{
atomic_add_u(&narenas_total, 1);
}
unsigned
narenas_total_get(void)
{
return (atomic_read_u(&narenas_total));
}
/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
arena_init_locked(tsdn_t *tsdn, unsigned ind)
arena_init_locked(unsigned ind)
{
arena_t *arena;
assert(ind <= narenas_total_get());
/* Expand arenas if necessary. */
assert(ind <= narenas_total);
if (ind > MALLOCX_ARENA_MAX)
return (NULL);
if (ind == narenas_total_get())
narenas_total_inc();
if (ind == narenas_total) {
unsigned narenas_new = narenas_total + 1;
arena_t **arenas_new =
(arena_t **)a0malloc(CACHELINE_CEILING(narenas_new *
sizeof(arena_t *)));
if (arenas_new == NULL)
return (NULL);
memcpy(arenas_new, arenas, narenas_total * sizeof(arena_t *));
arenas_new[ind] = NULL;
/*
* Deallocate only if arenas came from a0malloc() (not
* base_alloc()).
*/
if (narenas_total != narenas_auto)
a0dalloc(arenas);
arenas = arenas_new;
narenas_total = narenas_new;
}
/*
* Another thread may have already initialized arenas[ind] if it's an
* auto arena.
*/
arena = arena_get(tsdn, ind, false);
arena = arenas[ind];
if (arena != NULL) {
assert(ind < narenas_auto);
return (arena);
}
/* Actually initialize the arena. */
arena = arena_new(tsdn, ind);
arena_set(ind, arena);
arena = arenas[ind] = arena_new(ind);
return (arena);
}
arena_t *
arena_init(tsdn_t *tsdn, unsigned ind)
arena_init(unsigned ind)
{
arena_t *arena;
malloc_mutex_lock(tsdn, &arenas_lock);
arena = arena_init_locked(tsdn, ind);
malloc_mutex_unlock(tsdn, &arenas_lock);
malloc_mutex_lock(&arenas_lock);
arena = arena_init_locked(ind);
malloc_mutex_unlock(&arenas_lock);
return (arena);
}
unsigned
narenas_total_get(void)
{
unsigned narenas;
malloc_mutex_lock(&arenas_lock);
narenas = narenas_total;
malloc_mutex_unlock(&arenas_lock);
return (narenas);
}
static void
arena_bind(tsd_t *tsd, unsigned ind, bool internal)
arena_bind_locked(tsd_t *tsd, unsigned ind)
{
arena_t *arena;
if (!tsd_nominal(tsd))
return;
arena = arena_get(tsd_tsdn(tsd), ind, false);
arena_nthreads_inc(arena, internal);
arena = arenas[ind];
arena->nthreads++;
if (internal)
tsd_iarena_set(tsd, arena);
else
if (tsd_nominal(tsd))
tsd_arena_set(tsd, arena);
}
static void
arena_bind(tsd_t *tsd, unsigned ind)
{
malloc_mutex_lock(&arenas_lock);
arena_bind_locked(tsd, ind);
malloc_mutex_unlock(&arenas_lock);
}
void
arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
{
arena_t *oldarena, *newarena;
oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
newarena = arena_get(tsd_tsdn(tsd), newind, false);
arena_nthreads_dec(oldarena, false);
arena_nthreads_inc(newarena, false);
malloc_mutex_lock(&arenas_lock);
oldarena = arenas[oldind];
newarena = arenas[newind];
oldarena->nthreads--;
newarena->nthreads++;
malloc_mutex_unlock(&arenas_lock);
tsd_arena_set(tsd, newarena);
}
unsigned
arena_nbound(unsigned ind)
{
unsigned nthreads;
malloc_mutex_lock(&arenas_lock);
nthreads = arenas[ind]->nthreads;
malloc_mutex_unlock(&arenas_lock);
return (nthreads);
}
static void
arena_unbind(tsd_t *tsd, unsigned ind, bool internal)
arena_unbind(tsd_t *tsd, unsigned ind)
{
arena_t *arena;
arena = arena_get(tsd_tsdn(tsd), ind, false);
arena_nthreads_dec(arena, internal);
if (internal)
tsd_iarena_set(tsd, NULL);
else
tsd_arena_set(tsd, NULL);
malloc_mutex_lock(&arenas_lock);
arena = arenas[ind];
arena->nthreads--;
malloc_mutex_unlock(&arenas_lock);
tsd_arena_set(tsd, NULL);
}
arena_tdata_t *
arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
arena_t *
arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
{
arena_tdata_t *tdata, *arenas_tdata_old;
arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
unsigned narenas_tdata_old, i;
unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
arena_t *arena;
arena_t **arenas_cache = tsd_arenas_cache_get(tsd);
unsigned narenas_cache = tsd_narenas_cache_get(tsd);
unsigned narenas_actual = narenas_total_get();
/*
* Dissociate old tdata array (and set up for deallocation upon return)
* if it's too small.
*/
if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
arenas_tdata_old = arenas_tdata;
narenas_tdata_old = narenas_tdata;
arenas_tdata = NULL;
narenas_tdata = 0;
tsd_arenas_tdata_set(tsd, arenas_tdata);
tsd_narenas_tdata_set(tsd, narenas_tdata);
} else {
arenas_tdata_old = NULL;
narenas_tdata_old = 0;
}
/* Allocate tdata array if it's missing. */
if (arenas_tdata == NULL) {
bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
*arenas_tdata_bypassp = true;
arenas_tdata = (arena_tdata_t *)a0malloc(
sizeof(arena_tdata_t) * narenas_tdata);
*arenas_tdata_bypassp = false;
/* Deallocate old cache if it's too small. */
if (arenas_cache != NULL && narenas_cache < narenas_actual) {
a0dalloc(arenas_cache);
arenas_cache = NULL;
narenas_cache = 0;
tsd_arenas_cache_set(tsd, arenas_cache);
tsd_narenas_cache_set(tsd, narenas_cache);
}
/* Allocate cache if it's missing. */
if (arenas_cache == NULL) {
bool *arenas_cache_bypassp = tsd_arenas_cache_bypassp_get(tsd);
assert(ind < narenas_actual || !init_if_missing);
narenas_cache = (ind < narenas_actual) ? narenas_actual : ind+1;
if (tsd_nominal(tsd) && !*arenas_cache_bypassp) {
*arenas_cache_bypassp = true;
arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) *
narenas_cache);
*arenas_cache_bypassp = false;
}
if (arenas_tdata == NULL) {
tdata = NULL;
goto label_return;
if (arenas_cache == NULL) {
/*
* This function must always tell the truth, even if
* it's slow, so don't let OOM, thread cleanup (note
* tsd_nominal check), nor recursive allocation
* avoidance (note arenas_cache_bypass check) get in the
* way.
*/
if (ind >= narenas_actual)
return (NULL);
malloc_mutex_lock(&arenas_lock);
arena = arenas[ind];
malloc_mutex_unlock(&arenas_lock);
return (arena);
}
assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
tsd_arenas_tdata_set(tsd, arenas_tdata);
tsd_narenas_tdata_set(tsd, narenas_tdata);
assert(tsd_nominal(tsd) && !*arenas_cache_bypassp);
tsd_arenas_cache_set(tsd, arenas_cache);
tsd_narenas_cache_set(tsd, narenas_cache);
}
/*
* Copy to tdata array. It's possible that the actual number of arenas
* has increased since narenas_total_get() was called above, but that
* causes no correctness issues unless two threads concurrently execute
* the arenas.extend mallctl, which we trust mallctl synchronization to
* Copy to cache. It's possible that the actual number of arenas has
* increased since narenas_total_get() was called above, but that causes
* no correctness issues unless two threads concurrently execute the
* arenas.extend mallctl, which we trust mallctl synchronization to
* prevent.
*/
/* Copy/initialize tickers. */
for (i = 0; i < narenas_actual; i++) {
if (i < narenas_tdata_old) {
ticker_copy(&arenas_tdata[i].decay_ticker,
&arenas_tdata_old[i].decay_ticker);
} else {
ticker_init(&arenas_tdata[i].decay_ticker,
DECAY_NTICKS_PER_UPDATE);
}
}
if (narenas_tdata > narenas_actual) {
memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
* (narenas_tdata - narenas_actual));
}
/* Read the refreshed tdata array. */
tdata = &arenas_tdata[ind];
label_return:
if (arenas_tdata_old != NULL)
a0dalloc(arenas_tdata_old);
return (tdata);
malloc_mutex_lock(&arenas_lock);
memcpy(arenas_cache, arenas, sizeof(arena_t *) * narenas_actual);
malloc_mutex_unlock(&arenas_lock);
if (narenas_cache > narenas_actual) {
memset(&arenas_cache[narenas_actual], 0, sizeof(arena_t *) *
(narenas_cache - narenas_actual));
}
/* Read the refreshed cache, and init the arena if necessary. */
arena = arenas_cache[ind];
if (init_if_missing && arena == NULL)
arena = arenas_cache[ind] = arena_init(ind);
return (arena);
}
/* Slow path, called only by arena_choose(). */
arena_t *
arena_choose_hard(tsd_t *tsd, bool internal)
arena_choose_hard(tsd_t *tsd)
{
arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
arena_t *ret;
if (narenas_auto > 1) {
unsigned i, j, choose[2], first_null;
/*
* Determine binding for both non-internal and internal
* allocation.
*
* choose[0]: For application allocation.
* choose[1]: For internal metadata allocation.
*/
for (j = 0; j < 2; j++)
choose[j] = 0;
unsigned i, choose, first_null;
choose = 0;
first_null = narenas_auto;
malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
malloc_mutex_lock(&arenas_lock);
assert(a0get() != NULL);
for (i = 1; i < narenas_auto; i++) {
if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
if (arenas[i] != NULL) {
/*
* Choose the first arena that has the lowest
* number of threads assigned to it.
*/
for (j = 0; j < 2; j++) {
if (arena_nthreads_get(arena_get(
tsd_tsdn(tsd), i, false), !!j) <
arena_nthreads_get(arena_get(
tsd_tsdn(tsd), choose[j], false),
!!j))
choose[j] = i;
}
if (arenas[i]->nthreads <
arenas[choose]->nthreads)
choose = i;
} else if (first_null == narenas_auto) {
/*
* Record the index of the first uninitialized
@@ -641,40 +605,27 @@ arena_choose_hard(tsd_t *tsd, bool internal)
}
}
for (j = 0; j < 2; j++) {
if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
choose[j], false), !!j) == 0 || first_null ==
narenas_auto) {
/*
* Use an unloaded arena, or the least loaded
* arena if all arenas are already initialized.
*/
if (!!j == internal) {
ret = arena_get(tsd_tsdn(tsd),
choose[j], false);
}
} else {
arena_t *arena;
/* Initialize a new arena. */
choose[j] = first_null;
arena = arena_init_locked(tsd_tsdn(tsd),
choose[j]);
if (arena == NULL) {
malloc_mutex_unlock(tsd_tsdn(tsd),
&arenas_lock);
return (NULL);
}
if (!!j == internal)
ret = arena;
if (arenas[choose]->nthreads == 0
|| first_null == narenas_auto) {
/*
* Use an unloaded arena, or the least loaded arena if
* all arenas are already initialized.
*/
ret = arenas[choose];
} else {
/* Initialize a new arena. */
choose = first_null;
ret = arena_init_locked(choose);
if (ret == NULL) {
malloc_mutex_unlock(&arenas_lock);
return (NULL);
}
arena_bind(tsd, choose[j], !!j);
}
malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
arena_bind_locked(tsd, choose);
malloc_mutex_unlock(&arenas_lock);
} else {
ret = arena_get(tsd_tsdn(tsd), 0, false);
arena_bind(tsd, 0, false);
arena_bind(tsd, 0, true);
ret = a0get();
arena_bind(tsd, 0);
}
return (ret);
@@ -694,16 +645,6 @@ thread_deallocated_cleanup(tsd_t *tsd)
/* Do nothing. */
}
void
iarena_cleanup(tsd_t *tsd)
{
arena_t *iarena;
iarena = tsd_iarena_get(tsd);
if (iarena != NULL)
arena_unbind(tsd, iarena->ind, true);
}
void
arena_cleanup(tsd_t *tsd)
{
@@ -711,33 +652,30 @@ arena_cleanup(tsd_t *tsd)
arena = tsd_arena_get(tsd);
if (arena != NULL)
arena_unbind(tsd, arena->ind, false);
arena_unbind(tsd, arena->ind);
}
void
arenas_tdata_cleanup(tsd_t *tsd)
arenas_cache_cleanup(tsd_t *tsd)
{
arena_tdata_t *arenas_tdata;
/* Prevent tsd->arenas_tdata from being (re)created. */
*tsd_arenas_tdata_bypassp_get(tsd) = true;
arena_t **arenas_cache;
arenas_tdata = tsd_arenas_tdata_get(tsd);
if (arenas_tdata != NULL) {
tsd_arenas_tdata_set(tsd, NULL);
a0dalloc(arenas_tdata);
arenas_cache = tsd_arenas_cache_get(tsd);
if (arenas_cache != NULL) {
tsd_arenas_cache_set(tsd, NULL);
a0dalloc(arenas_cache);
}
}
void
narenas_tdata_cleanup(tsd_t *tsd)
narenas_cache_cleanup(tsd_t *tsd)
{
/* Do nothing. */
}
void
arenas_tdata_bypass_cleanup(tsd_t *tsd)
arenas_cache_bypass_cleanup(tsd_t *tsd)
{
/* Do nothing. */
@@ -748,11 +686,8 @@ stats_print_atexit(void)
{
if (config_tcache && config_stats) {
tsdn_t *tsdn;
unsigned narenas, i;
tsdn = tsdn_fetch();
/*
* Merge stats from extant threads. This is racy, since
* individual threads do not lock when recording tcache stats
@@ -761,7 +696,7 @@ stats_print_atexit(void)
* continue to allocate.
*/
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena = arena_get(tsdn, i, false);
arena_t *arena = arenas[i];
if (arena != NULL) {
tcache_t *tcache;
@@ -771,11 +706,11 @@ stats_print_atexit(void)
* and bin locks in the opposite order,
* deadlocks may result.
*/
malloc_mutex_lock(tsdn, &arena->lock);
malloc_mutex_lock(&arena->lock);
ql_foreach(tcache, &arena->tcache_ql, link) {
tcache_stats_merge(tsdn, tcache, arena);
tcache_stats_merge(tcache, arena);
}
malloc_mutex_unlock(tsdn, &arena->lock);
malloc_mutex_unlock(&arena->lock);
}
}
}
@@ -812,20 +747,6 @@ malloc_ncpus(void)
SYSTEM_INFO si;
GetSystemInfo(&si);
result = si.dwNumberOfProcessors;
#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
/*
* glibc >= 2.6 has the CPU_COUNT macro.
*
* glibc's sysconf() uses isspace(). glibc allocates for the first time
* *before* setting up the isspace tables. Therefore we need a
* different method to get the number of CPUs.
*/
{
cpu_set_t set;
pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
result = CPU_COUNT(&set);
}
#else
result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
@@ -917,26 +838,6 @@ malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
(int)vlen, v);
}
static void
malloc_slow_flag_init(void)
{
/*
* Combine the runtime options into malloc_slow for fast path. Called
* after processing all the options.
*/
malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
| (opt_junk_free ? flag_opt_junk_free : 0)
| (opt_quarantine ? flag_opt_quarantine : 0)
| (opt_zero ? flag_opt_zero : 0)
| (opt_utrace ? flag_opt_utrace : 0)
| (opt_xmalloc ? flag_opt_xmalloc : 0);
if (config_valgrind)
malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);
malloc_slow = (malloc_slow_flags != 0);
}
static void
malloc_conf_init(void)
{
@@ -963,13 +864,10 @@ malloc_conf_init(void)
opt_tcache = false;
}
for (i = 0; i < 4; i++) {
for (i = 0; i < 3; i++) {
/* Get runtime configuration. */
switch (i) {
case 0:
opts = config_malloc_conf;
break;
case 1:
if (je_malloc_conf != NULL) {
/*
* Use options that were compiled into the
@@ -982,8 +880,8 @@ malloc_conf_init(void)
opts = buf;
}
break;
case 2: {
ssize_t linklen = 0;
case 1: {
int linklen = 0;
#ifndef _WIN32
int saved_errno = errno;
const char *linkname =
@@ -1009,7 +907,7 @@ malloc_conf_init(void)
buf[linklen] = '\0';
opts = buf;
break;
} case 3: {
} case 2: {
const char *envname =
#ifdef JEMALLOC_PREFIX
JEMALLOC_CPREFIX"MALLOC_CONF"
@@ -1056,11 +954,7 @@ malloc_conf_init(void)
if (cont) \
continue; \
}
#define CONF_MIN_no(um, min) false
#define CONF_MIN_yes(um, min) ((um) < (min))
#define CONF_MAX_no(um, max) false
#define CONF_MAX_yes(um, max) ((um) > (max))
#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
if (CONF_MATCH(n)) { \
uintmax_t um; \
char *end; \
@@ -1073,35 +967,24 @@ malloc_conf_init(void)
"Invalid conf value", \
k, klen, v, vlen); \
} else if (clip) { \
if (CONF_MIN_##check_min(um, \
(min))) \
o = (t)(min); \
else if (CONF_MAX_##check_max( \
um, (max))) \
o = (t)(max); \
if ((min) != 0 && um < (min)) \
o = (min); \
else if (um > (max)) \
o = (max); \
else \
o = (t)um; \
o = um; \
} else { \
if (CONF_MIN_##check_min(um, \
(min)) || \
CONF_MAX_##check_max(um, \
(max))) { \
if (((min) != 0 && um < (min)) \
|| um > (max)) { \
malloc_conf_error( \
"Out-of-range " \
"conf value", \
k, klen, v, vlen); \
} else \
o = (t)um; \
o = um; \
} \
continue; \
}
#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
clip) \
CONF_HANDLE_T_U(unsigned, o, n, min, max, \
check_min, check_max, clip)
#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
CONF_HANDLE_T_U(size_t, o, n, min, max, \
check_min, check_max, clip)
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
if (CONF_MATCH(n)) { \
long l; \
@@ -1144,7 +1027,7 @@ malloc_conf_init(void)
*/
CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
(sizeof(size_t) << 3) - 1, yes, yes, true)
(sizeof(size_t) << 3) - 1, true)
if (strncmp("dss", k, klen) == 0) {
int i;
bool match = false;
@@ -1169,47 +1052,17 @@ malloc_conf_init(void)
}
continue;
}
CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
UINT_MAX, yes, no, false)
if (strncmp("purge", k, klen) == 0) {
int i;
bool match = false;
for (i = 0; i < purge_mode_limit; i++) {
if (strncmp(purge_mode_names[i], v,
vlen) == 0) {
opt_purge = (purge_mode_t)i;
match = true;
break;
}
}
if (!match) {
malloc_conf_error("Invalid conf value",
k, klen, v, vlen);
}
continue;
}
CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
SIZE_T_MAX, false)
CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
-1, (sizeof(size_t) << 3) - 1)
CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
NSTIME_SEC_MAX);
CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
if (config_fill) {
if (CONF_MATCH("junk")) {
if (CONF_MATCH_VALUE("true")) {
if (config_valgrind &&
unlikely(in_valgrind)) {
malloc_conf_error(
"Deallocation-time "
"junk filling cannot "
"be enabled while "
"running inside "
"Valgrind", k, klen, v,
vlen);
} else {
opt_junk = "true";
opt_junk_alloc = true;
opt_junk_free = true;
}
opt_junk = "true";
opt_junk_alloc = opt_junk_free =
true;
} else if (CONF_MATCH_VALUE("false")) {
opt_junk = "false";
opt_junk_alloc = opt_junk_free =
@@ -1219,20 +1072,9 @@ malloc_conf_init(void)
opt_junk_alloc = true;
opt_junk_free = false;
} else if (CONF_MATCH_VALUE("free")) {
if (config_valgrind &&
unlikely(in_valgrind)) {
malloc_conf_error(
"Deallocation-time "
"junk filling cannot "
"be enabled while "
"running inside "
"Valgrind", k, klen, v,
vlen);
} else {
opt_junk = "free";
opt_junk_alloc = false;
opt_junk_free = true;
}
opt_junk = "free";
opt_junk_alloc = false;
opt_junk_free = true;
} else {
malloc_conf_error(
"Invalid conf value", k,
@@ -1241,7 +1083,7 @@ malloc_conf_init(void)
continue;
}
CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
0, SIZE_T_MAX, no, no, false)
0, SIZE_T_MAX, false)
CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
CONF_HANDLE_BOOL(opt_zero, "zero", true)
}
@@ -1278,8 +1120,8 @@ malloc_conf_init(void)
CONF_HANDLE_BOOL(opt_prof_thread_active_init,
"prof_thread_active_init", true)
CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
"lg_prof_sample", 0, (sizeof(uint64_t) << 3)
- 1, no, yes, true)
"lg_prof_sample", 0,
(sizeof(uint64_t) << 3) - 1, true)
CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
true)
CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
@@ -1295,14 +1137,7 @@ malloc_conf_init(void)
malloc_conf_error("Invalid conf pair", k, klen, v,
vlen);
#undef CONF_MATCH
#undef CONF_MATCH_VALUE
#undef CONF_HANDLE_BOOL
#undef CONF_MIN_no
#undef CONF_MIN_yes
#undef CONF_MAX_no
#undef CONF_MAX_yes
#undef CONF_HANDLE_T_U
#undef CONF_HANDLE_UNSIGNED
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
@@ -1310,6 +1145,7 @@ malloc_conf_init(void)
}
}
/* init_lock must be held. */
static bool
malloc_init_hard_needed(void)
{
@@ -1325,14 +1161,11 @@ malloc_init_hard_needed(void)
}
#ifdef JEMALLOC_THREADED_INIT
if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
spin_t spinner;
/* Busy-wait until the initializing thread completes. */
spin_init(&spinner);
do {
malloc_mutex_unlock(TSDN_NULL, &init_lock);
spin_adaptive(&spinner);
malloc_mutex_lock(TSDN_NULL, &init_lock);
malloc_mutex_unlock(&init_lock);
CPU_SPINWAIT;
malloc_mutex_lock(&init_lock);
} while (!malloc_initialized());
return (false);
}
@@ -1340,8 +1173,9 @@ malloc_init_hard_needed(void)
return (true);
}
/* init_lock must be held. */
static bool
malloc_init_hard_a0_locked()
malloc_init_hard_a0_locked(void)
{
malloc_initializer = INITIALIZER;
@@ -1357,7 +1191,6 @@ malloc_init_hard_a0_locked()
abort();
}
}
pages_boot();
if (base_boot())
return (true);
if (chunk_boot())
@@ -1366,28 +1199,26 @@ malloc_init_hard_a0_locked()
return (true);
if (config_prof)
prof_boot1();
arena_boot();
if (config_tcache && tcache_boot(TSDN_NULL))
if (arena_boot())
return (true);
if (config_tcache && tcache_boot())
return (true);
if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS))
if (malloc_mutex_init(&arenas_lock))
return (true);
/*
* Create enough scaffolding to allow recursive allocation in
* malloc_ncpus().
*/
narenas_auto = 1;
narenas_total_set(narenas_auto);
narenas_total = narenas_auto = 1;
arenas = &a0;
memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
/*
* Initialize one arena here. The rest are lazily created in
* arena_choose_hard().
*/
if (arena_init(TSDN_NULL, 0) == NULL)
if (arena_init(0) == NULL)
return (true);
malloc_init_state = malloc_init_a0_initialized;
return (false);
}
@@ -1396,42 +1227,45 @@ malloc_init_hard_a0(void)
{
bool ret;
malloc_mutex_lock(TSDN_NULL, &init_lock);
malloc_mutex_lock(&init_lock);
ret = malloc_init_hard_a0_locked();
malloc_mutex_unlock(TSDN_NULL, &init_lock);
malloc_mutex_unlock(&init_lock);
return (ret);
}
/* Initialize data structures which may trigger recursive allocation. */
static bool
/*
* Initialize data structures which may trigger recursive allocation.
*
* init_lock must be held.
*/
static void
malloc_init_hard_recursible(void)
{
malloc_init_state = malloc_init_recursible;
malloc_mutex_unlock(&init_lock);
ncpus = malloc_ncpus();
#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
&& !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
!defined(__native_client__))
/* LinuxThreads' pthread_atfork() allocates. */
#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
&& !defined(_WIN32) && !defined(__native_client__))
/* LinuxThreads's pthread_atfork() allocates. */
if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
jemalloc_postfork_child) != 0) {
malloc_write("<jemalloc>: Error in pthread_atfork()\n");
if (opt_abort)
abort();
return (true);
}
#endif
return (false);
malloc_mutex_lock(&init_lock);
}
/* init_lock must be held. */
static bool
malloc_init_hard_finish(tsdn_t *tsdn)
malloc_init_hard_finish(void)
{
if (malloc_mutex_boot())
if (mutex_boot())
return (true);
if (opt_narenas == 0) {
@@ -1446,69 +1280,68 @@ malloc_init_hard_finish(tsdn_t *tsdn)
}
narenas_auto = opt_narenas;
/*
* Limit the number of arenas to the indexing range of MALLOCX_ARENA().
* Make sure that the arenas array can be allocated. In practice, this
* limit is enough to allow the allocator to function, but the ctl
* machinery will fail to allocate memory at far lower limits.
*/
if (narenas_auto > MALLOCX_ARENA_MAX) {
narenas_auto = MALLOCX_ARENA_MAX;
if (narenas_auto > chunksize / sizeof(arena_t *)) {
narenas_auto = chunksize / sizeof(arena_t *);
malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
narenas_auto);
}
narenas_total_set(narenas_auto);
narenas_total = narenas_auto;
/* Allocate and initialize arenas. */
arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) *
(MALLOCX_ARENA_MAX+1));
arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
if (arenas == NULL)
return (true);
/*
* Zero the array. In practice, this should always be pre-zeroed,
* since it was just mmap()ed, but let's be sure.
*/
memset(arenas, 0, sizeof(arena_t *) * narenas_total);
/* Copy the pointer to the one arena that was already initialized. */
arena_set(0, a0);
arenas[0] = a0;
malloc_init_state = malloc_init_initialized;
malloc_slow_flag_init();
return (false);
}
static bool
malloc_init_hard(void)
{
tsd_t *tsd;
#if defined(_WIN32) && _WIN32_WINNT < 0x0600
_init_init_lock();
#endif
malloc_mutex_lock(TSDN_NULL, &init_lock);
malloc_mutex_lock(&init_lock);
if (!malloc_init_hard_needed()) {
malloc_mutex_unlock(TSDN_NULL, &init_lock);
malloc_mutex_unlock(&init_lock);
return (false);
}
if (malloc_init_state != malloc_init_a0_initialized &&
malloc_init_hard_a0_locked()) {
malloc_mutex_unlock(TSDN_NULL, &init_lock);
malloc_mutex_unlock(&init_lock);
return (true);
}
malloc_mutex_unlock(TSDN_NULL, &init_lock);
/* Recursive allocation relies on functional tsd. */
tsd = malloc_tsd_boot0();
if (tsd == NULL)
return (true);
if (malloc_init_hard_recursible())
if (malloc_tsd_boot0()) {
malloc_mutex_unlock(&init_lock);
return (true);
malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
if (config_prof && prof_boot2(tsd)) {
malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
}
if (config_prof && prof_boot2()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
if (malloc_init_hard_finish(tsd_tsdn(tsd))) {
malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
malloc_init_hard_recursible();
if (malloc_init_hard_finish()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
malloc_mutex_unlock(&init_lock);
malloc_tsd_boot1();
return (false);
}
@@ -1522,104 +1355,61 @@ malloc_init_hard(void)
*/
static void *
ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero,
prof_tctx_t *tctx, bool slow_path)
imalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{
void *p;
if (tctx == NULL)
return (NULL);
if (usize <= SMALL_MAXCLASS) {
szind_t ind_large = size2index(LARGE_MINCLASS);
p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path);
p = imalloc(tsd, LARGE_MINCLASS);
if (p == NULL)
return (NULL);
arena_prof_promoted(tsd_tsdn(tsd), p, usize);
arena_prof_promoted(p, usize);
} else
p = ialloc(tsd, usize, ind, zero, slow_path);
p = imalloc(tsd, usize);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path)
imalloc_prof(tsd_t *tsd, size_t usize)
{
void *p;
prof_tctx_t *tctx;
tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
p = ialloc_prof_sample(tsd, usize, ind, zero, tctx, slow_path);
p = imalloc_prof_sample(tsd, usize, tctx);
else
p = ialloc(tsd, usize, ind, zero, slow_path);
p = imalloc(tsd, usize);
if (unlikely(p == NULL)) {
prof_alloc_rollback(tsd, tctx, true);
return (NULL);
}
prof_malloc(tsd_tsdn(tsd), p, usize, tctx);
prof_malloc(p, usize, tctx);
return (p);
}
/*
* ialloc_body() is inlined so that fast and slow paths are generated separately
* with statically known slow_path.
*
* This function guarantees that *tsdn is non-NULL on success.
*/
JEMALLOC_ALWAYS_INLINE_C void *
ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize,
bool slow_path)
imalloc_body(size_t size, tsd_t **tsd, size_t *usize)
{
tsd_t *tsd;
szind_t ind;
if (slow_path && unlikely(malloc_init())) {
*tsdn = NULL;
return (NULL);
}
tsd = tsd_fetch();
*tsdn = tsd_tsdn(tsd);
witness_assert_lockless(tsd_tsdn(tsd));
ind = size2index(size);
if (unlikely(ind >= NSIZES))
if (unlikely(malloc_init()))
return (NULL);
*tsd = tsd_fetch();
if (config_stats || (config_prof && opt_prof) || (slow_path &&
config_valgrind && unlikely(in_valgrind))) {
*usize = index2size(ind);
assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
if (config_prof && opt_prof) {
*usize = s2u(size);
if (unlikely(*usize == 0))
return (NULL);
return (imalloc_prof(*tsd, *usize));
}
if (config_prof && opt_prof)
return (ialloc_prof(tsd, *usize, ind, zero, slow_path));
return (ialloc(tsd, size, ind, zero, slow_path));
}
JEMALLOC_ALWAYS_INLINE_C void
ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func,
bool update_errno, bool slow_path)
{
assert(!tsdn_null(tsdn) || ret == NULL);
if (unlikely(ret == NULL)) {
if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) {
malloc_printf("<jemalloc>: Error in %s(): out of "
"memory\n", func);
abort();
}
if (update_errno)
set_errno(ENOMEM);
}
if (config_stats && likely(ret != NULL)) {
assert(usize == isalloc(tsdn, ret, config_prof));
*tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize;
}
witness_assert_lockless(tsdn);
if (config_stats || (config_valgrind && unlikely(in_valgrind)))
*usize = s2u(size);
return (imalloc(*tsd, size));
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
@@ -1628,22 +1418,27 @@ JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_malloc(size_t size)
{
void *ret;
tsdn_t *tsdn;
tsd_t *tsd;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
if (size == 0)
size = 1;
if (likely(!malloc_slow)) {
ret = ialloc_body(size, false, &tsdn, &usize, false);
ialloc_post_check(ret, tsdn, usize, "malloc", true, false);
} else {
ret = ialloc_body(size, false, &tsdn, &usize, true);
ialloc_post_check(ret, tsdn, usize, "malloc", true, true);
UTRACE(0, size, ret);
JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
ret = imalloc_body(size, &tsd, &usize);
if (unlikely(ret == NULL)) {
if (config_xmalloc && unlikely(opt_xmalloc)) {
malloc_write("<jemalloc>: Error in malloc(): "
"out of memory\n");
abort();
}
set_errno(ENOMEM);
}
if (config_stats && likely(ret != NULL)) {
assert(usize == isalloc(ret, config_prof));
*tsd_thread_allocatedp_get(tsd) += usize;
}
UTRACE(0, size, ret);
JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
return (ret);
}
@@ -1660,7 +1455,7 @@ imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
if (p == NULL)
return (NULL);
arena_prof_promoted(tsd_tsdn(tsd), p, usize);
arena_prof_promoted(p, usize);
} else
p = ipalloc(tsd, usize, alignment, false);
@@ -1682,7 +1477,7 @@ imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
prof_alloc_rollback(tsd, tctx, true);
return (NULL);
}
prof_malloc(tsd_tsdn(tsd), p, usize, tctx);
prof_malloc(p, usize, tctx);
return (p);
}
@@ -1699,12 +1494,10 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
assert(min_alignment != 0);
if (unlikely(malloc_init())) {
tsd = NULL;
result = NULL;
goto label_oom;
}
tsd = tsd_fetch();
witness_assert_lockless(tsd_tsdn(tsd));
if (size == 0)
size = 1;
@@ -1722,7 +1515,7 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
}
usize = sa2u(size, alignment);
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
if (unlikely(usize == 0)) {
result = NULL;
goto label_oom;
}
@@ -1739,13 +1532,10 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
ret = 0;
label_return:
if (config_stats && likely(result != NULL)) {
assert(usize == isalloc(tsd_tsdn(tsd), result, config_prof));
assert(usize == isalloc(result, config_prof));
*tsd_thread_allocatedp_get(tsd) += usize;
}
UTRACE(0, size, result);
JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize,
false);
witness_assert_lockless(tsd_tsdn(tsd));
return (ret);
label_oom:
assert(result == NULL);
@@ -1755,7 +1545,6 @@ label_oom:
abort();
}
ret = ENOMEM;
witness_assert_lockless(tsd_tsdn(tsd));
goto label_return;
}
@@ -1763,10 +1552,9 @@ JEMALLOC_EXPORT int JEMALLOC_NOTHROW
JEMALLOC_ATTR(nonnull(1))
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
int ret;
ret = imemalign(memptr, alignment, size, sizeof(void *));
int ret = imemalign(memptr, alignment, size, sizeof(void *));
JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
config_prof), false);
return (ret);
}
@@ -1782,45 +1570,114 @@ je_aligned_alloc(size_t alignment, size_t size)
ret = NULL;
set_errno(err);
}
JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
false);
return (ret);
}
static void *
icalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{
void *p;
if (tctx == NULL)
return (NULL);
if (usize <= SMALL_MAXCLASS) {
p = icalloc(tsd, LARGE_MINCLASS);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else
p = icalloc(tsd, usize);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
icalloc_prof(tsd_t *tsd, size_t usize)
{
void *p;
prof_tctx_t *tctx;
tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
p = icalloc_prof_sample(tsd, usize, tctx);
else
p = icalloc(tsd, usize);
if (unlikely(p == NULL)) {
prof_alloc_rollback(tsd, tctx, true);
return (NULL);
}
prof_malloc(p, usize, tctx);
return (p);
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
je_calloc(size_t num, size_t size)
{
void *ret;
tsdn_t *tsdn;
tsd_t *tsd;
size_t num_size;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
if (unlikely(malloc_init())) {
num_size = 0;
ret = NULL;
goto label_return;
}
tsd = tsd_fetch();
num_size = num * size;
if (unlikely(num_size == 0)) {
if (num == 0 || size == 0)
num_size = 1;
else
num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */
else {
ret = NULL;
goto label_return;
}
/*
* Try to avoid division here. We know that it isn't possible to
* overflow during multiplication if neither operand uses any of the
* most significant half of the bits in a size_t.
*/
} else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
2))) && (num_size / size != num)))
num_size = HUGE_MAXCLASS + 1; /* size_t overflow. */
2))) && (num_size / size != num))) {
/* size_t overflow. */
ret = NULL;
goto label_return;
}
if (likely(!malloc_slow)) {
ret = ialloc_body(num_size, true, &tsdn, &usize, false);
ialloc_post_check(ret, tsdn, usize, "calloc", true, false);
if (config_prof && opt_prof) {
usize = s2u(num_size);
if (unlikely(usize == 0)) {
ret = NULL;
goto label_return;
}
ret = icalloc_prof(tsd, usize);
} else {
ret = ialloc_body(num_size, true, &tsdn, &usize, true);
ialloc_post_check(ret, tsdn, usize, "calloc", true, true);
UTRACE(0, num_size, ret);
JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true);
if (config_stats || (config_valgrind && unlikely(in_valgrind)))
usize = s2u(num_size);
ret = icalloc(tsd, num_size);
}
label_return:
if (unlikely(ret == NULL)) {
if (config_xmalloc && unlikely(opt_xmalloc)) {
malloc_write("<jemalloc>: Error in calloc(): out of "
"memory\n");
abort();
}
set_errno(ENOMEM);
}
if (config_stats && likely(ret != NULL)) {
assert(usize == isalloc(ret, config_prof));
*tsd_thread_allocatedp_get(tsd) += usize;
}
UTRACE(0, num_size, ret);
JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
return (ret);
}
@@ -1836,7 +1693,7 @@ irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
if (p == NULL)
return (NULL);
arena_prof_promoted(tsd_tsdn(tsd), p, usize);
arena_prof_promoted(p, usize);
} else
p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
@@ -1851,7 +1708,7 @@ irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
prof_tctx_t *old_tctx, *tctx;
prof_active = prof_active_get_unlocked();
old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
old_tctx = prof_tctx_get(old_ptr);
tctx = prof_alloc_prep(tsd, usize, prof_active, true);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
@@ -1868,41 +1725,32 @@ irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
}
JEMALLOC_INLINE_C void
ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
ifree(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
size_t usize;
UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
witness_assert_lockless(tsd_tsdn(tsd));
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
if (config_prof && opt_prof) {
usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
usize = isalloc(ptr, config_prof);
prof_free(tsd, ptr, usize);
} else if (config_stats || config_valgrind)
usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
usize = isalloc(ptr, config_prof);
if (config_stats)
*tsd_thread_deallocatedp_get(tsd) += usize;
if (likely(!slow_path))
iqalloc(tsd, ptr, tcache, false);
else {
if (config_valgrind && unlikely(in_valgrind))
rzsize = p2rz(tsd_tsdn(tsd), ptr);
iqalloc(tsd, ptr, tcache, true);
JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
if (config_valgrind && unlikely(in_valgrind))
rzsize = p2rz(ptr);
iqalloc(tsd, ptr, tcache);
JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
JEMALLOC_INLINE_C void
isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache)
{
UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
witness_assert_lockless(tsd_tsdn(tsd));
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
@@ -1911,8 +1759,8 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
if (config_stats)
*tsd_thread_deallocatedp_get(tsd) += usize;
if (config_valgrind && unlikely(in_valgrind))
rzsize = p2rz(tsd_tsdn(tsd), ptr);
isqalloc(tsd, ptr, usize, tcache, slow_path);
rzsize = p2rz(ptr);
isqalloc(tsd, ptr, usize, tcache);
JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
@@ -1922,57 +1770,44 @@ JEMALLOC_ALLOC_SIZE(2)
je_realloc(void *ptr, size_t size)
{
void *ret;
tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL);
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
size_t old_usize = 0;
UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
if (unlikely(size == 0)) {
if (ptr != NULL) {
tsd_t *tsd;
/* realloc(ptr, 0) is equivalent to free(ptr). */
UTRACE(ptr, 0, 0);
tsd = tsd_fetch();
ifree(tsd, ptr, tcache_get(tsd, false), true);
ifree(tsd, ptr, tcache_get(tsd, false));
return (NULL);
}
size = 1;
}
if (likely(ptr != NULL)) {
tsd_t *tsd;
assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
tsd = tsd_fetch();
witness_assert_lockless(tsd_tsdn(tsd));
old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
if (config_valgrind && unlikely(in_valgrind)) {
old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) :
u2rz(old_usize);
}
old_usize = isalloc(ptr, config_prof);
if (config_valgrind && unlikely(in_valgrind))
old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
if (config_prof && opt_prof) {
usize = s2u(size);
ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
NULL : irealloc_prof(tsd, ptr, old_usize, usize);
ret = unlikely(usize == 0) ? NULL : irealloc_prof(tsd,
ptr, old_usize, usize);
} else {
if (config_stats || (config_valgrind &&
unlikely(in_valgrind)))
usize = s2u(size);
ret = iralloc(tsd, ptr, old_usize, size, 0, false);
}
tsdn = tsd_tsdn(tsd);
} else {
/* realloc(NULL, size) is equivalent to malloc(size). */
if (likely(!malloc_slow))
ret = ialloc_body(size, false, &tsdn, &usize, false);
else
ret = ialloc_body(size, false, &tsdn, &usize, true);
assert(!tsdn_null(tsdn) || ret == NULL);
ret = imalloc_body(size, &tsd, &usize);
}
if (unlikely(ret == NULL)) {
......@@ -1984,17 +1819,13 @@ je_realloc(void *ptr, size_t size)
set_errno(ENOMEM);
}
if (config_stats && likely(ret != NULL)) {
tsd_t *tsd;
assert(usize == isalloc(tsdn, ret, config_prof));
tsd = tsdn_tsd(tsdn);
assert(usize == isalloc(ret, config_prof));
*tsd_thread_allocatedp_get(tsd) += usize;
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, ret);
JEMALLOC_VALGRIND_REALLOC(maybe, tsdn, ret, usize, maybe, ptr,
old_usize, old_rzsize, maybe, false);
witness_assert_lockless(tsdn);
JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
old_rzsize, true, false);
return (ret);
}
......@@ -2005,12 +1836,7 @@ je_free(void *ptr)
UTRACE(ptr, 0, 0);
if (likely(ptr != NULL)) {
tsd_t *tsd = tsd_fetch();
witness_assert_lockless(tsd_tsdn(tsd));
if (likely(!malloc_slow))
ifree(tsd, ptr, tcache_get(tsd, false), false);
else
ifree(tsd, ptr, tcache_get(tsd, false), true);
witness_assert_lockless(tsd_tsdn(tsd));
ifree(tsd, ptr, tcache_get(tsd, false));
}
}
......@@ -2031,6 +1857,7 @@ je_memalign(size_t alignment, size_t size)
void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
ret = NULL;
JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
return (ret);
}
#endif
......@@ -2044,6 +1871,7 @@ je_valloc(size_t size)
void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
ret = NULL;
JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
return (ret);
}
#endif
......@@ -2073,29 +1901,6 @@ JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
je_memalign;
# endif
#ifdef CPU_COUNT
/*
* To enable static linking with glibc, the libc specific malloc interface must
* be implemented also, so none of glibc's malloc.o functions are added to the
* link.
*/
#define ALIAS(je_fn) __attribute__((alias (#je_fn), used))
/* To force macro expansion of je_ prefix before stringification. */
#define PREALIAS(je_fn) ALIAS(je_fn)
void *__libc_malloc(size_t size) PREALIAS(je_malloc);
void __libc_free(void* ptr) PREALIAS(je_free);
void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
void *__libc_valloc(size_t size) PREALIAS(je_valloc);
int __posix_memalign(void** r, size_t a, size_t s)
PREALIAS(je_posix_memalign);
#undef PREALIAS
#undef ALIAS
#endif
#endif
/*
......@@ -2107,7 +1912,7 @@ int __posix_memalign(void** r, size_t a, size_t s)
*/
JEMALLOC_ALWAYS_INLINE_C bool
imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
{
......@@ -2118,8 +1923,7 @@ imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
*alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
*usize = sa2u(size, *alignment);
}
if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
return (true);
assert(*usize != 0);
*zero = MALLOCX_ZERO_GET(flags);
if ((flags & MALLOCX_TCACHE_MASK) != 0) {
if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
......@@ -2130,7 +1934,7 @@ imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
*tcache = tcache_get(tsd, true);
if ((flags & MALLOCX_ARENA_MASK) != 0) {
unsigned arena_ind = MALLOCX_ARENA_GET(flags);
*arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
*arena = arena_get(tsd, arena_ind, true, true);
if (unlikely(*arena == NULL))
return (true);
} else
......@@ -2138,44 +1942,59 @@ imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
return (false);
}
JEMALLOC_ALWAYS_INLINE_C bool
imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
{
if (likely(flags == 0)) {
*usize = s2u(size);
assert(*usize != 0);
*alignment = 0;
*zero = false;
*tcache = tcache_get(tsd, true);
*arena = NULL;
return (false);
} else {
return (imallocx_flags_decode_hard(tsd, size, flags, usize,
alignment, zero, tcache, arena));
}
}
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_flags(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena, bool slow_path)
imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena)
{
szind_t ind;
if (unlikely(alignment != 0))
return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
ind = size2index(usize);
assert(ind < NSIZES);
return (iallocztm(tsdn, usize, ind, zero, tcache, false, arena,
slow_path));
return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
if (unlikely(zero))
return (icalloct(tsd, usize, tcache, arena));
return (imalloct(tsd, usize, tcache, arena));
}
static void *
imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena, bool slow_path)
imallocx_prof_sample(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena)
{
void *p;
if (usize <= SMALL_MAXCLASS) {
assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
p = imallocx_flags(tsdn, LARGE_MINCLASS, alignment, zero,
tcache, arena, slow_path);
p = imallocx_flags(tsd, LARGE_MINCLASS, alignment, zero, tcache,
arena);
if (p == NULL)
return (NULL);
arena_prof_promoted(tsdn, p, usize);
} else {
p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena,
slow_path);
}
arena_prof_promoted(p, usize);
} else
p = imallocx_flags(tsd, usize, alignment, zero, tcache, arena);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path)
imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
{
void *p;
size_t alignment;
......@@ -2188,27 +2007,25 @@ imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path)
&zero, &tcache, &arena)))
return (NULL);
tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero,
tcache, arena, slow_path);
} else if ((uintptr_t)tctx > (uintptr_t)1U) {
p = imallocx_prof_sample(tsd_tsdn(tsd), *usize, alignment, zero,
tcache, arena, slow_path);
if (likely((uintptr_t)tctx == (uintptr_t)1U))
p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
else if ((uintptr_t)tctx > (uintptr_t)1U) {
p = imallocx_prof_sample(tsd, *usize, alignment, zero, tcache,
arena);
} else
p = NULL;
if (unlikely(p == NULL)) {
prof_alloc_rollback(tsd, tctx, true);
return (NULL);
}
prof_malloc(tsd_tsdn(tsd), p, *usize, tctx);
prof_malloc(p, *usize, tctx);
assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize,
bool slow_path)
imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
{
void *p;
size_t alignment;
......@@ -2216,53 +2033,18 @@ imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize,
tcache_t *tcache;
arena_t *arena;
if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
&zero, &tcache, &arena)))
return (NULL);
p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, tcache,
arena, slow_path);
assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
return (p);
}
/* This function guarantees that *tsdn is non-NULL on success. */
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize,
bool slow_path)
{
tsd_t *tsd;
if (slow_path && unlikely(malloc_init())) {
*tsdn = NULL;
return (NULL);
}
tsd = tsd_fetch();
*tsdn = tsd_tsdn(tsd);
witness_assert_lockless(tsd_tsdn(tsd));
if (likely(flags == 0)) {
szind_t ind = size2index(size);
if (unlikely(ind >= NSIZES))
return (NULL);
if (config_stats || (config_prof && opt_prof) || (slow_path &&
config_valgrind && unlikely(in_valgrind))) {
*usize = index2size(ind);
assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
}
if (config_prof && opt_prof) {
return (ialloc_prof(tsd, *usize, ind, false,
slow_path));
}
return (ialloc(tsd, size, ind, false, slow_path));
if (config_stats || (config_valgrind && unlikely(in_valgrind)))
*usize = s2u(size);
return (imalloc(tsd, size));
}
if (config_prof && opt_prof)
return (imallocx_prof(tsd, size, flags, usize, slow_path));
return (imallocx_no_prof(tsd, size, flags, usize, slow_path));
if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize,
&alignment, &zero, &tcache, &arena)))
return (NULL);
p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
return (p);
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
......@@ -2270,24 +2052,37 @@ void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_mallocx(size_t size, int flags)
{
tsdn_t *tsdn;
tsd_t *tsd;
void *p;
size_t usize;
assert(size != 0);
if (likely(!malloc_slow)) {
p = imallocx_body(size, flags, &tsdn, &usize, false);
ialloc_post_check(p, tsdn, usize, "mallocx", false, false);
} else {
p = imallocx_body(size, flags, &tsdn, &usize, true);
ialloc_post_check(p, tsdn, usize, "mallocx", false, true);
UTRACE(0, size, p);
JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize,
MALLOCX_ZERO_GET(flags));
}
if (unlikely(malloc_init()))
goto label_oom;
tsd = tsd_fetch();
if (config_prof && opt_prof)
p = imallocx_prof(tsd, size, flags, &usize);
else
p = imallocx_no_prof(tsd, size, flags, &usize);
if (unlikely(p == NULL))
goto label_oom;
if (config_stats) {
assert(usize == isalloc(p, config_prof));
*tsd_thread_allocatedp_get(tsd) += usize;
}
UTRACE(0, size, p);
JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags));
return (p);
label_oom:
if (config_xmalloc && unlikely(opt_xmalloc)) {
malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
abort();
}
UTRACE(0, size, 0);
return (NULL);
}
static void *
......@@ -2304,7 +2099,7 @@ irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
zero, tcache, arena);
if (p == NULL)
return (NULL);
arena_prof_promoted(tsd_tsdn(tsd), p, usize);
arena_prof_promoted(p, usize);
} else {
p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
tcache, arena);
......@@ -2323,8 +2118,8 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
prof_tctx_t *old_tctx, *tctx;
prof_active = prof_active_get_unlocked();
old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
old_tctx = prof_tctx_get(old_ptr);
tctx = prof_alloc_prep(tsd, *usize, prof_active, true);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
alignment, zero, tcache, arena, tctx);
......@@ -2333,7 +2128,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
tcache, arena);
}
if (unlikely(p == NULL)) {
prof_alloc_rollback(tsd, tctx, false);
prof_alloc_rollback(tsd, tctx, true);
return (NULL);
}
......@@ -2346,9 +2141,9 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
* be the same as the current usize because of in-place large
* reallocation. Therefore, query the actual value of usize.
*/
*usize = isalloc(tsd_tsdn(tsd), p, config_prof);
*usize = isalloc(p, config_prof);
}
prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr,
old_usize, old_tctx);
return (p);
......@@ -2374,11 +2169,10 @@ je_rallocx(void *ptr, size_t size, int flags)
assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
tsd = tsd_fetch();
witness_assert_lockless(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
unsigned arena_ind = MALLOCX_ARENA_GET(flags);
arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
arena = arena_get(tsd, arena_ind, true, true);
if (unlikely(arena == NULL))
goto label_oom;
} else
......@@ -2392,14 +2186,13 @@ je_rallocx(void *ptr, size_t size, int flags)
} else
tcache = tcache_get(tsd, true);
old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
old_usize = isalloc(ptr, config_prof);
if (config_valgrind && unlikely(in_valgrind))
old_rzsize = u2rz(old_usize);
if (config_prof && opt_prof) {
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
goto label_oom;
assert(usize != 0);
p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
zero, tcache, arena);
if (unlikely(p == NULL))
......@@ -2410,7 +2203,7 @@ je_rallocx(void *ptr, size_t size, int flags)
if (unlikely(p == NULL))
goto label_oom;
if (config_stats || (config_valgrind && unlikely(in_valgrind)))
usize = isalloc(tsd_tsdn(tsd), p, config_prof);
usize = isalloc(p, config_prof);
}
assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
......@@ -2419,9 +2212,8 @@ je_rallocx(void *ptr, size_t size, int flags)
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, p);
JEMALLOC_VALGRIND_REALLOC(maybe, tsd_tsdn(tsd), p, usize, no, ptr,
old_usize, old_rzsize, no, zero);
witness_assert_lockless(tsd_tsdn(tsd));
JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize,
old_rzsize, false, zero);
return (p);
label_oom:
if (config_xmalloc && unlikely(opt_xmalloc)) {
......@@ -2429,33 +2221,31 @@ label_oom:
abort();
}
UTRACE(ptr, size, 0);
witness_assert_lockless(tsd_tsdn(tsd));
return (NULL);
}
JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
size_t extra, size_t alignment, bool zero)
ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
size_t alignment, bool zero)
{
size_t usize;
if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero))
if (ixalloc(ptr, old_usize, size, extra, alignment, zero))
return (old_usize);
usize = isalloc(tsdn, ptr, config_prof);
usize = isalloc(ptr, config_prof);
return (usize);
}
static size_t
ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
size_t alignment, bool zero, prof_tctx_t *tctx)
{
size_t usize;
if (tctx == NULL)
return (old_usize);
usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
zero);
usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, zero);
return (usize);
}
......@@ -2469,36 +2259,23 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
prof_tctx_t *old_tctx, *tctx;
prof_active = prof_active_get_unlocked();
old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
old_tctx = prof_tctx_get(ptr);
/*
* usize isn't knowable before ixalloc() returns when extra is non-zero.
* Therefore, compute its maximum possible value and use that in
* prof_alloc_prep() to decide whether to capture a backtrace.
* prof_realloc() will use the actual usize to decide whether to sample.
*/
if (alignment == 0) {
usize_max = s2u(size+extra);
assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS);
} else {
usize_max = sa2u(size+extra, alignment);
if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
/*
* usize_max is out of range, and chances are that
* allocation will fail, but use the maximum possible
* value and carry on with prof_alloc_prep(), just in
* case allocation succeeds.
*/
usize_max = HUGE_MAXCLASS;
}
}
usize_max = (alignment == 0) ? s2u(size+extra) : sa2u(size+extra,
alignment);
assert(usize_max != 0);
tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
size, extra, alignment, zero, tctx);
usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
alignment, zero, tctx);
} else {
usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
extra, alignment, zero);
usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
zero);
}
if (usize == old_usize) {
prof_alloc_rollback(tsd, tctx, false);
......@@ -2525,25 +2302,18 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
tsd = tsd_fetch();
witness_assert_lockless(tsd_tsdn(tsd));
old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
old_usize = isalloc(ptr, config_prof);
/*
* The API explicitly absolves itself of protecting against (size +
* extra) numerical overflow, but we may need to clamp extra to avoid
* exceeding HUGE_MAXCLASS.
*
* Ordinarily, size limit checking is handled deeper down, but here we
* have to check as part of (size + extra) clamping, since we need the
* clamped value in the above helper functions.
*/
if (unlikely(size > HUGE_MAXCLASS)) {
usize = old_usize;
goto label_not_resized;
}
if (unlikely(HUGE_MAXCLASS - size < extra))
/* Clamp extra if necessary to avoid (size + extra) overflow. */
if (unlikely(size + extra > HUGE_MAXCLASS)) {
/* Check for size overflow. */
if (unlikely(size > HUGE_MAXCLASS)) {
usize = old_usize;
goto label_not_resized;
}
extra = HUGE_MAXCLASS - size;
}
if (config_valgrind && unlikely(in_valgrind))
old_rzsize = u2rz(old_usize);
......@@ -2552,8 +2322,8 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
alignment, zero);
} else {
usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
extra, alignment, zero);
usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
zero);
}
if (unlikely(usize == old_usize))
goto label_not_resized;
......@@ -2562,11 +2332,10 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
*tsd_thread_allocatedp_get(tsd) += usize;
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
JEMALLOC_VALGRIND_REALLOC(no, tsd_tsdn(tsd), ptr, usize, no, ptr,
old_usize, old_rzsize, no, zero);
JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize,
old_rzsize, false, zero);
label_not_resized:
UTRACE(ptr, size, ptr);
witness_assert_lockless(tsd_tsdn(tsd));
return (usize);
}
......@@ -2575,20 +2344,15 @@ JEMALLOC_ATTR(pure)
je_sallocx(const void *ptr, int flags)
{
size_t usize;
tsdn_t *tsdn;
assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
tsdn = tsdn_fetch();
witness_assert_lockless(tsdn);
if (config_ivsalloc)
usize = ivsalloc(tsdn, ptr, config_prof);
usize = ivsalloc(ptr, config_prof);
else
usize = isalloc(tsdn, ptr, config_prof);
usize = isalloc(ptr, config_prof);
witness_assert_lockless(tsdn);
return (usize);
}
......@@ -2602,7 +2366,6 @@ je_dallocx(void *ptr, int flags)
assert(malloc_initialized() || IS_INITIALIZER);
tsd = tsd_fetch();
witness_assert_lockless(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
tcache = NULL;
......@@ -2612,25 +2375,19 @@ je_dallocx(void *ptr, int flags)
tcache = tcache_get(tsd, false);
UTRACE(ptr, 0, 0);
if (likely(!malloc_slow))
ifree(tsd, ptr, tcache, false);
else
ifree(tsd, ptr, tcache, true);
witness_assert_lockless(tsd_tsdn(tsd));
ifree(tsd_fetch(), ptr, tcache);
}
JEMALLOC_ALWAYS_INLINE_C size_t
inallocx(tsdn_t *tsdn, size_t size, int flags)
inallocx(size_t size, int flags)
{
size_t usize;
witness_assert_lockless(tsdn);
if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
usize = s2u(size);
else
usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
witness_assert_lockless(tsdn);
assert(usize != 0);
return (usize);
}
......@@ -2643,11 +2400,10 @@ je_sdallocx(void *ptr, size_t size, int flags)
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
tsd = tsd_fetch();
usize = inallocx(tsd_tsdn(tsd), size, flags);
assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof));
usize = inallocx(size, flags);
assert(usize == isalloc(ptr, config_prof));
witness_assert_lockless(tsd_tsdn(tsd));
tsd = tsd_fetch();
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
tcache = NULL;
......@@ -2657,116 +2413,75 @@ je_sdallocx(void *ptr, size_t size, int flags)
tcache = tcache_get(tsd, false);
UTRACE(ptr, 0, 0);
if (likely(!malloc_slow))
isfree(tsd, ptr, usize, tcache, false);
else
isfree(tsd, ptr, usize, tcache, true);
witness_assert_lockless(tsd_tsdn(tsd));
isfree(tsd, ptr, usize, tcache);
}
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_nallocx(size_t size, int flags)
{
size_t usize;
tsdn_t *tsdn;
assert(size != 0);
if (unlikely(malloc_init()))
return (0);
tsdn = tsdn_fetch();
witness_assert_lockless(tsdn);
usize = inallocx(tsdn, size, flags);
if (unlikely(usize > HUGE_MAXCLASS))
return (0);
witness_assert_lockless(tsdn);
return (usize);
return (inallocx(size, flags));
}
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t newlen)
{
int ret;
tsd_t *tsd;
if (unlikely(malloc_init()))
return (EAGAIN);
tsd = tsd_fetch();
witness_assert_lockless(tsd_tsdn(tsd));
ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
witness_assert_lockless(tsd_tsdn(tsd));
return (ret);
return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{
int ret;
tsdn_t *tsdn;
if (unlikely(malloc_init()))
return (EAGAIN);
tsdn = tsdn_fetch();
witness_assert_lockless(tsdn);
ret = ctl_nametomib(tsdn, name, mibp, miblenp);
witness_assert_lockless(tsdn);
return (ret);
return (ctl_nametomib(name, mibp, miblenp));
}
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
tsd_t *tsd;
if (unlikely(malloc_init()))
return (EAGAIN);
tsd = tsd_fetch();
witness_assert_lockless(tsd_tsdn(tsd));
ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
witness_assert_lockless(tsd_tsdn(tsd));
return (ret);
return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
const char *opts)
{
tsdn_t *tsdn;
tsdn = tsdn_fetch();
witness_assert_lockless(tsdn);
stats_print(write_cb, cbopaque, opts);
witness_assert_lockless(tsdn);
}
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
size_t ret;
tsdn_t *tsdn;
assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
tsdn = tsdn_fetch();
witness_assert_lockless(tsdn);
if (config_ivsalloc)
ret = ivsalloc(tsdn, ptr, config_prof);
ret = ivsalloc(ptr, config_prof);
else
ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr, config_prof);
ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof);
witness_assert_lockless(tsdn);
return (ret);
}
......@@ -2792,7 +2507,6 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
* to trigger the deadlock described above, but doing so would involve forking
* via a library constructor that runs before jemalloc's runs.
*/
#ifndef JEMALLOC_JET
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void)
......@@ -2800,7 +2514,6 @@ jemalloc_constructor(void)
malloc_init();
}
#endif
#ifndef JEMALLOC_MUTEX_INIT_CB
void
......@@ -2810,9 +2523,7 @@ JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
tsd_t *tsd;
unsigned i, j, narenas;
arena_t *arena;
unsigned i;
#ifdef JEMALLOC_MUTEX_INIT_CB
if (!malloc_initialized())
......@@ -2820,40 +2531,16 @@ _malloc_prefork(void)
#endif
assert(malloc_initialized());
tsd = tsd_fetch();
narenas = narenas_total_get();
witness_prefork(tsd);
/* Acquire all mutexes in a safe order. */
ctl_prefork(tsd_tsdn(tsd));
malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
prof_prefork0(tsd_tsdn(tsd));
for (i = 0; i < 3; i++) {
for (j = 0; j < narenas; j++) {
if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
NULL) {
switch (i) {
case 0:
arena_prefork0(tsd_tsdn(tsd), arena);
break;
case 1:
arena_prefork1(tsd_tsdn(tsd), arena);
break;
case 2:
arena_prefork2(tsd_tsdn(tsd), arena);
break;
default: not_reached();
}
}
}
}
base_prefork(tsd_tsdn(tsd));
for (i = 0; i < narenas; i++) {
if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
arena_prefork3(tsd_tsdn(tsd), arena);
ctl_prefork();
prof_prefork();
malloc_mutex_prefork(&arenas_lock);
for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_prefork(arenas[i]);
}
prof_prefork1(tsd_tsdn(tsd));
chunk_prefork();
base_prefork();
}
#ifndef JEMALLOC_MUTEX_INIT_CB
......@@ -2864,8 +2551,7 @@ JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
tsd_t *tsd;
unsigned i, narenas;
unsigned i;
#ifdef JEMALLOC_MUTEX_INIT_CB
if (!malloc_initialized())
......@@ -2873,77 +2559,35 @@ _malloc_postfork(void)
#endif
assert(malloc_initialized());
tsd = tsd_fetch();
witness_postfork_parent(tsd);
/* Release all mutexes, now that fork() has completed. */
base_postfork_parent(tsd_tsdn(tsd));
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena;
if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
arena_postfork_parent(tsd_tsdn(tsd), arena);
base_postfork_parent();
chunk_postfork_parent();
for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_postfork_parent(arenas[i]);
}
prof_postfork_parent(tsd_tsdn(tsd));
malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
ctl_postfork_parent(tsd_tsdn(tsd));
malloc_mutex_postfork_parent(&arenas_lock);
prof_postfork_parent();
ctl_postfork_parent();
}
void
jemalloc_postfork_child(void)
{
tsd_t *tsd;
unsigned i, narenas;
unsigned i;
assert(malloc_initialized());
tsd = tsd_fetch();
witness_postfork_child(tsd);
/* Release all mutexes, now that fork() has completed. */
base_postfork_child(tsd_tsdn(tsd));
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena;
if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
arena_postfork_child(tsd_tsdn(tsd), arena);
base_postfork_child();
chunk_postfork_child();
for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_postfork_child(arenas[i]);
}
prof_postfork_child(tsd_tsdn(tsd));
malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
ctl_postfork_child(tsd_tsdn(tsd));
malloc_mutex_postfork_child(&arenas_lock);
prof_postfork_child();
ctl_postfork_child();
}
/******************************************************************************/
/* Helps the application decide whether a pointer is worth re-allocating in order to reduce fragmentation.
 * Returns 0 if the allocation is in the currently active run, or if it is not causing any fragmentation
 * issue (large or huge allocation). Otherwise returns 1 and reports the bin utilization and the run
 * utilization, both in 16:16 fixed point.
 * If the application decides to re-allocate, it should use MALLOCX_TCACHE_NONE when doing so. */
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_get_defrag_hint(void* ptr, int *bin_util, int *run_util) {
int defrag = 0;
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr)) { /* indication that this is not a HUGE alloc */
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t mapbits = arena_mapbits_get(chunk, pageind);
if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) { /* indication that this is not a LARGE alloc */
arena_t *arena = extent_node_arena_get(&chunk->node);
size_t rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
arena_run_t *run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
arena_bin_t *bin = &arena->bins[run->binind];
tsd_t *tsd = tsd_fetch();
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
/* runs in the same chunk as the bin's current run (runcur) are likely to become the next runcur, so don't suggest moving allocations out of them */
if (chunk != (arena_chunk_t *)CHUNK_ADDR2BASE(bin->runcur)) {
arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
size_t availregs = bin_info->nregs * bin->stats.curruns;
*bin_util = (bin->stats.curregs<<16) / availregs;
*run_util = ((bin_info->nregs - run->nfree)<<16) / bin_info->nregs;
defrag = 1;
}
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
}
}
return defrag;
}
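For context, a minimal sketch (not part of this diff) of how an application might consume the hint above. The function name defrag_reallocate and the utilization heuristic are assumptions, and the header/prefix assume a je_-prefixed jemalloc build that exposes je_get_defrag_hint; it follows the comment's advice to bypass the tcache when re-allocating:
/*
 * Illustrative only: move ptr to a fresh allocation when the hint reports
 * that the run it lives in is emptier than its bin overall.
 */
#include <string.h>
#include <jemalloc/jemalloc.h>	/* assumed to declare the je_-prefixed API */
void *
defrag_reallocate(void *ptr)
{
	int bin_util, run_util;
	size_t size;
	void *newptr;
	if (!je_get_defrag_hint(ptr, &bin_util, &run_util))
		return (ptr);	/* Huge/large, or already in the active run. */
	/* Both values are 16:16 fixed point; 1<<16 means 100% utilization. */
	if (run_util >= bin_util)
		return (ptr);	/* The run is no emptier than its bin; keep it. */
	size = je_sallocx(ptr, 0);
	/* Bypass the tcache, as recommended in the comment above. */
	newptr = je_mallocx(size, MALLOCX_TCACHE_NONE);
	if (newptr == NULL)
		return (ptr);
	memcpy(newptr, ptr, size);
	je_dallocx(ptr, MALLOCX_TCACHE_NONE);
	return (newptr);
}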
......@@ -69,7 +69,7 @@ JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
#endif
bool
malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
malloc_mutex_init(malloc_mutex_t *mutex)
{
#ifdef _WIN32
......@@ -80,8 +80,6 @@ malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
_CRT_SPINCOUNT))
return (true);
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
mutex->lock = OS_UNFAIR_LOCK_INIT;
#elif (defined(JEMALLOC_OSSPIN))
mutex->lock = 0;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
......@@ -105,34 +103,31 @@ malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
}
pthread_mutexattr_destroy(&attr);
#endif
if (config_debug)
witness_init(&mutex->witness, name, rank, NULL);
return (false);
}
void
malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex)
malloc_mutex_prefork(malloc_mutex_t *mutex)
{
malloc_mutex_lock(tsdn, mutex);
malloc_mutex_lock(mutex);
}
void
malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex)
malloc_mutex_postfork_parent(malloc_mutex_t *mutex)
{
malloc_mutex_unlock(tsdn, mutex);
malloc_mutex_unlock(mutex);
}
void
malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex)
malloc_mutex_postfork_child(malloc_mutex_t *mutex)
{
#ifdef JEMALLOC_MUTEX_INIT_CB
malloc_mutex_unlock(tsdn, mutex);
malloc_mutex_unlock(mutex);
#else
if (malloc_mutex_init(mutex, mutex->witness.name,
mutex->witness.rank)) {
if (malloc_mutex_init(mutex)) {
malloc_printf("<jemalloc>: Error re-initializing mutex in "
"child\n");
if (opt_abort)
......@@ -142,7 +137,7 @@ malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex)
}
bool
malloc_mutex_boot(void)
mutex_boot(void)
{
#ifdef JEMALLOC_MUTEX_INIT_CB
......
#include "jemalloc/internal/jemalloc_internal.h"
#define BILLION UINT64_C(1000000000)
void
nstime_init(nstime_t *time, uint64_t ns)
{
time->ns = ns;
}
void
nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec)
{
time->ns = sec * BILLION + nsec;
}
uint64_t
nstime_ns(const nstime_t *time)
{
return (time->ns);
}
uint64_t
nstime_sec(const nstime_t *time)
{
return (time->ns / BILLION);
}
uint64_t
nstime_nsec(const nstime_t *time)
{
return (time->ns % BILLION);
}
void
nstime_copy(nstime_t *time, const nstime_t *source)
{
*time = *source;
}
int
nstime_compare(const nstime_t *a, const nstime_t *b)
{
return ((a->ns > b->ns) - (a->ns < b->ns));
}
void
nstime_add(nstime_t *time, const nstime_t *addend)
{
assert(UINT64_MAX - time->ns >= addend->ns);
time->ns += addend->ns;
}
void
nstime_subtract(nstime_t *time, const nstime_t *subtrahend)
{
assert(nstime_compare(time, subtrahend) >= 0);
time->ns -= subtrahend->ns;
}
void
nstime_imultiply(nstime_t *time, uint64_t multiplier)
{
assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) <<
2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns));
time->ns *= multiplier;
}
void
nstime_idivide(nstime_t *time, uint64_t divisor)
{
assert(divisor != 0);
time->ns /= divisor;
}
uint64_t
nstime_divide(const nstime_t *time, const nstime_t *divisor)
{
assert(divisor->ns != 0);
return (time->ns / divisor->ns);
}
#ifdef _WIN32
# define NSTIME_MONOTONIC true
static void
nstime_get(nstime_t *time)
{
FILETIME ft;
uint64_t ticks_100ns;
GetSystemTimeAsFileTime(&ft);
ticks_100ns = (((uint64_t)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
nstime_init(time, ticks_100ns * 100);
}
#elif JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
# define NSTIME_MONOTONIC true
static void
nstime_get(nstime_t *time)
{
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
nstime_init2(time, ts.tv_sec, ts.tv_nsec);
}
#elif JEMALLOC_HAVE_CLOCK_MONOTONIC
# define NSTIME_MONOTONIC true
static void
nstime_get(nstime_t *time)
{
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
nstime_init2(time, ts.tv_sec, ts.tv_nsec);
}
#elif JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
# define NSTIME_MONOTONIC true
static void
nstime_get(nstime_t *time)
{
nstime_init(time, mach_absolute_time());
}
#else
# define NSTIME_MONOTONIC false
static void
nstime_get(nstime_t *time)
{
struct timeval tv;
gettimeofday(&tv, NULL);
nstime_init2(time, tv.tv_sec, tv.tv_usec * 1000);
}
#endif
#ifdef JEMALLOC_JET
#undef nstime_monotonic
#define nstime_monotonic JEMALLOC_N(n_nstime_monotonic)
#endif
bool
nstime_monotonic(void)
{
return (NSTIME_MONOTONIC);
#undef NSTIME_MONOTONIC
}
#ifdef JEMALLOC_JET
#undef nstime_monotonic
#define nstime_monotonic JEMALLOC_N(nstime_monotonic)
nstime_monotonic_t *nstime_monotonic = JEMALLOC_N(n_nstime_monotonic);
#endif
#ifdef JEMALLOC_JET
#undef nstime_update
#define nstime_update JEMALLOC_N(n_nstime_update)
#endif
bool
nstime_update(nstime_t *time)
{
nstime_t old_time;
nstime_copy(&old_time, time);
nstime_get(time);
/* Handle non-monotonic clocks. */
if (unlikely(nstime_compare(&old_time, time) > 0)) {
nstime_copy(time, &old_time);
return (true);
}
return (false);
}
#ifdef JEMALLOC_JET
#undef nstime_update
#define nstime_update JEMALLOC_N(nstime_update)
nstime_update_t *nstime_update = JEMALLOC_N(n_nstime_update);
#endif
#define JEMALLOC_PAGES_C_
#include "jemalloc/internal/jemalloc_internal.h"
#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
#include <sys/sysctl.h>
#endif
/******************************************************************************/
/* Data. */
#ifndef _WIN32
# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
# define PAGES_PROT_DECOMMIT (PROT_NONE)
static int mmap_flags;
#endif
static bool os_overcommits;
/******************************************************************************/
void *
pages_map(void *addr, size_t size, bool *commit)
pages_map(void *addr, size_t size)
{
void *ret;
assert(size != 0);
if (os_overcommits)
*commit = true;
#ifdef _WIN32
/*
* If VirtualAlloc can't allocate at the given address when one is
* given, it fails and returns NULL.
*/
ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0),
ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
PAGE_READWRITE);
#else
/*
* We don't use MAP_FIXED here, because it can cause the *replacement*
* of existing mappings, and we only want to create new mappings.
*/
{
int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
ret = mmap(addr, size, prot, mmap_flags, -1, 0);
}
ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
-1, 0);
assert(ret != NULL);
if (ret == MAP_FAILED)
......@@ -87,8 +67,7 @@ pages_unmap(void *addr, size_t size)
}
void *
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
bool *commit)
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
void *ret = (void *)((uintptr_t)addr + leadsize);
......@@ -98,7 +77,7 @@ pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
void *new_addr;
pages_unmap(addr, alloc_size);
new_addr = pages_map(ret, size, commit);
new_addr = pages_map(ret, size);
if (new_addr == ret)
return (ret);
if (new_addr)
......@@ -122,17 +101,17 @@ static bool
pages_commit_impl(void *addr, size_t size, bool commit)
{
if (os_overcommits)
return (true);
#ifdef _WIN32
return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
#else
{
int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
-1, 0);
#ifndef _WIN32
/*
* The following decommit/commit implementation is functional, but
* always disabled because it doesn't add value beyond improved
* debugging (at the cost of extra system calls) on systems that
* overcommit.
*/
if (false) {
int prot = commit ? (PROT_READ | PROT_WRITE) : PROT_NONE;
void *result = mmap(addr, size, prot, MAP_PRIVATE | MAP_ANON |
MAP_FIXED, -1, 0);
if (result == MAP_FAILED)
return (true);
if (result != addr) {
......@@ -146,6 +125,7 @@ pages_commit_impl(void *addr, size_t size, bool commit)
return (false);
}
#endif
return (true);
}
bool
......@@ -170,16 +150,15 @@ pages_purge(void *addr, size_t size)
#ifdef _WIN32
VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
unzeroed = true;
#elif (defined(JEMALLOC_PURGE_MADVISE_FREE) || \
defined(JEMALLOC_PURGE_MADVISE_DONTNEED))
# if defined(JEMALLOC_PURGE_MADVISE_FREE)
# define JEMALLOC_MADV_PURGE MADV_FREE
# define JEMALLOC_MADV_ZEROS false
# elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED)
#elif defined(JEMALLOC_HAVE_MADVISE)
# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
# define JEMALLOC_MADV_PURGE MADV_DONTNEED
# define JEMALLOC_MADV_ZEROS true
# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
# define JEMALLOC_MADV_PURGE MADV_FREE
# define JEMALLOC_MADV_ZEROS false
# else
# error No madvise(2) flag defined for purging unused dirty pages
# error "No madvise(2) flag defined for purging unused dirty pages."
# endif
int err = madvise(addr, size, JEMALLOC_MADV_PURGE);
unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
......@@ -192,111 +171,3 @@ pages_purge(void *addr, size_t size)
return (unzeroed);
}
bool
pages_huge(void *addr, size_t size)
{
assert(PAGE_ADDR2BASE(addr) == addr);
assert(PAGE_CEILING(size) == size);
#ifdef JEMALLOC_THP
return (madvise(addr, size, MADV_HUGEPAGE) != 0);
#else
return (false);
#endif
}
bool
pages_nohuge(void *addr, size_t size)
{
assert(PAGE_ADDR2BASE(addr) == addr);
assert(PAGE_CEILING(size) == size);
#ifdef JEMALLOC_THP
return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
#else
return (false);
#endif
}
#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
static bool
os_overcommits_sysctl(void)
{
int vm_overcommit;
size_t sz;
sz = sizeof(vm_overcommit);
if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0)
return (false); /* Error. */
return ((vm_overcommit & 0x3) == 0);
}
#endif
#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
/*
* Use syscall(2) rather than {open,read,close}(2) when possible to avoid
* reentry during bootstrapping if another library has interposed system call
* wrappers.
*/
static bool
os_overcommits_proc(void)
{
int fd;
char buf[1];
ssize_t nread;
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY);
#else
fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
#endif
if (fd == -1)
return (false); /* Error. */
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
#else
nread = read(fd, &buf, sizeof(buf));
#endif
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
syscall(SYS_close, fd);
#else
close(fd);
#endif
if (nread < 1)
return (false); /* Error. */
/*
* /proc/sys/vm/overcommit_memory meanings:
* 0: Heuristic overcommit.
* 1: Always overcommit.
* 2: Never overcommit.
*/
return (buf[0] == '0' || buf[0] == '1');
}
#endif
void
pages_boot(void)
{
#ifndef _WIN32
mmap_flags = MAP_PRIVATE | MAP_ANON;
#endif
#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
os_overcommits = os_overcommits_sysctl();
#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
os_overcommits = os_overcommits_proc();
# ifdef MAP_NORESERVE
if (os_overcommits)
mmap_flags |= MAP_NORESERVE;
# endif
#else
os_overcommits = false;
#endif
}
#define JEMALLOC_PRNG_C_
#include "jemalloc/internal/jemalloc_internal.h"
......@@ -109,7 +109,7 @@ static char prof_dump_buf[
1
#endif
];
static size_t prof_dump_buf_end;
static unsigned prof_dump_buf_end;
static int prof_dump_fd;
/* Do not dump any profiles until bootstrapping is complete. */
......@@ -121,13 +121,13 @@ static bool prof_booted = false;
* definition.
*/
static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx);
static bool prof_tctx_should_destroy(prof_tctx_t *tctx);
static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
static bool prof_tdata_should_destroy(prof_tdata_t *tdata,
bool even_if_attached);
static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
bool even_if_attached);
static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);
static char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name);
/******************************************************************************/
/* Red-black trees. */
......@@ -213,23 +213,22 @@ prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
}
if ((uintptr_t)tctx > (uintptr_t)1U) {
malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
malloc_mutex_lock(tctx->tdata->lock);
tctx->prepared = false;
if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
if (prof_tctx_should_destroy(tctx))
prof_tctx_destroy(tsd, tctx);
else
malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
malloc_mutex_unlock(tctx->tdata->lock);
}
}
void
prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx)
prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx)
{
prof_tctx_set(tsdn, ptr, usize, tctx);
prof_tctx_set(ptr, usize, tctx);
malloc_mutex_lock(tsdn, tctx->tdata->lock);
malloc_mutex_lock(tctx->tdata->lock);
tctx->cnts.curobjs++;
tctx->cnts.curbytes += usize;
if (opt_prof_accum) {
......@@ -237,23 +236,23 @@ prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
tctx->cnts.accumbytes += usize;
}
tctx->prepared = false;
malloc_mutex_unlock(tsdn, tctx->tdata->lock);
malloc_mutex_unlock(tctx->tdata->lock);
}
void
prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{
malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
malloc_mutex_lock(tctx->tdata->lock);
assert(tctx->cnts.curobjs > 0);
assert(tctx->cnts.curbytes >= usize);
tctx->cnts.curobjs--;
tctx->cnts.curbytes -= usize;
if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
if (prof_tctx_should_destroy(tctx))
prof_tctx_destroy(tsd, tctx);
else
malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
malloc_mutex_unlock(tctx->tdata->lock);
}
void
......@@ -278,7 +277,7 @@ prof_enter(tsd_t *tsd, prof_tdata_t *tdata)
tdata->enq = true;
}
malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
malloc_mutex_lock(&bt2gctx_mtx);
}
JEMALLOC_INLINE_C void
......@@ -288,7 +287,7 @@ prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
cassert(config_prof);
assert(tdata == prof_tdata_get(tsd, false));
malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
malloc_mutex_unlock(&bt2gctx_mtx);
if (tdata != NULL) {
bool idump, gdump;
......@@ -301,9 +300,9 @@ prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
tdata->enq_gdump = false;
if (idump)
prof_idump(tsd_tsdn(tsd));
prof_idump();
if (gdump)
prof_gdump(tsd_tsdn(tsd));
prof_gdump();
}
}
......@@ -547,15 +546,14 @@ prof_tdata_mutex_choose(uint64_t thr_uid)
}
static prof_gctx_t *
prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt)
prof_gctx_create(tsd_t *tsd, prof_bt_t *bt)
{
/*
* Create a single allocation that has space for vec of length bt->len.
*/
size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
true);
prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, offsetof(prof_gctx_t,
vec) + (bt->len * sizeof(void *)), false, tcache_get(tsd, true),
true, NULL);
if (gctx == NULL)
return (NULL);
gctx->lock = prof_gctx_mutex_choose();
......@@ -587,7 +585,7 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
* into this function.
*/
prof_enter(tsd, tdata_self);
malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
malloc_mutex_lock(gctx->lock);
assert(gctx->nlimbo != 0);
if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
/* Remove gctx from bt2gctx. */
......@@ -595,25 +593,24 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
not_reached();
prof_leave(tsd, tdata_self);
/* Destroy gctx. */
malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
idalloctm(tsd_tsdn(tsd), gctx, NULL, true, true);
malloc_mutex_unlock(gctx->lock);
idalloctm(tsd, gctx, tcache_get(tsd, false), true);
} else {
/*
* Compensate for increment in prof_tctx_destroy() or
* prof_lookup().
*/
gctx->nlimbo--;
malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
malloc_mutex_unlock(gctx->lock);
prof_leave(tsd, tdata_self);
}
}
/* tctx->tdata->lock must be held. */
static bool
prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx)
prof_tctx_should_destroy(prof_tctx_t *tctx)
{
malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
if (opt_prof_accum)
return (false);
if (tctx->cnts.curobjs != 0)
......@@ -636,6 +633,7 @@ prof_gctx_should_destroy(prof_gctx_t *gctx)
return (true);
}
/* tctx->tdata->lock is held upon entry, and released before return. */
static void
prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
{
......@@ -643,8 +641,6 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
prof_gctx_t *gctx = tctx->gctx;
bool destroy_tdata, destroy_tctx, destroy_gctx;
malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
assert(tctx->cnts.curobjs == 0);
assert(tctx->cnts.curbytes == 0);
assert(!opt_prof_accum);
......@@ -652,10 +648,10 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
assert(tctx->cnts.accumbytes == 0);
ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false);
malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
destroy_tdata = prof_tdata_should_destroy(tdata, false);
malloc_mutex_unlock(tdata->lock);
malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
malloc_mutex_lock(gctx->lock);
switch (tctx->state) {
case prof_tctx_state_nominal:
tctx_tree_remove(&gctx->tctxs, tctx);
......@@ -695,19 +691,17 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
destroy_tctx = false;
destroy_gctx = false;
}
malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
malloc_mutex_unlock(gctx->lock);
if (destroy_gctx) {
prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
tdata);
}
malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);
if (destroy_tdata)
prof_tdata_destroy(tsd, tdata, false);
if (destroy_tctx)
idalloctm(tsd_tsdn(tsd), tctx, NULL, true, true);
idalloctm(tsd, tctx, tcache_get(tsd, false), true);
}
static bool
......@@ -727,7 +721,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
prof_enter(tsd, tdata);
if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
/* bt has never been seen before. Insert it. */
gctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
gctx.p = prof_gctx_create(tsd, bt);
if (gctx.v == NULL) {
prof_leave(tsd, tdata);
return (true);
......@@ -736,7 +730,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
/* OOM. */
prof_leave(tsd, tdata);
idalloctm(tsd_tsdn(tsd), gctx.v, NULL, true, true);
idalloctm(tsd, gctx.v, tcache_get(tsd, false), true);
return (true);
}
new_gctx = true;
......@@ -745,9 +739,9 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
* Increment nlimbo, in order to avoid a race condition with
* prof_tctx_destroy()/prof_gctx_try_destroy().
*/
malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
malloc_mutex_lock(gctx.p->lock);
gctx.p->nlimbo++;
malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
malloc_mutex_unlock(gctx.p->lock);
new_gctx = false;
}
prof_leave(tsd, tdata);
......@@ -774,12 +768,13 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
if (tdata == NULL)
return (NULL);
malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
malloc_mutex_lock(tdata->lock);
not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
if (!not_found) /* Note double negative! */
ret.p->prepared = true;
malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
malloc_mutex_unlock(tdata->lock);
if (not_found) {
tcache_t *tcache;
void *btkey;
prof_gctx_t *gctx;
bool new_gctx, error;
......@@ -793,9 +788,9 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
return (NULL);
/* Link a prof_tctx_t into gctx for this thread. */
ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
size2index(sizeof(prof_tctx_t)), false, NULL, true,
arena_ichoose(tsd, NULL), true);
tcache = tcache_get(tsd, true);
ret.v = iallocztm(tsd, sizeof(prof_tctx_t), false, tcache, true,
NULL);
if (ret.p == NULL) {
if (new_gctx)
prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
......@@ -809,41 +804,41 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
ret.p->tctx_uid = tdata->tctx_uid_next++;
ret.p->prepared = true;
ret.p->state = prof_tctx_state_initializing;
malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
malloc_mutex_lock(tdata->lock);
error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
malloc_mutex_unlock(tdata->lock);
if (error) {
if (new_gctx)
prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
idalloctm(tsd_tsdn(tsd), ret.v, NULL, true, true);
idalloctm(tsd, ret.v, tcache, true);
return (NULL);
}
malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
malloc_mutex_lock(gctx->lock);
ret.p->state = prof_tctx_state_nominal;
tctx_tree_insert(&gctx->tctxs, ret.p);
gctx->nlimbo--;
malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
malloc_mutex_unlock(gctx->lock);
}
return (ret.p);
}
/*
* The bodies of this function and prof_leakcheck() are compiled out unless heap
* profiling is enabled, so that it is possible to compile jemalloc with
* floating point support completely disabled. Avoiding floating point code is
* important on memory-constrained systems, but it also enables a workaround for
* versions of glibc that don't properly save/restore floating point registers
* during dynamic lazy symbol loading (which internally calls into whatever
* malloc implementation happens to be integrated into the application). Note
* that some compilers (e.g. gcc 4.8) may use floating point registers for fast
* memory moves, so jemalloc must be compiled with such optimizations disabled
* (e.g.
* -mno-sse) in order for the workaround to be complete.
*/
void
prof_sample_threshold_update(prof_tdata_t *tdata)
{
/*
* The body of this function is compiled out unless heap profiling is
* enabled, so that it is possible to compile jemalloc with floating
* point support completely disabled. Avoiding floating point code is
* important on memory-constrained systems, but it also enables a
* workaround for versions of glibc that don't properly save/restore
* floating point registers during dynamic lazy symbol loading (which
* internally calls into whatever malloc implementation happens to be
* integrated into the application). Note that some compilers (e.g.
* gcc 4.8) may use floating point registers for fast memory moves, so
* jemalloc must be compiled with such optimizations disabled (e.g.
* -mno-sse) in order for the workaround to be complete.
*/
#ifdef JEMALLOC_PROF
uint64_t r;
double u;
......@@ -874,7 +869,8 @@ prof_sample_threshold_update(prof_tdata_t *tdata)
* pp 500
* (http://luc.devroye.org/rnbookindex.html)
*/
r = prng_lg_range_u64(&tdata->prng_state, 53);
prng64(r, 53, tdata->prng_state, UINT64_C(6364136223846793005),
UINT64_C(1442695040888963407));
u = (double)r * (1.0/9007199254740992.0L);
tdata->bytes_until_sample = (uint64_t)(log(u) /
log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
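To make the inverse-CDF step above concrete, here is a standalone sketch (not part of this diff) of the same sampling-interval math; it uses rand() purely as a stand-in for jemalloc's internal PRNG, and the expected interval between samples is 2^lg_sample bytes:
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
static uint64_t
bytes_until_sample(unsigned lg_sample)
{
	/* Uniform u in (0, 1); rand() is illustrative only. */
	double u = (rand() + 1.0) / ((double)RAND_MAX + 2.0);
	/* Per-byte sampling probability p = 2^-lg_sample. */
	double p = 1.0 / (double)((uint64_t)1U << lg_sample);
	/* Inverse of the geometric CDF, as in the expression above. */
	return ((uint64_t)(log(u) / log(1.0 - p)) + 1);
}
int
main(void)
{
	/* With lg_sample = 19 the mean interval is about 512 KiB. */
	for (int i = 0; i < 4; i++)
		printf("%llu\n", (unsigned long long)bytes_until_sample(19));
	return (0);
}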
......@@ -897,13 +893,11 @@ size_t
prof_tdata_count(void)
{
size_t tdata_count = 0;
tsdn_t *tsdn;
tsdn = tsdn_fetch();
malloc_mutex_lock(tsdn, &tdatas_mtx);
malloc_mutex_lock(&tdatas_mtx);
tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
(void *)&tdata_count);
malloc_mutex_unlock(tsdn, &tdatas_mtx);
malloc_mutex_unlock(&tdatas_mtx);
return (tdata_count);
}
......@@ -922,9 +916,9 @@ prof_bt_count(void)
if (tdata == NULL)
return (0);
malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
malloc_mutex_lock(&bt2gctx_mtx);
bt_count = ckh_count(&bt2gctx);
malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
malloc_mutex_unlock(&bt2gctx_mtx);
return (bt_count);
}
......@@ -994,7 +988,7 @@ prof_dump_close(bool propagate_err)
static bool
prof_dump_write(bool propagate_err, const char *s)
{
size_t i, slen, n;
unsigned i, slen, n;
cassert(config_prof);
......@@ -1037,21 +1031,20 @@ prof_dump_printf(bool propagate_err, const char *format, ...)
return (ret);
}
/* tctx->tdata->lock is held. */
static void
prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata)
prof_tctx_merge_tdata(prof_tctx_t *tctx, prof_tdata_t *tdata)
{
malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
malloc_mutex_lock(tsdn, tctx->gctx->lock);
malloc_mutex_lock(tctx->gctx->lock);
switch (tctx->state) {
case prof_tctx_state_initializing:
malloc_mutex_unlock(tsdn, tctx->gctx->lock);
malloc_mutex_unlock(tctx->gctx->lock);
return;
case prof_tctx_state_nominal:
tctx->state = prof_tctx_state_dumping;
malloc_mutex_unlock(tsdn, tctx->gctx->lock);
malloc_mutex_unlock(tctx->gctx->lock);
memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
......@@ -1070,12 +1063,11 @@ prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata)
}
}
/* gctx->lock is held. */
static void
prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx)
prof_tctx_merge_gctx(prof_tctx_t *tctx, prof_gctx_t *gctx)
{
malloc_mutex_assert_owner(tsdn, gctx->lock);
gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
if (opt_prof_accum) {
......@@ -1084,12 +1076,10 @@ prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx)
}
}
/* tctx->gctx is held. */
static prof_tctx_t *
prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{
tsdn_t *tsdn = (tsdn_t *)arg;
malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
switch (tctx->state) {
case prof_tctx_state_nominal:
......@@ -1097,7 +1087,7 @@ prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
break;
case prof_tctx_state_dumping:
case prof_tctx_state_purgatory:
prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
prof_tctx_merge_gctx(tctx, tctx->gctx);
break;
default:
not_reached();
......@@ -1106,18 +1096,11 @@ prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
return (NULL);
}
struct prof_tctx_dump_iter_arg_s {
tsdn_t *tsdn;
bool propagate_err;
};
/* gctx->lock is held. */
static prof_tctx_t *
prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{
struct prof_tctx_dump_iter_arg_s *arg =
(struct prof_tctx_dump_iter_arg_s *)opaque;
malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);
bool propagate_err = *(bool *)arg;
switch (tctx->state) {
case prof_tctx_state_initializing:
......@@ -1126,7 +1109,7 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
break;
case prof_tctx_state_dumping:
case prof_tctx_state_purgatory:
if (prof_dump_printf(arg->propagate_err,
if (prof_dump_printf(propagate_err,
" t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
"%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
......@@ -1139,14 +1122,12 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
return (NULL);
}
/* tctx->gctx is held. */
static prof_tctx_t *
prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{
tsdn_t *tsdn = (tsdn_t *)arg;
prof_tctx_t *ret;
malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
switch (tctx->state) {
case prof_tctx_state_nominal:
/* New since dumping started; ignore. */
......@@ -1167,12 +1148,12 @@ label_return:
}
static void
prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
prof_dump_gctx_prep(prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
{
cassert(config_prof);
malloc_mutex_lock(tsdn, gctx->lock);
malloc_mutex_lock(gctx->lock);
/*
* Increment nlimbo so that gctx won't go away before dump.
......@@ -1184,26 +1165,19 @@ prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
malloc_mutex_unlock(tsdn, gctx->lock);
malloc_mutex_unlock(gctx->lock);
}
struct prof_gctx_merge_iter_arg_s {
tsdn_t *tsdn;
size_t leak_ngctx;
};
static prof_gctx_t *
prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
{
struct prof_gctx_merge_iter_arg_s *arg =
(struct prof_gctx_merge_iter_arg_s *)opaque;
size_t *leak_ngctx = (size_t *)arg;
malloc_mutex_lock(arg->tsdn, gctx->lock);
tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
(void *)arg->tsdn);
malloc_mutex_lock(gctx->lock);
tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, NULL);
if (gctx->cnt_summed.curobjs != 0)
arg->leak_ngctx++;
malloc_mutex_unlock(arg->tsdn, gctx->lock);
(*leak_ngctx)++;
malloc_mutex_unlock(gctx->lock);
return (NULL);
}
......@@ -1222,7 +1196,7 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
*/
while ((gctx = gctx_tree_first(gctxs)) != NULL) {
gctx_tree_remove(gctxs, gctx);
malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
malloc_mutex_lock(gctx->lock);
{
prof_tctx_t *next;
......@@ -1230,15 +1204,14 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
do {
prof_tctx_t *to_destroy =
tctx_tree_iter(&gctx->tctxs, next,
prof_tctx_finish_iter,
(void *)tsd_tsdn(tsd));
prof_tctx_finish_iter, NULL);
if (to_destroy != NULL) {
next = tctx_tree_next(&gctx->tctxs,
to_destroy);
tctx_tree_remove(&gctx->tctxs,
to_destroy);
idalloctm(tsd_tsdn(tsd), to_destroy,
NULL, true, true);
idalloctm(tsd, to_destroy,
tcache_get(tsd, false), true);
} else
next = NULL;
} while (next != NULL);
......@@ -1246,26 +1219,19 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
gctx->nlimbo--;
if (prof_gctx_should_destroy(gctx)) {
gctx->nlimbo++;
malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
malloc_mutex_unlock(gctx->lock);
prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
} else
malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
malloc_mutex_unlock(gctx->lock);
}
}
struct prof_tdata_merge_iter_arg_s {
tsdn_t *tsdn;
prof_cnt_t cnt_all;
};
static prof_tdata_t *
prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
void *opaque)
prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{
struct prof_tdata_merge_iter_arg_s *arg =
(struct prof_tdata_merge_iter_arg_s *)opaque;
prof_cnt_t *cnt_all = (prof_cnt_t *)arg;
malloc_mutex_lock(arg->tsdn, tdata->lock);
malloc_mutex_lock(tdata->lock);
if (!tdata->expired) {
size_t tabind;
union {
......@@ -1277,17 +1243,17 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
&tctx.v);)
prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
prof_tctx_merge_tdata(tctx.p, tdata);
arg->cnt_all.curobjs += tdata->cnt_summed.curobjs;
arg->cnt_all.curbytes += tdata->cnt_summed.curbytes;
cnt_all->curobjs += tdata->cnt_summed.curobjs;
cnt_all->curbytes += tdata->cnt_summed.curbytes;
if (opt_prof_accum) {
arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs;
arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes;
cnt_all->accumobjs += tdata->cnt_summed.accumobjs;
cnt_all->accumbytes += tdata->cnt_summed.accumbytes;
}
} else
tdata->dumping = false;
malloc_mutex_unlock(arg->tsdn, tdata->lock);
malloc_mutex_unlock(tdata->lock);
return (NULL);
}
......@@ -1316,7 +1282,7 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
#define prof_dump_header JEMALLOC_N(prof_dump_header_impl)
#endif
static bool
prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all)
prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
{
bool ret;
......@@ -1327,10 +1293,10 @@ prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all)
cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes))
return (true);
malloc_mutex_lock(tsdn, &tdatas_mtx);
malloc_mutex_lock(&tdatas_mtx);
ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
(void *)&propagate_err) != NULL);
malloc_mutex_unlock(tsdn, &tdatas_mtx);
malloc_mutex_unlock(&tdatas_mtx);
return (ret);
}
#ifdef JEMALLOC_JET
......@@ -1339,16 +1305,15 @@ prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all)
prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
#endif
/* gctx->lock is held. */
static bool
prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
const prof_bt_t *bt, prof_gctx_tree_t *gctxs)
prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt,
prof_gctx_tree_t *gctxs)
{
bool ret;
unsigned i;
struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;
cassert(config_prof);
malloc_mutex_assert_owner(tsdn, gctx->lock);
/* Avoid dumping any gctx that has no useful data. */
if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
......@@ -1382,10 +1347,8 @@ prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
goto label_return;
}
prof_tctx_dump_iter_arg.tsdn = tsdn;
prof_tctx_dump_iter_arg.propagate_err = propagate_err;
if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
(void *)&prof_tctx_dump_iter_arg) != NULL) {
(void *)&propagate_err) != NULL) {
ret = true;
goto label_return;
}
......@@ -1395,7 +1358,6 @@ label_return:
return (ret);
}
#ifndef _WIN32
JEMALLOC_FORMAT_PRINTF(1, 2)
static int
prof_open_maps(const char *format, ...)
......@@ -1411,18 +1373,6 @@ prof_open_maps(const char *format, ...)
return (mfd);
}
#endif
static int
prof_getpid(void)
{
#ifdef _WIN32
return (GetCurrentProcessId());
#else
return (getpid());
#endif
}
static bool
prof_dump_maps(bool propagate_err)
......@@ -1433,11 +1383,9 @@ prof_dump_maps(bool propagate_err)
cassert(config_prof);
#ifdef __FreeBSD__
mfd = prof_open_maps("/proc/curproc/map");
#elif defined(_WIN32)
mfd = -1; // Not implemented
#else
{
int pid = prof_getpid();
int pid = getpid();
mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
if (mfd == -1)
......@@ -1478,66 +1426,39 @@ label_return:
return (ret);
}
/*
* See prof_sample_threshold_update() comment for why the body of this function
* is conditionally compiled.
*/
static void
prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
const char *filename)
{
#ifdef JEMALLOC_PROF
/*
* Scaling is equivalent to AdjustSamples() in jeprof, but the result may
* differ slightly from what jeprof reports, because here we scale the
* summary values, whereas jeprof scales each context individually and
* reports the sums of the scaled values.
*/
if (cnt_all->curbytes != 0) {
double sample_period = (double)((uint64_t)1 << lg_prof_sample);
double ratio = (((double)cnt_all->curbytes) /
(double)cnt_all->curobjs) / sample_period;
double scale_factor = 1.0 / (1.0 - exp(-ratio));
uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
* scale_factor);
uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
scale_factor);
malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
" byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
malloc_printf("<jemalloc>: Leak summary: %"FMTu64" byte%s, %"
FMTu64" object%s, %zu context%s\n",
cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "",
cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "",
leak_ngctx, (leak_ngctx != 1) ? "s" : "");
malloc_printf(
"<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
filename);
}
#endif
}
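For reference, the unbiasing performed by the removed 4.4.0 branch above reduces to the following arithmetic. This is a minimal standalone sketch, not part of the diff; leak_scale_bytes and lg_sample are hypothetical stand-ins for the in-tree symbols.

#include <math.h>
#include <stdint.h>

static uint64_t
leak_scale_bytes(uint64_t curbytes, uint64_t curobjs, unsigned lg_sample)
{
	double sample_period, ratio, scale_factor;

	if (curobjs == 0)
		return (curbytes);	/* Nothing sampled; nothing to unbias. */
	sample_period = (double)((uint64_t)1 << lg_sample);
	/* Mean size of a sampled object, relative to the sampling period. */
	ratio = (((double)curbytes) / (double)curobjs) / sample_period;
	/* 1/(1 - e^-ratio): inverse probability that such an object was sampled. */
	scale_factor = 1.0 / (1.0 - exp(-ratio));
	return ((uint64_t)round((double)curbytes * scale_factor));
}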
struct prof_gctx_dump_iter_arg_s {
tsdn_t *tsdn;
bool propagate_err;
};
static prof_gctx_t *
prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
{
prof_gctx_t *ret;
struct prof_gctx_dump_iter_arg_s *arg =
(struct prof_gctx_dump_iter_arg_s *)opaque;
bool propagate_err = *(bool *)arg;
malloc_mutex_lock(arg->tsdn, gctx->lock);
malloc_mutex_lock(gctx->lock);
if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt,
gctxs)) {
if (prof_dump_gctx(propagate_err, gctx, &gctx->bt, gctxs)) {
ret = gctx;
goto label_return;
}
ret = NULL;
label_return:
malloc_mutex_unlock(arg->tsdn, gctx->lock);
malloc_mutex_unlock(gctx->lock);
return (ret);
}
......@@ -1545,14 +1466,13 @@ static bool
prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
{
prof_tdata_t *tdata;
struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
prof_cnt_t cnt_all;
size_t tabind;
union {
prof_gctx_t *p;
void *v;
} gctx;
struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
size_t leak_ngctx;
prof_gctx_tree_t gctxs;
cassert(config_prof);
......@@ -1561,7 +1481,7 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
if (tdata == NULL)
return (true);
malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
malloc_mutex_lock(&prof_dump_mtx);
prof_enter(tsd, tdata);
/*
......@@ -1570,24 +1490,20 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
*/
gctx_tree_new(&gctxs);
for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);)
prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, &gctxs);
prof_dump_gctx_prep(gctx.p, &gctxs);
/*
* Iterate over tdatas, and for the non-expired ones snapshot their tctx
* stats and merge them into the associated gctx's.
*/
prof_tdata_merge_iter_arg.tsdn = tsd_tsdn(tsd);
memset(&prof_tdata_merge_iter_arg.cnt_all, 0, sizeof(prof_cnt_t));
malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
(void *)&prof_tdata_merge_iter_arg);
malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
memset(&cnt_all, 0, sizeof(prof_cnt_t));
malloc_mutex_lock(&tdatas_mtx);
tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, (void *)&cnt_all);
malloc_mutex_unlock(&tdatas_mtx);
/* Merge tctx stats into gctx's. */
prof_gctx_merge_iter_arg.tsdn = tsd_tsdn(tsd);
prof_gctx_merge_iter_arg.leak_ngctx = 0;
gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter,
(void *)&prof_gctx_merge_iter_arg);
leak_ngctx = 0;
gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, (void *)&leak_ngctx);
prof_leave(tsd, tdata);
......@@ -1596,15 +1512,12 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
goto label_open_close_error;
/* Dump profile header. */
if (prof_dump_header(tsd_tsdn(tsd), propagate_err,
&prof_tdata_merge_iter_arg.cnt_all))
if (prof_dump_header(propagate_err, &cnt_all))
goto label_write_error;
/* Dump per gctx profile stats. */
prof_gctx_dump_iter_arg.tsdn = tsd_tsdn(tsd);
prof_gctx_dump_iter_arg.propagate_err = propagate_err;
if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter,
(void *)&prof_gctx_dump_iter_arg) != NULL)
(void *)&propagate_err) != NULL)
goto label_write_error;
/* Dump /proc/<pid>/maps if possible. */
......@@ -1615,18 +1528,17 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
goto label_open_close_error;
prof_gctx_finish(tsd, &gctxs);
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
malloc_mutex_unlock(&prof_dump_mtx);
if (leakcheck)
prof_leakcheck(&cnt_all, leak_ngctx, filename);
if (leakcheck) {
prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
prof_gctx_merge_iter_arg.leak_ngctx, filename);
}
return (false);
label_write_error:
prof_dump_close(propagate_err);
label_open_close_error:
prof_gctx_finish(tsd, &gctxs);
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
malloc_mutex_unlock(&prof_dump_mtx);
return (true);
}
......@@ -1642,12 +1554,12 @@ prof_dump_filename(char *filename, char v, uint64_t vseq)
/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
"%s.%d.%"FMTu64".%c%"FMTu64".heap",
opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq);
} else {
/* "<prefix>.<pid>.<seq>.<v>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
"%s.%d.%"FMTu64".%c.heap",
opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
opt_prof_prefix, (int)getpid(), prof_dump_seq, v);
}
prof_dump_seq++;
}
......@@ -1666,23 +1578,23 @@ prof_fdump(void)
return;
tsd = tsd_fetch();
malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'f', VSEQ_INVALID);
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
malloc_mutex_unlock(&prof_dump_seq_mtx);
prof_dump(tsd, false, filename, opt_prof_leak);
}
void
prof_idump(tsdn_t *tsdn)
prof_idump(void)
{
tsd_t *tsd;
prof_tdata_t *tdata;
cassert(config_prof);
if (!prof_booted || tsdn_null(tsdn))
if (!prof_booted)
return;
tsd = tsdn_tsd(tsdn);
tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, false);
if (tdata == NULL)
return;
......@@ -1693,48 +1605,50 @@ prof_idump(tsdn_t *tsdn)
if (opt_prof_prefix[0] != '\0') {
char filename[PATH_MAX + 1];
malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'i', prof_dump_iseq);
prof_dump_iseq++;
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
malloc_mutex_unlock(&prof_dump_seq_mtx);
prof_dump(tsd, false, filename, false);
}
}
bool
prof_mdump(tsd_t *tsd, const char *filename)
prof_mdump(const char *filename)
{
tsd_t *tsd;
char filename_buf[DUMP_FILENAME_BUFSIZE];
cassert(config_prof);
if (!opt_prof || !prof_booted)
return (true);
tsd = tsd_fetch();
if (filename == NULL) {
/* No filename specified, so automatically generate one. */
if (opt_prof_prefix[0] == '\0')
return (true);
malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
prof_dump_mseq++;
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
malloc_mutex_unlock(&prof_dump_seq_mtx);
filename = filename_buf;
}
return (prof_dump(tsd, true, filename, false));
}
void
prof_gdump(tsdn_t *tsdn)
prof_gdump(void)
{
tsd_t *tsd;
prof_tdata_t *tdata;
cassert(config_prof);
if (!prof_booted || tsdn_null(tsdn))
if (!prof_booted)
return;
tsd = tsdn_tsd(tsdn);
tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, false);
if (tdata == NULL)
return;
......@@ -1745,10 +1659,10 @@ prof_gdump(tsdn_t *tsdn)
if (opt_prof_prefix[0] != '\0') {
char filename[DUMP_FILENAME_BUFSIZE];
malloc_mutex_lock(tsdn, &prof_dump_seq_mtx);
malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'u', prof_dump_useq);
prof_dump_useq++;
malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx);
malloc_mutex_unlock(&prof_dump_seq_mtx);
prof_dump(tsd, false, filename, false);
}
}
......@@ -1777,14 +1691,14 @@ prof_bt_keycomp(const void *k1, const void *k2)
}
JEMALLOC_INLINE_C uint64_t
prof_thr_uid_alloc(tsdn_t *tsdn)
prof_thr_uid_alloc(void)
{
uint64_t thr_uid;
malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
malloc_mutex_lock(&next_thr_uid_mtx);
thr_uid = next_thr_uid;
next_thr_uid++;
malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);
malloc_mutex_unlock(&next_thr_uid_mtx);
return (thr_uid);
}
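An equivalent sketch of the mutex-protected, monotonically increasing thread-uid counter above, written with plain pthreads and hypothetical names:

#include <pthread.h>
#include <stdint.h>

static uint64_t next_uid;
static pthread_mutex_t next_uid_mtx = PTHREAD_MUTEX_INITIALIZER;

static uint64_t
uid_alloc(void)
{
	uint64_t uid;

	pthread_mutex_lock(&next_uid_mtx);
	uid = next_uid++;	/* Each caller gets a unique, ordered id. */
	pthread_mutex_unlock(&next_uid_mtx);
	return (uid);
}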
......@@ -1794,13 +1708,14 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
char *thread_name, bool active)
{
prof_tdata_t *tdata;
tcache_t *tcache;
cassert(config_prof);
/* Initialize an empty cache for this thread. */
tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
size2index(sizeof(prof_tdata_t)), false, NULL, true,
arena_get(TSDN_NULL, 0, true), true);
tcache = tcache_get(tsd, true);
tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t), false,
tcache, true, NULL);
if (tdata == NULL)
return (NULL);
......@@ -1812,9 +1727,9 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
tdata->expired = false;
tdata->tctx_uid_next = 0;
if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash,
prof_bt_keycomp)) {
idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true);
if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS,
prof_bt_hash, prof_bt_keycomp)) {
idalloctm(tsd, tdata, tcache, true);
return (NULL);
}
......@@ -1828,9 +1743,9 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
tdata->dumping = false;
tdata->active = active;
malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
malloc_mutex_lock(&tdatas_mtx);
tdata_tree_insert(&tdatas, tdata);
malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
malloc_mutex_unlock(&tdatas_mtx);
return (tdata);
}
......@@ -1839,12 +1754,13 @@ prof_tdata_t *
prof_tdata_init(tsd_t *tsd)
{
return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
NULL, prof_thread_active_init_get(tsd_tsdn(tsd))));
return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0, NULL,
prof_thread_active_init_get()));
}
/* tdata->lock must be held. */
static bool
prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached)
prof_tdata_should_destroy(prof_tdata_t *tdata, bool even_if_attached)
{
if (tdata->attached && !even_if_attached)
......@@ -1854,40 +1770,32 @@ prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached)
return (true);
}
static bool
prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
bool even_if_attached)
{
malloc_mutex_assert_owner(tsdn, tdata->lock);
return (prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
}
/* tdatas_mtx must be held. */
static void
prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
bool even_if_attached)
{
tcache_t *tcache;
malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx);
assert(prof_tdata_should_destroy(tdata, even_if_attached));
assert(tsd_prof_tdata_get(tsd) != tdata);
tdata_tree_remove(&tdatas, tdata);
assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
tcache = tcache_get(tsd, false);
if (tdata->thread_name != NULL)
idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true);
idalloctm(tsd, tdata->thread_name, tcache, true);
ckh_delete(tsd, &tdata->bt2tctx);
idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true);
idalloctm(tsd, tdata, tcache, true);
}
static void
prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached)
{
malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
malloc_mutex_lock(&tdatas_mtx);
prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
malloc_mutex_unlock(&tdatas_mtx);
}
static void
......@@ -1895,10 +1803,9 @@ prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
{
bool destroy_tdata;
malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
malloc_mutex_lock(tdata->lock);
if (tdata->attached) {
destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
true);
destroy_tdata = prof_tdata_should_destroy(tdata, true);
/*
* Only detach if !destroy_tdata, because detaching would allow
* another thread to win the race to destroy tdata.
......@@ -1908,7 +1815,7 @@ prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
tsd_prof_tdata_set(tsd, NULL);
} else
destroy_tdata = false;
malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
malloc_mutex_unlock(tdata->lock);
if (destroy_tdata)
prof_tdata_destroy(tsd, tdata, true);
}
......@@ -1919,7 +1826,7 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
uint64_t thr_uid = tdata->thr_uid;
uint64_t thr_discrim = tdata->thr_discrim + 1;
char *thread_name = (tdata->thread_name != NULL) ?
prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL;
prof_thread_name_alloc(tsd, tdata->thread_name) : NULL;
bool active = tdata->active;
prof_tdata_detach(tsd, tdata);
......@@ -1928,18 +1835,18 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
}
static bool
prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata)
prof_tdata_expire(prof_tdata_t *tdata)
{
bool destroy_tdata;
malloc_mutex_lock(tsdn, tdata->lock);
malloc_mutex_lock(tdata->lock);
if (!tdata->expired) {
tdata->expired = true;
destroy_tdata = tdata->attached ? false :
prof_tdata_should_destroy(tsdn, tdata, false);
prof_tdata_should_destroy(tdata, false);
} else
destroy_tdata = false;
malloc_mutex_unlock(tsdn, tdata->lock);
malloc_mutex_unlock(tdata->lock);
return (destroy_tdata);
}
......@@ -1947,9 +1854,8 @@ prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata)
static prof_tdata_t *
prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{
tsdn_t *tsdn = (tsdn_t *)arg;
return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
return (prof_tdata_expire(tdata) ? tdata : NULL);
}
void
......@@ -1959,15 +1865,15 @@ prof_reset(tsd_t *tsd, size_t lg_sample)
assert(lg_sample < (sizeof(uint64_t) << 3));
malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
malloc_mutex_lock(&prof_dump_mtx);
malloc_mutex_lock(&tdatas_mtx);
lg_prof_sample = lg_sample;
next = NULL;
do {
prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
prof_tdata_reset_iter, (void *)tsd);
prof_tdata_reset_iter, NULL);
if (to_destroy != NULL) {
next = tdata_tree_next(&tdatas, to_destroy);
prof_tdata_destroy_locked(tsd, to_destroy, false);
......@@ -1975,8 +1881,8 @@ prof_reset(tsd_t *tsd, size_t lg_sample)
next = NULL;
} while (next != NULL);
malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
malloc_mutex_unlock(&tdatas_mtx);
malloc_mutex_unlock(&prof_dump_mtx);
}
void
......@@ -1993,33 +1899,35 @@ prof_tdata_cleanup(tsd_t *tsd)
}
bool
prof_active_get(tsdn_t *tsdn)
prof_active_get(void)
{
bool prof_active_current;
malloc_mutex_lock(tsdn, &prof_active_mtx);
malloc_mutex_lock(&prof_active_mtx);
prof_active_current = prof_active;
malloc_mutex_unlock(tsdn, &prof_active_mtx);
malloc_mutex_unlock(&prof_active_mtx);
return (prof_active_current);
}
bool
prof_active_set(tsdn_t *tsdn, bool active)
prof_active_set(bool active)
{
bool prof_active_old;
malloc_mutex_lock(tsdn, &prof_active_mtx);
malloc_mutex_lock(&prof_active_mtx);
prof_active_old = prof_active;
prof_active = active;
malloc_mutex_unlock(tsdn, &prof_active_mtx);
malloc_mutex_unlock(&prof_active_mtx);
return (prof_active_old);
}
const char *
prof_thread_name_get(tsd_t *tsd)
prof_thread_name_get(void)
{
tsd_t *tsd;
prof_tdata_t *tdata;
tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL)
return ("");
......@@ -2027,7 +1935,7 @@ prof_thread_name_get(tsd_t *tsd)
}
static char *
prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name)
prof_thread_name_alloc(tsd_t *tsd, const char *thread_name)
{
char *ret;
size_t size;
......@@ -2039,8 +1947,7 @@ prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name)
if (size == 1)
return ("");
ret = iallocztm(tsdn, size, size2index(size), false, NULL, true,
arena_get(TSDN_NULL, 0, true), true);
ret = iallocztm(tsd, size, false, tcache_get(tsd, true), true, NULL);
if (ret == NULL)
return (NULL);
memcpy(ret, thread_name, size);
......@@ -2067,12 +1974,13 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name)
return (EFAULT);
}
s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
s = prof_thread_name_alloc(tsd, thread_name);
if (s == NULL)
return (EAGAIN);
if (tdata->thread_name != NULL) {
idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true);
idalloctm(tsd, tdata->thread_name, tcache_get(tsd, false),
true);
tdata->thread_name = NULL;
}
if (strlen(s) > 0)
......@@ -2081,10 +1989,12 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name)
}
bool
prof_thread_active_get(tsd_t *tsd)
prof_thread_active_get(void)
{
tsd_t *tsd;
prof_tdata_t *tdata;
tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL)
return (false);
......@@ -2092,10 +2002,12 @@ prof_thread_active_get(tsd_t *tsd)
}
bool
prof_thread_active_set(tsd_t *tsd, bool active)
prof_thread_active_set(bool active)
{
tsd_t *tsd;
prof_tdata_t *tdata;
tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL)
return (true);
......@@ -2104,48 +2016,48 @@ prof_thread_active_set(tsd_t *tsd, bool active)
}
bool
prof_thread_active_init_get(tsdn_t *tsdn)
prof_thread_active_init_get(void)
{
bool active_init;
malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
malloc_mutex_lock(&prof_thread_active_init_mtx);
active_init = prof_thread_active_init;
malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
malloc_mutex_unlock(&prof_thread_active_init_mtx);
return (active_init);
}
bool
prof_thread_active_init_set(tsdn_t *tsdn, bool active_init)
prof_thread_active_init_set(bool active_init)
{
bool active_init_old;
malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
malloc_mutex_lock(&prof_thread_active_init_mtx);
active_init_old = prof_thread_active_init;
prof_thread_active_init = active_init;
malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
malloc_mutex_unlock(&prof_thread_active_init_mtx);
return (active_init_old);
}
bool
prof_gdump_get(tsdn_t *tsdn)
prof_gdump_get(void)
{
bool prof_gdump_current;
malloc_mutex_lock(tsdn, &prof_gdump_mtx);
malloc_mutex_lock(&prof_gdump_mtx);
prof_gdump_current = prof_gdump_val;
malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
malloc_mutex_unlock(&prof_gdump_mtx);
return (prof_gdump_current);
}
bool
prof_gdump_set(tsdn_t *tsdn, bool gdump)
prof_gdump_set(bool gdump)
{
bool prof_gdump_old;
malloc_mutex_lock(tsdn, &prof_gdump_mtx);
malloc_mutex_lock(&prof_gdump_mtx);
prof_gdump_old = prof_gdump_val;
prof_gdump_val = gdump;
malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
malloc_mutex_unlock(&prof_gdump_mtx);
return (prof_gdump_old);
}
......@@ -2186,54 +2098,47 @@ prof_boot1(void)
}
bool
prof_boot2(tsd_t *tsd)
prof_boot2(void)
{
cassert(config_prof);
if (opt_prof) {
tsd_t *tsd;
unsigned i;
lg_prof_sample = opt_lg_prof_sample;
prof_active = opt_prof_active;
if (malloc_mutex_init(&prof_active_mtx, "prof_active",
WITNESS_RANK_PROF_ACTIVE))
if (malloc_mutex_init(&prof_active_mtx))
return (true);
prof_gdump_val = opt_prof_gdump;
if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
WITNESS_RANK_PROF_GDUMP))
if (malloc_mutex_init(&prof_gdump_mtx))
return (true);
prof_thread_active_init = opt_prof_thread_active_init;
if (malloc_mutex_init(&prof_thread_active_init_mtx,
"prof_thread_active_init",
WITNESS_RANK_PROF_THREAD_ACTIVE_INIT))
if (malloc_mutex_init(&prof_thread_active_init_mtx))
return (true);
tsd = tsd_fetch();
if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
prof_bt_keycomp))
return (true);
if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
WITNESS_RANK_PROF_BT2GCTX))
if (malloc_mutex_init(&bt2gctx_mtx))
return (true);
tdata_tree_new(&tdatas);
if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
WITNESS_RANK_PROF_TDATAS))
if (malloc_mutex_init(&tdatas_mtx))
return (true);
next_thr_uid = 0;
if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
WITNESS_RANK_PROF_NEXT_THR_UID))
if (malloc_mutex_init(&next_thr_uid_mtx))
return (true);
if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
WITNESS_RANK_PROF_DUMP_SEQ))
if (malloc_mutex_init(&prof_dump_seq_mtx))
return (true);
if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
WITNESS_RANK_PROF_DUMP))
if (malloc_mutex_init(&prof_dump_mtx))
return (true);
if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
......@@ -2243,23 +2148,21 @@ prof_boot2(tsd_t *tsd)
abort();
}
gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
PROF_NCTX_LOCKS * sizeof(malloc_mutex_t));
gctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS *
sizeof(malloc_mutex_t));
if (gctx_locks == NULL)
return (true);
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
WITNESS_RANK_PROF_GCTX))
if (malloc_mutex_init(&gctx_locks[i]))
return (true);
}
tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t));
tdata_locks = (malloc_mutex_t *)base_alloc(PROF_NTDATA_LOCKS *
sizeof(malloc_mutex_t));
if (tdata_locks == NULL)
return (true);
for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
WITNESS_RANK_PROF_TDATA))
if (malloc_mutex_init(&tdata_locks[i]))
return (true);
}
}
......@@ -2278,77 +2181,56 @@ prof_boot2(tsd_t *tsd)
}
void
prof_prefork0(tsdn_t *tsdn)
prof_prefork(void)
{
if (opt_prof) {
unsigned i;
malloc_mutex_prefork(tsdn, &prof_dump_mtx);
malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
malloc_mutex_prefork(tsdn, &tdatas_mtx);
for (i = 0; i < PROF_NTDATA_LOCKS; i++)
malloc_mutex_prefork(tsdn, &tdata_locks[i]);
malloc_mutex_prefork(&tdatas_mtx);
malloc_mutex_prefork(&bt2gctx_mtx);
malloc_mutex_prefork(&next_thr_uid_mtx);
malloc_mutex_prefork(&prof_dump_seq_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
malloc_mutex_prefork(tsdn, &gctx_locks[i]);
}
}
void
prof_prefork1(tsdn_t *tsdn)
{
if (opt_prof) {
malloc_mutex_prefork(tsdn, &prof_active_mtx);
malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
malloc_mutex_prefork(&gctx_locks[i]);
for (i = 0; i < PROF_NTDATA_LOCKS; i++)
malloc_mutex_prefork(&tdata_locks[i]);
}
}
void
prof_postfork_parent(tsdn_t *tsdn)
prof_postfork_parent(void)
{
if (opt_prof) {
unsigned i;
malloc_mutex_postfork_parent(tsdn,
&prof_thread_active_init_mtx);
malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
for (i = 0; i < PROF_NTDATA_LOCKS; i++)
malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
malloc_mutex_postfork_parent(&tdata_locks[i]);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
malloc_mutex_postfork_parent(&gctx_locks[i]);
malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
malloc_mutex_postfork_parent(&next_thr_uid_mtx);
malloc_mutex_postfork_parent(&bt2gctx_mtx);
malloc_mutex_postfork_parent(&tdatas_mtx);
}
}
void
prof_postfork_child(tsdn_t *tsdn)
prof_postfork_child(void)
{
if (opt_prof) {
unsigned i;
malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
for (i = 0; i < PROF_NTDATA_LOCKS; i++)
malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
malloc_mutex_postfork_child(&tdata_locks[i]);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
malloc_mutex_postfork_child(&gctx_locks[i]);
malloc_mutex_postfork_child(&prof_dump_seq_mtx);
malloc_mutex_postfork_child(&next_thr_uid_mtx);
malloc_mutex_postfork_child(&bt2gctx_mtx);
malloc_mutex_postfork_child(&tdatas_mtx);
}
}
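The prefork/postfork hooks above implement the usual fork-safety pattern: acquire every profiler lock in a fixed order before fork() and release them in both the parent and the child afterwards, so the child never inherits a lock held by a thread it does not have. A minimal sketch with plain pthread mutexes (hypothetical lock names, not jemalloc's malloc_mutex_t):

#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static void
prefork(void)
{
	/* Acquire in a fixed order to avoid deadlock among handlers. */
	pthread_mutex_lock(&lock_a);
	pthread_mutex_lock(&lock_b);
}

static void
postfork(void)
{
	/* Release in reverse order; used by both parent and child. */
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

static void
install_fork_handlers(void)
{
	/* Typically registered once from the allocator's boot path. */
	pthread_atfork(prefork, postfork, postfork);
}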
......
......@@ -13,22 +13,22 @@
/* Function prototypes for non-inline static functions. */
static quarantine_t *quarantine_grow(tsd_t *tsd, quarantine_t *quarantine);
static void quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine);
static void quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine,
static void quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine);
static void quarantine_drain(tsd_t *tsd, quarantine_t *quarantine,
size_t upper_bound);
/******************************************************************************/
static quarantine_t *
quarantine_init(tsdn_t *tsdn, size_t lg_maxobjs)
quarantine_init(tsd_t *tsd, size_t lg_maxobjs)
{
quarantine_t *quarantine;
size_t size;
size = offsetof(quarantine_t, objs) + ((ZU(1) << lg_maxobjs) *
sizeof(quarantine_obj_t));
quarantine = (quarantine_t *)iallocztm(tsdn, size, size2index(size),
false, NULL, true, arena_get(TSDN_NULL, 0, true), true);
assert(tsd_nominal(tsd));
quarantine = (quarantine_t *)iallocztm(tsd, offsetof(quarantine_t, objs)
+ ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)), false,
tcache_get(tsd, true), true, NULL);
if (quarantine == NULL)
return (NULL);
quarantine->curbytes = 0;
......@@ -47,7 +47,7 @@ quarantine_alloc_hook_work(tsd_t *tsd)
if (!tsd_nominal(tsd))
return;
quarantine = quarantine_init(tsd_tsdn(tsd), LG_MAXOBJS_INIT);
quarantine = quarantine_init(tsd, LG_MAXOBJS_INIT);
/*
* Check again whether quarantine has been initialized, because
* quarantine_init() may have triggered recursive initialization.
......@@ -55,7 +55,7 @@ quarantine_alloc_hook_work(tsd_t *tsd)
if (tsd_quarantine_get(tsd) == NULL)
tsd_quarantine_set(tsd, quarantine);
else
idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
}
static quarantine_t *
......@@ -63,9 +63,9 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
{
quarantine_t *ret;
ret = quarantine_init(tsd_tsdn(tsd), quarantine->lg_maxobjs + 1);
ret = quarantine_init(tsd, quarantine->lg_maxobjs + 1);
if (ret == NULL) {
quarantine_drain_one(tsd_tsdn(tsd), quarantine);
quarantine_drain_one(tsd, quarantine);
return (quarantine);
}
......@@ -87,18 +87,18 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
sizeof(quarantine_obj_t));
}
idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
tsd_quarantine_set(tsd, ret);
return (ret);
}
static void
quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine)
quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine)
{
quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
assert(obj->usize == isalloc(tsdn, obj->ptr, config_prof));
idalloctm(tsdn, obj->ptr, NULL, false, true);
assert(obj->usize == isalloc(obj->ptr, config_prof));
idalloctm(tsd, obj->ptr, NULL, false);
quarantine->curbytes -= obj->usize;
quarantine->curobjs--;
quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
......@@ -106,24 +106,24 @@ quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine)
}
static void
quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine, size_t upper_bound)
quarantine_drain(tsd_t *tsd, quarantine_t *quarantine, size_t upper_bound)
{
while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
quarantine_drain_one(tsdn, quarantine);
quarantine_drain_one(tsd, quarantine);
}
void
quarantine(tsd_t *tsd, void *ptr)
{
quarantine_t *quarantine;
size_t usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
size_t usize = isalloc(ptr, config_prof);
cassert(config_fill);
assert(opt_quarantine);
if ((quarantine = tsd_quarantine_get(tsd)) == NULL) {
idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true);
idalloctm(tsd, ptr, NULL, false);
return;
}
/*
......@@ -133,7 +133,7 @@ quarantine(tsd_t *tsd, void *ptr)
if (quarantine->curbytes + usize > opt_quarantine) {
size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
- usize : 0;
quarantine_drain(tsd_tsdn(tsd), quarantine, upper_bound);
quarantine_drain(tsd, quarantine, upper_bound);
}
/* Grow the quarantine ring buffer if it's full. */
if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
......@@ -158,11 +158,11 @@ quarantine(tsd_t *tsd, void *ptr)
&& usize <= SMALL_MAXCLASS)
arena_quarantine_junk_small(ptr, usize);
else
memset(ptr, JEMALLOC_FREE_JUNK, usize);
memset(ptr, 0x5a, usize);
}
} else {
assert(quarantine->curbytes == 0);
idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true);
idalloctm(tsd, ptr, NULL, false);
}
}
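The bookkeeping in quarantine() and quarantine_drain_one() above is a plain FIFO ring buffer with a byte budget: new objects are appended, and the oldest entries are drained from first until curbytes fits under the bound. A minimal sketch with hypothetical types and a fixed power-of-two capacity (the real code resizes via lg_maxobjs):

#include <stddef.h>

#define QRING_CAP 1024			/* Must be a power of two. */

typedef struct {
	void	*ptr;
	size_t	usize;
} qobj_t;

typedef struct {
	qobj_t	objs[QRING_CAP];
	size_t	first;			/* Index of the oldest entry. */
	size_t	curobjs;
	size_t	curbytes;
} qring_t;

static void
qring_drain(qring_t *q, size_t upper_bound, void (*dalloc)(void *))
{
	while (q->curbytes > upper_bound && q->curobjs > 0) {
		qobj_t *obj = &q->objs[q->first];

		dalloc(obj->ptr);		/* Finally free the oldest object. */
		q->curbytes -= obj->usize;
		q->curobjs--;
		q->first = (q->first + 1) & (QRING_CAP - 1);
	}
}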
......@@ -176,8 +176,8 @@ quarantine_cleanup(tsd_t *tsd)
quarantine = tsd_quarantine_get(tsd);
if (quarantine != NULL) {
quarantine_drain(tsd_tsdn(tsd), quarantine, 0);
idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
quarantine_drain(tsd, quarantine, 0);
idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
tsd_quarantine_set(tsd, NULL);
}
}
......@@ -15,8 +15,6 @@ rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
{
unsigned bits_in_leaf, height, i;
assert(RTREE_HEIGHT_MAX == ((ZU(1) << (LG_SIZEOF_PTR+3)) /
RTREE_BITS_PER_LEVEL));
assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));
bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL
......@@ -96,15 +94,12 @@ rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp)
rtree_node_elm_t *node;
if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) {
spin_t spinner;
/*
* Another thread is already in the process of initializing.
* Spin-wait until initialization is complete.
*/
spin_init(&spinner);
do {
spin_adaptive(&spinner);
CPU_SPINWAIT;
node = atomic_read_p((void **)elmp);
} while (node == RTREE_NODE_INITIALIZING);
} else {
......@@ -128,5 +123,5 @@ rtree_node_elm_t *
rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
{
return (rtree_node_init(rtree, level+1, &elm->child));
return (rtree_node_init(rtree, level, &elm->child));
}
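rtree_node_init(), partially shown above, relies on the usual compare-and-swap lazy-initialization idiom: the thread that wins the CAS installs a sentinel, allocates the node, and publishes it; losers spin until the sentinel is replaced. A minimal sketch using C11 atomics instead of jemalloc's atomic_* wrappers (names hypothetical):

#include <stdatomic.h>
#include <stdlib.h>

#define NODE_INITIALIZING ((void *)0x1)

static void *
node_get_or_init(_Atomic(void *) *slot, size_t sz)
{
	void *node = NULL;

	if (atomic_compare_exchange_strong(slot, &node, NODE_INITIALIZING)) {
		node = calloc(1, sz);		/* Winner performs the real init. */
		atomic_store(slot, node);	/* Publish; unblocks the spinners. */
	} else {
		do {				/* Losers wait for the winner. */
			node = atomic_load(slot);
		} while (node == NODE_INITIALIZING);
	}
	return (node);
}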
#define JEMALLOC_SPIN_C_
#include "jemalloc/internal/jemalloc_internal.h"
......@@ -3,7 +3,7 @@
#define CTL_GET(n, v, t) do { \
size_t sz = sizeof(t); \
xmallctl(n, (void *)v, &sz, NULL, 0); \
xmallctl(n, v, &sz, NULL, 0); \
} while (0)
#define CTL_M2_GET(n, i, v, t) do { \
......@@ -12,7 +12,7 @@
size_t sz = sizeof(t); \
xmallctlnametomib(n, mib, &miblen); \
mib[2] = (i); \
xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \
xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
#define CTL_M2_M4_GET(n, i, j, v, t) do { \
......@@ -22,7 +22,7 @@
xmallctlnametomib(n, mib, &miblen); \
mib[2] = (i); \
mib[4] = (j); \
xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \
xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
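For context, the xmallctl*() helpers wrapped by these CTL_* macros are the internal, abort-on-error counterparts of the public mallctl() interface (je_mallctl() under the prefix used by this bundled copy). Typical public usage, with the documented "epoch" and "stats.allocated" controls:

#include <jemalloc/jemalloc.h>
#include <stdio.h>

static void
print_allocated(void)
{
	uint64_t epoch = 1;
	size_t epoch_sz = sizeof(epoch);
	size_t allocated, sz = sizeof(allocated);

	/* Writing the epoch refreshes the cached statistics. */
	mallctl("epoch", &epoch, &epoch_sz, &epoch, sizeof(epoch));
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
		printf("allocated: %zu\n", allocated);
}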
/******************************************************************************/
......@@ -32,107 +32,86 @@ bool opt_stats_print = false;
size_t stats_cactive = 0;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void stats_arena_bins_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i);
static void stats_arena_lruns_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i);
static void stats_arena_hchunks_print(
void (*write_cb)(void *, const char *), void *cbopaque, unsigned i);
static void stats_arena_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i, bool bins, bool large, bool huge);
/******************************************************************************/
static void
stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
bool json, bool large, bool huge, unsigned i)
unsigned i)
{
size_t page;
bool config_tcache, in_gap, in_gap_prev;
bool config_tcache, in_gap;
unsigned nbins, j;
CTL_GET("arenas.page", &page, size_t);
CTL_GET("arenas.nbins", &nbins, unsigned);
if (json) {
CTL_GET("config.tcache", &config_tcache, bool);
if (config_tcache) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"bins\": [\n");
"bins: size ind allocated nmalloc"
" ndalloc nrequests curregs curruns regs"
" pgs util nfills nflushes newruns"
" reruns\n");
} else {
CTL_GET("config.tcache", &config_tcache, bool);
if (config_tcache) {
malloc_cprintf(write_cb, cbopaque,
"bins: size ind allocated nmalloc"
" ndalloc nrequests curregs"
" curruns regs pgs util nfills"
" nflushes newruns reruns\n");
} else {
malloc_cprintf(write_cb, cbopaque,
"bins: size ind allocated nmalloc"
" ndalloc nrequests curregs"
" curruns regs pgs util newruns"
" reruns\n");
}
malloc_cprintf(write_cb, cbopaque,
"bins: size ind allocated nmalloc"
" ndalloc nrequests curregs curruns regs"
" pgs util newruns reruns\n");
}
CTL_GET("arenas.nbins", &nbins, unsigned);
for (j = 0, in_gap = false; j < nbins; j++) {
uint64_t nruns;
size_t reg_size, run_size, curregs;
size_t curruns;
uint32_t nregs;
uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
uint64_t nreruns;
CTL_M2_M4_GET("stats.arenas.0.bins.0.nruns", i, j, &nruns,
uint64_t);
in_gap_prev = in_gap;
in_gap = (nruns == 0);
if (!json && in_gap_prev && !in_gap) {
malloc_cprintf(write_cb, cbopaque,
" ---\n");
}
CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
CTL_M2_GET("arenas.bin.0.run_size", j, &run_size, size_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc,
uint64_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc,
uint64_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs,
size_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
&nrequests, uint64_t);
if (config_tcache) {
CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j,
&nfills, uint64_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j,
&nflushes, uint64_t);
}
CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j, &nreruns,
uint64_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j, &curruns,
size_t);
if (nruns == 0)
in_gap = true;
else {
size_t reg_size, run_size, curregs, availregs, milli;
size_t curruns;
uint32_t nregs;
uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
uint64_t reruns;
char util[6]; /* "x.yyy". */
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t{\n"
"\t\t\t\t\t\t\"nmalloc\": %"FMTu64",\n"
"\t\t\t\t\t\t\"ndalloc\": %"FMTu64",\n"
"\t\t\t\t\t\t\"curregs\": %zu,\n"
"\t\t\t\t\t\t\"nrequests\": %"FMTu64",\n",
nmalloc,
ndalloc,
curregs,
nrequests);
if (config_tcache) {
if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\t\"nfills\": %"FMTu64",\n"
"\t\t\t\t\t\t\"nflushes\": %"FMTu64",\n",
nfills,
nflushes);
" ---\n");
in_gap = false;
}
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\t\"nreruns\": %"FMTu64",\n"
"\t\t\t\t\t\t\"curruns\": %zu\n"
"\t\t\t\t\t}%s\n",
nreruns,
curruns,
(j + 1 < nbins) ? "," : "");
} else if (!in_gap) {
size_t availregs, milli;
char util[6]; /* "x.yyy". */
CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
CTL_M2_GET("arenas.bin.0.run_size", j, &run_size,
size_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j,
&nmalloc, uint64_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j,
&ndalloc, uint64_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j,
&curregs, size_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
&nrequests, uint64_t);
if (config_tcache) {
CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i,
j, &nfills, uint64_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes",
i, j, &nflushes, uint64_t);
}
CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j,
&reruns, uint64_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j,
&curruns, size_t);
availregs = nregs * curruns;
milli = (availregs != 0) ? (1000 * curregs) / availregs
......@@ -159,7 +138,7 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
reg_size, j, curregs * reg_size, nmalloc,
ndalloc, nrequests, curregs, curruns, nregs,
run_size / page, util, nfills, nflushes,
nruns, nreruns);
nruns, reruns);
} else {
malloc_cprintf(write_cb, cbopaque,
"%20zu %3u %12zu %12"FMTu64
......@@ -168,38 +147,28 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
" %12"FMTu64"\n",
reg_size, j, curregs * reg_size, nmalloc,
ndalloc, nrequests, curregs, curruns, nregs,
run_size / page, util, nruns, nreruns);
run_size / page, util, nruns, reruns);
}
}
}
if (json) {
if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t]%s\n", (large || huge) ? "," : "");
} else {
if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
" ---\n");
}
" ---\n");
}
}
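The per-bin "util" column computed above is an integer permille, curregs over nregs * curruns, rendered without floating point into a 6-byte "x.yyy" buffer. A sketch of that formatting step with a hypothetical helper:

#include <stdint.h>
#include <stdio.h>

static void
format_util(char util[6], size_t curregs, uint32_t nregs, size_t curruns)
{
	size_t availregs = (size_t)nregs * curruns;
	size_t milli = (availregs != 0) ? (1000 * curregs) / availregs : 1000;

	if (milli >= 1000)
		snprintf(util, 6, "1");		/* Fully utilized (or a racy read). */
	else
		snprintf(util, 6, "0.%03zu", milli);
}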
static void
stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
bool json, bool huge, unsigned i)
unsigned i)
{
unsigned nbins, nlruns, j;
bool in_gap, in_gap_prev;
bool in_gap;
malloc_cprintf(write_cb, cbopaque,
"large: size ind allocated nmalloc ndalloc"
" nrequests curruns\n");
CTL_GET("arenas.nbins", &nbins, unsigned);
CTL_GET("arenas.nlruns", &nlruns, unsigned);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"lruns\": [\n");
} else {
malloc_cprintf(write_cb, cbopaque,
"large: size ind allocated nmalloc"
" ndalloc nrequests curruns\n");
}
for (j = 0, in_gap = false; j < nlruns; j++) {
uint64_t nmalloc, ndalloc, nrequests;
size_t run_size, curruns;
......@@ -210,25 +179,17 @@ stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
uint64_t);
CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j,
&nrequests, uint64_t);
in_gap_prev = in_gap;
in_gap = (nrequests == 0);
if (!json && in_gap_prev && !in_gap) {
malloc_cprintf(write_cb, cbopaque,
" ---\n");
}
CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t);
CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j, &curruns,
size_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t{\n"
"\t\t\t\t\t\t\"curruns\": %zu\n"
"\t\t\t\t\t}%s\n",
curruns,
(j + 1 < nlruns) ? "," : "");
} else if (!in_gap) {
if (nrequests == 0)
in_gap = true;
else {
CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t);
CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j,
&curruns, size_t);
if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
" ---\n");
in_gap = false;
}
malloc_cprintf(write_cb, cbopaque,
"%20zu %3u %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64" %12zu\n",
......@@ -236,35 +197,25 @@ stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
ndalloc, nrequests, curruns);
}
}
if (json) {
if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t]%s\n", huge ? "," : "");
} else {
if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
" ---\n");
}
" ---\n");
}
}
static void
stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
void *cbopaque, bool json, unsigned i)
void *cbopaque, unsigned i)
{
unsigned nbins, nlruns, nhchunks, j;
bool in_gap, in_gap_prev;
bool in_gap;
malloc_cprintf(write_cb, cbopaque,
"huge: size ind allocated nmalloc ndalloc"
" nrequests curhchunks\n");
CTL_GET("arenas.nbins", &nbins, unsigned);
CTL_GET("arenas.nlruns", &nlruns, unsigned);
CTL_GET("arenas.nhchunks", &nhchunks, unsigned);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"hchunks\": [\n");
} else {
malloc_cprintf(write_cb, cbopaque,
"huge: size ind allocated nmalloc"
" ndalloc nrequests curhchunks\n");
}
for (j = 0, in_gap = false; j < nhchunks; j++) {
uint64_t nmalloc, ndalloc, nrequests;
size_t hchunk_size, curhchunks;
......@@ -275,25 +226,18 @@ stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
&ndalloc, uint64_t);
CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nrequests", i, j,
&nrequests, uint64_t);
in_gap_prev = in_gap;
in_gap = (nrequests == 0);
if (!json && in_gap_prev && !in_gap) {
malloc_cprintf(write_cb, cbopaque,
" ---\n");
}
CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size, size_t);
CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i, j,
&curhchunks, size_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t{\n"
"\t\t\t\t\t\t\"curhchunks\": %zu\n"
"\t\t\t\t\t}%s\n",
curhchunks,
(j + 1 < nhchunks) ? "," : "");
} else if (!in_gap) {
if (nrequests == 0)
in_gap = true;
else {
CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size,
size_t);
CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i,
j, &curhchunks, size_t);
if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
" ---\n");
in_gap = false;
}
malloc_cprintf(write_cb, cbopaque,
"%20zu %3u %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64" %12zu\n",
......@@ -302,25 +246,20 @@ stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
nrequests, curhchunks);
}
}
if (json) {
if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t]\n");
} else {
if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
" ---\n");
}
" ---\n");
}
}
static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
bool json, unsigned i, bool bins, bool large, bool huge)
unsigned i, bool bins, bool large, bool huge)
{
unsigned nthreads;
const char *dss;
ssize_t lg_dirty_mult, decay_time;
size_t page, pactive, pdirty, mapped, retained;
ssize_t lg_dirty_mult;
size_t page, pactive, pdirty, mapped;
size_t metadata_mapped, metadata_allocated;
uint64_t npurge, nmadvise, purged;
size_t small_allocated;
......@@ -333,435 +272,240 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_GET("arenas.page", &page, size_t);
CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"nthreads\": %u,\n", nthreads);
} else {
malloc_cprintf(write_cb, cbopaque,
"assigned threads: %u\n", nthreads);
}
malloc_cprintf(write_cb, cbopaque,
"assigned threads: %u\n", nthreads);
CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"dss\": \"%s\",\n", dss);
} else {
malloc_cprintf(write_cb, cbopaque,
"dss allocation precedence: %s\n", dss);
}
malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n",
dss);
CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t);
if (json) {
if (lg_dirty_mult >= 0) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"lg_dirty_mult\": %zd,\n", lg_dirty_mult);
"min active:dirty page ratio: %u:1\n",
(1U << lg_dirty_mult));
} else {
if (opt_purge == purge_mode_ratio) {
if (lg_dirty_mult >= 0) {
malloc_cprintf(write_cb, cbopaque,
"min active:dirty page ratio: %u:1\n",
(1U << lg_dirty_mult));
} else {
malloc_cprintf(write_cb, cbopaque,
"min active:dirty page ratio: N/A\n");
}
}
}
CTL_M2_GET("stats.arenas.0.decay_time", i, &decay_time, ssize_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"decay_time\": %zd,\n", decay_time);
} else {
if (opt_purge == purge_mode_decay) {
if (decay_time >= 0) {
malloc_cprintf(write_cb, cbopaque,
"decay time: %zd\n", decay_time);
} else {
malloc_cprintf(write_cb, cbopaque,
"decay time: N/A\n");
}
}
"min active:dirty page ratio: N/A\n");
}
CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t);
CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t);
CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t);
CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"pactive\": %zu,\n", pactive);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"pdirty\": %zu,\n", pdirty);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"npurge\": %"FMTu64",\n", npurge);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"nmadvise\": %"FMTu64",\n", nmadvise);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"purged\": %"FMTu64",\n", purged);
} else {
malloc_cprintf(write_cb, cbopaque,
"purging: dirty: %zu, sweeps: %"FMTu64", madvises: %"FMTu64
", purged: %"FMTu64"\n", pdirty, npurge, nmadvise, purged);
}
malloc_cprintf(write_cb, cbopaque,
"dirty pages: %zu:%zu active:dirty, %"FMTu64" sweep%s, %"FMTu64
" madvise%s, %"FMTu64" purged\n", pactive, pdirty, npurge, npurge ==
1 ? "" : "s", nmadvise, nmadvise == 1 ? "" : "s", purged);
malloc_cprintf(write_cb, cbopaque,
" allocated nmalloc ndalloc"
" nrequests\n");
CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated,
size_t);
CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests,
uint64_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"small\": {\n");
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"allocated\": %zu,\n", small_allocated);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", small_nmalloc);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", small_ndalloc);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"nrequests\": %"FMTu64"\n", small_nrequests);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t},\n");
} else {
malloc_cprintf(write_cb, cbopaque,
" allocated nmalloc"
" ndalloc nrequests\n");
malloc_cprintf(write_cb, cbopaque,
"small: %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64"\n",
small_allocated, small_nmalloc, small_ndalloc,
small_nrequests);
}
malloc_cprintf(write_cb, cbopaque,
"small: %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64"\n",
small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated,
size_t);
CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests,
uint64_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"large\": {\n");
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"allocated\": %zu,\n", large_allocated);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", large_nmalloc);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", large_ndalloc);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"nrequests\": %"FMTu64"\n", large_nrequests);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t},\n");
} else {
malloc_cprintf(write_cb, cbopaque,
"large: %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64"\n",
large_allocated, large_nmalloc, large_ndalloc,
large_nrequests);
}
malloc_cprintf(write_cb, cbopaque,
"large: %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64"\n",
large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t);
CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.huge.nrequests", i, &huge_nrequests,
uint64_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"huge\": {\n");
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"allocated\": %zu,\n", huge_allocated);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", huge_nmalloc);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", huge_ndalloc);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"nrequests\": %"FMTu64"\n", huge_nrequests);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t},\n");
} else {
malloc_cprintf(write_cb, cbopaque,
"huge: %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64"\n",
huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
malloc_cprintf(write_cb, cbopaque,
"total: %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64"\n",
small_allocated + large_allocated + huge_allocated,
small_nmalloc + large_nmalloc + huge_nmalloc,
small_ndalloc + large_ndalloc + huge_ndalloc,
small_nrequests + large_nrequests + huge_nrequests);
}
if (!json) {
malloc_cprintf(write_cb, cbopaque,
"active: %12zu\n", pactive * page);
}
malloc_cprintf(write_cb, cbopaque,
"huge: %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64"\n",
huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
malloc_cprintf(write_cb, cbopaque,
"total: %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64"\n",
small_allocated + large_allocated + huge_allocated,
small_nmalloc + large_nmalloc + huge_nmalloc,
small_ndalloc + large_ndalloc + huge_ndalloc,
small_nrequests + large_nrequests + huge_nrequests);
malloc_cprintf(write_cb, cbopaque,
"active: %12zu\n", pactive * page);
CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"mapped\": %zu,\n", mapped);
} else {
malloc_cprintf(write_cb, cbopaque,
"mapped: %12zu\n", mapped);
}
CTL_M2_GET("stats.arenas.0.retained", i, &retained, size_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"retained\": %zu,\n", retained);
} else {
malloc_cprintf(write_cb, cbopaque,
"retained: %12zu\n", retained);
}
malloc_cprintf(write_cb, cbopaque,
"mapped: %12zu\n", mapped);
CTL_M2_GET("stats.arenas.0.metadata.mapped", i, &metadata_mapped,
size_t);
CTL_M2_GET("stats.arenas.0.metadata.allocated", i, &metadata_allocated,
size_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"metadata\": {\n");
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"mapped\": %zu,\n", metadata_mapped);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"allocated\": %zu\n", metadata_allocated);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t},\n");
} else {
malloc_cprintf(write_cb, cbopaque,
"metadata: mapped: %zu, allocated: %zu\n",
metadata_mapped, metadata_allocated);
}
malloc_cprintf(write_cb, cbopaque,
"metadata: mapped: %zu, allocated: %zu\n",
metadata_mapped, metadata_allocated);
if (bins) {
stats_arena_bins_print(write_cb, cbopaque, json, large, huge,
i);
}
if (bins)
stats_arena_bins_print(write_cb, cbopaque, i);
if (large)
stats_arena_lruns_print(write_cb, cbopaque, json, huge, i);
stats_arena_lruns_print(write_cb, cbopaque, i);
if (huge)
stats_arena_hchunks_print(write_cb, cbopaque, json, i);
stats_arena_hchunks_print(write_cb, cbopaque, i);
}
static void
stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
bool json, bool merged, bool unmerged)
void
stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
const char *opts)
{
const char *cpv;
bool bv;
unsigned uv;
uint32_t u32v;
uint64_t u64v;
ssize_t ssv;
size_t sv, bsz, usz, ssz, sssz, cpsz;
int err;
uint64_t epoch;
size_t u64sz;
bool general = true;
bool merged = true;
bool unmerged = true;
bool bins = true;
bool large = true;
bool huge = true;
bsz = sizeof(bool);
usz = sizeof(unsigned);
ssz = sizeof(size_t);
sssz = sizeof(ssize_t);
cpsz = sizeof(const char *);
/*
* Refresh stats, in case mallctl() was called by the application.
*
* Check for OOM here, since refreshing the ctl cache can trigger
* allocation. In practice, none of the subsequent mallctl()-related
* calls in this function will cause OOM if this one succeeds.
* */
epoch = 1;
u64sz = sizeof(uint64_t);
err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
if (err != 0) {
if (err == EAGAIN) {
malloc_write("<jemalloc>: Memory allocation failure in "
"mallctl(\"epoch\", ...)\n");
return;
}
malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
"...)\n");
abort();
}
CTL_GET("version", &cpv, const char *);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\"version\": \"%s\",\n", cpv);
} else
malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
if (opts != NULL) {
unsigned i;
/* config. */
#define CONFIG_WRITE_BOOL_JSON(n, c) \
if (json) { \
CTL_GET("config."#n, &bv, bool); \
malloc_cprintf(write_cb, cbopaque, \
"\t\t\t\""#n"\": %s%s\n", bv ? "true" : "false", \
(c)); \
for (i = 0; opts[i] != '\0'; i++) {
switch (opts[i]) {
case 'g':
general = false;
break;
case 'm':
merged = false;
break;
case 'a':
unmerged = false;
break;
case 'b':
bins = false;
break;
case 'l':
large = false;
break;
case 'h':
huge = false;
break;
default:;
}
}
}
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\"config\": {\n");
}
malloc_cprintf(write_cb, cbopaque,
"___ Begin jemalloc statistics ___\n");
if (general) {
const char *cpv;
bool bv;
unsigned uv;
ssize_t ssv;
size_t sv, bsz, ssz, sssz, cpsz;
CONFIG_WRITE_BOOL_JSON(cache_oblivious, ",")
bsz = sizeof(bool);
ssz = sizeof(size_t);
sssz = sizeof(ssize_t);
cpsz = sizeof(const char *);
CTL_GET("config.debug", &bv, bool);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"debug\": %s,\n", bv ? "true" : "false");
} else {
CTL_GET("version", &cpv, const char *);
malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
CTL_GET("config.debug", &bv, bool);
malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
bv ? "enabled" : "disabled");
}
CONFIG_WRITE_BOOL_JSON(fill, ",")
CONFIG_WRITE_BOOL_JSON(lazy_lock, ",")
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"malloc_conf\": \"%s\",\n",
config_malloc_conf);
} else {
malloc_cprintf(write_cb, cbopaque,
"config.malloc_conf: \"%s\"\n", config_malloc_conf);
}
CONFIG_WRITE_BOOL_JSON(munmap, ",")
CONFIG_WRITE_BOOL_JSON(prof, ",")
CONFIG_WRITE_BOOL_JSON(prof_libgcc, ",")
CONFIG_WRITE_BOOL_JSON(prof_libunwind, ",")
CONFIG_WRITE_BOOL_JSON(stats, ",")
CONFIG_WRITE_BOOL_JSON(tcache, ",")
CONFIG_WRITE_BOOL_JSON(tls, ",")
CONFIG_WRITE_BOOL_JSON(utrace, ",")
CONFIG_WRITE_BOOL_JSON(valgrind, ",")
CONFIG_WRITE_BOOL_JSON(xmalloc, "")
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t},\n");
}
#undef CONFIG_WRITE_BOOL_JSON
/* opt. */
#define OPT_WRITE_BOOL(n, c) \
if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0) { \
if (json) { \
malloc_cprintf(write_cb, cbopaque, \
"\t\t\t\""#n"\": %s%s\n", bv ? "true" : \
"false", (c)); \
} else { \
#define OPT_WRITE_BOOL(n) \
if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %s\n", bv ? "true" : "false"); \
} \
}
#define OPT_WRITE_BOOL_MUTABLE(n, m, c) { \
bool bv2; \
if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0 && \
je_mallctl(#m, &bv2, (void *)&bsz, NULL, 0) == 0) { \
if (json) { \
malloc_cprintf(write_cb, cbopaque, \
"\t\t\t\""#n"\": %s%s\n", bv ? "true" : \
"false", (c)); \
} else { \
}
#define OPT_WRITE_BOOL_MUTABLE(n, m) { \
bool bv2; \
if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0 && \
je_mallctl(#m, &bv2, &bsz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %s ("#m": %s)\n", bv ? "true" \
: "false", bv2 ? "true" : "false"); \
} \
} \
}
#define OPT_WRITE_UNSIGNED(n, c) \
if (je_mallctl("opt."#n, (void *)&uv, &usz, NULL, 0) == 0) { \
if (json) { \
malloc_cprintf(write_cb, cbopaque, \
"\t\t\t\""#n"\": %u%s\n", uv, (c)); \
} else { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %u\n", uv); \
} \
}
#define OPT_WRITE_SIZE_T(n, c) \
if (je_mallctl("opt."#n, (void *)&sv, &ssz, NULL, 0) == 0) { \
if (json) { \
malloc_cprintf(write_cb, cbopaque, \
"\t\t\t\""#n"\": %zu%s\n", sv, (c)); \
} else { \
#define OPT_WRITE_SIZE_T(n) \
if (je_mallctl("opt."#n, &sv, &ssz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zu\n", sv); \
} \
}
#define OPT_WRITE_SSIZE_T(n, c) \
if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0) { \
if (json) { \
malloc_cprintf(write_cb, cbopaque, \
"\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \
} else { \
}
#define OPT_WRITE_SSIZE_T(n) \
if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zd\n", ssv); \
} \
}
#define OPT_WRITE_SSIZE_T_MUTABLE(n, m, c) { \
ssize_t ssv2; \
if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0 && \
je_mallctl(#m, (void *)&ssv2, &sssz, NULL, 0) == 0) { \
if (json) { \
malloc_cprintf(write_cb, cbopaque, \
"\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \
} else { \
}
#define OPT_WRITE_SSIZE_T_MUTABLE(n, m) { \
ssize_t ssv2; \
if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0 && \
je_mallctl(#m, &ssv2, &sssz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zd ("#m": %zd)\n", \
ssv, ssv2); \
} \
} \
}
#define OPT_WRITE_CHAR_P(n, c) \
if (je_mallctl("opt."#n, (void *)&cpv, &cpsz, NULL, 0) == 0) { \
if (json) { \
malloc_cprintf(write_cb, cbopaque, \
"\t\t\t\""#n"\": \"%s\"%s\n", cpv, (c)); \
} else { \
#define OPT_WRITE_CHAR_P(n) \
if (je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": \"%s\"\n", cpv); \
} \
}
}
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\"opt\": {\n");
} else {
malloc_cprintf(write_cb, cbopaque,
"Run-time option settings:\n");
}
OPT_WRITE_BOOL(abort, ",")
OPT_WRITE_SIZE_T(lg_chunk, ",")
OPT_WRITE_CHAR_P(dss, ",")
OPT_WRITE_UNSIGNED(narenas, ",")
OPT_WRITE_CHAR_P(purge, ",")
if (json || opt_purge == purge_mode_ratio) {
OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult,
arenas.lg_dirty_mult, ",")
}
if (json || opt_purge == purge_mode_decay) {
OPT_WRITE_SSIZE_T_MUTABLE(decay_time, arenas.decay_time, ",")
}
OPT_WRITE_CHAR_P(junk, ",")
OPT_WRITE_SIZE_T(quarantine, ",")
OPT_WRITE_BOOL(redzone, ",")
OPT_WRITE_BOOL(zero, ",")
OPT_WRITE_BOOL(utrace, ",")
OPT_WRITE_BOOL(xmalloc, ",")
OPT_WRITE_BOOL(tcache, ",")
OPT_WRITE_SSIZE_T(lg_tcache_max, ",")
OPT_WRITE_BOOL(prof, ",")
OPT_WRITE_CHAR_P(prof_prefix, ",")
OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active, ",")
OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init, prof.thread_active_init,
",")
OPT_WRITE_SSIZE_T_MUTABLE(lg_prof_sample, prof.lg_sample, ",")
OPT_WRITE_BOOL(prof_accum, ",")
OPT_WRITE_SSIZE_T(lg_prof_interval, ",")
OPT_WRITE_BOOL(prof_gdump, ",")
OPT_WRITE_BOOL(prof_final, ",")
OPT_WRITE_BOOL(prof_leak, ",")
/*
* stats_print is always emitted, so as long as stats_print comes last
* it's safe to unconditionally omit the comma here (rather than having
* to conditionally omit it elsewhere depending on configuration).
*/
OPT_WRITE_BOOL(stats_print, "")
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t},\n");
}
OPT_WRITE_BOOL(abort)
OPT_WRITE_SIZE_T(lg_chunk)
OPT_WRITE_CHAR_P(dss)
OPT_WRITE_SIZE_T(narenas)
OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult, arenas.lg_dirty_mult)
OPT_WRITE_BOOL(stats_print)
OPT_WRITE_CHAR_P(junk)
OPT_WRITE_SIZE_T(quarantine)
OPT_WRITE_BOOL(redzone)
OPT_WRITE_BOOL(zero)
OPT_WRITE_BOOL(utrace)
OPT_WRITE_BOOL(valgrind)
OPT_WRITE_BOOL(xmalloc)
OPT_WRITE_BOOL(tcache)
OPT_WRITE_SSIZE_T(lg_tcache_max)
OPT_WRITE_BOOL(prof)
OPT_WRITE_CHAR_P(prof_prefix)
OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active)
OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init,
prof.thread_active_init)
OPT_WRITE_SSIZE_T(lg_prof_sample)
OPT_WRITE_BOOL(prof_accum)
OPT_WRITE_SSIZE_T(lg_prof_interval)
OPT_WRITE_BOOL(prof_gdump)
OPT_WRITE_BOOL(prof_final)
OPT_WRITE_BOOL(prof_leak)
#undef OPT_WRITE_BOOL
#undef OPT_WRITE_BOOL_MUTABLE
......@@ -769,386 +513,128 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
#undef OPT_WRITE_SSIZE_T
#undef OPT_WRITE_CHAR_P
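Each OPT_WRITE_* macro above probes one "opt.*" name and silently skips it when the lookup fails, which is how options compiled out of a given build (for example opt.prof without profiling support) are tolerated. A minimal application-side sketch of the same pattern, again assuming the unprefixed mallctl symbol:

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

/* Sketch: print a boolean option that may not exist in this build. */
static void
print_opt_bool(const char *name)
{
	bool bv;
	size_t bsz = sizeof(bool);

	if (mallctl(name, &bv, &bsz, NULL, 0) == 0)
		printf("%s: %s\n", name, bv ? "true" : "false");
	else
		printf("%s: not present in this build\n", name);
}

/* e.g. print_opt_bool("opt.abort"); print_opt_bool("opt.prof"); */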
/* arenas. */
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\"arenas\": {\n");
}
malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus);
CTL_GET("arenas.narenas", &uv, unsigned);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"narenas\": %u,\n", uv);
} else
CTL_GET("arenas.narenas", &uv, unsigned);
malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);
CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"lg_dirty_mult\": %zd,\n", ssv);
} else if (opt_purge == purge_mode_ratio) {
if (ssv >= 0) {
malloc_cprintf(write_cb, cbopaque,
"Min active:dirty page ratio per arena: "
"%u:1\n", (1U << ssv));
} else {
malloc_cprintf(write_cb, cbopaque,
"Min active:dirty page ratio per arena: "
"N/A\n");
}
}
CTL_GET("arenas.decay_time", &ssv, ssize_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"decay_time\": %zd,\n", ssv);
} else if (opt_purge == purge_mode_decay) {
malloc_cprintf(write_cb, cbopaque,
"Unused dirty page decay time: %zd%s\n",
ssv, (ssv < 0) ? " (no decay)" : "");
}
malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n",
sizeof(void *));
CTL_GET("arenas.quantum", &sv, size_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"quantum\": %zu,\n", sv);
} else
malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);
CTL_GET("arenas.quantum", &sv, size_t);
malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n",
sv);
CTL_GET("arenas.page", &sv, size_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"page\": %zu,\n", sv);
} else
CTL_GET("arenas.page", &sv, size_t);
malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) {
if (json) {
CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t);
if (ssv >= 0) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"tcache_max\": %zu,\n", sv);
"Min active:dirty page ratio per arena: %u:1\n",
(1U << ssv));
} else {
malloc_cprintf(write_cb, cbopaque,
"Maximum thread-cached size class: %zu\n", sv);
"Min active:dirty page ratio per arena: N/A\n");
}
}
if (json) {
unsigned nbins, nlruns, nhchunks, i;
CTL_GET("arenas.nbins", &nbins, unsigned);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"nbins\": %u,\n", nbins);
CTL_GET("arenas.nhbins", &uv, unsigned);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"nhbins\": %u,\n", uv);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"bin\": [\n");
for (i = 0; i < nbins; i++) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t{\n");
CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"size\": %zu,\n", sv);
CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"nregs\": %"FMTu32",\n", u32v);
CTL_M2_GET("arenas.bin.0.run_size", i, &sv, size_t);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"run_size\": %zu\n", sv);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t}%s\n", (i + 1 < nbins) ? "," : "");
}
malloc_cprintf(write_cb, cbopaque,
"\t\t\t],\n");
CTL_GET("arenas.nlruns", &nlruns, unsigned);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"nlruns\": %u,\n", nlruns);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"lrun\": [\n");
for (i = 0; i < nlruns; i++) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t{\n");
CTL_M2_GET("arenas.lrun.0.size", i, &sv, size_t);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"size\": %zu\n", sv);
if (je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0) == 0) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t}%s\n", (i + 1 < nlruns) ? "," : "");
"Maximum thread-cached size class: %zu\n", sv);
}
malloc_cprintf(write_cb, cbopaque,
"\t\t\t],\n");
CTL_GET("arenas.nhchunks", &nhchunks, unsigned);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"nhchunks\": %u,\n", nhchunks);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"hchunk\": [\n");
for (i = 0; i < nhchunks; i++) {
if (je_mallctl("opt.prof", &bv, &bsz, NULL, 0) == 0 && bv) {
CTL_GET("prof.lg_sample", &sv, size_t);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t{\n");
"Average profile sample interval: %"FMTu64
" (2^%zu)\n", (((uint64_t)1U) << sv), sv);
CTL_M2_GET("arenas.hchunk.0.size", i, &sv, size_t);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"size\": %zu\n", sv);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t}%s\n", (i + 1 < nhchunks) ? "," : "");
CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
if (ssv >= 0) {
malloc_cprintf(write_cb, cbopaque,
"Average profile dump interval: %"FMTu64
" (2^%zd)\n",
(((uint64_t)1U) << ssv), ssv);
} else {
malloc_cprintf(write_cb, cbopaque,
"Average profile dump interval: N/A\n");
}
}
CTL_GET("opt.lg_chunk", &sv, size_t);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t]\n");
malloc_cprintf(write_cb, cbopaque,
"\t\t},\n");
"Chunk size: %zu (2^%zu)\n", (ZU(1) << sv), sv);
}
/* prof. */
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\"prof\": {\n");
CTL_GET("prof.thread_active_init", &bv, bool);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"thread_active_init\": %s,\n", bv ? "true" :
"false");
CTL_GET("prof.active", &bv, bool);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"active\": %s,\n", bv ? "true" : "false");
CTL_GET("prof.gdump", &bv, bool);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"gdump\": %s,\n", bv ? "true" : "false");
CTL_GET("prof.interval", &u64v, uint64_t);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"interval\": %"FMTu64",\n", u64v);
CTL_GET("prof.lg_sample", &ssv, ssize_t);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"lg_sample\": %zd\n", ssv);
malloc_cprintf(write_cb, cbopaque,
"\t\t}%s\n", (config_stats || merged || unmerged) ? "," :
"");
}
}
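The prof.* block above only reads the profiling controls, but some of them (notably prof.active and prof.gdump) are also writable at run time when jemalloc was built with profiling and opt.prof is enabled. A hedged sketch, assuming the unprefixed symbol:

#include <stdbool.h>
#include <jemalloc/jemalloc.h>

/* Sketch: pause or resume heap profiling for the whole process. */
static int
set_prof_active(bool active)
{
	/* Fails unless jemalloc was built with profiling enabled. */
	return (mallctl("prof.active", NULL, NULL, &active, sizeof(active)));
}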
static void
stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
bool json, bool merged, bool unmerged, bool bins, bool large, bool huge)
{
size_t *cactive;
size_t allocated, active, metadata, resident, mapped, retained;
CTL_GET("stats.cactive", &cactive, size_t *);
CTL_GET("stats.allocated", &allocated, size_t);
CTL_GET("stats.active", &active, size_t);
CTL_GET("stats.metadata", &metadata, size_t);
CTL_GET("stats.resident", &resident, size_t);
CTL_GET("stats.mapped", &mapped, size_t);
CTL_GET("stats.retained", &retained, size_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\"stats\": {\n");
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"cactive\": %zu,\n", atomic_read_z(cactive));
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"allocated\": %zu,\n", allocated);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"active\": %zu,\n", active);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"metadata\": %zu,\n", metadata);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"resident\": %zu,\n", resident);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"mapped\": %zu,\n", mapped);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"retained\": %zu\n", retained);
if (config_stats) {
size_t *cactive;
size_t allocated, active, metadata, resident, mapped;
malloc_cprintf(write_cb, cbopaque,
"\t\t}%s\n", (merged || unmerged) ? "," : "");
} else {
CTL_GET("stats.cactive", &cactive, size_t *);
CTL_GET("stats.allocated", &allocated, size_t);
CTL_GET("stats.active", &active, size_t);
CTL_GET("stats.metadata", &metadata, size_t);
CTL_GET("stats.resident", &resident, size_t);
CTL_GET("stats.mapped", &mapped, size_t);
malloc_cprintf(write_cb, cbopaque,
"Allocated: %zu, active: %zu, metadata: %zu,"
" resident: %zu, mapped: %zu, retained: %zu\n",
allocated, active, metadata, resident, mapped, retained);
" resident: %zu, mapped: %zu\n",
allocated, active, metadata, resident, mapped);
malloc_cprintf(write_cb, cbopaque,
"Current active ceiling: %zu\n",
atomic_read_z(cactive));
}
if (merged || unmerged) {
unsigned narenas;
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\"stats.arenas\": {\n");
}
CTL_GET("arenas.narenas", &narenas, unsigned);
{
VARIABLE_ARRAY(bool, initialized, narenas);
size_t isz;
unsigned i, j, ninitialized;
isz = sizeof(bool) * narenas;
xmallctl("arenas.initialized", (void *)initialized,
&isz, NULL, 0);
for (i = ninitialized = 0; i < narenas; i++) {
if (initialized[i])
ninitialized++;
}
if (merged) {
unsigned narenas;
CTL_GET("arenas.narenas", &narenas, unsigned);
{
VARIABLE_ARRAY(bool, initialized, narenas);
size_t isz;
unsigned i, ninitialized;
isz = sizeof(bool) * narenas;
xmallctl("arenas.initialized", initialized,
&isz, NULL, 0);
for (i = ninitialized = 0; i < narenas; i++) {
if (initialized[i])
ninitialized++;
}
/* Merged stats. */
if (merged && (ninitialized > 1 || !unmerged)) {
/* Print merged arena stats. */
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"merged\": {\n");
} else {
if (ninitialized > 1 || !unmerged) {
/* Print merged arena stats. */
malloc_cprintf(write_cb, cbopaque,
"\nMerged arenas stats:\n");
}
stats_arena_print(write_cb, cbopaque, json,
narenas, bins, large, huge);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t}%s\n", (ninitialized > 1) ?
"," : "");
}
}
/* Unmerged stats. */
for (i = j = 0; i < narenas; i++) {
if (initialized[i]) {
if (json) {
j++;
malloc_cprintf(write_cb,
cbopaque,
"\t\t\t\"%u\": {\n", i);
} else {
malloc_cprintf(write_cb,
cbopaque, "\narenas[%u]:\n",
i);
}
stats_arena_print(write_cb, cbopaque,
json, i, bins, large, huge);
if (json) {
malloc_cprintf(write_cb,
cbopaque,
"\t\t\t}%s\n", (j <
ninitialized) ? "," : "");
}
narenas, bins, large, huge);
}
}
}
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t}\n");
}
}
}
if (unmerged) {
unsigned narenas;
void
stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
const char *opts)
{
int err;
uint64_t epoch;
size_t u64sz;
bool json = false;
bool general = true;
bool merged = true;
bool unmerged = true;
bool bins = true;
bool large = true;
bool huge = true;
/* Print stats for each arena. */
/*
* Refresh stats, in case mallctl() was called by the application.
*
* Check for OOM here, since refreshing the ctl cache can trigger
* allocation. In practice, none of the subsequent mallctl()-related
* calls in this function will cause OOM if this one succeeds.
* */
epoch = 1;
u64sz = sizeof(uint64_t);
err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch,
sizeof(uint64_t));
if (err != 0) {
if (err == EAGAIN) {
malloc_write("<jemalloc>: Memory allocation failure in "
"mallctl(\"epoch\", ...)\n");
return;
}
malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
"...)\n");
abort();
}
CTL_GET("arenas.narenas", &narenas, unsigned);
{
VARIABLE_ARRAY(bool, initialized, narenas);
size_t isz;
unsigned i;
if (opts != NULL) {
unsigned i;
isz = sizeof(bool) * narenas;
xmallctl("arenas.initialized", initialized,
&isz, NULL, 0);
for (i = 0; opts[i] != '\0'; i++) {
switch (opts[i]) {
case 'J':
json = true;
break;
case 'g':
general = false;
break;
case 'm':
merged = false;
break;
case 'a':
unmerged = false;
break;
case 'b':
bins = false;
break;
case 'l':
large = false;
break;
case 'h':
huge = false;
break;
default:;
for (i = 0; i < narenas; i++) {
if (initialized[i]) {
malloc_cprintf(write_cb,
cbopaque,
"\narenas[%u]:\n", i);
stats_arena_print(write_cb,
cbopaque, i, bins, large,
huge);
}
}
}
}
}
if (json) {
malloc_cprintf(write_cb, cbopaque,
"{\n"
"\t\"jemalloc\": {\n");
} else {
malloc_cprintf(write_cb, cbopaque,
"___ Begin jemalloc statistics ___\n");
}
if (general)
stats_general_print(write_cb, cbopaque, json, merged, unmerged);
if (config_stats) {
stats_print_helper(write_cb, cbopaque, json, merged, unmerged,
bins, large, huge);
}
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t}\n"
"}\n");
} else {
malloc_cprintf(write_cb, cbopaque,
"--- End jemalloc statistics ---\n");
}
malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n");
}
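stats_print() consumes the opts string one character at a time: 'g', 'm', 'a', 'b', 'l' and 'h' suppress the general, merged, per-arena, bin, large and huge sections respectively, and the JSON-emitting variant shown above additionally accepts 'J'. Applications reach this code through malloc_stats_print(); a small usage sketch:

#include <jemalloc/jemalloc.h>

int
main(void)
{
	/* Full human-readable report. */
	malloc_stats_print(NULL, NULL, NULL);

	/* Totals only: skip general info, bins, large runs and huge chunks. */
	malloc_stats_print(NULL, NULL, "gblh");

	return (0);
}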
......@@ -10,7 +10,7 @@ ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
tcache_bin_info_t *tcache_bin_info;
static unsigned stack_nelms; /* Total stack elms per tcache. */
unsigned nhbins;
size_t nhbins;
size_t tcache_maxclass;
tcaches_t *tcaches;
......@@ -23,11 +23,10 @@ static tcaches_t *tcaches_avail;
/******************************************************************************/
size_t
tcache_salloc(tsdn_t *tsdn, const void *ptr)
size_t tcache_salloc(const void *ptr)
{
return (arena_salloc(tsdn, ptr, false));
return (arena_salloc(ptr, false));
}
void
......@@ -68,19 +67,20 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
tcache->next_gc_bin++;
if (tcache->next_gc_bin == nhbins)
tcache->next_gc_bin = 0;
tcache->ev_cnt = 0;
}
void *
tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
tcache_bin_t *tbin, szind_t binind, bool *tcache_success)
tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
tcache_bin_t *tbin, szind_t binind)
{
void *ret;
arena_tcache_fill_small(tsdn, arena, tbin, binind, config_prof ?
arena_tcache_fill_small(arena, tbin, binind, config_prof ?
tcache->prof_accumbytes : 0);
if (config_prof)
tcache->prof_accumbytes = 0;
ret = tcache_alloc_easy(tbin, tcache_success);
ret = tcache_alloc_easy(tbin);
return (ret);
}
......@@ -102,18 +102,17 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena bin associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
*(tbin->avail - 1));
tbin->avail[0]);
arena_t *bin_arena = extent_node_arena_get(&chunk->node);
arena_bin_t *bin = &bin_arena->bins[binind];
if (config_prof && bin_arena == arena) {
if (arena_prof_accum(tsd_tsdn(tsd), arena,
tcache->prof_accumbytes))
prof_idump(tsd_tsdn(tsd));
if (arena_prof_accum(arena, tcache->prof_accumbytes))
prof_idump();
tcache->prof_accumbytes = 0;
}
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
malloc_mutex_lock(&bin->lock);
if (config_stats && bin_arena == arena) {
assert(!merged_stats);
merged_stats = true;
......@@ -123,16 +122,16 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
}
ndeferred = 0;
for (i = 0; i < nflush; i++) {
ptr = *(tbin->avail - 1 - i);
ptr = tbin->avail[i];
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (extent_node_arena_get(&chunk->node) == bin_arena) {
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_bits_t *bitselm =
arena_bitselm_get_mutable(chunk, pageind);
arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
bin_arena, chunk, ptr, bitselm);
arena_bitselm_get(chunk, pageind);
arena_dalloc_bin_junked_locked(bin_arena, chunk,
ptr, bitselm);
} else {
/*
* This object was allocated via a different
......@@ -140,12 +139,11 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
* locked. Stash the object, so that it can be
* handled in a future pass.
*/
*(tbin->avail - 1 - ndeferred) = ptr;
tbin->avail[ndeferred] = ptr;
ndeferred++;
}
}
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
malloc_mutex_unlock(&bin->lock);
}
if (config_stats && !merged_stats) {
/*
......@@ -153,15 +151,15 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
* arena, so the stats didn't get merged. Manually do so now.
*/
arena_bin_t *bin = &arena->bins[binind];
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
malloc_mutex_lock(&bin->lock);
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
malloc_mutex_unlock(&bin->lock);
}
memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
sizeof(void *));
memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
rem * sizeof(void *));
tbin->ncached = rem;
if ((int)tbin->ncached < tbin->low_water)
tbin->low_water = tbin->ncached;
......@@ -184,13 +182,13 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
*(tbin->avail - 1));
tbin->avail[0]);
arena_t *locked_arena = extent_node_arena_get(&chunk->node);
UNUSED bool idump;
if (config_prof)
idump = false;
malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->lock);
malloc_mutex_lock(&locked_arena->lock);
if ((config_prof || config_stats) && locked_arena == arena) {
if (config_prof) {
idump = arena_prof_accum_locked(arena,
......@@ -208,13 +206,13 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
}
ndeferred = 0;
for (i = 0; i < nflush; i++) {
ptr = *(tbin->avail - 1 - i);
ptr = tbin->avail[i];
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (extent_node_arena_get(&chunk->node) ==
locked_arena) {
arena_dalloc_large_junked_locked(tsd_tsdn(tsd),
locked_arena, chunk, ptr);
arena_dalloc_large_junked_locked(locked_arena,
chunk, ptr);
} else {
/*
* This object was allocated via a different
......@@ -222,56 +220,62 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
* Stash the object, so that it can be handled
* in a future pass.
*/
*(tbin->avail - 1 - ndeferred) = ptr;
tbin->avail[ndeferred] = ptr;
ndeferred++;
}
}
malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock);
malloc_mutex_unlock(&locked_arena->lock);
if (config_prof && idump)
prof_idump(tsd_tsdn(tsd));
arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
ndeferred);
prof_idump();
}
if (config_stats && !merged_stats) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[binind - NBINS].nrequests +=
tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
malloc_mutex_unlock(&arena->lock);
}
memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
sizeof(void *));
memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
rem * sizeof(void *));
tbin->ncached = rem;
if ((int)tbin->ncached < tbin->low_water)
tbin->low_water = tbin->ncached;
}
static void
tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
void
tcache_arena_associate(tcache_t *tcache, arena_t *arena)
{
if (config_stats) {
/* Link into list of extant tcaches. */
malloc_mutex_lock(tsdn, &arena->lock);
malloc_mutex_lock(&arena->lock);
ql_elm_new(tcache, link);
ql_tail_insert(&arena->tcache_ql, tcache, link);
malloc_mutex_unlock(tsdn, &arena->lock);
malloc_mutex_unlock(&arena->lock);
}
}
static void
tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
void
tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena)
{
tcache_arena_dissociate(tcache, oldarena);
tcache_arena_associate(tcache, newarena);
}
void
tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
{
if (config_stats) {
/* Unlink from list of extant tcaches. */
malloc_mutex_lock(tsdn, &arena->lock);
malloc_mutex_lock(&arena->lock);
if (config_debug) {
bool in_ql = false;
tcache_t *iter;
......@@ -284,20 +288,11 @@ tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
assert(in_ql);
}
ql_remove(&arena->tcache_ql, tcache, link);
tcache_stats_merge(tsdn, tcache, arena);
malloc_mutex_unlock(tsdn, &arena->lock);
tcache_stats_merge(tcache, arena);
malloc_mutex_unlock(&arena->lock);
}
}
void
tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena,
arena_t *newarena)
{
tcache_arena_dissociate(tsdn, tcache, oldarena);
tcache_arena_associate(tsdn, tcache, newarena);
}
tcache_t *
tcache_get_hard(tsd_t *tsd)
{
......@@ -311,11 +306,11 @@ tcache_get_hard(tsd_t *tsd)
arena = arena_choose(tsd, NULL);
if (unlikely(arena == NULL))
return (NULL);
return (tcache_create(tsd_tsdn(tsd), arena));
return (tcache_create(tsd, arena));
}
tcache_t *
tcache_create(tsdn_t *tsdn, arena_t *arena)
tcache_create(tsd_t *tsd, arena_t *arena)
{
tcache_t *tcache;
size_t size, stack_offset;
......@@ -329,26 +324,18 @@ tcache_create(tsdn_t *tsdn, arena_t *arena)
/* Avoid false cacheline sharing. */
size = sa2u(size, CACHELINE);
tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true,
arena_get(TSDN_NULL, 0, true));
tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, a0get());
if (tcache == NULL)
return (NULL);
tcache_arena_associate(tsdn, tcache, arena);
ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
tcache_arena_associate(tcache, arena);
assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
for (i = 0; i < nhbins; i++) {
tcache->tbins[i].lg_fill_div = 1;
stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
/*
* avail points past the available space. Allocations will
* access the slots toward higher addresses (for the benefit of
* prefetch).
*/
tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
(uintptr_t)stack_offset);
stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
}
return (tcache);
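The two sides of the flush loops above address the cached pointers differently: one stores them at avail[0 .. ncached-1] with avail pointing at the base of the slot array, the other points avail one past the slots and reads *(avail - 1 - i), so that successive allocations walk toward higher addresses (the prefetch rationale in the tcache_create() comment). A toy sketch of the two pop conventions, with hypothetical names standing in for the tcache_bin_t fields; callers must ensure *ncached > 0:

/* Base-relative layout: pop from the high end of the occupied slots. */
static void *
pop_base_relative(void **avail, unsigned *ncached)
{
	(*ncached)--;
	return (avail[*ncached]);
}

/* End-relative layout: avail points one past the slots; pop from the low
 * end so consecutive pops touch increasing addresses. */
static void *
pop_end_relative(void **avail, unsigned *ncached)
{
	void *ret = *(avail - *ncached);

	(*ncached)--;
	return (ret);
}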
......@@ -361,7 +348,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
unsigned i;
arena = arena_choose(tsd, NULL);
tcache_arena_dissociate(tsd_tsdn(tsd), tcache, arena);
tcache_arena_dissociate(tcache, arena);
for (i = 0; i < NBINS; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
......@@ -369,9 +356,9 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
if (config_stats && tbin->tstats.nrequests != 0) {
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
malloc_mutex_lock(&bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
malloc_mutex_unlock(&bin->lock);
}
}
......@@ -380,19 +367,19 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
if (config_stats && tbin->tstats.nrequests != 0) {
malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[i - NBINS].nrequests +=
tbin->tstats.nrequests;
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
malloc_mutex_unlock(&arena->lock);
}
}
if (config_prof && tcache->prof_accumbytes > 0 &&
arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes))
prof_idump(tsd_tsdn(tsd));
arena_prof_accum(arena, tcache->prof_accumbytes))
prof_idump();
idalloctm(tsd_tsdn(tsd), tcache, NULL, true, true);
idalloctm(tsd, tcache, false, true);
}
void
......@@ -416,22 +403,21 @@ tcache_enabled_cleanup(tsd_t *tsd)
/* Do nothing. */
}
/* Caller must own arena->lock. */
void
tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
unsigned i;
cassert(config_stats);
malloc_mutex_assert_owner(tsdn, &arena->lock);
/* Merge and reset tcache stats. */
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
tcache_bin_t *tbin = &tcache->tbins[i];
malloc_mutex_lock(tsdn, &bin->lock);
malloc_mutex_lock(&bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(tsdn, &bin->lock);
malloc_mutex_unlock(&bin->lock);
tbin->tstats.nrequests = 0;
}
......@@ -447,12 +433,11 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
bool
tcaches_create(tsd_t *tsd, unsigned *r_ind)
{
arena_t *arena;
tcache_t *tcache;
tcaches_t *elm;
if (tcaches == NULL) {
tcaches = base_alloc(tsd_tsdn(tsd), sizeof(tcache_t *) *
tcaches = base_alloc(sizeof(tcache_t *) *
(MALLOCX_TCACHE_MAX+1));
if (tcaches == NULL)
return (true);
......@@ -460,10 +445,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
return (true);
arena = arena_ichoose(tsd, NULL);
if (unlikely(arena == NULL))
return (true);
tcache = tcache_create(tsd_tsdn(tsd), arena);
tcache = tcache_create(tsd, a0get());
if (tcache == NULL)
return (true);
......@@ -471,7 +453,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
elm = tcaches_avail;
tcaches_avail = tcaches_avail->next;
elm->tcache = tcache;
*r_ind = (unsigned)(elm - tcaches);
*r_ind = elm - tcaches;
} else {
elm = &tcaches[tcaches_past];
elm->tcache = tcache;
......@@ -509,7 +491,7 @@ tcaches_destroy(tsd_t *tsd, unsigned ind)
}
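tcaches_create() is what backs the "tcache.create" mallctl: it hands out an index that callers later pass to mallocx()/dallocx() via the MALLOCX_TCACHE() flag. A minimal usage sketch (illustrative; assumes the standard unprefixed jemalloc API):

#include <jemalloc/jemalloc.h>

/* Sketch: allocate and free through an explicitly created cache. */
static void
explicit_tcache_example(void)
{
	unsigned tc;
	size_t sz = sizeof(tc);
	void *p;

	if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0)
		return;
	p = mallocx(64, MALLOCX_TCACHE(tc));
	if (p != NULL)
		dallocx(p, MALLOCX_TCACHE(tc));
	mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
}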
bool
tcache_boot(tsdn_t *tsdn)
tcache_boot(void)
{
unsigned i;
......@@ -517,17 +499,17 @@ tcache_boot(tsdn_t *tsdn)
* If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
* known.
*/
if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < SMALL_MAXCLASS)
if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
tcache_maxclass = SMALL_MAXCLASS;
else if ((ZU(1) << opt_lg_tcache_max) > large_maxclass)
else if ((1U << opt_lg_tcache_max) > large_maxclass)
tcache_maxclass = large_maxclass;
else
tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
tcache_maxclass = (1U << opt_lg_tcache_max);
nhbins = size2index(tcache_maxclass) + 1;
/* Initialize tcache_bin_info. */
tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, nhbins *
tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
sizeof(tcache_bin_info_t));
if (tcache_bin_info == NULL)
return (true);
......
#define JEMALLOC_TICKER_C_
#include "jemalloc/internal/jemalloc_internal.h"
......@@ -77,7 +77,7 @@ tsd_cleanup(void *arg)
/* Do nothing. */
break;
case tsd_state_nominal:
#define O(n, t) \
#define O(n, t) \
n##_cleanup(tsd);
MALLOC_TSD
#undef O
......@@ -106,17 +106,15 @@ MALLOC_TSD
}
}
tsd_t *
bool
malloc_tsd_boot0(void)
{
tsd_t *tsd;
ncleanups = 0;
if (tsd_boot0())
return (NULL);
tsd = tsd_fetch();
*tsd_arenas_tdata_bypassp_get(tsd) = true;
return (tsd);
return (true);
*tsd_arenas_cache_bypassp_get(tsd_fetch()) = true;
return (false);
}
void
......@@ -124,7 +122,7 @@ malloc_tsd_boot1(void)
{
tsd_boot1();
*tsd_arenas_tdata_bypassp_get(tsd_fetch()) = false;
*tsd_arenas_cache_bypassp_get(tsd_fetch()) = false;
}
#ifdef _WIN32
......@@ -150,15 +148,13 @@ _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
#ifdef _MSC_VER
# ifdef _M_IX86
# pragma comment(linker, "/INCLUDE:__tls_used")
# pragma comment(linker, "/INCLUDE:_tls_callback")
# else
# pragma comment(linker, "/INCLUDE:_tls_used")
# pragma comment(linker, "/INCLUDE:tls_callback")
# endif
# pragma section(".CRT$XLY",long,read)
#endif
JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
static BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
#endif
......@@ -171,10 +167,10 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
tsd_init_block_t *iter;
/* Check whether this thread has already inserted into the list. */
malloc_mutex_lock(TSDN_NULL, &head->lock);
malloc_mutex_lock(&head->lock);
ql_foreach(iter, &head->blocks, link) {
if (iter->thread == self) {
malloc_mutex_unlock(TSDN_NULL, &head->lock);
malloc_mutex_unlock(&head->lock);
return (iter->data);
}
}
......@@ -182,7 +178,7 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
ql_elm_new(block, link);
block->thread = self;
ql_tail_insert(&head->blocks, block, link);
malloc_mutex_unlock(TSDN_NULL, &head->lock);
malloc_mutex_unlock(&head->lock);
return (NULL);
}
......@@ -190,8 +186,8 @@ void
tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
{
malloc_mutex_lock(TSDN_NULL, &head->lock);
malloc_mutex_lock(&head->lock);
ql_remove(&head->blocks, block, link);
malloc_mutex_unlock(TSDN_NULL, &head->lock);
malloc_mutex_unlock(&head->lock);
}
#endif
/*
* Define simple versions of assertion macros that won't recurse in case
* of assertion failures in malloc_*printf().
*/
#define assert(e) do { \
if (config_debug && !(e)) { \
malloc_write("<jemalloc>: Failed assertion\n"); \
......@@ -14,7 +10,6 @@
malloc_write("<jemalloc>: Unreachable code reached\n"); \
abort(); \
} \
unreachable(); \
} while (0)
#define not_implemented() do { \
......@@ -49,19 +44,15 @@ static void
wrtmessage(void *cbopaque, const char *s)
{
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
#ifdef SYS_write
/*
* Use syscall(2) rather than write(2) when possible in order to avoid
* the possibility of memory allocation within libc. This is necessary
* on FreeBSD; most operating systems do not have this problem though.
*
* syscall() returns long or int, depending on platform, so capture the
* unused result in the widest plausible type to avoid compiler
* warnings.
*/
UNUSED long result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
#else
UNUSED ssize_t result = write(STDERR_FILENO, s, strlen(s));
UNUSED int result = write(STDERR_FILENO, s, strlen(s));
#endif
}
......@@ -91,7 +82,7 @@ buferror(int err, char *buf, size_t buflen)
#ifdef _WIN32
FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0,
(LPSTR)buf, (DWORD)buflen, NULL);
(LPSTR)buf, buflen, NULL);
return (0);
#elif defined(__GLIBC__) && defined(_GNU_SOURCE)
char *b = strerror_r(err, buf, buflen);
......@@ -200,7 +191,7 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
p++;
}
if (neg)
ret = (uintmax_t)(-((intmax_t)ret));
ret = -ret;
if (p == ns) {
/* No conversion performed. */
......@@ -315,9 +306,10 @@ x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p)
return (s);
}
size_t
int
malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
{
int ret;
size_t i;
const char *f;
......@@ -408,8 +400,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
int prec = -1;
int width = -1;
unsigned char len = '?';
char *s;
size_t slen;
f++;
/* Flags. */
......@@ -500,6 +490,8 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
}
/* Conversion specifier. */
switch (*f) {
char *s;
size_t slen;
case '%':
/* %% */
APPEND_C(*f);
......@@ -585,19 +577,20 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
str[i] = '\0';
else
str[size - 1] = '\0';
ret = i;
#undef APPEND_C
#undef APPEND_S
#undef APPEND_PADDED_S
#undef GET_ARG_NUMERIC
return (i);
return (ret);
}
JEMALLOC_FORMAT_PRINTF(3, 4)
size_t
int
malloc_snprintf(char *str, size_t size, const char *format, ...)
{
size_t ret;
int ret;
va_list ap;
va_start(ap, format);
......@@ -655,12 +648,3 @@ malloc_printf(const char *format, ...)
malloc_vcprintf(NULL, NULL, format, ap);
va_end(ap);
}
/*
* Restore normal assertion macros, in order to make it possible to compile all
* C files as a single concatenation.
*/
#undef assert
#undef not_reached
#undef not_implemented
#include "jemalloc/internal/assert.h"
#define JEMALLOC_WITNESS_C_
#include "jemalloc/internal/jemalloc_internal.h"
void
witness_init(witness_t *witness, const char *name, witness_rank_t rank,
witness_comp_t *comp)
{
witness->name = name;
witness->rank = rank;
witness->comp = comp;
}
#ifdef JEMALLOC_JET
#undef witness_lock_error
#define witness_lock_error JEMALLOC_N(n_witness_lock_error)
#endif
void
witness_lock_error(const witness_list_t *witnesses, const witness_t *witness)
{
witness_t *w;
malloc_printf("<jemalloc>: Lock rank order reversal:");
ql_foreach(w, witnesses, link) {
malloc_printf(" %s(%u)", w->name, w->rank);
}
malloc_printf(" %s(%u)\n", witness->name, witness->rank);
abort();
}
#ifdef JEMALLOC_JET
#undef witness_lock_error
#define witness_lock_error JEMALLOC_N(witness_lock_error)
witness_lock_error_t *witness_lock_error = JEMALLOC_N(n_witness_lock_error);
#endif
#ifdef JEMALLOC_JET
#undef witness_owner_error
#define witness_owner_error JEMALLOC_N(n_witness_owner_error)
#endif
void
witness_owner_error(const witness_t *witness)
{
malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name,
witness->rank);
abort();
}
#ifdef JEMALLOC_JET
#undef witness_owner_error
#define witness_owner_error JEMALLOC_N(witness_owner_error)
witness_owner_error_t *witness_owner_error = JEMALLOC_N(n_witness_owner_error);
#endif
#ifdef JEMALLOC_JET
#undef witness_not_owner_error
#define witness_not_owner_error JEMALLOC_N(n_witness_not_owner_error)
#endif
void
witness_not_owner_error(const witness_t *witness)
{
malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name,
witness->rank);
abort();
}
#ifdef JEMALLOC_JET
#undef witness_not_owner_error
#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
witness_not_owner_error_t *witness_not_owner_error =
JEMALLOC_N(n_witness_not_owner_error);
#endif
#ifdef JEMALLOC_JET
#undef witness_lockless_error
#define witness_lockless_error JEMALLOC_N(n_witness_lockless_error)
#endif
void
witness_lockless_error(const witness_list_t *witnesses)
{
witness_t *w;
malloc_printf("<jemalloc>: Should not own any locks:");
ql_foreach(w, witnesses, link) {
malloc_printf(" %s(%u)", w->name, w->rank);
}
malloc_printf("\n");
abort();
}
#ifdef JEMALLOC_JET
#undef witness_lockless_error
#define witness_lockless_error JEMALLOC_N(witness_lockless_error)
witness_lockless_error_t *witness_lockless_error =
JEMALLOC_N(n_witness_lockless_error);
#endif
void
witnesses_cleanup(tsd_t *tsd)
{
witness_assert_lockless(tsd_tsdn(tsd));
/* Do nothing. */
}
void
witness_fork_cleanup(tsd_t *tsd)
{
/* Do nothing. */
}
void
witness_prefork(tsd_t *tsd)
{
tsd_witness_fork_set(tsd, true);
}
void
witness_postfork_parent(tsd_t *tsd)
{
tsd_witness_fork_set(tsd, false);
}
void
witness_postfork_child(tsd_t *tsd)
{
#ifndef JEMALLOC_MUTEX_INIT_CB
witness_list_t *witnesses;
witnesses = tsd_witnessesp_get(tsd);
ql_new(witnesses);
#endif
tsd_witness_fork_set(tsd, false);
}
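The witness_* functions removed by this revert implement lock-rank checking: each mutex carries a rank, and acquiring one whose rank does not exceed the rank of the most recently acquired lock is reported as a potential deadlock. The toy checker below illustrates the idea only; it is not jemalloc's API, and the names and strict-ordering rule are simplifications.

#include <stdio.h>
#include <stdlib.h>

#define TOY_MAX_HELD 16

typedef struct {
	const char *name;
	unsigned rank;
} toy_witness_t;

/* Per-thread list of held locks (the real code keeps this in TSD). */
static __thread toy_witness_t toy_held[TOY_MAX_HELD];
static __thread unsigned toy_nheld;

static void
toy_witness_lock(const toy_witness_t *w)
{
	if (toy_nheld > 0 && toy_held[toy_nheld - 1].rank >= w->rank) {
		fprintf(stderr, "lock rank order reversal: %s(%u) after %s(%u)\n",
		    w->name, w->rank, toy_held[toy_nheld - 1].name,
		    toy_held[toy_nheld - 1].rank);
		abort();
	}
	if (toy_nheld < TOY_MAX_HELD)
		toy_held[toy_nheld++] = *w;
}

static void
toy_witness_unlock(void)
{
	if (toy_nheld > 0)
		toy_nheld--;
}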
......@@ -4,7 +4,7 @@
#endif
/*
* The malloc_default_purgeable_zone() function is only available on >= 10.6.
* The malloc_default_purgeable_zone function is only available on >= 10.6.
* We need to check whether it is present at runtime, thus the weak_import.
*/
extern malloc_zone_t *malloc_default_purgeable_zone(void)
......@@ -13,9 +13,8 @@ JEMALLOC_ATTR(weak_import);
/******************************************************************************/
/* Data. */
static malloc_zone_t *default_zone, *purgeable_zone;
static malloc_zone_t jemalloc_zone;
static struct malloc_introspection_t jemalloc_zone_introspect;
static malloc_zone_t zone;
static struct malloc_introspection_t zone_introspect;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
......@@ -57,7 +56,7 @@ zone_size(malloc_zone_t *zone, void *ptr)
* not work in practice, we must check all pointers to assure that they
* reside within a mapped chunk before determining size.
*/
return (ivsalloc(tsdn_fetch(), ptr, config_prof));
return (ivsalloc(ptr, config_prof));
}
static void *
......@@ -88,7 +87,7 @@ static void
zone_free(malloc_zone_t *zone, void *ptr)
{
if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0) {
if (ivsalloc(ptr, config_prof) != 0) {
je_free(ptr);
return;
}
......@@ -100,7 +99,7 @@ static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{
if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0)
if (ivsalloc(ptr, config_prof) != 0)
return (je_realloc(ptr, size));
return (realloc(ptr, size));
......@@ -122,11 +121,9 @@ zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
size_t alloc_size;
alloc_size = ivsalloc(tsdn_fetch(), ptr, config_prof);
if (alloc_size != 0) {
assert(alloc_size == size);
if (ivsalloc(ptr, config_prof) != 0) {
assert(ivsalloc(ptr, config_prof) == size);
je_free(ptr);
return;
}
......@@ -165,103 +162,89 @@ static void
zone_force_unlock(malloc_zone_t *zone)
{
/*
* Call jemalloc_postfork_child() rather than
* jemalloc_postfork_parent(), because this function is executed by both
* parent and child. The parent can tolerate having state
* reinitialized, but the child cannot unlock mutexes that were locked
* by the parent.
*/
if (isthreaded)
jemalloc_postfork_child();
jemalloc_postfork_parent();
}
static void
zone_init(void)
JEMALLOC_ATTR(constructor)
void
register_zone(void)
{
jemalloc_zone.size = (void *)zone_size;
jemalloc_zone.malloc = (void *)zone_malloc;
jemalloc_zone.calloc = (void *)zone_calloc;
jemalloc_zone.valloc = (void *)zone_valloc;
jemalloc_zone.free = (void *)zone_free;
jemalloc_zone.realloc = (void *)zone_realloc;
jemalloc_zone.destroy = (void *)zone_destroy;
jemalloc_zone.zone_name = "jemalloc_zone";
jemalloc_zone.batch_malloc = NULL;
jemalloc_zone.batch_free = NULL;
jemalloc_zone.introspect = &jemalloc_zone_introspect;
jemalloc_zone.version = JEMALLOC_ZONE_VERSION;
/*
* If something else replaced the system default zone allocator, don't
* register jemalloc's.
*/
malloc_zone_t *default_zone = malloc_default_zone();
malloc_zone_t *purgeable_zone = NULL;
if (!default_zone->zone_name ||
strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) {
return;
}
zone.size = (void *)zone_size;
zone.malloc = (void *)zone_malloc;
zone.calloc = (void *)zone_calloc;
zone.valloc = (void *)zone_valloc;
zone.free = (void *)zone_free;
zone.realloc = (void *)zone_realloc;
zone.destroy = (void *)zone_destroy;
zone.zone_name = "jemalloc_zone";
zone.batch_malloc = NULL;
zone.batch_free = NULL;
zone.introspect = &zone_introspect;
zone.version = JEMALLOC_ZONE_VERSION;
#if (JEMALLOC_ZONE_VERSION >= 5)
jemalloc_zone.memalign = zone_memalign;
zone.memalign = zone_memalign;
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
jemalloc_zone.free_definite_size = zone_free_definite_size;
zone.free_definite_size = zone_free_definite_size;
#endif
#if (JEMALLOC_ZONE_VERSION >= 8)
jemalloc_zone.pressure_relief = NULL;
zone.pressure_relief = NULL;
#endif
jemalloc_zone_introspect.enumerator = NULL;
jemalloc_zone_introspect.good_size = (void *)zone_good_size;
jemalloc_zone_introspect.check = NULL;
jemalloc_zone_introspect.print = NULL;
jemalloc_zone_introspect.log = NULL;
jemalloc_zone_introspect.force_lock = (void *)zone_force_lock;
jemalloc_zone_introspect.force_unlock = (void *)zone_force_unlock;
jemalloc_zone_introspect.statistics = NULL;
zone_introspect.enumerator = NULL;
zone_introspect.good_size = (void *)zone_good_size;
zone_introspect.check = NULL;
zone_introspect.print = NULL;
zone_introspect.log = NULL;
zone_introspect.force_lock = (void *)zone_force_lock;
zone_introspect.force_unlock = (void *)zone_force_unlock;
zone_introspect.statistics = NULL;
#if (JEMALLOC_ZONE_VERSION >= 6)
jemalloc_zone_introspect.zone_locked = NULL;
zone_introspect.zone_locked = NULL;
#endif
#if (JEMALLOC_ZONE_VERSION >= 7)
jemalloc_zone_introspect.enable_discharge_checking = NULL;
jemalloc_zone_introspect.disable_discharge_checking = NULL;
jemalloc_zone_introspect.discharge = NULL;
# ifdef __BLOCKS__
jemalloc_zone_introspect.enumerate_discharged_pointers = NULL;
# else
jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL;
# endif
zone_introspect.enable_discharge_checking = NULL;
zone_introspect.disable_discharge_checking = NULL;
zone_introspect.discharge = NULL;
#ifdef __BLOCKS__
zone_introspect.enumerate_discharged_pointers = NULL;
#else
zone_introspect.enumerate_unavailable_without_blocks = NULL;
#endif
#endif
}
static malloc_zone_t *
zone_default_get(void)
{
malloc_zone_t **zones = NULL;
unsigned int num_zones = 0;
/*
* On OSX 10.12, malloc_default_zone returns a special zone that is not
* present in the list of registered zones. That zone uses a "lite zone"
* if one is present (apparently enabled when malloc stack logging is
* enabled), or the first registered zone otherwise. In practice this
* means unless malloc stack logging is enabled, the first registered
* zone is the default. So get the list of zones to get the first one,
* instead of relying on malloc_default_zone.
* The default purgeable zone is created lazily by OSX's libc. It uses
* the default zone when it is created for "small" allocations
* (< 15 KiB), but assumes the default zone is a scalable_zone. This
* obviously fails when the default zone is the jemalloc zone, so
* malloc_default_purgeable_zone is called beforehand so that the
* default purgeable zone is created when the default zone is still
* a scalable_zone. As purgeable zones only exist on >= 10.6, we need
* to check for the existence of malloc_default_purgeable_zone() at
* run time.
*/
if (KERN_SUCCESS != malloc_get_all_zones(0, NULL,
(vm_address_t**)&zones, &num_zones)) {
/*
* Reset the value in case the failure happened after it was
* set.
*/
num_zones = 0;
}
if (num_zones)
return (zones[0]);
return (malloc_default_zone());
}
if (malloc_default_purgeable_zone != NULL)
purgeable_zone = malloc_default_purgeable_zone();
/* As written, this function can only promote jemalloc_zone. */
static void
zone_promote(void)
{
malloc_zone_t *zone;
/* Register the custom zone. At this point it won't be the default. */
malloc_zone_register(&zone);
do {
default_zone = malloc_default_zone();
/*
* Unregister and reregister the default zone. On OSX >= 10.6,
* unregistering takes the last registered zone and places it
......@@ -272,7 +255,6 @@ zone_promote(void)
*/
malloc_zone_unregister(default_zone);
malloc_zone_register(default_zone);
/*
* On OSX 10.6, having the default purgeable zone appear before
* the default zone makes some things crash because it thinks it
......@@ -284,47 +266,9 @@ zone_promote(void)
* above, i.e. the default zone. Registering it again then puts
* it at the end, obviously after the default zone.
*/
if (purgeable_zone != NULL) {
if (purgeable_zone) {
malloc_zone_unregister(purgeable_zone);
malloc_zone_register(purgeable_zone);
}
zone = zone_default_get();
} while (zone != &jemalloc_zone);
}
JEMALLOC_ATTR(constructor)
void
zone_register(void)
{
/*
* If something else replaced the system default zone allocator, don't
* register jemalloc's.
*/
default_zone = zone_default_get();
if (!default_zone->zone_name || strcmp(default_zone->zone_name,
"DefaultMallocZone") != 0)
return;
/*
* The default purgeable zone is created lazily by OSX's libc. It uses
* the default zone when it is created for "small" allocations
* (< 15 KiB), but assumes the default zone is a scalable_zone. This
* obviously fails when the default zone is the jemalloc zone, so
* malloc_default_purgeable_zone() is called beforehand so that the
* default purgeable zone is created when the default zone is still
* a scalable_zone. As purgeable zones only exist on >= 10.6, we need
* to check for the existence of malloc_default_purgeable_zone() at
* run time.
*/
purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL :
malloc_default_purgeable_zone();
/* Register the custom zone. At this point it won't be the default. */
zone_init();
malloc_zone_register(&jemalloc_zone);
/* Promote the custom zone to be default. */
zone_promote();
} while (malloc_default_zone() != &zone);
}
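zone_default_get() above exists because, on OS X 10.12, malloc_default_zone() can return a wrapper zone that is absent from the registered-zone list; taking the first entry from malloc_get_all_zones() is the workaround. For reference, the same list is visible to any process; a hedged sketch (macOS only) mirroring the call used above:

#include <malloc/malloc.h>
#include <mach/mach.h>
#include <stdio.h>

/* Sketch (macOS): list the registered malloc zones in order. */
static void
print_malloc_zones(void)
{
	malloc_zone_t **zones = NULL;
	unsigned num_zones = 0;
	unsigned i;

	if (malloc_get_all_zones(0, NULL, (vm_address_t **)&zones,
	    &num_zones) != KERN_SUCCESS)
		return;
	for (i = 0; i < num_zones; i++) {
		printf("zone %u: %s\n", i,
		    zones[i]->zone_name != NULL ? zones[i]->zone_name : "?");
	}
}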
......@@ -11,6 +11,7 @@
#ifdef _WIN32
# include "msvc_compat/strings.h"
#endif
#include <sys/time.h>
#ifdef _WIN32
# include <windows.h>
......@@ -19,6 +20,39 @@
# include <pthread.h>
#endif
/******************************************************************************/
/*
* Define always-enabled assertion macros, so that test assertions execute even
* if assertions are disabled in the library code. These definitions must
* exist prior to including "jemalloc/internal/util.h".
*/
#define assert(e) do { \
if (!(e)) { \
malloc_printf( \
"<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
__FILE__, __LINE__, #e); \
abort(); \
} \
} while (0)
#define not_reached() do { \
malloc_printf( \
"<jemalloc>: %s:%d: Unreachable code reached\n", \
__FILE__, __LINE__); \
abort(); \
} while (0)
#define not_implemented() do { \
malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
__FILE__, __LINE__); \
abort(); \
} while (0)
#define assert_not_implemented(e) do { \
if (!(e)) \
not_implemented(); \
} while (0)
#include "test/jemalloc_test_defs.h"
#ifdef JEMALLOC_OSSPIN
......@@ -53,14 +87,6 @@
# include "jemalloc/internal/jemalloc_internal_defs.h"
# include "jemalloc/internal/jemalloc_internal_macros.h"
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
true
#else
false
#endif
;
# define JEMALLOC_N(n) @private_namespace@##n
# include "jemalloc/internal/private_namespace.h"
......@@ -68,7 +94,6 @@ static const bool config_debug =
# define JEMALLOC_H_STRUCTS
# define JEMALLOC_H_EXTERNS
# define JEMALLOC_H_INLINES
# include "jemalloc/internal/nstime.h"
# include "jemalloc/internal/util.h"
# include "jemalloc/internal/qr.h"
# include "jemalloc/internal/ql.h"
......@@ -124,40 +149,3 @@ static const bool config_debug =
#include "test/thd.h"
#define MEXP 19937
#include "test/SFMT.h"
/******************************************************************************/
/*
* Define always-enabled assertion macros, so that test assertions execute even
* if assertions are disabled in the library code.
*/
#undef assert
#undef not_reached
#undef not_implemented
#undef assert_not_implemented
#define assert(e) do { \
if (!(e)) { \
malloc_printf( \
"<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
__FILE__, __LINE__, #e); \
abort(); \
} \
} while (0)
#define not_reached() do { \
malloc_printf( \
"<jemalloc>: %s:%d: Unreachable code reached\n", \
__FILE__, __LINE__); \
abort(); \
} while (0)
#define not_implemented() do { \
malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
__FILE__, __LINE__); \
abort(); \
} while (0)
#define assert_not_implemented(e) do { \
if (!(e)) \
not_implemented(); \
} while (0)
......@@ -8,8 +8,6 @@
typedef struct {
#ifdef _WIN32
CRITICAL_SECTION lock;
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
os_unfair_lock lock;
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLock lock;
#else
......
......@@ -311,9 +311,6 @@ label_test_end: \
#define test(...) \
p_test(__VA_ARGS__, NULL)
#define test_no_malloc_init(...) \
p_test_no_malloc_init(__VA_ARGS__, NULL)
#define test_skip_if(e) do { \
if (e) { \
test_skip("%s:%s:%d: Test skipped: (%s)", \
......@@ -327,7 +324,6 @@ void test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
/* For private use by macros. */
test_status_t p_test(test_t *t, ...);
test_status_t p_test_no_malloc_init(test_t *t, ...);
void p_test_init(const char *name);
void p_test_fini(void);
void p_test_fail(const char *prefix, const char *message);