Commit 08e1c8e8 authored by antirez

Jemalloc upgraded to version 5.0.1.

parent 8f4e2075
#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
{
    void *ret;
    size_t alloc_size;

    alloc_size = size + alignment - PAGE;
    /* Beware size_t wrap-around. */
    if (alloc_size < size)
        return (NULL);
    do {
        void *pages;
        size_t leadsize;
        pages = pages_map(NULL, alloc_size);
        if (pages == NULL)
            return (NULL);
        leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
            (uintptr_t)pages;
        ret = pages_trim(pages, alloc_size, leadsize, size);
    } while (ret == NULL);

    assert(ret != NULL);
    *zero = true;
    if (!*commit)
        *commit = pages_decommit(ret, size);
    return (ret);
}

void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit)
{
    void *ret;
    size_t offset;

    /*
     * Ideally, there would be a way to specify alignment to mmap() (like
     * NetBSD has), but in the absence of such a feature, we have to work
     * hard to efficiently create aligned mappings.  The reliable, but
     * slow method is to create a mapping that is over-sized, then trim the
     * excess.  However, that always results in one or two calls to
     * pages_unmap().
     *
     * Optimistically try mapping precisely the right amount before falling
     * back to the slow method, with the expectation that the optimistic
     * approach works most of the time.
     */

    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    ret = pages_map(NULL, size);
    if (ret == NULL)
        return (NULL);
    offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
    if (offset != 0) {
        pages_unmap(ret, size);
        return (chunk_alloc_mmap_slow(size, alignment, zero, commit));
    }

    assert(ret != NULL);
    *zero = true;
    if (!*commit)
        *commit = pages_decommit(ret, size);
    return (ret);
}

bool
chunk_dalloc_mmap(void *chunk, size_t size)
{
    if (config_munmap)
        pages_unmap(chunk, size);
    return (!config_munmap);
}
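
The comment in chunk_alloc_mmap() above describes the trick this file uses to get alignment guarantees out of mmap(): optimistically map exactly size bytes, and only fall back to over-allocating and trimming when the kernel's placement happens to be unaligned. The following is a standalone sketch of that slow path using plain mmap()/munmap() rather than jemalloc's pages_* wrappers; it is illustrative only and assumes size and alignment are page multiples and that the platform provides MAP_ANONYMOUS.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/*
 * Illustrative sketch of the over-allocate-and-trim fallback described in
 * chunk_alloc_mmap(): map size + alignment - pagesize bytes, then unmap the
 * leading and trailing excess so the surviving region starts at an aligned
 * address.  Not jemalloc's implementation.
 */
static void *
aligned_map_slow(size_t size, size_t alignment, size_t pagesize)
{
    /* Assumes power-of-two alignment and page-multiple size/alignment. */
    assert((alignment & (alignment - 1)) == 0 && alignment >= pagesize);
    assert(size % pagesize == 0);

    size_t alloc_size = size + alignment - pagesize;
    if (alloc_size < size)      /* Beware size_t wrap-around. */
        return (NULL);
    void *pages = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (pages == MAP_FAILED)
        return (NULL);
    uintptr_t addr = (uintptr_t)pages;
    uintptr_t aligned = (addr + alignment - 1) & ~((uintptr_t)alignment - 1);
    size_t leadsize = (size_t)(aligned - addr);
    size_t trailsize = alloc_size - leadsize - size;
    /* Trim the excess; this is the pages_trim() step in the real code. */
    if (leadsize != 0)
        munmap(pages, leadsize);
    if (trailsize != 0)
        munmap((void *)(aligned + size), trailsize);
    return ((void *)aligned);
}

Unmapping the leading and trailing excess is what chunk_alloc_mmap_slow() delegates to pages_trim(); those extra system calls are why the exact-size attempt is tried first.
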
@@ -34,8 +34,18 @@
* respectively. * respectively.
* *
******************************************************************************/ ******************************************************************************/
#define JEMALLOC_CKH_C_ #define JEMALLOC_CKH_C_
#include "jemalloc/internal/jemalloc_internal.h" #include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/util.h"
/******************************************************************************/ /******************************************************************************/
/* Function prototypes for non-inline static functions. */ /* Function prototypes for non-inline static functions. */
@@ -49,27 +59,26 @@ static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
* Search bucket for key and return the cell number if found; SIZE_T_MAX * Search bucket for key and return the cell number if found; SIZE_T_MAX
* otherwise. * otherwise.
*/ */
JEMALLOC_INLINE_C size_t static size_t
ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) {
{
ckhc_t *cell; ckhc_t *cell;
unsigned i; unsigned i;
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
if (cell->key != NULL && ckh->keycomp(key, cell->key)) if (cell->key != NULL && ckh->keycomp(key, cell->key)) {
return ((bucket << LG_CKH_BUCKET_CELLS) + i); return (bucket << LG_CKH_BUCKET_CELLS) + i;
}
} }
return (SIZE_T_MAX); return SIZE_T_MAX;
} }
/* /*
* Search table for key and return cell number if found; SIZE_T_MAX otherwise. * Search table for key and return cell number if found; SIZE_T_MAX otherwise.
*/ */
JEMALLOC_INLINE_C size_t static size_t
ckh_isearch(ckh_t *ckh, const void *key) ckh_isearch(ckh_t *ckh, const void *key) {
{
size_t hashes[2], bucket, cell; size_t hashes[2], bucket, cell;
assert(ckh != NULL); assert(ckh != NULL);
@@ -79,19 +88,19 @@ ckh_isearch(ckh_t *ckh, const void *key)
/* Search primary bucket. */ /* Search primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key); cell = ckh_bucket_search(ckh, bucket, key);
if (cell != SIZE_T_MAX) if (cell != SIZE_T_MAX) {
return (cell); return cell;
}
/* Search secondary bucket. */ /* Search secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key); cell = ckh_bucket_search(ckh, bucket, key);
return (cell); return cell;
} }
JEMALLOC_INLINE_C bool static bool
ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
const void *data) const void *data) {
{
ckhc_t *cell; ckhc_t *cell;
unsigned offset, i; unsigned offset, i;
@@ -99,7 +108,8 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
* Cycle through the cells in the bucket, starting at a random position. * Cycle through the cells in the bucket, starting at a random position.
* The randomness avoids worst-case search overhead as buckets fill up. * The randomness avoids worst-case search overhead as buckets fill up.
*/ */
prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C); offset = (unsigned)prng_lg_range_u64(&ckh->prng_state,
LG_CKH_BUCKET_CELLS);
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))]; ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
@@ -107,11 +117,11 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
cell->key = key; cell->key = key;
cell->data = data; cell->data = data;
ckh->count++; ckh->count++;
return (false); return false;
} }
} }
return (true); return true;
} }
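
In the new code above, the old prng32() macro call is replaced by prng_lg_range_u64(), which returns a pseudo-random value in [0, 2^lg_range) used as the starting cell offset within a bucket. A rough standalone sketch of that kind of helper, using an LCG and keeping the top lg_range bits; the constants here are illustrative, not necessarily the ones jemalloc uses.

#include <assert.h>
#include <stdint.h>

/*
 * Sketch of drawing a value in [0, 2^lg_range) from a 64-bit LCG state, as
 * the new ckh code does via prng_lg_range_u64() to pick a random starting
 * cell.  The multiplier/increment are Knuth's MMIX constants, used here
 * only for illustration.
 */
static uint64_t
toy_prng_lg_range_u64(uint64_t *state, unsigned lg_range)
{
    assert(lg_range >= 1 && lg_range <= 64);
    *state = *state * 6364136223846793005ULL + 1442695040888963407ULL;
    /* The high bits of an LCG are the best-distributed; keep lg_range of them. */
    return (*state >> (64 - lg_range));
}
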
/* /*
@@ -120,10 +130,9 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
* eviction/relocation procedure until either success or detection of an * eviction/relocation procedure until either success or detection of an
* eviction/relocation bucket cycle. * eviction/relocation bucket cycle.
*/ */
JEMALLOC_INLINE_C bool static bool
ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
void const **argdata) void const **argdata) {
{
const void *key, *data, *tkey, *tdata; const void *key, *data, *tkey, *tdata;
ckhc_t *cell; ckhc_t *cell;
size_t hashes[2], bucket, tbucket; size_t hashes[2], bucket, tbucket;
@@ -141,7 +150,8 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
* were an item for which both hashes indicated the same * were an item for which both hashes indicated the same
* bucket. * bucket.
*/ */
prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C); i = (unsigned)prng_lg_range_u64(&ckh->prng_state,
LG_CKH_BUCKET_CELLS);
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
assert(cell->key != NULL); assert(cell->key != NULL);
@@ -181,18 +191,18 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
if (tbucket == argbucket) { if (tbucket == argbucket) {
*argkey = key; *argkey = key;
*argdata = data; *argdata = data;
return (true); return true;
} }
bucket = tbucket; bucket = tbucket;
if (!ckh_try_bucket_insert(ckh, bucket, key, data)) if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
return (false); return false;
}
} }
} }
JEMALLOC_INLINE_C bool static bool
ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) {
{
size_t hashes[2], bucket; size_t hashes[2], bucket;
const void *key = *argkey; const void *key = *argkey;
const void *data = *argdata; const void *data = *argdata;
@@ -201,27 +211,28 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
/* Try to insert in primary bucket. */ /* Try to insert in primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (!ckh_try_bucket_insert(ckh, bucket, key, data)) if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
return (false); return false;
}
/* Try to insert in secondary bucket. */ /* Try to insert in secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (!ckh_try_bucket_insert(ckh, bucket, key, data)) if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
return (false); return false;
}
/* /*
* Try to find a place for this item via iterative eviction/relocation. * Try to find a place for this item via iterative eviction/relocation.
*/ */
return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata)); return ckh_evict_reloc_insert(ckh, bucket, argkey, argdata);
} }
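
ckh_try_insert() above shows the overall cuckoo-hashing flow: try the primary bucket, then the secondary bucket, then fall back to ckh_evict_reloc_insert(), which evicts a resident item and relocates it to its alternate bucket until the insertion succeeds or a bucket cycle is detected. The following is a much-simplified standalone sketch of that flow; single-cell buckets and a fixed kick budget stand in for jemalloc's multi-cell buckets, PRNG-chosen victims, and cycle detection, and it keeps the ckh convention that false means success.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define TOY_NBUCKETS    64  /* Power of two. */
#define TOY_MAX_KICKS   32

typedef struct {
    const char *keys[TOY_NBUCKETS];
} toy_ckh_t;

static size_t
toy_hash(const char *key, unsigned which)
{
    /* FNV-1a, salted so the two hash functions differ. */
    uint64_t h = 14695981039346656037ULL ^ (which * 0x9e3779b97f4a7c15ULL);
    for (const char *p = key; *p != '\0'; p++) {
        h ^= (uint8_t)*p;
        h *= 1099511628211ULL;
    }
    return ((size_t)(h & (TOY_NBUCKETS - 1)));
}

/* Returns false on success, true on failure, like the ckh functions. */
static bool
toy_insert(toy_ckh_t *t, const char *key)
{
    for (unsigned kick = 0; kick < TOY_MAX_KICKS; kick++) {
        size_t b0 = toy_hash(key, 0);
        size_t b1 = toy_hash(key, 1);
        if (t->keys[b0] == NULL) {
            t->keys[b0] = key;
            return (false);
        }
        if (t->keys[b1] == NULL) {
            t->keys[b1] = key;
            return (false);
        }
        /* Both buckets occupied: evict one resident and re-home it. */
        const char *victim = t->keys[b1];
        t->keys[b1] = key;
        key = victim;
    }
    return (true);  /* Gave up; a real table would grow and rebuild. */
}
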
/* /*
* Try to rebuild the hash table from scratch by inserting all items from the * Try to rebuild the hash table from scratch by inserting all items from the
* old table into the new. * old table into the new.
*/ */
JEMALLOC_INLINE_C bool static bool
ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) {
{
size_t count, i, nins; size_t count, i, nins;
const void *key, *data; const void *key, *data;
@@ -233,22 +244,20 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
data = aTab[i].data; data = aTab[i].data;
if (ckh_try_insert(ckh, &key, &data)) { if (ckh_try_insert(ckh, &key, &data)) {
ckh->count = count; ckh->count = count;
return (true); return true;
} }
nins++; nins++;
} }
} }
return (false); return false;
} }
static bool static bool
ckh_grow(tsd_t *tsd, ckh_t *ckh) ckh_grow(tsd_t *tsd, ckh_t *ckh) {
{
bool ret; bool ret;
ckhc_t *tab, *ttab; ckhc_t *tab, *ttab;
size_t lg_curcells; unsigned lg_prevbuckets, lg_curcells;
unsigned lg_prevbuckets;
#ifdef CKH_COUNT #ifdef CKH_COUNT
ckh->ngrows++; ckh->ngrows++;
@@ -265,13 +274,13 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
size_t usize; size_t usize;
lg_curcells++; lg_curcells++;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (usize == 0) { if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
ret = true; ret = true;
goto label_return; goto label_return;
} }
tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE,
true, NULL); true, NULL, true, arena_ichoose(tsd, NULL));
if (tab == NULL) { if (tab == NULL) {
ret = true; ret = true;
goto label_return; goto label_return;
@@ -283,27 +292,26 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (!ckh_rebuild(ckh, tab)) { if (!ckh_rebuild(ckh, tab)) {
idalloctm(tsd, tab, tcache_get(tsd, false), true); idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
break; break;
} }
/* Rebuilding failed, so back out partially rebuilt table. */ /* Rebuilding failed, so back out partially rebuilt table. */
idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true); idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
ckh->tab = tab; ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets; ckh->lg_curbuckets = lg_prevbuckets;
} }
ret = false; ret = false;
label_return: label_return:
return (ret); return ret;
} }
static void static void
ckh_shrink(tsd_t *tsd, ckh_t *ckh) ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
{
ckhc_t *tab, *ttab; ckhc_t *tab, *ttab;
size_t lg_curcells, usize; size_t usize;
unsigned lg_prevbuckets; unsigned lg_prevbuckets, lg_curcells;
/* /*
* It is possible (though unlikely, given well behaved hashes) that the * It is possible (though unlikely, given well behaved hashes) that the
@@ -311,11 +319,12 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
*/ */
lg_prevbuckets = ckh->lg_curbuckets; lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (usize == 0) if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
return; return;
tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true, }
NULL); tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
true, arena_ichoose(tsd, NULL));
if (tab == NULL) { if (tab == NULL) {
/* /*
* An OOM error isn't worth propagating, since it doesn't * An OOM error isn't worth propagating, since it doesn't
@@ -330,7 +339,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (!ckh_rebuild(ckh, tab)) { if (!ckh_rebuild(ckh, tab)) {
idalloctm(tsd, tab, tcache_get(tsd, false), true); idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
#ifdef CKH_COUNT #ifdef CKH_COUNT
ckh->nshrinks++; ckh->nshrinks++;
#endif #endif
@@ -338,7 +347,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
} }
/* Rebuilding failed, so back out partially rebuilt table. */ /* Rebuilding failed, so back out partially rebuilt table. */
idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true); idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
ckh->tab = tab; ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets; ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT #ifdef CKH_COUNT
@@ -348,8 +357,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
bool bool
ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh_keycomp_t *keycomp) ckh_keycomp_t *keycomp) {
{
bool ret; bool ret;
size_t mincells, usize; size_t mincells, usize;
unsigned lg_mincells; unsigned lg_mincells;
@@ -379,20 +387,21 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2; mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
for (lg_mincells = LG_CKH_BUCKET_CELLS; for (lg_mincells = LG_CKH_BUCKET_CELLS;
(ZU(1) << lg_mincells) < mincells; (ZU(1) << lg_mincells) < mincells;
lg_mincells++) lg_mincells++) {
; /* Do nothing. */ /* Do nothing. */
}
ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->hash = hash; ckh->hash = hash;
ckh->keycomp = keycomp; ckh->keycomp = keycomp;
usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
if (usize == 0) { if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
ret = true; ret = true;
goto label_return; goto label_return;
} }
ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true, ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true,
NULL); NULL, true, arena_ichoose(tsd, NULL));
if (ckh->tab == NULL) { if (ckh->tab == NULL) {
ret = true; ret = true;
goto label_return; goto label_return;
@@ -400,13 +409,11 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ret = false; ret = false;
label_return: label_return:
return (ret); return ret;
} }
void void
ckh_delete(tsd_t *tsd, ckh_t *ckh) ckh_delete(tsd_t *tsd, ckh_t *ckh) {
{
assert(ckh != NULL); assert(ckh != NULL);
#ifdef CKH_VERBOSE #ifdef CKH_VERBOSE
@@ -421,43 +428,42 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh)
(unsigned long long)ckh->nrelocs); (unsigned long long)ckh->nrelocs);
#endif #endif
idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true); idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
if (config_debug) if (config_debug) {
memset(ckh, 0x5a, sizeof(ckh_t)); memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
}
} }
size_t size_t
ckh_count(ckh_t *ckh) ckh_count(ckh_t *ckh) {
{
assert(ckh != NULL); assert(ckh != NULL);
return (ckh->count); return ckh->count;
} }
bool bool
ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) {
{
size_t i, ncells; size_t i, ncells;
for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets + for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
LG_CKH_BUCKET_CELLS)); i < ncells; i++) { LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
if (ckh->tab[i].key != NULL) { if (ckh->tab[i].key != NULL) {
if (key != NULL) if (key != NULL) {
*key = (void *)ckh->tab[i].key; *key = (void *)ckh->tab[i].key;
if (data != NULL) }
if (data != NULL) {
*data = (void *)ckh->tab[i].data; *data = (void *)ckh->tab[i].data;
}
*tabind = i + 1; *tabind = i + 1;
return (false); return false;
} }
} }
return (true); return true;
} }
bool bool
ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) {
{
bool ret; bool ret;
assert(ckh != NULL); assert(ckh != NULL);
@@ -476,23 +482,24 @@ ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
ret = false; ret = false;
label_return: label_return:
return (ret); return ret;
} }
bool bool
ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
void **data) void **data) {
{
size_t cell; size_t cell;
assert(ckh != NULL); assert(ckh != NULL);
cell = ckh_isearch(ckh, searchkey); cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) { if (cell != SIZE_T_MAX) {
if (key != NULL) if (key != NULL) {
*key = (void *)ckh->tab[cell].key; *key = (void *)ckh->tab[cell].key;
if (data != NULL) }
if (data != NULL) {
*data = (void *)ckh->tab[cell].data; *data = (void *)ckh->tab[cell].data;
}
ckh->tab[cell].key = NULL; ckh->tab[cell].key = NULL;
ckh->tab[cell].data = NULL; /* Not necessary. */ ckh->tab[cell].data = NULL; /* Not necessary. */
@@ -505,51 +512,47 @@ ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
ckh_shrink(tsd, ckh); ckh_shrink(tsd, ckh);
} }
return (false); return false;
} }
return (true); return true;
} }
bool bool
ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) {
{
size_t cell; size_t cell;
assert(ckh != NULL); assert(ckh != NULL);
cell = ckh_isearch(ckh, searchkey); cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) { if (cell != SIZE_T_MAX) {
if (key != NULL) if (key != NULL) {
*key = (void *)ckh->tab[cell].key; *key = (void *)ckh->tab[cell].key;
if (data != NULL) }
if (data != NULL) {
*data = (void *)ckh->tab[cell].data; *data = (void *)ckh->tab[cell].data;
return (false); }
return false;
} }
return (true); return true;
} }
void void
ckh_string_hash(const void *key, size_t r_hash[2]) ckh_string_hash(const void *key, size_t r_hash[2]) {
{
hash(key, strlen((const char *)key), 0x94122f33U, r_hash); hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
} }
bool bool
ckh_string_keycomp(const void *k1, const void *k2) ckh_string_keycomp(const void *k1, const void *k2) {
{ assert(k1 != NULL);
assert(k2 != NULL);
assert(k1 != NULL);
assert(k2 != NULL);
return (strcmp((char *)k1, (char *)k2) ? false : true); return !strcmp((char *)k1, (char *)k2);
} }
void void
ckh_pointer_hash(const void *key, size_t r_hash[2]) ckh_pointer_hash(const void *key, size_t r_hash[2]) {
{
union { union {
const void *v; const void *v;
size_t i; size_t i;
@@ -561,8 +564,6 @@ ckh_pointer_hash(const void *key, size_t r_hash[2])
} }
bool bool
ckh_pointer_keycomp(const void *k1, const void *k2) ckh_pointer_keycomp(const void *k1, const void *k2) {
{ return (k1 == k2);
return ((k1 == k2) ? true : false);
} }
#define JEMALLOC_CTL_C_ #define JEMALLOC_CTL_C_
#include "jemalloc/internal/jemalloc_internal.h" #include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/util.h"
/******************************************************************************/ /******************************************************************************/
/* Data. */ /* Data. */
/* /*
* ctl_mtx protects the following: * ctl_mtx protects the following:
* - ctl_stats.* * - ctl_stats->*
*/ */
static malloc_mutex_t ctl_mtx; static malloc_mutex_t ctl_mtx;
static bool ctl_initialized; static bool ctl_initialized;
static uint64_t ctl_epoch; static ctl_stats_t *ctl_stats;
static ctl_stats_t ctl_stats; static ctl_arenas_t *ctl_arenas;
/******************************************************************************/ /******************************************************************************/
/* Helpers for named and indexed nodes. */ /* Helpers for named and indexed nodes. */
JEMALLOC_INLINE_C const ctl_named_node_t * static const ctl_named_node_t *
ctl_named_node(const ctl_node_t *node) ctl_named_node(const ctl_node_t *node) {
{
return ((node->named) ? (const ctl_named_node_t *)node : NULL); return ((node->named) ? (const ctl_named_node_t *)node : NULL);
} }
JEMALLOC_INLINE_C const ctl_named_node_t * static const ctl_named_node_t *
ctl_named_children(const ctl_named_node_t *node, int index) ctl_named_children(const ctl_named_node_t *node, size_t index) {
{
const ctl_named_node_t *children = ctl_named_node(node->children); const ctl_named_node_t *children = ctl_named_node(node->children);
return (children ? &children[index] : NULL); return (children ? &children[index] : NULL);
} }
JEMALLOC_INLINE_C const ctl_indexed_node_t * static const ctl_indexed_node_t *
ctl_indexed_node(const ctl_node_t *node) ctl_indexed_node(const ctl_node_t *node) {
{
return (!node->named ? (const ctl_indexed_node_t *)node : NULL); return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
} }
/******************************************************************************/ /******************************************************************************/
/* Function prototypes for non-inline static functions. */ /* Function prototypes for non-inline static functions. */
#define CTL_PROTO(n) \ #define CTL_PROTO(n) \
static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \ static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
size_t *oldlenp, void *newp, size_t newlen); void *oldp, size_t *oldlenp, void *newp, size_t newlen);
#define INDEX_PROTO(n) \ #define INDEX_PROTO(n) \
static const ctl_named_node_t *n##_index(const size_t *mib, \ static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \
size_t miblen, size_t i); const size_t *mib, size_t miblen, size_t i);
static bool ctl_arena_init(ctl_arena_stats_t *astats);
static void ctl_arena_clear(ctl_arena_stats_t *astats);
static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
arena_t *arena);
static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
ctl_arena_stats_t *astats);
static void ctl_arena_refresh(arena_t *arena, unsigned i);
static bool ctl_grow(void);
static void ctl_refresh(void);
static bool ctl_init(void);
static int ctl_lookup(const char *name, ctl_node_t const **nodesp,
size_t *mibp, size_t *depthp);
CTL_PROTO(version) CTL_PROTO(version)
CTL_PROTO(epoch) CTL_PROTO(epoch)
CTL_PROTO(background_thread)
CTL_PROTO(max_background_threads)
CTL_PROTO(thread_tcache_enabled) CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush) CTL_PROTO(thread_tcache_flush)
CTL_PROTO(thread_prof_name) CTL_PROTO(thread_prof_name)
@@ -77,29 +71,33 @@ CTL_PROTO(config_cache_oblivious)
CTL_PROTO(config_debug) CTL_PROTO(config_debug)
CTL_PROTO(config_fill) CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock) CTL_PROTO(config_lazy_lock)
CTL_PROTO(config_munmap) CTL_PROTO(config_malloc_conf)
CTL_PROTO(config_prof) CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc) CTL_PROTO(config_prof_libgcc)
CTL_PROTO(config_prof_libunwind) CTL_PROTO(config_prof_libunwind)
CTL_PROTO(config_stats) CTL_PROTO(config_stats)
CTL_PROTO(config_tcache)
CTL_PROTO(config_tls)
CTL_PROTO(config_utrace) CTL_PROTO(config_utrace)
CTL_PROTO(config_valgrind)
CTL_PROTO(config_xmalloc) CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort) CTL_PROTO(opt_abort)
CTL_PROTO(opt_abort_conf)
CTL_PROTO(opt_metadata_thp)
CTL_PROTO(opt_retain)
CTL_PROTO(opt_dss) CTL_PROTO(opt_dss)
CTL_PROTO(opt_lg_chunk)
CTL_PROTO(opt_narenas) CTL_PROTO(opt_narenas)
CTL_PROTO(opt_lg_dirty_mult) CTL_PROTO(opt_percpu_arena)
CTL_PROTO(opt_background_thread)
CTL_PROTO(opt_max_background_threads)
CTL_PROTO(opt_dirty_decay_ms)
CTL_PROTO(opt_muzzy_decay_ms)
CTL_PROTO(opt_stats_print) CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_stats_print_opts)
CTL_PROTO(opt_junk) CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero) CTL_PROTO(opt_zero)
CTL_PROTO(opt_quarantine)
CTL_PROTO(opt_redzone)
CTL_PROTO(opt_utrace) CTL_PROTO(opt_utrace)
CTL_PROTO(opt_xmalloc) CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_tcache) CTL_PROTO(opt_tcache)
CTL_PROTO(opt_thp)
CTL_PROTO(opt_lg_extent_max_active_fit)
CTL_PROTO(opt_lg_tcache_max) CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_prof) CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix) CTL_PROTO(opt_prof_prefix)
@@ -114,31 +112,34 @@ CTL_PROTO(opt_prof_accum)
CTL_PROTO(tcache_create) CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush) CTL_PROTO(tcache_flush)
CTL_PROTO(tcache_destroy) CTL_PROTO(tcache_destroy)
CTL_PROTO(arena_i_initialized)
CTL_PROTO(arena_i_decay)
CTL_PROTO(arena_i_purge) CTL_PROTO(arena_i_purge)
static void arena_purge(unsigned arena_ind); CTL_PROTO(arena_i_reset)
CTL_PROTO(arena_i_destroy)
CTL_PROTO(arena_i_dss) CTL_PROTO(arena_i_dss)
CTL_PROTO(arena_i_lg_dirty_mult) CTL_PROTO(arena_i_dirty_decay_ms)
CTL_PROTO(arena_i_chunk_hooks) CTL_PROTO(arena_i_muzzy_decay_ms)
CTL_PROTO(arena_i_extent_hooks)
CTL_PROTO(arena_i_retain_grow_limit)
INDEX_PROTO(arena_i) INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size) CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs) CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_run_size) CTL_PROTO(arenas_bin_i_slab_size)
INDEX_PROTO(arenas_bin_i) INDEX_PROTO(arenas_bin_i)
CTL_PROTO(arenas_lrun_i_size) CTL_PROTO(arenas_lextent_i_size)
INDEX_PROTO(arenas_lrun_i) INDEX_PROTO(arenas_lextent_i)
CTL_PROTO(arenas_hchunk_i_size)
INDEX_PROTO(arenas_hchunk_i)
CTL_PROTO(arenas_narenas) CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_initialized) CTL_PROTO(arenas_dirty_decay_ms)
CTL_PROTO(arenas_lg_dirty_mult) CTL_PROTO(arenas_muzzy_decay_ms)
CTL_PROTO(arenas_quantum) CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page) CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max) CTL_PROTO(arenas_tcache_max)
CTL_PROTO(arenas_nbins) CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins) CTL_PROTO(arenas_nhbins)
CTL_PROTO(arenas_nlruns) CTL_PROTO(arenas_nlextents)
CTL_PROTO(arenas_nhchunks) CTL_PROTO(arenas_create)
CTL_PROTO(arenas_extend) CTL_PROTO(arenas_lookup)
CTL_PROTO(prof_thread_active_init) CTL_PROTO(prof_thread_active_init)
CTL_PROTO(prof_active) CTL_PROTO(prof_active)
CTL_PROTO(prof_dump) CTL_PROTO(prof_dump)
@@ -154,67 +155,94 @@ CTL_PROTO(stats_arenas_i_large_allocated)
CTL_PROTO(stats_arenas_i_large_nmalloc) CTL_PROTO(stats_arenas_i_large_nmalloc)
CTL_PROTO(stats_arenas_i_large_ndalloc) CTL_PROTO(stats_arenas_i_large_ndalloc)
CTL_PROTO(stats_arenas_i_large_nrequests) CTL_PROTO(stats_arenas_i_large_nrequests)
CTL_PROTO(stats_arenas_i_huge_allocated)
CTL_PROTO(stats_arenas_i_huge_nmalloc)
CTL_PROTO(stats_arenas_i_huge_ndalloc)
CTL_PROTO(stats_arenas_i_huge_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc) CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc) CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests) CTL_PROTO(stats_arenas_i_bins_j_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_curregs) CTL_PROTO(stats_arenas_i_bins_j_curregs)
CTL_PROTO(stats_arenas_i_bins_j_nfills) CTL_PROTO(stats_arenas_i_bins_j_nfills)
CTL_PROTO(stats_arenas_i_bins_j_nflushes) CTL_PROTO(stats_arenas_i_bins_j_nflushes)
CTL_PROTO(stats_arenas_i_bins_j_nruns) CTL_PROTO(stats_arenas_i_bins_j_nslabs)
CTL_PROTO(stats_arenas_i_bins_j_nreruns) CTL_PROTO(stats_arenas_i_bins_j_nreslabs)
CTL_PROTO(stats_arenas_i_bins_j_curruns) CTL_PROTO(stats_arenas_i_bins_j_curslabs)
INDEX_PROTO(stats_arenas_i_bins_j) INDEX_PROTO(stats_arenas_i_bins_j)
CTL_PROTO(stats_arenas_i_lruns_j_nmalloc) CTL_PROTO(stats_arenas_i_lextents_j_nmalloc)
CTL_PROTO(stats_arenas_i_lruns_j_ndalloc) CTL_PROTO(stats_arenas_i_lextents_j_ndalloc)
CTL_PROTO(stats_arenas_i_lruns_j_nrequests) CTL_PROTO(stats_arenas_i_lextents_j_nrequests)
CTL_PROTO(stats_arenas_i_lruns_j_curruns) CTL_PROTO(stats_arenas_i_lextents_j_curlextents)
INDEX_PROTO(stats_arenas_i_lruns_j) INDEX_PROTO(stats_arenas_i_lextents_j)
CTL_PROTO(stats_arenas_i_hchunks_j_nmalloc)
CTL_PROTO(stats_arenas_i_hchunks_j_ndalloc)
CTL_PROTO(stats_arenas_i_hchunks_j_nrequests)
CTL_PROTO(stats_arenas_i_hchunks_j_curhchunks)
INDEX_PROTO(stats_arenas_i_hchunks_j)
CTL_PROTO(stats_arenas_i_nthreads) CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_uptime)
CTL_PROTO(stats_arenas_i_dss) CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_lg_dirty_mult) CTL_PROTO(stats_arenas_i_dirty_decay_ms)
CTL_PROTO(stats_arenas_i_muzzy_decay_ms)
CTL_PROTO(stats_arenas_i_pactive) CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty) CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_pmuzzy)
CTL_PROTO(stats_arenas_i_mapped) CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_npurge) CTL_PROTO(stats_arenas_i_retained)
CTL_PROTO(stats_arenas_i_nmadvise) CTL_PROTO(stats_arenas_i_dirty_npurge)
CTL_PROTO(stats_arenas_i_purged) CTL_PROTO(stats_arenas_i_dirty_nmadvise)
CTL_PROTO(stats_arenas_i_metadata_mapped) CTL_PROTO(stats_arenas_i_dirty_purged)
CTL_PROTO(stats_arenas_i_metadata_allocated) CTL_PROTO(stats_arenas_i_muzzy_npurge)
CTL_PROTO(stats_arenas_i_muzzy_nmadvise)
CTL_PROTO(stats_arenas_i_muzzy_purged)
CTL_PROTO(stats_arenas_i_base)
CTL_PROTO(stats_arenas_i_internal)
CTL_PROTO(stats_arenas_i_metadata_thp)
CTL_PROTO(stats_arenas_i_tcache_bytes)
CTL_PROTO(stats_arenas_i_resident)
INDEX_PROTO(stats_arenas_i) INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_cactive)
CTL_PROTO(stats_allocated) CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active) CTL_PROTO(stats_active)
CTL_PROTO(stats_background_thread_num_threads)
CTL_PROTO(stats_background_thread_num_runs)
CTL_PROTO(stats_background_thread_run_interval)
CTL_PROTO(stats_metadata) CTL_PROTO(stats_metadata)
CTL_PROTO(stats_metadata_thp)
CTL_PROTO(stats_resident) CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped) CTL_PROTO(stats_mapped)
CTL_PROTO(stats_retained)
#define MUTEX_STATS_CTL_PROTO_GEN(n) \
CTL_PROTO(stats_##n##_num_ops) \
CTL_PROTO(stats_##n##_num_wait) \
CTL_PROTO(stats_##n##_num_spin_acq) \
CTL_PROTO(stats_##n##_num_owner_switch) \
CTL_PROTO(stats_##n##_total_wait_time) \
CTL_PROTO(stats_##n##_max_wait_time) \
CTL_PROTO(stats_##n##_max_num_thds)
/* Global mutexes. */
#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx)
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
/* Per arena mutexes. */
#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(arenas_i_mutexes_##mtx)
MUTEX_PROF_ARENA_MUTEXES
#undef OP
/* Arena bin mutexes. */
MUTEX_STATS_CTL_PROTO_GEN(arenas_i_bins_j_mutex)
#undef MUTEX_STATS_CTL_PROTO_GEN
CTL_PROTO(stats_mutexes_reset)
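
The MUTEX_STATS_CTL_PROTO_GEN and OP() macros above are an X-macro pattern: the mutex lists (MUTEX_PROF_GLOBAL_MUTEXES, MUTEX_PROF_ARENA_MUTEXES) are written once and expanded under different definitions of OP, here to stamp out one group of prototypes per mutex and, further down, to generate the matching mallctl tree nodes. A small self-contained sketch of the technique with made-up names:

#include <stddef.h>
#include <stdio.h>

/*
 * X-macro sketch: the list is defined once and expanded under different
 * definitions of OP, mirroring how the mutex lists above are reused for
 * both prototypes and tree nodes.  Names are hypothetical.
 */
#define TOY_MUTEX_LIST      \
    OP(background_thread)   \
    OP(ctl)                 \
    OP(prof)

/* Expansion 1: one counter per listed mutex. */
#define OP(mtx) unsigned long toy_##mtx##_num_ops;
TOY_MUTEX_LIST
#undef OP

/* Expansion 2: a name table for reporting. */
static const char *toy_mutex_names[] = {
#define OP(mtx) #mtx,
    TOY_MUTEX_LIST
#undef OP
};

int
main(void)
{
    toy_ctl_num_ops = 42;   /* Touch one generated counter. */
    printf("ctl num_ops: %lu\n", toy_ctl_num_ops);
    for (size_t i = 0; i < sizeof(toy_mutex_names) /
        sizeof(toy_mutex_names[0]); i++) {
        printf("mutex: %s\n", toy_mutex_names[i]);
    }
    return (0);
}
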
/******************************************************************************/ /******************************************************************************/
/* mallctl tree. */ /* mallctl tree. */
/* Maximum tree depth. */ #define NAME(n) {true}, n
#define CTL_MAX_DEPTH 6 #define CHILD(t, c) \
#define NAME(n) {true}, n
#define CHILD(t, c) \
sizeof(c##_node) / sizeof(ctl_##t##_node_t), \ sizeof(c##_node) / sizeof(ctl_##t##_node_t), \
(ctl_node_t *)c##_node, \ (ctl_node_t *)c##_node, \
NULL NULL
#define CTL(c) 0, NULL, c##_ctl #define CTL(c) 0, NULL, c##_ctl
/* /*
* Only handles internal indexed nodes, since there are currently no external * Only handles internal indexed nodes, since there are currently no external
* ones. * ones.
*/ */
#define INDEX(i) {false}, i##_index #define INDEX(i) {false}, i##_index
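
The node tables that follow define the mallctl namespace walked by the public mallctl(), mallctlnametomib(), and mallctlbymib() entry points. A short usage sketch against names that appear in this tree; note that stats.* values are only refreshed when "epoch" is written, and that Redis builds its bundled jemalloc with a symbol prefix, so the calls below would be je_mallctl() there.

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    /* stats.* values are snapshots; bump "epoch" to refresh them. */
    uint64_t epoch = 1;
    size_t sz = sizeof(epoch);
    mallctl("epoch", &epoch, &sz, &epoch, sz);

    size_t allocated;
    sz = sizeof(allocated);
    if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
        printf("stats.allocated: %zu\n", allocated);

    size_t retained;
    sz = sizeof(retained);
    if (mallctl("stats.retained", &retained, &sz, NULL, 0) == 0)
        printf("stats.retained: %zu\n", retained);

    const char *version;
    sz = sizeof(version);
    if (mallctl("version", &version, &sz, NULL, 0) == 0)
        printf("version: %s\n", version);
    return (0);
}
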
static const ctl_named_node_t thread_tcache_node[] = { static const ctl_named_node_t thread_tcache_node[] = {
{NAME("enabled"), CTL(thread_tcache_enabled)}, {NAME("enabled"), CTL(thread_tcache_enabled)},
@@ -241,32 +269,36 @@ static const ctl_named_node_t config_node[] = {
{NAME("debug"), CTL(config_debug)}, {NAME("debug"), CTL(config_debug)},
{NAME("fill"), CTL(config_fill)}, {NAME("fill"), CTL(config_fill)},
{NAME("lazy_lock"), CTL(config_lazy_lock)}, {NAME("lazy_lock"), CTL(config_lazy_lock)},
{NAME("munmap"), CTL(config_munmap)}, {NAME("malloc_conf"), CTL(config_malloc_conf)},
{NAME("prof"), CTL(config_prof)}, {NAME("prof"), CTL(config_prof)},
{NAME("prof_libgcc"), CTL(config_prof_libgcc)}, {NAME("prof_libgcc"), CTL(config_prof_libgcc)},
{NAME("prof_libunwind"), CTL(config_prof_libunwind)}, {NAME("prof_libunwind"), CTL(config_prof_libunwind)},
{NAME("stats"), CTL(config_stats)}, {NAME("stats"), CTL(config_stats)},
{NAME("tcache"), CTL(config_tcache)},
{NAME("tls"), CTL(config_tls)},
{NAME("utrace"), CTL(config_utrace)}, {NAME("utrace"), CTL(config_utrace)},
{NAME("valgrind"), CTL(config_valgrind)},
{NAME("xmalloc"), CTL(config_xmalloc)} {NAME("xmalloc"), CTL(config_xmalloc)}
}; };
static const ctl_named_node_t opt_node[] = { static const ctl_named_node_t opt_node[] = {
{NAME("abort"), CTL(opt_abort)}, {NAME("abort"), CTL(opt_abort)},
{NAME("abort_conf"), CTL(opt_abort_conf)},
{NAME("metadata_thp"), CTL(opt_metadata_thp)},
{NAME("retain"), CTL(opt_retain)},
{NAME("dss"), CTL(opt_dss)}, {NAME("dss"), CTL(opt_dss)},
{NAME("lg_chunk"), CTL(opt_lg_chunk)},
{NAME("narenas"), CTL(opt_narenas)}, {NAME("narenas"), CTL(opt_narenas)},
{NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)}, {NAME("percpu_arena"), CTL(opt_percpu_arena)},
{NAME("background_thread"), CTL(opt_background_thread)},
{NAME("max_background_threads"), CTL(opt_max_background_threads)},
{NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)},
{NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
{NAME("stats_print"), CTL(opt_stats_print)}, {NAME("stats_print"), CTL(opt_stats_print)},
{NAME("stats_print_opts"), CTL(opt_stats_print_opts)},
{NAME("junk"), CTL(opt_junk)}, {NAME("junk"), CTL(opt_junk)},
{NAME("zero"), CTL(opt_zero)}, {NAME("zero"), CTL(opt_zero)},
{NAME("quarantine"), CTL(opt_quarantine)},
{NAME("redzone"), CTL(opt_redzone)},
{NAME("utrace"), CTL(opt_utrace)}, {NAME("utrace"), CTL(opt_utrace)},
{NAME("xmalloc"), CTL(opt_xmalloc)}, {NAME("xmalloc"), CTL(opt_xmalloc)},
{NAME("tcache"), CTL(opt_tcache)}, {NAME("tcache"), CTL(opt_tcache)},
{NAME("thp"), CTL(opt_thp)},
{NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)},
{NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)}, {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
{NAME("prof"), CTL(opt_prof)}, {NAME("prof"), CTL(opt_prof)},
{NAME("prof_prefix"), CTL(opt_prof_prefix)}, {NAME("prof_prefix"), CTL(opt_prof_prefix)},
@@ -287,10 +319,16 @@ static const ctl_named_node_t tcache_node[] = {
}; };
static const ctl_named_node_t arena_i_node[] = { static const ctl_named_node_t arena_i_node[] = {
{NAME("initialized"), CTL(arena_i_initialized)},
{NAME("decay"), CTL(arena_i_decay)},
{NAME("purge"), CTL(arena_i_purge)}, {NAME("purge"), CTL(arena_i_purge)},
{NAME("reset"), CTL(arena_i_reset)},
{NAME("destroy"), CTL(arena_i_destroy)},
{NAME("dss"), CTL(arena_i_dss)}, {NAME("dss"), CTL(arena_i_dss)},
{NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)}, {NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)},
{NAME("chunk_hooks"), CTL(arena_i_chunk_hooks)} {NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)},
{NAME("extent_hooks"), CTL(arena_i_extent_hooks)},
{NAME("retain_grow_limit"), CTL(arena_i_retain_grow_limit)}
}; };
static const ctl_named_node_t super_arena_i_node[] = { static const ctl_named_node_t super_arena_i_node[] = {
{NAME(""), CHILD(named, arena_i)} {NAME(""), CHILD(named, arena_i)}
@@ -303,7 +341,7 @@ static const ctl_indexed_node_t arena_node[] = {
static const ctl_named_node_t arenas_bin_i_node[] = { static const ctl_named_node_t arenas_bin_i_node[] = {
{NAME("size"), CTL(arenas_bin_i_size)}, {NAME("size"), CTL(arenas_bin_i_size)},
{NAME("nregs"), CTL(arenas_bin_i_nregs)}, {NAME("nregs"), CTL(arenas_bin_i_nregs)},
{NAME("run_size"), CTL(arenas_bin_i_run_size)} {NAME("slab_size"), CTL(arenas_bin_i_slab_size)}
}; };
static const ctl_named_node_t super_arenas_bin_i_node[] = { static const ctl_named_node_t super_arenas_bin_i_node[] = {
{NAME(""), CHILD(named, arenas_bin_i)} {NAME(""), CHILD(named, arenas_bin_i)}
@@ -313,43 +351,31 @@ static const ctl_indexed_node_t arenas_bin_node[] = {
{INDEX(arenas_bin_i)} {INDEX(arenas_bin_i)}
}; };
static const ctl_named_node_t arenas_lrun_i_node[] = { static const ctl_named_node_t arenas_lextent_i_node[] = {
{NAME("size"), CTL(arenas_lrun_i_size)} {NAME("size"), CTL(arenas_lextent_i_size)}
}; };
static const ctl_named_node_t super_arenas_lrun_i_node[] = { static const ctl_named_node_t super_arenas_lextent_i_node[] = {
{NAME(""), CHILD(named, arenas_lrun_i)} {NAME(""), CHILD(named, arenas_lextent_i)}
}; };
static const ctl_indexed_node_t arenas_lrun_node[] = { static const ctl_indexed_node_t arenas_lextent_node[] = {
{INDEX(arenas_lrun_i)} {INDEX(arenas_lextent_i)}
};
static const ctl_named_node_t arenas_hchunk_i_node[] = {
{NAME("size"), CTL(arenas_hchunk_i_size)}
};
static const ctl_named_node_t super_arenas_hchunk_i_node[] = {
{NAME(""), CHILD(named, arenas_hchunk_i)}
};
static const ctl_indexed_node_t arenas_hchunk_node[] = {
{INDEX(arenas_hchunk_i)}
}; };
static const ctl_named_node_t arenas_node[] = { static const ctl_named_node_t arenas_node[] = {
{NAME("narenas"), CTL(arenas_narenas)}, {NAME("narenas"), CTL(arenas_narenas)},
{NAME("initialized"), CTL(arenas_initialized)}, {NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)},
{NAME("lg_dirty_mult"), CTL(arenas_lg_dirty_mult)}, {NAME("muzzy_decay_ms"), CTL(arenas_muzzy_decay_ms)},
{NAME("quantum"), CTL(arenas_quantum)}, {NAME("quantum"), CTL(arenas_quantum)},
{NAME("page"), CTL(arenas_page)}, {NAME("page"), CTL(arenas_page)},
{NAME("tcache_max"), CTL(arenas_tcache_max)}, {NAME("tcache_max"), CTL(arenas_tcache_max)},
{NAME("nbins"), CTL(arenas_nbins)}, {NAME("nbins"), CTL(arenas_nbins)},
{NAME("nhbins"), CTL(arenas_nhbins)}, {NAME("nhbins"), CTL(arenas_nhbins)},
{NAME("bin"), CHILD(indexed, arenas_bin)}, {NAME("bin"), CHILD(indexed, arenas_bin)},
{NAME("nlruns"), CTL(arenas_nlruns)}, {NAME("nlextents"), CTL(arenas_nlextents)},
{NAME("lrun"), CHILD(indexed, arenas_lrun)}, {NAME("lextent"), CHILD(indexed, arenas_lextent)},
{NAME("nhchunks"), CTL(arenas_nhchunks)}, {NAME("create"), CTL(arenas_create)},
{NAME("hchunk"), CHILD(indexed, arenas_hchunk)}, {NAME("lookup"), CTL(arenas_lookup)}
{NAME("extend"), CTL(arenas_extend)}
}; };
static const ctl_named_node_t prof_node[] = { static const ctl_named_node_t prof_node[] = {
@@ -362,11 +388,6 @@ static const ctl_named_node_t prof_node[] = {
{NAME("lg_sample"), CTL(lg_prof_sample)} {NAME("lg_sample"), CTL(lg_prof_sample)}
}; };
static const ctl_named_node_t stats_arenas_i_metadata_node[] = {
{NAME("mapped"), CTL(stats_arenas_i_metadata_mapped)},
{NAME("allocated"), CTL(stats_arenas_i_metadata_allocated)}
};
static const ctl_named_node_t stats_arenas_i_small_node[] = { static const ctl_named_node_t stats_arenas_i_small_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, {NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
@@ -381,13 +402,27 @@ static const ctl_named_node_t stats_arenas_i_large_node[] = {
{NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)} {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
}; };
static const ctl_named_node_t stats_arenas_i_huge_node[] = { #define MUTEX_PROF_DATA_NODE(prefix) \
{NAME("allocated"), CTL(stats_arenas_i_huge_allocated)}, static const ctl_named_node_t stats_##prefix##_node[] = { \
{NAME("nmalloc"), CTL(stats_arenas_i_huge_nmalloc)}, {NAME("num_ops"), \
{NAME("ndalloc"), CTL(stats_arenas_i_huge_ndalloc)}, CTL(stats_##prefix##_num_ops)}, \
{NAME("nrequests"), CTL(stats_arenas_i_huge_nrequests)} {NAME("num_wait"), \
CTL(stats_##prefix##_num_wait)}, \
{NAME("num_spin_acq"), \
CTL(stats_##prefix##_num_spin_acq)}, \
{NAME("num_owner_switch"), \
CTL(stats_##prefix##_num_owner_switch)}, \
{NAME("total_wait_time"), \
CTL(stats_##prefix##_total_wait_time)}, \
{NAME("max_wait_time"), \
CTL(stats_##prefix##_max_wait_time)}, \
{NAME("max_num_thds"), \
CTL(stats_##prefix##_max_num_thds)} \
/* Note that # of current waiting thread not provided. */ \
}; };
MUTEX_PROF_DATA_NODE(arenas_i_bins_j_mutex)
static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
{NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)}, {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
@@ -395,10 +430,12 @@ static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
{NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)}, {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)},
{NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)}, {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
{NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)}, {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
{NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)}, {NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)},
{NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)}, {NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)},
{NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)} {NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)},
{NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)}
}; };
static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = { static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
{NAME(""), CHILD(named, stats_arenas_i_bins_j)} {NAME(""), CHILD(named, stats_arenas_i_bins_j)}
}; };
@@ -407,51 +444,57 @@ static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
{INDEX(stats_arenas_i_bins_j)} {INDEX(stats_arenas_i_bins_j)}
}; };
static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = { static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = {
{NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)}, {NAME("nmalloc"), CTL(stats_arenas_i_lextents_j_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_lextents_j_ndalloc)},
{NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)}, {NAME("nrequests"), CTL(stats_arenas_i_lextents_j_nrequests)},
{NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)} {NAME("curlextents"), CTL(stats_arenas_i_lextents_j_curlextents)}
}; };
static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = { static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = {
{NAME(""), CHILD(named, stats_arenas_i_lruns_j)} {NAME(""), CHILD(named, stats_arenas_i_lextents_j)}
}; };
static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = { static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = {
{INDEX(stats_arenas_i_lruns_j)} {INDEX(stats_arenas_i_lextents_j)}
}; };
static const ctl_named_node_t stats_arenas_i_hchunks_j_node[] = { #define OP(mtx) MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx)
{NAME("nmalloc"), CTL(stats_arenas_i_hchunks_j_nmalloc)}, MUTEX_PROF_ARENA_MUTEXES
{NAME("ndalloc"), CTL(stats_arenas_i_hchunks_j_ndalloc)}, #undef OP
{NAME("nrequests"), CTL(stats_arenas_i_hchunks_j_nrequests)},
{NAME("curhchunks"), CTL(stats_arenas_i_hchunks_j_curhchunks)}
};
static const ctl_named_node_t super_stats_arenas_i_hchunks_j_node[] = {
{NAME(""), CHILD(named, stats_arenas_i_hchunks_j)}
};
static const ctl_indexed_node_t stats_arenas_i_hchunks_node[] = { static const ctl_named_node_t stats_arenas_i_mutexes_node[] = {
{INDEX(stats_arenas_i_hchunks_j)} #define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)},
MUTEX_PROF_ARENA_MUTEXES
#undef OP
}; };
static const ctl_named_node_t stats_arenas_i_node[] = { static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, {NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
{NAME("uptime"), CTL(stats_arenas_i_uptime)},
{NAME("dss"), CTL(stats_arenas_i_dss)}, {NAME("dss"), CTL(stats_arenas_i_dss)},
{NAME("lg_dirty_mult"), CTL(stats_arenas_i_lg_dirty_mult)}, {NAME("dirty_decay_ms"), CTL(stats_arenas_i_dirty_decay_ms)},
{NAME("muzzy_decay_ms"), CTL(stats_arenas_i_muzzy_decay_ms)},
{NAME("pactive"), CTL(stats_arenas_i_pactive)}, {NAME("pactive"), CTL(stats_arenas_i_pactive)},
{NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, {NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
{NAME("pmuzzy"), CTL(stats_arenas_i_pmuzzy)},
{NAME("mapped"), CTL(stats_arenas_i_mapped)}, {NAME("mapped"), CTL(stats_arenas_i_mapped)},
{NAME("npurge"), CTL(stats_arenas_i_npurge)}, {NAME("retained"), CTL(stats_arenas_i_retained)},
{NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)}, {NAME("dirty_npurge"), CTL(stats_arenas_i_dirty_npurge)},
{NAME("purged"), CTL(stats_arenas_i_purged)}, {NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)},
{NAME("metadata"), CHILD(named, stats_arenas_i_metadata)}, {NAME("dirty_purged"), CTL(stats_arenas_i_dirty_purged)},
{NAME("muzzy_npurge"), CTL(stats_arenas_i_muzzy_npurge)},
{NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)},
{NAME("muzzy_purged"), CTL(stats_arenas_i_muzzy_purged)},
{NAME("base"), CTL(stats_arenas_i_base)},
{NAME("internal"), CTL(stats_arenas_i_internal)},
{NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)},
{NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)},
{NAME("resident"), CTL(stats_arenas_i_resident)},
{NAME("small"), CHILD(named, stats_arenas_i_small)}, {NAME("small"), CHILD(named, stats_arenas_i_small)},
{NAME("large"), CHILD(named, stats_arenas_i_large)}, {NAME("large"), CHILD(named, stats_arenas_i_large)},
{NAME("huge"), CHILD(named, stats_arenas_i_huge)},
{NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
{NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)}, {NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)},
{NAME("hchunks"), CHILD(indexed, stats_arenas_i_hchunks)} {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)}
}; };
static const ctl_named_node_t super_stats_arenas_i_node[] = { static const ctl_named_node_t super_stats_arenas_i_node[] = {
{NAME(""), CHILD(named, stats_arenas_i)} {NAME(""), CHILD(named, stats_arenas_i)}
@@ -461,19 +504,43 @@ static const ctl_indexed_node_t stats_arenas_node[] = {
{INDEX(stats_arenas_i)} {INDEX(stats_arenas_i)}
}; };
static const ctl_named_node_t stats_background_thread_node[] = {
{NAME("num_threads"), CTL(stats_background_thread_num_threads)},
{NAME("num_runs"), CTL(stats_background_thread_num_runs)},
{NAME("run_interval"), CTL(stats_background_thread_run_interval)}
};
#define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx)
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
static const ctl_named_node_t stats_mutexes_node[] = {
#define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)},
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
{NAME("reset"), CTL(stats_mutexes_reset)}
};
#undef MUTEX_PROF_DATA_NODE
static const ctl_named_node_t stats_node[] = { static const ctl_named_node_t stats_node[] = {
{NAME("cactive"), CTL(stats_cactive)},
{NAME("allocated"), CTL(stats_allocated)}, {NAME("allocated"), CTL(stats_allocated)},
{NAME("active"), CTL(stats_active)}, {NAME("active"), CTL(stats_active)},
{NAME("metadata"), CTL(stats_metadata)}, {NAME("metadata"), CTL(stats_metadata)},
{NAME("metadata_thp"), CTL(stats_metadata_thp)},
{NAME("resident"), CTL(stats_resident)}, {NAME("resident"), CTL(stats_resident)},
{NAME("mapped"), CTL(stats_mapped)}, {NAME("mapped"), CTL(stats_mapped)},
{NAME("retained"), CTL(stats_retained)},
{NAME("background_thread"),
CHILD(named, stats_background_thread)},
{NAME("mutexes"), CHILD(named, stats_mutexes)},
{NAME("arenas"), CHILD(indexed, stats_arenas)} {NAME("arenas"), CHILD(indexed, stats_arenas)}
}; };
static const ctl_named_node_t root_node[] = { static const ctl_named_node_t root_node[] = {
{NAME("version"), CTL(version)}, {NAME("version"), CTL(version)},
{NAME("epoch"), CTL(epoch)}, {NAME("epoch"), CTL(epoch)},
{NAME("background_thread"), CTL(background_thread)},
{NAME("max_background_threads"), CTL(max_background_threads)},
{NAME("thread"), CHILD(named, thread)}, {NAME("thread"), CHILD(named, thread)},
{NAME("config"), CHILD(named, config)}, {NAME("config"), CHILD(named, config)},
{NAME("opt"), CHILD(named, opt)}, {NAME("opt"), CHILD(named, opt)},
@@ -494,312 +561,519 @@ static const ctl_named_node_t super_root_node[] = {
/******************************************************************************/ /******************************************************************************/
static bool /*
ctl_arena_init(ctl_arena_stats_t *astats) * Sets *dst + *src non-atomically. This is safe, since everything is
{ * synchronized by the ctl mutex.
*/
static void
ctl_accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) {
#ifdef JEMALLOC_ATOMIC_U64
uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED);
atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED);
#else
*dst += *src;
#endif
}
/* Likewise: with ctl mutex synchronization, reading is simple. */
static uint64_t
ctl_arena_stats_read_u64(arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
return atomic_load_u64(p, ATOMIC_RELAXED);
#else
return *p;
#endif
}
static void
accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED);
atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED);
}
/******************************************************************************/
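
ctl_accum_arena_stats_u64() above adds two counters with a relaxed load/store pair instead of an atomic read-modify-write, which is safe only because every reader and writer of these statistics holds ctl_mtx. A self-contained C11 sketch of the same pattern; jemalloc uses its own atomic_load_u64/atomic_store_u64 wrappers rather than stdatomic directly.

#include <stdatomic.h>
#include <stdint.h>

/*
 * C11 sketch of the ctl_accum_arena_stats_u64() pattern: a plain
 * load/add/store is sufficient because all access to these counters is
 * serialized by an external mutex (ctl_mtx in jemalloc); the relaxed atomic
 * type simply matches how the same fields are updated elsewhere.
 */
static void
accum_u64_locked(_Atomic uint64_t *dst, _Atomic uint64_t *src)
{
    uint64_t cur_dst = atomic_load_explicit(dst, memory_order_relaxed);
    uint64_t cur_src = atomic_load_explicit(src, memory_order_relaxed);
    atomic_store_explicit(dst, cur_dst + cur_src, memory_order_relaxed);
}
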
if (astats->lstats == NULL) { static unsigned
astats->lstats = (malloc_large_stats_t *)a0malloc(nlclasses * arenas_i2a_impl(size_t i, bool compat, bool validate) {
sizeof(malloc_large_stats_t)); unsigned a;
if (astats->lstats == NULL)
return (true); switch (i) {
case MALLCTL_ARENAS_ALL:
a = 0;
break;
case MALLCTL_ARENAS_DESTROYED:
a = 1;
break;
default:
if (compat && i == ctl_arenas->narenas) {
/*
* Provide deprecated backward compatibility for
* accessing the merged stats at index narenas rather
* than via MALLCTL_ARENAS_ALL. This is scheduled for
* removal in 6.0.0.
*/
a = 0;
} else if (validate && i >= ctl_arenas->narenas) {
a = UINT_MAX;
} else {
/*
* This function should never be called for an index
* more than one past the range of indices that have
* initialized ctl data.
*/
assert(i < ctl_arenas->narenas || (!validate && i ==
ctl_arenas->narenas));
a = (unsigned)i + 2;
}
break;
} }
if (astats->hstats == NULL) { return a;
astats->hstats = (malloc_huge_stats_t *)a0malloc(nhclasses * }
sizeof(malloc_huge_stats_t));
if (astats->hstats == NULL) static unsigned
return (true); arenas_i2a(size_t i) {
return arenas_i2a_impl(i, true, false);
}
static ctl_arena_t *
arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) {
ctl_arena_t *ret;
assert(!compat || !init);
ret = ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)];
if (init && ret == NULL) {
if (config_stats) {
struct container_s {
ctl_arena_t ctl_arena;
ctl_arena_stats_t astats;
};
struct container_s *cont =
(struct container_s *)base_alloc(tsd_tsdn(tsd),
b0get(), sizeof(struct container_s), QUANTUM);
if (cont == NULL) {
return NULL;
}
ret = &cont->ctl_arena;
ret->astats = &cont->astats;
} else {
ret = (ctl_arena_t *)base_alloc(tsd_tsdn(tsd), b0get(),
sizeof(ctl_arena_t), QUANTUM);
if (ret == NULL) {
return NULL;
}
}
ret->arena_ind = (unsigned)i;
ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)] = ret;
} }
return (false); assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i));
return ret;
} }
static void static ctl_arena_t *
ctl_arena_clear(ctl_arena_stats_t *astats) arenas_i(size_t i) {
{ ctl_arena_t *ret = arenas_i_impl(tsd_fetch(), i, true, false);
assert(ret != NULL);
return ret;
}
astats->dss = dss_prec_names[dss_prec_limit]; static void
astats->lg_dirty_mult = -1; ctl_arena_clear(ctl_arena_t *ctl_arena) {
astats->pactive = 0; ctl_arena->nthreads = 0;
astats->pdirty = 0; ctl_arena->dss = dss_prec_names[dss_prec_limit];
ctl_arena->dirty_decay_ms = -1;
ctl_arena->muzzy_decay_ms = -1;
ctl_arena->pactive = 0;
ctl_arena->pdirty = 0;
ctl_arena->pmuzzy = 0;
if (config_stats) { if (config_stats) {
memset(&astats->astats, 0, sizeof(arena_stats_t)); memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t));
astats->allocated_small = 0; ctl_arena->astats->allocated_small = 0;
astats->nmalloc_small = 0; ctl_arena->astats->nmalloc_small = 0;
astats->ndalloc_small = 0; ctl_arena->astats->ndalloc_small = 0;
astats->nrequests_small = 0; ctl_arena->astats->nrequests_small = 0;
memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t)); memset(ctl_arena->astats->bstats, 0, NBINS *
memset(astats->lstats, 0, nlclasses * sizeof(bin_stats_t));
sizeof(malloc_large_stats_t)); memset(ctl_arena->astats->lstats, 0, (NSIZES - NBINS) *
memset(astats->hstats, 0, nhclasses * sizeof(arena_stats_large_t));
sizeof(malloc_huge_stats_t));
} }
} }
static void
ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
	unsigned i;

	if (config_stats) {
		arena_stats_merge(tsdn, arena, &ctl_arena->nthreads,
		    &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
		    &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
		    &ctl_arena->pdirty, &ctl_arena->pmuzzy,
		    &ctl_arena->astats->astats, ctl_arena->astats->bstats,
		    ctl_arena->astats->lstats);

		for (i = 0; i < NBINS; i++) {
			ctl_arena->astats->allocated_small +=
			    ctl_arena->astats->bstats[i].curregs *
			    sz_index2size(i);
			ctl_arena->astats->nmalloc_small +=
			    ctl_arena->astats->bstats[i].nmalloc;
			ctl_arena->astats->ndalloc_small +=
			    ctl_arena->astats->bstats[i].ndalloc;
			ctl_arena->astats->nrequests_small +=
			    ctl_arena->astats->bstats[i].nrequests;
		}
	} else {
		arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads,
		    &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
		    &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
		    &ctl_arena->pdirty, &ctl_arena->pmuzzy);
	}
}
static void
ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
    bool destroyed) {
	unsigned i;

	if (!destroyed) {
		ctl_sdarena->nthreads += ctl_arena->nthreads;
		ctl_sdarena->pactive += ctl_arena->pactive;
		ctl_sdarena->pdirty += ctl_arena->pdirty;
		ctl_sdarena->pmuzzy += ctl_arena->pmuzzy;
	} else {
		assert(ctl_arena->nthreads == 0);
		assert(ctl_arena->pactive == 0);
		assert(ctl_arena->pdirty == 0);
		assert(ctl_arena->pmuzzy == 0);
	}

	if (config_stats) {
		ctl_arena_stats_t *sdstats = ctl_sdarena->astats;
		ctl_arena_stats_t *astats = ctl_arena->astats;

		if (!destroyed) {
			accum_atomic_zu(&sdstats->astats.mapped,
			    &astats->astats.mapped);
accum_atomic_zu(&sdstats->astats.retained,
&astats->astats.retained);
}
ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
&astats->astats.decay_dirty.npurge);
ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise,
&astats->astats.decay_dirty.nmadvise);
ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged,
&astats->astats.decay_dirty.purged);
ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge,
&astats->astats.decay_muzzy.npurge);
ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise,
&astats->astats.decay_muzzy.nmadvise);
ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged,
&astats->astats.decay_muzzy.purged);
#define OP(mtx) malloc_mutex_prof_merge( \
&(sdstats->astats.mutex_prof_data[ \
arena_prof_mutex_##mtx]), \
&(astats->astats.mutex_prof_data[ \
arena_prof_mutex_##mtx]));
MUTEX_PROF_ARENA_MUTEXES
#undef OP
if (!destroyed) {
accum_atomic_zu(&sdstats->astats.base,
&astats->astats.base);
accum_atomic_zu(&sdstats->astats.internal,
&astats->astats.internal);
accum_atomic_zu(&sdstats->astats.resident,
&astats->astats.resident);
accum_atomic_zu(&sdstats->astats.metadata_thp,
&astats->astats.metadata_thp);
} else {
assert(atomic_load_zu(
&astats->astats.internal, ATOMIC_RELAXED) == 0);
}
if (!destroyed) {
sdstats->allocated_small += astats->allocated_small;
} else {
assert(astats->allocated_small == 0);
}
sdstats->nmalloc_small += astats->nmalloc_small;
sdstats->ndalloc_small += astats->ndalloc_small;
sdstats->nrequests_small += astats->nrequests_small;
		if (!destroyed) {
			accum_atomic_zu(&sdstats->astats.allocated_large,
			    &astats->astats.allocated_large);
		} else {
assert(atomic_load_zu(&astats->astats.allocated_large,
ATOMIC_RELAXED) == 0);
}
ctl_accum_arena_stats_u64(&sdstats->astats.nmalloc_large,
&astats->astats.nmalloc_large);
ctl_accum_arena_stats_u64(&sdstats->astats.ndalloc_large,
&astats->astats.ndalloc_large);
ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large,
&astats->astats.nrequests_large);
accum_atomic_zu(&sdstats->astats.tcache_bytes,
&astats->astats.tcache_bytes);
if (ctl_arena->arena_ind == 0) {
sdstats->astats.uptime = astats->astats.uptime;
}
for (i = 0; i < NBINS; i++) {
sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
sdstats->bstats[i].nrequests +=
astats->bstats[i].nrequests;
if (!destroyed) {
sdstats->bstats[i].curregs +=
astats->bstats[i].curregs;
} else {
assert(astats->bstats[i].curregs == 0);
}
sdstats->bstats[i].nfills += astats->bstats[i].nfills;
sdstats->bstats[i].nflushes +=
astats->bstats[i].nflushes;
sdstats->bstats[i].nslabs += astats->bstats[i].nslabs;
sdstats->bstats[i].reslabs += astats->bstats[i].reslabs;
if (!destroyed) {
sdstats->bstats[i].curslabs +=
astats->bstats[i].curslabs;
} else {
assert(astats->bstats[i].curslabs == 0);
}
malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data,
&astats->bstats[i].mutex_data);
}
for (i = 0; i < NSIZES - NBINS; i++) {
ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc,
&astats->lstats[i].nmalloc);
ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc,
&astats->lstats[i].ndalloc);
ctl_accum_arena_stats_u64(&sdstats->lstats[i].nrequests,
&astats->lstats[i].nrequests);
if (!destroyed) {
sdstats->lstats[i].curlextents +=
astats->lstats[i].curlextents;
} else {
assert(astats->lstats[i].curlextents == 0);
}
}
	}
}
static void
ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
    unsigned i, bool destroyed) {
	ctl_arena_t *ctl_arena = arenas_i(i);

	ctl_arena_clear(ctl_arena);
	ctl_arena_stats_amerge(tsdn, ctl_arena, arena);
	/* Merge into sum stats as well. */
	ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed);
}

static unsigned
ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) {
	unsigned arena_ind;
	ctl_arena_t *ctl_arena;

	if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) !=
	    NULL) {
		ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
		arena_ind = ctl_arena->arena_ind;
	} else {
		arena_ind = ctl_arenas->narenas;
	}

	/* Trigger stats allocation. */
	if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) {
		return UINT_MAX;
	}

	/* Initialize new arena. */
	if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) {
		return UINT_MAX;
	}

	if (arena_ind == ctl_arenas->narenas) {
		ctl_arenas->narenas++;
	}

	return arena_ind;
}
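/*
 * Usage sketch (illustrative, not part of the diff): ctl_arena_init() above
 * backs the "arenas.create" mallctl and now recycles the indices of destroyed
 * arenas.  Assumes a program linked against this jemalloc.
 */
#include <jemalloc/jemalloc.h>

static void *
alloc_from_new_arena(size_t size) {
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);

	/* Ask jemalloc for a fresh (or recycled) arena index. */
	if (mallctl("arenas.create", &arena_ind, &sz, NULL, 0) != 0) {
		return NULL;
	}
	/* Allocate explicitly from that arena, bypassing the tcache. */
	return mallocx(size, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
}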
static void
ctl_background_thread_stats_read(tsdn_t *tsdn) {
background_thread_stats_t *stats = &ctl_stats->background_thread;
if (!have_background_thread ||
background_thread_stats_read(tsdn, stats)) {
memset(stats, 0, sizeof(background_thread_stats_t));
nstime_init(&stats->run_interval, 0);
}
}
static void
ctl_refresh(tsdn_t *tsdn) {
	unsigned i;
	ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL);
	VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas);

	/*
	 * Clear sum stats, since they will be merged into by
	 * ctl_arena_refresh().
	 */
	ctl_arena_clear(ctl_sarena);

	for (i = 0; i < ctl_arenas->narenas; i++) {
		tarenas[i] = arena_get(tsdn, i, false);
	}

	for (i = 0; i < ctl_arenas->narenas; i++) {
		ctl_arena_t *ctl_arena = arenas_i(i);
		bool initialized = (tarenas[i] != NULL);

		ctl_arena->initialized = initialized;
		if (initialized) {
			ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i,
			    false);
		}
	}

	if (config_stats) {
		ctl_stats->allocated = ctl_sarena->astats->allocated_small +
		    atomic_load_zu(&ctl_sarena->astats->astats.allocated_large,
		    ATOMIC_RELAXED);
		ctl_stats->active = (ctl_sarena->pactive << LG_PAGE);
		ctl_stats->metadata = atomic_load_zu(
		    &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) +
		    atomic_load_zu(&ctl_sarena->astats->astats.internal,
		    ATOMIC_RELAXED);
		ctl_stats->metadata_thp = atomic_load_zu(
		    &ctl_sarena->astats->astats.metadata_thp, ATOMIC_RELAXED);
		ctl_stats->resident = atomic_load_zu(
		    &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED);
		ctl_stats->mapped = atomic_load_zu(
		    &ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED);
		ctl_stats->retained = atomic_load_zu(
		    &ctl_sarena->astats->astats.retained, ATOMIC_RELAXED);

		ctl_background_thread_stats_read(tsdn);

#define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx) \
    malloc_mutex_lock(tsdn, &mtx); \
    malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx); \
    malloc_mutex_unlock(tsdn, &mtx);
if (config_prof && opt_prof) {
READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof,
bt2gctx_mtx);
}
if (have_background_thread) {
READ_GLOBAL_MUTEX_PROF_DATA(
global_prof_mutex_background_thread,
background_thread_lock);
} else {
memset(&ctl_stats->mutex_prof_data[
global_prof_mutex_background_thread], 0,
sizeof(mutex_prof_data_t));
}
/* We own ctl mutex already. */
malloc_mutex_prof_read(tsdn,
&ctl_stats->mutex_prof_data[global_prof_mutex_ctl],
&ctl_mtx);
#undef READ_GLOBAL_MUTEX_PROF_DATA
}
ctl_arenas->epoch++;
}
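/*
 * Usage sketch (illustrative, not part of the diff): ctl_refresh() is what
 * runs when an application bumps the "epoch"; the usual pattern is to write
 * the epoch and then read the refreshed totals.  Assumes a program linked
 * against this jemalloc.
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
print_memory_stats(void) {
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	size_t allocated, active, resident;

	/* Refresh the snapshot that stats.* reads come from. */
	mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

	sz = sizeof(size_t);
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0 &&
	    mallctl("stats.active", &active, &sz, NULL, 0) == 0 &&
	    mallctl("stats.resident", &resident, &sz, NULL, 0) == 0) {
		printf("allocated/active/resident: %zu/%zu/%zu\n",
		    allocated, active, resident);
	}
}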
static bool
ctl_init(tsd_t *tsd) {
	bool ret;
	tsdn_t *tsdn = tsd_tsdn(tsd);

	malloc_mutex_lock(tsdn, &ctl_mtx);
	if (!ctl_initialized) {
ctl_arena_t *ctl_sarena, *ctl_darena;
unsigned i;
/*
* Allocate demand-zeroed space for pointers to the full
* range of supported arena indices.
*/
if (ctl_arenas == NULL) {
ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn,
b0get(), sizeof(ctl_arenas_t), QUANTUM);
if (ctl_arenas == NULL) {
ret = true;
goto label_return;
}
}
if (config_stats && ctl_stats == NULL) {
ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(),
sizeof(ctl_stats_t), QUANTUM);
if (ctl_stats == NULL) {
ret = true;
goto label_return;
}
}
		/*
		 * Allocate space for the current full range of arenas
		 * here rather than doing it lazily elsewhere, in order
		 * to limit when OOM-caused errors can occur.
		 */
		if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false,
		    true)) == NULL) {
			ret = true;
			goto label_return;
		}
		ctl_sarena->initialized = true;
if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED,
false, true)) == NULL) {
ret = true;
goto label_return;
}
ctl_arena_clear(ctl_darena);
		/*
		 * Don't toggle ctl_darena to initialized until an arena is
		 * actually destroyed, so that arena.<i>.initialized can be
		 * used to query whether the stats are relevant.
		 */

		ctl_arenas->narenas = narenas_total_get();
		for (i = 0; i < ctl_arenas->narenas; i++) {
			if (arenas_i_impl(tsd, i, false, true) == NULL) {
				ret = true;
				goto label_return;
			}
		}

		ql_new(&ctl_arenas->destroyed);
		ctl_refresh(tsdn);

		ctl_initialized = true;
	}

	ret = false;
label_return:
	malloc_mutex_unlock(tsdn, &ctl_mtx);
	return ret;
}
static int static int
ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
size_t *depthp) size_t *mibp, size_t *depthp) {
{
int ret; int ret;
const char *elm, *tdot, *dot; const char *elm, *tdot, *dot;
size_t elen, i, j; size_t elen, i, j;
...@@ -827,9 +1101,10 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, ...@@ -827,9 +1101,10 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
if (strlen(child->name) == elen && if (strlen(child->name) == elen &&
strncmp(elm, child->name, elen) == 0) { strncmp(elm, child->name, elen) == 0) {
node = child; node = child;
if (nodesp != NULL) if (nodesp != NULL) {
nodesp[i] = nodesp[i] =
(const ctl_node_t *)node; (const ctl_node_t *)node;
}
mibp[i] = j; mibp[i] = j;
break; break;
} }
...@@ -850,14 +1125,15 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, ...@@ -850,14 +1125,15 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
} }
inode = ctl_indexed_node(node->children); inode = ctl_indexed_node(node->children);
node = inode->index(mibp, *depthp, (size_t)index); node = inode->index(tsdn, mibp, *depthp, (size_t)index);
if (node == NULL) { if (node == NULL) {
ret = ENOENT; ret = ENOENT;
goto label_return; goto label_return;
} }
if (nodesp != NULL) if (nodesp != NULL) {
nodesp[i] = (const ctl_node_t *)node; nodesp[i] = (const ctl_node_t *)node;
}
mibp[i] = (size_t)index; mibp[i] = (size_t)index;
} }
...@@ -890,33 +1166,33 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, ...@@ -890,33 +1166,33 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
ret = 0; ret = 0;
label_return: label_return:
return (ret); return ret;
} }
int int
ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
size_t newlen) void *newp, size_t newlen) {
{
int ret; int ret;
size_t depth; size_t depth;
ctl_node_t const *nodes[CTL_MAX_DEPTH]; ctl_node_t const *nodes[CTL_MAX_DEPTH];
size_t mib[CTL_MAX_DEPTH]; size_t mib[CTL_MAX_DEPTH];
const ctl_named_node_t *node; const ctl_named_node_t *node;
if (!ctl_initialized && ctl_init()) { if (!ctl_initialized && ctl_init(tsd)) {
ret = EAGAIN; ret = EAGAIN;
goto label_return; goto label_return;
} }
depth = CTL_MAX_DEPTH; depth = CTL_MAX_DEPTH;
ret = ctl_lookup(name, nodes, mib, &depth); ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
if (ret != 0) if (ret != 0) {
goto label_return; goto label_return;
}
node = ctl_named_node(nodes[depth-1]); node = ctl_named_node(nodes[depth-1]);
if (node != NULL && node->ctl) if (node != NULL && node->ctl) {
ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen); ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
else { } else {
/* The name refers to a partial path through the ctl tree. */ /* The name refers to a partial path through the ctl tree. */
ret = ENOENT; ret = ENOENT;
} }
...@@ -926,29 +1202,27 @@ label_return: ...@@ -926,29 +1202,27 @@ label_return:
} }
int int
ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp) ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) {
{
int ret; int ret;
if (!ctl_initialized && ctl_init()) { if (!ctl_initialized && ctl_init(tsd)) {
ret = EAGAIN; ret = EAGAIN;
goto label_return; goto label_return;
} }
ret = ctl_lookup(name, NULL, mibp, miblenp); ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp);
label_return: label_return:
return(ret); return(ret);
} }
int int
ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen) {
{
int ret; int ret;
const ctl_named_node_t *node; const ctl_named_node_t *node;
size_t i; size_t i;
if (!ctl_initialized && ctl_init()) { if (!ctl_initialized && ctl_init(tsd)) {
ret = EAGAIN; ret = EAGAIN;
goto label_return; goto label_return;
} }
...@@ -970,7 +1244,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ...@@ -970,7 +1244,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
/* Indexed element. */ /* Indexed element. */
inode = ctl_indexed_node(node->children); inode = ctl_indexed_node(node->children);
node = inode->index(mib, miblen, mib[i]); node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
if (node == NULL) { if (node == NULL) {
ret = ENOENT; ret = ENOENT;
goto label_return; goto label_return;
...@@ -979,9 +1253,9 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ...@@ -979,9 +1253,9 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
} }
/* Call the ctl function. */ /* Call the ctl function. */
if (node && node->ctl) if (node && node->ctl) {
ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen); ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
else { } else {
/* Partial MIB. */ /* Partial MIB. */
ret = ENOENT; ret = ENOENT;
} }
...@@ -991,56 +1265,50 @@ label_return: ...@@ -991,56 +1265,50 @@ label_return:
} }
bool
ctl_boot(void) {
	if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}

	ctl_initialized = false;

	return false;
}

void
ctl_prefork(tsdn_t *tsdn) {
	malloc_mutex_prefork(tsdn, &ctl_mtx);
}

void
ctl_postfork_parent(tsdn_t *tsdn) {
	malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
}

void
ctl_postfork_child(tsdn_t *tsdn) {
	malloc_mutex_postfork_child(tsdn, &ctl_mtx);
}
/******************************************************************************/ /******************************************************************************/
/* *_ctl() functions. */ /* *_ctl() functions. */
#define READONLY() do { \ #define READONLY() do { \
if (newp != NULL || newlen != 0) { \ if (newp != NULL || newlen != 0) { \
ret = EPERM; \ ret = EPERM; \
goto label_return; \ goto label_return; \
} \ } \
} while (0) } while (0)
#define WRITEONLY() do { \ #define WRITEONLY() do { \
if (oldp != NULL || oldlenp != NULL) { \ if (oldp != NULL || oldlenp != NULL) { \
ret = EPERM; \ ret = EPERM; \
goto label_return; \ goto label_return; \
} \ } \
} while (0) } while (0)
#define READ_XOR_WRITE() do { \ #define READ_XOR_WRITE() do { \
if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \ if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \
newlen != 0)) { \ newlen != 0)) { \
ret = EPERM; \ ret = EPERM; \
...@@ -1048,7 +1316,7 @@ ctl_postfork_child(void) ...@@ -1048,7 +1316,7 @@ ctl_postfork_child(void)
} \ } \
} while (0) } while (0)
#define READ(v, t) do { \ #define READ(v, t) do { \
if (oldp != NULL && oldlenp != NULL) { \ if (oldp != NULL && oldlenp != NULL) { \
if (*oldlenp != sizeof(t)) { \ if (*oldlenp != sizeof(t)) { \
size_t copylen = (sizeof(t) <= *oldlenp) \ size_t copylen = (sizeof(t) <= *oldlenp) \
...@@ -1061,7 +1329,7 @@ ctl_postfork_child(void) ...@@ -1061,7 +1329,7 @@ ctl_postfork_child(void)
} \ } \
} while (0) } while (0)
#define WRITE(v, t) do { \ #define WRITE(v, t) do { \
if (newp != NULL) { \ if (newp != NULL) { \
if (newlen != sizeof(t)) { \ if (newlen != sizeof(t)) { \
ret = EINVAL; \ ret = EINVAL; \
...@@ -1071,101 +1339,109 @@ ctl_postfork_child(void) ...@@ -1071,101 +1339,109 @@ ctl_postfork_child(void)
} \ } \
} while (0) } while (0)
#define MIB_UNSIGNED(v, i) do { \
if (mib[i] > UINT_MAX) { \
ret = EFAULT; \
goto label_return; \
} \
v = (unsigned)mib[i]; \
} while (0)
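/*
 * Usage sketch (illustrative, not part of the diff): MIB_UNSIGNED() above
 * pulls an arena index back out of a MIB component.  On the application side
 * the matching pattern is to translate a name once and patch the index in
 * before each mallctlbymib() call.  Assumes a program linked against this
 * jemalloc.
 */
#include <jemalloc/jemalloc.h>

static int
decay_arena(unsigned arena_ind) {
	size_t mib[3];
	size_t miblen = sizeof(mib) / sizeof(mib[0]);

	/* Resolve "arena.0.decay" once; mib[1] is the arena index slot. */
	if (mallctlnametomib("arena.0.decay", mib, &miblen) != 0) {
		return -1;
	}
	mib[1] = (size_t)arena_ind;
	return mallctlbymib(mib, miblen, NULL, NULL, NULL, 0);
}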
/* /*
* There's a lot of code duplication in the following macros due to limitations * There's a lot of code duplication in the following macros due to limitations
* in how nested cpp macros are expanded. * in how nested cpp macros are expanded.
*/ */
#define CTL_RO_CLGEN(c, l, n, v, t) \ #define CTL_RO_CLGEN(c, l, n, v, t) \
static int \ static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
void *newp, size_t newlen) \ size_t *oldlenp, void *newp, size_t newlen) { \
{ \
int ret; \ int ret; \
t oldval; \ t oldval; \
\ \
if (!(c)) \ if (!(c)) { \
return (ENOENT); \ return ENOENT; \
if (l) \ } \
malloc_mutex_lock(&ctl_mtx); \ if (l) { \
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
} \
READONLY(); \ READONLY(); \
oldval = (v); \ oldval = (v); \
READ(oldval, t); \ READ(oldval, t); \
\ \
ret = 0; \ ret = 0; \
label_return: \ label_return: \
if (l) \ if (l) { \
malloc_mutex_unlock(&ctl_mtx); \ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
return (ret); \ } \
return ret; \
} }
#define CTL_RO_CGEN(c, n, v, t) \ #define CTL_RO_CGEN(c, n, v, t) \
static int \ static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
void *newp, size_t newlen) \ size_t *oldlenp, void *newp, size_t newlen) { \
{ \
int ret; \ int ret; \
t oldval; \ t oldval; \
\ \
if (!(c)) \ if (!(c)) { \
return (ENOENT); \ return ENOENT; \
malloc_mutex_lock(&ctl_mtx); \ } \
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
READONLY(); \ READONLY(); \
oldval = (v); \ oldval = (v); \
READ(oldval, t); \ READ(oldval, t); \
\ \
ret = 0; \ ret = 0; \
label_return: \ label_return: \
malloc_mutex_unlock(&ctl_mtx); \ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
return (ret); \ return ret; \
} }
#define CTL_RO_GEN(n, v, t) \ #define CTL_RO_GEN(n, v, t) \
static int \ static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
void *newp, size_t newlen) \ size_t *oldlenp, void *newp, size_t newlen) { \
{ \
int ret; \ int ret; \
t oldval; \ t oldval; \
\ \
malloc_mutex_lock(&ctl_mtx); \ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
READONLY(); \ READONLY(); \
oldval = (v); \ oldval = (v); \
READ(oldval, t); \ READ(oldval, t); \
\ \
ret = 0; \ ret = 0; \
label_return: \ label_return: \
malloc_mutex_unlock(&ctl_mtx); \ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
return (ret); \ return ret; \
} }
/* /*
* ctl_mtx is not acquired, under the assumption that no pertinent data will * ctl_mtx is not acquired, under the assumption that no pertinent data will
* mutate during the call. * mutate during the call.
*/ */
#define CTL_RO_NL_CGEN(c, n, v, t) \ #define CTL_RO_NL_CGEN(c, n, v, t) \
static int \ static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
void *newp, size_t newlen) \ size_t *oldlenp, void *newp, size_t newlen) { \
{ \
int ret; \ int ret; \
t oldval; \ t oldval; \
\ \
if (!(c)) \ if (!(c)) { \
return (ENOENT); \ return ENOENT; \
} \
READONLY(); \ READONLY(); \
oldval = (v); \ oldval = (v); \
READ(oldval, t); \ READ(oldval, t); \
\ \
ret = 0; \ ret = 0; \
label_return: \ label_return: \
return (ret); \ return ret; \
} }
#define CTL_RO_NL_GEN(n, v, t) \ #define CTL_RO_NL_GEN(n, v, t) \
static int \ static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
void *newp, size_t newlen) \ size_t *oldlenp, void *newp, size_t newlen) { \
{ \
int ret; \ int ret; \
t oldval; \ t oldval; \
\ \
...@@ -1175,45 +1451,42 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ ...@@ -1175,45 +1451,42 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
\ \
ret = 0; \ ret = 0; \
label_return: \ label_return: \
return (ret); \ return ret; \
} }
#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \ #define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
static int \ static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
void *newp, size_t newlen) \ size_t *oldlenp, void *newp, size_t newlen) { \
{ \
int ret; \ int ret; \
t oldval; \ t oldval; \
tsd_t *tsd; \
\ \
if (!(c)) \ if (!(c)) { \
return (ENOENT); \ return ENOENT; \
} \
READONLY(); \ READONLY(); \
tsd = tsd_fetch(); \
oldval = (m(tsd)); \ oldval = (m(tsd)); \
READ(oldval, t); \ READ(oldval, t); \
\ \
ret = 0; \ ret = 0; \
label_return: \ label_return: \
return (ret); \ return ret; \
} }
#define CTL_RO_BOOL_CONFIG_GEN(n) \ #define CTL_RO_CONFIG_GEN(n, t) \
static int \ static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
void *newp, size_t newlen) \ size_t *oldlenp, void *newp, size_t newlen) { \
{ \
int ret; \ int ret; \
bool oldval; \ t oldval; \
\ \
READONLY(); \ READONLY(); \
oldval = n; \ oldval = n; \
READ(oldval, bool); \ READ(oldval, t); \
\ \
ret = 0; \ ret = 0; \
label_return: \ label_return: \
return (ret); \ return ret; \
} }
/******************************************************************************/

...@@ -1221,57 +1494,187 @@ label_return: \
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)

static int
epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	UNUSED uint64_t newval;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	WRITE(newval, uint64_t);
	if (newp != NULL) {
		ctl_refresh(tsd_tsdn(tsd));
	}
READ(ctl_arenas->epoch, uint64_t);
ret = 0;
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return ret;
}
static int
background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
bool oldval;
if (!have_background_thread) {
return ENOENT;
}
background_thread_ctl_init(tsd_tsdn(tsd));
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
if (newp == NULL) {
oldval = background_thread_enabled();
READ(oldval, bool);
} else {
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
}
oldval = background_thread_enabled();
READ(oldval, bool);
bool newval = *(bool *)newp;
if (newval == oldval) {
ret = 0;
goto label_return;
}
background_thread_enabled_set(tsd_tsdn(tsd), newval);
if (newval) {
if (!can_enable_background_thread) {
malloc_printf("<jemalloc>: Error in dlsym("
"RTLD_NEXT, \"pthread_create\"). Cannot "
"enable background_thread\n");
ret = EFAULT;
goto label_return;
}
if (background_threads_enable(tsd)) {
ret = EFAULT;
goto label_return;
}
} else {
if (background_threads_disable(tsd)) {
ret = EFAULT;
goto label_return;
}
}
}
ret = 0;
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return ret;
}
static int
max_background_threads_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
size_t oldval;
if (!have_background_thread) {
return ENOENT;
}
background_thread_ctl_init(tsd_tsdn(tsd));
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
if (newp == NULL) {
oldval = max_background_threads;
READ(oldval, size_t);
} else {
if (newlen != sizeof(size_t)) {
ret = EINVAL;
goto label_return;
}
oldval = max_background_threads;
READ(oldval, size_t);
size_t newval = *(size_t *)newp;
if (newval == oldval) {
ret = 0;
goto label_return;
}
if (newval > opt_max_background_threads) {
ret = EINVAL;
goto label_return;
}
if (background_thread_enabled()) {
if (!can_enable_background_thread) {
malloc_printf("<jemalloc>: Error in dlsym("
"RTLD_NEXT, \"pthread_create\"). Cannot "
"enable background_thread\n");
ret = EFAULT;
goto label_return;
}
background_thread_enabled_set(tsd_tsdn(tsd), false);
if (background_threads_disable(tsd)) {
ret = EFAULT;
goto label_return;
}
max_background_threads = newval;
background_thread_enabled_set(tsd_tsdn(tsd), true);
if (background_threads_enable(tsd)) {
ret = EFAULT;
goto label_return;
}
} else {
max_background_threads = newval;
}
}
	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
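/*
 * Usage sketch (illustrative, not part of the diff): background_thread_ctl()
 * above implements the writable "background_thread" mallctl; enabling it lets
 * jemalloc purge unused dirty pages from background threads instead of from
 * application threads.  Assumes a build with background thread support.
 */
#include <jemalloc/jemalloc.h>

static int
enable_background_threads(void) {
	bool enable = true;
	/* Write-only: the old value is not needed here. */
	return mallctl("background_thread", NULL, NULL, &enable,
	    sizeof(enable));
}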
/******************************************************************************/

CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
CTL_RO_CONFIG_GEN(config_debug, bool)
CTL_RO_CONFIG_GEN(config_fill, bool)
CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
CTL_RO_CONFIG_GEN(config_prof, bool)
CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
CTL_RO_CONFIG_GEN(config_stats, bool)
CTL_RO_CONFIG_GEN(config_utrace, bool)
CTL_RO_CONFIG_GEN(config_xmalloc, bool)
/******************************************************************************/

CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool)
CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp],
    const char *)
CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena],
    const char *)
CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool)
CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t)
CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *)
CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit,
    size_t)
CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
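/*
 * Usage sketch (illustrative, not part of the diff): the CTL_RO_NL_GEN()
 * entries above expose the parsed option values read-only under the "opt."
 * namespace.  Assumes a program linked against this jemalloc.
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
print_decay_options(void) {
	ssize_t dirty_ms, muzzy_ms;
	size_t sz = sizeof(ssize_t);

	if (mallctl("opt.dirty_decay_ms", &dirty_ms, &sz, NULL, 0) == 0 &&
	    mallctl("opt.muzzy_decay_ms", &muzzy_ms, &sz, NULL, 0) == 0) {
		printf("dirty_decay_ms=%zd muzzy_decay_ms=%zd\n",
		    dirty_ms, muzzy_ms);
	}
}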
...@@ -1287,53 +1690,59 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) ...@@ -1287,53 +1690,59 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
/******************************************************************************/ /******************************************************************************/
static int static int
thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen) {
{
int ret; int ret;
tsd_t *tsd;
arena_t *oldarena; arena_t *oldarena;
unsigned newind, oldind; unsigned newind, oldind;
tsd = tsd_fetch();
oldarena = arena_choose(tsd, NULL); oldarena = arena_choose(tsd, NULL);
if (oldarena == NULL) if (oldarena == NULL) {
return (EAGAIN); return EAGAIN;
}
malloc_mutex_lock(&ctl_mtx); newind = oldind = arena_ind_get(oldarena);
newind = oldind = oldarena->ind;
WRITE(newind, unsigned); WRITE(newind, unsigned);
READ(oldind, unsigned); READ(oldind, unsigned);
if (newind != oldind) { if (newind != oldind) {
arena_t *newarena; arena_t *newarena;
if (newind >= ctl_stats.narenas) { if (newind >= narenas_total_get()) {
/* New arena index is out of range. */ /* New arena index is out of range. */
ret = EFAULT; ret = EFAULT;
goto label_return; goto label_return;
} }
if (have_percpu_arena &&
PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
if (newind < percpu_arena_ind_limit(opt_percpu_arena)) {
/*
* If perCPU arena is enabled, thread_arena
* control is not allowed for the auto arena
* range.
*/
ret = EPERM;
goto label_return;
}
}
/* Initialize arena if necessary. */ /* Initialize arena if necessary. */
newarena = arena_get(tsd, newind, true, true); newarena = arena_get(tsd_tsdn(tsd), newind, true);
if (newarena == NULL) { if (newarena == NULL) {
ret = EAGAIN; ret = EAGAIN;
goto label_return; goto label_return;
} }
/* Set new arena/tcache associations. */ /* Set new arena/tcache associations. */
arena_migrate(tsd, oldind, newind); arena_migrate(tsd, oldind, newind);
if (config_tcache) { if (tcache_available(tsd)) {
tcache_t *tcache = tsd_tcache_get(tsd); tcache_arena_reassociate(tsd_tsdn(tsd),
if (tcache != NULL) { tsd_tcachep_get(tsd), newarena);
tcache_arena_reassociate(tcache, oldarena,
newarena);
}
} }
} }
	ret = 0;
label_return:
	return ret;
}
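/*
 * Usage sketch (illustrative, not part of the diff): thread_arena_ctl() above
 * backs "thread.arena", which reads the calling thread's arena index and
 * optionally rebinds the thread to another arena; rebinding into the
 * automatic range is rejected when per-CPU arenas are enabled.
 */
#include <jemalloc/jemalloc.h>

static int
bind_thread_to_arena(unsigned new_ind, unsigned *old_ind) {
	size_t sz = sizeof(*old_ind);
	/* Read the old binding and install the new one in a single call. */
	return mallctl("thread.arena", old_ind, &sz, &new_ind,
	    sizeof(new_ind));
}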
CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get, CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
...@@ -1346,100 +1755,94 @@ CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp, ...@@ -1346,100 +1755,94 @@ CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
tsd_thread_deallocatedp_get, uint64_t *) tsd_thread_deallocatedp_get, uint64_t *)
static int static int
thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp, thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
{
int ret; int ret;
bool oldval; bool oldval;
if (!config_tcache) oldval = tcache_enabled_get(tsd);
return (ENOENT);
oldval = tcache_enabled_get();
if (newp != NULL) { if (newp != NULL) {
if (newlen != sizeof(bool)) { if (newlen != sizeof(bool)) {
ret = EINVAL; ret = EINVAL;
goto label_return; goto label_return;
} }
tcache_enabled_set(*(bool *)newp); tcache_enabled_set(tsd, *(bool *)newp);
} }
READ(oldval, bool); READ(oldval, bool);
ret = 0; ret = 0;
label_return: label_return:
return (ret); return ret;
} }
static int static int
thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
{
int ret; int ret;
if (!config_tcache) if (!tcache_available(tsd)) {
return (ENOENT); ret = EFAULT;
goto label_return;
}
READONLY(); READONLY();
WRITEONLY(); WRITEONLY();
tcache_flush(); tcache_flush(tsd);
ret = 0; ret = 0;
label_return: label_return:
return (ret); return ret;
} }
static int static int
thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp, thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen) {
{
int ret; int ret;
if (!config_prof) if (!config_prof) {
return (ENOENT); return ENOENT;
}
READ_XOR_WRITE(); READ_XOR_WRITE();
if (newp != NULL) { if (newp != NULL) {
tsd_t *tsd;
if (newlen != sizeof(const char *)) { if (newlen != sizeof(const char *)) {
ret = EINVAL; ret = EINVAL;
goto label_return; goto label_return;
} }
tsd = tsd_fetch();
if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) != if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
0) 0) {
goto label_return; goto label_return;
}
} else { } else {
const char *oldname = prof_thread_name_get(); const char *oldname = prof_thread_name_get(tsd);
READ(oldname, const char *); READ(oldname, const char *);
} }
ret = 0; ret = 0;
label_return: label_return:
return (ret); return ret;
} }
static int static int
thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen) {
{
int ret; int ret;
bool oldval; bool oldval;
if (!config_prof) if (!config_prof) {
return (ENOENT); return ENOENT;
}
oldval = prof_thread_active_get(); oldval = prof_thread_active_get(tsd);
if (newp != NULL) { if (newp != NULL) {
if (newlen != sizeof(bool)) { if (newlen != sizeof(bool)) {
ret = EINVAL; ret = EINVAL;
goto label_return; goto label_return;
} }
if (prof_thread_active_set(*(bool *)newp)) { if (prof_thread_active_set(tsd, *(bool *)newp)) {
ret = EAGAIN; ret = EAGAIN;
goto label_return; goto label_return;
} }
...@@ -1448,25 +1851,17 @@ thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, ...@@ -1448,25 +1851,17 @@ thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp,
ret = 0; ret = 0;
label_return: label_return:
return (ret); return ret;
} }
/******************************************************************************/ /******************************************************************************/
static int static int
tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen) {
{
int ret; int ret;
tsd_t *tsd;
unsigned tcache_ind; unsigned tcache_ind;
if (!config_tcache)
return (ENOENT);
tsd = tsd_fetch();
malloc_mutex_lock(&ctl_mtx);
READONLY(); READONLY();
if (tcaches_create(tsd, &tcache_ind)) { if (tcaches_create(tsd, &tcache_ind)) {
ret = EFAULT; ret = EFAULT;
...@@ -1476,23 +1871,15 @@ tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ...@@ -1476,23 +1871,15 @@ tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = 0; ret = 0;
label_return: label_return:
malloc_mutex_unlock(&ctl_mtx); return ret;
return (ret);
} }
static int static int
tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen) {
{
int ret; int ret;
tsd_t *tsd;
unsigned tcache_ind; unsigned tcache_ind;
if (!config_tcache)
return (ENOENT);
tsd = tsd_fetch();
WRITEONLY(); WRITEONLY();
tcache_ind = UINT_MAX; tcache_ind = UINT_MAX;
WRITE(tcache_ind, unsigned); WRITE(tcache_ind, unsigned);
...@@ -1504,22 +1891,15 @@ tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ...@@ -1504,22 +1891,15 @@ tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = 0; ret = 0;
label_return: label_return:
return (ret); return ret;
} }
static int static int
tcache_destroy_ctl(const size_t *mib, size_t miblen, void *oldp, tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen) {
{
int ret; int ret;
tsd_t *tsd;
unsigned tcache_ind; unsigned tcache_ind;
if (!config_tcache)
return (ENOENT);
tsd = tsd_fetch();
WRITEONLY(); WRITEONLY();
tcache_ind = UINT_MAX; tcache_ind = UINT_MAX;
WRITE(tcache_ind, unsigned); WRITE(tcache_ind, unsigned);
...@@ -1531,71 +1911,239 @@ tcache_destroy_ctl(const size_t *mib, size_t miblen, void *oldp, ...@@ -1531,71 +1911,239 @@ tcache_destroy_ctl(const size_t *mib, size_t miblen, void *oldp,
	ret = 0;
label_return:
	return ret;
}
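/*
 * Usage sketch (illustrative, not part of the diff): the three ctls above
 * manage explicit thread-specific caches.  A typical lifecycle is create,
 * allocate with MALLOCX_TCACHE(), then destroy (which flushes first).
 */
#include <jemalloc/jemalloc.h>

static void
explicit_tcache_roundtrip(void) {
	unsigned tc;
	size_t sz = sizeof(tc);

	if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0) {
		return;
	}
	void *p = mallocx(64, MALLOCX_TCACHE(tc));
	if (p != NULL) {
		dallocx(p, MALLOCX_TCACHE(tc));
	}
	/* Destroying the tcache implicitly flushes it. */
	mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
}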
/******************************************************************************/ /******************************************************************************/
/* ctl_mutex must be held during execution of this function. */ static int
arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
tsdn_t *tsdn = tsd_tsdn(tsd);
unsigned arena_ind;
bool initialized;
READONLY();
MIB_UNSIGNED(arena_ind, 1);
malloc_mutex_lock(tsdn, &ctl_mtx);
initialized = arenas_i(arena_ind)->initialized;
malloc_mutex_unlock(tsdn, &ctl_mtx);
READ(initialized, bool);
ret = 0;
label_return:
return ret;
}
static void static void
arena_purge(unsigned arena_ind) arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) {
{ malloc_mutex_lock(tsdn, &ctl_mtx);
tsd_t *tsd; {
unsigned i; unsigned narenas = ctl_arenas->narenas;
bool refreshed;
VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
tsd = tsd_fetch();
for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) {
tarenas[i] = arena_get(tsd, i, false, false);
if (tarenas[i] == NULL && !refreshed) {
tarenas[i] = arena_get(tsd, i, false, true);
refreshed = true;
}
}
if (arena_ind == ctl_stats.narenas) { /*
unsigned i; * Access via index narenas is deprecated, and scheduled for
for (i = 0; i < ctl_stats.narenas; i++) { * removal in 6.0.0.
if (tarenas[i] != NULL) */
arena_purge_all(tarenas[i]); if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == narenas) {
unsigned i;
VARIABLE_ARRAY(arena_t *, tarenas, narenas);
for (i = 0; i < narenas; i++) {
tarenas[i] = arena_get(tsdn, i, false);
}
/*
* No further need to hold ctl_mtx, since narenas and
* tarenas contain everything needed below.
*/
malloc_mutex_unlock(tsdn, &ctl_mtx);
for (i = 0; i < narenas; i++) {
if (tarenas[i] != NULL) {
arena_decay(tsdn, tarenas[i], false,
all);
}
}
} else {
arena_t *tarena;
assert(arena_ind < narenas);
tarena = arena_get(tsdn, arena_ind, false);
/* No further need to hold ctl_mtx. */
malloc_mutex_unlock(tsdn, &ctl_mtx);
if (tarena != NULL) {
arena_decay(tsdn, tarena, false, all);
}
		}
	}
}
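/*
 * Usage sketch (illustrative, not part of the diff): arena_i_decay() above
 * serves both "arena.<i>.decay" (purge what the decay policy allows) and
 * "arena.<i>.purge" (purge everything); using MALLCTL_ARENAS_ALL as the index
 * applies the operation to every arena.
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static int
purge_all_arenas(void) {
	char name[64];

	snprintf(name, sizeof(name), "arena.%u.purge",
	    (unsigned)MALLCTL_ARENAS_ALL);
	/* Void ctl: no old value, no new value. */
	return mallctl(name, NULL, NULL, NULL, 0);
}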
static int static int
arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen) {
{ int ret;
unsigned arena_ind;
READONLY();
WRITEONLY();
MIB_UNSIGNED(arena_ind, 1);
arena_i_decay(tsd_tsdn(tsd), arena_ind, false);
ret = 0;
label_return:
return ret;
}
static int
arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
unsigned arena_ind;
READONLY(); READONLY();
WRITEONLY(); WRITEONLY();
malloc_mutex_lock(&ctl_mtx); MIB_UNSIGNED(arena_ind, 1);
arena_purge(mib[1]); arena_i_decay(tsd_tsdn(tsd), arena_ind, true);
malloc_mutex_unlock(&ctl_mtx);
ret = 0; ret = 0;
label_return: label_return:
return (ret); return ret;
} }
static int static int
arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen,
void *newp, size_t newlen) void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind,
{ arena_t **arena) {
int ret;
READONLY();
WRITEONLY();
MIB_UNSIGNED(*arena_ind, 1);
*arena = arena_get(tsd_tsdn(tsd), *arena_ind, false);
if (*arena == NULL || arena_is_auto(*arena)) {
ret = EFAULT;
goto label_return;
}
ret = 0;
label_return:
return ret;
}
static void
arena_reset_prepare_background_thread(tsd_t *tsd, unsigned arena_ind) {
/* Temporarily disable the background thread during arena reset. */
if (have_background_thread) {
malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
if (background_thread_enabled()) {
unsigned ind = arena_ind % ncpus;
background_thread_info_t *info =
&background_thread_info[ind];
assert(info->state == background_thread_started);
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
info->state = background_thread_paused;
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
}
}
}
static void
arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) {
if (have_background_thread) {
if (background_thread_enabled()) {
unsigned ind = arena_ind % ncpus;
background_thread_info_t *info =
&background_thread_info[ind];
assert(info->state == background_thread_paused);
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
info->state = background_thread_started;
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
}
malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
}
}
static int
arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned arena_ind;
arena_t *arena;
ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
newp, newlen, &arena_ind, &arena);
if (ret != 0) {
return ret;
}
arena_reset_prepare_background_thread(tsd, arena_ind);
arena_reset(tsd, arena);
arena_reset_finish_background_thread(tsd, arena_ind);
return ret;
}
static int
arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned arena_ind;
arena_t *arena;
ctl_arena_t *ctl_darena, *ctl_arena;
ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
newp, newlen, &arena_ind, &arena);
if (ret != 0) {
goto label_return;
}
if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena,
true) != 0) {
ret = EFAULT;
goto label_return;
}
arena_reset_prepare_background_thread(tsd, arena_ind);
/* Merge stats after resetting and purging arena. */
arena_reset(tsd, arena);
arena_decay(tsd_tsdn(tsd), arena, false, true);
ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED);
ctl_darena->initialized = true;
ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true);
/* Destroy arena. */
arena_destroy(tsd, arena);
ctl_arena = arenas_i(arena_ind);
ctl_arena->initialized = false;
/* Record arena index for later recycling via arenas.create. */
ql_elm_new(ctl_arena, destroyed_link);
ql_tail_insert(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
arena_reset_finish_background_thread(tsd, arena_ind);
assert(ret == 0);
label_return:
return ret;
}
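/*
 * Usage sketch (illustrative, not part of the diff): arena_i_destroy_ctl()
 * above backs "arena.<i>.destroy", which only succeeds for manually created
 * arenas with no attached threads; the index then becomes available for reuse
 * by "arenas.create".
 */
#include <jemalloc/jemalloc.h>

static int
destroy_arena(unsigned arena_ind) {
	size_t mib[3];
	size_t miblen = sizeof(mib) / sizeof(mib[0]);

	if (mallctlnametomib("arena.0.destroy", mib, &miblen) != 0) {
		return -1;
	}
	mib[1] = (size_t)arena_ind;
	return mallctlbymib(mib, miblen, NULL, NULL, NULL, 0);
}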
static int
arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
const char *dss = NULL; const char *dss = NULL;
unsigned arena_ind = mib[1]; unsigned arena_ind;
dss_prec_t dss_prec_old = dss_prec_limit; dss_prec_t dss_prec_old = dss_prec_limit;
dss_prec_t dss_prec = dss_prec_limit; dss_prec_t dss_prec = dss_prec_limit;
malloc_mutex_lock(&ctl_mtx); malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITE(dss, const char *); WRITE(dss, const char *);
MIB_UNSIGNED(arena_ind, 1);
if (dss != NULL) { if (dss != NULL) {
int i; int i;
bool match = false; bool match = false;
...@@ -1614,21 +2162,26 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ...@@ -1614,21 +2162,26 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
} }
} }
if (arena_ind < ctl_stats.narenas) { /*
arena_t *arena = arena_get(tsd_fetch(), arena_ind, false, true); * Access via index narenas is deprecated, and scheduled for removal in
if (arena == NULL || (dss_prec != dss_prec_limit && * 6.0.0.
arena_dss_prec_set(arena, dss_prec))) { */
if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind ==
ctl_arenas->narenas) {
if (dss_prec != dss_prec_limit &&
extent_dss_prec_set(dss_prec)) {
ret = EFAULT; ret = EFAULT;
goto label_return; goto label_return;
} }
dss_prec_old = arena_dss_prec_get(arena); dss_prec_old = extent_dss_prec_get();
} else { } else {
if (dss_prec != dss_prec_limit && arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
chunk_dss_prec_set(dss_prec)) { if (arena == NULL || (dss_prec != dss_prec_limit &&
arena_dss_prec_set(arena, dss_prec))) {
ret = EFAULT; ret = EFAULT;
goto label_return; goto label_return;
} }
dss_prec_old = chunk_dss_prec_get(); dss_prec_old = arena_dss_prec_get(arena);
} }
dss = dss_prec_names[dss_prec_old]; dss = dss_prec_names[dss_prec_old];
...@@ -1636,26 +2189,27 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ...@@ -1636,26 +2189,27 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = 0; ret = 0;
label_return: label_return:
malloc_mutex_unlock(&ctl_mtx); malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret); return ret;
} }
static int static int
arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp, arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
{
int ret; int ret;
unsigned arena_ind = mib[1]; unsigned arena_ind;
arena_t *arena; arena_t *arena;
arena = arena_get(tsd_fetch(), arena_ind, false, true); MIB_UNSIGNED(arena_ind, 1);
arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
if (arena == NULL) { if (arena == NULL) {
ret = EFAULT; ret = EFAULT;
goto label_return; goto label_return;
} }
if (oldp != NULL && oldlenp != NULL) { if (oldp != NULL && oldlenp != NULL) {
size_t oldval = arena_lg_dirty_mult_get(arena); size_t oldval = dirty ? arena_dirty_decay_ms_get(arena) :
arena_muzzy_decay_ms_get(arena);
READ(oldval, ssize_t); READ(oldval, ssize_t);
} }
if (newp != NULL) { if (newp != NULL) {
...@@ -1663,7 +2217,9 @@ arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp, ...@@ -1663,7 +2217,9 @@ arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
ret = EINVAL; ret = EINVAL;
goto label_return; goto label_return;
} }
if (arena_lg_dirty_mult_set(arena, *(ssize_t *)newp)) { if (dirty ? arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena,
*(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd),
arena, *(ssize_t *)newp)) {
ret = EFAULT; ret = EFAULT;
goto label_return; goto label_return;
} }
...@@ -1671,29 +2227,67 @@ arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp, ...@@ -1671,29 +2227,67 @@ arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
ret = 0; ret = 0;
label_return: label_return:
return (ret); return ret;
} }
static int static int
arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp, arena_i_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
{ return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
newlen, true);
}
static int
arena_i_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
newlen, false);
}
static int
arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
unsigned arena_ind = mib[1]; unsigned arena_ind;
arena_t *arena; arena_t *arena;
malloc_mutex_lock(&ctl_mtx); malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
if (arena_ind < narenas_total_get() && (arena = MIB_UNSIGNED(arena_ind, 1);
arena_get(tsd_fetch(), arena_ind, false, true)) != NULL) { if (arena_ind < narenas_total_get()) {
if (newp != NULL) { extent_hooks_t *old_extent_hooks;
chunk_hooks_t old_chunk_hooks, new_chunk_hooks; arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
WRITE(new_chunk_hooks, chunk_hooks_t); if (arena == NULL) {
old_chunk_hooks = chunk_hooks_set(arena, if (arena_ind >= narenas_auto) {
&new_chunk_hooks); ret = EFAULT;
READ(old_chunk_hooks, chunk_hooks_t); goto label_return;
}
old_extent_hooks =
(extent_hooks_t *)&extent_hooks_default;
READ(old_extent_hooks, extent_hooks_t *);
if (newp != NULL) {
/* Initialize a new arena as a side effect. */
extent_hooks_t *new_extent_hooks
JEMALLOC_CC_SILENCE_INIT(NULL);
WRITE(new_extent_hooks, extent_hooks_t *);
arena = arena_init(tsd_tsdn(tsd), arena_ind,
new_extent_hooks);
if (arena == NULL) {
ret = EFAULT;
goto label_return;
}
}
} else { } else {
chunk_hooks_t old_chunk_hooks = chunk_hooks_get(arena); if (newp != NULL) {
READ(old_chunk_hooks, chunk_hooks_t); extent_hooks_t *new_extent_hooks
JEMALLOC_CC_SILENCE_INIT(NULL);
WRITE(new_extent_hooks, extent_hooks_t *);
old_extent_hooks = extent_hooks_set(tsd, arena,
new_extent_hooks);
READ(old_extent_hooks, extent_hooks_t *);
} else {
old_extent_hooks = extent_hooks_get(arena);
READ(old_extent_hooks, extent_hooks_t *);
}
} }
} else { } else {
ret = EFAULT; ret = EFAULT;
...@@ -1701,85 +2295,100 @@ arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp,
} }
ret = 0; ret = 0;
label_return: label_return:
malloc_mutex_unlock(&ctl_mtx); malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret); return ret;
} }
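/*
 * Editorial sketch, not upstream code: reading an arena's extent hook table
 * via the "arena.<i>.extent_hooks" mallctl that arena_i_extent_hooks_ctl()
 * above implements.  Writing an extent_hooks_t * through newp installs custom
 * hooks and, as the code above notes, initializes the arena as a side effect
 * if it does not exist yet.  The helper name is illustrative.
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static extent_hooks_t *
example_get_extent_hooks(unsigned arena_ind) {
	char name[64];
	extent_hooks_t *hooks;
	size_t sz = sizeof(hooks);

	snprintf(name, sizeof(name), "arena.%u.extent_hooks", arena_ind);
	if (mallctl(name, (void *)&hooks, &sz, NULL, 0) != 0) {
		return NULL;
	}
	return hooks;
}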
static const ctl_named_node_t * static int
arena_i_index(const size_t *mib, size_t miblen, size_t i) arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
{ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
const ctl_named_node_t * ret; int ret;
unsigned arena_ind;
arena_t *arena;
malloc_mutex_lock(&ctl_mtx); if (!opt_retain) {
if (i > ctl_stats.narenas) { /* Only relevant when retain is enabled. */
ret = NULL; return ENOENT;
goto label_return; }
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
MIB_UNSIGNED(arena_ind, 1);
if (arena_ind < narenas_total_get() && (arena =
arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
size_t old_limit, new_limit;
if (newp != NULL) {
WRITE(new_limit, size_t);
}
bool err = arena_retain_grow_limit_get_set(tsd, arena,
&old_limit, newp != NULL ? &new_limit : NULL);
if (!err) {
READ(old_limit, size_t);
ret = 0;
} else {
ret = EFAULT;
}
} else {
ret = EFAULT;
}
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return ret;
}
static const ctl_named_node_t *
arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
const ctl_named_node_t *ret;
malloc_mutex_lock(tsdn, &ctl_mtx);
switch (i) {
case MALLCTL_ARENAS_ALL:
case MALLCTL_ARENAS_DESTROYED:
break;
default:
if (i > ctl_arenas->narenas) {
ret = NULL;
goto label_return;
}
break;
} }
ret = super_arena_i_node; ret = super_arena_i_node;
label_return: label_return:
malloc_mutex_unlock(&ctl_mtx); malloc_mutex_unlock(tsdn, &ctl_mtx);
return (ret); return ret;
} }
/******************************************************************************/ /******************************************************************************/
static int static int
arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp, arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen) {
{
int ret; int ret;
unsigned narenas; unsigned narenas;
malloc_mutex_lock(&ctl_mtx); malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY(); READONLY();
if (*oldlenp != sizeof(unsigned)) { if (*oldlenp != sizeof(unsigned)) {
ret = EINVAL; ret = EINVAL;
goto label_return; goto label_return;
} }
narenas = ctl_stats.narenas; narenas = ctl_arenas->narenas;
READ(narenas, unsigned); READ(narenas, unsigned);
ret = 0; ret = 0;
label_return: label_return:
malloc_mutex_unlock(&ctl_mtx); malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret); return ret;
}
static int
arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned nread, i;
malloc_mutex_lock(&ctl_mtx);
READONLY();
if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
ret = EINVAL;
nread = (*oldlenp < ctl_stats.narenas * sizeof(bool))
? (*oldlenp / sizeof(bool)) : ctl_stats.narenas;
} else {
ret = 0;
nread = ctl_stats.narenas;
}
for (i = 0; i < nread; i++)
((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;
label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
} }
static int static int
arenas_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp, arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
{
int ret; int ret;
if (oldp != NULL && oldlenp != NULL) { if (oldp != NULL && oldlenp != NULL) {
size_t oldval = arena_lg_dirty_mult_default_get(); size_t oldval = (dirty ? arena_dirty_decay_ms_default_get() :
arena_muzzy_decay_ms_default_get());
READ(oldval, ssize_t); READ(oldval, ssize_t);
} }
if (newp != NULL) { if (newp != NULL) {
...@@ -1787,7 +2396,8 @@ arenas_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
ret = EINVAL; ret = EINVAL;
goto label_return; goto label_return;
} }
if (arena_lg_dirty_mult_default_set(*(ssize_t *)newp)) { if (dirty ? arena_dirty_decay_ms_default_set(*(ssize_t *)newp)
: arena_muzzy_decay_ms_default_set(*(ssize_t *)newp)) {
ret = EFAULT; ret = EFAULT;
goto label_return; goto label_return;
} }
...@@ -1795,193 +2405,229 @@ arenas_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
ret = 0; ret = 0;
label_return: label_return:
return (ret); return ret;
}
static int
arenas_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
newlen, true);
}
static int
arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
newlen, false);
} }
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t) CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t) CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned) CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned) CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t) CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t) CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t) CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t)
static const ctl_named_node_t * static const ctl_named_node_t *
arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i) arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
{ if (i > NBINS) {
return NULL;
if (i > NBINS) }
return (NULL); return super_arenas_bin_i_node;
return (super_arenas_bin_i_node);
} }
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned) CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned)
CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+mib[2]), size_t) CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(NBINS+(szind_t)mib[2]),
size_t)
static const ctl_named_node_t * static const ctl_named_node_t *
arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i) arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
{ size_t i) {
if (i > NSIZES - NBINS) {
if (i > nlclasses) return NULL;
return (NULL); }
return (super_arenas_lrun_i_node); return super_arenas_lextent_i_node;
} }
CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned) static int
CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+mib[2]), size_t) arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
static const ctl_named_node_t * size_t *oldlenp, void *newp, size_t newlen) {
arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i) int ret;
{ extent_hooks_t *extent_hooks;
unsigned arena_ind;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
if (i > nhclasses) extent_hooks = (extent_hooks_t *)&extent_hooks_default;
return (NULL); WRITE(extent_hooks, extent_hooks_t *);
return (super_arenas_hchunk_i_node); if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) {
ret = EAGAIN;
goto label_return;
}
READ(arena_ind, unsigned);
ret = 0;
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return ret;
} }
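/*
 * Editorial sketch, not upstream code: arenas_create_ctl() above backs the
 * "arenas.create" mallctl, which replaces the 4.x "arenas.extend" (the
 * removed arenas_extend_ctl() is visible just below).  Passing newp == NULL
 * keeps the default extent hooks.  The helper name is illustrative.
 */
#include <jemalloc/jemalloc.h>

static int
example_create_arena(unsigned *arena_ind) {
	size_t sz = sizeof(*arena_ind);

	/* On success, *arena_ind names a fresh arena, usable via MALLOCX_ARENA(). */
	return mallctl("arenas.create", arena_ind, &sz, NULL, 0);
}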
static int static int
arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, arenas_lookup_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen) {
{
int ret; int ret;
unsigned narenas; unsigned arena_ind;
void *ptr;
extent_t *extent;
arena_t *arena;
malloc_mutex_lock(&ctl_mtx); ptr = NULL;
READONLY(); ret = EINVAL;
if (ctl_grow()) { malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
ret = EAGAIN; WRITE(ptr, void *);
extent = iealloc(tsd_tsdn(tsd), ptr);
if (extent == NULL)
goto label_return;
arena = extent_arena_get(extent);
if (arena == NULL)
goto label_return; goto label_return;
}
narenas = ctl_stats.narenas - 1; arena_ind = arena_ind_get(arena);
READ(narenas, unsigned); READ(arena_ind, unsigned);
ret = 0; ret = 0;
label_return: label_return:
malloc_mutex_unlock(&ctl_mtx); malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret); return ret;
} }
/******************************************************************************/ /******************************************************************************/
static int static int
prof_thread_active_init_ctl(const size_t *mib, size_t miblen, void *oldp, prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
{
int ret; int ret;
bool oldval; bool oldval;
if (!config_prof) if (!config_prof) {
return (ENOENT); return ENOENT;
}
if (newp != NULL) { if (newp != NULL) {
if (newlen != sizeof(bool)) { if (newlen != sizeof(bool)) {
ret = EINVAL; ret = EINVAL;
goto label_return; goto label_return;
} }
oldval = prof_thread_active_init_set(*(bool *)newp); oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
} else *(bool *)newp);
oldval = prof_thread_active_init_get(); } else {
oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
}
READ(oldval, bool); READ(oldval, bool);
ret = 0; ret = 0;
label_return: label_return:
return (ret); return ret;
} }
static int static int
prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen) {
{
int ret; int ret;
bool oldval; bool oldval;
if (!config_prof) if (!config_prof) {
return (ENOENT); return ENOENT;
}
if (newp != NULL) { if (newp != NULL) {
if (newlen != sizeof(bool)) { if (newlen != sizeof(bool)) {
ret = EINVAL; ret = EINVAL;
goto label_return; goto label_return;
} }
oldval = prof_active_set(*(bool *)newp); oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
} else } else {
oldval = prof_active_get(); oldval = prof_active_get(tsd_tsdn(tsd));
}
READ(oldval, bool); READ(oldval, bool);
ret = 0; ret = 0;
label_return: label_return:
return (ret); return ret;
} }
static int static int
prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen) {
{
int ret; int ret;
const char *filename = NULL; const char *filename = NULL;
if (!config_prof) if (!config_prof) {
return (ENOENT); return ENOENT;
}
WRITEONLY(); WRITEONLY();
WRITE(filename, const char *); WRITE(filename, const char *);
if (prof_mdump(filename)) { if (prof_mdump(tsd, filename)) {
ret = EFAULT; ret = EFAULT;
goto label_return; goto label_return;
} }
ret = 0; ret = 0;
label_return: label_return:
return (ret); return ret;
} }
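/*
 * Editorial sketch, not upstream code: triggering prof_dump_ctl() above
 * through the write-only "prof.dump" mallctl.  Only meaningful in an
 * --enable-prof build with profiling active; otherwise the ctl fails with
 * ENOENT or EFAULT as shown above.  The helper name is illustrative.
 */
#include <jemalloc/jemalloc.h>

static int
example_dump_heap_profile(const char *filename) {
	if (filename == NULL) {
		/* Let jemalloc choose its default dump filename. */
		return mallctl("prof.dump", NULL, NULL, NULL, 0);
	}
	return mallctl("prof.dump", NULL, NULL, (void *)&filename,
	    sizeof(filename));
}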
static int static int
prof_gdump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen) {
{
int ret; int ret;
bool oldval; bool oldval;
if (!config_prof) if (!config_prof) {
return (ENOENT); return ENOENT;
}
if (newp != NULL) { if (newp != NULL) {
if (newlen != sizeof(bool)) { if (newlen != sizeof(bool)) {
ret = EINVAL; ret = EINVAL;
goto label_return; goto label_return;
} }
oldval = prof_gdump_set(*(bool *)newp); oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
} else } else {
oldval = prof_gdump_get(); oldval = prof_gdump_get(tsd_tsdn(tsd));
}
READ(oldval, bool); READ(oldval, bool);
ret = 0; ret = 0;
label_return: label_return:
return (ret); return ret;
} }
static int static int
prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen) {
{
int ret; int ret;
size_t lg_sample = lg_prof_sample; size_t lg_sample = lg_prof_sample;
tsd_t *tsd;
if (!config_prof) if (!config_prof) {
return (ENOENT); return ENOENT;
}
WRITEONLY(); WRITEONLY();
WRITE(lg_sample, size_t); WRITE(lg_sample, size_t);
if (lg_sample >= (sizeof(uint64_t) << 3)) if (lg_sample >= (sizeof(uint64_t) << 3)) {
lg_sample = (sizeof(uint64_t) << 3) - 1; lg_sample = (sizeof(uint64_t) << 3) - 1;
}
tsd = tsd_fetch();
prof_reset(tsd, lg_sample); prof_reset(tsd, lg_sample);
ret = 0; ret = 0;
label_return: label_return:
return (ret); return ret;
} }
CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t) CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
...@@ -1989,135 +2635,249 @@ CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
/******************************************************************************/ /******************************************************************************/
CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *) CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t) CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t)
CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t) CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t)
CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t) CTL_RO_CGEN(config_stats, stats_metadata_thp, ctl_stats->metadata_thp, size_t)
CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t) CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t) CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t)
CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t)
CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult, CTL_RO_CGEN(config_stats, stats_background_thread_num_threads,
ctl_stats->background_thread.num_threads, size_t)
CTL_RO_CGEN(config_stats, stats_background_thread_num_runs,
ctl_stats->background_thread.num_runs, uint64_t)
CTL_RO_CGEN(config_stats, stats_background_thread_run_interval,
nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t)
CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *)
CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms,
ssize_t)
CTL_RO_GEN(stats_arenas_i_muzzy_decay_ms, arenas_i(mib[2])->muzzy_decay_ms,
ssize_t) ssize_t)
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned) CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t) CTL_RO_GEN(stats_arenas_i_uptime,
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t) nstime_ns(&arenas_i(mib[2])->astats->astats.uptime), uint64_t)
CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
ctl_stats.arenas[mib[2]].astats.mapped, size_t) atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED),
CTL_RO_CGEN(config_stats, stats_arenas_i_npurge, size_t)
ctl_stats.arenas[mib[2]].astats.npurge, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise, atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED),
ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t) size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
ctl_stats.arenas[mib[2]].astats.purged, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_mapped, ctl_arena_stats_read_u64(
ctl_stats.arenas[mib[2]].astats.metadata_mapped, size_t) &arenas_i(mib[2])->astats->astats.decay_dirty.npurge), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_allocated, CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
ctl_stats.arenas[mib[2]].astats.metadata_allocated, size_t) ctl_arena_stats_read_u64(
&arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
ctl_arena_stats_read_u64(
&arenas_i(mib[2])->astats->astats.decay_dirty.purged), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
ctl_arena_stats_read_u64(
&arenas_i(mib[2])->astats->astats.decay_muzzy.npurge), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
ctl_arena_stats_read_u64(
&arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
ctl_arena_stats_read_u64(
&arenas_i(mib[2])->astats->astats.decay_muzzy.purged), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_base,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED),
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED),
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.metadata_thp,
ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes,
ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED),
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated, CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
ctl_stats.arenas[mib[2]].allocated_small, size_t) arenas_i(mib[2])->astats->allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc, CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t) arenas_i(mib[2])->astats->nmalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc, CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t) arenas_i(mib[2])->astats->ndalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests, CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
ctl_stats.arenas[mib[2]].nrequests_small, uint64_t) arenas_i(mib[2])->astats->nrequests_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated, CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
ctl_stats.arenas[mib[2]].astats.allocated_large, size_t) atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large,
ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc, CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t) ctl_arena_stats_read_u64(
&arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc, CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t) ctl_arena_stats_read_u64(
&arenas_i(mib[2])->astats->astats.ndalloc_large), uint64_t)
/*
* Note: "nmalloc" here instead of "nrequests" in the read. This is intentional.
*/
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests, CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t) ctl_arena_stats_read_u64(
CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated, &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t) /* Intentional. */
ctl_stats.arenas[mib[2]].astats.allocated_huge, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc, /* Lock profiling related APIs below. */
ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) #define RO_MUTEX_CTL_GEN(n, l) \
CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc, CTL_RO_CGEN(config_stats, stats_##n##_num_ops, \
ctl_stats.arenas[mib[2]].astats.ndalloc_huge, uint64_t) l.n_lock_ops, uint64_t) \
CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests, CTL_RO_CGEN(config_stats, stats_##n##_num_wait, \
ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) /* Intentional. */ l.n_wait_times, uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq, \
l.n_spin_acquired, uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch, \
l.n_owner_switches, uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time, \
nstime_ns(&l.tot_wait_time), uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time, \
nstime_ns(&l.max_wait_time), uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds, \
l.max_n_thds, uint32_t)
/* Global mutexes. */
#define OP(mtx) \
RO_MUTEX_CTL_GEN(mutexes_##mtx, \
ctl_stats->mutex_prof_data[global_prof_mutex_##mtx])
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
/* Per arena mutexes */
#define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx, \
arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx])
MUTEX_PROF_ARENA_MUTEXES
#undef OP
/* tcache bin mutex */
RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex,
arenas_i(mib[2])->astats->bstats[mib[4]].mutex_data)
#undef RO_MUTEX_CTL_GEN
/* Resets all mutex stats, including global, arena and bin mutexes. */
static int
stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
if (!config_stats) {
return ENOENT;
}
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc, tsdn_t *tsdn = tsd_tsdn(tsd);
ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
ctl_stats.arenas[mib[2]].bstats[mib[4]].curregs, size_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
static const ctl_named_node_t * #define MUTEX_PROF_RESET(mtx) \
stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j) malloc_mutex_lock(tsdn, &mtx); \
{ malloc_mutex_prof_data_reset(tsdn, &mtx); \
malloc_mutex_unlock(tsdn, &mtx);
if (j > NBINS) /* Global mutexes: ctl and prof. */
return (NULL); MUTEX_PROF_RESET(ctl_mtx);
return (super_stats_arenas_i_bins_j_node); if (have_background_thread) {
} MUTEX_PROF_RESET(background_thread_lock);
}
if (config_prof && opt_prof) {
MUTEX_PROF_RESET(bt2gctx_mtx);
}
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc,
ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc,
ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
static const ctl_named_node_t * /* Per arena mutexes. */
stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j) unsigned n = narenas_total_get();
{
if (j > nlclasses) for (unsigned i = 0; i < n; i++) {
return (NULL); arena_t *arena = arena_get(tsdn, i, false);
return (super_stats_arenas_i_lruns_j_node); if (!arena) {
continue;
}
MUTEX_PROF_RESET(arena->large_mtx);
MUTEX_PROF_RESET(arena->extent_avail_mtx);
MUTEX_PROF_RESET(arena->extents_dirty.mtx);
MUTEX_PROF_RESET(arena->extents_muzzy.mtx);
MUTEX_PROF_RESET(arena->extents_retained.mtx);
MUTEX_PROF_RESET(arena->decay_dirty.mtx);
MUTEX_PROF_RESET(arena->decay_muzzy.mtx);
MUTEX_PROF_RESET(arena->tcache_ql_mtx);
MUTEX_PROF_RESET(arena->base->mtx);
for (szind_t i = 0; i < NBINS; i++) {
bin_t *bin = &arena->bins[i];
MUTEX_PROF_RESET(bin->lock);
}
}
#undef MUTEX_PROF_RESET
return 0;
} }
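/*
 * Editorial sketch, not upstream code: the reset above is exposed as the
 * write-only "stats.mutexes.reset" mallctl, which takes no value.
 */
#include <jemalloc/jemalloc.h>

static void
example_reset_mutex_stats(void) {
	mallctl("stats.mutexes.reset", NULL, NULL, NULL, 0);
}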
CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nmalloc, CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, uint64_t) arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_ndalloc, CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
ctl_stats.arenas[mib[2]].hstats[mib[4]].ndalloc, uint64_t) arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nrequests, CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, /* Intentional. */ arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t)
uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks, arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t)
ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills,
arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes,
arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t)
static const ctl_named_node_t * static const ctl_named_node_t *
stats_arenas_i_hchunks_j_index(const size_t *mib, size_t miblen, size_t j) stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
{ size_t j) {
if (j > NBINS) {
return NULL;
}
return super_stats_arenas_i_bins_j_node;
}
if (j > nhclasses) CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
return (NULL); ctl_arena_stats_read_u64(
return (super_stats_arenas_i_hchunks_j_node); &arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
ctl_arena_stats_read_u64(
&arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
ctl_arena_stats_read_u64(
&arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
static const ctl_named_node_t *
stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
size_t j) {
if (j > NSIZES - NBINS) {
return NULL;
}
return super_stats_arenas_i_lextents_j_node;
} }
static const ctl_named_node_t * static const ctl_named_node_t *
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i) stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
{ const ctl_named_node_t *ret;
const ctl_named_node_t * ret; size_t a;
malloc_mutex_lock(&ctl_mtx); malloc_mutex_lock(tsdn, &ctl_mtx);
if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) { a = arenas_i2a_impl(i, true, true);
if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) {
ret = NULL; ret = NULL;
goto label_return; goto label_return;
} }
ret = super_stats_arenas_i_node; ret = super_stats_arenas_i_node;
label_return: label_return:
malloc_mutex_unlock(&ctl_mtx); malloc_mutex_unlock(tsdn, &ctl_mtx);
return (ret); return ret;
} }
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/div.h"
#include "jemalloc/internal/assert.h"
/*
* Suppose we have n = q * d, all integers. We know n and d, and want q = n / d.
*
* For any k, we have (here, all division is exact; not C-style rounding):
* floor(ceil(2^k / d) * n / 2^k) = floor((2^k + r) / d * n / 2^k), where
* r = (-2^k) mod d.
*
* Expanding this out:
* ... = floor(2^k / d * n / 2^k + r / d * n / 2^k)
* = floor(n / d + (r / d) * (n / 2^k)).
*
* The fractional part of n / d is 0 (because of the assumption that d divides n
* exactly), so we have:
* ... = n / d + floor((r / d) * (n / 2^k))
*
* So that our initial expression is equal to the quantity we seek, so long as
* (r / d) * (n / 2^k) < 1.
*
* r is a remainder mod d, so r < d and r / d < 1 always. We can make
* n / 2 ^ k < 1 by setting k = 32. This gets us a value of magic that works.
*/
void
div_init(div_info_t *div_info, size_t d) {
/* Nonsensical. */
assert(d != 0);
/*
* This would make the value of magic too high to fit into a uint32_t
* (we would want magic = 2^32 exactly). This would mess with code gen
* on 32-bit machines.
*/
assert(d != 1);
uint64_t two_to_k = ((uint64_t)1 << 32);
uint32_t magic = (uint32_t)(two_to_k / d);
/*
* We want magic = ceil(2^k / d), but C gives us floor. We have to
* increment it unless the result was exact (i.e. unless d is a power of
* two).
*/
if (two_to_k % d != 0) {
magic++;
}
div_info->magic = magic;
#ifdef JEMALLOC_DEBUG
div_info->d = d;
#endif
}
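/*
 * Editorial sketch, not upstream code: a standalone check of the derivation
 * in the comment above div_init().  magic_for() mirrors div_init()'s
 * computation of ceil(2^32 / d); the quotient is then recovered as
 * (magic * n) >> 32, which is how the div_info_t magic value is assumed to be
 * consumed.  All names here are illustrative.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static uint32_t
magic_for(size_t d) {
	uint64_t two_to_k = (uint64_t)1 << 32;
	uint32_t magic = (uint32_t)(two_to_k / d);

	if (two_to_k % d != 0) {
		magic++;	/* ceil(2^32 / d), as in div_init(). */
	}
	return magic;
}

static void
check_exact_division(void) {
	size_t d = 48;	/* Example divisor, e.g. a slab region size. */
	uint32_t magic = magic_for(d);

	for (size_t q = 0; q < 100000; q++) {
		size_t n = q * d;	/* d divides n exactly. */
		size_t quotient = (size_t)(((uint64_t)magic * n) >> 32);

		/* Here (r / d) * (n / 2^32) < 1, so the floor is exact. */
		assert(quotient == q);
	}
}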
#define JEMALLOC_EXTENT_C_ #define JEMALLOC_EXTENT_C_
#include "jemalloc/internal/jemalloc_internal.h" #include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"
/******************************************************************************/
/* Data. */
rtree_t extents_rtree;
/* Keyed by the address of the extent_t being protected. */
mutex_pool_t extent_mutex_pool;
size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
static const bitmap_info_t extents_bitmap_info =
BITMAP_INFO_INITIALIZER(NPSIZES+1);
static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit,
unsigned arena_ind);
static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
size_t size, bool committed, unsigned arena_ind);
static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
size_t size, bool committed, unsigned arena_ind);
static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
size_t size, size_t offset, size_t length, unsigned arena_ind);
static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length, bool growing_retained);
static bool extent_decommit_default(extent_hooks_t *extent_hooks,
void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#ifdef PAGES_CAN_PURGE_LAZY
static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length, bool growing_retained);
#ifdef PAGES_CAN_PURGE_FORCED
static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length, bool growing_retained);
#ifdef JEMALLOC_MAPS_COALESCE
static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
size_t size, size_t size_a, size_t size_b, bool committed,
unsigned arena_ind);
#endif
static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
bool growing_retained);
#ifdef JEMALLOC_MAPS_COALESCE
static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
size_t size_a, void *addr_b, size_t size_b, bool committed,
unsigned arena_ind);
#endif
static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
bool growing_retained);
const extent_hooks_t extent_hooks_default = {
extent_alloc_default,
extent_dalloc_default,
extent_destroy_default,
extent_commit_default,
extent_decommit_default
#ifdef PAGES_CAN_PURGE_LAZY
,
extent_purge_lazy_default
#else
,
NULL
#endif
#ifdef PAGES_CAN_PURGE_FORCED
,
extent_purge_forced_default
#else
,
NULL
#endif
#ifdef JEMALLOC_MAPS_COALESCE
,
extent_split_default,
extent_merge_default
#endif
};
/* Used exclusively for gdump triggering. */
static atomic_zu_t curpages;
static atomic_zu_t highpages;
/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
* definition.
*/
static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
bool *zero, bool *commit, bool growing_retained);
static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
extent_t *extent, bool *coalesced, bool growing_retained);
static void extent_record(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
bool growing_retained);
/******************************************************************************/ /******************************************************************************/
JEMALLOC_INLINE_C size_t ph_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, ph_link,
extent_quantize(size_t size) extent_esnead_comp)
{
typedef enum {
lock_result_success,
lock_result_failure,
lock_result_no_extent
} lock_result_t;
static lock_result_t
extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
extent_t **result) {
extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
elm, true);
if (extent1 == NULL) {
return lock_result_no_extent;
}
/*
* It's possible that the extent changed out from under us, and with it
* the leaf->extent mapping. We have to recheck while holding the lock.
*/
extent_lock(tsdn, extent1);
extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
&extents_rtree, elm, true);
if (extent1 == extent2) {
*result = extent1;
return lock_result_success;
} else {
extent_unlock(tsdn, extent1);
return lock_result_failure;
}
}
/*
* Returns a pool-locked extent_t * if there's one associated with the given
* address, and NULL otherwise.
*/
static extent_t *
extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) {
extent_t *ret = NULL;
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
rtree_ctx, (uintptr_t)addr, false, false);
if (elm == NULL) {
return NULL;
}
lock_result_t lock_result;
do {
lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret);
} while (lock_result == lock_result_failure);
return ret;
}
extent_t *
extent_alloc(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
extent_t *extent = extent_avail_first(&arena->extent_avail);
if (extent == NULL) {
malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
return base_alloc_extent(tsdn, arena->base);
}
extent_avail_remove(&arena->extent_avail, extent);
malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
return extent;
}
void
extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
extent_avail_insert(&arena->extent_avail, extent);
malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
}
extent_hooks_t *
extent_hooks_get(arena_t *arena) {
return base_extent_hooks_get(arena->base);
}
extent_hooks_t *
extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
background_thread_info_t *info;
if (have_background_thread) {
info = arena_background_thread_info_get(arena);
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
}
extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
if (have_background_thread) {
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
}
return ret;
}
static void
extent_hooks_assure_initialized(arena_t *arena,
extent_hooks_t **r_extent_hooks) {
if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
*r_extent_hooks = extent_hooks_get(arena);
}
}
#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_floor(size_t size) {
size_t ret;
pszind_t pind;
assert(size > 0);
assert((size & PAGE_MASK) == 0);
pind = sz_psz2ind(size - sz_large_pad + 1);
if (pind == 0) {
/*
* Avoid underflow. This short-circuit would also do the right
* thing for all sizes in the range for which there are
* PAGE-spaced size classes, but it's simplest to just handle
* the one case that would cause erroneous results.
*/
return size;
}
ret = sz_pind2sz(pind - 1) + sz_large_pad;
assert(ret <= size);
return ret;
}
#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_ceil(size_t size) {
size_t ret;
assert(size > 0);
assert(size - sz_large_pad <= LARGE_MAXCLASS);
assert((size & PAGE_MASK) == 0);
ret = extent_size_quantize_floor(size);
if (ret < size) {
/*
* Skip a quantization that may have an adequately large extent,
* because under-sized extents may be mixed in. This only
* happens when an unusual size is requested, i.e. for aligned
* allocation, and is just one of several places where linear
* search would potentially find sufficiently aligned available
* memory somewhere lower.
*/
ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
sz_large_pad;
}
return ret;
}
/* Generate pairing heap functions. */
ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
bool
extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
bool delay_coalesce) {
if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
malloc_mutex_rank_exclusive)) {
return true;
}
for (unsigned i = 0; i < NPSIZES+1; i++) {
extent_heap_new(&extents->heaps[i]);
}
bitmap_init(extents->bitmap, &extents_bitmap_info, true);
extent_list_init(&extents->lru);
atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
extents->state = state;
extents->delay_coalesce = delay_coalesce;
return false;
}
extent_state_t
extents_state_get(const extents_t *extents) {
return extents->state;
}
size_t
extents_npages_get(extents_t *extents) {
return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
}
static void
extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
malloc_mutex_assert_owner(tsdn, &extents->mtx);
assert(extent_state_get(extent) == extents->state);
size_t size = extent_size_get(extent);
size_t psz = extent_size_quantize_floor(size);
pszind_t pind = sz_psz2ind(psz);
if (extent_heap_empty(&extents->heaps[pind])) {
bitmap_unset(extents->bitmap, &extents_bitmap_info,
(size_t)pind);
}
extent_heap_insert(&extents->heaps[pind], extent);
extent_list_append(&extents->lru, extent);
size_t npages = size >> LG_PAGE;
/*
* All modifications to npages hold the mutex (as asserted above), so we
* don't need an atomic fetch-add; we can get by with a load followed by
* a store.
*/
size_t cur_extents_npages =
atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
atomic_store_zu(&extents->npages, cur_extents_npages + npages,
ATOMIC_RELAXED);
}
static void
extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
malloc_mutex_assert_owner(tsdn, &extents->mtx);
assert(extent_state_get(extent) == extents->state);
size_t size = extent_size_get(extent);
size_t psz = extent_size_quantize_floor(size);
pszind_t pind = sz_psz2ind(psz);
extent_heap_remove(&extents->heaps[pind], extent);
if (extent_heap_empty(&extents->heaps[pind])) {
bitmap_set(extents->bitmap, &extents_bitmap_info,
(size_t)pind);
}
extent_list_remove(&extents->lru, extent);
size_t npages = size >> LG_PAGE;
/*
* As in extents_insert_locked, we hold extents->mtx and so don't need
* atomic operations for updating extents->npages.
*/
size_t cur_extents_npages =
atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
assert(cur_extents_npages >= npages);
atomic_store_zu(&extents->npages,
cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
}
/*
* Find an extent with size [min_size, max_size) to satisfy the alignment
* requirement. For each size, try only the first extent in the heap.
*/
static extent_t *
extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
size_t alignment) {
pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));
for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
&extents_bitmap_info, (size_t)pind); i < pind_max; i =
(pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
(size_t)i+1)) {
assert(i < NPSIZES);
assert(!extent_heap_empty(&extents->heaps[i]));
extent_t *extent = extent_heap_first(&extents->heaps[i]);
uintptr_t base = (uintptr_t)extent_base_get(extent);
size_t candidate_size = extent_size_get(extent);
assert(candidate_size >= min_size);
uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
PAGE_CEILING(alignment));
if (base > next_align || base + candidate_size <= next_align) {
/* Overflow or not crossing the next alignment. */
continue;
}
size_t leadsize = next_align - base;
if (candidate_size - leadsize >= min_size) {
return extent;
}
}
return NULL;
}
/* Do any-best-fit extent selection, i.e. select any extent that best fits. */
static extent_t *
extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
size_t size) {
pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
(size_t)pind);
if (i < NPSIZES+1) {
/*
* In order to reduce fragmentation, avoid reusing and splitting
* large extents for much smaller sizes.
*/
if ((sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
return NULL;
}
assert(!extent_heap_empty(&extents->heaps[i]));
extent_t *extent = extent_heap_first(&extents->heaps[i]);
assert(extent_size_get(extent) >= size);
return extent;
}
return NULL;
}
/*
* Do first-fit extent selection, i.e. select the oldest/lowest extent that is
* large enough.
*/
static extent_t *
extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
size_t size) {
extent_t *ret = NULL;
pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
&extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i =
(pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
(size_t)i+1)) {
assert(!extent_heap_empty(&extents->heaps[i]));
extent_t *extent = extent_heap_first(&extents->heaps[i]);
assert(extent_size_get(extent) >= size);
if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
ret = extent;
}
if (i == NPSIZES) {
break;
}
assert(i < NPSIZES);
}
return ret;
}
/*
* Do {best,first}-fit extent selection, where the selection policy choice is
* based on extents->delay_coalesce. Best-fit selection requires less
* searching, but its layout policy is less stable and may cause higher virtual
* memory fragmentation as a side effect.
*/
static extent_t *
extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
size_t esize, size_t alignment) {
malloc_mutex_assert_owner(tsdn, &extents->mtx);
size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
/* Beware size_t wrap-around. */
if (max_size < esize) {
return NULL;
}
extent_t *extent = extents->delay_coalesce ?
extents_best_fit_locked(tsdn, arena, extents, max_size) :
extents_first_fit_locked(tsdn, arena, extents, max_size);
if (alignment > PAGE && extent == NULL) {
/*
* max_size guarantees the alignment requirement but is rather
* pessimistic. Next we try to satisfy the aligned allocation
* with sizes in [esize, max_size).
*/
extent = extents_fit_alignment(extents, esize, max_size,
alignment);
}
return extent;
}
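/*
 * Editorial worked example, not upstream code, for the max_size bound in
 * extents_fit_locked() above: with 4 KiB pages, esize = 3 pages (12288 bytes)
 * and alignment = 16 KiB, max_size = 12288 + 16384 - 4096 = 24576 bytes
 * (6 pages).  In the worst case a page-aligned 6-page extent starts one page
 * past a 16 KiB boundary, so the next aligned address lies 3 pages in, which
 * still leaves exactly the 3 pages requested.
 */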
static bool
extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
extent_t *extent) {
extent_state_set(extent, extent_state_active);
bool coalesced;
extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
extents, extent, &coalesced, false);
extent_state_set(extent, extents_state_get(extents));
if (!coalesced) {
return true;
}
extents_insert_locked(tsdn, extents, extent);
return false;
}
extent_t *
extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, void *new_addr, size_t size, size_t pad,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
assert(size + pad != 0);
assert(alignment != 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
new_addr, size, pad, alignment, slab, szind, zero, commit, false);
assert(extent == NULL || extent_dumpable_get(extent));
return extent;
}
void
extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, extent_t *extent) {
assert(extent_base_get(extent) != NULL);
assert(extent_size_get(extent) != 0);
assert(extent_dumpable_get(extent));
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
extent_addr_set(extent, extent_base_get(extent));
extent_zeroed_set(extent, false);
extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
}
extent_t *
extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, size_t npages_min) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
malloc_mutex_lock(tsdn, &extents->mtx);
/*
* Get the LRU coalesced extent, if any. If coalescing was delayed,
* the loop will iterate until the LRU extent is fully coalesced.
*/
extent_t *extent;
while (true) {
/* Get the LRU extent, if any. */
extent = extent_list_first(&extents->lru);
if (extent == NULL) {
goto label_return;
}
/* Check the eviction limit. */
size_t extents_npages = atomic_load_zu(&extents->npages,
ATOMIC_RELAXED);
if (extents_npages <= npages_min) {
extent = NULL;
goto label_return;
}
extents_remove_locked(tsdn, extents, extent);
if (!extents->delay_coalesce) {
break;
}
/* Try to coalesce. */
if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
rtree_ctx, extents, extent)) {
break;
}
/*
* The LRU extent was just coalesced and the result placed in
* the LRU at its neighbor's position. Start over.
*/
}
/*
* Either mark the extent active or deregister it to protect against
* concurrent operations.
*/
switch (extents_state_get(extents)) {
case extent_state_active:
not_reached();
case extent_state_dirty:
case extent_state_muzzy:
extent_state_set(extent, extent_state_active);
break;
case extent_state_retained:
extent_deregister(tsdn, extent);
break;
default:
not_reached();
}
label_return:
malloc_mutex_unlock(tsdn, &extents->mtx);
return extent;
}
static void
extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, extent_t *extent, bool growing_retained) {
/*
* Leak extent after making sure its pages have already been purged, so
* that this is only a virtual memory leak.
*/
if (extents_state_get(extents) == extent_state_dirty) {
if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
extent, 0, extent_size_get(extent), growing_retained)) {
extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
extent, 0, extent_size_get(extent),
growing_retained);
}
}
extent_dalloc(tsdn, arena, extent);
}
void
extents_prefork(tsdn_t *tsdn, extents_t *extents) {
malloc_mutex_prefork(tsdn, &extents->mtx);
}
void
extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
malloc_mutex_postfork_parent(tsdn, &extents->mtx);
}
void
extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
malloc_mutex_postfork_child(tsdn, &extents->mtx);
}
static void
extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
extent_t *extent) {
assert(extent_arena_get(extent) == arena);
assert(extent_state_get(extent) == extent_state_active);
extent_state_set(extent, extents_state_get(extents));
extents_insert_locked(tsdn, extents, extent);
}
static void
extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
extent_t *extent) {
malloc_mutex_lock(tsdn, &extents->mtx);
extent_deactivate_locked(tsdn, arena, extents, extent);
malloc_mutex_unlock(tsdn, &extents->mtx);
}
static void
extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
extent_t *extent) {
assert(extent_arena_get(extent) == arena);
assert(extent_state_get(extent) == extents_state_get(extents));
extents_remove_locked(tsdn, extents, extent);
extent_state_set(extent, extent_state_active);
}
static bool
extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
const extent_t *extent, bool dependent, bool init_missing,
rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
*r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_base_get(extent), dependent, init_missing);
if (!dependent && *r_elm_a == NULL) {
return true;
}
assert(*r_elm_a != NULL);
*r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_last_get(extent), dependent, init_missing);
if (!dependent && *r_elm_b == NULL) {
return true;
}
assert(*r_elm_b != NULL);
return false;
}
static void
extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
if (elm_b != NULL) {
rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
slab);
}
}
static void
extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
szind_t szind) {
assert(extent_slab_get(extent));
/* Register interior. */
for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
rtree_write(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
LG_PAGE), extent, szind, true);
}
}
static void
extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
cassert(config_prof);
/* prof_gdump() requirement. */
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
if (opt_prof && extent_state_get(extent) == extent_state_active) {
size_t nadd = extent_size_get(extent) >> LG_PAGE;
size_t cur = atomic_fetch_add_zu(&curpages, nadd,
ATOMIC_RELAXED) + nadd;
size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
while (cur > high && !atomic_compare_exchange_weak_zu(
&highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
/*
* Don't refresh cur, because it may have decreased
* since this thread lost the highpages update race.
* Note that high is updated in case of CAS failure.
*/
}
if (cur > high && prof_gdump_get_unlocked()) {
prof_gdump(tsdn);
}
}
}
static void
extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
cassert(config_prof);
if (opt_prof && extent_state_get(extent) == extent_state_active) {
size_t nsub = extent_size_get(extent) >> LG_PAGE;
assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
}
}
static bool
extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
rtree_leaf_elm_t *elm_a, *elm_b;
/*
* We need to hold the lock to protect against a concurrent coalesce
* operation that sees us in a partial state.
*/
extent_lock(tsdn, extent);
if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
&elm_a, &elm_b)) {
return true;
}
szind_t szind = extent_szind_get_maybe_invalid(extent);
bool slab = extent_slab_get(extent);
extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
if (slab) {
extent_interior_register(tsdn, rtree_ctx, extent, szind);
}
extent_unlock(tsdn, extent);
if (config_prof && gdump_add) {
extent_gdump_add(tsdn, extent);
}
return false;
}
static bool
extent_register(tsdn_t *tsdn, extent_t *extent) {
return extent_register_impl(tsdn, extent, true);
}
static bool
extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
return extent_register_impl(tsdn, extent, false);
}
static void
extent_reregister(tsdn_t *tsdn, extent_t *extent) {
bool err = extent_register(tsdn, extent);
assert(!err);
}
/*
* Removes all pointers to the given extent from the global rtree indices for
* its interior. This is relevant for slab extents, for which we need to do
* metadata lookups at places other than the head of the extent. We deregister
* on the interior, then, when an extent moves from being an active slab to an
* inactive state.
*/
static void
extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
extent_t *extent) {
size_t i;
assert(extent_slab_get(extent));
for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
rtree_clear(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
LG_PAGE));
}
}
/*
* Removes all pointers to the given extent from the global rtree.
*/
static void
extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
rtree_leaf_elm_t *elm_a, *elm_b;
extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
&elm_a, &elm_b);
extent_lock(tsdn, extent);
extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false);
if (extent_slab_get(extent)) {
extent_interior_deregister(tsdn, rtree_ctx, extent);
extent_slab_set(extent, false);
}
extent_unlock(tsdn, extent);
if (config_prof && gdump) {
extent_gdump_sub(tsdn, extent);
}
}
static void
extent_deregister(tsdn_t *tsdn, extent_t *extent) {
extent_deregister_impl(tsdn, extent, true);
}
static void
extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
extent_deregister_impl(tsdn, extent, false);
}
/*
* Tries to find and remove an extent from extents that can be used for the
* given allocation request.
*/
static extent_t *
extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
assert(alignment > 0);
if (config_debug && new_addr != NULL) {
/*
* Non-NULL new_addr has two use cases:
*
* 1) Recycle a known-extant extent, e.g. during purging.
* 2) Perform in-place expanding reallocation.
*
* Regardless of use case, new_addr must either refer to a
* non-existing extent, or to the base of an extant extent,
* since only active slabs support interior lookups (which of
* course cannot be recycled).
*/
assert(PAGE_ADDR2BASE(new_addr) == new_addr);
assert(pad == 0);
assert(alignment <= PAGE);
}
size_t esize = size + pad;
malloc_mutex_lock(tsdn, &extents->mtx);
extent_hooks_assure_initialized(arena, r_extent_hooks);
extent_t *extent;
if (new_addr != NULL) {
extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr);
if (extent != NULL) {
/*
* We might null-out extent to report an error, but we
* still need to unlock the associated mutex after.
*/
extent_t *unlock_extent = extent;
assert(extent_base_get(extent) == new_addr);
if (extent_arena_get(extent) != arena ||
extent_size_get(extent) < esize ||
extent_state_get(extent) !=
extents_state_get(extents)) {
extent = NULL;
}
extent_unlock(tsdn, unlock_extent);
}
} else {
extent = extents_fit_locked(tsdn, arena, extents, esize,
alignment);
}
if (extent == NULL) {
malloc_mutex_unlock(tsdn, &extents->mtx);
return NULL;
}
extent_activate_locked(tsdn, arena, extents, extent);
malloc_mutex_unlock(tsdn, &extents->mtx);
return extent;
}
/*
* Given an allocation request and an extent guaranteed to be able to satisfy
* it, this splits off lead and trail extents, leaving extent pointing to an
* extent satisfying the allocation.
* This function doesn't put lead or trail into any extents_t; it's the caller's
* job to ensure that they can be reused.
*/
typedef enum {
/*
* Split successfully. lead, extent, and trail, are modified to extents
* describing the ranges before, in, and after the given allocation.
*/
extent_split_interior_ok,
/*
* The extent can't satisfy the given allocation request. None of the
* input extent_t *s are touched.
*/
extent_split_interior_cant_alloc,
/*
* In a potentially invalid state. Must leak (if *to_leak is non-NULL),
* and salvage what's still salvageable (if *to_salvage is non-NULL).
* None of lead, extent, or trail are valid.
*/
extent_split_interior_error
} extent_split_interior_result_t;
static extent_split_interior_result_t
extent_split_interior(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx,
/* The result of splitting, in case of success. */
extent_t **extent, extent_t **lead, extent_t **trail,
/* The mess to clean up, in case of error. */
extent_t **to_leak, extent_t **to_salvage,
void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
szind_t szind, bool growing_retained) {
size_t esize = size + pad;
size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
assert(new_addr == NULL || leadsize == 0);
if (extent_size_get(*extent) < leadsize + esize) {
return extent_split_interior_cant_alloc;
}
size_t trailsize = extent_size_get(*extent) - leadsize - esize;
*lead = NULL;
*trail = NULL;
*to_leak = NULL;
*to_salvage = NULL;
/* Split the lead. */
if (leadsize != 0) {
*lead = *extent;
*extent = extent_split_impl(tsdn, arena, r_extent_hooks,
*lead, leadsize, NSIZES, false, esize + trailsize, szind,
slab, growing_retained);
if (*extent == NULL) {
*to_leak = *lead;
*lead = NULL;
return extent_split_interior_error;
}
}
/* Split the trail. */
if (trailsize != 0) {
*trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
esize, szind, slab, trailsize, NSIZES, false,
growing_retained);
if (*trail == NULL) {
*to_leak = *extent;
*to_salvage = *lead;
*lead = NULL;
*extent = NULL;
return extent_split_interior_error;
}
}
if (leadsize == 0 && trailsize == 0) {
/*
* Splitting causes szind to be set as a side effect, but no
* splitting occurred.
*/
extent_szind_set(*extent, szind);
if (szind != NSIZES) {
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_addr_get(*extent), szind, slab);
if (slab && extent_size_get(*extent) > PAGE) {
rtree_szind_slab_update(tsdn, &extents_rtree,
rtree_ctx,
(uintptr_t)extent_past_get(*extent) -
(uintptr_t)PAGE, szind, slab);
}
}
}
return extent_split_interior_ok;
}
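/*
 * Editor's illustrative sketch (not part of jemalloc): the lead/trail
 * arithmetic that extent_split_interior() performs, expressed over plain
 * integers.  split_plan() and its parameter names are hypothetical;
 * alignment must be a power of two, as ALIGNMENT_CEILING() assumes, and
 * true is returned on failure to mirror the cant_alloc case above.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
static bool
split_plan(uintptr_t base, size_t total, size_t esize, size_t alignment,
    size_t *leadsize, size_t *trailsize) {
	/* Round base up to the requested alignment. */
	uintptr_t aligned = (base + alignment - 1) &
	    ~((uintptr_t)alignment - 1);
	*leadsize = (size_t)(aligned - base);
	if (total < *leadsize + esize) {
		return true;	/* The extent cannot satisfy the request. */
	}
	*trailsize = total - *leadsize - esize;
	return false;
}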
/*
* This fulfills the indicated allocation request out of the given extent (which
* the caller should have ensured was big enough). If there's any unused space
* before or after the resulting allocation, that space is given its own extent
* and put back into extents.
*/
static extent_t *
extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
szind_t szind, extent_t *extent, bool growing_retained) {
extent_t *lead;
extent_t *trail;
extent_t *to_leak;
extent_t *to_salvage;
extent_split_interior_result_t result = extent_split_interior(
tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
&to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind,
growing_retained);
if (result == extent_split_interior_ok) {
if (lead != NULL) {
extent_deactivate(tsdn, arena, extents, lead);
}
if (trail != NULL) {
extent_deactivate(tsdn, arena, extents, trail);
}
return extent;
} else {
/*
* We should have picked an extent that was large enough to
* fulfill our allocation request.
*/
assert(result == extent_split_interior_error);
if (to_salvage != NULL) {
extent_deregister(tsdn, to_salvage);
}
if (to_leak != NULL) {
void *leak = extent_base_get(to_leak);
extent_deregister_no_gdump_sub(tsdn, to_leak);
extents_leak(tsdn, arena, r_extent_hooks, extents,
to_leak, growing_retained);
assert(extent_lock_from_addr(tsdn, rtree_ctx, leak)
== NULL);
}
return NULL;
}
unreachable();
}
/*
* Tries to satisfy the given allocation request by reusing one of the extents
* in the given extents_t.
*/
static extent_t *
extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, void *new_addr, size_t size, size_t pad,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
assert(new_addr == NULL || !slab);
assert(pad == 0 || !slab);
assert(!*zero || !slab);
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
rtree_ctx, extents, new_addr, size, pad, alignment, slab,
growing_retained);
if (extent == NULL) {
return NULL;
}
extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
extents, new_addr, size, pad, alignment, slab, szind, extent,
growing_retained);
if (extent == NULL) {
return NULL;
}
if (*commit && !extent_committed_get(extent)) {
if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
0, extent_size_get(extent), growing_retained)) {
extent_record(tsdn, arena, r_extent_hooks, extents,
extent, growing_retained);
return NULL;
}
extent_zeroed_set(extent, true);
}
if (extent_committed_get(extent)) {
*commit = true;
}
if (extent_zeroed_get(extent)) {
*zero = true;
}
if (pad != 0) {
extent_addr_randomize(tsdn, extent, alignment);
}
assert(extent_state_get(extent) == extent_state_active);
if (slab) {
extent_slab_set(extent, slab);
extent_interior_register(tsdn, rtree_ctx, extent, szind);
}
if (*zero) {
void *addr = extent_base_get(extent);
size_t size = extent_size_get(extent);
if (!extent_zeroed_get(extent)) {
if (pages_purge_forced(addr, size)) {
memset(addr, 0, size);
}
} else if (config_debug) {
size_t *p = (size_t *)(uintptr_t)addr;
for (size_t i = 0; i < size / sizeof(size_t); i++) {
assert(p[i] == 0);
}
}
}
return extent;
}
/*
* If the caller specifies (!*zero), it is still possible to receive zeroed
* memory, in which case *zero is toggled to true. arena_extent_alloc() takes
* advantage of this to avoid demanding zeroed extents, but taking advantage of
* them if they are returned.
*/
static void *
extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
void *ret;
assert(size != 0);
assert(alignment != 0);
/* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary && (ret =
extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
commit)) != NULL) {
return ret;
}
/* mmap. */
if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
!= NULL) {
return ret;
}
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary && (ret =
extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
commit)) != NULL) {
return ret;
}
/* All strategies for allocation failed. */
return NULL;
}
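/*
 * Editor's illustrative sketch (not part of jemalloc): how a caller uses the
 * in/out *zero and *commit convention described above extent_alloc_core().
 * request_pages() and its alloc callback are hypothetical stand-ins; the
 * real entry points take many more parameters.
 */
#include <stdbool.h>
#include <stddef.h>
static void *
request_pages(void *(*alloc)(size_t size, bool *zero, bool *commit),
    size_t size, bool *was_zeroed) {
	bool zero = false;	/* Zeroed memory is not demanded... */
	bool commit = true;	/* ...but committed memory is. */
	void *p = alloc(size, &zero, &commit);
	/*
	 * On return, zero reports whether the pages are known to be zeroed,
	 * so the caller can skip its own memset() when it is true.
	 */
	*was_zeroed = (p != NULL && zero);
	return p;
}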
static void *
extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit) {
void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
ATOMIC_RELAXED));
if (have_madvise_huge && ret) {
pages_set_thp_state(ret, size);
}
return ret;
}
static void *
extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
tsdn_t *tsdn;
arena_t *arena;
tsdn = tsdn_fetch();
arena = arena_get(tsdn, arena_ind, false);
/*
* The arena we're allocating on behalf of must have been initialized
* already.
*/
assert(arena != NULL);
return extent_alloc_default_impl(tsdn, arena, new_addr, size,
alignment, zero, commit);
}
static void
extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
/*
* The only legitimate case of customized extent hooks for a0 is
* hooks with no allocation activities. One such example is to
* place metadata on pre-allocated resources such as huge pages.
* In that case, rely on reentrancy_level checks to catch
* infinite recursions.
*/
pre_reentrancy(tsd, NULL);
} else {
pre_reentrancy(tsd, arena);
}
}
static void
extent_hook_post_reentrancy(tsdn_t *tsdn) {
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
post_reentrancy(tsd);
}
/*
* If virtual memory is retained, create increasingly larger extents from which
* to split requested extents in order to limit the total number of disjoint
* virtual memory ranges retained by each arena.
*/
static extent_t *
extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
bool slab, szind_t szind, bool *zero, bool *commit) {
malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
assert(pad == 0 || !slab);
assert(!*zero || !slab);
size_t esize = size + pad;
size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
/* Beware size_t wrap-around. */
if (alloc_size_min < esize) {
goto label_err;
}
/*
* Find the next extent size in the series that would be large enough to
* satisfy this request.
*/
pszind_t egn_skip = 0;
size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
while (alloc_size < alloc_size_min) {
egn_skip++;
if (arena->extent_grow_next + egn_skip == NPSIZES) {
/* Outside legal range. */
goto label_err;
}
assert(arena->extent_grow_next + egn_skip < NPSIZES);
alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
}
extent_t *extent = extent_alloc(tsdn, arena);
if (extent == NULL) {
goto label_err;
}
bool zeroed = false;
bool committed = false;
void *ptr;
if (*r_extent_hooks == &extent_hooks_default) {
ptr = extent_alloc_default_impl(tsdn, arena, NULL,
alloc_size, PAGE, &zeroed, &committed);
} else {
extent_hook_pre_reentrancy(tsdn, arena);
ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
alloc_size, PAGE, &zeroed, &committed,
arena_ind_get(arena));
extent_hook_post_reentrancy(tsdn);
}
extent_init(extent, arena, ptr, alloc_size, false, NSIZES,
arena_extent_sn_next(arena), extent_state_active, zeroed,
committed, true);
if (ptr == NULL) {
extent_dalloc(tsdn, arena, extent);
goto label_err;
}
if (extent_register_no_gdump_add(tsdn, extent)) {
extents_leak(tsdn, arena, r_extent_hooks,
&arena->extents_retained, extent, true);
goto label_err;
}
if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
*zero = true;
}
if (extent_committed_get(extent)) {
*commit = true;
}
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
extent_t *lead;
extent_t *trail;
extent_t *to_leak;
extent_t *to_salvage;
extent_split_interior_result_t result = extent_split_interior(
tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
&to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
true);
if (result == extent_split_interior_ok) {
if (lead != NULL) {
extent_record(tsdn, arena, r_extent_hooks,
&arena->extents_retained, lead, true);
}
if (trail != NULL) {
extent_record(tsdn, arena, r_extent_hooks,
&arena->extents_retained, trail, true);
}
} else {
/*
* We should have allocated a sufficiently large extent; the
* cant_alloc case should not occur.
*/
assert(result == extent_split_interior_error);
if (to_salvage != NULL) {
if (config_prof) {
extent_gdump_add(tsdn, to_salvage);
}
extent_record(tsdn, arena, r_extent_hooks,
&arena->extents_retained, to_salvage, true);
}
if (to_leak != NULL) {
extent_deregister_no_gdump_sub(tsdn, to_leak);
extents_leak(tsdn, arena, r_extent_hooks,
&arena->extents_retained, to_leak, true);
}
goto label_err;
}
if (*commit && !extent_committed_get(extent)) {
if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
extent_size_get(extent), true)) {
extent_record(tsdn, arena, r_extent_hooks,
&arena->extents_retained, extent, true);
goto label_err;
}
extent_zeroed_set(extent, true);
}
/*
* Increment extent_grow_next if doing so wouldn't exceed the allowed
* range.
*/
if (arena->extent_grow_next + egn_skip + 1 <=
arena->retain_grow_limit) {
arena->extent_grow_next += egn_skip + 1;
} else {
arena->extent_grow_next = arena->retain_grow_limit;
}
/* All opportunities for failure are past. */
malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
if (config_prof) {
/* Adjust gdump stats now that extent is final size. */
extent_gdump_add(tsdn, extent);
}
if (pad != 0) {
extent_addr_randomize(tsdn, extent, alignment);
}
if (slab) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
&rtree_ctx_fallback);
extent_slab_set(extent, true);
extent_interior_register(tsdn, rtree_ctx, extent, szind);
}
if (*zero && !extent_zeroed_get(extent)) {
void *addr = extent_base_get(extent);
size_t size = extent_size_get(extent);
if (pages_purge_forced(addr, size)) {
memset(addr, 0, size);
}
}
return extent;
label_err:
malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
return NULL;
}
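/*
 * Editor's illustrative sketch (not part of jemalloc): the grow-size search
 * performed at the top of extent_grow_retained(), with sz_pind2sz() replaced
 * by a hypothetical power-of-two series so the snippet is self-contained
 * (the real code walks jemalloc's psz size-class table).  Assumes
 * grow_next + nclasses stays below the bit width of size_t.  Returns true if
 * no size class in range is large enough; on success the caller advances its
 * grow index by skip + 1, clamped to a retain_grow_limit, as done near the
 * end of the function above.
 */
#include <stdbool.h>
#include <stddef.h>
static bool
pick_grow_size(size_t grow_next, size_t nclasses, size_t alloc_size_min,
    size_t *skip, size_t *alloc_size) {
	size_t egn_skip = 0;
	size_t sz = (size_t)1 << (grow_next + egn_skip);
	while (sz < alloc_size_min) {
		egn_skip++;
		if (grow_next + egn_skip == nclasses) {
			return true;	/* Outside the legal range. */
		}
		sz = (size_t)1 << (grow_next + egn_skip);
	}
	*skip = egn_skip;
	*alloc_size = sz;
	return false;
}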
static extent_t *
extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
assert(size != 0);
assert(alignment != 0);
malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);
extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
&arena->extents_retained, new_addr, size, pad, alignment, slab,
szind, zero, commit, true);
if (extent != NULL) {
malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
if (config_prof) {
extent_gdump_add(tsdn, extent);
}
} else if (opt_retain && new_addr == NULL) {
extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
pad, alignment, slab, szind, zero, commit);
/* extent_grow_retained() always releases extent_grow_mtx. */
} else {
malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
}
malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);
return extent;
}
static extent_t *
extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
size_t esize = size + pad;
extent_t *extent = extent_alloc(tsdn, arena);
if (extent == NULL) {
return NULL;
}
void *addr;
if (*r_extent_hooks == &extent_hooks_default) {
/* Call directly to propagate tsdn. */
addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
alignment, zero, commit);
} else {
extent_hook_pre_reentrancy(tsdn, arena);
addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
esize, alignment, zero, commit, arena_ind_get(arena));
extent_hook_post_reentrancy(tsdn);
}
if (addr == NULL) {
extent_dalloc(tsdn, arena, extent);
return NULL;
}
extent_init(extent, arena, addr, esize, slab, szind,
arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
true);
if (pad != 0) {
extent_addr_randomize(tsdn, extent, alignment);
}
if (extent_register(tsdn, extent)) {
extents_leak(tsdn, arena, r_extent_hooks,
&arena->extents_retained, extent, false);
return NULL;
}
return extent;
}
extent_t *
extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
extent_hooks_assure_initialized(arena, r_extent_hooks);
extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
new_addr, size, pad, alignment, slab, szind, zero, commit);
if (extent == NULL) {
if (opt_retain && new_addr != NULL) {
/*
* When retain is enabled and new_addr is set, we do not
* attempt extent_alloc_wrapper_hard which does mmap
* that is very unlikely to succeed (unless it happens
* to be at the end).
*/
return NULL;
}
extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
new_addr, size, pad, alignment, slab, szind, zero, commit);
}
assert(extent == NULL || extent_dumpable_get(extent));
return extent;
}
static bool
extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
const extent_t *outer) {
assert(extent_arena_get(inner) == arena);
if (extent_arena_get(outer) != arena) {
return false;
}
assert(extent_state_get(inner) == extent_state_active);
if (extent_state_get(outer) != extents->state) {
return false;
}
if (extent_committed_get(inner) != extent_committed_get(outer)) {
return false;
}
return true;
}
static bool
extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
bool growing_retained) {
assert(extent_can_coalesce(arena, extents, inner, outer));
extent_activate_locked(tsdn, arena, extents, outer);
malloc_mutex_unlock(tsdn, &extents->mtx);
bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
forward ? inner : outer, forward ? outer : inner, growing_retained);
malloc_mutex_lock(tsdn, &extents->mtx);
if (err) {
extent_deactivate_locked(tsdn, arena, extents, outer);
}
return err;
}
static extent_t *
extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
extent_t *extent, bool *coalesced, bool growing_retained) {
	/*
	 * Continue attempting to coalesce until failure, to protect against
	 * races with other threads that are thwarted by this one.
	 */
	bool again;
do {
again = false;
/* Try to coalesce forward. */
extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
extent_past_get(extent));
if (next != NULL) {
/*
* extents->mtx only protects against races for
* like-state extents, so call extent_can_coalesce()
* before releasing next's pool lock.
*/
bool can_coalesce = extent_can_coalesce(arena, extents,
extent, next);
extent_unlock(tsdn, next);
if (can_coalesce && !extent_coalesce(tsdn, arena,
r_extent_hooks, extents, extent, next, true,
growing_retained)) {
if (extents->delay_coalesce) {
/* Do minimal coalescing. */
*coalesced = true;
return extent;
}
again = true;
}
}
/* Try to coalesce backward. */
extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
extent_before_get(extent));
if (prev != NULL) {
bool can_coalesce = extent_can_coalesce(arena, extents,
extent, prev);
extent_unlock(tsdn, prev);
if (can_coalesce && !extent_coalesce(tsdn, arena,
r_extent_hooks, extents, extent, prev, false,
growing_retained)) {
extent = prev;
if (extents->delay_coalesce) {
/* Do minimal coalescing. */
*coalesced = true;
return extent;
}
again = true;
}
}
} while (again);
if (extents->delay_coalesce) {
*coalesced = false;
}
return extent;
}
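/*
 * Editor's illustrative sketch (not part of jemalloc): the fixpoint shape of
 * extent_try_coalesce() above, over a hypothetical block_t type with
 * caller-supplied neighbor_of()/try_merge() callbacks.  The real code also
 * drops and reacquires extents->mtx around each merge attempt and can stop
 * after the first successful merge when delay_coalesce is set.
 */
#include <stdbool.h>
#include <stddef.h>
typedef struct block_s block_t;
static block_t *
coalesce_fixpoint(block_t *b,
    block_t *(*neighbor_of)(block_t *b, bool forward),
    bool (*try_merge)(block_t *into, block_t *from)) {
	bool again;
	do {
		again = false;
		block_t *next = neighbor_of(b, true);	/* Forward neighbor. */
		if (next != NULL && try_merge(b, next)) {
			again = true;	/* More neighbors may now abut. */
		}
		block_t *prev = neighbor_of(b, false);	/* Backward neighbor. */
		if (prev != NULL && try_merge(prev, b)) {
			b = prev;	/* The merged block starts at prev. */
			again = true;
		}
	} while (again);
	return b;
}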
/*
* Does the metadata management portions of putting an unused extent into the
* given extents_t (coalesces, deregisters slab interiors, the heap operations).
*/
static void
extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, extent_t *extent, bool growing_retained) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
assert((extents_state_get(extents) != extent_state_dirty &&
extents_state_get(extents) != extent_state_muzzy) ||
!extent_zeroed_get(extent));
malloc_mutex_lock(tsdn, &extents->mtx);
extent_hooks_assure_initialized(arena, r_extent_hooks);
extent_szind_set(extent, NSIZES);
if (extent_slab_get(extent)) {
extent_interior_deregister(tsdn, rtree_ctx, extent);
extent_slab_set(extent, false);
}
assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_base_get(extent), true) == extent);
if (!extents->delay_coalesce) {
extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
rtree_ctx, extents, extent, NULL, growing_retained);
} else if (extent_size_get(extent) >= LARGE_MINCLASS) {
/* Always coalesce large extents eagerly. */
bool coalesced;
size_t prev_size;
do {
prev_size = extent_size_get(extent);
assert(extent_state_get(extent) == extent_state_active);
extent = extent_try_coalesce(tsdn, arena,
r_extent_hooks, rtree_ctx, extents, extent,
&coalesced, growing_retained);
} while (coalesced &&
extent_size_get(extent) >= prev_size + LARGE_MINCLASS);
}
extent_deactivate_locked(tsdn, arena, extents, extent);
malloc_mutex_unlock(tsdn, &extents->mtx);
}
void
extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
if (extent_register(tsdn, extent)) {
extents_leak(tsdn, arena, &extent_hooks,
&arena->extents_retained, extent, false);
return;
}
extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
}
static bool
extent_dalloc_default_impl(void *addr, size_t size) {
if (!have_dss || !extent_in_dss(addr)) {
return extent_dalloc_mmap(addr, size);
}
return true;
}
static bool
extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
bool committed, unsigned arena_ind) {
return extent_dalloc_default_impl(addr, size);
}
static bool
extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent) {
bool err;
assert(extent_base_get(extent) != NULL);
assert(extent_size_get(extent) != 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
extent_addr_set(extent, extent_base_get(extent));
extent_hooks_assure_initialized(arena, r_extent_hooks);
/* Try to deallocate. */
if (*r_extent_hooks == &extent_hooks_default) {
/* Call directly to propagate tsdn. */
err = extent_dalloc_default_impl(extent_base_get(extent),
extent_size_get(extent));
} else {
extent_hook_pre_reentrancy(tsdn, arena);
err = ((*r_extent_hooks)->dalloc == NULL ||
(*r_extent_hooks)->dalloc(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent),
extent_committed_get(extent), arena_ind_get(arena)));
extent_hook_post_reentrancy(tsdn);
}
if (!err) {
extent_dalloc(tsdn, arena, extent);
}
return err;
}
void
extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	assert(extent_dumpable_get(extent));
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);
	/*
	 * Deregister first to avoid a race with other allocating threads, and
	 * reregister if deallocation fails.
	 */
	extent_deregister(tsdn, extent);
	if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) {
		return;
	}
extent_reregister(tsdn, extent);
if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_pre_reentrancy(tsdn, arena);
}
/* Try to decommit; purge if that fails. */
bool zeroed;
if (!extent_committed_get(extent)) {
zeroed = true;
} else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
0, extent_size_get(extent))) {
zeroed = true;
} else if ((*r_extent_hooks)->purge_forced != NULL &&
!(*r_extent_hooks)->purge_forced(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent), 0,
extent_size_get(extent), arena_ind_get(arena))) {
zeroed = true;
} else if (extent_state_get(extent) == extent_state_muzzy ||
((*r_extent_hooks)->purge_lazy != NULL &&
!(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent), 0,
extent_size_get(extent), arena_ind_get(arena)))) {
zeroed = false;
} else {
zeroed = false;
}
if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_post_reentrancy(tsdn);
}
extent_zeroed_set(extent, zeroed);
if (config_prof) {
extent_gdump_sub(tsdn, extent);
}
extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
extent, false);
}
static void
extent_destroy_default_impl(void *addr, size_t size) {
if (!have_dss || !extent_in_dss(addr)) {
pages_unmap(addr, size);
}
}
static void
extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
bool committed, unsigned arena_ind) {
extent_destroy_default_impl(addr, size);
}
void
extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent) {
assert(extent_base_get(extent) != NULL);
assert(extent_size_get(extent) != 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
/* Deregister first to avoid a race with other allocating threads. */
extent_deregister(tsdn, extent);
extent_addr_set(extent, extent_base_get(extent));
extent_hooks_assure_initialized(arena, r_extent_hooks);
/* Try to destroy; silently fail otherwise. */
if (*r_extent_hooks == &extent_hooks_default) {
/* Call directly to propagate tsdn. */
extent_destroy_default_impl(extent_base_get(extent),
extent_size_get(extent));
} else if ((*r_extent_hooks)->destroy != NULL) {
extent_hook_pre_reentrancy(tsdn, arena);
(*r_extent_hooks)->destroy(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent),
extent_committed_get(extent), arena_ind_get(arena));
extent_hook_post_reentrancy(tsdn);
}
extent_dalloc(tsdn, arena, extent);
}
static bool
extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
size_t offset, size_t length, unsigned arena_ind) {
return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
length);
}
static bool
extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length, bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
extent_hooks_assure_initialized(arena, r_extent_hooks);
if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_pre_reentrancy(tsdn, arena);
}
bool err = ((*r_extent_hooks)->commit == NULL ||
(*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
extent_size_get(extent), offset, length, arena_ind_get(arena)));
if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_post_reentrancy(tsdn);
}
extent_committed_set(extent, extent_committed_get(extent) || !err);
return err;
}
bool
extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length) {
return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
length, false);
}
static bool
extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
size_t offset, size_t length, unsigned arena_ind) {
return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
length);
}
bool
extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
extent_hooks_assure_initialized(arena, r_extent_hooks);
if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_pre_reentrancy(tsdn, arena);
}
bool err = ((*r_extent_hooks)->decommit == NULL ||
(*r_extent_hooks)->decommit(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent), offset, length,
arena_ind_get(arena)));
if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_post_reentrancy(tsdn);
}
extent_committed_set(extent, extent_committed_get(extent) && err);
return err;
}
#ifdef PAGES_CAN_PURGE_LAZY
static bool
extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
size_t offset, size_t length, unsigned arena_ind) {
assert(addr != NULL);
assert((offset & PAGE_MASK) == 0);
assert(length != 0);
assert((length & PAGE_MASK) == 0);
return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
length);
}
#endif
static bool
extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length, bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
extent_hooks_assure_initialized(arena, r_extent_hooks);
if ((*r_extent_hooks)->purge_lazy == NULL) {
return true;
}
if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_pre_reentrancy(tsdn, arena);
}
bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent), offset, length,
arena_ind_get(arena));
if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_post_reentrancy(tsdn);
}
return err;
}
bool
extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length) {
return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
offset, length, false);
}
#ifdef PAGES_CAN_PURGE_FORCED
static bool
extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
size_t size, size_t offset, size_t length, unsigned arena_ind) {
assert(addr != NULL);
assert((offset & PAGE_MASK) == 0);
assert(length != 0);
assert((length & PAGE_MASK) == 0);
return pages_purge_forced((void *)((uintptr_t)addr +
(uintptr_t)offset), length);
}
#endif
static bool
extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length, bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
extent_hooks_assure_initialized(arena, r_extent_hooks);
if ((*r_extent_hooks)->purge_forced == NULL) {
return true;
}
if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_pre_reentrancy(tsdn, arena);
}
bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent), offset, length,
arena_ind_get(arena));
if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_post_reentrancy(tsdn);
}
return err;
}
bool
extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length) {
return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
offset, length, false);
}
#ifdef JEMALLOC_MAPS_COALESCE
static bool
extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
return !maps_coalesce;
}
#endif
/*
* Accepts the extent to split, and the characteristics of each side of the
* split. The 'a' parameters go with the 'lead' of the resulting pair of
* extents (the lower addressed portion of the split), and the 'b' parameters go
* with the trail (the higher addressed portion). This makes 'extent' the lead,
* and returns the trail (except in case of error).
*/
static extent_t *
extent_split_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
bool growing_retained) {
assert(extent_size_get(extent) == size_a + size_b);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
extent_hooks_assure_initialized(arena, r_extent_hooks);
if ((*r_extent_hooks)->split == NULL) {
return NULL;
}
extent_t *trail = extent_alloc(tsdn, arena);
if (trail == NULL) {
goto label_error_a;
}
extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
extent_state_get(extent), extent_zeroed_get(extent),
extent_committed_get(extent), extent_dumpable_get(extent));
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
{
extent_t lead;
extent_init(&lead, arena, extent_addr_get(extent), size_a,
slab_a, szind_a, extent_sn_get(extent),
extent_state_get(extent), extent_zeroed_get(extent),
extent_committed_get(extent), extent_dumpable_get(extent));
		extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
true, &lead_elm_a, &lead_elm_b);
	}
rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
&trail_elm_a, &trail_elm_b);
	if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
|| trail_elm_b == NULL) {
goto label_error_b;
}
extent_lock2(tsdn, extent, trail);
if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_pre_reentrancy(tsdn, arena);
}
bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
size_a + size_b, size_a, size_b, extent_committed_get(extent),
arena_ind_get(arena));
if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_post_reentrancy(tsdn);
}
if (err) {
goto label_error_c;
}
extent_size_set(extent, size_a);
extent_szind_set(extent, szind_a);
extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
szind_a, slab_a);
extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
szind_b, slab_b);
extent_unlock2(tsdn, extent, trail);
return trail;
label_error_c:
extent_unlock2(tsdn, extent, trail);
label_error_b:
extent_dalloc(tsdn, arena, trail);
label_error_a:
return NULL;
}
extent_t *
extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
szind_a, slab_a, size_b, szind_b, slab_b, false);
}
static bool
extent_merge_default_impl(void *addr_a, void *addr_b) {
	if (!maps_coalesce) {
		return true;
	}
if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
return true;
}
return false;
}
#ifdef JEMALLOC_MAPS_COALESCE
static bool
extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
return extent_merge_default_impl(addr_a, addr_b);
}
#endif
static bool
extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
extent_hooks_assure_initialized(arena, r_extent_hooks);
if ((*r_extent_hooks)->merge == NULL) {
return true;
}
bool err;
if (*r_extent_hooks == &extent_hooks_default) {
/* Call directly to propagate tsdn. */
err = extent_merge_default_impl(extent_base_get(a),
extent_base_get(b));
} else {
extent_hook_pre_reentrancy(tsdn, arena);
err = (*r_extent_hooks)->merge(*r_extent_hooks,
extent_base_get(a), extent_size_get(a), extent_base_get(b),
extent_size_get(b), extent_committed_get(a),
arena_ind_get(arena));
extent_hook_post_reentrancy(tsdn);
}
if (err) {
return true;
}
/*
* The rtree writes must happen while all the relevant elements are
* owned, so the following code uses decomposed helper functions rather
* than extent_{,de}register() to do things in the right order.
*/
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
&a_elm_b);
extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
&b_elm_b);
extent_lock2(tsdn, a, b);
if (a_elm_b != NULL) {
rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
NSIZES, false);
}
if (b_elm_b != NULL) {
rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
NSIZES, false);
} else {
b_elm_b = b_elm_a;
}
extent_size_set(a, extent_size_get(a) + extent_size_get(b));
extent_szind_set(a, NSIZES);
extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
extent_sn_get(a) : extent_sn_get(b));
extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false);
extent_unlock2(tsdn, a, b);
extent_dalloc(tsdn, extent_arena_get(b), b);
return false;
}
bool
extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
}
bool
extent_boot(void) {
if (rtree_new(&extents_rtree, true)) {
return true;
}
if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
WITNESS_RANK_EXTENT_POOL)) {
return true;
}
if (have_dss) {
extent_dss_boot();
}
return false;
}
#define JEMALLOC_EXTENT_DSS_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/spin.h"
/******************************************************************************/
/* Data. */
const char *opt_dss = DSS_DEFAULT;
const char *dss_prec_names[] = {
"disabled",
"primary",
"secondary",
"N/A"
};
/*
* Current dss precedence default, used when creating new arenas. NB: This is
* stored as unsigned rather than dss_prec_t because in principle there's no
* guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
* atomic operations to synchronize the setting.
*/
static atomic_u_t dss_prec_default = ATOMIC_INIT(
(unsigned)DSS_PREC_DEFAULT);
/* Base address of the DSS. */
static void *dss_base;
/* Atomic boolean indicating whether a thread is currently extending DSS. */
static atomic_b_t dss_extending;
/* Atomic boolean indicating whether the DSS is exhausted. */
static atomic_b_t dss_exhausted;
/* Atomic current upper limit on DSS addresses. */
static atomic_p_t dss_max;
/******************************************************************************/
static void *
extent_dss_sbrk(intptr_t increment) {
#ifdef JEMALLOC_DSS
return sbrk(increment);
#else
not_implemented();
return NULL;
#endif
}
dss_prec_t
extent_dss_prec_get(void) {
dss_prec_t ret;
if (!have_dss) {
return dss_prec_disabled;
}
ret = (dss_prec_t)atomic_load_u(&dss_prec_default, ATOMIC_ACQUIRE);
return ret;
}
bool
extent_dss_prec_set(dss_prec_t dss_prec) {
if (!have_dss) {
return (dss_prec != dss_prec_disabled);
}
atomic_store_u(&dss_prec_default, (unsigned)dss_prec, ATOMIC_RELEASE);
return false;
}
static void
extent_dss_extending_start(void) {
spin_t spinner = SPIN_INITIALIZER;
while (true) {
bool expected = false;
if (atomic_compare_exchange_weak_b(&dss_extending, &expected,
true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) {
break;
}
spin_adaptive(&spinner);
}
}
static void
extent_dss_extending_finish(void) {
assert(atomic_load_b(&dss_extending, ATOMIC_RELAXED));
atomic_store_b(&dss_extending, false, ATOMIC_RELEASE);
}
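/*
 * Editor's illustrative sketch (not part of jemalloc): the CAS-based
 * acquire/release pair used by extent_dss_extending_{start,finish}() above,
 * written against C11 <stdatomic.h>.  busy_acquire()/busy_release() are
 * hypothetical names, and the backoff is a plain spin rather than jemalloc's
 * spin_adaptive().
 */
#include <stdatomic.h>
#include <stdbool.h>
static void
busy_acquire(atomic_bool *busy) {
	for (;;) {
		bool expected = false;
		if (atomic_compare_exchange_weak_explicit(busy, &expected,
		    true, memory_order_acq_rel, memory_order_relaxed)) {
			return;
		}
		/* Lost the race; spin and retry (real code backs off adaptively). */
	}
}
static void
busy_release(atomic_bool *busy) {
	atomic_store_explicit(busy, false, memory_order_release);
}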
static void *
extent_dss_max_update(void *new_addr) {
/*
* Get the current end of the DSS as max_cur and assure that dss_max is
* up to date.
*/
void *max_cur = extent_dss_sbrk(0);
if (max_cur == (void *)-1) {
return NULL;
}
atomic_store_p(&dss_max, max_cur, ATOMIC_RELEASE);
/* Fixed new_addr can only be supported if it is at the edge of DSS. */
if (new_addr != NULL && max_cur != new_addr) {
return NULL;
}
return max_cur;
}
void *
extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit) {
extent_t *gap;
cassert(have_dss);
assert(size > 0);
assert(alignment > 0);
/*
* sbrk() uses a signed increment argument, so take care not to
* interpret a large allocation request as a negative increment.
*/
if ((intptr_t)size < 0) {
return NULL;
}
gap = extent_alloc(tsdn, arena);
if (gap == NULL) {
return NULL;
}
extent_dss_extending_start();
if (!atomic_load_b(&dss_exhausted, ATOMIC_ACQUIRE)) {
/*
* The loop is necessary to recover from races with other
* threads that are using the DSS for something other than
* malloc.
*/
while (true) {
void *max_cur = extent_dss_max_update(new_addr);
if (max_cur == NULL) {
goto label_oom;
}
/*
* Compute how much page-aligned gap space (if any) is
* necessary to satisfy alignment. This space can be
* recycled for later use.
*/
void *gap_addr_page = (void *)(PAGE_CEILING(
(uintptr_t)max_cur));
void *ret = (void *)ALIGNMENT_CEILING(
(uintptr_t)gap_addr_page, alignment);
size_t gap_size_page = (uintptr_t)ret -
(uintptr_t)gap_addr_page;
if (gap_size_page != 0) {
extent_init(gap, arena, gap_addr_page,
gap_size_page, false, NSIZES,
arena_extent_sn_next(arena),
extent_state_active, false, true, true);
}
/*
* Compute the address just past the end of the desired
* allocation space.
*/
void *dss_next = (void *)((uintptr_t)ret + size);
if ((uintptr_t)ret < (uintptr_t)max_cur ||
(uintptr_t)dss_next < (uintptr_t)max_cur) {
goto label_oom; /* Wrap-around. */
}
/* Compute the increment, including subpage bytes. */
void *gap_addr_subpage = max_cur;
size_t gap_size_subpage = (uintptr_t)ret -
(uintptr_t)gap_addr_subpage;
intptr_t incr = gap_size_subpage + size;
assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
size);
/* Try to allocate. */
void *dss_prev = extent_dss_sbrk(incr);
if (dss_prev == max_cur) {
/* Success. */
atomic_store_p(&dss_max, dss_next,
ATOMIC_RELEASE);
extent_dss_extending_finish();
if (gap_size_page != 0) {
extent_dalloc_gap(tsdn, arena, gap);
} else {
extent_dalloc(tsdn, arena, gap);
}
if (!*commit) {
*commit = pages_decommit(ret, size);
}
if (*zero && *commit) {
extent_hooks_t *extent_hooks =
EXTENT_HOOKS_INITIALIZER;
extent_t extent;
extent_init(&extent, arena, ret, size,
size, false, NSIZES,
extent_state_active, false, true,
true);
if (extent_purge_forced_wrapper(tsdn,
arena, &extent_hooks, &extent, 0,
size)) {
memset(ret, 0, size);
}
}
return ret;
}
/*
* Failure, whether due to OOM or a race with a raw
* sbrk() call from outside the allocator.
*/
if (dss_prev == (void *)-1) {
/* OOM. */
atomic_store_b(&dss_exhausted, true,
ATOMIC_RELEASE);
goto label_oom;
}
}
}
label_oom:
extent_dss_extending_finish();
extent_dalloc(tsdn, arena, gap);
return NULL;
}
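/*
 * Editor's illustrative sketch (not part of jemalloc): the break-pointer
 * arithmetic performed inside extent_alloc_dss() above, on plain integers.
 * dss_plan() and its parameters are hypothetical; page and alignment must be
 * powers of two.  Wrap-around checking is omitted here but performed by the
 * real code.
 */
#include <stddef.h>
#include <stdint.h>
static void
dss_plan(uintptr_t max_cur, size_t size, size_t alignment, size_t page,
    uintptr_t *ret, size_t *gap_size_page, intptr_t *incr) {
	/* First page boundary at or above the current break. */
	uintptr_t gap_addr_page = (max_cur + page - 1) & ~((uintptr_t)page - 1);
	/* Aligned start of the requested allocation. */
	*ret = (gap_addr_page + alignment - 1) & ~((uintptr_t)alignment - 1);
	/* Page-aligned gap that can be recycled as an extent later. */
	*gap_size_page = (size_t)(*ret - gap_addr_page);
	/*
	 * sbrk() increment: everything from the current break up to the end
	 * of the requested allocation, including sub-page gap bytes.
	 */
	*incr = (intptr_t)(*ret - max_cur) + (intptr_t)size;
}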
static bool
extent_in_dss_helper(void *addr, void *max) {
return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr <
(uintptr_t)max);
}
bool
extent_in_dss(void *addr) {
cassert(have_dss);
return extent_in_dss_helper(addr, atomic_load_p(&dss_max,
ATOMIC_ACQUIRE));
}
bool
extent_dss_mergeable(void *addr_a, void *addr_b) {
void *max;
cassert(have_dss);
if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b <
(uintptr_t)dss_base) {
return true;
}
max = atomic_load_p(&dss_max, ATOMIC_ACQUIRE);
return (extent_in_dss_helper(addr_a, max) ==
extent_in_dss_helper(addr_b, max));
}
void
extent_dss_boot(void) {
cassert(have_dss);
dss_base = extent_dss_sbrk(0);
atomic_store_b(&dss_extending, false, ATOMIC_RELAXED);
atomic_store_b(&dss_exhausted, dss_base == (void *)-1, ATOMIC_RELAXED);
atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED);
}
/******************************************************************************/
#define JEMALLOC_EXTENT_MMAP_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
/******************************************************************************/
/* Data. */
bool opt_retain =
#ifdef JEMALLOC_RETAIN
true
#else
false
#endif
;
/******************************************************************************/
void *
extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
bool *commit) {
void *ret = pages_map(new_addr, size, ALIGNMENT_CEILING(alignment,
PAGE), commit);
if (ret == NULL) {
return NULL;
}
assert(ret != NULL);
if (*commit) {
*zero = true;
}
return ret;
}
bool
extent_dalloc_mmap(void *addr, size_t size) {
if (!opt_retain) {
pages_unmap(addr, size);
}
return opt_retain;
}
#define JEMALLOC_HASH_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/jemalloc_preamble.h"
/*
* The hooks are a little bit screwy -- they're not genuinely exported in the
* sense that we want them available to end-users, but we do want them visible
* from outside the generated library, so that we can use them in test code.
*/
JEMALLOC_EXPORT
void (*hooks_arena_new_hook)() = NULL;
JEMALLOC_EXPORT
void (*hooks_libc_hook)() = NULL;
#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
static extent_node_t *
huge_node_get(const void *ptr)
{
extent_node_t *node;
node = chunk_lookup(ptr, true);
assert(!extent_node_achunk_get(node));
return (node);
}
static bool
huge_node_set(const void *ptr, extent_node_t *node)
{
assert(extent_node_addr_get(node) == ptr);
assert(!extent_node_achunk_get(node));
return (chunk_register(ptr, node));
}
static void
huge_node_unset(const void *ptr, const extent_node_t *node)
{
chunk_deregister(ptr, node);
}
void *
huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
tcache_t *tcache)
{
size_t usize;
usize = s2u(size);
if (usize == 0) {
/* size_t overflow. */
return (NULL);
}
return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
}
void *
huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
bool zero, tcache_t *tcache)
{
void *ret;
size_t usize;
extent_node_t *node;
bool is_zeroed;
/* Allocate one or more contiguous chunks for this request. */
usize = sa2u(size, alignment);
if (unlikely(usize == 0))
return (NULL);
assert(usize >= chunksize);
/* Allocate an extent node with which to track the chunk. */
node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
CACHELINE, false, tcache, true, arena);
if (node == NULL)
return (NULL);
/*
* Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
size, alignment, &is_zeroed)) == NULL) {
idalloctm(tsd, node, tcache, true);
return (NULL);
}
extent_node_init(node, arena, ret, size, is_zeroed, true);
if (huge_node_set(ret, node)) {
arena_chunk_dalloc_huge(arena, ret, size);
idalloctm(tsd, node, tcache, true);
return (NULL);
}
/* Insert node into huge. */
malloc_mutex_lock(&arena->huge_mtx);
ql_elm_new(node, ql_link);
ql_tail_insert(&arena->huge, node, ql_link);
malloc_mutex_unlock(&arena->huge_mtx);
if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed)
memset(ret, 0, size);
} else if (config_fill && unlikely(opt_junk_alloc))
memset(ret, 0xa5, size);
return (ret);
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{
if (config_fill && have_dss && unlikely(opt_junk_free)) {
/*
* Only bother junk filling if the chunk isn't about to be
* unmapped.
*/
if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
memset(ptr, 0x5a, usize);
}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif
static void
huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
size_t usize_max, bool zero)
{
size_t usize, usize_next;
extent_node_t *node;
arena_t *arena;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
bool pre_zeroed, post_zeroed;
/* Increase usize to incorporate extra. */
for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
<= oldsize; usize = usize_next)
; /* Do nothing. */
if (oldsize == usize)
return;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
pre_zeroed = extent_node_zeroed_get(node);
/* Fill if necessary (shrinking). */
if (oldsize > usize) {
size_t sdiff = oldsize - usize;
if (config_fill && unlikely(opt_junk_free)) {
memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
post_zeroed = false;
} else {
post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
ptr, CHUNK_CEILING(oldsize), usize, sdiff);
}
} else
post_zeroed = pre_zeroed;
malloc_mutex_lock(&arena->huge_mtx);
/* Update the size of the huge allocation. */
assert(extent_node_size_get(node) != usize);
extent_node_size_set(node, usize);
/* Update zeroed. */
extent_node_zeroed_set(node, post_zeroed);
malloc_mutex_unlock(&arena->huge_mtx);
arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);
/* Fill if necessary (growing). */
if (oldsize < usize) {
if (zero || (config_fill && unlikely(opt_zero))) {
if (!pre_zeroed) {
memset((void *)((uintptr_t)ptr + oldsize), 0,
usize - oldsize);
}
} else if (config_fill && unlikely(opt_junk_alloc)) {
memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
oldsize);
}
}
}
static bool
huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
{
extent_node_t *node;
arena_t *arena;
chunk_hooks_t chunk_hooks;
size_t cdiff;
bool pre_zeroed, post_zeroed;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
pre_zeroed = extent_node_zeroed_get(node);
chunk_hooks = chunk_hooks_get(arena);
assert(oldsize > usize);
/* Split excess chunks. */
cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
CHUNK_CEILING(usize), cdiff, true, arena->ind))
return (true);
if (oldsize > usize) {
size_t sdiff = oldsize - usize;
if (config_fill && unlikely(opt_junk_free)) {
huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
sdiff);
post_zeroed = false;
} else {
post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
CHUNK_CEILING(oldsize),
CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
}
} else
post_zeroed = pre_zeroed;
malloc_mutex_lock(&arena->huge_mtx);
/* Update the size of the huge allocation. */
extent_node_size_set(node, usize);
/* Update zeroed. */
extent_node_zeroed_set(node, post_zeroed);
malloc_mutex_unlock(&arena->huge_mtx);
/* Zap the excess chunks. */
arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);
return (false);
}
static bool
huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) {
extent_node_t *node;
arena_t *arena;
bool is_zeroed_subchunk, is_zeroed_chunk;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
malloc_mutex_lock(&arena->huge_mtx);
is_zeroed_subchunk = extent_node_zeroed_get(node);
malloc_mutex_unlock(&arena->huge_mtx);
/*
* Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
* that it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed_chunk = zero;
if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
&is_zeroed_chunk))
return (true);
malloc_mutex_lock(&arena->huge_mtx);
/* Update the size of the huge allocation. */
extent_node_size_set(node, usize);
malloc_mutex_unlock(&arena->huge_mtx);
if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed_subchunk) {
memset((void *)((uintptr_t)ptr + oldsize), 0,
CHUNK_CEILING(oldsize) - oldsize);
}
if (!is_zeroed_chunk) {
memset((void *)((uintptr_t)ptr +
CHUNK_CEILING(oldsize)), 0, usize -
CHUNK_CEILING(oldsize));
}
} else if (config_fill && unlikely(opt_junk_alloc)) {
memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
oldsize);
}
return (false);
}
bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
size_t usize_max, bool zero)
{
assert(s2u(oldsize) == oldsize);
/* Both allocations must be huge to avoid a move. */
if (oldsize < chunksize || usize_max < chunksize)
return (true);
if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
/* Attempt to expand the allocation in-place. */
if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max, zero))
return (false);
/* Try again, this time with usize_min. */
if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr,
oldsize, usize_min, zero))
return (false);
}
/*
* Avoid moving the allocation if the existing chunk size accommodates
* the new size.
*/
if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
zero);
return (false);
}
/* Attempt to shrink the allocation in-place. */
if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max))
return (huge_ralloc_no_move_shrink(ptr, oldsize, usize_max));
return (true);
}
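/*
 * Editor's illustrative sketch (not part of jemalloc): the order of in-place
 * strategies attempted by the (removed) huge_ralloc_no_move() above, with
 * CHUNK_CEILING() replaced by a hypothetical chunk_ceil() over a power-of-two
 * chunk size.  The enum names are invented; note that the real function falls
 * back to reporting "must move" when an attempted in-place expansion fails.
 */
#include <stddef.h>
typedef enum {
	RALLOC_TRY_EXPAND,	/* Grow in place first. */
	RALLOC_SAME,		/* Existing chunks already accommodate usize. */
	RALLOC_SHRINK,		/* Give back trailing chunks. */
	RALLOC_MOVE		/* No in-place option; copy to a new allocation. */
} ralloc_kind_t;
static size_t
chunk_ceil(size_t s, size_t chunk) {
	return (s + chunk - 1) & ~(chunk - 1);
}
static ralloc_kind_t
classify_ralloc(size_t oldsize, size_t usize_min, size_t usize_max,
    size_t chunk) {
	if (chunk_ceil(usize_max, chunk) > chunk_ceil(oldsize, chunk)) {
		return RALLOC_TRY_EXPAND;
	}
	if (chunk_ceil(oldsize, chunk) >= chunk_ceil(usize_min, chunk) &&
	    chunk_ceil(oldsize, chunk) <= chunk_ceil(usize_max, chunk)) {
		return RALLOC_SAME;
	}
	if (chunk_ceil(oldsize, chunk) > chunk_ceil(usize_max, chunk)) {
		return RALLOC_SHRINK;
	}
	return RALLOC_MOVE;
}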
static void *
huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache)
{
if (alignment <= chunksize)
return (huge_malloc(tsd, arena, usize, zero, tcache));
return (huge_palloc(tsd, arena, usize, alignment, zero, tcache));
}
void *
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
size_t alignment, bool zero, tcache_t *tcache)
{
void *ret;
size_t copysize;
/* Try to avoid moving the allocation. */
if (!huge_ralloc_no_move(ptr, oldsize, usize, usize, zero))
return (ptr);
/*
* usize and oldsize are different enough that we need to use a
* different size class. In that case, fall back to allocating new
* space and copying.
*/
ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero,
tcache);
if (ret == NULL)
return (NULL);
copysize = (usize < oldsize) ? usize : oldsize;
memcpy(ret, ptr, copysize);
isqalloc(tsd, ptr, oldsize, tcache);
return (ret);
}
void
huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
extent_node_t *node;
arena_t *arena;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
huge_node_unset(ptr, node);
malloc_mutex_lock(&arena->huge_mtx);
ql_remove(&arena->huge, node, ql_link);
malloc_mutex_unlock(&arena->huge_mtx);
huge_dalloc_junk(extent_node_addr_get(node),
extent_node_size_get(node));
arena_chunk_dalloc_huge(extent_node_arena_get(node),
extent_node_addr_get(node), extent_node_size_get(node));
idalloctm(tsd, node, tcache, true);
}
arena_t *
huge_aalloc(const void *ptr)
{
return (extent_node_arena_get(huge_node_get(ptr)));
}
size_t
huge_salloc(const void *ptr)
{
size_t size;
extent_node_t *node;
arena_t *arena;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
malloc_mutex_lock(&arena->huge_mtx);
size = extent_node_size_get(node);
malloc_mutex_unlock(&arena->huge_mtx);
return (size);
}
prof_tctx_t *
huge_prof_tctx_get(const void *ptr)
{
prof_tctx_t *tctx;
extent_node_t *node;
arena_t *arena;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
malloc_mutex_lock(&arena->huge_mtx);
tctx = extent_node_prof_tctx_get(node);
malloc_mutex_unlock(&arena->huge_mtx);
return (tctx);
}
void
huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
extent_node_t *node;
arena_t *arena;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
malloc_mutex_lock(&arena->huge_mtx);
extent_node_prof_tctx_set(node, tctx);
malloc_mutex_unlock(&arena->huge_mtx);
}
void
huge_prof_tctx_reset(const void *ptr)
{
huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
}